We have a huge legacy code base which is multithreaded and uses vectors extensively. To cut down the time spent in dynamic memory allocation, we are moving to memory pools. The plan is to use Boost's small_vector with a custom allocator; the custom allocator creates a thread-local pool per container type. I have implemented a custom allocator based on this idea and tested it. For some reason, the code gets stuck in an infinite loop inside the find_prev method of Boost's simple_segregated_storage. There are lots of places where containers are nested, like vector<vector<...>> etc. Is this the right way of defining the allocator?
template<typename T, typename allocatorType>
class customAllocator
{
public:
static thread_local allocatorType *_allocator;
typedef T value_type;
typedef allocatorType allocator_Type;
template <class X> struct rebind
{
typedef customAllocator<X, allocatorType> other;
};
customAllocator()
{
_allocator = new allocatorType;
assert(_allocator);
return;
}
~customAllocator()
{
delete _allocator;
_allocator = nullptr;
return;
}
template<class X, class Y> customAllocator(const customAllocator<X, Y>& other)
{
_allocator = other._allocator;
return;
}
template<class X, class Y> customAllocator(customAllocator<X, Y>&& other)
{
_allocator = other._allocator;
other._allocator = nullptr;
return;
}
template<class X, class Y> customAllocator& operator=(const customAllocator<X, Y>& other)
{
_allocator = other._allocator;
return *this;
}
template<class X, class Y> customAllocator& operator=(customAllocator<X, Y>&& other)
{
_allocator = other._allocator;
other._allocator = nullptr;
return *this;
}
T* allocate(size_t n)
{
return _allocator->allocate(n * sizeof(T));
}
void deallocate(T* ptr, size_t n)
{
_allocator->deallocate(ptr, n);
return;
}
template<class X, class Y> bool operator==(const customAllocator<X, Y>& other) const noexcept
{ return (this->_allocator == other._allocator); }
template<class X, class Y> bool operator!=(const customAllocator<X, Y>& other) const noexcept
{ return !(this->_allocator == other._allocator); }
};
template <typename T1, typename T2>
thread_local T2 *customAllocator<T1, T2>::_allocator = nullptr;
template <typename T>
using smallVector = boost::container::small_vector<
T,
DEFAULT_SMALL_VECTOR_LENGTH,
customAllocator<T,
boost::pool_allocator<
T,
boost::default_user_allocator_new_delete,
boost::details::pool::null_mutex,
2,
4
>>>;
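For comparison, here is a minimal sketch of the shape such an allocator could take when the pool is owned by a function-local thread_local object rather than by the allocator instances themselves, so that no constructor or destructor ever creates or deletes the shared pool and equal allocators really are interchangeable. This is an illustration only, not the Boost-based code above; PoolType is a hypothetical pool class assumed to expose allocate/deallocate in bytes.

#include <cstddef>

// Sketch: one pool per thread and per specialization, owned by a
// function-local thread_local object. PoolType is an assumed interface:
//   void* allocate(std::size_t bytes);
//   void  deallocate(void* p, std::size_t bytes);
// Note: memory must be allocated and freed on the same thread for this
// scheme to be valid.
template <typename T, typename PoolType>
class tlPoolAllocator
{
public:
    typedef T value_type;

    template <class X> struct rebind { typedef tlPoolAllocator<X, PoolType> other; };

    tlPoolAllocator() noexcept {}
    template <class X, class Y>
    tlPoolAllocator(const tlPoolAllocator<X, Y>&) noexcept {}

    T* allocate(std::size_t n)
    { return static_cast<T*>(pool().allocate(n * sizeof(T))); }

    void deallocate(T* p, std::size_t n)
    { pool().deallocate(p, n * sizeof(T)); }

    // All instances of one specialization on one thread share the same pool,
    // so they are interchangeable and compare equal.
    bool operator==(const tlPoolAllocator&) const noexcept { return true; }
    bool operator!=(const tlPoolAllocator&) const noexcept { return false; }

private:
    static PoolType& pool()
    {
        static thread_local PoolType instance;  // created on first use, per thread
        return instance;
    }
};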
I'm looking into a solution for building containers which track the stored size of their elements in addition to the basic functions.
So far I haven't seen a solution which doesn't create a huge amount of boilerplate code for each invalidating member of the container. It also assumes that stored elements cannot change size after being stored.
Unless standard containers have some feature that allows injecting such behaviour? The following example should be a working one, albeit abridged for brevity. The declarations used are:
typedef uint8_t Byte;
typedef Byte PacketId;
template <class T>
struct CollectionTraits {
typedef T collection_type;
typedef typename collection_type::value_type value_type;
typedef typename collection_type::size_type size_type;
typedef typename collection_type::iterator iterator;
typedef typename collection_type::reference reference;
typedef typename collection_type::const_iterator const_iterator;
const_iterator begin() const { return _collection.begin(); }
const_iterator end() const { return _collection.end(); }
iterator begin() { return _collection.begin(); }
iterator end() { return _collection.end(); }
size_type size() const { return _collection.size(); }
protected:
T _collection;
};
struct Packet : CollectionTraits<std::vector<Byte>>
{
PacketId id;
};
The container itself:
struct PacketList : CollectionTraits<std::deque<Packet>>
{
public:
typedef Packet::size_type data_size;
void clear() { _collection.clear(); _total_size = 0; }
data_size total_size() const { return _total_size; }
void push_back(const Packet& v) {
_collection.push_back(v);
_add(v);
}
void push_back(Packet&& v) {
_add(v); // record the size before moving from v
_collection.push_back(std::move(v));
}
void push_front(const Packet& v) {
_collection.push_front(v);
_add(v);
}
void push_front(Packet&& v) {
_add(v); // record the size before moving from v
_collection.push_front(std::move(v));
}
void pop_back() {
_remove(_collection.back());
_collection.pop_back();
}
void erase(const_iterator first, const_iterator last) {
for(auto it = first; it != last; ++it) _remove(*it);
_collection.erase(first, last);
}
PacketList() : _total_size(0) {}
PacketList(const PacketList& other) : CollectionTraits<std::deque<Packet>>(other), _total_size(other._total_size) {}
private:
void _add(const Packet& v) { _total_size += v.size(); }
void _remove(const Packet& v) { _total_size -= v.size(); }
data_size _total_size;
};
The resulting interface should be similar to that of a standard container. Is there a way to avoid this amount of repeated code? Is there some standard solution for this problem?
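For comparison, one possible direction (a sketch of my own, not a standard facility) is to write the bookkeeping once in a generic wrapper over the CollectionTraits base above, so each tracked container becomes an alias. It assumes, as above, that elements expose size() and never change size after insertion; only a few members are shown.

#include <cstddef>
#include <deque>
#include <utility>

// Sketch: generic size tracking written once, reusing CollectionTraits<T>
// and Packet from the snippets above.
template <class Container>
class SizeTracking : public CollectionTraits<Container>
{
public:
    typedef typename Container::value_type value_type;
    typedef std::size_t data_size;

    data_size total_size() const { return _total_size; }

    void push_back(value_type v) {
        _total_size += v.size();                    // account before the move
        this->_collection.push_back(std::move(v));
    }
    void pop_back() {
        _total_size -= this->_collection.back().size();
        this->_collection.pop_back();
    }
    void clear() {
        this->_collection.clear();
        _total_size = 0;
    }

private:
    data_size _total_size = 0;
};

// Usage: the Packet-specific boilerplate reduces to an alias.
typedef SizeTracking<std::deque<Packet>> TrackedPacketList;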
So I was playing around with C++11 variadics, and I wanted to create a thing called CallClass: basically a class that wraps a function for a later call, once all arguments are set (truly, I have no idea if it can be useful):
#include <tuple>
template <typename OBJ,typename F,typename... VARGS>
class CallClass
{
public:
CallClass(OBJ& object,F callFunction)
:_object(&object),_func(callFunction)
{ }
CallClass(const CallClass& other)
:_func_args(other._func_args)
,_object(other._object)
,_func(other._func)
{ }
template <size_t INDEX>
auto get(){ return std::get<INDEX>(_func_args); }
template <size_t INDEX,typename T>
void set(const T& val){ std::get<INDEX>(_func_args) = val; }
template <size_t INDEX,typename T>
void set(T&& val){ std::get<INDEX>(_func_args) = val; }
auto Call()
{
//throws segmentation Fault Here
return InnerCall<0>(_func_args);
}
virtual ~CallClass() {}
protected:
private:
std::tuple<VARGS...> _func_args;
OBJ* _object;
F _func;
template <size_t INDEX,typename... ARGS>
auto InnerCall(std::tuple<VARGS...>& tup,ARGS... args)
{
auto arg = std::get<INDEX>(tup);
return InnerCall<INDEX + 1>(tup,args...,arg);
}
template <size_t INDEX,VARGS...>
auto InnerCall(std::tuple<VARGS...>& tup,VARGS... args)
{
return (_object->*_func)(args...);
}
};
Now when I try to compile (using the Code::Blocks IDE, configured to use MinGW on Windows), the compiler itself prints "Segmentation Fault". Anybody have any ideas?
Usage:
class obj{
public:
obj(int a)
:_a(a)
{ }
virtual ~obj() {}
int add(int b,int c){
return _a + b + c;
}
private:
int _a;
};
#include <iostream> // for std::cout / std::endl
int main(){
obj ob(6);
CallClass<obj,decltype(&obj::add),int,int> callAdd(ob,&obj::add); // note the & to form a member-function pointer
callAdd.set<0,int>(5);
callAdd.set<1,int>(7);
std::cout << "result is " << callAdd.Call() << std::endl;
return 0;
}
After a bit of a search I stumbled upon a similar issue, in a way.
Apparently the way I'm unpacking the tuple is the problem, so I decided to use a different approach, based on a linked answer, with a few changes to suit my needs:
namespace detail
{
template <typename OBJ,typename F, typename Tuple, bool Done, int Total, int... N>
struct call_impl
{
static auto call(OBJ& obj,F f, Tuple && t)
{
return call_impl<OBJ,F, Tuple, Total == 1 + sizeof...(N), Total, N..., sizeof...(N)>::call(obj,f, std::forward<Tuple>(t));
}
};
template <typename OBJ,typename F, typename Tuple, int Total, int... N>
struct call_impl<OBJ,F, Tuple, true, Total, N...>
{
static auto call(OBJ& obj,F f, Tuple && t)
{
return (obj.*f)(std::get<N>(std::forward<Tuple>(t))...);
}
};
}
// user invokes this
template <typename OBJ,typename F, typename Tuple>
auto call(OBJ& obj,F f, Tuple && t)
{
typedef typename std::decay<Tuple>::type ttype;
return detail::call_impl<OBJ,F, Tuple, 0 == std::tuple_size<ttype>::value, std::tuple_size<ttype>::value>::call(obj,f, std::forward<Tuple>(t));
}
and changed Call():
auto Call()
{
std::tuple<VARGS...> func_args = _func_args;
return call(*_object,_func, std::move(func_args));
}
I will probably make a few more changes, like passing the tuple as a reference, and making the structs a part of my class.
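For reference, on C++14 the same expansion can be written with std::index_sequence instead of the hand-rolled recursion; a minimal sketch (the names call_with_tuple and call_with_tuple_impl are illustrative, not taken from the code above):

#include <tuple>
#include <utility>

// Sketch: invoke a member function with arguments taken from a tuple,
// using std::index_sequence to generate the pack of indices.
template <typename Obj, typename F, typename Tuple, std::size_t... I>
auto call_with_tuple_impl(Obj& obj, F f, Tuple&& t, std::index_sequence<I...>)
{
    return (obj.*f)(std::get<I>(std::forward<Tuple>(t))...);
}

template <typename Obj, typename F, typename Tuple>
auto call_with_tuple(Obj& obj, F f, Tuple&& t)
{
    using indices = std::make_index_sequence<
        std::tuple_size<typename std::decay<Tuple>::type>::value>;
    return call_with_tuple_impl(obj, f, std::forward<Tuple>(t), indices{});
}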
I'm new to lambda functions in C++ and am trying to make a simple one, but I have some problems. I've tried to make a heterogeneous container which includes stacks, queues, and lists, and one of the exercises is to make a lambda function which checks whether an element satisfies a specific condition, defined as:
using Condition = bool (*)(T const&);
So here is a piece of my heterogeneous container. For example, for stacks:
template <typename T>
using Condition1 = bool (*)(T const&);
template <typename T>
class LinkedStack {
private:
StackElement<T>* top;
public:
LinkedStack();
LinkedStack(LinkedStack const&);
LinkedStack& operator=(LinkedStack const&);
bool empty() const;
bool member(T const& x);
T peek() const;
void push(T const&);
T pop();
~LinkedStack();
};
template<typename T,typename Condition1>
bool q_filter(Condition1 func,LinkedStack<T>& s){
LinkedStack<T> tmp;
tmp = s;
if((tmp).empty())
return false;
while(!tmp.empty()){
if (func(tmp.peek()))
return true;
else
tmp.pop();
}
return false;
}
The stack object (which is needed so the stack can act as an object in the heterogeneous container):
template <typename T>
class Object {
public:
using Condition = bool (*)(T const&);
virtual bool insert(T const&) = 0;
virtual bool remove(T&) = 0;
virtual bool member(T const&) = 0;
virtual int l_size() = 0;
virtual void sort() = 0;
virtual bool special_condition(Condition) = 0;
virtual void print(ostream& os) const = 0;
virtual ~Object() {}
};
template <typename T>
class StackObject : public Object<T>, private LinkedStack<T> {
public:
using Condition = bool (*)(T const&);
// insertion
bool insert(T const& x) {
LinkedStack<T>::push(x);
return true;
}
// removal
bool remove(T& x) {
if (LinkedStack<T>::empty())
return false;
x = LinkedStack<T>::pop();
return true;
}
// membership check
bool member(T const& x){
return LinkedStack<T>::member(x);
}
int l_size() {
return my_size(*this);
}
// output
void print(ostream& os) const {
os << *this;
}
void sort(){
s_sort(*this);
}
bool special_condition(Condition c){
return q_filter(c,*this);
}
};
and main-function:
int main(){
QueueStackList qsl;
qsl.read_from_file();
(*(qsl.begin()))->special_condition([](int x) -> bool { return x%2 != 0; });
return 0;
}
QueueStackList is implemented like a linked list, and qsl.begin() returns an iterator to the first element of the heterogeneous list.
When I compile, I get these errors:
invalid user-defined conversion from 'main()::<lambda(int)>' to 'Object<int>::Condition {aka bool (*)(const int&)}' [-fpermissive]|
candidate is: main()::<lambda(int)>::operator bool (*)(int)() const <near match>|
no known conversion for implicit 'this' parameter from 'bool (*)(int)' to 'Object<int>::Condition {aka bool (*)(const int&)}'|
I really don't know what these errors mean.
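For what it's worth, the message says the lambda's call signature is bool(int), while Condition for int is bool (*)(const int&); a capture-less lambda converts to that function pointer only when the parameter types match exactly. A minimal sketch of a matching call (same qsl usage as above, assumed):

// The lambda's parameter must be `int const&` so that the capture-less
// lambda converts to bool (*)(int const&), which is Object<int>::Condition.
(*(qsl.begin()))->special_condition([](int const& x) -> bool { return x % 2 != 0; });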
I want to replace some code that uses boost::interprocess shared memory. One advantage of shared memory is that you can impose limits on the maximum amount of memory it can use. I'm looking for a custom allocator, based off std::allocator that can do this.
Only particular classes in the program will use this allocator; everything else uses the default std::allocator and is limited only by available RAM.
I'm trying to write one of my own, but I'm running into issues, mainly with how to share state among the allocator copies that are created by STL containers. State includes the number of free bytes remaining and the maximum size the allocator can use. I thought I could get away with making them thread_local, but then several different instances of the same class would all allocate and deallocate from the same limited heap, which is not what I want. I'm beginning to think it's not possible, hence this question. Neither contiguous allocation nor performance is a major requirement for now.
The hard limit on the memory size cannot be a template parameter either; it's read from a config file.
Edit: The issue with sharing state is that some containers call the default constructor of the allocator type. Obviously this constructor cannot easily know anything about the outside world; even if a shared_ptr is used, it will be nullptr-initialised. For example, look at the source code for std::string::clear.
g++ (Ubuntu 4.8.4-2ubuntu1~14.04) 4.8.4
After following the hints above, I came up with this, which seems to work OK for POD types, but things fall apart when I try to make a Vector or a Map that uses String:
#include <string>
#include <vector>
#include <map>
#include <atomic>
#include <memory>
struct SharedState
{
SharedState()
: m_maxSize(0),
m_bytesRemaining(0)
{
}
SharedState(std::size_t maxSize)
: m_maxSize(maxSize),
m_bytesRemaining(maxSize)
{
}
void allocate(std::size_t bytes) const {
if (m_bytesRemaining < bytes) {
throw std::bad_alloc();
}
m_bytesRemaining -= bytes;
}
void deallocate(std::size_t bytes) const {
m_bytesRemaining += bytes;
}
std::size_t getBytesRemaining() const {
return m_bytesRemaining;
}
const std::size_t m_maxSize;
mutable std::atomic<std::size_t> m_bytesRemaining;
};
// --------------------------------------
template <typename T>
class BaseLimitedAllocator : public std::allocator<T>
{
public:
using size_type = std::size_t;
using pointer = T*;
using const_pointer = const T*;
using propagate_on_container_move_assignment = std::true_type;
template <typename U>
struct rebind
{
typedef BaseLimitedAllocator<U> other;
};
BaseLimitedAllocator() noexcept = default;
BaseLimitedAllocator(std::size_t maxSize) noexcept
: m_state(new SharedState(maxSize)) {
}
BaseLimitedAllocator(const BaseLimitedAllocator& other) noexcept {
m_state = other.m_state;
}
template <typename U>
BaseLimitedAllocator(const BaseLimitedAllocator<U>& other) noexcept {
m_state = other.m_state;
}
pointer allocate(size_type n, const void* hint = nullptr) {
m_state->allocate(n * sizeof(T));
return std::allocator<T>::allocate(n, hint);
}
void deallocate(pointer p, size_type n) {
std::allocator<T>::deallocate(p, n);
m_state->deallocate(n * sizeof(T));
}
public:
std::shared_ptr<SharedState> m_state; // This must be public for the rebind copy constructor.
};
template <typename T, typename U>
inline bool operator==(const BaseLimitedAllocator<T>&, const BaseLimitedAllocator<U>&) {
return true;
}
template <typename T, typename U>
inline bool operator!=(const BaseLimitedAllocator<T>&, const BaseLimitedAllocator<U>&) {
return false;
}
struct LimitedAllocator : public BaseLimitedAllocator<char>
{
LimitedAllocator(std::size_t maxSize)
: BaseLimitedAllocator<char>(maxSize) {
}
template <typename U>
using Other = typename BaseLimitedAllocator<char>::template rebind<U>::other;
};
// -----------------------------------------
// Example usage:
class SomeClass
{
public:
using String = std::basic_string<char, std::char_traits<char>, LimitedAllocator::Other<char>>;
template <typename T>
using Vector = std::vector<T, LimitedAllocator::Other<T>>;
template <typename K, typename V>
using Map = std::map<K, V, std::less<K>, LimitedAllocator::Other<std::pair<const K, V>>>;
SomeClass()
: allocator(256),
s(allocator),
v(allocator),
m(std::less<int>(), allocator) // Cannot only specify the allocator. Annoying.
{
}
const LimitedAllocator allocator;
String s;
Vector<int> v;
Map<int, String> m;
};
In std::map, this ends up causing an error when the first object is constructed. I've checked the debugger, and I see that free_list::init() creates the consecutive memory addresses correctly. I'm aware this allocator cannot be used with vector or other contiguous containers; it's only meant to work with the node-based containers.
I get a run-time error from this in xutility (in VC12), at line 158:
_Container_proxy *_Parent_proxy = _Parent->_Myproxy;
Checking the debugger, it appears that _Parent was never initialized, bringing about the 0xC0000005 run-time error. Why and how it didn't get initialized, and why this occurred while the first object was being constructed (after std::map did 3 separate allocations), I do not know.
I would like this to work with std::map, std::list, and the other node-based containers; I am not worried about whether it performs well with std::vector, etc.
#include <algorithm>
class free_list {
public:
free_list() : m_next(nullptr) {}
free_list(free_list&& other)
: m_next(other.m_next) {
other.m_next = nullptr;
}
free_list(void* data, std::size_t num_elements, std::size_t element_size) {
init(data, num_elements, element_size);
}
free_list& operator=(free_list&& other) {
m_next = other.m_next;
other.m_next = nullptr;
return *this;
}
void init(void* data, std::size_t num_elements, std::size_t element_size) {
union building {
void* as_void;
char* as_char;
free_list* as_self;
};
building b;
b.as_void = data;
m_next = b.as_self;
b.as_char += element_size;
free_list* runner = m_next;
for (std::size_t s = 1; s < num_elements; ++s) {
runner->m_next = b.as_self;
runner = runner->m_next;
b.as_char += element_size;
}
runner->m_next = nullptr;
}
free_list* obtain() {
if (m_next == nullptr) {
return nullptr;
}
free_list* head = m_next;
m_next = head->m_next;
return head;
}
void give_back(free_list* ptr) {
ptr->m_next = m_next;
m_next = ptr;
}
free_list* m_next;
};
template<class T>
class pool_alloc {
typedef pool_alloc<T> myt;
public:
typedef std::size_t size_type;
typedef std::ptrdiff_t difference_type;
typedef T value_type;
typedef T& reference;
typedef const T& const_reference;
typedef T* pointer;
typedef const T* const_pointer;
typedef std::false_type propagate_on_container_copy_assignment;
typedef std::true_type propagate_on_container_move_assignment;
typedef std::true_type propagate_on_container_swap;
template<class U> struct rebind {
typedef pool_alloc<U> other;
};
~pool_alloc() {
destroy();
}
pool_alloc() : data(nullptr), fl(), capacity(4096) {
}
pool_alloc(size_type capacity) : data(nullptr), fl(), capacity(capacity) {}
pool_alloc(const myt& other)
: data(nullptr), fl(), capacity(other.capacity) {}
pool_alloc(myt&& other)
: data(other.data), fl(std::move(other.fl)), capacity(other.capacity) {
other.data = nullptr;
}
template<class U>
pool_alloc(const pool_alloc<U>& other)
: data(nullptr), fl(), capacity(other.max_size()) {}
myt& operator=(const myt& other) {
destroy();
capacity = other.capacity;
return *this;
}
myt& operator=(myt&& other) {
destroy();
data = other.data;
other.data = nullptr;
capacity = other.capacity;
fl = std::move(other.fl);
return *this;
}
static pointer address(reference ref) {
return &ref;
}
static const_pointer address(const_reference ref) {
return &ref;
}
size_type max_size() const {
return capacity;
}
pointer allocate(size_type) {
if (data == nullptr) create();
return reinterpret_cast<pointer>(fl.obtain());
}
void deallocate(pointer ptr, size_type) {
fl.give_back(reinterpret_cast<free_list*>(ptr));
}
template<class... Args>
static void construct(pointer ptr, Args&&... args) {
::new (ptr) T(std::forward<Args>(args)...);
}
static void destroy(pointer ptr) {
ptr->~T();
}
bool operator==(const myt& other) const {
return reinterpret_cast<char*>(data) ==
reinterpret_cast<char*>(other.data);
}
bool operator!=(const myt& other) const {
return !operator==(other);
}
private:
void create() {
data = ::operator new(capacity * sizeof(value_type));
fl.init(data, capacity, sizeof(value_type));
}
void destroy() {
::operator delete(data);
data = nullptr;
}
void* data;
free_list fl;
size_type capacity;
};
template<>
class pool_alloc < void > {
public:
template <class U> struct rebind { typedef pool_alloc<U> other; };
typedef void* pointer;
typedef const void* const_pointer;
typedef void value_type;
};
The problem comes when std::pair is being constructed (in MSVC12 utility at line 214):
template<class _Other1,
class _Other2,
class = typename enable_if<is_convertible<_Other1, _Ty1>::value
&& is_convertible<_Other2, _Ty2>::value,
void>::type>
pair(_Other1&& _Val1, _Other2&& _Val2)
_NOEXCEPT_OP((is_nothrow_constructible<_Ty1, _Other1&&>::value
&& is_nothrow_constructible<_Ty2, _Other2&&>::value))
: first(_STD forward<_Other1>(_Val1)),
second(_STD forward<_Other2>(_Val2))
{ // construct from moved values
}
Even after stepping in, the run-time error occurs, the same as described above with _Parent not being initialized.
I was able to answer my own question through extensive debugging. Apparently, VC12's std::map implementation will, at least at times, rebind the _Alnod allocator (the permanent allocator that stays in scope for the life of the map and is used to allocate and deallocate the map's nodes, i.e. what I'd expect to actually call allocate() and deallocate()) into an _Alproxy, a temporary allocator which uses allocate() to create some sort of object called _Mproxy (or something like that). The problem is that VC12's implementation then lets _Alproxy go out of scope while still expecting the pointer to the allocated object to remain valid. It is clear, then, that I have to use ::operator new and ::operator delete for an object like _Mproxy: using a memory pool that goes out of scope while a pointer into it remains live is what causes the crash.
I came up with what I suppose could be called a dirty trick, a test that is performed when copy-constructing or copy-assigning an allocator to another allocator type:
template<class U>
pool_alloc(const pool_alloc<U>& other)
: data(nullptr), fl(), capacity(other.max_size()), use_data(true) {
if (sizeof(T) < sizeof(U)) use_data = false;
}
I added the bool member use_data to the allocator class: if it is true, the memory pool is used; if it is false, ::operator new and ::operator delete are used. By default it is true. The question of its value arises when the allocator gets rebound to another allocator type whose template parameter is smaller than that of the source allocator; in that case, use_data is set to false. Because this _Mproxy object (or whatever it's called) is rather small, this fix seems to work, even when using std::set with char as the element type.
I've tested this using std::set with type char in both VC12 and GCC 4.8.1 in 32-bit and have found that in both cases it works. When allocating and deallocating the nodes in both cases, the memory pool is used.
Here is the full source code:
#include <algorithm>
class free_list {
public:
free_list() : m_next(nullptr) {}
free_list(free_list&& other)
: m_next(other.m_next) {
other.m_next = nullptr;
}
free_list(void* data, std::size_t num_elements, std::size_t element_size) {
init(data, num_elements, element_size);
}
free_list& operator=(free_list&& other) {
if (this != &other) {
m_next = other.m_next;
other.m_next = nullptr;
}
return *this;
}
void init(void* data, std::size_t num_elements, std::size_t element_size) {
union building {
void* as_void;
char* as_char;
free_list* as_self;
};
building b;
b.as_void = data;
m_next = b.as_self;
b.as_char += element_size;
free_list* runner = m_next;
for (std::size_t s = 1; s < num_elements; ++s) {
runner->m_next = b.as_self;
runner = runner->m_next;
b.as_char += element_size;
}
runner->m_next = nullptr;
}
free_list* obtain() {
if (m_next == nullptr) {
return nullptr;
}
free_list* head = m_next;
m_next = head->m_next;
return head;
}
void give_back(free_list* ptr) {
ptr->m_next = m_next;
m_next = ptr;
}
free_list* m_next;
};
template<class T>
class pool_alloc {
typedef pool_alloc<T> myt;
public:
typedef std::size_t size_type;
typedef std::ptrdiff_t difference_type;
typedef T value_type;
typedef T& reference;
typedef const T& const_reference;
typedef T* pointer;
typedef const T* const_pointer;
typedef std::false_type propagate_on_container_copy_assignment;
typedef std::true_type propagate_on_container_move_assignment;
typedef std::true_type propagate_on_container_swap;
myt select_on_container_copy_construction() const {
return *this;
}
template<class U> struct rebind {
typedef pool_alloc<U> other;
};
~pool_alloc() {
clear();
}
pool_alloc() : data(nullptr), fl(), capacity(4096), use_data(true) {}
pool_alloc(size_type capacity) : data(nullptr), fl(),
capacity(capacity), use_data(true) {}
pool_alloc(const myt& other)
: data(nullptr), fl(), capacity(other.capacity),
use_data(other.use_data) {}
pool_alloc(myt&& other)
: data(other.data), fl(std::move(other.fl)), capacity(other.capacity),
use_data(other.use_data) {
other.data = nullptr;
}
template<class U>
pool_alloc(const pool_alloc<U>& other)
: data(nullptr), fl(), capacity(other.max_size()), use_data(true) {
if (sizeof(T) < sizeof(U)) use_data = false;
}
myt& operator=(const myt& other) {
if (*this != other) {
clear();
capacity = other.capacity;
use_data = other.use_data;
}
return *this;
}
myt& operator=(myt&& other) {
if (*this != other) {
clear();
data = other.data;
other.data = nullptr;
capacity = other.capacity;
use_data = other.use_data;
fl = std::move(other.fl);
}
return *this;
}
template<class U>
myt& operator=(const pool_alloc<U>& other) {
if (this != reinterpret_cast<const myt*>(&other)) {
capacity = other.max_size();
if (sizeof(T) < sizeof(U))
use_data = false;
else
use_data = true;
}
return *this;
}
static pointer address(reference ref) {
return &ref;
}
static const_pointer address(const_reference ref) {
return &ref;
}
size_type max_size() const {
return capacity;
}
pointer allocate(size_type) {
if (use_data) {
if (data == nullptr) create();
return reinterpret_cast<pointer>(fl.obtain());
} else {
return reinterpret_cast<pointer>(::operator new(sizeof(T)));
}
}
void deallocate(pointer ptr, size_type) {
if (use_data) {
fl.give_back(reinterpret_cast<free_list*>(ptr));
} else {
::operator delete(reinterpret_cast<void*>(ptr));
}
}
template<class... Args>
static void construct(pointer ptr, Args&&... args) {
::new ((void*)ptr) value_type(std::forward<Args>(args)...);
}
static void destroy(pointer ptr) {
ptr->~value_type();
}
bool operator==(const myt& other) const {
return reinterpret_cast<char*>(data) ==
reinterpret_cast<char*>(other.data);
}
bool operator!=(const myt& other) const {
return !operator==(other);
}
private:
void create() {
size_type size = sizeof(value_type) < sizeof(free_list*) ?
sizeof(free_list*) : sizeof(value_type);
data = ::operator new(capacity * size);
fl.init(data, capacity, size);
}
void clear() {
::operator delete(data);
data = nullptr;
}
void* data;
free_list fl;
size_type capacity;
bool use_data;
};
template<>
class pool_alloc < void > {
public:
template <class U> struct rebind { typedef pool_alloc<U> other; };
typedef void* pointer;
typedef const void* const_pointer;
typedef void value_type;
};
template<class Container, class Alloc>
void change_capacity(Container& c, typename Alloc::size_type new_capacity) {
Container temp(c, Alloc(new_capacity));
c = std::move(temp);
}
Since the allocator does not grow automatically (I don't know how to make such a thing), I have added the change_capacity() function.
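For illustration, a minimal usage sketch of the kind of test described above (the element type, the capacities, and the local names are arbitrary, and a C++11 standard library with allocator-extended container constructors is assumed):

#include <set>
#include <functional>

int main() {
    typedef std::set<char, std::less<char>, pool_alloc<char>> CharSet;
    std::less<char> cmp;
    pool_alloc<char> alloc(128);     // pool of 128 nodes
    CharSet s(cmp, alloc);
    for (char c = 'a'; c <= 'z'; ++c)
        s.insert(c);                 // node allocations come from the pool
    // Rebuild the set with a larger pool; the template arguments are explicit
    // because Alloc cannot be deduced from the size argument.
    change_capacity<CharSet, pool_alloc<char>>(s, 1024);
    return 0;
}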