// boost/lockfree/stack.hpp — lock-free stack implementation (Boost.Lockfree)
1// Copyright (C) 2008, 2009, 2010, 2011 Tim Blechmann
2//
3// Distributed under the Boost Software License, Version 1.0. (See
4// accompanying file LICENSE_1_0.txt or copy at
5// http://www.boost.org/LICENSE_1_0.txt)
6
7#ifndef BOOST_LOCKFREE_STACK_HPP_INCLUDED
8#define BOOST_LOCKFREE_STACK_HPP_INCLUDED
9
10#include <boost/assert.hpp>
11#include <boost/checked_delete.hpp>
12#include <boost/integer_traits.hpp>
13#include <boost/noncopyable.hpp>
14#include <boost/static_assert.hpp>
15#include <boost/tuple/tuple.hpp>
16#include <boost/type_traits/has_trivial_assign.hpp>
17#include <boost/type_traits/has_trivial_destructor.hpp>
18
19#include <boost/lockfree/detail/atomic.hpp>
20#include <boost/lockfree/detail/copy_payload.hpp>
21#include <boost/lockfree/detail/freelist.hpp>
22#include <boost/lockfree/detail/parameter.hpp>
23#include <boost/lockfree/detail/tagged_ptr.hpp>
24
25namespace boost {
26namespace lockfree {
27namespace detail {
28
// Boost.Parameter signature describing the optional policy arguments
// (allocator<> and capacity<>) accepted by boost::lockfree::stack; the
// class template binds its A0..A2 parameters against this signature.
typedef parameter::parameters<boost::parameter::optional<tag::allocator>,
                              boost::parameter::optional<tag::capacity>
                             > stack_signature;
32
33}
34
35/** The stack class provides a multi-writer/multi-reader stack, pushing and popping is lock-free,
36 * construction/destruction has to be synchronized. It uses a freelist for memory management,
37 * freed nodes are pushed to the freelist and not returned to the OS before the stack is destroyed.
38 *
39 * \b Policies:
40 *
41 * - \c boost::lockfree::fixed_sized<>, defaults to \c boost::lockfree::fixed_sized<false> <br>
42 * Can be used to completely disable dynamic memory allocations during push in order to ensure lockfree behavior.<br>
43 * If the data structure is configured as fixed-sized, the internal nodes are stored inside an array and they are addressed
44 * by array indexing. This limits the possible size of the stack to the number of elements that can be addressed by the index
45 * type (usually 2**16-2), but on platforms that lack double-width compare-and-exchange instructions, this is the best way
46 * to achieve lock-freedom.
47 *
48 * - \c boost::lockfree::capacity<>, optional <br>
49 * If this template argument is passed to the options, the size of the stack is set at compile-time. <br>
 *  This option implies \c fixed_sized<true>
51 *
52 * - \c boost::lockfree::allocator<>, defaults to \c boost::lockfree::allocator<std::allocator<void>> <br>
53 * Specifies the allocator that is used for the internal freelist
54 *
55 * \b Requirements:
56 * - T must have a copy constructor
57 * */
58#ifndef BOOST_DOXYGEN_INVOKED
59template <typename T,
60 class A0 = boost::parameter::void_,
61 class A1 = boost::parameter::void_,
62 class A2 = boost::parameter::void_>
63#else
64template <typename T, ...Options>
65#endif
66class stack:
67 boost::noncopyable
68{
69private:
70#ifndef BOOST_DOXYGEN_INVOKED
71 BOOST_STATIC_ASSERT(boost::has_trivial_assign<T>::value);
72 BOOST_STATIC_ASSERT(boost::has_trivial_destructor<T>::value);
73
74 typedef typename detail::stack_signature::bind<A0, A1, A2>::type bound_args;
75
76 static const bool has_capacity = detail::extract_capacity<bound_args>::has_capacity;
77 static const size_t capacity = detail::extract_capacity<bound_args>::capacity;
78 static const bool fixed_sized = detail::extract_fixed_sized<bound_args>::value;
79 static const bool node_based = !(has_capacity || fixed_sized);
80 static const bool compile_time_sized = has_capacity;
81
82 struct node
83 {
84 node(T const & val):
85 v(val)
86 {}
87
88 typedef typename detail::select_tagged_handle<node, node_based>::handle_type handle_t;
89 handle_t next;
90 const T v;
91 };
92
93 typedef typename detail::extract_allocator<bound_args, node>::type node_allocator;
94 typedef typename detail::select_freelist<node, node_allocator, compile_time_sized, fixed_sized, capacity>::type pool_t;
95 typedef typename pool_t::tagged_node_handle tagged_node_handle;
96
97 // check compile-time capacity
98 BOOST_STATIC_ASSERT((mpl::if_c<has_capacity,
99 mpl::bool_<capacity - 1 < boost::integer_traits<boost::uint16_t>::const_max>,
100 mpl::true_
101 >::type::value));
102
103 struct implementation_defined
104 {
105 typedef node_allocator allocator;
106 typedef std::size_t size_type;
107 };
108
109#endif
110
111public:
112 typedef T value_type;
113 typedef typename implementation_defined::allocator allocator;
114 typedef typename implementation_defined::size_type size_type;
115
116 /**
117 * \return true, if implementation is lock-free.
118 *
119 * \warning It only checks, if the top stack node and the freelist can be modified in a lock-free manner.
120 * On most platforms, the whole implementation is lock-free, if this is true. Using c++0x-style atomics,
121 * there is no possibility to provide a completely accurate implementation, because one would need to test
122 * every internal node, which is impossible if further nodes will be allocated from the operating system.
123 *
124 * */
125 bool is_lock_free (void) const
126 {
127 return tos.is_lock_free() && pool.is_lock_free();
128 }
129
130 //! Construct stack
131 // @{
132 stack(void):
133 pool(node_allocator(), capacity)
134 {
135 BOOST_ASSERT(has_capacity);
136 initialize();
137 }
138
139 template <typename U>
140 explicit stack(typename node_allocator::template rebind<U>::other const & alloc):
141 pool(alloc, capacity)
142 {
143 BOOST_STATIC_ASSERT(has_capacity);
144 initialize();
145 }
146
147 explicit stack(allocator const & alloc):
148 pool(alloc, capacity)
149 {
150 BOOST_ASSERT(has_capacity);
151 initialize();
152 }
153 // @}
154
155 //! Construct stack, allocate n nodes for the freelist.
156 // @{
157 explicit stack(size_type n):
158 pool(node_allocator(), n)
159 {
160 BOOST_ASSERT(!has_capacity);
161 initialize();
162 }
163
164 template <typename U>
165 stack(size_type n, typename node_allocator::template rebind<U>::other const & alloc):
166 pool(alloc, n)
167 {
168 BOOST_STATIC_ASSERT(!has_capacity);
169 initialize();
170 }
171 // @}
172
173 /** Allocate n nodes for freelist
174 *
175 * \pre only valid if no capacity<> argument given
176 * \note thread-safe, may block if memory allocator blocks
177 *
178 * */
179 void reserve(size_type n)
180 {
181 BOOST_STATIC_ASSERT(!has_capacity);
182 pool.reserve(n);
183 }
184
185 /** Allocate n nodes for freelist
186 *
187 * \pre only valid if no capacity<> argument given
188 * \note not thread-safe, may block if memory allocator blocks
189 *
190 * */
191 void reserve_unsafe(size_type n)
192 {
193 BOOST_STATIC_ASSERT(!has_capacity);
194 pool.reserve_unsafe(n);
195 }
196
197 /** Destroys stack, free all nodes from freelist.
198 *
199 * \note not thread-safe
200 *
201 * */
202 ~stack(void)
203 {
204 T dummy;
205 while(unsynchronized_pop(dummy))
206 {}
207 }
208
209private:
210#ifndef BOOST_DOXYGEN_INVOKED
211 void initialize(void)
212 {
213 tos.store(tagged_node_handle(pool.null_handle(), 0));
214 }
215
216 void link_nodes_atomic(node * new_top_node, node * end_node)
217 {
218 tagged_node_handle old_tos = tos.load(detail::memory_order_relaxed);
219 for (;;) {
220 tagged_node_handle new_tos (pool.get_handle(new_top_node), old_tos.get_tag());
221 end_node->next = pool.get_handle(old_tos);
222
223 if (tos.compare_exchange_weak(old_tos, new_tos))
224 break;
225 }
226 }
227
228 void link_nodes_unsafe(node * new_top_node, node * end_node)
229 {
230 tagged_node_handle old_tos = tos.load(detail::memory_order_relaxed);
231
232 tagged_node_handle new_tos (pool.get_handle(new_top_node), old_tos.get_tag());
233 end_node->next = pool.get_pointer(old_tos);
234
235 tos.store(new_tos, memory_order_relaxed);
236 }
237
238 template <bool Threadsafe, bool Bounded, typename ConstIterator>
239 tuple<node*, node*> prepare_node_list(ConstIterator begin, ConstIterator end, ConstIterator & ret)
240 {
241 ConstIterator it = begin;
242 node * end_node = pool.template construct<Threadsafe, Bounded>(*it++);
243 if (end_node == NULL) {
244 ret = begin;
245 return make_tuple<node*, node*>(NULL, NULL);
246 }
247
248 node * new_top_node = end_node;
249 end_node->next = NULL;
250
251 try {
252 /* link nodes */
253 for (; it != end; ++it) {
254 node * newnode = pool.template construct<Threadsafe, Bounded>(*it);
255 if (newnode == NULL)
256 break;
257 newnode->next = new_top_node;
258 new_top_node = newnode;
259 }
260 } catch (...) {
261 for (node * current_node = new_top_node; current_node != NULL;) {
262 node * next = current_node->next;
263 pool.template destruct<Threadsafe>(current_node);
264 current_node = next;
265 }
266 throw;
267 }
268 ret = it;
269 return make_tuple(new_top_node, end_node);
270 }
271#endif
272
273public:
274 /** Pushes object t to the stack.
275 *
276 * \post object will be pushed to the stack, if internal node can be allocated
277 * \returns true, if the push operation is successful.
278 *
279 * \note Thread-safe. If internal memory pool is exhausted and the memory pool is not fixed-sized, a new node will be allocated
280 * from the OS. This may not be lock-free.
281 * \throws if memory allocator throws
282 * */
283 bool push(T const & v)
284 {
285 return do_push<false>(v);
286 }
287
288 /** Pushes object t to the stack.
289 *
290 * \post object will be pushed to the stack, if internal node can be allocated
291 * \returns true, if the push operation is successful.
292 *
293 * \note Thread-safe and non-blocking. If internal memory pool is exhausted, the push operation will fail
294 * */
295 bool bounded_push(T const & v)
296 {
297 return do_push<true>(v);
298 }
299
300#ifndef BOOST_DOXYGEN_INVOKED
301private:
302 template <bool Bounded>
303 bool do_push(T const & v)
304 {
305 node * newnode = pool.template construct<true, Bounded>(v);
306 if (newnode == 0)
307 return false;
308
309 link_nodes_atomic(newnode, newnode);
310 return true;
311 }
312
313 template <bool Bounded, typename ConstIterator>
314 ConstIterator do_push(ConstIterator begin, ConstIterator end)
315 {
316 node * new_top_node;
317 node * end_node;
318 ConstIterator ret;
319
320 tie(new_top_node, end_node) = prepare_node_list<true, Bounded>(begin, end, ret);
321 if (new_top_node)
322 link_nodes_atomic(new_top_node, end_node);
323
324 return ret;
325 }
326
327public:
328#endif
329
330 /** Pushes as many objects from the range [begin, end) as freelist node can be allocated.
331 *
332 * \return iterator to the first element, which has not been pushed
333 *
334 * \note Operation is applied atomically
335 * \note Thread-safe. If internal memory pool is exhausted and the memory pool is not fixed-sized, a new node will be allocated
336 * from the OS. This may not be lock-free.
337 * \throws if memory allocator throws
338 */
339 template <typename ConstIterator>
340 ConstIterator push(ConstIterator begin, ConstIterator end)
341 {
342 return do_push<false, ConstIterator>(begin, end);
343 }
344
345 /** Pushes as many objects from the range [begin, end) as freelist node can be allocated.
346 *
347 * \return iterator to the first element, which has not been pushed
348 *
349 * \note Operation is applied atomically
350 * \note Thread-safe and non-blocking. If internal memory pool is exhausted, the push operation will fail
351 * \throws if memory allocator throws
352 */
353 template <typename ConstIterator>
354 ConstIterator bounded_push(ConstIterator begin, ConstIterator end)
355 {
356 return do_push<true, ConstIterator>(begin, end);
357 }
358
359
360 /** Pushes object t to the stack.
361 *
362 * \post object will be pushed to the stack, if internal node can be allocated
363 * \returns true, if the push operation is successful.
364 *
365 * \note Not thread-safe. If internal memory pool is exhausted and the memory pool is not fixed-sized, a new node will be allocated
366 * from the OS. This may not be lock-free.
367 * \throws if memory allocator throws
368 * */
369 bool unsynchronized_push(T const & v)
370 {
371 node * newnode = pool.template construct<false, false>(v);
372 if (newnode == 0)
373 return false;
374
375 link_nodes_unsafe(newnode, newnode);
376 return true;
377 }
378
379 /** Pushes as many objects from the range [begin, end) as freelist node can be allocated.
380 *
381 * \return iterator to the first element, which has not been pushed
382 *
383 * \note Not thread-safe. If internal memory pool is exhausted and the memory pool is not fixed-sized, a new node will be allocated
384 * from the OS. This may not be lock-free.
385 * \throws if memory allocator throws
386 */
387 template <typename ConstIterator>
388 ConstIterator unsynchronized_push(ConstIterator begin, ConstIterator end)
389 {
390 node * new_top_node;
391 node * end_node;
392 ConstIterator ret;
393
394 tie(new_top_node, end_node) = prepare_node_list<false, false>(begin, end, ret);
395 if (new_top_node)
396 link_nodes_unsafe(new_top_node, end_node);
397
398 return ret;
399 }
400
401
402 /** Pops object from stack.
403 *
404 * \post if pop operation is successful, object will be copied to ret.
405 * \returns true, if the pop operation is successful, false if stack was empty.
406 *
407 * \note Thread-safe and non-blocking
408 *
409 * */
410 bool pop(T & ret)
411 {
412 return pop<T>(ret);
413 }
414
415 /** Pops object from stack.
416 *
417 * \pre type T must be convertible to U
418 * \post if pop operation is successful, object will be copied to ret.
419 * \returns true, if the pop operation is successful, false if stack was empty.
420 *
421 * \note Thread-safe and non-blocking
422 *
423 * */
424 template <typename U>
425 bool pop(U & ret)
426 {
427 BOOST_STATIC_ASSERT((boost::is_convertible<T, U>::value));
428 tagged_node_handle old_tos = tos.load(detail::memory_order_consume);
429
430 for (;;) {
431 node * old_tos_pointer = pool.get_pointer(old_tos);
432 if (!old_tos_pointer)
433 return false;
434
435 tagged_node_handle new_tos(old_tos_pointer->next, old_tos.get_tag() + 1);
436
437 if (tos.compare_exchange_weak(old_tos, new_tos)) {
438 detail::copy_payload(old_tos_pointer->v, ret);
439 pool.template destruct<true>(old_tos);
440 return true;
441 }
442 }
443 }
444
445
446 /** Pops object from stack.
447 *
448 * \post if pop operation is successful, object will be copied to ret.
449 * \returns true, if the pop operation is successful, false if stack was empty.
450 *
451 * \note Not thread-safe, but non-blocking
452 *
453 * */
454 bool unsynchronized_pop(T & ret)
455 {
456 return unsynchronized_pop<T>(ret);
457 }
458
459 /** Pops object from stack.
460 *
461 * \pre type T must be convertible to U
462 * \post if pop operation is successful, object will be copied to ret.
463 * \returns true, if the pop operation is successful, false if stack was empty.
464 *
465 * \note Not thread-safe, but non-blocking
466 *
467 * */
468 template <typename U>
469 bool unsynchronized_pop(U & ret)
470 {
471 BOOST_STATIC_ASSERT((boost::is_convertible<T, U>::value));
472 tagged_node_handle old_tos = tos.load(detail::memory_order_relaxed);
473 node * old_tos_pointer = pool.get_pointer(old_tos);
474
475 if (!pool.get_pointer(old_tos))
476 return false;
477
478 node * new_tos_ptr = pool.get_pointer(old_tos_pointer->next);
479 tagged_node_handle new_tos(pool.get_handle(new_tos_ptr), old_tos.get_tag() + 1);
480
481 tos.store(new_tos, memory_order_relaxed);
482 detail::copy_payload(old_tos_pointer->v, ret);
483 pool.template destruct<false>(old_tos);
484 return true;
485 }
486
487 /**
488 * \return true, if stack is empty.
489 *
490 * \note It only guarantees that at some point during the execution of the function the stack has been empty.
491 * It is rarely practical to use this value in program logic, because the stack can be modified by other threads.
492 * */
493 bool empty(void) const
494 {
495 return pool.get_pointer(tos.load()) == NULL;
496 }
497
498private:
499#ifndef BOOST_DOXYGEN_INVOKED
500 detail::atomic<tagged_node_handle> tos;
501
502 static const int padding_size = BOOST_LOCKFREE_CACHELINE_BYTES - sizeof(tagged_node_handle);
503 char padding[padding_size];
504
505 pool_t pool;
506#endif
507};
508
509} /* namespace lockfree */
510} /* namespace boost */
511
512#endif /* BOOST_LOCKFREE_STACK_HPP_INCLUDED */