1 #ifndef BOOST_SMART_PTR_DETAIL_QUICK_ALLOCATOR_HPP_INCLUDED
2 #define BOOST_SMART_PTR_DETAIL_QUICK_ALLOCATOR_HPP_INCLUDED
4 // MS compatible compilers support #pragma once
6 #if defined(_MSC_VER) && (_MSC_VER >= 1020)
11 // detail/quick_allocator.hpp
13 // Copyright (c) 2003 David Abrahams
14 // Copyright (c) 2003 Peter Dimov
16 // Distributed under the Boost Software License, Version 1.0. (See
17 // accompanying file LICENSE_1_0.txt or copy at
18 // http://www.boost.org/LICENSE_1_0.txt)
21 #include <boost/config.hpp>
23 #include <boost/smart_ptr/detail/lightweight_mutex.hpp>
24 #include <boost/type_traits/type_with_alignment.hpp>
25 #include <boost/type_traits/alignment_of.hpp>
27 #include <new> // ::operator new, ::operator delete
28 #include <cstddef> // std::size_t
36 template<unsigned size, unsigned align_> union freeblock
38 typedef typename boost::type_with_alignment<align_>::type aligner_type;
44 template<unsigned size, unsigned align_> struct allocator_impl
46 typedef freeblock<size, align_> block;
48 // It may seem odd to use such small pages.
50 // However, on a typical Windows implementation that uses
51 // the OS allocator, "normal size" pages interact with the
52 // "ordinary" operator new, slowing it down dramatically.
54 // 512 byte pages are handled by the small object allocator,
55 // and don't interfere with ::new.
57 // The other alternative is to use much bigger pages (1M.)
59 // It is surprisingly easy to hit pathological behavior by
60 // varying the page size. g++ 2.96 on Red Hat Linux 7.2,
61 // for example, passionately dislikes 496. 512 seems OK.
63 #if defined(BOOST_QA_PAGE_SIZE)
65 enum { items_per_page = BOOST_QA_PAGE_SIZE / size };
69 enum { items_per_page = 512 / size }; // 1048560 / size
73 #ifdef BOOST_HAS_THREADS
75 static lightweight_mutex & mutex()
77 static lightweight_mutex m;
81 static lightweight_mutex * mutex_init;
89 static inline void * alloc()
91 #ifdef BOOST_HAS_THREADS
92 lightweight_mutex::scoped_lock lock( mutex() );
101 if(last == items_per_page)
103 // "Listen to me carefully: there is no memory leak"
104 // -- Scott Meyers, Eff C++ 2nd Ed Item 10
105 page = ::new block[items_per_page];
109 return &page[last++];
113 static inline void * alloc(std::size_t n)
115 if(n != size) // class-specific new called for a derived object
117 return ::operator new(n);
121 #ifdef BOOST_HAS_THREADS
122 lightweight_mutex::scoped_lock lock( mutex() );
131 if(last == items_per_page)
133 page = ::new block[items_per_page];
137 return &page[last++];
142 static inline void dealloc(void * pv)
144 if(pv != 0) // 18.4.1.1/13
146 #ifdef BOOST_HAS_THREADS
147 lightweight_mutex::scoped_lock lock( mutex() );
149 block * pb = static_cast<block *>(pv);
155 static inline void dealloc(void * pv, std::size_t n)
157 if(n != size) // class-specific delete called for a derived object
159 ::operator delete(pv);
161 else if(pv != 0) // 18.4.1.1/13
163 #ifdef BOOST_HAS_THREADS
164 lightweight_mutex::scoped_lock lock( mutex() );
166 block * pb = static_cast<block *>(pv);
#ifdef BOOST_HAS_THREADS

// Taking the address of mutex() during static initialization forces the
// function-local static mutex to be constructed before main() starts,
// i.e. before any additional threads can race on its first use.
template<unsigned size, unsigned align_>
lightweight_mutex * allocator_impl<size, align_>::mutex_init = &allocator_impl<size, align_>::mutex();

#endif
180 template<unsigned size, unsigned align_>
181 freeblock<size, align_> * allocator_impl<size, align_>::free = 0;
183 template<unsigned size, unsigned align_>
184 freeblock<size, align_> * allocator_impl<size, align_>::page = 0;
186 template<unsigned size, unsigned align_>
187 unsigned allocator_impl<size, align_>::last = allocator_impl<size, align_>::items_per_page;
190 struct quick_allocator: public allocator_impl< sizeof(T), boost::alignment_of<T>::value >
194 } // namespace detail
198 #endif // #ifndef BOOST_SMART_PTR_DETAIL_QUICK_ALLOCATOR_HPP_INCLUDED