C++ Basics: Memory Pool

Honestly, writing one of these that actually beats malloc on performance is pretty hard...

Ever since Windows 7 and Linux 3.7 adopted much better memory management, the gain from a custom memory manager has become negligible. So what exactly am I doing here?

Isn't this just busywork for its own sake?

 

ver 0.3 (I'm sure I fixed a bug)

ver 1.2 (optimization: use the native STL mutex for the multi-threaded case; stop using the wrapped list)

ver 1.3 (BUG FIX: fixed a bug that caused __lock to not take effect correctly)

 

mempool.h

#pragma once

/**
 * @brief Allocator, (Memory pool v2)
 * @author Even
 * @date 2022-01-14
 */

#include <memory>
#include <mutex>

namespace lc
{

#define __BLOCK_CAP (128)

class mem_pool
{
private:
    /* Free-list node; it lives inside the free block itself, so it costs no extra memory. */
    struct __block { __block* _next = nullptr; };

    /* One big chunk owned by the first-level cache. */
    struct __cache_block
    {
        __cache_block() = delete;
        __cache_block(char* ptr, int size)
            : _ptr(ptr), _size(size), _next(nullptr){}

        std::unique_ptr<char[]> _ptr;
        int _size;

        __cache_block* _next;
    };

    /* First-level cache: allocates big chunks and splits them into small blocks on demand. */
    class __mem_cache
    {
    public:
        __mem_cache() : _cache(nullptr) {}
        ~__mem_cache();

    private:
        void create(int size);

    public:
        //@return list.
        struct __apply_ret{__block* _b; __block* _e;};
        void apply(int& block_size, __apply_ret& ret);

    public:
        __cache_block* _cache;   //It's a list.
    };

public:
    mem_pool();
    ~mem_pool();

public:
    static inline mem_pool& ins() noexcept { static mem_pool ins_; return ins_; }

public:
    void* alloc(int size) noexcept;
    void free(void* ptr, int size) noexcept;

private:
    /* Refill the free list at bucket 'idx' from the first-level cache. */
    void fill(int idx);

private:
    __mem_cache _mem_cache;
    __block* _free[__BLOCK_CAP];   /* Second-level pool: one free list per size class. */
};

#define __THREAD_MAX (128)
#define __THREAD_MAX_EXP (7)
static_assert((1 << __THREAD_MAX_EXP) == __THREAD_MAX, "Error __THREAD_MAX_EXP or __THREAD_MAX .");

class mem_pool_s
{
private:
    struct __thread_mem_pool
    {
        mem_pool _pool;
        bool _isVaild = false;
    };

public:
    /* RAII wrapper around a single global mutex. */
    struct __lock
    {
        __lock() { __lock::_mx.lock(); }
        ~__lock() { __lock::_mx.unlock(); }

    private:
        static std::mutex _mx;
    };

public:
    mem_pool_s() noexcept{}
    ~mem_pool_s(){}

public:
    static inline mem_pool_s& ins() noexcept { static mem_pool_s _ins; return _ins; }

public:
    void* alloc(int size) noexcept;
    void free(void* ptr, int size) noexcept;

private:
    int try_build_tidx() noexcept;

private:
    __thread_mem_pool _pool[__THREAD_MAX];

#ifdef __GNUC__
    static __thread int _tidx;
#else
    static __declspec(thread) int _tidx;
#endif
};

};  // end of 'namespace lc'
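
Before the implementation, here is a minimal usage sketch of the interface declared above (the demo struct and main() are illustrative only, not part of the pool itself). The key point is that free() must be given the same size that was passed to alloc(), because the blocks carry no size header of their own.

#include <new>        // placement new

#include "mempool.h"

struct demo { int a; double b; };

int main()
{
    /* Single-threaded pool: raw memory in, raw memory out. */
    void* p = lc::mem_pool::ins().alloc(sizeof(demo));
    demo* d = new(p) demo{1, 2.0};              // construct in place
    d->~demo();
    lc::mem_pool::ins().free(p, sizeof(demo));  // must pass the original size

    /* Thread-sharded wrapper protected by a global mutex. */
    void* q = lc::mem_pool_s::ins().alloc(64);
    lc::mem_pool_s::ins().free(q, 64);

    return 0;
}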

 

 

mempool.cpp

#include <cstring>
#include <new>
#include <thread>

#include "mempool.h"

static_assert((__BLOCK_CAP & (__BLOCK_CAP - 1)) == 0, "__BLOCK_CAP must be a power of 2.");

#define __ALIGN (8)
#define __ALIGN_EXP (3)
static_assert((1 << __ALIGN_EXP) == __ALIGN, "Error __ALIGN_EXP or __ALIGN .");
static_assert(__ALIGN >= 8, "__ALIGN must be at least 8.");
static_assert((__ALIGN & (__ALIGN - 1)) == 0, "__ALIGN must be a power of 2.");

#define __IS_ALLOW(s_) ((s_) <= (__BLOCK_CAP << __ALIGN_EXP))
#define __F_ALIGN(s_)  (((s_) + __ALIGN - 1) & ~(__ALIGN - 1))
#define __F_INDEX(s_)  ((((s_) + __ALIGN - 1) >> __ALIGN_EXP) - 1)

#define __DEFAULT_EACH_BLOCK_STORE (16)

namespace lc
{

std::mutex mem_pool_s::__lock::_mx;

mem_pool::__mem_cache::~__mem_cache()
{
    /* Walk the whole list; deleting only the head would leak every other big-block. */
    while(_cache != nullptr)
    {
        __cache_block* next = _cache->_next;
        delete _cache;
        _cache = next;
    }
}

/**
 * @brief Create a big-block for _cache. The 'size' passed in must already be aligned.
 * @param size The size of the big-block to create.
 */
void mem_pool::__mem_cache::create(int size)
{
    /* Allocate with new[] so it matches the delete[] that std::unique_ptr<char[]> will run. */
    char* mem = new(std::nothrow) char[size];
    __cache_block* block = (mem == nullptr) ? nullptr : new(std::nothrow) __cache_block(mem, size);
    if(block == nullptr)
    {
        delete[] mem;
        throw std::bad_alloc();
    }

    /* Insert at the front of the cache list. */
    block->_next = _cache;
    _cache = block;
}

/**
 * @brief The first-level memory pool alloc:
 *      split a big-block into small blocks and hand them to the second-level pool.
 *
 * @param block_size The size of each block of memory (already aligned by the caller);
 *                   may be reduced if only a smaller remainder is left.
 * @param ret Return value of apply: the head and tail of the carved-out block list.
 */
void mem_pool::__mem_cache::apply(int& block_size, mem_pool::__mem_cache::__apply_ret& ret)
{
    /* No cache yet, or the current big-block is used up: create a fresh one. */
    if(_cache == nullptr || _cache->_size <= 0)
        create(block_size * __DEFAULT_EACH_BLOCK_STORE);

    __cache_block* block = _cache;
    char* ptr = block->_ptr.get();
    int& size = block->_size;

    /* The remainder is no bigger than one block: hand it out whole and mark the big-block
       empty, otherwise the same bytes would be handed out again on the next call. */
    if(size <= block_size)
    {
        block_size = size;
        ret._b = ret._e = (__block*)(ptr);
        size = 0;
        return;
    }

    /* Carve blocks off the tail of the big-block and chain them into a singly linked list. */
    __block * list = nullptr,
            * begin = nullptr,
            * end = nullptr;

    int ret_count = 0;
    for (;size >= block_size;)
    {
        char* block_ptr = ptr + size - block_size;

        if(list == nullptr)
        {
            list = (__block*)(block_ptr);
            begin = end = list;
        }
        else
        {
            list->_next = (__block*)(block_ptr);
            end = list = list->_next;
        }

        size -= block_size;
        ++ret_count;

        if(ret_count >= __DEFAULT_EACH_BLOCK_STORE)
            break;
    }

    ret._b = begin;
    ret._e = end;
}

mem_pool::mem_pool()
{
    std::memset(_free, 0, sizeof(_free));
}

mem_pool::~mem_pool()
{
    /* _mem_cache is destroyed automatically; calling its destructor by hand would run it twice. */
    std::memset(_free, 0, sizeof(_free));
}

void* mem_pool::alloc(int size) noexcept
{
    if(!size)
        return nullptr;

    /* Requests larger than the biggest bucket go straight to malloc. */
    if(!__IS_ALLOW(size))
        return ::malloc(size);

    int idx = __F_INDEX(size);

    for(;;)
    {
        __block*& block = _free[idx];
        if(block)
        {
            /* Pop the first block off this bucket's free list. */
            __block* ret = block;
            block = block->_next;
            return ret;
        }

        /* Bucket is empty: refill it from the first-level cache and retry.
           Note: create() throws std::bad_alloc on failure, and alloc() is noexcept,
           so running completely out of memory terminates the process. */
        fill(idx);
    }
}

void mem_pool::free(void* ptr, int size) noexcept
{
    if(ptr == nullptr)
        return;

    /* Blocks that were never served from a bucket go back to ::free. */
    if(!__IS_ALLOW(size) || !size)
    {
        ::free(ptr);
        return;
    }

    /* Push the block onto the front of its bucket's free list. */
    __block* block = (__block*)(ptr);
    __block*& bfree = _free[__F_INDEX(size)];
    block->_next = bfree;
    bfree = block;
}

void mem_pool::fill(int idx)
{
    mem_pool::__mem_cache::__apply_ret apply_ret;
    int size = __ALIGN * (idx + 1);
    _mem_cache.apply(size, apply_ret);

    /* apply() may shrink 'size'; index with the block size that actually came back. */
    __block*& p = _free[__F_INDEX(size)];
    apply_ret._e->_next = p;
    p = apply_ret._b;
}

#ifdef __GNUC__
    __thread int mem_pool_s::_tidx = -1;   /* -1 means "no pool picked for this thread yet" */
#else
    __declspec(thread) int mem_pool_s::_tidx = -1;
#endif

int __thread_id()
{
    #ifdef __GNUC__
        static __thread int tid = 0;
    #else
        static __declspec(thread) int tid = 0;
    #endif

    if(tid != 0)
        return tid;

    /* Keep the hash non-negative so the index math below stays well-defined. */
    tid = (int)(std::hash<std::thread::id>()(std::this_thread::get_id()) & 0x7fffffff);
    return tid;
}

int mem_pool_s::try_build_tidx() noexcept
{
    if(_tidx >= 0)
        return _tidx;

    const int tid = __thread_id();
    _tidx = tid & (__THREAD_MAX - 1);

    /* Starting from the hashed slot, look for a pool nobody has claimed yet;
       if every slot is taken, fall back to sharing the hashed slot. */
    {
        int i = _tidx;
        do
        {
            if(i >= __THREAD_MAX)
            {
                i = 0;
                if(i == _tidx) break;
            }
            if(!(_pool[i]._isVaild))
            {
                _pool[i]._isVaild = true;   /* claim the slot so later threads skip it */
                _tidx = i;
                break;
            }
            ++i;
        }while(i != _tidx);
    }

    return _tidx;
}

void* mem_pool_s::alloc(int size) noexcept
{
    __lock lk;
    try_build_tidx();
    return _pool[_tidx]._pool.alloc(size);
}

void mem_pool_s::free(void* ptr, int size) noexcept
{
    __lock lk;
    try_build_tidx();
    _pool[_tidx]._pool.free(ptr, size);
}

};  // end of 'namespace lc'
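
As a quick sanity check of the bucket math used above, here is how a 20-byte request maps onto the free lists. The static_asserts below simply re-evaluate __F_ALIGN / __F_INDEX with the constants fixed at 8 and 3; they are illustrative only, not part of the original source.

// A 20-byte request is rounded up to the next multiple of __ALIGN (8) and lands in
// bucket 2, i.e. the free list that serves 24-byte blocks.
static_assert(((20 + 8 - 1) & ~(8 - 1)) == 24, "__F_ALIGN(20) == 24");
static_assert((((20 + 8 - 1) >> 3) - 1) == 2,  "__F_INDEX(20) == 2");

// Anything above __BLOCK_CAP * __ALIGN = 1024 bytes bypasses the pool and goes to malloc.
static_assert((128 << 3) == 1024, "largest pooled request");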

 
