A templated, thread-safe, reference-counting-safe LRU cache, modeled on the leveldb LRU cache. Below are the cache header itself, the thread-safe queue from libosmium used by the test, and a producer/consumer test driver.
#pragma once

#include <algorithm>
#include <cassert>
#include <chrono>
#include <condition_variable>
#include <cstddef>
#include <cstdint>
#include <list>
#include <mutex>
#include <unordered_map>

/**
 * @brief Template cache with an LRU removal policy.
 * Synchronized, thread-safe and reference-counting safe.
 * References:
 *   https://github.com/google/leveldb/blob/master/util/cache.cc
 *   https://github.com/paudley/lru_cache
 */

#if __cplusplus >= 202002L
// Lambdas in default template arguments require C++20.
template< class Key, class Data,
          size_t (*cap_fun)(const Data&) = [](const Data& data) constexpr -> size_t { return sizeof(data); },
          void (*delete_fun)(Data&) = [](Data&) constexpr {},
          class Hash = std::hash<Key> >
#else
template <typename T>
static size_t default_cap_fun(const T& data) {
    return sizeof(data);
}

template <typename T>
static void default_delete_fun(T&) {}

template< typename Key, typename Data,
          size_t (*cap_fun)(const Data&) = default_cap_fun<Data>,
          void (*delete_fun)(Data&) = default_delete_fun<Data>,
          typename Hash = std::hash<Key> >
#endif

class RefLRUCache
{
public:
    struct Node;
    typedef std::list<Node> List;                            // Main cache storage typedef
    typedef typename List::iterator Node_Iter;               // Main cache iterator
    typedef typename List::const_iterator Node_cIter;        // Main cache iterator (const)
    typedef std::unordered_map<Key, Node_Iter, Hash> Map;    // Index typedef
    typedef typename Map::iterator Map_Iter;                 // Index iterator
    typedef typename Map::const_iterator Map_cIter;          // Index iterator (const)

    struct Node
    {
        Node(Key key_, Data value_, int32_t refs_, bool in_use_, bool in_lru_):
            key(key_),
            value(value_),
            refs(refs_),
            in_use(in_use_),
            in_lru(in_lru_),
            charge(cap_fun(value_)),
            trash_it()
        {}

        Key key;
        Data value;
        int32_t refs;
        bool in_use;
        bool in_lru;
        size_t charge;
        Node_Iter trash_it;
    };

public:
    RefLRUCache() = delete;
    RefLRUCache(const RefLRUCache&) = delete;
    RefLRUCache& operator= (const RefLRUCache&) = delete;
    RefLRUCache(RefLRUCache&&) = delete;
    RefLRUCache& operator= (RefLRUCache&&) = delete;

    explicit RefLRUCache(uint64_t mem_capacity) :
        _mem_capacity(mem_capacity),
        _mem_usage(0)
    {}

    ~RefLRUCache() {
        assert(_use.empty());
        assert(_trash.empty());
        std::for_each(_lru.begin(), _lru.end(),
            [this](Node& elem) {    // capture this so the member _unref() can be called
                elem.in_lru = false;
                elem.in_use = false;
                _unref(elem);
            });

        _lru.clear();
        _index.clear();
    }

    /** @brief Returns the number of elements.
     */
    inline size_t size() const {
        std::lock_guard<std::mutex> lock(_mutex);
        return _index.size();
    }

    inline uint64_t mem_capacity() const {
        std::lock_guard<std::mutex> lock(_mutex);
        return _mem_capacity;
    }

    inline uint64_t mem_usage() const {
        std::lock_guard<std::mutex> lock(_mutex);
        return _mem_usage;
    }

    /** @brief Removes a key-node from the cache.
     */
    inline void erase(const Key& key) {
        std::unique_lock<std::mutex> lock{_mutex};
        _finish_erase(key);

        // insert() only blocks while the cache is over capacity and every
        // resident entry is still referenced; wake it once usage has dropped
        // back under capacity.
        if (_mem_usage <= _mem_capacity && _lru.empty()) {
            lock.unlock();
            _space_available.notify_one();
        }
    }

    /**
     * @brief lookup_node_it() and release() must be called as a pair.
     */
    inline Node_Iter lookup_node_it(const Key& key) {
        std::lock_guard<std::mutex> lock(_mutex);
        Map_Iter miter = _index.find(key);
        if (miter == _index.end())
            return Node_Iter();    // a default-constructed iterator signals a miss

        _ref(*(miter->second));

        return miter->second;
    }

    /** @brief Release one node previously returned by lookup_node_it() or insert().
     */
    inline void release(Node_Iter node_it) {
        std::unique_lock<std::mutex> lock{_mutex};
        _unref(*node_it);

        // The released entry may now be evictable; wake a blocked insert().
        if (!_lru.empty() && _mem_usage > _mem_capacity) {
            lock.unlock();
            _space_available.notify_one();
        }
    }

    /**
     * @brief insert() and release() must be called as a pair.
     */
    inline Node_Iter insert(const Key& key, const Data& data) {
        constexpr std::chrono::milliseconds max_wait{10};

        // Block while the cache is over capacity and nothing can be evicted
        // (every resident entry is still referenced). The lock is held so the
        // members are never read unsynchronized.
        std::unique_lock<std::mutex> lock{_mutex};
        while (_mem_usage > _mem_capacity && _lru.empty()) {
            _space_available.wait_for(lock, max_wait, [this] {
                return !(_mem_usage > _mem_capacity && _lru.empty());
            });
        }

        _finish_erase(key);

        // If the compiler supports it, the emplace_front interface can be used:
        // _use.emplace_front(key, data, 2, true, false);
        _use.push_front(Node(key, data, 2, true, false));
        _mem_usage += _use.begin()->charge;
        _index[key] = _use.begin();

        // Evict from the cold end of the LRU list until we are back under capacity.
        while (_mem_usage > _mem_capacity && !_lru.empty()) {
            Node_Iter liter = _lru.end();
            --liter;
            Node& node = *liter;
            _finish_erase(node.key);
        }

        return _use.begin();
    }

private:
    inline void _finish_erase(const Key& key) {
        Map_Iter miter = _index.find(key);
        if (miter == _index.end())
            return;

        Node& node = *(miter->second);
        _mem_usage -= node.charge;

        // Still-referenced nodes are parked on the trash list; they no longer
        // count against the cache capacity.
        if (node.in_lru) {
            _trash.splice(_trash.begin(), _lru, miter->second);
            node.in_lru = false;
            node.trash_it = _trash.begin();
        }

        if (node.in_use) {
            _trash.splice(_trash.begin(), _use, miter->second);
            node.in_use = false;
            node.trash_it = _trash.begin();
        }

        _index.erase(miter);
        _unref(node);
    }

    inline void _ref(Node& node) {
        // If on the _lru list, move to the _use list.
        if (node.refs == 1 && node.in_lru) {
            Map_Iter miter = _index.find(node.key);
            _use.splice(_use.begin(), _lru, miter->second);
            node.in_lru = false;
            node.in_use = true;
        }

        node.refs++;
    }

    inline void _unref(Node& node) {
        assert(node.refs > 0);
        node.refs--;

        if (node.refs == 0) {
            delete_fun(node.value);
            if (node.trash_it != Node_Iter()) {
                _trash.erase(node.trash_it);
            }
        } else if (node.refs == 1 && node.in_use) {
            // No longer in use; move to the _lru list.
            Map_Iter miter = _index.find(node.key);
            _lru.splice(_lru.begin(), _use, miter->second);
            node.in_lru = true;
            node.in_use = false;
        }
    }

private:
    List _lru;                  // resident entries not currently referenced by any caller
    List _use;                  // entries currently referenced by callers
    List _trash;                // erased-but-still-referenced entries; do not count against capacity
    Map _index;                 // key -> iterator into the cache storage (_lru or _use)
    uint64_t _mem_capacity;     // cache memory capacity
    uint64_t _mem_usage;        // cache memory usage

    std::condition_variable _space_available;   // used to signal producers when the cache is not full
    mutable std::mutex _mutex;
};
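A minimal single-threaded usage sketch (my own, not part of the header; string_charge and string_delete are illustrative customization points): every iterator handed back by insert() or by a successful lookup_node_it() pins the entry and must be paired with a release(). On a miss, lookup_node_it() returns a default-constructed Node_Iter, which is the same check the test program below performs.

#include "ref_lru_cache.h"    // the header above, under the filename used by the test below

#include <iostream>
#include <string>

// Illustrative customizations: charge entries by their real string footprint
// and observe when the last reference to an entry is dropped.
static size_t string_charge(const std::string& s) { return sizeof(s) + s.capacity(); }
static void string_delete(std::string& s) { std::cout << "dropping \"" << s << "\"\n"; }

int main() {
    using Cache = RefLRUCache<int, std::string, string_charge, string_delete>;
    Cache cache(1024);    // capacity in bytes, as measured by string_charge

    // insert() returns an already-referenced node; release it when done.
    Cache::Node_Iter it = cache.insert(1, "hello");
    std::cout << it->value << " charge=" << it->charge << "\n";
    cache.release(it);

    // lookup_node_it() returns a default-constructed iterator on a miss and a
    // referenced node on a hit; a hit must also be released.
    Cache::Node_Iter hit = cache.lookup_node_it(1);
    if (hit != Cache::Node_Iter()) {
        std::cout << "hit: " << hit->value << "\n";
        cache.release(hit);
    }
    return 0;
}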
The test program hands referenced node iterators from the producer to the consumer through the thread-safe queue from libosmium, reproduced here:

#ifndef OSMIUM_THREAD_QUEUE_HPP
#define OSMIUM_THREAD_QUEUE_HPP

/*

This file is part of Osmium (https://osmcode.org/libosmium).

Copyright 2013-2020 Jochen Topf <jochen@topf.org> and others (see README).

Boost Software License - Version 1.0 - August 17th, 2003

Permission is hereby granted, free of charge, to any person or organization
obtaining a copy of the software and accompanying documentation covered by
this license (the "Software") to use, reproduce, display, distribute,
execute, and transmit the Software, and to prepare derivative works of the
Software, and to permit third-parties to whom the Software is furnished to
do so, all subject to the following:

The copyright notices in the Software and this entire statement, including
the above license grant, this restriction and the following disclaimer,
must be included in all copies of the Software, in whole or in part, and
all derivative works of the Software, unless such copies or derivative
works are solely in the form of machine-executable object code generated by
a source language processor.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT
SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE
FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.

*/

#include <chrono>
#include <condition_variable>
#include <cstddef>
#include <mutex>
#include <queue>
#include <string>
#include <utility> // IWYU pragma: keep

#ifdef OSMIUM_DEBUG_QUEUE_SIZE
# include <atomic>
# include <iostream>
#endif

namespace osmium {

    namespace thread {

        /**
         * A thread-safe queue.
         */
        template <typename T>
        class Queue {

            /// Maximum size of this queue. If the queue is full pushing to
            /// the queue will block.
            const std::size_t m_max_size;

            /// Name of this queue (for debugging only).
            const std::string m_name;

            mutable std::mutex m_mutex;

            std::queue<T> m_queue;

            /// Used to signal consumers when data is available in the queue.
            std::condition_variable m_data_available;

            /// Used to signal producers when queue is not full.
            std::condition_variable m_space_available;

#ifdef OSMIUM_DEBUG_QUEUE_SIZE
            /// The largest size the queue has been so far.
            std::size_t m_largest_size;

            /// The number of times push() was called on the queue.
            std::atomic<int> m_push_counter;

            /// The number of times the queue was full and a thread pushing
            /// to the queue was blocked.
            std::atomic<int> m_full_counter;

            /**
             * The number of times wait_and_pop(with_timeout)() was called
             * on the queue.
             */
            std::atomic<int> m_pop_counter;

            /// The number of times the queue was empty and a thread popping
            /// from the queue found nothing.
            std::atomic<int> m_empty_counter;
#endif

        public:

            /**
             * Construct a multithreaded queue.
             *
             * @param max_size Maximum number of elements in the queue. Set to
             *                 0 for an unlimited size.
             * @param name Optional name for this queue. (Used for debugging.)
             */
            explicit Queue(std::size_t max_size = 0, std::string name = "") :
                m_max_size(max_size),
                m_name(std::move(name)),
                m_queue()
#ifdef OSMIUM_DEBUG_QUEUE_SIZE
                ,
                m_largest_size(0),
                m_push_counter(0),
                m_full_counter(0),
                m_pop_counter(0),
                m_empty_counter(0)
#endif
            {
            }

            Queue(const Queue&) = delete;
            Queue& operator=(const Queue&) = delete;

            Queue(Queue&&) = delete;
            Queue& operator=(Queue&&) = delete;

#ifdef OSMIUM_DEBUG_QUEUE_SIZE
            ~Queue() {
                std::cerr << "queue '" << m_name
                          << "' with max_size=" << m_max_size
                          << " had largest size " << m_largest_size
                          << " and was full " << m_full_counter
                          << " times in " << m_push_counter
                          << " push() calls and was empty " << m_empty_counter
                          << " times in " << m_pop_counter
                          << " pop() calls\n";
            }
#else
            ~Queue() = default;
#endif

            /**
             * Push an element onto the queue. If the queue has a max size,
             * this call will block if the queue is full.
             */
            void push(T value) {
                constexpr const std::chrono::milliseconds max_wait{10};
#ifdef OSMIUM_DEBUG_QUEUE_SIZE
                ++m_push_counter;
#endif
                if (m_max_size) {
                    while (size() >= m_max_size) {
                        std::unique_lock<std::mutex> lock{m_mutex};
                        m_space_available.wait_for(lock, max_wait, [this] {
                            return m_queue.size() < m_max_size;
                        });
#ifdef OSMIUM_DEBUG_QUEUE_SIZE
                        ++m_full_counter;
#endif
                    }
                }
                std::lock_guard<std::mutex> lock{m_mutex};
                m_queue.push(std::move(value));
#ifdef OSMIUM_DEBUG_QUEUE_SIZE
                if (m_largest_size < m_queue.size()) {
                    m_largest_size = m_queue.size();
                }
#endif
                m_data_available.notify_one();
            }

            void wait_and_pop(T& value) {
#ifdef OSMIUM_DEBUG_QUEUE_SIZE
                ++m_pop_counter;
#endif
                std::unique_lock<std::mutex> lock{m_mutex};
#ifdef OSMIUM_DEBUG_QUEUE_SIZE
                if (m_queue.empty()) {
                    ++m_empty_counter;
                }
#endif
                m_data_available.wait(lock, [this] {
                    return !m_queue.empty();
                });
                if (!m_queue.empty()) {
                    value = std::move(m_queue.front());
                    m_queue.pop();
                    lock.unlock();
                    if (m_max_size) {
                        m_space_available.notify_one();
                    }
                }
            }

            bool try_pop(T& value) {
#ifdef OSMIUM_DEBUG_QUEUE_SIZE
                ++m_pop_counter;
#endif
                {
                    std::lock_guard<std::mutex> lock{m_mutex};
                    if (m_queue.empty()) {
#ifdef OSMIUM_DEBUG_QUEUE_SIZE
                        ++m_empty_counter;
#endif
                        return false;
                    }
                    value = std::move(m_queue.front());
                    m_queue.pop();
                }
                if (m_max_size) {
                    m_space_available.notify_one();
                }
                return true;
            }

            bool empty() const {
                std::lock_guard<std::mutex> lock{m_mutex};
                return m_queue.empty();
            }

            std::size_t size() const {
                std::lock_guard<std::mutex> lock{m_mutex};
                return m_queue.size();
            }

        }; // class Queue

    } // namespace thread

} // namespace osmium

#endif // OSMIUM_THREAD_QUEUE_HPP

The producer/consumer test driver: the producer looks up or inserts entries and queues the referenced iterators; the consumer pops them, reads the cached value and releases the reference.

#include "ref_lru_cache.h"
#include <osmium/thread/queue.hpp>    // the osmium::thread::Queue header above

#include <chrono>
#include <cstdint>
#include <thread>

RefLRUCache<int, uint64_t>::Node_Iter productor(RefLRUCache<int, uint64_t>& lll, int i)
{
    std::this_thread::sleep_for(std::chrono::nanoseconds(1000));
    RefLRUCache<int, uint64_t>::Node_Iter it = lll.lookup_node_it(i);
    if (RefLRUCache<int, uint64_t>::Node_Iter() == it)
    {
        // Miss: insert the value; the returned node is already referenced.
        return lll.insert(i, i * 1000);
    }
    else
    {
        return it;
    }
}

void consume(RefLRUCache<int, uint64_t>& lll, RefLRUCache<int, uint64_t>::Node_Iter it)
{
    std::this_thread::sleep_for(std::chrono::nanoseconds(100000));
    uint64_t value = it->value;    // read the cached value while the reference is held
    (void)value;
    lll.release(it);
}

osmium::thread::Queue<RefLRUCache<int, uint64_t>::Node_Iter> outputqueue;

void productor_thread_fun(RefLRUCache<int, uint64_t>& lll)
{
    int i = 1;
    while (true)
    {
        if (i > 100000)
            break;

        outputqueue.push(productor(lll, i++));
    }
}

void consume_thread_fun(RefLRUCache<int, uint64_t>& lll)
{
    int i = 1;
    while (true)
    {
        if (i++ > 100000)
            break;

        RefLRUCache<int, uint64_t>::Node_Iter it;
        outputqueue.wait_and_pop(it);
        consume(lll, it);
    }
}

int main() {
    RefLRUCache<int, uint64_t> lll(10000);

    std::thread thhh1(productor_thread_fun, std::ref(lll));
    std::thread thhh2(consume_thread_fun, std::ref(lll));

    thhh1.join();
    thhh2.join();

    return 0;
}
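One design note on the test: outputqueue is declared with the default max_size of 0, i.e. unbounded, and every queued Node_Iter pins one cache entry until the consumer releases it. Bounding the queue (a sketch using the constructor shown above; the size 1000 and the name "lru-test" are arbitrary choices of mine) therefore also bounds how many entries can be pinned at once:

#include "ref_lru_cache.h"
#include <osmium/thread/queue.hpp>
#include <cstdint>

// Drop-in replacement for the unbounded declaration in the test above:
// at most 1000 node iterators, and therefore at most 1000 pinned cache
// entries, can be in flight between the producer and the consumer.
osmium::thread::Queue<RefLRUCache<int, uint64_t>::Node_Iter> outputqueue{1000, "lru-test"};

Defining OSMIUM_DEBUG_QUEUE_SIZE before including the queue header makes its destructor print how often the producer was blocked on a full queue and how often the consumer found it empty, which is a quick way to check whether the bound is too tight.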