[LeetCode] LRU Cache

Design and implement a data structure for Least Recently Used (LRU) cache. It should support the following operations: get and set.

get(key) - Get the value (will always be positive) of the key if the key exists in the cache, otherwise return -1.
set(key, value) - Set the value if the key is already present, otherwise insert the key-value pair. When the cache reaches its capacity, it should invalidate the least recently used item before inserting the new item.
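
For instance, with a capacity of 2 the expected behavior is the following (an illustrative trace using the class implemented further below):

LRUCache cache(2);
cache.set(1, 1);   // cache holds {1=1}
cache.set(2, 2);   // cache holds {2=2, 1=1}
cache.get(1);      // returns 1; key 1 becomes the most recently used
cache.set(3, 3);   // capacity reached: key 2 (least recently used) is evicted
cache.get(2);      // returns -1, key 2 is no longer in the cache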

 


Analysis
To make lookup, insertion, and deletion all fast, we combine a doubly linked list (std::list) with a hash table
(std::unordered_map), because:
• The hash table stores each node's position (an iterator into the list), so a node can be located in essentially O(1) time
• A doubly linked list supports O(1) insertion and deletion at a known position; with a singly linked list, deleting a node would first require finding its predecessor
Implementation details:
• The closer a node is to the head of the list, the more recently it was accessed; the node at the tail is the least recently used
• When a key is accessed and found, move its node to the head of the list (the iterator stored in the hash table remains valid after the move, so it does not need to be updated; see the splice sketch after this list)
• When inserting a new key and the cache has already reached its capacity, remove the tail node and erase the corresponding
entry from the hash table; the new node is inserted at the head of the list
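
The key primitive is std::list::splice, which relinks an existing node to the front of the list in O(1) and does not invalidate iterators. A minimal standalone sketch of this move-to-front step (separate from the cache class below, with made-up keys and values):

#include <iostream>
#include <list>
#include <unordered_map>
using namespace std;

int main()
{
    list< pair<int,int> > lst = { {1,10}, {2,20}, {3,30} };
    unordered_map< int, list< pair<int,int> >::iterator > pos;
    for(list< pair<int,int> >::iterator it = lst.begin(); it != lst.end(); ++it)
        pos[it->first] = it;

    // "Access" key 3: splice relinks its node to the front in O(1).
    lst.splice(lst.begin(), lst, pos[3]);

    // The iterator stored in pos is still valid after splice, so the
    // hash table does not need to be updated.
    for(list< pair<int,int> >::iterator it = lst.begin(); it != lst.end(); ++it)
        cout << it->first << " -> " << it->second << endl;   // prints 3, 1, 2
    return 0;
}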

 

More detailed analysis: http://www.cnblogs.com/diegodu/p/4569048.html and http://www.cnblogs.com/dolphin0520/p/3741519.html

#include <iostream>
#include <list>
#include <unordered_map>
using namespace std;

// Node stored in the doubly linked list. The key is kept alongside the
// value so the corresponding hash-map entry can be erased on eviction.
struct cacheNode{
   int key;
   int val;
   cacheNode(int k, int v) : key(k), val(v) {}
};


class LRUCache{
    private:
        int m_capacity;
        list<cacheNode> m_list; // doubly linked list of cacheNode, most recently used at the head
        unordered_map< int, list<cacheNode>::iterator > m_map; // key -> iterator into m_list

    public:
        LRUCache(int capacity)
        {
            m_capacity = capacity;
        }

        int get(int key) {
            if(m_map.find(key) == m_map.end())
            {
                return -1;
            }
            else
            {
                //move the node to head of double list
                list<cacheNode>::iterator it = m_map[key];
                m_list.splice(m_list.begin(), m_list, it);
                return m_list.begin()->val;
            }
        }

        void set(int key, int value)
        {
            if(m_map.find(key) == m_map.end())
            {
                //delete the back one if reach capacity
                if((int)m_list.size() >= m_capacity)
                {
                    cacheNode tmp = m_list.back();
                    m_list.pop_back();
                    m_map.erase(tmp.key);
                }
                // insert new one into the head
                cacheNode node(key, value);
                m_list.push_front(node);
                m_map[key]= m_list.begin();
            }
            else
            {
                //move the node to head of double list
                list<cacheNode>::iterator it = m_map[key];
                m_list.splice(m_list.begin(), m_list, it);
                //update value
                m_list.begin()->val = value;

            }

        }

        void printCache()
        {
            for(list<cacheNode>::iterator it = m_list.begin(); it != m_list.end(); it++)
            {
                cout << "key:\t" << it->key <<"\tvalue\t" << it->val <<endl;
            }
            cout << endl;
        }
};


int main()
{
    LRUCache cache(5);
    cache.set(1, 1);
    cache.set(2, 2);
    cache.set(3, 4);
    cache.set(4, 4);
    cache.set(5, 5);
    cache.printCache();   // keys from most to least recently used: 5 4 3 2 1
    cache.set(6, 6);      // cache is full, so key 1 (least recently used) is evicted
    cache.printCache();   // 6 5 4 3 2
    cache.set(2, 9);      // key 2 exists: value updated and node moved to the head
    cache.printCache();   // 2 6 5 4 3
    cache.set(3, 3);      // key 3 exists: value updated and node moved to the head
    cache.printCache();   // 3 2 6 5 4
    cache.get(5);         // key 5 is accessed and moved to the head
    cache.printCache();   // 5 3 2 6 4
    return 0;
}

 

 

 
