Thursday, November 23, 2017

[LeetCode]LFU Cache


This is a fairly involved problem. We can borrow the idea from LRU Cache: keep a map from each key to its NodeList, where a NodeList holds all key/value pairs that share the same access frequency, ordered by recency of use. Each NodeList is a simplified LRU Cache: it never has to deal with exceeding capacity, so we only need to implement insert, delete, get, and deleteLast, and we do not need the usual "promote on access" operation of an LRU Cache, because after one more access a key's frequency goes up by 1, which means we remove the key/value pair from its current NodeList and insert it into the NodeList for frequency + 1. Since insertions always go to the head, each NodeList stays in LRU order. deleteLast exists so that, when the LFU cache exceeds its capacity, we can remove the least recently used element from the lowest-frequency NodeList. The LFU cache itself is again a hashmap + doubly linked list structure: each node of the outer list is a NodeList, different NodeLists correspond to different frequencies, and they are ordered from the lowest frequency at the head to the highest at the tail. The hashmap maps each key to the NodeList that currently holds it, and head and tail pointers track the two ends. For get and put:

  • get: look up the key's NodeList and read the value, then delete the key from that NodeList and insert it into the NodeList for frequency + 1, creating that NodeList if it does not exist yet
  • put:
    • If the key already exists in the LFU cache, the process is the same as for get, except the value is also updated
    • If it does not exist and the cache is already full, delete the last element of the lowest-frequency NodeList and update the map. Then insert the new element into the NodeList with frequency 1, creating it if it does not exist
All of these operations run in constant time. The code is as follows:

struct Node
{
    int val, key;
    Node* prev, *next;
    Node(int k, int v)
    {
        key = k;
        val = v;
        prev = nullptr;
        next = nullptr;
    }
};
// Doubly linked list of (key, value) nodes that all share the same access count.
// Nodes are kept in LRU order: most recently inserted at the head, least recent at the tail.
class NodeList
{
public:
    NodeList* next, *prev;
    NodeList(int count)
    {
        m_head = nullptr;
        m_tail = nullptr;
        next = nullptr;
        prev = nullptr;
        m_count = count;
    }
    // Insert a new key/value pair at the head (most recently used position).
    void insert(int key, int val)
    {
        Node* node = new Node(key, val);
        node->next = m_head;
        if(m_head) m_head->prev = node;
        m_head = node;
        if(!m_tail) m_tail = node;
        m_map[key] = node;
    }
    // Remove the least recently used node (the tail) and return its key.
    int deleteLast()
    {
        auto node = m_tail;
        if(m_tail == nullptr) return -1;
        else if(m_tail == m_head)
        {
            m_head = nullptr;
            m_tail = nullptr;
        }
        else
        {
            m_tail = m_tail->prev;
            m_tail->next = nullptr;
            node->prev = nullptr;
        }
        int key = node->key;
        m_map.erase(key);
        delete node;
        return key;
    }
    // Remove the node for the given key, if present.
    void del(int key)
    {
        if(!contains(key)) return;
        Node* node = m_map[key];
        if(node == m_head) m_head = node->next;
        if(node == m_tail) m_tail = node->prev;
        if(node->next) node->next->prev = node->prev;
        if(node->prev) node->prev->next = node->next;
        node->prev = nullptr;
        node->next = nullptr;
        delete node;
        m_map.erase(key);
    }
    int get(int key)
    {
        if(contains(key))
            return m_map[key]->val;
        return -1;
    }
    int getCount()
    {
        return m_count;
    }
    bool contains(int key)
    {
        return m_map.find(key) != m_map.end();
    }
    bool empty()
    {
        return m_head == nullptr;
    }
private:
    unordered_map<int, Node*> m_map;
    Node* m_head, *m_tail;
    int m_count;   // access frequency shared by every node in this list
};
class LFUCache {
public:
    LFUCache(int capacity) {
        m_cap = capacity;
        m_size = 0;
        m_head = nullptr;
        m_tail = nullptr;
    }
    int get(int key) {
        if(contains(key))
        {
            int val = m_map[key]->get(key);
            updatePriority(key, val);
            return val;
        }
        return -1;
    }
    void put(int key, int value) {
        if(!m_cap) return;
        if(contains(key))
        {
            updatePriority(key, value);
        }
        else
        {
            ++m_size;
            if(m_size > m_cap)
            {
                // Evict the LRU element of the lowest-frequency list (the head list).
                int evicted = m_head->deleteLast();
                if(m_head->empty()) deleteNode(m_head);
                m_map.erase(evicted);
                --m_size;
            }
            // New keys always start in the frequency-1 list.
            NodeList* node = m_head && m_head->getCount() == 1 ? m_head : new NodeList(1);
            if(!m_head)
            {
                m_head = node;
                m_tail = node;
            }
            if(node != m_head)
            {
                node->next = m_head;
                m_head->prev = node;
                m_head = node;
            }
            node->insert(key, value);
            m_map[key] = node;
        }
    }
private:
    int m_cap, m_size;
    unordered_map<int, NodeList*> m_map;   // key -> NodeList currently holding it
    NodeList* m_head, *m_tail;             // NodeLists ordered by ascending frequency
    bool contains(int key)
    {
        return m_map.find(key) != m_map.end();
    }
    // Move key from its current NodeList to the NodeList with frequency + 1.
    void updatePriority(int key, int val)
    {
        auto node = m_map[key];
        node->del(key);
        NodeList* newNode = findNext(node);
        newNode->insert(key, val);
        m_map[key] = newNode;
        // Only unlink and free the old list after findNext is done with it.
        if(node->empty()) deleteNode(node);
    }
    // Unlink a NodeList from the list of lists and free it.
    void deleteNode(NodeList* node)
    {
        if(node == m_head) m_head = node->next;
        if(node == m_tail) m_tail = node->prev;
        if(node->next) node->next->prev = node->prev;
        if(node->prev) node->prev->next = node->next;
        node->prev = nullptr;
        node->next = nullptr;
        delete node;
    }
    // Return the NodeList with frequency curr->getCount() + 1, creating it if needed.
    NodeList* findNext(NodeList* curr)
    {
        if(curr->next && curr->next->getCount() == curr->getCount() + 1) return curr->next;
        NodeList* node = new NodeList(curr->getCount() + 1);
        if(curr->next) curr->next->prev = node;
        node->next = curr->next;
        curr->next = node;
        node->prev = curr;
        if(m_tail == curr) m_tail = node;
        return node;
    }
};
/**
 * Your LFUCache object will be instantiated and called as such:
 * LFUCache* obj = new LFUCache(capacity);
 * int param_1 = obj->get(key);
 * obj->put(key, value);
 */
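As a sanity check, here is a small driver that exercises the class above on the sample sequence from the LeetCode problem statement. The driver, the includes, and the expected outputs in the comments are my own additions for illustration; they are not part of the original solution, and the NodeList and LFUCache definitions above are assumed to be pasted in first.

#include <iostream>
#include <unordered_map>
using namespace std;

// ... NodeList and LFUCache definitions from above ...

int main()
{
    LFUCache cache(2);                 // capacity 2
    cache.put(1, 1);
    cache.put(2, 2);
    cout << cache.get(1) << endl;      // 1; key 1 now has frequency 2
    cache.put(3, 3);                   // evicts key 2, the only key with frequency 1
    cout << cache.get(2) << endl;      // -1 (not found)
    cout << cache.get(3) << endl;      // 3; key 3 now has frequency 2
    cache.put(4, 4);                   // keys 1 and 3 tie on frequency; evicts key 1 (least recently used)
    cout << cache.get(1) << endl;      // -1 (not found)
    cout << cache.get(3) << endl;      // 3
    cout << cache.get(4) << endl;      // 4
    return 0;
}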

We can also implement this with C++'s built-in list, which reduces the line count: we no longer need to write NodeList ourselves, and list's emplace lets us insert at any position in constant time given an iterator. The code is as follows:


struct Node
{
    int key, val;
    Node(int k, int v) : key(k), val(v)
    {
    }
};
// All key/value pairs with the same access count, kept in LRU order (front = most recent).
struct NodeList
{
    int count;
    list<Node> nodes;
    NodeList(int k, int v, int c)
    {
        count = c;
        nodes = {Node(k, v)};
    }
};
class LFUCache {
public:
    LFUCache(int capacity) {
        m_cap = capacity;
        m_size = 0;
    }
    int get(int key) {
        if(m_map.find(key) == m_map.end()) return -1;
        auto col = m_map[key].second;
        int val = col->val;
        increasePriority(key, val);
        return val;
    }
    void put(int key, int value) {
        if(!m_cap) return;
        if(m_map.find(key) == m_map.end())
        {
            ++m_size;
            if(m_size > m_cap)
            {
                // Evict the LRU element of the lowest-frequency row (the front of m_matrix).
                auto row = m_matrix.begin();
                auto col = m_matrix.front().nodes.end();
                --col;
                int evicted = col->key;
                row->nodes.erase(col);
                if(row->nodes.empty()) m_matrix.erase(row);
                m_map.erase(evicted);
                --m_size;
            }
            if(m_matrix.empty() || m_matrix.front().count != 1)
            {
                m_matrix.emplace_front(key, value, 1);
                m_map[key] = make_pair(m_matrix.begin(), m_matrix.front().nodes.begin());
            }
            else
            {
                auto head = m_matrix.begin();
                head->nodes.emplace_front(key, value);
                m_map[key] = make_pair(head, head->nodes.begin());
            }
        }
        else
        {
            increasePriority(key, value);
        }
    }
private:
    int m_cap, m_size;
    list<NodeList> m_matrix;   // rows ordered by ascending access count
    unordered_map<int, pair<list<NodeList>::iterator, list<Node>::iterator>> m_map;
    // Move key from its current row to the row with count + 1, creating that row if needed.
    void increasePriority(int key, int val)
    {
        auto row = m_map[key].first;
        auto col = m_map[key].second;
        auto next = row;
        ++next;
        if(next != m_matrix.end() && next->count == row->count + 1)
        {
            next->nodes.emplace_front(key, val);
            m_map[key] = make_pair(next, next->nodes.begin());
        }
        else
        {
            auto iter = m_matrix.emplace(next, key, val, row->count + 1);
            m_map[key] = make_pair(iter, iter->nodes.begin());
        }
        row->nodes.erase(col);
        if(row->nodes.empty()) m_matrix.erase(row);
    }
};
/**
 * Your LFUCache object will be instantiated and called as such:
 * LFUCache* obj = new LFUCache(capacity);
 * int param_1 = obj->get(key);
 * obj->put(key, value);
 */
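The reason it is safe to store list iterators inside the hashmap is that std::list iterators stay valid when other elements are inserted or erased; only the erased element's own iterator is invalidated, and emplace or erase at a known iterator costs O(1). Here is a tiny standalone sketch of that idiom, separate from the solution above; the container contents and variable names are just for illustration.

#include <iostream>
#include <iterator>
#include <list>
#include <unordered_map>
using namespace std;

int main()
{
    list<int> rows = {10, 30};
    unordered_map<int, list<int>::iterator> where;
    where[10] = rows.begin();              // remember where 10 lives

    // O(1) insertion at an arbitrary position, given an iterator: put 20 before 30.
    auto it30 = next(rows.begin());
    rows.emplace(it30, 20);

    // The stored iterator is unaffected by insertions elsewhere in the list.
    cout << *where[10] << endl;            // 10

    // Erasing a different element also leaves it valid.
    rows.erase(it30);                      // removes 30
    cout << *where[10] << endl;            // 10
    return 0;
}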
