Commit 9aef4a4

revised
zzjjzzgggg committed Aug 11, 2018
1 parent 53a6131 commit 9aef4a4
Showing 2 changed files with 31 additions and 11 deletions.
28 changes: 17 additions & 11 deletions adv/LRUCache.h
@@ -89,30 +89,32 @@ class Cache {
using Guard = std::lock_guard<lock_type>;
/**
* the max size is the hard limit of keys and (maxSize + elasticity) is the
- * soft limit
- * the cache is allowed to grow till maxSize + elasticity and is pruned back
- * to maxSize keys
- * set maxSize = 0 for an unbounded cache (but in that case, you're better
- * off
- * using a std::unordered_map
- * directly anyway! :)
+ * soft limit the cache is allowed to grow till maxSize + elasticity and is
+ * pruned back to maxSize keys set maxSize = 0 for an unbounded cache (but
+ * in that case, you're better off using a std::unordered_map directly
+ * anyway! :)
*/
explicit Cache(size_t maxSize = 64, size_t elasticity = 10)
: maxSize_(maxSize), elasticity_(elasticity) {}
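// Illustration, not part of this commit: with the defaults above the cache may
// grow to maxSize + elasticity = 64 + 10 = 74 entries; the insert() that
// reaches that ceiling prunes it back down to 64. A hypothetical
// instantiation, assuming the remaining template parameters are defaulted:
//
//   lru::Cache<std::string, int> cache(/*maxSize=*/64, /*elasticity=*/10);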

virtual ~Cache() = default;

size_t size() const {
Guard g(lock_);
return cache_.size();
}

bool empty() const {
Guard g(lock_);
return cache_.empty();
}

void clear() {
Guard g(lock_);
cache_.clear();
keys_.clear();
}

void insert(const Key& k, const Value& v) {
Guard g(lock_);
const auto iter = cache_.find(k);
@@ -126,6 +128,7 @@ class Cache {
cache_[k] = keys_.begin();
prune();
}

bool tryGet(const Key& kIn, Value& vOut) {
Guard g(lock_);
const auto iter = cache_.find(kIn);
@@ -136,6 +139,7 @@ class Cache {
vOut = iter->second->value;
return true;
}

/**
* The const reference returned here is only
* guaranteed to be valid till the next insert/delete
@@ -149,10 +153,12 @@ class Cache {
keys_.splice(keys_.begin(), keys_, iter->second);
return iter->second->value;
}

/**
* returns a copy of the stored object (if found)
*/
Value getCopy(const Key& k) { return get(k); }
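// Illustration, not part of this commit: the reference returned by get() stays
// valid only until the next insert/delete, so prefer getCopy() or tryGet()
// when the value must outlive later mutations. Hypothetical calls, assuming a
// cache instance holding int values under key "a":
//
//   const auto& ref = cache.get("a");   // invalidated by the next insert/delete
//   int copy = cache.getCopy("a");      // safe to keep around
//   int v;
//   if (cache.tryGet("a", v)) { /* use v */ }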

bool remove(const Key& k) {
Guard g(lock_);
auto iter = cache_.find(k);
@@ -163,6 +169,7 @@ class Cache {
cache_.erase(iter);
return true;
}

bool contains(const Key& k) {
Guard g(lock_);
return cache_.find(k) != cache_.end();
@@ -171,6 +178,7 @@ class Cache {
size_t getMaxSize() const { return maxSize_; }
size_t getElasticity() const { return elasticity_; }
size_t getMaxAllowedSize() const { return maxSize_ + elasticity_; }

template <typename F>
void cwalk(F& f) const {
Guard g(lock_);
@@ -180,9 +188,7 @@ class Cache {
protected:
size_t prune() {
size_t maxAllowed = maxSize_ + elasticity_;
- if (maxSize_ == 0 || cache_.size() < maxAllowed) {
- return 0;
- }
+ if (maxSize_ == 0 || cache_.size() < maxAllowed) return 0;
size_t count = 0;
while (cache_.size() > maxSize_) {
cache_.erase(keys_.back().key);
@@ -204,6 +210,6 @@ class Cache {
size_t elasticity_;
};

-} // namespace LRUCache11
+} // namespace lru

#endif /* __LRUCACHE_H__ */
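A minimal, self-contained usage sketch of the class edited above (not part of this commit). It assumes the header is reachable as adv/LRUCache.h, that the class lives in namespace lru as the closing comment now says, and that lru::Cache<std::string, int> is a valid instantiation with the remaining template parameters defaulted:

#include <cstdio>
#include <string>

#include "adv/LRUCache.h"

int main() {
    // Hard limit 3 with elasticity 2: the cache may grow to 5 entries before
    // prune() evicts least-recently-used keys back down to 3.
    lru::Cache<std::string, int> cache(3, 2);

    for (int i = 0; i < 4; ++i) cache.insert("k" + std::to_string(i), i);
    std::printf("size after 4 inserts: %zu\n", cache.size());  // 4, still below maxSize + elasticity

    cache.insert("k4", 4);  // size reaches 5, so this insert triggers pruning
    std::printf("size after pruning: %zu\n", cache.size());    // back to maxSize = 3

    int v;
    if (cache.tryGet("k4", v)) std::printf("k4 -> %d\n", v);   // the most recent key is retained
    return 0;
}

The arithmetic mirrors prune(): eviction starts only once size() reaches maxSize_ + elasticity_, and it then removes keys from the LRU end until size() equals maxSize_.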
14 changes: 14 additions & 0 deletions io/ioutils.h
@@ -2,6 +2,7 @@
#define __IOUTILS_H__

#include <tuple>
#include <map>
#include <unordered_map>

#include "iobase.h"
@@ -163,6 +164,19 @@ void saveTupleVec(const std::vector<std::tuple<T...>>& vec,

// map
template <typename TKey, typename TVal>
void saveMap(const std::map<TKey, TVal>& map,
const std::string& filename, const bool echo = true,
const std::string& format = "{}\t{}\n",
const std::string& anno = "") {
std::unique_ptr<IOOut> out_ptr = getIOOut(filename);
if (!anno.empty()) out_ptr->save(anno);
for (auto& pr : map)
out_ptr->save(fmt::format(format, pr.first, pr.second));
if (echo) printf("saved to %s\n", filename.c_str());
}

// unordered_map
template <typename TKey, typename TVal>
void saveMap(const std::unordered_map<TKey, TVal>& map,
const std::string& filename, const bool echo = true,
const std::string& format = "{}\t{}\n",
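A usage sketch for the new std::map overload (not part of this commit). It assumes the header is reachable as io/ioutils.h and that saveMap lives in an ioutils namespace; the namespace is not visible in this hunk, so adjust the qualification to match the header:

#include <map>
#include <string>

#include "io/ioutils.h"

int main() {
    std::map<std::string, int> degree = {{"alice", 3}, {"bob", 5}};
    // With the default arguments this writes one "key<TAB>value" line per
    // entry via fmt::format("{}\t{}\n", ...) and prints "saved to ..." when done.
    ioutils::saveMap(degree, "degree.txt");
    return 0;
}

Because this overload takes std::map, the output file is ordered by key, which the existing std::unordered_map overload does not guarantee.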
