From 9c38034765305b0ce8fdad302ccfe499f8998196 Mon Sep 17 00:00:00 2001 From: Qian Sun Date: Tue, 17 Jan 2017 14:11:45 -0800 Subject: [PATCH] Add simple lru cache into utils. (#8) * Add simple lru cache into utils. * Format files. --- mixerclient/BUILD | 24 + mixerclient/utils/google_macros.h | 24 + mixerclient/utils/simple_lru_cache.h | 50 + mixerclient/utils/simple_lru_cache_inl.h | 1090 +++++++++++++++++++ mixerclient/utils/simple_lru_cache_test.cc | 1112 ++++++++++++++++++++ 5 files changed, 2300 insertions(+) create mode 100644 mixerclient/utils/google_macros.h create mode 100644 mixerclient/utils/simple_lru_cache.h create mode 100644 mixerclient/utils/simple_lru_cache_inl.h create mode 100644 mixerclient/utils/simple_lru_cache_test.cc diff --git a/mixerclient/BUILD b/mixerclient/BUILD index 47d1c7d6d553..21b23a52f830 100644 --- a/mixerclient/BUILD +++ b/mixerclient/BUILD @@ -33,6 +33,16 @@ cc_library( ], ) +cc_library( + name = "simple_lru_cache", + srcs = ["utils/google_macros.h"], + hdrs = [ + "utils/simple_lru_cache.h", + "utils/simple_lru_cache_inl.h", + ], + visibility = ["//visibility:public"], +) + cc_test( name = "mixer_client_impl_test", size = "small", @@ -43,3 +53,17 @@ cc_test( "//external:googletest_main", ], ) + +cc_test( + name = "simple_lru_cache_test", + size = "small", + srcs = ["utils/simple_lru_cache_test.cc"], + linkopts = [ + "-lm", + "-lpthread", + ], + deps = [ + ":simple_lru_cache", + "//external:googletest_main", + ], +) \ No newline at end of file diff --git a/mixerclient/utils/google_macros.h b/mixerclient/utils/google_macros.h new file mode 100644 index 000000000000..475eddecca46 --- /dev/null +++ b/mixerclient/utils/google_macros.h @@ -0,0 +1,24 @@ +/* Copyright 2017 Google Inc. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MIXERCLIENT_UTILS_GOOGLE_MACROS_H_ +#define MIXERCLIENT_UTILS_GOOGLE_MACROS_H_ + +#undef GOOGLE_DISALLOW_EVIL_CONSTRUCTORS +#define GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(TypeName) \ + TypeName(const TypeName&); \ + void operator=(const TypeName&) + +#endif // MIXERCLIENT_UTILS_GOOGLE_MACROS_H_ diff --git a/mixerclient/utils/simple_lru_cache.h b/mixerclient/utils/simple_lru_cache.h new file mode 100644 index 000000000000..3a05f47cebcb --- /dev/null +++ b/mixerclient/utils/simple_lru_cache.h @@ -0,0 +1,50 @@ +/* Copyright 2017 Google Inc. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// For inclusion in .h files. 
The real class definition is in +// simple_lru_cache_inl.h. + +#ifndef MIXERCLIENT_UTILS_SIMPLE_LRU_CACHE_H_ +#define MIXERCLIENT_UTILS_SIMPLE_LRU_CACHE_H_ + +#include +#include // for hash<> + +namespace istio { +namespace mixer_client { + +namespace internal { +template +struct SimpleLRUHash : public std::hash {}; +} // namespace internal + +template , + typename EQ = std::equal_to > +class SimpleLRUCache; + +// Deleter is a functor that defines how to delete a Value*. That is, it +// contains a public method: +// operator() (Value* value) +// See example in the associated unittest. +template , + typename EQ = std::equal_to > +class SimpleLRUCacheWithDeleter; + +} // namespace mixer_client +} // namespace istio + +#endif // MIXERCLIENT_UTILS_SIMPLE_LRU_CACHE_H_ diff --git a/mixerclient/utils/simple_lru_cache_inl.h b/mixerclient/utils/simple_lru_cache_inl.h new file mode 100644 index 000000000000..1fbf5d54564e --- /dev/null +++ b/mixerclient/utils/simple_lru_cache_inl.h @@ -0,0 +1,1090 @@ +/* Copyright 2017 Google Inc. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// A generic LRU cache that maps from type Key to Value*. +// +// . Memory usage is fairly high: on a 64-bit architecture, a cache with +// 8-byte keys can use 108 bytes per element, not counting the +// size of the values. This overhead can be significant if many small +// elements are stored in the cache. +// +// . Lookup returns a "Value*". Client should call "Release" when done. +// +// . Override "RemoveElement" if you want to be notified when an +// element is being removed. The default implementation simply calls +// "delete" on the pointer. +// +// . Call Clear() before destruction. +// +// . No internal locking is done: if the same cache will be shared +// by multiple threads, the caller should perform the required +// synchronization before invoking any operations on the cache. +// Note a reader lock is not sufficient as Lookup() updates the pin count. +// +// . We provide support for setting a "max_idle_time". Entries +// are discarded when they have not been used for a time +// greater than the specified max idle time. If you do not +// call SetMaxIdleSeconds(), entries never expire (they can +// only be removed to meet size constraints). +// +// . We also provide support for a strict age-based eviction policy +// instead of LRU. See SetAgeBasedEviction(). + +#ifndef MIXERCLIENT_UTILS_SIMPLE_LRU_CACHE_INL_H_ +#define MIXERCLIENT_UTILS_SIMPLE_LRU_CACHE_INL_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "google_macros.h" +#include "simple_lru_cache.h" + +namespace istio { +namespace mixer_client { + +// Define number of microseconds for a second. +const int64_t kSecToUsec = 1000000; + +// Define a simple cycle timer interface to encapsulate timer related code. +// The concept is from CPU cycle. The cycle clock code from +// https://github.com/google/benchmark/blob/master/src/cycleclock.h can be used. 
+// But that code only works for some platforms. To make code works for all +// platforms, SimpleCycleTimer class uses a fake CPU cycle each taking a +// microsecond. If needed, this timer class can be easily replaced by a +// real cycle_clock. +class SimpleCycleTimer { + public: + // Return the current cycle in microseconds. + static int64_t Now() { + struct timeval tv; + gettimeofday(&tv, NULL); + return static_cast(tv.tv_sec * kSecToUsec + tv.tv_usec); + } + // Return number of cycles in a second. + static int64_t Frequency() { return kSecToUsec; } + + private: + SimpleCycleTimer(); // no instances +}; + +// A constant iterator. a client of SimpleLRUCache should not create these +// objects directly, instead, create objects of type +// SimpleLRUCache::const_iterator. This is created inside of +// SimpleLRUCache::begin(),end(). Key and Value are the same as the template +// args to SimpleLRUCache Elem - the Value type for the internal hash_map that +// the SimpleLRUCache maintains H and EQ are the same as the template arguments +// for SimpleLRUCache +// +// NOTE: the iterator needs to keep a copy of end() for the Cache it is +// iterating over this is so SimpleLRUCacheConstIterator does not try to update +// its internal pair if its internal hash_map iterator is pointing +// to end see the implementation of operator++ for an example. +// +// NOTE: DO NOT SAVE POINTERS TO THE ITEM RETURNED BY THIS ITERATOR +// e.g. SimpleLRUCacheConstIterator it = something; do not say KeyToSave +// &something->first this will NOT work., as soon as you increment the iterator +// this will be gone. :( + +template +class SimpleLRUCacheConstIterator + : public std::iterator> { + public: + typedef typename MapType::const_iterator HashMapConstIterator; + // Allow parent template's types to be referenced without qualification. + typedef typename SimpleLRUCacheConstIterator::reference reference; + typedef typename SimpleLRUCacheConstIterator::pointer pointer; + + // This default constructed Iterator can only be assigned to or destroyed. + // All other operations give undefined behaviour. + SimpleLRUCacheConstIterator() {} + SimpleLRUCacheConstIterator(HashMapConstIterator it, + HashMapConstIterator end); + SimpleLRUCacheConstIterator& operator++(); + + reference operator*() { return external_view_; } + pointer operator->() { return &external_view_; } + + // For LRU mode, last_use_time() returns elements last use time. + // See GetLastUseTime() description for more information. + int64_t last_use_time() const { return last_use_; } + + // For age-based mode, insertion_time() returns elements insertion time. + // See GetInsertionTime() description for more information. 
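+ // A short illustrative sketch (not part of the API; "cache" is a
+ // placeholder for a SimpleLRUCache configured with SetAgeBasedEviction()):
+ // walking the cache and reading each element's insertion timestamp
+ // through the iterator:
+ //   for (auto it = cache.begin(); it != cache.end(); ++it) {
+ //     // it->first is the key, it->second is the Value*.
+ //     int64_t age = SimpleCycleTimer::Now() - it.insertion_time();
+ //   }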
+ int64_t insertion_time() const { return last_use_; } + + friend bool operator==(const SimpleLRUCacheConstIterator& a, + const SimpleLRUCacheConstIterator& b) { + return a.it_ == b.it_; + } + + friend bool operator!=(const SimpleLRUCacheConstIterator& a, + const SimpleLRUCacheConstIterator& b) { + return !(a == b); + } + + private: + HashMapConstIterator it_; + HashMapConstIterator end_; + std::pair external_view_; + int64_t last_use_; +}; + +// Each entry uses the following structure +template +struct SimpleLRUCacheElem { + Key key; // The key + Value* value; // The stored value + int pin; // Number of outstanding releases + size_t units; // Number of units for this value + SimpleLRUCacheElem* next = nullptr; // Next entry in LRU chain + SimpleLRUCacheElem* prev = nullptr; // Prev entry in LRU chain + int64_t last_use_; // Timestamp of last use (in LRU mode) + // or creation (in age-based mode) + + SimpleLRUCacheElem(const Key& k, Value* v, int p, size_t u, int64_t last_use) + : key(k), value(v), pin(p), units(u), last_use_(last_use) {} + + bool IsLinked() const { + // If we are in the LRU then next and prev should be non-NULL. Otherwise + // both should be properly initialized to nullptr. + assert(static_cast(next == nullptr) == + static_cast(prev == nullptr)); + return next != nullptr; + } + + void Unlink() { + if (!IsLinked()) return; + prev->next = next; + next->prev = prev; + prev = nullptr; + next = nullptr; + } + + void Link(SimpleLRUCacheElem* head) { + next = head->next; + prev = head; + next->prev = this; // i.e. head->next->prev = this; + prev->next = this; // i.e. head->next = this; + } + static const int64_t kNeverUsed = -1; +}; + +template +const int64_t SimpleLRUCacheElem::kNeverUsed; + +// A simple class passed into various cache methods to change the +// behavior for that single call. +class SimpleLRUCacheOptions { + public: + SimpleLRUCacheOptions() : update_eviction_order_(true) {} + + // If false neither the last modified time (for based age eviction) nor + // the element ordering (for LRU eviction) will be updated. + // This value must be the same for both Lookup and Release. + // The default is true. + bool update_eviction_order() const { return update_eviction_order_; } + void set_update_eviction_order(bool v) { update_eviction_order_ = v; } + + private: + bool update_eviction_order_; +}; + +// The MapType's value_type must be pair +template +class SimpleLRUCacheBase { + public: + // class ScopedLookup + // If you have some code that looks like this: + // val = c->Lookup(key); + // if (val) { + // if (something) { + // c->Release(key, val); + // return; + // } + // if (something else) { + // c->Release(key, val); + // return; + // } + // Then ScopedLookup will make the code simpler. It automatically + // releases the value when the instance goes out of scope. + // Example: + // ScopedLookup lookup(c, key); + // if (lookup.Found()) { + // ... + // + // NOTE: Be extremely careful when using ScopedLookup with Mutexes. This + // code is safe since the lock will be released after the ScopedLookup is + // destroyed. + // MutexLock l(&mu_); + // ScopedLookup lookup(....); + // + // This is NOT safe since the lock is released before the ScopedLookup is + // destroyed, and consequently the value will be unpinned without the lock + // being held. + // mu_.Lock(); + // ScopedLookup lookup(....); + // ... 
+ // mu_.Unlock(); + class ScopedLookup { + public: + ScopedLookup(SimpleLRUCacheBase* cache, const Key& key) + : cache_(cache), + key_(key), + value_(cache_->LookupWithOptions(key_, options_)) {} + + ScopedLookup(SimpleLRUCacheBase* cache, const Key& key, + const SimpleLRUCacheOptions& options) + : cache_(cache), + key_(key), + options_(options), + value_(cache_->LookupWithOptions(key_, options_)) {} + + ~ScopedLookup() { + if (value_ != nullptr) cache_->ReleaseWithOptions(key_, value_, options_); + } + const Key& key() const { return key_; } + Value* value() const { return value_; } + bool Found() const { return value_ != nullptr; } + const SimpleLRUCacheOptions& options() const { return options_; } + + private: + SimpleLRUCacheBase* const cache_; + const Key key_; + const SimpleLRUCacheOptions options_; + Value* const value_; + + GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(ScopedLookup); + }; + + // Create a cache that will hold up to the specified number of units. + // Usually the units will be byte sizes, but in some caches different + // units may be used. For instance, we may want each open file to + // be one unit in an open-file cache. + // + // By default, the max_idle_time is infinity; i.e. entries will + // stick around in the cache regardless of how old they are. + explicit SimpleLRUCacheBase(int64_t total_units); + + // Release all resources. Cache must have been "Clear"ed. This + // requirement is imposed because "Clear()" will call + // "RemoveElement" for each element in the cache. The destructor + // cannot do that because it runs after any subclass destructor. + virtual ~SimpleLRUCacheBase() { + assert(table_.size() == 0); + assert(defer_.size() == 0); + } + + // Change the maximum size of the cache to the specified number of units. + // If necessary, entries will be evicted to comply with the new size. + void SetMaxSize(int64_t total_units) { + max_units_ = total_units; + GarbageCollect(); + } + + // Change the max idle time to the specified number of seconds. + // If "seconds" is a negative number, it sets the max idle time + // to infinity. + void SetMaxIdleSeconds(double seconds) { + SetTimeout(seconds, true /* lru */); + } + + // Stop using the LRU eviction policy and instead expire anything + // that has been in the cache for more than the specified number + // of seconds. + // If "seconds" is a negative number, entries don't expire but if + // we need to make room the oldest entries will be removed first. + // You can't set both a max idle time and age-based eviction. + void SetAgeBasedEviction(double seconds) { + SetTimeout(seconds, false /* lru */); + } + + // If cache contains an entry for "k", return a pointer to it. + // Else return nullptr. + // + // If a value is returned, the caller must call "Release" when it no + // longer needs that value. This functionality is useful to prevent + // the value from being evicted from the cache until it is no longer + // being used. + Value* Lookup(const Key& k) { + return LookupWithOptions(k, SimpleLRUCacheOptions()); + } + + // Same as "Lookup(Key)" but allows for additional options. See + // the SimpleLRUCacheOptions object for more information. + Value* LookupWithOptions(const Key& k, const SimpleLRUCacheOptions& options); + + // Removes the pinning done by an earlier "Lookup". After this call, + // the caller should no longer depend on the value sticking around. + // + // If there are no more pins on this entry, it may be deleted if + // either it has been "Remove"d, or the cache is overfull. 
+ // In this case "RemoveElement" will be called. + void Release(const Key& k, Value* value) { + ReleaseWithOptions(k, value, SimpleLRUCacheOptions()); + } + + // Same as "Release(Key, Value)" but allows for additional options. See + // the SimpleLRUCacheOptions object for more information. Take care + // that the SimpleLRUCacheOptions object passed into this method is + // compatible with SimpleLRUCacheOptions object passed into Lookup. + // If they are incompatible it can put the cache into some unexpected + // states. Better yet, just use a ScopedLookup which takes care of this + // for you. + void ReleaseWithOptions(const Key& k, Value* value, + const SimpleLRUCacheOptions& options); + + // Insert the specified "k,value" pair in the cache. Remembers that + // the value occupies "units" units. For "InsertPinned", the newly + // inserted value will be pinned in the cache: the caller should + // call "Release" when it wants to remove the pin. + // + // Any old entry for "k" is "Remove"d. + // + // If the insertion causes the cache to become overfull, unpinned + // entries will be deleted in an LRU order to make room. + // "RemoveElement" will be called for each such entry. + void Insert(const Key& k, Value* value, size_t units) { + InsertPinned(k, value, units); + Release(k, value); + } + void InsertPinned(const Key& k, Value* value, size_t units); + + // Change the reported size of an object. + void UpdateSize(const Key& k, const Value* value, size_t units); + + // return true iff pair is still in use + // (i.e., either in the table or the deferred list) + // Note, if (value == nullptr), only key is used for matching + bool StillInUse(const Key& k) const { return StillInUse(k, nullptr); } + bool StillInUse(const Key& k, const Value* value) const; + + // Remove any entry corresponding to "k" from the cache. Note that + // if the entry is pinned because of an earlier Lookup or + // InsertPinned operation, the entry will disappear from further + // Lookups, but will not actually be deleted until all of the pins + // are released. + // + // "RemoveElement" will be called if an entry is actually removed. + void Remove(const Key& k); + + // Removes all entries from the cache. The pinned entries will + // disappear from further Lookups, but will not actually be deleted + // until all of the pins are released. This is different from Clear() + // because Clear() cleans up everything and requires that all Values are + // unpinned. + // + // "Remove" will be called for each cache entry. + void RemoveAll(); + + // Remove all unpinned entries from the cache. + // "RemoveElement" will be called for each such entry. + void RemoveUnpinned(); + + // Remove all entries from the cache. It is an error to call this + // operation if any entry in the cache is currently pinned. + // + // "RemoveElement" will be called for all removed entries. + void Clear(); + + // Remove all entries which have exceeded their max idle time or age + // set using SetMaxIdleSeconds() or SetAgeBasedEviction() respectively. + void RemoveExpiredEntries() { + if (max_idle_ >= 0) DiscardIdle(max_idle_); + } + + // Return current size of cache + int64_t Size() const { return units_; } + + // Return number of entries in the cache. This value may differ + // from Size() if some of the elements have a cost != 1. 
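+ // For example (illustrative only): after Insert(k1, v1, 10) and
+ // Insert(k2, v2, 10) on an otherwise empty, sufficiently large cache,
+ // Entries() returns 2 while Size() returns 20, because each element was
+ // charged 10 units.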
+ int64_t Entries() const { return table_.size(); } + + // Return size of deferred deletions + int64_t DeferredSize() const; + + // Return number of deferred deletions + int64_t DeferredEntries() const; + + // Return size of entries that are pinned but not deferred + int64_t PinnedSize() const { return pinned_units_; } + + // Return maximum size of cache + int64_t MaxSize() const { return max_units_; } + + // Return the age (in microseconds) of the least recently used element in + // the cache. If the cache is empty, zero (0) is returned. + int64_t AgeOfLRUItemInMicroseconds() const; + + // In LRU mode, this is the time of last use in cycles. Last use is defined + // as time of last Release(), Insert() or InsertPinned() methods. + // + // The timer is not updated on Lookup(), so GetLastUseTime() will + // still return time of previous access until Release(). + // + // Returns -1 if key was not found, CycleClock cycle count otherwise. + // REQUIRES: LRU mode + int64_t GetLastUseTime(const Key& k) const; + + // For age-based mode, this is the time of element insertion in cycles, + // set by Insert() and InsertPinned() methods. + // Returns -1 if key was not found, CycleClock cycle count otherwise. + // REQUIRES: age-based mode + int64_t GetInsertionTime(const Key& k) const; + + // Invokes 'DebugIterator' on each element in the cache. The + // 'pin_count' argument supplied will be the pending reference count + // for the element. The 'is_deferred' argument will be true for + // elements that have been removed but whose removal is deferred. + // The supplied value for "ouput" will be passed to the DebugIterator. + void DebugOutput(std::string* output) const; + + // Return a std::string that summarizes the contents of the cache. + // + // The output of this function is not suitable for parsing by borgmon. For an + // example of exporting the summary information in a borgmon mapped-value + // format, see GFS_CS_BufferCache::ExportSummaryAsMap in + // file/gfs/chunkserver/gfs_chunkserver.{cc,h} + std::string Summary() const { + std::stringstream ss; + ss << PinnedSize() << "/" << DeferredSize() << "/" << Size() << " p/d/a"; + return ss.str(); + } + + // STL style const_iterator support + typedef SimpleLRUCacheConstIterator const_iterator; + friend class SimpleLRUCacheConstIterator; + const_iterator begin() const { + return const_iterator(table_.begin(), table_.end()); + } + const_iterator end() const { + return const_iterator(table_.end(), table_.end()); + } + + // Invokes the 'resize' operation on the underlying map with the given + // size hint. The exact meaning of this operation and its availability + // depends on the supplied MapType. + void ResizeTable(typename MapType::size_type size_hint) { + table_.resize(size_hint); + } + + protected: + // Override this operation if you want to control how a value is + // cleaned up. For example, if the value is a "File", you may want + // to "Close" it instead of "delete"ing it. + // + // Not actually implemented here because often value's destructor is + // protected, and the derived SimpleLRUCache is declared a friend, + // so we implement it in the derived SimpleLRUCache. 
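+ // A sketch of such an override (OpenFileCache and File::Close() are
+ // hypothetical names used only for illustration; TestCache in
+ // simple_lru_cache_test.cc is a real example in this patch):
+ //   class OpenFileCache : public SimpleLRUCache<std::string, File> {
+ //    public:
+ //     explicit OpenFileCache(int64_t max_open)
+ //         : SimpleLRUCache<std::string, File>(max_open) {}
+ //    protected:
+ //     virtual void RemoveElement(const std::string& key, File* file) {
+ //       file->Close();  // close instead of delete
+ //     }
+ //   };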
+ virtual void RemoveElement(const Key& k, Value* value) = 0; + + virtual void DebugIterator(const Key& k, const Value* value, int pin_count, + int64_t last_timestamp, bool is_deferred, + std::string* output) const { + std::stringstream ss; + ss << "ox" << std::hex << value << std::dec << ": pin: " << pin_count; + ss << ", is_deferred: " << is_deferred; + ss << ", last_use: " << last_timestamp << std::endl; + *output += ss.str(); + } + + // Override this operation if you want to evict cache entries + // based on parameters other than the total units stored. + // For example, if the cache stores open sstables, where the cost + // is the size in bytes of the open sstable, you may want to evict + // entries from the cache not only before the max size in bytes + // is reached but also before reaching the limit of open file + // descriptors. Thus, you may want to override this function in a + // subclass and return true if either Size() is too large or + // Entries() is too large. + virtual bool IsOverfull() const { return units_ > max_units_; } + + private: + typedef SimpleLRUCacheElem Elem; + typedef MapType Table; + typedef typename Table::iterator TableIterator; + typedef typename Table::const_iterator TableConstIterator; + typedef MapType DeferredTable; + typedef typename DeferredTable::iterator DeferredTableIterator; + typedef typename DeferredTable::const_iterator DeferredTableConstIterator; + + Table table_; // Main table + // Pinned entries awaiting to be released before they can be discarded. + // This is a key -> list mapping (multiple deferred entries for the same key) + // The machinery used to maintain main LRU list is reused here, though this + // list is not necessarily LRU and we don't care about the order of elements. + DeferredTable defer_; + int64_t units_; // Combined units of all elements + int64_t max_units_; // Max allowed units + int64_t pinned_units_; // Combined units of all pinned elements + Elem head_; // Dummy head of LRU list (next is mru elem) + int64_t max_idle_; // Maximum number of idle cycles + bool lru_; // LRU or age-based eviction? + + // Representation invariants: + // . LRU list is circular doubly-linked list + // . Each live "Elem" is either in "table_" or "defer_" + // . LRU list contains elements in "table_" that can be removed to free space + // . Each "Elem" in "defer_" has a non-zero pin count + + void Discard(Elem* e) { + assert(e->pin == 0); + units_ -= e->units; + RemoveElement(e->key, e->value); + delete e; + } + + // Count the number and total size of the elements in the deferred table. + void CountDeferredEntries(int64_t* num_entries, int64_t* total_size) const; + + // Currently in deferred table? + // Note, if (value == nullptr), only key is used for matching. 
+ bool InDeferredTable(const Key& k, const Value* value) const; + + void GarbageCollect(); // Discard to meet space constraints + void DiscardIdle(int64_t max_idle); // Discard to meet idle-time constraints + + void SetTimeout(double seconds, bool lru); + + bool IsOverfullInternal() const { + return ((units_ > max_units_) || IsOverfull()); + } + void Remove(Elem* e); + + public: + static const size_t kElemSize = sizeof(Elem); + + private: + GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(SimpleLRUCacheBase); +}; + +template +SimpleLRUCacheBase::SimpleLRUCacheBase( + int64_t total_units) + : head_(Key(), nullptr, 0, 0, Elem::kNeverUsed) { + units_ = 0; + pinned_units_ = 0; + max_units_ = total_units; + head_.next = &head_; + head_.prev = &head_; + max_idle_ = -1; // Stands for "no expiration" + lru_ = true; // default to LRU, not age-based +} + +template +void SimpleLRUCacheBase::SetTimeout(double seconds, + bool lru) { + if (seconds < 0 || std::isinf(seconds)) { + // Treat as no expiration based on idle time + lru_ = lru; + max_idle_ = -1; + } else if (max_idle_ >= 0 && lru != lru_) { + // LOG(DFATAL) << "Can't SetMaxIdleSeconds() and SetAgeBasedEviction()"; + // In production we'll just ignore the second call + assert(0); + } else { + lru_ = lru; + + // Convert to cycles ourselves in order to perform all calculations in + // floating point so that we avoid integer overflow. + // NOTE: The largest representable int64_t cannot be represented exactly as + // a + // double, so the cast results in a slightly larger value which cannot be + // converted back to an int64_t. The next smallest double is representable + // as + // an int64_t, however, so if we make sure that `timeout_cycles` is strictly + // smaller than the result of the cast, we know that casting + // `timeout_cycles` to int64_t will not overflow. + // NOTE 2: If you modify the computation here, make sure to update the + // GetBoundaryTimeout() method in the test as well. + const double timeout_cycles = seconds * SimpleCycleTimer::Frequency(); + if (timeout_cycles >= std::numeric_limits::max()) { + // The value is outside the range of int64_t, so "round" down to something + // that can be represented. + max_idle_ = std::numeric_limits::max(); + } else { + max_idle_ = static_cast(timeout_cycles); + } + DiscardIdle(max_idle_); + } +} + +template +void SimpleLRUCacheBase::RemoveAll() { + // For each element: call "Remove" + for (TableIterator iter = table_.begin(); iter != table_.end(); ++iter) { + Remove(iter->second); + } + table_.clear(); +} + +template +void SimpleLRUCacheBase::RemoveUnpinned() { + for (Elem* e = head_.next; e != &head_;) { + Elem* next = e->next; + if (e->pin == 0) Remove(e->key); + e = next; + } +} + +template +void SimpleLRUCacheBase::Clear() { + // For each element: call "RemoveElement" and delete it + for (TableConstIterator iter = table_.begin(); iter != table_.end();) { + Elem* e = iter->second; + // Pre-increment the iterator to avoid possible + // accesses to deleted memory in cases where the + // key is a pointer to the memory that is freed by + // Discard. + ++iter; + Discard(e); + } + // Pinned entries cannot be Discarded and defer_ contains nothing but pinned + // entries. Therefore, it must be already be empty at this point. 
+ assert(defer_.empty()); + // Get back into pristine state + table_.clear(); + head_.next = &head_; + head_.prev = &head_; + units_ = 0; + pinned_units_ = 0; +} + +template +Value* SimpleLRUCacheBase::LookupWithOptions( + const Key& k, const SimpleLRUCacheOptions& options) { + RemoveExpiredEntries(); + + TableIterator iter = table_.find(k); + if (iter != table_.end()) { + // We set last_use_ upon Release, not during Lookup. + Elem* e = iter->second; + if (e->pin == 0) { + pinned_units_ += e->units; + // We are pinning this entry, take it off the LRU list if we are in LRU + // mode. In strict age-based mode entries stay on the list while pinned. + if (lru_ && options.update_eviction_order()) e->Unlink(); + } + e->pin++; + return e->value; + } + return nullptr; +} + +template +void SimpleLRUCacheBase::ReleaseWithOptions( + const Key& k, Value* value, const SimpleLRUCacheOptions& options) { + { // First check to see if this is a deferred value + DeferredTableIterator iter = defer_.find(k); + if (iter != defer_.end()) { + const Elem* const head = iter->second; + // Go from oldest to newest, assuming that oldest entries get released + // first. This may or may not be true and makes no semantic difference. + Elem* e = head->prev; + while (e != head && e->value != value) { + e = e->prev; + } + if (e->value == value) { + // Found in deferred list: release it + assert(e->pin > 0); + e->pin--; + if (e->pin == 0) { + if (e == head) { + // When changing the head, remove the head item and re-insert the + // second item on the list (if there are any left). Do not re-use + // the key from the first item. + // Even though the two keys compare equal, the lifetimes may be + // different (such as a key of Std::StringPiece). + defer_.erase(iter); + if (e->prev != e) { + defer_[e->prev->key] = e->prev; + } + } + e->Unlink(); + Discard(e); + } + return; + } + } + } + { // Not deferred; so look in hash table + TableIterator iter = table_.find(k); + assert(iter != table_.end()); + Elem* e = iter->second; + assert(e->value == value); + assert(e->pin > 0); + if (lru_ && options.update_eviction_order()) { + e->last_use_ = SimpleCycleTimer::Now(); + } + e->pin--; + + if (e->pin == 0) { + if (lru_ && options.update_eviction_order()) e->Link(&head_); + pinned_units_ -= e->units; + if (IsOverfullInternal()) { + // This element is no longer needed, and we are full. So kick it out. + Remove(k); + } + } + } +} + +template +void SimpleLRUCacheBase::InsertPinned(const Key& k, + Value* value, + size_t units) { + // Get rid of older entry (if any) from table + Remove(k); + + // Make new element + Elem* e = new Elem(k, value, 1, units, SimpleCycleTimer::Now()); + + // Adjust table, total units fields. + units_ += units; + pinned_units_ += units; + table_[k] = e; + + // If we are in the strict age-based eviction mode, the entry goes on the LRU + // list now and is never removed. In the LRU mode, the list will only contain + // unpinned entries. 
+ if (!lru_) e->Link(&head_); + GarbageCollect(); +} + +template +void SimpleLRUCacheBase::UpdateSize(const Key& k, + const Value* value, + size_t units) { + TableIterator table_iter = table_.find(k); + if ((table_iter != table_.end()) && + ((value == nullptr) || (value == table_iter->second->value))) { + Elem* e = table_iter->second; + units_ -= e->units; + if (e->pin > 0) { + pinned_units_ -= e->units; + } + e->units = units; + units_ += e->units; + if (e->pin > 0) { + pinned_units_ += e->units; + } + } else { + const DeferredTableIterator iter = defer_.find(k); + if (iter != defer_.end()) { + const Elem* const head = iter->second; + Elem* e = iter->second; + do { + if (e->value == value || value == nullptr) { + units_ -= e->units; + e->units = units; + units_ += e->units; + } + e = e->prev; + } while (e != head); + } + } + GarbageCollect(); +} + +template +bool SimpleLRUCacheBase::StillInUse( + const Key& k, const Value* value) const { + TableConstIterator iter = table_.find(k); + if ((iter != table_.end()) && + ((value == nullptr) || (value == iter->second->value))) { + return true; + } else { + return InDeferredTable(k, value); + } +} + +template +bool SimpleLRUCacheBase::InDeferredTable( + const Key& k, const Value* value) const { + const DeferredTableConstIterator iter = defer_.find(k); + if (iter != defer_.end()) { + const Elem* const head = iter->second; + const Elem* e = head; + do { + if (e->value == value || value == nullptr) return true; + e = e->prev; + } while (e != head); + } + return false; +} + +template +void SimpleLRUCacheBase::Remove(const Key& k) { + TableIterator iter = table_.find(k); + if (iter != table_.end()) { + Elem* e = iter->second; + table_.erase(iter); + Remove(e); + } +} + +template +void SimpleLRUCacheBase::Remove(Elem* e) { + // Unlink e whether it is in the LRU or the deferred list. It is safe to call + // Unlink() if it is not in either list. + e->Unlink(); + if (e->pin > 0) { + pinned_units_ -= e->units; + + // Now add it to the deferred table. + DeferredTableIterator iter = defer_.find(e->key); + if (iter == defer_.end()) { + // Inserting a new key, the element becomes the head of the list. + e->prev = e->next = e; + defer_[e->key] = e; + } else { + // There is already a deferred list for this key, attach the element to it + Elem* head = iter->second; + e->Link(head); + } + } else { + Discard(e); + } +} + +template +void SimpleLRUCacheBase::GarbageCollect() { + Elem* e = head_.prev; + while (IsOverfullInternal() && (e != &head_)) { + Elem* prev = e->prev; + if (e->pin == 0) { + // Erase from hash-table + TableIterator iter = table_.find(e->key); + assert(iter != table_.end()); + assert(iter->second == e); + table_.erase(iter); + e->Unlink(); + Discard(e); + } + e = prev; + } +} + +// Not using cycle. Instead using second from time() +static const int kAcceptableClockSynchronizationDriftCycles = 1; + +template +void SimpleLRUCacheBase::DiscardIdle( + int64_t max_idle) { + if (max_idle < 0) return; + + Elem* e = head_.prev; + const int64_t threshold = SimpleCycleTimer::Now() - max_idle; +#ifndef NDEBUG + int64_t last = 0; +#endif + while ((e != &head_) && (e->last_use_ < threshold)) { +// Sanity check: LRU list should be sorted by last_use_. We could +// check the entire list, but that gives quadratic behavior. +// +// TSCs on different cores of multi-core machines sometime get slightly out +// of sync; compensate for this by allowing clock to go backwards by up to +// kAcceptableClockSynchronizationDriftCycles CPU cycles. 
+// +// A kernel bug (http://b/issue?id=777807) sometimes causes TSCs to become +// widely unsynchronized, in which case this CHECK will fail. As a +// temporary work-around, running +// +// $ sudo bash +// # echo performance>/sys/devices/system/cpu/cpu0/cpufreq/scaling_governor +// # /etc/init.d/cpufrequtils restart +// +// fixes the problem. +#ifndef NDEBUG + assert(last <= e->last_use_ + kAcceptableClockSynchronizationDriftCycles); + last = e->last_use_; +#endif + + Elem* prev = e->prev; + // There are no pinned elements on the list in the LRU mode, and in the + // age-based mode we push them out of the main table regardless of pinning. + assert(e->pin == 0 || !lru_); + Remove(e->key); + e = prev; + } +} + +template +void SimpleLRUCacheBase::CountDeferredEntries( + int64_t* num_entries, int64_t* total_size) const { + *num_entries = *total_size = 0; + for (DeferredTableConstIterator iter = defer_.begin(); iter != defer_.end(); + ++iter) { + const Elem* const head = iter->second; + const Elem* e = head; + do { + (*num_entries)++; + *total_size += e->units; + e = e->prev; + } while (e != head); + } +} + +template +int64_t SimpleLRUCacheBase::DeferredSize() const { + int64_t entries, size; + CountDeferredEntries(&entries, &size); + return size; +} + +template +int64_t SimpleLRUCacheBase::DeferredEntries() const { + int64_t entries, size; + CountDeferredEntries(&entries, &size); + return entries; +} + +template +int64_t SimpleLRUCacheBase::AgeOfLRUItemInMicroseconds() const { + if (head_.prev == &head_) return 0; + return kSecToUsec * (SimpleCycleTimer::Now() - head_.prev->last_use_) / + SimpleCycleTimer::Frequency(); +} + +template +int64_t SimpleLRUCacheBase::GetLastUseTime( + const Key& k) const { + // GetLastUseTime works only in LRU mode + assert(lru_); + TableConstIterator iter = table_.find(k); + if (iter == table_.end()) return -1; + const Elem* e = iter->second; + return e->last_use_; +} + +template +int64_t SimpleLRUCacheBase::GetInsertionTime( + const Key& k) const { + // GetInsertionTime works only in age-based mode + assert(!lru_); + TableConstIterator iter = table_.find(k); + if (iter == table_.end()) return -1; + const Elem* e = iter->second; + return e->last_use_; +} + +template +void SimpleLRUCacheBase::DebugOutput( + std::string* output) const { + std::stringstream ss; + ss << "SimpleLRUCache of " << table_.size(); + ss << " elements plus " << DeferredEntries(); + ss << " deferred elements (" << Size(); + ss << " units, " << MaxSize() << " max units)"; + *output += ss.str(); + for (TableConstIterator iter = table_.begin(); iter != table_.end(); ++iter) { + const Elem* e = iter->second; + DebugIterator(e->key, e->value, e->pin, e->last_use_, false, output); + } + *output += "Deferred elements\n"; + for (DeferredTableConstIterator iter = defer_.begin(); iter != defer_.end(); + ++iter) { + const Elem* const head = iter->second; + const Elem* e = head; + do { + DebugIterator(e->key, e->value, e->pin, e->last_use_, true, output); + e = e->prev; + } while (e != head); + } +} + +// construct an iterator be sure to save a copy of end() as well, so we don't +// update external_view_ in that case. this is b/c if it_ == end(), calling +// it_->first segfaults. 
we could do this by making sure a specific field in +// it_ is not nullptr but that relies on the internal implementation of it_, so +// we pass in end() instead +template +SimpleLRUCacheConstIterator::SimpleLRUCacheConstIterator( + HashMapConstIterator it, HashMapConstIterator end) + : it_(it), end_(end) { + if (it_ != end_) { + external_view_.first = it_->first; + external_view_.second = it_->second->value; + last_use_ = it_->second->last_use_; + } +} + +template +auto SimpleLRUCacheConstIterator::operator++() + -> SimpleLRUCacheConstIterator& { + it_++; + if (it_ != end_) { + external_view_.first = it_->first; + external_view_.second = it_->second->value; + last_use_ = it_->second->last_use_; + } + return *this; +} + +template +class SimpleLRUCache + : public SimpleLRUCacheBase< + Key, Value, + std::unordered_map*, H, EQ>, EQ> { + public: + explicit SimpleLRUCache(int64_t total_units) + : SimpleLRUCacheBase< + Key, Value, + std::unordered_map*, H, EQ>, + EQ>(total_units) {} + + protected: + virtual void RemoveElement(const Key& k, Value* value) { delete value; } + + private: + GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(SimpleLRUCache); +}; + +template +class SimpleLRUCacheWithDeleter + : public SimpleLRUCacheBase< + Key, Value, + std::unordered_map*, H, EQ>, EQ> { + typedef std::unordered_map*, H, EQ> + HashMap; + typedef SimpleLRUCacheBase Base; + + public: + explicit SimpleLRUCacheWithDeleter(int64_t total_units) + : Base(total_units), deleter_() {} + + SimpleLRUCacheWithDeleter(int64_t total_units, Deleter deleter) + : Base(total_units), deleter_(deleter) {} + + protected: + virtual void RemoveElement(const Key& k, Value* value) { deleter_(value); } + + private: + Deleter deleter_; + GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(SimpleLRUCacheWithDeleter); +}; + +} // namespace mixer_client +} // namespace istio + +#endif // MIXERCLIENT_UTILS_SIMPLE_LRU_CACHE_INL_H_ diff --git a/mixerclient/utils/simple_lru_cache_test.cc b/mixerclient/utils/simple_lru_cache_test.cc new file mode 100644 index 000000000000..99fe59ba8bf5 --- /dev/null +++ b/mixerclient/utils/simple_lru_cache_test.cc @@ -0,0 +1,1112 @@ +/* Copyright 2017 Google Inc. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// +// Tests of SimpleLRUCache + +#include "simple_lru_cache.h" +#include "simple_lru_cache_inl.h" + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +using ::testing::HasSubstr; +using ::testing::NotNull; + +namespace istio { +namespace mixer_client { + +// Keep track of whether or not specific values are in the cache +static const int kElems = 100; +static const int kCacheSize = 10; +static bool in_cache[kElems]; + +namespace { + +// Blocks until SimpleCycleTimer::Now() returns a new value. +void TickClock() { + int64_t start = SimpleCycleTimer::Now(); + const int kMaxAttempts = 10; + int num_attempts = 0; + do { + // sleep one microsecond. 
+ usleep(1); + } while (++num_attempts < kMaxAttempts && SimpleCycleTimer::Now() == start); + // Unable to tick the clock + assert(num_attempts < kMaxAttempts); +} + +} // namespace + +// Value type +struct TestValue { + int label; // Index into "in_cache" + explicit TestValue(int l) : label(l) {} + + protected: + // Make sure that TestCache can delete TestValue when declared as friend. + friend class SimpleLRUCache; + friend class TestCache; + ~TestValue() {} +}; + +class TestCache : public SimpleLRUCache { + public: + explicit TestCache(int64_t size, bool check_in_cache = true) + : SimpleLRUCache(size), check_in_cache_(check_in_cache) {} + + protected: + virtual void RemoveElement(const int& key, TestValue* v) { + if (v && check_in_cache_) { + assert(in_cache[v->label]); + std::cout << " Evict:" << v->label; + in_cache[v->label] = false; + } + delete v; + } + + const bool check_in_cache_; +}; + +class SimpleLRUCacheTest : public ::testing::Test { + protected: + SimpleLRUCacheTest() {} + virtual ~SimpleLRUCacheTest() {} + + virtual void SetUp() { + for (int i = 0; i < kElems; ++i) in_cache[i] = false; + } + + virtual void TearDown() { + if (cache_) cache_->Clear(); + for (int i = 0; i < kElems; i++) { + assert(!in_cache[i]); + } + } + + void TestInOrderEvictions(int cache_size); + void TestSetMaxSize(); + void TestOverfullEvictionPolicy(); + void TestRemoveUnpinned(); + void TestExpiration(bool lru, bool release_quickly); + void TestLargeExpiration(bool lru, double timeout); + + std::unique_ptr cache_; +}; + +TEST_F(SimpleLRUCacheTest, IteratorDefaultConstruct) { + TestCache::const_iterator default_unused; +} + +TEST_F(SimpleLRUCacheTest, Iteration) { + int count = 0; + cache_.reset(new TestCache(kCacheSize)); + + // fill the cache, evict some items, ensure i can iterate over all remaining + for (int i = 0; i < kElems; ++i) { + ASSERT_TRUE(!cache_->Lookup(i)); + TestValue* v = new TestValue(i); + in_cache[i] = true; + cache_->Insert(i, v, 1); + } + for (TestCache::const_iterator pos = cache_->begin(); pos != cache_->end(); + ++pos) { + ++count; + ASSERT_EQ(pos->first, pos->second->label); + ASSERT_TRUE(in_cache[pos->second->label]); + } + ASSERT_EQ(count, kCacheSize); + ASSERT_EQ(cache_->Entries(), kCacheSize); + cache_->Clear(); + + // iterate over the cache w/o filling the cache to capacity first + for (int i = 0; i < kCacheSize / 2; ++i) { + ASSERT_TRUE(!cache_->Lookup(i)); + TestValue* v = new TestValue(i); + in_cache[i] = true; + cache_->Insert(i, v, 1); + } + count = 0; + for (TestCache::const_iterator pos = cache_->begin(); pos != cache_->end(); + ++pos) { + ++count; + ASSERT_EQ(pos->first, pos->second->label); + ASSERT_TRUE(in_cache[pos->second->label]); + } + ASSERT_EQ(count, kCacheSize / 2); + ASSERT_EQ(cache_->Entries(), kCacheSize / 2); +} + +TEST_F(SimpleLRUCacheTest, StdCopy) { + cache_.reset(new TestCache(kCacheSize)); + for (int i = 0; i < kElems; ++i) { + in_cache[i] = true; + cache_->InsertPinned(i, new TestValue(i), 1); + } + // All entries are pinned, they are all in cache + ASSERT_EQ(cache_->Entries(), kElems); + ASSERT_EQ(cache_->PinnedSize(), kElems); + // Non have been removed, so Defer size is 0 + ASSERT_EQ(cache_->DeferredEntries(), 0); + + std::vector> to_release; + std::copy(cache_->begin(), cache_->end(), std::back_inserter(to_release)); + for (const auto& entry : to_release) { + cache_->Release(entry.first, entry.second); + } + + // After all of them un-pinned + ASSERT_EQ(cache_->Entries(), kCacheSize); + ASSERT_EQ(cache_->PinnedSize(), 0); + 
ASSERT_EQ(cache_->DeferredEntries(), 0); +} + +void SimpleLRUCacheTest::TestInOrderEvictions(int cache_size) { + for (int i = 0; i < kElems; i++) { + ASSERT_TRUE(!cache_->Lookup(i)); + TestValue* v = new TestValue(i); + in_cache[i] = true; + cache_->Insert(i, v, 1); + + if (i >= cache_size) { + ASSERT_TRUE(!in_cache[i - cache_size]); + } + } +} + +TEST_F(SimpleLRUCacheTest, InOrderEvictions) { + cache_.reset(new TestCache(kCacheSize)); + TestInOrderEvictions(kCacheSize); +} + +TEST_F(SimpleLRUCacheTest, InOrderEvictionsWithIdleEvictionEnabled) { + cache_.reset(new TestCache(kCacheSize)); + cache_->SetMaxIdleSeconds(2000); + TestInOrderEvictions(kCacheSize); +} + +TEST_F(SimpleLRUCacheTest, InOrderEvictionsWithAgeBasedEvictionEnabled) { + cache_.reset(new TestCache(kCacheSize)); + cache_->SetAgeBasedEviction(2000); + TestInOrderEvictions(kCacheSize); +} + +void SimpleLRUCacheTest::TestSetMaxSize() { + int cache_size = cache_->MaxSize(); + + // Fill the cache exactly and verify all values are present. + for (int i = 0; i < cache_size; i++) { + ASSERT_TRUE(!cache_->Lookup(i)); + TestValue* v = new TestValue(i); + in_cache[i] = true; + cache_->Insert(i, v, 1); + } + EXPECT_EQ(cache_size, cache_->Size()); + int elems = cache_size; + for (int i = 0; i < elems; i++) { + ASSERT_TRUE(in_cache[i]) << i; + } + + // Double the size; all values should still be present. + cache_size *= 2; + ASSERT_LE(cache_size, kElems); + cache_->SetMaxSize(cache_size); + EXPECT_EQ(elems, cache_->Size()); + for (int i = 0; i < elems; i++) { + ASSERT_TRUE(in_cache[i]) << i; + } + + // Fill the cache to the new size and ensure all values are present. + for (int i = elems; i < cache_size; i++) { + ASSERT_TRUE(!cache_->Lookup(i)) << i; + TestValue* v = new TestValue(i); + in_cache[i] = true; + cache_->Insert(i, v, 1); + } + EXPECT_EQ(cache_size, cache_->Size()); + elems = cache_size; + for (int i = 0; i < cache_size; i++) { + ASSERT_TRUE(in_cache[i]) << i; + } + + // Cut the size to half of the original size, elements should be evicted. + cache_size /= 4; + ASSERT_GT(cache_size, 0); + cache_->SetMaxSize(cache_size); + EXPECT_EQ(cache_size, cache_->Size()); + for (int i = 0; i < elems; i++) { + if (i < elems - cache_size) { + ASSERT_TRUE(!in_cache[i]) << i; + } else { + ASSERT_TRUE(in_cache[i]) << i; + } + } + + // Clear the cache and run the in order evictions test with the final size. + cache_->Clear(); + TestInOrderEvictions(cache_size); + EXPECT_EQ(cache_size, cache_->Size()); +} + +TEST_F(SimpleLRUCacheTest, SetMaxSize) { + cache_.reset(new TestCache(kCacheSize)); + TestSetMaxSize(); +} + +TEST_F(SimpleLRUCacheTest, SetMaxSizeWithIdleEvictionEnabled) { + cache_.reset(new TestCache(kCacheSize)); + cache_->SetMaxIdleSeconds(2000); + TestSetMaxSize(); +} + +TEST_F(SimpleLRUCacheTest, SetMaxSizeWithAgeBasedEvictionEnabled) { + cache_.reset(new TestCache(kCacheSize)); + cache_->SetAgeBasedEviction(2000); + TestSetMaxSize(); +} + +TEST_F(SimpleLRUCacheTest, VoidValues) { + // + // This naive code may double-pin at Lookup() the second time + // around if GetThing() returns 0 (which may be ok): + // + // Thing* thing = cache.Lookup(key); + // if (!thing) { + // thing = GetThing(key); + // cache.InsertPinned(key, thing, 1); + // } + // UseThing(thing); + // cache.Release(key, thing); + // + // One cannot distinguish between "not present" and "nullptr value" using + // return value from Lookup(), so let's do it with StillInUse(). 
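+ // One way to avoid re-inserting when a (possibly nullptr) value is
+ // already cached is to consult StillInUse() first (sketch only, reusing
+ // the placeholder names from the snippet above):
+ //
+ //   Thing* thing = cache.Lookup(key);
+ //   if (!thing && !cache.StillInUse(key)) {
+ //     thing = GetThing(key);
+ //     cache.InsertPinned(key, thing, 1);
+ //   }
+ //   UseThing(thing);
+ //   cache.Release(key, thing);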
+ // + + cache_.reset(new TestCache(1)); + + cache_->InsertPinned(5, 0, 1); + cache_->Release(5, 0); + + if (cache_->StillInUse(5, 0)) { + // Released, but still in there + // This path is executed given Dec 2007 implementation + + // Lookup pins 5, even though it returns nullptr + ASSERT_TRUE(nullptr == cache_->Lookup(5)); + } else { + // Not in there, let's insert it + // This path is not executed given Dec 2007 implementation + cache_->InsertPinned(5, 0, 1); + } + + ASSERT_EQ(1, cache_->PinnedSize()); + cache_->Release(5, 0); + ASSERT_EQ(0, cache_->PinnedSize()); + + cache_->Clear(); +} + +void SimpleLRUCacheTest::TestOverfullEvictionPolicy() { + // Fill with elements that should stick around if used over and over + for (int i = 0; i < kCacheSize; i++) { + ASSERT_TRUE(!cache_->Lookup(i)); + TestValue* v = new TestValue(i); + in_cache[i] = true; + cache_->Insert(i, v, 1); + } + + for (int i = kCacheSize; i < kElems; i++) { + // Access all of the elements that should stick around + for (int j = 0; j < kCacheSize; j++) { + TestValue* v = cache_->Lookup(j); + ASSERT_TRUE(v != nullptr); + cache_->Release(j, v); + } + + // Insert new value + ASSERT_TRUE(!cache_->Lookup(i)); + TestValue* v = new TestValue(i); + in_cache[i] = true; + cache_->Insert(i, v, 1); + ASSERT_TRUE(in_cache[i]); + if (i > kCacheSize) { + ASSERT_TRUE(!in_cache[i - 1]); + } + } +} + +TEST_F(SimpleLRUCacheTest, OverfullEvictionPolicy) { + cache_.reset(new TestCache(kCacheSize + 1)); + TestOverfullEvictionPolicy(); +} + +TEST_F(SimpleLRUCacheTest, OverfullEvictionPolicyWithIdleEvictionEnabled) { + cache_.reset(new TestCache(kCacheSize + 1)); + // Here we are not testing idle eviction, just that LRU eviction + // still works correctly when the cache is overfull. + cache_->SetMaxIdleSeconds(2000); + TestOverfullEvictionPolicy(); +} + +TEST_F(SimpleLRUCacheTest, OverfullEvictionPolicyWithAgeBasedEvictionEnabled) { + cache_.reset(new TestCache(kCacheSize)); + // With age-based eviction usage is ignored and instead the oldest inserted + // element is evicted when cahce becomes overfull. + cache_->SetAgeBasedEviction(2000); + + for (int i = 0; i < kCacheSize; i++) { + ASSERT_TRUE(!cache_->Lookup(i)); + TestValue* v = new TestValue(i); + in_cache[i] = true; + cache_->Insert(i, v, 1); + } + + // Access all of the elements in the reverse order. + for (int j = kCacheSize - 1; j >= 0; j--) { + TestCache::ScopedLookup lv(cache_.get(), j); + ASSERT_TRUE(lv.value() != nullptr); + } + + // Key 0 was accessed most recently, yet new value evicts it because it is + // the oldest one. + ASSERT_TRUE(!cache_->Lookup(kCacheSize)); + TestValue* v = new TestValue(kCacheSize); + in_cache[kCacheSize] = true; + cache_->Insert(kCacheSize, v, 1); + ASSERT_TRUE(in_cache[kCacheSize]); + ASSERT_TRUE(!in_cache[0]); +} + +TEST_F(SimpleLRUCacheTest, Update) { + cache_.reset(new TestCache(kCacheSize, false)); // Don't check in_cache. + // Insert some values. + for (int i = 0; i < kCacheSize; i++) { + ASSERT_TRUE(!cache_->Lookup(i)); + TestValue* v = new TestValue(i); + cache_->Insert(i, v, 1); + } + // Update them. + for (int i = 0; i < kCacheSize; i++) { + TestCache::ScopedLookup lookup(cache_.get(), i); + ASSERT_TRUE(lookup.Found()); + EXPECT_TRUE(lookup.value()->label == i); + lookup.value()->label = -i; + } + // Read them back. + for (int i = 0; i < kCacheSize; i++) { + TestCache::ScopedLookup lookup(cache_.get(), i); + ASSERT_TRUE(lookup.Found()); + EXPECT_TRUE(lookup.value()->label == -i); + } + // Flush them out. 
+ for (int i = 0; i < kCacheSize; i++) { + TestValue* v = new TestValue(i); + cache_->Insert(i + kCacheSize, v, 1); + } + // Original values are gone. + for (int i = 0; i < kCacheSize; i++) { + TestCache::ScopedLookup lookup(cache_.get(), i + kCacheSize); + ASSERT_TRUE(lookup.Found()); + TestCache::ScopedLookup lookup2(cache_.get(), i); + ASSERT_TRUE(!lookup2.Found()); + } +} + +TEST_F(SimpleLRUCacheTest, Pinning) { + static const int kPinned = kCacheSize + 4; + cache_.reset(new TestCache(kCacheSize)); + for (int i = 0; i < kElems; i++) { + ASSERT_TRUE(!cache_->Lookup(i)); + TestValue* v = new TestValue(i); + in_cache[i] = true; + if (i < kPinned) { + cache_->InsertPinned(i, v, 1); + } else { + cache_->Insert(i, v, 1); + } + } + for (int i = 0; i < kPinned; i++) { + ASSERT_TRUE(in_cache[i]); + TestValue* v = cache_->Lookup(i); + ASSERT_TRUE(v != nullptr); + cache_->Release(i, v); // For initial InsertPinned + cache_->Release(i, v); // For the previous Lookup + } +} + +TEST_F(SimpleLRUCacheTest, Remove) { + cache_.reset(new TestCache(kCacheSize)); + for (int i = 0; i < kElems; i++) { + ASSERT_TRUE(!cache_->Lookup(i)); + TestValue* v = new TestValue(i); + in_cache[i] = true; + cache_->Insert(i, v, 1); + + // Remove previous element, but leave "0" alone + if (i > 1) { + const int key = i - 1; + int prev_entries = cache_->Entries(); + if ((key % 2) == 0) { // test normal removal + cache_->Remove(key); + } else { // test different removal status + TestValue* const v2 = cache_->Lookup(key); + ASSERT_TRUE(v2) << ": key=" << key; + cache_->Remove(key); + ASSERT_TRUE(cache_->StillInUse(key)) << ": " << key; + cache_->Remove(key); + ASSERT_TRUE(cache_->StillInUse(key)) << ": " << key; + + cache_->Release(key, v2); + } + ASSERT_EQ(cache_->Entries(), prev_entries - 1); + ASSERT_TRUE(!in_cache[key]); + ASSERT_TRUE(!cache_->StillInUse(key)) << ": " << key; + } + } + ASSERT_TRUE(in_cache[0]); + ASSERT_TRUE(cache_->StillInUse(0)); +} + +void SimpleLRUCacheTest::TestRemoveUnpinned() { + for (int i = 0; i < kCacheSize; i++) { + ASSERT_TRUE(!cache_->Lookup(i)); + TestValue* v = new TestValue(i); + in_cache[i] = true; + cache_->Insert(i, v, 1); + } + + TestValue* const val = cache_->Lookup(1); + ASSERT_TRUE(val); + cache_->RemoveUnpinned(); + ASSERT_EQ(cache_->Entries(), 1); + // Check that only value 1 is still in the cache + for (int i = 0; i < kCacheSize; i++) { + if (i != 1) { + ASSERT_TRUE(!in_cache[i]); + } + } + ASSERT_TRUE(in_cache[1]); + cache_->Release(1, val); + cache_->RemoveUnpinned(); + ASSERT_EQ(cache_->Entries(), 0); + ASSERT_TRUE(!in_cache[1]); +} + +TEST_F(SimpleLRUCacheTest, RemoveUnpinned) { + cache_.reset(new TestCache(kCacheSize)); + TestRemoveUnpinned(); +} + +TEST_F(SimpleLRUCacheTest, RemoveUnpinnedWithIdleEvictionEnabled) { + cache_.reset(new TestCache(kCacheSize)); + // Here we are not testing idle eviction, just that RemoveUnpinned + // works correctly with it enabled. + cache_->SetMaxIdleSeconds(2000); + TestRemoveUnpinned(); +} + +TEST_F(SimpleLRUCacheTest, RemoveUnpinnedWithAgeBasedEvictionEnabled) { + cache_.reset(new TestCache(kCacheSize)); + // Here we are not testing age-based eviction, just that RemoveUnpinned + // works correctly with it enabled. 
+ cache_->SetAgeBasedEviction(2000); + TestRemoveUnpinned(); +} + +TEST_F(SimpleLRUCacheTest, MultiInsert) { + cache_.reset(new TestCache(kCacheSize)); + for (int i = 0; i < kElems; i++) { + TestValue* v = new TestValue(i); + in_cache[i] = true; + cache_->Insert(0, v, 1); + if (i > 0) { + ASSERT_TRUE(!in_cache[i - 1]); // Older entry must have been evicted + } + } +} + +TEST_F(SimpleLRUCacheTest, MultiInsertPinned) { + cache_.reset(new TestCache(kCacheSize)); + TestValue* list[kElems]; + for (int i = 0; i < kElems; i++) { + TestValue* v = new TestValue(i); + in_cache[i] = true; + cache_->InsertPinned(0, v, 1); + list[i] = v; + } + for (int i = 0; i < kElems; i++) { + ASSERT_TRUE(in_cache[i]); + ASSERT_TRUE(cache_->StillInUse(0, list[i])); + } + for (int i = 0; i < kElems; i++) { + cache_->Release(0, list[i]); + } +} + +void SimpleLRUCacheTest::TestExpiration(bool lru, bool release_quickly) { + cache_.reset(new TestCache(kCacheSize)); + if (lru) { + cache_->SetMaxIdleSeconds(0.2); // 200 milliseconds + } else { + cache_->SetAgeBasedEviction(0.2); // 200 milliseconds + } + for (int i = 0; i < kCacheSize; i++) { + TestValue* v = new TestValue(i); + in_cache[i] = true; + cache_->Insert(i, v, 1); + } + for (int i = 0; i < kCacheSize; i++) ASSERT_TRUE(in_cache[i]); + + usleep(110 * 1000); + + TestValue* v1 = cache_->Lookup(0); + ASSERT_TRUE(v1 != nullptr); + if (release_quickly) { + cache_->Release(0, v1); + v1 = nullptr; + } + for (int i = 0; i < kCacheSize; i++) ASSERT_TRUE(in_cache[i]); + + // Sleep more: should cause expiration of everything we + // haven't touched, and the one we touched if age-based. + usleep(110 * 1000); + + // Nothing gets expired until we call one of the cache methods. + for (int i = 0; i < kCacheSize; i++) ASSERT_TRUE(in_cache[i]); + + // It's now 220 ms since element 0 was created, and + // 110 ms since we last looked at it. If we configured + // the cache in LRU mode it should still be there, but + // if we configured it in age-based mode it should be gone. + // This is true even if the element was checked out: it should + // be on the defer_ list, not the table_ list as it is expired. + // Whether or not the element was pinned shouldn't matter: + // it should be expired either way in AgeBased mode, + // and not expired either way in lru mode. + TestValue* v2 = cache_->Lookup(0); + ASSERT_EQ(v2 == nullptr, !lru); + + // In either case all the other elements should now be gone. + for (int i = 1; i < kCacheSize; i++) ASSERT_TRUE(!in_cache[i]); + + // Clean up + bool cleaned_up = false; + if (v1 != nullptr) { + cache_->Release(0, v1); + cleaned_up = true; + } + if (v2 != nullptr) { + cache_->Release(0, v2); + cleaned_up = true; + } + if (cleaned_up) { + cache_->Remove(0); + } +} + +TEST_F(SimpleLRUCacheTest, ExpirationLRUShortHeldPins) { + TestExpiration(true /* lru */, true /* release_quickly */); +} +TEST_F(SimpleLRUCacheTest, ExpirationLRULongHeldPins) { + TestExpiration(true /* lru */, false /* release_quickly */); +} +TEST_F(SimpleLRUCacheTest, ExpirationAgeBasedShortHeldPins) { + TestExpiration(false /* lru */, true /* release_quickly */); +} +TEST_F(SimpleLRUCacheTest, ExpirationAgeBasedLongHeldPins) { + TestExpiration(false /* lru */, false /* release_quickly */); +} + +void SimpleLRUCacheTest::TestLargeExpiration(bool lru, double timeout) { + // Make sure that setting a large timeout doesn't result in overflow and + // cache entries expiring immediately. 
+  cache_.reset(new TestCache(kCacheSize));
+  if (lru) {
+    cache_->SetMaxIdleSeconds(timeout);
+  } else {
+    cache_->SetAgeBasedEviction(timeout);
+  }
+  for (int i = 0; i < kCacheSize; i++) {
+    TestValue* v = new TestValue(i);
+    in_cache[i] = true;
+    cache_->Insert(i, v, 1);
+  }
+  for (int i = 0; i < kCacheSize; i++) {
+    TestCache::ScopedLookup lookup(cache_.get(), i);
+    ASSERT_TRUE(lookup.Found()) << "Entry " << i << " not found";
+  }
+}
+
+TEST_F(SimpleLRUCacheTest, InfiniteExpirationLRU) {
+  TestLargeExpiration(true /* lru */, std::numeric_limits<double>::infinity());
+}
+
+TEST_F(SimpleLRUCacheTest, InfiniteExpirationAgeBased) {
+  TestLargeExpiration(false /* lru */,
+                      std::numeric_limits<double>::infinity());
+}
+
+static double GetBoundaryTimeout() {
+  // Search for the smallest timeout value that will result in overflow when
+  // converted to an integral number of cycles.
+  const double seconds_to_cycles = SimpleCycleTimer::Frequency();
+  double seconds = static_cast<double>(std::numeric_limits<int64_t>::max()) /
+                   seconds_to_cycles;
+  // Because of floating point rounding, we are not certain that the previous
+  // computation will result in precisely the right value. So, jitter the value
+  // until we know we found the correct value. First, look for a value that we
+  // know will not result in overflow.
+  while ((seconds * seconds_to_cycles) >=
+         std::numeric_limits<int64_t>::max()) {
+    seconds = std::nextafter(seconds, -std::numeric_limits<double>::infinity());
+  }
+  // Now, look for the first value that will result in overflow.
+  while ((seconds * seconds_to_cycles) < std::numeric_limits<int64_t>::max()) {
+    seconds = std::nextafter(seconds, std::numeric_limits<double>::infinity());
+  }
+  return seconds;
+}
+
+TEST_F(SimpleLRUCacheTest, LargeExpirationLRU) {
+  TestLargeExpiration(true /* lru */, GetBoundaryTimeout());
+}
+
+TEST_F(SimpleLRUCacheTest, LargeExpirationAgeBased) {
+  TestLargeExpiration(false /* lru */, GetBoundaryTimeout());
+}
+
+TEST_F(SimpleLRUCacheTest, UpdateSize) {
+  // Create a cache larger than kCacheSize, to give us some overhead to
+  // change the objects' sizes. We don't want an UpdateSize operation
+  // to force a GC and throw off our ASSERT_TRUE()s down below.
+  cache_.reset(new TestCache(kCacheSize * 2));
+  for (int i = 0; i < kCacheSize; i++) {
+    TestValue* v = new TestValue(i);
+    in_cache[i] = true;
+    cache_->Insert(i, v, 1);
+  }
+  ASSERT_EQ(cache_->Entries(), kCacheSize);
+
+  // *** Check the basic operations ***
+  // We inserted kCacheSize items, each of size 1.
+  // So the total should be kCacheSize, with none deferred and none pinned.
+
+  ASSERT_EQ(cache_->Size(), kCacheSize);
+  ASSERT_EQ(cache_->MaxSize(), kCacheSize * 2);
+  ASSERT_EQ(cache_->DeferredSize(), 0);
+  ASSERT_EQ(cache_->PinnedSize(), 0);
+
+  // Now lock a value -- total should be the same, but one should be pinned.
+  TestValue* found = cache_->Lookup(0);
+
+  ASSERT_EQ(cache_->Size(), kCacheSize);
+  ASSERT_EQ(cache_->MaxSize(), kCacheSize * 2);
+  ASSERT_EQ(cache_->DeferredSize(), 0);
+  ASSERT_EQ(cache_->PinnedSize(), 1);
+
+  // Now [try to] remove the locked value.
+  // This should leave zero pinned, but one deferred.
+  cache_->Remove(0);
+
+  ASSERT_EQ(cache_->Size(), kCacheSize);
+  ASSERT_EQ(cache_->MaxSize(), kCacheSize * 2);
+  ASSERT_EQ(cache_->DeferredSize(), 1);
+  ASSERT_EQ(cache_->PinnedSize(), 0);
+
+  // Now release the locked value. Both the deferred and pinned should be
+  // zero, and the total size should be one less than the total before.
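+  // (In other words, releasing the last pin on an entry that was already
+  // Remove()d is expected to finally dispose of it; the entry only stayed
+  // around, counted as "deferred", while the pin was outstanding.)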
+  cache_->Release(0, found);
+  found = nullptr;
+
+  ASSERT_EQ(cache_->Size(), (kCacheSize - 1));
+  ASSERT_EQ(cache_->MaxSize(), kCacheSize * 2);
+  ASSERT_EQ(cache_->DeferredSize(), 0);
+  ASSERT_EQ(cache_->PinnedSize(), 0);
+
+  // *** Okay, math works. Now try changing the sizes in mid-stream. ***
+
+  // Change one item to have a size of two. This should bring the total
+  // back up to kCacheSize.
+  cache_->UpdateSize(1, nullptr, 2);
+
+  ASSERT_EQ(cache_->Size(), kCacheSize);
+  ASSERT_EQ(cache_->MaxSize(), kCacheSize * 2);
+  ASSERT_EQ(cache_->DeferredSize(), 0);
+  ASSERT_EQ(cache_->PinnedSize(), 0);
+
+  // What if we pin a value, and then change its size?
+
+  // Pin [2]; total is still kCacheSize, pinned is one -- just like before ...
+  found = cache_->Lookup(2);
+
+  ASSERT_EQ(cache_->Size(), kCacheSize);
+  ASSERT_EQ(cache_->MaxSize(), kCacheSize * 2);
+  ASSERT_EQ(cache_->DeferredSize(), 0);
+  ASSERT_EQ(cache_->PinnedSize(), 1);
+
+  // Update that item to be of size two ...
+  cache_->UpdateSize(2, found, 2);
+
+  // ... and the total should be one greater, and pinned should be two.
+  ASSERT_EQ(cache_->Size(), (kCacheSize + 1));
+  ASSERT_EQ(cache_->MaxSize(), kCacheSize * 2);
+  ASSERT_EQ(cache_->DeferredSize(), 0);
+  ASSERT_EQ(cache_->PinnedSize(), 2);
+
+  // Okay, remove it; pinned should go to zero, Deferred should go to two.
+  cache_->Remove(2);
+
+  ASSERT_EQ(cache_->Size(), (kCacheSize + 1));
+  ASSERT_EQ(cache_->MaxSize(), kCacheSize * 2);
+  ASSERT_EQ(cache_->DeferredSize(), 2);
+  ASSERT_EQ(cache_->PinnedSize(), 0);
+
+  // Now, change it again. Let's change it back to size one--
+  // the total should go back to kCacheSize, and Deferred should
+  // drop to one.
+  cache_->UpdateSize(2, found, 1);
+
+  ASSERT_EQ(cache_->Size(), kCacheSize);
+  ASSERT_EQ(cache_->MaxSize(), kCacheSize * 2);
+  ASSERT_EQ(cache_->DeferredSize(), 1);
+  ASSERT_EQ(cache_->PinnedSize(), 0);
+
+  // Release it. Total should drop by one, Deferred goes to zero.
+  cache_->Release(2, found);
+  found = nullptr;
+
+  ASSERT_EQ(cache_->Size(), (kCacheSize - 1));
+  ASSERT_EQ(cache_->MaxSize(), kCacheSize * 2);
+  ASSERT_EQ(cache_->DeferredSize(), 0);
+  ASSERT_EQ(cache_->PinnedSize(), 0);
+
+  // So far we've disposed of 2 entries.
+  ASSERT_EQ(cache_->Entries(), kCacheSize - 2);
+
+  // Now blow the cache up from the inside: resize an entry to an enormous
+  // size. This will push everything out except the entry itself because it's
+  // pinned.
+  TestValue* v = new TestValue(0);
+  in_cache[0] = true;
+  cache_->InsertPinned(0, v, 1);
+  ASSERT_EQ(cache_->Entries(), kCacheSize - 1);
+  cache_->UpdateSize(0, v, kCacheSize * 3);
+  ASSERT_EQ(cache_->Entries(), 1);
+  ASSERT_EQ(cache_->Size(), kCacheSize * 3);
+  // The entry is disposed of as soon as it is released.
+  cache_->Release(0, v);
+  ASSERT_EQ(cache_->Entries(), 0);
+  ASSERT_EQ(cache_->Size(), 0);
+}
+
+TEST_F(SimpleLRUCacheTest, DontUpdateEvictionOrder) {
+  cache_.reset(new TestCache(kCacheSize));
+  int64_t original_start, original_end;
+
+  SimpleLRUCacheOptions options;
+  options.set_update_eviction_order(false);
+
+  // Fully populate the cache and keep track of the time range for this
+  // population.
+  original_start = SimpleCycleTimer::Now();
+  TickClock();
+  for (int i = 0; i < kCacheSize; i++) {
+    ASSERT_TRUE(!cache_->Lookup(i));
+    cache_->Insert(i, new TestValue(i), 1);
+    in_cache[i] = true;
+  }
+  TickClock();
+  original_end = SimpleCycleTimer::Now();
+
+  // At each step validate the current state of the cache and then insert
+  // a new element.
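+  // Sketch of the property exercised below (for a hypothetical key k): with
+  // update_eviction_order disabled,
+  //   TestValue* v = cache_->LookupWithOptions(k, options);
+  //   cache_->ReleaseWithOptions(k, v, options);
+  // should leave GetLastUseTime(k) unchanged, so eviction order keeps
+  // following the original insertion times rather than these lookups.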
+  for (int step = 0; step < kElems - kCacheSize; ++step) {
+    // Look from end to beginning (the reverse of the insertion order). This
+    // makes sure nothing changes cache ordering.
+    for (int this_elem = kElems - 1; this_elem >= 0; this_elem--) {
+      if (!in_cache[this_elem]) {
+        ASSERT_EQ(-1, cache_->GetLastUseTime(this_elem));
+      } else if (this_elem < kCacheSize) {
+        // All elements < kCacheSize were part of the original insertion.
+        ASSERT_GT(cache_->GetLastUseTime(this_elem), original_start);
+        ASSERT_LT(cache_->GetLastUseTime(this_elem), original_end);
+      } else {
+        // All elements >= kCacheSize are newer.
+        ASSERT_GT(cache_->GetLastUseTime(this_elem), original_end);
+      }
+
+      TestValue* value = cache_->LookupWithOptions(this_elem, options);
+      TestCache::ScopedLookup scoped_lookup(cache_.get(), this_elem, options);
+      if (in_cache[this_elem]) {
+        ASSERT_TRUE(value != nullptr);
+        ASSERT_EQ(this_elem, value->label);
+        ASSERT_TRUE(scoped_lookup.value() != nullptr);
+        ASSERT_EQ(this_elem, scoped_lookup.value()->label);
+        cache_->ReleaseWithOptions(this_elem, value, options);
+      } else {
+        ASSERT_TRUE(value == nullptr);
+        ASSERT_TRUE(scoped_lookup.value() == nullptr);
+      }
+    }
+
+    // Insert TestValue(kCacheSize + step), which should evict the TestValue
+    // with label step.
+    cache_->Insert(kCacheSize + step, new TestValue(kCacheSize + step), 1);
+    in_cache[kCacheSize + step] = true;
+    in_cache[step] = false;
+  }
+}
+
+TEST_F(SimpleLRUCacheTest, ScopedLookup) {
+  cache_.reset(new TestCache(kElems));
+  for (int i = 0; i < kElems; i++) {
+    ASSERT_TRUE(!cache_->Lookup(i));
+    TestValue* v = new TestValue(i);
+    in_cache[i] = true;
+    cache_->Insert(i, v, 1);
+  }
+  ASSERT_EQ(cache_->PinnedSize(), 0);
+  {
+    typedef TestCache::ScopedLookup ScopedLookup;
+    // Test two successful lookups
+    ScopedLookup lookup1(cache_.get(), 1);
+    ASSERT_TRUE(lookup1.Found());
+    ASSERT_EQ(cache_->PinnedSize(), 1);
+
+    ScopedLookup lookup2(cache_.get(), 2);
+    ASSERT_TRUE(lookup2.Found());
+    ASSERT_EQ(cache_->PinnedSize(), 2);
+
+    // Test a lookup of an elem not in the cache.
+    ScopedLookup lookup3(cache_.get(), kElems + 1);
+    ASSERT_TRUE(!lookup3.Found());
+    ASSERT_EQ(cache_->PinnedSize(), 2);
+  }
+  // Make sure the destructors released properly.
+  ASSERT_EQ(cache_->PinnedSize(), 0);
+}
+
+TEST_F(SimpleLRUCacheTest, AgeOfLRUItemInMicroseconds) {
+  // Make sure empty cache returns zero.
+  cache_.reset(new TestCache(kElems));
+  ASSERT_EQ(cache_->AgeOfLRUItemInMicroseconds(), 0);
+
+  // Make sure non-empty cache doesn't return zero.
+  TestValue* v = new TestValue(1);
+  in_cache[1] = true;
+  cache_->Insert(1, v, 1);
+  TickClock();  // must let at least 1us go by
+  ASSERT_NE(cache_->AgeOfLRUItemInMicroseconds(), 0);
+
+  // Make sure "oldest" ages as time goes by.
+  int64_t oldest = cache_->AgeOfLRUItemInMicroseconds();
+  TickClock();
+  ASSERT_GT(cache_->AgeOfLRUItemInMicroseconds(), oldest);
+
+  // Make sure new addition doesn't count as "oldest".
+  oldest = cache_->AgeOfLRUItemInMicroseconds();
+  TickClock();
+  v = new TestValue(2);
+  in_cache[2] = true;
+  cache_->Insert(2, v, 1);
+  ASSERT_GT(cache_->AgeOfLRUItemInMicroseconds(), oldest);
+
+  // Make sure removal of oldest drops to next oldest.
+  oldest = cache_->AgeOfLRUItemInMicroseconds();
+  cache_->Remove(1);
+  ASSERT_LT(cache_->AgeOfLRUItemInMicroseconds(), oldest);
+
+  // Make sure that empty cache once again returns zero.
+ cache_->Remove(2); + TickClock(); + ASSERT_EQ(cache_->AgeOfLRUItemInMicroseconds(), 0); +} + +TEST_F(SimpleLRUCacheTest, GetLastUseTime) { + cache_.reset(new TestCache(kElems)); + int64_t now, last; + + // Make sure nonexistent key returns -1 + ASSERT_EQ(cache_->GetLastUseTime(1), -1); + + // Make sure existent key returns something > last and < now + last = SimpleCycleTimer::Now(); + TickClock(); + in_cache[1] = true; + TestValue* v = new TestValue(1); + cache_->Insert(1, v, 1); + TickClock(); + now = SimpleCycleTimer::Now(); + ASSERT_GT(cache_->GetLastUseTime(1), last); + ASSERT_LT(cache_->GetLastUseTime(1), now); + + // Make sure next element > stored time and < now + in_cache[2] = true; + v = new TestValue(2); + cache_->Insert(2, v, 1); + TickClock(); + now = SimpleCycleTimer::Now(); + ASSERT_GT(cache_->GetLastUseTime(2), cache_->GetLastUseTime(1)); + ASSERT_LT(cache_->GetLastUseTime(2), now); + + // Make sure last use doesn't change after Lookup + last = cache_->GetLastUseTime(1); + v = cache_->Lookup(1); + ASSERT_EQ(cache_->GetLastUseTime(1), last); + + // Make sure last use changes after Release, and is > last use of 2 < now + TickClock(); + cache_->Release(1, v); + TickClock(); + now = SimpleCycleTimer::Now(); + ASSERT_GT(cache_->GetLastUseTime(1), cache_->GetLastUseTime(2)); + ASSERT_LT(cache_->GetLastUseTime(1), now); + + // Make sure Insert updates last use, > last use of 1 < now + v = new TestValue(3); + cache_->Insert(2, v, 1); + in_cache[3] = true; + TickClock(); + now = SimpleCycleTimer::Now(); + ASSERT_GT(cache_->GetLastUseTime(2), cache_->GetLastUseTime(1)); + ASSERT_LT(cache_->GetLastUseTime(2), now); + + // Make sure iterator returns the same value as GetLastUseTime + for (TestCache::const_iterator it = cache_->begin(); it != cache_->end(); + ++it) { + ASSERT_EQ(it.last_use_time(), cache_->GetLastUseTime(it->first)); + } + + // Make sure after Remove returns -1 + cache_->Remove(2); + ASSERT_EQ(cache_->GetLastUseTime(2), -1); +} + +TEST_F(SimpleLRUCacheTest, GetInsertionTime) { + cache_.reset(new TestCache(kElems)); + int64_t now, last; + + cache_->SetAgeBasedEviction(-1); + + // Make sure nonexistent key returns -1 + ASSERT_EQ(cache_->GetInsertionTime(1), -1); + + // Make sure existent key returns something > last and < now + last = SimpleCycleTimer::Now(); + TickClock(); + in_cache[1] = true; + TestValue* v = new TestValue(1); + cache_->Insert(1, v, 1); + TickClock(); + now = SimpleCycleTimer::Now(); + ASSERT_GT(cache_->GetInsertionTime(1), last); + ASSERT_LT(cache_->GetInsertionTime(1), now); + + // Make sure next element > time of el. 
1 and < now + in_cache[2] = true; + v = new TestValue(2); + cache_->Insert(2, v, 1); + TickClock(); + now = SimpleCycleTimer::Now(); + ASSERT_GT(cache_->GetInsertionTime(2), cache_->GetInsertionTime(1)); + ASSERT_LT(cache_->GetInsertionTime(2), now); + + // Make sure insertion time doesn't change after Lookup + last = cache_->GetInsertionTime(1); + v = cache_->Lookup(1); + ASSERT_EQ(cache_->GetInsertionTime(1), last); + + // Make sure insertion time doesn't change after Release + TickClock(); + cache_->Release(1, v); + ASSERT_EQ(cache_->GetInsertionTime(1), last); + + // Make sure Insert updates time, > insertion time of 2 < now + in_cache[3] = true; + v = new TestValue(3); + cache_->Insert(1, v, 1); + TickClock(); + now = SimpleCycleTimer::Now(); + ASSERT_GT(cache_->GetInsertionTime(1), cache_->GetInsertionTime(2)); + ASSERT_LT(cache_->GetInsertionTime(1), now); + + // Make sure iterator returns the same value as GetInsertionTime + for (TestCache::const_iterator it = cache_->begin(); it != cache_->end(); + ++it) { + ASSERT_EQ(it.insertion_time(), cache_->GetInsertionTime(it->first)); + } + + // Make sure after Remove returns -1 + cache_->Remove(2); + ASSERT_EQ(cache_->GetInsertionTime(2), -1); +} + +std::string StringPrintf(void* p, int pin, int defer) { + std::stringstream ss; + ss << std::hex << p << std::dec << ": pin: " << pin; + ss << ", is_deferred: " << defer; + return ss.str(); +} + +TEST_F(SimpleLRUCacheTest, DebugOutput) { + cache_.reset(new TestCache(kCacheSize, false /* check_in_cache */)); + TestValue* v1 = new TestValue(0); + cache_->InsertPinned(0, v1, 1); + TestValue* v2 = new TestValue(0); + cache_->InsertPinned(0, v2, 1); + TestValue* v3 = new TestValue(0); + cache_->Insert(0, v3, 1); + + std::string s; + cache_->DebugOutput(&s); + EXPECT_THAT(s, HasSubstr(StringPrintf(v1, 1, 1))); + EXPECT_THAT(s, HasSubstr(StringPrintf(v2, 1, 1))); + EXPECT_THAT(s, HasSubstr(StringPrintf(v3, 0, 0))); + + cache_->Release(0, v1); + cache_->Release(0, v2); +} + +TEST_F(SimpleLRUCacheTest, LookupWithoutEvictionOrderUpdateAndRemove) { + cache_.reset(new TestCache(kCacheSize, false /* check_in_cache */)); + + for (int i = 0; i < 3; ++i) { + cache_->Insert(i, new TestValue(0), 1); + } + + SimpleLRUCacheOptions no_update_options; + no_update_options.set_update_eviction_order(false); + TestValue* value = cache_->LookupWithOptions(1, no_update_options); + // Remove the second element before calling ReleaseWithOptions. Since we used + // update_eviction_order = false for the LookupWithOptions call the value was + // not removed from the LRU. Remove() is responsible for taking the value out + // of the LRU. + cache_->Remove(1); + // ReleaseWithOptions will now delete the pinned value. + cache_->ReleaseWithOptions(1, value, no_update_options); + + // When using ASan these lookups verify that the LRU has not been corrupted. + EXPECT_THAT(TestCache::ScopedLookup(cache_.get(), 0).value(), NotNull()); + EXPECT_THAT(TestCache::ScopedLookup(cache_.get(), 2).value(), NotNull()); +} + +} // namespace mixer_client +} // namespace istio