
Platform detection during build, plus compatibility patches for machines without <cstdatomic>.

This revision adds two major changes:
1. build_detect_platform, which generates build_config.mk
   with platform-dependent flags for the build process
   (a sample of the generated file is shown after this list)
2. port/atomic_pointer.h, with an AtomicPointer implementation
   for platforms without <cstdatomic>
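
For reference, a typical build_config.mk produced by the script could look like the
following (a hypothetical Linux machine on which both <cstdatomic> and Snappy were
detected; the variables and values come from the detection steps in the script below):

    PLATFORM_CFLAGS=-pthread -DOS_LINUX
    PLATFORM_LDFLAGS=-lpthread
    PLATFORM=OS_LINUX
    SNAPPY=1
    PORT_CFLAGS=-fno-builtin-memcmp -DLEVELDB_PLATFORM_POSIX -DLEVELDB_CSTDATOMIC_PRESENT -std=c++0x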

Some of this code is loosely based on patches submitted to the 
LevelDB mailing list at https://groups.google.com/forum/#!forum/leveldb
Tip of the hat to Dave Smith and Edouard A, who both sent patches.

The presence of Snappy (http://code.google.com/p/snappy/) and of
<cstdatomic> is now detected by the build_detect_platform
script (1.), which is executed during make.
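
The <cstdatomic> probe can be reproduced by hand with the same test the script runs
(a sketch, assuming g++ is on the PATH):

    printf '#include <cstdatomic>\nint main() {}\n' \
      | g++ -std=c++0x -x c++ - -o /dev/null 2>/dev/null \
      && echo "cstdatomic present" || echo "cstdatomic missing"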

For (2.), instead of broadly importing atomicops_* from Chromium or
the Google performance tools, we chose to just implement AtomicPointer 
and the limited atomic load and store operations it needs. 
This resulted in much less code and fewer files - everything is 
contained in atomic_pointer.h.
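
As a quick illustration of the interface (a hypothetical sketch, not part of the
commit; Node, Publish, and Consume are made-up names), publishing a pointer through
AtomicPointer looks like this:

    #include <cstddef>
    #include "port/atomic_pointer.h"

    struct Node { int payload; };

    // Shared slot holding the most recently published node.
    static leveldb::port::AtomicPointer head(NULL);

    // Writer: finish building the node, then publish it with a release
    // store so a reader that sees the pointer also sees its payload.
    void Publish(Node* n) {
      head.Release_Store(n);
    }

    // Reader: the acquire load pairs with the release store above.
    Node* Consume() {
      return reinterpret_cast<Node*>(head.Acquire_Load());
    }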



git-svn-id: https://leveldb.googlecode.com/svn/trunk@34 62dab493-f737-651d-591e-8d6aee1b9529
gabor@google.com, 13 years ago
commit f57e23351f
6 changed files with 431 additions and 124 deletions
  1. Makefile                 +22  -33
  2. build_detect_platform    +69   -0
  3. port/atomic_pointer.h   +213   -0
  4. port/port.h               +0   -2
  5. port/port_posix.h        +34  -29
  6. util/cache.cc            +93  -60

Makefile  (+22, -33)

@@ -13,37 +13,32 @@ OPT = -O2 -DNDEBUG # (A) Production use (optimized mode)
# OPT = -O2 -g2 -DNDEBUG # (C) Profiling mode: opt, but w/debugging symbols
#-----------------------------------------------
# detect what platform we're building on
$(shell sh ./build_detect_platform)
# this file is generated by build_detect_platform to set build flags
include build_config.mk
UNAME := $(shell uname)
ifeq ($(UNAME), Darwin)
# To build for iOS, set PLATFORM=IOS.
ifndef PLATFORM
PLATFORM=OSX
endif # PLATFORM
PLATFORM_CFLAGS = -DLEVELDB_PLATFORM_OSX
PORT_MODULE = port_osx.o
else # UNAME
PLATFORM_CFLAGS = -DLEVELDB_PLATFORM_POSIX -std=c++0x
PORT_MODULE = port_posix.o
endif # UNAME
# Set 'SNAPPY' to 1 if you have the Snappy compression library
# installed and want to enable its use in LevelDB
# If Snappy is installed, add compilation and linker flags
# (see http://code.google.com/p/snappy/)
SNAPPY=0
ifeq ($(SNAPPY), 0)
ifeq ($(SNAPPY), 1)
SNAPPY_CFLAGS=-DSNAPPY
SNAPPY_LDFLAGS=-lsnappy
else
SNAPPY_CFLAGS=
SNAPPY_LDFLAGS=
endif
# If Google Perf Tools are installed, add compilation and linker flags
# (see http://code.google.com/p/google-perftools/)
ifeq ($(GOOGLE_PERFTOOLS), 1)
GOOGLE_PERFTOOLS_LDFLAGS=-ltcmalloc
else
SNAPPY_CFLAGS=-DSNAPPY
SNAPPY_LDFLAGS=-lsnappy
GOOGLE_PERFTOOLS_LDFLAGS=
endif
CFLAGS = -c -I. -I./include $(PLATFORM_CFLAGS) $(OPT) $(SNAPPY_CFLAGS)
CFLAGS = -c -I. -I./include $(PORT_CFLAGS) $(PLATFORM_CCFLAGS) $(OPT) $(SNAPPY_CFLAGS)
LDFLAGS=-lpthread $(SNAPPY_LDFLAGS)
LDFLAGS=$(PLATFORM_LDFLAGS) $(SNAPPY_LDFLAGS) $(GOOGLE_PERFTOOLS_LDFLAGS)
LIBOBJECTS = \
./db/builder.o \
@@ -59,7 +54,7 @@ LIBOBJECTS = \
./db/version_edit.o \
./db/version_set.o \
./db/write_batch.o \
./port/$(PORT_MODULE) \
./port/port_posix.o \
./table/block.o \
./table/block_builder.o \
./table/format.o \
@@ -105,19 +100,15 @@ PROGRAMS = db_bench $(TESTS)
LIBRARY = libleveldb.a
ifeq ($(PLATFORM), IOS)
# Only XCode can build executable applications for iOS.
all: $(LIBRARY)
else
all: $(PROGRAMS) $(LIBRARY)
endif
check: $(TESTS)
check: $(PROGRAMS) $(TESTS)
for t in $(TESTS); do echo "***** Running $$t"; ./$$t || exit 1; done
clean:
-rm -f $(PROGRAMS) $(LIBRARY) */*.o ios-x86/*/*.o ios-arm/*/*.o
-rmdir -p ios-x86/* ios-arm/*
-rm -rf ios-x86/* ios-arm/*
-rm build_config.mk
$(LIBRARY): $(LIBOBJECTS)
rm -f $@
@@ -188,5 +179,3 @@ else
$(CC) $(CFLAGS) $< -o $@
endif
# TODO(gabor): dependencies for .o files
# TODO(gabor): Build library

build_detect_platform  (+69, -0)

@@ -0,0 +1,69 @@
#!/bin/sh
# Detects OS we're compiling on and generates build_config.mk,
# which in turn gets read while processing Makefile.
# build_config.mk will set the following variables:
# - PORT_CFLAGS will either set:
# -DLEVELDB_PLATFORM_POSIX if cstdatomic is present
# -DLEVELDB_PLATFORM_NOATOMIC if it is not
# - PLATFORM_CFLAGS with compiler flags for the platform
# - PLATFORM_LDFLAGS with linker flags for the platform
# Delete existing build_config.mk
rm -f build_config.mk
# Detect OS
case `uname -s` in
Darwin)
PLATFORM=OS_MACOSX
echo "PLATFORM_CFLAGS=-pthread -DOS_MACOSX" >> build_config.mk
echo "PLATFORM_LDFLAGS=-lpthread" >> build_config.mk
;;
Linux)
PLATFORM=OS_LINUX
echo "PLATFORM_CFLAGS=-pthread -DOS_LINUX" >> build_config.mk
echo "PLATFORM_LDFLAGS=-lpthread" >> build_config.mk
;;
SunOS)
PLATFORM=OS_SOLARIS
echo "PLATFORM_CFLAGS=-D_REENTRANT -DOS_SOLARIS" >> build_config.mk
echo "PLATFORM_LDFLAGS=-lpthread -lrt" >> build_config.mk
;;
*)
echo "Unknown platform!"
exit 1
esac
echo "PLATFORM=$PLATFORM" >> build_config.mk
# On GCC, use libc's memcmp, not GCC's memcmp
PORT_CFLAGS="-fno-builtin-memcmp"
# Detect C++0x -- this determines whether we'll use port_noatomic.h
# or port_posix.h by:
# 1. Trying to compile with -std=c++0x and including <cstdatomic>.
# 2. If g++ returns error code, we know to use port_posix.h
g++ $CFLAGS -std=c++0x -x c++ - -o /dev/null 2>/dev/null <<EOF
#include <cstdatomic>
int main() {}
EOF
if [ "$?" = 0 ]; then
PORT_CFLAGS="$PORT_CFLAGS -DLEVELDB_PLATFORM_POSIX -DLEVELDB_CSTDATOMIC_PRESENT -std=c++0x"
else
PORT_CFLAGS="$PORT_CFLAGS -DLEVELDB_PLATFORM_POSIX"
fi
# Test whether Snappy library is installed
# http://code.google.com/p/snappy/
g++ $CFLAGS -x c++ - -o /dev/null 2>/dev/null <<EOF
#include <snappy.h>
int main() {}
EOF
if [ "$?" = 0 ]; then
echo "SNAPPY=1" >> build_config.mk
else
echo "SNAPPY=0" >> build_config.mk
fi
echo "PORT_CFLAGS=$PORT_CFLAGS" >> build_config.mk

port/atomic_pointer.h  (+213, -0)

@@ -0,0 +1,213 @@
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
// AtomicPointer provides storage for a lock-free pointer.
// Platform-dependent implementation of AtomicPointer:
// - If cstdatomic is present (on newer versions of gcc, it is), we use
// a cstdatomic-based AtomicPointer
// - If it is not, we define processor-dependent AtomicWord operations,
// and then use them to build AtomicPointer
//
// This code is based on atomicops-internals-* in Google's perftools:
// http://code.google.com/p/google-perftools/source/browse/#svn%2Ftrunk%2Fsrc%2Fbase
#ifndef PORT_ATOMIC_POINTER_H_
#define PORT_ATOMIC_POINTER_H_
#ifdef LEVELDB_CSTDATOMIC_PRESENT
///////////////////////////////////////////////////////////////////////////////
// WE HAVE <cstdatomic>
// Use a <cstdatomic>-based AtomicPointer
#include <cstdatomic>
#include <stdint.h>
namespace leveldb {
namespace port {
// Storage for a lock-free pointer
class AtomicPointer {
private:
std::atomic<void*> rep_;
public:
AtomicPointer() { }
explicit AtomicPointer(void* v) : rep_(v) { }
inline void* Acquire_Load() const {
return rep_.load(std::memory_order_acquire);
}
inline void Release_Store(void* v) {
rep_.store(v, std::memory_order_release);
}
inline void* NoBarrier_Load() const {
return rep_.load(std::memory_order_relaxed);
}
inline void NoBarrier_Store(void* v) {
rep_.store(v, std::memory_order_relaxed);
}
};
} // namespace leveldb::port
} // namespace leveldb
#else
///////////////////////////////////////////////////////////////////////////////
// NO <cstdatomic>
// The entire rest of this file covers that case
#if defined(_M_X64) || defined(__x86_64__)
#define ARCH_CPU_X86_FAMILY 1
#elif defined(_M_IX86) || defined(__i386__) || defined(__i386)
#define ARCH_CPU_X86_FAMILY 1
#elif defined(__ARMEL__)
#define ARCH_CPU_ARM_FAMILY 1
#else
#warning Please add support for your architecture in atomic_pointer.h
#endif
namespace leveldb {
namespace port {
namespace internal {
// AtomicWord is a machine-sized pointer.
typedef intptr_t AtomicWord;
} // namespace leveldb::port::internal
} // namespace leveldb::port
} // namespace leveldb
// Include our platform specific implementation.
///////////////////////////////////////////////////////////////////////////////
// Windows on x86
#if defined(OS_WIN) && defined(COMPILER_MSVC) && defined(ARCH_CPU_X86_FAMILY)
// void MemoryBarrier(void) macro is defined in windows.h:
// http://msdn.microsoft.com/en-us/library/ms684208(v=vs.85).aspx
// Including windows.h here; MemoryBarrier() gets used below.
#include <windows.h>
///////////////////////////////////////////////////////////////////////////////
// Mac OS on x86
#elif defined(OS_MACOSX) && defined(ARCH_CPU_X86_FAMILY)
#include <libkern/OSAtomic.h>
namespace leveldb {
namespace port {
namespace internal {
inline void MemoryBarrier() {
#if defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))
// See http://gcc.gnu.org/ml/gcc/2003-04/msg01180.html for a discussion on
// this idiom. Also see http://en.wikipedia.org/wiki/Memory_ordering.
__asm__ __volatile__("" : : : "memory");
#else
OSMemoryBarrier();
#endif
}
} // namespace leveldb::port::internal
} // namespace leveldb::port
} // namespace leveldb
///////////////////////////////////////////////////////////////////////////////
// Any x86 CPU
#elif defined(ARCH_CPU_X86_FAMILY)
namespace leveldb {
namespace port {
namespace internal {
inline void MemoryBarrier() {
__asm__ __volatile__("" : : : "memory");
}
} // namespace leveldb::port::internal
} // namespace leveldb::port
} // namespace leveldb
#undef ATOMICOPS_COMPILER_BARRIER
///////////////////////////////////////////////////////////////////////////////
// ARM
#elif defined(ARCH_CPU_ARM_FAMILY)
namespace leveldb {
namespace port {
namespace internal {
typedef void (*LinuxKernelMemoryBarrierFunc)(void);
LinuxKernelMemoryBarrierFunc pLinuxKernelMemoryBarrier __attribute__((weak)) =
(LinuxKernelMemoryBarrierFunc) 0xffff0fa0;
inline void MemoryBarrier() {
pLinuxKernelMemoryBarrier();
}
} // namespace leveldb::port::internal
} // namespace leveldb::port
} // namespace leveldb
#else
#error "Atomic operations are not supported on your platform"
#endif
///////////////////////////////////////////////////////////////////////////////
// Implementation of AtomicPointer based on MemoryBarriers above
namespace leveldb {
namespace port {
namespace internal {
// Atomic operations using per-system MemoryBarrier()s
inline AtomicWord Acquire_Load(volatile const AtomicWord* ptr) {
AtomicWord value = *ptr;
MemoryBarrier();
return value;
}
inline void Release_Store(volatile AtomicWord* ptr, AtomicWord value) {
MemoryBarrier();
*ptr = value;
}
inline AtomicWord NoBarrier_Load(volatile const AtomicWord* ptr) {
return *ptr;
}
inline void NoBarrier_Store(volatile AtomicWord* ptr, AtomicWord value) {
*ptr = value;
}
} // namespace leveldb::port::internal
// AtomicPointer definition for systems without <cstdatomic>.
class AtomicPointer {
private:
typedef internal::AtomicWord Rep;
Rep rep_;
public:
AtomicPointer() { }
explicit AtomicPointer(void* p) : rep_(reinterpret_cast<Rep>(p)) {}
inline void* Acquire_Load() const {
return reinterpret_cast<void*>(internal::Acquire_Load(&rep_));
}
inline void Release_Store(void* v) {
internal::Release_Store(&rep_, reinterpret_cast<Rep>(v));
}
inline void* NoBarrier_Load() const {
return reinterpret_cast<void*>(internal::NoBarrier_Load(&rep_));
}
inline void NoBarrier_Store(void* v) {
internal::NoBarrier_Store(&rep_, reinterpret_cast<Rep>(v));
}
};
} // namespace leveldb::port
} // namespace leveldb
#endif // LEVELDB_CSTDATOMIC_PRESENT
#endif // PORT_ATOMIC_POINTER_H_

port/port.h  (+0, -2)

@@ -16,8 +16,6 @@
# include "port/port_chromium.h"
#elif defined(LEVELDB_PLATFORM_ANDROID)
# include "port/port_android.h"
#elif defined(LEVELDB_PLATFORM_OSX)
# include "port/port_osx.h"
#endif
#endif // STORAGE_LEVELDB_PORT_PORT_H_

port/port_posix.h  (+34, -29)

@@ -7,20 +7,46 @@
#ifndef STORAGE_LEVELDB_PORT_PORT_POSIX_H_
#define STORAGE_LEVELDB_PORT_PORT_POSIX_H_
#include <endian.h>
#if defined(OS_MACOSX)
#include <machine/endian.h>
#elif defined(OS_SOLARIS)
#include <sys/isa_defs.h>
#ifdef _LITTLE_ENDIAN
#define LITTLE_ENDIAN
#else
#define BIG_ENDIAN
#endif
#else
#include <endian.h>
#endif
#include <pthread.h>
#ifdef SNAPPY
#include <snappy.h>
#endif
#include <stdint.h>
#include <string>
#include <cstdatomic>
#include <cstring>
#include "port/atomic_pointer.h"
#ifdef LITTLE_ENDIAN
#define IS_LITTLE_ENDIAN true
#else
#define IS_LITTLE_ENDIAN (__BYTE_ORDER == __LITTLE_ENDIAN)
#endif
#if defined(OS_MACOSX) || defined(OS_SOLARIS)
#define fread_unlocked fread
#define fwrite_unlocked fwrite
#define fflush_unlocked fflush
#endif
#if defined(OS_MACOSX)
#define fdatasync fsync
#endif
namespace leveldb {
namespace port {
static const bool kLittleEndian = (__BYTE_ORDER == __LITTLE_ENDIAN);
static const bool kLittleEndian = IS_LITTLE_ENDIAN;
class CondVar;
@@ -54,29 +80,8 @@ class CondVar {
Mutex* mu_;
};
// Storage for a lock-free pointer
class AtomicPointer {
private:
std::atomic<void*> rep_;
public:
AtomicPointer() { }
explicit AtomicPointer(void* v) : rep_(v) { }
inline void* Acquire_Load() const {
return rep_.load(std::memory_order_acquire);
}
inline void Release_Store(void* v) {
rep_.store(v, std::memory_order_release);
}
inline void* NoBarrier_Load() const {
return rep_.load(std::memory_order_relaxed);
}
inline void NoBarrier_Store(void* v) {
rep_.store(v, std::memory_order_relaxed);
}
};
inline bool Snappy_Compress(const char* input, size_t input_length,
std::string* output) {
::std::string* output) {
#ifdef SNAPPY
output->resize(snappy::MaxCompressedLength(input_length));
size_t outlen;
@@ -89,7 +94,7 @@ inline bool Snappy_Compress(const char* input, size_t input_length,
}
inline bool Snappy_Uncompress(const char* input_data, size_t input_length,
std::string* output) {
::std::string* output) {
#ifdef SNAPPY
size_t ulength;
if (!snappy::GetUncompressedLength(input_data, input_length, &ulength)) {
@@ -106,7 +111,7 @@ inline bool GetHeapProfile(void (*func)(void*, const char*, int), void* arg) {
return false;
}
}
}
} // namespace port
} // namespace leveldb
#endif // STORAGE_LEVELDB_PORT_PORT_POSIX_H_

util/cache.cc  (+93, -60)

@@ -2,17 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
#if defined(LEVELDB_PLATFORM_POSIX) || defined(LEVELDB_PLATFORM_ANDROID)
#include <unordered_set>
#elif defined(LEVELDB_PLATFORM_OSX)
#include <ext/hash_set>
#elif defined(LEVELDB_PLATFORM_CHROMIUM)
#include "base/hash_tables.h"
#else
#include <hash_set> // TODO(sanjay): Switch to unordered_set when possible.
#endif
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include "leveldb/cache.h"
#include "port/port.h"
@@ -33,6 +25,7 @@ namespace {
struct LRUHandle {
void* value;
void (*deleter)(const Slice&, void* value);
LRUHandle* next_hash;
LRUHandle* next;
LRUHandle* prev;
size_t charge; // TODO(opt): Only allow uint32_t?
@@ -51,43 +44,93 @@ struct LRUHandle {
}
};
// Pick a platform specific hash_set instantiation
#if defined(LEVELDB_PLATFORM_CHROMIUM) && defined(OS_WIN)
// Microsoft's hash_set deviates from the standard. See
// http://msdn.microsoft.com/en-us/library/1t4xas78(v=vs.80).aspx
// for details. Basically the 2 param () operator is a less than and
// the 1 param () operator is a hash function.
struct HandleHashCompare : public stdext::hash_compare<LRUHandle*> {
size_t operator() (LRUHandle* h) const {
Slice k = h->key();
return Hash(k.data(), k.size(), 0);
// We provide our own simple hash table since it removes a whole bunch
// of porting hacks and is also faster than some of the built-in hash
// table implementations in some of the compiler/runtime combinations
// we have tested. E.g., readrandom speeds up by ~5% over the g++
// 4.4.3's builtin hashtable.
class HandleTable {
public:
HandleTable() : length_(0), elems_(0), list_(NULL) { Resize(); }
~HandleTable() { delete[] list_; }
LRUHandle* Lookup(LRUHandle* h) {
return *FindPointer(h);
}
LRUHandle* Insert(LRUHandle* h) {
LRUHandle** ptr = FindPointer(h);
LRUHandle* old = *ptr;
h->next_hash = (old == NULL ? NULL : old->next_hash);
*ptr = h;
if (old == NULL) {
++elems_;
if (elems_ > length_) {
// Since each cache entry is fairly large, we aim for a small
// average linked list length (<= 1).
Resize();
}
}
bool operator() (LRUHandle* a, LRUHandle* b) const {
return a->key().compare(b->key()) < 0;
return old;
}
LRUHandle* Remove(LRUHandle* h) {
LRUHandle** ptr = FindPointer(h);
LRUHandle* result = *ptr;
if (result != NULL) {
*ptr = result->next_hash;
--elems_;
}
};
typedef base::hash_set<LRUHandle*, HandleHashCompare> HandleTable;
#else
struct HandleHash {
inline size_t operator()(LRUHandle* h) const {
Slice k = h->key();
return Hash(k.data(), k.size(), 0);
return result;
}
private:
// The table consists of an array of buckets where each bucket is
// a linked list of cache entries that hash into the bucket.
uint32_t length_;
uint32_t elems_;
LRUHandle** list_;
// Return a pointer to slot that points to a cache entry that
// matches *h. If there is no such cache entry, return a pointer to
// the trailing slot in the corresponding linked list.
LRUHandle** FindPointer(LRUHandle* h) {
Slice key = h->key();
uint32_t hash = Hash(key.data(), key.size(), 0);
LRUHandle** ptr = &list_[hash & (length_ - 1)];
while (*ptr != NULL && key != (*ptr)->key()) {
ptr = &(*ptr)->next_hash;
}
};
return ptr;
}
struct HandleEq {
inline bool operator()(LRUHandle* a, LRUHandle* b) const {
return a->key() == b->key();
void Resize() {
uint32_t new_length = 4;
while (new_length < elems_) {
new_length *= 2;
}
LRUHandle** new_list = new LRUHandle*[new_length];
memset(new_list, 0, sizeof(new_list[0]) * new_length);
uint32_t count = 0;
for (uint32_t i = 0; i < length_; i++) {
LRUHandle* h = list_[i];
while (h != NULL) {
LRUHandle* next = h->next_hash;
Slice key = h->key();
uint32_t hash = Hash(key.data(), key.size(), 0);
LRUHandle** ptr = &new_list[hash & (new_length - 1)];
h->next_hash = *ptr;
*ptr = h;
h = next;
count++;
}
}
};
# if defined(LEVELDB_PLATFORM_CHROMIUM)
typedef base::hash_set<LRUHandle*, HandleHash, HandleEq> HandleTable;
# elif defined(LEVELDB_PLATFORM_POSIX) || defined(LEVELDB_PLATFORM_ANDROID)
typedef std::unordered_set<LRUHandle*, HandleHash, HandleEq> HandleTable;
# else
typedef __gnu_cxx::hash_set<LRUHandle*, HandleHash, HandleEq> HandleTable;
# endif
#endif
assert(elems_ == count);
delete[] list_;
list_ = new_list;
length_ = new_length;
}
};
class LRUCache : public Cache {
public:
@@ -132,7 +175,6 @@ LRUCache::LRUCache(size_t capacity)
}
LRUCache::~LRUCache() {
table_.clear();
for (LRUHandle* e = lru_.next; e != &lru_; ) {
LRUHandle* next = e->next;
assert(e->refs == 1); // Error if caller has an unreleased handle
@@ -170,16 +212,13 @@ Cache::Handle* LRUCache::Lookup(const Slice& key) {
LRUHandle dummy;
dummy.next = &dummy;
dummy.value = const_cast<Slice*>(&key);
HandleTable::iterator iter = table_.find(&dummy);
if (iter == table_.end()) {
return NULL;
} else {
LRUHandle* e = const_cast<LRUHandle*>(*iter);
LRUHandle* e = table_.Lookup(&dummy);
if (e != NULL) {
e->refs++;
LRU_Remove(e);
LRU_Append(e);
return reinterpret_cast<Handle*>(e);
}
return reinterpret_cast<Handle*>(e);
}
void* LRUCache::Value(Handle* handle) {
@@ -206,20 +245,16 @@ Cache::Handle* LRUCache::Insert(const Slice& key, void* value, size_t charge,
LRU_Append(e);
usage_ += charge;
std::pair<HandleTable::iterator,bool> p = table_.insert(e);
if (!p.second) {
// Kill existing entry
LRUHandle* old = const_cast<LRUHandle*>(*(p.first));
LRUHandle* old = table_.Insert(e);
if (old != NULL) {
LRU_Remove(old);
table_.erase(p.first);
table_.insert(e);
Unref(old);
}
while (usage_ > capacity_ && lru_.next != &lru_) {
LRUHandle* old = lru_.next;
LRU_Remove(old);
table_.erase(old);
table_.Remove(old);
Unref(old);
}
@@ -232,11 +267,9 @@ void LRUCache::Erase(const Slice& key) {
LRUHandle dummy;
dummy.next = &dummy;
dummy.value = const_cast<Slice*>(&key);
HandleTable::iterator iter = table_.find(&dummy);
if (iter != table_.end()) {
LRUHandle* e = const_cast<LRUHandle*>(*iter);
LRUHandle* e = table_.Remove(&dummy);
if (e != NULL) {
LRU_Remove(e);
table_.erase(iter);
Unref(e);
}
}
