
Correct class/structure declaration order.

1. Correct the class/struct declaration order to be in accordance with
   the Google C++ style guide[1].
2. For non-copyable classes, switch from unimplemented private
   methods to explicitly deleted[2] methods.
3. Minor const and member initialization fixes.

[1] https://google.github.io/styleguide/cppguide.html#Declaration_Order
[2] http://eel.is/c++draft/dcl.fct.def.delete

PiperOrigin-RevId: 246521844
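
To illustrate the three points above, here is a minimal sketch of the pattern
applied throughout these files. The Widget class is hypothetical (it is not one
of the LevelDB classes touched by this commit); it only shows the idioms
together: constructors first, deleted copy operations declared right after the
constructor, methods before data members, data members declared last, and
members initialized in the constructor's initializer list.

#include <string>

class Widget {
 public:
  Widget() : count_(0) {}

  // Non-copyable: explicitly deleted copy operations replace the old idiom of
  // declaring them private and leaving them unimplemented. Misuse now fails at
  // compile time instead of link time.
  Widget(const Widget&) = delete;
  Widget& operator=(const Widget&) = delete;

  ~Widget() = default;

  void DoWork() { ++count_; }

 private:
  // Private helpers come before the data members.
  void Reset() { count_ = 0; }

  std::string name_;
  int count_;
};

The same reordering (public interface first, private helpers next, data members
last) is what the per-file diffs below apply to classes such as DBImpl,
log::Reader, and SkipList.
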
main
Chris Mumford 5 years ago
Parent
Current commit
9bd23c7676
44 changed files with 412 additions and 403 deletions
  1. db/autocompact_test.cc (+6, -5)
  2. db/c.cc (+17, -15)
  3. db/corruption_test.cc (+12, -10)
  4. db/db_impl.cc (+19, -18)
  5. db/db_impl.h (+33, -31)
  6. db/db_iter.cc (+4, -6)
  7. db/db_test.cc (+12, -12)
  8. db/dbformat.h (+3, -4)
  9. db/dumpfile.cc (+4, -2)
  10. db/log_reader.h (+22, -23)
  11. db/log_test.cc (+76, -75)
  12. db/log_writer.h (+5, -6)
  13. db/memtable.h (+7, -7)
  14. db/repair.cc (+16, -16)
  15. db/skiplist.h (+15, -16)
  16. db/table_cache.h (+2, -2)
  17. db/version_edit.h (+2, -2)
  18. db/version_set.h (+25, -25)
  19. db/version_set_test.cc (+5, -3)
  20. helpers/memenv/memenv.cc (+7, -6)
  21. helpers/memenv/memenv_test.cc (+2, -2)
  22. include/leveldb/db.h (+3, -3)
  23. include/leveldb/iterator.h (+6, -6)
  24. include/leveldb/options.h (+7, -7)
  25. include/leveldb/status.h (+7, -7)
  26. include/leveldb/table.h (+6, -4)
  27. include/leveldb/table_builder.h (+1, -1)
  28. include/leveldb/write_batch.h (+7, -6)
  29. table/block.h (+5, -6)
  30. table/block_builder.h (+3, -4)
  31. table/filter_block.h (+3, -4)
  32. table/format.h (+8, -8)
  33. table/merger.cc (+3, -3)
  34. table/table_builder.cc (+16, -16)
  35. util/arena.cc (+2, -4)
  36. util/arena.h (+4, -4)
  37. util/bloom.cc (+4, -4)
  38. util/bloom_test.cc (+5, -5)
  39. util/cache_test.cc (+2, -2)
  40. util/env_posix_test.cc (+4, -3)
  41. util/env_test.cc (+2, -1)
  42. util/env_windows.cc (+7, -9)
  43. util/env_windows_test.cc (+4, -3)
  44. util/histogram.h (+9, -7)

db/autocompact_test.cc (+6, -5)

@ -12,11 +12,6 @@ namespace leveldb {
class AutoCompactTest {
public:
std::string dbname_;
Cache* tiny_cache_;
Options options_;
DB* db_;
AutoCompactTest() {
dbname_ = test::TmpDir() + "/autocompact_test";
tiny_cache_ = NewLRUCache(100);
@ -47,6 +42,12 @@ class AutoCompactTest {
}
void DoReads(int n);
private:
std::string dbname_;
Cache* tiny_cache_;
Options options_;
DB* db_;
};
static const int kValueSize = 200 * 1024;

db/c.cc (+17, -15)

@ -84,12 +84,6 @@ struct leveldb_filelock_t {
};
struct leveldb_comparator_t : public Comparator {
void* state_;
void (*destructor_)(void*);
int (*compare_)(void*, const char* a, size_t alen, const char* b,
size_t blen);
const char* (*name_)(void*);
virtual ~leveldb_comparator_t() { (*destructor_)(state_); }
virtual int Compare(const Slice& a, const Slice& b) const {
@ -101,18 +95,15 @@ struct leveldb_comparator_t : public Comparator {
// No-ops since the C binding does not support key shortening methods.
virtual void FindShortestSeparator(std::string*, const Slice&) const {}
virtual void FindShortSuccessor(std::string* key) const {}
};
struct leveldb_filterpolicy_t : public FilterPolicy {
void* state_;
void (*destructor_)(void*);
int (*compare_)(void*, const char* a, size_t alen, const char* b,
size_t blen);
const char* (*name_)(void*);
char* (*create_)(void*, const char* const* key_array,
const size_t* key_length_array, int num_keys,
size_t* filter_length);
unsigned char (*key_match_)(void*, const char* key, size_t length,
const char* filter, size_t filter_length);
};
struct leveldb_filterpolicy_t : public FilterPolicy {
virtual ~leveldb_filterpolicy_t() { (*destructor_)(state_); }
virtual const char* Name() const { return (*name_)(state_); }
@ -134,6 +125,15 @@ struct leveldb_filterpolicy_t : public FilterPolicy {
return (*key_match_)(state_, key.data(), key.size(), filter.data(),
filter.size());
}
void* state_;
void (*destructor_)(void*);
const char* (*name_)(void*);
char* (*create_)(void*, const char* const* key_array,
const size_t* key_length_array, int num_keys,
size_t* filter_length);
unsigned char (*key_match_)(void*, const char* key, size_t length,
const char* filter, size_t filter_length);
};
struct leveldb_env_t {
@ -470,7 +470,8 @@ leveldb_filterpolicy_t* leveldb_filterpolicy_create_bloom(int bits_per_key) {
// they delegate to a NewBloomFilterPolicy() instead of user
// supplied C functions.
struct Wrapper : public leveldb_filterpolicy_t {
const FilterPolicy* rep_;
static void DoNothing(void*) {}
~Wrapper() { delete rep_; }
const char* Name() const { return rep_->Name(); }
void CreateFilter(const Slice* keys, int n, std::string* dst) const {
@ -479,7 +480,8 @@ leveldb_filterpolicy_t* leveldb_filterpolicy_create_bloom(int bits_per_key) {
bool KeyMayMatch(const Slice& key, const Slice& filter) const {
return rep_->KeyMayMatch(key, filter);
}
static void DoNothing(void*) {}
const FilterPolicy* rep_;
};
Wrapper* wrapper = new Wrapper;
wrapper->rep_ = NewBloomFilterPolicy(bits_per_key);

db/corruption_test.cc (+12, -10)

@ -22,20 +22,14 @@ static const int kValueSize = 1000;
class CorruptionTest {
public:
test::ErrorEnv env_;
std::string dbname_;
Cache* tiny_cache_;
Options options_;
DB* db_;
CorruptionTest() {
tiny_cache_ = NewLRUCache(100);
CorruptionTest()
: db_(nullptr),
dbname_("/memenv/corruption_test"),
tiny_cache_(NewLRUCache(100)) {
options_.env = &env_;
options_.block_cache = tiny_cache_;
dbname_ = "/memenv/corruption_test";
DestroyDB(dbname_, options_);
db_ = nullptr;
options_.create_if_missing = true;
Reopen();
options_.create_if_missing = false;
@ -185,6 +179,14 @@ class CorruptionTest {
Random r(k);
return test::RandomString(&r, kValueSize, storage);
}
test::ErrorEnv env_;
Options options_;
DB* db_;
private:
std::string dbname_;
Cache* tiny_cache_;
};
TEST(CorruptionTest, Recovery) {

db/db_impl.cc (+19, -18)

@ -42,38 +42,23 @@ const int kNumNonTableCacheFiles = 10;
// Information kept for every waiting writer
struct DBImpl::Writer {
explicit Writer(port::Mutex* mu)
: batch(nullptr), sync(false), done(false), cv(mu) {}
Status status;
WriteBatch* batch;
bool sync;
bool done;
port::CondVar cv;
explicit Writer(port::Mutex* mu)
: batch(nullptr), sync(false), done(false), cv(mu) {}
};
struct DBImpl::CompactionState {
Compaction* const compaction;
// Sequence numbers < smallest_snapshot are not significant since we
// will never have to service a snapshot below smallest_snapshot.
// Therefore if we have seen a sequence number S <= smallest_snapshot,
// we can drop all entries for the same key with sequence numbers < S.
SequenceNumber smallest_snapshot;
// Files produced by compaction
struct Output {
uint64_t number;
uint64_t file_size;
InternalKey smallest, largest;
};
std::vector<Output> outputs;
// State kept for output being generated
WritableFile* outfile;
TableBuilder* builder;
uint64_t total_bytes;
Output* current_output() { return &outputs[outputs.size() - 1]; }
@ -83,6 +68,22 @@ struct DBImpl::CompactionState {
outfile(nullptr),
builder(nullptr),
total_bytes(0) {}
Compaction* const compaction;
// Sequence numbers < smallest_snapshot are not significant since we
// will never have to service a snapshot below smallest_snapshot.
// Therefore if we have seen a sequence number S <= smallest_snapshot,
// we can drop all entries for the same key with sequence numbers < S.
SequenceNumber smallest_snapshot;
std::vector<Output> outputs;
// State kept for output being generated
WritableFile* outfile;
TableBuilder* builder;
uint64_t total_bytes;
};
// Fix user-supplied options to be reasonable

db/db_impl.h (+33, -31)

@ -29,6 +29,10 @@ class VersionSet;
class DBImpl : public DB {
public:
DBImpl(const Options& options, const std::string& dbname);
DBImpl(const DBImpl&) = delete;
DBImpl& operator=(const DBImpl&) = delete;
virtual ~DBImpl();
// Implementations of the DB interface
@ -71,6 +75,31 @@ class DBImpl : public DB {
struct CompactionState;
struct Writer;
// Information for a manual compaction
struct ManualCompaction {
int level;
bool done;
const InternalKey* begin; // null means beginning of key range
const InternalKey* end; // null means end of key range
InternalKey tmp_storage; // Used to keep track of compaction progress
};
// Per level compaction stats. stats_[level] stores the stats for
// compactions that produced data for the specified "level".
struct CompactionStats {
CompactionStats() : micros(0), bytes_read(0), bytes_written(0) {}
void Add(const CompactionStats& c) {
this->micros += c.micros;
this->bytes_read += c.bytes_read;
this->bytes_written += c.bytes_written;
}
int64_t micros;
int64_t bytes_read;
int64_t bytes_written;
};
Iterator* NewInternalIterator(const ReadOptions&,
SequenceNumber* latest_snapshot,
uint32_t* seed);
@ -121,6 +150,10 @@ class DBImpl : public DB {
Status InstallCompactionResults(CompactionState* compact)
EXCLUSIVE_LOCKS_REQUIRED(mutex_);
const Comparator* user_comparator() const {
return internal_comparator_.user_comparator();
}
// Constant after construction
Env* const env_;
const InternalKeyComparator internal_comparator_;
@ -161,14 +194,6 @@ class DBImpl : public DB {
// Has a background compaction been scheduled or is running?
bool background_compaction_scheduled_ GUARDED_BY(mutex_);
// Information for a manual compaction
struct ManualCompaction {
int level;
bool done;
const InternalKey* begin; // null means beginning of key range
const InternalKey* end; // null means end of key range
InternalKey tmp_storage; // Used to keep track of compaction progress
};
ManualCompaction* manual_compaction_ GUARDED_BY(mutex_);
VersionSet* const versions_;
@ -176,30 +201,7 @@ class DBImpl : public DB {
// Have we encountered a background error in paranoid mode?
Status bg_error_ GUARDED_BY(mutex_);
// Per level compaction stats. stats_[level] stores the stats for
// compactions that produced data for the specified "level".
struct CompactionStats {
int64_t micros;
int64_t bytes_read;
int64_t bytes_written;
CompactionStats() : micros(0), bytes_read(0), bytes_written(0) {}
void Add(const CompactionStats& c) {
this->micros += c.micros;
this->bytes_read += c.bytes_read;
this->bytes_written += c.bytes_written;
}
};
CompactionStats stats_[config::kNumLevels] GUARDED_BY(mutex_);
// No copying allowed
DBImpl(const DBImpl&);
void operator=(const DBImpl&);
const Comparator* user_comparator() const {
return internal_comparator_.user_comparator();
}
};
// Sanitize db options. The caller should delete result.info_log if

db/db_iter.cc (+4, -6)

@ -55,6 +55,10 @@ class DBIter : public Iterator {
valid_(false),
rnd_(seed),
bytes_until_read_sampling_(RandomCompactionPeriod()) {}
DBIter(const DBIter&) = delete;
DBIter& operator=(const DBIter&) = delete;
virtual ~DBIter() { delete iter_; }
virtual bool Valid() const { return valid_; }
virtual Slice key() const {
@ -106,19 +110,13 @@ class DBIter : public Iterator {
const Comparator* const user_comparator_;
Iterator* const iter_;
SequenceNumber const sequence_;
Status status_;
std::string saved_key_; // == current key when direction_==kReverse
std::string saved_value_; // == current raw value when direction_==kReverse
Direction direction_;
bool valid_;
Random rnd_;
size_t bytes_until_read_sampling_;
// No copying allowed
DBIter(const DBIter&);
void operator=(const DBIter&);
};
inline bool DBIter::ParseKey(ParsedInternalKey* ikey) {

db/db_test.cc (+12, -12)

@ -40,10 +40,6 @@ static std::string RandomKey(Random* rnd) {
namespace {
class AtomicCounter {
private:
port::Mutex mu_;
int count_ GUARDED_BY(mu_);
public:
AtomicCounter() : count_(0) {}
void Increment() { IncrementBy(1); }
@ -59,6 +55,10 @@ class AtomicCounter {
MutexLock l(&mu_);
count_ = 0;
}
private:
port::Mutex mu_;
int count_ GUARDED_BY(mu_);
};
void DelayMilliseconds(int millis) {
@ -227,13 +227,6 @@ class SpecialEnv : public EnvWrapper {
};
class DBTest {
private:
const FilterPolicy* filter_policy_;
// Sequence of option configurations to try
enum OptionConfig { kDefault, kReuse, kFilter, kUncompressed, kEnd };
int option_config_;
public:
std::string dbname_;
SpecialEnv* env_;
@ -241,7 +234,7 @@ class DBTest {
Options last_options_;
DBTest() : option_config_(kDefault), env_(new SpecialEnv(Env::Default())) {
DBTest() : env_(new SpecialEnv(Env::Default())), option_config_(kDefault) {
filter_policy_ = NewBloomFilterPolicy(10);
dbname_ = test::TmpDir() + "/db_test";
DestroyDB(dbname_, Options());
@ -533,6 +526,13 @@ class DBTest {
}
return files_renamed;
}
private:
// Sequence of option configurations to try
enum OptionConfig { kDefault, kReuse, kFilter, kUncompressed, kEnd };
const FilterPolicy* filter_policy_;
int option_config_;
};
TEST(DBTest, Empty) {

db/dbformat.h (+3, -4)

@ -181,6 +181,9 @@ class LookupKey {
// the specified sequence number.
LookupKey(const Slice& user_key, SequenceNumber sequence);
LookupKey(const LookupKey&) = delete;
LookupKey& operator=(const LookupKey&) = delete;
~LookupKey();
// Return a key suitable for lookup in a MemTable.
@ -204,10 +207,6 @@ class LookupKey {
const char* kstart_;
const char* end_;
char space_[200]; // Avoid allocation for short keys
// No copying allowed
LookupKey(const LookupKey&);
void operator=(const LookupKey&);
};
inline LookupKey::~LookupKey() {

db/dumpfile.cc (+4, -2)

@ -38,7 +38,6 @@ bool GuessType(const std::string& fname, FileType* type) {
// Notified when log reader encounters corruption.
class CorruptionReporter : public log::Reader::Reporter {
public:
WritableFile* dst_;
virtual void Corruption(size_t bytes, const Status& status) {
std::string r = "corruption: ";
AppendNumberTo(&r, bytes);
@ -47,6 +46,8 @@ class CorruptionReporter : public log::Reader::Reporter {
r.push_back('\n');
dst_->Append(r);
}
WritableFile* dst_;
};
// Print contents of a log file. (*func)() is called on every record.
@ -73,7 +74,6 @@ Status PrintLogContents(Env* env, const std::string& fname,
// Called on every item found in a WriteBatch.
class WriteBatchItemPrinter : public WriteBatch::Handler {
public:
WritableFile* dst_;
virtual void Put(const Slice& key, const Slice& value) {
std::string r = " put '";
AppendEscapedStringTo(&r, key);
@ -88,6 +88,8 @@ class WriteBatchItemPrinter : public WriteBatch::Handler {
r += "'\n";
dst_->Append(r);
}
WritableFile* dst_;
};
// Called on every log record (each one of which is a WriteBatch)

db/log_reader.h (+22, -23)

@ -43,6 +43,9 @@ class Reader {
Reader(SequentialFile* file, Reporter* reporter, bool checksum,
uint64_t initial_offset);
Reader(const Reader&) = delete;
Reader& operator=(const Reader&) = delete;
~Reader();
// Read the next record into *record. Returns true if read
@ -58,26 +61,6 @@ class Reader {
uint64_t LastRecordOffset();
private:
SequentialFile* const file_;
Reporter* const reporter_;
bool const checksum_;
char* const backing_store_;
Slice buffer_;
bool eof_; // Last Read() indicated EOF by returning < kBlockSize
// Offset of the last record returned by ReadRecord.
uint64_t last_record_offset_;
// Offset of the first location past the end of buffer_.
uint64_t end_of_buffer_offset_;
// Offset at which to start looking for the first record to return
uint64_t const initial_offset_;
// True if we are resynchronizing after a seek (initial_offset_ > 0). In
// particular, a run of kMiddleType and kLastType records can be silently
// skipped in this mode
bool resyncing_;
// Extend record types with the following special values
enum {
kEof = kMaxRecordType + 1,
@ -102,9 +85,25 @@ class Reader {
void ReportCorruption(uint64_t bytes, const char* reason);
void ReportDrop(uint64_t bytes, const Status& reason);
// No copying allowed
Reader(const Reader&);
void operator=(const Reader&);
SequentialFile* const file_;
Reporter* const reporter_;
bool const checksum_;
char* const backing_store_;
Slice buffer_;
bool eof_; // Last Read() indicated EOF by returning < kBlockSize
// Offset of the last record returned by ReadRecord.
uint64_t last_record_offset_;
// Offset of the first location past the end of buffer_.
uint64_t end_of_buffer_offset_;
// Offset at which to start looking for the first record to return
uint64_t const initial_offset_;
// True if we are resynchronizing after a seek (initial_offset_ > 0). In
// particular, a run of kMiddleType and kLastType records can be silently
// skipped in this mode
bool resyncing_;
};
} // namespace log

db/log_test.cc (+76, -75)

@ -37,81 +37,6 @@ static std::string RandomSkewedString(int i, Random* rnd) {
}
class LogTest {
private:
class StringDest : public WritableFile {
public:
std::string contents_;
virtual Status Close() { return Status::OK(); }
virtual Status Flush() { return Status::OK(); }
virtual Status Sync() { return Status::OK(); }
virtual Status Append(const Slice& slice) {
contents_.append(slice.data(), slice.size());
return Status::OK();
}
};
class StringSource : public SequentialFile {
public:
Slice contents_;
bool force_error_;
bool returned_partial_;
StringSource() : force_error_(false), returned_partial_(false) {}
virtual Status Read(size_t n, Slice* result, char* scratch) {
ASSERT_TRUE(!returned_partial_) << "must not Read() after eof/error";
if (force_error_) {
force_error_ = false;
returned_partial_ = true;
return Status::Corruption("read error");
}
if (contents_.size() < n) {
n = contents_.size();
returned_partial_ = true;
}
*result = Slice(contents_.data(), n);
contents_.remove_prefix(n);
return Status::OK();
}
virtual Status Skip(uint64_t n) {
if (n > contents_.size()) {
contents_.clear();
return Status::NotFound("in-memory file skipped past end");
}
contents_.remove_prefix(n);
return Status::OK();
}
};
class ReportCollector : public Reader::Reporter {
public:
size_t dropped_bytes_;
std::string message_;
ReportCollector() : dropped_bytes_(0) {}
virtual void Corruption(size_t bytes, const Status& status) {
dropped_bytes_ += bytes;
message_.append(status.ToString());
}
};
StringDest dest_;
StringSource source_;
ReportCollector report_;
bool reading_;
Writer* writer_;
Reader* reader_;
// Record metadata for testing initial offset functionality
static size_t initial_offset_record_sizes_[];
static uint64_t initial_offset_last_record_offsets_[];
static int num_initial_offset_records_;
public:
LogTest()
: reading_(false),
@ -232,6 +157,82 @@ class LogTest {
}
delete offset_reader;
}
private:
class StringDest : public WritableFile {
public:
virtual Status Close() { return Status::OK(); }
virtual Status Flush() { return Status::OK(); }
virtual Status Sync() { return Status::OK(); }
virtual Status Append(const Slice& slice) {
contents_.append(slice.data(), slice.size());
return Status::OK();
}
std::string contents_;
};
class StringSource : public SequentialFile {
public:
StringSource() : force_error_(false), returned_partial_(false) {}
virtual Status Read(size_t n, Slice* result, char* scratch) {
ASSERT_TRUE(!returned_partial_) << "must not Read() after eof/error";
if (force_error_) {
force_error_ = false;
returned_partial_ = true;
return Status::Corruption("read error");
}
if (contents_.size() < n) {
n = contents_.size();
returned_partial_ = true;
}
*result = Slice(contents_.data(), n);
contents_.remove_prefix(n);
return Status::OK();
}
virtual Status Skip(uint64_t n) {
if (n > contents_.size()) {
contents_.clear();
return Status::NotFound("in-memory file skipped past end");
}
contents_.remove_prefix(n);
return Status::OK();
}
Slice contents_;
bool force_error_;
bool returned_partial_;
};
class ReportCollector : public Reader::Reporter {
public:
ReportCollector() : dropped_bytes_(0) {}
virtual void Corruption(size_t bytes, const Status& status) {
dropped_bytes_ += bytes;
message_.append(status.ToString());
}
size_t dropped_bytes_;
std::string message_;
};
// Record metadata for testing initial offset functionality
static size_t initial_offset_record_sizes_[];
static uint64_t initial_offset_last_record_offsets_[];
static int num_initial_offset_records_;
StringDest dest_;
StringSource source_;
ReportCollector report_;
bool reading_;
Writer* writer_;
Reader* reader_;
};
size_t LogTest::initial_offset_record_sizes_[] = {

db/log_writer.h (+5, -6)

@ -29,11 +29,16 @@ class Writer {
// "*dest" must remain live while this Writer is in use.
Writer(WritableFile* dest, uint64_t dest_length);
Writer(const Writer&) = delete;
Writer& operator=(const Writer&) = delete;
~Writer();
Status AddRecord(const Slice& slice);
private:
Status EmitPhysicalRecord(RecordType type, const char* ptr, size_t length);
WritableFile* dest_;
int block_offset_; // Current offset in block
@ -41,12 +46,6 @@ class Writer {
// pre-computed to reduce the overhead of computing the crc of the
// record type stored in the header.
uint32_t type_crc_[kMaxRecordType + 1];
Status EmitPhysicalRecord(RecordType type, const char* ptr, size_t length);
// No copying allowed
Writer(const Writer&);
void operator=(const Writer&);
};
} // namespace log

db/memtable.h (+7, -7)

@ -23,6 +23,9 @@ class MemTable {
// is zero and the caller must call Ref() at least once.
explicit MemTable(const InternalKeyComparator& comparator);
MemTable(const MemTable&) = delete;
MemTable& operator=(const MemTable&) = delete;
// Increase reference count.
void Ref() { ++refs_; }
@ -60,26 +63,23 @@ class MemTable {
bool Get(const LookupKey& key, std::string* value, Status* s);
private:
~MemTable(); // Private since only Unref() should be used to delete it
friend class MemTableIterator;
friend class MemTableBackwardIterator;
struct KeyComparator {
const InternalKeyComparator comparator;
explicit KeyComparator(const InternalKeyComparator& c) : comparator(c) {}
int operator()(const char* a, const char* b) const;
};
friend class MemTableIterator;
friend class MemTableBackwardIterator;
typedef SkipList<const char*, KeyComparator> Table;
~MemTable(); // Private since only Unref() should be used to delete it
KeyComparator comparator_;
int refs_;
Arena arena_;
Table table_;
// No copying allowed
MemTable(const MemTable&);
void operator=(const MemTable&);
};
} // namespace leveldb

db/repair.cc (+16, -16)

@ -95,22 +95,6 @@ class Repairer {
SequenceNumber max_sequence;
};
std::string const dbname_;
Env* const env_;
InternalKeyComparator const icmp_;
InternalFilterPolicy const ipolicy_;
Options const options_;
bool owns_info_log_;
bool owns_cache_;
TableCache* table_cache_;
VersionEdit edit_;
std::vector<std::string> manifests_;
std::vector<uint64_t> table_numbers_;
std::vector<uint64_t> logs_;
std::vector<TableInfo> tables_;
uint64_t next_file_number_;
Status FindFiles() {
std::vector<std::string> filenames;
Status status = env_->GetChildren(dbname_, &filenames);
@ -439,6 +423,22 @@ class Repairer {
Log(options_.info_log, "Archiving %s: %s\n", fname.c_str(),
s.ToString().c_str());
}
const std::string dbname_;
Env* const env_;
InternalKeyComparator const icmp_;
InternalFilterPolicy const ipolicy_;
const Options options_;
bool owns_info_log_;
bool owns_cache_;
TableCache* table_cache_;
VersionEdit edit_;
std::vector<std::string> manifests_;
std::vector<uint64_t> table_numbers_;
std::vector<uint64_t> logs_;
std::vector<TableInfo> tables_;
uint64_t next_file_number_;
};
} // namespace

db/skiplist.h (+15, -16)

@ -49,6 +49,9 @@ class SkipList {
// must remain allocated for the lifetime of the skiplist object.
explicit SkipList(Comparator cmp, Arena* arena);
SkipList(const SkipList&) = delete;
SkipList& operator=(const SkipList&) = delete;
// Insert key into the list.
// REQUIRES: nothing that compares equal to key is currently in the list.
void Insert(const Key& key);
@ -98,23 +101,10 @@ class SkipList {
private:
enum { kMaxHeight = 12 };
// Immutable after construction
Comparator const compare_;
Arena* const arena_; // Arena used for allocations of nodes
Node* const head_;
// Modified only by Insert(). Read racily by readers, but stale
// values are ok.
std::atomic<int> max_height_; // Height of the entire list
inline int GetMaxHeight() const {
return max_height_.load(std::memory_order_relaxed);
}
// Read/written only by Insert().
Random rnd_;
Node* NewNode(const Key& key, int height);
int RandomHeight();
bool Equal(const Key& a, const Key& b) const { return (compare_(a, b) == 0); }
@ -137,9 +127,18 @@ class SkipList {
// Return head_ if list is empty.
Node* FindLast() const;
// No copying allowed
SkipList(const SkipList&);
void operator=(const SkipList&);
// Immutable after construction
Comparator const compare_;
Arena* const arena_; // Arena used for allocations of nodes
Node* const head_;
// Modified only by Insert(). Read racily by readers, but stale
// values are ok.
std::atomic<int> max_height_; // Height of the entire list
// Read/written only by Insert().
Random rnd_;
};
// Implementation details follow

db/table_cache.h (+2, -2)

@ -45,12 +45,12 @@ class TableCache {
void Evict(uint64_t file_number);
private:
Status FindTable(uint64_t file_number, uint64_t file_size, Cache::Handle**);
Env* const env_;
const std::string dbname_;
const Options& options_;
Cache* cache_;
Status FindTable(uint64_t file_number, uint64_t file_size, Cache::Handle**);
};
} // namespace leveldb

db/version_edit.h (+2, -2)

@ -16,14 +16,14 @@ namespace leveldb {
class VersionSet;
struct FileMetaData {
FileMetaData() : refs(0), allowed_seeks(1 << 30), file_size(0) {}
int refs;
int allowed_seeks; // Seeks allowed until compaction
uint64_t number;
uint64_t file_size; // File size in bytes
InternalKey smallest; // Smallest internal key served by table
InternalKey largest; // Largest internal key served by table
FileMetaData() : refs(0), allowed_seeks(1 << 30), file_size(0) {}
};
class VersionEdit {

db/version_set.h (+25, -25)

@ -59,11 +59,6 @@ bool SomeFileOverlapsRange(const InternalKeyComparator& icmp,
class Version {
public:
// Append to *iters a sequence of iterators that will
// yield the contents of this Version when merged together.
// REQUIRES: This version has been saved (see VersionSet::SaveTo)
void AddIterators(const ReadOptions&, std::vector<Iterator*>* iters);
// Lookup the value for key. If found, store it in *val and
// return OK. Else return a non-OK status. Fills *stats.
// REQUIRES: lock is not held
@ -71,6 +66,12 @@ class Version {
FileMetaData* seek_file;
int seek_file_level;
};
// Append to *iters a sequence of iterators that will
// yield the contents of this Version when merged together.
// REQUIRES: This version has been saved (see VersionSet::SaveTo)
void AddIterators(const ReadOptions&, std::vector<Iterator*>* iters);
Status Get(const ReadOptions&, const LookupKey& key, std::string* val,
GetStats* stats);
@ -118,6 +119,22 @@ class Version {
friend class VersionSet;
class LevelFileNumIterator;
explicit Version(VersionSet* vset)
: vset_(vset),
next_(this),
prev_(this),
refs_(0),
file_to_compact_(nullptr),
file_to_compact_level_(-1),
compaction_score_(-1),
compaction_level_(-1) {}
Version(const Version&) = delete;
Version& operator=(const Version&) = delete;
~Version();
Iterator* NewConcatenatingIterator(const ReadOptions&, int level) const;
// Call func(arg, level, f) for every file that overlaps user_key in
@ -145,28 +162,15 @@ class Version {
// are initialized by Finalize().
double compaction_score_;
int compaction_level_;
explicit Version(VersionSet* vset)
: vset_(vset),
next_(this),
prev_(this),
refs_(0),
file_to_compact_(nullptr),
file_to_compact_level_(-1),
compaction_score_(-1),
compaction_level_(-1) {}
~Version();
// No copying allowed
Version(const Version&);
void operator=(const Version&);
};
class VersionSet {
public:
VersionSet(const std::string& dbname, const Options* options,
TableCache* table_cache, const InternalKeyComparator*);
VersionSet(const VersionSet&) = delete;
VersionSet& operator=(const VersionSet&) = delete;
~VersionSet();
// Apply *edit to the current version to form a new descriptor that
@ -309,10 +313,6 @@ class VersionSet {
// Per-level key at which the next compaction at that level should start.
// Either an empty string, or a valid InternalKey.
std::string compact_pointer_[config::kNumLevels];
// No copying allowed
VersionSet(const VersionSet&);
void operator=(const VersionSet&);
};
// A Compaction encapsulates information about a compaction.

db/version_set_test.cc (+5, -3)

@ -11,9 +11,6 @@ namespace leveldb {
class FindFileTest {
public:
std::vector<FileMetaData*> files_;
bool disjoint_sorted_files_;
FindFileTest() : disjoint_sorted_files_(true) {}
~FindFileTest() {
@ -46,6 +43,11 @@ class FindFileTest {
(smallest != nullptr ? &s : nullptr),
(largest != nullptr ? &l : nullptr));
}
bool disjoint_sorted_files_;
private:
std::vector<FileMetaData*> files_;
};
TEST(FindFileTest, Empty) {

helpers/memenv/memenv.cc (+7, -6)

@ -27,6 +27,10 @@ class FileState {
// and the caller must call Ref() at least once.
FileState() : refs_(0), size_(0) {}
// No copying allowed.
FileState(const FileState&) = delete;
FileState& operator=(const FileState&) = delete;
// Increase the reference count.
void Ref() {
MutexLock lock(&refs_mutex_);
@ -133,21 +137,17 @@ class FileState {
}
private:
enum { kBlockSize = 8 * 1024 };
// Private since only Unref() should be used to delete it.
~FileState() { Truncate(); }
// No copying allowed.
FileState(const FileState&);
void operator=(const FileState&);
port::Mutex refs_mutex_;
int refs_ GUARDED_BY(refs_mutex_);
mutable port::Mutex blocks_mutex_;
std::vector<char*> blocks_ GUARDED_BY(blocks_mutex_);
uint64_t size_ GUARDED_BY(blocks_mutex_);
enum { kBlockSize = 8 * 1024 };
};
class SequentialFileImpl : public SequentialFile {
@ -380,6 +380,7 @@ class InMemoryEnv : public EnvWrapper {
private:
// Map from filenames to FileState objects, representing a simple file system.
typedef std::map<std::string, FileState*> FileSystem;
port::Mutex mutex_;
FileSystem file_map_ GUARDED_BY(mutex_);
};

helpers/memenv/memenv_test.cc (+2, -2)

@ -16,10 +16,10 @@ namespace leveldb {
class MemEnvTest {
public:
Env* env_;
MemEnvTest() : env_(NewMemEnv(Env::Default())) {}
~MemEnvTest() { delete env_; }
Env* env_;
};
TEST(MemEnvTest, Basics) {

include/leveldb/db.h (+3, -3)

@ -33,11 +33,11 @@ class LEVELDB_EXPORT Snapshot {
// A range of keys
struct LEVELDB_EXPORT Range {
Slice start; // Included in the range
Slice limit; // Not included in the range
Range() {}
Range(const Slice& s, const Slice& l) : start(s), limit(l) {}
Slice start; // Included in the range
Slice limit; // Not included in the range
};
// A DB is a persistent ordered map from keys to values.

include/leveldb/iterator.h (+6, -6)

@ -84,12 +84,6 @@ class LEVELDB_EXPORT Iterator {
// Cleanup functions are stored in a single-linked list.
// The list's head node is inlined in the iterator.
struct CleanupNode {
// The head node is used if the function pointer is not null.
CleanupFunction function;
void* arg1;
void* arg2;
CleanupNode* next;
// True if the node is not used. Only head nodes might be unused.
bool IsEmpty() const { return function == nullptr; }
// Invokes the cleanup function.
@ -97,6 +91,12 @@ class LEVELDB_EXPORT Iterator {
assert(function != nullptr);
(*function)(arg1, arg2);
}
// The head node is used if the function pointer is not null.
CleanupFunction function;
void* arg1;
void* arg2;
CleanupNode* next;
};
CleanupNode cleanup_head_;
};

include/leveldb/options.h (+7, -7)

@ -31,6 +31,9 @@ enum CompressionType {
// Options to control the behavior of a database (passed to DB::Open)
struct LEVELDB_EXPORT Options {
// Create an Options object with default values for all fields.
Options();
// -------------------
// Parameters that affect behavior
@ -137,13 +140,12 @@ struct LEVELDB_EXPORT Options {
// Many applications will benefit from passing the result of
// NewBloomFilterPolicy() here.
const FilterPolicy* filter_policy = nullptr;
// Create an Options object with default values for all fields.
Options();
};
// Options that control read operations
struct LEVELDB_EXPORT ReadOptions {
ReadOptions() = default;
// If true, all data read from underlying storage will be
// verified against corresponding checksums.
bool verify_checksums = false;
@ -157,12 +159,12 @@ struct LEVELDB_EXPORT ReadOptions {
// not have been released). If "snapshot" is null, use an implicit
// snapshot of the state at the beginning of this read operation.
const Snapshot* snapshot = nullptr;
ReadOptions() = default;
};
// Options that control write operations
struct LEVELDB_EXPORT WriteOptions {
WriteOptions() = default;
// If true, the write will be flushed from the operating system
// buffer cache (by calling WritableFile::Sync()) before the write
// is considered complete. If this flag is true, writes will be
@ -178,8 +180,6 @@ struct LEVELDB_EXPORT WriteOptions {
// with sync==true has similar crash semantics to a "write()"
// system call followed by "fsync()".
bool sync = false;
WriteOptions() = default;
};
} // namespace leveldb

include/leveldb/status.h (+7, -7)

@ -76,13 +76,6 @@ class LEVELDB_EXPORT Status {
std::string ToString() const;
private:
// OK status has a null state_. Otherwise, state_ is a new[] array
// of the following form:
// state_[0..3] == length of message
// state_[4] == code
// state_[5..] == message
const char* state_;
enum Code {
kOk = 0,
kNotFound = 1,
@ -98,6 +91,13 @@ class LEVELDB_EXPORT Status {
Status(Code code, const Slice& msg, const Slice& msg2);
static const char* CopyState(const char* s);
// OK status has a null state_. Otherwise, state_ is a new[] array
// of the following form:
// state_[0..3] == length of message
// state_[4] == code
// state_[5..] == message
const char* state_;
};
inline Status::Status(const Status& rhs) {

include/leveldb/table.h (+6, -4)

@ -41,7 +41,7 @@ class LEVELDB_EXPORT Table {
uint64_t file_size, Table** table);
Table(const Table&) = delete;
void operator=(const Table&) = delete;
Table& operator=(const Table&) = delete;
~Table();
@ -59,22 +59,24 @@ class LEVELDB_EXPORT Table {
uint64_t ApproximateOffsetOf(const Slice& key) const;
private:
friend class TableCache;
struct Rep;
Rep* rep_;
explicit Table(Rep* rep) { rep_ = rep; }
static Iterator* BlockReader(void*, const ReadOptions&, const Slice&);
explicit Table(Rep* rep) : rep_(rep) {}
// Calls (*handle_result)(arg, ...) with the entry found after a call
// to Seek(key). May not make such a call if filter policy says
// that key is not present.
friend class TableCache;
Status InternalGet(const ReadOptions&, const Slice& key, void* arg,
void (*handle_result)(void* arg, const Slice& k,
const Slice& v));
void ReadMeta(const Footer& footer);
void ReadFilter(const Slice& filter_handle_value);
Rep* const rep_;
};
} // namespace leveldb

include/leveldb/table_builder.h (+1, -1)

@ -33,7 +33,7 @@ class LEVELDB_EXPORT TableBuilder {
TableBuilder(const Options& options, WritableFile* file);
TableBuilder(const TableBuilder&) = delete;
void operator=(const TableBuilder&) = delete;
TableBuilder& operator=(const TableBuilder&) = delete;
// REQUIRES: Either Finish() or Abandon() has been called.
~TableBuilder();

include/leveldb/write_batch.h (+7, -6)

@ -32,6 +32,13 @@ class Slice;
class LEVELDB_EXPORT WriteBatch {
public:
class LEVELDB_EXPORT Handler {
public:
virtual ~Handler();
virtual void Put(const Slice& key, const Slice& value) = 0;
virtual void Delete(const Slice& key) = 0;
};
WriteBatch();
// Intentionally copyable.
@ -63,12 +70,6 @@ class LEVELDB_EXPORT WriteBatch {
void Append(const WriteBatch& source);
// Support for iterating over the contents of a batch.
class LEVELDB_EXPORT Handler {
public:
virtual ~Handler();
virtual void Put(const Slice& key, const Slice& value) = 0;
virtual void Delete(const Slice& key) = 0;
};
Status Iterate(Handler* handler) const;
private:

table/block.h (+5, -6)

@ -20,24 +20,23 @@ class Block {
// Initialize the block with the specified contents.
explicit Block(const BlockContents& contents);
Block(const Block&) = delete;
Block& operator=(const Block&) = delete;
~Block();
size_t size() const { return size_; }
Iterator* NewIterator(const Comparator* comparator);
private:
class Iter;
uint32_t NumRestarts() const;
const char* data_;
size_t size_;
uint32_t restart_offset_; // Offset in data_ of restart array
bool owned_; // Block owns data_[]
// No copying allowed
Block(const Block&);
void operator=(const Block&);
class Iter;
};
} // namespace leveldb

table/block_builder.h (+3, -4)

@ -19,6 +19,9 @@ class BlockBuilder {
public:
explicit BlockBuilder(const Options* options);
BlockBuilder(const BlockBuilder&) = delete;
BlockBuilder& operator=(const BlockBuilder&) = delete;
// Reset the contents as if the BlockBuilder was just constructed.
void Reset();
@ -45,10 +48,6 @@ class BlockBuilder {
int counter_; // Number of entries emitted since restart
bool finished_; // Has Finish() been called?
std::string last_key_;
// No copying allowed
BlockBuilder(const BlockBuilder&);
void operator=(const BlockBuilder&);
};
} // namespace leveldb

table/filter_block.h (+3, -4)

@ -32,6 +32,9 @@ class FilterBlockBuilder {
public:
explicit FilterBlockBuilder(const FilterPolicy*);
FilterBlockBuilder(const FilterBlockBuilder&) = delete;
FilterBlockBuilder& operator=(const FilterBlockBuilder&) = delete;
void StartBlock(uint64_t block_offset);
void AddKey(const Slice& key);
Slice Finish();
@ -45,10 +48,6 @@ class FilterBlockBuilder {
std::string result_; // Filter data computed so far
std::vector<Slice> tmp_keys_; // policy_->CreateFilter() argument
std::vector<uint32_t> filter_offsets_;
// No copying allowed
FilterBlockBuilder(const FilterBlockBuilder&);
void operator=(const FilterBlockBuilder&);
};
class FilterBlockReader {

table/format.h (+8, -8)

@ -23,6 +23,9 @@ struct ReadOptions;
// block or a meta block.
class BlockHandle {
public:
// Maximum encoding length of a BlockHandle
enum { kMaxEncodedLength = 10 + 10 };
BlockHandle();
// The offset of the block in the file.
@ -36,9 +39,6 @@ class BlockHandle {
void EncodeTo(std::string* dst) const;
Status DecodeFrom(Slice* input);
// Maximum encoding length of a BlockHandle
enum { kMaxEncodedLength = 10 + 10 };
private:
uint64_t offset_;
uint64_t size_;
@ -48,6 +48,11 @@ class BlockHandle {
// end of every table file.
class Footer {
public:
// Encoded length of a Footer. Note that the serialization of a
// Footer will always occupy exactly this many bytes. It consists
// of two block handles and a magic number.
enum { kEncodedLength = 2 * BlockHandle::kMaxEncodedLength + 8 };
Footer() {}
// The block handle for the metaindex block of the table
@ -61,11 +66,6 @@ class Footer {
void EncodeTo(std::string* dst) const;
Status DecodeFrom(Slice* input);
// Encoded length of a Footer. Note that the serialization of a
// Footer will always occupy exactly this many bytes. It consists
// of two block handles and a magic number.
enum { kEncodedLength = 2 * BlockHandle::kMaxEncodedLength + 8 };
private:
BlockHandle metaindex_handle_;
BlockHandle index_handle_;

table/merger.cc (+3, -3)

@ -129,6 +129,9 @@ class MergingIterator : public Iterator {
}
private:
// Which direction is the iterator moving?
enum Direction { kForward, kReverse };
void FindSmallest();
void FindLargest();
@ -139,9 +142,6 @@ class MergingIterator : public Iterator {
IteratorWrapper* children_;
int n_;
IteratorWrapper* current_;
// Which direction is the iterator moving?
enum Direction { kForward, kReverse };
Direction direction_;
};

table/table_builder.cc (+16, -16)

@ -19,6 +19,22 @@
namespace leveldb {
struct TableBuilder::Rep {
Rep(const Options& opt, WritableFile* f)
: options(opt),
index_block_options(opt),
file(f),
offset(0),
data_block(&options),
index_block(&index_block_options),
num_entries(0),
closed(false),
filter_block(opt.filter_policy == nullptr
? nullptr
: new FilterBlockBuilder(opt.filter_policy)),
pending_index_entry(false) {
index_block_options.block_restart_interval = 1;
}
Options options;
Options index_block_options;
WritableFile* file;
@ -44,22 +60,6 @@ struct TableBuilder::Rep {
BlockHandle pending_handle; // Handle to add to index block
std::string compressed_output;
Rep(const Options& opt, WritableFile* f)
: options(opt),
index_block_options(opt),
file(f),
offset(0),
data_block(&options),
index_block(&index_block_options),
num_entries(0),
closed(false),
filter_block(opt.filter_policy == nullptr
? nullptr
: new FilterBlockBuilder(opt.filter_policy)),
pending_index_entry(false) {
index_block_options.block_restart_interval = 1;
}
};
TableBuilder::TableBuilder(const Options& options, WritableFile* file)

util/arena.cc (+2, -4)

@ -8,10 +8,8 @@ namespace leveldb {
static const int kBlockSize = 4096;
Arena::Arena() : memory_usage_(0) {
alloc_ptr_ = nullptr; // First allocation will allocate a block
alloc_bytes_remaining_ = 0;
}
Arena::Arena()
: alloc_ptr_(nullptr), alloc_bytes_remaining_(0), memory_usage_(0) {}
Arena::~Arena() {
for (size_t i = 0; i < blocks_.size(); i++) {

util/arena.h (+4, -4)

@ -16,6 +16,10 @@ namespace leveldb {
class Arena {
public:
Arena();
Arena(const Arena&) = delete;
Arena& operator=(const Arena&) = delete;
~Arena();
// Return a pointer to a newly allocated memory block of "bytes" bytes.
@ -46,10 +50,6 @@ class Arena {
// TODO(costan): This member is accessed via atomics, but the others are
// accessed without any locking. Is this OK?
std::atomic<size_t> memory_usage_;
// No copying allowed
Arena(const Arena&);
void operator=(const Arena&);
};
inline char* Arena::Allocate(size_t bytes) {

util/bloom.cc (+4, -4)

@ -15,10 +15,6 @@ static uint32_t BloomHash(const Slice& key) {
}
class BloomFilterPolicy : public FilterPolicy {
private:
size_t bits_per_key_;
size_t k_;
public:
explicit BloomFilterPolicy(int bits_per_key) : bits_per_key_(bits_per_key) {
// We intentionally round down to reduce probing cost a little bit
@ -82,6 +78,10 @@ class BloomFilterPolicy : public FilterPolicy {
}
return true;
}
private:
size_t bits_per_key_;
size_t k_;
};
} // namespace

util/bloom_test.cc (+5, -5)

@ -19,11 +19,6 @@ static Slice Key(int i, char* buffer) {
}
class BloomTest {
private:
const FilterPolicy* policy_;
std::string filter_;
std::vector<std::string> keys_;
public:
BloomTest() : policy_(NewBloomFilterPolicy(10)) {}
@ -78,6 +73,11 @@ class BloomTest {
}
return result / 10000.0;
}
private:
const FilterPolicy* policy_;
std::string filter_;
std::vector<std::string> keys_;
};
TEST(BloomTest, EmptyFilter) {

util/cache_test.cc (+2, -2)

@ -25,8 +25,6 @@ static int DecodeValue(void* v) { return reinterpret_cast<uintptr_t>(v); }
class CacheTest {
public:
static CacheTest* current_;
static void Deleter(const Slice& key, void* v) {
current_->deleted_keys_.push_back(DecodeKey(key));
current_->deleted_values_.push_back(DecodeValue(v));
@ -61,6 +59,8 @@ class CacheTest {
}
void Erase(int key) { cache_->Erase(EncodeKey(key)); }
static CacheTest* current_;
};
CacheTest* CacheTest::current_;

util/env_posix_test.cc (+4, -3)

@ -14,13 +14,14 @@ static const int kMMapLimit = 4;
class EnvPosixTest {
public:
Env* env_;
EnvPosixTest() : env_(Env::Default()) {}
static void SetFileLimits(int read_only_file_limit, int mmap_limit) {
EnvPosixTestHelper::SetReadOnlyFDLimit(read_only_file_limit);
EnvPosixTestHelper::SetReadOnlyMMapLimit(mmap_limit);
}
EnvPosixTest() : env_(Env::Default()) {}
Env* env_;
};
TEST(EnvPosixTest, TestOpenOnRead) {

util/env_test.cc (+2, -1)

@ -19,8 +19,9 @@ static const int kDelayMicros = 100000;
class EnvTest {
public:
Env* env_;
EnvTest() : env_(Env::Default()) {}
Env* env_;
};
namespace {

util/env_windows.cc (+7, -9)

@ -626,21 +626,19 @@ class WindowsEnv : public Env {
}
private:
// BGThread() is the body of the background thread
void BGThread();
std::mutex mu_;
std::condition_variable bgsignal_;
bool started_bgthread_;
// Entry per Schedule() call
struct BGItem {
void* arg;
void (*function)(void*);
};
typedef std::deque<BGItem> BGQueue;
BGQueue queue_;
// BGThread() is the body of the background thread
void BGThread();
std::mutex mu_;
std::condition_variable bgsignal_;
bool started_bgthread_;
std::deque<BGItem> queue_;
Limiter mmap_limiter_;
};

util/env_windows_test.cc (+4, -3)

@ -14,12 +14,13 @@ static const int kMMapLimit = 4;
class EnvWindowsTest {
public:
Env* env_;
EnvWindowsTest() : env_(Env::Default()) {}
static void SetFileLimits(int mmap_limit) {
EnvWindowsTestHelper::SetReadOnlyMMapLimit(mmap_limit);
}
EnvWindowsTest() : env_(Env::Default()) {}
Env* env_;
};
TEST(EnvWindowsTest, TestOpenOnRead) {

util/histogram.h (+9, -7)

@ -21,20 +21,22 @@ class Histogram {
std::string ToString() const;
private:
enum { kNumBuckets = 154 };
double Median() const;
double Percentile(double p) const;
double Average() const;
double StandardDeviation() const;
static const double kBucketLimit[kNumBuckets];
double min_;
double max_;
double num_;
double sum_;
double sum_squares_;
enum { kNumBuckets = 154 };
static const double kBucketLimit[kNumBuckets];
double buckets_[kNumBuckets];
double Median() const;
double Percentile(double p) const;
double Average() const;
double StandardDeviation() const;
};
} // namespace leveldb
