From 09217fd0677a4fd9713c7a4d774c494a7d3c1f15 Mon Sep 17 00:00:00 2001 From: costan Date: Tue, 10 Apr 2018 16:18:06 -0700 Subject: [PATCH] Replace NULL with nullptr in C++ files. ------------- Created by MOE: https://github.com/google/moe MOE_MIGRATED_REVID=192365747 --- db/builder.cc | 2 +- db/c.cc | 26 +++--- db/corruption_test.cc | 18 ++--- db/db_bench.cc | 42 +++++----- db/db_impl.cc | 184 +++++++++++++++++++++--------------------- db/db_impl.h | 8 +- db/db_test.cc | 164 ++++++++++++++++++------------------- db/dumpfile.cc | 4 +- db/fault_injection_test.cc | 10 +-- db/log_reader.cc | 2 +- db/log_reader.h | 2 +- db/recovery_test.cc | 14 ++-- db/repair.cc | 12 +-- db/skiplist.h | 36 ++++----- db/skiplist_test.cc | 4 +- db/table_cache.cc | 18 ++--- db/table_cache.h | 10 +-- db/version_edit.cc | 8 +- db/version_set.cc | 77 +++++++++--------- db/version_set.h | 20 ++--- db/version_set_test.cc | 52 ++++++------ doc/bench/db_bench_sqlite3.cc | 59 +++++++------- doc/bench/db_bench_tree_db.cc | 24 +++--- helpers/memenv/memenv.cc | 6 +- include/leveldb/cache.h | 2 +- include/leveldb/db.h | 8 +- include/leveldb/env.h | 12 +-- include/leveldb/options.h | 24 +++--- include/leveldb/status.h | 12 +-- include/leveldb/table.h | 2 +- port/port_posix.cc | 4 +- table/block.cc | 16 ++-- table/filter_block.cc | 4 +- table/iterator.cc | 12 +-- table/iterator_wrapper.h | 8 +- table/merger.cc | 12 +-- table/table.cc | 24 +++--- table/table_builder.cc | 12 +-- table/table_test.cc | 22 ++--- table/two_level_iterator.cc | 32 ++++---- util/arena.cc | 2 +- util/cache.cc | 24 +++--- util/cache_test.cc | 4 +- util/coding.cc | 12 +-- util/coding.h | 2 +- util/coding_test.cc | 19 ++--- util/env.cc | 2 +- util/env_posix.cc | 40 ++++----- util/env_posix_test.cc | 2 +- util/env_test.cc | 6 +- util/options.cc | 6 +- util/posix_logger.h | 2 +- util/status.cc | 2 +- util/testharness.cc | 10 +-- util/testutil.h | 4 +- 55 files changed, 575 insertions(+), 570 deletions(-) diff --git a/db/builder.cc b/db/builder.cc index 729f9fd..5fa405d 100644 --- a/db/builder.cc +++ b/db/builder.cc @@ -56,7 +56,7 @@ Status BuildTable(const std::string& dbname, s = file->Close(); } delete file; - file = NULL; + file = nullptr; if (s.ok()) { // Verify that the table is usable diff --git a/db/c.cc b/db/c.cc index 0ccf08c..77b33d5 100644 --- a/db/c.cc +++ b/db/c.cc @@ -129,10 +129,10 @@ struct leveldb_env_t { }; static bool SaveError(char** errptr, const Status& s) { - assert(errptr != NULL); + assert(errptr != nullptr); if (s.ok()) { return false; - } else if (*errptr == NULL) { + } else if (*errptr == nullptr) { *errptr = strdup(s.ToString().c_str()); } else { // TODO(sanjay): Merge with existing error? @@ -154,7 +154,7 @@ leveldb_t* leveldb_open( char** errptr) { DB* db; if (SaveError(errptr, DB::Open(options->rep, std::string(name), &db))) { - return NULL; + return nullptr; } leveldb_t* result = new leveldb_t; result->rep = db; @@ -199,7 +199,7 @@ char* leveldb_get( const char* key, size_t keylen, size_t* vallen, char** errptr) { - char* result = NULL; + char* result = nullptr; std::string tmp; Status s = db->rep->Get(options->rep, Slice(key, keylen), &tmp); if (s.ok()) { @@ -244,7 +244,7 @@ char* leveldb_property_value( // We use strdup() since we expect human readable output. 
return strdup(tmp.c_str()); } else { - return NULL; + return nullptr; } } @@ -269,9 +269,9 @@ void leveldb_compact_range( const char* limit_key, size_t limit_key_len) { Slice a, b; db->rep->CompactRange( - // Pass NULL Slice if corresponding "const char*" is NULL - (start_key ? (a = Slice(start_key, start_key_len), &a) : NULL), - (limit_key ? (b = Slice(limit_key, limit_key_len), &b) : NULL)); + // Pass null Slice if corresponding "const char*" is null + (start_key ? (a = Slice(start_key, start_key_len), &a) : nullptr), + (limit_key ? (b = Slice(limit_key, limit_key_len), &b) : nullptr)); } void leveldb_destroy_db( @@ -418,11 +418,11 @@ void leveldb_options_set_paranoid_checks( } void leveldb_options_set_env(leveldb_options_t* opt, leveldb_env_t* env) { - opt->rep.env = (env ? env->rep : NULL); + opt->rep.env = (env ? env->rep : nullptr); } void leveldb_options_set_info_log(leveldb_options_t* opt, leveldb_logger_t* l) { - opt->rep.info_log = (l ? l->rep : NULL); + opt->rep.info_log = (l ? l->rep : nullptr); } void leveldb_options_set_write_buffer_size(leveldb_options_t* opt, size_t s) { @@ -517,7 +517,7 @@ leveldb_filterpolicy_t* leveldb_filterpolicy_create_bloom(int bits_per_key) { }; Wrapper* wrapper = new Wrapper; wrapper->rep_ = NewBloomFilterPolicy(bits_per_key); - wrapper->state_ = NULL; + wrapper->state_ = nullptr; wrapper->destructor_ = &Wrapper::DoNothing; return wrapper; } @@ -544,7 +544,7 @@ void leveldb_readoptions_set_fill_cache( void leveldb_readoptions_set_snapshot( leveldb_readoptions_t* opt, const leveldb_snapshot_t* snap) { - opt->rep.snapshot = (snap ? snap->rep : NULL); + opt->rep.snapshot = (snap ? snap->rep : nullptr); } leveldb_writeoptions_t* leveldb_writeoptions_create() { @@ -586,7 +586,7 @@ void leveldb_env_destroy(leveldb_env_t* env) { char* leveldb_env_get_test_directory(leveldb_env_t* env) { std::string result; if (!env->rep->GetTestDirectory(&result).ok()) { - return NULL; + return nullptr; } char* buffer = static_cast<char*>(malloc(result.size() + 1)); diff --git a/db/corruption_test.cc b/db/corruption_test.cc index 37a484d..0b93c24 100644 --- a/db/corruption_test.cc +++ b/db/corruption_test.cc @@ -39,7 +39,7 @@ class CorruptionTest { dbname_ = test::TmpDir() + "/corruption_test"; DestroyDB(dbname_, options_); - db_ = NULL; + db_ = nullptr; options_.create_if_missing = true; Reopen(); options_.create_if_missing = false; @@ -53,7 +53,7 @@ class CorruptionTest { Status TryReopen() { delete db_; - db_ = NULL; + db_ = nullptr; return DB::Open(options_, dbname_, &db_); } @@ -63,7 +63,7 @@ class CorruptionTest { void RepairDB() { delete db_; - db_ = NULL; + db_ = nullptr; ASSERT_OK(::leveldb::RepairDB(dbname_, options_)); } @@ -237,8 +237,8 @@ TEST(CorruptionTest, TableFile) { Build(100); DBImpl* dbi = reinterpret_cast<DBImpl*>(db_); dbi->TEST_CompactMemTable(); - dbi->TEST_CompactRange(0, NULL, NULL); - dbi->TEST_CompactRange(1, NULL, NULL); + dbi->TEST_CompactRange(0, nullptr, nullptr); + dbi->TEST_CompactRange(1, nullptr, nullptr); Corrupt(kTableFile, 100, 1); Check(90, 99); @@ -251,8 +251,8 @@ TEST(CorruptionTest, TableFileRepair) { Build(100); DBImpl* dbi = reinterpret_cast<DBImpl*>(db_); dbi->TEST_CompactMemTable(); - dbi->TEST_CompactRange(0, NULL, NULL); - dbi->TEST_CompactRange(1, NULL, NULL); + dbi->TEST_CompactRange(0, nullptr, nullptr); + dbi->TEST_CompactRange(1, nullptr, nullptr); Corrupt(kTableFile, 100, 1); RepairDB(); @@ -302,7 +302,7 @@ TEST(CorruptionTest, CorruptedDescriptor) { ASSERT_OK(db_->Put(WriteOptions(), "foo", "hello")); DBImpl* dbi = reinterpret_cast<DBImpl*>(db_);
dbi->TEST_CompactMemTable(); - dbi->TEST_CompactRange(0, NULL, NULL); + dbi->TEST_CompactRange(0, nullptr, nullptr); Corrupt(kDescriptorFile, 0, 1000); Status s = TryReopen(); @@ -343,7 +343,7 @@ TEST(CorruptionTest, CompactionInputErrorParanoid) { Corrupt(kTableFile, 100, 1); env_.SleepForMicroseconds(100000); } - dbi->CompactRange(NULL, NULL); + dbi->CompactRange(nullptr, nullptr); // Write must fail because of corrupted table std::string tmp1, tmp2; diff --git a/db/db_bench.cc b/db/db_bench.cc index 701b128..115cf45 100644 --- a/db/db_bench.cc +++ b/db/db_bench.cc @@ -111,12 +111,12 @@ static bool FLAGS_use_existing_db = false; static bool FLAGS_reuse_logs = false; // Use the db with the following name. -static const char* FLAGS_db = NULL; +static const char* FLAGS_db = nullptr; namespace leveldb { namespace { -leveldb::Env* g_env = NULL; +leveldb::Env* g_env = nullptr; // Helper for quickly generating random data. class RandomGenerator { @@ -370,18 +370,18 @@ class Benchmark { kMajorVersion, kMinorVersion); #if defined(__linux) - time_t now = time(NULL); + time_t now = time(nullptr); fprintf(stderr, "Date: %s", ctime(&now)); // ctime() adds newline FILE* cpuinfo = fopen("/proc/cpuinfo", "r"); - if (cpuinfo != NULL) { + if (cpuinfo != nullptr) { char line[1000]; int num_cpus = 0; std::string cpu_type; std::string cache_size; - while (fgets(line, sizeof(line), cpuinfo) != NULL) { + while (fgets(line, sizeof(line), cpuinfo) != nullptr) { const char* sep = strchr(line, ':'); - if (sep == NULL) { + if (sep == nullptr) { continue; } Slice key = TrimSpace(Slice(line, sep - 1 - line)); @@ -402,11 +402,11 @@ class Benchmark { public: Benchmark() - : cache_(FLAGS_cache_size >= 0 ? NewLRUCache(FLAGS_cache_size) : NULL), + : cache_(FLAGS_cache_size >= 0 ? NewLRUCache(FLAGS_cache_size) : nullptr), filter_policy_(FLAGS_bloom_bits >= 0 ? NewBloomFilterPolicy(FLAGS_bloom_bits) - : NULL), - db_(NULL), + : nullptr), + db_(nullptr), num_(FLAGS_num), value_size_(FLAGS_value_size), entries_per_batch_(1), @@ -435,12 +435,12 @@ class Benchmark { Open(); const char* benchmarks = FLAGS_benchmarks; - while (benchmarks != NULL) { + while (benchmarks != nullptr) { const char* sep = strchr(benchmarks, ','); Slice name; - if (sep == NULL) { + if (sep == nullptr) { name = benchmarks; - benchmarks = NULL; + benchmarks = nullptr; } else { name = Slice(benchmarks, sep - benchmarks); benchmarks = sep + 1; @@ -453,7 +453,7 @@ class Benchmark { entries_per_batch_ = 1; write_options_ = WriteOptions(); - void (Benchmark::*method)(ThreadState*) = NULL; + void (Benchmark::*method)(ThreadState*) = nullptr; bool fresh_db = false; int num_threads = FLAGS_threads; @@ -532,16 +532,16 @@ class Benchmark { if (FLAGS_use_existing_db) { fprintf(stdout, "%-12s : skipped (--use_existing_db is true)\n", name.ToString().c_str()); - method = NULL; + method = nullptr; } else { delete db_; - db_ = NULL; + db_ = nullptr; DestroyDB(FLAGS_db, Options()); Open(); } } - if (method != NULL) { + if (method != nullptr) { RunBenchmark(num_threads, name, method); } } @@ -643,7 +643,7 @@ class Benchmark { int dummy; port::AtomicPointer ap(&dummy); int count = 0; - void *ptr = NULL; + void *ptr = nullptr; thread->stats.AddMessage("(each op is 1000 loads)"); while (count < 100000) { for (int i = 0; i < 1000; i++) { @@ -652,7 +652,7 @@ class Benchmark { count++; thread->stats.FinishedSingleOp(); } - if (ptr == NULL) exit(1); // Disable unused variable warning. + if (ptr == nullptr) exit(1); // Disable unused variable warning. 
} void SnappyCompress(ThreadState* thread) { @@ -703,7 +703,7 @@ class Benchmark { } void Open() { - assert(db_ == NULL); + assert(db_ == nullptr); Options options; options.env = g_env; options.create_if_missing = !FLAGS_use_existing_db; @@ -914,7 +914,7 @@ class Benchmark { } void Compact(ThreadState* thread) { - db_->CompactRange(NULL, NULL); + db_->CompactRange(nullptr, nullptr); } void PrintStats(const char* key) { @@ -1004,7 +1004,7 @@ int main(int argc, char** argv) { leveldb::g_env = leveldb::Env::Default(); // Choose a location for the test database if none given with --db= - if (FLAGS_db == NULL) { + if (FLAGS_db == nullptr) { leveldb::g_env->GetTestDirectory(&default_db_path); default_db_path += "/dbbench"; FLAGS_db = default_db_path.c_str(); diff --git a/db/db_impl.cc b/db/db_impl.cc index cd8792e..02a6872 100644 --- a/db/db_impl.cc +++ b/db/db_impl.cc @@ -77,8 +77,8 @@ struct DBImpl::CompactionState { explicit CompactionState(Compaction* c) : compaction(c), - outfile(NULL), - builder(NULL), + outfile(nullptr), + builder(nullptr), total_bytes(0) { } }; @@ -95,22 +95,22 @@ Options SanitizeOptions(const std::string& dbname, const Options& src) { Options result = src; result.comparator = icmp; - result.filter_policy = (src.filter_policy != NULL) ? ipolicy : NULL; + result.filter_policy = (src.filter_policy != nullptr) ? ipolicy : nullptr; ClipToRange(&result.max_open_files, 64 + kNumNonTableCacheFiles, 50000); ClipToRange(&result.write_buffer_size, 64<<10, 1<<30); ClipToRange(&result.max_file_size, 1<<20, 1<<30); ClipToRange(&result.block_size, 1<<10, 4<<20); - if (result.info_log == NULL) { + if (result.info_log == nullptr) { // Open a log file in the same directory as the db src.env->CreateDir(dbname); // In case it does not exist src.env->RenameFile(InfoLogFileName(dbname), OldInfoLogFileName(dbname)); Status s = src.env->NewLogger(InfoLogFileName(dbname), &result.info_log); if (!s.ok()) { // No place suitable for logging - result.info_log = NULL; + result.info_log = nullptr; } } - if (result.block_cache == NULL) { + if (result.block_cache == nullptr) { result.block_cache = NewLRUCache(8 << 20); } return result; @@ -131,39 +131,39 @@ DBImpl::DBImpl(const Options& raw_options, const std::string& dbname) owns_cache_(options_.block_cache != raw_options.block_cache), dbname_(dbname), table_cache_(new TableCache(dbname_, options_, TableCacheSize(options_))), - db_lock_(NULL), - shutting_down_(NULL), + db_lock_(nullptr), + shutting_down_(nullptr), background_work_finished_signal_(&mutex_), - mem_(NULL), - imm_(NULL), - logfile_(NULL), + mem_(nullptr), + imm_(nullptr), + logfile_(nullptr), logfile_number_(0), - log_(NULL), + log_(nullptr), seed_(0), tmp_batch_(new WriteBatch), background_compaction_scheduled_(false), - manual_compaction_(NULL), + manual_compaction_(nullptr), versions_(new VersionSet(dbname_, &options_, table_cache_, &internal_comparator_)) { - has_imm_.Release_Store(NULL); + has_imm_.Release_Store(nullptr); } DBImpl::~DBImpl() { // Wait for background work to finish mutex_.Lock(); - shutting_down_.Release_Store(this); // Any non-NULL value is ok + shutting_down_.Release_Store(this); // Any non-null value is ok while (background_compaction_scheduled_) { background_work_finished_signal_.Wait(); } mutex_.Unlock(); - if (db_lock_ != NULL) { + if (db_lock_ != nullptr) { env_->UnlockFile(db_lock_); } delete versions_; - if (mem_ != NULL) mem_->Unref(); - if (imm_ != NULL) imm_->Unref(); + if (mem_ != nullptr) mem_->Unref(); + if (imm_ != nullptr) imm_->Unref(); delete 
tmp_batch_; delete log_; delete logfile_; @@ -283,7 +283,7 @@ Status DBImpl::Recover(VersionEdit* edit, bool *save_manifest) { // committed only when the descriptor is created, and this directory // may already exist from a previous failed creation attempt. env_->CreateDir(dbname_); - assert(db_lock_ == NULL); + assert(db_lock_ == nullptr); Status s = env_->LockFile(LockFileName(dbname_), &db_lock_); if (!s.ok()) { return s; } @@ -374,12 +374,12 @@ Status DBImpl::RecoverLogFile(uint64_t log_number, bool last_log, Env* env; Logger* info_log; const char* fname; - Status* status; // NULL if options_.paranoid_checks==false + Status* status; // null if options_.paranoid_checks==false virtual void Corruption(size_t bytes, const Status& s) { Log(info_log, "%s%s: dropping %d bytes; %s", - (this->status == NULL ? "(ignoring error) " : ""), + (this->status == nullptr ? "(ignoring error) " : ""), fname, static_cast<int>(bytes), s.ToString().c_str()); - if (this->status != NULL && this->status->ok()) *this->status = s; + if (this->status != nullptr && this->status->ok()) *this->status = s; } }; @@ -399,7 +399,7 @@ Status DBImpl::RecoverLogFile(uint64_t log_number, bool last_log, reporter.env = env_; reporter.info_log = options_.info_log; reporter.fname = fname.c_str(); - reporter.status = (options_.paranoid_checks ? &status : NULL); + reporter.status = (options_.paranoid_checks ? &status : nullptr); // We intentionally make log::Reader do checksumming even if // paranoid_checks==false so that corruptions cause entire commits // to be skipped instead of propagating bad information (like overly @@ -414,7 +414,7 @@ Status DBImpl::RecoverLogFile(uint64_t log_number, bool last_log, Slice record; WriteBatch batch; int compactions = 0; - MemTable* mem = NULL; + MemTable* mem = nullptr; while (reader.ReadRecord(&record, &scratch) && status.ok()) { if (record.size() < 12) { @@ -424,7 +424,7 @@ } WriteBatchInternal::SetContents(&batch, record); - if (mem == NULL) { + if (mem == nullptr) { mem = new MemTable(internal_comparator_); mem->Ref(); } @@ -443,9 +443,9 @@ if (mem->ApproximateMemoryUsage() > options_.write_buffer_size) { compactions++; *save_manifest = true; - status = WriteLevel0Table(mem, edit, NULL); + status = WriteLevel0Table(mem, edit, nullptr); mem->Unref(); - mem = NULL; + mem = nullptr; if (!status.ok()) { // Reflect errors immediately so that conditions like full // file-systems cause the DB::Open() to fail. @@ -458,31 +458,31 @@ // See if we should keep reusing the last log file. if (status.ok() && options_.reuse_logs && last_log && compactions == 0) { - assert(logfile_ == NULL); - assert(log_ == NULL); - assert(mem_ == NULL); + assert(logfile_ == nullptr); + assert(log_ == nullptr); + assert(mem_ == nullptr); uint64_t lfile_size; if (env_->GetFileSize(fname, &lfile_size).ok() && env_->NewAppendableFile(fname, &logfile_).ok()) { Log(options_.info_log, "Reusing old log %s \n", fname.c_str()); log_ = new log::Writer(logfile_, lfile_size); logfile_number_ = log_number; - if (mem != NULL) { + if (mem != nullptr) { mem_ = mem; - mem = NULL; + mem = nullptr; } else { - // mem can be NULL if lognum exists but was empty. + // mem can be nullptr if lognum exists but was empty.
mem_ = new MemTable(internal_comparator_); mem_->Ref(); } } } - if (mem != NULL) { + if (mem != nullptr) { // mem did not get reused; compact it. if (status.ok()) { *save_manifest = true; - status = WriteLevel0Table(mem, edit, NULL); + status = WriteLevel0Table(mem, edit, nullptr); } mem->Unref(); } @@ -522,7 +522,7 @@ Status DBImpl::WriteLevel0Table(MemTable* mem, VersionEdit* edit, if (s.ok() && meta.file_size > 0) { const Slice min_user_key = meta.smallest.user_key(); const Slice max_user_key = meta.largest.user_key(); - if (base != NULL) { + if (base != nullptr) { level = base->PickLevelForMemTableOutput(min_user_key, max_user_key); } edit->AddFile(level, meta.number, meta.file_size, @@ -538,7 +538,7 @@ void DBImpl::CompactMemTable() { mutex_.AssertHeld(); - assert(imm_ != NULL); + assert(imm_ != nullptr); // Save the contents of the memtable as a new Table VersionEdit edit; @@ -561,8 +561,8 @@ if (s.ok()) { // Commit to the new state imm_->Unref(); - imm_ = NULL; - has_imm_.Release_Store(NULL); + imm_ = nullptr; + has_imm_.Release_Store(nullptr); DeleteObsoleteFiles(); } else { RecordBackgroundError(s); @@ -596,14 +596,14 @@ void DBImpl::TEST_CompactRange(int level, const Slice* begin, ManualCompaction manual; manual.level = level; manual.done = false; - if (begin == NULL) { - manual.begin = NULL; + if (begin == nullptr) { + manual.begin = nullptr; } else { begin_storage = InternalKey(*begin, kMaxSequenceNumber, kValueTypeForSeek); manual.begin = &begin_storage; } - if (end == NULL) { - manual.end = NULL; + if (end == nullptr) { + manual.end = nullptr; } else { end_storage = InternalKey(*end, 0, static_cast<ValueType>(0)); manual.end = &end_storage; } @@ -611,7 +611,7 @@ MutexLock l(&mutex_); while (!manual.done && !shutting_down_.Acquire_Load() && bg_error_.ok()) { - if (manual_compaction_ == NULL) { // Idle + if (manual_compaction_ == nullptr) { // Idle manual_compaction_ = &manual; MaybeScheduleCompaction(); } else { // Running either my compaction or another compaction. @@ -620,20 +620,20 @@ } if (manual_compaction_ == &manual) { // Cancel my manual compaction since we aborted early for some reason.
- manual_compaction_ = NULL; + manual_compaction_ = nullptr; } } Status DBImpl::TEST_CompactMemTable() { - // NULL batch means just wait for earlier writes to be done - Status s = Write(WriteOptions(), NULL); + // nullptr batch means just wait for earlier writes to be done + Status s = Write(WriteOptions(), nullptr); if (s.ok()) { // Wait until the compaction completes MutexLock l(&mutex_); - while (imm_ != NULL && bg_error_.ok()) { + while (imm_ != nullptr && bg_error_.ok()) { background_work_finished_signal_.Wait(); } - if (imm_ != NULL) { + if (imm_ != nullptr) { s = bg_error_; } } @@ -656,8 +656,8 @@ void DBImpl::MaybeScheduleCompaction() { // DB is being deleted; no more background compactions } else if (!bg_error_.ok()) { // Already got an error; no more changes - } else if (imm_ == NULL && - manual_compaction_ == NULL && + } else if (imm_ == nullptr && + manual_compaction_ == nullptr && !versions_->NeedsCompaction()) { // No work to be done } else { @@ -692,19 +692,19 @@ void DBImpl::BackgroundCall() { void DBImpl::BackgroundCompaction() { mutex_.AssertHeld(); - if (imm_ != NULL) { + if (imm_ != nullptr) { CompactMemTable(); return; } Compaction* c; - bool is_manual = (manual_compaction_ != NULL); + bool is_manual = (manual_compaction_ != nullptr); InternalKey manual_end; if (is_manual) { ManualCompaction* m = manual_compaction_; c = versions_->CompactRange(m->level, m->begin, m->end); - m->done = (c == NULL); - if (c != NULL) { + m->done = (c == nullptr); + if (c != nullptr) { manual_end = c->input(0, c->num_input_files(0) - 1)->largest; } Log(options_.info_log, @@ -718,7 +718,7 @@ void DBImpl::BackgroundCompaction() { } Status status; - if (c == NULL) { + if (c == nullptr) { // Nothing to do } else if (!is_manual && c->IsTrivialMove()) { // Move file to next level @@ -770,18 +770,18 @@ void DBImpl::BackgroundCompaction() { m->tmp_storage = manual_end; m->begin = &m->tmp_storage; } - manual_compaction_ = NULL; + manual_compaction_ = nullptr; } } void DBImpl::CleanupCompaction(CompactionState* compact) { mutex_.AssertHeld(); - if (compact->builder != NULL) { + if (compact->builder != nullptr) { // May happen if we get a shutdown call in the middle of compaction compact->builder->Abandon(); delete compact->builder; } else { - assert(compact->outfile == NULL); + assert(compact->outfile == nullptr); } delete compact->outfile; for (size_t i = 0; i < compact->outputs.size(); i++) { @@ -792,8 +792,8 @@ void DBImpl::CleanupCompaction(CompactionState* compact) { } Status DBImpl::OpenCompactionOutputFile(CompactionState* compact) { - assert(compact != NULL); - assert(compact->builder == NULL); + assert(compact != nullptr); + assert(compact->builder == nullptr); uint64_t file_number; { mutex_.Lock(); @@ -818,9 +818,9 @@ Status DBImpl::OpenCompactionOutputFile(CompactionState* compact) { Status DBImpl::FinishCompactionOutputFile(CompactionState* compact, Iterator* input) { - assert(compact != NULL); - assert(compact->outfile != NULL); - assert(compact->builder != NULL); + assert(compact != nullptr); + assert(compact->outfile != nullptr); + assert(compact->builder != nullptr); const uint64_t output_number = compact->current_output()->number; assert(output_number != 0); @@ -837,7 +837,7 @@ Status DBImpl::FinishCompactionOutputFile(CompactionState* compact, compact->current_output()->file_size = current_bytes; compact->total_bytes += current_bytes; delete compact->builder; - compact->builder = NULL; + compact->builder = nullptr; // Finish and check for file errors if (s.ok()) { @@ -847,7 +847,7 
+847,7 @@ Status DBImpl::FinishCompactionOutputFile(CompactionState* compact, s = compact->outfile->Close(); } delete compact->outfile; - compact->outfile = NULL; + compact->outfile = nullptr; if (s.ok() && current_entries > 0) { // Verify that the table is usable @@ -901,8 +901,8 @@ Status DBImpl::DoCompactionWork(CompactionState* compact) { compact->compaction->level() + 1); assert(versions_->NumLevelFiles(compact->compaction->level()) > 0); - assert(compact->builder == NULL); - assert(compact->outfile == NULL); + assert(compact->builder == nullptr); + assert(compact->outfile == nullptr); if (snapshots_.empty()) { compact->smallest_snapshot = versions_->LastSequence(); } else { @@ -921,10 +921,10 @@ Status DBImpl::DoCompactionWork(CompactionState* compact) { SequenceNumber last_sequence_for_key = kMaxSequenceNumber; for (; input->Valid() && !shutting_down_.Acquire_Load(); ) { // Prioritize immutable compaction work - if (has_imm_.NoBarrier_Load() != NULL) { + if (has_imm_.NoBarrier_Load() != nullptr) { const uint64_t imm_start = env_->NowMicros(); mutex_.Lock(); - if (imm_ != NULL) { + if (imm_ != nullptr) { CompactMemTable(); // Wake up MakeRoomForWrite() if necessary. background_work_finished_signal_.SignalAll(); @@ -935,7 +935,7 @@ Status DBImpl::DoCompactionWork(CompactionState* compact) { Slice key = input->key(); if (compact->compaction->ShouldStopBefore(key) && - compact->builder != NULL) { + compact->builder != nullptr) { status = FinishCompactionOutputFile(compact, input); if (!status.ok()) { break; @@ -989,7 +989,7 @@ Status DBImpl::DoCompactionWork(CompactionState* compact) { if (!drop) { // Open output file if necessary - if (compact->builder == NULL) { + if (compact->builder == nullptr) { status = OpenCompactionOutputFile(compact); if (!status.ok()) { break; @@ -1017,14 +1017,14 @@ Status DBImpl::DoCompactionWork(CompactionState* compact) { if (status.ok() && shutting_down_.Acquire_Load()) { status = Status::IOError("Deleting DB during compaction"); } - if (status.ok() && compact->builder != NULL) { + if (status.ok() && compact->builder != nullptr) { status = FinishCompactionOutputFile(compact, input); } if (status.ok()) { status = input->status(); } delete input; - input = NULL; + input = nullptr; CompactionStats stats; stats.micros = env_->NowMicros() - start_micros - imm_micros; @@ -1068,7 +1068,7 @@ static void CleanupIteratorState(void* arg1, void* arg2) { IterState* state = reinterpret_cast<IterState*>(arg1); state->mu->Lock(); state->mem->Unref(); - if (state->imm != NULL) state->imm->Unref(); + if (state->imm != nullptr) state->imm->Unref(); state->version->Unref(); state->mu->Unlock(); delete state; @@ -1086,7 +1086,7 @@ Iterator* DBImpl::NewInternalIterator(const ReadOptions& options, std::vector<Iterator*> list; list.push_back(mem_->NewIterator()); mem_->Ref(); - if (imm_ != NULL) { + if (imm_ != nullptr) { list.push_back(imm_->NewIterator()); imm_->Ref(); } @@ -1096,7 +1096,7 @@ Iterator* DBImpl::NewInternalIterator(const ReadOptions& options, versions_->current()->Ref(); IterState* cleanup = new IterState(&mutex_, mem_, imm_, versions_->current()); - internal_iter->RegisterCleanup(CleanupIteratorState, cleanup, NULL); + internal_iter->RegisterCleanup(CleanupIteratorState, cleanup, nullptr); *seed = ++seed_; mutex_.Unlock(); @@ -1120,7 +1120,7 @@ Status DBImpl::Get(const ReadOptions& options, Status s; MutexLock l(&mutex_); SequenceNumber snapshot; - if (options.snapshot != NULL) { + if (options.snapshot != nullptr) { snapshot = reinterpret_cast<const SnapshotImpl*>(options.snapshot)->number_; } else { snapshot =
versions_->LastSequence(); @@ -1130,7 +1130,7 @@ Status DBImpl::Get(const ReadOptions& options, MemTable* imm = imm_; Version* current = versions_->current(); mem->Ref(); - if (imm != NULL) imm->Ref(); + if (imm != nullptr) imm->Ref(); current->Ref(); bool have_stat_update = false; @@ -1143,7 +1143,7 @@ Status DBImpl::Get(const ReadOptions& options, LookupKey lkey(key, snapshot); if (mem->Get(lkey, value, &s)) { // Done - } else if (imm != NULL && imm->Get(lkey, value, &s)) { + } else if (imm != nullptr && imm->Get(lkey, value, &s)) { // Done } else { s = current->Get(options, lkey, value, &stats); @@ -1156,7 +1156,7 @@ MaybeScheduleCompaction(); } mem->Unref(); - if (imm != NULL) imm->Unref(); + if (imm != nullptr) imm->Unref(); current->Unref(); return s; } @@ -1167,7 +1167,7 @@ Iterator* DBImpl::NewIterator(const ReadOptions& options) { Iterator* iter = NewInternalIterator(options, &latest_snapshot, &seed); return NewDBIterator( this, user_comparator(), iter, - (options.snapshot != NULL + (options.snapshot != nullptr ? reinterpret_cast<const SnapshotImpl*>(options.snapshot)->number_ : latest_snapshot), seed); @@ -1215,10 +1215,10 @@ Status DBImpl::Write(const WriteOptions& options, WriteBatch* my_batch) { } // May temporarily unlock and wait. - Status status = MakeRoomForWrite(my_batch == NULL); + Status status = MakeRoomForWrite(my_batch == nullptr); uint64_t last_sequence = versions_->LastSequence(); Writer* last_writer = &w; - if (status.ok() && my_batch != NULL) { // NULL batch is for compactions + if (status.ok() && my_batch != nullptr) { // nullptr batch is for compactions WriteBatch* updates = BuildBatchGroup(&last_writer); WriteBatchInternal::SetSequence(updates, last_sequence + 1); last_sequence += WriteBatchInternal::Count(updates); @@ -1273,13 +1273,13 @@ Status DBImpl::Write(const WriteOptions& options, WriteBatch* my_batch) { } // REQUIRES: Writer list must be non-empty -// REQUIRES: First writer must have a non-NULL batch +// REQUIRES: First writer must have a non-null batch WriteBatch* DBImpl::BuildBatchGroup(Writer** last_writer) { mutex_.AssertHeld(); assert(!writers_.empty()); Writer* first = writers_.front(); WriteBatch* result = first->batch; - assert(result != NULL); + assert(result != nullptr); size_t size = WriteBatchInternal::ByteSize(first->batch); @@ -1301,7 +1301,7 @@ WriteBatch* DBImpl::BuildBatchGroup(Writer** last_writer) { break; } - if (w->batch != NULL) { + if (w->batch != nullptr) { size += WriteBatchInternal::ByteSize(w->batch); if (size > max_size) { // Do not make batch too big @@ -1351,7 +1351,7 @@ Status DBImpl::MakeRoomForWrite(bool force) { (mem_->ApproximateMemoryUsage() <= options_.write_buffer_size)) { // There is room in current memtable break; - } else if (imm_ != NULL) { + } else if (imm_ != nullptr) { // We have filled up the current memtable, but the previous // one is still being compacted, so we wait. Log(options_.info_log, "Current memtable full; waiting...\n"); @@ -1364,7 +1364,7 @@ Status DBImpl::MakeRoomForWrite(bool force) { // Attempt to switch to a new memtable and trigger compaction of old assert(versions_->PrevLogNumber() == 0); uint64_t new_log_number = versions_->NewFileNumber(); - WritableFile* lfile = NULL; + WritableFile* lfile = nullptr; s = env_->NewWritableFile(LogFileName(dbname_, new_log_number), &lfile); if (!s.ok()) { // Avoid chewing through file number space in a tight loop.
@@ -1498,7 +1498,7 @@ DB::~DB() { } Status DB::Open(const Options& options, const std::string& dbname, DB** dbptr) { - *dbptr = NULL; + *dbptr = nullptr; DBImpl* impl = new DBImpl(options, dbname); impl->mutex_.Lock(); @@ -1506,7 +1506,7 @@ Status DB::Open(const Options& options, const std::string& dbname, // Recover handles create_if_missing, error_if_exists bool save_manifest = false; Status s = impl->Recover(&edit, &save_manifest); - if (s.ok() && impl->mem_ == NULL) { + if (s.ok() && impl->mem_ == nullptr) { // Create new log and a corresponding memtable. uint64_t new_log_number = impl->versions_->NewFileNumber(); WritableFile* lfile; @@ -1532,7 +1532,7 @@ Status DB::Open(const Options& options, const std::string& dbname, } impl->mutex_.Unlock(); if (s.ok()) { - assert(impl->mem_ != NULL); + assert(impl->mem_ != nullptr); *dbptr = impl; } else { delete impl; diff --git a/db/db_impl.h b/db/db_impl.h index 6344112..00e800a 100644 --- a/db/db_impl.h +++ b/db/db_impl.h @@ -131,7 +131,7 @@ class DBImpl : public DB { // table_cache_ provides its own synchronization TableCache* const table_cache_; - // Lock over the persistent DB state. Non-NULL iff successfully acquired. + // Lock over the persistent DB state. Non-null iff successfully acquired. FileLock* db_lock_; // State below is protected by mutex_ @@ -140,7 +140,7 @@ class DBImpl : public DB { port::CondVar background_work_finished_signal_ GUARDED_BY(mutex_); MemTable* mem_; MemTable* imm_ GUARDED_BY(mutex_); // Memtable being compacted - port::AtomicPointer has_imm_; // So bg thread can detect non-NULL imm_ + port::AtomicPointer has_imm_; // So bg thread can detect non-null imm_ WritableFile* logfile_; uint64_t logfile_number_ GUARDED_BY(mutex_); log::Writer* log_; @@ -163,8 +163,8 @@ class DBImpl : public DB { struct ManualCompaction { int level; bool done; - const InternalKey* begin; // NULL means beginning of key range - const InternalKey* end; // NULL means end of key range + const InternalKey* begin; // null means beginning of key range + const InternalKey* end; // null means end of key range InternalKey tmp_storage; // Used to keep track of compaction progress }; ManualCompaction* manual_compaction_ GUARDED_BY(mutex_); diff --git a/db/db_test.cc b/db/db_test.cc index b1d2cd8..47e3287 100644 --- a/db/db_test.cc +++ b/db/db_test.cc @@ -96,35 +96,35 @@ class TestEnv : public EnvWrapper { // Special Env used to delay background operations class SpecialEnv : public EnvWrapper { public: - // sstable/log Sync() calls are blocked while this pointer is non-NULL. + // sstable/log Sync() calls are blocked while this pointer is non-null. port::AtomicPointer delay_data_sync_; // sstable/log Sync() calls return an error. port::AtomicPointer data_sync_error_; - // Simulate no-space errors while this pointer is non-NULL. + // Simulate no-space errors while this pointer is non-null. port::AtomicPointer no_space_; - // Simulate non-writable file system while this pointer is non-NULL + // Simulate non-writable file system while this pointer is non-null. port::AtomicPointer non_writable_; - // Force sync of manifest files to fail while this pointer is non-NULL + // Force sync of manifest files to fail while this pointer is non-null. port::AtomicPointer manifest_sync_error_; - // Force write to manifest files to fail while this pointer is non-NULL + // Force write to manifest files to fail while this pointer is non-null. 
port::AtomicPointer manifest_write_error_; bool count_random_reads_; AtomicCounter random_read_counter_; explicit SpecialEnv(Env* base) : EnvWrapper(base) { - delay_data_sync_.Release_Store(NULL); - data_sync_error_.Release_Store(NULL); - no_space_.Release_Store(NULL); - non_writable_.Release_Store(NULL); + delay_data_sync_.Release_Store(nullptr); + data_sync_error_.Release_Store(nullptr); + no_space_.Release_Store(nullptr); + non_writable_.Release_Store(nullptr); count_random_reads_ = false; - manifest_sync_error_.Release_Store(NULL); - manifest_write_error_.Release_Store(NULL); + manifest_sync_error_.Release_Store(nullptr); + manifest_write_error_.Release_Store(nullptr); } Status NewWritableFile(const std::string& f, WritableFile** r) { @@ -140,7 +140,7 @@ class SpecialEnv : public EnvWrapper { } ~DataFile() { delete base_; } Status Append(const Slice& data) { - if (env_->no_space_.Acquire_Load() != NULL) { + if (env_->no_space_.Acquire_Load() != nullptr) { // Drop writes on the floor return Status::OK(); } else { @@ -150,10 +150,10 @@ class SpecialEnv : public EnvWrapper { Status Close() { return base_->Close(); } Status Flush() { return base_->Flush(); } Status Sync() { - if (env_->data_sync_error_.Acquire_Load() != NULL) { + if (env_->data_sync_error_.Acquire_Load() != nullptr) { return Status::IOError("simulated data sync error"); } - while (env_->delay_data_sync_.Acquire_Load() != NULL) { + while (env_->delay_data_sync_.Acquire_Load() != nullptr) { DelayMilliseconds(100); } return base_->Sync(); @@ -167,7 +167,7 @@ class SpecialEnv : public EnvWrapper { ManifestFile(SpecialEnv* env, WritableFile* b) : env_(env), base_(b) { } ~ManifestFile() { delete base_; } Status Append(const Slice& data) { - if (env_->manifest_write_error_.Acquire_Load() != NULL) { + if (env_->manifest_write_error_.Acquire_Load() != nullptr) { return Status::IOError("simulated writer error"); } else { return base_->Append(data); @@ -176,7 +176,7 @@ class SpecialEnv : public EnvWrapper { Status Close() { return base_->Close(); } Status Flush() { return base_->Flush(); } Status Sync() { - if (env_->manifest_sync_error_.Acquire_Load() != NULL) { + if (env_->manifest_sync_error_.Acquire_Load() != nullptr) { return Status::IOError("simulated sync error"); } else { return base_->Sync(); @@ -184,16 +184,16 @@ } }; - if (non_writable_.Acquire_Load() != NULL) { + if (non_writable_.Acquire_Load() != nullptr) { return Status::IOError("simulated write error"); } Status s = target()->NewWritableFile(f, r); if (s.ok()) { - if (strstr(f.c_str(), ".ldb") != NULL || - strstr(f.c_str(), ".log") != NULL) { + if (strstr(f.c_str(), ".ldb") != nullptr || + strstr(f.c_str(), ".log") != nullptr) { *r = new DataFile(this, *r); - } else if (strstr(f.c_str(), "MANIFEST") != NULL) { + } else if (strstr(f.c_str(), "MANIFEST") != nullptr) { *r = new ManifestFile(this, *r); } } @@ -251,7 +251,7 @@ class DBTest { filter_policy_ = NewBloomFilterPolicy(10); dbname_ = test::TmpDir() + "/db_test"; DestroyDB(dbname_, Options()); - db_ = NULL; + db_ = nullptr; Reopen(); } @@ -298,27 +298,27 @@ class DBTest { return reinterpret_cast<DBImpl*>(db_); } - void Reopen(Options* options = NULL) { + void Reopen(Options* options = nullptr) { ASSERT_OK(TryReopen(options)); } void Close() { delete db_; - db_ = NULL; + db_ = nullptr; } - void DestroyAndReopen(Options* options = NULL) { + void DestroyAndReopen(Options* options = nullptr) { delete db_; - db_ = NULL; + db_ = nullptr; DestroyDB(dbname_, Options());
ASSERT_OK(TryReopen(options)); } Status TryReopen(Options* options) { delete db_; - db_ = NULL; + db_ = nullptr; Options opts; - if (options != NULL) { + if (options != nullptr) { opts = *options; } else { opts = CurrentOptions(); @@ -337,7 +337,7 @@ class DBTest { return db_->Delete(WriteOptions(), k); } - std::string Get(const std::string& k, const Snapshot* snapshot = NULL) { + std::string Get(const std::string& k, const Snapshot* snapshot = nullptr) { ReadOptions options; options.snapshot = snapshot; std::string result; @@ -549,7 +549,7 @@ class DBTest { TEST(DBTest, Empty) { do { - ASSERT_TRUE(db_ != NULL); + ASSERT_TRUE(db_ != nullptr); ASSERT_EQ("NOT_FOUND", Get("foo")); } while (ChangeOptions()); } @@ -590,7 +590,7 @@ TEST(DBTest, GetFromImmutableLayer) { Put("k1", std::string(100000, 'x')); // Fill memtable Put("k2", std::string(100000, 'y')); // Trigger compaction ASSERT_EQ("v1", Get("foo")); - env_->delay_data_sync_.Release_Store(NULL); // Release sync calls + env_->delay_data_sync_.Release_Store(nullptr); // Release sync calls } while (ChangeOptions()); } @@ -695,7 +695,7 @@ TEST(DBTest, GetEncountersEmptyLevel) { } // Step 2: clear level 1 if necessary. - dbfull()->TEST_CompactRange(1, NULL, NULL); + dbfull()->TEST_CompactRange(1, nullptr, nullptr); ASSERT_EQ(NumTableFilesAtLevel(0), 1); ASSERT_EQ(NumTableFilesAtLevel(1), 0); ASSERT_EQ(NumTableFilesAtLevel(2), 1); @@ -1032,7 +1032,7 @@ TEST(DBTest, CompactionsGenerateMultipleFiles) { // Reopening moves updates to level-0 Reopen(&options); - dbfull()->TEST_CompactRange(0, NULL, NULL); + dbfull()->TEST_CompactRange(0, nullptr, nullptr); ASSERT_EQ(NumTableFilesAtLevel(0), 0); ASSERT_GT(NumTableFilesAtLevel(1), 1); @@ -1083,7 +1083,7 @@ TEST(DBTest, SparseMerge) { } Put("C", "vc"); dbfull()->TEST_CompactMemTable(); - dbfull()->TEST_CompactRange(0, NULL, NULL); + dbfull()->TEST_CompactRange(0, nullptr, nullptr); // Make sparse update Put("A", "va2"); @@ -1094,9 +1094,9 @@ TEST(DBTest, SparseMerge) { // Compactions should not cause us to create a situation where // a file overlaps too much data at the next level. 
ASSERT_LE(dbfull()->TEST_MaxNextLevelOverlappingBytes(), 20*1048576); - dbfull()->TEST_CompactRange(0, NULL, NULL); + dbfull()->TEST_CompactRange(0, nullptr, nullptr); ASSERT_LE(dbfull()->TEST_MaxNextLevelOverlappingBytes(), 20*1048576); - dbfull()->TEST_CompactRange(1, NULL, NULL); + dbfull()->TEST_CompactRange(1, nullptr, nullptr); ASSERT_LE(dbfull()->TEST_MaxNextLevelOverlappingBytes(), 20*1048576); } @@ -1207,7 +1207,7 @@ TEST(DBTest, ApproximateSizes_MixOfSmallAndLarge) { ASSERT_TRUE(Between(Size(Key(3), Key(5)), 110000, 111000)); - dbfull()->TEST_CompactRange(0, NULL, NULL); + dbfull()->TEST_CompactRange(0, nullptr, nullptr); } } while (ChangeOptions()); } @@ -1283,11 +1283,11 @@ TEST(DBTest, HiddenValuesAreRemoved) { db_->ReleaseSnapshot(snapshot); ASSERT_EQ(AllEntriesFor("foo"), "[ tiny, " + big + " ]"); Slice x("x"); - dbfull()->TEST_CompactRange(0, NULL, &x); + dbfull()->TEST_CompactRange(0, nullptr, &x); ASSERT_EQ(AllEntriesFor("foo"), "[ tiny ]"); ASSERT_EQ(NumTableFilesAtLevel(0), 0); ASSERT_GE(NumTableFilesAtLevel(1), 1); - dbfull()->TEST_CompactRange(1, NULL, &x); + dbfull()->TEST_CompactRange(1, nullptr, &x); ASSERT_EQ(AllEntriesFor("foo"), "[ tiny ]"); ASSERT_TRUE(Between(Size("", "pastfoo"), 0, 1000)); @@ -1313,11 +1313,11 @@ TEST(DBTest, DeletionMarkers1) { ASSERT_OK(dbfull()->TEST_CompactMemTable()); // Moves to level last-2 ASSERT_EQ(AllEntriesFor("foo"), "[ v2, DEL, v1 ]"); Slice z("z"); - dbfull()->TEST_CompactRange(last-2, NULL, &z); + dbfull()->TEST_CompactRange(last-2, nullptr, &z); // DEL eliminated, but v1 remains because we aren't compacting that level // (DEL can be eliminated because v2 hides v1). ASSERT_EQ(AllEntriesFor("foo"), "[ v2, v1 ]"); - dbfull()->TEST_CompactRange(last-1, NULL, NULL); + dbfull()->TEST_CompactRange(last-1, nullptr, nullptr); // Merging last-1 w/ last, so we are the base level for "foo", so // DEL is removed. (as is v1). ASSERT_EQ(AllEntriesFor("foo"), "[ v2 ]"); @@ -1340,10 +1340,10 @@ TEST(DBTest, DeletionMarkers2) { ASSERT_EQ(AllEntriesFor("foo"), "[ DEL, v1 ]"); ASSERT_OK(dbfull()->TEST_CompactMemTable()); // Moves to level last-2 ASSERT_EQ(AllEntriesFor("foo"), "[ DEL, v1 ]"); - dbfull()->TEST_CompactRange(last-2, NULL, NULL); + dbfull()->TEST_CompactRange(last-2, nullptr, nullptr); // DEL kept: "last" file overlaps ASSERT_EQ(AllEntriesFor("foo"), "[ DEL, v1 ]"); - dbfull()->TEST_CompactRange(last-1, NULL, NULL); + dbfull()->TEST_CompactRange(last-1, nullptr, nullptr); // Merging last-1 w/ last, so we are the base level for "foo", so // DEL is removed. (as is v1). ASSERT_EQ(AllEntriesFor("foo"), "[ ]"); @@ -1376,8 +1376,8 @@ TEST(DBTest, OverlapInLevel0) { ASSERT_EQ("2,1,1", FilesPerLevel()); // Compact away the placeholder files we created initially - dbfull()->TEST_CompactRange(1, NULL, NULL); - dbfull()->TEST_CompactRange(2, NULL, NULL); + dbfull()->TEST_CompactRange(1, nullptr, nullptr); + dbfull()->TEST_CompactRange(2, nullptr, nullptr); ASSERT_EQ("2", FilesPerLevel()); // Do a memtable compaction. 
Before bug-fix, the compaction would @@ -1437,7 +1437,7 @@ TEST(DBTest, Fflush_Issue474) { static const int kNum = 100000; Random rnd(test::RandomSeed()); for (int i = 0; i < kNum; i++) { - fflush(NULL); + fflush(nullptr); ASSERT_OK(Put(RandomKey(&rnd), RandomString(&rnd, 100))); } } @@ -1495,7 +1495,7 @@ TEST(DBTest, CustomComparator) { Options new_options = CurrentOptions(); new_options.create_if_missing = true; new_options.comparator = &cmp; - new_options.filter_policy = NULL; // Cannot use bloom filters + new_options.filter_policy = nullptr; // Cannot use bloom filters new_options.write_buffer_size = 1000; // Compact more often DestroyAndReopen(&new_options); ASSERT_OK(Put("[10]", "ten")); @@ -1550,7 +1550,7 @@ TEST(DBTest, ManualCompaction) { // Compact all MakeTables(1, "a", "z"); ASSERT_EQ("0,1,2", FilesPerLevel()); - db_->CompactRange(NULL, NULL); + db_->CompactRange(nullptr, nullptr); ASSERT_EQ("0,0,1", FilesPerLevel()); } @@ -1559,38 +1559,38 @@ TEST(DBTest, DBOpen_Options) { DestroyDB(dbname, Options()); // Does not exist, and create_if_missing == false: error - DB* db = NULL; + DB* db = nullptr; Options opts; opts.create_if_missing = false; Status s = DB::Open(opts, dbname, &db); - ASSERT_TRUE(strstr(s.ToString().c_str(), "does not exist") != NULL); - ASSERT_TRUE(db == NULL); + ASSERT_TRUE(strstr(s.ToString().c_str(), "does not exist") != nullptr); + ASSERT_TRUE(db == nullptr); // Does not exist, and create_if_missing == true: OK opts.create_if_missing = true; s = DB::Open(opts, dbname, &db); ASSERT_OK(s); - ASSERT_TRUE(db != NULL); + ASSERT_TRUE(db != nullptr); delete db; - db = NULL; + db = nullptr; // Does exist, and error_if_exists == true: error opts.create_if_missing = false; opts.error_if_exists = true; s = DB::Open(opts, dbname, &db); - ASSERT_TRUE(strstr(s.ToString().c_str(), "exists") != NULL); - ASSERT_TRUE(db == NULL); + ASSERT_TRUE(strstr(s.ToString().c_str(), "exists") != nullptr); + ASSERT_TRUE(db == nullptr); // Does exist, and error_if_exists == false: OK opts.create_if_missing = true; opts.error_if_exists = false; s = DB::Open(opts, dbname, &db); ASSERT_OK(s); - ASSERT_TRUE(db != NULL); + ASSERT_TRUE(db != nullptr); delete db; - db = NULL; + db = nullptr; } TEST(DBTest, DestroyEmptyDir) { @@ -1628,9 +1628,9 @@ TEST(DBTest, DestroyOpenDB) { Options opts; opts.create_if_missing = true; - DB* db = NULL; + DB* db = nullptr; ASSERT_OK(DB::Open(opts, dbname, &db)); - ASSERT_TRUE(db != NULL); + ASSERT_TRUE(db != nullptr); // Must fail to destroy an open db. ASSERT_TRUE(env_->FileExists(dbname)); @@ -1638,7 +1638,7 @@ TEST(DBTest, DestroyOpenDB) { ASSERT_TRUE(env_->FileExists(dbname)); delete db; - db = NULL; + db = nullptr; // Should succeed destroying a closed db. 
ASSERT_OK(DestroyDB(dbname, Options())); @@ -1646,7 +1646,7 @@ } TEST(DBTest, Locking) { - DB* db2 = NULL; + DB* db2 = nullptr; Status s = DB::Open(CurrentOptions(), dbname_, &db2); ASSERT_TRUE(!s.ok()) << "Locking did not prevent re-opening db"; } @@ -1664,10 +1664,10 @@ TEST(DBTest, NoSpace) { env_->no_space_.Release_Store(env_); // Force out-of-space errors for (int i = 0; i < 10; i++) { for (int level = 0; level < config::kNumLevels-1; level++) { - dbfull()->TEST_CompactRange(level, NULL, NULL); + dbfull()->TEST_CompactRange(level, nullptr, nullptr); } } - env_->no_space_.Release_Store(NULL); + env_->no_space_.Release_Store(nullptr); ASSERT_LT(CountFiles(), num_files + 3); } @@ -1688,7 +1688,7 @@ TEST(DBTest, NonWritableFileSystem) { } } ASSERT_GT(errors, 0); - env_->non_writable_.Release_Store(NULL); + env_->non_writable_.Release_Store(nullptr); } TEST(DBTest, WriteSyncError) { @@ -1712,7 +1712,7 @@ TEST(DBTest, WriteSyncError) { ASSERT_EQ("NOT_FOUND", Get("k2")); // (d) make sync behave normally - env_->data_sync_error_.Release_Store(NULL); + env_->data_sync_error_.Release_Store(nullptr); // (e) Do a non-sync write; should fail w.sync = false; @@ -1753,11 +1753,11 @@ TEST(DBTest, ManifestWriteError) { // Merging compaction (will fail) error_type->Release_Store(env_); - dbfull()->TEST_CompactRange(last, NULL, NULL); // Should fail + dbfull()->TEST_CompactRange(last, nullptr, nullptr); // Should fail ASSERT_EQ("bar", Get("foo")); // Recovery: should not lose data - error_type->Release_Store(NULL); + error_type->Release_Store(nullptr); Reopen(&options); ASSERT_EQ("bar", Get("foo")); } @@ -1849,7 +1849,7 @@ TEST(DBTest, BloomFilter) { fprintf(stderr, "%d missing => %d reads\n", N, reads); ASSERT_LE(reads, 3*N/100); - env_->delay_data_sync_.Release_Store(NULL); + env_->delay_data_sync_.Release_Store(nullptr); Close(); delete options.block_cache; delete options.filter_policy; @@ -1883,7 +1883,7 @@ static void MTThreadBody(void* arg) { Random rnd(1000 + id); std::string value; char valbuf[1500]; - while (t->state->stop.Acquire_Load() == NULL) { + while (t->state->stop.Acquire_Load() == nullptr) { t->state->counter[id].Release_Store(reinterpret_cast<void*>(counter)); int key = rnd.Uniform(kNumKeys); @@ -1946,7 +1946,7 @@ TEST(DBTest, MultiThreaded) { // Stop the threads and wait for them to finish mt.stop.Release_Store(&mt); for (int id = 0; id < kNumThreads; id++) { - while (mt.thread_done[id].Acquire_Load() == NULL) { + while (mt.thread_done[id].Acquire_Load() == nullptr) { DelayMilliseconds(100); } } @@ -1978,7 +1978,7 @@ class ModelDB: public DB { return Status::NotFound(key); } virtual Iterator* NewIterator(const ReadOptions& options) { - if (options.snapshot == NULL) { + if (options.snapshot == nullptr) { KVMap* saved = new KVMap; *saved = map_; return new ModelIter(saved, true); @@ -2112,8 +2112,8 @@ TEST(DBTest, Randomized) { do { ModelDB model(CurrentOptions()); const int N = 10000; - const Snapshot* model_snap = NULL; - const Snapshot* db_snap = NULL; + const Snapshot* model_snap = nullptr; + const Snapshot* db_snap = nullptr; std::string k, v; for (int step = 0; step < N; step++) { if (step % 100 == 0) { @@ -2158,23 +2158,23 @@ TEST(DBTest, Randomized) { } if ((step % 100) == 0) { - ASSERT_TRUE(CompareIterators(step, &model, db_, NULL, NULL)); + ASSERT_TRUE(CompareIterators(step, &model, db_, nullptr, nullptr)); ASSERT_TRUE(CompareIterators(step, &model, db_, model_snap, db_snap)); // Save a snapshot from each DB this time that we'll use next // time we
compare things, to make sure the current state is // preserved with the snapshot - if (model_snap != NULL) model.ReleaseSnapshot(model_snap); - if (db_snap != NULL) db_->ReleaseSnapshot(db_snap); + if (model_snap != nullptr) model.ReleaseSnapshot(model_snap); + if (db_snap != nullptr) db_->ReleaseSnapshot(db_snap); Reopen(); - ASSERT_TRUE(CompareIterators(step, &model, db_, NULL, NULL)); + ASSERT_TRUE(CompareIterators(step, &model, db_, nullptr, nullptr)); model_snap = model.GetSnapshot(); db_snap = db_->GetSnapshot(); } } - if (model_snap != NULL) model.ReleaseSnapshot(model_snap); - if (db_snap != NULL) db_->ReleaseSnapshot(db_snap); + if (model_snap != nullptr) model.ReleaseSnapshot(model_snap); + if (db_snap != nullptr) db_->ReleaseSnapshot(db_snap); } while (ChangeOptions()); } @@ -2188,15 +2188,15 @@ void BM_LogAndApply(int iters, int num_base_files) { std::string dbname = test::TmpDir() + "/leveldb_test_benchmark"; DestroyDB(dbname, Options()); - DB* db = NULL; + DB* db = nullptr; Options opts; opts.create_if_missing = true; Status s = DB::Open(opts, dbname, &db); ASSERT_OK(s); - ASSERT_TRUE(db != NULL); + ASSERT_TRUE(db != nullptr); delete db; - db = NULL; + db = nullptr; Env* env = Env::Default(); @@ -2205,7 +2205,7 @@ void BM_LogAndApply(int iters, int num_base_files) { InternalKeyComparator cmp(BytewiseComparator()); Options options; - VersionSet vset(dbname, &options, NULL, &cmp); + VersionSet vset(dbname, &options, nullptr, &cmp); bool save_manifest; ASSERT_OK(vset.Recover(&save_manifest)); VersionEdit vbase; diff --git a/db/dumpfile.cc b/db/dumpfile.cc index 2f1b5d9..941988b 100644 --- a/db/dumpfile.cc +++ b/db/dumpfile.cc @@ -145,8 +145,8 @@ Status DumpDescriptor(Env* env, const std::string& fname, WritableFile* dst) { Status DumpTable(Env* env, const std::string& fname, WritableFile* dst) { uint64_t file_size; - RandomAccessFile* file = NULL; - Table* table = NULL; + RandomAccessFile* file = nullptr; + Table* table = nullptr; Status s = env->GetFileSize(fname, &file_size); if (s.ok()) { s = env->NewRandomAccessFile(fname, &file); diff --git a/db/fault_injection_test.cc b/db/fault_injection_test.cc index caead37..7894999 100644 --- a/db/fault_injection_test.cc +++ b/db/fault_injection_test.cc @@ -172,7 +172,7 @@ TestWritableFile::TestWritableFile(const FileState& state, target_(f), writable_file_opened_(true), env_(env) { - assert(f != NULL); + assert(f != nullptr); } TestWritableFile::~TestWritableFile() { @@ -378,7 +378,7 @@ class FaultInjectionTest { FaultInjectionTest() : env_(new FaultInjectionTestEnv), tiny_cache_(NewLRUCache(100)), - db_(NULL) { + db_(nullptr) { dbname_ = test::TmpDir() + "/fault_test"; DestroyDB(dbname_, Options()); // Destroy any db from earlier run options_.reuse_logs = true; @@ -457,14 +457,14 @@ class FaultInjectionTest { Status OpenDB() { delete db_; - db_ = NULL; + db_ = nullptr; env_->ResetState(); return DB::Open(options_, dbname_, &db_); } void CloseDB() { delete db_; - db_ = NULL; + db_ = nullptr; } void DeleteAllData() { @@ -493,7 +493,7 @@ class FaultInjectionTest { void PartialCompactTestPreFault(int num_pre_sync, int num_post_sync) { DeleteAllData(); Build(0, num_pre_sync); - db_->CompactRange(NULL, NULL); + db_->CompactRange(nullptr, nullptr); Build(num_pre_sync, num_post_sync); } diff --git a/db/log_reader.cc b/db/log_reader.cc index 48ae863..19c4df6 100644 --- a/db/log_reader.cc +++ b/db/log_reader.cc @@ -185,7 +185,7 @@ void Reader::ReportCorruption(uint64_t bytes, const char* reason) { } void Reader::ReportDrop(uint64_t bytes, 
const Status& reason) { - if (reporter_ != NULL && + if (reporter_ != nullptr && end_of_buffer_offset_ - buffer_.size() - bytes >= initial_offset_) { reporter_->Corruption(static_cast<size_t>(bytes), reason); } } diff --git a/db/log_reader.h b/db/log_reader.h index 8389d61..7dcce8e 100644 --- a/db/log_reader.h +++ b/db/log_reader.h @@ -32,7 +32,7 @@ class Reader { // Create a reader that will return log records from "*file". // "*file" must remain live while this Reader is in use. // - // If "reporter" is non-NULL, it is notified whenever some data is + // If "reporter" is non-null, it is notified whenever some data is // dropped due to a detected corruption. "*reporter" must remain // live while this Reader is in use. // diff --git a/db/recovery_test.cc b/db/recovery_test.cc index e5a7c80..c852803 100644 --- a/db/recovery_test.cc +++ b/db/recovery_test.cc @@ -17,7 +17,7 @@ namespace leveldb { class RecoveryTest { public: - RecoveryTest() : env_(Env::Default()), db_(NULL) { + RecoveryTest() : env_(Env::Default()), db_(nullptr) { dbname_ = test::TmpDir() + "/recovery_test"; DestroyDB(dbname_, Options()); Open(); @@ -44,25 +44,25 @@ class RecoveryTest { void Close() { delete db_; - db_ = NULL; + db_ = nullptr; } - Status OpenWithStatus(Options* options = NULL) { + Status OpenWithStatus(Options* options = nullptr) { Close(); Options opts; - if (options != NULL) { + if (options != nullptr) { opts = *options; } else { opts.reuse_logs = true; // TODO(sanjay): test both ways opts.create_if_missing = true; } - if (opts.env == NULL) { + if (opts.env == nullptr) { opts.env = env_; } return DB::Open(opts, dbname_, &db_); } - void Open(Options* options = NULL) { + void Open(Options* options = nullptr) { ASSERT_OK(OpenWithStatus(options)); ASSERT_EQ(1, NumLogs()); } @@ -71,7 +71,7 @@ class RecoveryTest { return db_->Put(WriteOptions(), k, v); } - std::string Get(const std::string& k, const Snapshot* snapshot = NULL) { + std::string Get(const std::string& k, const Snapshot* snapshot = nullptr) { std::string result; Status s = db_->Get(ReadOptions(), k, &result); if (s.IsNotFound()) { diff --git a/db/repair.cc b/db/repair.cc index c10da82..df8dcd2 100644 --- a/db/repair.cc +++ b/db/repair.cc @@ -227,7 +227,7 @@ class Repairer { status = BuildTable(dbname_, env_, options_, table_cache_, iter, &meta); delete iter; mem->Unref(); - mem = NULL; + mem = nullptr; if (status.ok()) { if (meta.file_size > 0) { table_numbers_.push_back(meta.number); @@ -350,13 +350,13 @@ class Repairer { } } delete builder; - builder = NULL; + builder = nullptr; if (s.ok()) { s = file->Close(); } delete file; - file = NULL; + file = nullptr; if (counter > 0 && s.ok()) { std::string orig = TableFileName(dbname_, t.meta.number); @@ -410,7 +410,7 @@ class Repairer { status = file->Close(); } delete file; - file = NULL; + file = nullptr; if (!status.ok()) { env_->DeleteFile(tmp); @@ -438,14 +438,14 @@ class Repairer { // dir/lost/foo const char* slash = strrchr(fname.c_str(), '/'); std::string new_dir; - if (slash != NULL) { + if (slash != nullptr) { new_dir.assign(fname.data(), slash - fname.data()); } new_dir.append("/lost"); env_->CreateDir(new_dir); // Ignore error std::string new_file = new_dir; new_file.append("/"); - new_file.append((slash == NULL) ? fname.c_str() : slash + 1); + new_file.append((slash == nullptr) ?
fname.c_str() : slash + 1); Status s = env_->RenameFile(fname, new_file); Log(options_.info_log, "Archiving %s: %s\n", fname.c_str(), s.ToString().c_str()); diff --git a/db/skiplist.h b/db/skiplist.h index 8bd7776..b806ce0 100644 --- a/db/skiplist.h +++ b/db/skiplist.h @@ -123,9 +123,9 @@ class SkipList { bool KeyIsAfterNode(const Key& key, Node* n) const; // Return the earliest node that comes at or after key. - // Return NULL if there is no such node. + // Return nullptr if there is no such node. // - // If prev is non-NULL, fills prev[level] with pointer to previous + // If prev is non-null, fills prev[level] with pointer to previous // node at "level" for every level in [0..max_height_-1]. Node* FindGreaterOrEqual(const Key& key, Node** prev) const; @@ -190,12 +190,12 @@ SkipList<Key,Comparator>::NewNode(const Key& key, int height) { template<typename Key, class Comparator> inline SkipList<Key,Comparator>::Iterator::Iterator(const SkipList* list) { list_ = list; - node_ = NULL; + node_ = nullptr; } template<typename Key, class Comparator> inline bool SkipList<Key,Comparator>::Iterator::Valid() const { - return node_ != NULL; + return node_ != nullptr; } template<typename Key, class Comparator> @@ -217,13 +217,13 @@ inline void SkipList<Key,Comparator>::Iterator::Prev() { assert(Valid()); node_ = list_->FindLessThan(node_->key); if (node_ == list_->head_) { - node_ = NULL; + node_ = nullptr; } } template<typename Key, class Comparator> inline void SkipList<Key,Comparator>::Iterator::Seek(const Key& target) { - node_ = list_->FindGreaterOrEqual(target, NULL); + node_ = list_->FindGreaterOrEqual(target, nullptr); } template<typename Key, class Comparator> @@ -235,7 +235,7 @@ template<typename Key, class Comparator> inline void SkipList<Key,Comparator>::Iterator::SeekToLast() { node_ = list_->FindLast(); if (node_ == list_->head_) { - node_ = NULL; + node_ = nullptr; } } @@ -254,8 +254,8 @@ int SkipList<Key,Comparator>::RandomHeight() { template<typename Key, class Comparator> bool SkipList<Key,Comparator>::KeyIsAfterNode(const Key& key, Node* n) const { - // NULL n is considered infinite - return (n != NULL) && (compare_(n->key, key) < 0); + // null n is considered infinite + return (n != nullptr) && (compare_(n->key, key) < 0); } template<typename Key, class Comparator> @@ -269,7 +269,7 @@ typename SkipList<Key,Comparator>::Node* SkipList<Key,Comparator>::FindGreaterOr // Keep searching in this list x = next; } else { - if (prev != NULL) prev[level] = x; + if (prev != nullptr) prev[level] = x; if (level == 0) { return next; } else { @@ -288,7 +288,7 @@ SkipList<Key,Comparator>::FindLessThan(const Key& key) const { while (true) { assert(x == head_ || compare_(x->key, key) < 0); Node* next = x->Next(level); - if (next == NULL || compare_(next->key, key) >= 0) { + if (next == nullptr || compare_(next->key, key) >= 0) { if (level == 0) { return x; } else { @@ -308,7 +308,7 @@ typename SkipList<Key,Comparator>::Node* SkipList<Key,Comparator>::FindLast() int level = GetMaxHeight() - 1; while (true) { Node* next = x->Next(level); - if (next == NULL) { + if (next == nullptr) { if (level == 0) { return x; } else { @@ -329,7 +329,7 @@ SkipList<Key,Comparator>::SkipList(Comparator cmp, Arena* arena) max_height_(reinterpret_cast<void*>(1)), rnd_(0xdeadbeef) { for (int i = 0; i < kMaxHeight; i++) { - head_->SetNext(i, NULL); + head_->SetNext(i, nullptr); } } @@ -341,7 +341,7 @@ void SkipList<Key,Comparator>::Insert(const Key& key) { Node* x = FindGreaterOrEqual(key, prev); // Our data structure does not allow duplicate insertion - assert(x == NULL || !Equal(key, x->key)); + assert(x == nullptr || !Equal(key, x->key)); int height = RandomHeight(); if (height > GetMaxHeight()) { @@ -353,9 +353,9 @@ void SkipList<Key,Comparator>::Insert(const Key& key) { // It is ok to mutate max_height_ without any synchronization // with concurrent readers.
A concurrent reader that observes // the new value of max_height_ will see either the old value of - // new level pointers from head_ (NULL), or a new value set in + // new level pointers from head_ (nullptr), or a new value set in // the loop below. In the former case the reader will - // immediately drop to the next level since NULL sorts after all + // immediately drop to the next level since nullptr sorts after all // keys. In the latter case the reader will use the new node. max_height_.NoBarrier_Store(reinterpret_cast<void*>(height)); } @@ -371,8 +371,8 @@ void SkipList<Key,Comparator>::Insert(const Key& key) { template<typename Key, class Comparator> bool SkipList<Key,Comparator>::Contains(const Key& key) const { - Node* x = FindGreaterOrEqual(key, NULL); - if (x != NULL && Equal(key, x->key)) { + Node* x = FindGreaterOrEqual(key, nullptr); + if (x != nullptr && Equal(key, x->key)) { return true; } else { return false; diff --git a/db/skiplist_test.cc b/db/skiplist_test.cc index 90f9d0e..24e0887 100644 --- a/db/skiplist_test.cc +++ b/db/skiplist_test.cc @@ -310,7 +310,7 @@ class TestState { explicit TestState(int s) : seed_(s), - quit_flag_(NULL), + quit_flag_(nullptr), state_(STARTING), state_cv_(&mu_) {} @@ -362,7 +362,7 @@ static void RunConcurrent(int run) { for (int i = 0; i < kSize; i++) { state.t_.WriteStep(&rnd); } - state.quit_flag_.Release_Store(&state); // Any non-NULL arg will do + state.quit_flag_.Release_Store(&state); // Any non-null arg will do state.Wait(TestState::DONE); } } diff --git a/db/table_cache.cc b/db/table_cache.cc index 6cf005b..7226d3b 100644 --- a/db/table_cache.cc +++ b/db/table_cache.cc @@ -49,10 +49,10 @@ Status TableCache::FindTable(uint64_t file_number, uint64_t file_size, EncodeFixed64(buf, file_number); Slice key(buf, sizeof(buf)); *handle = cache_->Lookup(key); - if (*handle == NULL) { + if (*handle == nullptr) { std::string fname = TableFileName(dbname_, file_number); - RandomAccessFile* file = NULL; - Table* table = NULL; + RandomAccessFile* file = nullptr; + Table* table = nullptr; s = env_->NewRandomAccessFile(fname, &file); if (!s.ok()) { std::string old_fname = SSTTableFileName(dbname_, file_number); @@ -65,7 +65,7 @@ Status TableCache::FindTable(uint64_t file_number, uint64_t file_size, } if (!s.ok()) { - assert(table == NULL); + assert(table == nullptr); delete file; // We do not cache error results so that if the error is transient, // or somebody repairs the file, we recover automatically.
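// A minimal caller-side sketch (not part of the patch) of the
// Lookup()/Insert() protocol that TableCache::FindTable() follows above:
// a nullptr handle signals a cache miss, the value is built and inserted on
// a miss, and errors are returned without inserting so transient failures
// can be retried. "Entry", "DeleteEntry", and the payload are illustrative
// assumptions, not leveldb APIs.
#include "leveldb/cache.h"
#include "leveldb/slice.h"
#include "leveldb/status.h"

namespace {

struct Entry { int payload; };  // hypothetical cached object

void DeleteEntry(const leveldb::Slice& /*key*/, void* value) {
  delete reinterpret_cast<Entry*>(value);  // matches Insert()'s deleter contract
}

leveldb::Status LookupOrBuild(leveldb::Cache* cache, const leveldb::Slice& key,
                              leveldb::Cache::Handle** handle) {
  *handle = cache->Lookup(key);
  if (*handle != nullptr) return leveldb::Status::OK();  // hit
  Entry* entry = new Entry{42};  // build the value on a miss
  *handle = cache->Insert(key, entry, 1 /*charge*/, &DeleteEntry);
  return leveldb::Status::OK();  // caller must eventually Release(*handle)
}

}  // namespace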
@@ -83,11 +83,11 @@ Iterator* TableCache::NewIterator(const ReadOptions& options, uint64_t file_number, uint64_t file_size, Table** tableptr) { - if (tableptr != NULL) { - *tableptr = NULL; + if (tableptr != nullptr) { + *tableptr = nullptr; } - Cache::Handle* handle = NULL; + Cache::Handle* handle = nullptr; Status s = FindTable(file_number, file_size, &handle); if (!s.ok()) { return NewErrorIterator(s); @@ -96,7 +96,7 @@ Iterator* TableCache::NewIterator(const ReadOptions& options, Table* table = reinterpret_cast<TableAndFile*>(cache_->Value(handle))->table; Iterator* result = table->NewIterator(options); result->RegisterCleanup(&UnrefEntry, cache_, handle); - if (tableptr != NULL) { + if (tableptr != nullptr) { *tableptr = table; } return result; @@ -108,7 +108,7 @@ Status TableCache::Get(const ReadOptions& options, const Slice& k, void* arg, void (*saver)(void*, const Slice&, const Slice&)) { - Cache::Handle* handle = NULL; + Cache::Handle* handle = nullptr; Status s = FindTable(file_number, file_size, &handle); if (s.ok()) { Table* t = reinterpret_cast<TableAndFile*>(cache_->Value(handle))->table; diff --git a/db/table_cache.h b/db/table_cache.h index e9191dc..ae8bee5 100644 --- a/db/table_cache.h +++ b/db/table_cache.h @@ -25,15 +25,15 @@ class TableCache { // Return an iterator for the specified file number (the corresponding // file length must be exactly "file_size" bytes). If "tableptr" is - // non-NULL, also sets "*tableptr" to point to the Table object - // underlying the returned iterator, or NULL if no Table object underlies - // the returned iterator. The returned "*tableptr" object is owned by - // the cache and should not be deleted, and is valid for as long as the + // non-null, also sets "*tableptr" to point to the Table object + // underlying the returned iterator, or to nullptr if no Table object + // underlies the returned iterator. The returned "*tableptr" object is owned + // by the cache and should not be deleted, and is valid for as long as the // returned iterator is live. Iterator* NewIterator(const ReadOptions& options, uint64_t file_number, uint64_t file_size, - Table** tableptr = NULL); + Table** tableptr = nullptr); // If a seek to internal key "k" in specified file finds an entry, // call (*handle_result)(arg, found_key, found_value).
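// A hedged sketch of the "tableptr" contract documented above, assuming it
// sits inside namespace leveldb with the db/ headers available: a nullptr
// *tableptr means no Table object backs the iterator, and a non-null one is
// owned by the cache and must not be deleted. The same pattern appears in
// VersionSet::ApproximateOffsetOf() later in this patch; TableDataOffset is
// an illustrative name, not a leveldb function.
#include "db/dbformat.h"
#include "db/table_cache.h"

namespace leveldb {

uint64_t TableDataOffset(TableCache* cache, uint64_t file_number,
                         uint64_t file_size, const InternalKey& ikey) {
  Table* tableptr;
  Iterator* iter =
      cache->NewIterator(ReadOptions(), file_number, file_size, &tableptr);
  uint64_t offset = 0;
  if (tableptr != nullptr) {  // cache-owned; do not delete
    offset = tableptr->ApproximateOffsetOf(ikey.Encode());
  }
  delete iter;  // its registered cleanup releases the cache handle
  return offset;
}

}  // namespace leveldb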
diff --git a/db/version_edit.cc b/db/version_edit.cc index f10a2d5..b7a366d 100644 --- a/db/version_edit.cc +++ b/db/version_edit.cc @@ -109,7 +109,7 @@ static bool GetLevel(Slice* input, int* level) { Status VersionEdit::DecodeFrom(const Slice& src) { Clear(); Slice input = src; - const char* msg = NULL; + const char* msg = nullptr; uint32_t tag; // Temporary storage for parsing @@ -119,7 +119,7 @@ Status VersionEdit::DecodeFrom(const Slice& src) { Slice str; InternalKey key; - while (msg == NULL && GetVarint32(&input, &tag)) { + while (msg == nullptr && GetVarint32(&input, &tag)) { switch (tag) { case kComparator: if (GetLengthPrefixedSlice(&input, &str)) { @@ -198,12 +198,12 @@ Status VersionEdit::DecodeFrom(const Slice& src) { } } - if (msg == NULL && !input.empty()) { + if (msg == nullptr && !input.empty()) { msg = "invalid tag"; } Status result; - if (msg != NULL) { + if (msg != nullptr) { result = Status::Corruption("VersionEdit", msg); } return result; diff --git a/db/version_set.cc b/db/version_set.cc index 7022b27..02cc66f 100644 --- a/db/version_set.cc +++ b/db/version_set.cc @@ -106,15 +106,15 @@ int FindFile(const InternalKeyComparator& icmp, static bool AfterFile(const Comparator* ucmp, const Slice* user_key, const FileMetaData* f) { - // NULL user_key occurs before all keys and is therefore never after *f - return (user_key != NULL && + // null user_key occurs before all keys and is therefore never after *f + return (user_key != nullptr && ucmp->Compare(*user_key, f->largest.user_key()) > 0); } static bool BeforeFile(const Comparator* ucmp, const Slice* user_key, const FileMetaData* f) { - // NULL user_key occurs after all keys and is therefore never before *f - return (user_key != NULL && + // null user_key occurs after all keys and is therefore never before *f + return (user_key != nullptr && ucmp->Compare(*user_key, f->smallest.user_key()) < 0); } @@ -141,7 +141,7 @@ bool SomeFileOverlapsRange( // Binary search over file list uint32_t index = 0; - if (smallest_user_key != NULL) { + if (smallest_user_key != nullptr) { // Find the earliest possible internal key for smallest_user_key InternalKey small(*smallest_user_key, kMaxSequenceNumber,kValueTypeForSeek); index = FindFile(icmp, files, small.Encode()); @@ -338,9 +338,9 @@ Status Version::Get(const ReadOptions& options, const Comparator* ucmp = vset_->icmp_.user_comparator(); Status s; - stats->seek_file = NULL; + stats->seek_file = nullptr; stats->seek_file_level = -1; - FileMetaData* last_file_read = NULL; + FileMetaData* last_file_read = nullptr; int last_file_read_level = -1; // We can search level-by-level since entries never hop across @@ -374,13 +374,13 @@ Status Version::Get(const ReadOptions& options, // Binary search to find earliest index whose largest key >= ikey. uint32_t index = FindFile(vset_->icmp_, files_[level], ikey); if (index >= num_files) { - files = NULL; + files = nullptr; num_files = 0; } else { tmp2 = files[index]; if (ucmp->Compare(user_key, tmp2->smallest.user_key()) < 0) { // All of "tmp2" is past any data for user_key - files = NULL; + files = nullptr; num_files = 0; } else { files = &tmp2; @@ -390,7 +390,7 @@ Status Version::Get(const ReadOptions& options, } for (uint32_t i = 0; i < num_files; ++i) { - if (last_file_read != NULL && stats->seek_file == NULL) { + if (last_file_read != nullptr && stats->seek_file == nullptr) { // We have had more than one seek for this read. Charge the 1st file. 
stats->seek_file = last_file_read; stats->seek_file_level = last_file_read_level; @@ -430,9 +430,9 @@ Status Version::Get(const ReadOptions& options, bool Version::UpdateStats(const GetStats& stats) { FileMetaData* f = stats.seek_file; - if (f != NULL) { + if (f != nullptr) { f->allowed_seeks--; - if (f->allowed_seeks <= 0 && file_to_compact_ == NULL) { + if (f->allowed_seeks <= 0 && file_to_compact_ == nullptr) { file_to_compact_ = f; file_to_compact_level_ = stats.seek_file_level; return true; @@ -537,10 +537,10 @@ void Version::GetOverlappingInputs( assert(level < config::kNumLevels); inputs->clear(); Slice user_begin, user_end; - if (begin != NULL) { + if (begin != nullptr) { user_begin = begin->user_key(); } - if (end != NULL) { + if (end != nullptr) { user_end = end->user_key(); } const Comparator* user_cmp = vset_->icmp_.user_comparator(); @@ -548,20 +548,21 @@ void Version::GetOverlappingInputs( FileMetaData* f = files_[level][i++]; const Slice file_start = f->smallest.user_key(); const Slice file_limit = f->largest.user_key(); - if (begin != NULL && user_cmp->Compare(file_limit, user_begin) < 0) { + if (begin != nullptr && user_cmp->Compare(file_limit, user_begin) < 0) { // "f" is completely before specified range; skip it - } else if (end != NULL && user_cmp->Compare(file_start, user_end) > 0) { + } else if (end != nullptr && user_cmp->Compare(file_start, user_end) > 0) { // "f" is completely after specified range; skip it } else { inputs->push_back(f); if (level == 0) { // Level-0 files may overlap each other. So check if the newly // added file has expanded the range. If so, restart search. - if (begin != NULL && user_cmp->Compare(file_start, user_begin) < 0) { + if (begin != nullptr && user_cmp->Compare(file_start, user_begin) < 0) { user_begin = file_start; inputs->clear(); i = 0; - } else if (end != NULL && user_cmp->Compare(file_limit, user_end) > 0) { + } else if (end != nullptr && user_cmp->Compare(file_limit, + user_end) > 0) { user_end = file_limit; inputs->clear(); i = 0; @@ -786,10 +787,10 @@ VersionSet::VersionSet(const std::string& dbname, last_sequence_(0), log_number_(0), prev_log_number_(0), - descriptor_file_(NULL), - descriptor_log_(NULL), + descriptor_file_(nullptr), + descriptor_log_(nullptr), dummy_versions_(this), - current_(NULL) { + current_(nullptr) { AppendVersion(new Version(this)); } @@ -804,7 +805,7 @@ void VersionSet::AppendVersion(Version* v) { // Make "v" current assert(v->refs_ == 0); assert(v != current_); - if (current_ != NULL) { + if (current_ != nullptr) { current_->Unref(); } current_ = v; @@ -844,10 +845,10 @@ Status VersionSet::LogAndApply(VersionEdit* edit, port::Mutex* mu) { // a temporary file that contains a snapshot of the current version. std::string new_manifest_file; Status s; - if (descriptor_log_ == NULL) { + if (descriptor_log_ == nullptr) { // No reason to unlock *mu here since we only hit this path in the // first call to LogAndApply (when opening the database). 
- assert(descriptor_file_ == NULL); + assert(descriptor_file_ == nullptr); new_manifest_file = DescriptorFileName(dbname_, manifest_file_number_); edit->SetNextFile(next_file_number_); s = env_->NewWritableFile(new_manifest_file, &descriptor_file_); @@ -893,8 +894,8 @@ Status VersionSet::LogAndApply(VersionEdit* edit, port::Mutex* mu) { if (!new_manifest_file.empty()) { delete descriptor_log_; delete descriptor_file_; - descriptor_log_ = NULL; - descriptor_file_ = NULL; + descriptor_log_ = nullptr; + descriptor_file_ = nullptr; env_->DeleteFile(new_manifest_file); } } @@ -986,7 +987,7 @@ Status VersionSet::Recover(bool *save_manifest) { } } delete file; - file = NULL; + file = nullptr; if (s.ok()) { if (!have_next_file) { @@ -1044,12 +1045,12 @@ bool VersionSet::ReuseManifest(const std::string& dscname, return false; } - assert(descriptor_file_ == NULL); - assert(descriptor_log_ == NULL); + assert(descriptor_file_ == nullptr); + assert(descriptor_log_ == nullptr); Status r = env_->NewAppendableFile(dscname, &descriptor_file_); if (!r.ok()) { Log(options_->info_log, "Reuse MANIFEST: %s\n", r.ToString().c_str()); - assert(descriptor_file_ == NULL); + assert(descriptor_file_ == nullptr); return false; } @@ -1176,7 +1177,7 @@ uint64_t VersionSet::ApproximateOffsetOf(Version* v, const InternalKey& ikey) { Table* tableptr; Iterator* iter = table_cache_->NewIterator( ReadOptions(), files[i]->number, files[i]->file_size, &tableptr); - if (tableptr != NULL) { + if (tableptr != nullptr) { result += tableptr->ApproximateOffsetOf(ikey.Encode()); } delete iter; @@ -1299,7 +1300,7 @@ Compaction* VersionSet::PickCompaction() { // We prefer compactions triggered by too much data in a level over // the compactions triggered by seeks. const bool size_compaction = (current_->compaction_score_ >= 1); - const bool seek_compaction = (current_->file_to_compact_ != NULL); + const bool seek_compaction = (current_->file_to_compact_ != nullptr); if (size_compaction) { level = current_->compaction_level_; assert(level >= 0); @@ -1324,7 +1325,7 @@ Compaction* VersionSet::PickCompaction() { c = new Compaction(options_, level); c->inputs_[0].push_back(current_->file_to_compact_); } else { - return NULL; + return nullptr; } c->input_version_ = current_; @@ -1414,7 +1415,7 @@ Compaction* VersionSet::CompactRange( std::vector<FileMetaData*> inputs; current_->GetOverlappingInputs(level, begin, end, &inputs); if (inputs.empty()) { - return NULL; + return nullptr; } // Avoid compacting too much in one shot in case the range is large.
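// A caller-side sketch of the nullptr contract established above:
// PickCompaction() and CompactRange() return nullptr when there is nothing
// to do, and otherwise hand the caller ownership of a heap-allocated
// Compaction that must be deleted. The elided body is a placeholder;
// MaybeRunCompaction is an illustrative name, not a leveldb function.
#include "db/version_set.h"

namespace leveldb {

void MaybeRunCompaction(VersionSet* versions) {
  Compaction* c = versions->PickCompaction();
  if (c == nullptr) {
    return;  // no level- or seek-triggered work pending
  }
  // ... perform the compaction using c ...
  c->ReleaseInputs();  // drops the pinned input Version
  delete c;            // caller owns the result
}

}  // namespace leveldb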
@@ -1445,7 +1446,7 @@ Compaction* VersionSet::CompactRange( Compaction::Compaction(const Options* options, int level) : level_(level), max_output_file_size_(MaxFileSizeForLevel(options, level)), - input_version_(NULL), + input_version_(nullptr), grandparent_index_(0), seen_key_(false), overlapped_bytes_(0) { @@ -1455,7 +1456,7 @@ Compaction::Compaction(const Options* options, int level) } Compaction::~Compaction() { - if (input_version_ != NULL) { + if (input_version_ != nullptr) { input_version_->Unref(); } } @@ -1523,9 +1524,9 @@ bool Compaction::ShouldStopBefore(const Slice& internal_key) { } void Compaction::ReleaseInputs() { - if (input_version_ != NULL) { + if (input_version_ != nullptr) { input_version_->Unref(); - input_version_ = NULL; + input_version_ = nullptr; } } diff --git a/db/version_set.h b/db/version_set.h index 80d448a..77b9895 100644 --- a/db/version_set.h +++ b/db/version_set.h @@ -45,8 +45,8 @@ int FindFile(const InternalKeyComparator& icmp, // Returns true iff some file in "files" overlaps the user key range // [*smallest,*largest]. -// smallest==NULL represents a key smaller than all keys in the DB. -// largest==NULL represents a key largest than all keys in the DB. +// smallest==nullptr represents a key smaller than all keys in the DB. +// largest==nullptr represents a key larger than all keys in the DB. // REQUIRES: If disjoint_sorted_files, files[] contains disjoint ranges // in sorted order. bool SomeFileOverlapsRange(const InternalKeyComparator& icmp, @@ -90,14 +90,14 @@ class Version { void GetOverlappingInputs( int level, - const InternalKey* begin, // NULL means before all keys - const InternalKey* end, // NULL means after all keys + const InternalKey* begin, // nullptr means before all keys + const InternalKey* end, // nullptr means after all keys std::vector<FileMetaData*>* inputs); // Returns true iff some file in the specified level overlaps // some part of [*smallest_user_key,*largest_user_key]. - // smallest_user_key==NULL represents a key smaller than all keys in the DB. - // largest_user_key==NULL represents a key largest than all keys in the DB. + // smallest_user_key==nullptr represents a key smaller than all the DB's keys. + // largest_user_key==nullptr represents a key larger than all the DB's keys. bool OverlapInLevel(int level, const Slice* smallest_user_key, const Slice* largest_user_key); @@ -148,7 +148,7 @@ class Version { explicit Version(VersionSet* vset) : vset_(vset), next_(this), prev_(this), refs_(0), - file_to_compact_(NULL), + file_to_compact_(nullptr), file_to_compact_level_(-1), compaction_score_(-1), compaction_level_(-1) { @@ -224,13 +224,13 @@ class VersionSet { uint64_t PrevLogNumber() const { return prev_log_number_; } // Pick level and inputs for a new compaction. - // Returns NULL if there is no compaction to be done. + // Returns nullptr if there is no compaction to be done. // Otherwise returns a pointer to a heap-allocated object that // describes the compaction. Caller should delete the result. Compaction* PickCompaction(); // Return a compaction object for compacting the range [begin,end] in - // the specified level. Returns NULL if there is nothing in that + // the specified level. Returns nullptr if there is nothing in that // level that overlaps the specified range. Caller should delete // the result. Compaction* CompactRange( @@ -249,7 +249,7 @@ class VersionSet { // Returns true iff some level needs a compaction.
bool NeedsCompaction() const { Version* v = current_; - return (v->compaction_score_ >= 1) || (v->file_to_compact_ != NULL); + return (v->compaction_score_ >= 1) || (v->file_to_compact_ != nullptr); } // Add all files listed in any live version to *live. diff --git a/db/version_set_test.cc b/db/version_set_test.cc index 501e34d..d21433e 100644 --- a/db/version_set_test.cc +++ b/db/version_set_test.cc @@ -40,20 +40,20 @@ class FindFileTest { bool Overlaps(const char* smallest, const char* largest) { InternalKeyComparator cmp(BytewiseComparator()); - Slice s(smallest != NULL ? smallest : ""); - Slice l(largest != NULL ? largest : ""); + Slice s(smallest != nullptr ? smallest : ""); + Slice l(largest != nullptr ? largest : ""); return SomeFileOverlapsRange(cmp, disjoint_sorted_files_, files_, - (smallest != NULL ? &s : NULL), - (largest != NULL ? &l : NULL)); + (smallest != nullptr ? &s : nullptr), + (largest != nullptr ? &l : nullptr)); } }; TEST(FindFileTest, Empty) { ASSERT_EQ(0, Find("foo")); ASSERT_TRUE(! Overlaps("a", "z")); - ASSERT_TRUE(! Overlaps(NULL, "z")); - ASSERT_TRUE(! Overlaps("a", NULL)); - ASSERT_TRUE(! Overlaps(NULL, NULL)); + ASSERT_TRUE(! Overlaps(nullptr, "z")); + ASSERT_TRUE(! Overlaps("a", nullptr)); + ASSERT_TRUE(! Overlaps(nullptr, nullptr)); } TEST(FindFileTest, Single) { @@ -78,12 +78,12 @@ TEST(FindFileTest, Single) { ASSERT_TRUE(Overlaps("q", "q")); ASSERT_TRUE(Overlaps("q", "q1")); - ASSERT_TRUE(! Overlaps(NULL, "j")); - ASSERT_TRUE(! Overlaps("r", NULL)); - ASSERT_TRUE(Overlaps(NULL, "p")); - ASSERT_TRUE(Overlaps(NULL, "p1")); - ASSERT_TRUE(Overlaps("q", NULL)); - ASSERT_TRUE(Overlaps(NULL, NULL)); + ASSERT_TRUE(! Overlaps(nullptr, "j")); + ASSERT_TRUE(! Overlaps("r", nullptr)); + ASSERT_TRUE(Overlaps(nullptr, "p")); + ASSERT_TRUE(Overlaps(nullptr, "p1")); + ASSERT_TRUE(Overlaps("q", nullptr)); + ASSERT_TRUE(Overlaps(nullptr, nullptr)); } @@ -130,19 +130,19 @@ TEST(FindFileTest, MultipleNullBoundaries) { Add("200", "250"); Add("300", "350"); Add("400", "450"); - ASSERT_TRUE(! Overlaps(NULL, "149")); - ASSERT_TRUE(! Overlaps("451", NULL)); - ASSERT_TRUE(Overlaps(NULL, NULL)); - ASSERT_TRUE(Overlaps(NULL, "150")); - ASSERT_TRUE(Overlaps(NULL, "199")); - ASSERT_TRUE(Overlaps(NULL, "200")); - ASSERT_TRUE(Overlaps(NULL, "201")); - ASSERT_TRUE(Overlaps(NULL, "400")); - ASSERT_TRUE(Overlaps(NULL, "800")); - ASSERT_TRUE(Overlaps("100", NULL)); - ASSERT_TRUE(Overlaps("200", NULL)); - ASSERT_TRUE(Overlaps("449", NULL)); - ASSERT_TRUE(Overlaps("450", NULL)); + ASSERT_TRUE(! Overlaps(nullptr, "149")); + ASSERT_TRUE(! Overlaps("451", nullptr)); + ASSERT_TRUE(Overlaps(nullptr, nullptr)); + ASSERT_TRUE(Overlaps(nullptr, "150")); + ASSERT_TRUE(Overlaps(nullptr, "199")); + ASSERT_TRUE(Overlaps(nullptr, "200")); + ASSERT_TRUE(Overlaps(nullptr, "201")); + ASSERT_TRUE(Overlaps(nullptr, "400")); + ASSERT_TRUE(Overlaps(nullptr, "800")); + ASSERT_TRUE(Overlaps("100", nullptr)); + ASSERT_TRUE(Overlaps("200", nullptr)); + ASSERT_TRUE(Overlaps("449", nullptr)); + ASSERT_TRUE(Overlaps("450", nullptr)); } TEST(FindFileTest, OverlapSequenceChecks) { diff --git a/doc/bench/db_bench_sqlite3.cc b/doc/bench/db_bench_sqlite3.cc index e63aaa8..7e05de2 100644 --- a/doc/bench/db_bench_sqlite3.cc +++ b/doc/bench/db_bench_sqlite3.cc @@ -76,7 +76,7 @@ static bool FLAGS_transaction = true; static bool FLAGS_WAL_enabled = true; // Use the db with the following name. 
-static const char* FLAGS_db = NULL; +static const char* FLAGS_db = nullptr; inline static void ExecErrorCheck(int status, char *err_msg) { @@ -107,7 +107,8 @@ inline static void WalCheckpoint(sqlite3* db_) { // Flush all writes to disk if (FLAGS_WAL_enabled) { - sqlite3_wal_checkpoint_v2(db_, NULL, SQLITE_CHECKPOINT_FULL, NULL, NULL); + sqlite3_wal_checkpoint_v2(db_, nullptr, SQLITE_CHECKPOINT_FULL, nullptr, + nullptr); } } @@ -207,18 +208,18 @@ class Benchmark { fprintf(stderr, "SQLite: version %s\n", SQLITE_VERSION); #if defined(__linux) - time_t now = time(NULL); + time_t now = time(nullptr); fprintf(stderr, "Date: %s", ctime(&now)); // ctime() adds newline FILE* cpuinfo = fopen("/proc/cpuinfo", "r"); - if (cpuinfo != NULL) { + if (cpuinfo != nullptr) { char line[1000]; int num_cpus = 0; std::string cpu_type; std::string cache_size; - while (fgets(line, sizeof(line), cpuinfo) != NULL) { + while (fgets(line, sizeof(line), cpuinfo) != nullptr) { const char* sep = strchr(line, ':'); - if (sep == NULL) { + if (sep == nullptr) { continue; } Slice key = TrimSpace(Slice(line, sep - 1 - line)); @@ -313,7 +314,7 @@ class Benchmark { }; Benchmark() - : db_(NULL), + : db_(nullptr), db_num_(0), num_(FLAGS_num), reads_(FLAGS_reads < 0 ? FLAGS_num : FLAGS_reads), @@ -345,12 +346,12 @@ class Benchmark { Open(); const char* benchmarks = FLAGS_benchmarks; - while (benchmarks != NULL) { + while (benchmarks != nullptr) { const char* sep = strchr(benchmarks, ','); Slice name; - if (sep == NULL) { + if (sep == nullptr) { name = benchmarks; - benchmarks = NULL; + benchmarks = nullptr; } else { name = Slice(benchmarks, sep - benchmarks); benchmarks = sep + 1; @@ -415,11 +416,11 @@ class Benchmark { } void Open() { - assert(db_ == NULL); + assert(db_ == nullptr); int status; char file_name[100]; - char* err_msg = NULL; + char* err_msg = nullptr; db_num_++; // Open database @@ -439,7 +440,7 @@ class Benchmark { char cache_size[100]; snprintf(cache_size, sizeof(cache_size), "PRAGMA cache_size = %d", FLAGS_num_pages); - status = sqlite3_exec(db_, cache_size, NULL, NULL, &err_msg); + status = sqlite3_exec(db_, cache_size, nullptr, nullptr, &err_msg); ExecErrorCheck(status, err_msg); // FLAGS_page_size is defaulted to 1024 @@ -447,7 +448,7 @@ class Benchmark { char page_size[100]; snprintf(page_size, sizeof(page_size), "PRAGMA page_size = %d", FLAGS_page_size); - status = sqlite3_exec(db_, page_size, NULL, NULL, &err_msg); + status = sqlite3_exec(db_, page_size, nullptr, nullptr, &err_msg); ExecErrorCheck(status, err_msg); } @@ -457,9 +458,10 @@ class Benchmark { // LevelDB's default cache size is a combined 4 MB std::string WAL_checkpoint = "PRAGMA wal_autocheckpoint = 4096"; - status = sqlite3_exec(db_, WAL_stmt.c_str(), NULL, NULL, &err_msg); + status = sqlite3_exec(db_, WAL_stmt.c_str(), nullptr, nullptr, &err_msg); ExecErrorCheck(status, err_msg); - status = sqlite3_exec(db_, WAL_checkpoint.c_str(), NULL, NULL, &err_msg); + status = sqlite3_exec(db_, WAL_checkpoint.c_str(), nullptr, nullptr, + &err_msg); ExecErrorCheck(status, err_msg); } @@ -470,7 +472,8 @@ class Benchmark { std::string stmt_array[] = { locking_stmt, create_stmt }; int stmt_array_length = sizeof(stmt_array) / sizeof(std::string); for (int i = 0; i < stmt_array_length; i++) { - status = sqlite3_exec(db_, stmt_array[i].c_str(), NULL, NULL, &err_msg); + status = sqlite3_exec(db_, stmt_array[i].c_str(), nullptr, nullptr, + &err_msg); ExecErrorCheck(status, err_msg); } } @@ -484,7 +487,7 @@ class Benchmark { return; } sqlite3_close(db_); - db_ = 
NULL; + db_ = nullptr; Open(); Start(); } @@ -495,7 +498,7 @@ class Benchmark { message_ = msg; } - char* err_msg = NULL; + char* err_msg = nullptr; int status; sqlite3_stmt *replace_stmt, *begin_trans_stmt, *end_trans_stmt; @@ -506,18 +509,18 @@ class Benchmark { // Check for synchronous flag in options std::string sync_stmt = (write_sync) ? "PRAGMA synchronous = FULL" : "PRAGMA synchronous = OFF"; - status = sqlite3_exec(db_, sync_stmt.c_str(), NULL, NULL, &err_msg); + status = sqlite3_exec(db_, sync_stmt.c_str(), nullptr, nullptr, &err_msg); ExecErrorCheck(status, err_msg); // Preparing sqlite3 statements status = sqlite3_prepare_v2(db_, replace_str.c_str(), -1, - &replace_stmt, NULL); + &replace_stmt, nullptr); ErrorCheck(status); status = sqlite3_prepare_v2(db_, begin_trans_str.c_str(), -1, - &begin_trans_stmt, NULL); + &begin_trans_stmt, nullptr); ErrorCheck(status); status = sqlite3_prepare_v2(db_, end_trans_str.c_str(), -1, - &end_trans_stmt, NULL); + &end_trans_stmt, nullptr); ErrorCheck(status); bool transaction = (entries_per_batch > 1); @@ -588,12 +591,12 @@ class Benchmark { // Preparing sqlite3 statements status = sqlite3_prepare_v2(db_, begin_trans_str.c_str(), -1, - &begin_trans_stmt, NULL); + &begin_trans_stmt, nullptr); ErrorCheck(status); status = sqlite3_prepare_v2(db_, end_trans_str.c_str(), -1, - &end_trans_stmt, NULL); + &end_trans_stmt, nullptr); ErrorCheck(status); - status = sqlite3_prepare_v2(db_, read_str.c_str(), -1, &read_stmt, NULL); + status = sqlite3_prepare_v2(db_, read_str.c_str(), -1, &read_stmt, nullptr); ErrorCheck(status); bool transaction = (entries_per_batch > 1); @@ -651,7 +654,7 @@ class Benchmark { sqlite3_stmt *pStmt; std::string read_str = "SELECT * FROM test ORDER BY key"; - status = sqlite3_prepare_v2(db_, read_str.c_str(), -1, &pStmt, NULL); + status = sqlite3_prepare_v2(db_, read_str.c_str(), -1, &pStmt, nullptr); ErrorCheck(status); for (int i = 0; i < reads_ && SQLITE_ROW == sqlite3_step(pStmt); i++) { bytes_ += sqlite3_column_bytes(pStmt, 1) + sqlite3_column_bytes(pStmt, 2); @@ -706,7 +709,7 @@ int main(int argc, char** argv) { } // Choose a location for the test database if none given with --db= - if (FLAGS_db == NULL) { + if (FLAGS_db == nullptr) { leveldb::Env::Default()->GetTestDirectory(&default_db_path); default_db_path += "/dbbench"; FLAGS_db = default_db_path.c_str(); diff --git a/doc/bench/db_bench_tree_db.cc b/doc/bench/db_bench_tree_db.cc index 4ca381f..9f8fb90 100644 --- a/doc/bench/db_bench_tree_db.cc +++ b/doc/bench/db_bench_tree_db.cc @@ -69,7 +69,7 @@ static bool FLAGS_use_existing_db = false; static bool FLAGS_compression = true; // Use the db with the following name. 
-static const char* FLAGS_db = NULL; +static const char* FLAGS_db = nullptr; inline static void DBSynchronize(kyotocabinet::TreeDB* db_) @@ -183,18 +183,18 @@ class Benchmark { kyotocabinet::VERSION, kyotocabinet::LIBVER, kyotocabinet::LIBREV); #if defined(__linux) - time_t now = time(NULL); + time_t now = time(nullptr); fprintf(stderr, "Date: %s", ctime(&now)); // ctime() adds newline FILE* cpuinfo = fopen("/proc/cpuinfo", "r"); - if (cpuinfo != NULL) { + if (cpuinfo != nullptr) { char line[1000]; int num_cpus = 0; std::string cpu_type; std::string cache_size; - while (fgets(line, sizeof(line), cpuinfo) != NULL) { + while (fgets(line, sizeof(line), cpuinfo) != nullptr) { const char* sep = strchr(line, ':'); - if (sep == NULL) { + if (sep == nullptr) { continue; } Slice key = TrimSpace(Slice(line, sep - 1 - line)); @@ -289,7 +289,7 @@ class Benchmark { }; Benchmark() - : db_(NULL), + : db_(nullptr), num_(FLAGS_num), reads_(FLAGS_reads < 0 ? FLAGS_num : FLAGS_reads), bytes_(0), @@ -321,12 +321,12 @@ class Benchmark { Open(false); const char* benchmarks = FLAGS_benchmarks; - while (benchmarks != NULL) { + while (benchmarks != nullptr) { const char* sep = strchr(benchmarks, ','); Slice name; - if (sep == NULL) { + if (sep == nullptr) { name = benchmarks; - benchmarks = NULL; + benchmarks = nullptr; } else { name = Slice(benchmarks, sep - benchmarks); benchmarks = sep + 1; @@ -387,7 +387,7 @@ class Benchmark { private: void Open(bool sync) { - assert(db_ == NULL); + assert(db_ == nullptr); // Initialize db_ db_ = new kyotocabinet::TreeDB(); @@ -430,7 +430,7 @@ class Benchmark { return; } delete db_; - db_ = NULL; + db_ = nullptr; Open(sync); Start(); // Do not count time taken to destroy/open } @@ -516,7 +516,7 @@ int main(int argc, char** argv) { } // Choose a location for the test database if none given with --db= - if (FLAGS_db == NULL) { + if (FLAGS_db == nullptr) { leveldb::Env::Default()->GetTestDirectory(&default_db_path); default_db_path += "/dbbench"; FLAGS_db = default_db_path.c_str(); diff --git a/helpers/memenv/memenv.cc b/helpers/memenv/memenv.cc index 43b009d..d44627b 100644 --- a/helpers/memenv/memenv.cc +++ b/helpers/memenv/memenv.cc @@ -246,7 +246,7 @@ class InMemoryEnv : public EnvWrapper { SequentialFile** result) { MutexLock lock(&mutex_); if (file_map_.find(fname) == file_map_.end()) { - *result = NULL; + *result = nullptr; return Status::IOError(fname, "File not found"); } @@ -258,7 +258,7 @@ class InMemoryEnv : public EnvWrapper { RandomAccessFile** result) { MutexLock lock(&mutex_); if (file_map_.find(fname) == file_map_.end()) { - *result = NULL; + *result = nullptr; return Status::IOError(fname, "File not found"); } @@ -286,7 +286,7 @@ class InMemoryEnv : public EnvWrapper { MutexLock lock(&mutex_); FileState** sptr = &file_map_[fname]; FileState* file = *sptr; - if (file == NULL) { + if (file == nullptr) { file = new FileState(); file->Ref(); } diff --git a/include/leveldb/cache.h b/include/leveldb/cache.h index 145ad69..e416ea5 100644 --- a/include/leveldb/cache.h +++ b/include/leveldb/cache.h @@ -56,7 +56,7 @@ class LEVELDB_EXPORT Cache { virtual Handle* Insert(const Slice& key, void* value, size_t charge, void (*deleter)(const Slice& key, void* value)) = 0; - // If the cache has no mapping for "key", returns NULL. + // If the cache has no mapping for "key", returns nullptr. // // Else return a handle that corresponds to the mapping. 
The caller // must call this->Release(handle) when the returned mapping is no // longer needed. diff --git a/include/leveldb/db.h b/include/leveldb/db.h index 092a154..84c32bc 100644 --- a/include/leveldb/db.h +++ b/include/leveldb/db.h @@ -47,7 +47,7 @@ class LEVELDB_EXPORT DB { // Open the database with the specified "name". // Stores a pointer to a heap-allocated database in *dbptr and returns // OK on success. - // Stores NULL in *dbptr and returns a non-OK status on error. + // Stores nullptr in *dbptr and returns a non-OK status on error. // Caller should delete *dbptr when it is no longer needed. static Status Open(const Options& options, const std::string& name, @@ -141,10 +141,10 @@ class LEVELDB_EXPORT DB { // needed to access the data. This operation should typically only // be invoked by users who understand the underlying implementation. // - // begin==NULL is treated as a key before all keys in the database. - // end==NULL is treated as a key after all keys in the database. + // begin==nullptr is treated as a key before all keys in the database. + // end==nullptr is treated as a key after all keys in the database. // Therefore the following call will compact the entire database: - // db->CompactRange(NULL, NULL); + // db->CompactRange(nullptr, nullptr); virtual void CompactRange(const Slice* begin, const Slice* end) = 0; }; diff --git a/include/leveldb/env.h b/include/leveldb/env.h index 54c9e3b..87dc06e 100644 --- a/include/leveldb/env.h +++ b/include/leveldb/env.h @@ -47,7 +47,7 @@ class LEVELDB_EXPORT Env { // Create a brand new sequentially-readable file with the specified name. // On success, stores a pointer to the new file in *result and returns OK. - // On failure stores NULL in *result and returns non-OK. If the file does + // On failure stores nullptr in *result and returns non-OK. If the file does // not exist, returns a non-OK status. Implementations should return a // NotFound status when the file does not exist. // @@ -57,7 +57,7 @@ class LEVELDB_EXPORT Env { // Create a brand new random access read-only file with the // specified name. On success, stores a pointer to the new file in - // *result and returns OK. On failure stores NULL in *result and + // *result and returns OK. On failure stores nullptr in *result and // returns non-OK. If the file does not exist, returns a non-OK // status. Implementations should return a NotFound status when the file does // not exist. @@ -69,7 +69,7 @@ class LEVELDB_EXPORT Env { // Create an object that writes to a new file with the specified // name. Deletes any existing file with the same name and creates a // new file. On success, stores a pointer to the new file in - // *result and returns OK. On failure stores NULL in *result and + // *result and returns OK. On failure stores nullptr in *result and // returns non-OK. // // The returned file will only be accessed by one thread at a time. @@ -79,7 +79,7 @@ class LEVELDB_EXPORT Env { // Create an object that either appends to an existing file, or // writes to a new file (if the file does not exist to begin with). // On success, stores a pointer to the new file in *result and - // returns OK. On failure stores NULL in *result and returns + // returns OK. On failure stores nullptr in *result and returns // non-OK. // // The returned file will only be accessed by one thread at a time. @@ -117,7 +117,7 @@ class LEVELDB_EXPORT Env { const std::string& target) = 0; // Lock the specified file. Used to prevent concurrent access to
- // the same db by multiple processes. On failure, stores NULL in + // the same db by multiple processes. On failure, stores nullptr in // *lock and returns non-OK. // // On success, stores a pointer to the object that represents the @@ -264,7 +264,7 @@ class LEVELDB_EXPORT FileLock { virtual ~FileLock(); }; -// Log the specified data to *info_log if info_log is non-NULL. +// Log the specified data to *info_log if info_log is non-null. void Log(Logger* info_log, const char* format, ...) # if defined(__GNUC__) || defined(__clang__) __attribute__((__format__ (__printf__, 2, 3))) diff --git a/include/leveldb/options.h b/include/leveldb/options.h index 1c42921..b6ddbd8 100644 --- a/include/leveldb/options.h +++ b/include/leveldb/options.h @@ -63,9 +63,9 @@ struct LEVELDB_EXPORT Options { Env* env; // Any internal progress/error information generated by the db will - // be written to info_log if it is non-NULL, or to a file stored - // in the same directory as the DB contents if info_log is NULL. - // Default: NULL + // be written to info_log if it is non-null, or to a file stored + // in the same directory as the DB contents if info_log is null. + // Default: nullptr Logger* info_log; // ------------------- @@ -93,9 +93,9 @@ struct LEVELDB_EXPORT Options { // Control over blocks (user data is stored in a set of blocks, and // a block is the unit of reading from disk). - // If non-NULL, use the specified cache for blocks. - // If NULL, leveldb will automatically create and use an 8MB internal cache. - // Default: NULL + // If non-null, use the specified cache for blocks. + // If null, leveldb will automatically create and use an 8MB internal cache. + // Default: nullptr Cache* block_cache; // Approximate size of user data packed per block. Note that the @@ -147,11 +147,11 @@ struct LEVELDB_EXPORT Options { // Default: currently false, but may become true later. bool reuse_logs; - // If non-NULL, use the specified filter policy to reduce disk reads. + // If non-null, use the specified filter policy to reduce disk reads. // Many applications will benefit from passing the result of // NewBloomFilterPolicy() here. // - // Default: NULL + // Default: nullptr const FilterPolicy* filter_policy; // Create an Options object with default values for all fields. @@ -170,17 +170,17 @@ struct LEVELDB_EXPORT ReadOptions { // Default: true bool fill_cache; - // If "snapshot" is non-NULL, read as of the supplied snapshot + // If "snapshot" is non-null, read as of the supplied snapshot // (which must belong to the DB that is being read and which must - // not have been released). If "snapshot" is NULL, use an implicit + // not have been released). If "snapshot" is null, use an implicit // snapshot of the state at the beginning of this read operation. - // Default: NULL + // Default: nullptr const Snapshot* snapshot; ReadOptions() : verify_checksums(false), fill_cache(true), - snapshot(NULL) { + snapshot(nullptr) { } }; diff --git a/include/leveldb/status.h b/include/leveldb/status.h index 42ad4bb..39d692d 100644 --- a/include/leveldb/status.h +++ b/include/leveldb/status.h @@ -22,7 +22,7 @@ namespace leveldb { class LEVELDB_EXPORT Status { public: // Create a success status. - Status() : state_(NULL) { } + Status() : state_(nullptr) { } ~Status() { delete[] state_; } // Copy the specified status. @@ -50,7 +50,7 @@ class LEVELDB_EXPORT Status { } // Returns true iff the status indicates success. - bool ok() const { return (state_ == NULL); } + bool ok() const { return (state_ == nullptr); } // Returns true iff the status indicates a NotFound error.
bool IsNotFound() const { return code() == kNotFound; } @@ -72,7 +72,7 @@ class LEVELDB_EXPORT Status { std::string ToString() const; private: - // OK status has a NULL state_. Otherwise, state_ is a new[] array + // OK status has a null state_. Otherwise, state_ is a new[] array // of the following form: // state_[0..3] == length of message // state_[4] == code @@ -89,7 +89,7 @@ class LEVELDB_EXPORT Status { }; Code code() const { - return (state_ == NULL) ? kOk : static_cast<Code>(state_[4]); + return (state_ == nullptr) ? kOk : static_cast<Code>(state_[4]); } Status(Code code, const Slice& msg, const Slice& msg2); @@ -97,14 +97,14 @@ }; inline Status::Status(const Status& s) { - state_ = (s.state_ == NULL) ? NULL : CopyState(s.state_); + state_ = (s.state_ == nullptr) ? nullptr : CopyState(s.state_); } inline void Status::operator=(const Status& s) { // The following condition catches both aliasing (when this == &s), // and the common case where both s and *this are ok. if (state_ != s.state_) { delete[] state_; - state_ = (s.state_ == NULL) ? NULL : CopyState(s.state_); + state_ = (s.state_ == nullptr) ? nullptr : CopyState(s.state_); } } diff --git a/include/leveldb/table.h b/include/leveldb/table.h index 83078d2..e9f6641 100644 --- a/include/leveldb/table.h +++ b/include/leveldb/table.h @@ -31,7 +31,7 @@ class LEVELDB_EXPORT Table { // If successful, returns ok and sets "*table" to the newly opened // table. The client should delete "*table" when no longer needed. // If there was an error while initializing the table, sets "*table" - // to NULL and returns a non-ok status. Does not take ownership of + // to nullptr and returns a non-ok status. Does not take ownership of // "*source", but the client must ensure that "source" remains live // for the duration of the returned table's lifetime. // diff --git a/port/port_posix.cc b/port/port_posix.cc index 30e8007..04095bb 100644 --- a/port/port_posix.cc +++ b/port/port_posix.cc @@ -18,7 +18,7 @@ static void PthreadCall(const char* label, int result) { } } -Mutex::Mutex() { PthreadCall("init mutex", pthread_mutex_init(&mu_, NULL)); } +Mutex::Mutex() { PthreadCall("init mutex", pthread_mutex_init(&mu_, nullptr)); } Mutex::~Mutex() { PthreadCall("destroy mutex", pthread_mutex_destroy(&mu_)); } @@ -28,7 +28,7 @@ void Mutex::Unlock() { PthreadCall("unlock", pthread_mutex_unlock(&mu_)); } CondVar::CondVar(Mutex* mu) : mu_(mu) { - PthreadCall("init cv", pthread_cond_init(&cv_, NULL)); + PthreadCall("init cv", pthread_cond_init(&cv_, nullptr)); } CondVar::~CondVar() { PthreadCall("destroy cv", pthread_cond_destroy(&cv_)); } diff --git a/table/block.cc b/table/block.cc index 43e402c..6fdfdea 100644 --- a/table/block.cc +++ b/table/block.cc @@ -48,13 +48,13 @@ Block::~Block() { // and the length of the value in "*shared", "*non_shared", and // "*value_length", respectively. Will not dereference past "limit". // -// If any errors are detected, returns NULL. Otherwise, returns a +// If any errors are detected, returns nullptr. Otherwise, returns a // pointer to the key delta (just past the three decoded values).
static inline const char* DecodeEntry(const char* p, const char* limit, uint32_t* shared, uint32_t* non_shared, uint32_t* value_length) { - if (limit - p < 3) return NULL; + if (limit - p < 3) return nullptr; *shared = reinterpret_cast<const unsigned char*>(p)[0]; *non_shared = reinterpret_cast<const unsigned char*>(p)[1]; *value_length = reinterpret_cast<const unsigned char*>(p)[2]; @@ -62,13 +62,13 @@ static inline const char* DecodeEntry(const char* p, const char* limit, // Fast path: all three values are encoded in one byte each p += 3; } else { - if ((p = GetVarint32Ptr(p, limit, shared)) == NULL) return NULL; - if ((p = GetVarint32Ptr(p, limit, non_shared)) == NULL) return NULL; - if ((p = GetVarint32Ptr(p, limit, value_length)) == NULL) return NULL; + if ((p = GetVarint32Ptr(p, limit, shared)) == nullptr) return nullptr; + if ((p = GetVarint32Ptr(p, limit, non_shared)) == nullptr) return nullptr; + if ((p = GetVarint32Ptr(p, limit, value_length)) == nullptr) return nullptr; } if (static_cast<uint32_t>(limit - p) < (*non_shared + *value_length)) { - return NULL; + return nullptr; } return p; } @@ -174,7 +174,7 @@ class Block::Iter : public Iterator { const char* key_ptr = DecodeEntry(data_ + region_offset, data_ + restarts_, &shared, &non_shared, &value_length); - if (key_ptr == NULL || (shared != 0)) { + if (key_ptr == nullptr || (shared != 0)) { CorruptionError(); return; } @@ -237,7 +237,7 @@ class Block::Iter : public Iterator { // Decode next entry uint32_t shared, non_shared, value_length; p = DecodeEntry(p, limit, &shared, &non_shared, &value_length); - if (p == NULL || key_.size() < shared) { + if (p == nullptr || key_.size() < shared) { CorruptionError(); return false; } else { diff --git a/table/filter_block.cc b/table/filter_block.cc index 1ed5134..ce0aa04 100644 --- a/table/filter_block.cc +++ b/table/filter_block.cc @@ -78,8 +78,8 @@ void FilterBlockBuilder::GenerateFilter() { FilterBlockReader::FilterBlockReader(const FilterPolicy* policy, const Slice& contents) : policy_(policy), - data_(NULL), - offset_(NULL), + data_(nullptr), + offset_(nullptr), num_(0), base_lg_(0) { size_t n = contents.size(); diff --git a/table/iterator.cc b/table/iterator.cc index 3d1c87f..aff0e59 100644 --- a/table/iterator.cc +++ b/table/iterator.cc @@ -7,14 +7,14 @@ namespace leveldb { Iterator::Iterator() { - cleanup_.function = NULL; - cleanup_.next = NULL; + cleanup_.function = nullptr; + cleanup_.next = nullptr; } Iterator::~Iterator() { - if (cleanup_.function != NULL) { + if (cleanup_.function != nullptr) { (*cleanup_.function)(cleanup_.arg1, cleanup_.arg2); - for (Cleanup* c = cleanup_.next; c != NULL; ) { + for (Cleanup* c = cleanup_.next; c != nullptr; ) { (*c->function)(c->arg1, c->arg2); Cleanup* next = c->next; delete c; @@ -24,9 +24,9 @@ Iterator::~Iterator() { } void Iterator::RegisterCleanup(CleanupFunction func, void* arg1, void* arg2) { - assert(func != NULL); + assert(func != nullptr); Cleanup* c; - if (cleanup_.function == NULL) { + if (cleanup_.function == nullptr) { c = &cleanup_; } else { c = new Cleanup; diff --git a/table/iterator_wrapper.h b/table/iterator_wrapper.h index f410c3f..f1814ca 100644 --- a/table/iterator_wrapper.h +++ b/table/iterator_wrapper.h @@ -16,8 +16,8 @@ namespace leveldb { // cache locality.
class IteratorWrapper { public: - IteratorWrapper(): iter_(NULL), valid_(false) { } - explicit IteratorWrapper(Iterator* iter): iter_(NULL) { + IteratorWrapper(): iter_(nullptr), valid_(false) { } + explicit IteratorWrapper(Iterator* iter): iter_(nullptr) { Set(iter); } ~IteratorWrapper() { delete iter_; } @@ -28,7 +28,7 @@ class IteratorWrapper { void Set(Iterator* iter) { delete iter_; iter_ = iter; - if (iter_ == NULL) { + if (iter_ == nullptr) { valid_ = false; } else { Update(); @@ -40,7 +40,7 @@ class IteratorWrapper { bool Valid() const { return valid_; } Slice key() const { assert(Valid()); return key_; } Slice value() const { assert(Valid()); return iter_->value(); } - // Methods below require iter() != NULL + // Methods below require iter() != nullptr Status status() const { assert(iter_); return iter_->status(); } void Next() { assert(iter_); iter_->Next(); Update(); } void Prev() { assert(iter_); iter_->Prev(); Update(); } diff --git a/table/merger.cc b/table/merger.cc index 2dde4dc..e079680 100644 --- a/table/merger.cc +++ b/table/merger.cc @@ -17,7 +17,7 @@ class MergingIterator : public Iterator { : comparator_(comparator), children_(new IteratorWrapper[n]), n_(n), - current_(NULL), + current_(nullptr), direction_(kForward) { for (int i = 0; i < n; i++) { children_[i].Set(children[i]); @@ -29,7 +29,7 @@ class MergingIterator : public Iterator { } virtual bool Valid() const { - return (current_ != NULL); + return (current_ != nullptr); } virtual void SeekToFirst() { @@ -153,11 +153,11 @@ class MergingIterator : public Iterator { }; void MergingIterator::FindSmallest() { - IteratorWrapper* smallest = NULL; + IteratorWrapper* smallest = nullptr; for (int i = 0; i < n_; i++) { IteratorWrapper* child = &children_[i]; if (child->Valid()) { - if (smallest == NULL) { + if (smallest == nullptr) { smallest = child; } else if (comparator_->Compare(child->key(), smallest->key()) < 0) { smallest = child; @@ -168,11 +168,11 @@ void MergingIterator::FindSmallest() { } void MergingIterator::FindLargest() { - IteratorWrapper* largest = NULL; + IteratorWrapper* largest = nullptr; for (int i = n_-1; i >= 0; i--) { IteratorWrapper* child = &children_[i]; if (child->Valid()) { - if (largest == NULL) { + if (largest == nullptr) { largest = child; } else if (comparator_->Compare(child->key(), largest->key()) > 0) { largest = child; diff --git a/table/table.cc b/table/table.cc index ff73cee..8e737e1 100644 --- a/table/table.cc +++ b/table/table.cc @@ -39,7 +39,7 @@ Status Table::Open(const Options& options, RandomAccessFile* file, uint64_t size, Table** table) { - *table = NULL; + *table = nullptr; if (size < Footer::kEncodedLength) { return Status::Corruption("file is too short to be an sstable"); } @@ -74,8 +74,8 @@ Status Table::Open(const Options& options, rep->metaindex_handle = footer.metaindex_handle(); rep->index_block = index_block; rep->cache_id = (options.block_cache ? 
options.block_cache->NewId() : 0); - rep->filter_data = NULL; - rep->filter = NULL; + rep->filter_data = nullptr; + rep->filter = nullptr; *table = new Table(rep); (*table)->ReadMeta(footer); } @@ -84,7 +84,7 @@ Status Table::Open(const Options& options, } void Table::ReadMeta(const Footer& footer) { - if (rep_->options.filter_policy == NULL) { + if (rep_->options.filter_policy == nullptr) { return; // Do not need any metadata } @@ -161,8 +161,8 @@ Iterator* Table::BlockReader(void* arg, const Slice& index_value) { Table* table = reinterpret_cast<Table*>(arg); Cache* block_cache = table->rep_->options.block_cache; - Block* block = NULL; - Cache::Handle* cache_handle = NULL; + Block* block = nullptr; + Cache::Handle* cache_handle = nullptr; BlockHandle handle; Slice input = index_value; @@ -172,13 +172,13 @@ Iterator* Table::BlockReader(void* arg, if (s.ok()) { BlockContents contents; - if (block_cache != NULL) { + if (block_cache != nullptr) { char cache_key_buffer[16]; EncodeFixed64(cache_key_buffer, table->rep_->cache_id); EncodeFixed64(cache_key_buffer+8, handle.offset()); Slice key(cache_key_buffer, sizeof(cache_key_buffer)); cache_handle = block_cache->Lookup(key); - if (cache_handle != NULL) { + if (cache_handle != nullptr) { block = reinterpret_cast<Block*>(block_cache->Value(cache_handle)); } else { s = ReadBlock(table->rep_->file, options, handle, &contents); @@ -199,10 +199,10 @@ Iterator* Table::BlockReader(void* arg, } Iterator* iter; - if (block != NULL) { + if (block != nullptr) { iter = block->NewIterator(table->rep_->options.comparator); - if (cache_handle == NULL) { - iter->RegisterCleanup(&DeleteBlock, block, NULL); + if (cache_handle == nullptr) { + iter->RegisterCleanup(&DeleteBlock, block, nullptr); } else { iter->RegisterCleanup(&ReleaseBlock, block_cache, cache_handle); } @@ -228,7 +228,7 @@ Status Table::InternalGet(const ReadOptions& options, const Slice& k, Slice handle_value = iiter->value(); FilterBlockReader* filter = rep_->filter; BlockHandle handle; - if (filter != NULL && + if (filter != nullptr && handle.DecodeFrom(&handle_value).ok() && !filter->KeyMayMatch(handle.offset(), k)) { // Not found diff --git a/table/table_builder.cc b/table/table_builder.cc index 62002c8..444d4f9 100644 --- a/table/table_builder.cc +++ b/table/table_builder.cc @@ -53,7 +53,7 @@ struct TableBuilder::Rep { index_block(&index_block_options), num_entries(0), closed(false), - filter_block(opt.filter_policy == NULL ? NULL + filter_block(opt.filter_policy == nullptr ?
nullptr : new FilterBlockBuilder(opt.filter_policy)), pending_index_entry(false) { index_block_options.block_restart_interval = 1; @@ -62,7 +62,7 @@ struct TableBuilder::Rep { TableBuilder::TableBuilder(const Options& options, WritableFile* file) : rep_(new Rep(options, file)) { - if (rep_->filter_block != NULL) { + if (rep_->filter_block != nullptr) { rep_->filter_block->StartBlock(0); } } @@ -106,7 +106,7 @@ void TableBuilder::Add(const Slice& key, const Slice& value) { r->pending_index_entry = false; } - if (r->filter_block != NULL) { + if (r->filter_block != nullptr) { r->filter_block->AddKey(key); } @@ -131,7 +131,7 @@ void TableBuilder::Flush() { r->pending_index_entry = true; r->status = r->file->Flush(); } - if (r->filter_block != NULL) { + if (r->filter_block != nullptr) { r->filter_block->StartBlock(r->offset); } } @@ -205,7 +205,7 @@ Status TableBuilder::Finish() { BlockHandle filter_block_handle, metaindex_block_handle, index_block_handle; // Write filter block - if (ok() && r->filter_block != NULL) { + if (ok() && r->filter_block != nullptr) { WriteRawBlock(r->filter_block->Finish(), kNoCompression, &filter_block_handle); } @@ -213,7 +213,7 @@ Status TableBuilder::Finish() { // Write metaindex block if (ok()) { BlockBuilder meta_index_block(&r->options); - if (r->filter_block != NULL) { + if (r->filter_block != nullptr) { // Add mapping from "filter.Name" to location of filter data std::string key = "filter."; key.append(r->options.filter_policy->Name()); diff --git a/table/table_test.cc b/table/table_test.cc index abf6e24..e47db3d 100644 --- a/table/table_test.cc +++ b/table/table_test.cc @@ -172,7 +172,7 @@ class Constructor { virtual const KVMap& data() { return data_; } - virtual DB* db() const { return NULL; } // Overridden in DBConstructor + virtual DB* db() const { return nullptr; } // Overridden in DBConstructor private: KVMap data_; @@ -183,13 +183,13 @@ class BlockConstructor: public Constructor { explicit BlockConstructor(const Comparator* cmp) : Constructor(cmp), comparator_(cmp), - block_(NULL) { } + block_(nullptr) { } ~BlockConstructor() { delete block_; } virtual Status FinishImpl(const Options& options, const KVMap& data) { delete block_; - block_ = NULL; + block_ = nullptr; BlockBuilder builder(&options); for (KVMap::const_iterator it = data.begin(); @@ -222,7 +222,7 @@ class TableConstructor: public Constructor { public: TableConstructor(const Comparator* cmp) : Constructor(cmp), - source_(NULL), table_(NULL) { + source_(nullptr), table_(nullptr) { } ~TableConstructor() { Reset(); @@ -262,8 +262,8 @@ class TableConstructor: public Constructor { void Reset() { delete table_; delete source_; - table_ = NULL; - source_ = NULL; + table_ = nullptr; + source_ = nullptr; } StringSource* source_; @@ -351,7 +351,7 @@ class DBConstructor: public Constructor { explicit DBConstructor(const Comparator* cmp) : Constructor(cmp), comparator_(cmp) { - db_ = NULL; + db_ = nullptr; NewDB(); } ~DBConstructor() { @@ -359,7 +359,7 @@ class DBConstructor: public Constructor { } virtual Status FinishImpl(const Options& options, const KVMap& data) { delete db_; - db_ = NULL; + db_ = nullptr; NewDB(); for (KVMap::const_iterator it = data.begin(); it != data.end(); @@ -436,11 +436,11 @@ static const int kNumTestArgs = sizeof(kTestArgList) / sizeof(kTestArgList[0]); class Harness { public: - Harness() : constructor_(NULL) { } + Harness() : constructor_(nullptr) { } void Init(const TestArgs& args) { delete constructor_; - constructor_ = NULL; + constructor_ = nullptr; options_ = 
Options(); options_.block_restart_interval = args.restart_interval; @@ -636,7 +636,7 @@ class Harness { } } - // Returns NULL if not running against a DB + // Returns nullptr if not running against a DB DB* db() const { return constructor_->db(); } private: diff --git a/table/two_level_iterator.cc b/table/two_level_iterator.cc index 7822eba..4e6f420 100644 --- a/table/two_level_iterator.cc +++ b/table/two_level_iterator.cc @@ -46,7 +46,7 @@ class TwoLevelIterator: public Iterator { // It'd be nice if status() returned a const Status& instead of a Status if (!index_iter_.status().ok()) { return index_iter_.status(); - } else if (data_iter_.iter() != NULL && !data_iter_.status().ok()) { + } else if (data_iter_.iter() != nullptr && !data_iter_.status().ok()) { return data_iter_.status(); } else { return status_; @@ -67,8 +67,8 @@ class TwoLevelIterator: public Iterator { const ReadOptions options_; Status status_; IteratorWrapper index_iter_; - IteratorWrapper data_iter_; // May be NULL - // If data_iter_ is non-NULL, then "data_block_handle_" holds the + IteratorWrapper data_iter_; // May be nullptr + // If data_iter_ is non-null, then "data_block_handle_" holds the // "index_value" passed to block_function_ to create the data_iter_. std::string data_block_handle_; }; @@ -82,7 +82,7 @@ TwoLevelIterator::TwoLevelIterator( arg_(arg), options_(options), index_iter_(index_iter), - data_iter_(NULL) { + data_iter_(nullptr) { } TwoLevelIterator::~TwoLevelIterator() { @@ -91,21 +91,21 @@ TwoLevelIterator::~TwoLevelIterator() { void TwoLevelIterator::Seek(const Slice& target) { index_iter_.Seek(target); InitDataBlock(); - if (data_iter_.iter() != NULL) data_iter_.Seek(target); + if (data_iter_.iter() != nullptr) data_iter_.Seek(target); SkipEmptyDataBlocksForward(); } void TwoLevelIterator::SeekToFirst() { index_iter_.SeekToFirst(); InitDataBlock(); - if (data_iter_.iter() != NULL) data_iter_.SeekToFirst(); + if (data_iter_.iter() != nullptr) data_iter_.SeekToFirst(); SkipEmptyDataBlocksForward(); } void TwoLevelIterator::SeekToLast() { index_iter_.SeekToLast(); InitDataBlock(); - if (data_iter_.iter() != NULL) data_iter_.SeekToLast(); + if (data_iter_.iter() != nullptr) data_iter_.SeekToLast(); SkipEmptyDataBlocksBackward(); } @@ -123,42 +123,42 @@ void TwoLevelIterator::Prev() { void TwoLevelIterator::SkipEmptyDataBlocksForward() { - while (data_iter_.iter() == NULL || !data_iter_.Valid()) { + while (data_iter_.iter() == nullptr || !data_iter_.Valid()) { // Move to next block if (!index_iter_.Valid()) { - SetDataIterator(NULL); + SetDataIterator(nullptr); return; } index_iter_.Next(); InitDataBlock(); - if (data_iter_.iter() != NULL) data_iter_.SeekToFirst(); + if (data_iter_.iter() != nullptr) data_iter_.SeekToFirst(); } } void TwoLevelIterator::SkipEmptyDataBlocksBackward() { - while (data_iter_.iter() == NULL || !data_iter_.Valid()) { + while (data_iter_.iter() == nullptr || !data_iter_.Valid()) { // Move to next block if (!index_iter_.Valid()) { - SetDataIterator(NULL); + SetDataIterator(nullptr); return; } index_iter_.Prev(); InitDataBlock(); - if (data_iter_.iter() != NULL) data_iter_.SeekToLast(); + if (data_iter_.iter() != nullptr) data_iter_.SeekToLast(); } } void TwoLevelIterator::SetDataIterator(Iterator* data_iter) { - if (data_iter_.iter() != NULL) SaveError(data_iter_.status()); + if (data_iter_.iter() != nullptr) SaveError(data_iter_.status()); data_iter_.Set(data_iter); } void TwoLevelIterator::InitDataBlock() { if (!index_iter_.Valid()) { - SetDataIterator(NULL); + 
SetDataIterator(nullptr); } else { Slice handle = index_iter_.value(); - if (data_iter_.iter() != NULL && handle.compare(data_block_handle_) == 0) { + if (data_iter_.iter() != nullptr && handle.compare(data_block_handle_) == 0) { // data_iter_ is already constructed with this iterator, so // no need to change anything } else { diff --git a/util/arena.cc b/util/arena.cc index 7407821..a0338bf 100644 --- a/util/arena.cc +++ b/util/arena.cc @@ -10,7 +10,7 @@ namespace leveldb { static const int kBlockSize = 4096; Arena::Arena() : memory_usage_(0) { - alloc_ptr_ = NULL; // First allocation will allocate a block + alloc_ptr_ = nullptr; // First allocation will allocate a block alloc_bytes_remaining_ = 0; } diff --git a/util/cache.cc b/util/cache.cc index 10b7103..7cc2cea 100644 --- a/util/cache.cc +++ b/util/cache.cc @@ -69,7 +69,7 @@ struct LRUHandle { // 4.4.3's builtin hashtable. class HandleTable { public: - HandleTable() : length_(0), elems_(0), list_(NULL) { Resize(); } + HandleTable() : length_(0), elems_(0), list_(nullptr) { Resize(); } ~HandleTable() { delete[] list_; } LRUHandle* Lookup(const Slice& key, uint32_t hash) { @@ -79,9 +79,9 @@ class HandleTable { LRUHandle* Insert(LRUHandle* h) { LRUHandle** ptr = FindPointer(h->key(), h->hash); LRUHandle* old = *ptr; - h->next_hash = (old == NULL ? NULL : old->next_hash); + h->next_hash = (old == nullptr ? nullptr : old->next_hash); *ptr = h; - if (old == NULL) { + if (old == nullptr) { ++elems_; if (elems_ > length_) { // Since each cache entry is fairly large, we aim for a small @@ -95,7 +95,7 @@ class HandleTable { LRUHandle* Remove(const Slice& key, uint32_t hash) { LRUHandle** ptr = FindPointer(key, hash); LRUHandle* result = *ptr; - if (result != NULL) { + if (result != nullptr) { *ptr = result->next_hash; --elems_; } @@ -114,7 +114,7 @@ class HandleTable { // pointer to the trailing slot in the corresponding linked list. LRUHandle** FindPointer(const Slice& key, uint32_t hash) { LRUHandle** ptr = &list_[hash & (length_ - 1)]; - while (*ptr != NULL && + while (*ptr != nullptr && ((*ptr)->hash != hash || key != (*ptr)->key())) { ptr = &(*ptr)->next_hash; } @@ -131,7 +131,7 @@ class HandleTable { uint32_t count = 0; for (uint32_t i = 0; i < length_; i++) { LRUHandle* h = list_[i]; - while (h != NULL) { + while (h != nullptr) { LRUHandle* next = h->next_hash; uint32_t hash = h->hash; LRUHandle** ptr = &new_list[hash & (new_length - 1)]; @@ -255,7 +255,7 @@ void LRUCache::LRU_Append(LRUHandle* list, LRUHandle* e) { Cache::Handle* LRUCache::Lookup(const Slice& key, uint32_t hash) { MutexLock l(&mutex_); LRUHandle* e = table_.Lookup(key, hash); - if (e != NULL) { + if (e != nullptr) { Ref(e); } return reinterpret_cast<Cache::Handle*>(e); @@ -290,7 +290,7 @@ Cache::Handle* LRUCache::Insert( FinishErase(table_.Insert(e)); } else { // don't cache. (capacity_==0 is supported and turns off caching.) // next is read by key() in an assert, so it must be initialized - e->next = NULL; + e->next = nullptr; } while (usage_ > capacity_ && lru_.next != &lru_) { LRUHandle* old = lru_.next; @@ -304,17 +304,17 @@ Cache::Handle* LRUCache::Insert( return reinterpret_cast<Cache::Handle*>(e); } -// If e != NULL, finish removing *e from the cache; it has already been removed -// from the hash table. Return whether e != NULL. Requires mutex_ held. +// If e != nullptr, finish removing *e from the cache; it has already been +// removed from the hash table. Return whether e != nullptr.
diff --git a/util/cache_test.cc b/util/cache_test.cc
index 246ab8e..8647feb 100644
--- a/util/cache_test.cc
+++ b/util/cache_test.cc
@@ -47,8 +47,8 @@ class CacheTest {
 
   int Lookup(int key) {
     Cache::Handle* handle = cache_->Lookup(EncodeKey(key));
-    const int r = (handle == NULL) ? -1 : DecodeValue(cache_->Value(handle));
-    if (handle != NULL) {
+    const int r = (handle == nullptr) ? -1 : DecodeValue(cache_->Value(handle));
+    if (handle != nullptr) {
       cache_->Release(handle);
     }
     return r;
diff --git a/util/coding.cc b/util/coding.cc
index 21e3186..9e72613 100644
--- a/util/coding.cc
+++ b/util/coding.cc
@@ -125,14 +125,14 @@ const char* GetVarint32PtrFallback(const char* p,
       return reinterpret_cast<const char*>(p);
     }
   }
-  return NULL;
+  return nullptr;
 }
 
 bool GetVarint32(Slice* input, uint32_t* value) {
   const char* p = input->data();
   const char* limit = p + input->size();
   const char* q = GetVarint32Ptr(p, limit, value);
-  if (q == NULL) {
+  if (q == nullptr) {
     return false;
   } else {
     *input = Slice(q, limit - q);
@@ -154,14 +154,14 @@ const char* GetVarint64Ptr(const char* p, const char* limit, uint64_t* value) {
       return reinterpret_cast<const char*>(p);
     }
   }
-  return NULL;
+  return nullptr;
 }
 
 bool GetVarint64(Slice* input, uint64_t* value) {
   const char* p = input->data();
   const char* limit = p + input->size();
   const char* q = GetVarint64Ptr(p, limit, value);
-  if (q == NULL) {
+  if (q == nullptr) {
     return false;
   } else {
     *input = Slice(q, limit - q);
@@ -173,8 +173,8 @@ const char* GetLengthPrefixedSlice(const char* p, const char* limit,
                                    Slice* result) {
   uint32_t len;
   p = GetVarint32Ptr(p, limit, &len);
-  if (p == NULL) return NULL;
-  if (p + len > limit) return NULL;
+  if (p == nullptr) return nullptr;
+  if (p + len > limit) return nullptr;
   *result = Slice(p, len);
   return p + len;
 }
diff --git a/util/coding.h b/util/coding.h
index 1fb3d66..f0fa2cb 100644
--- a/util/coding.h
+++ b/util/coding.h
@@ -35,7 +35,7 @@ bool GetLengthPrefixedSlice(Slice* input, Slice* result);
 
 // Pointer-based variants of GetVarint...  These either store a value
 // in *v and return a pointer just past the parsed value, or return
-// NULL on error.  These routines only look at bytes in the range
+// nullptr on error.  These routines only look at bytes in the range
 // [p..limit-1]
 const char* GetVarint32Ptr(const char* p, const char* limit, uint32_t* v);
 const char* GetVarint64Ptr(const char* p, const char* limit, uint64_t* v);
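The coding.cc routines above fix the parser contract that the tests below exercise: on success, return the pointer just past the value; on truncation or overflow, return nullptr. A self-contained sketch of the same base-128 decoder shape (simplified; the library's real implementation adds a one-byte fast path):

    #include <cstdint>

    // Decodes a base-128 varint from [p, limit); returns the byte just past
    // the value, or nullptr if the input is truncated or exceeds 5 bytes.
    const char* DecodeVarint32(const char* p, const char* limit, uint32_t* v) {
      uint32_t result = 0;
      for (uint32_t shift = 0; shift <= 28 && p < limit; shift += 7) {
        uint32_t byte = static_cast<unsigned char>(*p);
        p++;
        if (byte & 128) {
          result |= ((byte & 127) << shift);  // more bytes follow
        } else {
          result |= (byte << shift);          // final byte
          *v = result;
          return p;
        }
      }
      return nullptr;  // ran off limit, or a sixth continuation byte: error
    }

    int main() {
      const char encoded[] = {'\x96', '\x01'};  // 150 encoded as two bytes
      uint32_t v = 0;
      const char* end = DecodeVarint32(encoded, encoded + 2, &v);
      return (end != nullptr && v == 150) ? 0 : 1;
    }

Callers then test the returned pointer against nullptr exactly as GetVarint32 does.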
diff --git a/util/coding_test.cc b/util/coding_test.cc
index 22f6adc..d315e19 100644
--- a/util/coding_test.cc
+++ b/util/coding_test.cc
@@ -89,7 +89,7 @@ TEST(Coding, Varint32) {
     uint32_t actual;
     const char* start = p;
     p = GetVarint32Ptr(p, limit, &actual);
-    ASSERT_TRUE(p != NULL);
+    ASSERT_TRUE(p != nullptr);
     ASSERT_EQ(expected, actual);
     ASSERT_EQ(VarintLength(actual), p - start);
   }
@@ -124,19 +124,18 @@ TEST(Coding, Varint64) {
     uint64_t actual;
     const char* start = p;
     p = GetVarint64Ptr(p, limit, &actual);
-    ASSERT_TRUE(p != NULL);
+    ASSERT_TRUE(p != nullptr);
     ASSERT_EQ(values[i], actual);
     ASSERT_EQ(VarintLength(actual), p - start);
   }
   ASSERT_EQ(p, limit);
-
 }
 
 TEST(Coding, Varint32Overflow) {
   uint32_t result;
   std::string input("\x81\x82\x83\x84\x85\x11");
   ASSERT_TRUE(GetVarint32Ptr(input.data(), input.data() + input.size(), &result)
-              == NULL);
+              == nullptr);
 }
 
 TEST(Coding, Varint32Truncation) {
@@ -145,9 +144,10 @@ TEST(Coding, Varint32Truncation) {
   PutVarint32(&s, large_value);
   uint32_t result;
   for (size_t len = 0; len < s.size() - 1; len++) {
-    ASSERT_TRUE(GetVarint32Ptr(s.data(), s.data() + len, &result) == NULL);
+    ASSERT_TRUE(GetVarint32Ptr(s.data(), s.data() + len, &result) == nullptr);
   }
-  ASSERT_TRUE(GetVarint32Ptr(s.data(), s.data() + s.size(), &result) != NULL);
+  ASSERT_TRUE(
+      GetVarint32Ptr(s.data(), s.data() + s.size(), &result) != nullptr);
   ASSERT_EQ(large_value, result);
 }
 
@@ -155,7 +155,7 @@ TEST(Coding, Varint64Overflow) {
   uint64_t result;
   std::string input("\x81\x82\x83\x84\x85\x81\x82\x83\x84\x85\x11");
   ASSERT_TRUE(GetVarint64Ptr(input.data(), input.data() + input.size(), &result)
-              == NULL);
+              == nullptr);
 }
 
 TEST(Coding, Varint64Truncation) {
@@ -164,9 +164,10 @@ TEST(Coding, Varint64Truncation) {
   PutVarint64(&s, large_value);
   uint64_t result;
   for (size_t len = 0; len < s.size() - 1; len++) {
-    ASSERT_TRUE(GetVarint64Ptr(s.data(), s.data() + len, &result) == NULL);
+    ASSERT_TRUE(GetVarint64Ptr(s.data(), s.data() + len, &result) == nullptr);
   }
-  ASSERT_TRUE(GetVarint64Ptr(s.data(), s.data() + s.size(), &result) != NULL);
+  ASSERT_TRUE(
+      GetVarint64Ptr(s.data(), s.data() + s.size(), &result) != nullptr);
   ASSERT_EQ(large_value, result);
 }
 
diff --git a/util/env.cc b/util/env.cc
index c58a082..40a1363 100644
--- a/util/env.cc
+++ b/util/env.cc
@@ -29,7 +29,7 @@ FileLock::~FileLock() {
 }
 
 void Log(Logger* info_log, const char* format, ...) {
-  if (info_log != NULL) {
+  if (info_log != nullptr) {
     va_list ap;
     va_start(ap, format);
     info_log->Logv(format, ap);
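env.cc's Log shows the other common nullptr convention in this codebase: a null Logger makes the call a silent no-op, so call sites never need their own guard. A runnable sketch of that shape under assumed names (StderrLogger is illustrative; the two-argument Logv form mirrors LevelDB's Logger interface):

    #include <cstdarg>
    #include <cstdio>

    class Logger {
     public:
      virtual ~Logger() = default;
      virtual void Logv(const char* format, std::va_list ap) = 0;
    };

    // A nullptr info_log simply drops the message; callers need no guard.
    void Log(Logger* info_log, const char* format, ...) {
      if (info_log != nullptr) {
        std::va_list ap;
        va_start(ap, format);
        info_log->Logv(format, ap);
        va_end(ap);
      }
    }

    class StderrLogger : public Logger {
     public:
      void Logv(const char* format, std::va_list ap) override {
        std::vfprintf(stderr, format, ap);
        std::fputc('\n', stderr);
      }
    };

    int main() {
      Log(nullptr, "dropped %d", 1);  // safe no-op
      StderrLogger logger;
      Log(&logger, "compaction at level %d", 0);
    }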
diff --git a/util/env_posix.cc b/util/env_posix.cc
index 4bfaf6c..e758d5f 100644
--- a/util/env_posix.cc
+++ b/util/env_posix.cc
@@ -282,7 +282,7 @@ class PosixWritableFile : public WritableFile {
     const char* sep = strrchr(f, '/');
     Slice basename;
     std::string dir;
-    if (sep == NULL) {
+    if (sep == nullptr) {
       dir = ".";
       basename = f;
     } else {
@@ -390,7 +390,7 @@ class PosixEnv : public Env {
                                    SequentialFile** result) {
     int fd = open(fname.c_str(), O_RDONLY);
     if (fd < 0) {
-      *result = NULL;
+      *result = nullptr;
       return PosixError(fname, errno);
     } else {
       *result = new PosixSequentialFile(fname, fd);
@@ -400,7 +400,7 @@ class PosixEnv : public Env {
 
   virtual Status NewRandomAccessFile(const std::string& fname,
                                      RandomAccessFile** result) {
-    *result = NULL;
+    *result = nullptr;
     Status s;
     int fd = open(fname.c_str(), O_RDONLY);
     if (fd < 0) {
@@ -409,7 +409,7 @@ class PosixEnv : public Env {
       uint64_t size;
       s = GetFileSize(fname, &size);
       if (s.ok()) {
-        void* base = mmap(NULL, size, PROT_READ, MAP_SHARED, fd, 0);
+        void* base = mmap(nullptr, size, PROT_READ, MAP_SHARED, fd, 0);
         if (base != MAP_FAILED) {
           *result = new PosixMmapReadableFile(fname, base, size, &mmap_limit_);
         } else {
@@ -431,7 +431,7 @@ class PosixEnv : public Env {
     Status s;
     int fd = open(fname.c_str(), O_TRUNC | O_WRONLY | O_CREAT, 0644);
     if (fd < 0) {
-      *result = NULL;
+      *result = nullptr;
       s = PosixError(fname, errno);
     } else {
       *result = new PosixWritableFile(fname, fd);
@@ -444,7 +444,7 @@ class PosixEnv : public Env {
     Status s;
    int fd = open(fname.c_str(), O_APPEND | O_WRONLY | O_CREAT, 0644);
     if (fd < 0) {
-      *result = NULL;
+      *result = nullptr;
       s = PosixError(fname, errno);
     } else {
       *result = new PosixWritableFile(fname, fd);
@@ -460,11 +460,11 @@ class PosixEnv : public Env {
                              std::vector<std::string>* result) {
     result->clear();
     DIR* d = opendir(dir.c_str());
-    if (d == NULL) {
+    if (d == nullptr) {
       return PosixError(dir, errno);
     }
     struct dirent* entry;
-    while ((entry = readdir(d)) != NULL) {
+    while ((entry = readdir(d)) != nullptr) {
       result->push_back(entry->d_name);
     }
     closedir(d);
@@ -516,7 +516,7 @@ class PosixEnv : public Env {
   }
 
   virtual Status LockFile(const std::string& fname, FileLock** lock) {
-    *lock = NULL;
+    *lock = nullptr;
     Status result;
     int fd = open(fname.c_str(), O_RDWR | O_CREAT, 0644);
     if (fd < 0) {
@@ -576,8 +576,8 @@ class PosixEnv : public Env {
 
   virtual Status NewLogger(const std::string& fname, Logger** result) {
     FILE* f = fopen(fname.c_str(), "w");
-    if (f == NULL) {
-      *result = NULL;
+    if (f == nullptr) {
+      *result = nullptr;
       return PosixError(fname, errno);
     } else {
       *result = new PosixLogger(f, &PosixEnv::gettid);
@@ -587,7 +587,7 @@ class PosixEnv : public Env {
 
   virtual uint64_t NowMicros() {
     struct timeval tv;
-    gettimeofday(&tv, NULL);
+    gettimeofday(&tv, nullptr);
     return static_cast<uint64_t>(tv.tv_sec) * 1000000 + tv.tv_usec;
   }
 
@@ -607,7 +607,7 @@ class PosixEnv : public Env {
   void BGThread();
   static void* BGThreadWrapper(void* arg) {
     reinterpret_cast<PosixEnv*>(arg)->BGThread();
-    return NULL;
+    return nullptr;
   }
 
   pthread_mutex_t mu_;
@@ -657,8 +657,8 @@ PosixEnv::PosixEnv()
     : started_bgthread_(false),
      mmap_limit_(MaxMmaps()),
       fd_limit_(MaxOpenFiles()) {
-  PthreadCall("mutex_init", pthread_mutex_init(&mu_, NULL));
-  PthreadCall("cvar_init", pthread_cond_init(&bgsignal_, NULL));
+  PthreadCall("mutex_init", pthread_mutex_init(&mu_, nullptr));
+  PthreadCall("cvar_init", pthread_cond_init(&bgsignal_, nullptr));
 }
 
 void PosixEnv::Schedule(void (*function)(void*), void* arg) {
@@ -669,7 +669,7 @@ void PosixEnv::Schedule(void (*function)(void*), void* arg) {
     started_bgthread_ = true;
     PthreadCall(
         "create thread",
-        pthread_create(&bgthread_, NULL, &PosixEnv::BGThreadWrapper, this));
+        pthread_create(&bgthread_, nullptr, &PosixEnv::BGThreadWrapper, this));
   }
 
   // If the queue is currently empty, the background thread may currently be
@@ -713,7 +713,7 @@ static void* StartThreadWrapper(void* arg) {
   StartThreadState* state = reinterpret_cast<StartThreadState*>(arg);
   state->user_function(state->arg);
   delete state;
-  return NULL;
+  return nullptr;
 }
 
 void PosixEnv::StartThread(void (*function)(void* arg), void* arg) {
@@ -722,7 +722,7 @@ void PosixEnv::StartThread(void (*function)(void* arg), void* arg) {
   state->user_function = function;
   state->arg = arg;
   PthreadCall("start thread",
-              pthread_create(&t, NULL, &StartThreadWrapper, state));
+              pthread_create(&t, nullptr, &StartThreadWrapper, state));
 }
 
 }  // namespace
@@ -732,12 +732,12 @@ static Env* default_env;
 static void InitDefaultEnv() { default_env = new PosixEnv; }
 
 void EnvPosixTestHelper::SetReadOnlyFDLimit(int limit) {
-  assert(default_env == NULL);
+  assert(default_env == nullptr);
   open_read_only_file_limit = limit;
 }
 
 void EnvPosixTestHelper::SetReadOnlyMMapLimit(int limit) {
-  assert(default_env == NULL);
+  assert(default_env == nullptr);
   mmap_limit = limit;
 }
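All the PosixEnv factory methods above share one contract: on failure, the out-parameter is set to nullptr before the error is returned, so a caller can never read an uninitialized pointer. A condensed POSIX-only sketch of that contract (NewSequentialFile and the plain int error code here stand in for the real Status-returning API):

    #include <fcntl.h>
    #include <unistd.h>

    #include <cerrno>
    #include <cstdio>

    struct SequentialFile {
      int fd;
    };

    // On failure the out-parameter is nullptr, never uninitialized, so
    // callers can test *result as well as the return code.
    int NewSequentialFile(const char* fname, SequentialFile** result) {
      int fd = open(fname, O_RDONLY);
      if (fd < 0) {
        *result = nullptr;
        return errno;  // stand-in for PosixError(fname, errno)
      }
      *result = new SequentialFile{fd};
      return 0;
    }

    int main() {
      SequentialFile* file;
      if (int err = NewSequentialFile("/no/such/file", &file)) {
        std::printf("open failed (%d), file=%p\n", err,
                    static_cast<void*>(file));  // prints a null pointer
      }
      if (NewSequentialFile("/etc/hostname", &file) == 0) {
        close(file->fd);
        delete file;
      }
    }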
diff --git a/util/env_posix_test.cc b/util/env_posix_test.cc
index 295f8ae..e28df9a 100644
--- a/util/env_posix_test.cc
+++ b/util/env_posix_test.cc
@@ -32,7 +32,7 @@ TEST(EnvPosixTest, TestOpenOnRead) {
   std::string test_file = test_dir + "/open_on_read.txt";
 
   FILE* f = fopen(test_file.c_str(), "w");
-  ASSERT_TRUE(f != NULL);
+  ASSERT_TRUE(f != nullptr);
   const char kFileData[] = "abcdefghijklmnopqrstuvwxyz";
   fputs(kFileData, f);
   fclose(f);
diff --git a/util/env_test.cc b/util/env_test.cc
index fd89b4c..070109b 100644
--- a/util/env_test.cc
+++ b/util/env_test.cc
@@ -77,14 +77,14 @@ TEST(EnvTest, ReadWrite) {
 }
 
 TEST(EnvTest, RunImmediately) {
-  port::AtomicPointer called (NULL);
+  port::AtomicPointer called(nullptr);
   env_->Schedule(&SetBool, &called);
   env_->SleepForMicroseconds(kDelayMicros);
-  ASSERT_TRUE(called.NoBarrier_Load() != NULL);
+  ASSERT_TRUE(called.NoBarrier_Load() != nullptr);
 }
 
 TEST(EnvTest, RunMany) {
-  port::AtomicPointer last_id (NULL);
+  port::AtomicPointer last_id(nullptr);
 
   struct CB {
     port::AtomicPointer* last_id_ptr;  // Pointer to shared slot
diff --git a/util/options.cc b/util/options.cc
index b5e6227..351fa39 100644
--- a/util/options.cc
+++ b/util/options.cc
@@ -15,16 +15,16 @@ Options::Options()
       error_if_exists(false),
       paranoid_checks(false),
       env(Env::Default()),
-      info_log(NULL),
+      info_log(nullptr),
       write_buffer_size(4<<20),
       max_open_files(1000),
-      block_cache(NULL),
+      block_cache(nullptr),
       block_size(4096),
       block_restart_interval(16),
       max_file_size(2<<20),
       compression(kSnappyCompression),
       reuse_logs(false),
-      filter_policy(NULL) {
+      filter_policy(nullptr) {
 }
 
 }  // namespace leveldb
diff --git a/util/posix_logger.h b/util/posix_logger.h
index 9741b1a..1909e61 100644
--- a/util/posix_logger.h
+++ b/util/posix_logger.h
@@ -45,7 +45,7 @@ class PosixLogger : public Logger {
       char* limit = base + bufsize;
 
       struct timeval now_tv;
-      gettimeofday(&now_tv, NULL);
+      gettimeofday(&now_tv, nullptr);
       const time_t seconds = now_tv.tv_sec;
       struct tm t;
       localtime_r(&seconds, &t);
diff --git a/util/status.cc b/util/status.cc
index a44f35b..5591381 100644
--- a/util/status.cc
+++ b/util/status.cc
@@ -34,7 +34,7 @@ Status::Status(Code code, const Slice& msg, const Slice& msg2) {
 }
 
 std::string Status::ToString() const {
-  if (state_ == NULL) {
+  if (state_ == nullptr) {
     return "OK";
   } else {
     char tmp[30];
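In Options, nullptr consistently means "use LevelDB's built-in behavior": a null info_log routes messages to a LOG file in the DB directory, a null block_cache makes the DB create its own internal cache, and a null filter_policy turns filters off. A usage sketch against the public headers (the 64MB cache and 10 bits per key are illustrative choices, not defaults):

    #include <cassert>

    #include "leveldb/cache.h"
    #include "leveldb/db.h"
    #include "leveldb/filter_policy.h"
    #include "leveldb/options.h"

    int main() {
      leveldb::Options options;
      // Defaults: nullptr means "LevelDB supplies its own behavior".
      assert(options.info_log == nullptr);       // log to a file in the DB dir
      assert(options.block_cache == nullptr);    // internal cache will be used
      assert(options.filter_policy == nullptr);  // no bloom filters

      // Opting in replaces the nullptr defaults with caller-owned objects.
      options.create_if_missing = true;
      options.block_cache = leveldb::NewLRUCache(64 << 20);
      options.filter_policy = leveldb::NewBloomFilterPolicy(10);

      leveldb::DB* db = nullptr;
      leveldb::Status s = leveldb::DB::Open(options, "/tmp/nullptr_demo", &db);
      delete db;  // the caller still owns the cache and the filter policy
      delete options.block_cache;
      delete options.filter_policy;
      return s.ok() ? 0 : 1;
    }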
diff --git a/util/testharness.cc b/util/testharness.cc
index 95f025f..37ba410 100644
--- a/util/testharness.cc
+++ b/util/testharness.cc
@@ -26,7 +26,7 @@ std::vector<Test>* tests;
 }
 
 bool RegisterTest(const char* base, const char* name, void (*func)()) {
-  if (tests == NULL) {
+  if (tests == nullptr) {
    tests = new std::vector<Test>;
   }
   Test t;
@@ -41,14 +41,14 @@ int RunAllTests() {
   const char* matcher = getenv("LEVELDB_TESTS");
 
   int num = 0;
-  if (tests != NULL) {
+  if (tests != nullptr) {
     for (size_t i = 0; i < tests->size(); i++) {
       const Test& t = (*tests)[i];
-      if (matcher != NULL) {
+      if (matcher != nullptr) {
         std::string name = t.base;
         name.push_back('.');
         name.append(t.name);
-        if (strstr(name.c_str(), matcher) == NULL) {
+        if (strstr(name.c_str(), matcher) == nullptr) {
           continue;
         }
       }
@@ -70,7 +70,7 @@ std::string TmpDir() {
 
 int RandomSeed() {
   const char* env = getenv("TEST_RANDOM_SEED");
-  int result = (env != NULL ? atoi(env) : 301);
+  int result = (env != nullptr ? atoi(env) : 301);
   if (result <= 0) {
     result = 301;
   }
diff --git a/util/testutil.h b/util/testutil.h
index 8726bf7..dc77ac3 100644
--- a/util/testutil.h
+++ b/util/testutil.h
@@ -40,7 +40,7 @@ class ErrorEnv : public EnvWrapper {
                                  WritableFile** result) {
     if (writable_file_error_) {
       ++num_writable_file_errors_;
-      *result = NULL;
+      *result = nullptr;
       return Status::IOError(fname, "fake error");
     }
     return target()->NewWritableFile(fname, result);
@@ -50,7 +50,7 @@ class ErrorEnv : public EnvWrapper {
                                   WritableFile** result) {
     if (writable_file_error_) {
       ++num_writable_file_errors_;
-      *result = NULL;
+      *result = nullptr;
       return Status::IOError(fname, "fake error");
    }
     return target()->NewAppendableFile(fname, result);
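testharness.cc's matcher doubles as the runner's only test-selection mechanism: a null LEVELDB_TESTS means run everything, otherwise substring-match against "Base.Name". A standalone sketch of the register-then-filter idiom (DEMO_TESTS is a hypothetical variable standing in for LEVELDB_TESTS):

    #include <cstdlib>
    #include <cstring>
    #include <string>
    #include <vector>

    struct Test {
      const char* base;
      const char* name;
      void (*func)();
    };

    // Lazily allocated so registration is safe during static initialization.
    static std::vector<Test>* tests = nullptr;

    bool RegisterTest(const char* base, const char* name, void (*func)()) {
      if (tests == nullptr) {
        tests = new std::vector<Test>;
      }
      tests->push_back(Test{base, name, func});
      return true;
    }

    int RunAllTests() {
      const char* matcher = std::getenv("DEMO_TESTS");
      int num = 0;
      if (tests != nullptr) {
        for (const Test& t : *tests) {
          std::string full = std::string(t.base) + "." + t.name;
          // A nullptr matcher means "run everything"; else substring-match.
          if (matcher != nullptr && strstr(full.c_str(), matcher) == nullptr) {
            continue;
          }
          t.func();
          ++num;
        }
      }
      return num;
    }

    static bool registered = RegisterTest("Demo", "AlwaysRuns", [] {});

    int main() { return RunAllTests() > 0 ? 0 : 1; }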