diff --git a/db/version_set.cc b/db/version_set.cc
index 7891dfc..56493ac 100644
--- a/db/version_set.cc
+++ b/db/version_set.cc
@@ -1347,78 +1347,81 @@ Compaction* VersionSet::PickCompaction() {
   return c;
 }
 
-// find the largest key in a vector of files. returns true if files it not empty
-bool FindLargestKey(const InternalKeyComparator & icmp, const std::vector<FileMetaData*> & files, InternalKey *largestKey) {
+// Finds the largest key in a vector of files. Returns true if files is not
+// empty.
+bool FindLargestKey(const InternalKeyComparator& icmp,
+                    const std::vector<FileMetaData*>& files,
+                    InternalKey* largest_key) {
   if (files.empty()) {
     return false;
   }
-  *largestKey = files[0]->largest;
+  *largest_key = files[0]->largest;
   for (size_t i = 1; i < files.size(); ++i) {
     FileMetaData* f = files[i];
-    if (icmp.Compare(f->largest, *largestKey) > 0) {
-      *largestKey = f->largest;
+    if (icmp.Compare(f->largest, *largest_key) > 0) {
+      *largest_key = f->largest;
     }
   }
   return true;
 }
 
-// find minimum file b2=(l2, u2) in level file for which l2 > u1 and user_key(l2) = user_key(u1)
-FileMetaData* FindSmallestBoundaryFile(const InternalKeyComparator & icmp,
-                                       const std::vector<FileMetaData*> & levelFiles,
-                                       const InternalKey & largestKey) {
+// Finds minimum file b2=(l2, u2) in level file for which l2 > u1 and
+// user_key(l2) = user_key(u1)
+FileMetaData* FindSmallestBoundaryFile(
+    const InternalKeyComparator& icmp,
+    const std::vector<FileMetaData*>& level_files,
+    const InternalKey& largest_key) {
   const Comparator* user_cmp = icmp.user_comparator();
-  FileMetaData* smallestBoundaryFile = NULL;
-  for (size_t i = 0; i < levelFiles.size(); ++i) {
-    FileMetaData* f = levelFiles[i];
-    if (icmp.Compare(f->smallest, largestKey) > 0 &&
-        user_cmp->Compare(f->smallest.user_key(), largestKey.user_key()) == 0) {
-      if (smallestBoundaryFile == NULL ||
-          icmp.Compare(f->smallest, smallestBoundaryFile->smallest) < 0) {
-        smallestBoundaryFile = f;
+  FileMetaData* smallest_boundary_file = nullptr;
+  for (size_t i = 0; i < level_files.size(); ++i) {
+    FileMetaData* f = level_files[i];
+    if (icmp.Compare(f->smallest, largest_key) > 0 &&
+        user_cmp->Compare(f->smallest.user_key(), largest_key.user_key()) ==
+            0) {
+      if (smallest_boundary_file == nullptr ||
+          icmp.Compare(f->smallest, smallest_boundary_file->smallest) < 0) {
+        smallest_boundary_file = f;
       }
     }
   }
-  return smallestBoundaryFile;
+  return smallest_boundary_file;
 }
 
-// If there are two blocks, b1=(l1, u1) and b2=(l2, u2) and
-// user_key(u1) = user_key(l2), and if we compact b1 but not
-// b2 then a subsequent get operation will yield an incorrect
-// result because it will return the record from b2 in level
-// i rather than from b1 because it searches level by level
-// for records matching the supplied user key.
+// Extracts the largest file b1 from |compaction_files| and then searches for a
+// b2 in |level_files| for which user_key(u1) = user_key(l2). If it finds such a
+// file b2 (known as a boundary file) it adds it to |compaction_files| and then
+// searches again using this new upper bound.
 //
-// This function extracts the largest file b1 from compactionFiles
-// and then searches for a b2 in levelFiles for which user_key(u1) =
-// user_key(l2). If it finds such a file b2 (known as a boundary file)
-// it adds it to compactionFiles and then searches again using this
-// new upper bound.
+// If there are two blocks, b1=(l1, u1) and b2=(l2, u2) and
+// user_key(u1) = user_key(l2), and if we compact b1 but not b2 then a
+// subsequent get operation will yield an incorrect result because it will
+// return the record from b2 in level i rather than from b1 because it searches
+// level by level for records matching the supplied user key.
 //
 // parameters:
-//   in     levelFiles:      list of files to search for boundary files
-//   in/out compactionFiles: list of files to extend by adding boundary files
+//   in     level_files:      List of files to search for boundary files.
+//   in/out compaction_files: List of files to extend by adding boundary files.
 void AddBoundaryInputs(const InternalKeyComparator& icmp,
-                       const std::vector<FileMetaData*>& levelFiles,
-                       std::vector<FileMetaData*>* compactionFiles) {
-  InternalKey largestKey;
+                       const std::vector<FileMetaData*>& level_files,
+                       std::vector<FileMetaData*>* compaction_files) {
+  InternalKey largest_key;
 
-  // find largestKey in compactionFiles, quick return if compactionFiles is
-  // empty
-  if (!FindLargestKey(icmp, *compactionFiles, &largestKey)) {
+  // Quick return if compaction_files is empty.
+  if (!FindLargestKey(icmp, *compaction_files, &largest_key)) {
     return;
   }
 
-  bool continueSearching = true;
-  while (continueSearching) {
-    FileMetaData* smallestBoundaryFile =
-        FindSmallestBoundaryFile(icmp, levelFiles, largestKey);
+  bool continue_searching = true;
+  while (continue_searching) {
+    FileMetaData* smallest_boundary_file =
+        FindSmallestBoundaryFile(icmp, level_files, largest_key);
 
-    // if a boundary file was found advance largestKey, otherwise we're done
-    if (smallestBoundaryFile != NULL) {
-      compactionFiles->push_back(smallestBoundaryFile);
-      largestKey = smallestBoundaryFile->largest;
+    // If a boundary file was found advance largest_key, otherwise we're done.
+    if (smallest_boundary_file != NULL) {
+      compaction_files->push_back(smallest_boundary_file);
+      largest_key = smallest_boundary_file->largest;
     } else {
-      continueSearching = false;
+      continue_searching = false;
     }
   }
 }
diff --git a/db/version_set_test.cc b/db/version_set_test.cc
index 090f115..b32e2e5 100644
--- a/db/version_set_test.cc
+++ b/db/version_set_test.cc
@@ -172,154 +172,160 @@ TEST(FindFileTest, OverlappingFiles) {
   ASSERT_TRUE(Overlaps("600", "700"));
 }
 
-void AddBoundaryInputs(const InternalKeyComparator &icmp,
-                       const std::vector<FileMetaData *> &levelFiles,
-                       std::vector<FileMetaData *> *compactionFiles);
+void AddBoundaryInputs(const InternalKeyComparator& icmp,
+                       const std::vector<FileMetaData*>& level_files,
+                       std::vector<FileMetaData*>* compaction_files);
 
 class AddBoundaryInputsTest {
  public:
-  std::vector<FileMetaData *> levelFiles_;
-  std::vector<FileMetaData *> compactionFiles_;
-  std::vector<FileMetaData *> allFiles_;
+  std::vector<FileMetaData*> level_files_;
+  std::vector<FileMetaData*> compaction_files_;
+  std::vector<FileMetaData*> all_files_;
   InternalKeyComparator icmp_;
 
   AddBoundaryInputsTest() : icmp_(BytewiseComparator()){};
 
   ~AddBoundaryInputsTest() {
-    for (size_t i = 0; i < allFiles_.size(); ++i) {
-      delete allFiles_[i];
+    for (size_t i = 0; i < all_files_.size(); ++i) {
+      delete all_files_[i];
     }
-    allFiles_.clear();
+    all_files_.clear();
   };
 
-  FileMetaData *CreateFileMetaData(uint64_t number, InternalKey smallest,
+  FileMetaData* CreateFileMetaData(uint64_t number, InternalKey smallest,
                                    InternalKey largest) {
-    FileMetaData *f = new FileMetaData();
+    FileMetaData* f = new FileMetaData();
     f->number = number;
     f->smallest = smallest;
     f->largest = largest;
-    allFiles_.push_back(f);
+    all_files_.push_back(f);
     return f;
   }
 };
 
 TEST(AddBoundaryInputsTest, TestEmptyFileSets) {
-  AddBoundaryInputs(icmp_, levelFiles_, &compactionFiles_);
-  ASSERT_TRUE(compactionFiles_.empty());
-  ASSERT_TRUE(levelFiles_.empty());
+  AddBoundaryInputs(icmp_, level_files_, &compaction_files_);
+  ASSERT_TRUE(compaction_files_.empty());
+  ASSERT_TRUE(level_files_.empty());
 }
 
 TEST(AddBoundaryInputsTest, TestEmptyLevelFiles) {
-  FileMetaData *f1 =
+  FileMetaData* f1 =
       CreateFileMetaData(1, InternalKey("100", 2, kTypeValue),
                          InternalKey(InternalKey("100", 1, kTypeValue)));
-  compactionFiles_.push_back(f1);
+  compaction_files_.push_back(f1);
 
-  AddBoundaryInputs(icmp_, levelFiles_, &compactionFiles_);
-  ASSERT_EQ(1, compactionFiles_.size());
-  ASSERT_EQ(f1, compactionFiles_[0]);
-  ASSERT_TRUE(levelFiles_.empty());
+  AddBoundaryInputs(icmp_, level_files_, &compaction_files_);
+  ASSERT_EQ(1, compaction_files_.size());
+  ASSERT_EQ(f1, compaction_files_[0]);
+  ASSERT_TRUE(level_files_.empty());
 }
 
 TEST(AddBoundaryInputsTest, TestEmptyCompactionFiles) {
-  FileMetaData *f1 =
+  FileMetaData* f1 =
      CreateFileMetaData(1, InternalKey("100", 2, kTypeValue),
                          InternalKey(InternalKey("100", 1, kTypeValue)));
-  levelFiles_.push_back(f1);
+  level_files_.push_back(f1);
 
-  AddBoundaryInputs(icmp_, levelFiles_, &compactionFiles_);
-  ASSERT_TRUE(compactionFiles_.empty());
-  ASSERT_EQ(1, levelFiles_.size());
-  ASSERT_EQ(f1, levelFiles_[0]);
+  AddBoundaryInputs(icmp_, level_files_, &compaction_files_);
+  ASSERT_TRUE(compaction_files_.empty());
+  ASSERT_EQ(1, level_files_.size());
+  ASSERT_EQ(f1, level_files_[0]);
 }
 
 TEST(AddBoundaryInputsTest, TestNoBoundaryFiles) {
-  FileMetaData *f1 =
+  FileMetaData* f1 =
       CreateFileMetaData(1, InternalKey("100", 2, kTypeValue),
                          InternalKey(InternalKey("100", 1, kTypeValue)));
-  FileMetaData *f2 =
+  FileMetaData* f2 =
       CreateFileMetaData(1, InternalKey("200", 2, kTypeValue),
                          InternalKey(InternalKey("200", 1, kTypeValue)));
-  FileMetaData *f3 =
+  FileMetaData* f3 =
       CreateFileMetaData(1, InternalKey("300", 2, kTypeValue),
                          InternalKey(InternalKey("300", 1, kTypeValue)));
 
-  levelFiles_.push_back(f3);
-  levelFiles_.push_back(f2);
-  levelFiles_.push_back(f1);
-  compactionFiles_.push_back(f2);
-  compactionFiles_.push_back(f3);
+  level_files_.push_back(f3);
+  level_files_.push_back(f2);
+  level_files_.push_back(f1);
+  compaction_files_.push_back(f2);
+  compaction_files_.push_back(f3);
 
-  AddBoundaryInputs(icmp_, levelFiles_, &compactionFiles_);
-  ASSERT_EQ(2, compactionFiles_.size());
+  AddBoundaryInputs(icmp_, level_files_, &compaction_files_);
+  ASSERT_EQ(2, compaction_files_.size());
 }
 
 TEST(AddBoundaryInputsTest, TestOneBoundaryFiles) {
-  FileMetaData *f1 =
+  FileMetaData* f1 =
      CreateFileMetaData(1, InternalKey("100", 3, kTypeValue),
                          InternalKey(InternalKey("100", 2, kTypeValue)));
-  FileMetaData *f2 =
+  FileMetaData* f2 =
       CreateFileMetaData(1, InternalKey("100", 1, kTypeValue),
                          InternalKey(InternalKey("200", 3, kTypeValue)));
-  FileMetaData *f3 =
+  FileMetaData* f3 =
       CreateFileMetaData(1, InternalKey("300", 2, kTypeValue),
                          InternalKey(InternalKey("300", 1, kTypeValue)));
 
-  levelFiles_.push_back(f3);
-  levelFiles_.push_back(f2);
-  levelFiles_.push_back(f1);
-  compactionFiles_.push_back(f1);
+  level_files_.push_back(f3);
+  level_files_.push_back(f2);
+  level_files_.push_back(f1);
+  compaction_files_.push_back(f1);
 
-  AddBoundaryInputs(icmp_, levelFiles_, &compactionFiles_);
-  ASSERT_EQ(2, compactionFiles_.size());
-  ASSERT_EQ(f1, compactionFiles_[0]);
-  ASSERT_EQ(f2, compactionFiles_[1]);
+  AddBoundaryInputs(icmp_, level_files_, &compaction_files_);
+  ASSERT_EQ(2, compaction_files_.size());
+  ASSERT_EQ(f1, compaction_files_[0]);
+  ASSERT_EQ(f2, compaction_files_[1]);
 }
 
 TEST(AddBoundaryInputsTest, TestTwoBoundaryFiles) {
-  FileMetaData *f1 =
+  FileMetaData* f1 =
       CreateFileMetaData(1, InternalKey("100", 6, kTypeValue),
                          InternalKey(InternalKey("100", 5, kTypeValue)));
-  FileMetaData *f2 =
+  FileMetaData* f2 =
       CreateFileMetaData(1, InternalKey("100", 2, kTypeValue),
                          InternalKey(InternalKey("300", 1, kTypeValue)));
-  FileMetaData *f3 =
+  FileMetaData* f3 =
       CreateFileMetaData(1, InternalKey("100", 4, kTypeValue),
                          InternalKey(InternalKey("100", 3, kTypeValue)));
 
-  levelFiles_.push_back(f2);
-  levelFiles_.push_back(f3);
-  levelFiles_.push_back(f1);
-  compactionFiles_.push_back(f1);
+  level_files_.push_back(f2);
+  level_files_.push_back(f3);
+  level_files_.push_back(f1);
+  compaction_files_.push_back(f1);
 
-  AddBoundaryInputs(icmp_, levelFiles_, &compactionFiles_);
-  ASSERT_EQ(3, compactionFiles_.size());
-  ASSERT_EQ(f1, compactionFiles_[0]);
-  ASSERT_EQ(f3, compactionFiles_[1]);
-  ASSERT_EQ(f2, compactionFiles_[2]);
+  AddBoundaryInputs(icmp_, level_files_, &compaction_files_);
+  ASSERT_EQ(3, compaction_files_.size());
+  ASSERT_EQ(f1, compaction_files_[0]);
+  ASSERT_EQ(f3, compaction_files_[1]);
+  ASSERT_EQ(f2, compaction_files_[2]);
 }
 
 TEST(AddBoundaryInputsTest, TestDisjoinFilePointers) {
-  FileMetaData *f1 = CreateFileMetaData(1, InternalKey("100", 6, kTypeValue), InternalKey(InternalKey("100", 5, kTypeValue)));
-  FileMetaData *f2 = CreateFileMetaData(1, InternalKey("100", 6, kTypeValue), InternalKey(InternalKey("100", 5, kTypeValue)));
-  FileMetaData *f3 = CreateFileMetaData(1, InternalKey("100", 2, kTypeValue), InternalKey(InternalKey("300", 1, kTypeValue)));
-  FileMetaData *f4 = CreateFileMetaData(1, InternalKey("100", 4, kTypeValue), InternalKey(InternalKey("100", 3, kTypeValue)));
-
-  levelFiles_.push_back(f2);
-  levelFiles_.push_back(f3);
-  levelFiles_.push_back(f4);
-
-  compactionFiles_.push_back(f1);
-
-  AddBoundaryInputs(icmp_, levelFiles_, &compactionFiles_);
-  ASSERT_EQ(3, compactionFiles_.size());
-  ASSERT_EQ(f1, compactionFiles_[0]);
-  ASSERT_EQ(f4, compactionFiles_[1]);
-  ASSERT_EQ(f3, compactionFiles_[2]);
-}
+  FileMetaData* f1 =
+      CreateFileMetaData(1, InternalKey("100", 6, kTypeValue),
+                         InternalKey(InternalKey("100", 5, kTypeValue)));
+  FileMetaData* f2 =
+      CreateFileMetaData(1, InternalKey("100", 6, kTypeValue),
+                         InternalKey(InternalKey("100", 5, kTypeValue)));
+  FileMetaData* f3 =
+      CreateFileMetaData(1, InternalKey("100", 2, kTypeValue),
+                         InternalKey(InternalKey("300", 1, kTypeValue)));
+  FileMetaData* f4 =
+      CreateFileMetaData(1, InternalKey("100", 4, kTypeValue),
+                         InternalKey(InternalKey("100", 3, kTypeValue)));
 
-}  // namespace leveldb
+  level_files_.push_back(f2);
+  level_files_.push_back(f3);
+  level_files_.push_back(f4);
 
-int main(int argc, char** argv) {
-  return leveldb::test::RunAllTests();
+  compaction_files_.push_back(f1);
+
+  AddBoundaryInputs(icmp_, level_files_, &compaction_files_);
+  ASSERT_EQ(3, compaction_files_.size());
+  ASSERT_EQ(f1, compaction_files_[0]);
+  ASSERT_EQ(f4, compaction_files_[1]);
+  ASSERT_EQ(f3, compaction_files_[2]);
 }
+
+}  // namespace leveldb
+
+int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
diff --git a/issues/issue320_test.cc b/issues/issue320_test.cc
index a7c37b1..28ef1b8 100644
--- a/issues/issue320_test.cc
+++ b/issues/issue320_test.cc
@@ -1,47 +1,43 @@
 // Copyright (c) 2019 The LevelDB Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#include 
+#include 
 #include 
-#include 
 #include 
-#include 
 #include 
 #include 
-#include 
-
 #include "leveldb/db.h"
 #include "leveldb/write_batch.h"
 #include "util/testharness.h"
 
-using namespace std;
-
 namespace leveldb {
 namespace {
 
-unsigned int random(unsigned int max) {
-  return std::rand() % max;
-}
+// Creates a random number in the range of [0, max).
+int GenerateRandomNumber(int max) { return std::rand() % max; }
 
-string newString(int32_t index) {
-  const unsigned int len = 1024;
+std::string CreateRandomString(int32_t index) {
+  static const size_t len = 1024;
   char bytes[len];
-  unsigned int i = 0;
+  size_t i = 0;
   while (i < 8) {
     bytes[i] = 'a' + ((index >> (4 * i)) & 0xf);
     ++i;
   }
   while (i < sizeof(bytes)) {
-    bytes[i] = 'a' + random(26);
+    bytes[i] = 'a' + GenerateRandomNumber(26);
     ++i;
   }
-  return string(bytes, sizeof(bytes));
+  return std::string(bytes, sizeof(bytes));
 }
 
 }  // namespace
 
-class Issue320 { };
+class Issue320 {};
 
 TEST(Issue320, Test) {
   std::srand(0);
@@ -49,8 +45,8 @@ TEST(Issue320, Test) {
   bool delete_before_put = false;
   bool keep_snapshots = true;
 
-  vector<pair<string, string>*> test_map(10000, nullptr);
-  vector<Snapshot const*> snapshots(100, nullptr);
+  std::vector<std::pair<std::string, std::string>*> test_map(10000, nullptr);
+  std::vector<Snapshot const*> snapshots(100, nullptr);
 
   DB* db;
   Options options;
@@ -59,11 +55,11 @@ TEST(Issue320, Test) {
   std::string dbpath = test::TmpDir() + "/leveldb_issue320_test";
   ASSERT_OK(DB::Open(options, dbpath, &db));
 
-  unsigned int target_size = 10000;
-  unsigned int num_items = 0;
-  unsigned long count = 0;
-  string key;
-  string value, old_value;
+  uint32_t target_size = 10000;
+  uint32_t num_items = 0;
+  uint32_t count = 0;
+  std::string key;
+  std::string value, old_value;
 
   WriteOptions writeOptions;
   ReadOptions readOptions;
@@ -72,13 +68,13 @@ TEST(Issue320, Test) {
       cout << "count: " << count << endl;
     }
 
-    unsigned int index = random(test_map.size());
+    int index = GenerateRandomNumber(test_map.size());
     WriteBatch batch;
 
     if (test_map[index] == nullptr) {
       num_items++;
-      test_map[index] =
-          new pair<string, string>(newString(index), newString(index));
+      test_map[index] = new std::pair<std::string, std::string>(
+          CreateRandomString(index), CreateRandomString(index));
       batch.Put(test_map[index]->first, test_map[index]->second);
     } else {
       ASSERT_OK(db->Get(readOptions, test_map[index]->first, &old_value));
@@ -92,13 +88,13 @@ TEST(Issue320, Test) {
      ASSERT_EQ(old_value, test_map[index]->second);
     }
 
-    if (num_items >= target_size && random(100) > 30) {
+    if (num_items >= target_size && GenerateRandomNumber(100) > 30) {
      batch.Delete(test_map[index]->first);
      delete test_map[index];
      test_map[index] = nullptr;
      --num_items;
    } else {
-      test_map[index]->second = newString(index);
+      test_map[index]->second = CreateRandomString(index);
      if (delete_before_put) batch.Delete(test_map[index]->first);
      batch.Put(test_map[index]->first, test_map[index]->second);
    }
@@ -106,8 +102,8 @@ TEST(Issue320, Test) {
 
     ASSERT_OK(db->Write(writeOptions, &batch));
 
-    if (keep_snapshots && random(10) == 0) {
-      unsigned int i = random(snapshots.size());
+    if (keep_snapshots && GenerateRandomNumber(10) == 0) {
+      int i = GenerateRandomNumber(snapshots.size());
       if (snapshots[i] != nullptr) {
         db->ReleaseSnapshot(snapshots[i]);
       }
@@ -134,6 +130,4 @@ TEST(Issue320, Test) {
 
 }  // namespace leveldb
 
-int main(int argc, char** argv) {
-  return leveldb::test::RunAllTests();
-}
+int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
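
For context, here is a minimal, self-contained sketch of the boundary-file expansion idea that AddBoundaryInputs implements. It is not LevelDB code: the types and names below (Key, File, After, ExpandWithBoundaryFiles) are invented stand-ins for InternalKey, FileMetaData, and the internal key comparator, chosen only to make the loop easy to run and inspect on its own.

// Sketch only: simplified stand-ins for LevelDB's internal key and file metadata.
#include <iostream>
#include <string>
#include <vector>

// Stand-in for InternalKey: a user key plus a sequence number, where a higher
// sequence number means a newer entry and therefore orders *earlier*.
struct Key {
  std::string user_key;
  int sequence;
};

// Returns true if a orders strictly after b: by user key ascending, then by
// sequence number descending (the same rule LevelDB's comparator uses).
bool After(const Key& a, const Key& b) {
  if (a.user_key != b.user_key) return a.user_key > b.user_key;
  return a.sequence < b.sequence;
}

// Stand-in for FileMetaData: only the smallest and largest key of a file.
struct File {
  Key smallest;
  Key largest;
};

// Mirrors the loop in AddBoundaryInputs: repeatedly pull in the file whose
// smallest key continues the current largest user key, so every entry for
// that user key ends up in the same compaction.
void ExpandWithBoundaryFiles(const std::vector<File*>& level_files,
                             std::vector<File*>* compaction_files) {
  if (compaction_files->empty()) return;

  // Largest key currently covered by the compaction (cf. FindLargestKey).
  Key largest = (*compaction_files)[0]->largest;
  for (File* f : *compaction_files) {
    if (After(f->largest, largest)) largest = f->largest;
  }

  bool continue_searching = true;
  while (continue_searching) {
    // Smallest file that starts after |largest| with the same user key
    // (cf. FindSmallestBoundaryFile).
    File* boundary = nullptr;
    for (File* f : level_files) {
      if (After(f->smallest, largest) &&
          f->smallest.user_key == largest.user_key) {
        if (boundary == nullptr || After(boundary->smallest, f->smallest)) {
          boundary = f;
        }
      }
    }
    if (boundary != nullptr) {
      compaction_files->push_back(boundary);
      largest = boundary->largest;  // Search again with the new upper bound.
    } else {
      continue_searching = false;
    }
  }
}

int main() {
  // b1 ends at ("100", 5); b2 holds older entries for the same user key, so it
  // is b1's boundary file. |other| covers an unrelated user key.
  File b1{{"100", 6}, {"100", 5}};
  File b2{{"100", 4}, {"100", 3}};
  File other{{"200", 2}, {"200", 1}};

  std::vector<File*> level_files = {&b1, &b2, &other};
  std::vector<File*> compaction_files = {&b1};
  ExpandWithBoundaryFiles(level_files, &compaction_files);

  // Prints 2: b2 was pulled into the compaction, |other| was not.
  std::cout << compaction_files.size() << "\n";
  return 0;
}

The point of the expansion is visible in the example: if only b1 were compacted to the next level, a later Get for user key "100" would stop at the older record left behind in b2 at the current level, which is exactly the issue-320 class of bug the patch guards against.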