Authors: 谢瑞阳 (10225101483), 徐翔宇 (10225101535)

// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#include "db/version_set.h"

#include <algorithm>
#include <stdio.h>

#include "db/filename.h"
#include "db/log_reader.h"
#include "db/log_writer.h"
#include "db/memtable.h"
#include "db/table_cache.h"
#include "leveldb/env.h"
#include "leveldb/table_builder.h"
#include "table/merger.h"
#include "table/two_level_iterator.h"
#include "util/coding.h"
#include "util/logging.h"

namespace leveldb {

static const int kTargetFileSize = 2 * 1048576;

// Maximum bytes of overlaps in grandparent (i.e., level+2) before we
// stop building a single file in a level->level+1 compaction.
static const int64_t kMaxGrandParentOverlapBytes = 10 * kTargetFileSize;

static double MaxBytesForLevel(int level) {
  // Note: the result for level zero is not really used since we set
  // the level-0 compaction threshold based on number of files.
  double result = 10 * 1048576.0;  // Result for both level-0 and level-1
  while (level > 1) {
    result *= 10;
    level--;
  }
  return result;
}

static uint64_t MaxFileSizeForLevel(int level) {
  return kTargetFileSize;  // We could vary per level to reduce number of files?
}
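
// Editorial note (not part of the original source): with the constants above,
// the per-level size targets work out to roughly 10 MB for level-1, 100 MB for
// level-2, 1 GB for level-3, and so on (x10 per level), while every compaction
// output file is capped at kTargetFileSize = 2 MB and a single output file is
// cut short once it would overlap more than
// kMaxGrandParentOverlapBytes = 10 * 2 MB = 20 MB of grandparent data.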
static int64_t TotalFileSize(const std::vector<FileMetaData*>& files) {
  int64_t sum = 0;
  for (size_t i = 0; i < files.size(); i++) {
    sum += files[i]->file_size;
  }
  return sum;
}

namespace {
std::string IntSetToString(const std::set<uint64_t>& s) {
  std::string result = "{";
  for (std::set<uint64_t>::const_iterator it = s.begin();
       it != s.end();
       ++it) {
    result += (result.size() > 1) ? "," : "";
    result += NumberToString(*it);
  }
  result += "}";
  return result;
}
}  // namespace

Version::~Version() {
  assert(refs_ == 0);

  // Remove from linked list
  prev_->next_ = next_;
  next_->prev_ = prev_;

  // Drop references to files
  for (int level = 0; level < config::kNumLevels; level++) {
    for (size_t i = 0; i < files_[level].size(); i++) {
      FileMetaData* f = files_[level][i];
      assert(f->refs > 0);
      f->refs--;
      if (f->refs <= 0) {
        delete f;
      }
    }
  }
}

int FindFile(const InternalKeyComparator& icmp,
             const std::vector<FileMetaData*>& files,
             const Slice& key) {
  uint32_t left = 0;
  uint32_t right = files.size();
  while (left < right) {
    uint32_t mid = (left + right) / 2;
    const FileMetaData* f = files[mid];
    if (icmp.InternalKeyComparator::Compare(f->largest.Encode(), key) < 0) {
      // Key at "mid.largest" is < "target". Therefore all
      // files at or before "mid" are uninteresting.
      left = mid + 1;
    } else {
      // Key at "mid.largest" is >= "target". Therefore all files
      // after "mid" are uninteresting.
      right = mid;
    }
  }
  return right;
}
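
// Editorial note (not part of the original source): FindFile returns the index
// of the first file whose largest key is >= "key", or files.size() if every
// file ends before "key". For example, if the files' largest keys are "b",
// "d" and "f", searching for "c" yields index 1, and searching for "g" yields
// 3 (one past the end), which callers treat as "no candidate file".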
static bool AfterFile(const Comparator* ucmp,
                      const Slice* user_key, const FileMetaData* f) {
  // NULL user_key occurs before all keys and is therefore never after *f
  return (user_key != NULL &&
          ucmp->Compare(*user_key, f->largest.user_key()) > 0);
}

static bool BeforeFile(const Comparator* ucmp,
                       const Slice* user_key, const FileMetaData* f) {
  // NULL user_key occurs after all keys and is therefore never before *f
  return (user_key != NULL &&
          ucmp->Compare(*user_key, f->smallest.user_key()) < 0);
}

bool SomeFileOverlapsRange(
    const InternalKeyComparator& icmp,
    bool disjoint_sorted_files,
    const std::vector<FileMetaData*>& files,
    const Slice* smallest_user_key,
    const Slice* largest_user_key) {
  const Comparator* ucmp = icmp.user_comparator();
  if (!disjoint_sorted_files) {
    // Need to check against all files
    for (int i = 0; i < files.size(); i++) {
      const FileMetaData* f = files[i];
      if (AfterFile(ucmp, smallest_user_key, f) ||
          BeforeFile(ucmp, largest_user_key, f)) {
        // No overlap
      } else {
        return true;  // Overlap
      }
    }
    return false;
  }

  // Binary search over file list
  uint32_t index = 0;
  if (smallest_user_key != NULL) {
    // Find the earliest possible internal key for smallest_user_key
    InternalKey small(*smallest_user_key, kMaxSequenceNumber, kValueTypeForSeek);
    index = FindFile(icmp, files, small.Encode());
  }

  if (index >= files.size()) {
    // beginning of range is after all files, so no overlap.
    return false;
  }

  return !BeforeFile(ucmp, largest_user_key, files[index]);
}
// An internal iterator. For a given version/level pair, yields
// information about the files in the level. For a given entry, key()
// is the largest key that occurs in the file, and value() is a
// 16-byte value containing the file number and file size, both
// encoded using EncodeFixed64.
class Version::LevelFileNumIterator : public Iterator {
 public:
  LevelFileNumIterator(const InternalKeyComparator& icmp,
                       const std::vector<FileMetaData*>* flist)
      : icmp_(icmp),
        flist_(flist),
        index_(flist->size()) {  // Marks as invalid
  }
  virtual bool Valid() const {
    return index_ < flist_->size();
  }
  virtual void Seek(const Slice& target) {
    index_ = FindFile(icmp_, *flist_, target);
  }
  virtual void SeekToFirst() { index_ = 0; }
  virtual void SeekToLast() {
    index_ = flist_->empty() ? 0 : flist_->size() - 1;
  }
  virtual void Next() {
    assert(Valid());
    index_++;
  }
  virtual void Prev() {
    assert(Valid());
    if (index_ == 0) {
      index_ = flist_->size();  // Marks as invalid
    } else {
      index_--;
    }
  }
  Slice key() const {
    assert(Valid());
    return (*flist_)[index_]->largest.Encode();
  }
  Slice value() const {
    assert(Valid());
    EncodeFixed64(value_buf_, (*flist_)[index_]->number);
    EncodeFixed64(value_buf_+8, (*flist_)[index_]->file_size);
    return Slice(value_buf_, sizeof(value_buf_));
  }
  virtual Status status() const { return Status::OK(); }

 private:
  const InternalKeyComparator icmp_;
  const std::vector<FileMetaData*>* const flist_;
  uint32_t index_;

  // Backing store for value(). Holds the file number and size.
  mutable char value_buf_[16];
};
static Iterator* GetFileIterator(void* arg,
                                 const ReadOptions& options,
                                 const Slice& file_value) {
  TableCache* cache = reinterpret_cast<TableCache*>(arg);
  if (file_value.size() != 16) {
    return NewErrorIterator(
        Status::Corruption("FileReader invoked with unexpected value"));
  } else {
    return cache->NewIterator(options,
                              DecodeFixed64(file_value.data()),
                              DecodeFixed64(file_value.data() + 8));
  }
}

Iterator* Version::NewConcatenatingIterator(const ReadOptions& options,
                                            int level) const {
  return NewTwoLevelIterator(
      new LevelFileNumIterator(vset_->icmp_, &files_[level]),
      &GetFileIterator, vset_->table_cache_, options);
}

void Version::AddIterators(const ReadOptions& options,
                           std::vector<Iterator*>* iters) {
  // Merge all level zero files together since they may overlap
  for (size_t i = 0; i < files_[0].size(); i++) {
    iters->push_back(
        vset_->table_cache_->NewIterator(
            options, files_[0][i]->number, files_[0][i]->file_size));
  }

  // For levels > 0, we can use a concatenating iterator that sequentially
  // walks through the non-overlapping files in the level, opening them
  // lazily.
  for (int level = 1; level < config::kNumLevels; level++) {
    if (!files_[level].empty()) {
      iters->push_back(NewConcatenatingIterator(options, level));
    }
  }
}

// If "*iter" points at a value or deletion for user_key, store
// either the value, or a NotFound error and return true.
// Else return false.
static bool GetValue(const Comparator* cmp,
                     Iterator* iter, const Slice& user_key,
                     std::string* value,
                     Status* s) {
  if (!iter->Valid()) {
    return false;
  }
  ParsedInternalKey parsed_key;
  if (!ParseInternalKey(iter->key(), &parsed_key)) {
    *s = Status::Corruption("corrupted key for ", user_key);
    return true;
  }
  if (cmp->Compare(parsed_key.user_key, user_key) != 0) {
    return false;
  }
  switch (parsed_key.type) {
    case kTypeDeletion:
      *s = Status::NotFound(Slice());  // Use an empty error message for speed
      break;
    case kTypeValue: {
      Slice v = iter->value();
      value->assign(v.data(), v.size());
      break;
    }
  }
  return true;
}

static bool NewestFirst(FileMetaData* a, FileMetaData* b) {
  return a->number > b->number;
}
Status Version::Get(const ReadOptions& options,
                    const LookupKey& k,
                    std::string* value,
                    GetStats* stats) {
  Slice ikey = k.internal_key();
  Slice user_key = k.user_key();
  const Comparator* ucmp = vset_->icmp_.user_comparator();
  Status s;

  stats->seek_file = NULL;
  stats->seek_file_level = -1;
  FileMetaData* last_file_read = NULL;
  int last_file_read_level = -1;

  // We can search level-by-level since entries never hop across
  // levels. Therefore we are guaranteed that if we find data
  // in a smaller level, later levels are irrelevant.
  std::vector<FileMetaData*> tmp;
  FileMetaData* tmp2;
  for (int level = 0; level < config::kNumLevels; level++) {
    size_t num_files = files_[level].size();
    if (num_files == 0) continue;

    // Get the list of files to search in this level
    FileMetaData* const* files = &files_[level][0];
    if (level == 0) {
      // Level-0 files may overlap each other. Find all files that
      // overlap user_key and process them in order from newest to oldest.
      tmp.reserve(num_files);
      for (uint32_t i = 0; i < num_files; i++) {
        FileMetaData* f = files[i];
        if (ucmp->Compare(user_key, f->smallest.user_key()) >= 0 &&
            ucmp->Compare(user_key, f->largest.user_key()) <= 0) {
          tmp.push_back(f);
        }
      }
      if (tmp.empty()) continue;

      std::sort(tmp.begin(), tmp.end(), NewestFirst);
      files = &tmp[0];
      num_files = tmp.size();
    } else {
      // Binary search to find earliest index whose largest key >= ikey.
      uint32_t index = FindFile(vset_->icmp_, files_[level], ikey);
      if (index >= num_files) {
        files = NULL;
        num_files = 0;
      } else {
        tmp2 = files[index];
        if (ucmp->Compare(user_key, tmp2->smallest.user_key()) < 0) {
          // All of "tmp2" is past any data for user_key
          files = NULL;
          num_files = 0;
        } else {
          files = &tmp2;
          num_files = 1;
        }
      }
    }

    for (uint32_t i = 0; i < num_files; ++i) {
      if (last_file_read != NULL && stats->seek_file == NULL) {
        // We have had more than one seek for this read. Charge the 1st file.
        stats->seek_file = last_file_read;
        stats->seek_file_level = last_file_read_level;
      }

      FileMetaData* f = files[i];
      last_file_read = f;
      last_file_read_level = level;

      Iterator* iter = vset_->table_cache_->NewIterator(
          options,
          f->number,
          f->file_size);
      iter->Seek(ikey);
      const bool done = GetValue(ucmp, iter, user_key, value, &s);
      if (!iter->status().ok()) {
        s = iter->status();
        delete iter;
        return s;
      } else {
        delete iter;
        if (done) {
          return s;
        }
      }
    }
  }

  return Status::NotFound(Slice());  // Use an empty error message for speed
}
bool Version::UpdateStats(const GetStats& stats) {
  FileMetaData* f = stats.seek_file;
  if (f != NULL) {
    f->allowed_seeks--;
    if (f->allowed_seeks <= 0 && file_to_compact_ == NULL) {
      file_to_compact_ = f;
      file_to_compact_level_ = stats.seek_file_level;
      return true;
    }
  }
  return false;
}

void Version::Ref() {
  ++refs_;
}

void Version::Unref() {
  assert(this != &vset_->dummy_versions_);
  assert(refs_ >= 1);
  --refs_;
  if (refs_ == 0) {
    delete this;
  }
}

bool Version::OverlapInLevel(int level,
                             const Slice* smallest_user_key,
                             const Slice* largest_user_key) {
  return SomeFileOverlapsRange(vset_->icmp_, (level > 0), files_[level],
                               smallest_user_key, largest_user_key);
}

int Version::PickLevelForMemTableOutput(
    const Slice& smallest_user_key,
    const Slice& largest_user_key) {
  int level = 0;
  if (!OverlapInLevel(0, &smallest_user_key, &largest_user_key)) {
    // Push to next level if there is no overlap in next level,
    // and the #bytes overlapping in the level after that are limited.
    InternalKey start(smallest_user_key, kMaxSequenceNumber, kValueTypeForSeek);
    InternalKey limit(largest_user_key, 0, static_cast<ValueType>(0));
    std::vector<FileMetaData*> overlaps;
    while (level < config::kMaxMemCompactLevel) {
      if (OverlapInLevel(level + 1, &smallest_user_key, &largest_user_key)) {
        break;
      }
      GetOverlappingInputs(level + 2, &start, &limit, &overlaps);
      const int64_t sum = TotalFileSize(overlaps);
      if (sum > kMaxGrandParentOverlapBytes) {
        break;
      }
      level++;
    }
  }
  return level;
}
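
// Editorial note (not part of the original source): PickLevelForMemTableOutput
// lets a freshly flushed memtable skip level-0 when its key range overlaps
// nothing in the next level, pushing it down to at most
// config::kMaxMemCompactLevel (2 in stock LevelDB) as long as the overlap with
// the level after that stays under kMaxGrandParentOverlapBytes. This keeps the
// level-0 file count low for workloads whose flushes do not overlap.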
// Store in "*inputs" all files in "level" that overlap [begin,end]
void Version::GetOverlappingInputs(
    int level,
    const InternalKey* begin,
    const InternalKey* end,
    std::vector<FileMetaData*>* inputs) {
  inputs->clear();
  Slice user_begin, user_end;
  if (begin != NULL) {
    user_begin = begin->user_key();
  }
  if (end != NULL) {
    user_end = end->user_key();
  }
  const Comparator* user_cmp = vset_->icmp_.user_comparator();
  for (size_t i = 0; i < files_[level].size(); ) {
    FileMetaData* f = files_[level][i++];
    const Slice file_start = f->smallest.user_key();
    const Slice file_limit = f->largest.user_key();
    if (begin != NULL && user_cmp->Compare(file_limit, user_begin) < 0) {
      // "f" is completely before specified range; skip it
    } else if (end != NULL && user_cmp->Compare(file_start, user_end) > 0) {
      // "f" is completely after specified range; skip it
    } else {
      inputs->push_back(f);
      if (level == 0) {
        // Level-0 files may overlap each other. So check if the newly
        // added file has expanded the range. If so, restart search.
        if (begin != NULL && user_cmp->Compare(file_start, user_begin) < 0) {
          user_begin = file_start;
          inputs->clear();
          i = 0;
        } else if (end != NULL && user_cmp->Compare(file_limit, user_end) > 0) {
          user_end = file_limit;
          inputs->clear();
          i = 0;
        }
      }
    }
  }
}

std::string Version::DebugString() const {
  std::string r;
  for (int level = 0; level < config::kNumLevels; level++) {
    // E.g.,
    //   --- level 1 ---
    //   17:123['a' .. 'd']
    //   20:43['e' .. 'g']
    r.append("--- level ");
    AppendNumberTo(&r, level);
    r.append(" ---\n");
    const std::vector<FileMetaData*>& files = files_[level];
    for (size_t i = 0; i < files.size(); i++) {
      r.push_back(' ');
      AppendNumberTo(&r, files[i]->number);
      r.push_back(':');
      AppendNumberTo(&r, files[i]->file_size);
      r.append("[");
      r.append(files[i]->smallest.DebugString());
      r.append(" .. ");
      r.append(files[i]->largest.DebugString());
      r.append("]\n");
    }
  }
  return r;
}

// A helper class so we can efficiently apply a whole sequence
// of edits to a particular state without creating intermediate
// Versions that contain full copies of the intermediate state.
class VersionSet::Builder {
 private:
  // Helper to sort by v->files_[file_number].smallest
  struct BySmallestKey {
    const InternalKeyComparator* internal_comparator;

    bool operator()(FileMetaData* f1, FileMetaData* f2) const {
      int r = internal_comparator->Compare(f1->smallest, f2->smallest);
      if (r != 0) {
        return (r < 0);
      } else {
        // Break ties by file number
        return (f1->number < f2->number);
      }
    }
  };

  typedef std::set<FileMetaData*, BySmallestKey> FileSet;
  struct LevelState {
    std::set<uint64_t> deleted_files;
    FileSet* added_files;
  };

  VersionSet* vset_;
  Version* base_;
  LevelState levels_[config::kNumLevels];

 public:
  // Initialize a builder with the files from *base and other info from *vset
  Builder(VersionSet* vset, Version* base)
      : vset_(vset),
        base_(base) {
    base_->Ref();
    BySmallestKey cmp;
    cmp.internal_comparator = &vset_->icmp_;
    for (int level = 0; level < config::kNumLevels; level++) {
      levels_[level].added_files = new FileSet(cmp);
    }
  }

  ~Builder() {
    for (int level = 0; level < config::kNumLevels; level++) {
      const FileSet* added = levels_[level].added_files;
      std::vector<FileMetaData*> to_unref;
      to_unref.reserve(added->size());
      for (FileSet::const_iterator it = added->begin();
           it != added->end(); ++it) {
        to_unref.push_back(*it);
      }
      delete added;
      for (uint32_t i = 0; i < to_unref.size(); i++) {
        FileMetaData* f = to_unref[i];
        f->refs--;
        if (f->refs <= 0) {
          delete f;
        }
      }
    }
    base_->Unref();
  }

  // Apply all of the edits in *edit to the current state.
  void Apply(VersionEdit* edit) {
    // Update compaction pointers
    for (size_t i = 0; i < edit->compact_pointers_.size(); i++) {
      const int level = edit->compact_pointers_[i].first;
      vset_->compact_pointer_[level] =
          edit->compact_pointers_[i].second.Encode().ToString();
    }

    // Delete files
    const VersionEdit::DeletedFileSet& del = edit->deleted_files_;
    for (VersionEdit::DeletedFileSet::const_iterator iter = del.begin();
         iter != del.end();
         ++iter) {
      const int level = iter->first;
      const uint64_t number = iter->second;
      levels_[level].deleted_files.insert(number);
    }

    // Add new files
    for (size_t i = 0; i < edit->new_files_.size(); i++) {
      const int level = edit->new_files_[i].first;
      FileMetaData* f = new FileMetaData(edit->new_files_[i].second);
      f->refs = 1;

      // We arrange to automatically compact this file after
      // a certain number of seeks. Let's assume:
      //   (1) One seek costs 10ms
      //   (2) Writing or reading 1MB costs 10ms (100MB/s)
      //   (3) A compaction of 1MB does 25MB of IO:
      //         1MB read from this level
      //         10-12MB read from next level (boundaries may be misaligned)
      //         10-12MB written to next level
      // This implies that 25 seeks cost the same as the compaction
      // of 1MB of data. I.e., one seek costs approximately the
      // same as the compaction of 40KB of data. We are a little
      // conservative and allow approximately one seek for every 16KB
      // of data before triggering a compaction.
      f->allowed_seeks = (f->file_size / 16384);
      if (f->allowed_seeks < 100) f->allowed_seeks = 100;

      levels_[level].deleted_files.erase(f->number);
      levels_[level].added_files->insert(f);
    }
  }
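
  // Editorial note (not part of the original source): the allowed_seeks rule
  // above works out to file_size / 16384 seeks before a file becomes a
  // seek-compaction candidate, with a floor of 100. A full 2 MB table gets
  // 2097152 / 16384 = 128 allowed seeks; any table smaller than about 1.6 MB
  // (100 * 16 KB) simply gets the floor of 100.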
  // Save the current state in *v.
  void SaveTo(Version* v) {
    BySmallestKey cmp;
    cmp.internal_comparator = &vset_->icmp_;
    for (int level = 0; level < config::kNumLevels; level++) {
      // Merge the set of added files with the set of pre-existing files.
      // Drop any deleted files. Store the result in *v.
      const std::vector<FileMetaData*>& base_files = base_->files_[level];
      std::vector<FileMetaData*>::const_iterator base_iter = base_files.begin();
      std::vector<FileMetaData*>::const_iterator base_end = base_files.end();
      const FileSet* added = levels_[level].added_files;
      v->files_[level].reserve(base_files.size() + added->size());
      for (FileSet::const_iterator added_iter = added->begin();
           added_iter != added->end();
           ++added_iter) {
        // Add all smaller files listed in base_
        for (std::vector<FileMetaData*>::const_iterator bpos
                 = std::upper_bound(base_iter, base_end, *added_iter, cmp);
             base_iter != bpos;
             ++base_iter) {
          MaybeAddFile(v, level, *base_iter);
        }

        MaybeAddFile(v, level, *added_iter);
      }

      // Add remaining base files
      for (; base_iter != base_end; ++base_iter) {
        MaybeAddFile(v, level, *base_iter);
      }

#ifndef NDEBUG
      // Make sure there is no overlap in levels > 0
      if (level > 0) {
        for (uint32_t i = 1; i < v->files_[level].size(); i++) {
          const InternalKey& prev_end = v->files_[level][i-1]->largest;
          const InternalKey& this_begin = v->files_[level][i]->smallest;
          if (vset_->icmp_.Compare(prev_end, this_begin) >= 0) {
            fprintf(stderr, "overlapping ranges in same level %s vs. %s\n",
                    prev_end.DebugString().c_str(),
                    this_begin.DebugString().c_str());
            abort();
          }
        }
      }
#endif
    }
  }

  void MaybeAddFile(Version* v, int level, FileMetaData* f) {
    if (levels_[level].deleted_files.count(f->number) > 0) {
      // File is deleted: do nothing
    } else {
      std::vector<FileMetaData*>* files = &v->files_[level];
      if (level > 0 && !files->empty()) {
        // Must not overlap
        assert(vset_->icmp_.Compare((*files)[files->size()-1]->largest,
                                    f->smallest) < 0);
      }
      f->refs++;
      files->push_back(f);
    }
  }
};

VersionSet::VersionSet(const std::string& dbname,
                       const Options* options,
                       TableCache* table_cache,
                       const InternalKeyComparator* cmp)
    : env_(options->env),
      dbname_(dbname),
      options_(options),
      table_cache_(table_cache),
      icmp_(*cmp),
      next_file_number_(2),
      manifest_file_number_(0),  // Filled by Recover()
      last_sequence_(0),
      log_number_(0),
      prev_log_number_(0),
      descriptor_file_(NULL),
      descriptor_log_(NULL),
      dummy_versions_(this),
      current_(NULL) {
  AppendVersion(new Version(this));
}

VersionSet::~VersionSet() {
  current_->Unref();
  assert(dummy_versions_.next_ == &dummy_versions_);  // List must be empty
  delete descriptor_log_;
  delete descriptor_file_;
}

void VersionSet::AppendVersion(Version* v) {
  // Make "v" current
  assert(v->refs_ == 0);
  assert(v != current_);
  if (current_ != NULL) {
    current_->Unref();
  }
  current_ = v;
  v->Ref();

  // Append to linked list
  v->prev_ = dummy_versions_.prev_;
  v->next_ = &dummy_versions_;
  v->prev_->next_ = v;
  v->next_->prev_ = v;
}

Status VersionSet::LogAndApply(VersionEdit* edit, port::Mutex* mu) {
  if (edit->has_log_number_) {
    assert(edit->log_number_ >= log_number_);
    assert(edit->log_number_ < next_file_number_);
  } else {
    edit->SetLogNumber(log_number_);
  }

  if (!edit->has_prev_log_number_) {
    edit->SetPrevLogNumber(prev_log_number_);
  }

  edit->SetNextFile(next_file_number_);
  edit->SetLastSequence(last_sequence_);

  Version* v = new Version(this);
  {
    Builder builder(this, current_);
    builder.Apply(edit);
    builder.SaveTo(v);
  }
  Finalize(v);

  // Initialize new descriptor log file if necessary by creating
  // a temporary file that contains a snapshot of the current version.
  std::string new_manifest_file;
  Status s;
  if (descriptor_log_ == NULL) {
    // No reason to unlock *mu here since we only hit this path in the
    // first call to LogAndApply (when opening the database).
    assert(descriptor_file_ == NULL);
    new_manifest_file = DescriptorFileName(dbname_, manifest_file_number_);
    edit->SetNextFile(next_file_number_);
    s = env_->NewWritableFile(new_manifest_file, &descriptor_file_);
    if (s.ok()) {
      descriptor_log_ = new log::Writer(descriptor_file_);
      s = WriteSnapshot(descriptor_log_);
    }
  }

  // Unlock during expensive MANIFEST log write
  {
    mu->Unlock();

    // Write new record to MANIFEST log
    if (s.ok()) {
      std::string record;
      edit->EncodeTo(&record);
      s = descriptor_log_->AddRecord(record);
      if (s.ok()) {
        s = descriptor_file_->Sync();
      }
    }

    // If we just created a new descriptor file, install it by writing a
    // new CURRENT file that points to it.
    if (s.ok() && !new_manifest_file.empty()) {
      s = SetCurrentFile(env_, dbname_, manifest_file_number_);
    }

    mu->Lock();
  }

  // Install the new version
  if (s.ok()) {
    AppendVersion(v);
    log_number_ = edit->log_number_;
    prev_log_number_ = edit->prev_log_number_;
  } else {
    delete v;
    if (!new_manifest_file.empty()) {
      delete descriptor_log_;
      delete descriptor_file_;
      descriptor_log_ = NULL;
      descriptor_file_ = NULL;
      env_->DeleteFile(new_manifest_file);
    }
  }

  return s;
}
Status VersionSet::Recover() {
  struct LogReporter : public log::Reader::Reporter {
    Status* status;
    virtual void Corruption(size_t bytes, const Status& s) {
      if (this->status->ok()) *this->status = s;
    }
  };

  // Read "CURRENT" file, which contains a pointer to the current manifest file
  std::string current;
  Status s = ReadFileToString(env_, CurrentFileName(dbname_), &current);
  if (!s.ok()) {
    return s;
  }
  if (current.empty() || current[current.size()-1] != '\n') {
    return Status::Corruption("CURRENT file does not end with newline");
  }
  current.resize(current.size() - 1);

  std::string dscname = dbname_ + "/" + current;
  SequentialFile* file;
  s = env_->NewSequentialFile(dscname, &file);
  if (!s.ok()) {
    return s;
  }

  bool have_log_number = false;
  bool have_prev_log_number = false;
  bool have_next_file = false;
  bool have_last_sequence = false;
  uint64_t next_file = 0;
  uint64_t last_sequence = 0;
  uint64_t log_number = 0;
  uint64_t prev_log_number = 0;
  Builder builder(this, current_);

  {
    LogReporter reporter;
    reporter.status = &s;
    log::Reader reader(file, &reporter, true/*checksum*/, 0/*initial_offset*/);
    Slice record;
    std::string scratch;
    while (reader.ReadRecord(&record, &scratch) && s.ok()) {
      VersionEdit edit;
      s = edit.DecodeFrom(record);
      if (s.ok()) {
        if (edit.has_comparator_ &&
            edit.comparator_ != icmp_.user_comparator()->Name()) {
          s = Status::InvalidArgument(
              edit.comparator_ + " does not match existing comparator ",
              icmp_.user_comparator()->Name());
        }
      }

      if (s.ok()) {
        builder.Apply(&edit);
      }

      if (edit.has_log_number_) {
        log_number = edit.log_number_;
        have_log_number = true;
      }

      if (edit.has_prev_log_number_) {
        prev_log_number = edit.prev_log_number_;
        have_prev_log_number = true;
      }

      if (edit.has_next_file_number_) {
        next_file = edit.next_file_number_;
        have_next_file = true;
      }

      if (edit.has_last_sequence_) {
        last_sequence = edit.last_sequence_;
        have_last_sequence = true;
      }
    }
  }
  delete file;
  file = NULL;

  if (s.ok()) {
    if (!have_next_file) {
      s = Status::Corruption("no meta-nextfile entry in descriptor");
    } else if (!have_log_number) {
      s = Status::Corruption("no meta-lognumber entry in descriptor");
    } else if (!have_last_sequence) {
      s = Status::Corruption("no last-sequence-number entry in descriptor");
    }

    if (!have_prev_log_number) {
      prev_log_number = 0;
    }

    MarkFileNumberUsed(prev_log_number);
    MarkFileNumberUsed(log_number);
  }

  if (s.ok()) {
    Version* v = new Version(this);
    builder.SaveTo(v);
    // Install recovered version
    Finalize(v);
    AppendVersion(v);
    manifest_file_number_ = next_file;
    next_file_number_ = next_file + 1;
    last_sequence_ = last_sequence;
    log_number_ = log_number;
    prev_log_number_ = prev_log_number;
  }

  return s;
}
void VersionSet::MarkFileNumberUsed(uint64_t number) {
  if (next_file_number_ <= number) {
    next_file_number_ = number + 1;
  }
}

void VersionSet::Finalize(Version* v) {
  // Precomputed best level for next compaction
  int best_level = -1;
  double best_score = -1;

  for (int level = 0; level < config::kNumLevels-1; level++) {
    double score;
    if (level == 0) {
      // We treat level-0 specially by bounding the number of files
      // instead of number of bytes for two reasons:
      //
      // (1) With larger write-buffer sizes, it is nice not to do too
      // many level-0 compactions.
      //
      // (2) The files in level-0 are merged on every read and
      // therefore we wish to avoid too many files when the individual
      // file size is small (perhaps because of a small write-buffer
      // setting, or very high compression ratios, or lots of
      // overwrites/deletions).
      score = v->files_[level].size() /
          static_cast<double>(config::kL0_CompactionTrigger);
    } else {
      // Compute the ratio of current size to size limit.
      const uint64_t level_bytes = TotalFileSize(v->files_[level]);
      score = static_cast<double>(level_bytes) / MaxBytesForLevel(level);
    }

    if (score > best_score) {
      best_level = level;
      best_score = score;
    }
  }

  v->compaction_level_ = best_level;
  v->compaction_score_ = best_score;
}
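
// Editorial note (not part of the original source): Finalize records only the
// single highest-scoring level, and a score >= 1 means that level is over
// budget. For example, level-0 with 4 files scores 1.0 against
// config::kL0_CompactionTrigger (4 in stock LevelDB), while a level-1 holding
// 15 MB scores 1.5 against its 10 MB limit, so in that state level-1 would be
// chosen for the next size-triggered compaction.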
Status VersionSet::WriteSnapshot(log::Writer* log) {
  // TODO: Break up into multiple records to reduce memory usage on recovery?

  // Save metadata
  VersionEdit edit;
  edit.SetComparatorName(icmp_.user_comparator()->Name());

  // Save compaction pointers
  for (int level = 0; level < config::kNumLevels; level++) {
    if (!compact_pointer_[level].empty()) {
      InternalKey key;
      key.DecodeFrom(compact_pointer_[level]);
      edit.SetCompactPointer(level, key);
    }
  }

  // Save files
  for (int level = 0; level < config::kNumLevels; level++) {
    const std::vector<FileMetaData*>& files = current_->files_[level];
    for (size_t i = 0; i < files.size(); i++) {
      const FileMetaData* f = files[i];
      edit.AddFile(level, f->number, f->file_size, f->smallest, f->largest);
    }
  }

  std::string record;
  edit.EncodeTo(&record);
  return log->AddRecord(record);
}

int VersionSet::NumLevelFiles(int level) const {
  assert(level >= 0);
  assert(level < config::kNumLevels);
  return current_->files_[level].size();
}

const char* VersionSet::LevelSummary(LevelSummaryStorage* scratch) const {
  // Update code if kNumLevels changes
  assert(config::kNumLevels == 7);
  snprintf(scratch->buffer, sizeof(scratch->buffer),
           "files[ %d %d %d %d %d %d %d ]",
           int(current_->files_[0].size()),
           int(current_->files_[1].size()),
           int(current_->files_[2].size()),
           int(current_->files_[3].size()),
           int(current_->files_[4].size()),
           int(current_->files_[5].size()),
           int(current_->files_[6].size()));
  return scratch->buffer;
}

uint64_t VersionSet::ApproximateOffsetOf(Version* v, const InternalKey& ikey) {
  uint64_t result = 0;
  for (int level = 0; level < config::kNumLevels; level++) {
    const std::vector<FileMetaData*>& files = v->files_[level];
    for (size_t i = 0; i < files.size(); i++) {
      if (icmp_.Compare(files[i]->largest, ikey) <= 0) {
        // Entire file is before "ikey", so just add the file size
        result += files[i]->file_size;
      } else if (icmp_.Compare(files[i]->smallest, ikey) > 0) {
        // Entire file is after "ikey", so ignore
        if (level > 0) {
          // Files other than level 0 are sorted by meta->smallest, so
          // no further files in this level will contain data for
          // "ikey".
          break;
        }
      } else {
        // "ikey" falls in the range for this table. Add the
        // approximate offset of "ikey" within the table.
        Table* tableptr;
        Iterator* iter = table_cache_->NewIterator(
            ReadOptions(), files[i]->number, files[i]->file_size, &tableptr);
        if (tableptr != NULL) {
          result += tableptr->ApproximateOffsetOf(ikey.Encode());
        }
        delete iter;
      }
    }
  }
  return result;
}

void VersionSet::AddLiveFiles(std::set<uint64_t>* live) {
  for (Version* v = dummy_versions_.next_;
       v != &dummy_versions_;
       v = v->next_) {
    for (int level = 0; level < config::kNumLevels; level++) {
      const std::vector<FileMetaData*>& files = v->files_[level];
      for (size_t i = 0; i < files.size(); i++) {
        live->insert(files[i]->number);
      }
    }
  }
}

int64_t VersionSet::NumLevelBytes(int level) const {
  assert(level >= 0);
  assert(level < config::kNumLevels);
  return TotalFileSize(current_->files_[level]);
}

int64_t VersionSet::MaxNextLevelOverlappingBytes() {
  int64_t result = 0;
  std::vector<FileMetaData*> overlaps;
  for (int level = 1; level < config::kNumLevels - 1; level++) {
    for (size_t i = 0; i < current_->files_[level].size(); i++) {
      const FileMetaData* f = current_->files_[level][i];
      current_->GetOverlappingInputs(level+1, &f->smallest, &f->largest,
                                     &overlaps);
      const int64_t sum = TotalFileSize(overlaps);
      if (sum > result) {
        result = sum;
      }
    }
  }
  return result;
}

// Stores the minimal range that covers all entries in inputs in
// *smallest, *largest.
// REQUIRES: inputs is not empty
void VersionSet::GetRange(const std::vector<FileMetaData*>& inputs,
                          InternalKey* smallest,
                          InternalKey* largest) {
  assert(!inputs.empty());
  smallest->Clear();
  largest->Clear();
  for (size_t i = 0; i < inputs.size(); i++) {
    FileMetaData* f = inputs[i];
    if (i == 0) {
      *smallest = f->smallest;
      *largest = f->largest;
    } else {
      if (icmp_.Compare(f->smallest, *smallest) < 0) {
        *smallest = f->smallest;
      }
      if (icmp_.Compare(f->largest, *largest) > 0) {
        *largest = f->largest;
      }
    }
  }
}

// Stores the minimal range that covers all entries in inputs1 and inputs2
// in *smallest, *largest.
// REQUIRES: inputs is not empty
void VersionSet::GetRange2(const std::vector<FileMetaData*>& inputs1,
                           const std::vector<FileMetaData*>& inputs2,
                           InternalKey* smallest,
                           InternalKey* largest) {
  std::vector<FileMetaData*> all = inputs1;
  all.insert(all.end(), inputs2.begin(), inputs2.end());
  GetRange(all, smallest, largest);
}

Iterator* VersionSet::MakeInputIterator(Compaction* c) {
  ReadOptions options;
  options.verify_checksums = options_->paranoid_checks;
  options.fill_cache = false;

  // Level-0 files have to be merged together. For other levels,
  // we will make a concatenating iterator per level.
  // TODO(opt): use concatenating iterator for level-0 if there is no overlap
  const int space = (c->level() == 0 ? c->inputs_[0].size() + 1 : 2);
  Iterator** list = new Iterator*[space];
  int num = 0;
  for (int which = 0; which < 2; which++) {
    if (!c->inputs_[which].empty()) {
      if (c->level() + which == 0) {
        const std::vector<FileMetaData*>& files = c->inputs_[which];
        for (size_t i = 0; i < files.size(); i++) {
          list[num++] = table_cache_->NewIterator(
              options, files[i]->number, files[i]->file_size);
        }
      } else {
        // Create concatenating iterator for the files from this level
        list[num++] = NewTwoLevelIterator(
            new Version::LevelFileNumIterator(icmp_, &c->inputs_[which]),
            &GetFileIterator, table_cache_, options);
      }
    }
  }
  assert(num <= space);
  Iterator* result = NewMergingIterator(&icmp_, list, num);
  delete[] list;
  return result;
}

Compaction* VersionSet::PickCompaction() {
  Compaction* c;
  int level;

  // We prefer compactions triggered by too much data in a level over
  // the compactions triggered by seeks.
  const bool size_compaction = (current_->compaction_score_ >= 1);
  const bool seek_compaction = (current_->file_to_compact_ != NULL);
  if (size_compaction) {
    level = current_->compaction_level_;
    assert(level >= 0);
    assert(level+1 < config::kNumLevels);
    c = new Compaction(level);

    // Pick the first file that comes after compact_pointer_[level]
    for (size_t i = 0; i < current_->files_[level].size(); i++) {
      FileMetaData* f = current_->files_[level][i];
      if (compact_pointer_[level].empty() ||
          icmp_.Compare(f->largest.Encode(), compact_pointer_[level]) > 0) {
        c->inputs_[0].push_back(f);
        break;
      }
    }
    if (c->inputs_[0].empty()) {
      // Wrap-around to the beginning of the key space
      c->inputs_[0].push_back(current_->files_[level][0]);
    }
  } else if (seek_compaction) {
    level = current_->file_to_compact_level_;
    c = new Compaction(level);
    c->inputs_[0].push_back(current_->file_to_compact_);
  } else {
    return NULL;
  }

  c->input_version_ = current_;
  c->input_version_->Ref();

  // Files in level 0 may overlap each other, so pick up all overlapping ones
  if (level == 0) {
    InternalKey smallest, largest;
    GetRange(c->inputs_[0], &smallest, &largest);
    // Note that the next call will discard the file we placed in
    // c->inputs_[0] earlier and replace it with an overlapping set
    // which will include the picked file.
    current_->GetOverlappingInputs(0, &smallest, &largest, &c->inputs_[0]);
    assert(!c->inputs_[0].empty());
  }

  SetupOtherInputs(c);

  return c;
}
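
// Editorial note (not part of the original source): PickCompaction prefers
// size-triggered compactions (compaction_score_ >= 1) over seek-triggered
// ones, and compact_pointer_[level] makes size compactions rotate through a
// level's key space round-robin style, so every file eventually gets its turn.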
void VersionSet::SetupOtherInputs(Compaction* c) {
  const int level = c->level();
  InternalKey smallest, largest;
  GetRange(c->inputs_[0], &smallest, &largest);

  current_->GetOverlappingInputs(level+1, &smallest, &largest, &c->inputs_[1]);

  // Get entire range covered by compaction
  InternalKey all_start, all_limit;
  GetRange2(c->inputs_[0], c->inputs_[1], &all_start, &all_limit);

  // See if we can grow the number of inputs in "level" without
  // changing the number of "level+1" files we pick up.
  if (!c->inputs_[1].empty()) {
    std::vector<FileMetaData*> expanded0;
    current_->GetOverlappingInputs(level, &all_start, &all_limit, &expanded0);
    if (expanded0.size() > c->inputs_[0].size()) {
      InternalKey new_start, new_limit;
      GetRange(expanded0, &new_start, &new_limit);
      std::vector<FileMetaData*> expanded1;
      current_->GetOverlappingInputs(level+1, &new_start, &new_limit,
                                     &expanded1);
      if (expanded1.size() == c->inputs_[1].size()) {
        Log(options_->info_log,
            "Expanding@%d %d+%d to %d+%d\n",
            level,
            int(c->inputs_[0].size()),
            int(c->inputs_[1].size()),
            int(expanded0.size()),
            int(expanded1.size()));
        smallest = new_start;
        largest = new_limit;
        c->inputs_[0] = expanded0;
        c->inputs_[1] = expanded1;
        GetRange2(c->inputs_[0], c->inputs_[1], &all_start, &all_limit);
      }
    }
  }

  // Compute the set of grandparent files that overlap this compaction
  // (parent == level+1; grandparent == level+2)
  if (level + 2 < config::kNumLevels) {
    current_->GetOverlappingInputs(level + 2, &all_start, &all_limit,
                                   &c->grandparents_);
  }

  if (false) {
    Log(options_->info_log, "Compacting %d '%s' .. '%s'",
        level,
        smallest.DebugString().c_str(),
        largest.DebugString().c_str());
  }

  // Update the place where we will do the next compaction for this level.
  // We update this immediately instead of waiting for the VersionEdit
  // to be applied so that if the compaction fails, we will try a different
  // key range next time.
  compact_pointer_[level] = largest.Encode().ToString();
  c->edit_.SetCompactPointer(level, largest);
}

Compaction* VersionSet::CompactRange(
    int level,
    const InternalKey* begin,
    const InternalKey* end) {
  std::vector<FileMetaData*> inputs;
  current_->GetOverlappingInputs(level, begin, end, &inputs);
  if (inputs.empty()) {
    return NULL;
  }

  // Avoid compacting too much in one shot in case the range is large.
  const uint64_t limit = MaxFileSizeForLevel(level);
  uint64_t total = 0;
  for (int i = 0; i < inputs.size(); i++) {
    uint64_t s = inputs[i]->file_size;
    total += s;
    if (total >= limit) {
      inputs.resize(i + 1);
      break;
    }
  }

  Compaction* c = new Compaction(level);
  c->input_version_ = current_;
  c->input_version_->Ref();
  c->inputs_[0] = inputs;
  SetupOtherInputs(c);
  return c;
}

Compaction::Compaction(int level)
    : level_(level),
      max_output_file_size_(MaxFileSizeForLevel(level)),
      input_version_(NULL),
      grandparent_index_(0),
      seen_key_(false),
      overlapped_bytes_(0) {
  for (int i = 0; i < config::kNumLevels; i++) {
    level_ptrs_[i] = 0;
  }
}

Compaction::~Compaction() {
  if (input_version_ != NULL) {
    input_version_->Unref();
  }
}

bool Compaction::IsTrivialMove() const {
  // Avoid a move if there is lots of overlapping grandparent data.
  // Otherwise, the move could create a parent file that will require
  // a very expensive merge later on.
  return (num_input_files(0) == 1 &&
          num_input_files(1) == 0 &&
          TotalFileSize(grandparents_) <= kMaxGrandParentOverlapBytes);
}

void Compaction::AddInputDeletions(VersionEdit* edit) {
  for (int which = 0; which < 2; which++) {
    for (size_t i = 0; i < inputs_[which].size(); i++) {
      edit->DeleteFile(level_ + which, inputs_[which][i]->number);
    }
  }
}

bool Compaction::IsBaseLevelForKey(const Slice& user_key) {
  // Maybe use binary search to find right entry instead of linear search?
  const Comparator* user_cmp = input_version_->vset_->icmp_.user_comparator();
  for (int lvl = level_ + 2; lvl < config::kNumLevels; lvl++) {
    const std::vector<FileMetaData*>& files = input_version_->files_[lvl];
    for (; level_ptrs_[lvl] < files.size(); ) {
      FileMetaData* f = files[level_ptrs_[lvl]];
      if (user_cmp->Compare(user_key, f->largest.user_key()) <= 0) {
        // We've advanced far enough
        if (user_cmp->Compare(user_key, f->smallest.user_key()) >= 0) {
          // Key falls in this file's range, so definitely not base level
          return false;
        }
        break;
      }
      level_ptrs_[lvl]++;
    }
  }
  return true;
}

bool Compaction::ShouldStopBefore(const Slice& internal_key) {
  // Scan to find earliest grandparent file that contains key.
  const InternalKeyComparator* icmp = &input_version_->vset_->icmp_;
  while (grandparent_index_ < grandparents_.size() &&
      icmp->Compare(internal_key,
                    grandparents_[grandparent_index_]->largest.Encode()) > 0) {
    if (seen_key_) {
      overlapped_bytes_ += grandparents_[grandparent_index_]->file_size;
    }
    grandparent_index_++;
  }
  seen_key_ = true;

  if (overlapped_bytes_ > kMaxGrandParentOverlapBytes) {
    // Too much overlap for current output; start new output
    overlapped_bytes_ = 0;
    return true;
  } else {
    return false;
  }
}
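
// Editorial note (not part of the original source): ShouldStopBefore is what
// enforces kMaxGrandParentOverlapBytes on the output side of a compaction.
// Once the keys written to the current output file would overlap more than
// ~20 MB of level+2 data, the caller closes that output and starts a new one,
// so a later compaction of the resulting file stays cheap.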
void Compaction::ReleaseInputs() {
  if (input_version_ != NULL) {
    input_version_->Unref();
    input_version_ = NULL;
  }
}

}  // namespace leveldb