Group members: Yao Kaiwen (kevinyao0901), Jiang Jiaqi

// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#include "db/version_set.h"

#include <algorithm>
#include <stdio.h>

#include "db/filename.h"
#include "db/log_reader.h"
#include "db/log_writer.h"
#include "db/memtable.h"
#include "db/table_cache.h"
#include "leveldb/env.h"
#include "leveldb/table_builder.h"
#include "table/merger.h"
#include "table/two_level_iterator.h"
#include "util/coding.h"
#include "util/logging.h"

namespace leveldb {

static const int kTargetFileSize = 2 * 1048576;

// Maximum bytes of overlaps in grandparent (i.e., level+2) before we
// stop building a single file in a level->level+1 compaction.
static const int64_t kMaxGrandParentOverlapBytes = 10 * kTargetFileSize;

static double MaxBytesForLevel(int level) {
  // Note: the result for level zero is not really used since we set
  // the level-0 compaction threshold based on number of files.
  double result = 10 * 1048576.0;  // Result for both level-0 and level-1
  while (level > 1) {
    result *= 10;
    level--;
  }
  return result;
}
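// Worked values for MaxBytesForLevel() above (illustrative note, added):
// the loop computes 10MB * 10^(level-1) for level >= 1, so roughly
//   level-1: 10MB, level-2: 100MB, level-3: 1GB, level-4: 10GB, ...
// e.g. MaxBytesForLevel(2) == 100 * 1048576.0.  The level-0 result is
// unused; level-0 compactions are triggered by file count instead.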
static uint64_t MaxFileSizeForLevel(int level) {
  return kTargetFileSize;  // We could vary per level to reduce number of files?
}

static int64_t TotalFileSize(const std::vector<FileMetaData*>& files) {
  int64_t sum = 0;
  for (size_t i = 0; i < files.size(); i++) {
    sum += files[i]->file_size;
  }
  return sum;
}

namespace {
std::string IntSetToString(const std::set<uint64_t>& s) {
  std::string result = "{";
  for (std::set<uint64_t>::const_iterator it = s.begin();
       it != s.end();
       ++it) {
    result += (result.size() > 1) ? "," : "";
    result += NumberToString(*it);
  }
  result += "}";
  return result;
}
}

Version::~Version() {
  assert(refs_ == 0);

  // Remove from linked list
  prev_->next_ = next_;
  next_->prev_ = prev_;

  // Drop references to files
  for (int level = 0; level < config::kNumLevels; level++) {
    for (size_t i = 0; i < files_[level].size(); i++) {
      FileMetaData* f = files_[level][i];
      assert(f->refs > 0);
      f->refs--;
      if (f->refs <= 0) {
        delete f;
      }
    }
  }
}

int FindFile(const InternalKeyComparator& icmp,
             const std::vector<FileMetaData*>& files,
             const Slice& key) {
  uint32_t left = 0;
  uint32_t right = files.size();
  while (left < right) {
    uint32_t mid = (left + right) / 2;
    const FileMetaData* f = files[mid];
    if (icmp.InternalKeyComparator::Compare(f->largest.Encode(), key) < 0) {
      // Key at "mid.largest" is < "target".  Therefore all
      // files at or before "mid" are uninteresting.
      left = mid + 1;
    } else {
      // Key at "mid.largest" is >= "target".  Therefore all files
      // after "mid" are uninteresting.
      right = mid;
    }
  }
  return right;
}
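// Illustrative sketch (added; not part of the original file): FindFile()
// returns the index of the first file whose largest key is >= "key", or
// files.size() if the key is past every file.  For the sorted, disjoint
// files of a level > 0 that single index is the only candidate file, which
// is what the hypothetical helper below checks (both the internal key and
// the corresponding user key are assumed to be supplied by the caller).
static bool LevelMightContainUserKey(const InternalKeyComparator& icmp,
                                     const std::vector<FileMetaData*>& files,
                                     const Slice& internal_key,
                                     const Slice& user_key) {
  const uint32_t index = FindFile(icmp, files, internal_key);
  if (index >= files.size()) {
    return false;  // user_key is after the largest key of every file
  }
  // The candidate file can only contain user_key if user_key is not
  // before that file's smallest key.
  return icmp.user_comparator()->Compare(
             user_key, files[index]->smallest.user_key()) >= 0;
}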
static bool AfterFile(const Comparator* ucmp,
                      const Slice* user_key, const FileMetaData* f) {
  // NULL user_key occurs before all keys and is therefore never after *f
  return (user_key != NULL &&
          ucmp->Compare(*user_key, f->largest.user_key()) > 0);
}

static bool BeforeFile(const Comparator* ucmp,
                       const Slice* user_key, const FileMetaData* f) {
  // NULL user_key occurs after all keys and is therefore never before *f
  return (user_key != NULL &&
          ucmp->Compare(*user_key, f->smallest.user_key()) < 0);
}

bool SomeFileOverlapsRange(
    const InternalKeyComparator& icmp,
    bool disjoint_sorted_files,
    const std::vector<FileMetaData*>& files,
    const Slice* smallest_user_key,
    const Slice* largest_user_key) {
  const Comparator* ucmp = icmp.user_comparator();
  if (!disjoint_sorted_files) {
    // Need to check against all files
    for (int i = 0; i < files.size(); i++) {
      const FileMetaData* f = files[i];
      if (AfterFile(ucmp, smallest_user_key, f) ||
          BeforeFile(ucmp, largest_user_key, f)) {
        // No overlap
      } else {
        return true;  // Overlap
      }
    }
    return false;
  }

  // Binary search over file list
  uint32_t index = 0;
  if (smallest_user_key != NULL) {
    // Find the earliest possible internal key for smallest_user_key
    InternalKey small(*smallest_user_key, kMaxSequenceNumber, kValueTypeForSeek);
    index = FindFile(icmp, files, small.Encode());
  }

  if (index >= files.size()) {
    // beginning of range is after all files, so no overlap.
    return false;
  }

  return !BeforeFile(ucmp, largest_user_key, files[index]);
}

// An internal iterator.  For a given version/level pair, yields
// information about the files in the level.  For a given entry, key()
// is the largest key that occurs in the file, and value() is a
// 16-byte value containing the file number and file size, both
// encoded using EncodeFixed64.
class Version::LevelFileNumIterator : public Iterator {
 public:
  LevelFileNumIterator(const InternalKeyComparator& icmp,
                       const std::vector<FileMetaData*>* flist)
      : icmp_(icmp),
        flist_(flist),
        index_(flist->size()) {  // Marks as invalid
  }
  virtual bool Valid() const {
    return index_ < flist_->size();
  }
  virtual void Seek(const Slice& target) {
    index_ = FindFile(icmp_, *flist_, target);
  }
  virtual void SeekToFirst() { index_ = 0; }
  virtual void SeekToLast() {
    index_ = flist_->empty() ? 0 : flist_->size() - 1;
  }
  virtual void Next() {
    assert(Valid());
    index_++;
  }
  virtual void Prev() {
    assert(Valid());
    if (index_ == 0) {
      index_ = flist_->size();  // Marks as invalid
    } else {
      index_--;
    }
  }
  Slice key() const {
    assert(Valid());
    return (*flist_)[index_]->largest.Encode();
  }
  Slice value() const {
    assert(Valid());
    EncodeFixed64(value_buf_, (*flist_)[index_]->number);
    EncodeFixed64(value_buf_+8, (*flist_)[index_]->file_size);
    return Slice(value_buf_, sizeof(value_buf_));
  }
  virtual Status status() const { return Status::OK(); }

 private:
  const InternalKeyComparator icmp_;
  const std::vector<FileMetaData*>* const flist_;
  uint32_t index_;

  // Backing store for value().  Holds the file number and size.
  mutable char value_buf_[16];
};

static Iterator* GetFileIterator(void* arg,
                                 const ReadOptions& options,
                                 const Slice& file_value) {
  TableCache* cache = reinterpret_cast<TableCache*>(arg);
  if (file_value.size() != 16) {
    return NewErrorIterator(
        Status::Corruption("FileReader invoked with unexpected value"));
  } else {
    return cache->NewIterator(options,
                              DecodeFixed64(file_value.data()),
                              DecodeFixed64(file_value.data() + 8));
  }
}

Iterator* Version::NewConcatenatingIterator(const ReadOptions& options,
                                            int level) const {
  return NewTwoLevelIterator(
      new LevelFileNumIterator(vset_->icmp_, &files_[level]),
      &GetFileIterator, vset_->table_cache_, options);
}

void Version::AddIterators(const ReadOptions& options,
                           std::vector<Iterator*>* iters) {
  // Merge all level zero files together since they may overlap
  for (size_t i = 0; i < files_[0].size(); i++) {
    iters->push_back(
        vset_->table_cache_->NewIterator(
            options, files_[0][i]->number, files_[0][i]->file_size));
  }

  // For levels > 0, we can use a concatenating iterator that sequentially
  // walks through the non-overlapping files in the level, opening them
  // lazily.
  for (int level = 1; level < config::kNumLevels; level++) {
    if (!files_[level].empty()) {
      iters->push_back(NewConcatenatingIterator(options, level));
    }
  }
}

// If "*iter" points at a value or deletion for user_key, store
// either the value, or a NotFound error and return true.
// Else return false.
static bool GetValue(Iterator* iter, const Slice& user_key,
                     std::string* value,
                     Status* s) {
  if (!iter->Valid()) {
    return false;
  }
  ParsedInternalKey parsed_key;
  if (!ParseInternalKey(iter->key(), &parsed_key)) {
    *s = Status::Corruption("corrupted key for ", user_key);
    return true;
  }
  if (parsed_key.user_key != user_key) {
    return false;
  }
  switch (parsed_key.type) {
    case kTypeDeletion:
      *s = Status::NotFound(Slice());  // Use an empty error message for speed
      break;
    case kTypeValue: {
      Slice v = iter->value();
      value->assign(v.data(), v.size());
      break;
    }
  }
  return true;
}

static bool NewestFirst(FileMetaData* a, FileMetaData* b) {
  return a->number > b->number;
}

Status Version::Get(const ReadOptions& options,
                    const LookupKey& k,
                    std::string* value,
                    GetStats* stats) {
  Slice ikey = k.internal_key();
  Slice user_key = k.user_key();
  const Comparator* ucmp = vset_->icmp_.user_comparator();
  Status s;

  stats->seek_file = NULL;
  stats->seek_file_level = -1;
  FileMetaData* last_file_read = NULL;
  int last_file_read_level = -1;

  // We can search level-by-level since entries never hop across
  // levels.  Therefore we are guaranteed that if we find data
  // in a smaller level, later levels are irrelevant.
  std::vector<FileMetaData*> tmp;
  FileMetaData* tmp2;
  for (int level = 0; level < config::kNumLevels; level++) {
    size_t num_files = files_[level].size();
    if (num_files == 0) continue;

    // Get the list of files to search in this level
    FileMetaData* const* files = &files_[level][0];
    if (level == 0) {
      // Level-0 files may overlap each other.  Find all files that
      // overlap user_key and process them in order from newest to oldest.
      tmp.reserve(num_files);
      for (uint32_t i = 0; i < num_files; i++) {
        FileMetaData* f = files[i];
        if (ucmp->Compare(user_key, f->smallest.user_key()) >= 0 &&
            ucmp->Compare(user_key, f->largest.user_key()) <= 0) {
          tmp.push_back(f);
        }
      }
      if (tmp.empty()) continue;

      std::sort(tmp.begin(), tmp.end(), NewestFirst);
      files = &tmp[0];
      num_files = tmp.size();
    } else {
      // Binary search to find earliest index whose largest key >= ikey.
      uint32_t index = FindFile(vset_->icmp_, files_[level], ikey);
      if (index >= num_files) {
        files = NULL;
        num_files = 0;
      } else {
        tmp2 = files[index];
        if (ucmp->Compare(user_key, tmp2->smallest.user_key()) < 0) {
          // All of "tmp2" is past any data for user_key
          files = NULL;
          num_files = 0;
        } else {
          files = &tmp2;
          num_files = 1;
        }
      }
    }

    for (uint32_t i = 0; i < num_files; ++i) {
      if (last_file_read != NULL && stats->seek_file == NULL) {
        // We have had more than one seek for this read.  Charge the 1st file.
        stats->seek_file = last_file_read;
        stats->seek_file_level = last_file_read_level;
      }

      FileMetaData* f = files[i];
      last_file_read = f;
      last_file_read_level = level;

      Iterator* iter = vset_->table_cache_->NewIterator(
          options,
          f->number,
          f->file_size);
      iter->Seek(ikey);
      const bool done = GetValue(iter, user_key, value, &s);
      if (!iter->status().ok()) {
        s = iter->status();
        delete iter;
        return s;
      } else {
        delete iter;
        if (done) {
          return s;
        }
      }
    }
  }

  return Status::NotFound(Slice());  // Use an empty error message for speed
}

bool Version::UpdateStats(const GetStats& stats) {
  FileMetaData* f = stats.seek_file;
  if (f != NULL) {
    f->allowed_seeks--;
    if (f->allowed_seeks <= 0 && file_to_compact_ == NULL) {
      file_to_compact_ = f;
      file_to_compact_level_ = stats.seek_file_level;
      return true;
    }
  }
  return false;
}
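// Illustrative usage sketch (added): the seek-compaction mechanism is driven
// by the caller (db_impl.cc in LevelDB); the exact call sites may differ, and
// MaybeScheduleCompaction() below is only a stand-in name for the caller's
// scheduling hook.  A read first calls Get(), then feeds the returned stats
// back through UpdateStats(); once a file has used up its allowed_seeks
// budget, UpdateStats() returns true and a compaction of that file is queued.
//
//   Version::GetStats stats;
//   Status s = current->Get(options, lkey, &value, &stats);
//   if (current->UpdateStats(stats)) {
//     MaybeScheduleCompaction();  // hypothetical caller-side hook
//   }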
void Version::Ref() {
  ++refs_;
}

void Version::Unref() {
  assert(this != &vset_->dummy_versions_);
  assert(refs_ >= 1);
  --refs_;
  if (refs_ == 0) {
    delete this;
  }
}

bool Version::OverlapInLevel(int level,
                             const Slice* smallest_user_key,
                             const Slice* largest_user_key) {
  return SomeFileOverlapsRange(vset_->icmp_, (level > 0), files_[level],
                               smallest_user_key, largest_user_key);
}

int Version::PickLevelForMemTableOutput(
    const Slice& smallest_user_key,
    const Slice& largest_user_key) {
  int level = 0;
  if (!OverlapInLevel(0, &smallest_user_key, &largest_user_key)) {
    // Push to next level if there is no overlap in next level,
    // and the #bytes overlapping in the level after that are limited.
    InternalKey start(smallest_user_key, kMaxSequenceNumber, kValueTypeForSeek);
    InternalKey limit(largest_user_key, 0, static_cast<ValueType>(0));
    std::vector<FileMetaData*> overlaps;
    while (level < config::kMaxMemCompactLevel) {
      if (OverlapInLevel(level + 1, &smallest_user_key, &largest_user_key)) {
        break;
      }
      GetOverlappingInputs(level + 2, &start, &limit, &overlaps);
      const int64_t sum = TotalFileSize(overlaps);
      if (sum > kMaxGrandParentOverlapBytes) {
        break;
      }
      level++;
    }
  }
  return level;
}
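// Illustrative note (added): with the default config::kMaxMemCompactLevel of
// 2, a freshly flushed memtable whose range does not overlap level-0 may be
// placed directly at level-1 or level-2, as long as the next level has no
// overlap and the grandparent overlap stays at or under
// kMaxGrandParentOverlapBytes (10 * 2MB = 20MB with the constants above).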
// Store in "*inputs" all files in "level" that overlap [begin,end]
void Version::GetOverlappingInputs(
    int level,
    const InternalKey* begin,
    const InternalKey* end,
    std::vector<FileMetaData*>* inputs) {
  inputs->clear();
  Slice user_begin, user_end;
  if (begin != NULL) {
    user_begin = begin->user_key();
  }
  if (end != NULL) {
    user_end = end->user_key();
  }
  const Comparator* user_cmp = vset_->icmp_.user_comparator();
  for (size_t i = 0; i < files_[level].size(); i++) {
    FileMetaData* f = files_[level][i];
    if (begin != NULL &&
        user_cmp->Compare(f->largest.user_key(), user_begin) < 0) {
      // "f" is completely before specified range; skip it
    } else if (end != NULL &&
               user_cmp->Compare(f->smallest.user_key(), user_end) > 0) {
      // "f" is completely after specified range; skip it
    } else {
      inputs->push_back(f);
    }
  }
}

std::string Version::DebugString() const {
  std::string r;
  for (int level = 0; level < config::kNumLevels; level++) {
    // E.g.,
    //   --- level 1 ---
    //   17:123['a' .. 'd']
    //   20:43['e' .. 'g']
    r.append("--- level ");
    AppendNumberTo(&r, level);
    r.append(" ---\n");
    const std::vector<FileMetaData*>& files = files_[level];
    for (size_t i = 0; i < files.size(); i++) {
      r.push_back(' ');
      AppendNumberTo(&r, files[i]->number);
      r.push_back(':');
      AppendNumberTo(&r, files[i]->file_size);
      r.append("[");
      r.append(files[i]->smallest.DebugString());
      r.append(" .. ");
      r.append(files[i]->largest.DebugString());
      r.append("]\n");
    }
  }
  return r;
}

// A helper class so we can efficiently apply a whole sequence
// of edits to a particular state without creating intermediate
// Versions that contain full copies of the intermediate state.
class VersionSet::Builder {
 private:
  // Helper to sort by v->files_[file_number].smallest
  struct BySmallestKey {
    const InternalKeyComparator* internal_comparator;

    bool operator()(FileMetaData* f1, FileMetaData* f2) const {
      int r = internal_comparator->Compare(f1->smallest, f2->smallest);
      if (r != 0) {
        return (r < 0);
      } else {
        // Break ties by file number
        return (f1->number < f2->number);
      }
    }
  };

  typedef std::set<FileMetaData*, BySmallestKey> FileSet;
  struct LevelState {
    std::set<uint64_t> deleted_files;
    FileSet* added_files;
  };

  VersionSet* vset_;
  Version* base_;
  LevelState levels_[config::kNumLevels];

 public:
  // Initialize a builder with the files from *base and other info from *vset
  Builder(VersionSet* vset, Version* base)
      : vset_(vset),
        base_(base) {
    base_->Ref();
    BySmallestKey cmp;
    cmp.internal_comparator = &vset_->icmp_;
    for (int level = 0; level < config::kNumLevels; level++) {
      levels_[level].added_files = new FileSet(cmp);
    }
  }

  ~Builder() {
    for (int level = 0; level < config::kNumLevels; level++) {
      const FileSet* added = levels_[level].added_files;
      std::vector<FileMetaData*> to_unref;
      to_unref.reserve(added->size());
      for (FileSet::const_iterator it = added->begin();
           it != added->end(); ++it) {
        to_unref.push_back(*it);
      }
      delete added;
      for (uint32_t i = 0; i < to_unref.size(); i++) {
        FileMetaData* f = to_unref[i];
        f->refs--;
        if (f->refs <= 0) {
          delete f;
        }
      }
    }
    base_->Unref();
  }

  // Apply all of the edits in *edit to the current state.
  void Apply(VersionEdit* edit) {
    // Update compaction pointers
    for (size_t i = 0; i < edit->compact_pointers_.size(); i++) {
      const int level = edit->compact_pointers_[i].first;
      vset_->compact_pointer_[level] =
          edit->compact_pointers_[i].second.Encode().ToString();
    }

    // Delete files
    const VersionEdit::DeletedFileSet& del = edit->deleted_files_;
    for (VersionEdit::DeletedFileSet::const_iterator iter = del.begin();
         iter != del.end();
         ++iter) {
      const int level = iter->first;
      const uint64_t number = iter->second;
      levels_[level].deleted_files.insert(number);
    }

    // Add new files
    for (size_t i = 0; i < edit->new_files_.size(); i++) {
      const int level = edit->new_files_[i].first;
      FileMetaData* f = new FileMetaData(edit->new_files_[i].second);
      f->refs = 1;

      // We arrange to automatically compact this file after
      // a certain number of seeks.  Let's assume:
      //   (1) One seek costs 10ms
      //   (2) Writing or reading 1MB costs 10ms (100MB/s)
      //   (3) A compaction of 1MB does 25MB of IO:
      //         1MB read from this level
      //         10-12MB read from next level (boundaries may be misaligned)
      //         10-12MB written to next level
      // This implies that 25 seeks cost the same as the compaction
      // of 1MB of data.  I.e., one seek costs approximately the
      // same as the compaction of 40KB of data.  We are a little
      // conservative and allow approximately one seek for every 16KB
      // of data before triggering a compaction.
      f->allowed_seeks = (f->file_size / 16384);
      if (f->allowed_seeks < 100) f->allowed_seeks = 100;
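      // Worked arithmetic for the formula above (illustrative note, added):
      //   a 2MB table:  2 * 1048576 / 16384 = 128 allowed seeks
      //   a 1MB table:  1048576 / 16384 = 64, clamped up to the 100 floor
      // so only tables of roughly 1.6MB or more get a budget above the floor.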
      levels_[level].deleted_files.erase(f->number);
      levels_[level].added_files->insert(f);
    }
  }

  // Save the current state in *v.
  void SaveTo(Version* v) {
    BySmallestKey cmp;
    cmp.internal_comparator = &vset_->icmp_;
    for (int level = 0; level < config::kNumLevels; level++) {
      // Merge the set of added files with the set of pre-existing files.
      // Drop any deleted files.  Store the result in *v.
      const std::vector<FileMetaData*>& base_files = base_->files_[level];
      std::vector<FileMetaData*>::const_iterator base_iter = base_files.begin();
      std::vector<FileMetaData*>::const_iterator base_end = base_files.end();
      const FileSet* added = levels_[level].added_files;
      v->files_[level].reserve(base_files.size() + added->size());
      for (FileSet::const_iterator added_iter = added->begin();
           added_iter != added->end();
           ++added_iter) {
        // Add all smaller files listed in base_
        for (std::vector<FileMetaData*>::const_iterator bpos
                 = std::upper_bound(base_iter, base_end, *added_iter, cmp);
             base_iter != bpos;
             ++base_iter) {
          MaybeAddFile(v, level, *base_iter);
        }

        MaybeAddFile(v, level, *added_iter);
      }

      // Add remaining base files
      for (; base_iter != base_end; ++base_iter) {
        MaybeAddFile(v, level, *base_iter);
      }

#ifndef NDEBUG
      // Make sure there is no overlap in levels > 0
      if (level > 0) {
        for (uint32_t i = 1; i < v->files_[level].size(); i++) {
          const InternalKey& prev_end = v->files_[level][i-1]->largest;
          const InternalKey& this_begin = v->files_[level][i]->smallest;
          if (vset_->icmp_.Compare(prev_end, this_begin) >= 0) {
            fprintf(stderr, "overlapping ranges in same level %s vs. %s\n",
                    prev_end.DebugString().c_str(),
                    this_begin.DebugString().c_str());
            abort();
          }
        }
      }
#endif
    }
  }

  void MaybeAddFile(Version* v, int level, FileMetaData* f) {
    if (levels_[level].deleted_files.count(f->number) > 0) {
      // File is deleted: do nothing
    } else {
      std::vector<FileMetaData*>* files = &v->files_[level];
      if (level > 0 && !files->empty()) {
        // Must not overlap
        assert(vset_->icmp_.Compare((*files)[files->size()-1]->largest,
                                    f->smallest) < 0);
      }
      f->refs++;
      files->push_back(f);
    }
  }
};

VersionSet::VersionSet(const std::string& dbname,
                       const Options* options,
                       TableCache* table_cache,
                       const InternalKeyComparator* cmp)
    : env_(options->env),
      dbname_(dbname),
      options_(options),
      table_cache_(table_cache),
      icmp_(*cmp),
      next_file_number_(2),
      manifest_file_number_(0),  // Filled by Recover()
      last_sequence_(0),
      log_number_(0),
      prev_log_number_(0),
      descriptor_file_(NULL),
      descriptor_log_(NULL),
      dummy_versions_(this),
      current_(NULL) {
  AppendVersion(new Version(this));
}

VersionSet::~VersionSet() {
  current_->Unref();
  assert(dummy_versions_.next_ == &dummy_versions_);  // List must be empty
  delete descriptor_log_;
  delete descriptor_file_;
}

void VersionSet::AppendVersion(Version* v) {
  // Make "v" current
  assert(v->refs_ == 0);
  assert(v != current_);
  if (current_ != NULL) {
    current_->Unref();
  }
  current_ = v;
  v->Ref();

  // Append to linked list
  v->prev_ = dummy_versions_.prev_;
  v->next_ = &dummy_versions_;
  v->prev_->next_ = v;
  v->next_->prev_ = v;
}

Status VersionSet::LogAndApply(VersionEdit* edit, port::Mutex* mu) {
  if (edit->has_log_number_) {
    assert(edit->log_number_ >= log_number_);
    assert(edit->log_number_ < next_file_number_);
  } else {
    edit->SetLogNumber(log_number_);
  }

  if (!edit->has_prev_log_number_) {
    edit->SetPrevLogNumber(prev_log_number_);
  }

  edit->SetNextFile(next_file_number_);
  edit->SetLastSequence(last_sequence_);

  Version* v = new Version(this);
  {
    Builder builder(this, current_);
    builder.Apply(edit);
    builder.SaveTo(v);
  }
  Finalize(v);

  // Initialize new descriptor log file if necessary by creating
  // a temporary file that contains a snapshot of the current version.
  std::string new_manifest_file;
  Status s;
  if (descriptor_log_ == NULL) {
    // No reason to unlock *mu here since we only hit this path in the
    // first call to LogAndApply (when opening the database).
    assert(descriptor_file_ == NULL);
    new_manifest_file = DescriptorFileName(dbname_, manifest_file_number_);
    edit->SetNextFile(next_file_number_);
    s = env_->NewWritableFile(new_manifest_file, &descriptor_file_);
    if (s.ok()) {
      descriptor_log_ = new log::Writer(descriptor_file_);
      s = WriteSnapshot(descriptor_log_);
    }
  }

  // Unlock during expensive MANIFEST log write
  {
    mu->Unlock();

    // Write new record to MANIFEST log
    if (s.ok()) {
      std::string record;
      edit->EncodeTo(&record);
      s = descriptor_log_->AddRecord(record);
      if (s.ok()) {
        s = descriptor_file_->Sync();
      }
    }

    // If we just created a new descriptor file, install it by writing a
    // new CURRENT file that points to it.
    if (s.ok() && !new_manifest_file.empty()) {
      s = SetCurrentFile(env_, dbname_, manifest_file_number_);
    }

    mu->Lock();
  }

  // Install the new version
  if (s.ok()) {
    AppendVersion(v);
    log_number_ = edit->log_number_;
    prev_log_number_ = edit->prev_log_number_;
  } else {
    delete v;
    if (!new_manifest_file.empty()) {
      delete descriptor_log_;
      delete descriptor_file_;
      descriptor_log_ = NULL;
      descriptor_file_ = NULL;
      env_->DeleteFile(new_manifest_file);
    }
  }

  return s;
}
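// Illustrative usage sketch (added): the real call sites live in db_impl.cc
// and may differ in detail; mutex_, versions_, meta and logfile_number_ below
// are caller-side names shown only for illustration.  LogAndApply() is
// entered with the database mutex held, appends the edit to the MANIFEST
// (dropping the mutex only around the file write), and installs the new
// Version on success.
//
//   mutex_.AssertHeld();
//   VersionEdit edit;
//   edit.AddFile(level, meta.number, meta.file_size, meta.smallest, meta.largest);
//   edit.SetLogNumber(logfile_number_);
//   Status s = versions_->LogAndApply(&edit, &mutex_);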
Status VersionSet::Recover() {
  struct LogReporter : public log::Reader::Reporter {
    Status* status;
    virtual void Corruption(size_t bytes, const Status& s) {
      if (this->status->ok()) *this->status = s;
    }
  };

  // Read "CURRENT" file, which contains a pointer to the current manifest file
  std::string current;
  Status s = ReadFileToString(env_, CurrentFileName(dbname_), &current);
  if (!s.ok()) {
    return s;
  }
  if (current.empty() || current[current.size()-1] != '\n') {
    return Status::Corruption("CURRENT file does not end with newline");
  }
  current.resize(current.size() - 1);

  std::string dscname = dbname_ + "/" + current;
  SequentialFile* file;
  s = env_->NewSequentialFile(dscname, &file);
  if (!s.ok()) {
    return s;
  }

  bool have_log_number = false;
  bool have_prev_log_number = false;
  bool have_next_file = false;
  bool have_last_sequence = false;
  uint64_t next_file = 0;
  uint64_t last_sequence = 0;
  uint64_t log_number = 0;
  uint64_t prev_log_number = 0;
  Builder builder(this, current_);

  {
    LogReporter reporter;
    reporter.status = &s;
    log::Reader reader(file, &reporter, true/*checksum*/, 0/*initial_offset*/);
    Slice record;
    std::string scratch;
    while (reader.ReadRecord(&record, &scratch) && s.ok()) {
      VersionEdit edit;
      s = edit.DecodeFrom(record);
      if (s.ok()) {
        if (edit.has_comparator_ &&
            edit.comparator_ != icmp_.user_comparator()->Name()) {
          s = Status::InvalidArgument(
              edit.comparator_ + " does not match existing comparator ",
              icmp_.user_comparator()->Name());
        }
      }

      if (s.ok()) {
        builder.Apply(&edit);
      }

      if (edit.has_log_number_) {
        log_number = edit.log_number_;
        have_log_number = true;
      }

      if (edit.has_prev_log_number_) {
        prev_log_number = edit.prev_log_number_;
        have_prev_log_number = true;
      }

      if (edit.has_next_file_number_) {
        next_file = edit.next_file_number_;
        have_next_file = true;
      }

      if (edit.has_last_sequence_) {
        last_sequence = edit.last_sequence_;
        have_last_sequence = true;
      }
    }
  }
  delete file;
  file = NULL;

  if (s.ok()) {
    if (!have_next_file) {
      s = Status::Corruption("no meta-nextfile entry in descriptor");
    } else if (!have_log_number) {
      s = Status::Corruption("no meta-lognumber entry in descriptor");
    } else if (!have_last_sequence) {
      s = Status::Corruption("no last-sequence-number entry in descriptor");
    }

    if (!have_prev_log_number) {
      prev_log_number = 0;
    }

    MarkFileNumberUsed(prev_log_number);
    MarkFileNumberUsed(log_number);
  }

  if (s.ok()) {
    Version* v = new Version(this);
    builder.SaveTo(v);
    // Install recovered version
    Finalize(v);
    AppendVersion(v);
    manifest_file_number_ = next_file;
    next_file_number_ = next_file + 1;
    last_sequence_ = last_sequence;
    log_number_ = log_number;
    prev_log_number_ = prev_log_number;
  }

  return s;
}

void VersionSet::MarkFileNumberUsed(uint64_t number) {
  if (next_file_number_ <= number) {
    next_file_number_ = number + 1;
  }
}

void VersionSet::Finalize(Version* v) {
  // Precomputed best level for next compaction
  int best_level = -1;
  double best_score = -1;

  for (int level = 0; level < config::kNumLevels-1; level++) {
    double score;
    if (level == 0) {
      // We treat level-0 specially by bounding the number of files
      // instead of number of bytes for two reasons:
      //
      // (1) With larger write-buffer sizes, it is nice not to do too
      // many level-0 compactions.
      //
      // (2) The files in level-0 are merged on every read and
      // therefore we wish to avoid too many files when the individual
      // file size is small (perhaps because of a small write-buffer
      // setting, or very high compression ratios, or lots of
      // overwrites/deletions).
      score = v->files_[level].size() /
          static_cast<double>(config::kL0_CompactionTrigger);
    } else {
      // Compute the ratio of current size to size limit.
      const uint64_t level_bytes = TotalFileSize(v->files_[level]);
      score = static_cast<double>(level_bytes) / MaxBytesForLevel(level);
    }

    if (score > best_score) {
      best_level = level;
      best_score = score;
    }
  }

  v->compaction_level_ = best_level;
  v->compaction_score_ = best_score;
}
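// Worked example for the scoring above (illustrative note, added): with the
// default config::kL0_CompactionTrigger of 4, six level-0 files score
// 6 / 4 = 1.5; a level-1 holding 15MB against its 10MB limit also scores 1.5.
// Any score >= 1 makes a level eligible, and PickCompaction() below starts
// with the level whose score is highest (compaction_level_).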
Status VersionSet::WriteSnapshot(log::Writer* log) {
  // TODO: Break up into multiple records to reduce memory usage on recovery?

  // Save metadata
  VersionEdit edit;
  edit.SetComparatorName(icmp_.user_comparator()->Name());

  // Save compaction pointers
  for (int level = 0; level < config::kNumLevels; level++) {
    if (!compact_pointer_[level].empty()) {
      InternalKey key;
      key.DecodeFrom(compact_pointer_[level]);
      edit.SetCompactPointer(level, key);
    }
  }

  // Save files
  for (int level = 0; level < config::kNumLevels; level++) {
    const std::vector<FileMetaData*>& files = current_->files_[level];
    for (size_t i = 0; i < files.size(); i++) {
      const FileMetaData* f = files[i];
      edit.AddFile(level, f->number, f->file_size, f->smallest, f->largest);
    }
  }

  std::string record;
  edit.EncodeTo(&record);
  return log->AddRecord(record);
}

int VersionSet::NumLevelFiles(int level) const {
  assert(level >= 0);
  assert(level < config::kNumLevels);
  return current_->files_[level].size();
}

const char* VersionSet::LevelSummary(LevelSummaryStorage* scratch) const {
  // Update code if kNumLevels changes
  assert(config::kNumLevels == 7);
  snprintf(scratch->buffer, sizeof(scratch->buffer),
           "files[ %d %d %d %d %d %d %d ]",
           int(current_->files_[0].size()),
           int(current_->files_[1].size()),
           int(current_->files_[2].size()),
           int(current_->files_[3].size()),
           int(current_->files_[4].size()),
           int(current_->files_[5].size()),
           int(current_->files_[6].size()));
  return scratch->buffer;
}

uint64_t VersionSet::ApproximateOffsetOf(Version* v, const InternalKey& ikey) {
  uint64_t result = 0;
  for (int level = 0; level < config::kNumLevels; level++) {
    const std::vector<FileMetaData*>& files = v->files_[level];
    for (size_t i = 0; i < files.size(); i++) {
      if (icmp_.Compare(files[i]->largest, ikey) <= 0) {
        // Entire file is before "ikey", so just add the file size
        result += files[i]->file_size;
      } else if (icmp_.Compare(files[i]->smallest, ikey) > 0) {
        // Entire file is after "ikey", so ignore
        if (level > 0) {
          // Files other than level 0 are sorted by meta->smallest, so
          // no further files in this level will contain data for
          // "ikey".
          break;
        }
      } else {
        // "ikey" falls in the range for this table.  Add the
        // approximate offset of "ikey" within the table.
        Table* tableptr;
        Iterator* iter = table_cache_->NewIterator(
            ReadOptions(), files[i]->number, files[i]->file_size, &tableptr);
        if (tableptr != NULL) {
          result += tableptr->ApproximateOffsetOf(ikey.Encode());
        }
        delete iter;
      }
    }
  }
  return result;
}

void VersionSet::AddLiveFiles(std::set<uint64_t>* live) {
  for (Version* v = dummy_versions_.next_;
       v != &dummy_versions_;
       v = v->next_) {
    for (int level = 0; level < config::kNumLevels; level++) {
      const std::vector<FileMetaData*>& files = v->files_[level];
      for (size_t i = 0; i < files.size(); i++) {
        live->insert(files[i]->number);
      }
    }
  }
}

int64_t VersionSet::NumLevelBytes(int level) const {
  assert(level >= 0);
  assert(level < config::kNumLevels);
  return TotalFileSize(current_->files_[level]);
}

int64_t VersionSet::MaxNextLevelOverlappingBytes() {
  int64_t result = 0;
  std::vector<FileMetaData*> overlaps;
  for (int level = 1; level < config::kNumLevels - 1; level++) {
    for (size_t i = 0; i < current_->files_[level].size(); i++) {
      const FileMetaData* f = current_->files_[level][i];
      current_->GetOverlappingInputs(level+1, &f->smallest, &f->largest,
                                     &overlaps);
      const int64_t sum = TotalFileSize(overlaps);
      if (sum > result) {
        result = sum;
      }
    }
  }
  return result;
}

// Stores the minimal range that covers all entries in inputs in
// *smallest, *largest.
// REQUIRES: inputs is not empty
void VersionSet::GetRange(const std::vector<FileMetaData*>& inputs,
                          InternalKey* smallest,
                          InternalKey* largest) {
  assert(!inputs.empty());
  smallest->Clear();
  largest->Clear();
  for (size_t i = 0; i < inputs.size(); i++) {
    FileMetaData* f = inputs[i];
    if (i == 0) {
      *smallest = f->smallest;
      *largest = f->largest;
    } else {
      if (icmp_.Compare(f->smallest, *smallest) < 0) {
        *smallest = f->smallest;
      }
      if (icmp_.Compare(f->largest, *largest) > 0) {
        *largest = f->largest;
      }
    }
  }
}

// Stores the minimal range that covers all entries in inputs1 and inputs2
// in *smallest, *largest.
// REQUIRES: inputs is not empty
void VersionSet::GetRange2(const std::vector<FileMetaData*>& inputs1,
                           const std::vector<FileMetaData*>& inputs2,
                           InternalKey* smallest,
                           InternalKey* largest) {
  std::vector<FileMetaData*> all = inputs1;
  all.insert(all.end(), inputs2.begin(), inputs2.end());
  GetRange(all, smallest, largest);
}

Iterator* VersionSet::MakeInputIterator(Compaction* c) {
  ReadOptions options;
  options.verify_checksums = options_->paranoid_checks;
  options.fill_cache = false;

  // Level-0 files have to be merged together.  For other levels,
  // we will make a concatenating iterator per level.
  // TODO(opt): use concatenating iterator for level-0 if there is no overlap
  const int space = (c->level() == 0 ? c->inputs_[0].size() + 1 : 2);
  Iterator** list = new Iterator*[space];
  int num = 0;
  for (int which = 0; which < 2; which++) {
    if (!c->inputs_[which].empty()) {
      if (c->level() + which == 0) {
        const std::vector<FileMetaData*>& files = c->inputs_[which];
        for (size_t i = 0; i < files.size(); i++) {
          list[num++] = table_cache_->NewIterator(
              options, files[i]->number, files[i]->file_size);
        }
      } else {
        // Create concatenating iterator for the files from this level
        list[num++] = NewTwoLevelIterator(
            new Version::LevelFileNumIterator(icmp_, &c->inputs_[which]),
            &GetFileIterator, table_cache_, options);
      }
    }
  }
  assert(num <= space);
  Iterator* result = NewMergingIterator(&icmp_, list, num);
  delete[] list;
  return result;
}

Compaction* VersionSet::PickCompaction() {
  Compaction* c;
  int level;

  // We prefer compactions triggered by too much data in a level over
  // the compactions triggered by seeks.
  const bool size_compaction = (current_->compaction_score_ >= 1);
  const bool seek_compaction = (current_->file_to_compact_ != NULL);
  if (size_compaction) {
    level = current_->compaction_level_;
    assert(level >= 0);
    assert(level+1 < config::kNumLevels);
    c = new Compaction(level);

    // Pick the first file that comes after compact_pointer_[level]
    for (size_t i = 0; i < current_->files_[level].size(); i++) {
      FileMetaData* f = current_->files_[level][i];
      if (compact_pointer_[level].empty() ||
          icmp_.Compare(f->largest.Encode(), compact_pointer_[level]) > 0) {
        c->inputs_[0].push_back(f);
        break;
      }
    }
    if (c->inputs_[0].empty()) {
      // Wrap-around to the beginning of the key space
      c->inputs_[0].push_back(current_->files_[level][0]);
    }
  } else if (seek_compaction) {
    level = current_->file_to_compact_level_;
    c = new Compaction(level);
    c->inputs_[0].push_back(current_->file_to_compact_);
  } else {
    return NULL;
  }

  c->input_version_ = current_;
  c->input_version_->Ref();

  // Files in level 0 may overlap each other, so pick up all overlapping ones
  if (level == 0) {
    InternalKey smallest, largest;
    GetRange(c->inputs_[0], &smallest, &largest);
    // Note that the next call will discard the file we placed in
    // c->inputs_[0] earlier and replace it with an overlapping set
    // which will include the picked file.
    current_->GetOverlappingInputs(0, &smallest, &largest, &c->inputs_[0]);
    assert(!c->inputs_[0].empty());
  }

  SetupOtherInputs(c);

  return c;
}

void VersionSet::SetupOtherInputs(Compaction* c) {
  const int level = c->level();
  InternalKey smallest, largest;
  GetRange(c->inputs_[0], &smallest, &largest);

  current_->GetOverlappingInputs(level+1, &smallest, &largest, &c->inputs_[1]);

  // Get entire range covered by compaction
  InternalKey all_start, all_limit;
  GetRange2(c->inputs_[0], c->inputs_[1], &all_start, &all_limit);

  // See if we can grow the number of inputs in "level" without
  // changing the number of "level+1" files we pick up.
  if (!c->inputs_[1].empty()) {
    std::vector<FileMetaData*> expanded0;
    current_->GetOverlappingInputs(level, &all_start, &all_limit, &expanded0);
    if (expanded0.size() > c->inputs_[0].size()) {
      InternalKey new_start, new_limit;
      GetRange(expanded0, &new_start, &new_limit);
      std::vector<FileMetaData*> expanded1;
      current_->GetOverlappingInputs(level+1, &new_start, &new_limit,
                                     &expanded1);
      if (expanded1.size() == c->inputs_[1].size()) {
        Log(options_->info_log,
            "Expanding@%d %d+%d to %d+%d\n",
            level,
            int(c->inputs_[0].size()),
            int(c->inputs_[1].size()),
            int(expanded0.size()),
            int(expanded1.size()));
        smallest = new_start;
        largest = new_limit;
        c->inputs_[0] = expanded0;
        c->inputs_[1] = expanded1;
        GetRange2(c->inputs_[0], c->inputs_[1], &all_start, &all_limit);
      }
    }
  }

  // Compute the set of grandparent files that overlap this compaction
  // (parent == level+1; grandparent == level+2)
  if (level + 2 < config::kNumLevels) {
    current_->GetOverlappingInputs(level + 2, &all_start, &all_limit,
                                   &c->grandparents_);
  }

  if (false) {
    Log(options_->info_log, "Compacting %d '%s' .. '%s'",
        level,
        smallest.DebugString().c_str(),
        largest.DebugString().c_str());
  }

  // Update the place where we will do the next compaction for this level.
  // We update this immediately instead of waiting for the VersionEdit
  // to be applied so that if the compaction fails, we will try a different
  // key range next time.
  compact_pointer_[level] = largest.Encode().ToString();
  c->edit_.SetCompactPointer(level, largest);
}

Compaction* VersionSet::CompactRange(
    int level,
    const InternalKey* begin,
    const InternalKey* end) {
  std::vector<FileMetaData*> inputs;
  current_->GetOverlappingInputs(level, begin, end, &inputs);
  if (inputs.empty()) {
    return NULL;
  }

  // Avoid compacting too much in one shot in case the range is large.
  const uint64_t limit = MaxFileSizeForLevel(level);
  uint64_t total = 0;
  for (int i = 0; i < inputs.size(); i++) {
    uint64_t s = inputs[i]->file_size;
    total += s;
    if (total >= limit) {
      inputs.resize(i + 1);
      break;
    }
  }

  Compaction* c = new Compaction(level);
  c->input_version_ = current_;
  c->input_version_->Ref();
  c->inputs_[0] = inputs;
  SetupOtherInputs(c);
  return c;
}

Compaction::Compaction(int level)
    : level_(level),
      max_output_file_size_(MaxFileSizeForLevel(level)),
      input_version_(NULL),
      grandparent_index_(0),
      seen_key_(false),
      overlapped_bytes_(0) {
  for (int i = 0; i < config::kNumLevels; i++) {
    level_ptrs_[i] = 0;
  }
}

Compaction::~Compaction() {
  if (input_version_ != NULL) {
    input_version_->Unref();
  }
}

bool Compaction::IsTrivialMove() const {
  // Avoid a move if there is lots of overlapping grandparent data.
  // Otherwise, the move could create a parent file that will require
  // a very expensive merge later on.
  return (num_input_files(0) == 1 &&
          num_input_files(1) == 0 &&
          TotalFileSize(grandparents_) <= kMaxGrandParentOverlapBytes);
}
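// Illustrative note (added): with the constants at the top of this file, a
// trivial move needs exactly one input file at "level", no overlapping files
// at "level+1", and at most 10 * 2MB = 20MB of grandparent overlap.  In that
// case the caller can move the file to level+1 with a VersionEdit (one
// delete plus one add) instead of reading and rewriting it.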
void Compaction::AddInputDeletions(VersionEdit* edit) {
  for (int which = 0; which < 2; which++) {
    for (size_t i = 0; i < inputs_[which].size(); i++) {
      edit->DeleteFile(level_ + which, inputs_[which][i]->number);
    }
  }
}

bool Compaction::IsBaseLevelForKey(const Slice& user_key) {
  // Maybe use binary search to find right entry instead of linear search?
  const Comparator* user_cmp = input_version_->vset_->icmp_.user_comparator();
  for (int lvl = level_ + 2; lvl < config::kNumLevels; lvl++) {
    const std::vector<FileMetaData*>& files = input_version_->files_[lvl];
    for (; level_ptrs_[lvl] < files.size(); ) {
      FileMetaData* f = files[level_ptrs_[lvl]];
      if (user_cmp->Compare(user_key, f->largest.user_key()) <= 0) {
        // We've advanced far enough
        if (user_cmp->Compare(user_key, f->smallest.user_key()) >= 0) {
          // Key falls in this file's range, so definitely not base level
          return false;
        }
        break;
      }
      level_ptrs_[lvl]++;
    }
  }
  return true;
}

bool Compaction::ShouldStopBefore(const Slice& internal_key) {
  // Scan to find earliest grandparent file that contains key.
  const InternalKeyComparator* icmp = &input_version_->vset_->icmp_;
  while (grandparent_index_ < grandparents_.size() &&
      icmp->Compare(internal_key,
                    grandparents_[grandparent_index_]->largest.Encode()) > 0) {
    if (seen_key_) {
      overlapped_bytes_ += grandparents_[grandparent_index_]->file_size;
    }
    grandparent_index_++;
  }
  seen_key_ = true;

  if (overlapped_bytes_ > kMaxGrandParentOverlapBytes) {
    // Too much overlap for current output; start new output
    overlapped_bytes_ = 0;
    return true;
  } else {
    return false;
  }
}
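// Illustrative note (added): the compaction loop in db_impl.cc calls
// ShouldStopBefore() for each output key; once the accumulated grandparent
// overlap passes 20MB the current output table is closed early, so no single
// new level+1 file drags too much level+2 data into a later compaction.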
void Compaction::ReleaseInputs() {
  if (input_version_ != NULL) {
    input_version_->Unref();
    input_version_ = NULL;
  }
}

}  // namespace leveldb