Group members: 曹可心-10223903406 朴祉燕-10224602413
Add Env::Remove{File,Dir}, which obsoletes Env::Delete{File,Dir}.

The "DeleteFile" method name causes pain for Windows developers, because <windows.h> #defines a DeleteFile macro to DeleteFileW or DeleteFileA. Current code uses workarounds, like #undefining DeleteFile everywhere an Env is declared, implemented, or used. This CL removes the need for workarounds by renaming Env::DeleteFile to Env::RemoveFile. For consistency, Env::DeleteDir is also renamed to Env::RemoveDir, and a few internal methods are renamed as well.

Software that supports Windows is expected to migrate any Env implementations and usage to Remove{File,Dir}, and never use the name Env::Delete{File,Dir} in its code. The renaming is done in a backwards-compatible way, at the risk of making it slightly more difficult to build a new correct Env implementation. The backwards compatibility is achieved using the following hacks:

1) Env::Remove{File,Dir} methods are added, with a default implementation that calls into Env::Delete{File,Dir}. This makes old Env implementations compatible with code that calls into the updated API.

2) The Env::Delete{File,Dir} methods are no longer pure virtuals. Instead, they gain a default implementation that calls into Env::Remove{File,Dir}. This makes updated Env implementations compatible with code that calls into the old API.

The cost of this approach is that it is possible to write an Env without overriding either Remove{File,Dir} or Delete{File,Dir} and not get a compiler warning. However, attempting to run the test suite will immediately fail with an infinite call stack ending in {Remove,Delete}{File,Dir}, making developers aware of the problem.

PiperOrigin-RevId: 288710907
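The compatibility shim described above can be pictured with the following simplified sketch. It is not the exact code in include/leveldb/env.h (the real class declares many more methods, the default bodies live out of line, and RemoveDir/DeleteDir follow the same pattern); it only illustrates the two mutually forwarding defaults.

// Simplified sketch of the Remove{File}/Delete{File} compatibility shim.
#include <string>

#include "leveldb/status.h"

namespace leveldb {

class Env {
 public:
  virtual ~Env() = default;

  // New name. The default implementation forwards to the legacy name, so an
  // old Env subclass that only overrides DeleteFile keeps working with code
  // that calls the updated API.
  virtual Status RemoveFile(const std::string& fname) {
    return DeleteFile(fname);
  }

  // Legacy name. No longer pure virtual; the default forwards to the new
  // name, so a new subclass that only overrides RemoveFile keeps working
  // with code that calls the old API.
  virtual Status DeleteFile(const std::string& fname) {
    return RemoveFile(fname);
  }
};

}  // namespace leveldb

// A subclass that overrides neither method recurses forever between the two
// defaults -- that is the infinite call stack the message above warns about.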
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#ifndef STORAGE_LEVELDB_DB_DB_IMPL_H_
#define STORAGE_LEVELDB_DB_DB_IMPL_H_

#include <atomic>
#include <deque>
#include <set>
#include <string>

#include "db/dbformat.h"
#include "db/log_writer.h"
#include "db/snapshot.h"
#include "leveldb/db.h"
#include "leveldb/env.h"
#include "port/port.h"
#include "port/thread_annotations.h"

namespace leveldb {

class MemTable;
class TableCache;
class Version;
class VersionEdit;
class VersionSet;

class DBImpl : public DB {
 public:
  DBImpl(const Options& options, const std::string& dbname);

  DBImpl(const DBImpl&) = delete;
  DBImpl& operator=(const DBImpl&) = delete;

  ~DBImpl() override;

  // Implementations of the DB interface
  Status Put(const WriteOptions&, const Slice& key,
             const Slice& value) override;
  Status Put(const WriteOptions&, const Slice& key, const Slice& value,
             uint64_t ttl) override;  // New Put overload with a TTL (project
                                      // addition; see the usage sketch after
                                      // this listing).
  Status Delete(const WriteOptions&, const Slice& key) override;
  Status Write(const WriteOptions& options, WriteBatch* updates) override;
  Status Get(const ReadOptions& options, const Slice& key,
             std::string* value) override;
  Iterator* NewIterator(const ReadOptions&) override;
  const Snapshot* GetSnapshot() override;
  void ReleaseSnapshot(const Snapshot* snapshot) override;
  bool GetProperty(const Slice& property, std::string* value) override;
  void GetApproximateSizes(const Range* range, int n, uint64_t* sizes) override;
  void CompactRange(const Slice* begin, const Slice* end) override;

  // Extra methods (for testing) that are not in the public DB interface

  // Compact any files in the named level that overlap [*begin,*end]
  void TEST_CompactRange(int level, const Slice* begin, const Slice* end);

  // Force current memtable contents to be compacted.
  Status TEST_CompactMemTable();

  // Return an internal iterator over the current state of the database.
  // The keys of this iterator are internal keys (see format.h).
  // The returned iterator should be deleted when no longer needed.
  Iterator* TEST_NewInternalIterator();

  // Return the maximum overlapping data (in bytes) at next level for any
  // file at a level >= 1.
  int64_t TEST_MaxNextLevelOverlappingBytes();

  // Record a sample of bytes read at the specified internal key.
  // Samples are taken approximately once every config::kReadBytesPeriod
  // bytes.
  void RecordReadSample(Slice key);

 private:
  friend class DB;
  struct CompactionState;
  struct Writer;

  // Information for a manual compaction
  struct ManualCompaction {
    int level;
    bool done;
    const InternalKey* begin;  // null means beginning of key range
    const InternalKey* end;    // null means end of key range
    InternalKey tmp_storage;   // Used to keep track of compaction progress
  };

  // Per level compaction stats.  stats_[level] stores the stats for
  // compactions that produced data for the specified "level".
  struct CompactionStats {
    CompactionStats() : micros(0), bytes_read(0), bytes_written(0) {}

    void Add(const CompactionStats& c) {
      this->micros += c.micros;
      this->bytes_read += c.bytes_read;
      this->bytes_written += c.bytes_written;
    }

    int64_t micros;
    int64_t bytes_read;
    int64_t bytes_written;
  };

  Iterator* NewInternalIterator(const ReadOptions&,
                                SequenceNumber* latest_snapshot,
                                uint32_t* seed);

  Status NewDB();

  // Recover the descriptor from persistent storage.  May do a significant
  // amount of work to recover recently logged updates.  Any changes to
  // be made to the descriptor are added to *edit.
  Status Recover(VersionEdit* edit, bool* save_manifest)
      EXCLUSIVE_LOCKS_REQUIRED(mutex_);

  void MaybeIgnoreError(Status* s) const;

  // Delete any unneeded files and stale in-memory entries.
  void RemoveObsoleteFiles() EXCLUSIVE_LOCKS_REQUIRED(mutex_);

  // Compact the in-memory write buffer to disk.  Switches to a new
  // log-file/memtable and writes a new descriptor iff successful.
  // Errors are recorded in bg_error_.
  void CompactMemTable() EXCLUSIVE_LOCKS_REQUIRED(mutex_);

  Status RecoverLogFile(uint64_t log_number, bool last_log, bool* save_manifest,
                        VersionEdit* edit, SequenceNumber* max_sequence)
      EXCLUSIVE_LOCKS_REQUIRED(mutex_);

  Status WriteLevel0Table(MemTable* mem, VersionEdit* edit, Version* base)
      EXCLUSIVE_LOCKS_REQUIRED(mutex_);

  Status MakeRoomForWrite(bool force /* compact even if there is room? */)
      EXCLUSIVE_LOCKS_REQUIRED(mutex_);
  WriteBatch* BuildBatchGroup(Writer** last_writer)
      EXCLUSIVE_LOCKS_REQUIRED(mutex_);

  void RecordBackgroundError(const Status& s);

  void MaybeScheduleCompaction() EXCLUSIVE_LOCKS_REQUIRED(mutex_);
  static void BGWork(void* db);
  void BackgroundCall();
  void BackgroundCompaction() EXCLUSIVE_LOCKS_REQUIRED(mutex_);
  void CleanupCompaction(CompactionState* compact)
      EXCLUSIVE_LOCKS_REQUIRED(mutex_);
  Status DoCompactionWork(CompactionState* compact)
      EXCLUSIVE_LOCKS_REQUIRED(mutex_);

  Status OpenCompactionOutputFile(CompactionState* compact);
  Status FinishCompactionOutputFile(CompactionState* compact, Iterator* input);
  Status InstallCompactionResults(CompactionState* compact)
      EXCLUSIVE_LOCKS_REQUIRED(mutex_);

  const Comparator* user_comparator() const {
    return internal_comparator_.user_comparator();
  }

  // Constant after construction
  Env* const env_;
  const InternalKeyComparator internal_comparator_;
  const InternalFilterPolicy internal_filter_policy_;
  const Options options_;  // options_.comparator == &internal_comparator_
  const bool owns_info_log_;
  const bool owns_cache_;
  const std::string dbname_;

  // table_cache_ provides its own synchronization
  TableCache* const table_cache_;

  // Lock over the persistent DB state.  Non-null iff successfully acquired.
  FileLock* db_lock_;

  // State below is protected by mutex_
  port::Mutex mutex_;
  std::atomic<bool> shutting_down_;
  port::CondVar background_work_finished_signal_ GUARDED_BY(mutex_);
  MemTable* mem_;
  MemTable* imm_ GUARDED_BY(mutex_);  // Memtable being compacted
  std::atomic<bool> has_imm_;         // So bg thread can detect non-null imm_
  WritableFile* logfile_;
  uint64_t logfile_number_ GUARDED_BY(mutex_);
  log::Writer* log_;
  uint32_t seed_ GUARDED_BY(mutex_);  // For sampling.

  // Queue of writers.
  std::deque<Writer*> writers_ GUARDED_BY(mutex_);
  WriteBatch* tmp_batch_ GUARDED_BY(mutex_);

  SnapshotList snapshots_ GUARDED_BY(mutex_);

  // Set of table files to protect from deletion because they are
  // part of ongoing compactions.
  std::set<uint64_t> pending_outputs_ GUARDED_BY(mutex_);

  // Has a background compaction been scheduled or is running?
  bool background_compaction_scheduled_ GUARDED_BY(mutex_);

  ManualCompaction* manual_compaction_ GUARDED_BY(mutex_);

  VersionSet* const versions_ GUARDED_BY(mutex_);

  // Have we encountered a background error in paranoid mode?
  Status bg_error_ GUARDED_BY(mutex_);

  CompactionStats stats_[config::kNumLevels] GUARDED_BY(mutex_);
};

// Sanitize db options.  The caller should delete result.info_log if
// it is not equal to src.info_log.
Options SanitizeOptions(const std::string& db,
                        const InternalKeyComparator* icmp,
                        const InternalFilterPolicy* ipolicy,
                        const Options& src);

}  // namespace leveldb

#endif  // STORAGE_LEVELDB_DB_DB_IMPL_H_
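For context on the project-specific Put overload declared above, here is a minimal usage sketch. It assumes a matching overload exists on the public DB interface in leveldb/db.h (which the `override` implies) and that ttl is a lifetime in seconds; the unit and the exact expiration behaviour are assumptions, not something this header specifies.

// Hypothetical usage of the TTL-enabled Put declared on DBImpl above.
#include <cassert>

#include "leveldb/db.h"

int main() {
  leveldb::DB* db = nullptr;
  leveldb::Options options;
  options.create_if_missing = true;
  leveldb::Status s = leveldb::DB::Open(options, "/tmp/ttl_demo", &db);
  assert(s.ok());

  // Original interface: no expiration.
  s = db->Put(leveldb::WriteOptions(), "persistent_key", "value");
  assert(s.ok());

  // Project overload: the entry is expected to become unreadable roughly
  // ttl units after the write (unit assumed here to be seconds).
  s = db->Put(leveldb::WriteOptions(), "ephemeral_key", "value", /*ttl=*/20);
  assert(s.ok());

  delete db;
  return 0;
}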