LevelDB project 1: 10225501460 林子骥, 10211900416 郭夏辉

db/db_impl.h · 224 lines · 7.8 KiB
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#ifndef STORAGE_LEVELDB_DB_DB_IMPL_H_
#define STORAGE_LEVELDB_DB_DB_IMPL_H_

#include <atomic>
#include <deque>
#include <set>
#include <string>

#include "db/dbformat.h"
#include "db/log_writer.h"
#include "db/snapshot.h"
#include "leveldb/db.h"
#include "leveldb/env.h"
#include "port/port.h"
#include "port/thread_annotations.h"

namespace leveldb {

class MemTable;
class TableCache;
class Version;
class VersionEdit;
class VersionSet;

class DBImpl : public DB {
 public:
  DBImpl(const Options& options, const std::string& dbname);

  DBImpl(const DBImpl&) = delete;
  DBImpl& operator=(const DBImpl&) = delete;

  ~DBImpl() override;

  // Implementations of the DB interface
  Status Put(const WriteOptions&, const Slice& key,
             const Slice& value) override;
  Status Put(const WriteOptions& options, const Slice& key, const Slice& value,
             uint64_t ttl) override;
  Status Delete(const WriteOptions&, const Slice& key) override;
  Status Write(const WriteOptions& options, WriteBatch* updates) override;
  Status Get(const ReadOptions& options, const Slice& key,
             std::string* value) override;
  Iterator* NewIterator(const ReadOptions&) override;
  const Snapshot* GetSnapshot() override;
  void ReleaseSnapshot(const Snapshot* snapshot) override;
  bool GetProperty(const Slice& property, std::string* value) override;
  void GetApproximateSizes(const Range* range, int n, uint64_t* sizes) override;
  void CompactRange(const Slice* begin, const Slice* end) override;

  void AppendTS(const Slice& val, std::string* val_with_ts, uint64_t ttl);
  static uint64_t GetTS(std::string* val);

  // Extra methods (for testing) that are not in the public DB interface

  // Compact any files in the named level that overlap [*begin,*end]
  void TEST_CompactRange(int level, const Slice* begin, const Slice* end);

  // Force current memtable contents to be compacted.
  Status TEST_CompactMemTable();

  // Return an internal iterator over the current state of the database.
  // The keys of this iterator are internal keys (see format.h).
  // The returned iterator should be deleted when no longer needed.
  Iterator* TEST_NewInternalIterator();

  // Return the maximum overlapping data (in bytes) at next level for any
  // file at a level >= 1.
  int64_t TEST_MaxNextLevelOverlappingBytes();

  // Record a sample of bytes read at the specified internal key.
  // Samples are taken approximately once every config::kReadBytesPeriod
  // bytes.
  void RecordReadSample(Slice key);

  // Status Write(const WriteOptions& options, WriteBatch* updates,
  //              uint64_t ttl) override;

 private:
  friend class DB;
  struct CompactionState;
  struct Writer;

  // Information for a manual compaction
  struct ManualCompaction {
    int level;
    bool done;
    const InternalKey* begin;  // null means beginning of key range
    const InternalKey* end;    // null means end of key range
    InternalKey tmp_storage;   // Used to keep track of compaction progress
  };

  // Per level compaction stats. stats_[level] stores the stats for
  // compactions that produced data for the specified "level".
  struct CompactionStats {
    CompactionStats() : micros(0), bytes_read(0), bytes_written(0) {}

    void Add(const CompactionStats& c) {
      this->micros += c.micros;
      this->bytes_read += c.bytes_read;
      this->bytes_written += c.bytes_written;
    }

    int64_t micros;
    int64_t bytes_read;
    int64_t bytes_written;
  };

  Iterator* NewInternalIterator(const ReadOptions&,
                                SequenceNumber* latest_snapshot,
                                uint32_t* seed);

  Status NewDB();

  // Recover the descriptor from persistent storage. May do a significant
  // amount of work to recover recently logged updates. Any changes to
  // be made to the descriptor are added to *edit.
  Status Recover(VersionEdit* edit, bool* save_manifest)
      EXCLUSIVE_LOCKS_REQUIRED(mutex_);

  void MaybeIgnoreError(Status* s) const;

  // Delete any unneeded files and stale in-memory entries.
  void RemoveObsoleteFiles() EXCLUSIVE_LOCKS_REQUIRED(mutex_);

  // Compact the in-memory write buffer to disk. Switches to a new
  // log-file/memtable and writes a new descriptor iff successful.
  // Errors are recorded in bg_error_.
  void CompactMemTable() EXCLUSIVE_LOCKS_REQUIRED(mutex_);

  Status RecoverLogFile(uint64_t log_number, bool last_log, bool* save_manifest,
                        VersionEdit* edit, SequenceNumber* max_sequence)
      EXCLUSIVE_LOCKS_REQUIRED(mutex_);

  Status WriteLevel0Table(MemTable* mem, VersionEdit* edit, Version* base)
      EXCLUSIVE_LOCKS_REQUIRED(mutex_);

  Status MakeRoomForWrite(bool force /* compact even if there is room? */)
      EXCLUSIVE_LOCKS_REQUIRED(mutex_);
  WriteBatch* BuildBatchGroup(Writer** last_writer)
      EXCLUSIVE_LOCKS_REQUIRED(mutex_);

  void RecordBackgroundError(const Status& s);

  void MaybeScheduleCompaction() EXCLUSIVE_LOCKS_REQUIRED(mutex_);
  static void BGWork(void* db);
  void BackgroundCall();
  void BackgroundCompaction() EXCLUSIVE_LOCKS_REQUIRED(mutex_);
  void CleanupCompaction(CompactionState* compact)
      EXCLUSIVE_LOCKS_REQUIRED(mutex_);
  Status DoCompactionWork(CompactionState* compact)
      EXCLUSIVE_LOCKS_REQUIRED(mutex_);

  Status OpenCompactionOutputFile(CompactionState* compact);
  Status FinishCompactionOutputFile(CompactionState* compact, Iterator* input);
  Status InstallCompactionResults(CompactionState* compact)
      EXCLUSIVE_LOCKS_REQUIRED(mutex_);

  const Comparator* user_comparator() const {
    return internal_comparator_.user_comparator();
  }

  // Constant after construction
  Env* const env_;
  const InternalKeyComparator internal_comparator_;
  const InternalFilterPolicy internal_filter_policy_;
  const Options options_;  // options_.comparator == &internal_comparator_
  const bool owns_info_log_;
  const bool owns_cache_;
  const std::string dbname_;

  // table_cache_ provides its own synchronization
  TableCache* const table_cache_;

  // Lock over the persistent DB state. Non-null iff successfully acquired.
  FileLock* db_lock_;

  // State below is protected by mutex_
  port::Mutex mutex_;
  std::atomic<bool> shutting_down_;
  port::CondVar background_work_finished_signal_ GUARDED_BY(mutex_);
  MemTable* mem_;
  MemTable* imm_ GUARDED_BY(mutex_);  // Memtable being compacted
  std::atomic<bool> has_imm_;         // So bg thread can detect non-null imm_
  WritableFile* logfile_;
  uint64_t logfile_number_ GUARDED_BY(mutex_);
  log::Writer* log_;
  uint32_t seed_ GUARDED_BY(mutex_);  // For sampling.

  // Queue of writers.
  std::deque<Writer*> writers_ GUARDED_BY(mutex_);
  WriteBatch* tmp_batch_ GUARDED_BY(mutex_);

  SnapshotList snapshots_ GUARDED_BY(mutex_);

  // Set of table files to protect from deletion because they are
  // part of ongoing compactions.
  std::set<uint64_t> pending_outputs_ GUARDED_BY(mutex_);

  // Has a background compaction been scheduled or is running?
  bool background_compaction_scheduled_ GUARDED_BY(mutex_);

  ManualCompaction* manual_compaction_ GUARDED_BY(mutex_);

  VersionSet* const versions_ GUARDED_BY(mutex_);

  // Have we encountered a background error in paranoid mode?
  Status bg_error_ GUARDED_BY(mutex_);

  CompactionStats stats_[config::kNumLevels] GUARDED_BY(mutex_);

  Status CheckIsExpire(std::string* value);
};

// Sanitize db options. The caller should delete result.info_log if
// it is not equal to src.info_log.
Options SanitizeOptions(const std::string& db,
                        const InternalKeyComparator* icmp,
                        const InternalFilterPolicy* ipolicy,
                        const Options& src);

}  // namespace leveldb

#endif  // STORAGE_LEVELDB_DB_DB_IMPL_H_
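
Below is a minimal usage sketch of the TTL-extended interface declared above. It is illustrative only: it assumes DB::Open behaves as in stock LevelDB, that the ttl argument of the new Put overload is a lifetime in seconds, and that the database path and keys are placeholders rather than part of the project.

// ttl_put_example.cc -- illustrative sketch, not part of the header above.
#include <cassert>
#include <iostream>
#include <string>

#include "leveldb/db.h"

int main() {
  leveldb::DB* db = nullptr;
  leveldb::Options options;
  options.create_if_missing = true;

  // Open (or create) a database; the path is a placeholder.
  leveldb::Status s = leveldb::DB::Open(options, "/tmp/ttl_testdb", &db);
  assert(s.ok());

  // Write through the TTL overload declared in DBImpl. The entry is expected
  // to stop being returned by Get() once the TTL (assumed to be in seconds)
  // has elapsed.
  s = db->Put(leveldb::WriteOptions(), "k1", "v1", /*ttl=*/20);
  assert(s.ok());

  std::string value;
  s = db->Get(leveldb::ReadOptions(), "k1", &value);
  if (s.ok()) {
    std::cout << "k1 -> " << value << std::endl;  // Still within the TTL window.
  } else {
    std::cout << "k1 expired or not found: " << s.ToString() << std::endl;
  }

  delete db;
  return 0;
}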