// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#ifndef STORAGE_LEVELDB_DB_DB_IMPL_H_
#define STORAGE_LEVELDB_DB_DB_IMPL_H_

#include <deque>
#include <set>
#include "db/dbformat.h"
#include "db/log_writer.h"
#include "db/snapshot.h"
#include "leveldb/db.h"
#include "leveldb/env.h"
#include "port/port.h"
#include "port/thread_annotations.h"

namespace leveldb {

class MemTable;
class TableCache;
class Version;
class VersionEdit;
class VersionSet;

class DBImpl : public DB {
 public:
  DBImpl(const Options& options, const std::string& dbname);
  virtual ~DBImpl();

  // Implementations of the DB interface
  virtual Status Put(const WriteOptions&, const Slice& key, const Slice& value);
  virtual Status Delete(const WriteOptions&, const Slice& key);
  virtual Status Write(const WriteOptions& options, WriteBatch* updates);
  virtual Status Get(const ReadOptions& options,
                     const Slice& key,
                     std::string* value);
  virtual Iterator* NewIterator(const ReadOptions&);
  virtual const Snapshot* GetSnapshot();
  virtual void ReleaseSnapshot(const Snapshot* snapshot);
  virtual bool GetProperty(const Slice& property, std::string* value);
  virtual void GetApproximateSizes(const Range* range, int n, uint64_t* sizes);
  virtual void CompactRange(const Slice* begin, const Slice* end);

  // Extra methods (for testing) that are not in the public DB interface

  // Compact any files in the named level that overlap [*begin,*end]
  void TEST_CompactRange(int level, const Slice* begin, const Slice* end);

  // Force current memtable contents to be compacted.
  Status TEST_CompactMemTable();

  // Return an internal iterator over the current state of the database.
  // The keys of this iterator are internal keys (see format.h).
  // The returned iterator should be deleted when no longer needed.
  Iterator* TEST_NewInternalIterator();

  // Return the maximum overlapping data (in bytes) at next level for any
  // file at a level >= 1.
  int64_t TEST_MaxNextLevelOverlappingBytes();

  // Record a sample of bytes read at the specified internal key.
  // Samples are taken approximately once every config::kReadBytesPeriod
  // bytes.
  void RecordReadSample(Slice key);

 private:
  friend class DB;
  struct CompactionState;
  struct Writer;

  Iterator* NewInternalIterator(const ReadOptions&,
                                SequenceNumber* latest_snapshot,
                                uint32_t* seed);

  Status NewDB();

  // Recover the descriptor from persistent storage. May do a significant
  // amount of work to recover recently logged updates. Any changes to
  // be made to the descriptor are added to *edit.
  Status Recover(VersionEdit* edit) EXCLUSIVE_LOCKS_REQUIRED(mutex_);

  void MaybeIgnoreError(Status* s) const;

  // Delete any unneeded files and stale in-memory entries.
  void DeleteObsoleteFiles();

  // Compact the in-memory write buffer to disk. Switches to a new
  // log-file/memtable and writes a new descriptor iff successful.
  Status CompactMemTable() EXCLUSIVE_LOCKS_REQUIRED(mutex_);

  Status RecoverLogFile(uint64_t log_number,
                        VersionEdit* edit,
                        SequenceNumber* max_sequence)
      EXCLUSIVE_LOCKS_REQUIRED(mutex_);

  Status WriteLevel0Table(MemTable* mem, VersionEdit* edit, Version* base)
      EXCLUSIVE_LOCKS_REQUIRED(mutex_);

  Status MakeRoomForWrite(bool force /* compact even if there is room? */)
      EXCLUSIVE_LOCKS_REQUIRED(mutex_);
  WriteBatch* BuildBatchGroup(Writer** last_writer);

  void MaybeScheduleCompaction() EXCLUSIVE_LOCKS_REQUIRED(mutex_);
  static void BGWork(void* db);
  void BackgroundCall();
  Status BackgroundCompaction() EXCLUSIVE_LOCKS_REQUIRED(mutex_);
  void CleanupCompaction(CompactionState* compact)
      EXCLUSIVE_LOCKS_REQUIRED(mutex_);
  Status DoCompactionWork(CompactionState* compact)
      EXCLUSIVE_LOCKS_REQUIRED(mutex_);

  Status OpenCompactionOutputFile(CompactionState* compact);
  Status FinishCompactionOutputFile(CompactionState* compact, Iterator* input);
  Status InstallCompactionResults(CompactionState* compact)
      EXCLUSIVE_LOCKS_REQUIRED(mutex_);

  // Constant after construction
  Env* const env_;
  const InternalKeyComparator internal_comparator_;
  const InternalFilterPolicy internal_filter_policy_;
  const Options options_;  // options_.comparator == &internal_comparator_
  bool owns_info_log_;
  bool owns_cache_;
  const std::string dbname_;

  // table_cache_ provides its own synchronization
  TableCache* table_cache_;

  // Lock over the persistent DB state. Non-NULL iff successfully acquired.
  FileLock* db_lock_;

  // State below is protected by mutex_
  port::Mutex mutex_;
  port::AtomicPointer shutting_down_;
  port::CondVar bg_cv_;          // Signalled when background work finishes
  MemTable* mem_;
  MemTable* imm_;                // Memtable being compacted
  port::AtomicPointer has_imm_;  // So bg thread can detect non-NULL imm_
  WritableFile* logfile_;
  uint64_t logfile_number_;
  log::Writer* log_;
  uint32_t seed_;                // For sampling.

  // Queue of writers.
  std::deque<Writer*> writers_;
  WriteBatch* tmp_batch_;

  SnapshotList snapshots_;

  // Set of table files to protect from deletion because they are
  // part of ongoing compactions.
  std::set<uint64_t> pending_outputs_;

  // Has a background compaction been scheduled or is running?
  bool bg_compaction_scheduled_;

  // Information for a manual compaction
  struct ManualCompaction {
    int level;
    bool done;
    const InternalKey* begin;  // NULL means beginning of key range
    const InternalKey* end;    // NULL means end of key range
    InternalKey tmp_storage;   // Used to keep track of compaction progress
  };
  ManualCompaction* manual_compaction_;

  VersionSet* versions_;

  // Have we encountered a background error in paranoid mode?
  Status bg_error_;
  int consecutive_compaction_errors_;

  // Per level compaction stats. stats_[level] stores the stats for
  // compactions that produced data for the specified "level".
  struct CompactionStats {
    int64_t micros;
    int64_t bytes_read;
    int64_t bytes_written;

    CompactionStats() : micros(0), bytes_read(0), bytes_written(0) { }

    void Add(const CompactionStats& c) {
      this->micros += c.micros;
      this->bytes_read += c.bytes_read;
      this->bytes_written += c.bytes_written;
    }
  };
  CompactionStats stats_[config::kNumLevels];

  // No copying allowed
  DBImpl(const DBImpl&);
  void operator=(const DBImpl&);

  const Comparator* user_comparator() const {
    return internal_comparator_.user_comparator();
  }
};
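
// A minimal usage sketch for the DB interface implemented by DBImpl,
// kept as comments so the header stays declaration-only. Clients never
// construct DBImpl directly; they obtain a DB* through DB::Open() from
// "leveldb/db.h" and call the virtual methods declared above. The
// database path below is a placeholder.
//
//   #include "leveldb/db.h"
//   #include "leveldb/write_batch.h"
//
//   leveldb::Options options;
//   options.create_if_missing = true;
//   leveldb::DB* db = NULL;
//   leveldb::Status s = leveldb::DB::Open(options, "/tmp/testdb", &db);
//
//   if (s.ok()) s = db->Put(leveldb::WriteOptions(), "key1", "value1");
//
//   std::string value;
//   if (s.ok()) s = db->Get(leveldb::ReadOptions(), "key1", &value);
//
//   // A WriteBatch is applied atomically by Write().
//   leveldb::WriteBatch batch;
//   batch.Delete("key1");
//   batch.Put("key2", value);
//   if (s.ok()) s = db->Write(leveldb::WriteOptions(), &batch);
//
//   delete db;  // Closes the database.
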
// Sanitize db options. The caller should delete result.info_log if
// it is not equal to src.info_log.
extern Options SanitizeOptions(const std::string& db,
                               const InternalKeyComparator* icmp,
                               const InternalFilterPolicy* ipolicy,
                               const Options& src);
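
// A sketch of the ownership rule stated above. The names dbname, icmp,
// ipolicy and src stand in for the caller's own values; owns_info_log_
// in DBImpl presumably records the same condition so a replacement
// logger can be deleted later:
//
//   Options sanitized = SanitizeOptions(dbname, &icmp, &ipolicy, src);
//   bool owns_info_log = (sanitized.info_log != src.info_log);
//   ...
//   if (owns_info_log) delete sanitized.info_log;  // delete only if replaced
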
}  // namespace leveldb

#endif  // STORAGE_LEVELDB_DB_DB_IMPL_H_