10225501448 李度 10225101546 陈胤遒 10215501422 高宇菲
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#ifndef STORAGE_LEVELDB_INCLUDE_OPTIONS_H_
#define STORAGE_LEVELDB_INCLUDE_OPTIONS_H_

#include <stddef.h>

namespace leveldb {

class Cache;
class Comparator;
class Env;
class FilterPolicy;
class Logger;
class Snapshot;

// DB contents are stored in a set of blocks, each of which holds a
// sequence of key,value pairs. Each block may be compressed before
// being stored in a file. The following enum describes which
// compression method (if any) is used to compress a block.
enum CompressionType {
  // NOTE: do not change the values of existing entries, as these are
  // part of the persistent format on disk.
  kNoCompression = 0x0,
  kSnappyCompression = 0x1
};
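
// Illustration (a sketch added here, not part of the original header): because
// the enum values above are baked into the on-disk format, a compile-time
// check can guard against accidental renumbering. static_assert requires
// C++11, so it is wrapped in a feature test.
#if __cplusplus >= 201103L
static_assert(kNoCompression == 0x0 && kSnappyCompression == 0x1,
              "CompressionType values are persisted on disk and must not change");
#endif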

// Options to control the behavior of a database (passed to DB::Open)
struct Options {
  // -------------------
  // Parameters that affect behavior

  // Comparator used to define the order of keys in the table.
  // Default: a comparator that uses lexicographic byte-wise ordering
  //
  // REQUIRES: The client must ensure that the comparator supplied
  // here has the same name and orders keys *exactly* the same as the
  // comparator provided to previous open calls on the same DB.
  const Comparator* comparator;

  // If true, the database will be created if it is missing.
  // Default: false
  bool create_if_missing;

  // If true, an error is raised if the database already exists.
  // Default: false
  bool error_if_exists;

  // If true, the implementation will do aggressive checking of the
  // data it is processing and will stop early if it detects any
  // errors. This may have unforeseen ramifications: for example, a
  // corruption of one DB entry may cause a large number of entries to
  // become unreadable or for the entire DB to become unopenable.
  // Default: false
  bool paranoid_checks;

  // Use the specified object to interact with the environment,
  // e.g. to read/write files, schedule background work, etc.
  // Default: Env::Default()
  Env* env;

  // Any internal progress/error information generated by the db will
  // be written to info_log if it is non-NULL, or to a file stored
  // in the same directory as the DB contents if info_log is NULL.
  // Default: NULL
  Logger* info_log;

  // -------------------
  // Parameters that affect performance

  // Amount of data to build up in memory (backed by an unsorted log
  // on disk) before converting to a sorted on-disk file.
  //
  // Larger values increase performance, especially during bulk loads.
  // Up to two write buffers may be held in memory at the same time,
  // so you may wish to adjust this parameter to control memory usage.
  // Also, a larger write buffer will result in a longer recovery time
  // the next time the database is opened.
  //
  // Default: 4MB
  size_t write_buffer_size;

  // Number of open files that can be used by the DB. You may need to
  // increase this if your database has a large working set (budget
  // one open file per 2MB of working set).
  //
  // Default: 1000
  int max_open_files;

  // Control over blocks (user data is stored in a set of blocks, and
  // a block is the unit of reading from disk).

  // If non-NULL, use the specified cache for blocks.
  // If NULL, leveldb will automatically create and use an 8MB internal cache.
  // Default: NULL
  Cache* block_cache;

  // Approximate size of user data packed per block. Note that the
  // block size specified here corresponds to uncompressed data. The
  // actual size of the unit read from disk may be smaller if
  // compression is enabled. This parameter can be changed dynamically.
  //
  // Default: 4K
  size_t block_size;

  // Number of keys between restart points for delta encoding of keys.
  // This parameter can be changed dynamically. Most clients should
  // leave this parameter alone.
  //
  // Default: 16
  int block_restart_interval;

  // Compress blocks using the specified compression algorithm. This
  // parameter can be changed dynamically.
  //
  // Default: kSnappyCompression, which gives lightweight but fast
  // compression.
  //
  // Typical speeds of kSnappyCompression on an Intel(R) Core(TM)2 2.4GHz:
  //    ~200-500MB/s compression
  //    ~400-800MB/s decompression
  // Note that these speeds are significantly faster than most
  // persistent storage speeds, and therefore it is typically never
  // worth switching to kNoCompression. Even if the input data is
  // incompressible, the kSnappyCompression implementation will
  // efficiently detect that and will switch to uncompressed mode.
  CompressionType compression;

  // If non-NULL, use the specified filter policy to reduce disk reads.
  // Many applications will benefit from passing the result of
  // NewBloomFilterPolicy() here.
  //
  // Default: NULL
  const FilterPolicy* filter_policy;

  // Create an Options object with default values for all fields.
  Options();
};
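
// Example (a sketch added for illustration; the helper name and the tuning
// values are assumptions, not recommendations from this header): building an
// Options object for a write-heavy bulk load using only the fields declared
// above. See the end-of-file sketch for how it is passed to DB::Open().
inline Options BulkLoadOptions() {
  Options options;                           // Options() fills in the defaults above
  options.create_if_missing = true;          // create the DB on first open
  options.write_buffer_size = 64 << 20;      // 64MB memtable: fewer, larger sorted files
  options.block_size = 64 * 1024;            // larger blocks tend to favor sequential scans
  options.compression = kSnappyCompression;  // keep the fast default compression
  return options;
}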

// Options that control read operations
struct ReadOptions {
  // If true, all data read from underlying storage will be
  // verified against corresponding checksums.
  // Default: false
  bool verify_checksums;

  // Should the data read for this iteration be cached in memory?
  // Callers may wish to set this field to false for bulk scans.
  // Default: true
  bool fill_cache;

  // If "snapshot" is non-NULL, read as of the supplied snapshot
  // (which must belong to the DB that is being read and which must
  // not have been released). If "snapshot" is NULL, use an implicit
  // snapshot of the state at the beginning of this read operation.
  // Default: NULL
  const Snapshot* snapshot;

  ReadOptions()
      : verify_checksums(false),
        fill_cache(true),
        snapshot(NULL) {
  }
};
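
// Example (a sketch; the helper name is made up for illustration): ReadOptions
// for a long bulk scan. verify_checksums trades CPU for early detection of
// corrupted blocks, and fill_cache = false keeps the scan from evicting the
// hot working set from block_cache, as the comment on fill_cache suggests.
inline ReadOptions BulkScanOptions() {
  ReadOptions options;              // defaults: no checksum checks, fill_cache = true
  options.verify_checksums = true;  // verify block checksums while scanning
  options.fill_cache = false;       // do not pollute the block cache
  return options;                   // options.snapshot stays NULL: read the latest state
}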

// Options that control write operations
struct WriteOptions {
  // If true, the write will be flushed from the operating system
  // buffer cache (by calling WritableFile::Sync()) before the write
  // is considered complete. If this flag is true, writes will be
  // slower.
  //
  // If this flag is false, and the machine crashes, some recent
  // writes may be lost. Note that if it is just the process that
  // crashes (i.e., the machine does not reboot), no writes will be
  // lost even if sync==false.
  //
  // In other words, a DB write with sync==false has similar
  // crash semantics as the "write()" system call. A DB write
  // with sync==true has similar crash semantics to a "write()"
  // system call followed by "fsync()".
  //
  // Default: false
  bool sync;

  WriteOptions()
      : sync(false) {
  }
};
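
// Example (a sketch; the helper name is made up for illustration): synchronous
// WriteOptions. As described above, sync == true gives write()+fsync()-like
// durability for each write at the cost of throughput; grouping related
// updates into one WriteBatch amortizes that cost over several keys.
inline WriteOptions SyncWriteOptions() {
  WriteOptions options;  // default constructor sets sync = false
  options.sync = true;   // flush to stable storage before the write returns
  return options;
}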

}  // namespace leveldb

#endif  // STORAGE_LEVELDB_INCLUDE_OPTIONS_H_
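
// End-to-end usage sketch (comments only: it needs "leveldb/db.h",
// "leveldb/write_batch.h", <cassert> and <string>, which this header does not
// include; the database path is made up, and BulkLoadOptions()/SyncWriteOptions()
// are the sketch helpers defined above, not part of the LevelDB API):
//
//   leveldb::DB* db;
//   leveldb::Status s =
//       leveldb::DB::Open(leveldb::BulkLoadOptions(), "/tmp/testdb", &db);
//   assert(s.ok());
//
//   // One synced batch: both updates reach stable storage with a single fsync().
//   leveldb::WriteBatch batch;
//   batch.Put("key1", "value1");
//   batch.Put("key2", "value2");
//   s = db->Write(leveldb::SyncWriteOptions(), &batch);
//
//   std::string value;
//   s = db->Get(leveldb::ReadOptions(), "key1", &value);  // default read options
//   delete db;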