Authors: Han Chenxu (10225101440), Li Chang (10225102463)


Release 1.18 changes:

* Update version number to 1.18.
* Replace the basic fprintf call with a call to fwrite in order to work around the apparent compiler optimization/rewrite failure that we are seeing with the new toolchain/iOS SDKs provided with Xcode6 and iOS8.
* Fix ALL the header guards.
* Created a README.md with the LevelDB project description.
* A new CONTRIBUTING file.
* Don't implicitly convert uint64_t to size_t or int. Either preserve it as uint64_t, or explicitly cast. This fixes MSVC warnings about possible value truncation when compiling this code in Chromium.
* Added a DumpFile() library function that encapsulates the guts of the "leveldbutil dump" command. This will allow clients to dump data to their log files instead of stdout. It will also allow clients to supply their own environment.
* leveldb: Remove unused function 'ConsumeChar'.
* leveldbutil: Remove unused member variables from WriteBatchItemPrinter.
* OpenBSD, NetBSD and DragonflyBSD have _LITTLE_ENDIAN, so define PLATFORM_IS_LITTLE_ENDIAN like on FreeBSD. This fixes issue #143, issue #198, and issue #249.
* Switch from <cstdatomic> to <atomic>. The former never made it into the standard and doesn't exist in modern gcc versions at all. The latter contains everything that leveldb was using from the former. This problem was noticed when porting to Portable Native Client, where no memory barrier is defined. The fact that <cstdatomic> is missing normally goes unnoticed since memory barriers are defined for most architectures.
* Make Hash() treat its input as unsigned. Before this change, LevelDB files from platforms with different signedness of char were not compatible. This fixes issue #243.
* Verify checksums of index/meta/filter blocks when paranoid_checks is set.
* Invoke all tools for iOS with xcrun. (This was causing problems with the new XCode 5.1.1 image on pulse.)
* Include <sys/stat.h> only once, and fix the following linter warning: "Found C system header after C++ system header".
* When encountering a corrupted table file, return Status::Corruption instead of Status::InvalidArgument.
* Support cygwin as a build platform; patch is from https://code.google.com/p/leveldb/issues/detail?id=188
* Fix typo; merge patch from https://code.google.com/p/leveldb/issues/detail?id=159
* Fix typos and comments, and address issue #166 and issue #241.
* Add missing db synchronize after "fillseq" in the benchmark.
* Removed unused variable in SeekRandom: value (issue #201).
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#ifndef STORAGE_LEVELDB_INCLUDE_OPTIONS_H_
#define STORAGE_LEVELDB_INCLUDE_OPTIONS_H_

#include <stddef.h>

#include "leveldb/export.h"

namespace leveldb {

class Cache;
class Comparator;
class Env;
class FilterPolicy;
class Logger;
class Snapshot;

// DB contents are stored in a set of blocks, each of which holds a
// sequence of key,value pairs. Each block may be compressed before
// being stored in a file. The following enum describes which
// compression method (if any) is used to compress a block.
enum CompressionType {
  // NOTE: do not change the values of existing entries, as these are
  // part of the persistent format on disk.
  kNoCompression = 0x0,
  kSnappyCompression = 0x1
};

// Options to control the behavior of a database (passed to DB::Open)
struct LEVELDB_EXPORT Options {
  // -------------------
  // Parameters that affect behavior

  // Comparator used to define the order of keys in the table.
  // Default: a comparator that uses lexicographic byte-wise ordering
  //
  // REQUIRES: The client must ensure that the comparator supplied
  // here has the same name and orders keys *exactly* the same as the
  // comparator provided to previous open calls on the same DB.
  const Comparator* comparator;

  // If true, the database will be created if it is missing.
  // Default: false
  bool create_if_missing;

  // If true, an error is raised if the database already exists.
  // Default: false
  bool error_if_exists;

  // If true, the implementation will do aggressive checking of the
  // data it is processing and will stop early if it detects any
  // errors. This may have unforeseen ramifications: for example, a
  // corruption of one DB entry may cause a large number of entries to
  // become unreadable or for the entire DB to become unopenable.
  // Default: false
  bool paranoid_checks;

  // Use the specified object to interact with the environment,
  // e.g. to read/write files, schedule background work, etc.
  // Default: Env::Default()
  Env* env;

  // Any internal progress/error information generated by the db will
  // be written to info_log if it is non-NULL, or to a file stored
  // in the same directory as the DB contents if info_log is NULL.
  // Default: NULL
  Logger* info_log;

  // -------------------
  // Parameters that affect performance

  // Amount of data to build up in memory (backed by an unsorted log
  // on disk) before converting to a sorted on-disk file.
  //
  // Larger values increase performance, especially during bulk loads.
  // Up to two write buffers may be held in memory at the same time,
  // so you may wish to adjust this parameter to control memory usage.
  // Also, a larger write buffer will result in a longer recovery time
  // the next time the database is opened.
  //
  // Default: 4MB
  size_t write_buffer_size;

  // Number of open files that can be used by the DB. You may need to
  // increase this if your database has a large working set (budget
  // one open file per 2MB of working set).
  //
  // Default: 1000
  int max_open_files;

  // Control over blocks (user data is stored in a set of blocks, and
  // a block is the unit of reading from disk).

  // If non-NULL, use the specified cache for blocks.
  // If NULL, leveldb will automatically create and use an 8MB internal cache.
  // Default: NULL
  Cache* block_cache;

  // Approximate size of user data packed per block. Note that the
  // block size specified here corresponds to uncompressed data. The
  // actual size of the unit read from disk may be smaller if
  // compression is enabled. This parameter can be changed dynamically.
  //
  // Default: 4K
  size_t block_size;

  // Number of keys between restart points for delta encoding of keys.
  // This parameter can be changed dynamically. Most clients should
  // leave this parameter alone.
  //
  // Default: 16
  int block_restart_interval;

  // Leveldb will write up to this amount of bytes to a file before
  // switching to a new one.
  // Most clients should leave this parameter alone. However if your
  // filesystem is more efficient with larger files, you could
  // consider increasing the value. The downside will be longer
  // compactions and hence longer latency/performance hiccups.
  // Another reason to increase this parameter might be when you are
  // initially populating a large database.
  //
  // Default: 2MB
  size_t max_file_size;

  // Compress blocks using the specified compression algorithm. This
  // parameter can be changed dynamically.
  //
  // Default: kSnappyCompression, which gives lightweight but fast
  // compression.
  //
  // Typical speeds of kSnappyCompression on an Intel(R) Core(TM)2 2.4GHz:
  //    ~200-500MB/s compression
  //    ~400-800MB/s decompression
  // Note that these speeds are significantly faster than most
  // persistent storage speeds, and therefore it is typically never
  // worth switching to kNoCompression. Even if the input data is
  // incompressible, the kSnappyCompression implementation will
  // efficiently detect that and will switch to uncompressed mode.
  CompressionType compression;

  // EXPERIMENTAL: If true, append to existing MANIFEST and log files
  // when a database is opened. This can significantly speed up open.
  //
  // Default: currently false, but may become true later.
  bool reuse_logs;

  // If non-NULL, use the specified filter policy to reduce disk reads.
  // Many applications will benefit from passing the result of
  // NewBloomFilterPolicy() here.
  //
  // Default: NULL
  const FilterPolicy* filter_policy;

  // Create an Options object with default values for all fields.
  Options();
};
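For reference, the sketch below (not part of options.h) shows how these fields are typically filled in before opening a database. DB::Open(), NewLRUCache() and NewBloomFilterPolicy() are part of LevelDB's public API; the path "/tmp/testdb" and the specific sizes are only illustrative values, not recommendations.

#include "leveldb/db.h"
#include "leveldb/cache.h"
#include "leveldb/filter_policy.h"

leveldb::Options options;
options.create_if_missing = true;             // create the DB if it does not exist yet
options.write_buffer_size = 8 * 1024 * 1024;  // 8MB memtable instead of the 4MB default
options.block_cache = leveldb::NewLRUCache(64 * 1024 * 1024);  // 64MB block cache
options.filter_policy = leveldb::NewBloomFilterPolicy(10);     // ~10 bits per key

leveldb::DB* db = NULL;
leveldb::Status status = leveldb::DB::Open(options, "/tmp/testdb", &db);
if (!status.ok()) {
  // handle the error, e.g. log status.ToString()
}
// Note: the caller owns block_cache and filter_policy and must delete them
// after the DB (and any iterators) have been deleted.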
// Options that control read operations
struct LEVELDB_EXPORT ReadOptions {
  // If true, all data read from underlying storage will be
  // verified against corresponding checksums.
  // Default: false
  bool verify_checksums;

  // Should the data read for this iteration be cached in memory?
  // Callers may wish to set this field to false for bulk scans.
  // Default: true
  bool fill_cache;

  // If "snapshot" is non-NULL, read as of the supplied snapshot
  // (which must belong to the DB that is being read and which must
  // not have been released). If "snapshot" is NULL, use an implicit
  // snapshot of the state at the beginning of this read operation.
  // Default: NULL
  const Snapshot* snapshot;

  ReadOptions()
      : verify_checksums(false),
        fill_cache(true),
        snapshot(NULL) {
  }
};
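As a usage sketch (again not part of the header, and with illustrative settings), a bulk scan that should not pollute the block cache and should see a consistent view of the data can combine fill_cache and snapshot as follows; GetSnapshot(), NewIterator() and ReleaseSnapshot() are standard DB methods.

const leveldb::Snapshot* snapshot = db->GetSnapshot();

leveldb::ReadOptions read_options;
read_options.verify_checksums = true;  // re-check block checksums while scanning
read_options.fill_cache = false;       // do not evict hot data during the bulk scan
read_options.snapshot = snapshot;      // read a fixed, consistent view of the DB

leveldb::Iterator* it = db->NewIterator(read_options);
for (it->SeekToFirst(); it->Valid(); it->Next()) {
  // process it->key() and it->value()
}
delete it;
db->ReleaseSnapshot(snapshot);  // snapshots must be released back to the DB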
// Options that control write operations
struct LEVELDB_EXPORT WriteOptions {
  // If true, the write will be flushed from the operating system
  // buffer cache (by calling WritableFile::Sync()) before the write
  // is considered complete. If this flag is true, writes will be
  // slower.
  //
  // If this flag is false, and the machine crashes, some recent
  // writes may be lost. Note that if it is just the process that
  // crashes (i.e., the machine does not reboot), no writes will be
  // lost even if sync==false.
  //
  // In other words, a DB write with sync==false has similar
  // crash semantics as the "write()" system call. A DB write
  // with sync==true has similar crash semantics to a "write()"
  // system call followed by "fsync()".
  //
  // Default: false
  bool sync;

  WriteOptions()
      : sync(false) {
  }
};
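A small sketch of the trade-off described above: unsynchronized writes for bulk data, followed by one synchronous write acting as a durability barrier. Put() is the standard DB write method; the keys and values are made up for illustration.

leveldb::WriteOptions async_write;  // sync == false by default: fast, but recent writes
                                    // may be lost if the machine (not just the process) crashes
db->Put(async_write, "key1", "value1");

leveldb::WriteOptions sync_write;
sync_write.sync = true;             // flushed via WritableFile::Sync() before Put() returns
leveldb::Status s = db->Put(sync_write, "key2", "value2");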
}  // namespace leveldb

#endif  // STORAGE_LEVELDB_INCLUDE_OPTIONS_H_