Ви не можете вибрати більше 25 тем Теми мають розпочинатися з літери або цифри, можуть містити дефіси (-) і не повинні перевищувати 35 символів.

2300 рядки
66 KiB

Release 1.18 Changes are: * Update version number to 1.18 * Replace the basic fprintf call with a call to fwrite in order to work around the apparent compiler optimization/rewrite failure that we are seeing with the new toolchain/iOS SDKs provided with Xcode6 and iOS8. * Fix ALL the header guards. * Created a README.md with the LevelDB project description. * A new CONTRIBUTING file. * Don't implicitly convert uint64_t to size_t or int. Either preserve it as uint64_t, or explicitly cast. This fixes MSVC warnings about possible value truncation when compiling this code in Chromium. * Added a DumpFile() library function that encapsulates the guts of the "leveldbutil dump" command. This will allow clients to dump data to their log files instead of stdout. It will also allow clients to supply their own environment. * leveldb: Remove unused function 'ConsumeChar'. * leveldbutil: Remove unused member variables from WriteBatchItemPrinter. * OpenBSD, NetBSD and DragonflyBSD have _LITTLE_ENDIAN, so define PLATFORM_IS_LITTLE_ENDIAN like on FreeBSD. This fixes: * issue #143 * issue #198 * issue #249 * Switch from <cstdatomic> to <atomic>. The former never made it into the standard and doesn't exist in modern gcc versions at all. The latter contains everything that leveldb was using from the former. This problem was noticed when porting to Portable Native Client where no memory barrier is defined. The fact that <cstdatomic> is missing normally goes unnoticed since memory barriers are defined for most architectures. * Make Hash() treat its input as unsigned. Before this change LevelDB files from platforms with different signedness of char were not compatible. This change fixes: issue #243 * Verify checksums of index/meta/filter blocks when paranoid_checks set. * Invoke all tools for iOS with xcrun. (This was causing problems with the new XCode 5.1.1 image on pulse.)
* include <sys/stat.h> only once, and fix the following linter warning: "Found C system header after C++ system header" * When encountering a corrupted table file, return Status::Corruption instead of Status::InvalidArgument. * Support cygwin as build platform, patch is from https://code.google.com/p/leveldb/issues/detail?id=188 * Fix typo, merge patch from https://code.google.com/p/leveldb/issues/detail?id=159 * Fix typos and comments, and address the following two issues: * issue #166 * issue #241 * Add missing db synchronize after "fillseq" in the benchmark. * Removed unused variable in SeekRandom: value (issue #201)
10 років тому
  1. // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
  2. // Use of this source code is governed by a BSD-style license that can be
  3. // found in the LICENSE file. See the AUTHORS file for names of contributors.
  4. #include "leveldb/db.h"
  5. #include "leveldb/filter_policy.h"
  6. #include "db/db_impl.h"
  7. #include "db/filename.h"
  8. #include "db/version_set.h"
  9. #include "db/write_batch_internal.h"
  10. #include "leveldb/cache.h"
  11. #include "leveldb/env.h"
  12. #include "leveldb/table.h"
  13. #include "port/port.h"
  14. #include "port/thread_annotations.h"
  15. #include "util/hash.h"
  16. #include "util/logging.h"
  17. #include "util/mutexlock.h"
  18. #include "util/testharness.h"
  19. #include "util/testutil.h"
  20. namespace leveldb {
  21. static std::string RandomString(Random* rnd, int len) {
  22. std::string r;
  23. test::RandomString(rnd, len, &r);
  24. return r;
  25. }
  26. static std::string RandomKey(Random* rnd) {
  27. int len = (rnd->OneIn(3)
  28. ? 1 // Short sometimes to encourage collisions
  29. : (rnd->OneIn(100) ? rnd->Skewed(10) : rnd->Uniform(10)));
  30. return test::RandomKey(rnd, len);
  31. }
  32. namespace {
  33. class AtomicCounter {
  34. private:
  35. port::Mutex mu_;
  36. int count_ GUARDED_BY(mu_);
  37. public:
  38. AtomicCounter() : count_(0) { }
  39. void Increment() {
  40. IncrementBy(1);
  41. }
  42. void IncrementBy(int count) LOCKS_EXCLUDED(mu_) {
  43. MutexLock l(&mu_);
  44. count_ += count;
  45. }
  46. int Read() LOCKS_EXCLUDED(mu_) {
  47. MutexLock l(&mu_);
  48. return count_;
  49. }
  50. void Reset() LOCKS_EXCLUDED(mu_) {
  51. MutexLock l(&mu_);
  52. count_ = 0;
  53. }
  54. };
  55. void DelayMilliseconds(int millis) {
  56. Env::Default()->SleepForMicroseconds(millis * 1000);
  57. }
  58. }
  59. // Test Env to override default Env behavior for testing.
  60. class TestEnv : public EnvWrapper {
  61. public:
  62. explicit TestEnv(Env* base) : EnvWrapper(base), ignore_dot_files_(false) {}
  63. void SetIgnoreDotFiles(bool ignored) { ignore_dot_files_ = ignored; }
  64. Status GetChildren(const std::string& dir,
  65. std::vector<std::string>* result) override {
  66. Status s = target()->GetChildren(dir, result);
  67. if (!s.ok() || !ignore_dot_files_) {
  68. return s;
  69. }
  70. std::vector<std::string>::iterator it = result->begin();
  71. while (it != result->end()) {
  72. if ((*it == ".") || (*it == "..")) {
  73. it = result->erase(it);
  74. } else {
  75. ++it;
  76. }
  77. }
  78. return s;
  79. }
  80. private:
  81. bool ignore_dot_files_;
  82. };
// Special Env used to delay background operations
class SpecialEnv : public EnvWrapper {
 public:
  // Each flag below is an AtomicPointer used as a boolean: non-null means
  // the corresponding failure/delay is active.  Tests flip them with
  // Release_Store(); file wrappers poll them with Acquire_Load().
  // sstable/log Sync() calls are blocked while this pointer is non-null.
  port::AtomicPointer delay_data_sync_;
  // sstable/log Sync() calls return an error.
  port::AtomicPointer data_sync_error_;
  // Simulate no-space errors while this pointer is non-null.
  port::AtomicPointer no_space_;
  // Simulate non-writable file system while this pointer is non-null.
  port::AtomicPointer non_writable_;
  // Force sync of manifest files to fail while this pointer is non-null.
  port::AtomicPointer manifest_sync_error_;
  // Force write to manifest files to fail while this pointer is non-null.
  port::AtomicPointer manifest_write_error_;

  // When true, NewRandomAccessFile() wraps files so that every Read()
  // increments random_read_counter_.
  bool count_random_reads_;
  AtomicCounter random_read_counter_;

  explicit SpecialEnv(Env* base) : EnvWrapper(base) {
    delay_data_sync_.Release_Store(nullptr);
    data_sync_error_.Release_Store(nullptr);
    no_space_.Release_Store(nullptr);
    non_writable_.Release_Store(nullptr);
    count_random_reads_ = false;
    manifest_sync_error_.Release_Store(nullptr);
    manifest_write_error_.Release_Store(nullptr);
  }

  // Wraps sstable/log files in DataFile and MANIFEST files in
  // ManifestFile (selected by substring match on the file name) so the
  // fault-injection flags above take effect.
  Status NewWritableFile(const std::string& f, WritableFile** r) {
    // Forwards to the wrapped file, but honors no_space_ (silently drops
    // appends), data_sync_error_, and delay_data_sync_.
    class DataFile : public WritableFile {
     private:
      SpecialEnv* env_;
      WritableFile* base_;

     public:
      DataFile(SpecialEnv* env, WritableFile* base)
          : env_(env),
            base_(base) {
      }
      ~DataFile() { delete base_; }  // Owns the wrapped file.
      Status Append(const Slice& data) {
        if (env_->no_space_.Acquire_Load() != nullptr) {
          // Drop writes on the floor
          return Status::OK();
        } else {
          return base_->Append(data);
        }
      }
      Status Close() { return base_->Close(); }
      Status Flush() { return base_->Flush(); }
      Status Sync() {
        if (env_->data_sync_error_.Acquire_Load() != nullptr) {
          return Status::IOError("simulated data sync error");
        }
        // Spin (100ms at a time) until the test releases the sync block.
        while (env_->delay_data_sync_.Acquire_Load() != nullptr) {
          DelayMilliseconds(100);
        }
        return base_->Sync();
      }
    };
    // Forwards to the wrapped file, but honors manifest_write_error_ and
    // manifest_sync_error_.
    class ManifestFile : public WritableFile {
     private:
      SpecialEnv* env_;
      WritableFile* base_;

     public:
      ManifestFile(SpecialEnv* env, WritableFile* b) : env_(env), base_(b) { }
      ~ManifestFile() { delete base_; }  // Owns the wrapped file.
      Status Append(const Slice& data) {
        if (env_->manifest_write_error_.Acquire_Load() != nullptr) {
          return Status::IOError("simulated writer error");
        } else {
          return base_->Append(data);
        }
      }
      Status Close() { return base_->Close(); }
      Status Flush() { return base_->Flush(); }
      Status Sync() {
        if (env_->manifest_sync_error_.Acquire_Load() != nullptr) {
          return Status::IOError("simulated sync error");
        } else {
          return base_->Sync();
        }
      }
    };

    if (non_writable_.Acquire_Load() != nullptr) {
      return Status::IOError("simulated write error");
    }

    Status s = target()->NewWritableFile(f, r);
    if (s.ok()) {
      if (strstr(f.c_str(), ".ldb") != nullptr ||
          strstr(f.c_str(), ".log") != nullptr) {
        *r = new DataFile(this, *r);
      } else if (strstr(f.c_str(), "MANIFEST") != nullptr) {
        *r = new ManifestFile(this, *r);
      }
    }
    return s;
  }

  // When count_random_reads_ is set, wraps the file so each Read() bumps
  // random_read_counter_ before delegating to the real file.
  Status NewRandomAccessFile(const std::string& f, RandomAccessFile** r) {
    class CountingFile : public RandomAccessFile {
     private:
      RandomAccessFile* target_;
      AtomicCounter* counter_;

     public:
      CountingFile(RandomAccessFile* target, AtomicCounter* counter)
          : target_(target), counter_(counter) {
      }
      virtual ~CountingFile() { delete target_; }  // Owns the wrapped file.
      virtual Status Read(uint64_t offset, size_t n, Slice* result,
                          char* scratch) const {
        counter_->Increment();
        return target_->Read(offset, n, result, scratch);
      }
    };

    Status s = target()->NewRandomAccessFile(f, r);
    if (s.ok() && count_random_reads_) {
      *r = new CountingFile(*r, &random_read_counter_);
    }
    return s;
  }
};
// Test fixture: owns a scratch database under test::TmpDir(), opened on a
// SpecialEnv so tests can inject faults.  ChangeOptions() cycles the same
// test body through several option configurations.
class DBTest {
 private:
  const FilterPolicy* filter_policy_;

  // Sequence of option configurations to try
  enum OptionConfig {
    kDefault,
    kReuse,
    kFilter,
    kUncompressed,
    kEnd
  };
  int option_config_;

 public:
  std::string dbname_;
  SpecialEnv* env_;
  DB* db_;
  Options last_options_;  // Options used by the most recent TryReopen().

  DBTest() : option_config_(kDefault),
             env_(new SpecialEnv(Env::Default())) {
    filter_policy_ = NewBloomFilterPolicy(10);
    dbname_ = test::TmpDir() + "/db_test";
    // Wipe any leftovers from a previous run before opening.
    DestroyDB(dbname_, Options());
    db_ = nullptr;
    Reopen();
  }

  ~DBTest() {
    delete db_;
    DestroyDB(dbname_, Options());
    delete env_;
    delete filter_policy_;
  }

  // Switch to a fresh database with the next option configuration to
  // test. Return false if there are no more configurations to test.
  bool ChangeOptions() {
    option_config_++;
    if (option_config_ >= kEnd) {
      return false;
    } else {
      DestroyAndReopen();
      return true;
    }
  }

  // Return the current option configuration.
  Options CurrentOptions() {
    Options options;
    options.reuse_logs = false;
    switch (option_config_) {
      case kReuse:
        options.reuse_logs = true;
        break;
      case kFilter:
        options.filter_policy = filter_policy_;
        break;
      case kUncompressed:
        options.compression = kNoCompression;
        break;
      default:
        break;
    }
    return options;
  }

  // Access the concrete implementation to reach TEST_* hooks.
  DBImpl* dbfull() {
    return reinterpret_cast<DBImpl*>(db_);
  }

  // Reopen the database, asserting success.
  void Reopen(Options* options = nullptr) {
    ASSERT_OK(TryReopen(options));
  }

  void Close() {
    delete db_;
    db_ = nullptr;
  }

  // Delete the on-disk database and reopen it from scratch.
  void DestroyAndReopen(Options* options = nullptr) {
    delete db_;
    db_ = nullptr;
    DestroyDB(dbname_, Options());
    ASSERT_OK(TryReopen(options));
  }

  // Close the current handle and reopen with |options| (or
  // CurrentOptions() + create_if_missing when null).  Returns the open
  // status instead of asserting.
  Status TryReopen(Options* options) {
    delete db_;
    db_ = nullptr;
    Options opts;
    if (options != nullptr) {
      opts = *options;
    } else {
      opts = CurrentOptions();
      opts.create_if_missing = true;
    }
    last_options_ = opts;
    return DB::Open(opts, dbname_, &db_);
  }

  Status Put(const std::string& k, const std::string& v) {
    return db_->Put(WriteOptions(), k, v);
  }

  Status Delete(const std::string& k) {
    return db_->Delete(WriteOptions(), k);
  }

  // Lookup helper: returns the value, "NOT_FOUND", or the error string.
  std::string Get(const std::string& k, const Snapshot* snapshot = nullptr) {
    ReadOptions options;
    options.snapshot = snapshot;
    std::string result;
    Status s = db_->Get(options, k, &result);
    if (s.IsNotFound()) {
      result = "NOT_FOUND";
    } else if (!s.ok()) {
      result = s.ToString();
    }
    return result;
  }

  // Return a string that contains all key,value pairs in order,
  // formatted like "(k1->v1)(k2->v2)".
  std::string Contents() {
    std::vector<std::string> forward;
    std::string result;
    Iterator* iter = db_->NewIterator(ReadOptions());
    for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
      std::string s = IterStatus(iter);
      result.push_back('(');
      result.append(s);
      result.push_back(')');
      forward.push_back(s);
    }
    // Check reverse iteration results are the reverse of forward results
    size_t matched = 0;
    for (iter->SeekToLast(); iter->Valid(); iter->Prev()) {
      ASSERT_LT(matched, forward.size());
      ASSERT_EQ(IterStatus(iter), forward[forward.size() - matched - 1]);
      matched++;
    }
    ASSERT_EQ(matched, forward.size());
    delete iter;
    return result;
  }

  // Dump every internal entry (all sequence numbers) for |user_key| as
  // "[ v1, DEL, v2 ]", using the internal (pre-compaction) iterator.
  std::string AllEntriesFor(const Slice& user_key) {
    Iterator* iter = dbfull()->TEST_NewInternalIterator();
    InternalKey target(user_key, kMaxSequenceNumber, kTypeValue);
    iter->Seek(target.Encode());
    std::string result;
    if (!iter->status().ok()) {
      result = iter->status().ToString();
    } else {
      result = "[ ";
      bool first = true;
      while (iter->Valid()) {
        ParsedInternalKey ikey;
        if (!ParseInternalKey(iter->key(), &ikey)) {
          result += "CORRUPTED";
        } else {
          // Stop once we walk past the requested user key.
          if (last_options_.comparator->Compare(ikey.user_key, user_key) != 0) {
            break;
          }
          if (!first) {
            result += ", ";
          }
          first = false;
          switch (ikey.type) {
            case kTypeValue:
              result += iter->value().ToString();
              break;
            case kTypeDeletion:
              result += "DEL";
              break;
          }
        }
        iter->Next();
      }
      if (!first) {
        result += " ";
      }
      result += "]";
    }
    delete iter;
    return result;
  }

  // Number of table files at |level|, read from the DB property.
  int NumTableFilesAtLevel(int level) {
    std::string property;
    ASSERT_TRUE(
        db_->GetProperty("leveldb.num-files-at-level" + NumberToString(level),
                         &property));
    return atoi(property.c_str());
  }

  // Sum of table files across all levels.
  int TotalTableFiles() {
    int result = 0;
    for (int level = 0; level < config::kNumLevels; level++) {
      result += NumTableFilesAtLevel(level);
    }
    return result;
  }

  // Return spread of files per level
  std::string FilesPerLevel() {
    std::string result;
    int last_non_zero_offset = 0;
    for (int level = 0; level < config::kNumLevels; level++) {
      int f = NumTableFilesAtLevel(level);
      char buf[100];
      snprintf(buf, sizeof(buf), "%s%d", (level ? "," : ""), f);
      result += buf;
      if (f > 0) {
        // Remember where the last non-zero count ends so trailing
        // ",0,0,..." can be trimmed below.
        last_non_zero_offset = result.size();
      }
    }
    result.resize(last_non_zero_offset);
    return result;
  }

  // Number of directory entries (of any kind) in the database dir.
  int CountFiles() {
    std::vector<std::string> files;
    env_->GetChildren(dbname_, &files);
    return static_cast<int>(files.size());
  }

  // Approximate on-disk size of the key range [start,limit).
  uint64_t Size(const Slice& start, const Slice& limit) {
    Range r(start, limit);
    uint64_t size;
    db_->GetApproximateSizes(&r, 1, &size);
    return size;
  }

  // Manually compact the key range [start,limit].
  void Compact(const Slice& start, const Slice& limit) {
    db_->CompactRange(&start, &limit);
  }

  // Do n memtable compactions, each of which produces an sstable
  // covering the range [small,large].
  void MakeTables(int n, const std::string& small, const std::string& large) {
    for (int i = 0; i < n; i++) {
      Put(small, "begin");
      Put(large, "end");
      dbfull()->TEST_CompactMemTable();
    }
  }

  // Prevent pushing of new sstables into deeper levels by adding
  // tables that cover a specified range to all levels.
  void FillLevels(const std::string& smallest, const std::string& largest) {
    MakeTables(config::kNumLevels, smallest, largest);
  }

  // Debug helper: print per-level file counts and max overlap to stderr.
  void DumpFileCounts(const char* label) {
    fprintf(stderr, "---\n%s:\n", label);
    fprintf(stderr, "maxoverlap: %lld\n",
            static_cast<long long>(
                dbfull()->TEST_MaxNextLevelOverlappingBytes()));
    for (int level = 0; level < config::kNumLevels; level++) {
      int num = NumTableFilesAtLevel(level);
      if (num > 0) {
        fprintf(stderr, " level %3d : %d files\n", level, num);
      }
    }
  }

  // Human-readable sstable listing from the DB property.
  std::string DumpSSTableList() {
    std::string property;
    db_->GetProperty("leveldb.sstables", &property);
    return property;
  }

  // "key->value" for a valid iterator position, else "(invalid)".
  std::string IterStatus(Iterator* iter) {
    std::string result;
    if (iter->Valid()) {
      result = iter->key().ToString() + "->" + iter->value().ToString();
    } else {
      result = "(invalid)";
    }
    return result;
  }

  // Delete one table file, if any exists.  Returns true on deletion.
  bool DeleteAnSSTFile() {
    std::vector<std::string> filenames;
    ASSERT_OK(env_->GetChildren(dbname_, &filenames));
    uint64_t number;
    FileType type;
    for (size_t i = 0; i < filenames.size(); i++) {
      if (ParseFileName(filenames[i], &number, &type) && type == kTableFile) {
        ASSERT_OK(env_->DeleteFile(TableFileName(dbname_, number)));
        return true;
      }
    }
    return false;
  }

  // Returns number of files renamed.
  int RenameLDBToSST() {
    std::vector<std::string> filenames;
    ASSERT_OK(env_->GetChildren(dbname_, &filenames));
    uint64_t number;
    FileType type;
    int files_renamed = 0;
    for (size_t i = 0; i < filenames.size(); i++) {
      if (ParseFileName(filenames[i], &number, &type) && type == kTableFile) {
        const std::string from = TableFileName(dbname_, number);
        const std::string to = SSTTableFileName(dbname_, number);
        ASSERT_OK(env_->RenameFile(from, to));
        files_renamed++;
      }
    }
    return files_renamed;
  }
};
// An empty database returns NOT_FOUND for any key, under every option
// configuration.
TEST(DBTest, Empty) {
  do {
    ASSERT_TRUE(db_ != nullptr);
    ASSERT_EQ("NOT_FOUND", Get("foo"));
  } while (ChangeOptions());
}
// Basic Put/Get: later writes overwrite earlier ones, and distinct keys
// are independent.
TEST(DBTest, ReadWrite) {
  do {
    ASSERT_OK(Put("foo", "v1"));
    ASSERT_EQ("v1", Get("foo"));
    ASSERT_OK(Put("bar", "v2"));
    ASSERT_OK(Put("foo", "v3"));
    ASSERT_EQ("v3", Get("foo"));
    ASSERT_EQ("v2", Get("bar"));
  } while (ChangeOptions());
}
// Put, overwrite, then Delete: a deleted key reads as NOT_FOUND.
TEST(DBTest, PutDeleteGet) {
  do {
    ASSERT_OK(db_->Put(WriteOptions(), "foo", "v1"));
    ASSERT_EQ("v1", Get("foo"));
    ASSERT_OK(db_->Put(WriteOptions(), "foo", "v2"));
    ASSERT_EQ("v2", Get("foo"));
    ASSERT_OK(db_->Delete(WriteOptions(), "foo"));
    ASSERT_EQ("NOT_FOUND", Get("foo"));
  } while (ChangeOptions());
}
// A key in the immutable memtable must remain readable while its
// compaction is stalled (sync calls blocked via SpecialEnv).
TEST(DBTest, GetFromImmutableLayer) {
  do {
    Options options = CurrentOptions();
    options.env = env_;
    options.write_buffer_size = 100000; // Small write buffer
    Reopen(&options);
    ASSERT_OK(Put("foo", "v1"));
    ASSERT_EQ("v1", Get("foo"));
    env_->delay_data_sync_.Release_Store(env_); // Block sync calls
    Put("k1", std::string(100000, 'x')); // Fill memtable
    Put("k2", std::string(100000, 'y')); // Trigger compaction
    ASSERT_EQ("v1", Get("foo"));
    env_->delay_data_sync_.Release_Store(nullptr); // Release sync calls
  } while (ChangeOptions());
}
// A key remains readable after its memtable is flushed to an sstable.
TEST(DBTest, GetFromVersions) {
  do {
    ASSERT_OK(Put("foo", "v1"));
    dbfull()->TEST_CompactMemTable();
    ASSERT_EQ("v1", Get("foo"));
  } while (ChangeOptions());
}
// The approximate-memory-usage property reports a positive, sane value.
TEST(DBTest, GetMemUsage) {
  do {
    ASSERT_OK(Put("foo", "v1"));
    std::string val;
    ASSERT_TRUE(db_->GetProperty("leveldb.approximate-memory-usage", &val));
    int mem_usage = atoi(val.c_str());
    ASSERT_GT(mem_usage, 0);
    ASSERT_LT(mem_usage, 5*1024*1024);  // Sanity upper bound for this workload
  } while (ChangeOptions());
}
// A snapshot pins the pre-write value, including across a memtable
// compaction.
TEST(DBTest, GetSnapshot) {
  do {
    // Try with both a short key and a long key
    for (int i = 0; i < 2; i++) {
      std::string key = (i == 0) ? std::string("foo") : std::string(200, 'x');
      ASSERT_OK(Put(key, "v1"));
      const Snapshot* s1 = db_->GetSnapshot();
      ASSERT_OK(Put(key, "v2"));
      ASSERT_EQ("v2", Get(key));
      ASSERT_EQ("v1", Get(key, s1));
      dbfull()->TEST_CompactMemTable();
      ASSERT_EQ("v2", Get(key));
      ASSERT_EQ("v1", Get(key, s1));
      db_->ReleaseSnapshot(s1);
    }
  } while (ChangeOptions());
}
// Multiple snapshots taken at the same sequence number behave
// identically, and releasing one does not disturb the others.
TEST(DBTest, GetIdenticalSnapshots) {
  do {
    // Try with both a short key and a long key
    for (int i = 0; i < 2; i++) {
      std::string key = (i == 0) ? std::string("foo") : std::string(200, 'x');
      ASSERT_OK(Put(key, "v1"));
      const Snapshot* s1 = db_->GetSnapshot();
      const Snapshot* s2 = db_->GetSnapshot();
      const Snapshot* s3 = db_->GetSnapshot();
      ASSERT_OK(Put(key, "v2"));
      ASSERT_EQ("v2", Get(key));
      ASSERT_EQ("v1", Get(key, s1));
      ASSERT_EQ("v1", Get(key, s2));
      ASSERT_EQ("v1", Get(key, s3));
      db_->ReleaseSnapshot(s1);
      dbfull()->TEST_CompactMemTable();
      ASSERT_EQ("v2", Get(key));
      ASSERT_EQ("v1", Get(key, s2));
      db_->ReleaseSnapshot(s2);
      ASSERT_EQ("v1", Get(key, s3));
      db_->ReleaseSnapshot(s3);
    }
  } while (ChangeOptions());
}
// An iterator over a snapshot taken before any writes sees nothing —
// both from the memtable and after flushing to an sstable.
TEST(DBTest, IterateOverEmptySnapshot) {
  do {
    const Snapshot* snapshot = db_->GetSnapshot();
    ReadOptions read_options;
    read_options.snapshot = snapshot;
    ASSERT_OK(Put("foo", "v1"));
    ASSERT_OK(Put("foo", "v2"));
    Iterator* iterator1 = db_->NewIterator(read_options);
    iterator1->SeekToFirst();
    ASSERT_TRUE(!iterator1->Valid());
    delete iterator1;
    dbfull()->TEST_CompactMemTable();
    Iterator* iterator2 = db_->NewIterator(read_options);
    iterator2->SeekToFirst();
    ASSERT_TRUE(!iterator2->Valid());
    delete iterator2;
    db_->ReleaseSnapshot(snapshot);
  } while (ChangeOptions());
}
TEST(DBTest, GetLevel0Ordering) {
  do {
    // Check that we process level-0 files in correct order. The code
    // below generates two level-0 files where the earlier one comes
    // before the later one in the level-0 file list since the earlier
    // one has a smaller "smallest" key.
    ASSERT_OK(Put("bar", "b"));
    ASSERT_OK(Put("foo", "v1"));
    dbfull()->TEST_CompactMemTable();
    ASSERT_OK(Put("foo", "v2"));
    dbfull()->TEST_CompactMemTable();
    // The newer file must win: Get must return "v2", not the stale "v1".
    ASSERT_EQ("v2", Get("foo"));
  } while (ChangeOptions());
}
// A fresh write in the memtable must shadow an older value that was
// pushed to a deeper level by a manual compaction.
TEST(DBTest, GetOrderedByLevels) {
  do {
    ASSERT_OK(Put("foo", "v1"));
    Compact("a", "z");
    ASSERT_EQ("v1", Get("foo"));
    ASSERT_OK(Put("foo", "v2"));
    ASSERT_EQ("v2", Get("foo"));
    dbfull()->TEST_CompactMemTable();
    ASSERT_EQ("v2", Get("foo"));
  } while (ChangeOptions());
}
// With multiple disjoint files in a non-level-0 level, Get must consult
// the file whose range covers the key.
TEST(DBTest, GetPicksCorrectFile) {
  do {
    // Arrange to have multiple files in a non-level-0 level.
    ASSERT_OK(Put("a", "va"));
    Compact("a", "b");
    ASSERT_OK(Put("x", "vx"));
    Compact("x", "y");
    ASSERT_OK(Put("f", "vf"));
    Compact("f", "g");
    ASSERT_EQ("va", Get("a"));
    ASSERT_EQ("vf", Get("f"));
    ASSERT_EQ("vx", Get("x"));
  } while (ChangeOptions());
}
TEST(DBTest, GetEncountersEmptyLevel) {
  do {
    // Arrange for the following to happen:
    // * sstable A in level 0
    // * nothing in level 1
    // * sstable B in level 2
    // Then do enough Get() calls to arrange for an automatic compaction
    // of sstable A. A bug would cause the compaction to be marked as
    // occurring at level 1 (instead of the correct level 0).

    // Step 1: First place sstables in levels 0 and 2
    int compaction_count = 0;
    while (NumTableFilesAtLevel(0) == 0 ||
           NumTableFilesAtLevel(2) == 0) {
      ASSERT_LE(compaction_count, 100) << "could not fill levels 0 and 2";
      compaction_count++;
      Put("a", "begin");
      Put("z", "end");
      dbfull()->TEST_CompactMemTable();
    }

    // Step 2: clear level 1 if necessary.
    dbfull()->TEST_CompactRange(1, nullptr, nullptr);
    ASSERT_EQ(NumTableFilesAtLevel(0), 1);
    ASSERT_EQ(NumTableFilesAtLevel(1), 0);
    ASSERT_EQ(NumTableFilesAtLevel(2), 1);

    // Step 3: read a bunch of times
    // (enough misses to trip the allowed_seeks-based auto-compaction).
    for (int i = 0; i < 1000; i++) {
      ASSERT_EQ("NOT_FOUND", Get("missing"));
    }

    // Step 4: Wait for compaction to finish
    DelayMilliseconds(1000);
    ASSERT_EQ(NumTableFilesAtLevel(0), 0);
  } while (ChangeOptions());
}
// All positioning operations on an iterator over an empty DB yield an
// invalid iterator.
TEST(DBTest, IterEmpty) {
  Iterator* iter = db_->NewIterator(ReadOptions());

  iter->SeekToFirst();
  ASSERT_EQ(IterStatus(iter), "(invalid)");

  iter->SeekToLast();
  ASSERT_EQ(IterStatus(iter), "(invalid)");

  iter->Seek("foo");
  ASSERT_EQ(IterStatus(iter), "(invalid)");

  delete iter;
}
// Iterator behavior on a one-entry DB: every seek lands on the single
// entry (or past it), and stepping off either end invalidates.
TEST(DBTest, IterSingle) {
  ASSERT_OK(Put("a", "va"));
  Iterator* iter = db_->NewIterator(ReadOptions());

  iter->SeekToFirst();
  ASSERT_EQ(IterStatus(iter), "a->va");
  iter->Next();
  ASSERT_EQ(IterStatus(iter), "(invalid)");
  iter->SeekToFirst();
  ASSERT_EQ(IterStatus(iter), "a->va");
  iter->Prev();
  ASSERT_EQ(IterStatus(iter), "(invalid)");

  iter->SeekToLast();
  ASSERT_EQ(IterStatus(iter), "a->va");
  iter->Next();
  ASSERT_EQ(IterStatus(iter), "(invalid)");
  iter->SeekToLast();
  ASSERT_EQ(IterStatus(iter), "a->va");
  iter->Prev();
  ASSERT_EQ(IterStatus(iter), "(invalid)");

  // Seek to a target <= "a" finds the entry; past it is invalid.
  iter->Seek("");
  ASSERT_EQ(IterStatus(iter), "a->va");
  iter->Next();
  ASSERT_EQ(IterStatus(iter), "(invalid)");

  iter->Seek("a");
  ASSERT_EQ(IterStatus(iter), "a->va");
  iter->Next();
  ASSERT_EQ(IterStatus(iter), "(invalid)");

  iter->Seek("b");
  ASSERT_EQ(IterStatus(iter), "(invalid)");

  delete iter;
}
// Iterator behavior over three entries: forward/backward walks, seeks,
// direction switches, and snapshot isolation from later writes.
TEST(DBTest, IterMulti) {
  ASSERT_OK(Put("a", "va"));
  ASSERT_OK(Put("b", "vb"));
  ASSERT_OK(Put("c", "vc"));
  Iterator* iter = db_->NewIterator(ReadOptions());

  iter->SeekToFirst();
  ASSERT_EQ(IterStatus(iter), "a->va");
  iter->Next();
  ASSERT_EQ(IterStatus(iter), "b->vb");
  iter->Next();
  ASSERT_EQ(IterStatus(iter), "c->vc");
  iter->Next();
  ASSERT_EQ(IterStatus(iter), "(invalid)");
  iter->SeekToFirst();
  ASSERT_EQ(IterStatus(iter), "a->va");
  iter->Prev();
  ASSERT_EQ(IterStatus(iter), "(invalid)");

  iter->SeekToLast();
  ASSERT_EQ(IterStatus(iter), "c->vc");
  iter->Prev();
  ASSERT_EQ(IterStatus(iter), "b->vb");
  iter->Prev();
  ASSERT_EQ(IterStatus(iter), "a->va");
  iter->Prev();
  ASSERT_EQ(IterStatus(iter), "(invalid)");
  iter->SeekToLast();
  ASSERT_EQ(IterStatus(iter), "c->vc");
  iter->Next();
  ASSERT_EQ(IterStatus(iter), "(invalid)");

  // Seek lands on the first entry >= target.
  iter->Seek("");
  ASSERT_EQ(IterStatus(iter), "a->va");
  iter->Seek("a");
  ASSERT_EQ(IterStatus(iter), "a->va");
  iter->Seek("ax");
  ASSERT_EQ(IterStatus(iter), "b->vb");
  iter->Seek("b");
  ASSERT_EQ(IterStatus(iter), "b->vb");
  iter->Seek("z");
  ASSERT_EQ(IterStatus(iter), "(invalid)");

  // Switch from reverse to forward
  iter->SeekToLast();
  iter->Prev();
  iter->Prev();
  iter->Next();
  ASSERT_EQ(IterStatus(iter), "b->vb");

  // Switch from forward to reverse
  iter->SeekToFirst();
  iter->Next();
  iter->Next();
  iter->Prev();
  ASSERT_EQ(IterStatus(iter), "b->vb");

  // Make sure iter stays at snapshot
  ASSERT_OK(Put("a", "va2"));
  ASSERT_OK(Put("a2", "va3"));
  ASSERT_OK(Put("b", "vb2"));
  ASSERT_OK(Put("c", "vc2"));
  ASSERT_OK(Delete("b"));
  iter->SeekToFirst();
  ASSERT_EQ(IterStatus(iter), "a->va");
  iter->Next();
  ASSERT_EQ(IterStatus(iter), "b->vb");
  iter->Next();
  ASSERT_EQ(IterStatus(iter), "c->vc");
  iter->Next();
  ASSERT_EQ(IterStatus(iter), "(invalid)");
  iter->SeekToLast();
  ASSERT_EQ(IterStatus(iter), "c->vc");
  iter->Prev();
  ASSERT_EQ(IterStatus(iter), "b->vb");
  iter->Prev();
  ASSERT_EQ(IterStatus(iter), "a->va");
  iter->Prev();
  ASSERT_EQ(IterStatus(iter), "(invalid)");

  delete iter;
}
// Iteration works correctly over a mix of tiny and ~100KB values, in
// both directions.
TEST(DBTest, IterSmallAndLargeMix) {
  ASSERT_OK(Put("a", "va"));
  ASSERT_OK(Put("b", std::string(100000, 'b')));
  ASSERT_OK(Put("c", "vc"));
  ASSERT_OK(Put("d", std::string(100000, 'd')));
  ASSERT_OK(Put("e", std::string(100000, 'e')));

  Iterator* iter = db_->NewIterator(ReadOptions());

  iter->SeekToFirst();
  ASSERT_EQ(IterStatus(iter), "a->va");
  iter->Next();
  ASSERT_EQ(IterStatus(iter), "b->" + std::string(100000, 'b'));
  iter->Next();
  ASSERT_EQ(IterStatus(iter), "c->vc");
  iter->Next();
  ASSERT_EQ(IterStatus(iter), "d->" + std::string(100000, 'd'));
  iter->Next();
  ASSERT_EQ(IterStatus(iter), "e->" + std::string(100000, 'e'));
  iter->Next();
  ASSERT_EQ(IterStatus(iter), "(invalid)");

  iter->SeekToLast();
  ASSERT_EQ(IterStatus(iter), "e->" + std::string(100000, 'e'));
  iter->Prev();
  ASSERT_EQ(IterStatus(iter), "d->" + std::string(100000, 'd'));
  iter->Prev();
  ASSERT_EQ(IterStatus(iter), "c->vc");
  iter->Prev();
  ASSERT_EQ(IterStatus(iter), "b->" + std::string(100000, 'b'));
  iter->Prev();
  ASSERT_EQ(IterStatus(iter), "a->va");
  iter->Prev();
  ASSERT_EQ(IterStatus(iter), "(invalid)");

  delete iter;
}
// A deleted entry is skipped during reverse iteration.
TEST(DBTest, IterMultiWithDelete) {
  do {
    ASSERT_OK(Put("a", "va"));
    ASSERT_OK(Put("b", "vb"));
    ASSERT_OK(Put("c", "vc"));
    ASSERT_OK(Delete("b"));
    ASSERT_EQ("NOT_FOUND", Get("b"));

    Iterator* iter = db_->NewIterator(ReadOptions());
    iter->Seek("c");
    ASSERT_EQ(IterStatus(iter), "c->vc");
    iter->Prev();
    // Prev() must skip the deleted "b" and land on "a".
    ASSERT_EQ(IterStatus(iter), "a->va");
    delete iter;
  } while (ChangeOptions());
}
// Data written before a reopen survives recovery, across two
// write/reopen cycles.
TEST(DBTest, Recover) {
  do {
    ASSERT_OK(Put("foo", "v1"));
    ASSERT_OK(Put("baz", "v5"));

    Reopen();
    ASSERT_EQ("v1", Get("foo"));
    ASSERT_EQ("v1", Get("foo"));
    ASSERT_EQ("v5", Get("baz"));
    ASSERT_OK(Put("bar", "v2"));
    ASSERT_OK(Put("foo", "v3"));

    Reopen();
    ASSERT_EQ("v3", Get("foo"));
    ASSERT_OK(Put("foo", "v4"));
    ASSERT_EQ("v4", Get("foo"));
    ASSERT_EQ("v2", Get("bar"));
    ASSERT_EQ("v5", Get("baz"));
  } while (ChangeOptions());
}
// Recovery succeeds even when the most recent log file is empty
// (back-to-back reopens with no intervening writes).
TEST(DBTest, RecoveryWithEmptyLog) {
  do {
    ASSERT_OK(Put("foo", "v1"));
    ASSERT_OK(Put("foo", "v2"));
    Reopen();
    Reopen();
    ASSERT_OK(Put("foo", "v3"));
    Reopen();
    ASSERT_EQ("v3", Get("foo"));
  } while (ChangeOptions());
}
// Check that writes done during a memtable compaction are recovered
// if the database is shutdown during the memtable compaction.
TEST(DBTest, RecoverDuringMemtableCompaction) {
  do {
    Options options = CurrentOptions();
    options.env = env_;
    options.write_buffer_size = 1000000;
    Reopen(&options);
    // Trigger a long memtable compaction and reopen the database during it
    ASSERT_OK(Put("foo", "v1"));                         // Goes to 1st log file
    ASSERT_OK(Put("big1", std::string(10000000, 'x')));  // Fills memtable
    ASSERT_OK(Put("big2", std::string(1000, 'y')));      // Triggers compaction
    ASSERT_OK(Put("bar", "v2"));                         // Goes to new log file
    // Reopen mid-compaction; all four writes must survive recovery.
    Reopen(&options);
    ASSERT_EQ("v1", Get("foo"));
    ASSERT_EQ("v2", Get("bar"));
    ASSERT_EQ(std::string(10000000, 'x'), Get("big1"));
    ASSERT_EQ(std::string(1000, 'y'), Get("big2"));
  } while (ChangeOptions());
}
  892. static std::string Key(int i) {
  893. char buf[100];
  894. snprintf(buf, sizeof(buf), "key%06d", i);
  895. return std::string(buf);
  896. }
// Writing far more data than write_buffer_size must trigger minor
// compactions that create new table files; all data stays readable
// both before and after a reopen.
TEST(DBTest, MinorCompactionsHappen) {
  Options options = CurrentOptions();
  options.write_buffer_size = 10000;  // small buffer => frequent memtable dumps
  Reopen(&options);
  const int N = 500;
  int starting_num_tables = TotalTableFiles();
  for (int i = 0; i < N; i++) {
    ASSERT_OK(Put(Key(i), Key(i) + std::string(1000, 'v')));
  }
  int ending_num_tables = TotalTableFiles();
  // At least one minor compaction produced a table file.
  ASSERT_GT(ending_num_tables, starting_num_tables);
  for (int i = 0; i < N; i++) {
    ASSERT_EQ(Key(i) + std::string(1000, 'v'), Get(Key(i)));
  }
  Reopen();
  for (int i = 0; i < N; i++) {
    ASSERT_EQ(Key(i) + std::string(1000, 'v'), Get(Key(i)));
  }
}
// Recovery of a large log with a small write buffer must flush multiple
// table files mid-log rather than buffering the whole log in memory.
TEST(DBTest, RecoverWithLargeLog) {
  {
    Options options = CurrentOptions();
    Reopen(&options);
    ASSERT_OK(Put("big1", std::string(200000, '1')));
    ASSERT_OK(Put("big2", std::string(200000, '2')));
    ASSERT_OK(Put("small3", std::string(10, '3')));
    ASSERT_OK(Put("small4", std::string(10, '4')));
    ASSERT_EQ(NumTableFilesAtLevel(0), 0);  // everything still in the log
  }
  // Make sure that if we re-open with a small write buffer size that
  // we flush table files in the middle of a large log file.
  Options options = CurrentOptions();
  options.write_buffer_size = 100000;
  Reopen(&options);
  ASSERT_EQ(NumTableFilesAtLevel(0), 3);
  ASSERT_EQ(std::string(200000, '1'), Get("big1"));
  ASSERT_EQ(std::string(200000, '2'), Get("big2"));
  ASSERT_EQ(std::string(10, '3'), Get("small3"));
  ASSERT_EQ(std::string(10, '4'), Get("small4"));
  ASSERT_GT(NumTableFilesAtLevel(0), 1);
}
// Compacting a large level-0 file set into level-1 must split the output
// into multiple files, and every value must survive the compaction.
TEST(DBTest, CompactionsGenerateMultipleFiles) {
  Options options = CurrentOptions();
  options.write_buffer_size = 100000000;  // Large write buffer
  Reopen(&options);
  Random rnd(301);
  // Write 8MB (80 values, each 100K)
  ASSERT_EQ(NumTableFilesAtLevel(0), 0);
  std::vector<std::string> values;
  for (int i = 0; i < 80; i++) {
    values.push_back(RandomString(&rnd, 100000));
    ASSERT_OK(Put(Key(i), values[i]));
  }
  // Reopening moves updates to level-0
  Reopen(&options);
  dbfull()->TEST_CompactRange(0, nullptr, nullptr);
  ASSERT_EQ(NumTableFilesAtLevel(0), 0);
  ASSERT_GT(NumTableFilesAtLevel(1), 1);  // output was split across files
  for (int i = 0; i < 80; i++) {
    ASSERT_EQ(Get(Key(i)), values[i]);
  }
}
// Overwriting one key repeatedly must not grow the file count without
// bound: obsolete versions are compacted away.
TEST(DBTest, RepeatedWritesToSameKey) {
  Options options = CurrentOptions();
  options.env = env_;
  options.write_buffer_size = 100000;  // Small write buffer
  Reopen(&options);
  // We must have at most one file per level except for level-0,
  // which may have up to kL0_StopWritesTrigger files.
  const int kMaxFiles = config::kNumLevels + config::kL0_StopWritesTrigger;
  Random rnd(301);
  std::string value = RandomString(&rnd, 2 * options.write_buffer_size);
  for (int i = 0; i < 5 * kMaxFiles; i++) {
    Put("key", value);
    ASSERT_LE(TotalTableFiles(), kMaxFiles);
    fprintf(stderr, "after %d: %d files\n", int(i+1), TotalTableFiles());
  }
}
// A sparse update over a large key range must not trigger a compaction
// that merges all of the bulky middle range in one shot; overlap with the
// next level stays bounded.
TEST(DBTest, SparseMerge) {
  Options options = CurrentOptions();
  options.compression = kNoCompression;
  Reopen(&options);
  FillLevels("A", "Z");
  // Suppose there is:
  //    small amount of data with prefix A
  //    large amount of data with prefix B
  //    small amount of data with prefix C
  // and that recent updates have made small changes to all three prefixes.
  // Check that we do not do a compaction that merges all of B in one shot.
  const std::string value(1000, 'x');
  Put("A", "va");
  // Write approximately 100MB of "B" values
  for (int i = 0; i < 100000; i++) {
    char key[100];
    snprintf(key, sizeof(key), "B%010d", i);
    Put(key, value);
  }
  Put("C", "vc");
  dbfull()->TEST_CompactMemTable();
  dbfull()->TEST_CompactRange(0, nullptr, nullptr);
  // Make sparse update
  Put("A", "va2");
  Put("B100", "bvalue2");
  Put("C", "vc2");
  dbfull()->TEST_CompactMemTable();
  // Compactions should not cause us to create a situation where
  // a file overlaps too much data at the next level.
  ASSERT_LE(dbfull()->TEST_MaxNextLevelOverlappingBytes(), 20*1048576);
  dbfull()->TEST_CompactRange(0, nullptr, nullptr);
  ASSERT_LE(dbfull()->TEST_MaxNextLevelOverlappingBytes(), 20*1048576);
  dbfull()->TEST_CompactRange(1, nullptr, nullptr);
  ASSERT_LE(dbfull()->TEST_MaxNextLevelOverlappingBytes(), 20*1048576);
}
  1010. static bool Between(uint64_t val, uint64_t low, uint64_t high) {
  1011. bool result = (val >= low) && (val <= high);
  1012. if (!result) {
  1013. fprintf(stderr, "Value %llu is not in range [%llu, %llu]\n",
  1014. (unsigned long long)(val),
  1015. (unsigned long long)(low),
  1016. (unsigned long long)(high));
  1017. }
  1018. return result;
  1019. }
// GetApproximateSizes() (via the Size() helper) must report 0 for data
// that is only in the memtable, and plausible byte counts for on-disk
// data, stable across reopens and partial compactions.
TEST(DBTest, ApproximateSizes) {
  do {
    Options options = CurrentOptions();
    options.write_buffer_size = 100000000;        // Large write buffer
    options.compression = kNoCompression;
    DestroyAndReopen();
    ASSERT_TRUE(Between(Size("", "xyz"), 0, 0));  // empty DB => zero size
    Reopen(&options);
    ASSERT_TRUE(Between(Size("", "xyz"), 0, 0));
    // Write 8MB (80 values, each 100K)
    ASSERT_EQ(NumTableFilesAtLevel(0), 0);
    const int N = 80;
    static const int S1 = 100000;
    static const int S2 = 105000;  // Allow some expansion from metadata
    Random rnd(301);
    for (int i = 0; i < N; i++) {
      ASSERT_OK(Put(Key(i), RandomString(&rnd, S1)));
    }
    // 0 because GetApproximateSizes() does not account for memtable space
    ASSERT_TRUE(Between(Size("", Key(50)), 0, 0));
    if (options.reuse_logs) {
      // Recovery will reuse memtable, and GetApproximateSizes() does not
      // account for memtable usage;
      Reopen(&options);
      ASSERT_TRUE(Between(Size("", Key(50)), 0, 0));
      continue;
    }
    // Check sizes across recovery by reopening a few times
    for (int run = 0; run < 3; run++) {
      Reopen(&options);
      for (int compact_start = 0; compact_start < N; compact_start += 10) {
        // Each prefix of keys should report a size proportional to its
        // key count, within the S1..S2 per-key bounds.
        for (int i = 0; i < N; i += 10) {
          ASSERT_TRUE(Between(Size("", Key(i)), S1*i, S2*i));
          ASSERT_TRUE(Between(Size("", Key(i)+".suffix"), S1*(i+1), S2*(i+1)));
          ASSERT_TRUE(Between(Size(Key(i), Key(i+10)), S1*10, S2*10));
        }
        ASSERT_TRUE(Between(Size("", Key(50)), S1*50, S2*50));
        ASSERT_TRUE(Between(Size("", Key(50)+".suffix"), S1*50, S2*50));
        // Compact a 10-key slice; sizes must remain consistent afterwards.
        std::string cstart_str = Key(compact_start);
        std::string cend_str = Key(compact_start + 9);
        Slice cstart = cstart_str;
        Slice cend = cend_str;
        dbfull()->TEST_CompactRange(0, &cstart, &cend);
      }
      ASSERT_EQ(NumTableFilesAtLevel(0), 0);
      ASSERT_GT(NumTableFilesAtLevel(1), 0);
    }
  } while (ChangeOptions());
}
// Approximate sizes must track cumulative on-disk bytes correctly when
// small and large values are interleaved, and stay stable across reopens.
TEST(DBTest, ApproximateSizes_MixOfSmallAndLarge) {
  do {
    Options options = CurrentOptions();
    options.compression = kNoCompression;
    Reopen();
    Random rnd(301);
    std::string big1 = RandomString(&rnd, 100000);
    ASSERT_OK(Put(Key(0), RandomString(&rnd, 10000)));
    ASSERT_OK(Put(Key(1), RandomString(&rnd, 10000)));
    ASSERT_OK(Put(Key(2), big1));
    ASSERT_OK(Put(Key(3), RandomString(&rnd, 10000)));
    ASSERT_OK(Put(Key(4), big1));
    ASSERT_OK(Put(Key(5), RandomString(&rnd, 10000)));
    ASSERT_OK(Put(Key(6), RandomString(&rnd, 300000)));
    ASSERT_OK(Put(Key(7), RandomString(&rnd, 10000)));
    if (options.reuse_logs) {
      // Need to force a memtable compaction since recovery does not do so.
      ASSERT_OK(dbfull()->TEST_CompactMemTable());
    }
    // Check sizes across recovery by reopening a few times
    for (int run = 0; run < 3; run++) {
      Reopen(&options);
      // Each bound below is the running total of the value sizes written
      // above, with ~1000 bytes of slack for per-key metadata.
      ASSERT_TRUE(Between(Size("", Key(0)), 0, 0));
      ASSERT_TRUE(Between(Size("", Key(1)), 10000, 11000));
      ASSERT_TRUE(Between(Size("", Key(2)), 20000, 21000));
      ASSERT_TRUE(Between(Size("", Key(3)), 120000, 121000));
      ASSERT_TRUE(Between(Size("", Key(4)), 130000, 131000));
      ASSERT_TRUE(Between(Size("", Key(5)), 230000, 231000));
      ASSERT_TRUE(Between(Size("", Key(6)), 240000, 241000));
      ASSERT_TRUE(Between(Size("", Key(7)), 540000, 541000));
      ASSERT_TRUE(Between(Size("", Key(8)), 550000, 560000));
      ASSERT_TRUE(Between(Size(Key(3), Key(5)), 110000, 111000));
      dbfull()->TEST_CompactRange(0, nullptr, nullptr);
    }
  } while (ChangeOptions());
}
// A live iterator must pin the DB state it was created over: writes and
// compactions performed after NewIterator() must not be visible through it.
TEST(DBTest, IteratorPinsRef) {
  Put("foo", "hello");
  // Get iterator that will yield the current contents of the DB.
  Iterator* iter = db_->NewIterator(ReadOptions());
  // Write to force compactions
  Put("foo", "newvalue1");
  for (int i = 0; i < 100; i++) {
    ASSERT_OK(Put(Key(i), Key(i) + std::string(100000, 'v')));  // 100K values
  }
  Put("foo", "newvalue2");
  // The iterator still sees only the single pre-iterator entry.
  iter->SeekToFirst();
  ASSERT_TRUE(iter->Valid());
  ASSERT_EQ("foo", iter->key().ToString());
  ASSERT_EQ("hello", iter->value().ToString());
  iter->Next();
  ASSERT_TRUE(!iter->Valid());
  delete iter;
}
// Each snapshot must keep serving the value that was current when it was
// taken, independently of later writes and of other snapshots' release order.
TEST(DBTest, Snapshot) {
  do {
    Put("foo", "v1");
    const Snapshot* s1 = db_->GetSnapshot();
    Put("foo", "v2");
    const Snapshot* s2 = db_->GetSnapshot();
    Put("foo", "v3");
    const Snapshot* s3 = db_->GetSnapshot();
    Put("foo", "v4");
    ASSERT_EQ("v1", Get("foo", s1));
    ASSERT_EQ("v2", Get("foo", s2));
    ASSERT_EQ("v3", Get("foo", s3));
    ASSERT_EQ("v4", Get("foo"));
    // Release out of order; surviving snapshots are unaffected.
    db_->ReleaseSnapshot(s3);
    ASSERT_EQ("v1", Get("foo", s1));
    ASSERT_EQ("v2", Get("foo", s2));
    ASSERT_EQ("v4", Get("foo"));
    db_->ReleaseSnapshot(s1);
    ASSERT_EQ("v2", Get("foo", s2));
    ASSERT_EQ("v4", Get("foo"));
    db_->ReleaseSnapshot(s2);
    ASSERT_EQ("v4", Get("foo"));
  } while (ChangeOptions());
}
// Once the snapshot protecting an old value is released, compaction must
// drop the hidden (overwritten) value and reclaim its space.
TEST(DBTest, HiddenValuesAreRemoved) {
  do {
    Random rnd(301);
    FillLevels("a", "z");
    std::string big = RandomString(&rnd, 50000);
    Put("foo", big);
    Put("pastfoo", "v");
    const Snapshot* snapshot = db_->GetSnapshot();
    Put("foo", "tiny");
    Put("pastfoo2", "v2");  // Advance sequence number one more
    ASSERT_OK(dbfull()->TEST_CompactMemTable());
    ASSERT_GT(NumTableFilesAtLevel(0), 0);
    // While the snapshot is live, the big value is still reachable.
    ASSERT_EQ(big, Get("foo", snapshot));
    ASSERT_TRUE(Between(Size("", "pastfoo"), 50000, 60000));
    db_->ReleaseSnapshot(snapshot);
    ASSERT_EQ(AllEntriesFor("foo"), "[ tiny, " + big + " ]");
    Slice x("x");
    dbfull()->TEST_CompactRange(0, nullptr, &x);
    // Compaction dropped the hidden big value; only "tiny" remains.
    ASSERT_EQ(AllEntriesFor("foo"), "[ tiny ]");
    ASSERT_EQ(NumTableFilesAtLevel(0), 0);
    ASSERT_GE(NumTableFilesAtLevel(1), 1);
    dbfull()->TEST_CompactRange(1, nullptr, &x);
    ASSERT_EQ(AllEntriesFor("foo"), "[ tiny ]");
    ASSERT_TRUE(Between(Size("", "pastfoo"), 0, 1000));  // space reclaimed
  } while (ChangeOptions());
}
// A deletion marker shadowed by a newer value may be dropped by compaction,
// but the older value at a deeper level survives until its level is merged.
TEST(DBTest, DeletionMarkers1) {
  Put("foo", "v1");
  ASSERT_OK(dbfull()->TEST_CompactMemTable());
  const int last = config::kMaxMemCompactLevel;
  ASSERT_EQ(NumTableFilesAtLevel(last), 1);  // foo => v1 is now in last level
  // Place a table at level last-1 to prevent merging with preceding mutation
  Put("a", "begin");
  Put("z", "end");
  dbfull()->TEST_CompactMemTable();
  ASSERT_EQ(NumTableFilesAtLevel(last), 1);
  ASSERT_EQ(NumTableFilesAtLevel(last-1), 1);
  Delete("foo");
  Put("foo", "v2");
  ASSERT_EQ(AllEntriesFor("foo"), "[ v2, DEL, v1 ]");
  ASSERT_OK(dbfull()->TEST_CompactMemTable());  // Moves to level last-2
  ASSERT_EQ(AllEntriesFor("foo"), "[ v2, DEL, v1 ]");
  Slice z("z");
  dbfull()->TEST_CompactRange(last-2, nullptr, &z);
  // DEL eliminated, but v1 remains because we aren't compacting that level
  // (DEL can be eliminated because v2 hides v1).
  ASSERT_EQ(AllEntriesFor("foo"), "[ v2, v1 ]");
  dbfull()->TEST_CompactRange(last-1, nullptr, nullptr);
  // Merging last-1 w/ last, so we are the base level for "foo", so
  // DEL is removed.  (as is v1).
  ASSERT_EQ(AllEntriesFor("foo"), "[ v2 ]");
}
// Unlike DeletionMarkers1, here the DEL has no newer value above it, so it
// must be kept until compaction reaches the level holding the value it hides.
TEST(DBTest, DeletionMarkers2) {
  Put("foo", "v1");
  ASSERT_OK(dbfull()->TEST_CompactMemTable());
  const int last = config::kMaxMemCompactLevel;
  ASSERT_EQ(NumTableFilesAtLevel(last), 1);  // foo => v1 is now in last level
  // Place a table at level last-1 to prevent merging with preceding mutation
  Put("a", "begin");
  Put("z", "end");
  dbfull()->TEST_CompactMemTable();
  ASSERT_EQ(NumTableFilesAtLevel(last), 1);
  ASSERT_EQ(NumTableFilesAtLevel(last-1), 1);
  Delete("foo");
  ASSERT_EQ(AllEntriesFor("foo"), "[ DEL, v1 ]");
  ASSERT_OK(dbfull()->TEST_CompactMemTable());  // Moves to level last-2
  ASSERT_EQ(AllEntriesFor("foo"), "[ DEL, v1 ]");
  dbfull()->TEST_CompactRange(last-2, nullptr, nullptr);
  // DEL kept: "last" file overlaps
  ASSERT_EQ(AllEntriesFor("foo"), "[ DEL, v1 ]");
  dbfull()->TEST_CompactRange(last-1, nullptr, nullptr);
  // Merging last-1 w/ last, so we are the base level for "foo", so
  // DEL is removed.  (as is v1).
  ASSERT_EQ(AllEntriesFor("foo"), "[ ]");
}
// Regression test: a memtable compaction must notice overlapping level-0
// files and not push a deletion past them into a deeper level.
TEST(DBTest, OverlapInLevel0) {
  do {
    ASSERT_EQ(config::kMaxMemCompactLevel, 2) << "Fix test to match config";
    // Fill levels 1 and 2 to disable the pushing of new memtables to levels > 0.
    ASSERT_OK(Put("100", "v100"));
    ASSERT_OK(Put("999", "v999"));
    dbfull()->TEST_CompactMemTable();
    ASSERT_OK(Delete("100"));
    ASSERT_OK(Delete("999"));
    dbfull()->TEST_CompactMemTable();
    ASSERT_EQ("0,1,1", FilesPerLevel());
    // Make files spanning the following ranges in level-0:
    //  files[0]  200 .. 900
    //  files[1]  300 .. 500
    // Note that files are sorted by smallest key.
    ASSERT_OK(Put("300", "v300"));
    ASSERT_OK(Put("500", "v500"));
    dbfull()->TEST_CompactMemTable();
    ASSERT_OK(Put("200", "v200"));
    ASSERT_OK(Put("600", "v600"));
    ASSERT_OK(Put("900", "v900"));
    dbfull()->TEST_CompactMemTable();
    ASSERT_EQ("2,1,1", FilesPerLevel());
    // Compact away the placeholder files we created initially
    dbfull()->TEST_CompactRange(1, nullptr, nullptr);
    dbfull()->TEST_CompactRange(2, nullptr, nullptr);
    ASSERT_EQ("2", FilesPerLevel());
    // Do a memtable compaction.  Before bug-fix, the compaction would
    // not detect the overlap with level-0 files and would incorrectly place
    // the deletion in a deeper level.
    ASSERT_OK(Delete("600"));
    dbfull()->TEST_CompactMemTable();
    ASSERT_EQ("3", FilesPerLevel());
    ASSERT_EQ("NOT_FOUND", Get("600"));
  } while (ChangeOptions());
}
// Regression test for issue 44 (case a): interleaved deletes, puts, and
// reopens must converge to the correct contents, including after the
// background compaction that follows.
TEST(DBTest, L0_CompactionBug_Issue44_a) {
  Reopen();
  ASSERT_OK(Put("b", "v"));
  Reopen();
  ASSERT_OK(Delete("b"));
  ASSERT_OK(Delete("a"));
  Reopen();
  ASSERT_OK(Delete("a"));
  Reopen();
  ASSERT_OK(Put("a", "v"));
  Reopen();
  Reopen();
  ASSERT_EQ("(a->v)", Contents());
  DelayMilliseconds(1000);  // Wait for compaction to finish
  ASSERT_EQ("(a->v)", Contents());  // compaction must not resurrect deletes
}
// Regression test for issue 44 (case b): a denser interleaving of empty-key
// puts, deletes, and reopens; contents must be stable across the final
// background compaction.
TEST(DBTest, L0_CompactionBug_Issue44_b) {
  Reopen();
  Put("","");
  Reopen();
  Delete("e");
  Put("","");
  Reopen();
  Put("c", "cv");
  Reopen();
  Put("","");
  Reopen();
  Put("","");
  DelayMilliseconds(1000);  // Wait for compaction to finish
  Reopen();
  Put("d","dv");
  Reopen();
  Put("","");
  Reopen();
  Delete("d");
  Delete("b");
  Reopen();
  ASSERT_EQ("(->)(c->cv)", Contents());
  DelayMilliseconds(1000);  // Wait for compaction to finish
  ASSERT_EQ("(->)(c->cv)", Contents());
}
// Regression test for issue 474: calling fflush(nullptr) (flush all C
// streams) between writes must not interfere with the DB's own files.
TEST(DBTest, Fflush_Issue474) {
  static const int kNum = 100000;
  Random rnd(test::RandomSeed());
  for (int i = 0; i < kNum; i++) {
    fflush(nullptr);
    ASSERT_OK(Put(RandomKey(&rnd), RandomString(&rnd, 100)));
  }
}
// Reopening a DB with a comparator whose Name() differs from the one the
// DB was created with must fail with an error mentioning "comparator".
TEST(DBTest, ComparatorCheck) {
  // Behaves exactly like the default comparator but reports a new name,
  // so only the name check can cause the mismatch.
  class NewComparator : public Comparator {
   public:
    virtual const char* Name() const { return "leveldb.NewComparator"; }
    virtual int Compare(const Slice& a, const Slice& b) const {
      return BytewiseComparator()->Compare(a, b);
    }
    virtual void FindShortestSeparator(std::string* s, const Slice& l) const {
      BytewiseComparator()->FindShortestSeparator(s, l);
    }
    virtual void FindShortSuccessor(std::string* key) const {
      BytewiseComparator()->FindShortSuccessor(key);
    }
  };
  NewComparator cmp;
  Options new_options = CurrentOptions();
  new_options.comparator = &cmp;
  Status s = TryReopen(&new_options);
  ASSERT_TRUE(!s.ok());
  ASSERT_TRUE(s.ToString().find("comparator") != std::string::npos)
      << s.ToString();
}
// The DB must honor a fully custom comparator: keys are "[<number>]"
// strings ordered numerically, so "[10]" and "[0xa]" are the same key.
TEST(DBTest, CustomComparator) {
  class NumberComparator : public Comparator {
   public:
    virtual const char* Name() const { return "test.NumberComparator"; }
    virtual int Compare(const Slice& a, const Slice& b) const {
      return ToNumber(a) - ToNumber(b);
    }
    virtual void FindShortestSeparator(std::string* s, const Slice& l) const {
      ToNumber(*s);   // Check format
      ToNumber(l);    // Check format
    }
    virtual void FindShortSuccessor(std::string* key) const {
      ToNumber(*key); // Check format
    }
   private:
    // Parses "[<number>]" (decimal or 0x-hex via %i); asserts on any
    // malformed key so format violations surface inside the comparator.
    static int ToNumber(const Slice& x) {
      // Check that there are no extra characters.
      ASSERT_TRUE(x.size() >= 2 && x[0] == '[' && x[x.size()-1] == ']')
          << EscapeString(x);
      int val;
      char ignored;
      // Expect exactly one conversion: the trailing %c must NOT match.
      ASSERT_TRUE(sscanf(x.ToString().c_str(), "[%i]%c", &val, &ignored) == 1)
          << EscapeString(x);
      return val;
    }
  };
  NumberComparator cmp;
  Options new_options = CurrentOptions();
  new_options.create_if_missing = true;
  new_options.comparator = &cmp;
  new_options.filter_policy = nullptr;     // Cannot use bloom filters
  new_options.write_buffer_size = 1000;    // Compact more often
  DestroyAndReopen(&new_options);
  ASSERT_OK(Put("[10]", "ten"));
  ASSERT_OK(Put("[0x14]", "twenty"));
  for (int i = 0; i < 2; i++) {
    // Decimal and hex spellings of the same number are the same key.
    ASSERT_EQ("ten", Get("[10]"));
    ASSERT_EQ("ten", Get("[0xa]"));
    ASSERT_EQ("twenty", Get("[20]"));
    ASSERT_EQ("twenty", Get("[0x14]"));
    ASSERT_EQ("NOT_FOUND", Get("[15]"));
    ASSERT_EQ("NOT_FOUND", Get("[0xf]"));
    Compact("[0]", "[9999]");
  }
  for (int run = 0; run < 2; run++) {
    for (int i = 0; i < 1000; i++) {
      char buf[100];
      snprintf(buf, sizeof(buf), "[%d]", i*10);
      ASSERT_OK(Put(buf, buf));
    }
    Compact("[0]", "[1000000]");
  }
}
// Manual compaction must only touch files overlapping the requested range,
// and a full-range CompactRange(nullptr, nullptr) must collapse everything.
TEST(DBTest, ManualCompaction) {
  ASSERT_EQ(config::kMaxMemCompactLevel, 2)
      << "Need to update this test to match kMaxMemCompactLevel";
  MakeTables(3, "p", "q");
  ASSERT_EQ("1,1,1", FilesPerLevel());
  // Compaction range falls before files
  Compact("", "c");
  ASSERT_EQ("1,1,1", FilesPerLevel());
  // Compaction range falls after files
  Compact("r", "z");
  ASSERT_EQ("1,1,1", FilesPerLevel());
  // Compaction range overlaps files
  Compact("p1", "p9");
  ASSERT_EQ("0,0,1", FilesPerLevel());
  // Populate a different range
  MakeTables(3, "c", "e");
  ASSERT_EQ("1,1,2", FilesPerLevel());
  // Compact just the new range
  Compact("b", "f");
  ASSERT_EQ("0,0,2", FilesPerLevel());
  // Compact all
  MakeTables(1, "a", "z");
  ASSERT_EQ("0,1,2", FilesPerLevel());
  db_->CompactRange(nullptr, nullptr);
  ASSERT_EQ("0,0,1", FilesPerLevel());
}
// Exercises DB::Open's create_if_missing / error_if_exists combinations
// against a fresh database directory.
TEST(DBTest, DBOpen_Options) {
  std::string dbname = test::TmpDir() + "/db_options_test";
  DestroyDB(dbname, Options());
  // Does not exist, and create_if_missing == false: error
  DB* db = nullptr;
  Options opts;
  opts.create_if_missing = false;
  Status s = DB::Open(opts, dbname, &db);
  ASSERT_TRUE(strstr(s.ToString().c_str(), "does not exist") != nullptr);
  ASSERT_TRUE(db == nullptr);
  // Does not exist, and create_if_missing == true: OK
  opts.create_if_missing = true;
  s = DB::Open(opts, dbname, &db);
  ASSERT_OK(s);
  ASSERT_TRUE(db != nullptr);
  delete db;
  db = nullptr;
  // Does exist, and error_if_exists == true: error
  opts.create_if_missing = false;
  opts.error_if_exists = true;
  s = DB::Open(opts, dbname, &db);
  ASSERT_TRUE(strstr(s.ToString().c_str(), "exists") != nullptr);
  ASSERT_TRUE(db == nullptr);
  // Does exist, and error_if_exists == false: OK
  opts.create_if_missing = true;
  opts.error_if_exists = false;
  s = DB::Open(opts, dbname, &db);
  ASSERT_OK(s);
  ASSERT_TRUE(db != nullptr);
  delete db;
  db = nullptr;
}
// DestroyDB on an empty directory must remove it, both when the Env lists
// '.'/'..' entries and when it filters dot files out.
TEST(DBTest, DestroyEmptyDir) {
  std::string dbname = test::TmpDir() + "/db_empty_dir";
  TestEnv env(Env::Default());
  env.DeleteDir(dbname);
  ASSERT_TRUE(!env.FileExists(dbname));
  Options opts;
  opts.env = &env;
  ASSERT_OK(env.CreateDir(dbname));
  ASSERT_TRUE(env.FileExists(dbname));
  std::vector<std::string> children;
  ASSERT_OK(env.GetChildren(dbname, &children));
  // The POSIX env does not filter out '.' and '..' special files.
  ASSERT_EQ(2, children.size());
  ASSERT_OK(DestroyDB(dbname, opts));
  ASSERT_TRUE(!env.FileExists(dbname));
  // Should also be destroyed if Env is filtering out dot files.
  env.SetIgnoreDotFiles(true);
  ASSERT_OK(env.CreateDir(dbname));
  ASSERT_TRUE(env.FileExists(dbname));
  ASSERT_OK(env.GetChildren(dbname, &children));
  ASSERT_EQ(0, children.size());
  ASSERT_OK(DestroyDB(dbname, opts));
  ASSERT_TRUE(!env.FileExists(dbname));
}
// DestroyDB must fail while the database is open (lock held) and succeed
// once the DB handle is closed.
TEST(DBTest, DestroyOpenDB) {
  std::string dbname = test::TmpDir() + "/open_db_dir";
  env_->DeleteDir(dbname);
  ASSERT_TRUE(!env_->FileExists(dbname));
  Options opts;
  opts.create_if_missing = true;
  DB* db = nullptr;
  ASSERT_OK(DB::Open(opts, dbname, &db));
  ASSERT_TRUE(db != nullptr);
  // Must fail to destroy an open db.
  ASSERT_TRUE(env_->FileExists(dbname));
  ASSERT_TRUE(!DestroyDB(dbname, Options()).ok());
  ASSERT_TRUE(env_->FileExists(dbname));
  delete db;
  db = nullptr;
  // Should succeed destroying a closed db.
  ASSERT_OK(DestroyDB(dbname, Options()));
  ASSERT_TRUE(!env_->FileExists(dbname));
}
// Opening a second handle on the fixture's already-open database must fail
// because the first handle holds the file lock.
TEST(DBTest, Locking) {
  DB* db2 = nullptr;
  Status s = DB::Open(CurrentOptions(), dbname_, &db2);
  ASSERT_TRUE(!s.ok()) << "Locking did not prevent re-opening db";
}
// Check that number of files does not grow when we are out of space
TEST(DBTest, NoSpace) {
  Options options = CurrentOptions();
  options.env = env_;
  Reopen(&options);
  ASSERT_OK(Put("foo", "v1"));
  ASSERT_EQ("v1", Get("foo"));
  Compact("a", "z");
  const int num_files = CountFiles();
  env_->no_space_.Release_Store(env_);   // Force out-of-space errors
  // Repeatedly attempt compactions while writes fail; failed compactions
  // must not leave extra files behind.
  for (int i = 0; i < 10; i++) {
    for (int level = 0; level < config::kNumLevels-1; level++) {
      dbfull()->TEST_CompactRange(level, nullptr, nullptr);
    }
  }
  env_->no_space_.Release_Store(nullptr);  // restore normal behavior
  ASSERT_LT(CountFiles(), num_files + 3);
}
// When new-file creation fails, Put() must report errors rather than hang
// or crash, and the DB must recover once writes are allowed again.
TEST(DBTest, NonWritableFileSystem) {
  Options options = CurrentOptions();
  options.write_buffer_size = 1000;
  options.env = env_;
  Reopen(&options);
  ASSERT_OK(Put("foo", "v1"));
  env_->non_writable_.Release_Store(env_);  // Force errors for new files
  std::string big(100000, 'x');
  int errors = 0;
  for (int i = 0; i < 20; i++) {
    fprintf(stderr, "iter %d; errors %d\n", i, errors);
    if (!Put("foo", big).ok()) {
      errors++;
      DelayMilliseconds(100);
    }
  }
  ASSERT_GT(errors, 0);  // at least one write surfaced the injected failure
  env_->non_writable_.Release_Store(nullptr);
}
// A failed log sync must put the DB in a permanent error state: the failed
// write is not applied, and all subsequent writes fail even after the
// injected error is cleared.
TEST(DBTest, WriteSyncError) {
  // Check that log sync errors cause the DB to disallow future writes.
  // (a) Cause log sync calls to fail
  Options options = CurrentOptions();
  options.env = env_;
  Reopen(&options);
  env_->data_sync_error_.Release_Store(env_);
  // (b) Normal write should succeed
  WriteOptions w;
  ASSERT_OK(db_->Put(w, "k1", "v1"));
  ASSERT_EQ("v1", Get("k1"));
  // (c) Do a sync write; should fail
  w.sync = true;
  ASSERT_TRUE(!db_->Put(w, "k2", "v2").ok());
  ASSERT_EQ("v1", Get("k1"));
  ASSERT_EQ("NOT_FOUND", Get("k2"));
  // (d) make sync behave normally
  env_->data_sync_error_.Release_Store(nullptr);
  // (e) Do a non-sync write; should fail
  w.sync = false;
  ASSERT_TRUE(!db_->Put(w, "k3", "v3").ok());
  ASSERT_EQ("v1", Get("k1"));
  ASSERT_EQ("NOT_FOUND", Get("k2"));
  ASSERT_EQ("NOT_FOUND", Get("k3"));
}
// A MANIFEST write or sync failure during compaction must not cause data
// loss: after the error clears and the DB reopens, reads still succeed.
TEST(DBTest, ManifestWriteError) {
  // Test for the following problem:
  // (a) Compaction produces file F
  // (b) Log record containing F is written to MANIFEST file, but Sync() fails
  // (c) GC deletes F
  // (d) After reopening DB, reads fail since deleted F is named in log record
  // We iterate twice.  In the second iteration, everything is the
  // same except the log record never makes it to the MANIFEST file.
  for (int iter = 0; iter < 2; iter++) {
    // iter 0 injects a sync failure; iter 1 injects a write failure.
    port::AtomicPointer* error_type = (iter == 0)
        ? &env_->manifest_sync_error_
        : &env_->manifest_write_error_;
    // Insert foo=>bar mapping
    Options options = CurrentOptions();
    options.env = env_;
    options.create_if_missing = true;
    options.error_if_exists = false;
    DestroyAndReopen(&options);
    ASSERT_OK(Put("foo", "bar"));
    ASSERT_EQ("bar", Get("foo"));
    // Memtable compaction (will succeed)
    dbfull()->TEST_CompactMemTable();
    ASSERT_EQ("bar", Get("foo"));
    const int last = config::kMaxMemCompactLevel;
    ASSERT_EQ(NumTableFilesAtLevel(last), 1);  // foo=>bar is now in last level
    // Merging compaction (will fail)
    error_type->Release_Store(env_);
    dbfull()->TEST_CompactRange(last, nullptr, nullptr);  // Should fail
    ASSERT_EQ("bar", Get("foo"));
    // Recovery: should not lose data
    error_type->Release_Store(nullptr);
    Reopen(&options);
    ASSERT_EQ("bar", Get("foo"));
  }
}
// With paranoid_checks on, reopening a DB whose table file was deleted
// out from under it must fail with a "missing"-style error.
TEST(DBTest, MissingSSTFile) {
  ASSERT_OK(Put("foo", "bar"));
  ASSERT_EQ("bar", Get("foo"));
  // Dump the memtable to disk.
  dbfull()->TEST_CompactMemTable();
  ASSERT_EQ("bar", Get("foo"));
  Close();
  ASSERT_TRUE(DeleteAnSSTFile());
  Options options = CurrentOptions();
  options.paranoid_checks = true;
  Status s = TryReopen(&options);
  ASSERT_TRUE(!s.ok());
  // Match "issing" to accept either "missing" or "Missing" in the message.
  ASSERT_TRUE(s.ToString().find("issing") != std::string::npos)
      << s.ToString();
}
// The DB must still find table files carrying the legacy ".sst" extension
// (renamed from ".ldb"), even with paranoid_checks enabled.
TEST(DBTest, StillReadSST) {
  ASSERT_OK(Put("foo", "bar"));
  ASSERT_EQ("bar", Get("foo"));
  // Dump the memtable to disk.
  dbfull()->TEST_CompactMemTable();
  ASSERT_EQ("bar", Get("foo"));
  Close();
  ASSERT_GT(RenameLDBToSST(), 0);  // at least one file was renamed
  Options options = CurrentOptions();
  options.paranoid_checks = true;
  Status s = TryReopen(&options);
  ASSERT_TRUE(s.ok());
  ASSERT_EQ("bar", Get("foo"));
}
// Repeated overwrite-then-compact cycles must garbage-collect obsolete
// files: the total file count stays constant.
TEST(DBTest, FilesDeletedAfterCompaction) {
  ASSERT_OK(Put("foo", "v2"));
  Compact("a", "z");
  const int num_files = CountFiles();
  for (int i = 0; i < 10; i++) {
    ASSERT_OK(Put("foo", "v2"));
    Compact("a", "z");
  }
  ASSERT_EQ(CountFiles(), num_files);
}
// With a bloom filter policy installed, lookups of present keys should do
// roughly one disk read each, and lookups of missing keys should almost
// never touch disk.
TEST(DBTest, BloomFilter) {
  env_->count_random_reads_ = true;
  Options options = CurrentOptions();
  options.env = env_;
  options.block_cache = NewLRUCache(0);  // Prevent cache hits
  options.filter_policy = NewBloomFilterPolicy(10);
  Reopen(&options);
  // Populate multiple layers
  const int N = 10000;
  for (int i = 0; i < N; i++) {
    ASSERT_OK(Put(Key(i), Key(i)));
  }
  Compact("a", "z");
  for (int i = 0; i < N; i += 100) {
    ASSERT_OK(Put(Key(i), Key(i)));
  }
  dbfull()->TEST_CompactMemTable();
  // Prevent auto compactions triggered by seeks
  env_->delay_data_sync_.Release_Store(env_);
  // Lookup present keys.  Should rarely read from small sstable.
  env_->random_read_counter_.Reset();
  for (int i = 0; i < N; i++) {
    ASSERT_EQ(Key(i), Get(Key(i)));
  }
  int reads = env_->random_read_counter_.Read();
  fprintf(stderr, "%d present => %d reads\n", N, reads);
  ASSERT_GE(reads, N);
  ASSERT_LE(reads, N + 2*N/100);  // at most ~2% extra reads
  // Lookup missing keys.  Should rarely read from either sstable.
  env_->random_read_counter_.Reset();
  for (int i = 0; i < N; i++) {
    ASSERT_EQ("NOT_FOUND", Get(Key(i) + ".missing"));
  }
  reads = env_->random_read_counter_.Read();
  fprintf(stderr, "%d missing => %d reads\n", N, reads);
  ASSERT_LE(reads, 3*N/100);  // bloom filter screens out most lookups
  env_->delay_data_sync_.Release_Store(nullptr);
  Close();
  delete options.block_cache;
  delete options.filter_policy;
}
// Multi-threaded test:
namespace {

static const int kNumThreads = 4;    // Concurrent reader/writer threads.
static const int kTestSeconds = 10;  // Duration of each run.
static const int kNumKeys = 1000;    // Key space shared by all threads.

// State shared by every test thread.  All cross-thread communication goes
// through the AtomicPointer fields.
struct MTState {
  DBTest* test;
  port::AtomicPointer stop;  // Non-null => threads should exit their loop.
  // Per-thread operation count, published as pointer-sized integer bits.
  port::AtomicPointer counter[kNumThreads];
  // Set non-null by each thread just before it returns.
  port::AtomicPointer thread_done[kNumThreads];
};

// Per-thread argument handed to MTThreadBody via StartThread.
struct MTThread {
  MTState* state;
  int id;  // Thread index in [0, kNumThreads).
};

// Thread body: randomly alternates between writing values of the form
// "<key>.<writer id>.<writer counter>" and reading keys back, checking that
// any value read is consistent with the writer's published counter.
static void MTThreadBody(void* arg) {
  MTThread* t = reinterpret_cast<MTThread*>(arg);
  int id = t->id;
  DB* db = t->state->test->db_;
  uintptr_t counter = 0;
  fprintf(stderr, "... starting thread %d\n", id);
  Random rnd(1000 + id);
  std::string value;
  char valbuf[1500];
  while (t->state->stop.Acquire_Load() == nullptr) {
    // Publish this thread's op count before every operation so readers can
    // bound the counters they observe in values.
    t->state->counter[id].Release_Store(reinterpret_cast<void*>(counter));

    int key = rnd.Uniform(kNumKeys);
    char keybuf[20];
    snprintf(keybuf, sizeof(keybuf), "%016d", key);

    if (rnd.OneIn(2)) {
      // Write values of the form <key, my id, counter>.
      // We add some padding to force compactions.
      snprintf(valbuf, sizeof(valbuf), "%d.%d.%-1000d",
               key, id, static_cast<int>(counter));
      ASSERT_OK(db->Put(WriteOptions(), Slice(keybuf), Slice(valbuf)));
    } else {
      // Read a value and verify that it matches the pattern written above.
      Status s = db->Get(ReadOptions(), Slice(keybuf), &value);
      if (s.IsNotFound()) {
        // Key has not yet been written
      } else {
        // Check that the writer thread counter is >= the counter in the value
        ASSERT_OK(s);
        int k, w, c;
        ASSERT_EQ(3, sscanf(value.c_str(), "%d.%d.%d", &k, &w, &c)) << value;
        ASSERT_EQ(k, key);
        ASSERT_GE(w, 0);
        ASSERT_LT(w, kNumThreads);
        ASSERT_LE(static_cast<uintptr_t>(c), reinterpret_cast<uintptr_t>(
            t->state->counter[w].Acquire_Load()));
      }
    }
    counter++;
  }
  // Signal completion (any non-null value works).
  t->state->thread_done[id].Release_Store(t);
  fprintf(stderr, "... stopping thread %d after %d ops\n", id, int(counter));
}

}  // namespace
  1723. TEST(DBTest, MultiThreaded) {
  1724. do {
  1725. // Initialize state
  1726. MTState mt;
  1727. mt.test = this;
  1728. mt.stop.Release_Store(0);
  1729. for (int id = 0; id < kNumThreads; id++) {
  1730. mt.counter[id].Release_Store(0);
  1731. mt.thread_done[id].Release_Store(0);
  1732. }
  1733. // Start threads
  1734. MTThread thread[kNumThreads];
  1735. for (int id = 0; id < kNumThreads; id++) {
  1736. thread[id].state = &mt;
  1737. thread[id].id = id;
  1738. env_->StartThread(MTThreadBody, &thread[id]);
  1739. }
  1740. // Let them run for a while
  1741. DelayMilliseconds(kTestSeconds * 1000);
  1742. // Stop the threads and wait for them to finish
  1743. mt.stop.Release_Store(&mt);
  1744. for (int id = 0; id < kNumThreads; id++) {
  1745. while (mt.thread_done[id].Acquire_Load() == nullptr) {
  1746. DelayMilliseconds(100);
  1747. }
  1748. }
  1749. } while (ChangeOptions());
  1750. }
  1751. namespace {
  1752. typedef std::map<std::string, std::string> KVMap;
  1753. }
// In-memory reference implementation of the DB interface, backed by a
// std::map.  TEST(DBTest, Randomized) drives a ModelDB and the real DB with
// identical operations and compares their iteration results.
class ModelDB: public DB {
 public:
  // A snapshot is simply a full copy of the map taken at creation time.
  class ModelSnapshot : public Snapshot {
   public:
    KVMap map_;
  };

  explicit ModelDB(const Options& options): options_(options) { }
  ~ModelDB() { }
  virtual Status Put(const WriteOptions& o, const Slice& k, const Slice& v) {
    // Base-class helper routes this through Write() below.
    return DB::Put(o, k, v);
  }
  virtual Status Delete(const WriteOptions& o, const Slice& key) {
    // Base-class helper routes this through Write() below.
    return DB::Delete(o, key);
  }
  // Point lookups are not exercised by the randomized test; only iterators
  // are compared, so Get() is intentionally unimplemented.
  virtual Status Get(const ReadOptions& options,
                     const Slice& key, std::string* value) {
    assert(false);      // Not implemented
    return Status::NotFound(key);
  }
  virtual Iterator* NewIterator(const ReadOptions& options) {
    if (options.snapshot == nullptr) {
      // No snapshot requested: iterate over a private copy of the current
      // state so later mutations do not disturb the iterator.
      KVMap* saved = new KVMap;
      *saved = map_;
      return new ModelIter(saved, true);
    } else {
      // Iterate over the map captured by GetSnapshot().  The snapshot
      // object owns that map, so the iterator does not take ownership.
      const KVMap* snapshot_state =
          &(reinterpret_cast<const ModelSnapshot*>(options.snapshot)->map_);
      return new ModelIter(snapshot_state, false);
    }
  }
  virtual const Snapshot* GetSnapshot() {
    // Capture the full current state by value.
    ModelSnapshot* snapshot = new ModelSnapshot;
    snapshot->map_ = map_;
    return snapshot;
  }
  virtual void ReleaseSnapshot(const Snapshot* snapshot) {
    delete reinterpret_cast<const ModelSnapshot*>(snapshot);
  }
  virtual Status Write(const WriteOptions& options, WriteBatch* batch) {
    // Apply each entry of the batch directly to map_ via the handler.
    class Handler : public WriteBatch::Handler {
     public:
      KVMap* map_;
      virtual void Put(const Slice& key, const Slice& value) {
        (*map_)[key.ToString()] = value.ToString();
      }
      virtual void Delete(const Slice& key) {
        map_->erase(key.ToString());
      }
    };
    Handler handler;
    handler.map_ = &map_;
    return batch->Iterate(&handler);
  }
  // The model exposes no properties.
  virtual bool GetProperty(const Slice& property, std::string* value) {
    return false;
  }
  // Sizes are meaningless for the in-memory model; report zero for each range.
  virtual void GetApproximateSizes(const Range* r, int n, uint64_t* sizes) {
    for (int i = 0; i < n; i++) {
      sizes[i] = 0;
    }
  }
  // No-op: the model has no on-disk layout to compact.
  virtual void CompactRange(const Slice* start, const Slice* end) {
  }

 private:
  // Iterator over a KVMap.  Optionally owns the map (used for the private
  // copy created by NewIterator when no snapshot is given).
  class ModelIter: public Iterator {
   public:
    ModelIter(const KVMap* map, bool owned)
        : map_(map), owned_(owned), iter_(map_->end()) {
    }
    ~ModelIter() {
      if (owned_) delete map_;
    }
    virtual bool Valid() const { return iter_ != map_->end(); }
    virtual void SeekToFirst() { iter_ = map_->begin(); }
    virtual void SeekToLast() {
      if (map_->empty()) {
        iter_ = map_->end();
      } else {
        // Position on the largest key (rbegin gives it; find gives us a
        // forward iterator to it).
        iter_ = map_->find(map_->rbegin()->first);
      }
    }
    virtual void Seek(const Slice& k) {
      iter_ = map_->lower_bound(k.ToString());
    }
    virtual void Next() { ++iter_; }
    virtual void Prev() { --iter_; }
    virtual Slice key() const { return iter_->first; }
    virtual Slice value() const { return iter_->second; }
    // The model cannot fail.
    virtual Status status() const { return Status::OK(); }
   private:
    const KVMap* const map_;
    const bool owned_;  // Do we own map_
    KVMap::const_iterator iter_;
  };
  const Options options_;
  KVMap map_;
};
  1851. static bool CompareIterators(int step,
  1852. DB* model,
  1853. DB* db,
  1854. const Snapshot* model_snap,
  1855. const Snapshot* db_snap) {
  1856. ReadOptions options;
  1857. options.snapshot = model_snap;
  1858. Iterator* miter = model->NewIterator(options);
  1859. options.snapshot = db_snap;
  1860. Iterator* dbiter = db->NewIterator(options);
  1861. bool ok = true;
  1862. int count = 0;
  1863. for (miter->SeekToFirst(), dbiter->SeekToFirst();
  1864. ok && miter->Valid() && dbiter->Valid();
  1865. miter->Next(), dbiter->Next()) {
  1866. count++;
  1867. if (miter->key().compare(dbiter->key()) != 0) {
  1868. fprintf(stderr, "step %d: Key mismatch: '%s' vs. '%s'\n",
  1869. step,
  1870. EscapeString(miter->key()).c_str(),
  1871. EscapeString(dbiter->key()).c_str());
  1872. ok = false;
  1873. break;
  1874. }
  1875. if (miter->value().compare(dbiter->value()) != 0) {
  1876. fprintf(stderr, "step %d: Value mismatch for key '%s': '%s' vs. '%s'\n",
  1877. step,
  1878. EscapeString(miter->key()).c_str(),
  1879. EscapeString(miter->value()).c_str(),
  1880. EscapeString(miter->value()).c_str());
  1881. ok = false;
  1882. }
  1883. }
  1884. if (ok) {
  1885. if (miter->Valid() != dbiter->Valid()) {
  1886. fprintf(stderr, "step %d: Mismatch at end of iterators: %d vs. %d\n",
  1887. step, miter->Valid(), dbiter->Valid());
  1888. ok = false;
  1889. }
  1890. }
  1891. fprintf(stderr, "%d entries compared: ok=%d\n", count, ok);
  1892. delete miter;
  1893. delete dbiter;
  1894. return ok;
  1895. }
// Cross-checks the real DB against ModelDB: feeds an identical random
// stream of puts, deletes, and multi-element write batches to both, and
// every 100 steps compares full iterations of the current state and of a
// snapshot saved at the previous checkpoint — including across a Reopen().
// Repeats under every option configuration.
TEST(DBTest, Randomized) {
  Random rnd(test::RandomSeed());
  do {
    ModelDB model(CurrentOptions());
    const int N = 10000;
    const Snapshot* model_snap = nullptr;
    const Snapshot* db_snap = nullptr;
    std::string k, v;
    for (int step = 0; step < N; step++) {
      if (step % 100 == 0) {
        fprintf(stderr, "Step %d of %d\n", step, N);
      }
      // TODO(sanjay): Test Get() works
      int p = rnd.Uniform(100);
      if (p < 45) {                               // Put
        k = RandomKey(&rnd);
        // Mostly small values, occasionally a large one (100-199 bytes).
        v = RandomString(&rnd,
                         rnd.OneIn(20)
                         ? 100 + rnd.Uniform(100)
                         : rnd.Uniform(8));
        ASSERT_OK(model.Put(WriteOptions(), k, v));
        ASSERT_OK(db_->Put(WriteOptions(), k, v));
      } else if (p < 90) {                        // Delete
        k = RandomKey(&rnd);
        ASSERT_OK(model.Delete(WriteOptions(), k));
        ASSERT_OK(db_->Delete(WriteOptions(), k));
      } else {                                    // Multi-element batch
        WriteBatch b;
        const int num = rnd.Uniform(8);
        for (int i = 0; i < num; i++) {
          if (i == 0 || !rnd.OneIn(10)) {
            k = RandomKey(&rnd);
          } else {
            // Periodically re-use the same key from the previous iter, so
            // we have multiple entries in the write batch for the same key
          }
          if (rnd.OneIn(2)) {
            v = RandomString(&rnd, rnd.Uniform(10));
            b.Put(k, v);
          } else {
            b.Delete(k);
          }
        }
        ASSERT_OK(model.Write(WriteOptions(), &b));
        ASSERT_OK(db_->Write(WriteOptions(), &b));
      }

      if ((step % 100) == 0) {
        // Compare current states, and the states as of the snapshots taken
        // at the previous checkpoint (which must be unaffected by mutations
        // made since then).
        ASSERT_TRUE(CompareIterators(step, &model, db_, nullptr, nullptr));
        ASSERT_TRUE(CompareIterators(step, &model, db_, model_snap, db_snap));
        // Save a snapshot from each DB this time that we'll use next
        // time we compare things, to make sure the current state is
        // preserved with the snapshot
        if (model_snap != nullptr) model.ReleaseSnapshot(model_snap);
        if (db_snap != nullptr) db_->ReleaseSnapshot(db_snap);

        // State must also survive a close/reopen cycle.
        Reopen();
        ASSERT_TRUE(CompareIterators(step, &model, db_, nullptr, nullptr));

        model_snap = model.GetSnapshot();
        db_snap = db_->GetSnapshot();
      }
    }
    if (model_snap != nullptr) model.ReleaseSnapshot(model_snap);
    if (db_snap != nullptr) db_->ReleaseSnapshot(db_snap);
  } while (ChangeOptions());
}
  1960. std::string MakeKey(unsigned int num) {
  1961. char buf[30];
  1962. snprintf(buf, sizeof(buf), "%016u", num);
  1963. return std::string(buf);
  1964. }
  1965. void BM_LogAndApply(int iters, int num_base_files) {
  1966. std::string dbname = test::TmpDir() + "/leveldb_test_benchmark";
  1967. DestroyDB(dbname, Options());
  1968. DB* db = nullptr;
  1969. Options opts;
  1970. opts.create_if_missing = true;
  1971. Status s = DB::Open(opts, dbname, &db);
  1972. ASSERT_OK(s);
  1973. ASSERT_TRUE(db != nullptr);
  1974. delete db;
  1975. db = nullptr;
  1976. Env* env = Env::Default();
  1977. port::Mutex mu;
  1978. MutexLock l(&mu);
  1979. InternalKeyComparator cmp(BytewiseComparator());
  1980. Options options;
  1981. VersionSet vset(dbname, &options, nullptr, &cmp);
  1982. bool save_manifest;
  1983. ASSERT_OK(vset.Recover(&save_manifest));
  1984. VersionEdit vbase;
  1985. uint64_t fnum = 1;
  1986. for (int i = 0; i < num_base_files; i++) {
  1987. InternalKey start(MakeKey(2*fnum), 1, kTypeValue);
  1988. InternalKey limit(MakeKey(2*fnum+1), 1, kTypeDeletion);
  1989. vbase.AddFile(2, fnum++, 1 /* file size */, start, limit);
  1990. }
  1991. ASSERT_OK(vset.LogAndApply(&vbase, &mu));
  1992. uint64_t start_micros = env->NowMicros();
  1993. for (int i = 0; i < iters; i++) {
  1994. VersionEdit vedit;
  1995. vedit.DeleteFile(2, fnum);
  1996. InternalKey start(MakeKey(2*fnum), 1, kTypeValue);
  1997. InternalKey limit(MakeKey(2*fnum+1), 1, kTypeDeletion);
  1998. vedit.AddFile(2, fnum++, 1 /* file size */, start, limit);
  1999. vset.LogAndApply(&vedit, &mu);
  2000. }
  2001. uint64_t stop_micros = env->NowMicros();
  2002. unsigned int us = stop_micros - start_micros;
  2003. char buf[16];
  2004. snprintf(buf, sizeof(buf), "%d", num_base_files);
  2005. fprintf(stderr,
  2006. "BM_LogAndApply/%-6s %8d iters : %9u us (%7.0f us / iter)\n",
  2007. buf, iters, us, ((float)us) / iters);
  2008. }
  2009. } // namespace leveldb
  2010. int main(int argc, char** argv) {
  2011. if (argc > 1 && std::string(argv[1]) == "--benchmark") {
  2012. leveldb::BM_LogAndApply(1000, 1);
  2013. leveldb::BM_LogAndApply(1000, 100);
  2014. leveldb::BM_LogAndApply(1000, 10000);
  2015. leveldb::BM_LogAndApply(100, 100000);
  2016. return 0;
  2017. }
  2018. return leveldb::test::RunAllTests();
  2019. }