// 小组成员 (Group members): 10215300402 朱维清 & 10222140408 谷杰
//
// NOTE(review): the lines below this header in the original paste were
// page-scrape residue from the hosting site, preserved here for reference:
// Latvian GitHub UI text ("Nevar pievienot vairāk kā 25 tēmas ..." — the
// repository-topics limit help message) and the listing stats
// "2058 rindas / 59 KiB" (i.e. "2058 lines / 59 KiB").
  1. // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
  2. // Use of this source code is governed by a BSD-style license that can be
  3. // found in the LICENSE file. See the AUTHORS file for names of contributors.
  4. #include "leveldb/db.h"
  5. #include "leveldb/filter_policy.h"
  6. #include "db/db_impl.h"
  7. #include "db/filename.h"
  8. #include "db/version_set.h"
  9. #include "db/write_batch_internal.h"
  10. #include "leveldb/cache.h"
  11. #include "leveldb/env.h"
  12. #include "leveldb/table.h"
  13. #include "util/hash.h"
  14. #include "util/logging.h"
  15. #include "util/mutexlock.h"
  16. #include "util/testharness.h"
  17. #include "util/testutil.h"
  18. namespace leveldb {
  19. static std::string RandomString(Random* rnd, int len) {
  20. std::string r;
  21. test::RandomString(rnd, len, &r);
  22. return r;
  23. }
  24. namespace {
  25. class AtomicCounter {
  26. private:
  27. port::Mutex mu_;
  28. int count_;
  29. public:
  30. AtomicCounter() : count_(0) { }
  31. void Increment() {
  32. MutexLock l(&mu_);
  33. count_++;
  34. }
  35. int Read() {
  36. MutexLock l(&mu_);
  37. return count_;
  38. }
  39. void Reset() {
  40. MutexLock l(&mu_);
  41. count_ = 0;
  42. }
  43. };
  44. }
// Special Env used to delay background operations.  Wraps a real Env and
// lets tests inject failures by flipping the AtomicPointer flags below
// (non-NULL == fault enabled).
class SpecialEnv : public EnvWrapper {
 public:
  // sstable Sync() calls are blocked while this pointer is non-NULL.
  port::AtomicPointer delay_sstable_sync_;

  // Simulate no-space errors while this pointer is non-NULL.
  port::AtomicPointer no_space_;

  // Simulate non-writable file system while this pointer is non-NULL.
  port::AtomicPointer non_writable_;

  // Force sync of manifest files to fail while this pointer is non-NULL.
  port::AtomicPointer manifest_sync_error_;

  // Force write to manifest files to fail while this pointer is non-NULL.
  port::AtomicPointer manifest_write_error_;

  // When true, NewRandomAccessFile() wraps files so that every Read()
  // increments random_read_counter_.
  bool count_random_reads_;
  AtomicCounter random_read_counter_;

  // Counts SleepForMicroseconds() calls routed through this Env.
  AtomicCounter sleep_counter_;

  explicit SpecialEnv(Env* base) : EnvWrapper(base) {
    delay_sstable_sync_.Release_Store(NULL);
    no_space_.Release_Store(NULL);
    non_writable_.Release_Store(NULL);
    count_random_reads_ = false;
    manifest_sync_error_.Release_Store(NULL);
    manifest_write_error_.Release_Store(NULL);
  }

  // Creates a writable file whose behavior depends on the file name:
  // names containing ".sst" honor no_space_/delay_sstable_sync_, names
  // containing "MANIFEST" honor manifest_*_error_; anything else is
  // passed straight through to the wrapped Env.
  Status NewWritableFile(const std::string& f, WritableFile** r) {
    // Wrapper for sstable files: silently drops writes while no_space_
    // is set, and stalls Sync() while delay_sstable_sync_ is set.
    class SSTableFile : public WritableFile {
     private:
      SpecialEnv* env_;
      WritableFile* base_;

     public:
      SSTableFile(SpecialEnv* env, WritableFile* base)
          : env_(env),
            base_(base) {
      }
      ~SSTableFile() { delete base_; }
      Status Append(const Slice& data) {
        if (env_->no_space_.Acquire_Load() != NULL) {
          // Drop writes on the floor
          return Status::OK();
        } else {
          return base_->Append(data);
        }
      }
      Status Close() { return base_->Close(); }
      Status Flush() { return base_->Flush(); }
      Status Sync() {
        // Poll every 100ms until the test clears delay_sstable_sync_.
        while (env_->delay_sstable_sync_.Acquire_Load() != NULL) {
          env_->SleepForMicroseconds(100000);
        }
        return base_->Sync();
      }
    };

    // Wrapper for MANIFEST files: fails Append()/Sync() on demand so
    // tests can exercise manifest-error recovery paths.
    class ManifestFile : public WritableFile {
     private:
      SpecialEnv* env_;
      WritableFile* base_;

     public:
      ManifestFile(SpecialEnv* env, WritableFile* b) : env_(env), base_(b) { }
      ~ManifestFile() { delete base_; }
      Status Append(const Slice& data) {
        if (env_->manifest_write_error_.Acquire_Load() != NULL) {
          return Status::IOError("simulated writer error");
        } else {
          return base_->Append(data);
        }
      }
      Status Close() { return base_->Close(); }
      Status Flush() { return base_->Flush(); }
      Status Sync() {
        if (env_->manifest_sync_error_.Acquire_Load() != NULL) {
          return Status::IOError("simulated sync error");
        } else {
          return base_->Sync();
        }
      }
    };

    if (non_writable_.Acquire_Load() != NULL) {
      return Status::IOError("simulated write error");
    }

    Status s = target()->NewWritableFile(f, r);
    if (s.ok()) {
      // Choose the wrapper by substring match on the file name.
      if (strstr(f.c_str(), ".sst") != NULL) {
        *r = new SSTableFile(this, *r);
      } else if (strstr(f.c_str(), "MANIFEST") != NULL) {
        *r = new ManifestFile(this, *r);
      }
    }
    return s;
  }

  // Creates a random-access file; when count_random_reads_ is set, the
  // file is wrapped so each Read() bumps random_read_counter_.
  Status NewRandomAccessFile(const std::string& f, RandomAccessFile** r) {
    class CountingFile : public RandomAccessFile {
     private:
      RandomAccessFile* target_;
      AtomicCounter* counter_;

     public:
      CountingFile(RandomAccessFile* target, AtomicCounter* counter)
          : target_(target), counter_(counter) {
      }
      virtual ~CountingFile() { delete target_; }
      virtual Status Read(uint64_t offset, size_t n, Slice* result,
                          char* scratch) const {
        counter_->Increment();
        return target_->Read(offset, n, result, scratch);
      }
    };

    Status s = target()->NewRandomAccessFile(f, r);
    if (s.ok() && count_random_reads_) {
      *r = new CountingFile(*r, &random_read_counter_);
    }
    return s;
  }

  // Counts the sleep in addition to actually sleeping.
  virtual void SleepForMicroseconds(int micros) {
    sleep_counter_.Increment();
    target()->SleepForMicroseconds(micros);
  }
};
// Test harness: owns a fresh database under a test-specific directory and
// cycles through option configurations (default, bloom filter,
// no compression) via ChangeOptions() so each test body can run under all
// of them.
class DBTest {
 private:
  const FilterPolicy* filter_policy_;

  // Sequence of option configurations to try
  enum OptionConfig {
    kDefault,
    kFilter,
    kUncompressed,
    kEnd
  };
  int option_config_;

 public:
  std::string dbname_;
  SpecialEnv* env_;
  DB* db_;

  // Options used by the most recent (Try)Reopen(); consulted by helpers
  // such as AllEntriesFor() for the active comparator.
  Options last_options_;

  DBTest() : option_config_(kDefault),
             env_(new SpecialEnv(Env::Default())) {
    filter_policy_ = NewBloomFilterPolicy(10);
    dbname_ = test::TmpDir() + "/db_test";
    DestroyDB(dbname_, Options());
    db_ = NULL;
    Reopen();
  }

  ~DBTest() {
    delete db_;
    DestroyDB(dbname_, Options());
    delete env_;
    delete filter_policy_;
  }

  // Switch to a fresh database with the next option configuration to
  // test. Return false if there are no more configurations to test.
  bool ChangeOptions() {
    option_config_++;
    if (option_config_ >= kEnd) {
      return false;
    } else {
      DestroyAndReopen();
      return true;
    }
  }

  // Return the current option configuration.
  Options CurrentOptions() {
    Options options;
    switch (option_config_) {
      case kFilter:
        options.filter_policy = filter_policy_;
        break;
      case kUncompressed:
        options.compression = kNoCompression;
        break;
      default:
        break;
    }
    return options;
  }

  // Downcast to the implementation class to reach the TEST_* hooks.
  // NOTE(review): a static_cast would be the conventional cast for this
  // derived-class downcast; reinterpret_cast works here but is looser.
  DBImpl* dbfull() {
    return reinterpret_cast<DBImpl*>(db_);
  }

  // Reopen the database, asserting success.
  void Reopen(Options* options = NULL) {
    ASSERT_OK(TryReopen(options));
  }

  void Close() {
    delete db_;
    db_ = NULL;
  }

  // Wipe all database files and reopen from scratch.
  void DestroyAndReopen(Options* options = NULL) {
    delete db_;
    db_ = NULL;
    DestroyDB(dbname_, Options());
    ASSERT_OK(TryReopen(options));
  }

  // Close then reopen.  With options==NULL, uses CurrentOptions() with
  // create_if_missing forced on.
  Status TryReopen(Options* options) {
    delete db_;
    db_ = NULL;
    Options opts;
    if (options != NULL) {
      opts = *options;
    } else {
      opts = CurrentOptions();
      opts.create_if_missing = true;
    }
    last_options_ = opts;
    return DB::Open(opts, dbname_, &db_);
  }

  Status Put(const std::string& k, const std::string& v) {
    return db_->Put(WriteOptions(), k, v);
  }

  Status Delete(const std::string& k) {
    return db_->Delete(WriteOptions(), k);
  }

  // Read `k` (optionally at a snapshot).  Errors are folded into the
  // returned string ("NOT_FOUND" or the status text) so tests can
  // ASSERT_EQ directly on the result.
  std::string Get(const std::string& k, const Snapshot* snapshot = NULL) {
    ReadOptions options;
    options.snapshot = snapshot;
    std::string result;
    Status s = db_->Get(options, k, &result);
    if (s.IsNotFound()) {
      result = "NOT_FOUND";
    } else if (!s.ok()) {
      result = s.ToString();
    }
    return result;
  }

  // Return a string that contains all key,value pairs in order,
  // formatted like "(k1->v1)(k2->v2)".
  std::string Contents() {
    std::vector<std::string> forward;
    std::string result;
    Iterator* iter = db_->NewIterator(ReadOptions());
    for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
      std::string s = IterStatus(iter);
      result.push_back('(');
      result.append(s);
      result.push_back(')');
      forward.push_back(s);
    }

    // Check reverse iteration results are the reverse of forward results
    int matched = 0;
    for (iter->SeekToLast(); iter->Valid(); iter->Prev()) {
      ASSERT_LT(matched, forward.size());
      ASSERT_EQ(IterStatus(iter), forward[forward.size() - matched - 1]);
      matched++;
    }
    ASSERT_EQ(matched, forward.size());

    delete iter;
    return result;
  }

  // Dump every internal entry for `user_key` (values and deletion
  // markers), e.g. "[ v2, DEL, v1 ]", by walking the internal iterator
  // starting at the largest possible sequence number for that key.
  std::string AllEntriesFor(const Slice& user_key) {
    Iterator* iter = dbfull()->TEST_NewInternalIterator();
    InternalKey target(user_key, kMaxSequenceNumber, kTypeValue);
    iter->Seek(target.Encode());
    std::string result;
    if (!iter->status().ok()) {
      result = iter->status().ToString();
    } else {
      result = "[ ";
      bool first = true;
      while (iter->Valid()) {
        ParsedInternalKey ikey;
        if (!ParseInternalKey(iter->key(), &ikey)) {
          result += "CORRUPTED";
        } else {
          // Stop once we leave the run of entries for this user key.
          if (last_options_.comparator->Compare(ikey.user_key, user_key) != 0) {
            break;
          }
          if (!first) {
            result += ", ";
          }
          first = false;
          switch (ikey.type) {
            case kTypeValue:
              result += iter->value().ToString();
              break;
            case kTypeDeletion:
              result += "DEL";
              break;
          }
        }
        iter->Next();
      }
      if (!first) {
        result += " ";
      }
      result += "]";
    }
    delete iter;
    return result;
  }

  // Number of table files at `level`, read back via the DB property.
  int NumTableFilesAtLevel(int level) {
    std::string property;
    ASSERT_TRUE(
        db_->GetProperty("leveldb.num-files-at-level" + NumberToString(level),
                         &property));
    return atoi(property.c_str());
  }

  int TotalTableFiles() {
    int result = 0;
    for (int level = 0; level < config::kNumLevels; level++) {
      result += NumTableFilesAtLevel(level);
    }
    return result;
  }

  // Return spread of files per level, e.g. "2,1,4".  Trailing levels
  // with zero files are trimmed off.
  std::string FilesPerLevel() {
    std::string result;
    int last_non_zero_offset = 0;
    for (int level = 0; level < config::kNumLevels; level++) {
      int f = NumTableFilesAtLevel(level);
      char buf[100];
      snprintf(buf, sizeof(buf), "%s%d", (level ? "," : ""), f);
      result += buf;
      if (f > 0) {
        last_non_zero_offset = result.size();
      }
    }
    result.resize(last_non_zero_offset);
    return result;
  }

  // Count of all files (of any type) in the database directory.
  int CountFiles() {
    std::vector<std::string> files;
    env_->GetChildren(dbname_, &files);
    return static_cast<int>(files.size());
  }

  // Approximate on-disk size of the key range [start, limit).
  uint64_t Size(const Slice& start, const Slice& limit) {
    Range r(start, limit);
    uint64_t size;
    db_->GetApproximateSizes(&r, 1, &size);
    return size;
  }

  void Compact(const Slice& start, const Slice& limit) {
    db_->CompactRange(&start, &limit);
  }

  // Do n memtable compactions, each of which produces an sstable
  // covering the range [small,large].
  void MakeTables(int n, const std::string& small, const std::string& large) {
    for (int i = 0; i < n; i++) {
      Put(small, "begin");
      Put(large, "end");
      dbfull()->TEST_CompactMemTable();
    }
  }

  // Prevent pushing of new sstables into deeper levels by adding
  // tables that cover a specified range to all levels.
  void FillLevels(const std::string& smallest, const std::string& largest) {
    MakeTables(config::kNumLevels, smallest, largest);
  }

  // Debug helper: print per-level file counts and the max overlap with
  // the next level to stderr.
  void DumpFileCounts(const char* label) {
    fprintf(stderr, "---\n%s:\n", label);
    fprintf(stderr, "maxoverlap: %lld\n",
            static_cast<long long>(
                dbfull()->TEST_MaxNextLevelOverlappingBytes()));
    for (int level = 0; level < config::kNumLevels; level++) {
      int num = NumTableFilesAtLevel(level);
      if (num > 0) {
        fprintf(stderr, " level %3d : %d files\n", level, num);
      }
    }
  }

  std::string DumpSSTableList() {
    std::string property;
    db_->GetProperty("leveldb.sstables", &property);
    return property;
  }

  // "key->value" for a valid iterator position, "(invalid)" otherwise.
  std::string IterStatus(Iterator* iter) {
    std::string result;
    if (iter->Valid()) {
      result = iter->key().ToString() + "->" + iter->value().ToString();
    } else {
      result = "(invalid)";
    }
    return result;
  }

  // Delete one table file from disk, if any exists; returns whether a
  // table file was found (and deleted).
  bool DeleteAnSSTFile() {
    std::vector<std::string> filenames;
    ASSERT_OK(env_->GetChildren(dbname_, &filenames));
    uint64_t number;
    FileType type;
    for (size_t i = 0; i < filenames.size(); i++) {
      if (ParseFileName(filenames[i], &number, &type) && type == kTableFile) {
        ASSERT_OK(env_->DeleteFile(TableFileName(dbname_, number)));
        return true;
      }
    }
    return false;
  }
};
// An empty database reports NOT_FOUND for any key, under every option
// configuration.
TEST(DBTest, Empty) {
  do {
    ASSERT_TRUE(db_ != NULL);
    ASSERT_EQ("NOT_FOUND", Get("foo"));
  } while (ChangeOptions());
}
// Basic Put/Get: the latest write to a key wins and unrelated keys are
// unaffected.
TEST(DBTest, ReadWrite) {
  do {
    ASSERT_OK(Put("foo", "v1"));
    ASSERT_EQ("v1", Get("foo"));
    ASSERT_OK(Put("bar", "v2"));
    ASSERT_OK(Put("foo", "v3"));
    ASSERT_EQ("v3", Get("foo"));
    ASSERT_EQ("v2", Get("bar"));
  } while (ChangeOptions());
}
// Put, overwrite, then delete a key; the delete must make it NOT_FOUND.
TEST(DBTest, PutDeleteGet) {
  do {
    ASSERT_OK(db_->Put(WriteOptions(), "foo", "v1"));
    ASSERT_EQ("v1", Get("foo"));
    ASSERT_OK(db_->Put(WriteOptions(), "foo", "v2"));
    ASSERT_EQ("v2", Get("foo"));
    ASSERT_OK(db_->Delete(WriteOptions(), "foo"));
    ASSERT_EQ("NOT_FOUND", Get("foo"));
  } while (ChangeOptions());
}
// While sstable syncs are blocked (so the memtable dump cannot finish),
// reads must still be served from the immutable memtable.
TEST(DBTest, GetFromImmutableLayer) {
  do {
    Options options = CurrentOptions();
    options.env = env_;
    options.write_buffer_size = 100000;  // Small write buffer
    Reopen(&options);

    ASSERT_OK(Put("foo", "v1"));
    ASSERT_EQ("v1", Get("foo"));

    env_->delay_sstable_sync_.Release_Store(env_);  // Block sync calls
    Put("k1", std::string(100000, 'x'));            // Fill memtable
    Put("k2", std::string(100000, 'y'));            // Trigger compaction
    ASSERT_EQ("v1", Get("foo"));
    env_->delay_sstable_sync_.Release_Store(NULL);  // Release sync calls
  } while (ChangeOptions());
}
// A value remains readable after the memtable is compacted into an
// sstable (i.e. reads are served from the version set, not just memory).
TEST(DBTest, GetFromVersions) {
  do {
    ASSERT_OK(Put("foo", "v1"));
    dbfull()->TEST_CompactMemTable();
    ASSERT_EQ("v1", Get("foo"));
  } while (ChangeOptions());
}
// A snapshot pins the value visible at its creation, both before and
// after a memtable compaction.
TEST(DBTest, GetSnapshot) {
  do {
    // Try with both a short key and a long key
    for (int i = 0; i < 2; i++) {
      std::string key = (i == 0) ? std::string("foo") : std::string(200, 'x');
      ASSERT_OK(Put(key, "v1"));
      const Snapshot* s1 = db_->GetSnapshot();
      ASSERT_OK(Put(key, "v2"));
      ASSERT_EQ("v2", Get(key));
      ASSERT_EQ("v1", Get(key, s1));
      dbfull()->TEST_CompactMemTable();
      ASSERT_EQ("v2", Get(key));
      ASSERT_EQ("v1", Get(key, s1));
      db_->ReleaseSnapshot(s1);
    }
  } while (ChangeOptions());
}
// Level-0 files may overlap; Get() must prefer the newer file even when
// the older file sorts first by smallest key.
TEST(DBTest, GetLevel0Ordering) {
  do {
    // Check that we process level-0 files in correct order. The code
    // below generates two level-0 files where the earlier one comes
    // before the later one in the level-0 file list since the earlier
    // one has a smaller "smallest" key.
    ASSERT_OK(Put("bar", "b"));
    ASSERT_OK(Put("foo", "v1"));
    dbfull()->TEST_CompactMemTable();
    ASSERT_OK(Put("foo", "v2"));
    dbfull()->TEST_CompactMemTable();
    ASSERT_EQ("v2", Get("foo"));
  } while (ChangeOptions());
}
// A fresh write must shadow an older value that has already been pushed
// to a deeper level by a range compaction.
TEST(DBTest, GetOrderedByLevels) {
  do {
    ASSERT_OK(Put("foo", "v1"));
    Compact("a", "z");
    ASSERT_EQ("v1", Get("foo"));
    ASSERT_OK(Put("foo", "v2"));
    ASSERT_EQ("v2", Get("foo"));
    dbfull()->TEST_CompactMemTable();
    ASSERT_EQ("v2", Get("foo"));
  } while (ChangeOptions());
}
// With several disjoint files in a non-level-0 level, Get() must search
// the file whose range actually contains the key.
TEST(DBTest, GetPicksCorrectFile) {
  do {
    // Arrange to have multiple files in a non-level-0 level.
    ASSERT_OK(Put("a", "va"));
    Compact("a", "b");
    ASSERT_OK(Put("x", "vx"));
    Compact("x", "y");
    ASSERT_OK(Put("f", "vf"));
    Compact("f", "g");
    ASSERT_EQ("va", Get("a"));
    ASSERT_EQ("vf", Get("f"));
    ASSERT_EQ("vx", Get("x"));
  } while (ChangeOptions());
}
// Regression test: seek-triggered compaction must be attributed to the
// level the file actually lives in, even when an intermediate level is
// empty.
TEST(DBTest, GetEncountersEmptyLevel) {
  do {
    // Arrange for the following to happen:
    //   * sstable A in level 0
    //   * nothing in level 1
    //   * sstable B in level 2
    // Then do enough Get() calls to arrange for an automatic compaction
    // of sstable A.  A bug would cause the compaction to be marked as
    // occurring at level 1 (instead of the correct level 0).

    // Step 1: First place sstables in levels 0 and 2
    int compaction_count = 0;
    while (NumTableFilesAtLevel(0) == 0 ||
           NumTableFilesAtLevel(2) == 0) {
      ASSERT_LE(compaction_count, 100) << "could not fill levels 0 and 2";
      compaction_count++;
      Put("a", "begin");
      Put("z", "end");
      dbfull()->TEST_CompactMemTable();
    }

    // Step 2: clear level 1 if necessary.
    dbfull()->TEST_CompactRange(1, NULL, NULL);
    ASSERT_EQ(NumTableFilesAtLevel(0), 1);
    ASSERT_EQ(NumTableFilesAtLevel(1), 0);
    ASSERT_EQ(NumTableFilesAtLevel(2), 1);

    // Step 3: read a bunch of times
    for (int i = 0; i < 1000; i++) {
      ASSERT_EQ("NOT_FOUND", Get("missing"));
    }

    // Step 4: Wait for compaction to finish
    env_->SleepForMicroseconds(1000000);

    ASSERT_EQ(NumTableFilesAtLevel(0), 0);
  } while (ChangeOptions());
}
  564. TEST(DBTest, IterEmpty) {
  565. Iterator* iter = db_->NewIterator(ReadOptions());
  566. iter->SeekToFirst();
  567. ASSERT_EQ(IterStatus(iter), "(invalid)");
  568. iter->SeekToLast();
  569. ASSERT_EQ(IterStatus(iter), "(invalid)");
  570. iter->Seek("foo");
  571. ASSERT_EQ(IterStatus(iter), "(invalid)");
  572. delete iter;
  573. }
// Iterator behavior over a one-entry database: every seek lands on the
// single entry (or goes invalid), and stepping off either end is invalid.
TEST(DBTest, IterSingle) {
  ASSERT_OK(Put("a", "va"));
  Iterator* iter = db_->NewIterator(ReadOptions());

  iter->SeekToFirst();
  ASSERT_EQ(IterStatus(iter), "a->va");
  iter->Next();
  ASSERT_EQ(IterStatus(iter), "(invalid)");
  iter->SeekToFirst();
  ASSERT_EQ(IterStatus(iter), "a->va");
  iter->Prev();
  ASSERT_EQ(IterStatus(iter), "(invalid)");

  iter->SeekToLast();
  ASSERT_EQ(IterStatus(iter), "a->va");
  iter->Next();
  ASSERT_EQ(IterStatus(iter), "(invalid)");
  iter->SeekToLast();
  ASSERT_EQ(IterStatus(iter), "a->va");
  iter->Prev();
  ASSERT_EQ(IterStatus(iter), "(invalid)");

  // Seek("") is before the entry; Seek("b") is past it.
  iter->Seek("");
  ASSERT_EQ(IterStatus(iter), "a->va");
  iter->Next();
  ASSERT_EQ(IterStatus(iter), "(invalid)");

  iter->Seek("a");
  ASSERT_EQ(IterStatus(iter), "a->va");
  iter->Next();
  ASSERT_EQ(IterStatus(iter), "(invalid)");

  iter->Seek("b");
  ASSERT_EQ(IterStatus(iter), "(invalid)");

  delete iter;
}
// Iterator behavior over three entries: forward/backward traversal,
// seeks (exact and between keys), direction switches, and snapshot
// isolation from writes made after the iterator was created.
TEST(DBTest, IterMulti) {
  ASSERT_OK(Put("a", "va"));
  ASSERT_OK(Put("b", "vb"));
  ASSERT_OK(Put("c", "vc"));
  Iterator* iter = db_->NewIterator(ReadOptions());

  iter->SeekToFirst();
  ASSERT_EQ(IterStatus(iter), "a->va");
  iter->Next();
  ASSERT_EQ(IterStatus(iter), "b->vb");
  iter->Next();
  ASSERT_EQ(IterStatus(iter), "c->vc");
  iter->Next();
  ASSERT_EQ(IterStatus(iter), "(invalid)");
  iter->SeekToFirst();
  ASSERT_EQ(IterStatus(iter), "a->va");
  iter->Prev();
  ASSERT_EQ(IterStatus(iter), "(invalid)");

  iter->SeekToLast();
  ASSERT_EQ(IterStatus(iter), "c->vc");
  iter->Prev();
  ASSERT_EQ(IterStatus(iter), "b->vb");
  iter->Prev();
  ASSERT_EQ(IterStatus(iter), "a->va");
  iter->Prev();
  ASSERT_EQ(IterStatus(iter), "(invalid)");
  iter->SeekToLast();
  ASSERT_EQ(IterStatus(iter), "c->vc");
  iter->Next();
  ASSERT_EQ(IterStatus(iter), "(invalid)");

  // Seeks: "" lands on the first key; "ax" (between "a" and "b") lands
  // on "b"; "z" (past the last key) is invalid.
  iter->Seek("");
  ASSERT_EQ(IterStatus(iter), "a->va");
  iter->Seek("a");
  ASSERT_EQ(IterStatus(iter), "a->va");
  iter->Seek("ax");
  ASSERT_EQ(IterStatus(iter), "b->vb");
  iter->Seek("b");
  ASSERT_EQ(IterStatus(iter), "b->vb");
  iter->Seek("z");
  ASSERT_EQ(IterStatus(iter), "(invalid)");

  // Switch from reverse to forward
  iter->SeekToLast();
  iter->Prev();
  iter->Prev();
  iter->Next();
  ASSERT_EQ(IterStatus(iter), "b->vb");

  // Switch from forward to reverse
  iter->SeekToFirst();
  iter->Next();
  iter->Next();
  iter->Prev();
  ASSERT_EQ(IterStatus(iter), "b->vb");

  // Make sure iter stays at snapshot: writes/deletes made after the
  // iterator was created must not be visible through it.
  ASSERT_OK(Put("a",  "va2"));
  ASSERT_OK(Put("a2", "va3"));
  ASSERT_OK(Put("b",  "vb2"));
  ASSERT_OK(Put("c",  "vc2"));
  ASSERT_OK(Delete("b"));
  iter->SeekToFirst();
  ASSERT_EQ(IterStatus(iter), "a->va");
  iter->Next();
  ASSERT_EQ(IterStatus(iter), "b->vb");
  iter->Next();
  ASSERT_EQ(IterStatus(iter), "c->vc");
  iter->Next();
  ASSERT_EQ(IterStatus(iter), "(invalid)");
  iter->SeekToLast();
  ASSERT_EQ(IterStatus(iter), "c->vc");
  iter->Prev();
  ASSERT_EQ(IterStatus(iter), "b->vb");
  iter->Prev();
  ASSERT_EQ(IterStatus(iter), "a->va");
  iter->Prev();
  ASSERT_EQ(IterStatus(iter), "(invalid)");

  delete iter;
}
// Iteration must be correct when small values are interleaved with
// large (100KB) values in both directions.
TEST(DBTest, IterSmallAndLargeMix) {
  ASSERT_OK(Put("a", "va"));
  ASSERT_OK(Put("b", std::string(100000, 'b')));
  ASSERT_OK(Put("c", "vc"));
  ASSERT_OK(Put("d", std::string(100000, 'd')));
  ASSERT_OK(Put("e", std::string(100000, 'e')));

  Iterator* iter = db_->NewIterator(ReadOptions());

  iter->SeekToFirst();
  ASSERT_EQ(IterStatus(iter), "a->va");
  iter->Next();
  ASSERT_EQ(IterStatus(iter), "b->" + std::string(100000, 'b'));
  iter->Next();
  ASSERT_EQ(IterStatus(iter), "c->vc");
  iter->Next();
  ASSERT_EQ(IterStatus(iter), "d->" + std::string(100000, 'd'));
  iter->Next();
  ASSERT_EQ(IterStatus(iter), "e->" + std::string(100000, 'e'));
  iter->Next();
  ASSERT_EQ(IterStatus(iter), "(invalid)");

  iter->SeekToLast();
  ASSERT_EQ(IterStatus(iter), "e->" + std::string(100000, 'e'));
  iter->Prev();
  ASSERT_EQ(IterStatus(iter), "d->" + std::string(100000, 'd'));
  iter->Prev();
  ASSERT_EQ(IterStatus(iter), "c->vc");
  iter->Prev();
  ASSERT_EQ(IterStatus(iter), "b->" + std::string(100000, 'b'));
  iter->Prev();
  ASSERT_EQ(IterStatus(iter), "a->va");
  iter->Prev();
  ASSERT_EQ(IterStatus(iter), "(invalid)");

  delete iter;
}
// Prev() must skip over a deleted key (deletion marker) and land on the
// previous live entry.
TEST(DBTest, IterMultiWithDelete) {
  do {
    ASSERT_OK(Put("a", "va"));
    ASSERT_OK(Put("b", "vb"));
    ASSERT_OK(Put("c", "vc"));
    ASSERT_OK(Delete("b"));
    ASSERT_EQ("NOT_FOUND", Get("b"));

    Iterator* iter = db_->NewIterator(ReadOptions());
    iter->Seek("c");
    ASSERT_EQ(IterStatus(iter), "c->vc");
    iter->Prev();
    ASSERT_EQ(IterStatus(iter), "a->va");
    delete iter;
  } while (ChangeOptions());
}
// Values written before a close/reopen cycle must survive recovery, and
// writes after recovery must continue to work.
TEST(DBTest, Recover) {
  do {
    ASSERT_OK(Put("foo", "v1"));
    ASSERT_OK(Put("baz", "v5"));

    Reopen();
    ASSERT_EQ("v1", Get("foo"));

    // NOTE(review): the repeated read below appears verbatim in the
    // original; presumably it exercises a second read after recovery.
    ASSERT_EQ("v1", Get("foo"));
    ASSERT_EQ("v5", Get("baz"));
    ASSERT_OK(Put("bar", "v2"));
    ASSERT_OK(Put("foo", "v3"));

    Reopen();
    ASSERT_EQ("v3", Get("foo"));
    ASSERT_OK(Put("foo", "v4"));
    ASSERT_EQ("v4", Get("foo"));
    ASSERT_EQ("v2", Get("bar"));
    ASSERT_EQ("v5", Get("baz"));
  } while (ChangeOptions());
}
// Recovery must cope with an empty log file (produced by the back-to-back
// reopen with no intervening writes).
TEST(DBTest, RecoveryWithEmptyLog) {
  do {
    ASSERT_OK(Put("foo", "v1"));
    ASSERT_OK(Put("foo", "v2"));
    Reopen();
    Reopen();
    ASSERT_OK(Put("foo", "v3"));
    Reopen();
    ASSERT_EQ("v3", Get("foo"));
  } while (ChangeOptions());
}
// Check that writes done during a memtable compaction are recovered
// if the database is shutdown during the memtable compaction.
TEST(DBTest, RecoverDuringMemtableCompaction) {
  do {
    Options options = CurrentOptions();
    options.env = env_;
    options.write_buffer_size = 1000000;
    Reopen(&options);

    // Trigger a long memtable compaction and reopen the database during it
    ASSERT_OK(Put("foo", "v1"));                         // Goes to 1st log file
    ASSERT_OK(Put("big1", std::string(10000000, 'x')));  // Fills memtable
    ASSERT_OK(Put("big2", std::string(1000, 'y')));      // Triggers compaction
    ASSERT_OK(Put("bar", "v2"));                         // Goes to new log file

    Reopen(&options);
    ASSERT_EQ("v1", Get("foo"));
    ASSERT_EQ("v2", Get("bar"));
    ASSERT_EQ(std::string(10000000, 'x'), Get("big1"));
    ASSERT_EQ(std::string(1000, 'y'), Get("big2"));
  } while (ChangeOptions());
}
  777. static std::string Key(int i) {
  778. char buf[100];
  779. snprintf(buf, sizeof(buf), "key%06d", i);
  780. return std::string(buf);
  781. }
// With a small write buffer, enough writes must produce new table files
// (minor compactions), and all data must remain readable before and
// after a reopen.
TEST(DBTest, MinorCompactionsHappen) {
  Options options = CurrentOptions();
  options.write_buffer_size = 10000;
  Reopen(&options);

  const int N = 500;

  int starting_num_tables = TotalTableFiles();
  for (int i = 0; i < N; i++) {
    ASSERT_OK(Put(Key(i), Key(i) + std::string(1000, 'v')));
  }
  int ending_num_tables = TotalTableFiles();
  ASSERT_GT(ending_num_tables, starting_num_tables);

  for (int i = 0; i < N; i++) {
    ASSERT_EQ(Key(i) + std::string(1000, 'v'), Get(Key(i)));
  }

  Reopen();

  for (int i = 0; i < N; i++) {
    ASSERT_EQ(Key(i) + std::string(1000, 'v'), Get(Key(i)));
  }
}
// Reopening a large log with a small write buffer must flush table files
// while replaying the log (rather than building one huge memtable).
TEST(DBTest, RecoverWithLargeLog) {
  {
    Options options = CurrentOptions();
    Reopen(&options);
    ASSERT_OK(Put("big1", std::string(200000, '1')));
    ASSERT_OK(Put("big2", std::string(200000, '2')));
    ASSERT_OK(Put("small3", std::string(10, '3')));
    ASSERT_OK(Put("small4", std::string(10, '4')));
    ASSERT_EQ(NumTableFilesAtLevel(0), 0);
  }

  // Make sure that if we re-open with a small write buffer size that
  // we flush table files in the middle of a large log file.
  Options options = CurrentOptions();
  options.write_buffer_size = 100000;
  Reopen(&options);
  ASSERT_EQ(NumTableFilesAtLevel(0), 3);
  ASSERT_EQ(std::string(200000, '1'), Get("big1"));
  ASSERT_EQ(std::string(200000, '2'), Get("big2"));
  ASSERT_EQ(std::string(10, '3'), Get("small3"));
  ASSERT_EQ(std::string(10, '4'), Get("small4"));
  // NOTE(review): redundant with the equality check above (3 > 1);
  // kept as in the original.
  ASSERT_GT(NumTableFilesAtLevel(0), 1);
}
// Compacting 8MB from level 0 must split the output into multiple
// level-1 files, with all values still readable afterwards.
TEST(DBTest, CompactionsGenerateMultipleFiles) {
  Options options = CurrentOptions();
  options.write_buffer_size = 100000000;  // Large write buffer
  Reopen(&options);

  Random rnd(301);

  // Write 8MB (80 values, each 100K)
  ASSERT_EQ(NumTableFilesAtLevel(0), 0);
  std::vector<std::string> values;
  for (int i = 0; i < 80; i++) {
    values.push_back(RandomString(&rnd, 100000));
    ASSERT_OK(Put(Key(i), values[i]));
  }

  // Reopening moves updates to level-0
  Reopen(&options);
  dbfull()->TEST_CompactRange(0, NULL, NULL);

  ASSERT_EQ(NumTableFilesAtLevel(0), 0);
  ASSERT_GT(NumTableFilesAtLevel(1), 1);
  for (int i = 0; i < 80; i++) {
    ASSERT_EQ(Get(Key(i)), values[i]);
  }
}
// Repeatedly overwriting one key must not accumulate table files beyond
// the per-level bound (obsolete versions get compacted away).
TEST(DBTest, RepeatedWritesToSameKey) {
  Options options = CurrentOptions();
  options.env = env_;
  options.write_buffer_size = 100000;  // Small write buffer
  Reopen(&options);

  // We must have at most one file per level except for level-0,
  // which may have up to kL0_StopWritesTrigger files.
  const int kMaxFiles = config::kNumLevels + config::kL0_StopWritesTrigger;

  Random rnd(301);
  std::string value = RandomString(&rnd, 2 * options.write_buffer_size);
  for (int i = 0; i < 5 * kMaxFiles; i++) {
    Put("key", value);
    ASSERT_LE(TotalTableFiles(), kMaxFiles);
    fprintf(stderr, "after %d: %d files\n", int(i+1), TotalTableFiles());
  }
}
// After sparse updates across a large key space, compactions must not
// create a file that overlaps too much data in the next level.
TEST(DBTest, SparseMerge) {
  Options options = CurrentOptions();
  options.compression = kNoCompression;
  Reopen(&options);

  FillLevels("A", "Z");

  // Suppose there is:
  //    small amount of data with prefix A
  //    large amount of data with prefix B
  //    small amount of data with prefix C
  // and that recent updates have made small changes to all three prefixes.
  // Check that we do not do a compaction that merges all of B in one shot.
  const std::string value(1000, 'x');
  Put("A", "va");
  // Write approximately 100MB of "B" values
  for (int i = 0; i < 100000; i++) {
    char key[100];
    snprintf(key, sizeof(key), "B%010d", i);
    Put(key, value);
  }
  Put("C", "vc");
  dbfull()->TEST_CompactMemTable();
  dbfull()->TEST_CompactRange(0, NULL, NULL);

  // Make sparse update
  Put("A",    "va2");
  Put("B100", "bvalue2");
  Put("C",    "vc2");
  dbfull()->TEST_CompactMemTable();

  // Compactions should not cause us to create a situation where
  // a file overlaps too much data at the next level.
  ASSERT_LE(dbfull()->TEST_MaxNextLevelOverlappingBytes(), 20*1048576);
  dbfull()->TEST_CompactRange(0, NULL, NULL);
  ASSERT_LE(dbfull()->TEST_MaxNextLevelOverlappingBytes(), 20*1048576);
  dbfull()->TEST_CompactRange(1, NULL, NULL);
  ASSERT_LE(dbfull()->TEST_MaxNextLevelOverlappingBytes(), 20*1048576);
}
  895. static bool Between(uint64_t val, uint64_t low, uint64_t high) {
  896. bool result = (val >= low) && (val <= high);
  897. if (!result) {
  898. fprintf(stderr, "Value %llu is not in range [%llu, %llu]\n",
  899. (unsigned long long)(val),
  900. (unsigned long long)(low),
  901. (unsigned long long)(high));
  902. }
  903. return result;
  904. }
// GetApproximateSizes(): empty DB and memtable-only data report ~0;
// once data reaches tables, range sizes must be roughly proportional to
// the number of 100K values they cover, and must stay consistent across
// reopens and incremental compactions.
TEST(DBTest, ApproximateSizes) {
  do {
    Options options = CurrentOptions();
    options.write_buffer_size = 100000000;        // Large write buffer
    options.compression = kNoCompression;
    DestroyAndReopen();

    ASSERT_TRUE(Between(Size("", "xyz"), 0, 0));
    Reopen(&options);
    ASSERT_TRUE(Between(Size("", "xyz"), 0, 0));

    // Write 8MB (80 values, each 100K)
    ASSERT_EQ(NumTableFilesAtLevel(0), 0);
    const int N = 80;
    static const int S1 = 100000;
    static const int S2 = 105000;  // Allow some expansion from metadata
    Random rnd(301);
    for (int i = 0; i < N; i++) {
      ASSERT_OK(Put(Key(i), RandomString(&rnd, S1)));
    }

    // 0 because GetApproximateSizes() does not account for memtable space
    ASSERT_TRUE(Between(Size("", Key(50)), 0, 0));

    // Check sizes across recovery by reopening a few times
    for (int run = 0; run < 3; run++) {
      Reopen(&options);

      for (int compact_start = 0; compact_start < N; compact_start += 10) {
        for (int i = 0; i < N; i += 10) {
          ASSERT_TRUE(Between(Size("", Key(i)), S1*i, S2*i));
          ASSERT_TRUE(Between(Size("", Key(i)+".suffix"), S1*(i+1), S2*(i+1)));
          ASSERT_TRUE(Between(Size(Key(i), Key(i+10)), S1*10, S2*10));
        }
        ASSERT_TRUE(Between(Size("", Key(50)), S1*50, S2*50));
        ASSERT_TRUE(Between(Size("", Key(50)+".suffix"), S1*50, S2*50));

        // Compact a 10-key slice of level 0 each pass.
        std::string cstart_str = Key(compact_start);
        std::string cend_str = Key(compact_start + 9);
        Slice cstart = cstart_str;
        Slice cend = cend_str;
        dbfull()->TEST_CompactRange(0, &cstart, &cend);
      }

      ASSERT_EQ(NumTableFilesAtLevel(0), 0);
      ASSERT_GT(NumTableFilesAtLevel(1), 0);
    }
  } while (ChangeOptions());
}
// Verifies GetApproximateSizes() with a mix of small (10K), large (100K) and
// very large (300K) values: prefix sizes must match the running total of the
// bytes written before each key, and the results must survive reopens.
TEST(DBTest, ApproximateSizes_MixOfSmallAndLarge) {
  do {
    Options options = CurrentOptions();
    options.compression = kNoCompression;
    Reopen();
    Random rnd(301);
    std::string big1 = RandomString(&rnd, 100000);
    ASSERT_OK(Put(Key(0), RandomString(&rnd, 10000)));
    ASSERT_OK(Put(Key(1), RandomString(&rnd, 10000)));
    ASSERT_OK(Put(Key(2), big1));
    ASSERT_OK(Put(Key(3), RandomString(&rnd, 10000)));
    ASSERT_OK(Put(Key(4), big1));
    ASSERT_OK(Put(Key(5), RandomString(&rnd, 10000)));
    ASSERT_OK(Put(Key(6), RandomString(&rnd, 300000)));
    ASSERT_OK(Put(Key(7), RandomString(&rnd, 10000)));

    // Check sizes across recovery by reopening a few times
    for (int run = 0; run < 3; run++) {
      Reopen(&options);

      // Each bound is the cumulative size of all values preceding the key.
      ASSERT_TRUE(Between(Size("", Key(0)), 0, 0));
      ASSERT_TRUE(Between(Size("", Key(1)), 10000, 11000));
      ASSERT_TRUE(Between(Size("", Key(2)), 20000, 21000));
      ASSERT_TRUE(Between(Size("", Key(3)), 120000, 121000));
      ASSERT_TRUE(Between(Size("", Key(4)), 130000, 131000));
      ASSERT_TRUE(Between(Size("", Key(5)), 230000, 231000));
      ASSERT_TRUE(Between(Size("", Key(6)), 240000, 241000));
      ASSERT_TRUE(Between(Size("", Key(7)), 540000, 541000));
      ASSERT_TRUE(Between(Size("", Key(8)), 550000, 560000));

      ASSERT_TRUE(Between(Size(Key(3), Key(5)), 110000, 111000));

      dbfull()->TEST_CompactRange(0, NULL, NULL);
    }
  } while (ChangeOptions());
}
// Verifies that a live iterator pins the DB state it was created against:
// writes (including enough bulk data to force compactions) made after the
// iterator is created must not be visible through it.
TEST(DBTest, IteratorPinsRef) {
  Put("foo", "hello");

  // Get iterator that will yield the current contents of the DB.
  Iterator* iter = db_->NewIterator(ReadOptions());

  // Write to force compactions
  Put("foo", "newvalue1");
  for (int i = 0; i < 100; i++) {
    ASSERT_OK(Put(Key(i), Key(i) + std::string(100000, 'v'))); // 100K values
  }
  Put("foo", "newvalue2");

  // Iterator still sees only the single pre-existing entry.
  iter->SeekToFirst();
  ASSERT_TRUE(iter->Valid());
  ASSERT_EQ("foo", iter->key().ToString());
  ASSERT_EQ("hello", iter->value().ToString());
  iter->Next();
  ASSERT_TRUE(!iter->Valid());
  delete iter;
}
// Verifies snapshot isolation: each snapshot observes the value current at
// the time it was taken, regardless of later writes, and releasing snapshots
// in arbitrary order leaves the remaining snapshots intact.
TEST(DBTest, Snapshot) {
  do {
    Put("foo", "v1");
    const Snapshot* s1 = db_->GetSnapshot();
    Put("foo", "v2");
    const Snapshot* s2 = db_->GetSnapshot();
    Put("foo", "v3");
    const Snapshot* s3 = db_->GetSnapshot();

    Put("foo", "v4");
    ASSERT_EQ("v1", Get("foo", s1));
    ASSERT_EQ("v2", Get("foo", s2));
    ASSERT_EQ("v3", Get("foo", s3));
    ASSERT_EQ("v4", Get("foo"));

    // Release the middle/newest snapshot first; older ones must be unaffected.
    db_->ReleaseSnapshot(s3);
    ASSERT_EQ("v1", Get("foo", s1));
    ASSERT_EQ("v2", Get("foo", s2));
    ASSERT_EQ("v4", Get("foo"));

    db_->ReleaseSnapshot(s1);
    ASSERT_EQ("v2", Get("foo", s2));
    ASSERT_EQ("v4", Get("foo"));

    db_->ReleaseSnapshot(s2);
    ASSERT_EQ("v4", Get("foo"));
  } while (ChangeOptions());
}
// Verifies that once the snapshot protecting an overwritten ("hidden") value
// is released, compaction drops the old value and reclaims its space.
TEST(DBTest, HiddenValuesAreRemoved) {
  do {
    Random rnd(301);
    FillLevels("a", "z");

    std::string big = RandomString(&rnd, 50000);
    Put("foo", big);
    Put("pastfoo", "v");
    const Snapshot* snapshot = db_->GetSnapshot();
    Put("foo", "tiny");
    Put("pastfoo2", "v2");  // Advance sequence number one more

    ASSERT_OK(dbfull()->TEST_CompactMemTable());
    ASSERT_GT(NumTableFilesAtLevel(0), 0);

    // While the snapshot is live, the big value must remain reachable and
    // still occupy space on disk.
    ASSERT_EQ(big, Get("foo", snapshot));
    ASSERT_TRUE(Between(Size("", "pastfoo"), 50000, 60000));
    db_->ReleaseSnapshot(snapshot);
    ASSERT_EQ(AllEntriesFor("foo"), "[ tiny, " + big + " ]");
    Slice x("x");
    dbfull()->TEST_CompactRange(0, NULL, &x);
    ASSERT_EQ(AllEntriesFor("foo"), "[ tiny ]");
    ASSERT_EQ(NumTableFilesAtLevel(0), 0);
    ASSERT_GE(NumTableFilesAtLevel(1), 1);
    dbfull()->TEST_CompactRange(1, NULL, &x);
    ASSERT_EQ(AllEntriesFor("foo"), "[ tiny ]");

    // Space for the dropped big value has been reclaimed.
    ASSERT_TRUE(Between(Size("", "pastfoo"), 0, 1000));
  } while (ChangeOptions());
}
// Verifies when a deletion marker can be dropped during compaction: a DEL
// that is shadowed by a newer value (v2) may be eliminated even before the
// base level is reached, while the overwritten v1 survives until its level
// is compacted.
TEST(DBTest, DeletionMarkers1) {
  Put("foo", "v1");
  ASSERT_OK(dbfull()->TEST_CompactMemTable());
  const int last = config::kMaxMemCompactLevel;
  ASSERT_EQ(NumTableFilesAtLevel(last), 1);   // foo => v1 is now in last level

  // Place a table at level last-1 to prevent merging with preceding mutation
  Put("a", "begin");
  Put("z", "end");
  dbfull()->TEST_CompactMemTable();
  ASSERT_EQ(NumTableFilesAtLevel(last), 1);
  ASSERT_EQ(NumTableFilesAtLevel(last-1), 1);

  Delete("foo");
  Put("foo", "v2");
  ASSERT_EQ(AllEntriesFor("foo"), "[ v2, DEL, v1 ]");
  ASSERT_OK(dbfull()->TEST_CompactMemTable());  // Moves to level last-2
  ASSERT_EQ(AllEntriesFor("foo"), "[ v2, DEL, v1 ]");
  Slice z("z");
  dbfull()->TEST_CompactRange(last-2, NULL, &z);
  // DEL eliminated, but v1 remains because we aren't compacting that level
  // (DEL can be eliminated because v2 hides v1).
  ASSERT_EQ(AllEntriesFor("foo"), "[ v2, v1 ]");
  dbfull()->TEST_CompactRange(last-1, NULL, NULL);
  // Merging last-1 w/ last, so we are the base level for "foo", so
  // DEL is removed.  (as is v1).
  ASSERT_EQ(AllEntriesFor("foo"), "[ v2 ]");
}
// Companion to DeletionMarkers1: with no newer value above it, the DEL must
// be KEPT as long as a deeper level may still contain the deleted key, and
// only disappears (together with v1) when compaction reaches the base level.
TEST(DBTest, DeletionMarkers2) {
  Put("foo", "v1");
  ASSERT_OK(dbfull()->TEST_CompactMemTable());
  const int last = config::kMaxMemCompactLevel;
  ASSERT_EQ(NumTableFilesAtLevel(last), 1);   // foo => v1 is now in last level

  // Place a table at level last-1 to prevent merging with preceding mutation
  Put("a", "begin");
  Put("z", "end");
  dbfull()->TEST_CompactMemTable();
  ASSERT_EQ(NumTableFilesAtLevel(last), 1);
  ASSERT_EQ(NumTableFilesAtLevel(last-1), 1);

  Delete("foo");
  ASSERT_EQ(AllEntriesFor("foo"), "[ DEL, v1 ]");
  ASSERT_OK(dbfull()->TEST_CompactMemTable());  // Moves to level last-2
  ASSERT_EQ(AllEntriesFor("foo"), "[ DEL, v1 ]");
  dbfull()->TEST_CompactRange(last-2, NULL, NULL);
  // DEL kept: "last" file overlaps
  ASSERT_EQ(AllEntriesFor("foo"), "[ DEL, v1 ]");
  dbfull()->TEST_CompactRange(last-1, NULL, NULL);
  // Merging last-1 w/ last, so we are the base level for "foo", so
  // DEL is removed.  (as is v1).
  ASSERT_EQ(AllEntriesFor("foo"), "[ ]");
}
// Regression test: a memtable compaction must detect overlap with existing
// level-0 files; before the fix a deletion could be pushed below the data it
// was supposed to delete, resurrecting the old value.
TEST(DBTest, OverlapInLevel0) {
  do {
    ASSERT_EQ(config::kMaxMemCompactLevel, 2) << "Fix test to match config";

    // Fill levels 1 and 2 to disable the pushing of new memtables to levels > 0.
    ASSERT_OK(Put("100", "v100"));
    ASSERT_OK(Put("999", "v999"));
    dbfull()->TEST_CompactMemTable();
    ASSERT_OK(Delete("100"));
    ASSERT_OK(Delete("999"));
    dbfull()->TEST_CompactMemTable();
    ASSERT_EQ("0,1,1", FilesPerLevel());

    // Make files spanning the following ranges in level-0:
    //  files[0]  200 .. 900
    //  files[1]  300 .. 500
    // Note that files are sorted by smallest key.
    ASSERT_OK(Put("300", "v300"));
    ASSERT_OK(Put("500", "v500"));
    dbfull()->TEST_CompactMemTable();
    ASSERT_OK(Put("200", "v200"));
    ASSERT_OK(Put("600", "v600"));
    ASSERT_OK(Put("900", "v900"));
    dbfull()->TEST_CompactMemTable();
    ASSERT_EQ("2,1,1", FilesPerLevel());

    // Compact away the placeholder files we created initially
    dbfull()->TEST_CompactRange(1, NULL, NULL);
    dbfull()->TEST_CompactRange(2, NULL, NULL);
    ASSERT_EQ("2", FilesPerLevel());

    // Do a memtable compaction.  Before bug-fix, the compaction would
    // not detect the overlap with level-0 files and would incorrectly place
    // the deletion in a deeper level.
    ASSERT_OK(Delete("600"));
    dbfull()->TEST_CompactMemTable();
    ASSERT_EQ("3", FilesPerLevel());
    ASSERT_EQ("NOT_FOUND", Get("600"));
  } while (ChangeOptions());
}
// Regression test for leveldb issue 44 (variant a): a sequence of puts,
// deletes, and reopens that historically caused an L0 compaction to lose the
// final (a->v) entry. Contents must be stable both immediately and after
// background compactions settle.
TEST(DBTest, L0_CompactionBug_Issue44_a) {
  Reopen();
  ASSERT_OK(Put("b", "v"));
  Reopen();
  ASSERT_OK(Delete("b"));
  ASSERT_OK(Delete("a"));
  Reopen();
  ASSERT_OK(Delete("a"));
  Reopen();
  ASSERT_OK(Put("a", "v"));
  Reopen();
  Reopen();
  ASSERT_EQ("(a->v)", Contents());
  env_->SleepForMicroseconds(1000000);  // Wait for compaction to finish
  ASSERT_EQ("(a->v)", Contents());
}
// Regression test for leveldb issue 44 (variant b): a longer interleaving of
// empty-key puts, deletes, and reopens. The surviving contents must be
// exactly (""->"") and ("c"->"cv"), before and after compactions finish.
TEST(DBTest, L0_CompactionBug_Issue44_b) {
  Reopen();
  Put("","");
  Reopen();
  Delete("e");
  Put("","");
  Reopen();
  Put("c", "cv");
  Reopen();
  Put("","");
  Reopen();
  Put("","");
  env_->SleepForMicroseconds(1000000);  // Wait for compaction to finish
  Reopen();
  Put("d","dv");
  Reopen();
  Put("","");
  Reopen();
  Delete("d");
  Delete("b");
  Reopen();
  ASSERT_EQ("(->)(c->cv)", Contents());
  env_->SleepForMicroseconds(1000000);  // Wait for compaction to finish
  ASSERT_EQ("(->)(c->cv)", Contents());
}
// Verifies that opening an existing DB with a comparator whose Name() differs
// from the one the DB was created with fails, and that the error message
// mentions the comparator mismatch.
TEST(DBTest, ComparatorCheck) {
  // Behaves exactly like the bytewise comparator but reports a new name,
  // which is what the open-time check must reject.
  class NewComparator : public Comparator {
   public:
    virtual const char* Name() const { return "leveldb.NewComparator"; }
    virtual int Compare(const Slice& a, const Slice& b) const {
      return BytewiseComparator()->Compare(a, b);
    }
    virtual void FindShortestSeparator(std::string* s, const Slice& l) const {
      BytewiseComparator()->FindShortestSeparator(s, l);
    }
    virtual void FindShortSuccessor(std::string* key) const {
      BytewiseComparator()->FindShortSuccessor(key);
    }
  };
  NewComparator cmp;
  Options new_options = CurrentOptions();
  new_options.comparator = &cmp;
  Status s = TryReopen(&new_options);
  ASSERT_TRUE(!s.ok());
  ASSERT_TRUE(s.ToString().find("comparator") != std::string::npos)
      << s.ToString();
}
// Exercises the DB with a custom comparator that orders keys of the form
// "[<number>]" numerically, so distinct spellings of the same number (e.g.
// "[20]" and "[0x14]") compare equal and retrieve each other's values.
TEST(DBTest, CustomComparator) {
  class NumberComparator : public Comparator {
   public:
    virtual const char* Name() const { return "test.NumberComparator"; }
    virtual int Compare(const Slice& a, const Slice& b) const {
      return ToNumber(a) - ToNumber(b);
    }
    virtual void FindShortestSeparator(std::string* s, const Slice& l) const {
      ToNumber(*s);     // Check format
      ToNumber(l);      // Check format
    }
    virtual void FindShortSuccessor(std::string* key) const {
      ToNumber(*key);   // Check format
    }
   private:
    static int ToNumber(const Slice& x) {
      // Check that there are no extra characters.
      ASSERT_TRUE(x.size() >= 2 && x[0] == '[' && x[x.size()-1] == ']')
          << EscapeString(x);
      int val;
      char ignored;
      // "%i" accepts decimal, octal and 0x-prefixed hex; sscanf returning
      // exactly 1 proves nothing follows the closing ']'.
      ASSERT_TRUE(sscanf(x.ToString().c_str(), "[%i]%c", &val, &ignored) == 1)
          << EscapeString(x);
      return val;
    }
  };
  NumberComparator cmp;
  Options new_options = CurrentOptions();
  new_options.create_if_missing = true;
  new_options.comparator = &cmp;
  new_options.filter_policy = NULL;     // Cannot use bloom filters
  new_options.write_buffer_size = 1000; // Compact more often
  DestroyAndReopen(&new_options);
  ASSERT_OK(Put("[10]", "ten"));
  ASSERT_OK(Put("[0x14]", "twenty"));
  for (int i = 0; i < 2; i++) {
    // Decimal and hex spellings of the same number are the same key.
    ASSERT_EQ("ten", Get("[10]"));
    ASSERT_EQ("ten", Get("[0xa]"));
    ASSERT_EQ("twenty", Get("[20]"));
    ASSERT_EQ("twenty", Get("[0x14]"));
    ASSERT_EQ("NOT_FOUND", Get("[15]"));
    ASSERT_EQ("NOT_FOUND", Get("[0xf]"));
    Compact("[0]", "[9999]");
  }

  // Bulk-load enough keys to force compactions under the custom ordering.
  for (int run = 0; run < 2; run++) {
    for (int i = 0; i < 1000; i++) {
      char buf[100];
      snprintf(buf, sizeof(buf), "[%d]", i*10);
      ASSERT_OK(Put(buf, buf));
    }
    Compact("[0]", "[1000000]");
  }
}
// Verifies manual compaction (CompactRange): ranges that miss all files are
// no-ops, overlapping ranges push data to the last level, and a NULL-NULL
// range compacts everything.
TEST(DBTest, ManualCompaction) {
  ASSERT_EQ(config::kMaxMemCompactLevel, 2)
      << "Need to update this test to match kMaxMemCompactLevel";

  MakeTables(3, "p", "q");
  ASSERT_EQ("1,1,1", FilesPerLevel());

  // Compaction range falls before files
  Compact("", "c");
  ASSERT_EQ("1,1,1", FilesPerLevel());

  // Compaction range falls after files
  Compact("r", "z");
  ASSERT_EQ("1,1,1", FilesPerLevel());

  // Compaction range overlaps files
  Compact("p1", "p9");
  ASSERT_EQ("0,0,1", FilesPerLevel());

  // Populate a different range
  MakeTables(3, "c", "e");
  ASSERT_EQ("1,1,2", FilesPerLevel());

  // Compact just the new range
  Compact("b", "f");
  ASSERT_EQ("0,0,2", FilesPerLevel());

  // Compact all
  MakeTables(1, "a", "z");
  ASSERT_EQ("0,1,2", FilesPerLevel());
  db_->CompactRange(NULL, NULL);
  ASSERT_EQ("0,0,1", FilesPerLevel());
}
// Verifies DB::Open honors the create_if_missing / error_if_exists option
// combinations, both for a missing and for an existing database directory.
TEST(DBTest, DBOpen_Options) {
  std::string dbname = test::TmpDir() + "/db_options_test";
  DestroyDB(dbname, Options());

  // Does not exist, and create_if_missing == false: error
  DB* db = NULL;
  Options opts;
  opts.create_if_missing = false;
  Status s = DB::Open(opts, dbname, &db);
  ASSERT_TRUE(strstr(s.ToString().c_str(), "does not exist") != NULL);
  ASSERT_TRUE(db == NULL);

  // Does not exist, and create_if_missing == true: OK
  opts.create_if_missing = true;
  s = DB::Open(opts, dbname, &db);
  ASSERT_OK(s);
  ASSERT_TRUE(db != NULL);

  delete db;
  db = NULL;

  // Does exist, and error_if_exists == true: error
  opts.create_if_missing = false;
  opts.error_if_exists = true;
  s = DB::Open(opts, dbname, &db);
  ASSERT_TRUE(strstr(s.ToString().c_str(), "exists") != NULL);
  ASSERT_TRUE(db == NULL);

  // Does exist, and error_if_exists == false: OK
  opts.create_if_missing = true;
  opts.error_if_exists = false;
  s = DB::Open(opts, dbname, &db);
  ASSERT_OK(s);
  ASSERT_TRUE(db != NULL);

  delete db;
  db = NULL;
}
// Verifies that the file lock prevents a second DB::Open on a database that
// is already open (db_ holds the lock via the test fixture).
TEST(DBTest, Locking) {
  DB* db2 = NULL;
  Status s = DB::Open(CurrentOptions(), dbname_, &db2);
  ASSERT_TRUE(!s.ok()) << "Locking did not prevent re-opening db";
}
// Check that number of files does not grow when we are out of space
// (simulated via SpecialEnv::no_space_), and that failed compactions back
// off by sleeping rather than retrying in a tight loop.
TEST(DBTest, NoSpace) {
  Options options = CurrentOptions();
  options.env = env_;
  Reopen(&options);

  ASSERT_OK(Put("foo", "v1"));
  ASSERT_EQ("v1", Get("foo"));
  Compact("a", "z");
  const int num_files = CountFiles();
  env_->no_space_.Release_Store(env_);   // Force out-of-space errors
  env_->sleep_counter_.Reset();
  for (int i = 0; i < 5; i++) {
    for (int level = 0; level < config::kNumLevels-1; level++) {
      dbfull()->TEST_CompactRange(level, NULL, NULL);
    }
  }
  env_->no_space_.Release_Store(NULL);
  ASSERT_LT(CountFiles(), num_files + 3);

  // Check that compaction attempts slept after errors
  ASSERT_GE(env_->sleep_counter_.Read(), 5);
}
// Simulates a non-writable file system (SpecialEnv::non_writable_) with a
// tiny write buffer so that Put() must create new files, and verifies that
// at least some writes surface errors instead of silently succeeding.
TEST(DBTest, NonWritableFileSystem) {
  Options options = CurrentOptions();
  options.write_buffer_size = 1000;
  options.env = env_;
  Reopen(&options);
  ASSERT_OK(Put("foo", "v1"));
  env_->non_writable_.Release_Store(env_);  // Force errors for new files
  std::string big(100000, 'x');
  int errors = 0;
  for (int i = 0; i < 20; i++) {
    fprintf(stderr, "iter %d; errors %d\n", i, errors);
    if (!Put("foo", big).ok()) {
      errors++;
      env_->SleepForMicroseconds(100000);
    }
  }
  ASSERT_GT(errors, 0);
  env_->non_writable_.Release_Store(NULL);
}
// Test for the following problem:
// (a) Compaction produces file F
// (b) Log record containing F is written to MANIFEST file, but Sync() fails
// (c) GC deletes F
// (d) After reopening DB, reads fail since deleted F is named in log record
//
// We iterate twice.  In the second iteration, everything is the
// same except the log record never makes it to the MANIFEST file.
TEST(DBTest, ManifestWriteError) {
  for (int iter = 0; iter < 2; iter++) {
    // iter 0: fail the MANIFEST Sync(); iter 1: fail the MANIFEST write.
    port::AtomicPointer* error_type = (iter == 0)
        ? &env_->manifest_sync_error_
        : &env_->manifest_write_error_;

    // Insert foo=>bar mapping
    Options options = CurrentOptions();
    options.env = env_;
    options.create_if_missing = true;
    options.error_if_exists = false;
    DestroyAndReopen(&options);
    ASSERT_OK(Put("foo", "bar"));
    ASSERT_EQ("bar", Get("foo"));

    // Memtable compaction (will succeed)
    dbfull()->TEST_CompactMemTable();
    ASSERT_EQ("bar", Get("foo"));
    const int last = config::kMaxMemCompactLevel;
    ASSERT_EQ(NumTableFilesAtLevel(last), 1);   // foo=>bar is now in last level

    // Merging compaction (will fail)
    error_type->Release_Store(env_);
    dbfull()->TEST_CompactRange(last, NULL, NULL);  // Should fail
    ASSERT_EQ("bar", Get("foo"));

    // Recovery: should not lose data
    error_type->Release_Store(NULL);
    Reopen(&options);
    ASSERT_EQ("bar", Get("foo"));
  }
}
// Verifies that with paranoid_checks enabled, reopening a DB after one of
// its SSTable files was deleted fails with a "missing"-style error instead
// of opening silently with data loss.
TEST(DBTest, MissingSSTFile) {
  ASSERT_OK(Put("foo", "bar"));
  ASSERT_EQ("bar", Get("foo"));

  // Dump the memtable to disk.
  dbfull()->TEST_CompactMemTable();
  ASSERT_EQ("bar", Get("foo"));

  ASSERT_TRUE(DeleteAnSSTFile());
  Options options = CurrentOptions();
  options.paranoid_checks = true;
  Status s = TryReopen(&options);
  ASSERT_TRUE(!s.ok());
  // Matches both "missing" and "Missing" in the status message.
  ASSERT_TRUE(s.ToString().find("issing") != std::string::npos)
      << s.ToString();
}
// Verifies that obsolete files are garbage-collected: repeatedly overwriting
// a key and compacting must not grow the on-disk file count.
TEST(DBTest, FilesDeletedAfterCompaction) {
  ASSERT_OK(Put("foo", "v2"));
  Compact("a", "z");
  const int num_files = CountFiles();
  for (int i = 0; i < 10; i++) {
    ASSERT_OK(Put("foo", "v2"));
    Compact("a", "z");
  }
  ASSERT_EQ(CountFiles(), num_files);
}
// Verifies that the bloom filter policy prunes sstable reads: lookups of
// present keys need roughly one read each, while lookups of missing keys
// almost never touch the sstables.
TEST(DBTest, BloomFilter) {
  env_->count_random_reads_ = true;
  Options options = CurrentOptions();
  options.env = env_;
  options.block_cache = NewLRUCache(0);  // Prevent cache hits
  options.filter_policy = NewBloomFilterPolicy(10);
  Reopen(&options);

  // Populate multiple layers
  const int N = 10000;
  for (int i = 0; i < N; i++) {
    ASSERT_OK(Put(Key(i), Key(i)));
  }
  Compact("a", "z");
  for (int i = 0; i < N; i += 100) {
    ASSERT_OK(Put(Key(i), Key(i)));
  }
  dbfull()->TEST_CompactMemTable();

  // Prevent auto compactions triggered by seeks
  env_->delay_sstable_sync_.Release_Store(env_);

  // Lookup present keys.  Should rarely read from small sstable.
  env_->random_read_counter_.Reset();
  for (int i = 0; i < N; i++) {
    ASSERT_EQ(Key(i), Get(Key(i)));
  }
  int reads = env_->random_read_counter_.Read();
  fprintf(stderr, "%d present => %d reads\n", N, reads);
  ASSERT_GE(reads, N);
  ASSERT_LE(reads, N + 2*N/100);

  // Lookup missing keys.  Should rarely read from either sstable.
  env_->random_read_counter_.Reset();
  for (int i = 0; i < N; i++) {
    ASSERT_EQ("NOT_FOUND", Get(Key(i) + ".missing"));
  }
  reads = env_->random_read_counter_.Read();
  fprintf(stderr, "%d missing => %d reads\n", N, reads);
  ASSERT_LE(reads, 3*N/100);

  env_->delay_sstable_sync_.Release_Store(NULL);
  Close();
  delete options.block_cache;
  delete options.filter_policy;
}
// Multi-threaded test:
namespace {

static const int kNumThreads = 4;
static const int kTestSeconds = 10;
static const int kNumKeys = 1000;

// Shared state for the multi-threaded test: a stop flag plus, per thread,
// an operation counter and a completion flag, all communicated through
// AtomicPointers (values are smuggled in the pointer bits).
struct MTState {
  DBTest* test;
  port::AtomicPointer stop;
  port::AtomicPointer counter[kNumThreads];
  port::AtomicPointer thread_done[kNumThreads];
};

// Per-thread argument: the shared state plus this thread's index.
struct MTThread {
  MTState* state;
  int id;
};

// Worker loop: until told to stop, randomly either writes a value of the
// form "<key>.<writer id>.<writer counter>" or reads a key and checks that
// the value it sees was written no later than the writer's current counter.
static void MTThreadBody(void* arg) {
  MTThread* t = reinterpret_cast<MTThread*>(arg);
  int id = t->id;
  DB* db = t->state->test->db_;
  uintptr_t counter = 0;
  fprintf(stderr, "... starting thread %d\n", id);
  Random rnd(1000 + id);
  std::string value;
  char valbuf[1500];
  while (t->state->stop.Acquire_Load() == NULL) {
    // Publish our progress before each operation so readers can bound it.
    t->state->counter[id].Release_Store(reinterpret_cast<void*>(counter));

    int key = rnd.Uniform(kNumKeys);
    char keybuf[20];
    snprintf(keybuf, sizeof(keybuf), "%016d", key);

    if (rnd.OneIn(2)) {
      // Write values of the form <key, my id, counter>.
      // We add some padding for force compactions.
      snprintf(valbuf, sizeof(valbuf), "%d.%d.%-1000d",
               key, id, static_cast<int>(counter));
      ASSERT_OK(db->Put(WriteOptions(), Slice(keybuf), Slice(valbuf)));
    } else {
      // Read a value and verify that it matches the pattern written above.
      Status s = db->Get(ReadOptions(), Slice(keybuf), &value);
      if (s.IsNotFound()) {
        // Key has not yet been written
      } else {
        // Check that the writer thread counter is >= the counter in the value
        ASSERT_OK(s);
        int k, w, c;
        ASSERT_EQ(3, sscanf(value.c_str(), "%d.%d.%d", &k, &w, &c)) << value;
        ASSERT_EQ(k, key);
        ASSERT_GE(w, 0);
        ASSERT_LT(w, kNumThreads);
        ASSERT_LE(c, reinterpret_cast<uintptr_t>(
            t->state->counter[w].Acquire_Load()));
      }
    }
    counter++;
  }
  t->state->thread_done[id].Release_Store(t);
  fprintf(stderr, "... stopping thread %d after %d ops\n", id, int(counter));
}

}  // namespace
// Launches kNumThreads MTThreadBody workers against the same DB, lets them
// hammer it for kTestSeconds, then signals stop and waits for each worker to
// report completion. Correctness checks live in MTThreadBody itself.
TEST(DBTest, MultiThreaded) {
  do {
    // Initialize state
    MTState mt;
    mt.test = this;
    mt.stop.Release_Store(0);
    for (int id = 0; id < kNumThreads; id++) {
      mt.counter[id].Release_Store(0);
      mt.thread_done[id].Release_Store(0);
    }

    // Start threads
    MTThread thread[kNumThreads];
    for (int id = 0; id < kNumThreads; id++) {
      thread[id].state = &mt;
      thread[id].id = id;
      env_->StartThread(MTThreadBody, &thread[id]);
    }

    // Let them run for a while
    env_->SleepForMicroseconds(kTestSeconds * 1000000);

    // Stop the threads and wait for them to finish
    mt.stop.Release_Store(&mt);
    for (int id = 0; id < kNumThreads; id++) {
      while (mt.thread_done[id].Acquire_Load() == NULL) {
        env_->SleepForMicroseconds(100000);
      }
    }
  } while (ChangeOptions());
}
namespace {
// Ordered key->value map used by ModelDB as the reference ("model")
// representation of database contents.
typedef std::map<std::string, std::string> KVMap;
}
// A trivially-correct in-memory implementation of the DB interface backed by
// a std::map. Used as the reference model in the Randomized test: real DB
// behavior is compared against this model via iterators.
class ModelDB: public DB {
 public:
  // Snapshot = a full copy of the map at GetSnapshot() time.
  class ModelSnapshot : public Snapshot {
   public:
    KVMap map_;
  };

  explicit ModelDB(const Options& options): options_(options) { }
  ~ModelDB() { }
  virtual Status Put(const WriteOptions& o, const Slice& k, const Slice& v) {
    return DB::Put(o, k, v);
  }
  virtual Status Delete(const WriteOptions& o, const Slice& key) {
    return DB::Delete(o, key);
  }
  // Point lookup is not used by the comparison harness.
  virtual Status Get(const ReadOptions& options,
                     const Slice& key, std::string* value) {
    assert(false);      // Not implemented
    return Status::NotFound(key);
  }
  virtual Iterator* NewIterator(const ReadOptions& options) {
    if (options.snapshot == NULL) {
      // No snapshot: iterate over a private copy so later writes to map_
      // are not visible through this iterator (owned => freed by iterator).
      KVMap* saved = new KVMap;
      *saved = map_;
      return new ModelIter(saved, true);
    } else {
      // Snapshot: iterate over the snapshot's map, which it continues to own.
      const KVMap* snapshot_state =
          &(reinterpret_cast<const ModelSnapshot*>(options.snapshot)->map_);
      return new ModelIter(snapshot_state, false);
    }
  }
  virtual const Snapshot* GetSnapshot() {
    ModelSnapshot* snapshot = new ModelSnapshot;
    snapshot->map_ = map_;
    return snapshot;
  }
  virtual void ReleaseSnapshot(const Snapshot* snapshot) {
    delete reinterpret_cast<const ModelSnapshot*>(snapshot);
  }
  // Applies a WriteBatch to map_ by replaying it through a handler.
  virtual Status Write(const WriteOptions& options, WriteBatch* batch) {
    class Handler : public WriteBatch::Handler {
     public:
      KVMap* map_;
      virtual void Put(const Slice& key, const Slice& value) {
        (*map_)[key.ToString()] = value.ToString();
      }
      virtual void Delete(const Slice& key) {
        map_->erase(key.ToString());
      }
    };
    Handler handler;
    handler.map_ = &map_;
    return batch->Iterate(&handler);
  }

  virtual bool GetProperty(const Slice& property, std::string* value) {
    return false;
  }
  virtual void GetApproximateSizes(const Range* r, int n, uint64_t* sizes) {
    for (int i = 0; i < n; i++) {
      sizes[i] = 0;  // The model does not track sizes.
    }
  }
  virtual void CompactRange(const Slice* start, const Slice* end) {
    // No-op: the model has no levels to compact.
  }

 private:
  // Iterator over a KVMap; optionally owns (and frees) the map.
  class ModelIter: public Iterator {
   public:
    ModelIter(const KVMap* map, bool owned)
        : map_(map), owned_(owned), iter_(map_->end()) {
    }
    ~ModelIter() {
      if (owned_) delete map_;
    }
    virtual bool Valid() const { return iter_ != map_->end(); }
    virtual void SeekToFirst() { iter_ = map_->begin(); }
    virtual void SeekToLast() {
      if (map_->empty()) {
        iter_ = map_->end();
      } else {
        iter_ = map_->find(map_->rbegin()->first);
      }
    }
    virtual void Seek(const Slice& k) {
      iter_ = map_->lower_bound(k.ToString());
    }
    virtual void Next() { ++iter_; }
    virtual void Prev() { --iter_; }
    virtual Slice key() const { return iter_->first; }
    virtual Slice value() const { return iter_->second; }
    virtual Status status() const { return Status::OK(); }
   private:
    const KVMap* const map_;
    const bool owned_;  // Do we own map_
    KVMap::const_iterator iter_;
  };
  const Options options_;
  KVMap map_;
};
  1637. static std::string RandomKey(Random* rnd) {
  1638. int len = (rnd->OneIn(3)
  1639. ? 1 // Short sometimes to encourage collisions
  1640. : (rnd->OneIn(100) ? rnd->Skewed(10) : rnd->Uniform(10)));
  1641. return test::RandomKey(rnd, len);
  1642. }
  1643. static bool CompareIterators(int step,
  1644. DB* model,
  1645. DB* db,
  1646. const Snapshot* model_snap,
  1647. const Snapshot* db_snap) {
  1648. ReadOptions options;
  1649. options.snapshot = model_snap;
  1650. Iterator* miter = model->NewIterator(options);
  1651. options.snapshot = db_snap;
  1652. Iterator* dbiter = db->NewIterator(options);
  1653. bool ok = true;
  1654. int count = 0;
  1655. for (miter->SeekToFirst(), dbiter->SeekToFirst();
  1656. ok && miter->Valid() && dbiter->Valid();
  1657. miter->Next(), dbiter->Next()) {
  1658. count++;
  1659. if (miter->key().compare(dbiter->key()) != 0) {
  1660. fprintf(stderr, "step %d: Key mismatch: '%s' vs. '%s'\n",
  1661. step,
  1662. EscapeString(miter->key()).c_str(),
  1663. EscapeString(dbiter->key()).c_str());
  1664. ok = false;
  1665. break;
  1666. }
  1667. if (miter->value().compare(dbiter->value()) != 0) {
  1668. fprintf(stderr, "step %d: Value mismatch for key '%s': '%s' vs. '%s'\n",
  1669. step,
  1670. EscapeString(miter->key()).c_str(),
  1671. EscapeString(miter->value()).c_str(),
  1672. EscapeString(miter->value()).c_str());
  1673. ok = false;
  1674. }
  1675. }
  1676. if (ok) {
  1677. if (miter->Valid() != dbiter->Valid()) {
  1678. fprintf(stderr, "step %d: Mismatch at end of iterators: %d vs. %d\n",
  1679. step, miter->Valid(), dbiter->Valid());
  1680. ok = false;
  1681. }
  1682. }
  1683. fprintf(stderr, "%d entries compared: ok=%d\n", count, ok);
  1684. delete miter;
  1685. delete dbiter;
  1686. return ok;
  1687. }
// Randomized differential test: applies the same random stream of Puts,
// Deletes, and multi-element WriteBatches to both a ModelDB and the real DB,
// and every 100 steps compares their full contents (live state and a saved
// snapshot), reopening the real DB to also exercise recovery.
TEST(DBTest, Randomized) {
  Random rnd(test::RandomSeed());
  do {
    ModelDB model(CurrentOptions());
    const int N = 10000;
    const Snapshot* model_snap = NULL;
    const Snapshot* db_snap = NULL;
    std::string k, v;
    for (int step = 0; step < N; step++) {
      if (step % 100 == 0) {
        fprintf(stderr, "Step %d of %d\n", step, N);
      }
      // TODO(sanjay): Test Get() works
      int p = rnd.Uniform(100);
      if (p < 45) {                               // Put
        k = RandomKey(&rnd);
        // Mostly small values, occasionally a larger one (100-199 bytes).
        v = RandomString(&rnd,
                         rnd.OneIn(20)
                         ? 100 + rnd.Uniform(100)
                         : rnd.Uniform(8));
        ASSERT_OK(model.Put(WriteOptions(), k, v));
        ASSERT_OK(db_->Put(WriteOptions(), k, v));

      } else if (p < 90) {                        // Delete
        k = RandomKey(&rnd);
        ASSERT_OK(model.Delete(WriteOptions(), k));
        ASSERT_OK(db_->Delete(WriteOptions(), k));


      } else {                                    // Multi-element batch
        WriteBatch b;
        const int num = rnd.Uniform(8);
        for (int i = 0; i < num; i++) {
          if (i == 0 || !rnd.OneIn(10)) {
            k = RandomKey(&rnd);
          } else {
            // Periodically re-use the same key from the previous iter, so
            // we have multiple entries in the write batch for the same key
          }
          if (rnd.OneIn(2)) {
            v = RandomString(&rnd, rnd.Uniform(10));
            b.Put(k, v);
          } else {
            b.Delete(k);
          }
        }
        ASSERT_OK(model.Write(WriteOptions(), &b));
        ASSERT_OK(db_->Write(WriteOptions(), &b));
      }

      if ((step % 100) == 0) {
        ASSERT_TRUE(CompareIterators(step, &model, db_, NULL, NULL));
        ASSERT_TRUE(CompareIterators(step, &model, db_, model_snap, db_snap));
        // Save a snapshot from each DB this time that we'll use next
        // time we compare things, to make sure the current state is
        // preserved with the snapshot
        if (model_snap != NULL) model.ReleaseSnapshot(model_snap);
        if (db_snap != NULL) db_->ReleaseSnapshot(db_snap);

        Reopen();
        ASSERT_TRUE(CompareIterators(step, &model, db_, NULL, NULL));

        model_snap = model.GetSnapshot();
        db_snap = db_->GetSnapshot();
      }
    }
    if (model_snap != NULL) model.ReleaseSnapshot(model_snap);
    if (db_snap != NULL) db_->ReleaseSnapshot(db_snap);
  } while (ChangeOptions());
}
  1752. std::string MakeKey(unsigned int num) {
  1753. char buf[30];
  1754. snprintf(buf, sizeof(buf), "%016u", num);
  1755. return std::string(buf);
  1756. }
  1757. void BM_LogAndApply(int iters, int num_base_files) {
  1758. std::string dbname = test::TmpDir() + "/leveldb_test_benchmark";
  1759. DestroyDB(dbname, Options());
  1760. DB* db = NULL;
  1761. Options opts;
  1762. opts.create_if_missing = true;
  1763. Status s = DB::Open(opts, dbname, &db);
  1764. ASSERT_OK(s);
  1765. ASSERT_TRUE(db != NULL);
  1766. delete db;
  1767. db = NULL;
  1768. Env* env = Env::Default();
  1769. port::Mutex mu;
  1770. MutexLock l(&mu);
  1771. InternalKeyComparator cmp(BytewiseComparator());
  1772. Options options;
  1773. VersionSet vset(dbname, &options, NULL, &cmp);
  1774. ASSERT_OK(vset.Recover());
  1775. VersionEdit vbase;
  1776. uint64_t fnum = 1;
  1777. for (int i = 0; i < num_base_files; i++) {
  1778. InternalKey start(MakeKey(2*fnum), 1, kTypeValue);
  1779. InternalKey limit(MakeKey(2*fnum+1), 1, kTypeDeletion);
  1780. vbase.AddFile(2, fnum++, 1 /* file size */, start, limit);
  1781. }
  1782. ASSERT_OK(vset.LogAndApply(&vbase, &mu));
  1783. uint64_t start_micros = env->NowMicros();
  1784. for (int i = 0; i < iters; i++) {
  1785. VersionEdit vedit;
  1786. vedit.DeleteFile(2, fnum);
  1787. InternalKey start(MakeKey(2*fnum), 1, kTypeValue);
  1788. InternalKey limit(MakeKey(2*fnum+1), 1, kTypeDeletion);
  1789. vedit.AddFile(2, fnum++, 1 /* file size */, start, limit);
  1790. vset.LogAndApply(&vedit, &mu);
  1791. }
  1792. uint64_t stop_micros = env->NowMicros();
  1793. unsigned int us = stop_micros - start_micros;
  1794. char buf[16];
  1795. snprintf(buf, sizeof(buf), "%d", num_base_files);
  1796. fprintf(stderr,
  1797. "BM_LogAndApply/%-6s %8d iters : %9u us (%7.0f us / iter)\n",
  1798. buf, iters, us, ((float)us) / iters);
  1799. }
  1800. } // namespace leveldb
  1801. int main(int argc, char** argv) {
  1802. if (argc > 1 && std::string(argv[1]) == "--benchmark") {
  1803. leveldb::BM_LogAndApply(1000, 1);
  1804. leveldb::BM_LogAndApply(1000, 100);
  1805. leveldb::BM_LogAndApply(1000, 10000);
  1806. leveldb::BM_LogAndApply(100, 100000);
  1807. return 0;
  1808. }
  1809. return leveldb::test::RunAllTests();
  1810. }