Provide basic TTL test cases
You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

569 lines
15 KiB

Release 1.18 Changes are: * Update version number to 1.18 * Replace the basic fprintf call with a call to fwrite in order to work around the apparent compiler optimization/rewrite failure that we are seeing with the new toolchain/iOS SDKs provided with Xcode6 and iOS8. * Fix ALL the header guards. * Created a README.md with the LevelDB project description. * A new CONTRIBUTING file. * Don't implicitly convert uint64_t to size_t or int. Either preserve it as uint64_t, or explicitly cast. This fixes MSVC warnings about possible value truncation when compiling this code in Chromium. * Added a DumpFile() library function that encapsulates the guts of the "leveldbutil dump" command. This will allow clients to dump data to their log files instead of stdout. It will also allow clients to supply their own environment. * leveldb: Remove unused function 'ConsumeChar'. * leveldbutil: Remove unused member variables from WriteBatchItemPrinter. * OpenBSD, NetBSD and DragonflyBSD have _LITTLE_ENDIAN, so define PLATFORM_IS_LITTLE_ENDIAN like on FreeBSD. This fixes: * issue #143 * issue #198 * issue #249 * Switch from <cstdatomic> to <atomic>. The former never made it into the standard and doesn't exist in modern gcc versions at all. The latter contains everything that leveldb was using from the former. This problem was noticed when porting to Portable Native Client where no memory barrier is defined. The fact that <cstdatomic> is missing normally goes unnoticed since memory barriers are defined for most architectures. * Make Hash() treat its input as unsigned. Before this change LevelDB files from platforms with different signedness of char were not compatible. This change fixes: issue #243 * Verify checksums of index/meta/filter blocks when paranoid_checks set. * Invoke all tools for iOS with xcrun. (This was causing problems with the new XCode 5.1.1 image on pulse.)
* include <sys/stat.h> only once, and fix the following linter warning: "Found C system header after C++ system header" * When encountering a corrupted table file, return Status::Corruption instead of Status::InvalidArgument. * Support cygwin as build platform, patch is from https://code.google.com/p/leveldb/issues/detail?id=188 * Fix typo, merge patch from https://code.google.com/p/leveldb/issues/detail?id=159 * Fix typos and comments, and address the following two issues: * issue #166 * issue #241 * Add missing db synchronize after "fillseq" in the benchmark. * Removed unused variable in SeekRandom: value (issue #201)
10 years ago
  1. // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
  2. // Use of this source code is governed by a BSD-style license that can be
  3. // found in the LICENSE file. See the AUTHORS file for names of contributors.
  4. #include "db/log_reader.h"
  5. #include "db/log_writer.h"
  6. #include "leveldb/env.h"
  7. #include "util/coding.h"
  8. #include "util/crc32c.h"
  9. #include "util/random.h"
  10. #include "util/testharness.h"
  11. namespace leveldb {
  12. namespace log {
  13. // Construct a string of the specified length made out of the supplied
  14. // partial string.
  15. static std::string BigString(const std::string& partial_string, size_t n) {
  16. std::string result;
  17. while (result.size() < n) {
  18. result.append(partial_string);
  19. }
  20. result.resize(n);
  21. return result;
  22. }
  23. // Construct a string from a number
  24. static std::string NumberString(int n) {
  25. char buf[50];
  26. snprintf(buf, sizeof(buf), "%d.", n);
  27. return std::string(buf);
  28. }
// Return a skewed potentially long string: NumberString(i) repeated to
// a pseudo-random length taken from rnd->Skewed(17) -- presumably a
// length biased toward small values, up to ~2^17 bytes; see
// util/random.h for the exact distribution.
static std::string RandomSkewedString(int i, Random* rnd) {
  return BigString(NumberString(i), rnd->Skewed(17));
}
// Harness shared by all log tests.  Records are written through a
// log::Writer into an in-memory byte string (StringDest) and read back
// through a log::Reader over a Slice of those same bytes
// (StringSource).  Helper methods can corrupt individual bytes, shrink
// the "file", force read errors, and re-open the reader at an
// arbitrary initial offset.
class LogTest {
 private:
  // WritableFile that accumulates everything appended to it in a string.
  class StringDest : public WritableFile {
   public:
    std::string contents_;

    virtual Status Close() { return Status::OK(); }
    virtual Status Flush() { return Status::OK(); }
    virtual Status Sync() { return Status::OK(); }
    virtual Status Append(const Slice& slice) {
      contents_.append(slice.data(), slice.size());
      return Status::OK();
    }
  };

  // SequentialFile over a Slice.  Can simulate a one-shot read error,
  // and asserts if Read() is called again after eof/error has already
  // been returned.
  class StringSource : public SequentialFile {
   public:
    Slice contents_;
    bool force_error_;       // if set, the next Read() fails with Corruption
    bool returned_partial_;  // set once eof or an error has been returned
    StringSource() : force_error_(false), returned_partial_(false) { }

    virtual Status Read(size_t n, Slice* result, char* scratch) {
      ASSERT_TRUE(!returned_partial_) << "must not Read() after eof/error";
      if (force_error_) {
        force_error_ = false;
        returned_partial_ = true;
        return Status::Corruption("read error");
      }
      if (contents_.size() < n) {
        // Short read: treat as end of file.
        n = contents_.size();
        returned_partial_ = true;
      }
      *result = Slice(contents_.data(), n);
      contents_.remove_prefix(n);
      return Status::OK();
    }

    virtual Status Skip(uint64_t n) {
      if (n > contents_.size()) {
        contents_.clear();
        return Status::NotFound("in-memory file skipped past end");
      }
      contents_.remove_prefix(n);
      return Status::OK();
    }
  };

  // Reader::Reporter that tallies dropped bytes and concatenates every
  // corruption message for later inspection by the tests.
  class ReportCollector : public Reader::Reporter {
   public:
    size_t dropped_bytes_;
    std::string message_;
    ReportCollector() : dropped_bytes_(0) { }
    virtual void Corruption(size_t bytes, const Status& status) {
      dropped_bytes_ += bytes;
      message_.append(status.ToString());
    }
  };

  StringDest dest_;
  StringSource source_;
  ReportCollector report_;
  bool reading_;    // true once the first Read() happened; Write() asserts
                    // against being called after that point
  Writer* writer_;
  Reader* reader_;

  // Record metadata for testing initial offset functionality
  static size_t initial_offset_record_sizes_[];
  static uint64_t initial_offset_last_record_offsets_[];

 public:
  LogTest() : reading_(false),
              writer_(new Writer(&dest_)),
              reader_(new Reader(&source_, &report_, true/*checksum*/,
                      0/*initial_offset*/)) {
  }

  ~LogTest() {
    delete writer_;
    delete reader_;
  }

  // Re-create the writer positioned at the current end of the written
  // bytes, as if re-opening an existing log file for append.
  void ReopenForAppend() {
    delete writer_;
    writer_ = new Writer(&dest_, dest_.contents_.size());
  }

  // Append one record to the log.  Only valid before the first Read().
  void Write(const std::string& msg) {
    ASSERT_TRUE(!reading_) << "Write() after starting to read";
    writer_->AddRecord(Slice(msg));
  }

  // Physical size of the log written so far (headers included).
  size_t WrittenBytes() const {
    return dest_.contents_.size();
  }

  // Read the next record, or "EOF" if none remain.  The first call
  // snapshots the written bytes into the source.
  std::string Read() {
    if (!reading_) {
      reading_ = true;
      source_.contents_ = Slice(dest_.contents_);
    }
    std::string scratch;
    Slice record;
    if (reader_->ReadRecord(&record, &scratch)) {
      return record.ToString();
    } else {
      return "EOF";
    }
  }

  // --- Corruption helpers; they operate on the raw written bytes. ---

  void IncrementByte(int offset, int delta) {
    dest_.contents_[offset] += delta;
  }

  void SetByte(int offset, char new_byte) {
    dest_.contents_[offset] = new_byte;
  }

  // Truncate the log by dropping its last |bytes| bytes.
  void ShrinkSize(int bytes) {
    dest_.contents_.resize(dest_.contents_.size() - bytes);
  }

  // Re-seal a deliberately corrupted record: recompute the masked CRC
  // over the type byte plus the |len| payload bytes (header layout:
  // checksum in bytes 0..3, length in 4..5, type in 6) and store it
  // back, so only the intended corruption remains detectable.
  void FixChecksum(int header_offset, int len) {
    // Compute crc over the type byte and the payload that follows it.
    uint32_t crc = crc32c::Value(&dest_.contents_[header_offset+6], 1 + len);
    crc = crc32c::Mask(crc);
    EncodeFixed32(&dest_.contents_[header_offset], crc);
  }

  // Make the next source Read() fail with a corruption error.
  void ForceError() {
    source_.force_error_ = true;
  }

  // Total bytes the reader reported as dropped so far.
  size_t DroppedBytes() const {
    return report_.dropped_bytes_;
  }

  // Concatenation of all corruption messages reported so far.
  std::string ReportMessage() const {
    return report_.message_;
  }

  // Returns OK iff recorded error message contains "msg";
  // otherwise returns the full recorded message for diagnosis.
  std::string MatchError(const std::string& msg) const {
    if (report_.message_.find(msg) == std::string::npos) {
      return report_.message_;
    } else {
      return "OK";
    }
  }

  // Write the four records described by initial_offset_record_sizes_;
  // record i consists of the byte 'a' + i repeated.
  void WriteInitialOffsetLog() {
    for (int i = 0; i < 4; i++) {
      std::string record(initial_offset_record_sizes_[i],
                         static_cast<char>('a' + i));
      Write(record);
    }
  }

  // Replace the reader with one starting at |initial_offset|.
  void StartReadingAt(uint64_t initial_offset) {
    delete reader_;
    reader_ = new Reader(&source_, &report_, true/*checksum*/, initial_offset);
  }

  // Verify that a reader whose initial offset lies |offset_past_end|
  // bytes beyond the written data returns no records at all.
  void CheckOffsetPastEndReturnsNoRecords(uint64_t offset_past_end) {
    WriteInitialOffsetLog();
    reading_ = true;
    source_.contents_ = Slice(dest_.contents_);
    Reader* offset_reader = new Reader(&source_, &report_, true/*checksum*/,
                                       WrittenBytes() + offset_past_end);
    Slice record;
    std::string scratch;
    ASSERT_TRUE(!offset_reader->ReadRecord(&record, &scratch));
    delete offset_reader;
  }

  // Verify that a reader starting at |initial_offset| returns, as its
  // first record, record number |expected_record_offset| of the
  // initial-offset log -- checking its size, its reported physical
  // offset, and its first payload byte.
  void CheckInitialOffsetRecord(uint64_t initial_offset,
                                int expected_record_offset) {
    WriteInitialOffsetLog();
    reading_ = true;
    source_.contents_ = Slice(dest_.contents_);
    Reader* offset_reader = new Reader(&source_, &report_, true/*checksum*/,
                                       initial_offset);
    Slice record;
    std::string scratch;
    ASSERT_TRUE(offset_reader->ReadRecord(&record, &scratch));
    ASSERT_EQ(initial_offset_record_sizes_[expected_record_offset],
              record.size());
    ASSERT_EQ(initial_offset_last_record_offsets_[expected_record_offset],
              offset_reader->LastRecordOffset());
    ASSERT_EQ((char)('a' + expected_record_offset), record.data()[0]);
    delete offset_reader;
  }
};
// Payload sizes of the four records written by WriteInitialOffsetLog().
size_t LogTest::initial_offset_record_sizes_[] =
    {10000,  // Two sizable records in first block
     10000,
     2 * log::kBlockSize - 1000,  // Span three blocks
     1};
// Physical offset at which each record of WriteInitialOffsetLog()
// starts.  Each entry is the previous entry plus that record's
// header(s) and payload; the third record spans three blocks and so
// carries three fragment headers.
uint64_t LogTest::initial_offset_last_record_offsets_[] =
    {0,
     kHeaderSize + 10000,
     2 * (kHeaderSize + 10000),
     2 * (kHeaderSize + 10000) +
     (2 * log::kBlockSize - 1000) + 3 * kHeaderSize};
// An empty log returns EOF immediately.
TEST(LogTest, Empty) {
  ASSERT_EQ("EOF", Read());
}

// Records come back in write order -- including the empty record --
// and reads past the last record keep returning EOF.
TEST(LogTest, ReadWrite) {
  Write("foo");
  Write("bar");
  Write("");
  Write("xxxx");
  ASSERT_EQ("foo", Read());
  ASSERT_EQ("bar", Read());
  ASSERT_EQ("", Read());
  ASSERT_EQ("xxxx", Read());
  ASSERT_EQ("EOF", Read());
  ASSERT_EQ("EOF", Read());  // Make sure reads at eof work
}
// Enough small records to span many log blocks; all must round-trip.
TEST(LogTest, ManyBlocks) {
  for (int i = 0; i < 100000; i++) {
    Write(NumberString(i));
  }
  for (int i = 0; i < 100000; i++) {
    ASSERT_EQ(NumberString(i), Read());
  }
  ASSERT_EQ("EOF", Read());
}

// Records larger than one block must be fragmented on write and
// reassembled on read.
TEST(LogTest, Fragmentation) {
  Write("small");
  Write(BigString("medium", 50000));
  Write(BigString("large", 100000));
  ASSERT_EQ("small", Read());
  ASSERT_EQ(BigString("medium", 50000), Read());
  ASSERT_EQ(BigString("large", 100000), Read());
  ASSERT_EQ("EOF", Read());
}
// Leave exactly kHeaderSize bytes of space at the end of the first
// block: the empty record's header fits flush against the boundary.
TEST(LogTest, MarginalTrailer) {
  // Make a trailer that is exactly the same length as an empty record.
  const int n = kBlockSize - 2*kHeaderSize;
  Write(BigString("foo", n));
  ASSERT_EQ(kBlockSize - kHeaderSize, WrittenBytes());
  Write("");
  Write("bar");
  ASSERT_EQ(BigString("foo", n), Read());
  ASSERT_EQ("", Read());
  ASSERT_EQ("bar", Read());
  ASSERT_EQ("EOF", Read());
}

// Same marginal-trailer layout, and additionally verify that no bytes
// are dropped and no corruption is reported.
TEST(LogTest, MarginalTrailer2) {
  // Make a trailer that is exactly the same length as an empty record.
  const int n = kBlockSize - 2*kHeaderSize;
  Write(BigString("foo", n));
  ASSERT_EQ(kBlockSize - kHeaderSize, WrittenBytes());
  Write("bar");
  ASSERT_EQ(BigString("foo", n), Read());
  ASSERT_EQ("bar", Read());
  ASSERT_EQ("EOF", Read());
  ASSERT_EQ(0, DroppedBytes());
  ASSERT_EQ("", ReportMessage());
}

// Trailer smaller than a record header: subsequent records must still
// read back correctly from the following block.
TEST(LogTest, ShortTrailer) {
  const int n = kBlockSize - 2*kHeaderSize + 4;
  Write(BigString("foo", n));
  ASSERT_EQ(kBlockSize - kHeaderSize + 4, WrittenBytes());
  Write("");
  Write("bar");
  ASSERT_EQ(BigString("foo", n), Read());
  ASSERT_EQ("", Read());
  ASSERT_EQ("bar", Read());
  ASSERT_EQ("EOF", Read());
}

// The file ends right after a too-short trailer; the reader must reach
// a clean EOF with no spurious records.
TEST(LogTest, AlignedEof) {
  const int n = kBlockSize - 2*kHeaderSize + 4;
  Write(BigString("foo", n));
  ASSERT_EQ(kBlockSize - kHeaderSize + 4, WrittenBytes());
  ASSERT_EQ(BigString("foo", n), Read());
  ASSERT_EQ("EOF", Read());
}
// Records written before and after ReopenForAppend() read back in order.
TEST(LogTest, OpenForAppend) {
  Write("hello");
  ReopenForAppend();
  Write("world");
  ASSERT_EQ("hello", Read());
  ASSERT_EQ("world", Read());
  ASSERT_EQ("EOF", Read());
}

// Many records of skewed pseudo-random lengths round-trip intact; the
// writer and the checker use Random instances with the same seed (301)
// so they generate identical expected strings.
TEST(LogTest, RandomRead) {
  const int N = 500;
  Random write_rnd(301);
  for (int i = 0; i < N; i++) {
    Write(RandomSkewedString(i, &write_rnd));
  }
  Random read_rnd(301);
  for (int i = 0; i < N; i++) {
    ASSERT_EQ(RandomSkewedString(i, &read_rnd), Read());
  }
  ASSERT_EQ("EOF", Read());
}
// Tests of all the error paths in log_reader.cc follow:

// A forced read error is reported and accounts for a whole block of
// dropped bytes.
TEST(LogTest, ReadError) {
  Write("foo");
  ForceError();
  ASSERT_EQ("EOF", Read());
  ASSERT_EQ(kBlockSize, DroppedBytes());
  ASSERT_EQ("OK", MatchError("read error"));
}

// Corrupting the type byte (with the checksum re-sealed so only the
// type is wrong) triggers the "unknown record type" path; only the
// record's 3 payload bytes are dropped.
TEST(LogTest, BadRecordType) {
  Write("foo");
  // Type is stored in header[6]
  IncrementByte(6, 100);
  FixChecksum(0, 3);
  ASSERT_EQ("EOF", Read());
  ASSERT_EQ(3, DroppedBytes());
  ASSERT_EQ("OK", MatchError("unknown record type"));
}

// A record cut short at the very end of the file is treated as an
// incomplete trailing write, not as corruption.
TEST(LogTest, TruncatedTrailingRecordIsIgnored) {
  Write("foo");
  ShrinkSize(4);  // Drop all payload as well as a header byte
  ASSERT_EQ("EOF", Read());
  // Truncated last record is ignored, not treated as an error.
  ASSERT_EQ(0, DroppedBytes());
  ASSERT_EQ("", ReportMessage());
}

// A corrupted length field that is NOT at the end of the file is a
// real error: the damaged block is dropped but later records survive.
TEST(LogTest, BadLength) {
  const int kPayloadSize = kBlockSize - kHeaderSize;
  Write(BigString("bar", kPayloadSize));
  Write("foo");
  // Least significant size byte is stored in header[4].
  IncrementByte(4, 1);
  ASSERT_EQ("foo", Read());
  ASSERT_EQ(kBlockSize, DroppedBytes());
  ASSERT_EQ("OK", MatchError("bad record length"));
}

// ...but a bad length at the very end of the file looks like a
// truncated write and is silently ignored.
TEST(LogTest, BadLengthAtEndIsIgnored) {
  Write("foo");
  ShrinkSize(1);
  ASSERT_EQ("EOF", Read());
  ASSERT_EQ(0, DroppedBytes());
  ASSERT_EQ("", ReportMessage());
}

// Corrupting the stored checksum drops the record -- header (7) plus
// payload (3) = 10 bytes -- and reports a checksum mismatch.
TEST(LogTest, ChecksumMismatch) {
  Write("foo");
  IncrementByte(0, 10);
  ASSERT_EQ("EOF", Read());
  ASSERT_EQ(10, DroppedBytes());
  ASSERT_EQ("OK", MatchError("checksum mismatch"));
}
// A MIDDLE fragment with no preceding FIRST is reported as corruption.
TEST(LogTest, UnexpectedMiddleType) {
  Write("foo");
  SetByte(6, kMiddleType);
  FixChecksum(0, 3);
  ASSERT_EQ("EOF", Read());
  ASSERT_EQ(3, DroppedBytes());
  ASSERT_EQ("OK", MatchError("missing start"));
}

// A LAST fragment with no preceding FIRST is likewise corruption.
TEST(LogTest, UnexpectedLastType) {
  Write("foo");
  SetByte(6, kLastType);
  FixChecksum(0, 3);
  ASSERT_EQ("EOF", Read());
  ASSERT_EQ(3, DroppedBytes());
  ASSERT_EQ("OK", MatchError("missing start"));
}

// Rewrite the first record's type to FIRST so it becomes a partial
// record with no end; the following FULL record must still be
// returned, and the dangling fragment reported.
TEST(LogTest, UnexpectedFullType) {
  Write("foo");
  Write("bar");
  SetByte(6, kFirstType);
  FixChecksum(0, 3);
  ASSERT_EQ("bar", Read());
  ASSERT_EQ("EOF", Read());
  ASSERT_EQ(3, DroppedBytes());
  ASSERT_EQ("OK", MatchError("partial record without end"));
}

// Same, but the dangling FIRST is followed by a genuinely fragmented
// record (which itself begins with FIRST); the later record survives.
TEST(LogTest, UnexpectedFirstType) {
  Write("foo");
  Write(BigString("bar", 100000));
  SetByte(6, kFirstType);
  FixChecksum(0, 3);
  ASSERT_EQ(BigString("bar", 100000), Read());
  ASSERT_EQ("EOF", Read());
  ASSERT_EQ(3, DroppedBytes());
  ASSERT_EQ("OK", MatchError("partial record without end"));
}
// The LAST fragment of a spanning record is missing entirely: treated
// as a truncated tail write and ignored without any error report.
TEST(LogTest, MissingLastIsIgnored) {
  Write(BigString("bar", kBlockSize));
  // Remove the LAST block, including header.
  ShrinkSize(14);
  ASSERT_EQ("EOF", Read());
  ASSERT_EQ("", ReportMessage());
  ASSERT_EQ(0, DroppedBytes());
}

// The LAST fragment is present but truncated mid-record: also ignored.
TEST(LogTest, PartialLastIsIgnored) {
  Write(BigString("bar", kBlockSize));
  // Cause a bad record length in the LAST block.
  ShrinkSize(1);
  ASSERT_EQ("EOF", Read());
  ASSERT_EQ("", ReportMessage());
  ASSERT_EQ(0, DroppedBytes());
}

TEST(LogTest, SkipIntoMultiRecord) {
  // Consider a fragmented record:
  //    first(R1), middle(R1), last(R1), first(R2)
  // If initial_offset points to a record after first(R1) but before first(R2)
  // incomplete fragment errors are not actual errors, and must be suppressed
  // until a new first or full record is encountered.
  Write(BigString("foo", 3*kBlockSize));
  Write("correct");
  StartReadingAt(kBlockSize);
  ASSERT_EQ("correct", Read());
  ASSERT_EQ("", ReportMessage());
  ASSERT_EQ(0, DroppedBytes());
  ASSERT_EQ("EOF", Read());
}

TEST(LogTest, ErrorJoinsRecords) {
  // Consider two fragmented records:
  //    first(R1) last(R1) first(R2) last(R2)
  // where the middle two fragments disappear.  We do not want
  // first(R1),last(R2) to get joined and returned as a valid record.

  // Write records that span two blocks
  Write(BigString("foo", kBlockSize));
  Write(BigString("bar", kBlockSize));
  Write("correct");

  // Wipe the middle block
  for (int offset = kBlockSize; offset < 2*kBlockSize; offset++) {
    SetByte(offset, 'x');
  }

  ASSERT_EQ("correct", Read());
  ASSERT_EQ("EOF", Read());
  // Both damaged records should be counted as dropped, give or take
  // some header bytes.
  const size_t dropped = DroppedBytes();
  ASSERT_LE(dropped, 2*kBlockSize + 100);
  ASSERT_GE(dropped, 2*kBlockSize);
}
// The following tests sweep the reader's initial_offset across the log
// produced by WriteInitialOffsetLog(); the second argument of
// CheckInitialOffsetRecord is the index of the record expected first.

TEST(LogTest, ReadStart) {
  CheckInitialOffsetRecord(0, 0);
}

TEST(LogTest, ReadSecondOneOff) {
  CheckInitialOffsetRecord(1, 1);
}

TEST(LogTest, ReadSecondTenThousand) {
  CheckInitialOffsetRecord(10000, 1);
}

// 10007 == kHeaderSize + 10000: exactly the second record's start.
TEST(LogTest, ReadSecondStart) {
  CheckInitialOffsetRecord(10007, 1);
}

// One byte past the second record's start resolves to the third.
TEST(LogTest, ReadThirdOneOff) {
  CheckInitialOffsetRecord(10008, 2);
}

// 20014 == 2 * (kHeaderSize + 10000): exactly the third record's start.
TEST(LogTest, ReadThirdStart) {
  CheckInitialOffsetRecord(20014, 2);
}

TEST(LogTest, ReadFourthOneOff) {
  CheckInitialOffsetRecord(20015, 3);
}

// Offsets that land inside the block-spanning third record should all
// resolve to the fourth record.
TEST(LogTest, ReadFourthFirstBlockTrailer) {
  CheckInitialOffsetRecord(log::kBlockSize - 4, 3);
}

TEST(LogTest, ReadFourthMiddleBlock) {
  CheckInitialOffsetRecord(log::kBlockSize + 1, 3);
}

TEST(LogTest, ReadFourthLastBlock) {
  CheckInitialOffsetRecord(2 * log::kBlockSize + 1, 3);
}
  471. TEST(LogTest, ReadFourthStart) {
  472. CheckInitialOffsetRecord(
  473. 2 * (kHeaderSize + 1000) + (2 * log::kBlockSize - 1000) + 3 * kHeaderSize,
  474. 3);
  475. }
// Starting exactly at the end of the written data yields no records.
TEST(LogTest, ReadEnd) {
  CheckOffsetPastEndReturnsNoRecords(0);
}

// Starting beyond the end of the written data also yields no records.
TEST(LogTest, ReadPastEnd) {
  CheckOffsetPastEndReturnsNoRecords(5);
}
  482. } // namespace log
  483. } // namespace leveldb
// Test entry point: runs every test registered via the TEST macro
// (see util/testharness.h for RunAllTests and its exit semantics).
int main(int argc, char** argv) {
  return leveldb::test::RunAllTests();
}