Group members: Yao Kaiwen (kevinyao0901), Jiang Jiaqi


// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#include <sys/types.h>
#include <stdio.h>
#include <stdlib.h>
#include "db/db_impl.h"
#include "db/version_set.h"
#include "leveldb/cache.h"
#include "leveldb/db.h"
#include "leveldb/env.h"
#include "leveldb/write_batch.h"
#include "port/port.h"
#include "util/crc32c.h"
#include "util/histogram.h"
#include "util/random.h"
#include "util/testutil.h"

// Comma-separated list of operations to run in the specified order
//   Actual benchmarks:
//     fillseq     -- write N values in sequential key order in async mode
//     fillrandom  -- write N values in random key order in async mode
//     overwrite   -- overwrite N values in random key order in async mode
//     fillsync    -- write N/100 values in random key order in sync mode
//     fill100K    -- write N/1000 100K values in random order in async mode
//     readseq     -- read N times sequentially
//     readreverse -- read N times in reverse order
//     readrandom  -- read N times in random order
//     readhot     -- read N times in random order from 1% section of DB
//     crc32c      -- repeated crc32c of 4K of data
//   Meta operations:
//     compact     -- Compact the entire DB
//     stats       -- Print DB stats
//     heapprofile -- Dump a heap profile (if supported by this port)
static const char* FLAGS_benchmarks =
    "fillseq,"
    "fillsync,"
    "fillrandom,"
    "overwrite,"
    "readrandom,"
    "readrandom,"  // Extra run to allow previous compactions to quiesce
    "readseq,"
    "readreverse,"
    "compact,"
    "readrandom,"
    "readseq,"
    "readreverse,"
    "fill100K,"
    "crc32c,"
    "snappycomp,"
    "snappyuncomp,"
    ;
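
// Example invocation (illustrative note, not part of the original file): the
// list above is what runs by default; a subset can be selected at the command
// line via the --benchmarks flag parsed in main(). The binary name "db_bench"
// is an assumption about how this file is built:
//
//   ./db_bench --benchmarks=fillseq,readrandom,readseq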
// Number of key/values to place in database
static int FLAGS_num = 1000000;

// Number of read operations to do. If negative, do FLAGS_num reads.
static int FLAGS_reads = -1;

// Size of each value
static int FLAGS_value_size = 100;

// Arrange to generate values that shrink to this fraction of
// their original size after compression
static double FLAGS_compression_ratio = 0.5;

// Print histogram of operation timings
static bool FLAGS_histogram = false;

// Number of bytes to buffer in memtable before compacting
// (initialized to default value by "main")
static int FLAGS_write_buffer_size = 0;

// Number of bytes to use as a cache of uncompressed data.
// Negative means use default settings.
static int FLAGS_cache_size = -1;

// Maximum number of files to keep open at the same time (use default if == 0)
static int FLAGS_open_files = 0;

// If true, do not destroy the existing database. If you set this
// flag and also specify a benchmark that wants a fresh database, that
// benchmark will fail.
static bool FLAGS_use_existing_db = false;
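
// Example flag overrides (illustrative note, not part of the original file;
// all of these flags are parsed in main(), and the values shown are arbitrary,
// not recommendations):
//
//   ./db_bench --num=100000 --value_size=1000 --cache_size=104857600 \
//              --histogram=1 --use_existing_db=0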
namespace leveldb {

// Helper for quickly generating random data.
namespace {

class RandomGenerator {
 private:
  std::string data_;
  int pos_;

 public:
  RandomGenerator() {
    // We use a limited amount of data over and over again and ensure
    // that it is larger than the compression window (32KB), and also
    // large enough to serve all typical value sizes we want to write.
    Random rnd(301);
    std::string piece;
    while (data_.size() < 1048576) {
      // Add a short fragment that is as compressible as specified
      // by FLAGS_compression_ratio.
      test::CompressibleString(&rnd, FLAGS_compression_ratio, 100, &piece);
      data_.append(piece);
    }
    pos_ = 0;
  }

  Slice Generate(int len) {
    if (pos_ + len > data_.size()) {
      pos_ = 0;
      assert(len < data_.size());
    }
    pos_ += len;
    return Slice(data_.data() + pos_ - len, len);
  }
};
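
// Usage sketch (illustrative note, not part of the original file; this is how
// the Benchmark class below consumes the generator):
//
//   RandomGenerator gen;
//   Slice value = gen.Generate(FLAGS_value_size);  // slice of the 1MB buffer,
//                                                  // ~50% compressible with
//                                                  // the default ratio of 0.5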
static Slice TrimSpace(Slice s) {
  int start = 0;
  while (start < s.size() && isspace(s[start])) {
    start++;
  }
  int limit = s.size();
  while (limit > start && isspace(s[limit-1])) {
    limit--;
  }
  return Slice(s.data() + start, limit - start);
}

}

class Benchmark {
 private:
  Cache* cache_;
  DB* db_;
  int num_;
  int reads_;
  int heap_counter_;
  double start_;
  double last_op_finish_;
  int64_t bytes_;
  std::string message_;
  std::string post_message_;
  Histogram hist_;
  RandomGenerator gen_;
  Random rand_;

  // State kept for progress messages
  int done_;
  int next_report_;  // When to report next

  void PrintHeader() {
    const int kKeySize = 16;
    PrintEnvironment();
    fprintf(stdout, "Keys: %d bytes each\n", kKeySize);
    fprintf(stdout, "Values: %d bytes each (%d bytes after compression)\n",
            FLAGS_value_size,
            static_cast<int>(FLAGS_value_size * FLAGS_compression_ratio + 0.5));
    fprintf(stdout, "Entries: %d\n", num_);
    fprintf(stdout, "RawSize: %.1f MB (estimated)\n",
            ((static_cast<int64_t>(kKeySize + FLAGS_value_size) * num_)
             / 1048576.0));
    fprintf(stdout, "FileSize: %.1f MB (estimated)\n",
            (((kKeySize + FLAGS_value_size * FLAGS_compression_ratio) * num_)
             / 1048576.0));
    PrintWarnings();
    fprintf(stdout, "------------------------------------------------\n");
  }

  void PrintWarnings() {
#if defined(__GNUC__) && !defined(__OPTIMIZE__)
    fprintf(stdout,
            "WARNING: Optimization is disabled: benchmarks unnecessarily slow\n"
            );
#endif
#ifndef NDEBUG
    fprintf(stdout,
            "WARNING: Assertions are enabled; benchmarks unnecessarily slow\n");
#endif

    // See if snappy is working by attempting to compress a compressible string
    const char text[] = "yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy";
    std::string compressed;
    if (!port::Snappy_Compress(text, sizeof(text), &compressed)) {
      fprintf(stdout, "WARNING: Snappy compression is not enabled\n");
    } else if (compressed.size() >= sizeof(text)) {
      fprintf(stdout, "WARNING: Snappy compression is not effective\n");
    }
  }

  void PrintEnvironment() {
    fprintf(stderr, "LevelDB: version %d.%d\n",
            kMajorVersion, kMinorVersion);

#if defined(__linux)
    time_t now = time(NULL);
    fprintf(stderr, "Date: %s", ctime(&now));  // ctime() adds newline

    FILE* cpuinfo = fopen("/proc/cpuinfo", "r");
    if (cpuinfo != NULL) {
      char line[1000];
      int num_cpus = 0;
      std::string cpu_type;
      std::string cache_size;
      while (fgets(line, sizeof(line), cpuinfo) != NULL) {
        const char* sep = strchr(line, ':');
        if (sep == NULL) {
          continue;
        }
        Slice key = TrimSpace(Slice(line, sep - 1 - line));
        Slice val = TrimSpace(Slice(sep + 1));
        if (key == "model name") {
          ++num_cpus;
          cpu_type = val.ToString();
        } else if (key == "cache size") {
          cache_size = val.ToString();
        }
      }
      fclose(cpuinfo);
      fprintf(stderr, "CPU: %d * %s\n", num_cpus, cpu_type.c_str());
      fprintf(stderr, "CPUCache: %s\n", cache_size.c_str());
    }
#endif
  }

  void Start() {
    start_ = Env::Default()->NowMicros() * 1e-6;
    bytes_ = 0;
    message_.clear();
    last_op_finish_ = start_;
    hist_.Clear();
    done_ = 0;
    next_report_ = 100;
  }

  void FinishedSingleOp() {
    if (FLAGS_histogram) {
      double now = Env::Default()->NowMicros() * 1e-6;
      double micros = (now - last_op_finish_) * 1e6;
      hist_.Add(micros);
      if (micros > 20000) {
        fprintf(stderr, "long op: %.1f micros%30s\r", micros, "");
        fflush(stderr);
      }
      last_op_finish_ = now;
    }

    done_++;
    if (done_ >= next_report_) {
      if      (next_report_ < 1000)   next_report_ += 100;
      else if (next_report_ < 5000)   next_report_ += 500;
      else if (next_report_ < 10000)  next_report_ += 1000;
      else if (next_report_ < 50000)  next_report_ += 5000;
      else if (next_report_ < 100000) next_report_ += 10000;
      else if (next_report_ < 500000) next_report_ += 50000;
      else                            next_report_ += 100000;
      fprintf(stderr, "... finished %d ops%30s\r", done_, "");
      fflush(stderr);
    }
  }
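
  // Note (added for clarity, not in the original file): with the schedule
  // above, progress is reported after 100, 200, ..., 1000 ops, then every
  // 500 up to 5000, every 1000 up to 10000, and so on, so the reporting
  // interval keeps growing and console output stays sparse for long runs.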
  void Stop(const Slice& name) {
    double finish = Env::Default()->NowMicros() * 1e-6;

    // Pretend at least one op was done in case we are running a benchmark
    // that does not call FinishedSingleOp().
    if (done_ < 1) done_ = 1;

    if (bytes_ > 0) {
      char rate[100];
      snprintf(rate, sizeof(rate), "%6.1f MB/s",
               (bytes_ / 1048576.0) / (finish - start_));
      if (!message_.empty()) {
        message_ = std::string(rate) + " " + message_;
      } else {
        message_ = rate;
      }
    }

    fprintf(stdout, "%-12s : %11.3f micros/op;%s%s\n",
            name.ToString().c_str(),
            (finish - start_) * 1e6 / done_,
            (message_.empty() ? "" : " "),
            message_.c_str());
    if (FLAGS_histogram) {
      fprintf(stdout, "Microseconds per op:\n%s\n", hist_.ToString().c_str());
    }
    fflush(stdout);

    if (!post_message_.empty()) {
      fprintf(stdout, "\n%s\n", post_message_.c_str());
      post_message_.clear();
    }
  }
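
  // Worked example (hypothetical numbers, added for illustration): writing
  // 1,000,000 entries of ~116 bytes each (16-byte key + 100-byte value) in
  // 10 seconds would report 10.000 micros/op and a rate of
  // (116000000 / 1048576) / 10, i.e. roughly 11.1 MB/s.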
 public:
  enum Order {
    SEQUENTIAL,
    RANDOM
  };
  enum DBState {
    FRESH,
    EXISTING
  };

  Benchmark()
  : cache_(FLAGS_cache_size >= 0 ? NewLRUCache(FLAGS_cache_size) : NULL),
    db_(NULL),
    num_(FLAGS_num),
    reads_(FLAGS_reads < 0 ? FLAGS_num : FLAGS_reads),
    heap_counter_(0),
    bytes_(0),
    rand_(301) {
    std::vector<std::string> files;
    Env::Default()->GetChildren("/tmp/dbbench", &files);
    for (int i = 0; i < files.size(); i++) {
      if (Slice(files[i]).starts_with("heap-")) {
        Env::Default()->DeleteFile("/tmp/dbbench/" + files[i]);
      }
    }
    if (!FLAGS_use_existing_db) {
      DestroyDB("/tmp/dbbench", Options());
    }
  }

  ~Benchmark() {
    delete db_;
    delete cache_;
  }

  void Run() {
    PrintHeader();
    Open();

    const char* benchmarks = FLAGS_benchmarks;
    while (benchmarks != NULL) {
      const char* sep = strchr(benchmarks, ',');
      Slice name;
      if (sep == NULL) {
        name = benchmarks;
        benchmarks = NULL;
      } else {
        name = Slice(benchmarks, sep - benchmarks);
        benchmarks = sep + 1;
      }

      Start();

      WriteOptions write_options;
      bool known = true;
      if (name == Slice("fillseq")) {
        Write(write_options, SEQUENTIAL, FRESH, num_, FLAGS_value_size, 1);
      } else if (name == Slice("fillbatch")) {
        Write(write_options, SEQUENTIAL, FRESH, num_, FLAGS_value_size, 1000);
      } else if (name == Slice("fillrandom")) {
        Write(write_options, RANDOM, FRESH, num_, FLAGS_value_size, 1);
      } else if (name == Slice("overwrite")) {
        Write(write_options, RANDOM, EXISTING, num_, FLAGS_value_size, 1);
      } else if (name == Slice("fillsync")) {
        write_options.sync = true;
        Write(write_options, RANDOM, FRESH, num_ / 100, FLAGS_value_size, 1);
      } else if (name == Slice("fill100K")) {
        Write(write_options, RANDOM, FRESH, num_ / 1000, 100 * 1000, 1);
      } else if (name == Slice("readseq")) {
        ReadSequential();
      } else if (name == Slice("readreverse")) {
        ReadReverse();
      } else if (name == Slice("readrandom")) {
        ReadRandom();
      } else if (name == Slice("readhot")) {
        ReadHot();
      } else if (name == Slice("readrandomsmall")) {
        int n = reads_;
        reads_ /= 1000;
        ReadRandom();
        reads_ = n;
      } else if (name == Slice("compact")) {
        Compact();
      } else if (name == Slice("crc32c")) {
        Crc32c(4096, "(4K per op)");
      } else if (name == Slice("snappycomp")) {
        SnappyCompress();
      } else if (name == Slice("snappyuncomp")) {
        SnappyUncompress();
      } else if (name == Slice("heapprofile")) {
        HeapProfile();
      } else if (name == Slice("stats")) {
        PrintStats();
      } else {
        known = false;
        if (name != Slice()) {  // No error message for empty name
          fprintf(stderr, "unknown benchmark '%s'\n", name.ToString().c_str());
        }
      }
      if (known) {
        Stop(name);
      }
    }
  }

 private:
  void Crc32c(int size, const char* label) {
    // Checksum about 500MB of data total
    std::string data(size, 'x');
    int64_t bytes = 0;
    uint32_t crc = 0;
    while (bytes < 500 * 1048576) {
      crc = crc32c::Value(data.data(), size);
      FinishedSingleOp();
      bytes += size;
    }
    // Print so result is not dead
    fprintf(stderr, "... crc=0x%x\r", static_cast<unsigned int>(crc));

    bytes_ = bytes;
    message_ = label;
  }
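
  // Note (added for clarity, not in the original file): with the 4K block
  // size passed from Run(), the loop above performs
  // 500 * 1048576 / 4096 = 128,000 checksum operations before reporting.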
  void SnappyCompress() {
    Slice input = gen_.Generate(Options().block_size);
    int64_t bytes = 0;
    int64_t produced = 0;
    bool ok = true;
    std::string compressed;
    while (ok && bytes < 1024 * 1048576) {  // Compress 1G
      ok = port::Snappy_Compress(input.data(), input.size(), &compressed);
      produced += compressed.size();
      bytes += input.size();
      FinishedSingleOp();
    }

    if (!ok) {
      message_ = "(snappy failure)";
    } else {
      char buf[100];
      snprintf(buf, sizeof(buf), "(output: %.1f%%)",
               (produced * 100.0) / bytes);
      message_ = buf;
      bytes_ = bytes;
    }
  }

  void SnappyUncompress() {
    Slice input = gen_.Generate(Options().block_size);
    std::string compressed;
    bool ok = port::Snappy_Compress(input.data(), input.size(), &compressed);
    int64_t bytes = 0;
    std::string uncompressed;
    while (ok && bytes < 1024 * 1048576) {  // Uncompress 1G worth of data
      ok = port::Snappy_Uncompress(compressed.data(), compressed.size(),
                                   &uncompressed);
      bytes += uncompressed.size();
      FinishedSingleOp();
    }

    if (!ok) {
      message_ = "(snappy failure)";
    } else {
      bytes_ = bytes;
    }
  }

  void Open() {
    assert(db_ == NULL);
    Options options;
    options.create_if_missing = !FLAGS_use_existing_db;
    options.block_cache = cache_;
    options.write_buffer_size = FLAGS_write_buffer_size;
    Status s = DB::Open(options, "/tmp/dbbench", &db_);
    if (!s.ok()) {
      fprintf(stderr, "open error: %s\n", s.ToString().c_str());
      exit(1);
    }
  }

  void Write(const WriteOptions& options, Order order, DBState state,
             int num_entries, int value_size, int entries_per_batch) {
    if (state == FRESH) {
      if (FLAGS_use_existing_db) {
        message_ = "skipping (--use_existing_db is true)";
        return;
      }
      delete db_;
      db_ = NULL;
      DestroyDB("/tmp/dbbench", Options());
      Open();
      Start();  // Do not count time taken to destroy/open
    }

    if (num_entries != num_) {
      char msg[100];
      snprintf(msg, sizeof(msg), "(%d ops)", num_entries);
      message_ = msg;
    }

    WriteBatch batch;
    Status s;
    std::string val;
    for (int i = 0; i < num_entries; i += entries_per_batch) {
      batch.Clear();
      for (int j = 0; j < entries_per_batch; j++) {
        const int k = (order == SEQUENTIAL) ? i+j : (rand_.Next() % FLAGS_num);
        char key[100];
        snprintf(key, sizeof(key), "%016d", k);
        batch.Put(key, gen_.Generate(value_size));
        bytes_ += value_size + strlen(key);
        FinishedSingleOp();
      }
      s = db_->Write(options, &batch);
      if (!s.ok()) {
        fprintf(stderr, "put error: %s\n", s.ToString().c_str());
        exit(1);
      }
    }
  }
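
  // Note on keys (added for clarity, not in the original file): every write
  // and read benchmark formats its key as a 16-digit zero-padded decimal,
  // so k = 42 becomes "0000000000000042". Sequential fills emit keys in
  // increasing order; random fills and reads draw k uniformly from
  // [0, FLAGS_num).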
  void ReadSequential() {
    Iterator* iter = db_->NewIterator(ReadOptions());
    int i = 0;
    for (iter->SeekToFirst(); i < reads_ && iter->Valid(); iter->Next()) {
      bytes_ += iter->key().size() + iter->value().size();
      FinishedSingleOp();
      ++i;
    }
    delete iter;
  }

  void ReadReverse() {
    Iterator* iter = db_->NewIterator(ReadOptions());
    int i = 0;
    for (iter->SeekToLast(); i < reads_ && iter->Valid(); iter->Prev()) {
      bytes_ += iter->key().size() + iter->value().size();
      FinishedSingleOp();
      ++i;
    }
    delete iter;
  }

  void ReadRandom() {
    ReadOptions options;
    std::string value;
    for (int i = 0; i < reads_; i++) {
      char key[100];
      const int k = rand_.Next() % FLAGS_num;
      snprintf(key, sizeof(key), "%016d", k);
      db_->Get(options, key, &value);
      FinishedSingleOp();
    }
  }

  void ReadHot() {
    ReadOptions options;
    std::string value;
    const int range = (FLAGS_num + 99) / 100;
    for (int i = 0; i < reads_; i++) {
      char key[100];
      const int k = rand_.Next() % range;
      snprintf(key, sizeof(key), "%016d", k);
      db_->Get(options, key, &value);
      FinishedSingleOp();
    }
  }

  void Compact() {
    DBImpl* dbi = reinterpret_cast<DBImpl*>(db_);
    dbi->TEST_CompactMemTable();
    int max_level_with_files = 1;
    for (int level = 1; level < config::kNumLevels; level++) {
      std::string property;
      char name[100];
      snprintf(name, sizeof(name), "leveldb.num-files-at-level%d", level);
      if (db_->GetProperty(name, &property) && atoi(property.c_str()) > 0) {
        max_level_with_files = level;
      }
    }
    for (int level = 0; level < max_level_with_files; level++) {
      dbi->TEST_CompactRange(level, "", "~");
    }
  }
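
  // Note (added for clarity, not in the original file): the range ("", "~")
  // passed to TEST_CompactRange covers every key written by this benchmark,
  // since the keys consist only of ASCII digits, which sort before '~'.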
  void PrintStats() {
    std::string stats;
    if (!db_->GetProperty("leveldb.stats", &stats)) {
      message_ = "(failed)";
    } else {
      post_message_ = stats;
    }
  }

  static void WriteToFile(void* arg, const char* buf, int n) {
    reinterpret_cast<WritableFile*>(arg)->Append(Slice(buf, n));
  }

  void HeapProfile() {
    char fname[100];
    snprintf(fname, sizeof(fname), "/tmp/dbbench/heap-%04d", ++heap_counter_);
    WritableFile* file;
    Status s = Env::Default()->NewWritableFile(fname, &file);
    if (!s.ok()) {
      message_ = s.ToString();
      return;
    }
    bool ok = port::GetHeapProfile(WriteToFile, file);
    delete file;
    if (!ok) {
      message_ = "not supported";
      Env::Default()->DeleteFile(fname);
    }
  }
};

}

int main(int argc, char** argv) {
  FLAGS_write_buffer_size = leveldb::Options().write_buffer_size;
  FLAGS_open_files = leveldb::Options().max_open_files;

  for (int i = 1; i < argc; i++) {
    double d;
    int n;
    char junk;
    if (leveldb::Slice(argv[i]).starts_with("--benchmarks=")) {
      FLAGS_benchmarks = argv[i] + strlen("--benchmarks=");
    } else if (sscanf(argv[i], "--compression_ratio=%lf%c", &d, &junk) == 1) {
      FLAGS_compression_ratio = d;
    } else if (sscanf(argv[i], "--histogram=%d%c", &n, &junk) == 1 &&
               (n == 0 || n == 1)) {
      FLAGS_histogram = n;
    } else if (sscanf(argv[i], "--use_existing_db=%d%c", &n, &junk) == 1 &&
               (n == 0 || n == 1)) {
      FLAGS_use_existing_db = n;
    } else if (sscanf(argv[i], "--num=%d%c", &n, &junk) == 1) {
      FLAGS_num = n;
    } else if (sscanf(argv[i], "--reads=%d%c", &n, &junk) == 1) {
      FLAGS_reads = n;
    } else if (sscanf(argv[i], "--value_size=%d%c", &n, &junk) == 1) {
      FLAGS_value_size = n;
    } else if (sscanf(argv[i], "--write_buffer_size=%d%c", &n, &junk) == 1) {
      FLAGS_write_buffer_size = n;
    } else if (sscanf(argv[i], "--cache_size=%d%c", &n, &junk) == 1) {
      FLAGS_cache_size = n;
    } else if (sscanf(argv[i], "--open_files=%d%c", &n, &junk) == 1) {
      FLAGS_open_files = n;
    } else {
      fprintf(stderr, "Invalid flag '%s'\n", argv[i]);
      exit(1);
    }
  }

  leveldb::Benchmark benchmark;
  benchmark.Run();
  return 0;
}