Compare commits

...

No commits in common. 'master' and 'ckx' have entirely different histories.
master ... ckx

525 changed files with 619 additions and 141038 deletions
Split View
  1. +102
    -0
      .github/workflows/build.yml
  2. +2
    -0
      .gitignore
  3. +1
    -1
      .gitmodules
  4. +14
    -21
      CMakeLists.txt
  5. +31
    -0
      CONTRIBUTING.md
  6. +3
    -246
      README.md
  7. +0
    -16
      TODO
  8. +0
    -5
      UseGuaide.txt
  9. +8
    -6
      benchmarks/db_bench.cc
  10. +4
    -0
      benchmarks/db_bench_log.cc
  11. +4
    -0
      benchmarks/db_bench_sqlite3.cc
  12. +4
    -0
      benchmarks/db_bench_tree_db.cc
  13. +4
    -0
      db/autocompact_test.cc
  14. +6
    -15
      db/builder.cc
  15. +5
    -2
      db/builder.h
  16. +5
    -2
      db/c.cc
  17. +4
    -0
      db/c_test.c
  18. +1
    -2
      db/corruption_test.cc
  19. +27
    -192
      db/db_impl.cc
  20. +6
    -37
      db/db_impl.h
  21. +4
    -24
      db/db_test.cc
  22. +2
    -11
      db/filename.cc
  23. +1
    -7
      db/filename.h
  24. +0
    -4
      db/log_test.cc
  25. +4
    -6
      db/repair.cc
  26. +0
    -20
      db/vlog_converter.cc
  27. +0
    -19
      db/vlog_converter.h
  28. +0
    -33
      db/vlog_manager.cc
  29. +0
    -61
      db/vlog_manager.h
  30. +0
    -58
      db/vlog_reader.cc
  31. +0
    -25
      db/vlog_reader.h
  32. +0
    -26
      db/vlog_writer.cc
  33. +0
    -26
      db/vlog_writer.h
  34. +31
    -18
      db/write_batch.cc
  35. +1
    -1
      db/write_batch_internal.h
  36. +0
    -45
      draw.py
  37. +0
    -50
      examples/GCtest.cc
  38. +0
    -28
      examples/ValueConvertTest.cc
  39. +0
    -173
      examples/WiscKeyTest_1.cc
  40. +0
    -81
      examples/WiscKeyTest_1.h
  41. +0
    -76
      examples/iterator_test.cc
  42. +0
    -43
      examples/kv_sep_test.cc
  43. +0
    -23
      examples/main.cc
  44. +0
    -25
      examples/test_1.cc
  45. +0
    -3
      gitpush.sh
  46. +0
    -8
      helpers/memenv/memenv.cc
  47. BIN
      images/1.png
  48. BIN
      images/10.png
  49. BIN
      images/11.png
  50. BIN
      images/12.webp
  51. BIN
      images/13.png
  52. BIN
      images/14.png
  53. BIN
      images/15.png
  54. BIN
      images/16.png
  55. BIN
      images/2.png
  56. BIN
      images/3.png
  57. BIN
      images/4.png
  58. BIN
      images/5.png
  59. BIN
      images/6.png
  60. BIN
      images/7.png
  61. BIN
      images/8.png
  62. BIN
      images/9.png
  63. BIN
      images/GC_test.png
  64. BIN
      images/c1.1.png
  65. BIN
      images/c1.2.png
  66. BIN
      images/c1.png
  67. BIN
      images/c10.png
  68. BIN
      images/c2.png
  69. BIN
      images/c3.png
  70. BIN
      images/c4.png
  71. BIN
      images/c5.png
  72. BIN
      images/c6.png
  73. BIN
      images/c7.png
  74. BIN
      images/c8.png
  75. BIN
      images/c9.png
  76. BIN
      images/field_test.png
  77. BIN
      images/iterate_test.png
  78. BIN
      images/kv_sep_test.png
  79. +14
    -3
      include/leveldb/db.h
  80. +0
    -22
      include/leveldb/env.h
  81. +0
    -21
      include/leveldb/options.h
  82. +3
    -0
      include/leveldb/slice.h
  83. +3
    -0
      include/leveldb/write_batch.h
  84. +0
    -0
      prefetch.txt
  85. +74
    -0
      test/db_test2.cc
  86. +26
    -25
      test/field_test.cc
  87. +114
    -0
      test/ttl_test.cc
  88. +1
    -0
      third_party/benchmark
  89. +0
    -5
      third_party/benchmark/.clang-format
  90. +0
    -7
      third_party/benchmark/.clang-tidy
  91. +0
    -32
      third_party/benchmark/.github/ISSUE_TEMPLATE/bug_report.md
  92. +0
    -20
      third_party/benchmark/.github/ISSUE_TEMPLATE/feature_request.md
  93. +0
    -13
      third_party/benchmark/.github/install_bazel.sh
  94. +0
    -27
      third_party/benchmark/.github/libcxx-setup.sh
  95. +0
    -35
      third_party/benchmark/.github/workflows/bazel.yml
  96. +0
    -46
      third_party/benchmark/.github/workflows/build-and-test-min-cmake.yml
  97. +0
    -51
      third_party/benchmark/.github/workflows/build-and-test-perfcounters.yml
  98. +0
    -114
      third_party/benchmark/.github/workflows/build-and-test.yml
  99. +0
    -17
      third_party/benchmark/.github/workflows/clang-format-lint.yml
  100. +0
    -38
      third_party/benchmark/.github/workflows/clang-tidy.yml

+ 102
- 0
.github/workflows/build.yml View File

@ -0,0 +1,102 @@
# Copyright 2021 The LevelDB Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file. See the AUTHORS file for names of contributors.
name: ci
on: [push, pull_request]
permissions:
contents: read
jobs:
build-and-test:
name: >-
CI
${{ matrix.os }}
${{ matrix.compiler }}
${{ matrix.optimized && 'release' || 'debug' }}
runs-on: ${{ matrix.os }}
strategy:
fail-fast: false
matrix:
compiler: [clang, gcc, msvc]
os: [ubuntu-latest, macos-latest, windows-latest]
optimized: [true, false]
exclude:
# MSVC only works on Windows.
- os: ubuntu-latest
compiler: msvc
- os: macos-latest
compiler: msvc
# Not testing with GCC on macOS.
- os: macos-latest
compiler: gcc
# Only testing with MSVC on Windows.
- os: windows-latest
compiler: clang
- os: windows-latest
compiler: gcc
include:
- compiler: clang
CC: clang
CXX: clang++
- compiler: gcc
CC: gcc
CXX: g++
- compiler: msvc
CC:
CXX:
env:
CMAKE_BUILD_DIR: ${{ github.workspace }}/build
CMAKE_BUILD_TYPE: ${{ matrix.optimized && 'RelWithDebInfo' || 'Debug' }}
CC: ${{ matrix.CC }}
CXX: ${{ matrix.CXX }}
BINARY_SUFFIX: ${{ startsWith(matrix.os, 'windows') && '.exe' || '' }}
BINARY_PATH: >-
${{ format(
startsWith(matrix.os, 'windows') && '{0}\build\{1}\' || '{0}/build/',
github.workspace,
matrix.optimized && 'RelWithDebInfo' || 'Debug') }}
steps:
- uses: actions/checkout@v2
with:
submodules: true
- name: Install dependencies on Linux
if: ${{ runner.os == 'Linux' }}
# libgoogle-perftools-dev is temporarily removed from the package list
# because it is currently broken on GitHub's Ubuntu 22.04.
run: |
sudo apt-get update
sudo apt-get install libkyotocabinet-dev libsnappy-dev libsqlite3-dev
- name: Generate build config
run: >-
cmake -S "${{ github.workspace }}" -B "${{ env.CMAKE_BUILD_DIR }}"
-DCMAKE_BUILD_TYPE=${{ env.CMAKE_BUILD_TYPE }}
-DCMAKE_INSTALL_PREFIX=${{ runner.temp }}/install_test/
- name: Build
run: >-
cmake --build "${{ env.CMAKE_BUILD_DIR }}"
--config "${{ env.CMAKE_BUILD_TYPE }}"
- name: Run Tests
working-directory: ${{ github.workspace }}/build
run: ctest -C "${{ env.CMAKE_BUILD_TYPE }}" --verbose
- name: Run LevelDB Benchmarks
run: ${{ env.BINARY_PATH }}db_bench${{ env.BINARY_SUFFIX }}
- name: Run SQLite Benchmarks
if: ${{ runner.os != 'Windows' }}
run: ${{ env.BINARY_PATH }}db_bench_sqlite3${{ env.BINARY_SUFFIX }}
- name: Run Kyoto Cabinet Benchmarks
if: ${{ runner.os == 'Linux' && matrix.compiler == 'clang' }}
run: ${{ env.BINARY_PATH }}db_bench_tree_db${{ env.BINARY_SUFFIX }}
- name: Test CMake installation
run: cmake --build "${{ env.CMAKE_BUILD_DIR }}" --target install

+ 2
- 0
.gitignore View File

@ -6,3 +6,5 @@
# Build directory.
build/
out/
cmake-build-*
.idea

+ 1
- 1
.gitmodules View File

@ -3,4 +3,4 @@
url = https://github.com/google/googletest.git
[submodule "third_party/benchmark"]
path = third_party/benchmark
url = https://github.com/google/benchmark.git
url = https://github.com/google/benchmark

+ 14
- 21
CMakeLists.txt View File

@ -138,14 +138,6 @@ target_sources(leveldb
"db/log_reader.h"
"db/log_writer.cc"
"db/log_writer.h"
"db/vlog_writer.cc" # VlogWriter pzy
"db/vlog_writer.h"
"db/vlog_reader.cc" # VlogReader ckx
"db/vlog_reader.h"
"db/vlog_manager.cc" # VlogManager pzy
"db/vlog_manager.h"
"db/vlog_converter.cc"
"db/vlog_converter.h"
"db/memtable.cc"
"db/memtable.h"
"db/repair.cc"
@ -400,13 +392,6 @@ if(LEVELDB_BUILD_TESTS)
endfunction(leveldb_test)
leveldb_test("db/c_test.c")
leveldb_test("examples/main.cc") # ckx
leveldb_test("examples/test_1.cc")
leveldb_test("examples/kv_sep_test.cc") # ckx
leveldb_test("examples/ValueConvertTest.cc") # ckx
leveldb_test("examples/WiscKeyTest_1.cc") # pzy
leveldb_test("examples/GCtest.cc") # pzy
leveldb_test("examples/iterate_test.cc") # pzy
if(NOT BUILD_SHARED_LIBS)
# TODO(costan): This test also uses
@ -533,11 +518,19 @@ if(LEVELDB_INSTALL)
)
endif(LEVELDB_INSTALL)
add_executable(field_test
"${PROJECT_SOURCE_DIR}/examples/field_test.cc" # ckx
add_executable(db_test2
"${PROJECT_SOURCE_DIR}/test/db_test2.cc"
)
target_link_libraries(field_test PRIVATE leveldb gtest)
target_link_libraries(db_test2 PRIVATE leveldb)
# add_executable(WiscKeyTest_1
# "${PROJECT_SOURCE_DIR}/examples/WiscKeyTest_1.cc"
# )
add_executable(ttl_test
"${PROJECT_SOURCE_DIR}/test/ttl_test.cc"
)
target_link_libraries(ttl_test PRIVATE leveldb gtest)
add_executable(field_test
"${PROJECT_SOURCE_DIR}/test/field_test.cc"
)
target_link_libraries(field_test PRIVATE leveldb gtest)

+ 31
- 0
CONTRIBUTING.md View File

@ -0,0 +1,31 @@
# How to Contribute
We'd love to accept your patches and contributions to this project. There are
just a few small guidelines you need to follow.
## Contributor License Agreement
Contributions to this project must be accompanied by a Contributor License
Agreement. You (or your employer) retain the copyright to your contribution;
this simply gives us permission to use and redistribute your contributions as
part of the project. Head over to <https://cla.developers.google.com/> to see
your current agreements on file or to sign a new one.
You generally only need to submit a CLA once, so if you've already submitted one
(even if it was for a different project), you probably don't need to do it
again.
## Code Reviews
All submissions, including submissions by project members, require review. We
use GitHub pull requests for this purpose. Consult
[GitHub Help](https://help.github.com/articles/about-pull-requests/) for more
information on using pull requests.
See [the README](README.md#contributing-to-the-leveldb-project) for areas
where we are likely to accept external contributions.
## Community Guidelines
This project follows [Google's Open Source Community
Guidelines](https://opensource.google/conduct/).

+ 3
- 246
README.md View File

@ -1,252 +1,9 @@
LevelDB is a fast key-value storage library written at Google that provides an ordered mapping from string keys to string values.
实验报告请查看以下文档:
**本仓库提供TTL基本的测试用例**
我们的分工已在代码中以注释的形式体现,如:ckx、pzy。
- [实验报告](实验报告.md)
> **This repository is receiving very limited maintenance. We will only review the following types of changes.**
>
> * Fixes for critical bugs, such as data loss or memory corruption
> * Changes absolutely needed by internally supported leveldb clients. These typically fix breakage introduced by a language/standard library/OS update
[![ci](https://github.com/google/leveldb/actions/workflows/build.yml/badge.svg)](https://github.com/google/leveldb/actions/workflows/build.yml)
Authors: Sanjay Ghemawat (sanjay@google.com) and Jeff Dean (jeff@google.com)
# Features
* Keys and values are arbitrary byte arrays.
* Data is stored sorted by key.
* Callers can provide a custom comparison function to override the sort order.
* The basic operations are `Put(key,value)`, `Get(key)`, `Delete(key)`.
* Multiple changes can be made in one atomic batch.
* Users can create a transient snapshot to get a consistent view of data.
* Forward and backward iteration is supported over the data.
* Data is automatically compressed using the [Snappy compression library](https://google.github.io/snappy/), but [Zstd compression](https://facebook.github.io/zstd/) is also supported.
* External activity (file system operations etc.) is relayed through a virtual interface so users can customize the operating system interactions.
# Documentation
[LevelDB library documentation](https://github.com/google/leveldb/blob/main/doc/index.md) is online and bundled with the source code.
# Limitations
* This is not a SQL database. It does not have a relational data model, it does not support SQL queries, and it has no support for indexes.
* Only a single process (possibly multi-threaded) can access a particular database at a time.
* There is no client-server support builtin to the library. An application that needs such support will have to wrap their own server around the library.
# Getting the Source
```bash
git clone --recurse-submodules https://github.com/google/leveldb.git
```
# Building
This project supports [CMake](https://cmake.org/) out of the box.
### Build for POSIX
Quick start:
克隆代码:
```bash
mkdir -p build && cd build
cmake -DCMAKE_BUILD_TYPE=Release .. && cmake --build .
git clone --recurse-submodules https://gitea.shuishan.net.cn/building_data_management_systems.Xuanzhou.2024Fall.DaSE/leveldb_base.git
```
### Building for Windows
First generate the Visual Studio 2017 project/solution files:
```cmd
mkdir build
cd build
cmake -G "Visual Studio 15" ..
```
The default will build for x86. For 64-bit run:
```cmd
cmake -G "Visual Studio 15 Win64" ..
```
To compile the Windows solution from the command-line:
```cmd
devenv /build Debug leveldb.sln
```
or open leveldb.sln in Visual Studio and build from within.
Please see the CMake documentation and `CMakeLists.txt` for more advanced usage.
# Contributing to the leveldb Project
> **This repository is receiving very limited maintenance. We will only review the following types of changes.**
>
> * Bug fixes
> * Changes absolutely needed by internally supported leveldb clients. These typically fix breakage introduced by a language/standard library/OS update
The leveldb project welcomes contributions. leveldb's primary goal is to be
a reliable and fast key/value store. Changes that are in line with the
features/limitations outlined above, and meet the requirements below,
will be considered.
Contribution requirements:
1. **Tested platforms only**. We _generally_ will only accept changes for
platforms that are compiled and tested. This means POSIX (for Linux and
macOS) or Windows. Very small changes will sometimes be accepted, but
consider that more of an exception than the rule.
2. **Stable API**. We strive very hard to maintain a stable API. Changes that
require changes for projects using leveldb _might_ be rejected without
sufficient benefit to the project.
3. **Tests**: All changes must be accompanied by a new (or changed) test, or
a sufficient explanation as to why a new (or changed) test is not required.
4. **Consistent Style**: This project conforms to the
[Google C++ Style Guide](https://google.github.io/styleguide/cppguide.html).
To ensure your changes are properly formatted please run:
```
clang-format -i --style=file <file>
```
We are unlikely to accept contributions to the build configuration files, such
as `CMakeLists.txt`. We are focused on maintaining a build configuration that
allows us to test that the project works in a few supported configurations
inside Google. We are not currently interested in supporting other requirements,
such as different operating systems, compilers, or build systems.
## Submitting a Pull Request
Before any pull request will be accepted the author must first sign a
Contributor License Agreement (CLA) at https://cla.developers.google.com/.
In order to keep the commit timeline linear
[squash](https://git-scm.com/book/en/v2/Git-Tools-Rewriting-History#Squashing-Commits)
your changes down to a single commit and [rebase](https://git-scm.com/docs/git-rebase)
on google/leveldb/main. This keeps the commit timeline linear and more easily sync'ed
with the internal repository at Google. More information at GitHub's
[About Git rebase](https://help.github.com/articles/about-git-rebase/) page.
# Performance
Here is a performance report (with explanations) from the run of the
included db_bench program. The results are somewhat noisy, but should
be enough to get a ballpark performance estimate.
## Setup
We use a database with a million entries. Each entry has a 16 byte
key, and a 100 byte value. Values used by the benchmark compress to
about half their original size.
LevelDB: version 1.1
Date: Sun May 1 12:11:26 2011
CPU: 4 x Intel(R) Core(TM)2 Quad CPU Q6600 @ 2.40GHz
CPUCache: 4096 KB
Keys: 16 bytes each
Values: 100 bytes each (50 bytes after compression)
Entries: 1000000
Raw Size: 110.6 MB (estimated)
File Size: 62.9 MB (estimated)
## Write performance
The "fill" benchmarks create a brand new database, in either
sequential, or random order. The "fillsync" benchmark flushes data
from the operating system to the disk after every operation; the other
write operations leave the data sitting in the operating system buffer
cache for a while. The "overwrite" benchmark does random writes that
update existing keys in the database.
fillseq : 1.765 micros/op; 62.7 MB/s
fillsync : 268.409 micros/op; 0.4 MB/s (10000 ops)
fillrandom : 2.460 micros/op; 45.0 MB/s
overwrite : 2.380 micros/op; 46.5 MB/s
Each "op" above corresponds to a write of a single key/value pair.
I.e., a random write benchmark goes at approximately 400,000 writes per second.
Each "fillsync" operation costs much less (0.3 millisecond)
than a disk seek (typically 10 milliseconds). We suspect that this is
because the hard disk itself is buffering the update in its memory and
responding before the data has been written to the platter. This may
or may not be safe based on whether or not the hard disk has enough
power to save its memory in the event of a power failure.
## Read performance
We list the performance of reading sequentially in both the forward
and reverse direction, and also the performance of a random lookup.
Note that the database created by the benchmark is quite small.
Therefore the report characterizes the performance of leveldb when the
working set fits in memory. The cost of reading a piece of data that
is not present in the operating system buffer cache will be dominated
by the one or two disk seeks needed to fetch the data from disk.
Write performance will be mostly unaffected by whether or not the
working set fits in memory.
readrandom : 16.677 micros/op; (approximately 60,000 reads per second)
readseq : 0.476 micros/op; 232.3 MB/s
readreverse : 0.724 micros/op; 152.9 MB/s
LevelDB compacts its underlying storage data in the background to
improve read performance. The results listed above were done
immediately after a lot of random writes. The results after
compactions (which are usually triggered automatically) are better.
readrandom : 11.602 micros/op; (approximately 85,000 reads per second)
readseq : 0.423 micros/op; 261.8 MB/s
readreverse : 0.663 micros/op; 166.9 MB/s
Some of the high cost of reads comes from repeated decompression of blocks
read from disk. If we supply enough cache to the leveldb so it can hold the
uncompressed blocks in memory, the read performance improves again:
readrandom : 9.775 micros/op; (approximately 100,000 reads per second before compaction)
readrandom : 5.215 micros/op; (approximately 190,000 reads per second after compaction)
## Repository contents
See [doc/index.md](doc/index.md) for more explanation. See
[doc/impl.md](doc/impl.md) for a brief overview of the implementation.
The public interface is in include/leveldb/*.h. Callers should not include or
rely on the details of any other header files in this package. Those
internal APIs may be changed without warning.
Guide to header files:
* **include/leveldb/db.h**: Main interface to the DB: Start here.
* **include/leveldb/options.h**: Control over the behavior of an entire database,
and also control over the behavior of individual reads and writes.
* **include/leveldb/comparator.h**: Abstraction for user-specified comparison function.
If you want just bytewise comparison of keys, you can use the default
comparator, but clients can write their own comparator implementations if they
want custom ordering (e.g. to handle different character encodings, etc.).
* **include/leveldb/iterator.h**: Interface for iterating over data. You can get
an iterator from a DB object.
* **include/leveldb/write_batch.h**: Interface for atomically applying multiple
updates to a database.
* **include/leveldb/slice.h**: A simple module for maintaining a pointer and a
length into some other byte array.
* **include/leveldb/status.h**: Status is returned from many of the public interfaces
and is used to report success and various kinds of errors.
* **include/leveldb/env.h**:
Abstraction of the OS environment. A posix implementation of this interface is
in util/env_posix.cc.
* **include/leveldb/table.h, include/leveldb/table_builder.h**: Lower-level modules that most
clients probably won't use directly.

+ 0
- 16
TODO View File

@ -12,19 +12,3 @@ db
After a range is completely deleted, what gets rid of the
corresponding files if we do no future changes to that range. Make
the conditions for triggering compactions fire in more situations?
## Used For ZeitHaum In October, 2023.
Implementation of WiscKey:
Step 1: For any Write, change Value to expected content, and supports for read. [Done.]
Step 2: Think the organization of value file.
Step 3: For a vptr, supports for read from a file.
Step 4: Combination of all above step to accomplish WiscKey.
I. WiscKeyMem
Notice TODOs!

+ 0
- 5
UseGuaide.txt View File

@ -1,5 +0,0 @@
运行新代码:
1. 在examples/ 添加对应的测试文件
2. 在最外层CMakeLists加入新的测试文件的编译指令,代码参考: + leveldb_test("examples/main.cc")
3. 进入build文件,重新编译,指令:"cmake -DCMAKE_BUILD_TYPE=Release .. && cmake --build ."。
"cmake -DCMAKE_BUILD_TYPE=Debug .. && cmake --build ."

+ 8
- 6
benchmarks/db_bench.cc View File

@ -1,3 +1,7 @@
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
#include <sys/types.h>
#include <atomic>
@ -16,7 +20,7 @@
#include "util/mutexlock.h"
#include "util/random.h"
#include "util/testutil.h"
//benchmarks,ckx,pzy
// Comma-separated list of operations to run in the specified order
// Actual benchmarks:
// fillseq -- write N values in sequential key order in async mode
@ -70,7 +74,7 @@ static int FLAGS_reads = -1;
static int FLAGS_threads = 1;
// Size of each value
static int FLAGS_value_size = 3072;
static int FLAGS_value_size = 100;
// Arrange to generate values that shrink to this fraction of
// their original size after compression
@ -206,7 +210,7 @@ class KeyBuffer {
Slice slice() const { return Slice(buffer_, FLAGS_key_prefix + 16); }
private:
char buffer_[4096];
char buffer_[1024];
};
#if defined(__linux)
@ -812,7 +816,6 @@ class Benchmark {
options.reuse_logs = FLAGS_reuse_logs;
options.compression =
FLAGS_compression ? kSnappyCompression : kNoCompression;
options.kvSepType = kVSepBeforeMem;
Status s = DB::Open(options, FLAGS_db, &db_);
if (!s.ok()) {
std::fprintf(stderr, "open error: %s\n", s.ToString().c_str());
@ -844,13 +847,12 @@ class Benchmark {
Status s;
int64_t bytes = 0;
KeyBuffer key;
entries_per_batch_ = 1; // Remove multi Write of batch.
for (int i = 0; i < num_; i += entries_per_batch_) {
batch.Clear();
for (int j = 0; j < entries_per_batch_; j++) {
const int k = seq ? i + j : thread->rand.Uniform(FLAGS_num);
key.Set(k);
db_->Put(WriteOptions(), key.slice(), gen.Generate(value_size_));
batch.Put(key.slice(), gen.Generate(value_size_));
bytes += value_size_ + key.slice().size();
thread->stats.FinishedSingleOp();
}

+ 4
- 0
benchmarks/db_bench_log.cc View File

@ -1,3 +1,7 @@
// Copyright (c) 2019 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
#include <cinttypes>
#include <cstdio>
#include <string>

+ 4
- 0
benchmarks/db_bench_sqlite3.cc View File

@ -1,3 +1,7 @@
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
#include <sqlite3.h>
#include <cstdio>

+ 4
- 0
benchmarks/db_bench_tree_db.cc View File

@ -1,3 +1,7 @@
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
#include <kcpolydb.h>
#include <cstdio>

+ 4
- 0
db/autocompact_test.cc View File

@ -1,3 +1,7 @@
// Copyright (c) 2013 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
#include "gtest/gtest.h"
#include "db/db_impl.h"
#include "leveldb/cache.h"

+ 6
- 15
db/builder.cc View File

@ -1,10 +1,13 @@
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
#include "db/builder.h"
#include "db/dbformat.h"
#include "db/filename.h"
#include "db/table_cache.h"
#include "db/version_edit.h"
#include "db/db_impl.h"
#include "leveldb/db.h"
#include "leveldb/env.h"
#include "leveldb/iterator.h"
@ -12,7 +15,7 @@
namespace leveldb {
Status BuildTable(const std::string& dbname, Env* env, const Options& options,
TableCache* table_cache, Iterator* iter, FileMetaData* meta, DBImpl* impl) {
TableCache* table_cache, Iterator* iter, FileMetaData* meta) {
Status s;
meta->file_size = 0;
iter->SeekToFirst();
@ -30,20 +33,8 @@ Status BuildTable(const std::string& dbname, Env* env, const Options& options,
Slice key;
for (; iter->Valid(); iter->Next()) {
key = iter->key();
if(impl->GetKVSepType() == kVSepBeforeSSD && impl!=nullptr){
char buf[20];
Slice vptr;
Slice val = iter->value();
Status s = impl->WriteValueIntoVlog(key, val, buf, vptr);
if(!s.ok()) return s;
builder->Add(key, vptr);
}
else builder->Add(key, iter->value());
builder->Add(key, iter->value());
}
if(impl->GetKVSepType() == kVSepBeforeSSD && impl!=nullptr){
s = impl->FlushVlog();
if(!s.ok()) return s;
}
if (!key.empty()) {
meta->largest.DecodeFrom(key);
}

+ 5
- 2
db/builder.h View File

@ -1,3 +1,7 @@
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
#ifndef STORAGE_LEVELDB_DB_BUILDER_H_
#define STORAGE_LEVELDB_DB_BUILDER_H_
@ -12,7 +16,6 @@ class Env;
class Iterator;
class TableCache;
class VersionEdit;
class DBImpl;
// Build a Table file from the contents of *iter. The generated file
// will be named according to meta->number. On success, the rest of
@ -20,7 +23,7 @@ class DBImpl;
// If no data is present in *iter, meta->file_size will be set to
// zero, and no Table file will be produced.
Status BuildTable(const std::string& dbname, Env* env, const Options& options,
TableCache* table_cache, Iterator* iter, FileMetaData* meta, DBImpl* impl);
TableCache* table_cache, Iterator* iter, FileMetaData* meta);
} // namespace leveldb

+ 5
- 2
db/c.cc View File

@ -1,3 +1,7 @@
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
#include "leveldb/c.h"
#include <string.h>
@ -273,8 +277,7 @@ void leveldb_destroy_db(const leveldb_options_t* options, const char* name,
void leveldb_repair_db(const leveldb_options_t* options, const char* name,
char** errptr) {
//TODO: Consider later.
// SaveError(errptr, RepairDB(name, options->rep, nullptr));
SaveError(errptr, RepairDB(name, options->rep));
}
void leveldb_iter_destroy(leveldb_iterator_t* iter) {

+ 4
- 0
db/c_test.c View File

@ -1,3 +1,7 @@
/* Copyright (c) 2011 The LevelDB Authors. All rights reserved.
Use of this source code is governed by a BSD-style license that can be
found in the LICENSE file. See the AUTHORS file for names of contributors. */
#include "leveldb/c.h"
#include <stddef.h>

+ 1
- 2
db/corruption_test.cc View File

@ -51,8 +51,7 @@ class CorruptionTest : public testing::Test {
void RepairDB() {
delete db_;
db_ = nullptr;
//TODO: Considered later.
// ASSERT_LEVELDB_OK(::leveldb::RepairDB(dbname_, options_, nullptr));
ASSERT_LEVELDB_OK(::leveldb::RepairDB(dbname_, options_));
}
void Build(int n) {

+ 27
- 192
db/db_impl.cc View File

@ -11,7 +11,8 @@
#include <set>
#include <string>
#include <vector>
#include <iostream>
#include<iostream>
#include "db/builder.h"
#include "db/db_iter.h"
#include "db/dbformat.h"
@ -34,13 +35,7 @@
#include "util/coding.h"
#include "util/logging.h"
#include "util/mutexlock.h"
#include <fstream>
// #define DEBUG
#ifdef DEBUG
#include <iostream>
#endif
namespace leveldb {
const int kNumNonTableCacheFiles = 10;
@ -148,12 +143,6 @@ DBImpl::DBImpl(const Options& raw_options, const std::string& dbname)
logfile_(nullptr),
logfile_number_(0),
log_(nullptr),
vlog_(nullptr),
vmanager_(new vlog::VlogManager()),
vconverter_(new vlog::VlogConverter()),
vlogfile_(nullptr),
vlogfile_number_(0),
vlogfile_offset_(0),
seed_(0),
tmp_batch_(new WriteBatch),
background_compaction_scheduled_(false),
@ -180,11 +169,6 @@ DBImpl::~DBImpl() {
delete tmp_batch_;
delete log_;
delete logfile_;
delete vlog_;
delete vlogfile_;
delete vmanager_;
delete vconverter_;
delete table_cache_;
if (owns_info_log_) {
@ -236,7 +220,8 @@ void DBImpl::MaybeIgnoreError(Status* s) const {
} else {
Log(options_.info_log, "Ignoring error %s", s->ToString().c_str());
*s = Status::OK();
}}
}
}
void DBImpl::RemoveObsoleteFiles() {
mutex_.AssertHeld();
@ -367,17 +352,6 @@ Status DBImpl::Recover(VersionEdit* edit, bool* save_manifest) {
expected.erase(number);
if (type == kLogFile && ((number >= min_log) || (number == prev_log)))
logs.push_back(number);
else if(type == kVlogFile ) {
vlogfile_number_ = std::max(vlogfile_number_, number);
// Add vlogfiles into vmanager_;
SequentialFile* now_file = nullptr;
WritableFile* writable_file = nullptr;
s = options_.env -> NewSequentialFile(VlogFileName(dbname_, vlogfile_number_), &now_file);
if (!s.ok()) return s;
s = options_.env->NewWritableFile(VlogFileName(dbname_, number), &writable_file);
if(!s.ok()) return s;
vmanager_->AddVlogFile(vlogfile_number_, now_file, writable_file);
}
}
}
if (!expected.empty()) {
@ -386,14 +360,7 @@ Status DBImpl::Recover(VersionEdit* edit, bool* save_manifest) {
static_cast<int>(expected.size()));
return Status::Corruption(buf, TableFileName(dbname_, *(expected.begin())));
}
// Make the vlog_ file Now
WritableFile* vfile;
if(s.ok()) s = options_.env -> NewWritableFile(VlogFileName(dbname_, vlogfile_number_),
&vfile);
vlogfile_ = vfile;
vlog_ = new vlog::VWriter(vfile);
if(!s.ok()) return s;
// Recover in the order in which the logs were generated
std::sort(logs.begin(), logs.end());
for (size_t i = 0; i < logs.size(); i++) {
@ -413,10 +380,6 @@ Status DBImpl::Recover(VersionEdit* edit, bool* save_manifest) {
versions_->SetLastSequence(max_sequence);
}
//Assign the offset
uint64_t file_size = 0;
Status gfsz = options_.env->GetFileSize(VlogFileName(dbname_, vlogfile_number_), &file_size);
if(!gfsz.ok()) file_size = 0;
return Status::OK();
}
@ -554,7 +517,7 @@ Status DBImpl::WriteLevel0Table(MemTable* mem, VersionEdit* edit,
Status s;
{
mutex_.Unlock();
s = BuildTable(dbname_, env_, options_, table_cache_, iter, &meta, this);
s = BuildTable(dbname_, env_, options_, table_cache_, iter, &meta);
mutex_.Lock();
}
@ -667,6 +630,11 @@ void DBImpl::TEST_CompactRange(int level, const Slice* begin,
background_work_finished_signal_.Wait();
}
}
// Finish current background compaction in the case where
// `background_work_finished_signal_` was signalled due to an error.
while (background_compaction_scheduled_) {
background_work_finished_signal_.Wait();
}
if (manual_compaction_ == &manual) {
// Cancel my manual compaction since we aborted early for some reason.
manual_compaction_ = nullptr;
@ -1047,19 +1015,6 @@ Status DBImpl::DoCompactionWork(CompactionState* compact) {
}
}
}
// 垃圾回收,ckx
if (drop) {
// 标记为无效的逻辑
if (options_.kvSepType == kVSepBeforeMem) { // 如果启用了 KV 分离
Slice vptr = input->value(); // 获取指向 VLog 的 vptr
uint64_t vlogfile_number, vlogfile_offset;
Status s = vconverter_->DecodeVptr(&vlogfile_number, &vlogfile_offset, &vptr);
if (s.ok()) {
// 更新 vmanager_,标记该 offset 无效
vmanager_->MarkVlogValueInvalid(vlogfile_number, vlogfile_offset);
}
}
}
input->Next();
}
@ -1197,12 +1152,6 @@ Status DBImpl::Get(const ReadOptions& options, const Slice& key,
} else {
s = current->Get(options, lkey, value, &stats);
have_stat_update = true;
//if KVSeq, Need Decode
if(options_.kvSepType == kVSepBeforeSSD){
std::string tmp_key;
Status s = ReadValueFromVlog(&tmp_key, value, value);
if(!s.ok()) return s;
}
}
mutex_.Lock();
}
@ -1213,38 +1162,9 @@ Status DBImpl::Get(const ReadOptions& options, const Slice& key,
mem->Unref();
if (imm != nullptr) imm->Unref();
current->Unref();
//Decode vptr if Need Kvsep
if(options_.kvSepType == kVSepBeforeMem){
// std::cout << "get时进入KV分离情况 " << "\n";
std::string tmp_key;
Status s = ReadValueFromVlog(&tmp_key, value, value);
if(!s.ok()) return s;
}
return s;
}
// 根据字段值查找所有包含该字段的 key,字段分离,ckx
std::vector<std::string> DBImpl::FindKeysByField(leveldb::DB* db, Field &field) {
Iterator* iter = db->NewIterator(ReadOptions());
std::vector<std::string> ret_keys;
int64_t bytes = 0;
for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
auto fields_ret = ParseValue(iter->value().data());
for (Field each_field : fields_ret)
{
std::cout << each_field.first << " " << each_field.second << std::endl;
if (field.first.compare(each_field.first) == 0) {
if (field.second.compare(each_field.second)==0)
{
ret_keys.push_back(iter->key().data());
}
else
break;
}
}
}
delete iter;
return ret_keys;
}
Iterator* DBImpl::NewIterator(const ReadOptions& options) {
SequenceNumber latest_snapshot;
uint32_t seed;
@ -1274,24 +1194,16 @@ void DBImpl::ReleaseSnapshot(const Snapshot* snapshot) {
snapshots_.Delete(static_cast<const SnapshotImpl*>(snapshot));
}
// Convenience methods
Status DBImpl::Put(const WriteOptions& o, const Slice& key, const Slice& val) {
// Convert value to vptr if need.
if(this->options_.kvSepType == noKVSep || this->options_.kvSepType == kVSepBeforeSSD){
return DB::Put(o, key, val);
}
else if(this->options_.kvSepType == kVSepBeforeMem){
char buf[20];
Slice vptr;
Status s = WriteValueIntoVlog(key, val, buf, vptr); //pzy
if(!s.ok()) return s;
s = vlog_-> Flush();
if(!s.ok()) return s;
return DB::Put(o, key, vptr);
}
return Status::Corruption("Invalid kvSepType.");
return DB::Put(o, key, val);
}
Status DBImpl::Put(const WriteOptions& o, const Slice& key, const Slice& val, uint64_t ttl) {
return DB::Put(o, key, val, ttl);
} // 实现新的put接口,心
Status DBImpl::Delete(const WriteOptions& options, const Slice& key) {
return DB::Delete(options, key);
}
@ -1577,85 +1489,6 @@ void DBImpl::GetApproximateSizes(const Range* range, int n, uint64_t* sizes) {
v->Unref();
}
Status DBImpl::WriteValueIntoVlog(const Slice& key, const Slice& val, char* buf, Slice& vptr){
//写VLog,pzy
Status s;
if(vlogfile_offset_ >= options_.vlog_file_size){
// if (IsDiskBusy("sda")) { // 替换 "sda" 为实际设备名
// // 如果 I/O 繁忙,可以选择等待或继续写入当前文件
// // std::this_thread::sleep_for(std::chrono::milliseconds(10));
// } else {
std::unique_lock<std::mutex> lock(vlog_mutex_); // 保证文件切换的原子性
WritableFile* newfile;
SequentialFile* readfile;
s = options_.env->NewWritableFile(VlogFileName(dbname_, vlogfile_number_ + 1), &newfile);
if(!s.ok()) return s;
s = options_.env->NewSequentialFile(VlogFileName(dbname_, vlogfile_number_ + 1), &readfile);
if(!s.ok()) return s;
//更新相应的模块
delete vlog_;
delete vlogfile_;
vmanager_->AddVlogFile(vlogfile_number_ + 1, readfile, newfile);
vlogfile_ = newfile;
vlog_ = new vlog::VWriter(vlogfile_);
++vlogfile_number_;
vlogfile_offset_ = 0;
//}
}
int write_size = 0;
std::string tmp_vrec;
PutLengthPrefixedSlice(&tmp_vrec, key);
PutLengthPrefixedSlice(&tmp_vrec, val);
{
std::unique_lock<std::mutex> lock(vlog_mutex_); // 保证记录插入和元数据修改的原子性
s = vlog_-> AddRecord(Slice(tmp_vrec), write_size); // 增加一条记录
if(!s.ok()) return s;
// 更新 total_value_count_
vmanager_->IncrementTotalValueCount(vlogfile_);
//将val替换为vptr.
vptr = vconverter_->GetVptr(vlogfile_number_, vlogfile_offset_, buf);
vlogfile_offset_ += write_size;
}
return s;
}
bool DBImpl::IsDiskBusy(const std::string& device) {
std::ifstream io_stat("/sys/block/" + device + "/stat");
if (!io_stat.is_open()) return false;
unsigned long read_requests, write_requests, io_in_progress;
io_stat >> read_requests >> write_requests >> io_in_progress;
io_stat.close();
// 如果当前正在处理的 I/O 请求数大于阈值,则认为磁盘繁忙
return io_in_progress > 2; // 设置一个合理的阈值
}
Status DBImpl::ReadValueFromVlog(std::string* key, std::string* val, std::string* vptr){//读vlog,pzy
Status s;
Slice encoded_vptr = Slice(*vptr);
uint64_t vlogfile_number;
uint64_t vlogfile_offset;
s = vconverter_->DecodeVptr(&vlogfile_number, &vlogfile_offset, &encoded_vptr); // 解码出vlog文件号、文件内偏移量
if(!s.ok()) return s;
SequentialFile* vlog_file = vmanager_->GetVlogFile(vlogfile_number); // 获取vlog文件
if(vlog_file == nullptr) return Status::Corruption("Failed to find vlog files.");
vlog::VReader vreader = vlog::VReader(vlog_file);
vreader.ReadKV(vlogfile_offset, key, val);
// std::cout << "从Vlog中读完value" << "\n";
return s;
}
KVSepType DBImpl::GetKVSepType(){
return options_.kvSepType;
}
Status DBImpl::FlushVlog(){
return vlog_ ->Flush();
}
// Default implementations of convenience methods that subclasses of DB
// can call if they wish
Status DB::Put(const WriteOptions& opt, const Slice& key, const Slice& value) {
@ -1664,12 +1497,22 @@ Status DB::Put(const WriteOptions& opt, const Slice& key, const Slice& value) {
return Write(opt, &batch);
}
// 假设增加一个新的Put接口,包含TTL参数, 单位(秒)
Status DB::Put(const WriteOptions& opt, const Slice& key, const Slice& value, uint64_t ttl){
WriteBatch batch;
batch.Put(key, value, ttl);
return Write(opt, &batch);
} // 这里应该是新的PUT接口的真正实现的地方,还是由本来的DB类实现,怪?心
Status DB::Delete(const WriteOptions& opt, const Slice& key) {
WriteBatch batch;
batch.Delete(key);
return Write(opt, &batch);
}
DB::~DB() = default;
Status DB::Open(const Options& options, const std::string& dbname, DB** dbptr) {
@ -1685,17 +1528,13 @@ Status DB::Open(const Options& options, const std::string& dbname, DB** dbptr) {
// Create new log and a corresponding memtable.
uint64_t new_log_number = impl->versions_->NewFileNumber();
WritableFile* lfile;
SequentialFile* sfile;
s = options.env->NewWritableFile(LogFileName(dbname, new_log_number),
&lfile);
if(s.ok()) s = options.env -> NewSequentialFile(VlogFileName(dbname, impl->vlogfile_number_),
&sfile);
if (s.ok()) {
edit.SetLogNumber(new_log_number);
impl->logfile_ = lfile;
impl->logfile_number_ = new_log_number;
impl->log_ = new log::Writer(lfile);
impl->vmanager_->AddVlogFile(impl->vlogfile_number_, sfile, lfile);
impl->mem_ = new MemTable(impl->internal_comparator_);
impl->mem_->Ref();
}
@ -1753,7 +1592,3 @@ Status DestroyDB(const std::string& dbname, const Options& options) {
}
} // namespace leveldb
#ifdef DEBUG
#undef DEBUG
#endif

+ 6
- 37
db/db_impl.h View File

@ -12,20 +12,12 @@
#include "db/dbformat.h"
#include "db/log_writer.h"
#include "db/vlog_writer.h"
#include "db/vlog_reader.h"
#include "db/vlog_manager.h"
#include "db/snapshot.h"
#include "db/vlog_converter.h"
#include "leveldb/db.h"
#include "leveldb/env.h"
#include "port/port.h"
#include "port/thread_annotations.h"
#include <thread>
#include <chrono> // 如果使用了 std::this_thread::sleep_for
namespace leveldb {
class MemTable;
@ -46,30 +38,21 @@ class DBImpl : public DB {
// Implementations of the DB interface
Status Put(const WriteOptions&, const Slice& key,
const Slice& value) override;
Status Put(const WriteOptions&, const Slice& key,
const Slice& value, uint64_t ttl) override; //put接口
Status Delete(const WriteOptions&, const Slice& key) override;
Status Write(const WriteOptions& options, WriteBatch* updates) override;
Status Get(const ReadOptions& options, const Slice& key,
std::string* value) override;
std::vector<std::string> FindKeysByField(leveldb::DB* db, Field& field) override;
Iterator* NewIterator(const ReadOptions&) override;
const Snapshot* GetSnapshot() override;
void ReleaseSnapshot(const Snapshot* snapshot) override;
bool GetProperty(const Slice& property, std::string* value) override;
void GetApproximateSizes(const Range* range, int n, uint64_t* sizes) override;
void CompactRange(const Slice* begin, const Slice* end) override;
void StartBackgroundCleanupTask() { // GC的线程
std::thread([this]() {
while (!shutting_down_.load(std::memory_order_acquire)) {
vmanager_->CleanupInvalidVlogFiles(options_, dbname_);
std::this_thread::sleep_for(std::chrono::seconds(60)); //
}
}).detach();
}
// Extra methods (for testing) that are not in the public DB interface
//to get the KVSepType
KVSepType GetKVSepType();
Status FlushVlog();
// Extra methods (for testing) that are not in the public DB interface
// Compact any files in the named level that overlap [*begin,*end]
void TEST_CompactRange(int level, const Slice* begin, const Slice* end);
@ -192,7 +175,6 @@ class DBImpl : public DB {
// State below is protected by mutex_
port::Mutex mutex_;
std::mutex vlog_mutex_; // VLog
std::atomic<bool> shutting_down_;
port::CondVar background_work_finished_signal_ GUARDED_BY(mutex_);
MemTable* mem_;
@ -201,14 +183,6 @@ class DBImpl : public DB {
WritableFile* logfile_;
uint64_t logfile_number_ GUARDED_BY(mutex_);
log::Writer* log_;
//Add Defination of vlog files.
//TODO: Consider the Concurrency.
uint64_t vlogfile_number_;
uint64_t vlogfile_offset_; // vlog_file的偏移
WritableFile* vlogfile_; //vlog_file的文件类
vlog::VWriter* vlog_;
vlog::VlogManager* vmanager_;
vlog::VlogConverter* vconverter_;
uint32_t seed_ GUARDED_BY(mutex_); // For sampling.
// Queue of writers.
@ -232,13 +206,6 @@ class DBImpl : public DB {
Status bg_error_ GUARDED_BY(mutex_);
CompactionStats stats_[config::kNumLevels] GUARDED_BY(mutex_);
public:
Status WriteValueIntoVlog(const Slice& key, const Slice& val, char* buf, Slice& vptr);
Status ReadValueFromVlog(std::string* key, std::string* val, std::string* vptr);
bool IsDiskBusy(const std::string& device) ;
};
// Sanitize db options. The caller should delete result.info_log if
@ -248,6 +215,8 @@ Options SanitizeOptions(const std::string& db,
const InternalFilterPolicy* ipolicy,
const Options& src);
} // namespace leveldb
#endif // STORAGE_LEVELDB_DB_DB_IMPL_H_

+ 4
- 24
db/db_test.cc View File

@ -2117,6 +2117,10 @@ class ModelDB : public DB {
Status Put(const WriteOptions& o, const Slice& k, const Slice& v) override {
return DB::Put(o, k, v);
}
Status Put(const WriteOptions& o, const Slice& k,
const Slice& v, uint64_t ttl) override {
return DB::Put(o, k, v);
}// 实现的是DB里的新put接口,心
Status Delete(const WriteOptions& o, const Slice& key) override {
return DB::Delete(o, key);
}
@ -2125,30 +2129,6 @@ class ModelDB : public DB {
assert(false); // Not implemented
return Status::NotFound(key);
}
// 根据字段值查找所有包含该字段的 key
std::vector<std::string> FindKeysByField(leveldb::DB* db, Field &field) override {
Iterator* iter = db->NewIterator(ReadOptions());
std::vector<std::string> ret_keys;
int64_t bytes = 0;
for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
auto fields_ret = ParseValue(iter->value().data());
for (Field each_field : fields_ret)
{
std::cout << each_field.first << " " << each_field.second << std::endl;
if (field.first.compare(each_field.first) == 0) {
if (field.second.compare(each_field.second)==0)
{
ret_keys.push_back(iter->key().data());
}
else
break;
}
}
}
delete iter;
return ret_keys;
}
Iterator* NewIterator(const ReadOptions& options) override {
if (options.snapshot == nullptr) {
KVMap* saved = new KVMap;

+ 2
- 11
db/filename.cc View File

@ -30,12 +30,6 @@ std::string LogFileName(const std::string& dbname, uint64_t number) {
return MakeFileName(dbname, number, "log");
}
// 创建vlogfile的名字
std::string VlogFileName(const std::string& dbname, uint64_t number){
assert(number >= 0);
return MakeFileName(dbname, number, "vlog");
}
std::string TableFileName(const std::string& dbname, uint64_t number) {
assert(number > 0);
return MakeFileName(dbname, number, "ldb");
@ -80,7 +74,7 @@ std::string OldInfoLogFileName(const std::string& dbname) {
// dbname/LOG
// dbname/LOG.old
// dbname/MANIFEST-[0-9]+
// dbname/[0-9]+.(log|sst|ldb|vlog)
// dbname/[0-9]+.(log|sst|ldb)
bool ParseFileName(const std::string& filename, uint64_t* number,
FileType* type) {
Slice rest(filename);
@ -118,10 +112,7 @@ bool ParseFileName(const std::string& filename, uint64_t* number,
*type = kTableFile;
} else if (suffix == Slice(".dbtmp")) {
*type = kTempFile;
} else if(suffix == Slice(".vlog")){
*type = kVlogFile;
}
else {
} else {
return false;
}
*number = num;

+ 1
- 7
db/filename.h View File

@ -25,8 +25,7 @@ enum FileType {
kDescriptorFile,
kCurrentFile,
kTempFile,
kInfoLogFile, // Either the current one, or an old one
kVlogFile,
kInfoLogFile // Either the current one, or an old one
};
// Return the name of the log file with the specified number
@ -34,11 +33,6 @@ enum FileType {
// "dbname".
std::string LogFileName(const std::string& dbname, uint64_t number);
// Return the name of the vlog file with the specified number
// in the db named by "dbname". The result will be prefixed with
// "dbname".
std::string VlogFileName(const std::string& dbname, uint64_t number);
// Return the name of the sstable with the specified number
// in the db named by "dbname". The result will be prefixed with
// "dbname".

+ 0
- 4
db/log_test.cc View File

@ -205,10 +205,6 @@ class LogTest : public testing::Test {
return Status::OK();
}
Status SkipFromHead(uint64_t n){
return Status::NotSupported("SkipFromHead");
}
Slice contents_;
bool force_error_;
bool returned_partial_;

+ 4
- 6
db/repair.cc View File

@ -44,7 +44,7 @@ namespace {
class Repairer {
public:
Repairer(const std::string& dbname, const Options& options, DBImpl* impl)
Repairer(const std::string& dbname, const Options& options)
: dbname_(dbname),
env_(options.env),
icmp_(options.comparator),
@ -52,7 +52,6 @@ class Repairer {
options_(SanitizeOptions(dbname, &icmp_, &ipolicy_, options)),
owns_info_log_(options_.info_log != options.info_log),
owns_cache_(options_.block_cache != options.block_cache),
impl_(impl),
next_file_number_(1) {
// TableCache can be small since we expect each table to be opened once.
table_cache_ = new TableCache(dbname_, options_, 10);
@ -204,7 +203,7 @@ class Repairer {
FileMetaData meta;
meta.number = next_file_number_++;
Iterator* iter = mem->NewIterator();
status = BuildTable(dbname_, env_, options_, table_cache_, iter, &meta, impl_);
status = BuildTable(dbname_, env_, options_, table_cache_, iter, &meta);
delete iter;
mem->Unref();
mem = nullptr;
@ -433,7 +432,6 @@ class Repairer {
const Options options_;
bool owns_info_log_;
bool owns_cache_;
DBImpl* impl_;
TableCache* table_cache_;
VersionEdit edit_;
@ -445,8 +443,8 @@ class Repairer {
};
} // namespace
Status RepairDB(const std::string& dbname, const Options& options, DBImpl* impl) {
Repairer repairer(dbname, options, impl);
Status RepairDB(const std::string& dbname, const Options& options) {
Repairer repairer(dbname, options);
return repairer.Run();
}

+ 0
- 20
db/vlog_converter.cc View File

@ -1,20 +0,0 @@
#include "util/coding.h"
#include "db/vlog_converter.h"
namespace leveldb{
namespace vlog{
// 当需要将键值对插入数据库时,将值的存储位置 (file_no 和 file_offset) 编码为 Vlog Pointer,并与键关联存储。
// 紧凑的编码格式便于减少存储开销。
Slice VlogConverter::GetVptr(uint64_t file_no, uint64_t file_offset, char* buf){
char* vfileno_end = EncodeVarint64(buf, file_no);
char* vfileoff_end = EncodeVarint64(vfileno_end, file_offset);
return Slice(buf, vfileoff_end - buf);
}
Status VlogConverter::DecodeVptr(uint64_t* file_no, uint64_t* file_offset, Slice* vptr){
bool decoded_status = true;
decoded_status &= GetVarint64(vptr, file_no);
decoded_status &= GetVarint64(vptr, file_offset);
if(!decoded_status) return Status::Corruption("Can not Decode vptr from Read Bytes.");
else return Status::OK();
}
}// namespace vlog
}

+ 0
- 19
db/vlog_converter.h View File

@ -1,19 +0,0 @@
#ifndef STORAGE_LEVELDB_DB_VLOG_CONVERTER_H_
#define STORAGE_LEVELDB_DB_VLOG_CONVERTER_H_
#include <cstdint>
#include "leveldb/slice.h"
#include "leveldb/status.h"
namespace leveldb{
namespace vlog{
class VlogConverter{
public:
VlogConverter() = default;
~VlogConverter() = default;
Slice GetVptr(uint64_t file_no, uint64_t file_offset, char* buf);
Status DecodeVptr(uint64_t* file_no, uint64_t* file_offset, Slice* vptr);
};
}// namespace vlog
}
#endif

+ 0
- 33
db/vlog_manager.cc View File

@ -1,33 +0,0 @@
#include "db/vlog_manager.h"
namespace leveldb{
namespace vlog{
void VlogManager::AddVlogFile(uint64_t vlogfile_number, SequentialFile* seq_file, WritableFile* write_file){
if(vlog_table_.find(vlogfile_number) == vlog_table_.end()){
vlog_table_[vlogfile_number] = seq_file;
writable_to_sequential_[write_file] = seq_file;
}
else{
//Do Nothing
}
}
SequentialFile* VlogManager::GetVlogFile(uint64_t vlogfile_number){
auto it = vlog_table_.find(vlogfile_number);
if(it != vlog_table_.end()){
return it->second;
}
else return nullptr;
}
bool VlogManager::IsEmpty(){
return vlog_table_.size() == 0;
}
// 标记一个vlog文件有一个新的无效的value,pzy
void VlogManager::MarkVlogValueInvalid(uint64_t vlogfile_number, uint64_t offset) {
auto vlog_file = GetVlogFile(vlogfile_number);
if (vlog_file) {
vlog_file->MarkValueInvalid(offset); // 调用具体文件的标记逻辑
}
}
}// namespace vlog
}

+ 0
- 61
db/vlog_manager.h View File

@ -1,61 +0,0 @@
#ifndef STORAGE_LEVELDB_DB_VLOG_MANAGER_H_
#define STORAGE_LEVELDB_DB_VLOG_MANAGER_H_
#include <unordered_map>
#include <cstdint>
#include "leveldb/env.h"
#include "db/filename.h"
#include "leveldb/options.h"
namespace leveldb{
class SequentialFile;
namespace vlog{
class VlogManager{
public:
VlogManager() = default;
~VlogManager() = default;
//Add a vlog file, vlog file is already exist.
void AddVlogFile(uint64_t vlogfile_number, SequentialFile* seq_file, WritableFile* write_file);
SequentialFile* GetVlogFile(uint64_t vlogfile_number);
bool IsEmpty();
void MarkVlogValueInvalid(uint64_t vlogfile_number, uint64_t offset);
SequentialFile* GetSequentialFile(WritableFile* write_file) {
auto it = writable_to_sequential_.find(write_file);
return it != writable_to_sequential_.end() ? it->second : nullptr;
}
void IncrementTotalValueCount(WritableFile* write_file) {
auto seq_file = GetSequentialFile(write_file);
if (seq_file) {
seq_file->IncrementTotalValueCount(); // SequentialFile
}
}
void CleanupInvalidVlogFiles(const Options& options, const std::string& dbname) {
for (const auto& vlog_pair : vlog_table_) {
uint64_t vlogfile_number = vlog_pair.first;
auto vlog_file = vlog_pair.second;
if (vlog_file->AllValuesInvalid()) { //
RemoveVlogFile(vlogfile_number, options, dbname); // VLog
}
}
}
void RemoveVlogFile(uint64_t vlogfile_number, const Options& options, const std::string& dbname) { // vlogfile文件
auto it = vlog_table_.find(vlogfile_number);
if (it != vlog_table_.end()) {
delete it->second; // SequentialFile
vlog_table_.erase(it); //
options.env->DeleteFile(VlogFileName(dbname, vlogfile_number)); //
}
}
private:
std::unordered_map<uint64_t, SequentialFile*> vlog_table_; // vlog文件号和文件的关系
std::unordered_map<WritableFile*, SequentialFile*> writable_to_sequential_;
};
}// namespace vlog
}
#endif

+ 0
- 58
db/vlog_reader.cc View File

@ -1,58 +0,0 @@
#include <cstdint>
#include "db/vlog_reader.h"
#include "leveldb/slice.h"
#include "leveldb/env.h"
#include "util/coding.h"
namespace leveldb{
namespace vlog{
VReader::VReader(SequentialFile* file) // A file abstraction for reading sequentially through a file
:file_(file){}
Status VReader::ReadRecord(uint64_t vfile_offset, std::string* record){
Status s;
Slice size_slice;
char size_buf[11];
uint64_t rec_size = 0;
s = file_->SkipFromHead(vfile_offset); // 将文件的读取位置移动到 vfile_offset
if(s.ok()) s = file_ -> Read(10, &size_slice, size_buf); // 先把Record 长度读出来, 最长10字节.
if(s.ok()){
if(GetVarint64(&size_slice, &rec_size) == false){ // 解析变长整数,得到记录的长度 rec_size
return Status::Corruption("Failed to decode vlog record size.");
}
std::string rec;
char* c_rec = new char[rec_size]; // 为记录分配一个临时缓冲区
//TODO: Should delete c_rec?
rec.resize(rec_size);
Slice rec_slice;
s = file_->SkipFromHead(vfile_offset + (size_slice.data() - size_buf)); // 将文件的读取位置移动
if(!s.ok()) return s;
s = file_-> Read(rec_size, &rec_slice, c_rec); // 从文件中读取 rec_size 字节的数据到 c_rec 中,并用 rec_slice 包装这些数据
if(!s.ok()) return s;
rec = std::string(c_rec, rec_size);
*record = std::move(std::string(rec));
}
return s;
}
Status VReader::ReadKV(uint64_t vfile_offset, std::string* key, std::string* val){
std::string record_str;
Status s = ReadRecord(vfile_offset, &record_str);
if(s.ok()){
Slice record = Slice(record_str);
//File the val
uint64_t key_size;
bool decode_flag = true;
decode_flag &= GetVarint64(&record, &key_size); // 获取键的长度
if(decode_flag){
*key = Slice(record.data(), key_size).ToString(); // 从record中截取键值
record = Slice(record.data() + key_size, record.size() - key_size); // 截取剩余的record
}
uint64_t val_size;
decode_flag &= GetVarint64(&record, &val_size); // 获取value的长度
if(decode_flag) *val = Slice(record.data(), val_size).ToString(); // 截取value的值
if(!decode_flag || val->size() != record.size()){
s = Status::Corruption("Failed to decode Record Read From vlog.");
}
}
return s;
}
}// namespace vlog.
}

+ 0
- 25
db/vlog_reader.h View File

@ -1,25 +0,0 @@
#ifndef STORAGE_LEVELDB_DB_VLOG_READER_H_
#define STORAGE_LEVELDB_DB_VLOG_READER_H_
#include <cstdint>
#include "leveldb/slice.h"
#include "leveldb/status.h"
#include "port/port.h"
namespace leveldb {
class SequentialFile;
namespace vlog {
class VReader {
public:
explicit VReader(SequentialFile* file);
~VReader() = default;
Status ReadRecord(uint64_t vfile_offset, std::string* record);
Status ReadKV(uint64_t vfile_offset, std::string* key ,std::string* val);
private:
SequentialFile* file_;
};
} // namespace vlog
}
#endif

+ 0
- 26
db/vlog_writer.cc View File

@ -1,26 +0,0 @@
#include <cstdint>
#include "db/vlog_writer.h"
#include "leveldb/slice.h"
#include "leveldb/env.h"
#include "util/coding.h"
namespace leveldb{
namespace vlog{
VWriter::VWriter(WritableFile* vlogfile)
:vlogfile_(vlogfile){}
VWriter::~VWriter() = default;
Status VWriter::AddRecord(const Slice& slice, int& write_size){
//append slice length.
write_size = slice.size();
char buf[10]; // Used for Convert int64 to char.
char* end_byte = EncodeVarint64(buf, slice.size());
write_size += end_byte - buf;
Status s = vlogfile_->Append(Slice(buf, end_byte - buf));
//append slice
if(s.ok()) s = vlogfile_->Append(slice);
return s;
}
Status VWriter::Flush(){
return vlogfile_->Flush();
}
}// namespace vlog
}

+ 0
- 26
db/vlog_writer.h View File

@ -1,26 +0,0 @@
#ifndef STORAGE_LEVELDB_DB_VLOG_WRITER_H_
#define STORAGE_LEVELDB_DB_VLOG_WRITER_H_
#include <cstdint>
#include "leveldb/slice.h"
#include "leveldb/status.h"
// format: [size, key, vptr, value].
namespace leveldb{
class WritableFile;
namespace vlog{
class VWriter{
public:
explicit VWriter(WritableFile* vlogfile);
~VWriter();
Status AddRecord(const Slice& slice, int& write_size);
VWriter(const VWriter&) = delete;
VWriter& operator=(const VWriter&) = delete;
Status Flush();
private:
WritableFile* vlogfile_;
};
}// namespace vlog
}
#endif

+ 31
- 18
db/write_batch.cc View File

@ -21,6 +21,9 @@
#include "leveldb/db.h"
#include "util/coding.h"
#include <sstream> // For std::ostringstream 心
#include <cstdint>
namespace leveldb {
// WriteBatch header has an 8-byte sequence number followed by a 4-byte count.
@ -45,7 +48,6 @@ Status WriteBatch::Iterate(Handler* handler) const {
return Status::Corruption("malformed WriteBatch (too small)");
}
//rep_ Header 12字节, 包含8字节sequence和4字节count.
input.remove_prefix(kHeader);
Slice key, value;
int found = 0;
@ -73,7 +75,6 @@ Status WriteBatch::Iterate(Handler* handler) const {
return Status::Corruption("unknown WriteBatch tag");
}
}
//合法性判断
if (found != WriteBatchInternal::Count(this)) {
return Status::Corruption("WriteBatch has wrong count");
} else {
@ -104,6 +105,34 @@ void WriteBatch::Put(const Slice& key, const Slice& value) {
PutLengthPrefixedSlice(&rep_, value);
}
void WriteBatch::Put(const Slice& key, const Slice& value, uint64_t ttl) {
WriteBatchInternal::SetCount(this, WriteBatchInternal::Count(this) + 1);
rep_.push_back(static_cast<char>(kTypeValue));
PutLengthPrefixedSlice(&rep_, key);
// 获取当前时间
auto now = std::chrono::system_clock::now();
// 加上ttl
auto future_time = now + std::chrono::seconds(ttl);
// 转换为 time_t
std::time_t future_time_t = std::chrono::system_clock::to_time_t(future_time);
// 将 time_t 转换为 tm 结构
std::tm* local_tm = std::localtime(&future_time_t);
// 格式化为字符串
char buffer[20]; // 格式化字符串的缓冲区
std::strftime(buffer, sizeof(buffer), "%Y-%m-%d %H:%M:%S", local_tm);
std::string future_time_str(buffer);
// 拼接原本的值和时间字符串
std::string combined_str = value.ToString() + future_time_str;
PutLengthPrefixedSlice(&rep_, Slice(combined_str));
} // 心
void WriteBatch::Delete(const Slice& key) {
WriteBatchInternal::SetCount(this, WriteBatchInternal::Count(this) + 1);
rep_.push_back(static_cast<char>(kTypeDeletion));
@ -129,22 +158,6 @@ class MemTableInserter : public WriteBatch::Handler {
sequence_++;
}
};
//Use For KVSeqMem
class MemTableInserterKVSeq : public WriteBatch::Handler{
public:
SequenceNumber sequence_;
MemTable* mem_;
void Put(const Slice& key, const Slice& value) override {
assert(0 && "TODO");
mem_->Add(sequence_, kTypeValue, key, value);
sequence_++;
}
void Delete(const Slice& key) override {
mem_->Add(sequence_, kTypeDeletion, key, Slice());
sequence_++;
}
};
} // namespace
Status WriteBatchInternal::InsertInto(const WriteBatch* b, MemTable* memtable) {

+ 1
- 1
db/write_batch_internal.h View File

@ -35,7 +35,7 @@ class WriteBatchInternal {
static void SetContents(WriteBatch* batch, const Slice& contents);
static Status InsertInto(const WriteBatch* batch, MemTable* memtable);
static Status InsertInto(const WriteBatch* batch, MemTable* memtable);
static void Append(WriteBatch* dst, const WriteBatch* src);
};

+ 0
- 45
draw.py View File

@ -1,45 +0,0 @@
import matplotlib.pyplot as plt
x = [128, 256, 512, 1024, 2048, 3072, 4096]
y_w = [
[52.8, 67.0, 60.8, 52.3, 42.2, 34.2, 30.2],# noKVSep
[44.2, 87.5, 139.5, 274.2 ,426.3, 576.2, 770.4], # kvSepBeforeMem
[59.9, 102.4 ,147.9 ,173.5, 184.2, 199.2, 206.8] #kvSepBeforeSSD
]
y_r = [
[731.9, 1127.4, 1515.2, 3274.7, 4261.9, 4886.3, 4529.8],# noKVSep
[158.9, 154.9, 145.0, 160.9 , 147.3, 144.0, 127.4], # kvSepBeforeMem
[171.1, 136.0 ,179.8 ,169.8, 159.9, 161.5, 168.6] #kvSepBeforeSSD
]
y_random = [
[2.363, 2.698, 3.972, 3.735, 7.428, 12.137, 17.753],# noKVSep
[2.957, 2.953, 3.417, 3.363 ,3.954, 17.516, 79.023], # kvSepBeforeMem
[2.927, 2.739 ,2.947, 3.604, 3.530, 19.189, 80.608] #kvSepBeforeSSD
]
plt.figure(num = 1)
plt.title("Write Performance(fillrandom)")
plt.xlabel("Value size(B)")
plt.ylabel("Throughout(MiB/s)")
l1 = plt.plot(x, y_w[0], "bo", linestyle = "dashed")
l1 = plt.plot(x, y_w[1], "g^", linestyle = "dashed")
l1 = plt.plot(x, y_w[2], "y+", linestyle = "dashed")
plt.legend(["noKVSep", "kvSepBeforeMem", "kvSepBeforeSSD"])
plt.show()
plt.figure(num = 1)
plt.title("Read Performance(readreverse)")
plt.xlabel("Value size(B)")
plt.ylabel("Throughout(MiB/s)")
l1 = plt.plot(x, y_r[0], "bo", linestyle = "dashed")
l1 = plt.plot(x, y_r[1], "g^", linestyle = "dashed")
l1 = plt.plot(x, y_r[2], "y+", linestyle = "dashed")
plt.legend(["noKVSep", "kvSepBeforeMem", "kvSepBeforeSSD"])
plt.show()
plt.title("Read Performance(readrandom)")
plt.xlabel("Value size(B)")
plt.ylabel("Micros/op")
l1 = plt.plot(x, y_random[0], "bo", linestyle = "dashed")
l1 = plt.plot(x, y_random[1], "g^", linestyle = "dashed")
l1 = plt.plot(x, y_random[2], "y+", linestyle = "dashed")
plt.legend(["noKVSep", "kvSepBeforeMem", "kvSepBeforeSSD"])
plt.show()

+ 0
- 50
examples/GCtest.cc View File

@ -1,50 +0,0 @@
#include <iostream>
#include "leveldb/db.h"
#include "leveldb/options.h"
#include "gtest/gtest.h"
class LevelDBTest : public ::testing::Test {
protected:
leveldb::DB* db;
leveldb::Options options;
std::string db_path = "/tmp/testdb";
void SetUp() override {
options.create_if_missing = true;
leveldb::Status status = leveldb::DB::Open(options, db_path, &db);
ASSERT_TRUE(status.ok()) << "Failed to open DB: " << status.ToString();
}
void TearDown() override {
delete db;
}
};
TEST_F(LevelDBTest, CompactionTest) {
// 插入数据
db->Put(leveldb::WriteOptions(), "start", "value1");
db->Put(leveldb::WriteOptions(), "end", "value2");
db->Put(leveldb::WriteOptions(), "key_to_delete", "value3");
// 删除一个键,模拟删除标记
db->Delete(leveldb::WriteOptions(), "key_to_delete");
// 触发压缩
leveldb::Slice begin_key("start");
leveldb::Slice end_key("end");
db->CompactRange(&begin_key, &end_key);
// 验证压缩后的数据
std::string value;
leveldb::Status status = db->Get(leveldb::ReadOptions(), "key_to_delete", &value);
if (!status.ok()) {
std::cout << "'key_to_delete' was successfully removed during compaction." << std::endl;
} else {
FAIL() << "Unexpected: 'key_to_delete' still exists: " << value;
}
}
int main(int argc, char** argv) {
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
}

+ 0
- 28
examples/ValueConvertTest.cc View File

@ -1,28 +0,0 @@
#include <cassert>
#include <iostream>
#include "leveldb/db.h"
int main(){
leveldb::DB* db;
leveldb::Options options;
options.create_if_missing = true;
options.kvSepType = leveldb::kVSepBeforeSSD;
leveldb::Status status = leveldb::DB::Open(options, "/tmp/testdb", &db);
std::cout<< status.ToString() << '\n';
std::string fill_str = "";
// fill_str 4KB
for(int i = 1; i<= 4096; i++){
fill_str.push_back('%');
}
for(int i = 1E5; i>= 1; i--){
status = db -> Put(leveldb::WriteOptions(), "key" + std::to_string(i), "val" + std::to_string(i) + fill_str);
}
if(status.ok()) {
std::string val;
for(int i = 0; i< 1E5; i++){
status = db -> Get(leveldb::ReadOptions(), "key" + std::to_string(i), &val);
if(status.ok()) std::cout<< "Find value of \'key"<<i<<"\' From db:" << val << "\n";
}
}
delete db;
}

+ 0
- 173
examples/WiscKeyTest_1.cc View File

@ -1,173 +0,0 @@
#include "WiscKeyTest_1.h"
#include <fstream>
#include <algorithm>
#include <vector>
#include <ctime>
#include <cstdlib>
typedef struct WiscKey { // 集成了leveldb数据库和一个logfile链表
string dir;
DB * leveldb;
FILE * logfile;
} WK;
static bool wisckey_get(WK * wk, string &key, string &value)
{
cout << "\n\t\tGet Function\n\n";
cout << "Key Received: " << key << endl;
cout << "Value Received: " << value << endl;
string offsetinfo;
const bool found = leveldb_get(wk->leveldb, key, offsetinfo);
if (found) {
cout << "Offset and Length: " << offsetinfo << endl;
}
else {
cout << "Record:Not Found" << endl;
return false;
}
std::string value_offset;
std::string value_length;
std::string s = offsetinfo;
std::string delimiter = "&&";
size_t pos = 0;
std::string token;
while ((pos = s.find(delimiter)) != std::string::npos) {
token = s.substr(0, pos);
value_offset = token;
s.erase(0, pos + delimiter.length());
}
value_length = s;
cout << "Value Offset: " << value_offset << endl;
cout << "Value Length: " << value_length << endl;
std::string::size_type sz;
long offset = std::stol (value_offset,&sz);
long length = std::stol (value_length,&sz);
//cout << offset << length << endl;
std::string value_record;
//cout << ftell(wk->logread) << endl;
fseek(wk->logfile,offset,SEEK_SET);
//cout << ftell(wk->logfile) << endl;
//rewind(wk->logfile);
//cout << ftell(wk->logfile) << endl;
fread(&value_record,length,1,wk->logfile);
//rewind(wk->logfile);
cout << "LogFile Value: " << value_record << endl;
return true;
}
// Append `value` to the log file and index its location in LevelDB under
// `key` as the string "<offset>&&<size>".
static void wisckey_set(WK * wk, string &key, string &value)
{
    long offset = ftell (wk->logfile);
    // BUG FIX: the original used sizeof(value) — the size of the std::string
    // OBJECT, not its payload — and fwrote the object's internal bytes.
    // Record the real payload length and write the actual character data.
    long size = static_cast<long>(value.size());
    std::string vlog_offset = std::to_string(offset);
    std::string vlog_size = std::to_string(size);
    std::stringstream vlog_value;
    vlog_value << vlog_offset << "&&" << vlog_size;
    std::string s = vlog_value.str();
    if (!value.empty()) {
        fwrite (value.data(), 1, value.size(), wk->logfile);
    }
    leveldb_set(wk->leveldb, key, s);
}
// Remove `key`'s index entry from LevelDB. The value bytes remain in the
// log file — this layer has no garbage collection.
static void wisckey_del(WK * wk, string &key)
{
    cout << "Key: ";
    cout << key << endl;
    leveldb_del(wk->leveldb, key);
}
// Create a WiscKey handle: a fresh LevelDB index for `dirname` plus a
// value-log file opened truncated in binary read/write mode.
static WK * open_wisckey(const string& dirname)
{
    WK * handle = new WK;
    handle->dir = dirname;
    handle->leveldb = open_leveldb(dirname);
    handle->logfile = fopen("logfile", "wb+");
    return handle;
}
// Tear down a handle made by open_wisckey: close the log, close the index,
// then free the wrapper itself.
static void close_wisckey(WK * wk)
{
    fclose(wk->logfile);
    delete wk->leveldb;
    delete wk;
}
// For testing wisckey functionality.
// Runs one set/get cycle on `key`/`value`, deletes the key, then probes a
// hard-coded key/value pair to exercise the read-after-delete / miss path.
static void testing_function(WK * wk, string &key, string &value)
{
    /* Setting Value and Testing it */
    cout << "\n\n\t\tInput Received\n" << endl;
    cout << "Key: " << key << endl;
    cout << "Value: " << value << endl;
    wisckey_set(wk,key,value);
    const bool found = wisckey_get(wk,key,value);
    if (found) {
        cout << "Record Matched" << endl;
    }
    /* Deleting Value */
    cout << "\n\n\t\tDelete Operation\n" << endl;
    wisckey_del(wk,key);
    cout << "Delete Successful" << endl;
    /* Read after Delete */
    // NOTE(review): this probe uses a fixed key that was never inserted in
    // this run, so wisckey_get is expected to report "Not Found" — confirm.
    cout << "\n\n\t\tInput Received\n" << endl;
    string testkey= "1001224314";
    string testvalue = "Abhishek Sharma";
    cout << "Key: " << testkey << endl;
    cout << "Value: " << testvalue << endl;
    const bool testfound = wisckey_get(wk,testkey,testvalue);
    if (testfound) {
        cout << "Record Matched" << endl;
    }
}
// Benchmark driver: fill the store with roughly 1 GB of random key/value
// pairs of the requested value size and report elapsed CPU time.
int main(int argc, char ** argv)
{
    if (argc < 2) {
        cout << "Usage: " << argv[0] << " <value-size>" << endl;
        exit(0);
    }
    const size_t value_size = std::stoull(argv[1], NULL, 10);
    if (value_size < 1 || value_size > 100000) {
        cout << " <value-size> must be positive and less then 100000" << endl;
        exit(0);
    }
    WK * wk = open_wisckey("wisckey_test_dir"); // open the database
    if (wk == NULL) {
        cerr << "Open WiscKey failed!" << endl;
        exit(1);
    }
    // Build one random payload reused for every insert.
    char * vbuf = new char[value_size];
    for (size_t i = 0; i < value_size; i++) {
        vbuf[i] = rand();
    }
    string value = string(vbuf, value_size);
    delete[] vbuf;  // BUG FIX: the buffer was never freed in the original
    // Insert nfill = 1,000,000,000 / (value_size + 8) random records.
    size_t nfill = 1000000000 / (value_size + 8);
    clock_t t0 = clock();
    size_t p1 = nfill / 40; // report progress roughly forty times
    for (size_t j = 0; j < nfill; j++) {
        string key = std::to_string(((size_t)rand())*((size_t)rand()));
        wisckey_set(wk, key, value);
        if (j >= p1) {
            clock_t dt = clock() - t0;
            // print progress and CPU time consumed so far
            cout << "progress: " << j+1 << "/" << nfill << " time elapsed: " << dt * 1.0e-6 << endl << std::flush;
            p1 += (nfill / 40);
        }
    }
    clock_t dt = clock() - t0;
    cout << "time elapsed: " << dt * 1.0e-6 << " seconds" << endl;
    close_wisckey(wk);
    destroy_leveldb("wisckey_test_dir");
    remove("logfile");
    exit(0);
}

+ 0
- 81
examples/WiscKeyTest_1.h View File

@ -1,81 +0,0 @@
#pragma once
#include <assert.h>
#include <vector>
#include <iostream>
#include <sstream>
#include <string>
#include <ctime>
#include <algorithm>
#include <cstdlib>
// #include <boost/algorithm/string.hpp>
#include "leveldb/db.h"
#include "leveldb/filter_policy.h"
#include "leveldb/write_batch.h"
using std::string;
using std::vector;
using std::cin;
using std::cout;
using std::cerr;
using std::endl;
using std::stringstream;
using leveldb::ReadOptions;
using leveldb::Options;
using leveldb::Status;
using leveldb::WriteBatch;
using leveldb::WriteOptions;
using leveldb::DB;
// Fetch `key` into `value`. Returns false when the key is absent,
// true when it was found; aborts on any other error status.
static bool
leveldb_get(DB * db, string &key, string &value)
{
    ReadOptions ropt;
    Status s = db->Get(ropt, key, &value);
    // BUG FIX: IsNotFound() implies !ok(), so the original assert(s.ok())
    // aborted on every missing key before the not-found branch could run.
    // Accept both success and not-found; abort only on real errors.
    assert(s.ok() || s.IsNotFound());
    if (s.IsNotFound()) {
        return false;
    } else {
        return true;
    }
}
// Store key -> value through a single-entry write batch (same code path as
// the original; equivalent in effect to a plain db->Put).
static void
leveldb_set(DB * db, string &key, string &value)
{
    WriteBatch batch;
    batch.Put(key, value);
    WriteOptions wopt;
    Status s = db->Write(wopt, &batch);
    assert(s.ok());
}
// Delete `key` with default write options; abort on a non-OK status.
static void
leveldb_del(DB * db, string &key)
{
    WriteOptions wopt;
    Status s = db->Delete(wopt, key);
    assert(s.ok());
}
static void
destroy_leveldb(const string &dirname)
{
Options options;
leveldb::DestroyDB(dirname, options);
}
// Destroy any previous contents of `dirname`, then open a fresh database
// with a 10-bits-per-key bloom filter and a 2 MiB write buffer. Returns the
// pointer DB::Open produced; the caller is responsible for checking it.
static DB *
open_leveldb(const string &dirname)
{
    Options opts;
    opts.create_if_missing = true;
    // NOTE(review): the filter policy is never freed — leaked for the
    // process lifetime (matches the original behavior).
    opts.filter_policy = leveldb::NewBloomFilterPolicy(10);
    opts.write_buffer_size = 1u << 21;
    destroy_leveldb(dirname);
    DB * db = NULL;
    Status s = DB::Open(opts, dirname, &db);
    return db;
}

+ 0
- 76
examples/iterator_test.cc View File

@ -1,76 +0,0 @@
#include <iostream>
#include "leveldb/db.h"
#include "leveldb/options.h"
#include "gtest/gtest.h"
// gtest fixture: opens a scratch LevelDB at /tmp/range_testdb before each
// test and closes it afterwards. NOTE(review): the directory is never
// destroyed, so data persists across test runs — confirm this is intended.
class RangeQueryTest : public ::testing::Test {
protected:
    leveldb::DB* db;                            // handle owned by the fixture
    leveldb::Options options;
    std::string db_path = "/tmp/range_testdb";  // scratch database location
    void SetUp() override {
        options.create_if_missing = true;
        leveldb::Status status = leveldb::DB::Open(options, db_path, &db);
        ASSERT_TRUE(status.ok()) << "Failed to open DB: " << status.ToString();
    }
    void TearDown() override {
        delete db;
    }
};
// Verifies an inclusive lexicographic range scan [range_start, range_end]
// via Seek + Next returns exactly the expected key/value pairs, in order.
TEST_F(RangeQueryTest, TestRangeQuery) {
    // Insert a batch of key/value pairs.
    std::vector<std::pair<std::string, std::string>> data = {
        {"a_key1", "value1"},
        {"a_key2", "value2"},
        {"b_key3", "value3"},
        {"b_key4", "value4"},
        {"c_key5", "value5"}
    };
    for (const auto& pair : data) {
        const std::string& key = pair.first;
        const std::string& value = pair.second;
        leveldb::Status status = db->Put(leveldb::WriteOptions(), key, value);
        ASSERT_TRUE(status.ok()) << "Failed to put data: " << status.ToString();
    }
    // Query the key/value pairs inside the range (bounds inclusive).
    std::string range_start = "a_key1";
    std::string range_end = "b_key4";
    std::vector<std::pair<std::string, std::string>> expected_data = {
        {"a_key1", "value1"},
        {"a_key2", "value2"},
        {"b_key3", "value3"},
        {"b_key4", "value4"}
    };
    leveldb::Iterator* it = db->NewIterator(leveldb::ReadOptions());
    it->Seek(range_start); // position at the first key >= range_start
    std::vector<std::pair<std::string, std::string>> actual_data;
    // Stop once the key compares lexicographically greater than range_end.
    while (it->Valid() && it->key().ToString() <= range_end) {
        actual_data.emplace_back(it->key().ToString(), it->value().ToString());
        it->Next();
    }
    delete it;
    // Verify the range-query results match expectations.
    ASSERT_EQ(actual_data.size(), expected_data.size()) << "Range query results size mismatch.";
    for (size_t i = 0; i < actual_data.size(); ++i) {
        EXPECT_EQ(actual_data[i].first, expected_data[i].first) << "Key mismatch at index " << i;
        EXPECT_EQ(actual_data[i].second, expected_data[i].second) << "Value mismatch at index " << i;
    }
    // Print the range-query results.
    for (const auto& pair : actual_data) {
        const std::string& key = pair.first;
        const std::string& value = pair.second;
        std::cout << "Key: " << key << ", Value: " << value << std::endl;
    }
}
// Standard gtest entry point: parse gtest flags, then run every test.
int main(int argc, char** argv) {
    ::testing::InitGoogleTest(&argc, argv);
    return RUN_ALL_TESTS();
}

+ 0
- 43
examples/kv_sep_test.cc View File

@ -1,43 +0,0 @@
#include <cassert>
#include <iostream>
#include "leveldb/db.h"
#include "db/db_impl.h"
// Smoke test for the KV-separation mode: open with kVSepBeforeMem, then
// exercise Put / Get / Delete / Get through the DBImpl interface.
int main() {
    leveldb::DB* db;
    leveldb::Options options;
    options.create_if_missing = true;
    options.kvSepType = leveldb::kVSepBeforeMem;  // separate values before the memtable
    leveldb::Status status = leveldb::DB::Open(options, "/tmp/testdb", &db);
    if (!status.ok()) {
        std::cerr << "Failed to open DB: " << status.ToString() << '\n';
        return 1;
    }
    // Downcast the base pointer to DBImpl.
    // NOTE(review): despite the original comment mentioning dynamic_cast,
    // this is a static_cast — it can never yield nullptr, so the check
    // below is dead code; consider dynamic_cast if the check matters.
    auto* dbimpl = static_cast<leveldb::DBImpl*>(db);
    if (dbimpl == nullptr) {
        std::cerr << "Failed to cast to DBImpl\n";
        delete db;
        return 1;
    }
    status = dbimpl->Put(leveldb::WriteOptions(), "key1", "val1");
    if (status.ok()) {
        std::string val;
        status = dbimpl->Get(leveldb::ReadOptions(), "key1", &val);
        std::cout << "Find value of 'key1' From db: " << val << "\n";
    }
    if (status.ok()) {
        std::string val;
        dbimpl->Delete(leveldb::WriteOptions(), "key1");
        status = dbimpl->Get(leveldb::ReadOptions(), "key1", &val);
        // Not found.
        std::cout << status.ToString() << '\n';
    }
    delete db;
    return 0;
}

+ 0
- 23
examples/main.cc View File

@ -1,23 +0,0 @@
#include <cassert>
#include <iostream>
#include "leveldb/db.h"
#include "gtest/gtest.h"
// Sanity check: the database opens successfully (creating it if absent).
TEST(Usage, InitDb) {
    leveldb::DB* db;
    leveldb::Options options;
    options.create_if_missing = true;
    leveldb::Status status = leveldb::DB::Open(options, "/tmp/test_db", &db);
    std::cout << "db started, status: " << status.ToString() << std::endl;
    assert(status.ok());
    delete db;
}
// gtest entry point; announces the source file, then runs all tests.
int main(int argc, char** argv) {
    printf("Running main() from %s\n", __FILE__);
    ::testing::InitGoogleTest(&argc, argv);
    return RUN_ALL_TESTS();
}

+ 0
- 25
examples/test_1.cc View File

@ -1,25 +0,0 @@
#include <cassert>
#include <iostream>
#include "leveldb/db.h"
// Smoke test: open (creating if needed), put, get, delete, get again.
int main(){
    // BUG FIX: `db` was uninitialized and the original called db->Put even
    // when Open failed — dereferencing a garbage pointer. Initialize to
    // nullptr and guard every DB call on the previous status.
    leveldb::DB* db = nullptr;
    leveldb::Options options;
    options.create_if_missing = true;
    leveldb::Status status = leveldb::DB::Open(options, "/tmp/testdb", &db);
    std::cout<< status.ToString() << '\n';
    if(status.ok()) {
        status = db -> Put(leveldb::WriteOptions(), "key1", "val1");
    }
    if(status.ok()) {
        std::string val;
        status = db -> Get(leveldb::ReadOptions(), "key1", &val);
        std::cout<< "Find value of \'key1\' From db:" << val << "\n";
    }
    if(status.ok()){
        std::string val;
        db -> Delete(leveldb::WriteOptions(), "key1");
        status = db -> Get(leveldb::ReadOptions(), "key1", &val);
        // Expected: NotFound, since the key was just deleted.
        std::cout<< status.ToString() <<'\n';
    }
    delete db;  // deleting nullptr is a no-op, so this is safe on failure
}

+ 0
- 3
gitpush.sh View File

@ -1,3 +0,0 @@
#!/bin/sh
# Stage everything, commit with the message given as $1, and push to origin/main.
# BUG FIX: the original ran with no message check and ignored failures, so an
# empty "$1" produced a broken commit and a failed commit still pushed.
set -e
if [ -z "$1" ]; then
  echo "usage: $0 <commit-message>" >&2
  exit 1
fi
git add .
git commit -m "$1"
git push origin main

+ 0
- 8
helpers/memenv/memenv.cc View File

@ -177,14 +177,6 @@ class SequentialFileImpl : public SequentialFile {
return Status::OK();
}
// Reposition the read cursor to absolute offset `n` from the start of the
// in-memory file.
Status SkipFromHead(uint64_t n) override{
    // BUG FIX: the original validated pos_ (the CURRENT position) instead of
    // the requested target n, so an out-of-range target was silently accepted.
    if (n > file_->Size()) {
        return Status::IOError("n > file_->Size()");
    }
    pos_ = n;
    return Status::OK();
}
private:
FileState* file_;
uint64_t pos_;

BIN
images/1.png View File

Before After
Width: 807  |  Height: 151  |  Size: 9.1 KiB

BIN
images/10.png View File

Before After
Width: 1167  |  Height: 183  |  Size: 28 KiB

BIN
images/11.png View File

Before After
Width: 828  |  Height: 361  |  Size: 129 KiB

BIN
images/12.webp View File

Before After

BIN
images/13.png View File

Before After
Width: 3791  |  Height: 1024  |  Size: 106 KiB

BIN
images/14.png View File

Before After
Width: 3158  |  Height: 1729  |  Size: 223 KiB

BIN
images/15.png View File

Before After
Width: 2879  |  Height: 2220  |  Size: 276 KiB

BIN
images/16.png View File

Before After
Width: 870  |  Height: 849  |  Size: 96 KiB

BIN
images/2.png View File

Before After
Width: 1017  |  Height: 171  |  Size: 13 KiB

BIN
images/3.png View File

Before After
Width: 557  |  Height: 342  |  Size: 76 KiB

BIN
images/4.png View File

Before After
Width: 793  |  Height: 510  |  Size: 81 KiB

BIN
images/5.png View File

Before After
Width: 737  |  Height: 212  |  Size: 96 KiB

BIN
images/6.png View File

Before After
Width: 738  |  Height: 386  |  Size: 75 KiB

BIN
images/7.png View File

Before After
Width: 754  |  Height: 340  |  Size: 40 KiB

BIN
images/8.png View File

Before After
Width: 703  |  Height: 174  |  Size: 24 KiB

BIN
images/9.png View File

Before After
Width: 857  |  Height: 183  |  Size: 13 KiB

BIN
images/GC_test.png View File

Before After
Width: 1918  |  Height: 1032  |  Size: 128 KiB

BIN
images/c1.1.png View File

Before After
Width: 1495  |  Height: 719  |  Size: 51 KiB

BIN
images/c1.2.png View File

Before After
Width: 1511  |  Height: 723  |  Size: 56 KiB

BIN
images/c1.png View File

Before After
Width: 1359  |  Height: 617  |  Size: 42 KiB

BIN
images/c10.png View File

Before After
Width: 1407  |  Height: 588  |  Size: 127 KiB

BIN
images/c2.png View File

Before After
Width: 1483  |  Height: 617  |  Size: 55 KiB

BIN
images/c3.png View File

Before After
Width: 1373  |  Height: 521  |  Size: 47 KiB

BIN
images/c4.png View File

Before After
Width: 1599  |  Height: 1217  |  Size: 144 KiB

BIN
images/c5.png View File

Before After
Width: 1571  |  Height: 1223  |  Size: 144 KiB

BIN
images/c6.png View File

Before After
Width: 3196  |  Height: 965  |  Size: 234 KiB

BIN
images/c7.png View File

Before After
Width: 1479  |  Height: 1021  |  Size: 285 KiB

BIN
images/c8.png View File

Before After
Width: 1517  |  Height: 1057  |  Size: 453 KiB

BIN
images/c9.png View File

Before After
Width: 1397  |  Height: 585  |  Size: 136 KiB

BIN
images/field_test.png View File

Before After
Width: 1918  |  Height: 1032  |  Size: 132 KiB

BIN
images/iterate_test.png View File

Before After
Width: 1918  |  Height: 1032  |  Size: 147 KiB

BIN
images/kv_sep_test.png View File

Before After
Width: 1918  |  Height: 1032  |  Size: 135 KiB

+ 14
- 3
include/leveldb/db.h View File

@ -7,12 +7,18 @@
#include <cstdint>
#include <cstdio>
#include <vector>
#include "leveldb/export.h"
#include "leveldb/iterator.h"
#include "leveldb/options.h"
#include "util/coding.h"
#include <vector>
namespace leveldb {
//
using Field = std::pair<std::string, std::string>; // field_name:field_value
using FieldArray = std::vector<std::pair<std::string, std::string>>;
@ -88,7 +94,7 @@ class LEVELDB_EXPORT DB {
// May return some other Status on an error.
virtual Status Get(const ReadOptions& options, const Slice& key,
std::string* value) = 0;
virtual std::vector<std::string> FindKeysByField(leveldb::DB* db, Field& field) =0;
// Return a heap-allocated iterator over the contents of the database.
// The result of NewIterator() is initially invalid (caller must
// call one of the Seek methods on the iterator before using it).
@ -147,6 +153,11 @@ class LEVELDB_EXPORT DB {
// Therefore the following call will compact the entire database:
// db->CompactRange(nullptr, nullptr);
virtual void CompactRange(const Slice* begin, const Slice* end) = 0;
// ----------------------------For TTL-----------------------------
// key设置ttl
virtual Status Put(const WriteOptions& options, const Slice& key,
const Slice& value, uint64_t ttl) = 0;
};
// Destroy the contents of the specified database.
@ -162,7 +173,7 @@ LEVELDB_EXPORT Status DestroyDB(const std::string& name,
// Some data may be lost, so be careful when calling this function
// on a database that contains important information.
LEVELDB_EXPORT Status RepairDB(const std::string& dbname,
const Options& options, DB* db_);
const Options& options);
} // namespace leveldb

+ 0
- 22
include/leveldb/env.h View File

@ -21,8 +21,6 @@
#include "leveldb/export.h"
#include "leveldb/status.h"
#include <unordered_set>
// This workaround can be removed when leveldb::Env::DeleteFile is removed.
#if defined(_WIN32)
// On Windows, the method name DeleteFile (below) introduces the risk of
@ -248,26 +246,6 @@ class LEVELDB_EXPORT SequentialFile {
//
// REQUIRES: External synchronization
virtual Status Skip(uint64_t n) = 0;
// To Support Randomly Access, TODO: Fix RandomAccessFile Bugs.
virtual Status SkipFromHead(uint64_t n) = 0;
//
void MarkValueInvalid(uint64_t offset) {
invalid_offsets_.insert(offset);
}
// value都无效了
bool AllValuesInvalid() const {
return invalid_offsets_.size() >= total_value_count_;
}
void IncrementTotalValueCount() {
total_value_count_ += 1;
}
private:
std::unordered_set<uint64_t> invalid_offsets_; //
uint64_t total_value_count_ = 0; //
};
// A file abstraction for randomly reading the contents of a file.

+ 0
- 21
include/leveldb/options.h View File

@ -30,16 +30,6 @@ enum CompressionType {
kZstdCompression = 0x2,
};
// DB KVSeqType:
// default: No KVSeq
// kVSeqBeforeMem: MemTable前进行KV分离
// kVSeqBeforeSSD: SSD前进行KV分离
enum KVSepType{
noKVSep = 0x0,
kVSepBeforeMem = 0x1,
kVSepBeforeSSD = 0x2,
};
// Options to control the behavior of a database (passed to DB::Open)
struct LEVELDB_EXPORT Options {
// Create an Options object with default values for all fields.
@ -155,15 +145,6 @@ struct LEVELDB_EXPORT Options {
// Many applications will benefit from passing the result of
// NewBloomFilterPolicy() here.
const FilterPolicy* filter_policy = nullptr;
//The threshold of Value should be separated. Default value is set to 4KB, According to Titan.
int kvsepThreshold = 4096;
//Choose the DataBase Model
KVSepType kvSepType = noKVSep;
//threshold of each vlog files
int vlog_file_size = 64 * 1024 * 1024;
};
// Options that control read operations
@ -202,8 +183,6 @@ struct LEVELDB_EXPORT WriteOptions {
// with sync==true has similar crash semantics to a "write()"
// system call followed by "fsync()".
bool sync = false;
};
} // namespace leveldb

+ 3
- 0
include/leveldb/slice.h View File

@ -51,6 +51,9 @@ class LEVELDB_EXPORT Slice {
// Return true iff the length of the referenced data is zero
bool empty() const { return size_ == 0; }
const char* begin() const { return data(); }
const char* end() const { return data() + size(); }
// Return the ith byte in the referenced data.
// REQUIRES: n < size()
char operator[](size_t n) const {

+ 3
- 0
include/leveldb/write_batch.h View File

@ -25,6 +25,7 @@
#include "leveldb/export.h"
#include "leveldb/status.h"
#include <cstdint>
namespace leveldb {
@ -50,6 +51,8 @@ class LEVELDB_EXPORT WriteBatch {
// Store the mapping "key->value" in the database.
void Put(const Slice& key, const Slice& value);
void Put(const Slice& key, const Slice& value, uint64_t ttl); //
// If the database contains a mapping for "key", erase it. Else do nothing.
void Delete(const Slice& key);

+ 0
- 0
prefetch.txt View File


+ 74
- 0
test/db_test2.cc View File

@ -0,0 +1,74 @@
#include "leveldb/db.h"
#include "leveldb/filter_policy.h"
#include <iostream>
#include <cstdlib>
#include <ctime>
using namespace leveldb;
constexpr int value_size = 2048;
constexpr int data_size = 256 << 20;
// 3. Data management (Manifest / creating / recovering the database)
// Open `dbName`, creating it on first use, with a 10-bits-per-key bloom filter.
Status OpenDB(std::string dbName, DB **db) {
    Options opts;
    opts.create_if_missing = true;
    // NOTE(review): the filter policy is never freed — leaked for the
    // process lifetime, which matches the original code.
    opts.filter_policy = NewBloomFilterPolicy(10);
    return DB::Open(opts, dbName, db);
}
// 1. Storage (data structures and writes)
// 4. Compaction
// Write data_size/value_size records with random keys in [1, key_count]
// (collisions overwrite, so fewer distinct keys may exist afterwards) and a
// constant value of value_size 'a' bytes.
void InsertData(DB *db) {
    WriteOptions wopt;
    const int key_count = data_size / value_size;
    srand(static_cast<unsigned int>(time(0)));
    const std::string payload(value_size, 'a');  // identical payload for every key
    for (int i = 0; i < key_count; ++i) {
        const std::string key = std::to_string(rand() % key_count + 1);
        db->Put(wopt, key, payload);
    }
}
// 2. Data access (reads)
// Issue 100 random point lookups, then one full forward range scan.
// The `size` parameter is part of the published signature but unused.
void GetData(DB *db, int size = (1 << 30)) {
    ReadOptions ropt;
    const int key_count = data_size / value_size;
    // Point lookups: 100 random keys in the inserted key range.
    srand(static_cast<unsigned int>(time(0)));
    for (int i = 0; i < 100; ++i) {
        std::string value;
        db->Get(ropt, std::to_string(rand() % key_count + 1), &value);
    }
    // Range scan: walk the whole keyspace once, discarding entries.
    Iterator *iter = db->NewIterator(ropt);
    for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
    }
    delete iter;
}
// Phase 1: populate the store and close it. Phase 2: reopen the same store
// and exercise point reads plus a full scan. Both phases are skipped
// silently if OpenDB fails.
int main() {
    DB *db;
    if(OpenDB("testdb", &db).ok()) {
        InsertData(db);
        delete db;
    }
    if(OpenDB("testdb", &db).ok()) {
        GetData(db);
        delete db;
    }
    return 0;
}

examples/field_test.cc → test/field_test.cc View File

@ -1,4 +1,5 @@
#include "gtest/gtest.h"
#include "leveldb/env.h"
@ -11,29 +12,29 @@ using namespace leveldb;
constexpr int value_size = 2048;
constexpr int data_size = 128 << 20;
// // 根据字段值查找所有包含该字段的 key
// std::vector<std::string> FindKeysByField(leveldb::DB* db, Field &field) {
// Iterator* iter = db->NewIterator(ReadOptions());
// std::vector<std::string> ret_keys;
// int64_t bytes = 0;
// for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
// auto fields_ret = ParseValue(iter->value().data());
// for (Field each_field : fields_ret)
// {
// std::cout << each_field.first << " " << each_field.second << std::endl;
// if (field.first.compare(each_field.first) == 0) {
// if (field.second.compare(each_field.second)==0)
// {
// ret_keys.push_back(iter->key().data());
// }
// else
// break;
// }
// }
// }
// delete iter;
// return ret_keys;
// }
// Find every key whose parsed value contains the given field (name AND value
// must match). Scans the whole database with an iterator.
std::vector<std::string> FindKeysByField(leveldb::DB* db, Field &field) {
    Iterator* iter = db->NewIterator(ReadOptions());
    std::vector<std::string> ret_keys;
    for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
        // NOTE(review): Slice::data() is not NUL-terminated; if ParseValue
        // expects a C string this can over-read — confirm its contract.
        auto fields_ret = ParseValue(iter->value().data());
        for (Field each_field : fields_ret)
        {
            std::cout << each_field.first << " " << each_field.second << std::endl;
            if (field.first.compare(each_field.first) == 0) {
                if (field.second.compare(each_field.second)==0)
                {
                    // BUG FIX: key().data() is not NUL-terminated, so building
                    // the result from the raw pointer could include trailing
                    // bytes from the next key. Use ToString() (data + size).
                    ret_keys.push_back(iter->key().ToString());
                }
                else
                    break;  // field name matched but value differs: skip record
            }
        }
    }
    delete iter;
    return ret_keys;
}
Status OpenDB(std::string dbName, DB **db) {
@ -57,7 +58,7 @@ TEST(TestField, Read) {
{"phone", "25-989-741-2988"}
};
// 序列化并插入,ckx
// 序列化并插入
std::string value = SerializeValue(fields);
db->Put(WriteOptions(), key, value);
@ -107,7 +108,7 @@ TEST(TestField, Find) {
Field field = {"sex", "f"};
std::vector<std::string> key_ret;
// 查询得到对应的key
key_ret = db->FindKeysByField(db, field);
key_ret = FindKeysByField(db, field);
for (int i = 0; i < key_ret.size(); i++) {
std::cout << "找到的键:" << key_ret[i] << std::endl;
}

+ 114
- 0
test/ttl_test.cc View File

@ -0,0 +1,114 @@
#include "gtest/gtest.h"
#include "leveldb/env.h"
#include "leveldb/db.h"
using namespace leveldb;
constexpr int value_size = 2048;
constexpr int data_size = 128 << 20;
// Open the database at `dbName`, creating it on first use.
Status OpenDB(std::string dbName, DB **db) {
    Options opts;
    opts.create_if_missing = true;
    return DB::Open(opts, dbName, db);
}
// Write data_size/value_size records with random keys in [1, key_count],
// each carrying the given time-to-live in seconds. Key collisions simply
// overwrite, so not every key in the range is guaranteed to be present.
void InsertData(DB *db, uint64_t ttl/* second */) {
    WriteOptions wopt;
    const int key_count = data_size / value_size;
    srand(static_cast<unsigned int>(time(0)));
    const std::string payload(value_size, 'a');
    for (int i = 0; i < key_count; ++i) {
        const std::string key = std::to_string(rand() % key_count + 1);
        db->Put(wopt, key, payload, ttl);
    }
}
// Issue 100 random point lookups, ignoring the results.
// The `size` parameter is part of the published signature but unused.
void GetData(DB *db, int size = (1 << 30)) {
    ReadOptions ropt;
    const int key_count = data_size / value_size;
    // Point lookups over the same key range InsertData used.
    srand(static_cast<unsigned int>(time(0)));
    for (int i = 0; i < 100; ++i) {
        std::string value;
        db->Get(ropt, std::to_string(rand() % key_count + 1), &value);
    }
}
// Entries written with a TTL must be readable before it elapses and gone
// after. NOTE(review): both loops probe RANDOM keys; InsertData's random
// keys do not cover the whole range, so the pre-sleep ASSERT_TRUE can hit a
// never-inserted key and fail spuriously — confirm intended.
TEST(TestTTL, ReadTTL) {
    DB *db;
    if(OpenDB("testdb", &db).ok() == false) {
        std::cerr << "open db failed" << std::endl;
        abort();
    }
    uint64_t ttl = 20;  // seconds
    InsertData(db, ttl);
    ReadOptions readOptions;
    Status status;
    int key_num = data_size / value_size;
    srand(static_cast<unsigned int>(time(0)));
    // Before the TTL elapses every probed key should still be readable.
    for (int i = 0; i < 100; i++) {
        int key_ = rand() % key_num+1;
        std::string key = std::to_string(key_);
        std::string value;
        status = db->Get(readOptions, key, &value);
        ASSERT_TRUE(status.ok());
    }
    Env::Default()->SleepForMicroseconds(ttl * 1000000);
    for (int i = 0; i < 100; i++) {
        int key_ = rand() % key_num+1;
        std::string key = std::to_string(key_);
        std::string value;
        status = db->Get(readOptions, key, &value);
        // After sleeping past the TTL, every key/value pair should have expired.
        ASSERT_FALSE(status.ok());
    }
}
// After the TTL elapses, a full manual compaction should physically drop the
// expired entries, leaving the approximate size of the key range at zero.
TEST(TestTTL, CompactionTTL) {
    DB *db;
    if(OpenDB("testdb", &db).ok() == false) {
        std::cerr << "open db failed" << std::endl;
        abort();
    }
    uint64_t ttl = 20;  // seconds
    InsertData(db, ttl);
    leveldb::Range ranges[1];
    ranges[0] = leveldb::Range("-", "A");  // covers the numeric string keys
    uint64_t sizes[1];
    db->GetApproximateSizes(ranges, 1, sizes);
    ASSERT_GT(sizes[0], 0);  // data is present before expiry
    Env::Default()->SleepForMicroseconds(ttl * 1000000);
    db->CompactRange(nullptr, nullptr);  // compact everything, reclaiming expired entries
    // (reviewer note: a redundant redeclaration of ranges/sizes was removed here)
    ranges[0] = leveldb::Range("-", "A");
    db->GetApproximateSizes(ranges, 1, sizes);
    ASSERT_EQ(sizes[0], 0);  // everything expired and was compacted away
}
// gtest entry point for the TTL tests.
int main(int argc, char** argv) {
    // All tests currently run with the same read-only file limits.
    testing::InitGoogleTest(&argc, argv);
    return RUN_ALL_TESTS();
}

+ 1
- 0
third_party/benchmark

@ -0,0 +1 @@
Subproject commit f7547e29ccaed7b64ef4f7495ecfff1c9f6f3d03

+ 0
- 5
third_party/benchmark/.clang-format View File

@ -1,5 +0,0 @@
---
Language: Cpp
BasedOnStyle: Google
PointerAlignment: Left
...

+ 0
- 7
third_party/benchmark/.clang-tidy View File

@ -1,7 +0,0 @@
---
Checks: 'clang-analyzer-*,readability-redundant-*,performance-*'
WarningsAsErrors: 'clang-analyzer-*,readability-redundant-*,performance-*'
HeaderFilterRegex: '.*'
AnalyzeTemporaryDtors: false
FormatStyle: none
User: user

+ 0
- 32
third_party/benchmark/.github/ISSUE_TEMPLATE/bug_report.md View File

@ -1,32 +0,0 @@
---
name: Bug report
about: Create a report to help us improve
title: "[BUG]"
labels: ''
assignees: ''
---
**Describe the bug**
A clear and concise description of what the bug is.
**System**
Which OS, compiler, and compiler version are you using:
- OS:
- Compiler and version:
**To reproduce**
Steps to reproduce the behavior:
1. sync to commit ...
2. cmake/bazel...
3. make ...
4. See error
**Expected behavior**
A clear and concise description of what you expected to happen.
**Screenshots**
If applicable, add screenshots to help explain your problem.
**Additional context**
Add any other context about the problem here.

+ 0
- 20
third_party/benchmark/.github/ISSUE_TEMPLATE/feature_request.md View File

@ -1,20 +0,0 @@
---
name: Feature request
about: Suggest an idea for this project
title: "[FR]"
labels: ''
assignees: ''
---
**Is your feature request related to a problem? Please describe.**
A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
**Describe the solution you'd like**
A clear and concise description of what you want to happen.
**Describe alternatives you've considered**
A clear and concise description of any alternative solutions or features you've considered.
**Additional context**
Add any other context or screenshots about the feature request here.

+ 0
- 13
third_party/benchmark/.github/install_bazel.sh View File

@ -1,13 +0,0 @@
#!/bin/sh
# Install Bazel 6.0.0 from GitHub releases unless a working bazel binary
# already exists on PATH. Intended for yum-based CI images run as root.
if ! bazel version; then
  arch=$(uname -m)
  # GitHub release assets name the 64-bit ARM binary "arm64", not "aarch64".
  # BUG FIX: `==` inside [ ] is a bashism; use POSIX `=` so the script also
  # works under /bin/sh (dash et al.).
  if [ "$arch" = "aarch64" ]; then
    arch="arm64"
  fi
  echo "Installing wget and downloading $arch Bazel binary from GitHub releases."
  yum install -y wget
  wget "https://github.com/bazelbuild/bazel/releases/download/6.0.0/bazel-6.0.0-linux-$arch" -O /usr/local/bin/bazel
  chmod +x /usr/local/bin/bazel
else
  # bazel is installed for the correct architecture
  exit 0
fi

+ 0
- 27
third_party/benchmark/.github/libcxx-setup.sh View File

@ -1,27 +0,0 @@
#!/usr/bin/env bash
# Set up libc++ for CI. The active part simply installs the distro's
# libc++/libc++abi dev packages; the commented-out section below documents
# the previous approach of building libc++ from LLVM sources and is kept
# for reference only.
# Checkout LLVM sources
#git clone --depth=1 https://github.com/llvm/llvm-project.git llvm-project
#
## Setup libc++ options
#if [ -z "$BUILD_32_BITS" ]; then
#  export BUILD_32_BITS=OFF && echo disabling 32 bit build
#fi
#
## Build and install libc++ (Use unstable ABI for better sanitizer coverage)
#cd ./llvm-project
#cmake -DCMAKE_C_COMPILER=${CC} \
#  -DCMAKE_CXX_COMPILER=${CXX} \
#  -DCMAKE_BUILD_TYPE=RelWithDebInfo \
#  -DCMAKE_INSTALL_PREFIX=/usr \
#  -DLIBCXX_ABI_UNSTABLE=OFF \
#  -DLLVM_USE_SANITIZER=${LIBCXX_SANITIZER} \
#  -DLLVM_BUILD_32_BITS=${BUILD_32_BITS} \
#  -DLLVM_ENABLE_RUNTIMES='libcxx;libcxxabi' \
#  -S llvm -B llvm-build -G "Unix Makefiles"
#make -C llvm-build -j3 cxx cxxabi
#sudo make -C llvm-build install-cxx install-cxxabi
#cd ..
sudo apt update
sudo apt -y install libc++-dev libc++abi-dev

+ 0
- 35
third_party/benchmark/.github/workflows/bazel.yml View File

@ -1,35 +0,0 @@
# CI: build and run the test suite with Bazel on Linux, macOS, and Windows.
# (Indentation reconstructed per standard GitHub Actions layout.)
name: bazel

# `on` is a YAML 1.1 boolean-looking key; GitHub's parser handles it.
on:
  push: {}
  pull_request: {}

jobs:
  job:
    name: bazel.${{ matrix.os }}
    runs-on: ${{ matrix.os }}
    strategy:
      fail-fast: false  # let other OS jobs finish even if one fails
      matrix:
        os: [ubuntu-latest, macos-latest, windows-2022]
    steps:
      - uses: actions/checkout@v3
      - name: mount bazel cache
        uses: actions/cache@v3
        env:
          cache-name: bazel-cache
        with:
          path: "~/.cache/bazel"
          key: ${{ env.cache-name }}-${{ matrix.os }}-${{ github.ref }}
          restore-keys: |
            ${{ env.cache-name }}-${{ matrix.os }}-main
      - name: build
        run: |
          bazel build //:benchmark //:benchmark_main //test/...
      - name: test
        run: |
          bazel test --test_output=all //test/...

+ 0
- 46
third_party/benchmark/.github/workflows/build-and-test-min-cmake.yml View File

@ -1,46 +0,0 @@
# CI: verify the project still configures and builds with the minimum
# supported CMake version (3.10.0) on Linux and macOS.
name: build-and-test-min-cmake

on:
  push:
    branches: [ main ]
  pull_request:
    branches: [ main ]

jobs:
  job:
    name: ${{ matrix.os }}.min-cmake
    runs-on: ${{ matrix.os }}
    strategy:
      fail-fast: false
      matrix:
        os: [ubuntu-latest, macos-latest]
    steps:
      - uses: actions/checkout@v3
      - uses: lukka/get-cmake@latest
        with:
          cmakeVersion: 3.10.0  # pin to the oldest supported CMake
      - name: create build environment
        run: cmake -E make_directory ${{ runner.workspace }}/_build
      - name: setup cmake initial cache
        run: touch compiler-cache.cmake
      - name: configure cmake
        env:
          CXX: ${{ matrix.compiler }}
        shell: bash
        working-directory: ${{ runner.workspace }}/_build
        run: >
          cmake -C ${{ github.workspace }}/compiler-cache.cmake
          $GITHUB_WORKSPACE
          -DBENCHMARK_DOWNLOAD_DEPENDENCIES=ON
          -DCMAKE_CXX_VISIBILITY_PRESET=hidden
          -DCMAKE_VISIBILITY_INLINES_HIDDEN=ON
      - name: build
        shell: bash
        working-directory: ${{ runner.workspace }}/_build
        run: cmake --build .

+ 0
- 51
third_party/benchmark/.github/workflows/build-and-test-perfcounters.yml View File

@ -1,51 +0,0 @@
# CI: build with libpfm-based performance-counter support enabled.
# Tests are intentionally skipped (perf_event_open fails on the hosted
# runners — see the trailing comments).
name: build-and-test-perfcounters

on:
  push:
    branches: [ main ]
  pull_request:
    branches: [ main ]

jobs:
  job:
    # TODO(dominic): Extend this to include compiler and set through env: CC/CXX.
    name: ${{ matrix.os }}.${{ matrix.build_type }}
    runs-on: ${{ matrix.os }}
    strategy:
      fail-fast: false
      matrix:
        os: [ubuntu-22.04, ubuntu-20.04]
        build_type: ['Release', 'Debug']
    steps:
      - uses: actions/checkout@v3
      - name: install libpfm
        run: |
          sudo apt update
          sudo apt -y install libpfm4-dev
      - name: create build environment
        run: cmake -E make_directory ${{ runner.workspace }}/_build
      - name: configure cmake
        shell: bash
        working-directory: ${{ runner.workspace }}/_build
        run: >
          cmake $GITHUB_WORKSPACE
          -DBENCHMARK_ENABLE_LIBPFM=1
          -DBENCHMARK_DOWNLOAD_DEPENDENCIES=ON
          -DCMAKE_BUILD_TYPE=${{ matrix.build_type }}
      - name: build
        shell: bash
        working-directory: ${{ runner.workspace }}/_build
        run: cmake --build . --config ${{ matrix.build_type }}
      # Skip testing, for now. It seems perf_event_open does not succeed on the
      # hosting machine, very likely a permissions issue.
      # TODO(mtrofin): Enable test.
      # - name: test
      #   shell: bash
      #   working-directory: ${{ runner.workspace }}/_build
      #   run: ctest -C ${{ matrix.build_type }} --rerun-failed --output-on-failure

+ 0
- 114
third_party/benchmark/.github/workflows/build-and-test.yml View File

@ -1,114 +0,0 @@
# Main CI matrix: g++/clang++ x Release/Debug x shared/static on Linux and
# macOS (job `job`), plus an MSVC matrix on Windows (job `msvc`).
name: build-and-test

on:
  push:
    branches: [ main ]
  pull_request:
    branches: [ main ]

jobs:
  # TODO: add 32-bit builds (g++ and clang++) for ubuntu
  # (requires g++-multilib and libc6:i386)
  # TODO: add coverage build (requires lcov)
  # TODO: add clang + libc++ builds for ubuntu
  job:
    name: ${{ matrix.os }}.${{ matrix.build_type }}.${{ matrix.lib }}.${{ matrix.compiler }}
    runs-on: ${{ matrix.os }}
    strategy:
      fail-fast: false
      matrix:
        os: [ubuntu-22.04, ubuntu-20.04, macos-latest]
        build_type: ['Release', 'Debug']
        compiler: ['g++', 'clang++']
        lib: ['shared', 'static']
    steps:
      - uses: actions/checkout@v3
      - uses: lukka/get-cmake@latest
      - name: create build environment
        run: cmake -E make_directory ${{ runner.workspace }}/_build
      - name: setup cmake initial cache
        run: touch compiler-cache.cmake
      - name: configure cmake
        env:
          CXX: ${{ matrix.compiler }}
        shell: bash
        working-directory: ${{ runner.workspace }}/_build
        run: >
          cmake -C ${{ github.workspace }}/compiler-cache.cmake
          $GITHUB_WORKSPACE
          -DBENCHMARK_DOWNLOAD_DEPENDENCIES=ON
          -DBUILD_SHARED_LIBS=${{ matrix.lib == 'shared' }}
          -DCMAKE_BUILD_TYPE=${{ matrix.build_type }}
          -DCMAKE_CXX_COMPILER=${{ env.CXX }}
          -DCMAKE_CXX_VISIBILITY_PRESET=hidden
          -DCMAKE_VISIBILITY_INLINES_HIDDEN=ON
      - name: build
        shell: bash
        working-directory: ${{ runner.workspace }}/_build
        run: cmake --build . --config ${{ matrix.build_type }}
      - name: test
        shell: bash
        working-directory: ${{ runner.workspace }}/_build
        run: ctest -C ${{ matrix.build_type }} -VV

  msvc:
    name: ${{ matrix.os }}.${{ matrix.build_type }}.${{ matrix.lib }}.${{ matrix.msvc }}
    runs-on: ${{ matrix.os }}
    defaults:
      run:
        shell: powershell
    strategy:
      fail-fast: false
      matrix:
        msvc:
          - VS-16-2019
          - VS-17-2022
        arch:
          - x64
        build_type:
          - Debug
          - Release
        lib:
          - shared
          - static
        include:
          - msvc: VS-16-2019
            os: windows-2019
            generator: 'Visual Studio 16 2019'
          - msvc: VS-17-2022
            os: windows-2022
            generator: 'Visual Studio 17 2022'
    steps:
      - uses: actions/checkout@v2
      - uses: lukka/get-cmake@latest
      - name: configure cmake
        run: >
          cmake -S . -B _build/
          -A ${{ matrix.arch }}
          -G "${{ matrix.generator }}"
          -DBENCHMARK_DOWNLOAD_DEPENDENCIES=ON
          -DBUILD_SHARED_LIBS=${{ matrix.lib == 'shared' }}
      - name: build
        run: cmake --build _build/ --config ${{ matrix.build_type }}
      - name: setup test environment
        # Make sure gmock and benchmark DLLs can be found
        run: >
          echo "$((Get-Item .).FullName)/_build/bin/${{ matrix.build_type }}" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append;
          echo "$((Get-Item .).FullName)/_build/src/${{ matrix.build_type }}" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append;
      - name: test
        run: ctest --test-dir _build/ -C ${{ matrix.build_type }} -VV

+ 0
- 17
third_party/benchmark/.github/workflows/clang-format-lint.yml View File

@ -1,17 +0,0 @@
# CI: enforce Google-style clang-format (v12) on headers and sources.
name: clang-format-lint

on:
  push: {}
  pull_request: {}

jobs:
  build:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
      - uses: DoozyX/clang-format-lint-action@v0.13
        with:
          source: './include/benchmark ./src ./test'
          extensions: 'h,cc'
          clangFormatVersion: 12
          style: Google

+ 0
- 38
third_party/benchmark/.github/workflows/clang-tidy.yml View File

@ -1,38 +0,0 @@
# CI: configure with clang and an exported compile database, then run
# run-clang-tidy over the tree.
name: clang-tidy

on:
  push: {}
  pull_request: {}

jobs:
  job:
    name: run-clang-tidy
    runs-on: ubuntu-latest
    strategy:
      fail-fast: false
    steps:
      - uses: actions/checkout@v3
      - name: install clang-tidy
        run: sudo apt update && sudo apt -y install clang-tidy
      - name: create build environment
        run: cmake -E make_directory ${{ runner.workspace }}/_build
      - name: configure cmake
        shell: bash
        working-directory: ${{ runner.workspace }}/_build
        run: >
          cmake $GITHUB_WORKSPACE
          -DBENCHMARK_ENABLE_ASSEMBLY_TESTS=OFF
          -DBENCHMARK_ENABLE_LIBPFM=OFF
          -DBENCHMARK_DOWNLOAD_DEPENDENCIES=ON
          -DCMAKE_C_COMPILER=clang
          -DCMAKE_CXX_COMPILER=clang++
          -DCMAKE_EXPORT_COMPILE_COMMANDS=ON
          -DGTEST_COMPILE_COMMANDS=OFF
      - name: run
        shell: bash
        working-directory: ${{ runner.workspace }}/_build
        run: run-clang-tidy

Some files were not shown because too many files changed in this diff

Loading…
Cancel
Save