---
# Copyright 2021 The LevelDB Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file. See the AUTHORS file for names of contributors.

# Continuous-integration workflow: builds LevelDB and runs its tests and
# benchmarks over a {compiler} x {OS} x {debug/release} matrix.
name: ci
on: [push, pull_request]

permissions:
  contents: read

jobs:
  build-and-test:
    name: >-
      CI
      ${{ matrix.os }}
      ${{ matrix.compiler }}
      ${{ matrix.optimized && 'release' || 'debug' }}
    runs-on: ${{ matrix.os }}
    strategy:
      fail-fast: false
      matrix:
        compiler: [clang, gcc, msvc]
        os: [ubuntu-latest, macos-latest, windows-latest]
        optimized: [true, false]
        exclude:
          # MSVC only works on Windows.
          - os: ubuntu-latest
            compiler: msvc
          - os: macos-latest
            compiler: msvc
          # Not testing with GCC on macOS.
          - os: macos-latest
            compiler: gcc
          # Only testing with MSVC on Windows.
          - os: windows-latest
            compiler: clang
          - os: windows-latest
            compiler: gcc
        include:
          - compiler: clang
            CC: clang
            CXX: clang++
          - compiler: gcc
            CC: gcc
            CXX: g++
          # MSVC is selected through the Visual Studio CMake generator, so no
          # CC/CXX override is needed.
          - compiler: msvc
            CC:
            CXX:

    env:
      CMAKE_BUILD_DIR: ${{ github.workspace }}/build
      CMAKE_BUILD_TYPE: ${{ matrix.optimized && 'RelWithDebInfo' || 'Debug' }}
      CC: ${{ matrix.CC }}
      CXX: ${{ matrix.CXX }}
      BINARY_SUFFIX: ${{ startsWith(matrix.os, 'windows') && '.exe' || '' }}
      # Multi-config generators (MSVC) place binaries in a per-configuration
      # subdirectory; single-config generators put them directly in build/.
      BINARY_PATH: >-
        ${{ format(
        startsWith(matrix.os, 'windows') && '{0}\build\{1}\' || '{0}/build/',
        github.workspace,
        matrix.optimized && 'RelWithDebInfo' || 'Debug') }}

    steps:
      # actions/checkout@v2 runs on the retired Node.js 12 runtime and is
      # deprecated by GitHub; v4 is the currently supported release.
      - uses: actions/checkout@v4
        with:
          submodules: true

      - name: Install dependencies on Linux
        if: ${{ runner.os == 'Linux' }}
        # libgoogle-perftools-dev is temporarily removed from the package list
        # because it is currently broken on GitHub's Ubuntu 22.04.
        run: |
          sudo apt-get update
          sudo apt-get install libkyotocabinet-dev libsnappy-dev libsqlite3-dev

      - name: Generate build config
        run: >-
          cmake -S "${{ github.workspace }}" -B "${{ env.CMAKE_BUILD_DIR }}"
          -DCMAKE_BUILD_TYPE=${{ env.CMAKE_BUILD_TYPE }}
          -DCMAKE_INSTALL_PREFIX=${{ runner.temp }}/install_test/

      - name: Build
        run: >-
          cmake --build "${{ env.CMAKE_BUILD_DIR }}"
          --config "${{ env.CMAKE_BUILD_TYPE }}"

      - name: Run Tests
        working-directory: ${{ github.workspace }}/build
        run: ctest -C "${{ env.CMAKE_BUILD_TYPE }}" --verbose

      - name: Run LevelDB Benchmarks
        run: ${{ env.BINARY_PATH }}db_bench${{ env.BINARY_SUFFIX }}

      - name: Run SQLite Benchmarks
        if: ${{ runner.os != 'Windows' }}
        run: ${{ env.BINARY_PATH }}db_bench_sqlite3${{ env.BINARY_SUFFIX }}

      - name: Run Kyoto Cabinet Benchmarks
        if: ${{ runner.os == 'Linux' && matrix.compiler == 'clang' }}
        run: ${{ env.BINARY_PATH }}db_bench_tree_db${{ env.BINARY_SUFFIX }}

      - name: Test CMake installation
        run: cmake --build "${{ env.CMAKE_BUILD_DIR }}" --target install
@ -0,0 +1,31 @@ | |||
# How to Contribute | |||
We'd love to accept your patches and contributions to this project. There are | |||
just a few small guidelines you need to follow. | |||
## Contributor License Agreement | |||
Contributions to this project must be accompanied by a Contributor License | |||
Agreement. You (or your employer) retain the copyright to your contribution; | |||
this simply gives us permission to use and redistribute your contributions as | |||
part of the project. Head over to <https://cla.developers.google.com/> to see | |||
your current agreements on file or to sign a new one. | |||
You generally only need to submit a CLA once, so if you've already submitted one | |||
(even if it was for a different project), you probably don't need to do it | |||
again. | |||
## Code Reviews | |||
All submissions, including submissions by project members, require review. We | |||
use GitHub pull requests for this purpose. Consult | |||
[GitHub Help](https://help.github.com/articles/about-pull-requests/) for more | |||
information on using pull requests. | |||
See [the README](README.md#contributing-to-the-leveldb-project) for areas | |||
where we are likely to accept external contributions. | |||
## Community Guidelines | |||
This project follows [Google's Open Source Community | |||
Guidelines](https://opensource.google/conduct/). |
@ -1,252 +1,9 @@ | |||
LevelDB is a fast key-value storage library written at Google that provides an ordered mapping from string keys to string values. | |||
实验报告请查看以下文档: | |||
**本仓库提供TTL基本的测试用例** | |||
我们的分工已在代码中以注释的形式体现,如:ckx、pzy。 | |||
- [实验报告](实验报告.md) | |||
> **This repository is receiving very limited maintenance. We will only review the following types of changes.** | |||
> | |||
> * Fixes for critical bugs, such as data loss or memory corruption | |||
> * Changes absolutely needed by internally supported leveldb clients. These typically fix breakage introduced by a language/standard library/OS update | |||
[](https://github.com/google/leveldb/actions/workflows/build.yml) | |||
Authors: Sanjay Ghemawat (sanjay@google.com) and Jeff Dean (jeff@google.com) | |||
# Features | |||
* Keys and values are arbitrary byte arrays. | |||
* Data is stored sorted by key. | |||
* Callers can provide a custom comparison function to override the sort order. | |||
* The basic operations are `Put(key,value)`, `Get(key)`, `Delete(key)`. | |||
* Multiple changes can be made in one atomic batch. | |||
* Users can create a transient snapshot to get a consistent view of data. | |||
* Forward and backward iteration is supported over the data. | |||
* Data is automatically compressed using the [Snappy compression library](https://google.github.io/snappy/), but [Zstd compression](https://facebook.github.io/zstd/) is also supported. | |||
* External activity (file system operations etc.) is relayed through a virtual interface so users can customize the operating system interactions. | |||
# Documentation | |||
[LevelDB library documentation](https://github.com/google/leveldb/blob/main/doc/index.md) is online and bundled with the source code. | |||
# Limitations | |||
* This is not a SQL database. It does not have a relational data model, it does not support SQL queries, and it has no support for indexes. | |||
* Only a single process (possibly multi-threaded) can access a particular database at a time. | |||
* There is no client-server support builtin to the library. An application that needs such support will have to wrap their own server around the library. | |||
# Getting the Source | |||
```bash | |||
git clone --recurse-submodules https://github.com/google/leveldb.git | |||
``` | |||
# Building | |||
This project supports [CMake](https://cmake.org/) out of the box. | |||
### Build for POSIX | |||
Quick start (clone the code, then build):

```bash
git clone --recurse-submodules https://gitea.shuishan.net.cn/building_data_management_systems.Xuanzhou.2024Fall.DaSE/leveldb_base.git
cd leveldb_base
mkdir -p build && cd build
cmake -DCMAKE_BUILD_TYPE=Release .. && cmake --build .
```
### Building for Windows | |||
First generate the Visual Studio 2017 project/solution files: | |||
```cmd | |||
mkdir build | |||
cd build | |||
cmake -G "Visual Studio 15" .. | |||
``` | |||
By default, the generated solution builds for x86. For a 64-bit build run:
```cmd | |||
cmake -G "Visual Studio 15 Win64" .. | |||
``` | |||
To compile the Windows solution from the command-line: | |||
```cmd | |||
devenv /build Debug leveldb.sln | |||
``` | |||
or open leveldb.sln in Visual Studio and build from within. | |||
Please see the CMake documentation and `CMakeLists.txt` for more advanced usage. | |||
# Contributing to the leveldb Project | |||
> **This repository is receiving very limited maintenance. We will only review the following types of changes.** | |||
> | |||
> * Bug fixes | |||
> * Changes absolutely needed by internally supported leveldb clients. These typically fix breakage introduced by a language/standard library/OS update | |||
The leveldb project welcomes contributions. leveldb's primary goal is to be | |||
a reliable and fast key/value store. Changes that are in line with the | |||
features/limitations outlined above, and meet the requirements below, | |||
will be considered. | |||
Contribution requirements: | |||
1. **Tested platforms only**. We _generally_ will only accept changes for | |||
platforms that are compiled and tested. This means POSIX (for Linux and | |||
macOS) or Windows. Very small changes will sometimes be accepted, but | |||
consider that more of an exception than the rule. | |||
2. **Stable API**. We strive very hard to maintain a stable API. Changes that | |||
require changes for projects using leveldb _might_ be rejected without | |||
sufficient benefit to the project. | |||
3. **Tests**: All changes must be accompanied by a new (or changed) test, or | |||
a sufficient explanation as to why a new (or changed) test is not required. | |||
4. **Consistent Style**: This project conforms to the | |||
[Google C++ Style Guide](https://google.github.io/styleguide/cppguide.html). | |||
To ensure your changes are properly formatted please run: | |||
``` | |||
clang-format -i --style=file <file> | |||
``` | |||
We are unlikely to accept contributions to the build configuration files, such | |||
as `CMakeLists.txt`. We are focused on maintaining a build configuration that | |||
allows us to test that the project works in a few supported configurations | |||
inside Google. We are not currently interested in supporting other requirements, | |||
such as different operating systems, compilers, or build systems. | |||
## Submitting a Pull Request | |||
Before any pull request will be accepted the author must first sign a | |||
Contributor License Agreement (CLA) at https://cla.developers.google.com/. | |||
In order to keep the commit timeline linear | |||
[squash](https://git-scm.com/book/en/v2/Git-Tools-Rewriting-History#Squashing-Commits) | |||
your changes down to a single commit and [rebase](https://git-scm.com/docs/git-rebase) | |||
on google/leveldb/main. This keeps the commit timeline linear and more easily sync'ed | |||
with the internal repository at Google. More information at GitHub's | |||
[About Git rebase](https://help.github.com/articles/about-git-rebase/) page. | |||
# Performance | |||
Here is a performance report (with explanations) from the run of the | |||
included db_bench program. The results are somewhat noisy, but should | |||
be enough to get a ballpark performance estimate. | |||
## Setup | |||
We use a database with a million entries. Each entry has a 16 byte | |||
key, and a 100 byte value. Values used by the benchmark compress to | |||
about half their original size. | |||
LevelDB: version 1.1 | |||
Date: Sun May 1 12:11:26 2011 | |||
CPU: 4 x Intel(R) Core(TM)2 Quad CPU Q6600 @ 2.40GHz | |||
CPUCache: 4096 KB | |||
Keys: 16 bytes each | |||
Values: 100 bytes each (50 bytes after compression) | |||
Entries: 1000000 | |||
Raw Size: 110.6 MB (estimated) | |||
File Size: 62.9 MB (estimated) | |||
## Write performance | |||
The "fill" benchmarks create a brand new database, in either | |||
sequential, or random order. The "fillsync" benchmark flushes data | |||
from the operating system to the disk after every operation; the other | |||
write operations leave the data sitting in the operating system buffer | |||
cache for a while. The "overwrite" benchmark does random writes that | |||
update existing keys in the database. | |||
fillseq : 1.765 micros/op; 62.7 MB/s | |||
fillsync : 268.409 micros/op; 0.4 MB/s (10000 ops) | |||
fillrandom : 2.460 micros/op; 45.0 MB/s | |||
overwrite : 2.380 micros/op; 46.5 MB/s | |||
Each "op" above corresponds to a write of a single key/value pair. | |||
I.e., a random write benchmark goes at approximately 400,000 writes per second. | |||
Each "fillsync" operation costs much less (0.3 millisecond) | |||
than a disk seek (typically 10 milliseconds). We suspect that this is | |||
because the hard disk itself is buffering the update in its memory and | |||
responding before the data has been written to the platter. This may | |||
or may not be safe based on whether or not the hard disk has enough | |||
power to save its memory in the event of a power failure. | |||
## Read performance | |||
We list the performance of reading sequentially in both the forward | |||
and reverse direction, and also the performance of a random lookup. | |||
Note that the database created by the benchmark is quite small. | |||
Therefore the report characterizes the performance of leveldb when the | |||
working set fits in memory. The cost of reading a piece of data that | |||
is not present in the operating system buffer cache will be dominated | |||
by the one or two disk seeks needed to fetch the data from disk. | |||
Write performance will be mostly unaffected by whether or not the | |||
working set fits in memory. | |||
readrandom : 16.677 micros/op; (approximately 60,000 reads per second) | |||
readseq : 0.476 micros/op; 232.3 MB/s | |||
readreverse : 0.724 micros/op; 152.9 MB/s | |||
LevelDB compacts its underlying storage data in the background to | |||
improve read performance. The results listed above were done | |||
immediately after a lot of random writes. The results after | |||
compactions (which are usually triggered automatically) are better. | |||
readrandom : 11.602 micros/op; (approximately 85,000 reads per second) | |||
readseq : 0.423 micros/op; 261.8 MB/s | |||
readreverse : 0.663 micros/op; 166.9 MB/s | |||
Some of the high cost of reads comes from repeated decompression of blocks | |||
read from disk. If we supply enough cache to the leveldb so it can hold the | |||
uncompressed blocks in memory, the read performance improves again: | |||
readrandom : 9.775 micros/op; (approximately 100,000 reads per second before compaction) | |||
readrandom : 5.215 micros/op; (approximately 190,000 reads per second after compaction) | |||
## Repository contents | |||
See [doc/index.md](doc/index.md) for more explanation. See | |||
[doc/impl.md](doc/impl.md) for a brief overview of the implementation. | |||
The public interface is in include/leveldb/*.h. Callers should not include or | |||
rely on the details of any other header files in this package. Those | |||
internal APIs may be changed without warning. | |||
Guide to header files: | |||
* **include/leveldb/db.h**: Main interface to the DB: Start here. | |||
* **include/leveldb/options.h**: Control over the behavior of an entire database, | |||
and also control over the behavior of individual reads and writes. | |||
* **include/leveldb/comparator.h**: Abstraction for user-specified comparison function. | |||
If you want just bytewise comparison of keys, you can use the default | |||
comparator, but clients can write their own comparator implementations if they | |||
want custom ordering (e.g. to handle different character encodings, etc.). | |||
* **include/leveldb/iterator.h**: Interface for iterating over data. You can get | |||
an iterator from a DB object. | |||
* **include/leveldb/write_batch.h**: Interface for atomically applying multiple | |||
updates to a database. | |||
* **include/leveldb/slice.h**: A simple module for maintaining a pointer and a | |||
length into some other byte array. | |||
* **include/leveldb/status.h**: Status is returned from many of the public interfaces | |||
and is used to report success and various kinds of errors. | |||
* **include/leveldb/env.h**: | |||
Abstraction of the OS environment. A posix implementation of this interface is | |||
in util/env_posix.cc. | |||
* **include/leveldb/table.h, include/leveldb/table_builder.h**: Lower-level modules that most | |||
clients probably won't use directly. |
@ -1,5 +0,0 @@ | |||
运行新代码: | |||
1. 在examples/ 添加对应的测试文件 | |||
2. 在最外层CMakeLists加入新的测试文件的编译指令,代码参考: + leveldb_test("examples/main.cc") | |||
3. 进入build文件,重新编译,指令:"cmake -DCMAKE_BUILD_TYPE=Release .. && cmake --build ."。 | |||
"cmake -DCMAKE_BUILD_TYPE=Debug .. && cmake --build ." |
@ -1,253 +1,226 @@ | |||
// Copyright (c) 2011 The LevelDB Authors. All rights reserved. | |||
// Use of this source code is governed by a BSD-style license that can be | |||
// found in the LICENSE file. See the AUTHORS file for names of contributors. | |||
#ifndef STORAGE_LEVELDB_DB_DB_IMPL_H_ | |||
#define STORAGE_LEVELDB_DB_DB_IMPL_H_ | |||
#include <atomic> | |||
#include <deque> | |||
#include <set> | |||
#include <string> | |||
#include "db/dbformat.h" | |||
#include "db/log_writer.h" | |||
#include "db/vlog_writer.h" | |||
#include "db/vlog_reader.h" | |||
#include "db/vlog_manager.h" | |||
#include "db/snapshot.h" | |||
#include "db/vlog_converter.h" | |||
#include "leveldb/db.h" | |||
#include "leveldb/env.h" | |||
#include "port/port.h" | |||
#include "port/thread_annotations.h" | |||
#include <thread> | |||
#include <chrono> // 如果使用了 std::this_thread::sleep_for | |||
namespace leveldb { | |||
class MemTable; | |||
class TableCache; | |||
class Version; | |||
class VersionEdit; | |||
class VersionSet; | |||
// DBImpl is the concrete implementation of the DB interface.  This variant
// additionally separates values into value-log (vlog) files: the LSM tree
// stores a small pointer (vptr) while the value bytes live in a vlog file,
// and a background thread garbage-collects obsolete vlog files.
class DBImpl : public DB {
 public:
  // Constructs the implementation for the database named `dbname` using
  // `options` (which are sanitized via SanitizeOptions).
  DBImpl(const Options& options, const std::string& dbname);

  DBImpl(const DBImpl&) = delete;
  DBImpl& operator=(const DBImpl&) = delete;

  ~DBImpl() override;

  // Implementations of the DB interface
  Status Put(const WriteOptions&, const Slice& key,
             const Slice& value) override;
  Status Delete(const WriteOptions&, const Slice& key) override;
  Status Write(const WriteOptions& options, WriteBatch* updates) override;
  Status Get(const ReadOptions& options, const Slice& key,
             std::string* value) override;
  // Returns the keys whose stored value matches `field`.
  // NOTE(review): takes an explicit DB* even though this is already a DB
  // method — presumably it scans `db`; confirm against the implementation.
  std::vector<std::string> FindKeysByField(leveldb::DB* db, Field& field) override;
  Iterator* NewIterator(const ReadOptions&) override;
  const Snapshot* GetSnapshot() override;
  void ReleaseSnapshot(const Snapshot* snapshot) override;
  bool GetProperty(const Slice& property, std::string* value) override;
  void GetApproximateSizes(const Range* range, int n, uint64_t* sizes) override;
  void CompactRange(const Slice* begin, const Slice* end) override;

  // Spawns a detached thread that periodically deletes vlog files that no
  // longer contain live values (automatic GC), until shutting_down_ is set.
  // NOTE(review): the detached thread captures `this` and can outlive this
  // DBImpl — if the object is destroyed while the thread is sleeping, the
  // next iteration dereferences freed memory.  Consider keeping the
  // std::thread as a member and joining it in ~DBImpl after setting
  // shutting_down_ (the destructor is defined in the .cc, outside this view).
  void StartBackgroundCleanupTask() {  // background automatic-GC thread
    std::thread([this]() {
      while (!shutting_down_.load(std::memory_order_acquire)) {
        vmanager_->CleanupInvalidVlogFiles(options_, dbname_);
        std::this_thread::sleep_for(std::chrono::seconds(60));  // check once per minute
      }
    }).detach();
  }

  // Extra methods (for testing) that are not in the public DB interface

  // Returns the configured key/value-separation mode.
  KVSepType GetKVSepType();

  // Flushes buffered value-log writes.
  Status FlushVlog();

  // Compact any files in the named level that overlap [*begin,*end]
  void TEST_CompactRange(int level, const Slice* begin, const Slice* end);

  // Force current memtable contents to be compacted.
  Status TEST_CompactMemTable();

  // Return an internal iterator over the current state of the database.
  // The keys of this iterator are internal keys (see format.h).
  // The returned iterator should be deleted when no longer needed.
  Iterator* TEST_NewInternalIterator();

  // Return the maximum overlapping data (in bytes) at next level for any
  // file at a level >= 1.
  int64_t TEST_MaxNextLevelOverlappingBytes();

  // Record a sample of bytes read at the specified internal key.
  // Samples are taken approximately once every config::kReadBytesPeriod
  // bytes.
  void RecordReadSample(Slice key);

 private:
  friend class DB;
  struct CompactionState;
  struct Writer;

  // Information for a manual compaction
  struct ManualCompaction {
    int level;
    bool done;
    const InternalKey* begin;  // null means beginning of key range
    const InternalKey* end;    // null means end of key range
    InternalKey tmp_storage;   // Used to keep track of compaction progress
  };

  // Per level compaction stats. stats_[level] stores the stats for
  // compactions that produced data for the specified "level".
  struct CompactionStats {
    CompactionStats() : micros(0), bytes_read(0), bytes_written(0) {}

    void Add(const CompactionStats& c) {
      this->micros += c.micros;
      this->bytes_read += c.bytes_read;
      this->bytes_written += c.bytes_written;
    }

    int64_t micros;
    int64_t bytes_read;
    int64_t bytes_written;
  };

  Iterator* NewInternalIterator(const ReadOptions&,
                                SequenceNumber* latest_snapshot,
                                uint32_t* seed);

  Status NewDB();

  // Recover the descriptor from persistent storage. May do a significant
  // amount of work to recover recently logged updates. Any changes to
  // be made to the descriptor are added to *edit.
  Status Recover(VersionEdit* edit, bool* save_manifest)
      EXCLUSIVE_LOCKS_REQUIRED(mutex_);

  void MaybeIgnoreError(Status* s) const;

  // Delete any unneeded files and stale in-memory entries.
  void RemoveObsoleteFiles() EXCLUSIVE_LOCKS_REQUIRED(mutex_);

  // Compact the in-memory write buffer to disk. Switches to a new
  // log-file/memtable and writes a new descriptor iff successful.
  // Errors are recorded in bg_error_.
  void CompactMemTable() EXCLUSIVE_LOCKS_REQUIRED(mutex_);

  Status RecoverLogFile(uint64_t log_number, bool last_log, bool* save_manifest,
                        VersionEdit* edit, SequenceNumber* max_sequence)
      EXCLUSIVE_LOCKS_REQUIRED(mutex_);
  Status WriteLevel0Table(MemTable* mem, VersionEdit* edit, Version* base)
      EXCLUSIVE_LOCKS_REQUIRED(mutex_);
  Status MakeRoomForWrite(bool force /* compact even if there is room? */)
      EXCLUSIVE_LOCKS_REQUIRED(mutex_);
  WriteBatch* BuildBatchGroup(Writer** last_writer)
      EXCLUSIVE_LOCKS_REQUIRED(mutex_);

  void RecordBackgroundError(const Status& s);

  void MaybeScheduleCompaction() EXCLUSIVE_LOCKS_REQUIRED(mutex_);
  static void BGWork(void* db);
  void BackgroundCall();
  void BackgroundCompaction() EXCLUSIVE_LOCKS_REQUIRED(mutex_);
  void CleanupCompaction(CompactionState* compact)
      EXCLUSIVE_LOCKS_REQUIRED(mutex_);
  Status DoCompactionWork(CompactionState* compact)
      EXCLUSIVE_LOCKS_REQUIRED(mutex_);

  Status OpenCompactionOutputFile(CompactionState* compact);
  Status FinishCompactionOutputFile(CompactionState* compact, Iterator* input);
  Status InstallCompactionResults(CompactionState* compact)
      EXCLUSIVE_LOCKS_REQUIRED(mutex_);

  const Comparator* user_comparator() const {
    return internal_comparator_.user_comparator();
  }

  // Constant after construction
  Env* const env_;
  const InternalKeyComparator internal_comparator_;
  const InternalFilterPolicy internal_filter_policy_;
  const Options options_;  // options_.comparator == &internal_comparator_
  const bool owns_info_log_;
  const bool owns_cache_;
  const std::string dbname_;

  // table_cache_ provides its own synchronization
  TableCache* const table_cache_;

  // Lock over the persistent DB state. Non-null iff successfully acquired.
  FileLock* db_lock_;

  // State below is protected by mutex_
  port::Mutex mutex_;
  std::mutex vlog_mutex_;  // mutex protecting vlog operations
  std::atomic<bool> shutting_down_;
  port::CondVar background_work_finished_signal_ GUARDED_BY(mutex_);
  MemTable* mem_;
  MemTable* imm_ GUARDED_BY(mutex_);  // Memtable being compacted
  std::atomic<bool> has_imm_;         // So bg thread can detect non-null imm_
  WritableFile* logfile_;
  uint64_t logfile_number_ GUARDED_BY(mutex_);
  log::Writer* log_;
  // Value-log (vlog) state.
  // TODO: Consider the Concurrency.
  uint64_t vlogfile_number_;
  uint64_t vlogfile_offset_;  // current write offset within the active vlog file
  WritableFile* vlogfile_;    // file handle used to append to the active vlog file
  vlog::VWriter* vlog_;              // record writer layered on vlogfile_
  vlog::VlogManager* vmanager_;      // tracks vlog files; used by the GC thread
  vlog::VlogConverter* vconverter_;  // NOTE(review): role not visible here — see vlog_converter.h
  uint32_t seed_ GUARDED_BY(mutex_);  // For sampling.

  // Queue of writers.
  std::deque<Writer*> writers_ GUARDED_BY(mutex_);
  WriteBatch* tmp_batch_ GUARDED_BY(mutex_);

  SnapshotList snapshots_ GUARDED_BY(mutex_);

  // Set of table files to protect from deletion because they are
  // part of ongoing compactions.
  std::set<uint64_t> pending_outputs_ GUARDED_BY(mutex_);

  // Has a background compaction been scheduled or is running?
  bool background_compaction_scheduled_ GUARDED_BY(mutex_);

  ManualCompaction* manual_compaction_ GUARDED_BY(mutex_);

  VersionSet* const versions_ GUARDED_BY(mutex_);

  // Have we encountered a background error in paranoid mode?
  Status bg_error_ GUARDED_BY(mutex_);

  CompactionStats stats_[config::kNumLevels] GUARDED_BY(mutex_);

 public:
  // Appends `val` for `key` into the active vlog file and, on success, fills
  // `vptr` with the pointer bytes to store in the LSM tree instead of the
  // value.  NOTE(review): `buf` looks like caller-provided scratch space —
  // confirm the required size against the implementation.
  Status WriteValueIntoVlog(const Slice& key, const Slice& val, char* buf, Slice& vptr);
  // Resolves a vlog pointer `vptr` back into the stored key and value.
  Status ReadValueFromVlog(std::string* key, std::string* val, std::string* vptr);
  // Returns true if the given block device currently appears I/O-saturated.
  bool IsDiskBusy(const std::string& device) ;
};
// Sanitize db options. The caller should delete result.info_log if | |||
// it is not equal to src.info_log. | |||
Options SanitizeOptions(const std::string& db, | |||
const InternalKeyComparator* icmp, | |||
const InternalFilterPolicy* ipolicy, | |||
const Options& src); | |||
} // namespace leveldb | |||
#endif // STORAGE_LEVELDB_DB_DB_IMPL_H_ | |||
// Copyright (c) 2011 The LevelDB Authors. All rights reserved. | |||
// Use of this source code is governed by a BSD-style license that can be | |||
// found in the LICENSE file. See the AUTHORS file for names of contributors. | |||
#ifndef STORAGE_LEVELDB_DB_DB_IMPL_H_ | |||
#define STORAGE_LEVELDB_DB_DB_IMPL_H_ | |||
#include <atomic> | |||
#include <deque> | |||
#include <set> | |||
#include <string> | |||
#include "db/dbformat.h" | |||
#include "db/log_writer.h" | |||
#include "db/snapshot.h" | |||
#include "leveldb/db.h" | |||
#include "leveldb/env.h" | |||
#include "port/port.h" | |||
#include "port/thread_annotations.h" | |||
namespace leveldb { | |||
class MemTable; | |||
class TableCache; | |||
class Version; | |||
class VersionEdit; | |||
class VersionSet; | |||
// DBImpl is the concrete implementation of the DB interface.  This variant
// extends the upstream class with a TTL-aware Put overload and a
// key/value-separation flag.
class DBImpl : public DB {
 public:
  DBImpl(const Options& options, const std::string& dbname);

  DBImpl(const DBImpl&) = delete;
  DBImpl& operator=(const DBImpl&) = delete;

  ~DBImpl() override;

  // Implementations of the DB interface
  Status Put(const WriteOptions&, const Slice& key,
             const Slice& value) override;
  // TTL-aware Put: stores the entry with a time-to-live of `ttl` —
  // presumably seconds; confirm the unit against the implementation.
  Status Put(const WriteOptions&, const Slice& key,
             const Slice& value, uint64_t ttl) override;
  Status Delete(const WriteOptions&, const Slice& key) override;
  Status Write(const WriteOptions& options, WriteBatch* updates) override;
  Status Get(const ReadOptions& options, const Slice& key,
             std::string* value) override;
  Iterator* NewIterator(const ReadOptions&) override;
  const Snapshot* GetSnapshot() override;
  void ReleaseSnapshot(const Snapshot* snapshot) override;
  bool GetProperty(const Slice& property, std::string* value) override;
  void GetApproximateSizes(const Range* range, int n, uint64_t* sizes) override;
  void CompactRange(const Slice* begin, const Slice* end) override;

  // Whether key/value separation is enabled.
  // NOTE(review): a static data member declared here still needs an
  // out-of-class definition in a .cc file; also, `static bool` is the
  // conventional specifier order (`bool static` is legal but unusual).
  bool static key_value_separated_;

  // Extra methods (for testing) that are not in the public DB interface

  // Compact any files in the named level that overlap [*begin,*end]
  void TEST_CompactRange(int level, const Slice* begin, const Slice* end);

  // Force current memtable contents to be compacted.
  Status TEST_CompactMemTable();

  // Return an internal iterator over the current state of the database.
  // The keys of this iterator are internal keys (see format.h).
  // The returned iterator should be deleted when no longer needed.
  Iterator* TEST_NewInternalIterator();

  // Return the maximum overlapping data (in bytes) at next level for any
  // file at a level >= 1.
  int64_t TEST_MaxNextLevelOverlappingBytes();

  // Record a sample of bytes read at the specified internal key.
  // Samples are taken approximately once every config::kReadBytesPeriod
  // bytes.
  void RecordReadSample(Slice key);

 private:
  friend class DB;
  struct CompactionState;
  struct Writer;

  // Information for a manual compaction
  struct ManualCompaction {
    int level;
    bool done;
    const InternalKey* begin;  // null means beginning of key range
    const InternalKey* end;    // null means end of key range
    InternalKey tmp_storage;   // Used to keep track of compaction progress
  };

  // Per level compaction stats. stats_[level] stores the stats for
  // compactions that produced data for the specified "level".
  struct CompactionStats {
    CompactionStats() : micros(0), bytes_read(0), bytes_written(0) {}

    void Add(const CompactionStats& c) {
      this->micros += c.micros;
      this->bytes_read += c.bytes_read;
      this->bytes_written += c.bytes_written;
    }

    int64_t micros;
    int64_t bytes_read;
    int64_t bytes_written;
  };

  Iterator* NewInternalIterator(const ReadOptions&,
                                SequenceNumber* latest_snapshot,
                                uint32_t* seed);

  Status NewDB();

  // Recover the descriptor from persistent storage. May do a significant
  // amount of work to recover recently logged updates. Any changes to
  // be made to the descriptor are added to *edit.
  Status Recover(VersionEdit* edit, bool* save_manifest)
      EXCLUSIVE_LOCKS_REQUIRED(mutex_);

  void MaybeIgnoreError(Status* s) const;

  // Delete any unneeded files and stale in-memory entries.
  void RemoveObsoleteFiles() EXCLUSIVE_LOCKS_REQUIRED(mutex_);

  // Compact the in-memory write buffer to disk. Switches to a new
  // log-file/memtable and writes a new descriptor iff successful.
  // Errors are recorded in bg_error_.
  void CompactMemTable() EXCLUSIVE_LOCKS_REQUIRED(mutex_);

  Status RecoverLogFile(uint64_t log_number, bool last_log, bool* save_manifest,
                        VersionEdit* edit, SequenceNumber* max_sequence)
      EXCLUSIVE_LOCKS_REQUIRED(mutex_);
  Status WriteLevel0Table(MemTable* mem, VersionEdit* edit, Version* base)
      EXCLUSIVE_LOCKS_REQUIRED(mutex_);
  Status MakeRoomForWrite(bool force /* compact even if there is room? */)
      EXCLUSIVE_LOCKS_REQUIRED(mutex_);
  WriteBatch* BuildBatchGroup(Writer** last_writer)
      EXCLUSIVE_LOCKS_REQUIRED(mutex_);

  void RecordBackgroundError(const Status& s);

  void MaybeScheduleCompaction() EXCLUSIVE_LOCKS_REQUIRED(mutex_);
  static void BGWork(void* db);
  void BackgroundCall();
  void BackgroundCompaction() EXCLUSIVE_LOCKS_REQUIRED(mutex_);
  void CleanupCompaction(CompactionState* compact)
      EXCLUSIVE_LOCKS_REQUIRED(mutex_);
  Status DoCompactionWork(CompactionState* compact)
      EXCLUSIVE_LOCKS_REQUIRED(mutex_);

  Status OpenCompactionOutputFile(CompactionState* compact);
  Status FinishCompactionOutputFile(CompactionState* compact, Iterator* input);
  Status InstallCompactionResults(CompactionState* compact)
      EXCLUSIVE_LOCKS_REQUIRED(mutex_);

  const Comparator* user_comparator() const {
    return internal_comparator_.user_comparator();
  }

  // Constant after construction
  Env* const env_;
  const InternalKeyComparator internal_comparator_;
  const InternalFilterPolicy internal_filter_policy_;
  const Options options_;  // options_.comparator == &internal_comparator_
  const bool owns_info_log_;
  const bool owns_cache_;
  const std::string dbname_;

  // table_cache_ provides its own synchronization
  TableCache* const table_cache_;

  // Lock over the persistent DB state. Non-null iff successfully acquired.
  FileLock* db_lock_;

  // State below is protected by mutex_
  port::Mutex mutex_;
  std::atomic<bool> shutting_down_;
  port::CondVar background_work_finished_signal_ GUARDED_BY(mutex_);
  MemTable* mem_;
  MemTable* imm_ GUARDED_BY(mutex_);  // Memtable being compacted
  std::atomic<bool> has_imm_;         // So bg thread can detect non-null imm_
  WritableFile* logfile_;
  uint64_t logfile_number_ GUARDED_BY(mutex_);
  log::Writer* log_;
  uint32_t seed_ GUARDED_BY(mutex_);  // For sampling.

  // Queue of writers.
  std::deque<Writer*> writers_ GUARDED_BY(mutex_);
  WriteBatch* tmp_batch_ GUARDED_BY(mutex_);

  SnapshotList snapshots_ GUARDED_BY(mutex_);

  // Set of table files to protect from deletion because they are
  // part of ongoing compactions.
  std::set<uint64_t> pending_outputs_ GUARDED_BY(mutex_);

  // Has a background compaction been scheduled or is running?
  bool background_compaction_scheduled_ GUARDED_BY(mutex_);

  ManualCompaction* manual_compaction_ GUARDED_BY(mutex_);

  VersionSet* const versions_ GUARDED_BY(mutex_);

  // Have we encountered a background error in paranoid mode?
  Status bg_error_ GUARDED_BY(mutex_);

  CompactionStats stats_[config::kNumLevels] GUARDED_BY(mutex_);
};
// Sanitize db options. The caller should delete result.info_log if | |||
// it is not equal to src.info_log. | |||
Options SanitizeOptions(const std::string& db, | |||
const InternalKeyComparator* icmp, | |||
const InternalFilterPolicy* ipolicy, | |||
const Options& src); | |||
} // namespace leveldb | |||
#endif // STORAGE_LEVELDB_DB_DB_IMPL_H_ |
@ -1,20 +0,0 @@ | |||
#include "util/coding.h" | |||
#include "db/vlog_converter.h" | |||
namespace leveldb{ | |||
namespace vlog{ | |||
// When a key/value pair is inserted into the database, the value's storage
// location (file_no and file_offset) is encoded as a "vlog pointer" and
// stored alongside the key; the compact varint encoding keeps the index small.
// Encode the value's location (file_no, file_offset) as two consecutive
// varint64s into buf, returning a Slice over the written bytes. The caller
// must supply a buffer large enough for two varint64s (>= 20 bytes).
Slice VlogConverter::GetVptr(uint64_t file_no, uint64_t file_offset, char* buf){
  char* vfileno_end = EncodeVarint64(buf, file_no);
  char* vfileoff_end = EncodeVarint64(vfileno_end, file_offset);
  return Slice(buf, vfileoff_end - buf);
}
// Decode a vlog pointer: two consecutive varint64s (file number, then file
// offset), consuming the parsed bytes from *vptr. Both decodes are always
// attempted, matching the original advance-on-partial-failure behavior.
// Returns Corruption if either varint is malformed or truncated.
Status VlogConverter::DecodeVptr(uint64_t* file_no, uint64_t* file_offset, Slice* vptr){
  const bool got_file_no = GetVarint64(vptr, file_no);
  const bool got_offset = GetVarint64(vptr, file_offset);
  if (got_file_no && got_offset) {
    return Status::OK();
  }
  return Status::Corruption("Can not Decode vptr from Read Bytes.");
}
}// namespace vlog | |||
} |
@ -1,19 +0,0 @@ | |||
#ifndef STORAGE_LEVELDB_DB_VLOG_CONVERTER_H_ | |||
#define STORAGE_LEVELDB_DB_VLOG_CONVERTER_H_ | |||
#include <cstdint> | |||
#include "leveldb/slice.h" | |||
#include "leveldb/status.h" | |||
namespace leveldb{ | |||
namespace vlog{ | |||
// Encodes/decodes "vlog pointers": the (file number, file offset) pair that
// locates a value inside a vlog file. Stateless; cheap to copy or share.
class VlogConverter{
 public:
  VlogConverter() = default;
  ~VlogConverter() = default;
  // Encode (file_no, file_offset) as two varint64s into buf (caller supplies
  // >= 20 bytes); returns a Slice over the encoded bytes.
  Slice GetVptr(uint64_t file_no, uint64_t file_offset, char* buf);
  // Reverse of GetVptr; consumes the parsed bytes from *vptr.
  // Returns Corruption when either varint cannot be decoded.
  Status DecodeVptr(uint64_t* file_no, uint64_t* file_offset, Slice* vptr);
};
}// namespace vlog | |||
} | |||
#endif |
@ -1,33 +0,0 @@ | |||
#include "db/vlog_manager.h" | |||
namespace leveldb{ | |||
namespace vlog{ | |||
void VlogManager::AddVlogFile(uint64_t vlogfile_number, SequentialFile* seq_file, WritableFile* write_file){ | |||
if(vlog_table_.find(vlogfile_number) == vlog_table_.end()){ | |||
vlog_table_[vlogfile_number] = seq_file; | |||
writable_to_sequential_[write_file] = seq_file; | |||
} | |||
else{ | |||
//Do Nothing | |||
} | |||
} | |||
// Return the reader handle registered for vlogfile_number, or nullptr when
// the file number is unknown.
SequentialFile* VlogManager::GetVlogFile(uint64_t vlogfile_number){
  const auto entry = vlog_table_.find(vlogfile_number);
  return entry == vlog_table_.end() ? nullptr : entry->second;
}
bool VlogManager::IsEmpty(){ | |||
return vlog_table_.size() == 0; | |||
} | |||
// Mark that a vlog file has one newly-invalidated value.
// Record that the value at `offset` inside vlog file `vlogfile_number` has
// become stale. Unknown file numbers are silently ignored.
void VlogManager::MarkVlogValueInvalid(uint64_t vlogfile_number, uint64_t offset) {
  auto vlog_file = GetVlogFile(vlogfile_number);
  if (vlog_file) {
    vlog_file->MarkValueInvalid(offset);  // per-file bookkeeping (project extension to SequentialFile)
  }
}
}// namespace vlog | |||
} |
@ -1,61 +0,0 @@ | |||
#ifndef STORAGE_LEVELDB_DB_VLOG_MANAGER_H_ | |||
#define STORAGE_LEVELDB_DB_VLOG_MANAGER_H_ | |||
#include <unordered_map> | |||
#include <cstdint> | |||
#include "leveldb/env.h" | |||
#include "db/filename.h" | |||
#include "leveldb/options.h" | |||
namespace leveldb{ | |||
class SequentialFile; | |||
namespace vlog{ | |||
class VlogManager{ | |||
public: | |||
VlogManager() = default; | |||
~VlogManager() = default; | |||
//Add a vlog file, vlog file is already exist. | |||
void AddVlogFile(uint64_t vlogfile_number, SequentialFile* seq_file, WritableFile* write_file); | |||
SequentialFile* GetVlogFile(uint64_t vlogfile_number); | |||
bool IsEmpty(); | |||
void MarkVlogValueInvalid(uint64_t vlogfile_number, uint64_t offset); | |||
SequentialFile* GetSequentialFile(WritableFile* write_file) { | |||
auto it = writable_to_sequential_.find(write_file); | |||
return it != writable_to_sequential_.end() ? it->second : nullptr; | |||
} | |||
void IncrementTotalValueCount(WritableFile* write_file) { | |||
auto seq_file = GetSequentialFile(write_file); | |||
if (seq_file) { | |||
seq_file->IncrementTotalValueCount(); // 假设 SequentialFile 提供该方法 | |||
} | |||
} | |||
void CleanupInvalidVlogFiles(const Options& options, const std::string& dbname) { | |||
for (const auto& vlog_pair : vlog_table_) { | |||
uint64_t vlogfile_number = vlog_pair.first; | |||
auto vlog_file = vlog_pair.second; | |||
if (vlog_file->AllValuesInvalid()) { // 检查文件内所有值是否无效 | |||
RemoveVlogFile(vlogfile_number, options, dbname); // 删除 VLog 文件 | |||
} | |||
} | |||
} | |||
void RemoveVlogFile(uint64_t vlogfile_number, const Options& options, const std::string& dbname) { // 移除无效的vlogfile文件 | |||
auto it = vlog_table_.find(vlogfile_number); | |||
if (it != vlog_table_.end()) { | |||
delete it->second; // 删除对应的 SequentialFile | |||
vlog_table_.erase(it); // 从管理器中移除 | |||
options.env->DeleteFile(VlogFileName(dbname, vlogfile_number)); // 删除实际文件 | |||
} | |||
} | |||
private: | |||
std::unordered_map<uint64_t, SequentialFile*> vlog_table_; // 用映射组织vlog文件号和文件的关系 | |||
std::unordered_map<WritableFile*, SequentialFile*> writable_to_sequential_; | |||
}; | |||
}// namespace vlog | |||
} | |||
#endif |
@ -1,58 +0,0 @@ | |||
#include <cstdint> | |||
#include "db/vlog_reader.h" | |||
#include "leveldb/slice.h" | |||
#include "leveldb/env.h" | |||
#include "util/coding.h" | |||
namespace leveldb{ | |||
namespace vlog{ | |||
// Wraps a SequentialFile; the caller retains ownership of `file`.
VReader::VReader(SequentialFile* file) // A file abstraction for reading sequentially through a file
    :file_(file){}
// Read the record stored at vfile_offset into *record.
// On-disk layout: varint64 record size (at most 10 bytes) followed by the
// record payload. Returns Corruption when the size prefix cannot be decoded,
// otherwise the status of the underlying file reads.
Status VReader::ReadRecord(uint64_t vfile_offset, std::string* record){
  // Position the file at the start of the record.
  Status s = file_->SkipFromHead(vfile_offset);
  if(!s.ok()) return s;
  // Read the varint-encoded record size (up to 10 bytes).
  char size_buf[11];
  Slice size_slice;
  s = file_->Read(10, &size_slice, size_buf);
  if(!s.ok()) return s;
  uint64_t rec_size = 0;
  if(!GetVarint64(&size_slice, &rec_size)){
    return Status::Corruption("Failed to decode vlog record size.");
  }
  // Re-position just past the size prefix (GetVarint64 advanced size_slice,
  // so size_slice.data() - size_buf is the prefix length), then read the
  // payload directly into the caller's string.
  // BUGFIX: the original leaked `new char[rec_size]` on every call (and on
  // every error path) and made two extra copies of the payload.
  s = file_->SkipFromHead(vfile_offset + (size_slice.data() - size_buf));
  if(!s.ok()) return s;
  record->resize(rec_size);
  Slice rec_slice;
  s = file_->Read(rec_size, &rec_slice, rec_size ? &(*record)[0] : size_buf);
  if(!s.ok()) return s;
  // Read may return fewer bytes or a slice not backed by our scratch buffer;
  // normalize *record to exactly what was read.
  record->assign(rec_slice.data(), rec_slice.size());
  return s;
}
// Read the record at vfile_offset and split it into key and value.
// Record layout: varint64 key_size, key bytes, varint64 val_size, value bytes.
// Returns Corruption when decoding fails or the record has trailing garbage.
Status VReader::ReadKV(uint64_t vfile_offset, std::string* key, std::string* val){
  std::string record_str;
  Status s = ReadRecord(vfile_offset, &record_str);
  if(!s.ok()) return s;
  Slice record = Slice(record_str);
  // BUGFIX: also verify each decoded size fits in the remaining bytes so a
  // corrupt record cannot make Slice() read past the end of the buffer.
  uint64_t key_size;
  if(!GetVarint64(&record, &key_size) || key_size > record.size()){
    return Status::Corruption("Failed to decode Record Read From vlog.");
  }
  *key = Slice(record.data(), key_size).ToString();
  record.remove_prefix(key_size);
  uint64_t val_size;
  if(!GetVarint64(&record, &val_size) || val_size > record.size()){
    return Status::Corruption("Failed to decode Record Read From vlog.");
  }
  *val = Slice(record.data(), val_size).ToString();
  // Any bytes left after the value indicate a malformed record.
  if(val->size() != record.size()){
    return Status::Corruption("Failed to decode Record Read From vlog.");
  }
  return s;
}
}// namespace vlog. | |||
} |
@ -1,25 +0,0 @@ | |||
#ifndef STORAGE_LEVELDB_DB_VLOG_READER_H_ | |||
#define STORAGE_LEVELDB_DB_VLOG_READER_H_ | |||
#include <cstdint> | |||
#include "leveldb/slice.h" | |||
#include "leveldb/status.h" | |||
#include "port/port.h" | |||
namespace leveldb { | |||
class SequentialFile; | |||
namespace vlog { | |||
// Reads records back out of a vlog file by absolute offset.
// Does not own the underlying SequentialFile.
class VReader {
 public:
  explicit VReader(SequentialFile* file);
  ~VReader() = default;
  // Read the record stored at vfile_offset into *record.
  Status ReadRecord(uint64_t vfile_offset, std::string* record);
  // Read the record at vfile_offset and split it into its key and value.
  Status ReadKV(uint64_t vfile_offset, std::string* key ,std::string* val);
 private:
  SequentialFile* file_;  // not owned
};
} // namespace vlog | |||
} | |||
#endif |
@ -1,26 +0,0 @@ | |||
#include <cstdint> | |||
#include "db/vlog_writer.h" | |||
#include "leveldb/slice.h" | |||
#include "leveldb/env.h" | |||
#include "util/coding.h" | |||
namespace leveldb{ | |||
namespace vlog{ | |||
// Caller retains ownership of vlogfile.
VWriter::VWriter(WritableFile* vlogfile)
    :vlogfile_(vlogfile){}

VWriter::~VWriter() = default;
// Append one record to the vlog: a varint64-encoded payload length followed
// by the payload itself. write_size receives the total number of bytes
// appended (header + payload).
Status VWriter::AddRecord(const Slice& slice, int& write_size){
  char header[10];  // a varint64 occupies at most 10 bytes
  char* header_end = EncodeVarint64(header, slice.size());
  const int header_len = static_cast<int>(header_end - header);
  write_size = header_len + static_cast<int>(slice.size());
  // Write the length prefix, then the payload.
  Status s = vlogfile_->Append(Slice(header, header_len));
  if (!s.ok()) {
    return s;
  }
  return vlogfile_->Append(slice);
}
// Flush buffered vlog bytes to the underlying file (durability is still
// subject to the WritableFile implementation's sync semantics).
Status VWriter::Flush(){
  return vlogfile_->Flush();
}
}// namespace vlog | |||
} |
@ -1,26 +0,0 @@ | |||
#ifndef STORAGE_LEVELDB_DB_VLOG_WRITER_H_ | |||
#define STORAGE_LEVELDB_DB_VLOG_WRITER_H_ | |||
#include <cstdint> | |||
#include "leveldb/slice.h" | |||
#include "leveldb/status.h" | |||
// format: [size, key, vptr, value]. | |||
namespace leveldb{ | |||
class WritableFile; | |||
namespace vlog{ | |||
// Appends length-prefixed records to a vlog file.
// Record layout: varint64 payload size, then the payload bytes.
// Does not own the underlying WritableFile. Non-copyable.
class VWriter{
 public:
  explicit VWriter(WritableFile* vlogfile);
  ~VWriter();
  // Append one record; write_size receives the total bytes written.
  Status AddRecord(const Slice& slice, int& write_size);
  VWriter(const VWriter&) = delete;
  VWriter& operator=(const VWriter&) = delete;
  // Flush buffered bytes to the underlying file.
  Status Flush();
 private:
  WritableFile* vlogfile_;  // not owned
};
}// namespace vlog | |||
} | |||
#endif |
@ -1,168 +1,209 @@ | |||
// Copyright (c) 2011 The LevelDB Authors. All rights reserved. | |||
// Use of this source code is governed by a BSD-style license that can be | |||
// found in the LICENSE file. See the AUTHORS file for names of contributors. | |||
// | |||
// WriteBatch::rep_ := | |||
// sequence: fixed64 | |||
// count: fixed32 | |||
// data: record[count] | |||
// record := | |||
// kTypeValue varstring varstring | | |||
// kTypeDeletion varstring | |||
// varstring := | |||
// len: varint32 | |||
// data: uint8[len] | |||
#include "leveldb/write_batch.h" | |||
#include "db/dbformat.h" | |||
#include "db/memtable.h" | |||
#include "db/write_batch_internal.h" | |||
#include "leveldb/db.h" | |||
#include "util/coding.h" | |||
namespace leveldb { | |||
// WriteBatch header has an 8-byte sequence number followed by a 4-byte count. | |||
static const size_t kHeader = 12; | |||
// A fresh batch is just the 12-byte header (sequence=0, count=0).
WriteBatch::WriteBatch() { Clear(); }

WriteBatch::~WriteBatch() = default;

WriteBatch::Handler::~Handler() = default;

// Reset to an empty batch: header retained (zeroed), all records dropped.
void WriteBatch::Clear() {
  rep_.clear();
  rep_.resize(kHeader);
}

// Size in bytes of the serialized representation.
size_t WriteBatch::ApproximateSize() const { return rep_.size(); }
// Replay every record in the batch through `handler`, in insertion order.
// Returns Corruption if the serialized form is malformed or the record count
// in the header disagrees with the records actually present.
Status WriteBatch::Iterate(Handler* handler) const {
  Slice input(rep_);
  if (input.size() < kHeader) {
    return Status::Corruption("malformed WriteBatch (too small)");
  }
  // Skip the 12-byte header: 8-byte sequence number + 4-byte count.
  input.remove_prefix(kHeader);
  Slice key, value;
  int found = 0;
  while (!input.empty()) {
    found++;
    char tag = input[0];  // record type: kTypeValue or kTypeDeletion
    input.remove_prefix(1);
    switch (tag) {
      case kTypeValue:
        if (GetLengthPrefixedSlice(&input, &key) &&
            GetLengthPrefixedSlice(&input, &value)) {
          handler->Put(key, value);
        } else {
          return Status::Corruption("bad WriteBatch Put");
        }
        break;
      case kTypeDeletion:
        if (GetLengthPrefixedSlice(&input, &key)) {
          handler->Delete(key);
        } else {
          return Status::Corruption("bad WriteBatch Delete");
        }
        break;
      default:
        return Status::Corruption("unknown WriteBatch tag");
    }
  }
  // Sanity check: decoded record count must match the stored count.
  if (found != WriteBatchInternal::Count(this)) {
    return Status::Corruption("WriteBatch has wrong count");
  } else {
    return Status::OK();
  }
}
// Record count lives in the fixed32 at header bytes [8, 12).
int WriteBatchInternal::Count(const WriteBatch* b) {
  return DecodeFixed32(b->rep_.data() + 8);
}

void WriteBatchInternal::SetCount(WriteBatch* b, int n) {
  EncodeFixed32(&b->rep_[8], n);
}

// Starting sequence number lives in the fixed64 at header bytes [0, 8).
SequenceNumber WriteBatchInternal::Sequence(const WriteBatch* b) {
  return SequenceNumber(DecodeFixed64(b->rep_.data()));
}

void WriteBatchInternal::SetSequence(WriteBatch* b, SequenceNumber seq) {
  EncodeFixed64(&b->rep_[0], seq);
}

// Append a Put record: tag byte, then length-prefixed key and value.
void WriteBatch::Put(const Slice& key, const Slice& value) {
  WriteBatchInternal::SetCount(this, WriteBatchInternal::Count(this) + 1);
  rep_.push_back(static_cast<char>(kTypeValue));
  PutLengthPrefixedSlice(&rep_, key);
  PutLengthPrefixedSlice(&rep_, value);
}

// Append a Delete record: tag byte, then length-prefixed key.
void WriteBatch::Delete(const Slice& key) {
  WriteBatchInternal::SetCount(this, WriteBatchInternal::Count(this) + 1);
  rep_.push_back(static_cast<char>(kTypeDeletion));
  PutLengthPrefixedSlice(&rep_, key);
}

// Concatenate another batch's records onto this one.
void WriteBatch::Append(const WriteBatch& source) {
  WriteBatchInternal::Append(this, &source);
}
namespace {
// Applies batch records to a MemTable, assigning consecutive sequence
// numbers starting from sequence_.
class MemTableInserter : public WriteBatch::Handler {
 public:
  SequenceNumber sequence_;  // next sequence number to assign
  MemTable* mem_;            // destination memtable (not owned)
  void Put(const Slice& key, const Slice& value) override {
    mem_->Add(sequence_, kTypeValue, key, value);
    sequence_++;
  }
  void Delete(const Slice& key) override {
    mem_->Add(sequence_, kTypeDeletion, key, Slice());
    sequence_++;
  }
};

// Variant intended for the KV-separated memtable path ("KVSeqMem").
// Put() is an unimplemented stub that asserts; only Delete() is functional.
// Not referenced by InsertInto below.
class MemTableInserterKVSeq : public WriteBatch::Handler{
 public:
  SequenceNumber sequence_;  // next sequence number to assign
  MemTable* mem_;            // destination memtable (not owned)
  void Put(const Slice& key, const Slice& value) override {
    assert(0 && "TODO");
    mem_->Add(sequence_, kTypeValue, key, value);
    sequence_++;
  }
  void Delete(const Slice& key) override {
    mem_->Add(sequence_, kTypeDeletion, key, Slice());
    sequence_++;
  }
};
}  // namespace
// Replay batch b into memtable, numbering records from b's sequence number.
Status WriteBatchInternal::InsertInto(const WriteBatch* b, MemTable* memtable) {
  MemTableInserter inserter;
  inserter.sequence_ = WriteBatchInternal::Sequence(b);
  inserter.mem_ = memtable;
  return b->Iterate(&inserter);
}

// Overwrite b's serialized representation wholesale (e.g. from a log replay).
void WriteBatchInternal::SetContents(WriteBatch* b, const Slice& contents) {
  assert(contents.size() >= kHeader);
  b->rep_.assign(contents.data(), contents.size());
}

// Splice src's records (minus its header) onto dst and fix up dst's count.
void WriteBatchInternal::Append(WriteBatch* dst, const WriteBatch* src) {
  SetCount(dst, Count(dst) + Count(src));
  assert(src->rep_.size() >= kHeader);
  dst->rep_.append(src->rep_.data() + kHeader, src->rep_.size() - kHeader);
}
} // namespace leveldb | |||
// Copyright (c) 2011 The LevelDB Authors. All rights reserved. | |||
// Use of this source code is governed by a BSD-style license that can be | |||
// found in the LICENSE file. See the AUTHORS file for names of contributors. | |||
// | |||
// WriteBatch::rep_ := | |||
// sequence: fixed64 | |||
// count: fixed32 | |||
// data: record[count] | |||
// record := | |||
// kTypeValue varstring varstring | | |||
// kTypeDeletion varstring | |||
// varstring := | |||
// len: varint32 | |||
// data: uint8[len] | |||
#include "leveldb/write_batch.h"

#include <chrono>   // expiry-time computation for the TTL Put overload
#include <cstdint>
#include <ctime>    // std::localtime / std::strftime
#include <sstream>  // For std::ostringstream
#include <string>

#include "db/db_impl.h"
#include "db/dbformat.h"
#include "db/memtable.h"
#include "db/write_batch_internal.h"
#include "leveldb/db.h"
#include "util/coding.h"
namespace leveldb { | |||
// WriteBatch header has an 8-byte sequence number followed by a 4-byte count. | |||
static const size_t kHeader = 12; | |||
// A fresh batch is just the 12-byte header (sequence=0, count=0).
WriteBatch::WriteBatch() { Clear(); }

WriteBatch::~WriteBatch() = default;

WriteBatch::Handler::~Handler() = default;

// Reset to an empty batch: header retained (zeroed), all records dropped.
void WriteBatch::Clear() {
  rep_.clear();
  rep_.resize(kHeader);
}

// Size in bytes of the serialized representation.
size_t WriteBatch::ApproximateSize() const { return rep_.size(); }
// Replay every record in the batch through `handler`, in insertion order.
// Returns Corruption if the serialized form is malformed or the record count
// in the header disagrees with the records actually present.
Status WriteBatch::Iterate(Handler* handler) const {
  Slice input(rep_);
  if (input.size() < kHeader) {
    return Status::Corruption("malformed WriteBatch (too small)");
  }
  // Skip the 12-byte header: 8-byte sequence number + 4-byte count.
  input.remove_prefix(kHeader);
  Slice key, value;
  int found = 0;
  while (!input.empty()) {
    found++;
    char tag = input[0];  // record type: kTypeValue or kTypeDeletion
    input.remove_prefix(1);
    switch (tag) {
      case kTypeValue:
        if (GetLengthPrefixedSlice(&input, &key) &&
            GetLengthPrefixedSlice(&input, &value)) {
          handler->Put(key, value);
        } else {
          return Status::Corruption("bad WriteBatch Put");
        }
        break;
      case kTypeDeletion:
        if (GetLengthPrefixedSlice(&input, &key)) {
          handler->Delete(key);
        } else {
          return Status::Corruption("bad WriteBatch Delete");
        }
        break;
      default:
        return Status::Corruption("unknown WriteBatch tag");
    }
  }
  // Sanity check: decoded record count must match the stored count.
  if (found != WriteBatchInternal::Count(this)) {
    return Status::Corruption("WriteBatch has wrong count");
  } else {
    return Status::OK();
  }
}
// Record count lives in the fixed32 at header bytes [8, 12).
int WriteBatchInternal::Count(const WriteBatch* b) {
  return DecodeFixed32(b->rep_.data() + 8);
}

void WriteBatchInternal::SetCount(WriteBatch* b, int n) {
  EncodeFixed32(&b->rep_[8], n);
}

// Starting sequence number lives in the fixed64 at header bytes [0, 8).
SequenceNumber WriteBatchInternal::Sequence(const WriteBatch* b) {
  return SequenceNumber(DecodeFixed64(b->rep_.data()));
}

void WriteBatchInternal::SetSequence(WriteBatch* b, SequenceNumber seq) {
  EncodeFixed64(&b->rep_[0], seq);
}

// Append a Put record: tag byte, then length-prefixed key and value.
void WriteBatch::Put(const Slice& key, const Slice& value) {
  WriteBatchInternal::SetCount(this, WriteBatchInternal::Count(this) + 1);
  rep_.push_back(static_cast<char>(kTypeValue));
  PutLengthPrefixedSlice(&rep_, key);
  PutLengthPrefixedSlice(&rep_, value);
}
// void WriteBatch::Put(const Slice& key, const Slice& value) {  // KV separation (2023-12-07)
//   if (DBImpl::key_value_separated_) {
//     // Key/value separation path: store the key and value in different
//     // containers (implementation to be filled in as needed).
//     if (value.size() > max_value_size_) {
//       // Separate the value out: append it to a blob file, e.g.:
//       std::ofstream blobfile("blobfile.dat", std::ios::binary | std::ios::app);
//       blobfile.write(value.data(), value.size());
//       blobfile.close();
//     }
//   }
//   else {
//     // Non-separated path: inline the value as usual.
//     WriteBatchInternal::SetCount(this, WriteBatchInternal::Count(this) + 1);
//     rep_.push_back(static_cast<char>(kTypeValue));
//     PutLengthPrefixedSlice(&rep_, key);
//     PutLengthPrefixedSlice(&rep_, value);
//   }
// }
void WriteBatch::Put(const Slice& key, const Slice& value, uint64_t ttl) { | |||
WriteBatchInternal::SetCount(this, WriteBatchInternal::Count(this) + 1); | |||
rep_.push_back(static_cast<char>(kTypeValue)); | |||
PutLengthPrefixedSlice(&rep_, key); | |||
// 获取当前时间 | |||
auto now = std::chrono::system_clock::now(); | |||
// 加上ttl | |||
auto future_time = now + std::chrono::seconds(ttl); | |||
// 转换为 time_t | |||
std::time_t future_time_t = std::chrono::system_clock::to_time_t(future_time); | |||
// 将 time_t 转换为 tm 结构 | |||
std::tm* local_tm = std::localtime(&future_time_t); | |||
// 格式化为字符串 | |||
char buffer[20]; // 格式化字符串的缓冲区 | |||
std::strftime(buffer, sizeof(buffer), "%Y-%m-%d %H:%M:%S", local_tm); | |||
std::string future_time_str(buffer); | |||
// 拼接原本的值和时间字符串 | |||
std::string combined_str = value.ToString() + future_time_str; | |||
PutLengthPrefixedSlice(&rep_, Slice(combined_str)); | |||
} // 心 | |||
// Append a Delete record: tag byte, then length-prefixed key.
void WriteBatch::Delete(const Slice& key) {
  WriteBatchInternal::SetCount(this, WriteBatchInternal::Count(this) + 1);
  rep_.push_back(static_cast<char>(kTypeDeletion));
  PutLengthPrefixedSlice(&rep_, key);
}

// Concatenate another batch's records onto this one.
void WriteBatch::Append(const WriteBatch& source) {
  WriteBatchInternal::Append(this, &source);
}
namespace {
// Applies batch records to a MemTable, assigning consecutive sequence
// numbers starting from sequence_.
class MemTableInserter : public WriteBatch::Handler {
 public:
  SequenceNumber sequence_;  // next sequence number to assign
  MemTable* mem_;            // destination memtable (not owned)
  void Put(const Slice& key, const Slice& value) override {
    mem_->Add(sequence_, kTypeValue, key, value);
    sequence_++;
  }
  void Delete(const Slice& key) override {
    mem_->Add(sequence_, kTypeDeletion, key, Slice());
    sequence_++;
  }
};
}  // namespace
// Replay batch b into memtable, numbering records from b's sequence number.
Status WriteBatchInternal::InsertInto(const WriteBatch* b, MemTable* memtable) {
  MemTableInserter inserter;
  inserter.sequence_ = WriteBatchInternal::Sequence(b);
  inserter.mem_ = memtable;
  return b->Iterate(&inserter);
}

// Overwrite b's serialized representation wholesale (e.g. from a log replay).
void WriteBatchInternal::SetContents(WriteBatch* b, const Slice& contents) {
  assert(contents.size() >= kHeader);
  b->rep_.assign(contents.data(), contents.size());
}

// Splice src's records (minus its header) onto dst and fix up dst's count.
void WriteBatchInternal::Append(WriteBatch* dst, const WriteBatch* src) {
  SetCount(dst, Count(dst) + Count(src));
  assert(src->rep_.size() >= kHeader);
  dst->rep_.append(src->rep_.data() + kHeader, src->rep_.size() - kHeader);
}
} // namespace leveldb |
@ -1,45 +0,0 @@ | |||
import matplotlib.pyplot as plt

# Value sizes (bytes) used for each benchmark run.
x = [128, 256, 512, 1024, 2048, 3072, 4096]

# Each table: one row per KV-separation strategy, one column per value size.
# Rows: noKVSep, kvSepBeforeMem, kvSepBeforeSSD.
y_w = [
    [52.8, 67.0, 60.8, 52.3, 42.2, 34.2, 30.2],          # noKVSep
    [44.2, 87.5, 139.5, 274.2, 426.3, 576.2, 770.4],     # kvSepBeforeMem
    [59.9, 102.4, 147.9, 173.5, 184.2, 199.2, 206.8],    # kvSepBeforeSSD
]
y_r = [
    [731.9, 1127.4, 1515.2, 3274.7, 4261.9, 4886.3, 4529.8],  # noKVSep
    [158.9, 154.9, 145.0, 160.9, 147.3, 144.0, 127.4],        # kvSepBeforeMem
    [171.1, 136.0, 179.8, 169.8, 159.9, 161.5, 168.6],        # kvSepBeforeSSD
]
y_random = [
    [2.363, 2.698, 3.972, 3.735, 7.428, 12.137, 17.753],   # noKVSep
    [2.957, 2.953, 3.417, 3.363, 3.954, 17.516, 79.023],   # kvSepBeforeMem
    [2.927, 2.739, 2.947, 3.604, 3.530, 19.189, 80.608],   # kvSepBeforeSSD
]

LABELS = ["noKVSep", "kvSepBeforeMem", "kvSepBeforeSSD"]
STYLES = ["bo", "g^", "y+"]


def _plot(fig_num, title, ylabel, series):
    """Plot the three KV-separation strategies for one benchmark and show it."""
    plt.figure(num=fig_num)
    plt.title(title)
    plt.xlabel("Value size(B)")
    plt.ylabel(ylabel)
    for ys, style in zip(series, STYLES):
        plt.plot(x, ys, style, linestyle="dashed")
    plt.legend(LABELS)
    plt.show()


# BUGFIX: the original reused figure num=1 for the second chart and drew the
# third chart without creating any figure (so it landed on the second one),
# and misspelled "Throughput" as "Throughout" in the axis labels.
_plot(1, "Write Performance(fillrandom)", "Throughput(MiB/s)", y_w)
_plot(2, "Read Performance(readreverse)", "Throughput(MiB/s)", y_r)
_plot(3, "Read Performance(readrandom)", "Micros/op", y_random)
@ -1,50 +0,0 @@ | |||
#include <iostream> | |||
#include "leveldb/db.h" | |||
#include "leveldb/options.h" | |||
#include "gtest/gtest.h" | |||
// Shared gtest fixture: opens a fresh leveldb handle at /tmp/testdb before
// each test and closes it afterwards.
class LevelDBTest : public ::testing::Test {
 protected:
  leveldb::DB* db;                    // handle under test
  leveldb::Options options;
  std::string db_path = "/tmp/testdb";

  void SetUp() override {
    options.create_if_missing = true;
    leveldb::Status status = leveldb::DB::Open(options, db_path, &db);
    ASSERT_TRUE(status.ok()) << "Failed to open DB: " << status.ToString();
  }
  void TearDown() override {
    delete db;
  }
};
// Verifies that a deleted key is gone after a manual CompactRange
// over the surrounding key span.
TEST_F(LevelDBTest, CompactionTest) {
  // Insert some data.
  db->Put(leveldb::WriteOptions(), "start", "value1");
  db->Put(leveldb::WriteOptions(), "end", "value2");
  db->Put(leveldb::WriteOptions(), "key_to_delete", "value3");
  // Delete one key, creating a deletion marker.
  db->Delete(leveldb::WriteOptions(), "key_to_delete");
  // Trigger compaction over the range.
  leveldb::Slice begin_key("start");
  leveldb::Slice end_key("end");
  db->CompactRange(&begin_key, &end_key);
  // Verify the data after compaction: the deleted key must not come back.
  std::string value;
  leveldb::Status status = db->Get(leveldb::ReadOptions(), "key_to_delete", &value);
  if (!status.ok()) {
    std::cout << "'key_to_delete' was successfully removed during compaction." << std::endl;
  } else {
    FAIL() << "Unexpected: 'key_to_delete' still exists: " << value;
  }
}
// Standard gtest entry point.
int main(int argc, char** argv) {
  ::testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}
@ -1,28 +0,0 @@ | |||
#include <cassert> | |||
#include <iostream> | |||
#include "leveldb/db.h" | |||
// Smoke test for the kVSepBeforeSSD key/value-separation path: write
// 100000 large values, then read them all back and print each hit.
int main(){
  leveldb::DB* db;
  leveldb::Options options;
  options.create_if_missing = true;
  options.kvSepType = leveldb::kVSepBeforeSSD;  // exercise KV separation
  leveldb::Status status = leveldb::DB::Open(options, "/tmp/testdb", &db);
  std::cout<< status.ToString() << '\n';
  // 4 KiB filler so every value is large enough to be separated.
  std::string fill_str(4096, '%');
  // Write keys key1..key100000 (descending order).
  for(int i = 100000; i >= 1; i--){
    status = db -> Put(leveldb::WriteOptions(), "key" + std::to_string(i), "val" + std::to_string(i) + fill_str);
  }
  if(status.ok()) {
    std::string val;
    // BUGFIX: the original read keys 0..99999 although it wrote 1..100000,
    // so "key0" always missed and "key100000" was never checked.
    for(int i = 1; i <= 100000; i++){
      status = db -> Get(leveldb::ReadOptions(), "key" + std::to_string(i), &val);
      if(status.ok()) std::cout<< "Find value of \'key"<<i<<"\' From db:" << val << "\n";
    }
  }
  delete db;
}
@ -1,173 +0,0 @@ | |||
#include "WiscKeyTest_1.h" | |||
#include <fstream> | |||
#include <algorithm> | |||
#include <vector> | |||
#include <ctime> | |||
#include <cstdlib> | |||
// Bundles the leveldb index database with the value-log file handle.
typedef struct WiscKey { // integrates a leveldb database and a log file
    string dir;      // database directory
    DB * leveldb;    // key -> "offset&&length" index
    FILE * logfile;  // append-only value log
} WK;
// Look up `key`: fetch its "offset&&length" index entry from leveldb, then
// read the value bytes back from the log file at that offset.
// Returns false when the key is absent or the log read fails.
static bool wisckey_get(WK * wk, string &key, string &value)
{
    cout << "\n\t\tGet Function\n\n";
    cout << "Key Received: " << key << endl;
    cout << "Value Received: " << value << endl;
    string offsetinfo;
    const bool found = leveldb_get(wk->leveldb, key, offsetinfo);
    if (found) {
        cout << "Offset and Length: " << offsetinfo << endl;
    }
    else {
        cout << "Record:Not Found" << endl;
        return false;
    }
    // offsetinfo has the form "<offset>&&<length>".
    const string delimiter = "&&";
    const size_t pos = offsetinfo.find(delimiter);
    const string value_offset = offsetinfo.substr(0, pos);
    const string value_length =
        (pos == string::npos) ? string() : offsetinfo.substr(pos + delimiter.length());
    cout << "Value Offset: " << value_offset << endl;
    cout << "Value Length: " << value_length << endl;
    const long offset = std::stol(value_offset);
    const long length = std::stol(value_length);
    // BUGFIX: the original did fread(&value_record, ...) directly into a
    // std::string object, clobbering its internal representation (undefined
    // behavior). Read into properly sized character storage instead.
    std::string value_record(static_cast<size_t>(length), '\0');
    fseek(wk->logfile, offset, SEEK_SET);
    if (length > 0 &&
        fread(&value_record[0], 1, static_cast<size_t>(length), wk->logfile)
            != static_cast<size_t>(length)) {
        cout << "Record:Not Found" << endl;
        return false;
    }
    cout << "LogFile Value: " << value_record << endl;
    return true;
}
// Append `value` to the log file and index "<offset>&&<size>" under `key`.
static void wisckey_set(WK * wk, string &key, string &value)
{
    const long offset = ftell(wk->logfile);
    // BUGFIX: the original used sizeof(value) — the size of the std::string
    // object, not of its contents — and fwrite(&value, ...), which wrote the
    // raw bytes of the string object itself (undefined behavior). Record the
    // real content length and write the actual character data.
    const long size = static_cast<long>(value.size());
    std::stringstream vlog_value;
    vlog_value << offset << "&&" << size;
    std::string s = vlog_value.str();
    fwrite(value.data(), 1, value.size(), wk->logfile);
    leveldb_set(wk->leveldb, key, s);
}
// Remove key's index entry. The value bytes remain in the log file; no
// garbage collection of the log is performed here.
static void wisckey_del(WK * wk, string &key)
{
    cout << "Key: " << key << endl;
    leveldb_del(wk->leveldb,key);
}
// Open the leveldb index under `dirname` and the shared value-log file.
// Returns NULL (which main() already checks for) when either open fails,
// instead of handing back a half-initialized handle.
static WK * open_wisckey(const string& dirname)
{
    WK * wk = new WK;
    wk->dir = dirname;
    wk->leveldb = open_leveldb(dirname);
    wk->logfile = fopen("logfile","wb+");
    if (wk->leveldb == NULL || wk->logfile == NULL) {
        if (wk->logfile) fclose(wk->logfile);
        delete wk->leveldb;  // deleting NULL is safe
        delete wk;
        return NULL;
    }
    return wk;
}
// Close the log file and release the database and wrapper.
static void close_wisckey(WK * wk)
{
    fclose(wk->logfile);
    delete wk->leveldb;
    delete wk;
}
// For testing wisckey functionality | |||
// Exercises set/get/delete end-to-end: writes one pair and reads it back,
// deletes it, then probes an unrelated hard-coded key after the delete.
static void testing_function(WK * wk, string &key, string &value)
{
    /* Setting Value and Testing it */
    cout << "\n\n\t\tInput Received\n" << endl;
    cout << "Key: " << key << endl;
    cout << "Value: " << value << endl;
    wisckey_set(wk,key,value);
    const bool found = wisckey_get(wk,key,value);
    if (found) {
        cout << "Record Matched" << endl;
    }
    /* Deleting Value */
    cout << "\n\n\t\tDelete Operation\n" << endl;
    wisckey_del(wk,key);
    cout << "Delete Successful" << endl;
    /* Read after Delete: this fixed key was never written, so the lookup
       is expected to miss. */
    cout << "\n\n\t\tInput Received\n" << endl;
    string testkey= "1001224314";
    string testvalue = "Abhishek Sharma";
    cout << "Key: " << testkey << endl;
    cout << "Value: " << testvalue << endl;
    const bool testfound = wisckey_get(wk,testkey,testvalue);
    if (testfound) {
        cout << "Record Matched" << endl;
    }
}
int main(int argc, char ** argv) | |||
{ | |||
if (argc < 2) { | |||
cout << "Usage: " << argv[0] << " <value-size>" << endl; | |||
exit(0); | |||
} | |||
const size_t value_size = std::stoull(argv[1], NULL, 10); | |||
if (value_size < 1 || value_size > 100000) { | |||
cout << " <value-size> must be positive and less then 100000" << endl; | |||
exit(0); | |||
} | |||
WK * wk = open_wisckey("wisckey_test_dir"); // 打开数据库 | |||
if (wk == NULL) { | |||
cerr << "Open WiscKey failed!" << endl; | |||
exit(1); | |||
} | |||
char * vbuf = new char[value_size]; | |||
for (size_t i = 0; i < value_size; i++) { | |||
vbuf[i] = rand(); | |||
} | |||
string value = string(vbuf, value_size); | |||
size_t nfill = 1000000000 / (value_size + 8); // 生成 nfill = 1,000,000,000 / (value_size + 8) 条随机键值对 | |||
clock_t t0 = clock(); | |||
size_t p1 = nfill / 40; // 将插入任务分成四十份 | |||
for (size_t j = 0; j < nfill; j++) { | |||
string key = std::to_string(((size_t)rand())*((size_t)rand())); | |||
wisckey_set(wk, key, value); | |||
if (j >= p1) { | |||
clock_t dt = clock() - t0; | |||
cout << "progress: " << j+1 << "/" << nfill << " time elapsed: " << dt * 1.0e-6 << endl << std::flush; // 打印进度和已经消耗的时间 | |||
p1 += (nfill / 40); | |||
} | |||
} | |||
clock_t dt = clock() - t0; | |||
cout << "time elapsed: " << dt * 1.0e-6 << " seconds" << endl; | |||
close_wisckey(wk); | |||
destroy_leveldb("wisckey_test_dir"); | |||
remove("logfile"); | |||
exit(0); | |||
} |
@ -1,81 +0,0 @@ | |||
#pragma once | |||
#include <assert.h> | |||
#include <vector> | |||
#include <iostream> | |||
#include <sstream> | |||
#include <string> | |||
#include <ctime> | |||
#include <algorithm> | |||
#include <cstdlib> | |||
// #include <boost/algorithm/string.hpp> | |||
#include "leveldb/db.h" | |||
#include "leveldb/filter_policy.h" | |||
#include "leveldb/write_batch.h" | |||
using std::string; | |||
using std::vector; | |||
using std::cin; | |||
using std::cout; | |||
using std::cerr; | |||
using std::endl; | |||
using std::stringstream; | |||
using leveldb::ReadOptions; | |||
using leveldb::Options; | |||
using leveldb::Status; | |||
using leveldb::WriteBatch; | |||
using leveldb::WriteOptions; | |||
using leveldb::DB; | |||
static bool | |||
leveldb_get(DB * db, string &key, string &value) | |||
{ | |||
// assert(lldb); | |||
ReadOptions ropt; | |||
Status s = db->Get(ropt, key, &value); | |||
assert(s.ok()); | |||
if (s.IsNotFound()) { | |||
return false; | |||
} else { | |||
return true; | |||
} | |||
} | |||
static void | |||
leveldb_set(DB * db, string &key, string &value) | |||
{ | |||
WriteBatch wb; | |||
Status s; | |||
WriteOptions wopt; | |||
wb.Put(key, value); | |||
s = db->Write(wopt, &wb); | |||
assert(s.ok()); | |||
} | |||
static void | |||
leveldb_del(DB * db, string &key) | |||
{ | |||
WriteOptions wopt; | |||
Status s; | |||
s = db->Delete(wopt, key); | |||
assert(s.ok()); | |||
} | |||
// Remove every file belonging to the database under `dirname`.
// Best-effort: the returned Status is deliberately ignored.
static void
destroy_leveldb(const string &dirname)
{
  Options options;  // default options suffice for destruction
  leveldb::DestroyDB(dirname, options);
}
// Create a fresh LevelDB instance under `dirname`.
// NOTE: any existing database at `dirname` is destroyed first — this is a
// benchmark helper that always wants a clean slate.
// Returns the open handle, or NULL when DB::Open fails (the error is
// printed to stderr).
static DB *
open_leveldb(const string &dirname)
{
  Options options;
  options.create_if_missing = true;
  // 10 bits/key bloom filter to reduce disk reads on point lookups.
  // NOTE(review): LevelDB requires the caller to delete the filter policy
  // after closing the DB; it is never freed here, so one small object leaks
  // per open — tolerable in a short-lived benchmark process.
  options.filter_policy = leveldb::NewBloomFilterPolicy(10);
  options.write_buffer_size = 1u << 21;  // 2 MiB memtable
  destroy_leveldb(dirname);  // always start from an empty directory
  DB * db = NULL;
  Status s = DB::Open(options, dirname, &db);
  if (!s.ok()) {
    // BUG FIX: the original discarded the status entirely and silently
    // returned a NULL handle on failure.
    cerr << "leveldb open failed: " << s.ToString() << endl;
    return NULL;
  }
  return db;
}
@ -1,76 +0,0 @@ | |||
#include <iostream> | |||
#include "leveldb/db.h" | |||
#include "leveldb/options.h" | |||
#include "gtest/gtest.h" | |||
class RangeQueryTest : public ::testing::Test { | |||
protected: | |||
leveldb::DB* db; | |||
leveldb::Options options; | |||
std::string db_path = "/tmp/range_testdb"; | |||
void SetUp() override { | |||
options.create_if_missing = true; | |||
leveldb::Status status = leveldb::DB::Open(options, db_path, &db); | |||
ASSERT_TRUE(status.ok()) << "Failed to open DB: " << status.ToString(); | |||
} | |||
void TearDown() override { | |||
delete db; | |||
} | |||
}; | |||
// Inserts five ordered pairs, then checks that an iterator scan from
// "a_key1" through "b_key4" (inclusive) yields exactly the first four.
TEST_F(RangeQueryTest, TestRangeQuery) {
  // Seed the store with a known, sorted set of pairs.
  const std::vector<std::pair<std::string, std::string>> data = {
      {"a_key1", "value1"}, {"a_key2", "value2"}, {"b_key3", "value3"},
      {"b_key4", "value4"}, {"c_key5", "value5"}};
  for (const auto& pair : data) {
    leveldb::Status status =
        db->Put(leveldb::WriteOptions(), pair.first, pair.second);
    ASSERT_TRUE(status.ok()) << "Failed to put data: " << status.ToString();
  }
  // Inclusive range to scan, and what we expect it to contain.
  const std::string range_start = "a_key1";
  const std::string range_end = "b_key4";
  const std::vector<std::pair<std::string, std::string>> expected_data = {
      {"a_key1", "value1"}, {"a_key2", "value2"},
      {"b_key3", "value3"}, {"b_key4", "value4"}};
  // Scan: seek to the start key, walk forward until past the end key.
  std::vector<std::pair<std::string, std::string>> actual_data;
  leveldb::Iterator* it = db->NewIterator(leveldb::ReadOptions());
  for (it->Seek(range_start);
       it->Valid() && it->key().ToString() <= range_end; it->Next()) {
    actual_data.emplace_back(it->key().ToString(), it->value().ToString());
  }
  delete it;
  // Compare the scan against the expectation, element by element.
  ASSERT_EQ(actual_data.size(), expected_data.size()) << "Range query results size mismatch.";
  for (size_t i = 0; i < actual_data.size(); ++i) {
    EXPECT_EQ(actual_data[i].first, expected_data[i].first) << "Key mismatch at index " << i;
    EXPECT_EQ(actual_data[i].second, expected_data[i].second) << "Value mismatch at index " << i;
  }
  // Echo what the scan produced.
  for (const auto& pair : actual_data) {
    std::cout << "Key: " << pair.first << ", Value: " << pair.second << std::endl;
  }
}
int main(int argc, char** argv) { | |||
::testing::InitGoogleTest(&argc, argv); | |||
return RUN_ALL_TESTS(); | |||
} |
@ -1,43 +0,0 @@ | |||
#include <cassert> | |||
#include <iostream> | |||
#include "leveldb/db.h" | |||
#include "db/db_impl.h" | |||
int main() { | |||
leveldb::DB* db; | |||
leveldb::Options options; | |||
options.create_if_missing = true; | |||
options.kvSepType = leveldb::kVSepBeforeMem; | |||
leveldb::Status status = leveldb::DB::Open(options, "/tmp/testdb", &db); | |||
if (!status.ok()) { | |||
std::cerr << "Failed to open DB: " << status.ToString() << '\n'; | |||
return 1; | |||
} | |||
// 使用 dynamic_cast 将基类指针转换为 DBImpl | |||
auto* dbimpl = static_cast<leveldb::DBImpl*>(db); | |||
if (dbimpl == nullptr) { | |||
std::cerr << "Failed to cast to DBImpl\n"; | |||
delete db; | |||
return 1; | |||
} | |||
status = dbimpl->Put(leveldb::WriteOptions(), "key1", "val1"); | |||
if (status.ok()) { | |||
std::string val; | |||
status = dbimpl->Get(leveldb::ReadOptions(), "key1", &val); | |||
std::cout << "Find value of 'key1' From db: " << val << "\n"; | |||
} | |||
if (status.ok()) { | |||
std::string val; | |||
dbimpl->Delete(leveldb::WriteOptions(), "key1"); | |||
status = dbimpl->Get(leveldb::ReadOptions(), "key1", &val); | |||
// Not found. | |||
std::cout << status.ToString() << '\n'; | |||
} | |||
delete db; | |||
return 0; | |||
} |
@ -1,23 +0,0 @@ | |||
#include <cassert> | |||
#include <iostream> | |||
#include "leveldb/db.h" | |||
#include "gtest/gtest.h" | |||
// Sanity check: a database can be created and opened at /tmp/test_db.
TEST(Usage, InitDb) {
  leveldb::DB* db = nullptr;
  leveldb::Options options;
  options.create_if_missing = true;
  leveldb::Status status = leveldb::DB::Open(options, "/tmp/test_db", &db);
  std::cout << "db started, status: " << status.ToString() << std::endl;
  // BUG FIX: `assert` is compiled out under NDEBUG (typical release builds),
  // so the test could silently pass on failure; use the gtest assertion.
  ASSERT_TRUE(status.ok()) << status.ToString();
  delete db;
}
int main(int argc, char** argv) { | |||
printf("Running main() from %s\n", __FILE__); | |||
::testing::InitGoogleTest(&argc, argv); | |||
return RUN_ALL_TESTS(); | |||
} |
@ -1,25 +0,0 @@ | |||
#include <cassert> | |||
#include <iostream> | |||
#include "leveldb/db.h" | |||
// Minimal LevelDB usage demo: open, put, get, delete, get-after-delete.
int main(){
  leveldb::DB* db;
  leveldb::Options options;
  options.create_if_missing = true;
  leveldb::Status status = leveldb::DB::Open(options, "/tmp/testdb", &db);
  std::cout << status.ToString() << '\n';
  if (!status.ok()) {
    // BUG FIX: the original went on to dereference `db` even when Open
    // failed, which crashes on the null handle.
    return 1;
  }
  status = db->Put(leveldb::WriteOptions(), "key1", "val1");
  if (status.ok()) {
    std::string val;
    status = db->Get(leveldb::ReadOptions(), "key1", &val);
    std::cout << "Find value of 'key1' From db:" << val << "\n";
  }
  if (status.ok()) {
    std::string val;
    // BUG FIX: check the Delete status instead of discarding it.
    status = db->Delete(leveldb::WriteOptions(), "key1");
    if (status.ok()) {
      status = db->Get(leveldb::ReadOptions(), "key1", &val);
    }
    // Expected to print a NotFound status.
    std::cout << status.ToString() << '\n';
  }
  delete db;
  return 0;
}
@ -1,3 +0,0 @@ | |||
#!/bin/sh
# Stage everything, commit with the message given as $1, and push to
# origin/main. Stops at the first failing command.
set -e
if [ -z "$1" ]; then
  # BUG FIX: without this guard, a missing argument produced a commit with
  # an empty message (or an interactive git error).
  echo "usage: $0 <commit-message>" >&2
  exit 1
fi
git add .
git commit -m "$1"
git push origin main
@ -1,169 +1,188 @@ | |||
// Copyright (c) 2011 The LevelDB Authors. All rights reserved. | |||
// Use of this source code is governed by a BSD-style license that can be | |||
// found in the LICENSE file. See the AUTHORS file for names of contributors. | |||
#ifndef STORAGE_LEVELDB_INCLUDE_DB_H_ | |||
#define STORAGE_LEVELDB_INCLUDE_DB_H_ | |||
#include <cstdint> | |||
#include <cstdio> | |||
#include <vector> | |||
#include "leveldb/export.h" | |||
#include "leveldb/iterator.h" | |||
#include "leveldb/options.h" | |||
namespace leveldb { | |||
using Field = std::pair<std::string, std::string>; // field_name:field_value | |||
using FieldArray = std::vector<std::pair<std::string, std::string>>; | |||
// Update CMakeLists.txt if you change these | |||
static const int kMajorVersion = 1; | |||
static const int kMinorVersion = 23; | |||
struct Options; | |||
struct ReadOptions; | |||
struct WriteOptions; | |||
class WriteBatch; | |||
// Abstract handle to particular state of a DB. | |||
// A Snapshot is an immutable object and can therefore be safely | |||
// accessed from multiple threads without any external synchronization. | |||
class LEVELDB_EXPORT Snapshot {
 protected:
  // Destruction only happens through DB::ReleaseSnapshot(); clients never
  // delete a Snapshot directly — hence the protected destructor.
  virtual ~Snapshot();
};
// A range of keys | |||
struct LEVELDB_EXPORT Range {
  Range() = default;
  // `s`: first key in the range; `l`: first key just past the range.
  Range(const Slice& s, const Slice& l) : start(s), limit(l) {}
  Slice start;  // Included in the range
  Slice limit;  // Not included in the range
};
// A DB is a persistent ordered map from keys to values. | |||
// A DB is safe for concurrent access from multiple threads without | |||
// any external synchronization. | |||
class LEVELDB_EXPORT DB {
 public:
  // Open the database with the specified "name".
  // Stores a pointer to a heap-allocated database in *dbptr and returns
  // OK on success.
  // Stores nullptr in *dbptr and returns a non-OK status on error.
  // Caller should delete *dbptr when it is no longer needed.
  static Status Open(const Options& options, const std::string& name,
                     DB** dbptr);
  DB() = default;
  DB(const DB&) = delete;
  DB& operator=(const DB&) = delete;
  virtual ~DB();
  // Set the database entry for "key" to "value".  Returns OK on success,
  // and a non-OK status on error.
  // Note: consider setting options.sync = true.
  virtual Status Put(const WriteOptions& options, const Slice& key,
                     const Slice& value) = 0;
  // Remove the database entry (if any) for "key".  Returns OK on
  // success, and a non-OK status on error.  It is not an error if "key"
  // did not exist in the database.
  // Note: consider setting options.sync = true.
  virtual Status Delete(const WriteOptions& options, const Slice& key) = 0;
  // Apply the specified updates to the database.
  // Returns OK on success, non-OK on failure.
  // Note: consider setting options.sync = true.
  virtual Status Write(const WriteOptions& options, WriteBatch* updates) = 0;
  // If the database contains an entry for "key" store the
  // corresponding value in *value and return OK.
  //
  // If there is no entry for "key" leave *value unchanged and return
  // a status for which Status::IsNotFound() returns true.
  //
  // May return some other Status on an error.
  virtual Status Get(const ReadOptions& options, const Slice& key,
                     std::string* value) = 0;
  // NOTE(review): presumably returns the keys whose stored record contains
  // `field` (a field_name:field_value pair). The explicit `db` parameter
  // duplicates `this` — confirm against the implementation which one is
  // actually used, and whether `field` is mutated (it is taken by non-const
  // reference).
  virtual std::vector<std::string> FindKeysByField(leveldb::DB* db, Field& field) =0;
  // Return a heap-allocated iterator over the contents of the database.
  // The result of NewIterator() is initially invalid (caller must
  // call one of the Seek methods on the iterator before using it).
  //
  // Caller should delete the iterator when it is no longer needed.
  // The returned iterator should be deleted before this db is deleted.
  virtual Iterator* NewIterator(const ReadOptions& options) = 0;
  // Return a handle to the current DB state.  Iterators created with
  // this handle will all observe a stable snapshot of the current DB
  // state.  The caller must call ReleaseSnapshot(result) when the
  // snapshot is no longer needed.
  virtual const Snapshot* GetSnapshot() = 0;
  // Release a previously acquired snapshot.  The caller must not
  // use "snapshot" after this call.
  virtual void ReleaseSnapshot(const Snapshot* snapshot) = 0;
  // DB implementations can export properties about their state
  // via this method.  If "property" is a valid property understood by this
  // DB implementation, fills "*value" with its current value and returns
  // true.  Otherwise returns false.
  //
  //
  // Valid property names include:
  //
  //  "leveldb.num-files-at-level<N>" - return the number of files at level <N>,
  //     where <N> is an ASCII representation of a level number (e.g. "0").
  //  "leveldb.stats" - returns a multi-line string that describes statistics
  //     about the internal operation of the DB.
  //  "leveldb.sstables" - returns a multi-line string that describes all
  //     of the sstables that make up the db contents.
  //  "leveldb.approximate-memory-usage" - returns the approximate number of
  //     bytes of memory in use by the DB.
  virtual bool GetProperty(const Slice& property, std::string* value) = 0;
  // For each i in [0,n-1], store in "sizes[i]", the approximate
  // file system space used by keys in "[range[i].start .. range[i].limit)".
  //
  // Note that the returned sizes measure file system space usage, so
  // if the user data compresses by a factor of ten, the returned
  // sizes will be one-tenth the size of the corresponding user data size.
  //
  // The results may not include the sizes of recently written data.
  virtual void GetApproximateSizes(const Range* range, int n,
                                   uint64_t* sizes) = 0;
  // Compact the underlying storage for the key range [*begin,*end].
  // In particular, deleted and overwritten versions are discarded,
  // and the data is rearranged to reduce the cost of operations
  // needed to access the data.  This operation should typically only
  // be invoked by users who understand the underlying implementation.
  //
  // begin==nullptr is treated as a key before all keys in the database.
  // end==nullptr is treated as a key after all keys in the database.
  // Therefore the following call will compact the entire database:
  //    db->CompactRange(nullptr, nullptr);
  virtual void CompactRange(const Slice* begin, const Slice* end) = 0;
};
// Destroy the contents of the specified database. | |||
// Be very careful using this method. | |||
// | |||
// Note: For backwards compatibility, if DestroyDB is unable to list the | |||
// database files, Status::OK() will still be returned masking this failure. | |||
LEVELDB_EXPORT Status DestroyDB(const std::string& name, | |||
const Options& options); | |||
// If a DB cannot be opened, you may attempt to call this method to | |||
// resurrect as much of the contents of the database as possible. | |||
// Some data may be lost, so be careful when calling this function | |||
// on a database that contains important information. | |||
LEVELDB_EXPORT Status RepairDB(const std::string& dbname, | |||
const Options& options, DB* db_); | |||
} // namespace leveldb | |||
#endif // STORAGE_LEVELDB_INCLUDE_DB_H_ | |||
// Copyright (c) 2011 The LevelDB Authors. All rights reserved. | |||
// Use of this source code is governed by a BSD-style license that can be | |||
// found in the LICENSE file. See the AUTHORS file for names of contributors. | |||
#ifndef STORAGE_LEVELDB_INCLUDE_DB_H_ | |||
#define STORAGE_LEVELDB_INCLUDE_DB_H_ | |||
#include <cstdint> | |||
#include <cstdio> | |||
#include "leveldb/export.h" | |||
#include "leveldb/iterator.h" | |||
#include "leveldb/options.h" | |||
#include "util/coding.h" | |||
#include <vector> | |||
namespace leveldb { | |||
// Represents a value composed of multiple named fields.
using Field = std::pair<std::string, std::string>; // field_name:field_value | |||
using FieldArray = std::vector<std::pair<std::string, std::string>>; | |||
// Update CMakeLists.txt if you change these | |||
static const int kMajorVersion = 1; | |||
static const int kMinorVersion = 23; | |||
struct Options; | |||
struct ReadOptions; | |||
struct WriteOptions; | |||
class WriteBatch; | |||
// Abstract handle to particular state of a DB. | |||
// A Snapshot is an immutable object and can therefore be safely | |||
// accessed from multiple threads without any external synchronization. | |||
class LEVELDB_EXPORT Snapshot {
 protected:
  // Destruction only happens through DB::ReleaseSnapshot(); clients never
  // delete a Snapshot directly — hence the protected destructor.
  virtual ~Snapshot();
};
// A range of keys | |||
struct LEVELDB_EXPORT Range {
  Range() = default;
  // `s`: first key in the range; `l`: first key just past the range.
  Range(const Slice& s, const Slice& l) : start(s), limit(l) {}
  Slice start;  // Included in the range
  Slice limit;  // Not included in the range
};
// A DB is a persistent ordered map from keys to values. | |||
// A DB is safe for concurrent access from multiple threads without | |||
// any external synchronization. | |||
class LEVELDB_EXPORT DB {
 private:
  // NOTE(review): declared `const` yet paired with NewBlobNum() below; a
  // const static member cannot serve as a mutable counter, so either the
  // counter lives elsewhere or this qualifier is a mistake — confirm
  // against the implementation.
  const static int blob_num = 0;
 public:
  // NOTE(review): presumably hands out the next blob-file number for the
  // key/value-separation path — verify in the implementation.
  static int NewBlobNum();
  // Open the database with the specified "name".
  // Stores a pointer to a heap-allocated database in *dbptr and returns
  // OK on success.
  // Stores nullptr in *dbptr and returns a non-OK status on error.
  // Caller should delete *dbptr when it is no longer needed.
  static Status Open(const Options& options, const std::string& name,
                     DB** dbptr);
  DB() = default;
  DB(const DB&) = delete;
  DB& operator=(const DB&) = delete;
  virtual ~DB();
  // Set the database entry for "key" to "value".  Returns OK on success,
  // and a non-OK status on error.
  // Note: consider setting options.sync = true.
  virtual Status Put(const WriteOptions& options, const Slice& key,
                     const Slice& value) = 0;
  // Remove the database entry (if any) for "key".  Returns OK on
  // success, and a non-OK status on error.  It is not an error if "key"
  // did not exist in the database.
  // Note: consider setting options.sync = true.
  virtual Status Delete(const WriteOptions& options, const Slice& key) = 0;
  // Apply the specified updates to the database.
  // Returns OK on success, non-OK on failure.
  // Note: consider setting options.sync = true.
  virtual Status Write(const WriteOptions& options, WriteBatch* updates) = 0;
  // If the database contains an entry for "key" store the
  // corresponding value in *value and return OK.
  //
  // If there is no entry for "key" leave *value unchanged and return
  // a status for which Status::IsNotFound() returns true.
  //
  // May return some other Status on an error.
  virtual Status Get(const ReadOptions& options, const Slice& key,
                     std::string* value) = 0;
  // Return a heap-allocated iterator over the contents of the database.
  // The result of NewIterator() is initially invalid (caller must
  // call one of the Seek methods on the iterator before using it).
  //
  // Caller should delete the iterator when it is no longer needed.
  // The returned iterator should be deleted before this db is deleted.
  virtual Iterator* NewIterator(const ReadOptions& options) = 0;
  // Return a handle to the current DB state.  Iterators created with
  // this handle will all observe a stable snapshot of the current DB
  // state.  The caller must call ReleaseSnapshot(result) when the
  // snapshot is no longer needed.
  virtual const Snapshot* GetSnapshot() = 0;
  // Release a previously acquired snapshot.  The caller must not
  // use "snapshot" after this call.
  virtual void ReleaseSnapshot(const Snapshot* snapshot) = 0;
  // DB implementations can export properties about their state
  // via this method.  If "property" is a valid property understood by this
  // DB implementation, fills "*value" with its current value and returns
  // true.  Otherwise returns false.
  //
  //
  // Valid property names include:
  //
  //  "leveldb.num-files-at-level<N>" - return the number of files at level <N>,
  //     where <N> is an ASCII representation of a level number (e.g. "0").
  //  "leveldb.stats" - returns a multi-line string that describes statistics
  //     about the internal operation of the DB.
  //  "leveldb.sstables" - returns a multi-line string that describes all
  //     of the sstables that make up the db contents.
  //  "leveldb.approximate-memory-usage" - returns the approximate number of
  //     bytes of memory in use by the DB.
  virtual bool GetProperty(const Slice& property, std::string* value) = 0;
  // For each i in [0,n-1], store in "sizes[i]", the approximate
  // file system space used by keys in "[range[i].start .. range[i].limit)".
  //
  // Note that the returned sizes measure file system space usage, so
  // if the user data compresses by a factor of ten, the returned
  // sizes will be one-tenth the size of the corresponding user data size.
  //
  // The results may not include the sizes of recently written data.
  virtual void GetApproximateSizes(const Range* range, int n,
                                   uint64_t* sizes) = 0;
  // Compact the underlying storage for the key range [*begin,*end].
  // In particular, deleted and overwritten versions are discarded,
  // and the data is rearranged to reduce the cost of operations
  // needed to access the data.  This operation should typically only
  // be invoked by users who understand the underlying implementation.
  //
  // begin==nullptr is treated as a key before all keys in the database.
  // end==nullptr is treated as a key after all keys in the database.
  // Therefore the following call will compact the entire database:
  //    db->CompactRange(nullptr, nullptr);
  virtual void CompactRange(const Slice* begin, const Slice* end) = 0;
  // ----------------------------For TTL-----------------------------
  // Put with a time-to-live: the entry for `key` expires automatically once
  // `ttl` has elapsed. NOTE(review): the time unit of `ttl` is not stated
  // here — confirm (seconds vs. milliseconds) against the implementation.
  virtual Status Put(const WriteOptions& options, const Slice& key,
                     const Slice& value, uint64_t ttl) = 0;
  // Manual offline garbage collection; currently disabled. (author tag: 朴)
  // virtual Status OutLineGarbageCollection() = 0;
};
// Destroy the contents of the specified database. | |||
// Be very careful using this method. | |||
// | |||
// Note: For backwards compatibility, if DestroyDB is unable to list the | |||
// database files, Status::OK() will still be returned masking this failure. | |||
LEVELDB_EXPORT Status DestroyDB(const std::string& name, | |||
const Options& options); | |||
// If a DB cannot be opened, you may attempt to call this method to | |||
// resurrect as much of the contents of the database as possible. | |||
// Some data may be lost, so be careful when calling this function | |||
// on a database that contains important information. | |||
LEVELDB_EXPORT Status RepairDB(const std::string& dbname, | |||
const Options& options); | |||
} // namespace leveldb | |||
#endif // STORAGE_LEVELDB_INCLUDE_DB_H_ |
@ -1,93 +1,94 @@ | |||
// Copyright (c) 2011 The LevelDB Authors. All rights reserved. | |||
// Use of this source code is governed by a BSD-style license that can be | |||
// found in the LICENSE file. See the AUTHORS file for names of contributors. | |||
// | |||
// TableBuilder provides the interface used to build a Table | |||
// (an immutable and sorted map from keys to values). | |||
// | |||
// Multiple threads can invoke const methods on a TableBuilder without | |||
// external synchronization, but if any of the threads may call a | |||
// non-const method, all threads accessing the same TableBuilder must use | |||
// external synchronization. | |||
#ifndef STORAGE_LEVELDB_INCLUDE_TABLE_BUILDER_H_ | |||
#define STORAGE_LEVELDB_INCLUDE_TABLE_BUILDER_H_ | |||
#include <cstdint> | |||
#include "leveldb/export.h" | |||
#include "leveldb/options.h" | |||
#include "leveldb/status.h" | |||
namespace leveldb { | |||
class BlockBuilder; | |||
class BlockHandle; | |||
class WritableFile; | |||
class LEVELDB_EXPORT TableBuilder {
 public:
  // Create a builder that will store the contents of the table it is
  // building in *file.  Does not close the file.  It is up to the
  // caller to close the file after calling Finish().
  TableBuilder(const Options& options, WritableFile* file);
  TableBuilder(const TableBuilder&) = delete;
  TableBuilder& operator=(const TableBuilder&) = delete;
  // REQUIRES: Either Finish() or Abandon() has been called.
  ~TableBuilder();
  // Change the options used by this builder.  Note: only some of the
  // option fields can be changed after construction.  If a field is
  // not allowed to change dynamically and its value in the structure
  // passed to the constructor is different from its value in the
  // structure passed to this method, this method will return an error
  // without changing any fields.
  Status ChangeOptions(const Options& options);
  // Add key,value to the table being constructed.
  // REQUIRES: key is after any previously added key according to comparator.
  // REQUIRES: Finish(), Abandon() have not been called
  void Add(const Slice& key, const Slice& value);
  // Advanced operation: flush any buffered key/value pairs to file.
  // Can be used to ensure that two adjacent entries never live in
  // the same data block.  Most clients should not need to use this method.
  // REQUIRES: Finish(), Abandon() have not been called
  void Flush();
  // Return non-ok iff some error has been detected.
  Status status() const;
  // Finish building the table.  Stops using the file passed to the
  // constructor after this function returns.
  // REQUIRES: Finish(), Abandon() have not been called
  Status Finish();
  // Indicate that the contents of this builder should be abandoned.  Stops
  // using the file passed to the constructor after this function returns.
  // If the caller is not going to call Finish(), it must call Abandon()
  // before destroying this builder.
  // REQUIRES: Finish(), Abandon() have not been called
  void Abandon();
  // Number of calls to Add() so far.
  uint64_t NumEntries() const;
  // Size of the file generated so far.  If invoked after a successful
  // Finish() call, returns the size of the final generated file.
  uint64_t FileSize() const;
 private:
  bool ok() const { return status().ok(); }
  void WriteBlock(BlockBuilder* block, BlockHandle* handle);
  void WriteRawBlock(const Slice& data, CompressionType, BlockHandle* handle);
  // Opaque implementation state (pimpl); owned by this builder.
  struct Rep;
  Rep* rep_;
};
} // namespace leveldb | |||
#endif // STORAGE_LEVELDB_INCLUDE_TABLE_BUILDER_H_ | |||
// Copyright (c) 2011 The LevelDB Authors. All rights reserved. | |||
// Use of this source code is governed by a BSD-style license that can be | |||
// found in the LICENSE file. See the AUTHORS file for names of contributors. | |||
// | |||
// TableBuilder provides the interface used to build a Table | |||
// (an immutable and sorted map from keys to values). | |||
// | |||
// Multiple threads can invoke const methods on a TableBuilder without | |||
// external synchronization, but if any of the threads may call a | |||
// non-const method, all threads accessing the same TableBuilder must use | |||
// external synchronization. | |||
#ifndef STORAGE_LEVELDB_INCLUDE_TABLE_BUILDER_H_ | |||
#define STORAGE_LEVELDB_INCLUDE_TABLE_BUILDER_H_ | |||
#include <cstdint> | |||
#include "leveldb/export.h" | |||
#include "leveldb/options.h" | |||
#include "leveldb/status.h" | |||
namespace leveldb { | |||
class BlockBuilder; | |||
class BlockHandle; | |||
class WritableFile; | |||
class LEVELDB_EXPORT TableBuilder {
 public:
  // Create a builder that will store the contents of the table it is
  // building in *file.  Does not close the file.  It is up to the
  // caller to close the file after calling Finish().
  TableBuilder(const Options& options, WritableFile* file);
  TableBuilder(const TableBuilder&) = delete;
  TableBuilder& operator=(const TableBuilder&) = delete;
  // NOTE(review): presumably encodes a (vlog offset, blob-file id) reference
  // used by the key/value-separation path — confirm the encoding in
  // table_builder.cc. (author tag: 朴)
  std::string EncodeBlobValue(uint64_t offset, int bfid);
  // REQUIRES: Either Finish() or Abandon() has been called.
  ~TableBuilder();
  // Change the options used by this builder.  Note: only some of the
  // option fields can be changed after construction.  If a field is
  // not allowed to change dynamically and its value in the structure
  // passed to the constructor is different from its value in the
  // structure passed to this method, this method will return an error
  // without changing any fields.
  Status ChangeOptions(const Options& options);
  // Add key,value to the table being constructed.
  // REQUIRES: key is after any previously added key according to comparator.
  // REQUIRES: Finish(), Abandon() have not been called
  void Add(const Slice& key, const Slice& value);
  // Advanced operation: flush any buffered key/value pairs to file.
  // Can be used to ensure that two adjacent entries never live in
  // the same data block.  Most clients should not need to use this method.
  // REQUIRES: Finish(), Abandon() have not been called
  void Flush();
  // Return non-ok iff some error has been detected.
  Status status() const;
  // Finish building the table.  Stops using the file passed to the
  // constructor after this function returns.
  // REQUIRES: Finish(), Abandon() have not been called
  Status Finish();
  // Indicate that the contents of this builder should be abandoned.  Stops
  // using the file passed to the constructor after this function returns.
  // If the caller is not going to call Finish(), it must call Abandon()
  // before destroying this builder.
  // REQUIRES: Finish(), Abandon() have not been called
  void Abandon();
  // Number of calls to Add() so far.
  uint64_t NumEntries() const;
  // Size of the file generated so far.  If invoked after a successful
  // Finish() call, returns the size of the final generated file.
  uint64_t FileSize() const;
 private:
  bool ok() const { return status().ok(); }
  void WriteBlock(BlockBuilder* block, BlockHandle* handle);
  void WriteRawBlock(const Slice& data, CompressionType, BlockHandle* handle);
  // Opaque implementation state (pimpl); owned by this builder.
  struct Rep;
  Rep* rep_;
};
} // namespace leveldb | |||
#endif // STORAGE_LEVELDB_INCLUDE_TABLE_BUILDER_H_ |
@ -1,83 +1,86 @@ | |||
// Copyright (c) 2011 The LevelDB Authors. All rights reserved. | |||
// Use of this source code is governed by a BSD-style license that can be | |||
// found in the LICENSE file. See the AUTHORS file for names of contributors. | |||
// | |||
// WriteBatch holds a collection of updates to apply atomically to a DB. | |||
// | |||
// The updates are applied in the order in which they are added | |||
// to the WriteBatch. For example, the value of "key" will be "v3" | |||
// after the following batch is written: | |||
// | |||
// batch.Put("key", "v1"); | |||
// batch.Delete("key"); | |||
// batch.Put("key", "v2"); | |||
// batch.Put("key", "v3"); | |||
// | |||
// Multiple threads can invoke const methods on a WriteBatch without | |||
// external synchronization, but if any of the threads may call a | |||
// non-const method, all threads accessing the same WriteBatch must use | |||
// external synchronization. | |||
#ifndef STORAGE_LEVELDB_INCLUDE_WRITE_BATCH_H_ | |||
#define STORAGE_LEVELDB_INCLUDE_WRITE_BATCH_H_ | |||
#include <string> | |||
#include "leveldb/export.h" | |||
#include "leveldb/status.h" | |||
namespace leveldb { | |||
class Slice; | |||
class LEVELDB_EXPORT WriteBatch {
 public:
  // Callback interface used by Iterate() to replay the batch's
  // operations in insertion order.
  class LEVELDB_EXPORT Handler {
   public:
    virtual ~Handler();
    virtual void Put(const Slice& key, const Slice& value) = 0;
    virtual void Delete(const Slice& key) = 0;
  };

  WriteBatch();

  // Intentionally copyable.
  WriteBatch(const WriteBatch&) = default;
  WriteBatch& operator=(const WriteBatch&) = default;

  ~WriteBatch();

  // Store the mapping "key->value" in the database.
  void Put(const Slice& key, const Slice& value);

  // If the database contains a mapping for "key", erase it.  Else do nothing.
  void Delete(const Slice& key);

  // Clear all updates buffered in this batch.
  void Clear();

  // The size of the database changes caused by this batch.
  //
  // This number is tied to implementation details, and may change across
  // releases. It is intended for LevelDB usage metrics.
  size_t ApproximateSize() const;

  // Copies the operations in "source" to this batch.
  //
  // This runs in O(source size) time. However, the constant factor is better
  // than calling Iterate() over the source batch with a Handler that replicates
  // the operations into this batch.
  void Append(const WriteBatch& source);

  // Support for iterating over the contents of a batch.
  Status Iterate(Handler* handler) const;

 private:
  friend class WriteBatchInternal;

  std::string rep_;  // See comment in write_batch.cc for the format of rep_
};
} // namespace leveldb | |||
#endif // STORAGE_LEVELDB_INCLUDE_WRITE_BATCH_H_ | |||
// Copyright (c) 2011 The LevelDB Authors. All rights reserved. | |||
// Use of this source code is governed by a BSD-style license that can be | |||
// found in the LICENSE file. See the AUTHORS file for names of contributors. | |||
// | |||
// WriteBatch holds a collection of updates to apply atomically to a DB. | |||
// | |||
// The updates are applied in the order in which they are added | |||
// to the WriteBatch. For example, the value of "key" will be "v3" | |||
// after the following batch is written: | |||
// | |||
// batch.Put("key", "v1"); | |||
// batch.Delete("key"); | |||
// batch.Put("key", "v2"); | |||
// batch.Put("key", "v3"); | |||
// | |||
// Multiple threads can invoke const methods on a WriteBatch without | |||
// external synchronization, but if any of the threads may call a | |||
// non-const method, all threads accessing the same WriteBatch must use | |||
// external synchronization. | |||
#ifndef STORAGE_LEVELDB_INCLUDE_WRITE_BATCH_H_ | |||
#define STORAGE_LEVELDB_INCLUDE_WRITE_BATCH_H_ | |||
#include <string> | |||
#include "leveldb/export.h" | |||
#include "leveldb/status.h" | |||
#include <cstdint> | |||
namespace leveldb { | |||
class Slice; | |||
class LEVELDB_EXPORT WriteBatch {
 public:
  // Callback interface used by Iterate() to replay the batch's
  // operations in insertion order.
  class LEVELDB_EXPORT Handler {
   public:
    virtual ~Handler();
    virtual void Put(const Slice& key, const Slice& value) = 0;
    virtual void Delete(const Slice& key) = 0;
  };

  WriteBatch();

  // Intentionally copyable.
  WriteBatch(const WriteBatch&) = default;
  WriteBatch& operator=(const WriteBatch&) = default;

  ~WriteBatch();

  // Store the mapping "key->value" in the database.
  void Put(const Slice& key, const Slice& value);

  // Store the mapping "key->value" with a time-to-live.
  // NOTE(review): ttl is presumably in seconds (the TTL test passes a
  // "/* second */" value through db->Put) — confirm against the
  // implementation in write_batch.cc before relying on the unit.
  void Put(const Slice& key, const Slice& value, uint64_t ttl);

  // If the database contains a mapping for "key", erase it.  Else do nothing.
  void Delete(const Slice& key);

  // Clear all updates buffered in this batch.
  void Clear();

  // The size of the database changes caused by this batch.
  //
  // This number is tied to implementation details, and may change across
  // releases. It is intended for LevelDB usage metrics.
  size_t ApproximateSize() const;

  // Copies the operations in "source" to this batch.
  //
  // This runs in O(source size) time. However, the constant factor is better
  // than calling Iterate() over the source batch with a Handler that replicates
  // the operations into this batch.
  void Append(const WriteBatch& source);

  // Support for iterating over the contents of a batch.
  Status Iterate(Handler* handler) const;

 private:
  friend class WriteBatchInternal;

  std::string rep_;  // See comment in write_batch.cc for the format of rep_
};
} // namespace leveldb | |||
#endif // STORAGE_LEVELDB_INCLUDE_WRITE_BATCH_H_ |
@ -0,0 +1,63 @@ | |||
#include "table/blob_file.h" | |||
#include "util/coding.h" | |||
#include "util/crc32c.h" | |||
#include <cassert> | |||
namespace leveldb { | |||
namespace blob { | |||
// Start a new blob file; the write offset begins at zero.
// Fix: bfid_ (declared in blob_file.h) was previously left uninitialized,
// so any later read of it was indeterminate.
BlobFile::BlobFile(WritableFile* dest) : dest_(dest), head_(0), bfid_(0) {}

// Resume appending to an existing blob file that already holds
// `dest_length` bytes, so new records receive offsets past the
// existing contents.
BlobFile::BlobFile(WritableFile* dest, uint64_t dest_length)
    : dest_(dest), head_(dest_length), bfid_(0) {}

// The BlobFile does not own dest_; the caller closes/deletes the file.
BlobFile::~BlobFile() = default;
// Append one key/value record to the blob file and report, via `offset`,
// the file offset at which the record starts.
Status BlobFile::AddRecord(const Slice& key, const Slice& value, uint64_t& offset) {
  // Delegates to EmitDynamicRecord, which writes a length-prefixed record.
  return EmitDynamicRecord(key, value, offset);
}
// Write one record as: fixed32(key_size) fixed32(value_size) key value.
// On success, `offset` is set to the file offset where the record begins
// and head_ is advanced past the record.
// NOTE(review): no checksum is written even though util/crc32c.h is
// included — corrupted records cannot be detected on read; consider
// appending a crc32c over the record.
Status BlobFile::EmitDynamicRecord(const Slice& key, const Slice& value, uint64_t& offset) {
  // Record header: 4-byte key length + 4-byte value length.
  char header[8];
  uint32_t key_size = static_cast<uint32_t>(key.size());
  uint32_t value_size = static_cast<uint32_t>(value.size());
  // Encode the key and value lengths.
  EncodeFixed32(header, key_size);
  EncodeFixed32(header + 4, value_size);
  // Write the header.
  Status s = dest_->Append(Slice(header, sizeof(header)));
  if (!s.ok()) {
    return s;
  }
  // Write the key and value payloads.
  s = dest_->Append(key);
  if (!s.ok()) {
    return s;
  }
  s = dest_->Append(value);
  if (!s.ok()) {
    return s;
  }
  // Flush buffered data to the file.
  s = dest_->Flush();
  if (!s.ok()) {
    return s;
  }
  // Report the record's start offset, then advance the write head.
  offset = head_;
  head_ += sizeof(header) + key_size + value_size;
  return Status::OK();
}
} // namespace blob | |||
} // namespace leveldb |
@ -0,0 +1,34 @@ | |||
#ifndef LEVELDB_BLOB_FILE_H_ | |||
#define LEVELDB_BLOB_FILE_H_ | |||
#include <string> | |||
#include "leveldb/status.h" | |||
#include "leveldb/slice.h" | |||
#include "leveldb/env.h" | |||
namespace leveldb { | |||
namespace blob { | |||
// Append-only file holding large values ("blobs") that have been separated
// from the LSM-tree. Records are written by AddRecord and addressed by the
// byte offset it returns.
class BlobFile {
 public:
  explicit BlobFile(WritableFile* dest);
  // Resume appending to a file that already holds `dest_length` bytes.
  BlobFile(WritableFile* dest, uint64_t dest_length);
  ~BlobFile();

  // Append one key/value record; on success `offset` receives the file
  // offset at which the record starts.
  Status AddRecord(const Slice& key, const Slice& value, uint64_t& offset);

 private:
  WritableFile* dest_;  // Destination file (not owned).
  uint64_t head_;       // Offset at which the next record will be written.
  // NOTE(review): bfid_ is declared but never assigned in blob_file.cc —
  // either initialize it in the constructors or remove it.
  uint64_t bfid_;       // Intended unique id of this blob file.

  // Writes one length-prefixed record; shared implementation for AddRecord.
  Status EmitDynamicRecord(const Slice& key, const Slice& value, uint64_t& offset);
};
} // namespace blob | |||
} // namespace leveldb | |||
#endif // LEVELDB_BLOB_FILE_H_ |
@ -1,280 +1,345 @@ | |||
// Copyright (c) 2011 The LevelDB Authors. All rights reserved. | |||
// Use of this source code is governed by a BSD-style license that can be | |||
// found in the LICENSE file. See the AUTHORS file for names of contributors. | |||
#include "leveldb/table_builder.h" | |||
#include <cassert> | |||
#include "leveldb/comparator.h" | |||
#include "leveldb/env.h" | |||
#include "leveldb/filter_policy.h" | |||
#include "leveldb/options.h" | |||
#include "table/block_builder.h" | |||
#include "table/filter_block.h" | |||
#include "table/format.h" | |||
#include "util/coding.h" | |||
#include "util/crc32c.h" | |||
namespace leveldb { | |||
// Internal state of TableBuilder (pImpl idiom; callers only see rep_).
struct TableBuilder::Rep {
  Rep(const Options& opt, WritableFile* f)
      : options(opt),
        index_block_options(opt),
        file(f),
        offset(0),
        data_block(&options),
        index_block(&index_block_options),
        num_entries(0),
        closed(false),
        filter_block(opt.filter_policy == nullptr
                         ? nullptr
                         : new FilterBlockBuilder(opt.filter_policy)),
        pending_index_entry(false) {
    // Index blocks restart on every entry so a binary search can land
    // directly on any key.
    index_block_options.block_restart_interval = 1;
  }

  Options options;              // Options for data blocks.
  Options index_block_options;  // Options for the index block.
  WritableFile* file;           // Output file (not owned).
  uint64_t offset;              // Bytes written to the file so far.
  Status status;                // First error encountered, if any.
  BlockBuilder data_block;      // Data block currently under construction.
  BlockBuilder index_block;     // Accumulates one entry per data block.
  std::string last_key;         // Most recently added key.
  int64_t num_entries;          // Number of Add() calls so far.
  bool closed;          // Either Finish() or Abandon() has been called.
  FilterBlockBuilder* filter_block;  // Optional filter builder (owned).

  // We do not emit the index entry for a block until we have seen the
  // first key for the next data block.  This allows us to use shorter
  // keys in the index block.  For example, consider a block boundary
  // between the keys "the quick brown fox" and "the who".  We can use
  // "the r" as the key for the index block entry since it is >= all
  // entries in the first block and < all entries in subsequent
  // blocks.
  //
  // Invariant: r->pending_index_entry is true only if data_block is empty.
  bool pending_index_entry;
  BlockHandle pending_handle;  // Handle to add to index block

  std::string compressed_output;  // Scratch buffer reused across blocks.
};
// Begin building a new table; when a filter policy is configured, open the
// filter block's first region at file offset 0.
TableBuilder::TableBuilder(const Options& options, WritableFile* file)
    : rep_(new Rep(options, file)) {
  FilterBlockBuilder* const filters = rep_->filter_block;
  if (filters != nullptr) {
    filters->StartBlock(0);
  }
}
// The caller must have called Finish() or Abandon() before destruction;
// the assert below enforces that contract in debug builds.
TableBuilder::~TableBuilder() {
  assert(rep_->closed);  // Catch errors where caller forgot to call Finish()
  delete rep_->filter_block;
  delete rep_;
}
// Adopt new options mid-build.  Rejects changes that cannot take effect
// once building has started: the comparator determines the on-disk key
// order, so it must stay fixed.
Status TableBuilder::ChangeOptions(const Options& options) {
  // Note: if more fields are added to Options, update this function to
  // catch changes that should not be allowed mid-build.
  if (rep_->options.comparator != options.comparator) {
    return Status::InvalidArgument("changing comparator while building table");
  }

  // Live BlockBuilders hold pointers to rep_->options and
  // rep_->index_block_options, so assigning here propagates the new
  // settings to them automatically.
  rep_->options = options;
  rep_->index_block_options = options;
  rep_->index_block_options.block_restart_interval = 1;
  return Status::OK();
}
// Add key/value to the table.
// REQUIRES: keys arrive in strictly increasing comparator order.
// REQUIRES: Finish() and Abandon() have not been called.
void TableBuilder::Add(const Slice& key, const Slice& value) {
  Rep* r = rep_;
  assert(!r->closed);
  if (!ok()) return;
  if (r->num_entries > 0) {
    // Enforce strictly increasing key order.
    assert(r->options.comparator->Compare(key, Slice(r->last_key)) > 0);
  }

  if (r->pending_index_entry) {
    // The previous block was just flushed; now that the first key of the
    // new block is known, pick a short separator key and emit the
    // deferred index entry.
    assert(r->data_block.empty());
    r->options.comparator->FindShortestSeparator(&r->last_key, key);
    std::string handle_encoding;
    r->pending_handle.EncodeTo(&handle_encoding);
    r->index_block.Add(r->last_key, Slice(handle_encoding));
    r->pending_index_entry = false;
  }

  if (r->filter_block != nullptr) {
    r->filter_block->AddKey(key);
  }

  r->last_key.assign(key.data(), key.size());
  r->num_entries++;
  r->data_block.Add(key, value);

  // Cut the data block once it reaches the configured target size.
  const size_t estimated_block_size = r->data_block.CurrentSizeEstimate();
  if (estimated_block_size >= r->options.block_size) {
    Flush();
  }
}
// Write the current data block to the file (if non-empty), defer its
// index entry until the next key arrives, and start a new filter-block
// region at the new file offset.
void TableBuilder::Flush() {
  Rep* r = rep_;
  assert(!r->closed);
  if (!ok()) return;
  if (r->data_block.empty()) return;
  assert(!r->pending_index_entry);
  WriteBlock(&r->data_block, &r->pending_handle);
  if (ok()) {
    // The index entry is emitted lazily (in Add()/Finish()) so a shorter
    // separator key can be chosen.
    r->pending_index_entry = true;
    r->status = r->file->Flush();
  }
  if (r->filter_block != nullptr) {
    r->filter_block->StartBlock(r->offset);
  }
}
// Serialize, optionally compress, and write `block` to the file; `handle`
// receives the block's offset and size.  Falls back to storing the raw
// bytes when the codec is unavailable or saves less than 12.5%.
void TableBuilder::WriteBlock(BlockBuilder* block, BlockHandle* handle) {
  // File format contains a sequence of blocks where each block has:
  //    block_data: uint8[n]
  //    type: uint8
  //    crc: uint32
  assert(ok());
  Rep* r = rep_;
  Slice raw = block->Finish();

  Slice block_contents;
  CompressionType type = r->options.compression;
  // TODO(postrelease): Support more compression options: zlib?
  switch (type) {
    case kNoCompression:
      block_contents = raw;
      break;

    case kSnappyCompression: {
      std::string* compressed = &r->compressed_output;
      if (port::Snappy_Compress(raw.data(), raw.size(), compressed) &&
          compressed->size() < raw.size() - (raw.size() / 8u)) {
        block_contents = *compressed;
      } else {
        // Snappy not supported, or compressed less than 12.5%, so just
        // store uncompressed form
        block_contents = raw;
        type = kNoCompression;
      }
      break;
    }

    case kZstdCompression: {
      std::string* compressed = &r->compressed_output;
      if (port::Zstd_Compress(r->options.zstd_compression_level, raw.data(),
                              raw.size(), compressed) &&
          compressed->size() < raw.size() - (raw.size() / 8u)) {
        block_contents = *compressed;
      } else {
        // Zstd not supported, or compressed less than 12.5%, so just
        // store uncompressed form
        block_contents = raw;
        type = kNoCompression;
      }
      break;
    }
  }
  WriteRawBlock(block_contents, type, handle);
  r->compressed_output.clear();
  block->Reset();
}
// Append `block_contents` plus the 5-byte trailer (1-byte compression type
// followed by a 4-byte masked crc32c covering data and type), then advance
// the running file offset on success.
void TableBuilder::WriteRawBlock(const Slice& block_contents,
                                 CompressionType type, BlockHandle* handle) {
  Rep* r = rep_;
  handle->set_offset(r->offset);
  handle->set_size(block_contents.size());
  r->status = r->file->Append(block_contents);
  if (r->status.ok()) {
    char trailer[kBlockTrailerSize];
    trailer[0] = type;
    uint32_t crc = crc32c::Value(block_contents.data(), block_contents.size());
    crc = crc32c::Extend(crc, trailer, 1);  // Extend crc to cover block type
    EncodeFixed32(trailer + 1, crc32c::Mask(crc));
    r->status = r->file->Append(Slice(trailer, kBlockTrailerSize));
    if (r->status.ok()) {
      r->offset += block_contents.size() + kBlockTrailerSize;
    }
  }
}
// Returns the first error encountered while building, or OK.
Status TableBuilder::status() const { return rep_->status; }
// Flush the final data block, then write the filter, metaindex and index
// blocks followed by the footer.  After a successful return the file
// contents are complete (the caller still closes the file).
Status TableBuilder::Finish() {
  Rep* r = rep_;
  Flush();
  assert(!r->closed);
  r->closed = true;

  BlockHandle filter_block_handle, metaindex_block_handle, index_block_handle;

  // Write filter block
  if (ok() && r->filter_block != nullptr) {
    WriteRawBlock(r->filter_block->Finish(), kNoCompression,
                  &filter_block_handle);
  }

  // Write metaindex block
  if (ok()) {
    BlockBuilder meta_index_block(&r->options);
    if (r->filter_block != nullptr) {
      // Add mapping from "filter.Name" to location of filter data
      std::string key = "filter.";
      key.append(r->options.filter_policy->Name());
      std::string handle_encoding;
      filter_block_handle.EncodeTo(&handle_encoding);
      meta_index_block.Add(key, handle_encoding);
    }

    // TODO(postrelease): Add stats and other meta blocks
    WriteBlock(&meta_index_block, &metaindex_block_handle);
  }

  // Write index block
  if (ok()) {
    if (r->pending_index_entry) {
      // No further key will arrive, so use a short successor of the
      // last key as the final index key.
      r->options.comparator->FindShortSuccessor(&r->last_key);
      std::string handle_encoding;
      r->pending_handle.EncodeTo(&handle_encoding);
      r->index_block.Add(r->last_key, Slice(handle_encoding));
      r->pending_index_entry = false;
    }
    WriteBlock(&r->index_block, &index_block_handle);
  }

  // Write footer
  if (ok()) {
    Footer footer;
    footer.set_metaindex_handle(metaindex_block_handle);
    footer.set_index_handle(index_block_handle);
    std::string footer_encoding;
    footer.EncodeTo(&footer_encoding);
    r->status = r->file->Append(footer_encoding);
    if (r->status.ok()) {
      r->offset += footer_encoding.size();
    }
  }
  return r->status;
}
// Discard the table being built: mark the builder closed without emitting
// any further data.  The destructor's assert requires that either
// Finish() or Abandon() has been called.
void TableBuilder::Abandon() {
  assert(!rep_->closed);
  rep_->closed = true;
}
// Number of Add() calls so far.
uint64_t TableBuilder::NumEntries() const { return rep_->num_entries; }
// Bytes written so far; after a successful Finish() this is the final size.
uint64_t TableBuilder::FileSize() const { return rep_->offset; }
} // namespace leveldb | |||
// Copyright (c) 2011 The LevelDB Authors. All rights reserved. | |||
// Use of this source code is governed by a BSD-style license that can be | |||
// found in the LICENSE file. See the AUTHORS file for names of contributors. | |||
#include "leveldb/table_builder.h" | |||
#include <cassert> | |||
#include "leveldb/comparator.h" | |||
#include "leveldb/env.h" | |||
#include "leveldb/filter_policy.h" | |||
#include "leveldb/options.h" | |||
#include "table/block_builder.h" | |||
#include "table/filter_block.h" | |||
#include "table/format.h" | |||
#include "util/coding.h" | |||
#include "util/crc32c.h" | |||
#include "db/db_impl.h" //朴 | |||
#include "table/blob_file.h" //朴 | |||
#include "table/block.h" //朴 | |||
// Values larger than this threshold (1 KiB) are separated into a blob file.
// NOTE(review): this global lives outside namespace leveldb and is duplicated
// in the tests — consider exposing it through Options instead.
const size_t min_blob_size = 1024;
namespace leveldb { | |||
// Internal state of TableBuilder (pImpl idiom); identical to upstream LevelDB.
struct TableBuilder::Rep {
  Rep(const Options& opt, WritableFile* f)
      : options(opt),
        index_block_options(opt),
        file(f),
        offset(0),
        data_block(&options),
        index_block(&index_block_options),
        num_entries(0),
        closed(false),
        filter_block(opt.filter_policy == nullptr
                         ? nullptr
                         : new FilterBlockBuilder(opt.filter_policy)),
        pending_index_entry(false) {
    index_block_options.block_restart_interval = 1;
  }

  Options options;
  Options index_block_options;
  WritableFile* file;
  uint64_t offset;
  Status status;
  BlockBuilder data_block;
  BlockBuilder index_block;
  std::string last_key;
  int64_t num_entries;
  bool closed;  // Either Finish() or Abandon() has been called.
  FilterBlockBuilder* filter_block;

  // We do not emit the index entry for a block until we have seen the
  // first key for the next data block.  This allows us to use shorter
  // keys in the index block.  For example, consider a block boundary
  // between the keys "the quick brown fox" and "the who".  We can use
  // "the r" as the key for the index block entry since it is >= all
  // entries in the first block and < all entries in subsequent
  // blocks.
  //
  // Invariant: r->pending_index_entry is true only if data_block is empty.
  bool pending_index_entry;
  BlockHandle pending_handle;  // Handle to add to index block

  std::string compressed_output;
};
// Begin building a new table; opens the filter block's first region.
TableBuilder::TableBuilder(const Options& options, WritableFile* file)
    : rep_(new Rep(options, file)) {
  if (rep_->filter_block != nullptr) {
    rep_->filter_block->StartBlock(0);
  }
}
// Caller must have called Finish() or Abandon() first (asserted below).
TableBuilder::~TableBuilder() {
  assert(rep_->closed);  // Catch errors where caller forgot to call Finish()
  delete rep_->filter_block;
  delete rep_;
}
// Adopt new options mid-build; the comparator may not change because it
// determines the on-disk key order.
Status TableBuilder::ChangeOptions(const Options& options) {
  // Note: if more fields are added to Options, update
  // this function to catch changes that should not be allowed to
  // change in the middle of building a Table.
  if (options.comparator != rep_->options.comparator) {
    return Status::InvalidArgument("changing comparator while building table");
  }

  // Note that any live BlockBuilders point to rep_->options and therefore
  // will automatically pick up the updated options.
  rep_->options = options;
  rep_->index_block_options = options;
  rep_->index_block_options.block_restart_interval = 1;
  return Status::OK();
}
// Add key/value to the table.
// REQUIRES: keys arrive in strictly increasing comparator order;
// Finish()/Abandon() have not been called.
void TableBuilder::Add(const Slice& key, const Slice& value) {
  Rep* r = rep_;
  assert(!r->closed);
  if (!ok()) return;
  if (r->num_entries > 0) {
    assert(r->options.comparator->Compare(key, Slice(r->last_key)) > 0);
  }

  if (r->pending_index_entry) {
    // Previous block was flushed; emit its deferred index entry now that
    // the first key of the new block is known.
    assert(r->data_block.empty());
    r->options.comparator->FindShortestSeparator(&r->last_key, key);
    std::string handle_encoding;
    r->pending_handle.EncodeTo(&handle_encoding);
    r->index_block.Add(r->last_key, Slice(handle_encoding));
    r->pending_index_entry = false;
  }

  if (r->filter_block != nullptr) {
    r->filter_block->AddKey(key);
  }

  r->last_key.assign(key.data(), key.size());
  r->num_entries++;
  r->data_block.Add(key, value);

  // Cut the data block once it reaches the configured target size.
  const size_t estimated_block_size = r->data_block.CurrentSizeEstimate();
  if (estimated_block_size >= r->options.block_size) {
    Flush();
  }
}
void TableBuilder::Flush() { | |||
Rep* r = rep_; | |||
assert(!r->closed); | |||
if (!ok()) return; | |||
if (r->data_block.empty()) return; //朴,正常判断 | |||
assert(!r->pending_index_entry); | |||
if (DBImpl::key_value_separated_) { | |||
// 这里获取数据块内容并初始化 Block 对象,朴 | |||
Slice block_content = r->data_block.Finish(); | |||
BlockContents contents; | |||
contents.data = block_content; | |||
contents.heap_allocated = false; | |||
contents.cachable = false; | |||
Rep* new_rep = new Rep(r->options, r->file); // 创建一个新的 Rep 实例 | |||
new_rep->offset = r->offset; // 新的 offset 初始化为当前的 offset | |||
new_rep->num_entries = r->num_entries; | |||
// 初始化 Block | |||
Block data_block(contents); | |||
leveldb::WritableFile* dest = nullptr; | |||
leveldb::blob::BlobFile blobfile(dest); // 可以动态生成文件名以防止重复 // 初始化 BlobFile 对象,朴 | |||
leveldb::WritableFile* file; | |||
int bfid = DBImpl::NewBlobNum(); // 生成唯一的 blobfile id | |||
std::unique_ptr<Iterator> iter(data_block.NewIterator(Options().comparator)); | |||
// 遍历数据块中的键值对 | |||
for (iter->SeekToFirst(); iter->Valid(); iter->Next()) { | |||
const Slice& key = iter->key(); | |||
const Slice& value = iter->value(); | |||
// 检查值是否大于阈值 | |||
if (value.size() > min_blob_size) { | |||
// 将值存储到 blobfile 中 | |||
uint64_t offset; // 局部变量存储偏移量 | |||
Status status = blobfile.AddRecord(key, value, offset); | |||
if (!status.ok()) { | |||
r->status = status; | |||
} | |||
// 这里修改 value,存储 Blob 的 offset 和 bfid | |||
std::string new_value = EncodeBlobValue(offset, bfid); | |||
new_rep->data_block.Add(key, Slice(new_value)); | |||
} | |||
else{ | |||
// 不需要 Blob 存储,直接处理普通值 | |||
new_rep->data_block.Add(key, value); | |||
} | |||
} | |||
} | |||
WriteBlock(&r->data_block, &r->pending_handle); //将数据块写入文件,并获取数据块的句柄。 | |||
if (ok()) { | |||
r->pending_index_entry = true; | |||
r->status = r->file->Flush(); //刷新 | |||
} | |||
if (r->filter_block != nullptr) { | |||
r->filter_block->StartBlock(r->offset); | |||
} | |||
} | |||
// Encode a blob reference: fixed64 blob-file offset followed by fixed32
// blob-file id (12 bytes total).  This replaces the original value inside
// the data block when key-value separation is active.
// Fix: `result` was declared twice, a redeclaration compile error.
// NOTE(review): this method must also be declared in leveldb/table_builder.h
// (the visible header does not declare it).
std::string TableBuilder::EncodeBlobValue(uint64_t offset, int bfid) {
  std::string result;
  result.resize(8 + 4);  // 8-byte offset + 4-byte blob-file id.
  EncodeFixed64(&result[0], offset);  // Offset of the record in the blob file.
  EncodeFixed32(&result[8], bfid);    // Id of the blob file holding it.
  return result;
}
// Serialize, optionally compress, and write `block`; `handle` receives its
// offset/size.  Falls back to raw bytes when compression saves < 12.5%.
void TableBuilder::WriteBlock(BlockBuilder* block, BlockHandle* handle) {
  // File format contains a sequence of blocks where each block has:
  //    block_data: uint8[n]
  //    type: uint8
  //    crc: uint32
  assert(ok());
  Rep* r = rep_;
  Slice raw = block->Finish();

  Slice block_contents;
  CompressionType type = r->options.compression;
  // TODO(postrelease): Support more compression options: zlib?
  switch (type) {
    case kNoCompression:
      block_contents = raw;
      break;

    case kSnappyCompression: {
      std::string* compressed = &r->compressed_output;
      if (port::Snappy_Compress(raw.data(), raw.size(), compressed) &&
          compressed->size() < raw.size() - (raw.size() / 8u)) {
        block_contents = *compressed;
      } else {
        // Snappy not supported, or compressed less than 12.5%, so just
        // store uncompressed form
        block_contents = raw;
        type = kNoCompression;
      }
      break;
    }

    case kZstdCompression: {
      std::string* compressed = &r->compressed_output;
      if (port::Zstd_Compress(r->options.zstd_compression_level, raw.data(),
                              raw.size(), compressed) &&
          compressed->size() < raw.size() - (raw.size() / 8u)) {
        block_contents = *compressed;
      } else {
        // Zstd not supported, or compressed less than 12.5%, so just
        // store uncompressed form
        block_contents = raw;
        type = kNoCompression;
      }
      break;
    }
  }
  WriteRawBlock(block_contents, type, handle);
  r->compressed_output.clear();
  block->Reset();
}
// Append `block_contents` plus the 5-byte trailer (compression type byte +
// masked crc32c over data and type), advancing r->offset on success.
void TableBuilder::WriteRawBlock(const Slice& block_contents,
                                 CompressionType type, BlockHandle* handle) {
  Rep* r = rep_;
  handle->set_offset(r->offset);
  handle->set_size(block_contents.size());
  r->status = r->file->Append(block_contents);
  if (r->status.ok()) {
    char trailer[kBlockTrailerSize];
    trailer[0] = type;
    uint32_t crc = crc32c::Value(block_contents.data(), block_contents.size());
    crc = crc32c::Extend(crc, trailer, 1);  // Extend crc to cover block type
    EncodeFixed32(trailer + 1, crc32c::Mask(crc));
    r->status = r->file->Append(Slice(trailer, kBlockTrailerSize));
    if (r->status.ok()) {
      r->offset += block_contents.size() + kBlockTrailerSize;
    }
  }
}
// Returns the first error encountered while building, or OK.
Status TableBuilder::status() const { return rep_->status; }
// Flush the final data block, then write the filter, metaindex and index
// blocks followed by the footer.
Status TableBuilder::Finish() {
  Rep* r = rep_;
  Flush();
  assert(!r->closed);
  r->closed = true;

  BlockHandle filter_block_handle, metaindex_block_handle, index_block_handle;

  // Write filter block
  if (ok() && r->filter_block != nullptr) {
    WriteRawBlock(r->filter_block->Finish(), kNoCompression,
                  &filter_block_handle);
  }

  // Write metaindex block
  if (ok()) {
    BlockBuilder meta_index_block(&r->options);
    if (r->filter_block != nullptr) {
      // Add mapping from "filter.Name" to location of filter data
      std::string key = "filter.";
      key.append(r->options.filter_policy->Name());
      std::string handle_encoding;
      filter_block_handle.EncodeTo(&handle_encoding);
      meta_index_block.Add(key, handle_encoding);
    }

    // TODO(postrelease): Add stats and other meta blocks
    WriteBlock(&meta_index_block, &metaindex_block_handle);
  }

  // Write index block
  if (ok()) {
    if (r->pending_index_entry) {
      r->options.comparator->FindShortSuccessor(&r->last_key);
      std::string handle_encoding;
      r->pending_handle.EncodeTo(&handle_encoding);
      r->index_block.Add(r->last_key, Slice(handle_encoding));
      r->pending_index_entry = false;
    }
    WriteBlock(&r->index_block, &index_block_handle);
  }

  // Write footer
  if (ok()) {
    Footer footer;
    footer.set_metaindex_handle(metaindex_block_handle);
    footer.set_index_handle(index_block_handle);
    std::string footer_encoding;
    footer.EncodeTo(&footer_encoding);
    r->status = r->file->Append(footer_encoding);
    if (r->status.ok()) {
      r->offset += footer_encoding.size();
    }
  }
  return r->status;
}
// Discard the table being built; satisfies the destructor's closed assert.
void TableBuilder::Abandon() {
  Rep* r = rep_;
  assert(!r->closed);
  r->closed = true;
}
// Number of Add() calls so far.
uint64_t TableBuilder::NumEntries() const { return rep_->num_entries; }
// Bytes written so far; after a successful Finish() this is the final size.
uint64_t TableBuilder::FileSize() const { return rep_->offset; }
} // namespace leveldb |
@ -0,0 +1,74 @@ | |||
#include "leveldb/db.h" | |||
#include "leveldb/filter_policy.h" | |||
#include <iostream> | |||
#include <cstdlib> | |||
#include <ctime> | |||
using namespace leveldb; | |||
constexpr int value_size = 2048;      // Bytes per value.
constexpr int data_size = 256 << 20;  // Total bytes to insert (256 MiB).
// 3. 数据管理(Manifest/创建/恢复数据库) | |||
// Open (creating if needed) a database with a 10-bits-per-key bloom filter.
// NOTE(review): the policy returned by NewBloomFilterPolicy is never
// deleted; it must outlive the DB, so this sample leaks it deliberately.
Status OpenDB(std::string dbName, DB **db) {
  Options options;
  options.create_if_missing = true;
  options.filter_policy = NewBloomFilterPolicy(10);
  return DB::Open(options, dbName, db);
}
// 1. 存储(数据结构与写入) | |||
// 4. 数据合并(Compaction) | |||
// Fill the database with data_size bytes of 'a'-valued records under random
// keys in [1, key_count]; duplicate keys simply overwrite earlier values.
void InsertData(DB *db) {
  WriteOptions write_options;
  const int key_count = data_size / value_size;
  const std::string payload(value_size, 'a');  // Identical value for every key.
  srand(static_cast<unsigned int>(time(0)));
  for (int i = 0; i < key_count; ++i) {
    const int key_id = rand() % key_count + 1;
    db->Put(write_options, std::to_string(key_id), payload);
  }
}
// 2. 数据访问(如何读数据) | |||
// Read back data: 100 random point lookups followed by a full range scan.
// NOTE(review): the `size` parameter is accepted but never used, and the
// Get()/iterator statuses are ignored — acceptable for a benchmark, but
// read errors go unnoticed.
void GetData(DB *db, int size = (1 << 30)) {
  ReadOptions readOptions;
  int key_num = data_size / value_size;

  // Point lookups on random keys.
  srand(static_cast<unsigned int>(time(0)));
  for (int i = 0; i < 100; i++) {
    int key_ = rand() % key_num+1;
    std::string key = std::to_string(key_);
    std::string value;
    db->Get(readOptions, key, &value);
  }

  // Range scan over the whole database.
  Iterator *iter = db->NewIterator(readOptions);
  iter->SeekToFirst();
  while (iter->Valid()) {
    iter->Next();
  }
  delete iter;
}
int main() { | |||
DB *db; | |||
if(OpenDB("testdb", &db).ok()) { | |||
InsertData(db); | |||
delete db; | |||
} | |||
if(OpenDB("testdb", &db).ok()) { | |||
GetData(db); | |||
delete db; | |||
} | |||
return 0; | |||
} | |||
@ -0,0 +1,119 @@ | |||
#include "gtest/gtest.h" | |||
#include "leveldb/env.h" | |||
#include "leveldb/db.h" | |||
#include "table/blob_file.h" // 假设 BlobFile 的头文件 | |||
using namespace leveldb; | |||
constexpr int value_size = 2048;      // Bytes per value.
constexpr int data_size = 128 << 20;  // Total bytes to insert (128 MiB).
// Separation threshold.  NOTE(review): duplicates the const in
// table_builder.cc — keep the two in sync or expose it via Options.
constexpr int min_blob_size = 1024;
// Open (creating if needed) a database with key-value separation enabled.
// NOTE(review): relies on an `Options::key_value_separated` field that this
// fork must add; upstream LevelDB has no such option.
Status OpenDB(std::string dbName, DB** db) {
  Options options;
  options.create_if_missing = true;
  options.key_value_separated = true;  // Enable KV separation.
  return DB::Open(options, dbName, db);
}
// 插入数据,模拟 KV 分离 | |||
// Insert key_num values of value_size bytes each under random keys.
// NOTE(review): keys are drawn randomly from [1, key_num], so duplicates
// overwrite earlier entries — the database ends up with FEWER than key_num
// distinct keys, which breaks VerifyBlobFile's expected_entries check and
// the Get() assertions in the tests below.
void InsertData(DB* db) {
  WriteOptions writeOptions;
  int key_num = data_size / value_size;
  srand(static_cast<unsigned int>(time(0)));

  for (int i = 0; i < key_num; i++) {
    int key_ = rand() % key_num + 1;
    std::string key = std::to_string(key_);
    std::string value(value_size, 'a');  // Large value, above min_blob_size.
    db->Put(writeOptions, key, value);   // Standard Put; separation happens internally.
  }
}
// 检查数据是否被正确存入 BlobFile | |||
// Walk a blob file and check its entry count and value sizes.
// NOTE(review): this uses BlobFile(path, kReadMode), Open(), NewIterator()
// and Close() — none of which exist in table/blob_file.h, whose BlobFile is
// a write-only wrapper over a WritableFile in namespace leveldb::blob (also
// not reachable via plain `BlobFile` here).  This will not compile until a
// reader API is added.  The `key` local below is also unused.
void VerifyBlobFile(const std::string& blob_file_path, int expected_entries) {
  BlobFile blobfile(blob_file_path, BlobFile::kReadMode);
  Status status = blobfile.Open();
  ASSERT_TRUE(status.ok());

  int entry_count = 0;
  BlobFile::Iterator it = blobfile.NewIterator();
  for (it.SeekToFirst(); it.Valid(); it.Next()) {
    ++entry_count;
    const Slice& key = it.key();
    const Slice& value = it.value();
    ASSERT_GT(value.size(), min_blob_size);  // Value must exceed the threshold.
  }

  ASSERT_EQ(entry_count, expected_entries);  // All inserted entries present.
  blobfile.Close();
}
// KV 分离读写测试 | |||
// End-to-end KV-separation test: insert, verify the blob file, random reads.
// NOTE(review): data_size / value_size over-counts expected entries because
// InsertData uses random keys with duplicates; and random probe keys may be
// absent, making Get() return NotFound and ASSERT_TRUE(status.ok()) fail.
TEST(TestKVSeparation, WriteAndRead) {
  DB* db;
  if (OpenDB("testdb", &db).ok() == false) {
    std::cerr << "open db failed" << std::endl;
    abort();
  }

  // Insert the workload.
  InsertData(db);

  // Verify the blob file contents.
  VerifyBlobFile("blob_data", data_size / value_size);

  // Random point lookups.
  ReadOptions readOptions;
  srand(static_cast<unsigned int>(time(0)));
  int key_num = data_size / value_size;
  for (int i = 0; i < 100; i++) {
    int key_ = rand() % key_num + 1;
    std::string key = std::to_string(key_);
    std::string value;
    Status status = db->Get(readOptions, key, &value);
    ASSERT_TRUE(status.ok());  // Read must succeed (see NOTE above).
    if (value.size() > min_blob_size) {
      // Every inserted value is value_size bytes of 'a'.
      ASSERT_TRUE(value == std::string(value_size, 'a'));
    }
  }
  delete db;
}
// KV 分离压缩测试 | |||
// Verifies that after a full compaction the primary (LSM) data area is
// empty while the separated values remain available in the blob file.
TEST(TestKVSeparation, Compaction) {
  DB* db;
  if (!OpenDB("testdb", &db).ok()) {
    std::cerr << "open db failed" << std::endl;
    abort();
  }
  InsertData(db);
  // Before compaction the key range should occupy some space.
  leveldb::Range ranges[1];
  uint64_t sizes[1];
  ranges[0] = leveldb::Range("-", "A");
  db->GetApproximateSizes(ranges, 1, sizes);
  ASSERT_GT(sizes[0], 0);
  // Compact the whole key space.
  db->CompactRange(nullptr, nullptr);
  // After compaction the primary data area should report zero size...
  ranges[0] = leveldb::Range("-", "A");
  db->GetApproximateSizes(ranges, 1, sizes);
  ASSERT_EQ(sizes[0], 0);
  // ...while the blob file still holds every separated value.
  VerifyBlobFile("blob_data", data_size / value_size);
  delete db;
}
int main(int argc, char** argv) { | |||
testing::InitGoogleTest(&argc, argv); | |||
return RUN_ALL_TESTS(); | |||
} |
@ -0,0 +1,114 @@ | |||
#include "gtest/gtest.h" | |||
#include "leveldb/env.h" | |||
#include "leveldb/db.h" | |||
using namespace leveldb; | |||
constexpr int value_size = 2048;  // size of one value in bytes
constexpr int data_size = 128 << 20;  // total bytes to insert (128 MiB)
// Opens the database named dbName, creating it on first use; returns the
// status of DB::Open.
Status OpenDB(std::string dbName, DB **db) {
  Options opts;
  opts.create_if_missing = true;  // create the DB if it does not exist yet
  return DB::Open(opts, dbName, db);
}
// Writes data_size / value_size large values with the given TTL (seconds),
// covering every key in [1, key_num] exactly once.
//
// Fix: the previous rand()-based key choice left gaps (some keys never
// inserted), so ReadTTL's "every random key readable before expiry" loop
// could fail spuriously on a key that was never written.
void InsertData(DB *db, uint64_t ttl/* second */) {
  WriteOptions writeOptions;
  int key_num = data_size / value_size;
  for (int i = 1; i <= key_num; i++) {
    std::string key = std::to_string(i);
    std::string value(value_size, 'a');
    db->Put(writeOptions, key, value, ttl);  // project-extended Put with TTL
  }
}
// Issues 100 random point lookups to generate read traffic; results and
// statuses are intentionally ignored (best-effort reads only).
//
// Fix: the `size` parameter was entirely unused, producing an
// unused-parameter warning; it is now explicitly voided and documented.
void GetData(DB *db, int size = (1 << 30)) {
  (void)size;  // reserved for future scan sizing; currently unused
  ReadOptions readOptions;
  int key_num = data_size / value_size;
  // Random point lookups over the inserted key space.
  srand(static_cast<unsigned int>(time(0)));
  for (int i = 0; i < 100; i++) {
    int key_ = rand() % key_num + 1;
    std::string key = std::to_string(key_);
    std::string value;
    db->Get(readOptions, key, &value);  // status deliberately ignored
  }
}
// TTL smoke test: every key must be readable before the TTL elapses, and
// every lookup must fail after it has elapsed.
//
// Fix: `db` was never deleted, leaking the handle and keeping the LevelDB
// LOCK file held, which can make the next test's OpenDB("testdb") fail.
// Also replaced garbled/mojibake comment text with English.
TEST(TestTTL, ReadTTL) {
  DB *db;
  if (OpenDB("testdb", &db).ok() == false) {
    std::cerr << "open db failed" << std::endl;
    abort();
  }
  uint64_t ttl = 20;
  InsertData(db, ttl);
  ReadOptions readOptions;
  Status status;
  int key_num = data_size / value_size;
  srand(static_cast<unsigned int>(time(0)));
  // Before expiry, every key must be readable.
  for (int i = 0; i < 100; i++) {
    int key_ = rand() % key_num + 1;
    std::string key = std::to_string(key_);
    std::string value;
    status = db->Get(readOptions, key, &value);
    ASSERT_TRUE(status.ok());
  }
  // Sleep past the TTL; afterwards all entries should have expired.
  Env::Default()->SleepForMicroseconds(ttl * 1000000);
  for (int i = 0; i < 100; i++) {
    int key_ = rand() % key_num + 1;
    std::string key = std::to_string(key_);
    std::string value;
    status = db->Get(readOptions, key, &value);
    ASSERT_FALSE(status.ok());  // expired entries must not be readable
  }
  // Release the DB handle (and its lock) so later tests can reopen it.
  delete db;
}
// Verifies that compaction physically reclaims expired entries: after the
// TTL elapses and CompactRange runs, the key range should report zero size.
//
// Fix: `db` was never deleted (leak + LOCK file held for any later test);
// removed the dead commented-out redefinitions and garbled comment text.
TEST(TestTTL, CompactionTTL) {
  DB *db;
  if (OpenDB("testdb", &db).ok() == false) {
    std::cerr << "open db failed" << std::endl;
    abort();
  }
  uint64_t ttl = 20;
  InsertData(db, ttl);
  // Before expiry the key range should occupy some space.
  leveldb::Range ranges[1];
  ranges[0] = leveldb::Range("-", "A");
  uint64_t sizes[1];
  db->GetApproximateSizes(ranges, 1, sizes);
  ASSERT_GT(sizes[0], 0);
  // Let everything expire, then compact so expired entries are dropped.
  Env::Default()->SleepForMicroseconds(ttl * 1000000);
  db->CompactRange(nullptr, nullptr);
  // After compaction the range should be empty.
  ranges[0] = leveldb::Range("-", "A");
  db->GetApproximateSizes(ranges, 1, sizes);
  ASSERT_EQ(sizes[0], 0);
  delete db;
}
int main(int argc, char** argv) { | |||
// All tests currently run with the same read-only file limits. | |||
testing::InitGoogleTest(&argc, argv); | |||
return RUN_ALL_TESTS(); | |||
} |
@ -1,5 +0,0 @@ | |||
---
# clang-format configuration: Google C++ style with left-aligned pointers
# (i.e. "int* p" rather than "int *p").
Language: Cpp
BasedOnStyle: Google
PointerAlignment: Left
...
@ -1,7 +0,0 @@ | |||
---
# clang-tidy configuration: run the static analyzer plus redundancy and
# performance checks, and treat all of their diagnostics as errors.
Checks: 'clang-analyzer-*,readability-redundant-*,performance-*'
WarningsAsErrors: 'clang-analyzer-*,readability-redundant-*,performance-*'
# Apply the checks to all headers in the project.
HeaderFilterRegex: '.*'
# Note: the former "AnalyzeTemporaryDtors" option was removed here — it was
# deprecated (a no-op) and dropped in clang-tidy 16, where its presence
# makes the config invalid.
FormatStyle: none
User: user
@ -1,32 +0,0 @@ | |||
--- | |||
name: Bug report | |||
about: Create a report to help us improve | |||
title: "[BUG]" | |||
labels: '' | |||
assignees: '' | |||
--- | |||
**Describe the bug** | |||
A clear and concise description of what the bug is. | |||
**System** | |||
Which OS, compiler, and compiler version are you using: | |||
- OS: | |||
- Compiler and version: | |||
**To reproduce** | |||
Steps to reproduce the behavior: | |||
1. sync to commit ... | |||
2. cmake/bazel... | |||
3. make ... | |||
4. See error | |||
**Expected behavior** | |||
A clear and concise description of what you expected to happen. | |||
**Screenshots** | |||
If applicable, add screenshots to help explain your problem. | |||
**Additional context** | |||
Add any other context about the problem here. |
@ -1,20 +0,0 @@ | |||
--- | |||
name: Feature request | |||
about: Suggest an idea for this project | |||
title: "[FR]" | |||
labels: '' | |||
assignees: '' | |||
--- | |||
**Is your feature request related to a problem? Please describe.** | |||
A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] | |||
**Describe the solution you'd like** | |||
A clear and concise description of what you want to happen. | |||
**Describe alternatives you've considered** | |||
A clear and concise description of any alternative solutions or features you've considered. | |||
**Additional context** | |||
Add any other context or screenshots about the feature request here. |
@ -1,13 +0,0 @@ | |||
#!/bin/bash
# Installs Bazel 6.0.0 from GitHub releases when no working bazel is on PATH.
#
# Fixes: added a shebang (the file had none, so the interpreter was
# whatever the caller happened to use); added `set -e` so a failed yum/wget
# does not leave a half-installed binary; replaced the bashism `==` inside
# `[ ]` with the portable `=`.
set -e
if ! bazel version; then
  arch=$(uname -m)
  # GitHub release binaries are published as "arm64", not "aarch64".
  if [ "$arch" = "aarch64" ]; then
    arch="arm64"
  fi
  echo "Installing wget and downloading $arch Bazel binary from GitHub releases."
  yum install -y wget
  wget "https://github.com/bazelbuild/bazel/releases/download/6.0.0/bazel-6.0.0-linux-$arch" -O /usr/local/bin/bazel
  chmod +x /usr/local/bin/bazel
else
  # bazel is installed for the correct architecture
  exit 0
fi
@ -1,27 +0,0 @@ | |||
#!/usr/bin/env bash
# Installs libc++ / libc++abi for CI builds.
#
# The block below is the retired build-from-source path, kept for reference;
# the active script simply installs the distribution packages via apt.
# Checkout LLVM sources
#git clone --depth=1 https://github.com/llvm/llvm-project.git llvm-project
#
## Setup libc++ options
#if [ -z "$BUILD_32_BITS" ]; then
#  export BUILD_32_BITS=OFF && echo disabling 32 bit build
#fi
#
## Build and install libc++ (Use unstable ABI for better sanitizer coverage)
#cd ./llvm-project
#cmake -DCMAKE_C_COMPILER=${CC} \
#      -DCMAKE_CXX_COMPILER=${CXX} \
#      -DCMAKE_BUILD_TYPE=RelWithDebInfo \
#      -DCMAKE_INSTALL_PREFIX=/usr \
#      -DLIBCXX_ABI_UNSTABLE=OFF \
#      -DLLVM_USE_SANITIZER=${LIBCXX_SANITIZER} \
#      -DLLVM_BUILD_32_BITS=${BUILD_32_BITS} \
#      -DLLVM_ENABLE_RUNTIMES='libcxx;libcxxabi' \
#      -S llvm -B llvm-build -G "Unix Makefiles"
#make -C llvm-build -j3 cxx cxxabi
#sudo make -C llvm-build install-cxx install-cxxabi
#cd ..
# Install the distribution-provided libc++ packages instead of building LLVM.
sudo apt update
sudo apt -y install libc++-dev libc++abi-dev
@ -1,35 +0,0 @@ | |||
# CI workflow: build and test with Bazel on Linux, macOS, and Windows.
name: bazel
on:
  push: {}
  pull_request: {}
jobs:
  job:
    name: bazel.${{ matrix.os }}
    runs-on: ${{ matrix.os }}
    strategy:
      # Keep running the other platforms when one fails.
      fail-fast: false
      matrix:
        os: [ubuntu-latest, macos-latest, windows-2022]
    steps:
      - uses: actions/checkout@v3
      # Persist Bazel's local cache between runs, keyed by OS and git ref,
      # falling back to the main-branch cache on a miss.
      - name: mount bazel cache
        uses: actions/cache@v3
        env:
          cache-name: bazel-cache
        with:
          path: "~/.cache/bazel"
          key: ${{ env.cache-name }}-${{ matrix.os }}-${{ github.ref }}
          restore-keys: |
            ${{ env.cache-name }}-${{ matrix.os }}-main
      - name: build
        run: |
          bazel build //:benchmark //:benchmark_main //test/...
      - name: test
        run: |
          bazel test --test_output=all //test/...
@ -1,46 +0,0 @@ | |||
# CI workflow: verify the project still configures and builds with the
# minimum supported CMake version (3.10.0).
name: build-and-test-min-cmake
on:
  push:
    branches: [ main ]
  pull_request:
    branches: [ main ]
jobs:
  job:
    name: ${{ matrix.os }}.min-cmake
    runs-on: ${{ matrix.os }}
    strategy:
      fail-fast: false
      matrix:
        os: [ubuntu-latest, macos-latest]
    steps:
      - uses: actions/checkout@v3
      # Pin the oldest supported CMake rather than the runner's default.
      - uses: lukka/get-cmake@latest
        with:
          cmakeVersion: 3.10.0
      - name: create build environment
        run: cmake -E make_directory ${{ runner.workspace }}/_build
      - name: setup cmake initial cache
        run: touch compiler-cache.cmake
      - name: configure cmake
        env:
          # NOTE(review): matrix.compiler is not defined in this job's matrix,
          # so CXX expands to an empty string and the platform default compiler
          # is used — confirm whether a compiler dimension was intended.
          CXX: ${{ matrix.compiler }}
        shell: bash
        working-directory: ${{ runner.workspace }}/_build
        run: >
          cmake -C ${{ github.workspace }}/compiler-cache.cmake
          $GITHUB_WORKSPACE
          -DBENCHMARK_DOWNLOAD_DEPENDENCIES=ON
          -DCMAKE_CXX_VISIBILITY_PRESET=hidden
          -DCMAKE_VISIBILITY_INLINES_HIDDEN=ON
      - name: build
        shell: bash
        working-directory: ${{ runner.workspace }}/_build
        run: cmake --build .