diff --git a/.appveyor.yml b/.appveyor.yml
@@ -0,0 +1,35 @@
# Build matrix / environment variables are explained on:
# https://www.appveyor.com/docs/appveyor-yml/
# This file can be validated on: https://ci.appveyor.com/tools/validate-yaml

version: "{build}"

environment:
  matrix:
    # AppVeyor currently has no custom job name feature.
    # http://help.appveyor.com/discussions/questions/1623-can-i-provide-a-friendly-name-for-jobs
    - JOB: Visual Studio 2017
      APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2017
      CMAKE_GENERATOR: Visual Studio 15 2017

platform:
  - x86
  - x64

configuration:
  - RelWithDebInfo
  - Debug

build_script:
  - git submodule update --init --recursive
  - mkdir build
  - cd build
  - if "%platform%"=="x64" set CMAKE_GENERATOR=%CMAKE_GENERATOR% Win64
  - cmake --version
  - cmake .. -G "%CMAKE_GENERATOR%"
      -DCMAKE_CONFIGURATION_TYPES="%CONFIGURATION%"
  - cmake --build . --config "%CONFIGURATION%"
  - cd ..

test_script:
  - cd build && ctest --verbose --build-config "%CONFIGURATION%" && cd ..
diff --git a/.clang-format b/.clang-format
@@ -0,0 +1,18 @@
# Run manually to reformat a file:
# clang-format -i --style=file <file>
# find . -iname '*.cc' -o -iname '*.h' -o -iname '*.h.in' | xargs clang-format -i --style=file
BasedOnStyle: Google
DerivePointerAlignment: false

# Public headers are in a different location in the internal Google repository.
# Order them so that when imported to the authoritative repository they will be
# in correct alphabetical order.
IncludeCategories:
  - Regex:    '^(<|"(benchmarks|db|helpers)/)'
    Priority: 1
  - Regex:    '^"(leveldb)/'
    Priority: 2
  - Regex:    '^(<|"(issues|port|table|third_party|util)/)'
    Priority: 3
  - Regex:    '.*'
    Priority: 4
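# For reference, with these categories an include block in a db/ source file
# groups roughly as follows (a sketch; intra-category order is alphabetical):
#   <system> headers plus "benchmarks/", "db/", "helpers/" headers  (priority 1)
#   "leveldb/..." public headers                                    (priority 2)
#   "issues/", "port/", "table/", "third_party/", "util/" headers   (priority 3)
#   everything else                                                 (priority 4)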
diff --git a/.gitignore b/.gitignore
@@ -1,9 +1,8 @@
build_config.mk
*.a
*.o
*.dylib*
*.so
*.so.*
*_test
db_bench
leveldbutil
# Editors.
*.sw*
.vscode
.DS_Store

# Build directory.
build/
out/
diff --git a/.travis.yml b/.travis.yml
@@ -0,0 +1,79 @@
# Build matrix / environment variables are explained on:
# http://about.travis-ci.org/docs/user/build-configuration/
# This file can be validated on: http://lint.travis-ci.org/

language: cpp
dist: xenial
osx_image: xcode10.2

compiler:
- gcc
- clang

os:
- linux
- osx

env:
- BUILD_TYPE=Debug
- BUILD_TYPE=RelWithDebInfo

addons:
  apt:
    sources:
    - llvm-toolchain-xenial-8
    - ubuntu-toolchain-r-test
    packages:
    - clang-8
    - cmake
    - gcc-8
    - g++-8
    - libgoogle-perftools-dev
    - libkyotocabinet-dev
    - libsnappy-dev
    - libsqlite3-dev
    - ninja-build
  homebrew:
    packages:
    - cmake
    - crc32c
    - gcc@8
    - gperftools
    - kyotocabinet
    - llvm@8
    - ninja
    - snappy
    - sqlite3
    update: true

install:
# The following Homebrew packages aren't linked by default, and need to be
# prepended to the path explicitly.
- if [ "$TRAVIS_OS_NAME" == "osx" ]; then
    export PATH="$(brew --prefix llvm)/bin:$PATH";
  fi
# /usr/bin/gcc points to an older compiler on both Linux and macOS.
- if [ "$CXX" = "g++" ]; then export CXX="g++-8" CC="gcc-8"; fi
# /usr/bin/clang points to an older compiler on both Linux and macOS.
#
# Homebrew's llvm package doesn't ship a versioned clang++ binary, so the values
# below don't work on macOS. Fortunately, the path change above makes the
# default values (clang and clang++) resolve to the correct compiler on macOS.
- if [ "$TRAVIS_OS_NAME" == "linux" ]; then
    if [ "$CXX" = "clang++" ]; then export CXX="clang++-8" CC="clang-8"; fi;
  fi
- echo ${CC}
- echo ${CXX}
- ${CXX} --version
- cmake --version

before_script:
- mkdir -p build && cd build
- cmake .. -G Ninja -DCMAKE_BUILD_TYPE=$BUILD_TYPE
- cmake --build .
- cd ..

script:
- cd build && ctest --verbose && cd ..
- "if [ -f build/db_bench ] ; then build/db_bench ; fi"
- "if [ -f build/db_bench_sqlite3 ] ; then build/db_bench_sqlite3 ; fi"
- "if [ -f build/db_bench_tree_db ] ; then build/db_bench_tree_db ; fi"
diff --git a/CMakeLists.txt b/CMakeLists.txt
@@ -0,0 +1,454 @@
# Copyright 2017 The LevelDB Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file. See the AUTHORS file for names of contributors.

cmake_minimum_required(VERSION 3.9)
# Keep the version below in sync with the one in db.h
project(leveldb VERSION 1.22.0 LANGUAGES C CXX)

# This project can use C11, but will gracefully decay down to C89.
set(CMAKE_C_STANDARD 11)
set(CMAKE_C_STANDARD_REQUIRED OFF)
set(CMAKE_C_EXTENSIONS OFF)

# This project requires C++11.
set(CMAKE_CXX_STANDARD 11)
set(CMAKE_CXX_STANDARD_REQUIRED ON)
set(CMAKE_CXX_EXTENSIONS OFF)

if (WIN32)
  set(LEVELDB_PLATFORM_NAME LEVELDB_PLATFORM_WINDOWS)
  # TODO(cmumford): Make UNICODE configurable for Windows.
  add_definitions(-D_UNICODE -DUNICODE)
else (WIN32)
  set(LEVELDB_PLATFORM_NAME LEVELDB_PLATFORM_POSIX)
endif (WIN32)

option(LEVELDB_BUILD_TESTS "Build LevelDB's unit tests" ON)
option(LEVELDB_BUILD_BENCHMARKS "Build LevelDB's benchmarks" ON)
option(LEVELDB_INSTALL "Install LevelDB's header and library" ON)

include(TestBigEndian)
test_big_endian(LEVELDB_IS_BIG_ENDIAN)

include(CheckIncludeFile)
check_include_file("unistd.h" HAVE_UNISTD_H)

include(CheckLibraryExists)
check_library_exists(crc32c crc32c_value "" HAVE_CRC32C)
check_library_exists(snappy snappy_compress "" HAVE_SNAPPY)
check_library_exists(tcmalloc malloc "" HAVE_TCMALLOC)

include(CheckCXXSymbolExists)
# Using check_cxx_symbol_exists() instead of check_c_symbol_exists() because
# we're including the header from C++, and feature detection should use the same
# compiler language that the project will use later. Principles aside, some
# versions of glibc do not expose fdatasync() in <unistd.h> in standard C mode
# (-std=c11), but do expose the function in standard C++ mode (-std=c++11).
check_cxx_symbol_exists(fdatasync "unistd.h" HAVE_FDATASYNC)
check_cxx_symbol_exists(F_FULLFSYNC "fcntl.h" HAVE_FULLFSYNC)
check_cxx_symbol_exists(O_CLOEXEC "fcntl.h" HAVE_O_CLOEXEC)

include(CheckCXXSourceCompiles)

set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wstrict-prototypes")

# Test whether -Wthread-safety is available. See
# https://clang.llvm.org/docs/ThreadSafetyAnalysis.html
# -Werror is necessary because unknown attributes only generate warnings.
set(OLD_CMAKE_REQUIRED_FLAGS ${CMAKE_REQUIRED_FLAGS})
list(APPEND CMAKE_REQUIRED_FLAGS -Werror -Wthread-safety)
check_cxx_source_compiles("
struct __attribute__((lockable)) Lock {
  void Acquire() __attribute__((exclusive_lock_function()));
  void Release() __attribute__((unlock_function()));
};
struct ThreadSafeType {
  Lock lock_;
  int data_ __attribute__((guarded_by(lock_)));
};
int main() { return 0; }
" HAVE_CLANG_THREAD_SAFETY)
set(CMAKE_REQUIRED_FLAGS ${OLD_CMAKE_REQUIRED_FLAGS})

# Test whether C++17 __has_include is available.
check_cxx_source_compiles("
#if defined(__has_include) && __has_include(<string>)
#include <string>
#endif
int main() { std::string str; return 0; }
" HAVE_CXX17_HAS_INCLUDE)

set(LEVELDB_PUBLIC_INCLUDE_DIR "include/leveldb")
set(LEVELDB_PORT_CONFIG_DIR "include/port")

configure_file(
  "${PROJECT_SOURCE_DIR}/port/port_config.h.in"
  "${PROJECT_BINARY_DIR}/${LEVELDB_PORT_CONFIG_DIR}/port_config.h"
)

include_directories(
  "${PROJECT_BINARY_DIR}/include"
  "${PROJECT_SOURCE_DIR}"
)
if(BUILD_SHARED_LIBS)
  # Only export LEVELDB_EXPORT symbols from the shared library.
  add_compile_options(-fvisibility=hidden)
endif(BUILD_SHARED_LIBS)

add_library(leveldb "")
target_sources(leveldb
  PRIVATE
    "${PROJECT_BINARY_DIR}/${LEVELDB_PORT_CONFIG_DIR}/port_config.h"
    "${PROJECT_SOURCE_DIR}/db/builder.cc"
    "${PROJECT_SOURCE_DIR}/db/builder.h"
    "${PROJECT_SOURCE_DIR}/db/c.cc"
    "${PROJECT_SOURCE_DIR}/db/db_impl.cc"
    "${PROJECT_SOURCE_DIR}/db/db_impl.h"
    "${PROJECT_SOURCE_DIR}/db/db_iter.cc"
    "${PROJECT_SOURCE_DIR}/db/db_iter.h"
    "${PROJECT_SOURCE_DIR}/db/dbformat.cc"
    "${PROJECT_SOURCE_DIR}/db/dbformat.h"
    "${PROJECT_SOURCE_DIR}/db/dumpfile.cc"
    "${PROJECT_SOURCE_DIR}/db/filename.cc"
    "${PROJECT_SOURCE_DIR}/db/filename.h"
    "${PROJECT_SOURCE_DIR}/db/log_format.h"
    "${PROJECT_SOURCE_DIR}/db/log_reader.cc"
    "${PROJECT_SOURCE_DIR}/db/log_reader.h"
    "${PROJECT_SOURCE_DIR}/db/log_writer.cc"
    "${PROJECT_SOURCE_DIR}/db/log_writer.h"
    "${PROJECT_SOURCE_DIR}/db/memtable.cc"
    "${PROJECT_SOURCE_DIR}/db/memtable.h"
    "${PROJECT_SOURCE_DIR}/db/repair.cc"
    "${PROJECT_SOURCE_DIR}/db/skiplist.h"
    "${PROJECT_SOURCE_DIR}/db/snapshot.h"
    "${PROJECT_SOURCE_DIR}/db/table_cache.cc"
    "${PROJECT_SOURCE_DIR}/db/table_cache.h"
    "${PROJECT_SOURCE_DIR}/db/version_edit.cc"
    "${PROJECT_SOURCE_DIR}/db/version_edit.h"
    "${PROJECT_SOURCE_DIR}/db/version_set.cc"
    "${PROJECT_SOURCE_DIR}/db/version_set.h"
    "${PROJECT_SOURCE_DIR}/db/write_batch_internal.h"
    "${PROJECT_SOURCE_DIR}/db/write_batch.cc"
    "${PROJECT_SOURCE_DIR}/port/port_stdcxx.h"
    "${PROJECT_SOURCE_DIR}/port/port.h"
    "${PROJECT_SOURCE_DIR}/port/thread_annotations.h"
    "${PROJECT_SOURCE_DIR}/table/block_builder.cc"
    "${PROJECT_SOURCE_DIR}/table/block_builder.h"
    "${PROJECT_SOURCE_DIR}/table/block.cc"
    "${PROJECT_SOURCE_DIR}/table/block.h"
    "${PROJECT_SOURCE_DIR}/table/filter_block.cc"
    "${PROJECT_SOURCE_DIR}/table/filter_block.h"
    "${PROJECT_SOURCE_DIR}/table/format.cc"
    "${PROJECT_SOURCE_DIR}/table/format.h"
    "${PROJECT_SOURCE_DIR}/table/iterator_wrapper.h"
    "${PROJECT_SOURCE_DIR}/table/iterator.cc"
    "${PROJECT_SOURCE_DIR}/table/merger.cc"
    "${PROJECT_SOURCE_DIR}/table/merger.h"
    "${PROJECT_SOURCE_DIR}/table/table_builder.cc"
    "${PROJECT_SOURCE_DIR}/table/table.cc"
    "${PROJECT_SOURCE_DIR}/table/two_level_iterator.cc"
    "${PROJECT_SOURCE_DIR}/table/two_level_iterator.h"
    "${PROJECT_SOURCE_DIR}/util/arena.cc"
    "${PROJECT_SOURCE_DIR}/util/arena.h"
    "${PROJECT_SOURCE_DIR}/util/bloom.cc"
    "${PROJECT_SOURCE_DIR}/util/cache.cc"
    "${PROJECT_SOURCE_DIR}/util/coding.cc"
    "${PROJECT_SOURCE_DIR}/util/coding.h"
    "${PROJECT_SOURCE_DIR}/util/comparator.cc"
    "${PROJECT_SOURCE_DIR}/util/crc32c.cc"
    "${PROJECT_SOURCE_DIR}/util/crc32c.h"
    "${PROJECT_SOURCE_DIR}/util/env.cc"
    "${PROJECT_SOURCE_DIR}/util/filter_policy.cc"
    "${PROJECT_SOURCE_DIR}/util/hash.cc"
    "${PROJECT_SOURCE_DIR}/util/hash.h"
    "${PROJECT_SOURCE_DIR}/util/logging.cc"
    "${PROJECT_SOURCE_DIR}/util/logging.h"
    "${PROJECT_SOURCE_DIR}/util/mutexlock.h"
    "${PROJECT_SOURCE_DIR}/util/no_destructor.h"
    "${PROJECT_SOURCE_DIR}/util/options.cc"
    "${PROJECT_SOURCE_DIR}/util/random.h"
    "${PROJECT_SOURCE_DIR}/util/status.cc"

  # Only CMake 3.3+ supports PUBLIC sources in targets exported by "install".
  $<$<VERSION_GREATER:CMAKE_VERSION,3.2>:PUBLIC>
    "${LEVELDB_PUBLIC_INCLUDE_DIR}/c.h"
    "${LEVELDB_PUBLIC_INCLUDE_DIR}/cache.h"
    "${LEVELDB_PUBLIC_INCLUDE_DIR}/comparator.h"
    "${LEVELDB_PUBLIC_INCLUDE_DIR}/db.h"
    "${LEVELDB_PUBLIC_INCLUDE_DIR}/dumpfile.h"
    "${LEVELDB_PUBLIC_INCLUDE_DIR}/env.h"
    "${LEVELDB_PUBLIC_INCLUDE_DIR}/export.h"
    "${LEVELDB_PUBLIC_INCLUDE_DIR}/filter_policy.h"
    "${LEVELDB_PUBLIC_INCLUDE_DIR}/iterator.h"
    "${LEVELDB_PUBLIC_INCLUDE_DIR}/options.h"
    "${LEVELDB_PUBLIC_INCLUDE_DIR}/slice.h"
    "${LEVELDB_PUBLIC_INCLUDE_DIR}/status.h"
    "${LEVELDB_PUBLIC_INCLUDE_DIR}/table_builder.h"
    "${LEVELDB_PUBLIC_INCLUDE_DIR}/table.h"
    "${LEVELDB_PUBLIC_INCLUDE_DIR}/write_batch.h"
)
if (WIN32)
  target_sources(leveldb
    PRIVATE
      "${PROJECT_SOURCE_DIR}/util/env_windows.cc"
      "${PROJECT_SOURCE_DIR}/util/windows_logger.h"
  )
else (WIN32)
  target_sources(leveldb
    PRIVATE
      "${PROJECT_SOURCE_DIR}/util/env_posix.cc"
      "${PROJECT_SOURCE_DIR}/util/posix_logger.h"
  )
endif (WIN32)

# MemEnv is not part of the interface and could be pulled to a separate library.
target_sources(leveldb
  PRIVATE
    "${PROJECT_SOURCE_DIR}/helpers/memenv/memenv.cc"
    "${PROJECT_SOURCE_DIR}/helpers/memenv/memenv.h"
)

target_include_directories(leveldb
  PUBLIC
    $<BUILD_INTERFACE:${PROJECT_SOURCE_DIR}/include>
    $<INSTALL_INTERFACE:${CMAKE_INSTALL_INCLUDEDIR}>
)

set_target_properties(leveldb
  PROPERTIES VERSION ${PROJECT_VERSION} SOVERSION ${PROJECT_VERSION_MAJOR})

target_compile_definitions(leveldb
  PRIVATE
    # Used by include/export.h when building shared libraries.
    LEVELDB_COMPILE_LIBRARY
    # Used by port/port.h.
    ${LEVELDB_PLATFORM_NAME}=1
)
if (NOT HAVE_CXX17_HAS_INCLUDE)
  target_compile_definitions(leveldb
    PRIVATE
      LEVELDB_HAS_PORT_CONFIG_H=1
  )
endif(NOT HAVE_CXX17_HAS_INCLUDE)

if(BUILD_SHARED_LIBS)
  target_compile_definitions(leveldb
    PUBLIC
      # Used by include/export.h.
      LEVELDB_SHARED_LIBRARY
  )
endif(BUILD_SHARED_LIBS)

if(HAVE_CLANG_THREAD_SAFETY)
  target_compile_options(leveldb
    PUBLIC
      -Werror -Wthread-safety)
endif(HAVE_CLANG_THREAD_SAFETY)

if(HAVE_CRC32C)
  target_link_libraries(leveldb crc32c)
endif(HAVE_CRC32C)
if(HAVE_SNAPPY)
  target_link_libraries(leveldb snappy)
endif(HAVE_SNAPPY)
if(HAVE_TCMALLOC)
  target_link_libraries(leveldb tcmalloc)
endif(HAVE_TCMALLOC)

# Needed by port_stdcxx.h
find_package(Threads REQUIRED)
target_link_libraries(leveldb Threads::Threads)

add_executable(leveldbutil
  "${PROJECT_SOURCE_DIR}/db/leveldbutil.cc"
)
target_link_libraries(leveldbutil leveldb)
if(LEVELDB_BUILD_TESTS)
  enable_testing()

  function(leveldb_test test_file)
    get_filename_component(test_target_name "${test_file}" NAME_WE)

    add_executable("${test_target_name}" "")
    target_sources("${test_target_name}"
      PRIVATE
        "${PROJECT_BINARY_DIR}/${LEVELDB_PORT_CONFIG_DIR}/port_config.h"
        "${PROJECT_SOURCE_DIR}/util/testharness.cc"
        "${PROJECT_SOURCE_DIR}/util/testharness.h"
        "${PROJECT_SOURCE_DIR}/util/testutil.cc"
        "${PROJECT_SOURCE_DIR}/util/testutil.h"

        "${test_file}"
    )
    target_link_libraries("${test_target_name}" leveldb)
    target_compile_definitions("${test_target_name}"
      PRIVATE
        ${LEVELDB_PLATFORM_NAME}=1
    )
    if (NOT HAVE_CXX17_HAS_INCLUDE)
      target_compile_definitions("${test_target_name}"
        PRIVATE
          LEVELDB_HAS_PORT_CONFIG_H=1
      )
    endif(NOT HAVE_CXX17_HAS_INCLUDE)

    add_test(NAME "${test_target_name}" COMMAND "${test_target_name}")
  endfunction(leveldb_test)

  leveldb_test("${PROJECT_SOURCE_DIR}/db/c_test.c")
  leveldb_test("${PROJECT_SOURCE_DIR}/db/fault_injection_test.cc")

  leveldb_test("${PROJECT_SOURCE_DIR}/issues/issue178_test.cc")
  leveldb_test("${PROJECT_SOURCE_DIR}/issues/issue200_test.cc")
  leveldb_test("${PROJECT_SOURCE_DIR}/issues/issue320_test.cc")

  leveldb_test("${PROJECT_SOURCE_DIR}/util/env_test.cc")
  leveldb_test("${PROJECT_SOURCE_DIR}/util/status_test.cc")
  leveldb_test("${PROJECT_SOURCE_DIR}/util/no_destructor_test.cc")

  if(NOT BUILD_SHARED_LIBS)
    leveldb_test("${PROJECT_SOURCE_DIR}/db/autocompact_test.cc")
    leveldb_test("${PROJECT_SOURCE_DIR}/db/corruption_test.cc")
    leveldb_test("${PROJECT_SOURCE_DIR}/db/db_test.cc")
    leveldb_test("${PROJECT_SOURCE_DIR}/db/dbformat_test.cc")
    leveldb_test("${PROJECT_SOURCE_DIR}/db/filename_test.cc")
    leveldb_test("${PROJECT_SOURCE_DIR}/db/log_test.cc")
    leveldb_test("${PROJECT_SOURCE_DIR}/db/recovery_test.cc")
    leveldb_test("${PROJECT_SOURCE_DIR}/db/skiplist_test.cc")
    leveldb_test("${PROJECT_SOURCE_DIR}/db/version_edit_test.cc")
    leveldb_test("${PROJECT_SOURCE_DIR}/db/version_set_test.cc")
    leveldb_test("${PROJECT_SOURCE_DIR}/db/write_batch_test.cc")

    leveldb_test("${PROJECT_SOURCE_DIR}/helpers/memenv/memenv_test.cc")

    leveldb_test("${PROJECT_SOURCE_DIR}/table/filter_block_test.cc")
    leveldb_test("${PROJECT_SOURCE_DIR}/table/table_test.cc")

    leveldb_test("${PROJECT_SOURCE_DIR}/util/arena_test.cc")
    leveldb_test("${PROJECT_SOURCE_DIR}/util/bloom_test.cc")
    leveldb_test("${PROJECT_SOURCE_DIR}/util/cache_test.cc")
    leveldb_test("${PROJECT_SOURCE_DIR}/util/coding_test.cc")
    leveldb_test("${PROJECT_SOURCE_DIR}/util/crc32c_test.cc")
    leveldb_test("${PROJECT_SOURCE_DIR}/util/hash_test.cc")
    leveldb_test("${PROJECT_SOURCE_DIR}/util/logging_test.cc")

    # TODO(costan): This test also uses
    #               "${PROJECT_SOURCE_DIR}/util/env_{posix|windows}_test_helper.h"
    if (WIN32)
      leveldb_test("${PROJECT_SOURCE_DIR}/util/env_windows_test.cc")
    else (WIN32)
      leveldb_test("${PROJECT_SOURCE_DIR}/util/env_posix_test.cc")
    endif (WIN32)
  endif(NOT BUILD_SHARED_LIBS)
endif(LEVELDB_BUILD_TESTS)
if(LEVELDB_BUILD_BENCHMARKS)
  function(leveldb_benchmark bench_file)
    get_filename_component(bench_target_name "${bench_file}" NAME_WE)

    add_executable("${bench_target_name}" "")
    target_sources("${bench_target_name}"
      PRIVATE
        "${PROJECT_BINARY_DIR}/${LEVELDB_PORT_CONFIG_DIR}/port_config.h"
        "${PROJECT_SOURCE_DIR}/util/histogram.cc"
        "${PROJECT_SOURCE_DIR}/util/histogram.h"
        "${PROJECT_SOURCE_DIR}/util/testharness.cc"
        "${PROJECT_SOURCE_DIR}/util/testharness.h"
        "${PROJECT_SOURCE_DIR}/util/testutil.cc"
        "${PROJECT_SOURCE_DIR}/util/testutil.h"

        "${bench_file}"
    )
    target_link_libraries("${bench_target_name}" leveldb)
    target_compile_definitions("${bench_target_name}"
      PRIVATE
        ${LEVELDB_PLATFORM_NAME}=1
    )
    if (NOT HAVE_CXX17_HAS_INCLUDE)
      target_compile_definitions("${bench_target_name}"
        PRIVATE
          LEVELDB_HAS_PORT_CONFIG_H=1
      )
    endif(NOT HAVE_CXX17_HAS_INCLUDE)
  endfunction(leveldb_benchmark)

  if(NOT BUILD_SHARED_LIBS)
    leveldb_benchmark("${PROJECT_SOURCE_DIR}/benchmarks/db_bench.cc")
  endif(NOT BUILD_SHARED_LIBS)

  check_library_exists(sqlite3 sqlite3_open "" HAVE_SQLITE3)
  if(HAVE_SQLITE3)
    leveldb_benchmark("${PROJECT_SOURCE_DIR}/benchmarks/db_bench_sqlite3.cc")
    target_link_libraries(db_bench_sqlite3 sqlite3)
  endif(HAVE_SQLITE3)

  # check_library_exists is insufficient here because the library names have
  # different manglings when compiled with clang or gcc, at least when installed
  # with Homebrew on Mac.
  set(OLD_CMAKE_REQUIRED_LIBRARIES ${CMAKE_REQUIRED_LIBRARIES})
  list(APPEND CMAKE_REQUIRED_LIBRARIES kyotocabinet)
  check_cxx_source_compiles("
#include <kcpolydb.h>

int main() {
  kyotocabinet::TreeDB* db = new kyotocabinet::TreeDB();
  delete db;
  return 0;
}
  " HAVE_KYOTOCABINET)
  set(CMAKE_REQUIRED_LIBRARIES ${OLD_CMAKE_REQUIRED_LIBRARIES})
  if(HAVE_KYOTOCABINET)
    leveldb_benchmark("${PROJECT_SOURCE_DIR}/benchmarks/db_bench_tree_db.cc")
    target_link_libraries(db_bench_tree_db kyotocabinet)
  endif(HAVE_KYOTOCABINET)
endif(LEVELDB_BUILD_BENCHMARKS)

if(LEVELDB_INSTALL)
  include(GNUInstallDirs)
  install(TARGETS leveldb
    EXPORT leveldbTargets
    RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR}
    LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR}
    ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR}
  )
  install(
    FILES
      "${PROJECT_SOURCE_DIR}/${LEVELDB_PUBLIC_INCLUDE_DIR}/c.h"
      "${PROJECT_SOURCE_DIR}/${LEVELDB_PUBLIC_INCLUDE_DIR}/cache.h"
      "${PROJECT_SOURCE_DIR}/${LEVELDB_PUBLIC_INCLUDE_DIR}/comparator.h"
      "${PROJECT_SOURCE_DIR}/${LEVELDB_PUBLIC_INCLUDE_DIR}/db.h"
      "${PROJECT_SOURCE_DIR}/${LEVELDB_PUBLIC_INCLUDE_DIR}/dumpfile.h"
      "${PROJECT_SOURCE_DIR}/${LEVELDB_PUBLIC_INCLUDE_DIR}/env.h"
      "${PROJECT_SOURCE_DIR}/${LEVELDB_PUBLIC_INCLUDE_DIR}/export.h"
      "${PROJECT_SOURCE_DIR}/${LEVELDB_PUBLIC_INCLUDE_DIR}/filter_policy.h"
      "${PROJECT_SOURCE_DIR}/${LEVELDB_PUBLIC_INCLUDE_DIR}/iterator.h"
      "${PROJECT_SOURCE_DIR}/${LEVELDB_PUBLIC_INCLUDE_DIR}/options.h"
      "${PROJECT_SOURCE_DIR}/${LEVELDB_PUBLIC_INCLUDE_DIR}/slice.h"
      "${PROJECT_SOURCE_DIR}/${LEVELDB_PUBLIC_INCLUDE_DIR}/status.h"
      "${PROJECT_SOURCE_DIR}/${LEVELDB_PUBLIC_INCLUDE_DIR}/table_builder.h"
      "${PROJECT_SOURCE_DIR}/${LEVELDB_PUBLIC_INCLUDE_DIR}/table.h"
      "${PROJECT_SOURCE_DIR}/${LEVELDB_PUBLIC_INCLUDE_DIR}/write_batch.h"
    DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/leveldb
  )

  include(CMakePackageConfigHelpers)
  write_basic_package_version_file(
    "${PROJECT_BINARY_DIR}/leveldbConfigVersion.cmake"
    COMPATIBILITY SameMajorVersion
  )
  install(
    EXPORT leveldbTargets
    NAMESPACE leveldb::
    DESTINATION "${CMAKE_INSTALL_LIBDIR}/cmake/leveldb"
  )
  install(
    FILES
      "${PROJECT_SOURCE_DIR}/cmake/leveldbConfig.cmake"
      "${PROJECT_BINARY_DIR}/leveldbConfigVersion.cmake"
    DESTINATION "${CMAKE_INSTALL_LIBDIR}/cmake/leveldb"
  )
endif(LEVELDB_INSTALL)
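
# For reference, a downstream project can consume the installed package along
# these lines (a sketch, assuming the install prefix is visible to CMake; the
# exported target name follows from the NAMESPACE above):
#
#   find_package(leveldb REQUIRED)
#   add_executable(my_app main.cc)
#   target_link_libraries(my_app leveldb::leveldb)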
diff --git a/Makefile b/Makefile
@@ -1,231 +0,0 @@
# Copyright (c) 2011 The LevelDB Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file. See the AUTHORS file for names of contributors.

#-----------------------------------------------
# Uncomment exactly one of the lines labelled (A), (B), and (C) below
# to switch between compilation modes.

# (A) Production use (optimized mode)
OPT ?= -O2 -DNDEBUG
# (B) Debug mode, w/ full line-level debugging symbols
# OPT ?= -g2
# (C) Profiling mode: opt, but w/debugging symbols
# OPT ?= -O2 -g2 -DNDEBUG
#-----------------------------------------------

# detect what platform we're building on
$(shell CC="$(CC)" CXX="$(CXX)" TARGET_OS="$(TARGET_OS)" \
    ./build_detect_platform build_config.mk ./)
# this file is generated by the previous line to set build flags and sources
include build_config.mk

CFLAGS += -I. -I./include $(PLATFORM_CCFLAGS) $(OPT)
CXXFLAGS += -I. -I./include $(PLATFORM_CXXFLAGS) $(OPT)

LDFLAGS += $(PLATFORM_LDFLAGS)
LIBS += $(PLATFORM_LIBS)

LIBOBJECTS = $(SOURCES:.cc=.o)
MEMENVOBJECTS = $(MEMENV_SOURCES:.cc=.o)

TESTUTIL = ./util/testutil.o
TESTHARNESS = ./util/testharness.o $(TESTUTIL)

# Note: iOS should probably be using libtool, not ar.
ifeq ($(PLATFORM), IOS)
AR=xcrun ar
endif

TESTS = \
	arena_test \
	autocompact_test \
	bloom_test \
	c_test \
	cache_test \
	coding_test \
	corruption_test \
	crc32c_test \
	db_test \
	dbformat_test \
	env_test \
	fault_injection_test \
	filename_test \
	filter_block_test \
	hash_test \
	issue178_test \
	issue200_test \
	log_test \
	memenv_test \
	skiplist_test \
	table_test \
	version_edit_test \
	version_set_test \
	write_batch_test

PROGRAMS = db_bench leveldbutil $(TESTS)
BENCHMARKS = db_bench_sqlite3 db_bench_tree_db

LIBRARY = libleveldb.a
MEMENVLIBRARY = libmemenv.a

default: all

# Should we build shared libraries?
ifneq ($(PLATFORM_SHARED_EXT),)

ifneq ($(PLATFORM_SHARED_VERSIONED),true)
SHARED1 = libleveldb.$(PLATFORM_SHARED_EXT)
SHARED2 = $(SHARED1)
SHARED3 = $(SHARED1)
SHARED = $(SHARED1)
else
# Update db.h if you change these.
SHARED_MAJOR = 1
SHARED_MINOR = 18
SHARED1 = libleveldb.$(PLATFORM_SHARED_EXT)
SHARED2 = $(SHARED1).$(SHARED_MAJOR)
SHARED3 = $(SHARED1).$(SHARED_MAJOR).$(SHARED_MINOR)
SHARED = $(SHARED1) $(SHARED2) $(SHARED3)
$(SHARED1): $(SHARED3)
	ln -fs $(SHARED3) $(SHARED1)
$(SHARED2): $(SHARED3)
	ln -fs $(SHARED3) $(SHARED2)
endif

$(SHARED3):
	$(CXX) $(LDFLAGS) $(PLATFORM_SHARED_LDFLAGS)$(SHARED2) $(CXXFLAGS) $(PLATFORM_SHARED_CFLAGS) $(SOURCES) -o $(SHARED3) $(LIBS)

endif  # PLATFORM_SHARED_EXT

all: $(SHARED) $(LIBRARY)

check: all $(PROGRAMS) $(TESTS)
	for t in $(TESTS); do echo "***** Running $$t"; ./$$t || exit 1; done

clean:
	-rm -f $(PROGRAMS) $(BENCHMARKS) $(LIBRARY) $(SHARED) $(MEMENVLIBRARY) */*.o */*/*.o ios-x86/*/*.o ios-arm/*/*.o build_config.mk
	-rm -rf ios-x86/* ios-arm/*

$(LIBRARY): $(LIBOBJECTS)
	rm -f $@
	$(AR) -rs $@ $(LIBOBJECTS)

db_bench: db/db_bench.o $(LIBOBJECTS) $(TESTUTIL)
	$(CXX) $(LDFLAGS) db/db_bench.o $(LIBOBJECTS) $(TESTUTIL) -o $@ $(LIBS)

db_bench_sqlite3: doc/bench/db_bench_sqlite3.o $(LIBOBJECTS) $(TESTUTIL)
	$(CXX) $(LDFLAGS) doc/bench/db_bench_sqlite3.o $(LIBOBJECTS) $(TESTUTIL) -o $@ -lsqlite3 $(LIBS)

db_bench_tree_db: doc/bench/db_bench_tree_db.o $(LIBOBJECTS) $(TESTUTIL)
	$(CXX) $(LDFLAGS) doc/bench/db_bench_tree_db.o $(LIBOBJECTS) $(TESTUTIL) -o $@ -lkyotocabinet $(LIBS)

leveldbutil: db/leveldb_main.o $(LIBOBJECTS)
	$(CXX) $(LDFLAGS) db/leveldb_main.o $(LIBOBJECTS) -o $@ $(LIBS)

arena_test: util/arena_test.o $(LIBOBJECTS) $(TESTHARNESS)
	$(CXX) $(LDFLAGS) util/arena_test.o $(LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)

autocompact_test: db/autocompact_test.o $(LIBOBJECTS) $(TESTHARNESS)
	$(CXX) $(LDFLAGS) db/autocompact_test.o $(LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)

bloom_test: util/bloom_test.o $(LIBOBJECTS) $(TESTHARNESS)
	$(CXX) $(LDFLAGS) util/bloom_test.o $(LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)

c_test: db/c_test.o $(LIBOBJECTS) $(TESTHARNESS)
	$(CXX) $(LDFLAGS) db/c_test.o $(LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)

cache_test: util/cache_test.o $(LIBOBJECTS) $(TESTHARNESS)
	$(CXX) $(LDFLAGS) util/cache_test.o $(LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)

coding_test: util/coding_test.o $(LIBOBJECTS) $(TESTHARNESS)
	$(CXX) $(LDFLAGS) util/coding_test.o $(LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)

corruption_test: db/corruption_test.o $(LIBOBJECTS) $(TESTHARNESS)
	$(CXX) $(LDFLAGS) db/corruption_test.o $(LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)

crc32c_test: util/crc32c_test.o $(LIBOBJECTS) $(TESTHARNESS)
	$(CXX) $(LDFLAGS) util/crc32c_test.o $(LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)

db_test: db/db_test.o $(LIBOBJECTS) $(TESTHARNESS)
	$(CXX) $(LDFLAGS) db/db_test.o $(LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)

dbformat_test: db/dbformat_test.o $(LIBOBJECTS) $(TESTHARNESS)
	$(CXX) $(LDFLAGS) db/dbformat_test.o $(LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)

env_test: util/env_test.o $(LIBOBJECTS) $(TESTHARNESS)
	$(CXX) $(LDFLAGS) util/env_test.o $(LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)

fault_injection_test: db/fault_injection_test.o $(LIBOBJECTS) $(TESTHARNESS)
	$(CXX) $(LDFLAGS) db/fault_injection_test.o $(LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)

filename_test: db/filename_test.o $(LIBOBJECTS) $(TESTHARNESS)
	$(CXX) $(LDFLAGS) db/filename_test.o $(LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)

filter_block_test: table/filter_block_test.o $(LIBOBJECTS) $(TESTHARNESS)
	$(CXX) $(LDFLAGS) table/filter_block_test.o $(LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)

hash_test: util/hash_test.o $(LIBOBJECTS) $(TESTHARNESS)
	$(CXX) $(LDFLAGS) util/hash_test.o $(LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)

issue178_test: issues/issue178_test.o $(LIBOBJECTS) $(TESTHARNESS)
	$(CXX) $(LDFLAGS) issues/issue178_test.o $(LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)

issue200_test: issues/issue200_test.o $(LIBOBJECTS) $(TESTHARNESS)
	$(CXX) $(LDFLAGS) issues/issue200_test.o $(LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)

log_test: db/log_test.o $(LIBOBJECTS) $(TESTHARNESS)
	$(CXX) $(LDFLAGS) db/log_test.o $(LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)

table_test: table/table_test.o $(LIBOBJECTS) $(TESTHARNESS)
	$(CXX) $(LDFLAGS) table/table_test.o $(LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)

skiplist_test: db/skiplist_test.o $(LIBOBJECTS) $(TESTHARNESS)
	$(CXX) $(LDFLAGS) db/skiplist_test.o $(LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)

version_edit_test: db/version_edit_test.o $(LIBOBJECTS) $(TESTHARNESS)
	$(CXX) $(LDFLAGS) db/version_edit_test.o $(LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)

version_set_test: db/version_set_test.o $(LIBOBJECTS) $(TESTHARNESS)
	$(CXX) $(LDFLAGS) db/version_set_test.o $(LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)

write_batch_test: db/write_batch_test.o $(LIBOBJECTS) $(TESTHARNESS)
	$(CXX) $(LDFLAGS) db/write_batch_test.o $(LIBOBJECTS) $(TESTHARNESS) -o $@ $(LIBS)

$(MEMENVLIBRARY) : $(MEMENVOBJECTS)
	rm -f $@
	$(AR) -rs $@ $(MEMENVOBJECTS)

memenv_test : helpers/memenv/memenv_test.o $(MEMENVLIBRARY) $(LIBRARY) $(TESTHARNESS)
	$(CXX) $(LDFLAGS) helpers/memenv/memenv_test.o $(MEMENVLIBRARY) $(LIBRARY) $(TESTHARNESS) -o $@ $(LIBS)

ifeq ($(PLATFORM), IOS)
# For iOS, create universal object files to be used on both the simulator and
# a device.
PLATFORMSROOT=/Applications/Xcode.app/Contents/Developer/Platforms
SIMULATORROOT=$(PLATFORMSROOT)/iPhoneSimulator.platform/Developer
DEVICEROOT=$(PLATFORMSROOT)/iPhoneOS.platform/Developer
IOSVERSION=$(shell defaults read $(PLATFORMSROOT)/iPhoneOS.platform/version CFBundleShortVersionString)
IOSARCH=-arch armv6 -arch armv7 -arch armv7s -arch arm64

.cc.o:
	mkdir -p ios-x86/$(dir $@)
	xcrun -sdk iphonesimulator $(CXX) $(CXXFLAGS) -isysroot $(SIMULATORROOT)/SDKs/iPhoneSimulator$(IOSVERSION).sdk -arch i686 -arch x86_64 -c $< -o ios-x86/$@
	mkdir -p ios-arm/$(dir $@)
	xcrun -sdk iphoneos $(CXX) $(CXXFLAGS) -isysroot $(DEVICEROOT)/SDKs/iPhoneOS$(IOSVERSION).sdk $(IOSARCH) -c $< -o ios-arm/$@
	xcrun lipo ios-x86/$@ ios-arm/$@ -create -output $@

.c.o:
	mkdir -p ios-x86/$(dir $@)
	xcrun -sdk iphonesimulator $(CC) $(CFLAGS) -isysroot $(SIMULATORROOT)/SDKs/iPhoneSimulator$(IOSVERSION).sdk -arch i686 -arch x86_64 -c $< -o ios-x86/$@
	mkdir -p ios-arm/$(dir $@)
	xcrun -sdk iphoneos $(CC) $(CFLAGS) -isysroot $(DEVICEROOT)/SDKs/iPhoneOS$(IOSVERSION).sdk $(IOSARCH) -c $< -o ios-arm/$@
	xcrun lipo ios-x86/$@ ios-arm/$@ -create -output $@

else
.cc.o:
	$(CXX) $(CXXFLAGS) -c $< -o $@

.c.o:
	$(CC) $(CFLAGS) -c $< -o $@
endif
diff --git a/build_detect_platform b/build_detect_platform
@@ -1,228 +0,0 @@
#!/bin/sh
#
# Detects OS we're compiling on and outputs a file specified by the first
# argument, which in turn gets read while processing Makefile.
#
# The output will set the following variables:
#   CC                          C Compiler path
#   CXX                         C++ Compiler path
#   PLATFORM_LDFLAGS            Linker flags
#   PLATFORM_LIBS               Libraries flags
#   PLATFORM_SHARED_EXT         Extension for shared libraries
#   PLATFORM_SHARED_LDFLAGS     Flags for building shared library
#                               This flag is embedded just before the name
#                               of the shared library without intervening spaces
#   PLATFORM_SHARED_CFLAGS      Flags for compiling objects for shared library
#   PLATFORM_CCFLAGS            C compiler flags
#   PLATFORM_CXXFLAGS           C++ compiler flags
#   PLATFORM_SHARED_VERSIONED   Set to 'true' if platform supports versioned
#                               shared libraries, empty otherwise.
#
# The PLATFORM_CCFLAGS and PLATFORM_CXXFLAGS might include the following:
#
#   -DLEVELDB_ATOMIC_PRESENT     if <atomic> is present
#   -DLEVELDB_PLATFORM_POSIX     for Posix-based platforms
#   -DSNAPPY                     if the Snappy library is present
#
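# For illustration, on a typical Linux host with a g++ toolchain and Snappy
# installed, the generated build_config.mk would contain lines along these
# lines (a sketch; exact values depend on the toolchain and installed
# libraries, and SOURCES/MEMENV_SOURCES lists are also emitted):
#
#   CC=cc
#   CXX=g++
#   PLATFORM=OS_LINUX
#   PLATFORM_LDFLAGS=-pthread
#   PLATFORM_LIBS= -lsnappy
#   PLATFORM_CCFLAGS= -fno-builtin-memcmp -pthread -DOS_LINUX -DLEVELDB_PLATFORM_POSIX -DLEVELDB_ATOMIC_PRESENT -DSNAPPY
#   PLATFORM_CXXFLAGS=-std=c++0x -fno-builtin-memcmp -pthread -DOS_LINUX -DLEVELDB_PLATFORM_POSIX -DLEVELDB_ATOMIC_PRESENT -DSNAPPY
#   PLATFORM_SHARED_CFLAGS=-fPIC
#   PLATFORM_SHARED_EXT=so
#   PLATFORM_SHARED_LDFLAGS=-shared -Wl,-soname -Wl,
#   PLATFORM_SHARED_VERSIONED=true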
OUTPUT=$1
PREFIX=$2
if test -z "$OUTPUT" || test -z "$PREFIX"; then
  echo "usage: $0 <output-filename> <directory_prefix>" >&2
  exit 1
fi

# Delete existing output, if it exists
rm -f $OUTPUT
touch $OUTPUT

if test -z "$CC"; then
    CC=cc
fi

if test -z "$CXX"; then
    CXX=g++
fi

if test -z "$TMPDIR"; then
    TMPDIR=/tmp
fi

# Detect OS
if test -z "$TARGET_OS"; then
    TARGET_OS=`uname -s`
fi

COMMON_FLAGS=
CROSS_COMPILE=
PLATFORM_CCFLAGS=
PLATFORM_CXXFLAGS=
PLATFORM_LDFLAGS=
PLATFORM_LIBS=
PLATFORM_SHARED_EXT="so"
PLATFORM_SHARED_LDFLAGS="-shared -Wl,-soname -Wl,"
PLATFORM_SHARED_CFLAGS="-fPIC"
PLATFORM_SHARED_VERSIONED=true

MEMCMP_FLAG=
if [ "$CXX" = "g++" ]; then
    # Use libc's memcmp instead of GCC's memcmp. This results in ~40%
    # performance improvement on readrandom under gcc 4.4.3 on Linux/x86.
    MEMCMP_FLAG="-fno-builtin-memcmp"
fi

case "$TARGET_OS" in
    CYGWIN_*)
        PLATFORM=OS_LINUX
        COMMON_FLAGS="$MEMCMP_FLAG -lpthread -DOS_LINUX -DCYGWIN"
        PLATFORM_LDFLAGS="-lpthread"
        PORT_FILE=port/port_posix.cc
        ;;
    Darwin)
        PLATFORM=OS_MACOSX
        COMMON_FLAGS="$MEMCMP_FLAG -DOS_MACOSX"
        PLATFORM_SHARED_EXT=dylib
        [ -z "$INSTALL_PATH" ] && INSTALL_PATH=`pwd`
        PLATFORM_SHARED_LDFLAGS="-dynamiclib -install_name $INSTALL_PATH/"
        PORT_FILE=port/port_posix.cc
        ;;
    Linux)
        PLATFORM=OS_LINUX
        COMMON_FLAGS="$MEMCMP_FLAG -pthread -DOS_LINUX"
        PLATFORM_LDFLAGS="-pthread"
        PORT_FILE=port/port_posix.cc
        ;;
    SunOS)
        PLATFORM=OS_SOLARIS
        COMMON_FLAGS="$MEMCMP_FLAG -D_REENTRANT -DOS_SOLARIS"
        PLATFORM_LIBS="-lpthread -lrt"
        PORT_FILE=port/port_posix.cc
        ;;
    FreeBSD)
        PLATFORM=OS_FREEBSD
        COMMON_FLAGS="$MEMCMP_FLAG -D_REENTRANT -DOS_FREEBSD"
        PLATFORM_LIBS="-lpthread"
        PORT_FILE=port/port_posix.cc
        ;;
    NetBSD)
        PLATFORM=OS_NETBSD
        COMMON_FLAGS="$MEMCMP_FLAG -D_REENTRANT -DOS_NETBSD"
        PLATFORM_LIBS="-lpthread -lgcc_s"
        PORT_FILE=port/port_posix.cc
        ;;
    OpenBSD)
        PLATFORM=OS_OPENBSD
        COMMON_FLAGS="$MEMCMP_FLAG -D_REENTRANT -DOS_OPENBSD"
        PLATFORM_LDFLAGS="-pthread"
        PORT_FILE=port/port_posix.cc
        ;;
    DragonFly)
        PLATFORM=OS_DRAGONFLYBSD
        COMMON_FLAGS="$MEMCMP_FLAG -D_REENTRANT -DOS_DRAGONFLYBSD"
        PLATFORM_LIBS="-lpthread"
        PORT_FILE=port/port_posix.cc
        ;;
    OS_ANDROID_CROSSCOMPILE)
        PLATFORM=OS_ANDROID
        COMMON_FLAGS="$MEMCMP_FLAG -D_REENTRANT -DOS_ANDROID -DLEVELDB_PLATFORM_POSIX"
        PLATFORM_LDFLAGS=""  # All pthread features are in the Android C library
        PORT_FILE=port/port_posix.cc
        CROSS_COMPILE=true
        ;;
    HP-UX)
        PLATFORM=OS_HPUX
        COMMON_FLAGS="$MEMCMP_FLAG -D_REENTRANT -DOS_HPUX"
        PLATFORM_LDFLAGS="-pthread"
        PORT_FILE=port/port_posix.cc
        # man ld: +h internal_name
        PLATFORM_SHARED_LDFLAGS="-shared -Wl,+h -Wl,"
        ;;
    IOS)
        PLATFORM=IOS
        COMMON_FLAGS="$MEMCMP_FLAG -DOS_MACOSX"
        [ -z "$INSTALL_PATH" ] && INSTALL_PATH=`pwd`
        PORT_FILE=port/port_posix.cc
        PLATFORM_SHARED_EXT=
        PLATFORM_SHARED_LDFLAGS=
        PLATFORM_SHARED_CFLAGS=
        PLATFORM_SHARED_VERSIONED=
        ;;
    *)
        echo "Unknown platform!" >&2
        exit 1
esac

# We want to make a list of all cc files within util, db, table, and helpers
# except for the test and benchmark files. By default, find will output a list
# of all files matching either rule, so we need to append -print to make the
# prune take effect.
DIRS="$PREFIX/db $PREFIX/util $PREFIX/table"

set -f  # temporarily disable globbing so that our patterns aren't expanded
PRUNE_TEST="-name *test*.cc -prune"
PRUNE_BENCH="-name *_bench.cc -prune"
PRUNE_TOOL="-name leveldb_main.cc -prune"
PORTABLE_FILES=`find $DIRS $PRUNE_TEST -o $PRUNE_BENCH -o $PRUNE_TOOL -o -name '*.cc' -print | sort | sed "s,^$PREFIX/,," | tr "\n" " "`
set +f  # re-enable globbing

# The sources consist of the portable files, plus the platform-specific port
# file.
echo "SOURCES=$PORTABLE_FILES $PORT_FILE" >> $OUTPUT
echo "MEMENV_SOURCES=helpers/memenv/memenv.cc" >> $OUTPUT

if [ "$CROSS_COMPILE" = "true" ]; then
    # Cross-compiling; do not try any compilation tests.
    true
else
    CXXOUTPUT="${TMPDIR}/leveldb_build_detect_platform-cxx.$$"

    # If -std=c++0x works, use <atomic> as fallback for when memory barriers
    # are not available.
    $CXX $CXXFLAGS -std=c++0x -x c++ - -o $CXXOUTPUT 2>/dev/null <<EOF
      #include <atomic>
      int main() {}
EOF
    if [ "$?" = 0 ]; then
        COMMON_FLAGS="$COMMON_FLAGS -DLEVELDB_PLATFORM_POSIX -DLEVELDB_ATOMIC_PRESENT"
        PLATFORM_CXXFLAGS="-std=c++0x"
    else
        COMMON_FLAGS="$COMMON_FLAGS -DLEVELDB_PLATFORM_POSIX"
    fi

    # Test whether Snappy library is installed
    # http://code.google.com/p/snappy/
    $CXX $CXXFLAGS -x c++ - -o $CXXOUTPUT 2>/dev/null <<EOF
      #include <snappy.h>
      int main() {}
EOF
    if [ "$?" = 0 ]; then
        COMMON_FLAGS="$COMMON_FLAGS -DSNAPPY"
        PLATFORM_LIBS="$PLATFORM_LIBS -lsnappy"
    fi

    # Test whether tcmalloc is available
    $CXX $CXXFLAGS -x c++ - -o $CXXOUTPUT -ltcmalloc 2>/dev/null <<EOF
      int main() {}
EOF
    if [ "$?" = 0 ]; then
        PLATFORM_LIBS="$PLATFORM_LIBS -ltcmalloc"
    fi

    rm -f $CXXOUTPUT 2>/dev/null
fi

PLATFORM_CCFLAGS="$PLATFORM_CCFLAGS $COMMON_FLAGS"
PLATFORM_CXXFLAGS="$PLATFORM_CXXFLAGS $COMMON_FLAGS"

echo "CC=$CC" >> $OUTPUT
echo "CXX=$CXX" >> $OUTPUT
echo "PLATFORM=$PLATFORM" >> $OUTPUT
echo "PLATFORM_LDFLAGS=$PLATFORM_LDFLAGS" >> $OUTPUT
echo "PLATFORM_LIBS=$PLATFORM_LIBS" >> $OUTPUT
echo "PLATFORM_CCFLAGS=$PLATFORM_CCFLAGS" >> $OUTPUT
echo "PLATFORM_CXXFLAGS=$PLATFORM_CXXFLAGS" >> $OUTPUT
echo "PLATFORM_SHARED_CFLAGS=$PLATFORM_SHARED_CFLAGS" >> $OUTPUT
echo "PLATFORM_SHARED_EXT=$PLATFORM_SHARED_EXT" >> $OUTPUT
echo "PLATFORM_SHARED_LDFLAGS=$PLATFORM_SHARED_LDFLAGS" >> $OUTPUT
echo "PLATFORM_SHARED_VERSIONED=$PLATFORM_SHARED_VERSIONED" >> $OUTPUT
diff --git a/cmake/leveldbConfig.cmake b/cmake/leveldbConfig.cmake
@@ -0,0 +1 @@
include("${CMAKE_CURRENT_LIST_DIR}/leveldbTargets.cmake")
diff --git a/db/recovery_test.cc b/db/recovery_test.cc
@@ -0,0 +1,330 @@
// Copyright (c) 2014 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#include "db/db_impl.h"
#include "db/filename.h"
#include "db/version_set.h"
#include "db/write_batch_internal.h"
#include "leveldb/db.h"
#include "leveldb/env.h"
#include "leveldb/write_batch.h"
#include "util/logging.h"
#include "util/testharness.h"
#include "util/testutil.h"

namespace leveldb {

class RecoveryTest {
 public:
  RecoveryTest() : env_(Env::Default()), db_(nullptr) {
    dbname_ = test::TmpDir() + "/recovery_test";
    DestroyDB(dbname_, Options());
    Open();
  }

  ~RecoveryTest() {
    Close();
    DestroyDB(dbname_, Options());
  }

  DBImpl* dbfull() const { return reinterpret_cast<DBImpl*>(db_); }
  Env* env() const { return env_; }

  bool CanAppend() {
    WritableFile* tmp;
    Status s = env_->NewAppendableFile(CurrentFileName(dbname_), &tmp);
    delete tmp;
    if (s.IsNotSupportedError()) {
      return false;
    } else {
      return true;
    }
  }

  void Close() {
    delete db_;
    db_ = nullptr;
  }

  Status OpenWithStatus(Options* options = nullptr) {
    Close();
    Options opts;
    if (options != nullptr) {
      opts = *options;
    } else {
      opts.reuse_logs = true;  // TODO(sanjay): test both ways
      opts.create_if_missing = true;
    }
    if (opts.env == nullptr) {
      opts.env = env_;
    }
    return DB::Open(opts, dbname_, &db_);
  }

  void Open(Options* options = nullptr) {
    ASSERT_OK(OpenWithStatus(options));
    ASSERT_EQ(1, NumLogs());
  }

  Status Put(const std::string& k, const std::string& v) {
    return db_->Put(WriteOptions(), k, v);
  }

  std::string Get(const std::string& k, const Snapshot* snapshot = nullptr) {
    std::string result;
    Status s = db_->Get(ReadOptions(), k, &result);
    if (s.IsNotFound()) {
      result = "NOT_FOUND";
    } else if (!s.ok()) {
      result = s.ToString();
    }
    return result;
  }

  std::string ManifestFileName() {
    std::string current;
    ASSERT_OK(ReadFileToString(env_, CurrentFileName(dbname_), &current));
    size_t len = current.size();
    if (len > 0 && current[len - 1] == '\n') {
      current.resize(len - 1);
    }
    return dbname_ + "/" + current;
  }

  std::string LogName(uint64_t number) { return LogFileName(dbname_, number); }

  size_t DeleteLogFiles() {
    // Linux allows unlinking open files, but Windows does not.
    // Closing the db allows for file deletion.
    Close();
    std::vector<uint64_t> logs = GetFiles(kLogFile);
    for (size_t i = 0; i < logs.size(); i++) {
      ASSERT_OK(env_->DeleteFile(LogName(logs[i]))) << LogName(logs[i]);
    }
    return logs.size();
  }

  void DeleteManifestFile() { ASSERT_OK(env_->DeleteFile(ManifestFileName())); }

  uint64_t FirstLogFile() { return GetFiles(kLogFile)[0]; }

  std::vector<uint64_t> GetFiles(FileType t) {
    std::vector<std::string> filenames;
    ASSERT_OK(env_->GetChildren(dbname_, &filenames));
    std::vector<uint64_t> result;
    for (size_t i = 0; i < filenames.size(); i++) {
      uint64_t number;
      FileType type;
      if (ParseFileName(filenames[i], &number, &type) && type == t) {
        result.push_back(number);
      }
    }
    return result;
  }

  int NumLogs() { return GetFiles(kLogFile).size(); }
  int NumTables() { return GetFiles(kTableFile).size(); }

  uint64_t FileSize(const std::string& fname) {
    uint64_t result;
    ASSERT_OK(env_->GetFileSize(fname, &result)) << fname;
    return result;
  }

  void CompactMemTable() { dbfull()->TEST_CompactMemTable(); }

  // Directly construct a log file that sets key to val.
  void MakeLogFile(uint64_t lognum, SequenceNumber seq, Slice key, Slice val) {
    std::string fname = LogFileName(dbname_, lognum);
    WritableFile* file;
    ASSERT_OK(env_->NewWritableFile(fname, &file));
    log::Writer writer(file);
    WriteBatch batch;
    batch.Put(key, val);
    WriteBatchInternal::SetSequence(&batch, seq);
    ASSERT_OK(writer.AddRecord(WriteBatchInternal::Contents(&batch)));
    ASSERT_OK(file->Flush());
    delete file;
  }

 private:
  std::string dbname_;
  Env* env_;
  DB* db_;
};

TEST(RecoveryTest, ManifestReused) {
  if (!CanAppend()) {
    fprintf(stderr, "skipping test because env does not support appending\n");
    return;
  }
  ASSERT_OK(Put("foo", "bar"));
  Close();
  std::string old_manifest = ManifestFileName();
  Open();
  ASSERT_EQ(old_manifest, ManifestFileName());
  ASSERT_EQ("bar", Get("foo"));
  Open();
  ASSERT_EQ(old_manifest, ManifestFileName());
  ASSERT_EQ("bar", Get("foo"));
}

TEST(RecoveryTest, LargeManifestCompacted) {
  if (!CanAppend()) {
    fprintf(stderr, "skipping test because env does not support appending\n");
    return;
  }
  ASSERT_OK(Put("foo", "bar"));
  Close();
  std::string old_manifest = ManifestFileName();

  // Pad with zeroes to make manifest file very big.
  {
    uint64_t len = FileSize(old_manifest);
    WritableFile* file;
    ASSERT_OK(env()->NewAppendableFile(old_manifest, &file));
    std::string zeroes(3 * 1048576 - static_cast<size_t>(len), 0);
    ASSERT_OK(file->Append(zeroes));
    ASSERT_OK(file->Flush());
    delete file;
  }

  Open();
  std::string new_manifest = ManifestFileName();
  ASSERT_NE(old_manifest, new_manifest);
  ASSERT_GT(10000, FileSize(new_manifest));
  ASSERT_EQ("bar", Get("foo"));

  Open();
  ASSERT_EQ(new_manifest, ManifestFileName());
  ASSERT_EQ("bar", Get("foo"));
}

TEST(RecoveryTest, NoLogFiles) {
  ASSERT_OK(Put("foo", "bar"));
  ASSERT_EQ(1, DeleteLogFiles());
  Open();
  ASSERT_EQ("NOT_FOUND", Get("foo"));
  Open();
  ASSERT_EQ("NOT_FOUND", Get("foo"));
}

TEST(RecoveryTest, LogFileReuse) {
  if (!CanAppend()) {
    fprintf(stderr, "skipping test because env does not support appending\n");
    return;
  }
  for (int i = 0; i < 2; i++) {
    ASSERT_OK(Put("foo", "bar"));
    if (i == 0) {
      // Compact to ensure current log is empty
      CompactMemTable();
    }
    Close();
    ASSERT_EQ(1, NumLogs());
    uint64_t number = FirstLogFile();
    if (i == 0) {
      ASSERT_EQ(0, FileSize(LogName(number)));
    } else {
      ASSERT_LT(0, FileSize(LogName(number)));
    }
    Open();
    ASSERT_EQ(1, NumLogs());
    ASSERT_EQ(number, FirstLogFile()) << "did not reuse log file";
    ASSERT_EQ("bar", Get("foo"));
    Open();
    ASSERT_EQ(1, NumLogs());
    ASSERT_EQ(number, FirstLogFile()) << "did not reuse log file";
    ASSERT_EQ("bar", Get("foo"));
  }
}

TEST(RecoveryTest, MultipleMemTables) {
  // Make a large log.
  const int kNum = 1000;
  for (int i = 0; i < kNum; i++) {
    char buf[100];
    snprintf(buf, sizeof(buf), "%050d", i);
    ASSERT_OK(Put(buf, buf));
  }
  ASSERT_EQ(0, NumTables());
  Close();
  ASSERT_EQ(0, NumTables());
  ASSERT_EQ(1, NumLogs());
  uint64_t old_log_file = FirstLogFile();

  // Force creation of multiple memtables by reducing the write buffer size.
  Options opt;
  opt.reuse_logs = true;
  opt.write_buffer_size = (kNum * 100) / 2;
  Open(&opt);
  ASSERT_LE(2, NumTables());
  ASSERT_EQ(1, NumLogs());
  ASSERT_NE(old_log_file, FirstLogFile()) << "must not reuse log";
  for (int i = 0; i < kNum; i++) {
    char buf[100];
    snprintf(buf, sizeof(buf), "%050d", i);
    ASSERT_EQ(buf, Get(buf));
  }
}

TEST(RecoveryTest, MultipleLogFiles) {
  ASSERT_OK(Put("foo", "bar"));
  Close();
  ASSERT_EQ(1, NumLogs());

  // Make a bunch of uncompacted log files.
  uint64_t old_log = FirstLogFile();
  MakeLogFile(old_log + 1, 1000, "hello", "world");
  MakeLogFile(old_log + 2, 1001, "hi", "there");
  MakeLogFile(old_log + 3, 1002, "foo", "bar2");

  // Recover and check that all log files were processed.
  Open();
  ASSERT_LE(1, NumTables());
  ASSERT_EQ(1, NumLogs());
  uint64_t new_log = FirstLogFile();
  ASSERT_LE(old_log + 3, new_log);
  ASSERT_EQ("bar2", Get("foo"));
  ASSERT_EQ("world", Get("hello"));
  ASSERT_EQ("there", Get("hi"));

  // Test that previous recovery produced recoverable state.
  Open();
  ASSERT_LE(1, NumTables());
  ASSERT_EQ(1, NumLogs());
  if (CanAppend()) {
    ASSERT_EQ(new_log, FirstLogFile());
  }
  ASSERT_EQ("bar2", Get("foo"));
  ASSERT_EQ("world", Get("hello"));
  ASSERT_EQ("there", Get("hi"));

  // Check that introducing an older log file does not cause it to be re-read.
  Close();
  MakeLogFile(old_log + 1, 2000, "hello", "stale write");
  Open();
  ASSERT_LE(1, NumTables());
  ASSERT_EQ(1, NumLogs());
  if (CanAppend()) {
    ASSERT_EQ(new_log, FirstLogFile());
  }
  ASSERT_EQ("bar2", Get("foo"));
  ASSERT_EQ("world", Get("hello"));
  ASSERT_EQ("there", Get("hi"));
}

TEST(RecoveryTest, ManifestMissing) {
  ASSERT_OK(Put("foo", "bar"));
  Close();
  DeleteManifestFile();

  Status status = OpenWithStatus();
  ASSERT_TRUE(status.IsCorruption());
}

}  // namespace leveldb

int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
diff --git a/doc/doc.css b/doc/doc.css
@@ -1,89 +0,0 @@
body {
  margin-left: 0.5in;
  margin-right: 0.5in;
  background: white;
  color: black;
}

h1 {
  margin-left: -0.2in;
  font-size: 14pt;
}
h2 {
  margin-left: -0in;
  font-size: 12pt;
}
h3 {
  margin-left: -0in;
}
h4 {
  margin-left: -0in;
}
hr {
  margin-left: -0in;
}

/* Definition lists: definition term bold */
dt {
  font-weight: bold;
}

address {
  text-align: center;
}

code,samp,var {
  color: blue;
}
kbd {
  color: #600000;
}

div.note p {
  float: right;
  width: 3in;
  margin-right: 0%;
  padding: 1px;
  border: 2px solid #6060a0;
  background-color: #fffff0;
}

ul {
  margin-top: -0em;
  margin-bottom: -0em;
}

ol {
  margin-top: -0em;
  margin-bottom: -0em;
}

UL.nobullets {
  list-style-type: none;
  list-style-image: none;
  margin-left: -1em;
}

p {
  margin: 1em 0 1em 0;
  padding: 0 0 0 0;
}

pre {
  line-height: 1.3em;
  padding: 0.4em 0 0.8em 0;
  margin: 0 0 0 0;
  border: 0 0 0 0;
  color: blue;
}

.datatable {
  margin-left: auto;
  margin-right: auto;
  margin-top: 2em;
  margin-bottom: 2em;
  border: 1px solid;
}

.datatable td,th {
  padding: 0 0.5em 0 0.5em;
  text-align: right;
}
diff --git a/doc/impl.html b/doc/impl.html
@@ -1,213 +0,0 @@
<!DOCTYPE html>
<html>
<head>
<link rel="stylesheet" type="text/css" href="doc.css" />
<title>Leveldb file layout and compactions</title>
</head>

<body>

<h1>Files</h1>

The implementation of leveldb is similar in spirit to the
representation of a single
<a href="http://research.google.com/archive/bigtable.html">
Bigtable tablet (section 5.3)</a>.
However, the organization of the files that make up the representation
is somewhat different and is explained below.

<p>
Each database is represented by a set of files stored in a directory.
There are several different types of files, as documented below:
<p>

<h2>Log files</h2>
<p>
A log file (*.log) stores a sequence of recent updates.  Each update
is appended to the current log file.  When the log file reaches a
pre-determined size (approximately 4MB by default), it is converted
to a sorted table (see below) and a new log file is created for future
updates.
<p>
A copy of the current log file is kept in an in-memory structure (the
<code>memtable</code>).  This copy is consulted on every read so that read
operations reflect all logged updates.
<p>
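For illustration, a minimal sketch of the write path as seen through the
standard public API; each <code>Put()</code> below is appended to the current
log file and applied to the memtable before the call returns:
<pre>
  #include &lt;cassert&gt;
  #include "leveldb/db.h"

  leveldb::DB* db;
  leveldb::Options options;
  options.create_if_missing = true;
  leveldb::Status status = leveldb::DB::Open(options, "/tmp/testdb", &amp;db);
  assert(status.ok());
  // Logged first, then inserted into the memtable.
  status = db->Put(leveldb::WriteOptions(), "key", "value");
  delete db;
</pre>
<p>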
<h2>Sorted tables</h2>
<p>
A sorted table (*.sst) stores a sequence of entries sorted by key.
Each entry is either a value for the key, or a deletion marker for the
key.  (Deletion markers are kept around to hide obsolete values
present in older sorted tables).
<p>
The set of sorted tables is organized into a sequence of levels.  The
sorted table generated from a log file is placed in a special <code>young</code>
level (also called level-0).  When the number of young files exceeds a
certain threshold (currently four), all of the young files are merged
together with all of the overlapping level-1 files to produce a
sequence of new level-1 files (we create a new level-1 file for every
2MB of data.)
<p>
Files in the young level may contain overlapping keys.  However, files
in other levels have distinct non-overlapping key ranges.  Consider
level number L where L >= 1.  When the combined size of files in
level-L exceeds (10^L) MB (i.e., 10MB for level-1, 100MB for level-2,
...), one file in level-L, and all of the overlapping files in
level-(L+1) are merged to form a set of new files for level-(L+1).
These merges have the effect of gradually migrating new updates from
the young level to the largest level using only bulk reads and writes
(i.e., minimizing expensive seeks).
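<p>
A minimal sketch of the size threshold just described, assuming only the
10x-per-level rule stated above (the authoritative computation lives in the
version management code):
<pre>
  // Returns the byte limit for level L (L >= 1): (10^L) MB.
  static double MaxBytesForLevel(int level) {
    double result = 10.0 * 1048576.0;  // 10MB limit for level-1.
    while (level > 1) {
      result *= 10;
      level--;
    }
    return result;
  }
</pre>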
<h2>Manifest</h2>
<p>
A MANIFEST file lists the set of sorted tables that make up each
level, the corresponding key ranges, and other important metadata.
A new MANIFEST file (with a new number embedded in the file name)
is created whenever the database is reopened.  The MANIFEST file is
formatted as a log, and changes made to the serving state (as files
are added or removed) are appended to this log.
<p>

<h2>Current</h2>
<p>
CURRENT is a simple text file that contains the name of the latest
MANIFEST file.
<p>

<h2>Info logs</h2>
<p>
Informational messages are printed to files named LOG and LOG.old.
<p>

<h2>Others</h2>
<p>
Other files used for miscellaneous purposes may also be present
(LOCK, *.dbtmp).

<h1>Level 0</h1>
When the log file grows above a certain size (1MB by default), we make the
following switch (see the sketch after this list):
<ul>
<li>Create a brand new memtable and log file and direct future updates here
<li>In the background:
<ul>
<li>Write the contents of the previous memtable to an sstable
<li>Discard the memtable
<li>Delete the old log file and the old memtable
<li>Add the new sstable to the young (level-0) level.
</ul>
</ul>
| <h1>Compactions</h1> | |||
| <p> | |||
| When the size of level L exceeds its limit, we compact it in a | |||
| background thread. The compaction picks a file from level L and all | |||
| overlapping files from the next level L+1. Note that if a level-L | |||
| file overlaps only part of a level-(L+1) file, the entire file at | |||
| level-(L+1) is used as an input to the compaction and will be | |||
| discarded after the compaction. Aside: because level-0 is special | |||
| (files in it may overlap each other), we treat compactions from | |||
| level-0 to level-1 specially: a level-0 compaction may pick more than | |||
| one level-0 file in case some of these files overlap each other. | |||
| <p> | |||
| A compaction merges the contents of the picked files to produce a | |||
| sequence of level-(L+1) files. We switch to producing a new | |||
| level-(L+1) file after the current output file has reached the target | |||
| file size (2MB). We also switch to a new output file when the key | |||
| range of the current output file has grown enough to overlap more than | |||
| ten level-(L+2) files. This last rule ensures that a later compaction | |||
| of a level-(L+1) file will not pick up too much data from level-(L+2). | |||
| <p> | |||
| The old files are discarded and the new files are added to the serving | |||
| state. | |||
| <p> | |||
| Compactions for a particular level rotate through the key space. In | |||
| more detail, for each level L, we remember the ending key of the last | |||
| compaction at level L. The next compaction for level L will pick the | |||
| first file that starts after this key (wrapping around to the | |||
| beginning of the key space if there is no such file). | |||
| <p> | |||
| Compactions drop overwritten values. They also drop deletion markers | |||
| if there are no higher numbered levels that contain a file whose range | |||
| overlaps the current key. | |||
| <h2>Timing</h2> | |||
| Level-0 compactions will read up to four 1MB files from level-0, and | |||
| at worst all the level-1 files (10MB). I.e., we will read 14MB and | |||
| write 14MB. | |||
| <p> | |||
| Other than the special level-0 compactions, we will pick one 2MB file | |||
| from level L. In the worst case, this will overlap ~ 12 files from | |||
| level L+1 (10 because level-(L+1) is ten times the size of level-L, | |||
| and another two at the boundaries since the file ranges at level-L | |||
| will usually not be aligned with the file ranges at level-L+1). The | |||
| compaction will therefore read 26MB and write 26MB. Assuming a disk | |||
| IO rate of 100MB/s (ballpark range for modern drives), the worst | |||
| compaction cost will be approximately 0.5 second. | |||
| <p> | |||
| If we throttle the background writing to something small, say 10% of | |||
| the full 100MB/s speed, a compaction may take up to 5 seconds. If the | |||
| user is writing at 10MB/s, we might build up lots of level-0 files | |||
| (~50 to hold the 5*10MB). This may significantly increase the cost of | |||
| reads due to the overhead of merging more files together on every | |||
| read. | |||
| <p> | |||
| Solution 1: To reduce this problem, we might want to increase the log | |||
| switching threshold when the number of level-0 files is large. Though | |||
| the downside is that the larger this threshold, the more memory we will | |||
| need to hold the corresponding memtable. | |||
| <p> | |||
| Solution 2: We might want to decrease write rate artificially when the | |||
| number of level-0 files goes up. | |||
| <p> | |||
| Solution 3: We work on reducing the cost of very wide merges. | |||
| Perhaps most of the level-0 files will have their blocks sitting | |||
| uncompressed in the cache and we will only need to worry about the | |||
| O(N) complexity in the merging iterator. | |||
| <h2>Number of files</h2> | |||
| Instead of always making 2MB files, we could make larger files for | |||
| larger levels to reduce the total file count, though at the expense of | |||
| more bursty compactions. Alternatively, we could shard the set of | |||
| files into multiple directories. | |||
| <p> | |||
| An experiment on an <code>ext3</code> filesystem on Feb 04, 2011 shows | |||
| the following timings to do 100K file opens in directories with | |||
| varying number of files: | |||
| <table class="datatable"> | |||
| <tr><th>Files in directory</th><th>Microseconds to open a file</th></tr> | |||
| <tr><td>1000</td><td>9</td> | |||
| <tr><td>10000</td><td>10</td> | |||
| <tr><td>100000</td><td>16</td> | |||
| </table> | |||
| So maybe even the sharding is not necessary on modern filesystems? | |||
| <h1>Recovery</h1> | |||
| <ul> | |||
| <li> Read CURRENT to find name of the latest committed MANIFEST | |||
| <li> Read the named MANIFEST file | |||
| <li> Clean up stale files | |||
| <li> We could open all sstables here, but it is probably better to be lazy... | |||
| <li> Convert log chunk to a new level-0 sstable | |||
| <li> Start directing new writes to a new log file with recovered sequence# | |||
| </ul> | |||
| <h1>Garbage collection of files</h1> | |||
| <code>DeleteObsoleteFiles()</code> is called at the end of every | |||
| compaction and at the end of recovery. It finds the names of all | |||
| files in the database. It deletes all log files that are not the | |||
| current log file. It deletes all table files that are not referenced | |||
| from some level and are not the output of an active compaction. | |||
| </body> | |||
| </html> | |||
| @ -0,0 +1,172 @@ | |||
| ## Files | |||
| The implementation of leveldb is similar in spirit to the representation of a | |||
| single [Bigtable tablet (section 5.3)](http://research.google.com/archive/bigtable.html). | |||
| However the organization of the files that make up the representation is | |||
| somewhat different and is explained below. | |||
| Each database is represented by a set of files stored in a directory. There are | |||
| several different types of files as documented below: | |||
| ### Log files | |||
| A log file (*.log) stores a sequence of recent updates. Each update is appended | |||
| to the current log file. When the log file reaches a pre-determined size | |||
| (approximately 4MB by default), it is converted to a sorted table (see below) | |||
| and a new log file is created for future updates. | |||
| A copy of the current log file is kept in an in-memory structure (the | |||
| `memtable`). This copy is consulted on every read so that read operations | |||
| reflect all logged updates. | |||
| ### Sorted tables | |||
| A sorted table (*.ldb) stores a sequence of entries sorted by key. Each entry is | |||
| either a value for the key, or a deletion marker for the key. (Deletion markers | |||
| are kept around to hide obsolete values present in older sorted tables). | |||
| The set of sorted tables are organized into a sequence of levels. The sorted | |||
| table generated from a log file is placed in a special **young** level (also | |||
| called level-0). When the number of young files exceeds a certain threshold | |||
| (currently four), all of the young files are merged together with all of the | |||
| overlapping level-1 files to produce a sequence of new level-1 files (we create | |||
| a new level-1 file for every 2MB of data.) | |||
| Files in the young level may contain overlapping keys. However files in other | |||
| levels have distinct non-overlapping key ranges. Consider level number L where | |||
| L >= 1. When the combined size of files in level-L exceeds (10^L) MB (i.e., 10MB | |||
| for level-1, 100MB for level-2, ...), one file in level-L, and all of the | |||
| overlapping files in level-(L+1) are merged to form a set of new files for | |||
| level-(L+1). These merges have the effect of gradually migrating new updates | |||
| from the young level to the largest level using only bulk reads and writes | |||
| (i.e., minimizing expensive seeks). | |||
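| Expressed as code, the size rule above is a simple loop. A minimal sketch | |||
| (illustrative only, not the library's actual function): | |||
| ```c++ | |||
| // Size limit implied by the rule above: level-L may hold ~(10^L) MB, | |||
| // i.e. 10MB at level-1, 100MB at level-2, and so on. (Level-0 is | |||
| // triggered by file count, not byte size.) | |||
| double MaxBytesForLevel(int level) { | |||
|   double result = 10.0 * 1048576.0;  // 10MB, the level-1 limit | |||
|   while (level > 1) { | |||
|     result *= 10; | |||
|     level--; | |||
|   } | |||
|   return result; | |||
| } | |||
| ``` | |||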
| ### Manifest | |||
| A MANIFEST file lists the set of sorted tables that make up each level, the | |||
| corresponding key ranges, and other important metadata. A new MANIFEST file | |||
| (with a new number embedded in the file name) is created whenever the database | |||
| is reopened. The MANIFEST file is formatted as a log, and changes made to the | |||
| serving state (as files are added or removed) are appended to this log. | |||
| ### Current | |||
| CURRENT is a simple text file that contains the name of the latest MANIFEST | |||
| file. | |||
| ### Info logs | |||
| Informational messages are printed to files named LOG and LOG.old. | |||
| ### Others | |||
| Other files used for miscellaneous purposes may also be present (LOCK, *.dbtmp). | |||
| ## Level 0 | |||
| When the log file grows above a certain size (4MB by default): | |||
| Create a brand new memtable and log file and direct future updates here. | |||
| In the background: | |||
| 1. Write the contents of the previous memtable to an sstable. | |||
| 2. Discard the memtable. | |||
| 3. Delete the old log file and the old memtable. | |||
| 4. Add the new sstable to the young (level-0) level. | |||
| ## Compactions | |||
| When the size of level L exceeds its limit, we compact it in a background | |||
| thread. The compaction picks a file from level L and all overlapping files from | |||
| the next level L+1. Note that if a level-L file overlaps only part of a | |||
| level-(L+1) file, the entire file at level-(L+1) is used as an input to the | |||
| compaction and will be discarded after the compaction. Aside: because level-0 | |||
| is special (files in it may overlap each other), we treat compactions from | |||
| level-0 to level-1 specially: a level-0 compaction may pick more than one | |||
| level-0 file in case some of these files overlap each other. | |||
| A compaction merges the contents of the picked files to produce a sequence of | |||
| level-(L+1) files. We switch to producing a new level-(L+1) file after the | |||
| current output file has reached the target file size (2MB). We also switch to a | |||
| new output file when the key range of the current output file has grown enough | |||
| to overlap more than ten level-(L+2) files. This last rule ensures that a later | |||
| compaction of a level-(L+1) file will not pick up too much data from | |||
| level-(L+2). | |||
| The old files are discarded and the new files are added to the serving state. | |||
| Compactions for a particular level rotate through the key space. In more detail, | |||
| for each level L, we remember the ending key of the last compaction at level L. | |||
| The next compaction for level L will pick the first file that starts after this | |||
| key (wrapping around to the beginning of the key space if there is no such | |||
| file). | |||
| Compactions drop overwritten values. They also drop deletion markers if there | |||
| are no higher numbered levels that contain a file whose range overlaps the | |||
| current key. | |||
| ### Timing | |||
| Level-0 compactions will read up to four 1MB files from level-0, and at worst | |||
| all the level-1 files (10MB). I.e., we will read 14MB and write 14MB. | |||
| Other than the special level-0 compactions, we will pick one 2MB file from level | |||
| L. In the worst case, this will overlap ~ 12 files from level L+1 (10 because | |||
| level-(L+1) is ten times the size of level-L, and another two at the boundaries | |||
| since the file ranges at level-L will usually not be aligned with the file | |||
| ranges at level-L+1). The compaction will therefore read 26MB and write 26MB. | |||
| Assuming a disk IO rate of 100MB/s (ballpark range for modern drives), the worst | |||
| compaction cost will be approximately 0.5 second. | |||
| If we throttle the background writing to something small, say 10% of the full | |||
| 100MB/s speed, a compaction may take up to 5 seconds. If the user is writing at | |||
| 10MB/s, we might build up lots of level-0 files (~50 to hold the 5*10MB). This | |||
| may significantly increase the cost of reads due to the overhead of merging more | |||
| files together on every read. | |||
| Solution 1: To reduce this problem, we might want to increase the log switching | |||
| threshold when the number of level-0 files is large. Though the downside is that | |||
| the larger this threshold, the more memory we will need to hold the | |||
| corresponding memtable. | |||
| Solution 2: We might want to decrease write rate artificially when the number of | |||
| level-0 files goes up. | |||
| Solution 3: We work on reducing the cost of very wide merges. Perhaps most of | |||
| the level-0 files will have their blocks sitting uncompressed in the cache and | |||
| we will only need to worry about the O(N) complexity in the merging iterator. | |||
| ### Number of files | |||
| Instead of always making 2MB files, we could make larger files for larger levels | |||
| to reduce the total file count, though at the expense of more bursty | |||
| compactions. Alternatively, we could shard the set of files into multiple | |||
| directories. | |||
| An experiment on an ext3 filesystem on Feb 04, 2011 shows the following timings | |||
| to do 100K file opens in directories with varying numbers of files: | |||
| | Files in directory | Microseconds to open a file | | |||
| |-------------------:|----------------------------:| | |||
| | 1000 | 9 | | |||
| | 10000 | 10 | | |||
| | 100000 | 16 | | |||
| So maybe even the sharding is not necessary on modern filesystems? | |||
| ## Recovery | |||
| * Read CURRENT to find name of the latest committed MANIFEST | |||
| * Read the named MANIFEST file | |||
| * Clean up stale files | |||
| * We could open all sstables here, but it is probably better to be lazy... | |||
| * Convert log chunk to a new level-0 sstable | |||
| * Start directing new writes to a new log file with recovered sequence# | |||
| ## Garbage collection of files | |||
| `DeleteObsoleteFiles()` is called at the end of every compaction and at the end | |||
| of recovery. It finds the names of all files in the database. It deletes all log | |||
| files that are not the current log file. It deletes all table files that are not | |||
| referenced from some level and are not the output of an active compaction. | |||
| @ -1,549 +0,0 @@ | |||
| <!DOCTYPE html> | |||
| <html> | |||
| <head> | |||
| <link rel="stylesheet" type="text/css" href="doc.css" /> | |||
| <title>Leveldb</title> | |||
| </head> | |||
| <body> | |||
| <h1>Leveldb</h1> | |||
| <address>Jeff Dean, Sanjay Ghemawat</address> | |||
| <p> | |||
| The <code>leveldb</code> library provides a persistent key value store. Keys and | |||
| values are arbitrary byte arrays. The keys are ordered within the key | |||
| value store according to a user-specified comparator function. | |||
| <p> | |||
| <h1>Opening A Database</h1> | |||
| <p> | |||
| A <code>leveldb</code> database has a name which corresponds to a file system | |||
| directory. All of the contents of database are stored in this | |||
| directory. The following example shows how to open a database, | |||
| creating it if necessary: | |||
| <p> | |||
| <pre> | |||
| #include <cassert> | |||
| #include "leveldb/db.h" | |||
| leveldb::DB* db; | |||
| leveldb::Options options; | |||
| options.create_if_missing = true; | |||
| leveldb::Status status = leveldb::DB::Open(options, "/tmp/testdb", &db); | |||
| assert(status.ok()); | |||
| ... | |||
| </pre> | |||
| If you want to raise an error if the database already exists, add | |||
| the following line before the <code>leveldb::DB::Open</code> call: | |||
| <pre> | |||
| options.error_if_exists = true; | |||
| </pre> | |||
| <h1>Status</h1> | |||
| <p> | |||
| You may have noticed the <code>leveldb::Status</code> type above. Values of this | |||
| type are returned by most functions in <code>leveldb</code> that may encounter an | |||
| error. You can check if such a result is ok, and also print an | |||
| associated error message: | |||
| <p> | |||
| <pre> | |||
| leveldb::Status s = ...; | |||
| if (!s.ok()) cerr << s.ToString() << endl; | |||
| </pre> | |||
| <h1>Closing A Database</h1> | |||
| <p> | |||
| When you are done with a database, just delete the database object. | |||
| Example: | |||
| <p> | |||
| <pre> | |||
| ... open the db as described above ... | |||
| ... do something with db ... | |||
| delete db; | |||
| </pre> | |||
| <h1>Reads And Writes</h1> | |||
| <p> | |||
| The database provides <code>Put</code>, <code>Delete</code>, and <code>Get</code> methods to | |||
| modify/query the database. For example, the following code | |||
| moves the value stored under key1 to key2. | |||
| <pre> | |||
| std::string value; | |||
| leveldb::Status s = db->Get(leveldb::ReadOptions(), key1, &value); | |||
| if (s.ok()) s = db->Put(leveldb::WriteOptions(), key2, value); | |||
| if (s.ok()) s = db->Delete(leveldb::WriteOptions(), key1); | |||
| </pre> | |||
| <h1>Atomic Updates</h1> | |||
| <p> | |||
| Note that if the process dies after the Put of key2 but before the | |||
| delete of key1, the same value may be left stored under multiple keys. | |||
| Such problems can be avoided by using the <code>WriteBatch</code> class to | |||
| atomically apply a set of updates: | |||
| <p> | |||
| <pre> | |||
| #include "leveldb/write_batch.h" | |||
| ... | |||
| std::string value; | |||
| leveldb::Status s = db->Get(leveldb::ReadOptions(), key1, &value); | |||
| if (s.ok()) { | |||
| leveldb::WriteBatch batch; | |||
| batch.Delete(key1); | |||
| batch.Put(key2, value); | |||
| s = db->Write(leveldb::WriteOptions(), &batch); | |||
| } | |||
| </pre> | |||
| The <code>WriteBatch</code> holds a sequence of edits to be made to the database, | |||
| and these edits within the batch are applied in order. Note that we | |||
| called <code>Delete</code> before <code>Put</code> so that if <code>key1</code> is identical to <code>key2</code>, | |||
| we do not end up erroneously dropping the value entirely. | |||
| <p> | |||
| Apart from its atomicity benefits, <code>WriteBatch</code> may also be used to | |||
| speed up bulk updates by placing lots of individual mutations into the | |||
| same batch. | |||
| <h1>Synchronous Writes</h1> | |||
| By default, each write to <code>leveldb</code> is asynchronous: it | |||
| returns after pushing the write from the process into the operating | |||
| system. The transfer from operating system memory to the underlying | |||
| persistent storage happens asynchronously. The <code>sync</code> flag | |||
| can be turned on for a particular write to make the write operation | |||
| not return until the data being written has been pushed all the way to | |||
| persistent storage. (On Posix systems, this is implemented by calling | |||
| either <code>fsync(...)</code> or <code>fdatasync(...)</code> or | |||
| <code>msync(..., MS_SYNC)</code> before the write operation returns.) | |||
| <pre> | |||
| leveldb::WriteOptions write_options; | |||
| write_options.sync = true; | |||
| db->Put(write_options, ...); | |||
| </pre> | |||
| Asynchronous writes are often more than a thousand times as fast as | |||
| synchronous writes. The downside of asynchronous writes is that a | |||
| crash of the machine may cause the last few updates to be lost. Note | |||
| that a crash of just the writing process (i.e., not a reboot) will not | |||
| cause any loss since even when <code>sync</code> is false, an update | |||
| is pushed from the process memory into the operating system before it | |||
| is considered done. | |||
| <p> | |||
| Asynchronous writes can often be used safely. For example, when | |||
| loading a large amount of data into the database you can handle lost | |||
| updates by restarting the bulk load after a crash. A hybrid scheme is | |||
| also possible where every Nth write is synchronous, and in the event | |||
| of a crash, the bulk load is restarted just after the last synchronous | |||
| write finished by the previous run. (The synchronous write can update | |||
| a marker that describes where to restart on a crash.) | |||
| <p> | |||
| <code>WriteBatch</code> provides an alternative to asynchronous writes. | |||
| Multiple updates may be placed in the same <code>WriteBatch</code> and | |||
| applied together using a synchronous write (i.e., | |||
| <code>write_options.sync</code> is set to true). The extra cost of | |||
| the synchronous write will be amortized across all of the writes in | |||
| the batch. | |||
| <p> | |||
| <h1>Concurrency</h1> | |||
| <p> | |||
| A database may only be opened by one process at a time. | |||
| The <code>leveldb</code> implementation acquires a lock from the | |||
| operating system to prevent misuse. Within a single process, the | |||
| same <code>leveldb::DB</code> object may be safely shared by multiple | |||
| concurrent threads. I.e., different threads may write into or fetch | |||
| iterators or call <code>Get</code> on the same database without any | |||
| external synchronization (the leveldb implementation will | |||
| automatically do the required synchronization). However other objects | |||
| (like Iterator and WriteBatch) may require external synchronization. | |||
| If two threads share such an object, they must protect access to it | |||
| using their own locking protocol. More details are available in | |||
| the public header files. | |||
| <p> | |||
| <h1>Iteration</h1> | |||
| <p> | |||
| The following example demonstrates how to print all key,value pairs | |||
| in a database. | |||
| <p> | |||
| <pre> | |||
| leveldb::Iterator* it = db->NewIterator(leveldb::ReadOptions()); | |||
| for (it->SeekToFirst(); it->Valid(); it->Next()) { | |||
| cout << it->key().ToString() << ": " << it->value().ToString() << endl; | |||
| } | |||
| assert(it->status().ok()); // Check for any errors found during the scan | |||
| delete it; | |||
| </pre> | |||
| The following variation shows how to process just the keys in the | |||
| range <code>[start,limit)</code>: | |||
| <p> | |||
| <pre> | |||
| for (it->Seek(start); | |||
| it->Valid() && it->key().ToString() < limit; | |||
| it->Next()) { | |||
| ... | |||
| } | |||
| </pre> | |||
| You can also process entries in reverse order. (Caveat: reverse | |||
| iteration may be somewhat slower than forward iteration.) | |||
| <p> | |||
| <pre> | |||
| for (it->SeekToLast(); it->Valid(); it->Prev()) { | |||
| ... | |||
| } | |||
| </pre> | |||
| <h1>Snapshots</h1> | |||
| <p> | |||
| Snapshots provide consistent read-only views over the entire state of | |||
| the key-value store. <code>ReadOptions::snapshot</code> may be non-NULL to indicate | |||
| that a read should operate on a particular version of the DB state. | |||
| If <code>ReadOptions::snapshot</code> is NULL, the read will operate on an | |||
| implicit snapshot of the current state. | |||
| <p> | |||
| Snapshots are created by the DB::GetSnapshot() method: | |||
| <p> | |||
| <pre> | |||
| leveldb::ReadOptions options; | |||
| options.snapshot = db->GetSnapshot(); | |||
| ... apply some updates to db ... | |||
| leveldb::Iterator* iter = db->NewIterator(options); | |||
| ... read using iter to view the state when the snapshot was created ... | |||
| delete iter; | |||
| db->ReleaseSnapshot(options.snapshot); | |||
| </pre> | |||
| Note that when a snapshot is no longer needed, it should be released | |||
| using the DB::ReleaseSnapshot interface. This allows the | |||
| implementation to get rid of state that was being maintained just to | |||
| support reading as of that snapshot. | |||
| <h1>Slice</h1> | |||
| <p> | |||
| The return value of the <code>it->key()</code> and <code>it->value()</code> calls above | |||
| are instances of the <code>leveldb::Slice</code> type. <code>Slice</code> is a simple | |||
| structure that contains a length and a pointer to an external byte | |||
| array. Returning a <code>Slice</code> is a cheaper alternative to returning a | |||
| <code>std::string</code> since we do not need to copy potentially large keys and | |||
| values. In addition, <code>leveldb</code> methods do not return null-terminated | |||
| C-style strings since <code>leveldb</code> keys and values are allowed to | |||
| contain '\0' bytes. | |||
| <p> | |||
| C++ strings and null-terminated C-style strings can be easily converted | |||
| to a Slice: | |||
| <p> | |||
| <pre> | |||
| leveldb::Slice s1 = "hello"; | |||
| std::string str("world"); | |||
| leveldb::Slice s2 = str; | |||
| </pre> | |||
| A Slice can be easily converted back to a C++ string: | |||
| <pre> | |||
| std::string str = s1.ToString(); | |||
| assert(str == std::string("hello")); | |||
| </pre> | |||
| Be careful when using Slices since it is up to the caller to ensure that | |||
| the external byte array into which the Slice points remains live while | |||
| the Slice is in use. For example, the following is buggy: | |||
| <p> | |||
| <pre> | |||
| leveldb::Slice slice; | |||
| if (...) { | |||
| std::string str = ...; | |||
| slice = str; | |||
| } | |||
| Use(slice); | |||
| </pre> | |||
| When the <code>if</code> statement goes out of scope, <code>str</code> will be destroyed and the | |||
| backing storage for <code>slice</code> will disappear. | |||
| <p> | |||
| <h1>Comparators</h1> | |||
| <p> | |||
| The preceding examples used the default ordering function for key, | |||
| which orders bytes lexicographically. You can however supply a custom | |||
| comparator when opening a database. For example, suppose each | |||
| database key consists of two numbers and we should sort by the first | |||
| number, breaking ties by the second number. First, define a proper | |||
| subclass of <code>leveldb::Comparator</code> that expresses these rules: | |||
| <p> | |||
| <pre> | |||
| class TwoPartComparator : public leveldb::Comparator { | |||
| public: | |||
| // Three-way comparison function: | |||
| // if a < b: negative result | |||
| // if a > b: positive result | |||
| // else: zero result | |||
| int Compare(const leveldb::Slice& a, const leveldb::Slice& b) const { | |||
| int a1, a2, b1, b2; | |||
| ParseKey(a, &a1, &a2); | |||
| ParseKey(b, &b1, &b2); | |||
| if (a1 < b1) return -1; | |||
| if (a1 > b1) return +1; | |||
| if (a2 < b2) return -1; | |||
| if (a2 > b2) return +1; | |||
| return 0; | |||
| } | |||
| // Ignore the following methods for now: | |||
| const char* Name() const { return "TwoPartComparator"; } | |||
| void FindShortestSeparator(std::string*, const leveldb::Slice&) const { } | |||
| void FindShortSuccessor(std::string*) const { } | |||
| }; | |||
| </pre> | |||
| Now create a database using this custom comparator: | |||
| <p> | |||
| <pre> | |||
| TwoPartComparator cmp; | |||
| leveldb::DB* db; | |||
| leveldb::Options options; | |||
| options.create_if_missing = true; | |||
| options.comparator = &cmp; | |||
| leveldb::Status status = leveldb::DB::Open(options, "/tmp/testdb", &db); | |||
| ... | |||
| </pre> | |||
| <h2>Backwards compatibility</h2> | |||
| <p> | |||
| The result of the comparator's <code>Name</code> method is attached to the | |||
| database when it is created, and is checked on every subsequent | |||
| database open. If the name changes, the <code>leveldb::DB::Open</code> call will | |||
| fail. Therefore, change the name if and only if the new key format | |||
| and comparison function are incompatible with existing databases, and | |||
| it is ok to discard the contents of all existing databases. | |||
| <p> | |||
| You can however still gradually evolve your key format over time with | |||
| a little bit of pre-planning. For example, you could store a version | |||
| number at the end of each key (one byte should suffice for most uses). | |||
| When you wish to switch to a new key format (e.g., adding an optional | |||
| third part to the keys processed by <code>TwoPartComparator</code>), | |||
| (a) keep the same comparator name (b) increment the version number | |||
| for new keys (c) change the comparator function so it uses the | |||
| version numbers found in the keys to decide how to interpret them. | |||
| <p> | |||
| <h1>Performance</h1> | |||
| <p> | |||
| Performance can be tuned by changing the default values of the | |||
| types defined in <code>include/leveldb/options.h</code>. | |||
| <p> | |||
| <h2>Block size</h2> | |||
| <p> | |||
| <code>leveldb</code> groups adjacent keys together into the same block and such a | |||
| block is the unit of transfer to and from persistent storage. The | |||
| default block size is approximately 4096 uncompressed bytes. | |||
| Applications that mostly do bulk scans over the contents of the | |||
| database may wish to increase this size. Applications that do a lot | |||
| of point reads of small values may wish to switch to a smaller block | |||
| size if performance measurements indicate an improvement. There isn't | |||
| much benefit in using blocks smaller than one kilobyte, or larger than | |||
| a few megabytes. Also note that compression will be more effective | |||
| with larger block sizes. | |||
| <p> | |||
| <h2>Compression</h2> | |||
| <p> | |||
| Each block is individually compressed before being written to | |||
| persistent storage. Compression is on by default since the default | |||
| compression method is very fast, and is automatically disabled for | |||
| uncompressible data. In rare cases, applications may want to disable | |||
| compression entirely, but should only do so if benchmarks show a | |||
| performance improvement: | |||
| <p> | |||
| <pre> | |||
| leveldb::Options options; | |||
| options.compression = leveldb::kNoCompression; | |||
| ... leveldb::DB::Open(options, name, ...) .... | |||
| </pre> | |||
| <h2>Cache</h2> | |||
| <p> | |||
| The contents of the database are stored in a set of files in the | |||
| filesystem and each file stores a sequence of compressed blocks. If | |||
| <code>options.cache</code> is non-NULL, it is used to cache frequently used | |||
| uncompressed block contents. | |||
| <p> | |||
| <pre> | |||
| #include "leveldb/cache.h" | |||
| leveldb::Options options; | |||
| options.cache = leveldb::NewLRUCache(100 * 1048576); // 100MB cache | |||
| leveldb::DB* db; | |||
| leveldb::DB::Open(options, name, &db); | |||
| ... use the db ... | |||
| delete db; | |||
| delete options.cache; | |||
| </pre> | |||
| Note that the cache holds uncompressed data, and therefore it should | |||
| be sized according to application level data sizes, without any | |||
| reduction from compression. (Caching of compressed blocks is left to | |||
| the operating system buffer cache, or any custom <code>Env</code> | |||
| implementation provided by the client.) | |||
| <p> | |||
| When performing a bulk read, the application may wish to disable | |||
| caching so that the data processed by the bulk read does not end up | |||
| displacing most of the cached contents. A per-iterator option can be | |||
| used to achieve this: | |||
| <p> | |||
| <pre> | |||
| leveldb::ReadOptions options; | |||
| options.fill_cache = false; | |||
| leveldb::Iterator* it = db->NewIterator(options); | |||
| for (it->SeekToFirst(); it->Valid(); it->Next()) { | |||
| ... | |||
| } | |||
| </pre> | |||
| <h2>Key Layout</h2> | |||
| <p> | |||
| Note that the unit of disk transfer and caching is a block. Adjacent | |||
| keys (according to the database sort order) will usually be placed in | |||
| the same block. Therefore the application can improve its performance | |||
| by placing keys that are accessed together near each other and placing | |||
| infrequently used keys in a separate region of the key space. | |||
| <p> | |||
| For example, suppose we are implementing a simple file system on top | |||
| of <code>leveldb</code>. The types of entries we might wish to store are: | |||
| <p> | |||
| <pre> | |||
| filename -> permission-bits, length, list of file_block_ids | |||
| file_block_id -> data | |||
| </pre> | |||
| We might want to prefix <code>filename</code> keys with one letter (say '/') and the | |||
| <code>file_block_id</code> keys with a different letter (say '0') so that scans | |||
| over just the metadata do not force us to fetch and cache bulky file | |||
| contents. | |||
| <p> | |||
| <h2>Filters</h2> | |||
| <p> | |||
| Because of the way <code>leveldb</code> data is organized on disk, | |||
| a single <code>Get()</code> call may involve multiple reads from disk. | |||
| The optional <code>FilterPolicy</code> mechanism can be used to reduce | |||
| the number of disk reads substantially. | |||
| <pre> | |||
| leveldb::Options options; | |||
| options.filter_policy = NewBloomFilterPolicy(10); | |||
| leveldb::DB* db; | |||
| leveldb::DB::Open(options, "/tmp/testdb", &db); | |||
| ... use the database ... | |||
| delete db; | |||
| delete options.filter_policy; | |||
| </pre> | |||
| The preceding code associates a | |||
| <a href="http://en.wikipedia.org/wiki/Bloom_filter">Bloom filter</a> | |||
| based filtering policy with the database. Bloom filter based | |||
| filtering relies on keeping some number of bits of data in memory per | |||
| key (in this case 10 bits per key since that is the argument we passed | |||
| to NewBloomFilterPolicy). This filter will reduce the number of unnecessary | |||
| disk reads needed for <code>Get()</code> calls by a factor of | |||
| approximately a 100. Increasing the bits per key will lead to a | |||
| larger reduction at the cost of more memory usage. We recommend that | |||
| applications whose working set does not fit in memory and that do a | |||
| lot of random reads set a filter policy. | |||
| <p> | |||
| If you are using a custom comparator, you should ensure that the filter | |||
| policy you are using is compatible with your comparator. For example, | |||
| consider a comparator that ignores trailing spaces when comparing keys. | |||
| <code>NewBloomFilterPolicy</code> must not be used with such a comparator. | |||
| Instead, the application should provide a custom filter policy that | |||
| also ignores trailing spaces. For example: | |||
| <pre> | |||
| class CustomFilterPolicy : public leveldb::FilterPolicy { | |||
| private: | |||
| FilterPolicy* builtin_policy_; | |||
| public: | |||
| CustomFilterPolicy() : builtin_policy_(NewBloomFilterPolicy(10)) { } | |||
| ~CustomFilterPolicy() { delete builtin_policy_; } | |||
| const char* Name() const { return "IgnoreTrailingSpacesFilter"; } | |||
| void CreateFilter(const Slice* keys, int n, std::string* dst) const { | |||
| // Use builtin bloom filter code after removing trailing spaces | |||
| std::vector<Slice> trimmed(n); | |||
| for (int i = 0; i < n; i++) { | |||
| trimmed[i] = RemoveTrailingSpaces(keys[i]); | |||
| } | |||
| return builtin_policy_->CreateFilter(&trimmed[0], n, dst); | |||
| } | |||
| bool KeyMayMatch(const Slice& key, const Slice& filter) const { | |||
| // Use builtin bloom filter code after removing trailing spaces | |||
| return builtin_policy_->KeyMayMatch(RemoveTrailingSpaces(key), filter); | |||
| } | |||
| }; | |||
| </pre> | |||
| <p> | |||
| Advanced applications may provide a filter policy that does not use | |||
| a bloom filter but uses some other mechanism for summarizing a set | |||
| of keys. See <code>leveldb/filter_policy.h</code> for detail. | |||
| <p> | |||
| <h1>Checksums</h1> | |||
| <p> | |||
| <code>leveldb</code> associates checksums with all data it stores in the file system. | |||
| There are two separate controls provided over how aggressively these | |||
| checksums are verified: | |||
| <p> | |||
| <ul> | |||
| <li> <code>ReadOptions::verify_checksums</code> may be set to true to force | |||
| checksum verification of all data that is read from the file system on | |||
| behalf of a particular read. By default, no such verification is | |||
| done. | |||
| <p> | |||
| <li> <code>Options::paranoid_checks</code> may be set to true before opening a | |||
| database to make the database implementation raise an error as soon as | |||
| it detects an internal corruption. Depending on which portion of the | |||
| database has been corrupted, the error may be raised when the database | |||
| is opened, or later by another database operation. By default, | |||
| paranoid checking is off so that the database can be used even if | |||
| parts of its persistent storage have been corrupted. | |||
| <p> | |||
| If a database is corrupted (perhaps it cannot be opened when | |||
| paranoid checking is turned on), the <code>leveldb::RepairDB</code> function | |||
| may be used to recover as much of the data as possible | |||
| <p> | |||
| </ul> | |||
| <h1>Approximate Sizes</h1> | |||
| <p> | |||
| The <code>GetApproximateSizes</code> method can be used to get the approximate | |||
| number of bytes of file system space used by one or more key ranges. | |||
| <p> | |||
| <pre> | |||
| leveldb::Range ranges[2]; | |||
| ranges[0] = leveldb::Range("a", "c"); | |||
| ranges[1] = leveldb::Range("x", "z"); | |||
| uint64_t sizes[2]; | |||
| leveldb::Status s = db->GetApproximateSizes(ranges, 2, sizes); | |||
| </pre> | |||
| The preceding call will set <code>sizes[0]</code> to the approximate number of | |||
| bytes of file system space used by the key range <code>[a..c)</code> and | |||
| <code>sizes[1]</code> to the approximate number of bytes used by the key range | |||
| <code>[x..z)</code>. | |||
| <p> | |||
| <h1>Environment</h1> | |||
| <p> | |||
| All file operations (and other operating system calls) issued by the | |||
| <code>leveldb</code> implementation are routed through a <code>leveldb::Env</code> object. | |||
| Sophisticated clients may wish to provide their own <code>Env</code> | |||
| implementation to get better control. For example, an application may | |||
| introduce artificial delays in the file IO paths to limit the impact | |||
| of <code>leveldb</code> on other activities in the system. | |||
| <p> | |||
| <pre> | |||
| class SlowEnv : public leveldb::Env { | |||
| .. implementation of the Env interface ... | |||
| }; | |||
| SlowEnv env; | |||
| leveldb::Options options; | |||
| options.env = &env; | |||
| Status s = leveldb::DB::Open(options, ...); | |||
| </pre> | |||
| <h1>Porting</h1> | |||
| <p> | |||
| <code>leveldb</code> may be ported to a new platform by providing platform | |||
| specific implementations of the types/methods/functions exported by | |||
| <code>leveldb/port/port.h</code>. See <code>leveldb/port/port_example.h</code> for more | |||
| details. | |||
| <p> | |||
| In addition, the new platform may need a new default <code>leveldb::Env</code> | |||
| implementation. See <code>leveldb/util/env_posix.h</code> for an example. | |||
| <h1>Other Information</h1> | |||
| <p> | |||
| Details about the <code>leveldb</code> implementation may be found in | |||
| the following documents: | |||
| <ul> | |||
| <li> <a href="impl.html">Implementation notes</a> | |||
| <li> <a href="table_format.txt">Format of an immutable Table file</a> | |||
| <li> <a href="log_format.txt">Format of a log file</a> | |||
| </ul> | |||
| </body> | |||
| </html> | |||
| @ -0,0 +1,523 @@ | |||
| leveldb | |||
| ======= | |||
| _Jeff Dean, Sanjay Ghemawat_ | |||
| The leveldb library provides a persistent key value store. Keys and values are | |||
| arbitrary byte arrays. The keys are ordered within the key value store | |||
| according to a user-specified comparator function. | |||
| ## Opening A Database | |||
| A leveldb database has a name which corresponds to a file system directory. All | |||
| of the contents of the database are stored in this directory. The following example | |||
| shows how to open a database, creating it if necessary: | |||
| ```c++ | |||
| #include <cassert> | |||
| #include "leveldb/db.h" | |||
| leveldb::DB* db; | |||
| leveldb::Options options; | |||
| options.create_if_missing = true; | |||
| leveldb::Status status = leveldb::DB::Open(options, "/tmp/testdb", &db); | |||
| assert(status.ok()); | |||
| ... | |||
| ``` | |||
| If you want to raise an error if the database already exists, add the following | |||
| line before the `leveldb::DB::Open` call: | |||
| ```c++ | |||
| options.error_if_exists = true; | |||
| ``` | |||
| ## Status | |||
| You may have noticed the `leveldb::Status` type above. Values of this type are | |||
| returned by most functions in leveldb that may encounter an error. You can check | |||
| if such a result is ok, and also print an associated error message: | |||
| ```c++ | |||
| leveldb::Status s = ...; | |||
| if (!s.ok()) cerr << s.ToString() << endl; | |||
| ``` | |||
| ## Closing A Database | |||
| When you are done with a database, just delete the database object. Example: | |||
| ```c++ | |||
| ... open the db as described above ... | |||
| ... do something with db ... | |||
| delete db; | |||
| ``` | |||
| ## Reads And Writes | |||
| The database provides `Put`, `Delete`, and `Get` methods to modify/query the | |||
| database. For example, the following code moves the value stored under `key1` | |||
| to `key2`. | |||
| ```c++ | |||
| std::string value; | |||
| leveldb::Status s = db->Get(leveldb::ReadOptions(), key1, &value); | |||
| if (s.ok()) s = db->Put(leveldb::WriteOptions(), key2, value); | |||
| if (s.ok()) s = db->Delete(leveldb::WriteOptions(), key1); | |||
| ``` | |||
| ## Atomic Updates | |||
| Note that if the process dies after the Put of key2 but before the delete of | |||
| key1, the same value may be left stored under multiple keys. Such problems can | |||
| be avoided by using the `WriteBatch` class to atomically apply a set of updates: | |||
| ```c++ | |||
| #include "leveldb/write_batch.h" | |||
| ... | |||
| std::string value; | |||
| leveldb::Status s = db->Get(leveldb::ReadOptions(), key1, &value); | |||
| if (s.ok()) { | |||
| leveldb::WriteBatch batch; | |||
| batch.Delete(key1); | |||
| batch.Put(key2, value); | |||
| s = db->Write(leveldb::WriteOptions(), &batch); | |||
| } | |||
| ``` | |||
| The `WriteBatch` holds a sequence of edits to be made to the database, and these | |||
| edits within the batch are applied in order. Note that we called `Delete` before | |||
| `Put` so that if `key1` is identical to `key2`, we do not end up erroneously dropping | |||
| the value entirely. | |||
| Apart from its atomicity benefits, `WriteBatch` may also be used to speed up | |||
| bulk updates by placing lots of individual mutations into the same batch. | |||
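| For example, a hypothetical bulk update might look like this (`records` is an | |||
| assumed container of key/value string pairs): | |||
| ```c++ | |||
| // Sketch: batch many small mutations so the per-write overhead is paid | |||
| // once per batch rather than once per record. | |||
| leveldb::WriteBatch batch; | |||
| for (const auto& kv : records) {  // `records` is an assumed input | |||
|   batch.Put(kv.first, kv.second); | |||
| } | |||
| leveldb::Status s = db->Write(leveldb::WriteOptions(), &batch); | |||
| ``` | |||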
| ## Synchronous Writes | |||
| By default, each write to leveldb is asynchronous: it returns after pushing the | |||
| write from the process into the operating system. The transfer from operating | |||
| system memory to the underlying persistent storage happens asynchronously. The | |||
| sync flag can be turned on for a particular write to make the write operation | |||
| not return until the data being written has been pushed all the way to | |||
| persistent storage. (On Posix systems, this is implemented by calling either | |||
| `fsync(...)` or `fdatasync(...)` or `msync(..., MS_SYNC)` before the write | |||
| operation returns.) | |||
| ```c++ | |||
| leveldb::WriteOptions write_options; | |||
| write_options.sync = true; | |||
| db->Put(write_options, ...); | |||
| ``` | |||
| Asynchronous writes are often more than a thousand times as fast as synchronous | |||
| writes. The downside of asynchronous writes is that a crash of the machine may | |||
| cause the last few updates to be lost. Note that a crash of just the writing | |||
| process (i.e., not a reboot) will not cause any loss since even when sync is | |||
| false, an update is pushed from the process memory into the operating system | |||
| before it is considered done. | |||
| Asynchronous writes can often be used safely. For example, when loading a large | |||
| amount of data into the database you can handle lost updates by restarting the | |||
| bulk load after a crash. A hybrid scheme is also possible where every Nth write | |||
| is synchronous, and in the event of a crash, the bulk load is restarted just | |||
| after the last synchronous write finished by the previous run. (The synchronous | |||
| write can update a marker that describes where to restart on a crash.) | |||
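| A sketch of that hybrid scheme, assuming the loader can derive a restart | |||
| marker from its input position (`kSyncEvery`, `num_records`, `KeyAt`, | |||
| `ValueAt`, and `ProgressMarkerFor` are all hypothetical): | |||
| ```c++ | |||
| // Hypothetical bulk load: every Nth write is synchronous and also | |||
| // records how far the load has progressed. | |||
| const int kSyncEvery = 1000;  // assumed interval between synchronous writes | |||
| leveldb::Status s; | |||
| for (int i = 0; s.ok() && i < num_records; i++) { | |||
|   leveldb::WriteOptions opts; | |||
|   opts.sync = ((i + 1) % kSyncEvery == 0); | |||
|   if (opts.sync) { | |||
|     leveldb::WriteBatch batch; | |||
|     batch.Put(KeyAt(i), ValueAt(i)); | |||
|     batch.Put("bulk_load_progress", ProgressMarkerFor(i));  // restart point | |||
|     s = db->Write(opts, &batch); | |||
|   } else { | |||
|     s = db->Put(opts, KeyAt(i), ValueAt(i)); | |||
|   } | |||
| } | |||
| ``` | |||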
| `WriteBatch` provides an alternative to asynchronous writes. Multiple updates | |||
| may be placed in the same WriteBatch and applied together using a synchronous | |||
| write (i.e., `write_options.sync` is set to true). The extra cost of the | |||
| synchronous write will be amortized across all of the writes in the batch. | |||
| ## Concurrency | |||
| A database may only be opened by one process at a time. The leveldb | |||
| implementation acquires a lock from the operating system to prevent misuse. | |||
| Within a single process, the same `leveldb::DB` object may be safely shared by | |||
| multiple concurrent threads. I.e., different threads may write into or fetch | |||
| iterators or call `Get` on the same database without any external synchronization | |||
| (the leveldb implementation will automatically do the required synchronization). | |||
| However other objects (like `Iterator` and `WriteBatch`) may require external | |||
| synchronization. If two threads share such an object, they must protect access | |||
| to it using their own locking protocol. More details are available in the public | |||
| header files. | |||
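| For instance, two threads may write through the same `leveldb::DB` pointer with | |||
| no extra locking. A minimal sketch, assuming C++11 `std::thread`: | |||
| ```c++ | |||
| #include <thread> | |||
| // Both threads share one DB object; leveldb does the required internal | |||
| // synchronization for the Put calls. | |||
| void Writer(leveldb::DB* db, const std::string& prefix) { | |||
|   for (int i = 0; i < 100; i++) { | |||
|     db->Put(leveldb::WriteOptions(), prefix + std::to_string(i), "value"); | |||
|   } | |||
| } | |||
| ... | |||
| std::thread t1(Writer, db, std::string("a-")); | |||
| std::thread t2(Writer, db, std::string("b-")); | |||
| t1.join(); | |||
| t2.join(); | |||
| ``` | |||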
| ## Iteration | |||
| The following example demonstrates how to print all key,value pairs in a | |||
| database. | |||
| ```c++ | |||
| leveldb::Iterator* it = db->NewIterator(leveldb::ReadOptions()); | |||
| for (it->SeekToFirst(); it->Valid(); it->Next()) { | |||
| cout << it->key().ToString() << ": " << it->value().ToString() << endl; | |||
| } | |||
| assert(it->status().ok()); // Check for any errors found during the scan | |||
| delete it; | |||
| ``` | |||
| The following variation shows how to process just the keys in the range | |||
| `[start,limit)`: | |||
| ```c++ | |||
| for (it->Seek(start); | |||
| it->Valid() && it->key().ToString() < limit; | |||
| it->Next()) { | |||
| ... | |||
| } | |||
| ``` | |||
| You can also process entries in reverse order. (Caveat: reverse iteration may be | |||
| somewhat slower than forward iteration.) | |||
| ```c++ | |||
| for (it->SeekToLast(); it->Valid(); it->Prev()) { | |||
| ... | |||
| } | |||
| ``` | |||
| ## Snapshots | |||
| Snapshots provide consistent read-only views over the entire state of the | |||
| key-value store. `ReadOptions::snapshot` may be non-NULL to indicate that a | |||
| read should operate on a particular version of the DB state. If | |||
| `ReadOptions::snapshot` is NULL, the read will operate on an implicit snapshot | |||
| of the current state. | |||
| Snapshots are created by the `DB::GetSnapshot()` method: | |||
| ```c++ | |||
| leveldb::ReadOptions options; | |||
| options.snapshot = db->GetSnapshot(); | |||
| ... apply some updates to db ... | |||
| leveldb::Iterator* iter = db->NewIterator(options); | |||
| ... read using iter to view the state when the snapshot was created ... | |||
| delete iter; | |||
| db->ReleaseSnapshot(options.snapshot); | |||
| ``` | |||
| Note that when a snapshot is no longer needed, it should be released using the | |||
| `DB::ReleaseSnapshot` interface. This allows the implementation to get rid of | |||
| state that was being maintained just to support reading as of that snapshot. | |||
| ## Slice | |||
| The return values of the `it->key()` and `it->value()` calls above are instances | |||
| of the `leveldb::Slice` type. Slice is a simple structure that contains a length | |||
| and a pointer to an external byte array. Returning a Slice is a cheaper | |||
| alternative to returning a `std::string` since we do not need to copy | |||
| potentially large keys and values. In addition, leveldb methods do not return | |||
| null-terminated C-style strings since leveldb keys and values are allowed to | |||
| contain `'\0'` bytes. | |||
| C++ strings and null-terminated C-style strings can be easily converted to a | |||
| Slice: | |||
| ```c++ | |||
| leveldb::Slice s1 = "hello"; | |||
| std::string str("world"); | |||
| leveldb::Slice s2 = str; | |||
| ``` | |||
| A Slice can be easily converted back to a C++ string: | |||
| ```c++ | |||
| std::string str = s1.ToString(); | |||
| assert(str == std::string("hello")); | |||
| ``` | |||
| Be careful when using Slices since it is up to the caller to ensure that the | |||
| external byte array into which the Slice points remains live while the Slice is | |||
| in use. For example, the following is buggy: | |||
| ```c++ | |||
| leveldb::Slice slice; | |||
| if (...) { | |||
| std::string str = ...; | |||
| slice = str; | |||
| } | |||
| Use(slice); | |||
| ``` | |||
| When control leaves the `if` block, `str` is destroyed and the backing storage | |||
| for `slice` disappears. | |||
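| A safe variant keeps the backing string alive for as long as the Slice is in | |||
| use: | |||
| ```c++ | |||
| // Safe variant of the example above: `str` outlives every use of `slice`. | |||
| std::string str = ...; | |||
| leveldb::Slice slice = str; | |||
| Use(slice); | |||
| ``` | |||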
| ## Comparators | |||
| The preceding examples used the default ordering function for keys, which orders | |||
| bytes lexicographically. You can however supply a custom comparator when opening | |||
| a database. For example, suppose each database key consists of two numbers and | |||
| we should sort by the first number, breaking ties by the second number. First, | |||
| define a proper subclass of `leveldb::Comparator` that expresses these rules: | |||
| ```c++ | |||
| class TwoPartComparator : public leveldb::Comparator { | |||
| public: | |||
| // Three-way comparison function: | |||
| // if a < b: negative result | |||
| // if a > b: positive result | |||
| // else: zero result | |||
| int Compare(const leveldb::Slice& a, const leveldb::Slice& b) const { | |||
| int a1, a2, b1, b2; | |||
| ParseKey(a, &a1, &a2); | |||
| ParseKey(b, &b1, &b2); | |||
| if (a1 < b1) return -1; | |||
| if (a1 > b1) return +1; | |||
| if (a2 < b2) return -1; | |||
| if (a2 > b2) return +1; | |||
| return 0; | |||
| } | |||
| // Ignore the following methods for now: | |||
| const char* Name() const { return "TwoPartComparator"; } | |||
| void FindShortestSeparator(std::string*, const leveldb::Slice&) const {} | |||
| void FindShortSuccessor(std::string*) const {} | |||
| }; | |||
| ``` | |||
| Now create a database using this custom comparator: | |||
| ```c++ | |||
| TwoPartComparator cmp; | |||
| leveldb::DB* db; | |||
| leveldb::Options options; | |||
| options.create_if_missing = true; | |||
| options.comparator = &cmp; | |||
| leveldb::Status status = leveldb::DB::Open(options, "/tmp/testdb", &db); | |||
| ... | |||
| ``` | |||
| ### Backwards compatibility | |||
| The result of the comparator's `Name` method is attached to the database when it | |||
| is created, and is checked on every subsequent database open. If the name | |||
| changes, the `leveldb::DB::Open` call will fail. Therefore, change the name if | |||
| and only if the new key format and comparison function are incompatible with | |||
| existing databases, and it is ok to discard the contents of all existing | |||
| databases. | |||
| You can however still gradually evolve your key format over time with a little | |||
| bit of pre-planning. For example, you could store a version number at the end of | |||
| each key (one byte should suffice for most uses). When you wish to switch to a | |||
| new key format (e.g., adding an optional third part to the keys processed by | |||
| `TwoPartComparator`), (a) keep the same comparator name, (b) increment the | |||
| version number for new keys, and (c) change the comparator function so it uses the | |||
| version numbers found in the keys to decide how to interpret them. | |||
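| A hypothetical sketch of step (c), where the comparator inspects the trailing | |||
| version byte before parsing (`Parts`, `ParseV1`, `ParseV2`, and `CompareParts` | |||
| are assumed helpers): | |||
| ```c++ | |||
| // Hypothetical versioned Compare: version-1 and version-2 keys coexist | |||
| // because the trailing byte says how to parse each key. | |||
| int Compare(const leveldb::Slice& a, const leveldb::Slice& b) const { | |||
|   Parts pa = (a[a.size() - 1] == 1) ? ParseV1(a) : ParseV2(a); | |||
|   Parts pb = (b[b.size() - 1] == 1) ? ParseV1(b) : ParseV2(b); | |||
|   return CompareParts(pa, pb); | |||
| } | |||
| ``` | |||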
| ## Performance | |||
| Performance can be tuned by changing the default values of the types defined in | |||
| `include/leveldb/options.h`. | |||
| ### Block size | |||
| leveldb groups adjacent keys together into the same block and such a block is | |||
| the unit of transfer to and from persistent storage. The default block size is | |||
| approximately 4096 uncompressed bytes. Applications that mostly do bulk scans | |||
| over the contents of the database may wish to increase this size. Applications | |||
| that do a lot of point reads of small values may wish to switch to a smaller | |||
| block size if performance measurements indicate an improvement. There isn't much | |||
| benefit in using blocks smaller than one kilobyte, or larger than a few | |||
| megabytes. Also note that compression will be more effective with larger block | |||
| sizes. | |||
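| The block size is the `block_size` field of `leveldb::Options`. For a scan-heavy | |||
| workload one might try, say, 64KB (the figure is only an example to benchmark | |||
| against the default): | |||
| ```c++ | |||
| leveldb::Options options; | |||
| options.block_size = 64 * 1024;  // example: larger blocks for bulk scans | |||
| leveldb::DB* db; | |||
| leveldb::Status status = leveldb::DB::Open(options, "/tmp/testdb", &db); | |||
| ``` | |||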
| ### Compression | |||
| Each block is individually compressed before being written to persistent | |||
| storage. Compression is on by default since the default compression method is | |||
| very fast, and is automatically disabled for uncompressible data. In rare cases, | |||
| applications may want to disable compression entirely, but should only do so if | |||
| benchmarks show a performance improvement: | |||
| ```c++ | |||
| leveldb::Options options; | |||
| options.compression = leveldb::kNoCompression; | |||
| ... leveldb::DB::Open(options, name, ...) .... | |||
| ``` | |||
| ### Cache | |||
| The contents of the database are stored in a set of files in the filesystem and | |||
| each file stores a sequence of compressed blocks. If `options.block_cache` is | |||
| non-NULL, it is used to cache frequently used uncompressed block contents. | |||
| ```c++ | |||
| #include "leveldb/cache.h" | |||
| leveldb::Options options; | |||
| options.block_cache = leveldb::NewLRUCache(100 * 1048576); // 100MB cache | |||
| leveldb::DB* db; | |||
| leveldb::DB::Open(options, name, &db); | |||
| ... use the db ... | |||
| delete db; | |||
| delete options.block_cache; | |||
| ``` | |||
| Note that the cache holds uncompressed data, and therefore it should be sized | |||
| according to application level data sizes, without any reduction from | |||
| compression. (Caching of compressed blocks is left to the operating system | |||
| buffer cache, or any custom `Env` implementation provided by the client.) | |||
| When performing a bulk read, the application may wish to disable caching so that | |||
| the data processed by the bulk read does not end up displacing most of the | |||
| cached contents. A per-iterator option can be used to achieve this: | |||
| ```c++ | |||
| leveldb::ReadOptions options; | |||
| options.fill_cache = false; | |||
| leveldb::Iterator* it = db->NewIterator(options); | |||
| for (it->SeekToFirst(); it->Valid(); it->Next()) { | |||
| ... | |||
| } | |||
| ``` | |||
| ### Key Layout | |||
| Note that the unit of disk transfer and caching is a block. Adjacent keys | |||
| (according to the database sort order) will usually be placed in the same block. | |||
| Therefore the application can improve its performance by placing keys that are | |||
| accessed together near each other and placing infrequently used keys in a | |||
| separate region of the key space. | |||
| For example, suppose we are implementing a simple file system on top of leveldb. | |||
| The types of entries we might wish to store are: | |||
|     filename -> permission-bits, length, list of file_block_ids | |||
|     file_block_id -> data | |||
| We might want to prefix `filename` keys with one letter (say '/') and the | |||
| `file_block_id` keys with a different letter (say '0') so that scans over just | |||
| the metadata do not force us to fetch and cache bulky file contents. | |||
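| A sketch of such a key layout for the file-system example (the helper names are | |||
| hypothetical): | |||
| ```c++ | |||
| #include <string> | |||
| // Metadata keys share the '/' prefix and block-data keys the '0' prefix, | |||
| // so a scan over metadata never pages in file contents. | |||
| std::string MetadataKey(const std::string& filename) { | |||
|   return "/" + filename; | |||
| } | |||
| std::string DataKey(const std::string& file_block_id) { | |||
|   return "0" + file_block_id; | |||
| } | |||
| ``` | |||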
| ### Filters | |||
| Because of the way leveldb data is organized on disk, a single `Get()` call may | |||
| involve multiple reads from disk. The optional `FilterPolicy` mechanism can be | |||
| used to reduce the number of disk reads substantially. | |||
| ```c++ | |||
| leveldb::Options options; | |||
| options.filter_policy = NewBloomFilterPolicy(10); | |||
| leveldb::DB* db; | |||
| leveldb::DB::Open(options, "/tmp/testdb", &db); | |||
| ... use the database ... | |||
| delete db; | |||
| delete options.filter_policy; | |||
| ``` | |||
| The preceding code associates a Bloom filter based filtering policy with the | |||
| database. Bloom filter based filtering relies on keeping some number of bits of | |||
| data in memory per key (in this case 10 bits per key since that is the argument | |||
we passed to `NewBloomFilterPolicy`). This filter will reduce the number of
unnecessary disk reads needed for `Get()` calls by a factor of approximately
100: with 10 bits per key, a Bloom filter's false-positive rate is roughly 1%.
Increasing the bits per key will lead to a larger reduction at the cost of more
memory usage. We recommend that applications whose working set does not fit in
memory and that do a lot of random reads set a filter policy.
| If you are using a custom comparator, you should ensure that the filter policy | |||
| you are using is compatible with your comparator. For example, consider a | |||
| comparator that ignores trailing spaces when comparing keys. | |||
| `NewBloomFilterPolicy` must not be used with such a comparator. Instead, the | |||
| application should provide a custom filter policy that also ignores trailing | |||
| spaces. For example: | |||
| ```c++ | |||
#include <vector>

#include "leveldb/filter_policy.h"

class CustomFilterPolicy : public leveldb::FilterPolicy {
 private:
  const leveldb::FilterPolicy* builtin_policy_;

 public:
  CustomFilterPolicy() : builtin_policy_(leveldb::NewBloomFilterPolicy(10)) {}
  ~CustomFilterPolicy() { delete builtin_policy_; }

  const char* Name() const { return "IgnoreTrailingSpacesFilter"; }

  void CreateFilter(const leveldb::Slice* keys, int n,
                    std::string* dst) const {
    // Use the builtin Bloom filter code after removing trailing spaces;
    // RemoveTrailingSpaces() is assumed to be supplied by the application.
    std::vector<leveldb::Slice> trimmed(n);
    for (int i = 0; i < n; i++) {
      trimmed[i] = RemoveTrailingSpaces(keys[i]);
    }
    builtin_policy_->CreateFilter(trimmed.data(), n, dst);
  }

  bool KeyMayMatch(const leveldb::Slice& key,
                   const leveldb::Slice& filter) const {
    // Trim lookup keys the same way so probes match what was stored.
    return builtin_policy_->KeyMayMatch(RemoveTrailingSpaces(key), filter);
  }
};
| ``` | |||
Advanced applications may provide a filter policy that does not use a Bloom
filter but uses some other mechanism for summarizing a set of keys. See
`leveldb/filter_policy.h` for details.
| ## Checksums | |||
| leveldb associates checksums with all data it stores in the file system. There | |||
| are two separate controls provided over how aggressively these checksums are | |||
| verified: | |||
| `ReadOptions::verify_checksums` may be set to true to force checksum | |||
| verification of all data that is read from the file system on behalf of a | |||
| particular read. By default, no such verification is done. | |||
| `Options::paranoid_checks` may be set to true before opening a database to make | |||
| the database implementation raise an error as soon as it detects an internal | |||
| corruption. Depending on which portion of the database has been corrupted, the | |||
| error may be raised when the database is opened, or later by another database | |||
| operation. By default, paranoid checking is off so that the database can be used | |||
| even if parts of its persistent storage have been corrupted. | |||
If a database is corrupted (perhaps it cannot be opened when paranoid checking
is turned on), the `leveldb::RepairDB` function may be used to recover as much
of the data as possible.
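For example, a minimal sketch wiring up both controls and falling back to
`RepairDB` on a failed open (the path is illustrative):
```c++
#include "leveldb/db.h"
#include "leveldb/options.h"

leveldb::Options options;
options.paranoid_checks = true;  // Surface internal corruption eagerly.

leveldb::DB* db;
leveldb::Status s = leveldb::DB::Open(options, "/tmp/testdb", &db);
if (!s.ok()) {
  // Attempt to salvage as much data as possible, then retry the open.
  s = leveldb::RepairDB("/tmp/testdb", options);
}

leveldb::ReadOptions read_options;
read_options.verify_checksums = true;  // Verify checksums for this read only.
```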
| ## Approximate Sizes | |||
The `GetApproximateSizes` method can be used to get the approximate number of
bytes of file system space used by one or more key ranges.
| ```c++ | |||
| leveldb::Range ranges[2]; | |||
| ranges[0] = leveldb::Range("a", "c"); | |||
| ranges[1] = leveldb::Range("x", "z"); | |||
| uint64_t sizes[2]; | |||
| leveldb::Status s = db->GetApproximateSizes(ranges, 2, sizes); | |||
| ``` | |||
| The preceding call will set `sizes[0]` to the approximate number of bytes of | |||
| file system space used by the key range `[a..c)` and `sizes[1]` to the | |||
| approximate number of bytes used by the key range `[x..z)`. | |||
| ## Environment | |||
| All file operations (and other operating system calls) issued by the leveldb | |||
| implementation are routed through a `leveldb::Env` object. Sophisticated clients | |||
| may wish to provide their own Env implementation to get better control. | |||
| For example, an application may introduce artificial delays in the file IO | |||
| paths to limit the impact of leveldb on other activities in the system. | |||
| ```c++ | |||
| class SlowEnv : public leveldb::Env { | |||
| ... implementation of the Env interface ... | |||
| }; | |||
| SlowEnv env; | |||
| leveldb::Options options; | |||
| options.env = &env; | |||
| Status s = leveldb::DB::Open(options, ...); | |||
| ``` | |||
| ## Porting | |||
| leveldb may be ported to a new platform by providing platform specific | |||
| implementations of the types/methods/functions exported by | |||
| `leveldb/port/port.h`. See `leveldb/port/port_example.h` for more details. | |||
| In addition, the new platform may need a new default `leveldb::Env` | |||
implementation. See `leveldb/util/env_posix.cc` for an example.
| ## Other Information | |||
| Details about the leveldb implementation may be found in the following | |||
| documents: | |||
| 1. [Implementation notes](impl.md) | |||
| 2. [Format of an immutable Table file](table_format.md) | |||
| 3. [Format of a log file](log_format.md) | |||
| @ -0,0 +1,75 @@ | |||
| leveldb Log format | |||
| ================== | |||
| The log file contents are a sequence of 32KB blocks. The only exception is that | |||
| the tail of the file may contain a partial block. | |||
| Each block consists of a sequence of records: | |||
    block := record* trailer?

    record :=
      checksum: uint32     // crc32c of type and data[] ; little-endian
      length: uint16       // little-endian
      type: uint8          // One of FULL, FIRST, MIDDLE, LAST
      data: uint8[length]
| A record never starts within the last six bytes of a block (since it won't fit). | |||
| Any leftover bytes here form the trailer, which must consist entirely of zero | |||
| bytes and must be skipped by readers. | |||
| Aside: if exactly seven bytes are left in the current block, and a new non-zero | |||
| length record is added, the writer must emit a FIRST record (which contains zero | |||
| bytes of user data) to fill up the trailing seven bytes of the block and then | |||
| emit all of the user data in subsequent blocks. | |||
More types may be added in the future. Some readers may skip record types they
do not understand; others may report that some data was skipped.
    FULL == 1
    FIRST == 2
    MIDDLE == 3
    LAST == 4
| The FULL record contains the contents of an entire user record. | |||
| FIRST, MIDDLE, LAST are types used for user records that have been split into | |||
| multiple fragments (typically because of block boundaries). FIRST is the type | |||
| of the first fragment of a user record, LAST is the type of the last fragment of | |||
| a user record, and MIDDLE is the type of all interior fragments of a user | |||
| record. | |||
| Example: consider a sequence of user records: | |||
    A: length 1000
    B: length 97270
    C: length 8000
| **A** will be stored as a FULL record in the first block. | |||
| **B** will be split into three fragments: first fragment occupies the rest of | |||
| the first block, second fragment occupies the entirety of the second block, and | |||
| the third fragment occupies a prefix of the third block. This will leave six | |||
| bytes free in the third block, which will be left empty as the trailer. | |||
| **C** will be stored as a FULL record in the fourth block. | |||
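A minimal sketch of the fragmentation rule behind this example (the constants
follow the format above; the function name is illustrative):
```c++
#include <algorithm>
#include <cassert>
#include <cstddef>

constexpr size_t kBlockSize = 32768;       // 32KB blocks.
constexpr size_t kHeaderSize = 4 + 2 + 1;  // checksum + length + type.

enum RecordType { kFull = 1, kFirst = 2, kMiddle = 3, kLast = 4 };

// Decides the type and payload size of the next fragment, given the current
// offset within a block and the user data still to be written. The caller must
// first zero-pad the trailer when fewer than kHeaderSize bytes remain. Note
// that with exactly seven bytes left, this emits a FIRST record carrying zero
// bytes of user data, matching the aside above.
RecordType NextFragment(size_t block_offset, size_t data_left, bool is_first,
                        size_t* fragment_size) {
  assert(block_offset + kHeaderSize <= kBlockSize);
  *fragment_size = std::min(kBlockSize - block_offset - kHeaderSize, data_left);
  const bool is_last = (*fragment_size == data_left);
  if (is_first && is_last) return kFull;
  if (is_first) return kFirst;
  if (is_last) return kLast;
  return kMiddle;
}
```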
| ---- | |||
| ## Some benefits over the recordio format: | |||
1. We do not need any heuristics for resyncing - just go to the next block
   boundary
| and scan. If there is a corruption, skip to the next block. As a | |||
| side-benefit, we do not get confused when part of the contents of one log | |||
| file are embedded as a record inside another log file. | |||
| 2. Splitting at approximate boundaries (e.g., for mapreduce) is simple: find the | |||
| next block boundary and skip records until we hit a FULL or FIRST record. | |||
| 3. We do not need extra buffering for large records. | |||
| ## Some downsides compared to recordio format: | |||
| 1. No packing of tiny records. This could be fixed by adding a new record type, | |||
| so it is a shortcoming of the current implementation, not necessarily the | |||
| format. | |||
| 2. No compression. Again, this could be fixed by adding new record types. | |||
| @ -1,75 +0,0 @@ | |||
| The log file contents are a sequence of 32KB blocks. The only | |||
| exception is that the tail of the file may contain a partial block. | |||
| Each block consists of a sequence of records: | |||
| block := record* trailer? | |||
| record := | |||
| checksum: uint32 // crc32c of type and data[] ; little-endian | |||
| length: uint16 // little-endian | |||
| type: uint8 // One of FULL, FIRST, MIDDLE, LAST | |||
| data: uint8[length] | |||
| A record never starts within the last six bytes of a block (since it | |||
| won't fit). Any leftover bytes here form the trailer, which must | |||
| consist entirely of zero bytes and must be skipped by readers. | |||
| Aside: if exactly seven bytes are left in the current block, and a new | |||
| non-zero length record is added, the writer must emit a FIRST record | |||
| (which contains zero bytes of user data) to fill up the trailing seven | |||
| bytes of the block and then emit all of the user data in subsequent | |||
| blocks. | |||
| More types may be added in the future. Some Readers may skip record | |||
| types they do not understand, others may report that some data was | |||
| skipped. | |||
| FULL == 1 | |||
| FIRST == 2 | |||
| MIDDLE == 3 | |||
| LAST == 4 | |||
| The FULL record contains the contents of an entire user record. | |||
| FIRST, MIDDLE, LAST are types used for user records that have been | |||
| split into multiple fragments (typically because of block boundaries). | |||
| FIRST is the type of the first fragment of a user record, LAST is the | |||
| type of the last fragment of a user record, and MIDDLE is the type of | |||
| all interior fragments of a user record. | |||
| Example: consider a sequence of user records: | |||
| A: length 1000 | |||
| B: length 97270 | |||
| C: length 8000 | |||
| A will be stored as a FULL record in the first block. | |||
| B will be split into three fragments: first fragment occupies the rest | |||
| of the first block, second fragment occupies the entirety of the | |||
| second block, and the third fragment occupies a prefix of the third | |||
| block. This will leave six bytes free in the third block, which will | |||
| be left empty as the trailer. | |||
| C will be stored as a FULL record in the fourth block. | |||
| =================== | |||
| Some benefits over the recordio format: | |||
| (1) We do not need any heuristics for resyncing - just go to next | |||
| block boundary and scan. If there is a corruption, skip to the next | |||
| block. As a side-benefit, we do not get confused when part of the | |||
| contents of one log file are embedded as a record inside another log | |||
| file. | |||
| (2) Splitting at approximate boundaries (e.g., for mapreduce) is | |||
| simple: find the next block boundary and skip records until we | |||
| hit a FULL or FIRST record. | |||
| (3) We do not need extra buffering for large records. | |||
| Some downsides compared to recordio format: | |||
| (1) No packing of tiny records. This could be fixed by adding a new | |||
| record type, so it is a shortcoming of the current implementation, | |||
| not necessarily the format. | |||
| (2) No compression. Again, this could be fixed by adding new record types. | |||
| @ -0,0 +1,107 @@ | |||
| leveldb File format | |||
| =================== | |||
    <beginning_of_file>
    [data block 1]
    [data block 2]
    ...
    [data block N]
    [meta block 1]
    ...
    [meta block K]
    [metaindex block]
    [index block]
    [Footer]        (fixed size; starts at file_size - sizeof(Footer))
    <end_of_file>
| The file contains internal pointers. Each such pointer is called | |||
| a BlockHandle and contains the following information: | |||
    offset: varint64
    size:   varint64
| See [varints](https://developers.google.com/protocol-buffers/docs/encoding#varints) | |||
| for an explanation of varint64 format. | |||
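As a rough illustration (a sketch only; leveldb's real helpers live in
`util/coding.h` and `table/format.h`), a BlockHandle encodes to at most 20
bytes:
```c++
#include <cstdint>
#include <string>

// Sketch of varint64 encoding: seven bits per byte, least-significant group
// first, with the high bit of each byte marking continuation.
void PutVarint64(std::string* dst, uint64_t v) {
  while (v >= 128) {
    dst->push_back(static_cast<char>(v | 128));
    v >>= 7;
  }
  dst->push_back(static_cast<char>(v));
}

struct BlockHandle {
  uint64_t offset;
  uint64_t size;

  // Two varint64s of at most 10 bytes each, hence a 20-byte maximum encoding.
  void EncodeTo(std::string* dst) const {
    PutVarint64(dst, offset);
    PutVarint64(dst, size);
  }
};
```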
| 1. The sequence of key/value pairs in the file are stored in sorted | |||
| order and partitioned into a sequence of data blocks. These blocks | |||
| come one after another at the beginning of the file. Each data block | |||
| is formatted according to the code in `block_builder.cc`, and then | |||
| optionally compressed. | |||
| 2. After the data blocks we store a bunch of meta blocks. The | |||
| supported meta block types are described below. More meta block types | |||
| may be added in the future. Each meta block is again formatted using | |||
| `block_builder.cc` and then optionally compressed. | |||
| 3. A "metaindex" block. It contains one entry for every other meta | |||
| block where the key is the name of the meta block and the value is a | |||
| BlockHandle pointing to that meta block. | |||
| 4. An "index" block. This block contains one entry per data block, | |||
| where the key is a string >= last key in that data block and before | |||
| the first key in the successive data block. The value is the | |||
| BlockHandle for the data block. | |||
| 5. At the very end of the file is a fixed length footer that contains | |||
| the BlockHandle of the metaindex and index blocks as well as a magic number. | |||
    metaindex_handle: char[p];      // Block handle for metaindex
    index_handle:     char[q];      // Block handle for index
    padding:          char[40-p-q]; // zeroed bytes to make fixed length
                                    // (40==2*BlockHandle::kMaxEncodedLength)
    magic:            fixed64;      // == 0xdb4775248b80fb57 (little-endian)
| ## "filter" Meta Block | |||
| If a `FilterPolicy` was specified when the database was opened, a | |||
| filter block is stored in each table. The "metaindex" block contains | |||
| an entry that maps from `filter.<N>` to the BlockHandle for the filter | |||
| block where `<N>` is the string returned by the filter policy's | |||
| `Name()` method. | |||
| The filter block stores a sequence of filters, where filter i contains | |||
| the output of `FilterPolicy::CreateFilter()` on all keys that are stored | |||
| in a block whose file offset falls within the range | |||
    [ i*base ... (i+1)*base-1 ]
| Currently, "base" is 2KB. So for example, if blocks X and Y start in | |||
| the range `[ 0KB .. 2KB-1 ]`, all of the keys in X and Y will be | |||
| converted to a filter by calling `FilterPolicy::CreateFilter()`, and the | |||
| resulting filter will be stored as the first filter in the filter | |||
| block. | |||
| The filter block is formatted as follows: | |||
    [filter 0]
    [filter 1]
    [filter 2]
    ...
    [filter N-1]

    [offset of filter 0]                  : 4 bytes
    [offset of filter 1]                  : 4 bytes
    [offset of filter 2]                  : 4 bytes
    ...
    [offset of filter N-1]                : 4 bytes

    [offset of beginning of offset array] : 4 bytes
    lg(base)                              : 1 byte
| The offset array at the end of the filter block allows efficient | |||
| mapping from a data block offset to the corresponding filter. | |||
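A sketch of that mapping (parameter and function names are illustrative):
```c++
#include <cstdint>

// The filter covering a data block is found by dividing the block's file
// offset by "base", i.e. shifting by lg(base). With base == 2KB, lg_base == 11.
uint32_t FilterIndexFor(uint64_t block_offset, uint8_t lg_base) {
  return static_cast<uint32_t>(block_offset >> lg_base);
}
```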
| ## "stats" Meta Block | |||
| This meta block contains a bunch of stats. The key is the name | |||
| of the statistic. The value contains the statistic. | |||
| TODO(postrelease): record following stats. | |||
    data size
    index size
    key size (uncompressed)
    value size (uncompressed)
    number of entries
    number of data blocks
| @ -1,104 +0,0 @@ | |||
| File format | |||
| =========== | |||
| <beginning_of_file> | |||
| [data block 1] | |||
| [data block 2] | |||
| ... | |||
| [data block N] | |||
| [meta block 1] | |||
| ... | |||
| [meta block K] | |||
| [metaindex block] | |||
| [index block] | |||
| [Footer] (fixed size; starts at file_size - sizeof(Footer)) | |||
| <end_of_file> | |||
| The file contains internal pointers. Each such pointer is called | |||
| a BlockHandle and contains the following information: | |||
| offset: varint64 | |||
| size: varint64 | |||
| See https://developers.google.com/protocol-buffers/docs/encoding#varints | |||
| for an explanation of varint64 format. | |||
| (1) The sequence of key/value pairs in the file are stored in sorted | |||
| order and partitioned into a sequence of data blocks. These blocks | |||
| come one after another at the beginning of the file. Each data block | |||
| is formatted according to the code in block_builder.cc, and then | |||
| optionally compressed. | |||
| (2) After the data blocks we store a bunch of meta blocks. The | |||
| supported meta block types are described below. More meta block types | |||
| may be added in the future. Each meta block is again formatted using | |||
| block_builder.cc and then optionally compressed. | |||
| (3) A "metaindex" block. It contains one entry for every other meta | |||
| block where the key is the name of the meta block and the value is a | |||
| BlockHandle pointing to that meta block. | |||
| (4) An "index" block. This block contains one entry per data block, | |||
| where the key is a string >= last key in that data block and before | |||
| the first key in the successive data block. The value is the | |||
| BlockHandle for the data block. | |||
(5) At the very end of the file is a fixed length footer that contains
| the BlockHandle of the metaindex and index blocks as well as a magic number. | |||
| metaindex_handle: char[p]; // Block handle for metaindex | |||
| index_handle: char[q]; // Block handle for index | |||
| padding: char[40-p-q]; // zeroed bytes to make fixed length | |||
| // (40==2*BlockHandle::kMaxEncodedLength) | |||
| magic: fixed64; // == 0xdb4775248b80fb57 (little-endian) | |||
| "filter" Meta Block | |||
| ------------------- | |||
| If a "FilterPolicy" was specified when the database was opened, a | |||
| filter block is stored in each table. The "metaindex" block contains | |||
| an entry that maps from "filter.<N>" to the BlockHandle for the filter | |||
| block where "<N>" is the string returned by the filter policy's | |||
| "Name()" method. | |||
| The filter block stores a sequence of filters, where filter i contains | |||
| the output of FilterPolicy::CreateFilter() on all keys that are stored | |||
| in a block whose file offset falls within the range | |||
| [ i*base ... (i+1)*base-1 ] | |||
| Currently, "base" is 2KB. So for example, if blocks X and Y start in | |||
| the range [ 0KB .. 2KB-1 ], all of the keys in X and Y will be | |||
| converted to a filter by calling FilterPolicy::CreateFilter(), and the | |||
| resulting filter will be stored as the first filter in the filter | |||
| block. | |||
| The filter block is formatted as follows: | |||
| [filter 0] | |||
| [filter 1] | |||
| [filter 2] | |||
| ... | |||
| [filter N-1] | |||
| [offset of filter 0] : 4 bytes | |||
| [offset of filter 1] : 4 bytes | |||
| [offset of filter 2] : 4 bytes | |||
| ... | |||
| [offset of filter N-1] : 4 bytes | |||
| [offset of beginning of offset array] : 4 bytes | |||
| lg(base) : 1 byte | |||
| The offset array at the end of the filter block allows efficient | |||
| mapping from a data block offset to the corresponding filter. | |||
| "stats" Meta Block | |||
| ------------------ | |||
| This meta block contains a bunch of stats. The key is the name | |||
| of the statistic. The value contains the statistic. | |||
| TODO(postrelease): record following stats. | |||
| data size | |||
| index size | |||
| key size (uncompressed) | |||
| value size (uncompressed) | |||
| number of entries | |||
| number of data blocks | |||
| @ -0,0 +1,33 @@ | |||
| // Copyright (c) 2017 The LevelDB Authors. All rights reserved. | |||
| // Use of this source code is governed by a BSD-style license that can be | |||
| // found in the LICENSE file. See the AUTHORS file for names of contributors. | |||
| #ifndef STORAGE_LEVELDB_INCLUDE_EXPORT_H_ | |||
| #define STORAGE_LEVELDB_INCLUDE_EXPORT_H_ | |||
| #if !defined(LEVELDB_EXPORT) | |||
| #if defined(LEVELDB_SHARED_LIBRARY) | |||
| #if defined(_WIN32) | |||
| #if defined(LEVELDB_COMPILE_LIBRARY) | |||
| #define LEVELDB_EXPORT __declspec(dllexport) | |||
| #else | |||
| #define LEVELDB_EXPORT __declspec(dllimport) | |||
| #endif // defined(LEVELDB_COMPILE_LIBRARY) | |||
| #else // defined(_WIN32) | |||
| #if defined(LEVELDB_COMPILE_LIBRARY) | |||
| #define LEVELDB_EXPORT __attribute__((visibility("default"))) | |||
| #else | |||
| #define LEVELDB_EXPORT | |||
| #endif | |||
| #endif // defined(_WIN32) | |||
| #else // defined(LEVELDB_SHARED_LIBRARY) | |||
| #define LEVELDB_EXPORT | |||
| #endif | |||
| #endif // !defined(LEVELDB_EXPORT) | |||
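// Usage note (illustrative): public headers annotate each exported symbol
// with this macro, for example:
//
//   class LEVELDB_EXPORT DB {
//     ...
//   };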
| #endif // STORAGE_LEVELDB_INCLUDE_EXPORT_H_ | |||
| @ -0,0 +1,128 @@ | |||
| // Copyright (c) 2019 The LevelDB Authors. All rights reserved. | |||
| // Use of this source code is governed by a BSD-style license that can be | |||
| // found in the LICENSE file. See the AUTHORS file for names of contributors. | |||
| #include <cstdint> | |||
| #include <cstdlib> | |||
| #include <iostream> | |||
| #include <memory> | |||
| #include <string> | |||
| #include <vector> | |||
| #include "leveldb/db.h" | |||
| #include "leveldb/write_batch.h" | |||
| #include "util/testharness.h" | |||
| namespace leveldb { | |||
| namespace { | |||
// Returns a pseudo-random number in the range [0, max).
int GenerateRandomNumber(int max) { return std::rand() % max; }
// Returns a 1024-byte string whose first eight characters encode |index| and
// whose remaining bytes are random lowercase letters.
std::string CreateRandomString(int32_t index) {
| static const size_t len = 1024; | |||
| char bytes[len]; | |||
| size_t i = 0; | |||
| while (i < 8) { | |||
| bytes[i] = 'a' + ((index >> (4 * i)) & 0xf); | |||
| ++i; | |||
| } | |||
| while (i < sizeof(bytes)) { | |||
| bytes[i] = 'a' + GenerateRandomNumber(26); | |||
| ++i; | |||
| } | |||
| return std::string(bytes, sizeof(bytes)); | |||
| } | |||
| } // namespace | |||
| class Issue320 {}; | |||
| TEST(Issue320, Test) { | |||
| std::srand(0); | |||
| bool delete_before_put = false; | |||
| bool keep_snapshots = true; | |||
| std::vector<std::unique_ptr<std::pair<std::string, std::string>>> test_map( | |||
| 10000); | |||
| std::vector<Snapshot const*> snapshots(100, nullptr); | |||
| DB* db; | |||
| Options options; | |||
| options.create_if_missing = true; | |||
| std::string dbpath = test::TmpDir() + "/leveldb_issue320_test"; | |||
| ASSERT_OK(DB::Open(options, dbpath, &db)); | |||
| uint32_t target_size = 10000; | |||
| uint32_t num_items = 0; | |||
| uint32_t count = 0; | |||
| std::string key; | |||
| std::string value, old_value; | |||
| WriteOptions writeOptions; | |||
| ReadOptions readOptions; | |||
| while (count < 200000) { | |||
| if ((++count % 1000) == 0) { | |||
| std::cout << "count: " << count << std::endl; | |||
| } | |||
| int index = GenerateRandomNumber(test_map.size()); | |||
| WriteBatch batch; | |||
| if (test_map[index] == nullptr) { | |||
| num_items++; | |||
| test_map[index].reset(new std::pair<std::string, std::string>( | |||
| CreateRandomString(index), CreateRandomString(index))); | |||
| batch.Put(test_map[index]->first, test_map[index]->second); | |||
| } else { | |||
| ASSERT_OK(db->Get(readOptions, test_map[index]->first, &old_value)); | |||
| if (old_value != test_map[index]->second) { | |||
| std::cout << "ERROR incorrect value returned by Get" << std::endl; | |||
| std::cout << " count=" << count << std::endl; | |||
| std::cout << " old value=" << old_value << std::endl; | |||
| std::cout << " test_map[index]->second=" << test_map[index]->second | |||
| << std::endl; | |||
| std::cout << " test_map[index]->first=" << test_map[index]->first | |||
| << std::endl; | |||
| std::cout << " index=" << index << std::endl; | |||
| ASSERT_EQ(old_value, test_map[index]->second); | |||
| } | |||
| if (num_items >= target_size && GenerateRandomNumber(100) > 30) { | |||
| batch.Delete(test_map[index]->first); | |||
| test_map[index] = nullptr; | |||
| --num_items; | |||
| } else { | |||
| test_map[index]->second = CreateRandomString(index); | |||
| if (delete_before_put) batch.Delete(test_map[index]->first); | |||
| batch.Put(test_map[index]->first, test_map[index]->second); | |||
| } | |||
| } | |||
| ASSERT_OK(db->Write(writeOptions, &batch)); | |||
| if (keep_snapshots && GenerateRandomNumber(10) == 0) { | |||
| int i = GenerateRandomNumber(snapshots.size()); | |||
| if (snapshots[i] != nullptr) { | |||
| db->ReleaseSnapshot(snapshots[i]); | |||
| } | |||
| snapshots[i] = db->GetSnapshot(); | |||
| } | |||
| } | |||
| for (Snapshot const* snapshot : snapshots) { | |||
| if (snapshot) { | |||
| db->ReleaseSnapshot(snapshot); | |||
| } | |||
| } | |||
| delete db; | |||
| DestroyDB(dbpath, options); | |||
| } | |||
| } // namespace leveldb | |||
| int main(int argc, char** argv) { return leveldb::test::RunAllTests(); } | |||
| @ -1,233 +0,0 @@ | |||
| // Copyright (c) 2011 The LevelDB Authors. All rights reserved. | |||
| // Use of this source code is governed by a BSD-style license that can be | |||
| // found in the LICENSE file. See the AUTHORS file for names of contributors. | |||
| // AtomicPointer provides storage for a lock-free pointer. | |||
| // Platform-dependent implementation of AtomicPointer: | |||
| // - If the platform provides a cheap barrier, we use it with raw pointers | |||
| // - If <atomic> is present (on newer versions of gcc, it is), we use | |||
| // a <atomic>-based AtomicPointer. However we prefer the memory | |||
| // barrier based version, because at least on a gcc 4.4 32-bit build | |||
| // on linux, we have encountered a buggy <atomic> implementation. | |||
| // Also, some <atomic> implementations are much slower than a memory-barrier | |||
| // based implementation (~16ns for <atomic> based acquire-load vs. ~1ns for | |||
| // a barrier based acquire-load). | |||
| // This code is based on atomicops-internals-* in Google's perftools: | |||
| // http://code.google.com/p/google-perftools/source/browse/#svn%2Ftrunk%2Fsrc%2Fbase | |||
| #ifndef PORT_ATOMIC_POINTER_H_ | |||
| #define PORT_ATOMIC_POINTER_H_ | |||
| #include <stdint.h> | |||
| #ifdef LEVELDB_ATOMIC_PRESENT | |||
| #include <atomic> | |||
| #endif | |||
| #ifdef OS_WIN | |||
| #include <windows.h> | |||
| #endif | |||
| #ifdef OS_MACOSX | |||
| #include <libkern/OSAtomic.h> | |||
| #endif | |||
| #if defined(_M_X64) || defined(__x86_64__) | |||
| #define ARCH_CPU_X86_FAMILY 1 | |||
| #elif defined(_M_IX86) || defined(__i386__) || defined(__i386) | |||
| #define ARCH_CPU_X86_FAMILY 1 | |||
| #elif defined(__ARMEL__) | |||
| #define ARCH_CPU_ARM_FAMILY 1 | |||
| #elif defined(__aarch64__) | |||
| #define ARCH_CPU_ARM64_FAMILY 1 | |||
| #elif defined(__ppc__) || defined(__powerpc__) || defined(__powerpc64__) | |||
| #define ARCH_CPU_PPC_FAMILY 1 | |||
| #endif | |||
| namespace leveldb { | |||
| namespace port { | |||
| // Define MemoryBarrier() if available | |||
| // Windows on x86 | |||
| #if defined(OS_WIN) && defined(COMPILER_MSVC) && defined(ARCH_CPU_X86_FAMILY) | |||
| // windows.h already provides a MemoryBarrier(void) macro | |||
| // http://msdn.microsoft.com/en-us/library/ms684208(v=vs.85).aspx | |||
| #define LEVELDB_HAVE_MEMORY_BARRIER | |||
| // Mac OS | |||
| #elif defined(OS_MACOSX) | |||
| inline void MemoryBarrier() { | |||
| OSMemoryBarrier(); | |||
| } | |||
| #define LEVELDB_HAVE_MEMORY_BARRIER | |||
| // Gcc on x86 | |||
| #elif defined(ARCH_CPU_X86_FAMILY) && defined(__GNUC__) | |||
| inline void MemoryBarrier() { | |||
| // See http://gcc.gnu.org/ml/gcc/2003-04/msg01180.html for a discussion on | |||
| // this idiom. Also see http://en.wikipedia.org/wiki/Memory_ordering. | |||
| __asm__ __volatile__("" : : : "memory"); | |||
| } | |||
| #define LEVELDB_HAVE_MEMORY_BARRIER | |||
| // Sun Studio | |||
| #elif defined(ARCH_CPU_X86_FAMILY) && defined(__SUNPRO_CC) | |||
| inline void MemoryBarrier() { | |||
| // See http://gcc.gnu.org/ml/gcc/2003-04/msg01180.html for a discussion on | |||
| // this idiom. Also see http://en.wikipedia.org/wiki/Memory_ordering. | |||
| asm volatile("" : : : "memory"); | |||
| } | |||
| #define LEVELDB_HAVE_MEMORY_BARRIER | |||
| // ARM Linux | |||
| #elif defined(ARCH_CPU_ARM_FAMILY) && defined(__linux__) | |||
| typedef void (*LinuxKernelMemoryBarrierFunc)(void); | |||
| // The Linux ARM kernel provides a highly optimized device-specific memory | |||
| // barrier function at a fixed memory address that is mapped in every | |||
| // user-level process. | |||
| // | |||
| // This beats using CPU-specific instructions which are, on single-core | |||
| // devices, un-necessary and very costly (e.g. ARMv7-A "dmb" takes more | |||
| // than 180ns on a Cortex-A8 like the one on a Nexus One). Benchmarking | |||
| // shows that the extra function call cost is completely negligible on | |||
| // multi-core devices. | |||
| // | |||
| inline void MemoryBarrier() { | |||
| (*(LinuxKernelMemoryBarrierFunc)0xffff0fa0)(); | |||
| } | |||
| #define LEVELDB_HAVE_MEMORY_BARRIER | |||
| // ARM64 | |||
| #elif defined(ARCH_CPU_ARM64_FAMILY) | |||
| inline void MemoryBarrier() { | |||
| asm volatile("dmb sy" : : : "memory"); | |||
| } | |||
| #define LEVELDB_HAVE_MEMORY_BARRIER | |||
| // PPC | |||
| #elif defined(ARCH_CPU_PPC_FAMILY) && defined(__GNUC__) | |||
| inline void MemoryBarrier() { | |||
| // TODO for some powerpc expert: is there a cheaper suitable variant? | |||
| // Perhaps by having separate barriers for acquire and release ops. | |||
| asm volatile("sync" : : : "memory"); | |||
| } | |||
| #define LEVELDB_HAVE_MEMORY_BARRIER | |||
| #endif | |||
| // AtomicPointer built using platform-specific MemoryBarrier() | |||
| #if defined(LEVELDB_HAVE_MEMORY_BARRIER) | |||
| class AtomicPointer { | |||
| private: | |||
| void* rep_; | |||
| public: | |||
| AtomicPointer() { } | |||
| explicit AtomicPointer(void* p) : rep_(p) {} | |||
| inline void* NoBarrier_Load() const { return rep_; } | |||
| inline void NoBarrier_Store(void* v) { rep_ = v; } | |||
| inline void* Acquire_Load() const { | |||
| void* result = rep_; | |||
| MemoryBarrier(); | |||
| return result; | |||
| } | |||
| inline void Release_Store(void* v) { | |||
| MemoryBarrier(); | |||
| rep_ = v; | |||
| } | |||
| }; | |||
// AtomicPointer based on <atomic>
| #elif defined(LEVELDB_ATOMIC_PRESENT) | |||
| class AtomicPointer { | |||
| private: | |||
| std::atomic<void*> rep_; | |||
| public: | |||
| AtomicPointer() { } | |||
| explicit AtomicPointer(void* v) : rep_(v) { } | |||
| inline void* Acquire_Load() const { | |||
| return rep_.load(std::memory_order_acquire); | |||
| } | |||
| inline void Release_Store(void* v) { | |||
| rep_.store(v, std::memory_order_release); | |||
| } | |||
| inline void* NoBarrier_Load() const { | |||
| return rep_.load(std::memory_order_relaxed); | |||
| } | |||
| inline void NoBarrier_Store(void* v) { | |||
| rep_.store(v, std::memory_order_relaxed); | |||
| } | |||
| }; | |||
| // Atomic pointer based on sparc memory barriers | |||
| #elif defined(__sparcv9) && defined(__GNUC__) | |||
| class AtomicPointer { | |||
| private: | |||
| void* rep_; | |||
| public: | |||
| AtomicPointer() { } | |||
| explicit AtomicPointer(void* v) : rep_(v) { } | |||
| inline void* Acquire_Load() const { | |||
| void* val; | |||
| __asm__ __volatile__ ( | |||
| "ldx [%[rep_]], %[val] \n\t" | |||
| "membar #LoadLoad|#LoadStore \n\t" | |||
| : [val] "=r" (val) | |||
| : [rep_] "r" (&rep_) | |||
| : "memory"); | |||
| return val; | |||
| } | |||
| inline void Release_Store(void* v) { | |||
| __asm__ __volatile__ ( | |||
| "membar #LoadStore|#StoreStore \n\t" | |||
| "stx %[v], [%[rep_]] \n\t" | |||
| : | |||
| : [rep_] "r" (&rep_), [v] "r" (v) | |||
| : "memory"); | |||
| } | |||
| inline void* NoBarrier_Load() const { return rep_; } | |||
| inline void NoBarrier_Store(void* v) { rep_ = v; } | |||
| }; | |||
| // Atomic pointer based on ia64 acq/rel | |||
| #elif defined(__ia64) && defined(__GNUC__) | |||
| class AtomicPointer { | |||
| private: | |||
| void* rep_; | |||
| public: | |||
| AtomicPointer() { } | |||
| explicit AtomicPointer(void* v) : rep_(v) { } | |||
| inline void* Acquire_Load() const { | |||
| void* val ; | |||
| __asm__ __volatile__ ( | |||
| "ld8.acq %[val] = [%[rep_]] \n\t" | |||
| : [val] "=r" (val) | |||
| : [rep_] "r" (&rep_) | |||
| : "memory" | |||
| ); | |||
| return val; | |||
| } | |||
| inline void Release_Store(void* v) { | |||
| __asm__ __volatile__ ( | |||
| "st8.rel [%[rep_]] = %[v] \n\t" | |||
| : | |||
| : [rep_] "r" (&rep_), [v] "r" (v) | |||
| : "memory" | |||
| ); | |||
| } | |||
| inline void* NoBarrier_Load() const { return rep_; } | |||
| inline void NoBarrier_Store(void* v) { rep_ = v; } | |||
| }; | |||
| // We have neither MemoryBarrier(), nor <atomic> | |||
| #else | |||
| #error Please implement AtomicPointer for this platform. | |||
| #endif | |||
| #undef LEVELDB_HAVE_MEMORY_BARRIER | |||
| #undef ARCH_CPU_X86_FAMILY | |||
| #undef ARCH_CPU_ARM_FAMILY | |||
| #undef ARCH_CPU_ARM64_FAMILY | |||
| #undef ARCH_CPU_PPC_FAMILY | |||
| } // namespace port | |||
| } // namespace leveldb | |||
| #endif // PORT_ATOMIC_POINTER_H_ | |||
| @ -0,0 +1,39 @@ | |||
| // Copyright 2017 The LevelDB Authors. All rights reserved. | |||
| // Use of this source code is governed by a BSD-style license that can be | |||
| // found in the LICENSE file. See the AUTHORS file for names of contributors. | |||
| #ifndef STORAGE_LEVELDB_PORT_PORT_CONFIG_H_ | |||
| #define STORAGE_LEVELDB_PORT_PORT_CONFIG_H_ | |||
| // Define to 1 if you have a definition for fdatasync() in <unistd.h>. | |||
| #if !defined(HAVE_FDATASYNC) | |||
| #cmakedefine01 HAVE_FDATASYNC | |||
| #endif // !defined(HAVE_FDATASYNC) | |||
| // Define to 1 if you have a definition for F_FULLFSYNC in <fcntl.h>. | |||
| #if !defined(HAVE_FULLFSYNC) | |||
| #cmakedefine01 HAVE_FULLFSYNC | |||
| #endif // !defined(HAVE_FULLFSYNC) | |||
| // Define to 1 if you have a definition for O_CLOEXEC in <fcntl.h>. | |||
| #if !defined(HAVE_O_CLOEXEC) | |||
| #cmakedefine01 HAVE_O_CLOEXEC | |||
| #endif // !defined(HAVE_O_CLOEXEC) | |||
| // Define to 1 if you have Google CRC32C. | |||
| #if !defined(HAVE_CRC32C) | |||
| #cmakedefine01 HAVE_CRC32C | |||
| #endif // !defined(HAVE_CRC32C) | |||
| // Define to 1 if you have Google Snappy. | |||
| #if !defined(HAVE_SNAPPY) | |||
| #cmakedefine01 HAVE_SNAPPY | |||
| #endif // !defined(HAVE_SNAPPY) | |||
| // Define to 1 if your processor stores words with the most significant byte | |||
| // first (like Motorola and SPARC, unlike Intel and VAX). | |||
| #if !defined(LEVELDB_IS_BIG_ENDIAN) | |||
| #cmakedefine01 LEVELDB_IS_BIG_ENDIAN | |||
| #endif // !defined(LEVELDB_IS_BIG_ENDIAN) | |||
| #endif // STORAGE_LEVELDB_PORT_PORT_CONFIG_H_ | |||
| @ -1,54 +0,0 @@ | |||
| // Copyright (c) 2011 The LevelDB Authors. All rights reserved. | |||
| // Use of this source code is governed by a BSD-style license that can be | |||
| // found in the LICENSE file. See the AUTHORS file for names of contributors. | |||
| #include "port/port_posix.h" | |||
| #include <cstdlib> | |||
| #include <stdio.h> | |||
| #include <string.h> | |||
| #include "util/logging.h" | |||
| namespace leveldb { | |||
| namespace port { | |||
| static void PthreadCall(const char* label, int result) { | |||
| if (result != 0) { | |||
| fprintf(stderr, "pthread %s: %s\n", label, strerror(result)); | |||
| abort(); | |||
| } | |||
| } | |||
| Mutex::Mutex() { PthreadCall("init mutex", pthread_mutex_init(&mu_, NULL)); } | |||
| Mutex::~Mutex() { PthreadCall("destroy mutex", pthread_mutex_destroy(&mu_)); } | |||
| void Mutex::Lock() { PthreadCall("lock", pthread_mutex_lock(&mu_)); } | |||
| void Mutex::Unlock() { PthreadCall("unlock", pthread_mutex_unlock(&mu_)); } | |||
| CondVar::CondVar(Mutex* mu) | |||
| : mu_(mu) { | |||
| PthreadCall("init cv", pthread_cond_init(&cv_, NULL)); | |||
| } | |||
| CondVar::~CondVar() { PthreadCall("destroy cv", pthread_cond_destroy(&cv_)); } | |||
| void CondVar::Wait() { | |||
| PthreadCall("wait", pthread_cond_wait(&cv_, &mu_->mu_)); | |||
| } | |||
| void CondVar::Signal() { | |||
| PthreadCall("signal", pthread_cond_signal(&cv_)); | |||
| } | |||
| void CondVar::SignalAll() { | |||
| PthreadCall("broadcast", pthread_cond_broadcast(&cv_)); | |||
| } | |||
| void InitOnce(OnceType* once, void (*initializer)()) { | |||
| PthreadCall("once", pthread_once(once, initializer)); | |||
| } | |||
| } // namespace port | |||
| } // namespace leveldb | |||
| @ -1,154 +0,0 @@ | |||
| // Copyright (c) 2011 The LevelDB Authors. All rights reserved. | |||
| // Use of this source code is governed by a BSD-style license that can be | |||
| // found in the LICENSE file. See the AUTHORS file for names of contributors. | |||
| // | |||
| // See port_example.h for documentation for the following types/functions. | |||
| #ifndef STORAGE_LEVELDB_PORT_PORT_POSIX_H_ | |||
| #define STORAGE_LEVELDB_PORT_PORT_POSIX_H_ | |||
| #undef PLATFORM_IS_LITTLE_ENDIAN | |||
| #if defined(OS_MACOSX) | |||
| #include <machine/endian.h> | |||
| #if defined(__DARWIN_LITTLE_ENDIAN) && defined(__DARWIN_BYTE_ORDER) | |||
| #define PLATFORM_IS_LITTLE_ENDIAN \ | |||
| (__DARWIN_BYTE_ORDER == __DARWIN_LITTLE_ENDIAN) | |||
| #endif | |||
| #elif defined(OS_SOLARIS) | |||
| #include <sys/isa_defs.h> | |||
| #ifdef _LITTLE_ENDIAN | |||
| #define PLATFORM_IS_LITTLE_ENDIAN true | |||
| #else | |||
| #define PLATFORM_IS_LITTLE_ENDIAN false | |||
| #endif | |||
| #elif defined(OS_FREEBSD) || defined(OS_OPENBSD) ||\ | |||
| defined(OS_NETBSD) || defined(OS_DRAGONFLYBSD) | |||
| #include <sys/types.h> | |||
| #include <sys/endian.h> | |||
| #define PLATFORM_IS_LITTLE_ENDIAN (_BYTE_ORDER == _LITTLE_ENDIAN) | |||
| #elif defined(OS_HPUX) | |||
| #define PLATFORM_IS_LITTLE_ENDIAN false | |||
| #elif defined(OS_ANDROID) | |||
| // Due to a bug in the NDK x86 <sys/endian.h> definition, | |||
| // _BYTE_ORDER must be used instead of __BYTE_ORDER on Android. | |||
| // See http://code.google.com/p/android/issues/detail?id=39824 | |||
| #include <endian.h> | |||
| #define PLATFORM_IS_LITTLE_ENDIAN (_BYTE_ORDER == _LITTLE_ENDIAN) | |||
| #else | |||
| #include <endian.h> | |||
| #endif | |||
| #include <pthread.h> | |||
| #ifdef SNAPPY | |||
| #include <snappy.h> | |||
| #endif | |||
| #include <stdint.h> | |||
| #include <string> | |||
| #include "port/atomic_pointer.h" | |||
| #ifndef PLATFORM_IS_LITTLE_ENDIAN | |||
| #define PLATFORM_IS_LITTLE_ENDIAN (__BYTE_ORDER == __LITTLE_ENDIAN) | |||
| #endif | |||
| #if defined(OS_MACOSX) || defined(OS_SOLARIS) || defined(OS_FREEBSD) ||\ | |||
| defined(OS_NETBSD) || defined(OS_OPENBSD) || defined(OS_DRAGONFLYBSD) ||\ | |||
| defined(OS_ANDROID) || defined(OS_HPUX) || defined(CYGWIN) | |||
| // Use fread/fwrite/fflush on platforms without _unlocked variants | |||
| #define fread_unlocked fread | |||
| #define fwrite_unlocked fwrite | |||
| #define fflush_unlocked fflush | |||
| #endif | |||
| #if defined(OS_MACOSX) || defined(OS_FREEBSD) ||\ | |||
| defined(OS_OPENBSD) || defined(OS_DRAGONFLYBSD) | |||
| // Use fsync() on platforms without fdatasync() | |||
| #define fdatasync fsync | |||
| #endif | |||
| #if defined(OS_ANDROID) && __ANDROID_API__ < 9 | |||
| // fdatasync() was only introduced in API level 9 on Android. Use fsync() | |||
| // when targetting older platforms. | |||
| #define fdatasync fsync | |||
| #endif | |||
| namespace leveldb { | |||
| namespace port { | |||
| static const bool kLittleEndian = PLATFORM_IS_LITTLE_ENDIAN; | |||
| #undef PLATFORM_IS_LITTLE_ENDIAN | |||
| class CondVar; | |||
| class Mutex { | |||
| public: | |||
| Mutex(); | |||
| ~Mutex(); | |||
| void Lock(); | |||
| void Unlock(); | |||
| void AssertHeld() { } | |||
| private: | |||
| friend class CondVar; | |||
| pthread_mutex_t mu_; | |||
| // No copying | |||
| Mutex(const Mutex&); | |||
| void operator=(const Mutex&); | |||
| }; | |||
| class CondVar { | |||
| public: | |||
| explicit CondVar(Mutex* mu); | |||
| ~CondVar(); | |||
| void Wait(); | |||
| void Signal(); | |||
| void SignalAll(); | |||
| private: | |||
| pthread_cond_t cv_; | |||
| Mutex* mu_; | |||
| }; | |||
| typedef pthread_once_t OnceType; | |||
| #define LEVELDB_ONCE_INIT PTHREAD_ONCE_INIT | |||
| extern void InitOnce(OnceType* once, void (*initializer)()); | |||
| inline bool Snappy_Compress(const char* input, size_t length, | |||
| ::std::string* output) { | |||
| #ifdef SNAPPY | |||
| output->resize(snappy::MaxCompressedLength(length)); | |||
| size_t outlen; | |||
| snappy::RawCompress(input, length, &(*output)[0], &outlen); | |||
| output->resize(outlen); | |||
| return true; | |||
| #endif | |||
| return false; | |||
| } | |||
| inline bool Snappy_GetUncompressedLength(const char* input, size_t length, | |||
| size_t* result) { | |||
| #ifdef SNAPPY | |||
| return snappy::GetUncompressedLength(input, length, result); | |||
| #else | |||
| return false; | |||
| #endif | |||
| } | |||
| inline bool Snappy_Uncompress(const char* input, size_t length, | |||
| char* output) { | |||
| #ifdef SNAPPY | |||
| return snappy::RawUncompress(input, length, output); | |||
| #else | |||
| return false; | |||
| #endif | |||
| } | |||
| inline bool GetHeapProfile(void (*func)(void*, const char*, int), void* arg) { | |||
| return false; | |||
| } | |||
| } // namespace port | |||
| } // namespace leveldb | |||
| #endif // STORAGE_LEVELDB_PORT_PORT_POSIX_H_ | |||
| @ -0,0 +1,153 @@ | |||
| // Copyright (c) 2018 The LevelDB Authors. All rights reserved. | |||
| // Use of this source code is governed by a BSD-style license that can be | |||
| // found in the LICENSE file. See the AUTHORS file for names of contributors. | |||
| #ifndef STORAGE_LEVELDB_PORT_PORT_STDCXX_H_ | |||
| #define STORAGE_LEVELDB_PORT_PORT_STDCXX_H_ | |||
| // port/port_config.h availability is automatically detected via __has_include | |||
| // in newer compilers. If LEVELDB_HAS_PORT_CONFIG_H is defined, it overrides the | |||
| // configuration detection. | |||
| #if defined(LEVELDB_HAS_PORT_CONFIG_H) | |||
| #if LEVELDB_HAS_PORT_CONFIG_H | |||
| #include "port/port_config.h" | |||
| #endif // LEVELDB_HAS_PORT_CONFIG_H | |||
| #elif defined(__has_include) | |||
| #if __has_include("port/port_config.h") | |||
| #include "port/port_config.h" | |||
| #endif // __has_include("port/port_config.h") | |||
| #endif // defined(LEVELDB_HAS_PORT_CONFIG_H) | |||
| #if HAVE_CRC32C | |||
| #include <crc32c/crc32c.h> | |||
| #endif // HAVE_CRC32C | |||
| #if HAVE_SNAPPY | |||
| #include <snappy.h> | |||
| #endif // HAVE_SNAPPY | |||
| #include <cassert> | |||
| #include <condition_variable> // NOLINT | |||
| #include <cstddef> | |||
| #include <cstdint> | |||
| #include <mutex> // NOLINT | |||
| #include <string> | |||
| #include "port/thread_annotations.h" | |||
| namespace leveldb { | |||
| namespace port { | |||
| static const bool kLittleEndian = !LEVELDB_IS_BIG_ENDIAN; | |||
| class CondVar; | |||
| // Thinly wraps std::mutex. | |||
| class LOCKABLE Mutex { | |||
| public: | |||
| Mutex() = default; | |||
| ~Mutex() = default; | |||
| Mutex(const Mutex&) = delete; | |||
| Mutex& operator=(const Mutex&) = delete; | |||
| void Lock() EXCLUSIVE_LOCK_FUNCTION() { mu_.lock(); } | |||
| void Unlock() UNLOCK_FUNCTION() { mu_.unlock(); } | |||
| void AssertHeld() ASSERT_EXCLUSIVE_LOCK() {} | |||
| private: | |||
| friend class CondVar; | |||
| std::mutex mu_; | |||
| }; | |||
| // Thinly wraps std::condition_variable. | |||
| class CondVar { | |||
| public: | |||
| explicit CondVar(Mutex* mu) : mu_(mu) { assert(mu != nullptr); } | |||
| ~CondVar() = default; | |||
| CondVar(const CondVar&) = delete; | |||
| CondVar& operator=(const CondVar&) = delete; | |||
  void Wait() {
    // Adopt the mutex, which the caller already holds, so that wait() can
    // atomically release it and reacquire it on wakeup; release() afterwards
    // keeps unique_lock's destructor from unlocking a mutex that this Mutex
    // wrapper still considers held.
    std::unique_lock<std::mutex> lock(mu_->mu_, std::adopt_lock);
    cv_.wait(lock);
    lock.release();
  }
| void Signal() { cv_.notify_one(); } | |||
| void SignalAll() { cv_.notify_all(); } | |||
| private: | |||
| std::condition_variable cv_; | |||
| Mutex* const mu_; | |||
| }; | |||
| inline bool Snappy_Compress(const char* input, size_t length, | |||
| std::string* output) { | |||
| #if HAVE_SNAPPY | |||
| output->resize(snappy::MaxCompressedLength(length)); | |||
| size_t outlen; | |||
| snappy::RawCompress(input, length, &(*output)[0], &outlen); | |||
| output->resize(outlen); | |||
| return true; | |||
| #else | |||
| // Silence compiler warnings about unused arguments. | |||
| (void)input; | |||
| (void)length; | |||
| (void)output; | |||
| #endif // HAVE_SNAPPY | |||
| return false; | |||
| } | |||
| inline bool Snappy_GetUncompressedLength(const char* input, size_t length, | |||
| size_t* result) { | |||
| #if HAVE_SNAPPY | |||
| return snappy::GetUncompressedLength(input, length, result); | |||
| #else | |||
| // Silence compiler warnings about unused arguments. | |||
| (void)input; | |||
| (void)length; | |||
| (void)result; | |||
| return false; | |||
| #endif // HAVE_SNAPPY | |||
| } | |||
| inline bool Snappy_Uncompress(const char* input, size_t length, char* output) { | |||
| #if HAVE_SNAPPY | |||
| return snappy::RawUncompress(input, length, output); | |||
| #else | |||
| // Silence compiler warnings about unused arguments. | |||
| (void)input; | |||
| (void)length; | |||
| (void)output; | |||
| return false; | |||
| #endif // HAVE_SNAPPY | |||
| } | |||
| inline bool GetHeapProfile(void (*func)(void*, const char*, int), void* arg) { | |||
| // Silence compiler warnings about unused arguments. | |||
| (void)func; | |||
| (void)arg; | |||
| return false; | |||
| } | |||
| inline uint32_t AcceleratedCRC32C(uint32_t crc, const char* buf, size_t size) { | |||
| #if HAVE_CRC32C | |||
| return ::crc32c::Extend(crc, reinterpret_cast<const uint8_t*>(buf), size); | |||
| #else | |||
| // Silence compiler warnings about unused arguments. | |||
| (void)crc; | |||
| (void)buf; | |||
| (void)size; | |||
| return 0; | |||
| #endif // HAVE_CRC32C | |||
| } | |||
| } // namespace port | |||
| } // namespace leveldb | |||
| #endif // STORAGE_LEVELDB_PORT_PORT_STDCXX_H_ | |||
| @ -1,24 +0,0 @@ | |||
| // Copyright (c) 2011 The LevelDB Authors. All rights reserved. | |||
| // Use of this source code is governed by a BSD-style license that can be | |||
| // found in the LICENSE file. See the AUTHORS file for names of contributors. | |||
| // MSVC didn't ship with this file until the 2010 version. | |||
| #ifndef STORAGE_LEVELDB_PORT_WIN_STDINT_H_ | |||
| #define STORAGE_LEVELDB_PORT_WIN_STDINT_H_ | |||
| #if !defined(_MSC_VER) | |||
| #error This file should only be included when compiling with MSVC. | |||
| #endif | |||
| // Define C99 equivalent types. | |||
| typedef signed char int8_t; | |||
| typedef signed short int16_t; | |||
| typedef signed int int32_t; | |||
| typedef signed long long int64_t; | |||
| typedef unsigned char uint8_t; | |||
| typedef unsigned short uint16_t; | |||
| typedef unsigned int uint32_t; | |||
| typedef unsigned long long uint64_t; | |||
| #endif // STORAGE_LEVELDB_PORT_WIN_STDINT_H_ | |||