From 657ba514298a726c7533f3106d3778062b59d75f Mon Sep 17 00:00:00 2001 From: Chris Mumford Date: Wed, 2 Oct 2019 09:35:37 -0700 Subject: [PATCH 01/68] Added return in Version::Get::State::Match to quiet warning. Added unreached return at the end of Version::Get::State::Match to stop this _incorrect_ warning: version_set.cc:376:5: warning: control reaches end of non-void function [-Wreturn-type] This warning was being emitted when building with clang 6.0.1-10 and also emitted by lgtm.com when statically analyzing leveldb even though all SaverState enumeration values were handled. PiperOrigin-RevId: 272455474 --- db/version_set.cc | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/db/version_set.cc b/db/version_set.cc index fd5e3ab..cd07346 100644 --- a/db/version_set.cc +++ b/db/version_set.cc @@ -373,6 +373,10 @@ Status Version::Get(const ReadOptions& options, const LookupKey& k, state->found = true; return false; } + + // Not reached. Added to avoid false compilation warnings of + // "control reaches end of non-void function". + return false; } }; From 95d0ba1cb046bfd76619b8b80e14ee1b2897d219 Mon Sep 17 00:00:00 2001 From: Chris Mumford Date: Mon, 28 Oct 2019 10:19:33 -0700 Subject: [PATCH 02/68] Renamed local variable in DBImpl::Write. The local variable `updates` in DBImpl::Write was hiding the `updates` parameter. Renamed to avoid this conflict. PiperOrigin-RevId: 277089971 --- db/db_impl.cc | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/db/db_impl.cc b/db/db_impl.cc index 4754ba3..95e2bb4 100644 --- a/db/db_impl.cc +++ b/db/db_impl.cc @@ -1213,9 +1213,9 @@ Status DBImpl::Write(const WriteOptions& options, WriteBatch* updates) { uint64_t last_sequence = versions_->LastSequence(); Writer* last_writer = &w; if (status.ok() && updates != nullptr) { // nullptr batch is for compactions - WriteBatch* updates = BuildBatchGroup(&last_writer); - WriteBatchInternal::SetSequence(updates, last_sequence + 1); - last_sequence += WriteBatchInternal::Count(updates); + WriteBatch* write_batch = BuildBatchGroup(&last_writer); + WriteBatchInternal::SetSequence(write_batch, last_sequence + 1); + last_sequence += WriteBatchInternal::Count(write_batch); // Add to log and apply to memtable. We can release the lock // during this phase since &w is currently responsible for logging @@ -1223,7 +1223,7 @@ Status DBImpl::Write(const WriteOptions& options, WriteBatch* updates) { // into mem_. { mutex_.Unlock(); - status = log_->AddRecord(WriteBatchInternal::Contents(updates)); + status = log_->AddRecord(WriteBatchInternal::Contents(write_batch)); bool sync_error = false; if (status.ok() && options.sync) { status = logfile_->Sync(); @@ -1232,7 +1232,7 @@ Status DBImpl::Write(const WriteOptions& options, WriteBatch* updates) { } } if (status.ok()) { - status = WriteBatchInternal::InsertInto(updates, mem_); + status = WriteBatchInternal::InsertInto(write_batch, mem_); } mutex_.Lock(); if (sync_error) { @@ -1242,7 +1242,7 @@ Status DBImpl::Write(const WriteOptions& options, WriteBatch* updates) { RecordBackgroundError(status); } } - if (updates == tmp_batch_) tmp_batch_->Clear(); + if (write_batch == tmp_batch_) tmp_batch_->Clear(); versions_->SetLastSequence(last_sequence); } From cf4d9ab23de7ec36b8e00536b7450f02c639cd87 Mon Sep 17 00:00:00 2001 From: Victor Costan Date: Sun, 3 Nov 2019 21:38:38 -0800 Subject: [PATCH 03/68] Test CMake installation on Travis. 
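A note on the -Wreturn-type workaround in [PATCH 01/68] above: the diagnostic can fire even when a switch statement handles every enumerator, because an enum object may still hold a value outside its named enumerators, so the compiler cannot prove the switch is exhaustive. The standalone sketch below uses invented names (SaverState and KeepSearching here are stand-ins, not the leveldb sources) to show the shape of the pattern and why the trailing return quiets the warning:

```cpp
enum class SaverState { kNotFound, kFound, kDeleted, kCorrupt };

// Every enumerator is handled and every branch returns, yet -- per the commit
// message -- clang 6.0.1-10 and lgtm.com still reported "control reaches end
// of non-void function" for code of this shape, since an enum object may
// legally hold a value outside its named enumerators.
bool KeepSearching(SaverState state) {
  switch (state) {
    case SaverState::kNotFound:
    case SaverState::kDeleted:
      return true;   // keep looking at other files
    case SaverState::kFound:
    case SaverState::kCorrupt:
      return false;  // stop searching
  }
  // Not reached. Present only to satisfy -Wreturn-type, mirroring the patch.
  return false;
}
```

An alternative would be a `default:` case, but that would forfeit the -Wswitch warning when a new enumerator is later added, which is presumably why the patch keeps the exhaustive switch and adds an unreached return instead.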
PiperOrigin-RevId: 278300591 --- .travis.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.travis.yml b/.travis.yml index b4acf13..42cbe64 100644 --- a/.travis.yml +++ b/.travis.yml @@ -70,6 +70,7 @@ install: before_script: - mkdir -p build && cd build - cmake .. -G Ninja -DCMAKE_BUILD_TYPE=$BUILD_TYPE + -DCMAKE_INSTALL_PREFIX=$HOME/.local - cmake --build . - cd .. @@ -78,3 +79,4 @@ script: - "if [ -f build/db_bench ] ; then build/db_bench ; fi" - "if [ -f build/db_bench_sqlite3 ] ; then build/db_bench_sqlite3 ; fi" - "if [ -f build/db_bench_tree_db ] ; then build/db_bench_tree_db ; fi" +- cd build && cmake --build . --target install From 5abdf4c019e51fce59d34c21b13bf4e0a948828a Mon Sep 17 00:00:00 2001 From: Victor Costan Date: Mon, 4 Nov 2019 11:38:53 -0800 Subject: [PATCH 04/68] Fix installed target definition. Using CMAKE_INSTALL_INCLUDEDIR before including GNUINstallDirs results in a broken installation when CMAKE_INSTALL_PREFIX is a non-standard directory. Inspired from https://github.com/google/crc32c/pull/39 PiperOrigin-RevId: 278427974 --- CMakeLists.txt | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index a65afbf..7ccda94 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -106,6 +106,9 @@ if(BUILD_SHARED_LIBS) add_compile_options(-fvisibility=hidden) endif(BUILD_SHARED_LIBS) +# Must be included before CMAKE_INSTALL_INCLUDEDIR is used. +include(GNUInstallDirs) + add_library(leveldb "") target_sources(leveldb PRIVATE @@ -417,7 +420,6 @@ int main() { endif(LEVELDB_BUILD_BENCHMARKS) if(LEVELDB_INSTALL) - include(GNUInstallDirs) install(TARGETS leveldb EXPORT leveldbTargets RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR} From 0c40829872a9f00f38e11dc370ff8adb3e19f25b Mon Sep 17 00:00:00 2001 From: Victor Costan Date: Tue, 5 Nov 2019 15:15:03 -0800 Subject: [PATCH 05/68] Remove redundant PROJECT_SOURCE_DIR usage from CMake config. Inspired by https://github.com/google/crc32c/pull/32 PiperOrigin-RevId: 278718726 --- CMakeLists.txt | 284 ++++++++++++++++++++++++++++----------------------------- 1 file changed, 142 insertions(+), 142 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 7ccda94..1cb4625 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -92,13 +92,13 @@ set(LEVELDB_PUBLIC_INCLUDE_DIR "include/leveldb") set(LEVELDB_PORT_CONFIG_DIR "include/port") configure_file( - "${PROJECT_SOURCE_DIR}/port/port_config.h.in" + "port/port_config.h.in" "${PROJECT_BINARY_DIR}/${LEVELDB_PORT_CONFIG_DIR}/port_config.h" ) include_directories( "${PROJECT_BINARY_DIR}/include" - "${PROJECT_SOURCE_DIR}" + "." 
) if(BUILD_SHARED_LIBS) @@ -113,75 +113,75 @@ add_library(leveldb "") target_sources(leveldb PRIVATE "${PROJECT_BINARY_DIR}/${LEVELDB_PORT_CONFIG_DIR}/port_config.h" - "${PROJECT_SOURCE_DIR}/db/builder.cc" - "${PROJECT_SOURCE_DIR}/db/builder.h" - "${PROJECT_SOURCE_DIR}/db/c.cc" - "${PROJECT_SOURCE_DIR}/db/db_impl.cc" - "${PROJECT_SOURCE_DIR}/db/db_impl.h" - "${PROJECT_SOURCE_DIR}/db/db_iter.cc" - "${PROJECT_SOURCE_DIR}/db/db_iter.h" - "${PROJECT_SOURCE_DIR}/db/dbformat.cc" - "${PROJECT_SOURCE_DIR}/db/dbformat.h" - "${PROJECT_SOURCE_DIR}/db/dumpfile.cc" - "${PROJECT_SOURCE_DIR}/db/filename.cc" - "${PROJECT_SOURCE_DIR}/db/filename.h" - "${PROJECT_SOURCE_DIR}/db/log_format.h" - "${PROJECT_SOURCE_DIR}/db/log_reader.cc" - "${PROJECT_SOURCE_DIR}/db/log_reader.h" - "${PROJECT_SOURCE_DIR}/db/log_writer.cc" - "${PROJECT_SOURCE_DIR}/db/log_writer.h" - "${PROJECT_SOURCE_DIR}/db/memtable.cc" - "${PROJECT_SOURCE_DIR}/db/memtable.h" - "${PROJECT_SOURCE_DIR}/db/repair.cc" - "${PROJECT_SOURCE_DIR}/db/skiplist.h" - "${PROJECT_SOURCE_DIR}/db/snapshot.h" - "${PROJECT_SOURCE_DIR}/db/table_cache.cc" - "${PROJECT_SOURCE_DIR}/db/table_cache.h" - "${PROJECT_SOURCE_DIR}/db/version_edit.cc" - "${PROJECT_SOURCE_DIR}/db/version_edit.h" - "${PROJECT_SOURCE_DIR}/db/version_set.cc" - "${PROJECT_SOURCE_DIR}/db/version_set.h" - "${PROJECT_SOURCE_DIR}/db/write_batch_internal.h" - "${PROJECT_SOURCE_DIR}/db/write_batch.cc" - "${PROJECT_SOURCE_DIR}/port/port_stdcxx.h" - "${PROJECT_SOURCE_DIR}/port/port.h" - "${PROJECT_SOURCE_DIR}/port/thread_annotations.h" - "${PROJECT_SOURCE_DIR}/table/block_builder.cc" - "${PROJECT_SOURCE_DIR}/table/block_builder.h" - "${PROJECT_SOURCE_DIR}/table/block.cc" - "${PROJECT_SOURCE_DIR}/table/block.h" - "${PROJECT_SOURCE_DIR}/table/filter_block.cc" - "${PROJECT_SOURCE_DIR}/table/filter_block.h" - "${PROJECT_SOURCE_DIR}/table/format.cc" - "${PROJECT_SOURCE_DIR}/table/format.h" - "${PROJECT_SOURCE_DIR}/table/iterator_wrapper.h" - "${PROJECT_SOURCE_DIR}/table/iterator.cc" - "${PROJECT_SOURCE_DIR}/table/merger.cc" - "${PROJECT_SOURCE_DIR}/table/merger.h" - "${PROJECT_SOURCE_DIR}/table/table_builder.cc" - "${PROJECT_SOURCE_DIR}/table/table.cc" - "${PROJECT_SOURCE_DIR}/table/two_level_iterator.cc" - "${PROJECT_SOURCE_DIR}/table/two_level_iterator.h" - "${PROJECT_SOURCE_DIR}/util/arena.cc" - "${PROJECT_SOURCE_DIR}/util/arena.h" - "${PROJECT_SOURCE_DIR}/util/bloom.cc" - "${PROJECT_SOURCE_DIR}/util/cache.cc" - "${PROJECT_SOURCE_DIR}/util/coding.cc" - "${PROJECT_SOURCE_DIR}/util/coding.h" - "${PROJECT_SOURCE_DIR}/util/comparator.cc" - "${PROJECT_SOURCE_DIR}/util/crc32c.cc" - "${PROJECT_SOURCE_DIR}/util/crc32c.h" - "${PROJECT_SOURCE_DIR}/util/env.cc" - "${PROJECT_SOURCE_DIR}/util/filter_policy.cc" - "${PROJECT_SOURCE_DIR}/util/hash.cc" - "${PROJECT_SOURCE_DIR}/util/hash.h" - "${PROJECT_SOURCE_DIR}/util/logging.cc" - "${PROJECT_SOURCE_DIR}/util/logging.h" - "${PROJECT_SOURCE_DIR}/util/mutexlock.h" - "${PROJECT_SOURCE_DIR}/util/no_destructor.h" - "${PROJECT_SOURCE_DIR}/util/options.cc" - "${PROJECT_SOURCE_DIR}/util/random.h" - "${PROJECT_SOURCE_DIR}/util/status.cc" + "db/builder.cc" + "db/builder.h" + "db/c.cc" + "db/db_impl.cc" + "db/db_impl.h" + "db/db_iter.cc" + "db/db_iter.h" + "db/dbformat.cc" + "db/dbformat.h" + "db/dumpfile.cc" + "db/filename.cc" + "db/filename.h" + "db/log_format.h" + "db/log_reader.cc" + "db/log_reader.h" + "db/log_writer.cc" + "db/log_writer.h" + "db/memtable.cc" + "db/memtable.h" + "db/repair.cc" + "db/skiplist.h" + "db/snapshot.h" + "db/table_cache.cc" + "db/table_cache.h" + 
"db/version_edit.cc" + "db/version_edit.h" + "db/version_set.cc" + "db/version_set.h" + "db/write_batch_internal.h" + "db/write_batch.cc" + "port/port_stdcxx.h" + "port/port.h" + "port/thread_annotations.h" + "table/block_builder.cc" + "table/block_builder.h" + "table/block.cc" + "table/block.h" + "table/filter_block.cc" + "table/filter_block.h" + "table/format.cc" + "table/format.h" + "table/iterator_wrapper.h" + "table/iterator.cc" + "table/merger.cc" + "table/merger.h" + "table/table_builder.cc" + "table/table.cc" + "table/two_level_iterator.cc" + "table/two_level_iterator.h" + "util/arena.cc" + "util/arena.h" + "util/bloom.cc" + "util/cache.cc" + "util/coding.cc" + "util/coding.h" + "util/comparator.cc" + "util/crc32c.cc" + "util/crc32c.h" + "util/env.cc" + "util/filter_policy.cc" + "util/hash.cc" + "util/hash.h" + "util/logging.cc" + "util/logging.h" + "util/mutexlock.h" + "util/no_destructor.h" + "util/options.cc" + "util/random.h" + "util/status.cc" # Only CMake 3.3+ supports PUBLIC sources in targets exported by "install". $<$:PUBLIC> @@ -205,22 +205,22 @@ target_sources(leveldb if (WIN32) target_sources(leveldb PRIVATE - "${PROJECT_SOURCE_DIR}/util/env_windows.cc" - "${PROJECT_SOURCE_DIR}/util/windows_logger.h" + "util/env_windows.cc" + "util/windows_logger.h" ) else (WIN32) target_sources(leveldb PRIVATE - "${PROJECT_SOURCE_DIR}/util/env_posix.cc" - "${PROJECT_SOURCE_DIR}/util/posix_logger.h" + "util/env_posix.cc" + "util/posix_logger.h" ) endif (WIN32) # MemEnv is not part of the interface and could be pulled to a separate library. target_sources(leveldb PRIVATE - "${PROJECT_SOURCE_DIR}/helpers/memenv/memenv.cc" - "${PROJECT_SOURCE_DIR}/helpers/memenv/memenv.h" + "helpers/memenv/memenv.cc" + "helpers/memenv/memenv.h" ) target_include_directories(leveldb @@ -275,7 +275,7 @@ find_package(Threads REQUIRED) target_link_libraries(leveldb Threads::Threads) add_executable(leveldbutil - "${PROJECT_SOURCE_DIR}/db/leveldbutil.cc" + "db/leveldbutil.cc" ) target_link_libraries(leveldbutil leveldb) @@ -289,10 +289,10 @@ if(LEVELDB_BUILD_TESTS) target_sources("${test_target_name}" PRIVATE "${PROJECT_BINARY_DIR}/${LEVELDB_PORT_CONFIG_DIR}/port_config.h" - "${PROJECT_SOURCE_DIR}/util/testharness.cc" - "${PROJECT_SOURCE_DIR}/util/testharness.h" - "${PROJECT_SOURCE_DIR}/util/testutil.cc" - "${PROJECT_SOURCE_DIR}/util/testutil.h" + "util/testharness.cc" + "util/testharness.h" + "util/testutil.cc" + "util/testutil.h" "${test_file}" ) @@ -311,49 +311,49 @@ if(LEVELDB_BUILD_TESTS) add_test(NAME "${test_target_name}" COMMAND "${test_target_name}") endfunction(leveldb_test) - leveldb_test("${PROJECT_SOURCE_DIR}/db/c_test.c") - leveldb_test("${PROJECT_SOURCE_DIR}/db/fault_injection_test.cc") + leveldb_test("db/c_test.c") + leveldb_test("db/fault_injection_test.cc") - leveldb_test("${PROJECT_SOURCE_DIR}/issues/issue178_test.cc") - leveldb_test("${PROJECT_SOURCE_DIR}/issues/issue200_test.cc") - leveldb_test("${PROJECT_SOURCE_DIR}/issues/issue320_test.cc") + leveldb_test("issues/issue178_test.cc") + leveldb_test("issues/issue200_test.cc") + leveldb_test("issues/issue320_test.cc") - leveldb_test("${PROJECT_SOURCE_DIR}/util/env_test.cc") - leveldb_test("${PROJECT_SOURCE_DIR}/util/status_test.cc") - leveldb_test("${PROJECT_SOURCE_DIR}/util/no_destructor_test.cc") + leveldb_test("util/env_test.cc") + leveldb_test("util/status_test.cc") + leveldb_test("util/no_destructor_test.cc") if(NOT BUILD_SHARED_LIBS) - leveldb_test("${PROJECT_SOURCE_DIR}/db/autocompact_test.cc") - 
leveldb_test("${PROJECT_SOURCE_DIR}/db/corruption_test.cc") - leveldb_test("${PROJECT_SOURCE_DIR}/db/db_test.cc") - leveldb_test("${PROJECT_SOURCE_DIR}/db/dbformat_test.cc") - leveldb_test("${PROJECT_SOURCE_DIR}/db/filename_test.cc") - leveldb_test("${PROJECT_SOURCE_DIR}/db/log_test.cc") - leveldb_test("${PROJECT_SOURCE_DIR}/db/recovery_test.cc") - leveldb_test("${PROJECT_SOURCE_DIR}/db/skiplist_test.cc") - leveldb_test("${PROJECT_SOURCE_DIR}/db/version_edit_test.cc") - leveldb_test("${PROJECT_SOURCE_DIR}/db/version_set_test.cc") - leveldb_test("${PROJECT_SOURCE_DIR}/db/write_batch_test.cc") - - leveldb_test("${PROJECT_SOURCE_DIR}/helpers/memenv/memenv_test.cc") - - leveldb_test("${PROJECT_SOURCE_DIR}/table/filter_block_test.cc") - leveldb_test("${PROJECT_SOURCE_DIR}/table/table_test.cc") - - leveldb_test("${PROJECT_SOURCE_DIR}/util/arena_test.cc") - leveldb_test("${PROJECT_SOURCE_DIR}/util/bloom_test.cc") - leveldb_test("${PROJECT_SOURCE_DIR}/util/cache_test.cc") - leveldb_test("${PROJECT_SOURCE_DIR}/util/coding_test.cc") - leveldb_test("${PROJECT_SOURCE_DIR}/util/crc32c_test.cc") - leveldb_test("${PROJECT_SOURCE_DIR}/util/hash_test.cc") - leveldb_test("${PROJECT_SOURCE_DIR}/util/logging_test.cc") + leveldb_test("db/autocompact_test.cc") + leveldb_test("db/corruption_test.cc") + leveldb_test("db/db_test.cc") + leveldb_test("db/dbformat_test.cc") + leveldb_test("db/filename_test.cc") + leveldb_test("db/log_test.cc") + leveldb_test("db/recovery_test.cc") + leveldb_test("db/skiplist_test.cc") + leveldb_test("db/version_edit_test.cc") + leveldb_test("db/version_set_test.cc") + leveldb_test("db/write_batch_test.cc") + + leveldb_test("helpers/memenv/memenv_test.cc") + + leveldb_test("table/filter_block_test.cc") + leveldb_test("table/table_test.cc") + + leveldb_test("util/arena_test.cc") + leveldb_test("util/bloom_test.cc") + leveldb_test("util/cache_test.cc") + leveldb_test("util/coding_test.cc") + leveldb_test("util/crc32c_test.cc") + leveldb_test("util/hash_test.cc") + leveldb_test("util/logging_test.cc") # TODO(costan): This test also uses - # "${PROJECT_SOURCE_DIR}/util/env_{posix|windows}_test_helper.h" + # "util/env_{posix|windows}_test_helper.h" if (WIN32) - leveldb_test("${PROJECT_SOURCE_DIR}/util/env_windows_test.cc") + leveldb_test("util/env_windows_test.cc") else (WIN32) - leveldb_test("${PROJECT_SOURCE_DIR}/util/env_posix_test.cc") + leveldb_test("util/env_posix_test.cc") endif (WIN32) endif(NOT BUILD_SHARED_LIBS) endif(LEVELDB_BUILD_TESTS) @@ -366,12 +366,12 @@ if(LEVELDB_BUILD_BENCHMARKS) target_sources("${bench_target_name}" PRIVATE "${PROJECT_BINARY_DIR}/${LEVELDB_PORT_CONFIG_DIR}/port_config.h" - "${PROJECT_SOURCE_DIR}/util/histogram.cc" - "${PROJECT_SOURCE_DIR}/util/histogram.h" - "${PROJECT_SOURCE_DIR}/util/testharness.cc" - "${PROJECT_SOURCE_DIR}/util/testharness.h" - "${PROJECT_SOURCE_DIR}/util/testutil.cc" - "${PROJECT_SOURCE_DIR}/util/testutil.h" + "util/histogram.cc" + "util/histogram.h" + "util/testharness.cc" + "util/testharness.h" + "util/testutil.cc" + "util/testutil.h" "${bench_file}" ) @@ -389,12 +389,12 @@ if(LEVELDB_BUILD_BENCHMARKS) endfunction(leveldb_benchmark) if(NOT BUILD_SHARED_LIBS) - leveldb_benchmark("${PROJECT_SOURCE_DIR}/benchmarks/db_bench.cc") + leveldb_benchmark("benchmarks/db_bench.cc") endif(NOT BUILD_SHARED_LIBS) check_library_exists(sqlite3 sqlite3_open "" HAVE_SQLITE3) if(HAVE_SQLITE3) - leveldb_benchmark("${PROJECT_SOURCE_DIR}/benchmarks/db_bench_sqlite3.cc") + leveldb_benchmark("benchmarks/db_bench_sqlite3.cc") 
target_link_libraries(db_bench_sqlite3 sqlite3) endif(HAVE_SQLITE3) @@ -414,7 +414,7 @@ int main() { " HAVE_KYOTOCABINET) set(CMAKE_REQUIRED_LIBRARIES ${OLD_CMAKE_REQURED_LIBRARIES}) if(HAVE_KYOTOCABINET) - leveldb_benchmark("${PROJECT_SOURCE_DIR}/benchmarks/db_bench_tree_db.cc") + leveldb_benchmark("benchmarks/db_bench_tree_db.cc") target_link_libraries(db_bench_tree_db kyotocabinet) endif(HAVE_KYOTOCABINET) endif(LEVELDB_BUILD_BENCHMARKS) @@ -428,21 +428,21 @@ if(LEVELDB_INSTALL) ) install( FILES - "${PROJECT_SOURCE_DIR}/${LEVELDB_PUBLIC_INCLUDE_DIR}/c.h" - "${PROJECT_SOURCE_DIR}/${LEVELDB_PUBLIC_INCLUDE_DIR}/cache.h" - "${PROJECT_SOURCE_DIR}/${LEVELDB_PUBLIC_INCLUDE_DIR}/comparator.h" - "${PROJECT_SOURCE_DIR}/${LEVELDB_PUBLIC_INCLUDE_DIR}/db.h" - "${PROJECT_SOURCE_DIR}/${LEVELDB_PUBLIC_INCLUDE_DIR}/dumpfile.h" - "${PROJECT_SOURCE_DIR}/${LEVELDB_PUBLIC_INCLUDE_DIR}/env.h" - "${PROJECT_SOURCE_DIR}/${LEVELDB_PUBLIC_INCLUDE_DIR}/export.h" - "${PROJECT_SOURCE_DIR}/${LEVELDB_PUBLIC_INCLUDE_DIR}/filter_policy.h" - "${PROJECT_SOURCE_DIR}/${LEVELDB_PUBLIC_INCLUDE_DIR}/iterator.h" - "${PROJECT_SOURCE_DIR}/${LEVELDB_PUBLIC_INCLUDE_DIR}/options.h" - "${PROJECT_SOURCE_DIR}/${LEVELDB_PUBLIC_INCLUDE_DIR}/slice.h" - "${PROJECT_SOURCE_DIR}/${LEVELDB_PUBLIC_INCLUDE_DIR}/status.h" - "${PROJECT_SOURCE_DIR}/${LEVELDB_PUBLIC_INCLUDE_DIR}/table_builder.h" - "${PROJECT_SOURCE_DIR}/${LEVELDB_PUBLIC_INCLUDE_DIR}/table.h" - "${PROJECT_SOURCE_DIR}/${LEVELDB_PUBLIC_INCLUDE_DIR}/write_batch.h" + "${LEVELDB_PUBLIC_INCLUDE_DIR}/c.h" + "${LEVELDB_PUBLIC_INCLUDE_DIR}/cache.h" + "${LEVELDB_PUBLIC_INCLUDE_DIR}/comparator.h" + "${LEVELDB_PUBLIC_INCLUDE_DIR}/db.h" + "${LEVELDB_PUBLIC_INCLUDE_DIR}/dumpfile.h" + "${LEVELDB_PUBLIC_INCLUDE_DIR}/env.h" + "${LEVELDB_PUBLIC_INCLUDE_DIR}/export.h" + "${LEVELDB_PUBLIC_INCLUDE_DIR}/filter_policy.h" + "${LEVELDB_PUBLIC_INCLUDE_DIR}/iterator.h" + "${LEVELDB_PUBLIC_INCLUDE_DIR}/options.h" + "${LEVELDB_PUBLIC_INCLUDE_DIR}/slice.h" + "${LEVELDB_PUBLIC_INCLUDE_DIR}/status.h" + "${LEVELDB_PUBLIC_INCLUDE_DIR}/table_builder.h" + "${LEVELDB_PUBLIC_INCLUDE_DIR}/table.h" + "${LEVELDB_PUBLIC_INCLUDE_DIR}/write_batch.h" DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/leveldb ) @@ -458,7 +458,7 @@ if(LEVELDB_INSTALL) ) install( FILES - "${PROJECT_SOURCE_DIR}/cmake/leveldbConfig.cmake" + "cmake/leveldbConfig.cmake" "${PROJECT_BINARY_DIR}/leveldbConfigVersion.cmake" DESTINATION "${CMAKE_INSTALL_LIBDIR}/cmake/leveldb" ) From 41c8d839149134a3a6c8908f185437f536a47211 Mon Sep 17 00:00:00 2001 From: Victor Costan Date: Thu, 7 Nov 2019 21:39:02 -0800 Subject: [PATCH 06/68] Align CMake configuration with related projects. 
PiperOrigin-RevId: 279238007 --- CMakeLists.txt | 19 ++++++++++++------- cmake/leveldbConfig.cmake | 1 - cmake/leveldbConfig.cmake.in | 9 +++++++++ 3 files changed, 21 insertions(+), 8 deletions(-) delete mode 100644 cmake/leveldbConfig.cmake create mode 100644 cmake/leveldbConfig.cmake.in diff --git a/CMakeLists.txt b/CMakeLists.txt index 1cb4625..2ad1c6c 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -443,23 +443,28 @@ if(LEVELDB_INSTALL) "${LEVELDB_PUBLIC_INCLUDE_DIR}/table_builder.h" "${LEVELDB_PUBLIC_INCLUDE_DIR}/table.h" "${LEVELDB_PUBLIC_INCLUDE_DIR}/write_batch.h" - DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/leveldb + DESTINATION "${CMAKE_INSTALL_INCLUDEDIR}/leveldb" ) include(CMakePackageConfigHelpers) + configure_package_config_file( + "cmake/${PROJECT_NAME}Config.cmake.in" + "${PROJECT_BINARY_DIR}/cmake/${PROJECT_NAME}Config.cmake" + INSTALL_DESTINATION "${CMAKE_INSTALL_LIBDIR}/cmake/${PROJECT_NAME}" + ) write_basic_package_version_file( - "${PROJECT_BINARY_DIR}/leveldbConfigVersion.cmake" - COMPATIBILITY SameMajorVersion + "${PROJECT_BINARY_DIR}/cmake/${PROJECT_NAME}ConfigVersion.cmake" + COMPATIBILITY SameMajorVersion ) install( EXPORT leveldbTargets NAMESPACE leveldb:: - DESTINATION "${CMAKE_INSTALL_LIBDIR}/cmake/leveldb" + DESTINATION "${CMAKE_INSTALL_LIBDIR}/cmake/${PROJECT_NAME}" ) install( FILES - "cmake/leveldbConfig.cmake" - "${PROJECT_BINARY_DIR}/leveldbConfigVersion.cmake" - DESTINATION "${CMAKE_INSTALL_LIBDIR}/cmake/leveldb" + "${PROJECT_BINARY_DIR}/cmake/${PROJECT_NAME}Config.cmake" + "${PROJECT_BINARY_DIR}/cmake/${PROJECT_NAME}ConfigVersion.cmake" + DESTINATION "${CMAKE_INSTALL_LIBDIR}/cmake/${PROJECT_NAME}" ) endif(LEVELDB_INSTALL) diff --git a/cmake/leveldbConfig.cmake b/cmake/leveldbConfig.cmake deleted file mode 100644 index eea6e5c..0000000 --- a/cmake/leveldbConfig.cmake +++ /dev/null @@ -1 +0,0 @@ -include("${CMAKE_CURRENT_LIST_DIR}/leveldbTargets.cmake") diff --git a/cmake/leveldbConfig.cmake.in b/cmake/leveldbConfig.cmake.in new file mode 100644 index 0000000..2572728 --- /dev/null +++ b/cmake/leveldbConfig.cmake.in @@ -0,0 +1,9 @@ +# Copyright 2019 The LevelDB Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. See the AUTHORS file for names of contributors. + +@PACKAGE_INIT@ + +include("${CMAKE_CURRENT_LIST_DIR}/leveldbTargets.cmake") + +check_required_components(leveldb) \ No newline at end of file From ed72a3496ed01e1c6a28f743258623a58f6867ee Mon Sep 17 00:00:00 2001 From: Victor Costan Date: Sun, 10 Nov 2019 18:03:43 -0800 Subject: [PATCH 07/68] Allow different C/C++ standards when this is used as a subproject. Inspired by https://github.com/google/snappy/pull/85 PiperOrigin-RevId: 279649967 --- CMakeLists.txt | 24 +++++++++++++++--------- 1 file changed, 15 insertions(+), 9 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 2ad1c6c..e5c614c 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -6,15 +6,21 @@ cmake_minimum_required(VERSION 3.9) # Keep the version below in sync with the one in db.h project(leveldb VERSION 1.22.0 LANGUAGES C CXX) -# This project can use C11, but will gracefully decay down to C89. -set(CMAKE_C_STANDARD 11) -set(CMAKE_C_STANDARD_REQUIRED OFF) -set(CMAKE_C_EXTENSIONS OFF) - -# This project requires C++11. -set(CMAKE_CXX_STANDARD 11) -set(CMAKE_CXX_STANDARD_REQUIRED ON) -set(CMAKE_CXX_EXTENSIONS OFF) +# C standard can be overridden when this is used as a sub-project. 
+if(NOT CMAKE_C_STANDARD) + # This project can use C11, but will gracefully decay down to C89. + set(CMAKE_C_STANDARD 11) + set(CMAKE_C_STANDARD_REQUIRED OFF) + set(CMAKE_C_EXTENSIONS OFF) +endif(NOT CMAKE_C_STANDARD) + +# C++ standard can be overridden when this is used as a sub-project. +if(NOT CMAKE_CXX_STANDARD) + # This project requires C++11. + set(CMAKE_CXX_STANDARD 11) + set(CMAKE_CXX_STANDARD_REQUIRED ON) + set(CMAKE_CXX_EXTENSIONS OFF) +endif(NOT CMAKE_CXX_STANDARD) if (WIN32) set(LEVELDB_PLATFORM_NAME LEVELDB_PLATFORM_WINDOWS) From 2c9c80bd539ca5aad5ea864ee6dd81c1ee3eb91e Mon Sep 17 00:00:00 2001 From: Victor Costan Date: Mon, 11 Nov 2019 11:58:55 -0800 Subject: [PATCH 08/68] Move CI to Visual Studio 2019. PiperOrigin-RevId: 279785825 --- .appveyor.yml | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/.appveyor.yml b/.appveyor.yml index c24b17e..448f183 100644 --- a/.appveyor.yml +++ b/.appveyor.yml @@ -8,9 +8,9 @@ environment: matrix: # AppVeyor currently has no custom job name feature. # http://help.appveyor.com/discussions/questions/1623-can-i-provide-a-friendly-name-for-jobs - - JOB: Visual Studio 2017 - APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2017 - CMAKE_GENERATOR: Visual Studio 15 2017 + - JOB: Visual Studio 2019 + APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2019 + CMAKE_GENERATOR: Visual Studio 16 2019 platform: - x86 @@ -24,9 +24,10 @@ build_script: - git submodule update --init --recursive - mkdir build - cd build - - if "%platform%"=="x64" set CMAKE_GENERATOR=%CMAKE_GENERATOR% Win64 + - if "%platform%"=="x86" (set CMAKE_GENERATOR_PLATFORM="Win32") + else (set CMAKE_GENERATOR_PLATFORM="%platform%") - cmake --version - - cmake .. -G "%CMAKE_GENERATOR%" + - cmake .. -G "%CMAKE_GENERATOR%" -A "%CMAKE_GENERATOR_PLATFORM%" -DCMAKE_CONFIGURATION_TYPES="%CONFIGURATION%" - cmake --build . --config "%CONFIGURATION%" - cd .. From 1c58902bdcc8d129f3883606bbd8e59085b48878 Mon Sep 17 00:00:00 2001 From: Victor Costan Date: Thu, 21 Nov 2019 13:09:53 -0800 Subject: [PATCH 09/68] Switch testing harness to googletest. 
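The conversion in this patch follows one mechanical pattern across every test file in the diff below: fixture classes now derive from testing::Test, TEST becomes TEST_F, the old ASSERT_OK and test::TmpDir() helpers from util/testharness.h become ASSERT_LEVELDB_OK and testing::TempDir() (the new macros presumably coming from the util/testutil.h additions listed in the diffstat), and each test binary defines an explicit googletest main. A minimal standalone sketch of that shape, with an invented SomeDbTest fixture that is not an excerpt of any leveldb test, looks like this:

```cpp
#include <string>

#include "gtest/gtest.h"   // the patch itself uses the longer third_party/... include path
#include "leveldb/db.h"

// Hypothetical fixture. Before this patch, fixtures were plain classes and
// tests used TEST(...); afterwards, fixtures derive from testing::Test and
// tests use TEST_F(...).
class SomeDbTest : public testing::Test {
 public:
  SomeDbTest() : dbname_(testing::TempDir() + "some_db_test") {}
  std::string dbname_;
};

TEST_F(SomeDbTest, OpenAndClose) {
  // The patched tests use ASSERT_LEVELDB_OK(...) where the old harness used
  // ASSERT_OK(...); this sketch checks Status::ok() directly so it does not
  // depend on macros outside googletest.
  leveldb::Options options;
  options.create_if_missing = true;
  leveldb::DB* db = nullptr;
  ASSERT_TRUE(leveldb::DB::Open(options, dbname_, &db).ok());
  delete db;
  ASSERT_TRUE(leveldb::DestroyDB(dbname_, options).ok());
}

// leveldb::test::RunAllTests() is gone; each test binary now supplies an
// explicit googletest main, matching the pattern in the diff below.
int main(int argc, char** argv) {
  testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}
```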
PiperOrigin-RevId: 281815695 --- .gitmodules | 3 + CMakeLists.txt | 29 ++- README.md | 6 + db/autocompact_test.cc | 25 +-- db/corruption_test.cc | 72 ++++---- db/db_test.cc | 418 +++++++++++++++++++++--------------------- db/dbformat_test.cc | 10 +- db/fault_injection_test.cc | 40 ++-- db/filename_test.cc | 9 +- db/log_test.cc | 87 ++++----- db/recovery_test.cc | 66 ++++--- db/skiplist_test.cc | 10 +- db/version_edit_test.cc | 10 +- db/version_set_test.cc | 38 ++-- db/write_batch_test.cc | 14 +- helpers/memenv/memenv_test.cc | 135 +++++++------- issues/issue178_test.cc | 20 +- issues/issue200_test.cc | 26 +-- issues/issue320_test.cc | 19 +- table/filter_block_test.cc | 15 +- table/table_test.cc | 39 ++-- util/arena_test.cc | 9 +- util/bloom_test.cc | 16 +- util/cache_test.cc | 29 +-- util/coding_test.cc | 12 +- util/crc32c_test.cc | 10 +- util/env_posix_test.cc | 83 +++++---- util/env_test.cc | 73 ++++---- util/env_windows_test.cc | 19 +- util/hash_test.cc | 10 +- util/logging_test.cc | 12 +- util/no_destructor_test.cc | 12 +- util/status_test.cc | 10 +- util/testharness.cc | 81 -------- util/testharness.h | 141 -------------- util/testutil.cc | 2 + util/testutil.h | 16 ++ 37 files changed, 763 insertions(+), 863 deletions(-) create mode 100644 .gitmodules delete mode 100644 util/testharness.cc delete mode 100644 util/testharness.h diff --git a/.gitmodules b/.gitmodules new file mode 100644 index 0000000..5a4e85a --- /dev/null +++ b/.gitmodules @@ -0,0 +1,3 @@ +[submodule "third_party/googletest"] + path = third_party/googletest + url = https://github.com/google/googletest.git diff --git a/CMakeLists.txt b/CMakeLists.txt index e5c614c..be41ba4 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -84,6 +84,10 @@ endif(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC") include(CheckCXXCompilerFlag) check_cxx_compiler_flag(-Wthread-safety HAVE_CLANG_THREAD_SAFETY) +# Used by googletest. +check_cxx_compiler_flag(-Wno-missing-field-initializers + LEVELDB_HAVE_NO_MISSING_FIELD_INITIALIZERS) + include(CheckCXXSourceCompiles) # Test whether C++17 __has_include is available. @@ -288,6 +292,23 @@ target_link_libraries(leveldbutil leveldb) if(LEVELDB_BUILD_TESTS) enable_testing() + # Prevent overriding the parent project's compiler/linker settings on Windows. + set(gtest_force_shared_crt ON CACHE BOOL "" FORCE) + set(install_gtest OFF) + set(install_gmock OFF) + set(build_gmock ON) + + # This project is tested using GoogleTest. + add_subdirectory("third_party/googletest") + + # GoogleTest triggers a missing field initializers warning. 
+ if(LEVELDB_HAVE_NO_MISSING_FIELD_INITIALIZERS) + set_property(TARGET gtest + APPEND PROPERTY COMPILE_OPTIONS -Wno-missing-field-initializers) + set_property(TARGET gmock + APPEND PROPERTY COMPILE_OPTIONS -Wno-missing-field-initializers) + endif(LEVELDB_HAVE_NO_MISSING_FIELD_INITIALIZERS) + function(leveldb_test test_file) get_filename_component(test_target_name "${test_file}" NAME_WE) @@ -295,14 +316,12 @@ if(LEVELDB_BUILD_TESTS) target_sources("${test_target_name}" PRIVATE "${PROJECT_BINARY_DIR}/${LEVELDB_PORT_CONFIG_DIR}/port_config.h" - "util/testharness.cc" - "util/testharness.h" "util/testutil.cc" "util/testutil.h" "${test_file}" ) - target_link_libraries("${test_target_name}" leveldb) + target_link_libraries("${test_target_name}" leveldb gmock gtest) target_compile_definitions("${test_target_name}" PRIVATE ${LEVELDB_PLATFORM_NAME}=1 @@ -374,14 +393,12 @@ if(LEVELDB_BUILD_BENCHMARKS) "${PROJECT_BINARY_DIR}/${LEVELDB_PORT_CONFIG_DIR}/port_config.h" "util/histogram.cc" "util/histogram.h" - "util/testharness.cc" - "util/testharness.h" "util/testutil.cc" "util/testutil.h" "${bench_file}" ) - target_link_libraries("${bench_target_name}" leveldb) + target_link_libraries("${bench_target_name}" leveldb gmock gtest) target_compile_definitions("${bench_target_name}" PRIVATE ${LEVELDB_PLATFORM_NAME}=1 diff --git a/README.md b/README.md index dadfd56..28d29c1 100644 --- a/README.md +++ b/README.md @@ -27,6 +27,12 @@ Authors: Sanjay Ghemawat (sanjay@google.com) and Jeff Dean (jeff@google.com) * Only a single process (possibly multi-threaded) can access a particular database at a time. * There is no client-server support builtin to the library. An application that needs such support will have to wrap their own server around the library. +# Getting the Source + +```bash +git clone --recurse-submodules https://github.com/google/leveldb.git +``` + # Building This project supports [CMake](https://cmake.org/) out of the box. diff --git a/db/autocompact_test.cc b/db/autocompact_test.cc index e6c97a0..d4caf71 100644 --- a/db/autocompact_test.cc +++ b/db/autocompact_test.cc @@ -2,24 +2,24 @@ // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. See the AUTHORS file for names of contributors. 
+#include "third_party/googletest/googletest/include/gtest/gtest.h" #include "db/db_impl.h" #include "leveldb/cache.h" #include "leveldb/db.h" -#include "util/testharness.h" #include "util/testutil.h" namespace leveldb { -class AutoCompactTest { +class AutoCompactTest : public testing::Test { public: AutoCompactTest() { - dbname_ = test::TmpDir() + "/autocompact_test"; + dbname_ = testing::TempDir() + "autocompact_test"; tiny_cache_ = NewLRUCache(100); options_.block_cache = tiny_cache_; DestroyDB(dbname_, options_); options_.create_if_missing = true; options_.compression = kNoCompression; - ASSERT_OK(DB::Open(options_, dbname_, &db_)); + EXPECT_LEVELDB_OK(DB::Open(options_, dbname_, &db_)); } ~AutoCompactTest() { @@ -62,15 +62,15 @@ void AutoCompactTest::DoReads(int n) { // Fill database for (int i = 0; i < kCount; i++) { - ASSERT_OK(db_->Put(WriteOptions(), Key(i), value)); + ASSERT_LEVELDB_OK(db_->Put(WriteOptions(), Key(i), value)); } - ASSERT_OK(dbi->TEST_CompactMemTable()); + ASSERT_LEVELDB_OK(dbi->TEST_CompactMemTable()); // Delete everything for (int i = 0; i < kCount; i++) { - ASSERT_OK(db_->Delete(WriteOptions(), Key(i))); + ASSERT_LEVELDB_OK(db_->Delete(WriteOptions(), Key(i))); } - ASSERT_OK(dbi->TEST_CompactMemTable()); + ASSERT_LEVELDB_OK(dbi->TEST_CompactMemTable()); // Get initial measurement of the space we will be reading. const int64_t initial_size = Size(Key(0), Key(n)); @@ -103,10 +103,13 @@ void AutoCompactTest::DoReads(int n) { ASSERT_GE(final_other_size, initial_other_size / 5 - 1048576); } -TEST(AutoCompactTest, ReadAll) { DoReads(kCount); } +TEST_F(AutoCompactTest, ReadAll) { DoReads(kCount); } -TEST(AutoCompactTest, ReadHalf) { DoReads(kCount / 2); } +TEST_F(AutoCompactTest, ReadHalf) { DoReads(kCount / 2); } } // namespace leveldb -int main(int argc, char** argv) { return leveldb::test::RunAllTests(); } +int main(int argc, char** argv) { + testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} diff --git a/db/corruption_test.cc b/db/corruption_test.cc index 42f5237..4d20946 100644 --- a/db/corruption_test.cc +++ b/db/corruption_test.cc @@ -4,6 +4,7 @@ #include +#include "third_party/googletest/googletest/include/gtest/gtest.h" #include "db/db_impl.h" #include "db/filename.h" #include "db/log_format.h" @@ -13,14 +14,13 @@ #include "leveldb/table.h" #include "leveldb/write_batch.h" #include "util/logging.h" -#include "util/testharness.h" #include "util/testutil.h" namespace leveldb { static const int kValueSize = 1000; -class CorruptionTest { +class CorruptionTest : public testing::Test { public: CorruptionTest() : db_(nullptr), @@ -46,12 +46,12 @@ class CorruptionTest { return DB::Open(options_, dbname_, &db_); } - void Reopen() { ASSERT_OK(TryReopen()); } + void Reopen() { ASSERT_LEVELDB_OK(TryReopen()); } void RepairDB() { delete db_; db_ = nullptr; - ASSERT_OK(::leveldb::RepairDB(dbname_, options_)); + ASSERT_LEVELDB_OK(::leveldb::RepairDB(dbname_, options_)); } void Build(int n) { @@ -68,7 +68,7 @@ class CorruptionTest { if (i == n - 1) { options.sync = true; } - ASSERT_OK(db_->Write(options, &batch)); + ASSERT_LEVELDB_OK(db_->Write(options, &batch)); } } @@ -112,7 +112,7 @@ class CorruptionTest { void Corrupt(FileType filetype, int offset, int bytes_to_corrupt) { // Pick file to corrupt std::vector filenames; - ASSERT_OK(env_.target()->GetChildren(dbname_, &filenames)); + ASSERT_LEVELDB_OK(env_.target()->GetChildren(dbname_, &filenames)); uint64_t number; FileType type; std::string fname; @@ -127,7 +127,7 @@ class CorruptionTest { 
ASSERT_TRUE(!fname.empty()) << filetype; uint64_t file_size; - ASSERT_OK(env_.target()->GetFileSize(fname, &file_size)); + ASSERT_LEVELDB_OK(env_.target()->GetFileSize(fname, &file_size)); if (offset < 0) { // Relative to end of file; make it absolute @@ -189,7 +189,7 @@ class CorruptionTest { Cache* tiny_cache_; }; -TEST(CorruptionTest, Recovery) { +TEST_F(CorruptionTest, Recovery) { Build(100); Check(100, 100); Corrupt(kLogFile, 19, 1); // WriteBatch tag for first record @@ -200,13 +200,13 @@ TEST(CorruptionTest, Recovery) { Check(36, 36); } -TEST(CorruptionTest, RecoverWriteError) { +TEST_F(CorruptionTest, RecoverWriteError) { env_.writable_file_error_ = true; Status s = TryReopen(); ASSERT_TRUE(!s.ok()); } -TEST(CorruptionTest, NewFileErrorDuringWrite) { +TEST_F(CorruptionTest, NewFileErrorDuringWrite) { // Do enough writing to force minor compaction env_.writable_file_error_ = true; const int num = 3 + (Options().write_buffer_size / kValueSize); @@ -223,7 +223,7 @@ TEST(CorruptionTest, NewFileErrorDuringWrite) { Reopen(); } -TEST(CorruptionTest, TableFile) { +TEST_F(CorruptionTest, TableFile) { Build(100); DBImpl* dbi = reinterpret_cast(db_); dbi->TEST_CompactMemTable(); @@ -234,7 +234,7 @@ TEST(CorruptionTest, TableFile) { Check(90, 99); } -TEST(CorruptionTest, TableFileRepair) { +TEST_F(CorruptionTest, TableFileRepair) { options_.block_size = 2 * kValueSize; // Limit scope of corruption options_.paranoid_checks = true; Reopen(); @@ -250,7 +250,7 @@ TEST(CorruptionTest, TableFileRepair) { Check(95, 99); } -TEST(CorruptionTest, TableFileIndexData) { +TEST_F(CorruptionTest, TableFileIndexData) { Build(10000); // Enough to build multiple Tables DBImpl* dbi = reinterpret_cast(db_); dbi->TEST_CompactMemTable(); @@ -260,36 +260,36 @@ TEST(CorruptionTest, TableFileIndexData) { Check(5000, 9999); } -TEST(CorruptionTest, MissingDescriptor) { +TEST_F(CorruptionTest, MissingDescriptor) { Build(1000); RepairDB(); Reopen(); Check(1000, 1000); } -TEST(CorruptionTest, SequenceNumberRecovery) { - ASSERT_OK(db_->Put(WriteOptions(), "foo", "v1")); - ASSERT_OK(db_->Put(WriteOptions(), "foo", "v2")); - ASSERT_OK(db_->Put(WriteOptions(), "foo", "v3")); - ASSERT_OK(db_->Put(WriteOptions(), "foo", "v4")); - ASSERT_OK(db_->Put(WriteOptions(), "foo", "v5")); +TEST_F(CorruptionTest, SequenceNumberRecovery) { + ASSERT_LEVELDB_OK(db_->Put(WriteOptions(), "foo", "v1")); + ASSERT_LEVELDB_OK(db_->Put(WriteOptions(), "foo", "v2")); + ASSERT_LEVELDB_OK(db_->Put(WriteOptions(), "foo", "v3")); + ASSERT_LEVELDB_OK(db_->Put(WriteOptions(), "foo", "v4")); + ASSERT_LEVELDB_OK(db_->Put(WriteOptions(), "foo", "v5")); RepairDB(); Reopen(); std::string v; - ASSERT_OK(db_->Get(ReadOptions(), "foo", &v)); + ASSERT_LEVELDB_OK(db_->Get(ReadOptions(), "foo", &v)); ASSERT_EQ("v5", v); // Write something. If sequence number was not recovered properly, // it will be hidden by an earlier write. 
- ASSERT_OK(db_->Put(WriteOptions(), "foo", "v6")); - ASSERT_OK(db_->Get(ReadOptions(), "foo", &v)); + ASSERT_LEVELDB_OK(db_->Put(WriteOptions(), "foo", "v6")); + ASSERT_LEVELDB_OK(db_->Get(ReadOptions(), "foo", &v)); ASSERT_EQ("v6", v); Reopen(); - ASSERT_OK(db_->Get(ReadOptions(), "foo", &v)); + ASSERT_LEVELDB_OK(db_->Get(ReadOptions(), "foo", &v)); ASSERT_EQ("v6", v); } -TEST(CorruptionTest, CorruptedDescriptor) { - ASSERT_OK(db_->Put(WriteOptions(), "foo", "hello")); +TEST_F(CorruptionTest, CorruptedDescriptor) { + ASSERT_LEVELDB_OK(db_->Put(WriteOptions(), "foo", "hello")); DBImpl* dbi = reinterpret_cast(db_); dbi->TEST_CompactMemTable(); dbi->TEST_CompactRange(0, nullptr, nullptr); @@ -301,11 +301,11 @@ TEST(CorruptionTest, CorruptedDescriptor) { RepairDB(); Reopen(); std::string v; - ASSERT_OK(db_->Get(ReadOptions(), "foo", &v)); + ASSERT_LEVELDB_OK(db_->Get(ReadOptions(), "foo", &v)); ASSERT_EQ("hello", v); } -TEST(CorruptionTest, CompactionInputError) { +TEST_F(CorruptionTest, CompactionInputError) { Build(10); DBImpl* dbi = reinterpret_cast(db_); dbi->TEST_CompactMemTable(); @@ -320,7 +320,7 @@ TEST(CorruptionTest, CompactionInputError) { Check(10000, 10000); } -TEST(CorruptionTest, CompactionInputErrorParanoid) { +TEST_F(CorruptionTest, CompactionInputErrorParanoid) { options_.paranoid_checks = true; options_.write_buffer_size = 512 << 10; Reopen(); @@ -341,22 +341,26 @@ TEST(CorruptionTest, CompactionInputErrorParanoid) { ASSERT_TRUE(!s.ok()) << "write did not fail in corrupted paranoid db"; } -TEST(CorruptionTest, UnrelatedKeys) { +TEST_F(CorruptionTest, UnrelatedKeys) { Build(10); DBImpl* dbi = reinterpret_cast(db_); dbi->TEST_CompactMemTable(); Corrupt(kTableFile, 100, 1); std::string tmp1, tmp2; - ASSERT_OK(db_->Put(WriteOptions(), Key(1000, &tmp1), Value(1000, &tmp2))); + ASSERT_LEVELDB_OK( + db_->Put(WriteOptions(), Key(1000, &tmp1), Value(1000, &tmp2))); std::string v; - ASSERT_OK(db_->Get(ReadOptions(), Key(1000, &tmp1), &v)); + ASSERT_LEVELDB_OK(db_->Get(ReadOptions(), Key(1000, &tmp1), &v)); ASSERT_EQ(Value(1000, &tmp2).ToString(), v); dbi->TEST_CompactMemTable(); - ASSERT_OK(db_->Get(ReadOptions(), Key(1000, &tmp1), &v)); + ASSERT_LEVELDB_OK(db_->Get(ReadOptions(), Key(1000, &tmp1), &v)); ASSERT_EQ(Value(1000, &tmp2).ToString(), v); } } // namespace leveldb -int main(int argc, char** argv) { return leveldb::test::RunAllTests(); } +int main(int argc, char** argv) { + testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} diff --git a/db/db_test.cc b/db/db_test.cc index 9a8faf1..e8e3495 100644 --- a/db/db_test.cc +++ b/db/db_test.cc @@ -7,6 +7,7 @@ #include #include +#include "third_party/googletest/googletest/include/gtest/gtest.h" #include "db/db_impl.h" #include "db/filename.h" #include "db/version_set.h" @@ -20,7 +21,6 @@ #include "util/hash.h" #include "util/logging.h" #include "util/mutexlock.h" -#include "util/testharness.h" #include "util/testutil.h" namespace leveldb { @@ -226,7 +226,7 @@ class SpecialEnv : public EnvWrapper { } }; -class DBTest { +class DBTest : public testing::Test { public: std::string dbname_; SpecialEnv* env_; @@ -236,7 +236,7 @@ class DBTest { DBTest() : env_(new SpecialEnv(Env::Default())), option_config_(kDefault) { filter_policy_ = NewBloomFilterPolicy(10); - dbname_ = test::TmpDir() + "/db_test"; + dbname_ = testing::TempDir() + "db_test"; DestroyDB(dbname_, Options()); db_ = nullptr; Reopen(); @@ -283,7 +283,9 @@ class DBTest { DBImpl* dbfull() { return reinterpret_cast(db_); } - void Reopen(Options* options = nullptr) { 
ASSERT_OK(TryReopen(options)); } + void Reopen(Options* options = nullptr) { + ASSERT_LEVELDB_OK(TryReopen(options)); + } void Close() { delete db_; @@ -294,7 +296,7 @@ class DBTest { delete db_; db_ = nullptr; DestroyDB(dbname_, Options()); - ASSERT_OK(TryReopen(options)); + ASSERT_LEVELDB_OK(TryReopen(options)); } Status TryReopen(Options* options) { @@ -348,11 +350,11 @@ class DBTest { // Check reverse iteration results are the reverse of forward results size_t matched = 0; for (iter->SeekToLast(); iter->Valid(); iter->Prev()) { - ASSERT_LT(matched, forward.size()); - ASSERT_EQ(IterStatus(iter), forward[forward.size() - matched - 1]); + EXPECT_LT(matched, forward.size()); + EXPECT_EQ(IterStatus(iter), forward[forward.size() - matched - 1]); matched++; } - ASSERT_EQ(matched, forward.size()); + EXPECT_EQ(matched, forward.size()); delete iter; return result; @@ -402,7 +404,7 @@ class DBTest { int NumTableFilesAtLevel(int level) { std::string property; - ASSERT_TRUE(db_->GetProperty( + EXPECT_TRUE(db_->GetProperty( "leveldb.num-files-at-level" + NumberToString(level), &property)); return std::stoi(property); } @@ -497,12 +499,12 @@ class DBTest { bool DeleteAnSSTFile() { std::vector filenames; - ASSERT_OK(env_->GetChildren(dbname_, &filenames)); + EXPECT_LEVELDB_OK(env_->GetChildren(dbname_, &filenames)); uint64_t number; FileType type; for (size_t i = 0; i < filenames.size(); i++) { if (ParseFileName(filenames[i], &number, &type) && type == kTableFile) { - ASSERT_OK(env_->DeleteFile(TableFileName(dbname_, number))); + EXPECT_LEVELDB_OK(env_->DeleteFile(TableFileName(dbname_, number))); return true; } } @@ -512,7 +514,7 @@ class DBTest { // Returns number of files renamed. int RenameLDBToSST() { std::vector filenames; - ASSERT_OK(env_->GetChildren(dbname_, &filenames)); + EXPECT_LEVELDB_OK(env_->GetChildren(dbname_, &filenames)); uint64_t number; FileType type; int files_renamed = 0; @@ -520,7 +522,7 @@ class DBTest { if (ParseFileName(filenames[i], &number, &type) && type == kTableFile) { const std::string from = TableFileName(dbname_, number); const std::string to = SSTTableFileName(dbname_, number); - ASSERT_OK(env_->RenameFile(from, to)); + EXPECT_LEVELDB_OK(env_->RenameFile(from, to)); files_renamed++; } } @@ -535,63 +537,63 @@ class DBTest { int option_config_; }; -TEST(DBTest, Empty) { +TEST_F(DBTest, Empty) { do { ASSERT_TRUE(db_ != nullptr); ASSERT_EQ("NOT_FOUND", Get("foo")); } while (ChangeOptions()); } -TEST(DBTest, EmptyKey) { +TEST_F(DBTest, EmptyKey) { do { - ASSERT_OK(Put("", "v1")); + ASSERT_LEVELDB_OK(Put("", "v1")); ASSERT_EQ("v1", Get("")); - ASSERT_OK(Put("", "v2")); + ASSERT_LEVELDB_OK(Put("", "v2")); ASSERT_EQ("v2", Get("")); } while (ChangeOptions()); } -TEST(DBTest, EmptyValue) { +TEST_F(DBTest, EmptyValue) { do { - ASSERT_OK(Put("key", "v1")); + ASSERT_LEVELDB_OK(Put("key", "v1")); ASSERT_EQ("v1", Get("key")); - ASSERT_OK(Put("key", "")); + ASSERT_LEVELDB_OK(Put("key", "")); ASSERT_EQ("", Get("key")); - ASSERT_OK(Put("key", "v2")); + ASSERT_LEVELDB_OK(Put("key", "v2")); ASSERT_EQ("v2", Get("key")); } while (ChangeOptions()); } -TEST(DBTest, ReadWrite) { +TEST_F(DBTest, ReadWrite) { do { - ASSERT_OK(Put("foo", "v1")); + ASSERT_LEVELDB_OK(Put("foo", "v1")); ASSERT_EQ("v1", Get("foo")); - ASSERT_OK(Put("bar", "v2")); - ASSERT_OK(Put("foo", "v3")); + ASSERT_LEVELDB_OK(Put("bar", "v2")); + ASSERT_LEVELDB_OK(Put("foo", "v3")); ASSERT_EQ("v3", Get("foo")); ASSERT_EQ("v2", Get("bar")); } while (ChangeOptions()); } -TEST(DBTest, PutDeleteGet) { +TEST_F(DBTest, PutDeleteGet) 
{ do { - ASSERT_OK(db_->Put(WriteOptions(), "foo", "v1")); + ASSERT_LEVELDB_OK(db_->Put(WriteOptions(), "foo", "v1")); ASSERT_EQ("v1", Get("foo")); - ASSERT_OK(db_->Put(WriteOptions(), "foo", "v2")); + ASSERT_LEVELDB_OK(db_->Put(WriteOptions(), "foo", "v2")); ASSERT_EQ("v2", Get("foo")); - ASSERT_OK(db_->Delete(WriteOptions(), "foo")); + ASSERT_LEVELDB_OK(db_->Delete(WriteOptions(), "foo")); ASSERT_EQ("NOT_FOUND", Get("foo")); } while (ChangeOptions()); } -TEST(DBTest, GetFromImmutableLayer) { +TEST_F(DBTest, GetFromImmutableLayer) { do { Options options = CurrentOptions(); options.env = env_; options.write_buffer_size = 100000; // Small write buffer Reopen(&options); - ASSERT_OK(Put("foo", "v1")); + ASSERT_LEVELDB_OK(Put("foo", "v1")); ASSERT_EQ("v1", Get("foo")); // Block sync calls. @@ -604,17 +606,17 @@ TEST(DBTest, GetFromImmutableLayer) { } while (ChangeOptions()); } -TEST(DBTest, GetFromVersions) { +TEST_F(DBTest, GetFromVersions) { do { - ASSERT_OK(Put("foo", "v1")); + ASSERT_LEVELDB_OK(Put("foo", "v1")); dbfull()->TEST_CompactMemTable(); ASSERT_EQ("v1", Get("foo")); } while (ChangeOptions()); } -TEST(DBTest, GetMemUsage) { +TEST_F(DBTest, GetMemUsage) { do { - ASSERT_OK(Put("foo", "v1")); + ASSERT_LEVELDB_OK(Put("foo", "v1")); std::string val; ASSERT_TRUE(db_->GetProperty("leveldb.approximate-memory-usage", &val)); int mem_usage = std::stoi(val); @@ -623,14 +625,14 @@ TEST(DBTest, GetMemUsage) { } while (ChangeOptions()); } -TEST(DBTest, GetSnapshot) { +TEST_F(DBTest, GetSnapshot) { do { // Try with both a short key and a long key for (int i = 0; i < 2; i++) { std::string key = (i == 0) ? std::string("foo") : std::string(200, 'x'); - ASSERT_OK(Put(key, "v1")); + ASSERT_LEVELDB_OK(Put(key, "v1")); const Snapshot* s1 = db_->GetSnapshot(); - ASSERT_OK(Put(key, "v2")); + ASSERT_LEVELDB_OK(Put(key, "v2")); ASSERT_EQ("v2", Get(key)); ASSERT_EQ("v1", Get(key, s1)); dbfull()->TEST_CompactMemTable(); @@ -641,16 +643,16 @@ TEST(DBTest, GetSnapshot) { } while (ChangeOptions()); } -TEST(DBTest, GetIdenticalSnapshots) { +TEST_F(DBTest, GetIdenticalSnapshots) { do { // Try with both a short key and a long key for (int i = 0; i < 2; i++) { std::string key = (i == 0) ? std::string("foo") : std::string(200, 'x'); - ASSERT_OK(Put(key, "v1")); + ASSERT_LEVELDB_OK(Put(key, "v1")); const Snapshot* s1 = db_->GetSnapshot(); const Snapshot* s2 = db_->GetSnapshot(); const Snapshot* s3 = db_->GetSnapshot(); - ASSERT_OK(Put(key, "v2")); + ASSERT_LEVELDB_OK(Put(key, "v2")); ASSERT_EQ("v2", Get(key)); ASSERT_EQ("v1", Get(key, s1)); ASSERT_EQ("v1", Get(key, s2)); @@ -666,13 +668,13 @@ TEST(DBTest, GetIdenticalSnapshots) { } while (ChangeOptions()); } -TEST(DBTest, IterateOverEmptySnapshot) { +TEST_F(DBTest, IterateOverEmptySnapshot) { do { const Snapshot* snapshot = db_->GetSnapshot(); ReadOptions read_options; read_options.snapshot = snapshot; - ASSERT_OK(Put("foo", "v1")); - ASSERT_OK(Put("foo", "v2")); + ASSERT_LEVELDB_OK(Put("foo", "v1")); + ASSERT_LEVELDB_OK(Put("foo", "v2")); Iterator* iterator1 = db_->NewIterator(read_options); iterator1->SeekToFirst(); @@ -690,41 +692,41 @@ TEST(DBTest, IterateOverEmptySnapshot) { } while (ChangeOptions()); } -TEST(DBTest, GetLevel0Ordering) { +TEST_F(DBTest, GetLevel0Ordering) { do { // Check that we process level-0 files in correct order. The code // below generates two level-0 files where the earlier one comes // before the later one in the level-0 file list since the earlier // one has a smaller "smallest" key. 
- ASSERT_OK(Put("bar", "b")); - ASSERT_OK(Put("foo", "v1")); + ASSERT_LEVELDB_OK(Put("bar", "b")); + ASSERT_LEVELDB_OK(Put("foo", "v1")); dbfull()->TEST_CompactMemTable(); - ASSERT_OK(Put("foo", "v2")); + ASSERT_LEVELDB_OK(Put("foo", "v2")); dbfull()->TEST_CompactMemTable(); ASSERT_EQ("v2", Get("foo")); } while (ChangeOptions()); } -TEST(DBTest, GetOrderedByLevels) { +TEST_F(DBTest, GetOrderedByLevels) { do { - ASSERT_OK(Put("foo", "v1")); + ASSERT_LEVELDB_OK(Put("foo", "v1")); Compact("a", "z"); ASSERT_EQ("v1", Get("foo")); - ASSERT_OK(Put("foo", "v2")); + ASSERT_LEVELDB_OK(Put("foo", "v2")); ASSERT_EQ("v2", Get("foo")); dbfull()->TEST_CompactMemTable(); ASSERT_EQ("v2", Get("foo")); } while (ChangeOptions()); } -TEST(DBTest, GetPicksCorrectFile) { +TEST_F(DBTest, GetPicksCorrectFile) { do { // Arrange to have multiple files in a non-level-0 level. - ASSERT_OK(Put("a", "va")); + ASSERT_LEVELDB_OK(Put("a", "va")); Compact("a", "b"); - ASSERT_OK(Put("x", "vx")); + ASSERT_LEVELDB_OK(Put("x", "vx")); Compact("x", "y"); - ASSERT_OK(Put("f", "vf")); + ASSERT_LEVELDB_OK(Put("f", "vf")); Compact("f", "g"); ASSERT_EQ("va", Get("a")); ASSERT_EQ("vf", Get("f")); @@ -732,7 +734,7 @@ TEST(DBTest, GetPicksCorrectFile) { } while (ChangeOptions()); } -TEST(DBTest, GetEncountersEmptyLevel) { +TEST_F(DBTest, GetEncountersEmptyLevel) { do { // Arrange for the following to happen: // * sstable A in level 0 @@ -770,7 +772,7 @@ TEST(DBTest, GetEncountersEmptyLevel) { } while (ChangeOptions()); } -TEST(DBTest, IterEmpty) { +TEST_F(DBTest, IterEmpty) { Iterator* iter = db_->NewIterator(ReadOptions()); iter->SeekToFirst(); @@ -785,8 +787,8 @@ TEST(DBTest, IterEmpty) { delete iter; } -TEST(DBTest, IterSingle) { - ASSERT_OK(Put("a", "va")); +TEST_F(DBTest, IterSingle) { + ASSERT_LEVELDB_OK(Put("a", "va")); Iterator* iter = db_->NewIterator(ReadOptions()); iter->SeekToFirst(); @@ -823,10 +825,10 @@ TEST(DBTest, IterSingle) { delete iter; } -TEST(DBTest, IterMulti) { - ASSERT_OK(Put("a", "va")); - ASSERT_OK(Put("b", "vb")); - ASSERT_OK(Put("c", "vc")); +TEST_F(DBTest, IterMulti) { + ASSERT_LEVELDB_OK(Put("a", "va")); + ASSERT_LEVELDB_OK(Put("b", "vb")); + ASSERT_LEVELDB_OK(Put("c", "vc")); Iterator* iter = db_->NewIterator(ReadOptions()); iter->SeekToFirst(); @@ -881,11 +883,11 @@ TEST(DBTest, IterMulti) { ASSERT_EQ(IterStatus(iter), "b->vb"); // Make sure iter stays at snapshot - ASSERT_OK(Put("a", "va2")); - ASSERT_OK(Put("a2", "va3")); - ASSERT_OK(Put("b", "vb2")); - ASSERT_OK(Put("c", "vc2")); - ASSERT_OK(Delete("b")); + ASSERT_LEVELDB_OK(Put("a", "va2")); + ASSERT_LEVELDB_OK(Put("a2", "va3")); + ASSERT_LEVELDB_OK(Put("b", "vb2")); + ASSERT_LEVELDB_OK(Put("c", "vc2")); + ASSERT_LEVELDB_OK(Delete("b")); iter->SeekToFirst(); ASSERT_EQ(IterStatus(iter), "a->va"); iter->Next(); @@ -906,12 +908,12 @@ TEST(DBTest, IterMulti) { delete iter; } -TEST(DBTest, IterSmallAndLargeMix) { - ASSERT_OK(Put("a", "va")); - ASSERT_OK(Put("b", std::string(100000, 'b'))); - ASSERT_OK(Put("c", "vc")); - ASSERT_OK(Put("d", std::string(100000, 'd'))); - ASSERT_OK(Put("e", std::string(100000, 'e'))); +TEST_F(DBTest, IterSmallAndLargeMix) { + ASSERT_LEVELDB_OK(Put("a", "va")); + ASSERT_LEVELDB_OK(Put("b", std::string(100000, 'b'))); + ASSERT_LEVELDB_OK(Put("c", "vc")); + ASSERT_LEVELDB_OK(Put("d", std::string(100000, 'd'))); + ASSERT_LEVELDB_OK(Put("e", std::string(100000, 'e'))); Iterator* iter = db_->NewIterator(ReadOptions()); @@ -944,12 +946,12 @@ TEST(DBTest, IterSmallAndLargeMix) { delete iter; } -TEST(DBTest, IterMultiWithDelete) { 
+TEST_F(DBTest, IterMultiWithDelete) { do { - ASSERT_OK(Put("a", "va")); - ASSERT_OK(Put("b", "vb")); - ASSERT_OK(Put("c", "vc")); - ASSERT_OK(Delete("b")); + ASSERT_LEVELDB_OK(Put("a", "va")); + ASSERT_LEVELDB_OK(Put("b", "vb")); + ASSERT_LEVELDB_OK(Put("c", "vc")); + ASSERT_LEVELDB_OK(Delete("b")); ASSERT_EQ("NOT_FOUND", Get("b")); Iterator* iter = db_->NewIterator(ReadOptions()); @@ -961,35 +963,35 @@ TEST(DBTest, IterMultiWithDelete) { } while (ChangeOptions()); } -TEST(DBTest, Recover) { +TEST_F(DBTest, Recover) { do { - ASSERT_OK(Put("foo", "v1")); - ASSERT_OK(Put("baz", "v5")); + ASSERT_LEVELDB_OK(Put("foo", "v1")); + ASSERT_LEVELDB_OK(Put("baz", "v5")); Reopen(); ASSERT_EQ("v1", Get("foo")); ASSERT_EQ("v1", Get("foo")); ASSERT_EQ("v5", Get("baz")); - ASSERT_OK(Put("bar", "v2")); - ASSERT_OK(Put("foo", "v3")); + ASSERT_LEVELDB_OK(Put("bar", "v2")); + ASSERT_LEVELDB_OK(Put("foo", "v3")); Reopen(); ASSERT_EQ("v3", Get("foo")); - ASSERT_OK(Put("foo", "v4")); + ASSERT_LEVELDB_OK(Put("foo", "v4")); ASSERT_EQ("v4", Get("foo")); ASSERT_EQ("v2", Get("bar")); ASSERT_EQ("v5", Get("baz")); } while (ChangeOptions()); } -TEST(DBTest, RecoveryWithEmptyLog) { +TEST_F(DBTest, RecoveryWithEmptyLog) { do { - ASSERT_OK(Put("foo", "v1")); - ASSERT_OK(Put("foo", "v2")); + ASSERT_LEVELDB_OK(Put("foo", "v1")); + ASSERT_LEVELDB_OK(Put("foo", "v2")); Reopen(); Reopen(); - ASSERT_OK(Put("foo", "v3")); + ASSERT_LEVELDB_OK(Put("foo", "v3")); Reopen(); ASSERT_EQ("v3", Get("foo")); } while (ChangeOptions()); @@ -997,7 +999,7 @@ TEST(DBTest, RecoveryWithEmptyLog) { // Check that writes done during a memtable compaction are recovered // if the database is shutdown during the memtable compaction. -TEST(DBTest, RecoverDuringMemtableCompaction) { +TEST_F(DBTest, RecoverDuringMemtableCompaction) { do { Options options = CurrentOptions(); options.env = env_; @@ -1005,10 +1007,12 @@ TEST(DBTest, RecoverDuringMemtableCompaction) { Reopen(&options); // Trigger a long memtable compaction and reopen the database during it - ASSERT_OK(Put("foo", "v1")); // Goes to 1st log file - ASSERT_OK(Put("big1", std::string(10000000, 'x'))); // Fills memtable - ASSERT_OK(Put("big2", std::string(1000, 'y'))); // Triggers compaction - ASSERT_OK(Put("bar", "v2")); // Goes to new log file + ASSERT_LEVELDB_OK(Put("foo", "v1")); // Goes to 1st log file + ASSERT_LEVELDB_OK( + Put("big1", std::string(10000000, 'x'))); // Fills memtable + ASSERT_LEVELDB_OK( + Put("big2", std::string(1000, 'y'))); // Triggers compaction + ASSERT_LEVELDB_OK(Put("bar", "v2")); // Goes to new log file Reopen(&options); ASSERT_EQ("v1", Get("foo")); @@ -1024,7 +1028,7 @@ static std::string Key(int i) { return std::string(buf); } -TEST(DBTest, MinorCompactionsHappen) { +TEST_F(DBTest, MinorCompactionsHappen) { Options options = CurrentOptions(); options.write_buffer_size = 10000; Reopen(&options); @@ -1033,7 +1037,7 @@ TEST(DBTest, MinorCompactionsHappen) { int starting_num_tables = TotalTableFiles(); for (int i = 0; i < N; i++) { - ASSERT_OK(Put(Key(i), Key(i) + std::string(1000, 'v'))); + ASSERT_LEVELDB_OK(Put(Key(i), Key(i) + std::string(1000, 'v'))); } int ending_num_tables = TotalTableFiles(); ASSERT_GT(ending_num_tables, starting_num_tables); @@ -1049,14 +1053,14 @@ TEST(DBTest, MinorCompactionsHappen) { } } -TEST(DBTest, RecoverWithLargeLog) { +TEST_F(DBTest, RecoverWithLargeLog) { { Options options = CurrentOptions(); Reopen(&options); - ASSERT_OK(Put("big1", std::string(200000, '1'))); - ASSERT_OK(Put("big2", std::string(200000, '2'))); - 
ASSERT_OK(Put("small3", std::string(10, '3'))); - ASSERT_OK(Put("small4", std::string(10, '4'))); + ASSERT_LEVELDB_OK(Put("big1", std::string(200000, '1'))); + ASSERT_LEVELDB_OK(Put("big2", std::string(200000, '2'))); + ASSERT_LEVELDB_OK(Put("small3", std::string(10, '3'))); + ASSERT_LEVELDB_OK(Put("small4", std::string(10, '4'))); ASSERT_EQ(NumTableFilesAtLevel(0), 0); } @@ -1073,7 +1077,7 @@ TEST(DBTest, RecoverWithLargeLog) { ASSERT_GT(NumTableFilesAtLevel(0), 1); } -TEST(DBTest, CompactionsGenerateMultipleFiles) { +TEST_F(DBTest, CompactionsGenerateMultipleFiles) { Options options = CurrentOptions(); options.write_buffer_size = 100000000; // Large write buffer Reopen(&options); @@ -1085,7 +1089,7 @@ TEST(DBTest, CompactionsGenerateMultipleFiles) { std::vector values; for (int i = 0; i < 80; i++) { values.push_back(RandomString(&rnd, 100000)); - ASSERT_OK(Put(Key(i), values[i])); + ASSERT_LEVELDB_OK(Put(Key(i), values[i])); } // Reopening moves updates to level-0 @@ -1099,7 +1103,7 @@ TEST(DBTest, CompactionsGenerateMultipleFiles) { } } -TEST(DBTest, RepeatedWritesToSameKey) { +TEST_F(DBTest, RepeatedWritesToSameKey) { Options options = CurrentOptions(); options.env = env_; options.write_buffer_size = 100000; // Small write buffer @@ -1118,7 +1122,7 @@ TEST(DBTest, RepeatedWritesToSameKey) { } } -TEST(DBTest, SparseMerge) { +TEST_F(DBTest, SparseMerge) { Options options = CurrentOptions(); options.compression = kNoCompression; Reopen(&options); @@ -1168,7 +1172,7 @@ static bool Between(uint64_t val, uint64_t low, uint64_t high) { return result; } -TEST(DBTest, ApproximateSizes) { +TEST_F(DBTest, ApproximateSizes) { do { Options options = CurrentOptions(); options.write_buffer_size = 100000000; // Large write buffer @@ -1186,7 +1190,7 @@ TEST(DBTest, ApproximateSizes) { static const int S2 = 105000; // Allow some expansion from metadata Random rnd(301); for (int i = 0; i < N; i++) { - ASSERT_OK(Put(Key(i), RandomString(&rnd, S1))); + ASSERT_LEVELDB_OK(Put(Key(i), RandomString(&rnd, S1))); } // 0 because GetApproximateSizes() does not account for memtable space @@ -1227,7 +1231,7 @@ TEST(DBTest, ApproximateSizes) { } while (ChangeOptions()); } -TEST(DBTest, ApproximateSizes_MixOfSmallAndLarge) { +TEST_F(DBTest, ApproximateSizes_MixOfSmallAndLarge) { do { Options options = CurrentOptions(); options.compression = kNoCompression; @@ -1235,18 +1239,18 @@ TEST(DBTest, ApproximateSizes_MixOfSmallAndLarge) { Random rnd(301); std::string big1 = RandomString(&rnd, 100000); - ASSERT_OK(Put(Key(0), RandomString(&rnd, 10000))); - ASSERT_OK(Put(Key(1), RandomString(&rnd, 10000))); - ASSERT_OK(Put(Key(2), big1)); - ASSERT_OK(Put(Key(3), RandomString(&rnd, 10000))); - ASSERT_OK(Put(Key(4), big1)); - ASSERT_OK(Put(Key(5), RandomString(&rnd, 10000))); - ASSERT_OK(Put(Key(6), RandomString(&rnd, 300000))); - ASSERT_OK(Put(Key(7), RandomString(&rnd, 10000))); + ASSERT_LEVELDB_OK(Put(Key(0), RandomString(&rnd, 10000))); + ASSERT_LEVELDB_OK(Put(Key(1), RandomString(&rnd, 10000))); + ASSERT_LEVELDB_OK(Put(Key(2), big1)); + ASSERT_LEVELDB_OK(Put(Key(3), RandomString(&rnd, 10000))); + ASSERT_LEVELDB_OK(Put(Key(4), big1)); + ASSERT_LEVELDB_OK(Put(Key(5), RandomString(&rnd, 10000))); + ASSERT_LEVELDB_OK(Put(Key(6), RandomString(&rnd, 300000))); + ASSERT_LEVELDB_OK(Put(Key(7), RandomString(&rnd, 10000))); if (options.reuse_logs) { // Need to force a memtable compaction since recovery does not do so. 
- ASSERT_OK(dbfull()->TEST_CompactMemTable()); + ASSERT_LEVELDB_OK(dbfull()->TEST_CompactMemTable()); } // Check sizes across recovery by reopening a few times @@ -1270,7 +1274,7 @@ TEST(DBTest, ApproximateSizes_MixOfSmallAndLarge) { } while (ChangeOptions()); } -TEST(DBTest, IteratorPinsRef) { +TEST_F(DBTest, IteratorPinsRef) { Put("foo", "hello"); // Get iterator that will yield the current contents of the DB. @@ -1279,7 +1283,8 @@ TEST(DBTest, IteratorPinsRef) { // Write to force compactions Put("foo", "newvalue1"); for (int i = 0; i < 100; i++) { - ASSERT_OK(Put(Key(i), Key(i) + std::string(100000, 'v'))); // 100K values + ASSERT_LEVELDB_OK( + Put(Key(i), Key(i) + std::string(100000, 'v'))); // 100K values } Put("foo", "newvalue2"); @@ -1292,7 +1297,7 @@ TEST(DBTest, IteratorPinsRef) { delete iter; } -TEST(DBTest, Snapshot) { +TEST_F(DBTest, Snapshot) { do { Put("foo", "v1"); const Snapshot* s1 = db_->GetSnapshot(); @@ -1321,7 +1326,7 @@ TEST(DBTest, Snapshot) { } while (ChangeOptions()); } -TEST(DBTest, HiddenValuesAreRemoved) { +TEST_F(DBTest, HiddenValuesAreRemoved) { do { Random rnd(301); FillLevels("a", "z"); @@ -1333,7 +1338,7 @@ TEST(DBTest, HiddenValuesAreRemoved) { Put("foo", "tiny"); Put("pastfoo2", "v2"); // Advance sequence number one more - ASSERT_OK(dbfull()->TEST_CompactMemTable()); + ASSERT_LEVELDB_OK(dbfull()->TEST_CompactMemTable()); ASSERT_GT(NumTableFilesAtLevel(0), 0); ASSERT_EQ(big, Get("foo", snapshot)); @@ -1352,9 +1357,9 @@ TEST(DBTest, HiddenValuesAreRemoved) { } while (ChangeOptions()); } -TEST(DBTest, DeletionMarkers1) { +TEST_F(DBTest, DeletionMarkers1) { Put("foo", "v1"); - ASSERT_OK(dbfull()->TEST_CompactMemTable()); + ASSERT_LEVELDB_OK(dbfull()->TEST_CompactMemTable()); const int last = config::kMaxMemCompactLevel; ASSERT_EQ(NumTableFilesAtLevel(last), 1); // foo => v1 is now in last level @@ -1368,7 +1373,7 @@ TEST(DBTest, DeletionMarkers1) { Delete("foo"); Put("foo", "v2"); ASSERT_EQ(AllEntriesFor("foo"), "[ v2, DEL, v1 ]"); - ASSERT_OK(dbfull()->TEST_CompactMemTable()); // Moves to level last-2 + ASSERT_LEVELDB_OK(dbfull()->TEST_CompactMemTable()); // Moves to level last-2 ASSERT_EQ(AllEntriesFor("foo"), "[ v2, DEL, v1 ]"); Slice z("z"); dbfull()->TEST_CompactRange(last - 2, nullptr, &z); @@ -1381,9 +1386,9 @@ TEST(DBTest, DeletionMarkers1) { ASSERT_EQ(AllEntriesFor("foo"), "[ v2 ]"); } -TEST(DBTest, DeletionMarkers2) { +TEST_F(DBTest, DeletionMarkers2) { Put("foo", "v1"); - ASSERT_OK(dbfull()->TEST_CompactMemTable()); + ASSERT_LEVELDB_OK(dbfull()->TEST_CompactMemTable()); const int last = config::kMaxMemCompactLevel; ASSERT_EQ(NumTableFilesAtLevel(last), 1); // foo => v1 is now in last level @@ -1396,7 +1401,7 @@ TEST(DBTest, DeletionMarkers2) { Delete("foo"); ASSERT_EQ(AllEntriesFor("foo"), "[ DEL, v1 ]"); - ASSERT_OK(dbfull()->TEST_CompactMemTable()); // Moves to level last-2 + ASSERT_LEVELDB_OK(dbfull()->TEST_CompactMemTable()); // Moves to level last-2 ASSERT_EQ(AllEntriesFor("foo"), "[ DEL, v1 ]"); dbfull()->TEST_CompactRange(last - 2, nullptr, nullptr); // DEL kept: "last" file overlaps @@ -1407,17 +1412,17 @@ TEST(DBTest, DeletionMarkers2) { ASSERT_EQ(AllEntriesFor("foo"), "[ ]"); } -TEST(DBTest, OverlapInLevel0) { +TEST_F(DBTest, OverlapInLevel0) { do { ASSERT_EQ(config::kMaxMemCompactLevel, 2) << "Fix test to match config"; // Fill levels 1 and 2 to disable the pushing of new memtables to levels > // 0. 
- ASSERT_OK(Put("100", "v100")); - ASSERT_OK(Put("999", "v999")); + ASSERT_LEVELDB_OK(Put("100", "v100")); + ASSERT_LEVELDB_OK(Put("999", "v999")); dbfull()->TEST_CompactMemTable(); - ASSERT_OK(Delete("100")); - ASSERT_OK(Delete("999")); + ASSERT_LEVELDB_OK(Delete("100")); + ASSERT_LEVELDB_OK(Delete("999")); dbfull()->TEST_CompactMemTable(); ASSERT_EQ("0,1,1", FilesPerLevel()); @@ -1425,12 +1430,12 @@ TEST(DBTest, OverlapInLevel0) { // files[0] 200 .. 900 // files[1] 300 .. 500 // Note that files are sorted by smallest key. - ASSERT_OK(Put("300", "v300")); - ASSERT_OK(Put("500", "v500")); + ASSERT_LEVELDB_OK(Put("300", "v300")); + ASSERT_LEVELDB_OK(Put("500", "v500")); dbfull()->TEST_CompactMemTable(); - ASSERT_OK(Put("200", "v200")); - ASSERT_OK(Put("600", "v600")); - ASSERT_OK(Put("900", "v900")); + ASSERT_LEVELDB_OK(Put("200", "v200")); + ASSERT_LEVELDB_OK(Put("600", "v600")); + ASSERT_LEVELDB_OK(Put("900", "v900")); dbfull()->TEST_CompactMemTable(); ASSERT_EQ("2,1,1", FilesPerLevel()); @@ -1442,23 +1447,23 @@ TEST(DBTest, OverlapInLevel0) { // Do a memtable compaction. Before bug-fix, the compaction would // not detect the overlap with level-0 files and would incorrectly place // the deletion in a deeper level. - ASSERT_OK(Delete("600")); + ASSERT_LEVELDB_OK(Delete("600")); dbfull()->TEST_CompactMemTable(); ASSERT_EQ("3", FilesPerLevel()); ASSERT_EQ("NOT_FOUND", Get("600")); } while (ChangeOptions()); } -TEST(DBTest, L0_CompactionBug_Issue44_a) { +TEST_F(DBTest, L0_CompactionBug_Issue44_a) { Reopen(); - ASSERT_OK(Put("b", "v")); + ASSERT_LEVELDB_OK(Put("b", "v")); Reopen(); - ASSERT_OK(Delete("b")); - ASSERT_OK(Delete("a")); + ASSERT_LEVELDB_OK(Delete("b")); + ASSERT_LEVELDB_OK(Delete("a")); Reopen(); - ASSERT_OK(Delete("a")); + ASSERT_LEVELDB_OK(Delete("a")); Reopen(); - ASSERT_OK(Put("a", "v")); + ASSERT_LEVELDB_OK(Put("a", "v")); Reopen(); Reopen(); ASSERT_EQ("(a->v)", Contents()); @@ -1466,7 +1471,7 @@ TEST(DBTest, L0_CompactionBug_Issue44_a) { ASSERT_EQ("(a->v)", Contents()); } -TEST(DBTest, L0_CompactionBug_Issue44_b) { +TEST_F(DBTest, L0_CompactionBug_Issue44_b) { Reopen(); Put("", ""); Reopen(); @@ -1492,16 +1497,16 @@ TEST(DBTest, L0_CompactionBug_Issue44_b) { ASSERT_EQ("(->)(c->cv)", Contents()); } -TEST(DBTest, Fflush_Issue474) { +TEST_F(DBTest, Fflush_Issue474) { static const int kNum = 100000; Random rnd(test::RandomSeed()); for (int i = 0; i < kNum; i++) { fflush(nullptr); - ASSERT_OK(Put(RandomKey(&rnd), RandomString(&rnd, 100))); + ASSERT_LEVELDB_OK(Put(RandomKey(&rnd), RandomString(&rnd, 100))); } } -TEST(DBTest, ComparatorCheck) { +TEST_F(DBTest, ComparatorCheck) { class NewComparator : public Comparator { public: const char* Name() const override { return "leveldb.NewComparator"; } @@ -1524,7 +1529,7 @@ TEST(DBTest, ComparatorCheck) { << s.ToString(); } -TEST(DBTest, CustomComparator) { +TEST_F(DBTest, CustomComparator) { class NumberComparator : public Comparator { public: const char* Name() const override { return "test.NumberComparator"; } @@ -1542,11 +1547,11 @@ TEST(DBTest, CustomComparator) { private: static int ToNumber(const Slice& x) { // Check that there are no extra characters. 
- ASSERT_TRUE(x.size() >= 2 && x[0] == '[' && x[x.size() - 1] == ']') + EXPECT_TRUE(x.size() >= 2 && x[0] == '[' && x[x.size() - 1] == ']') << EscapeString(x); int val; char ignored; - ASSERT_TRUE(sscanf(x.ToString().c_str(), "[%i]%c", &val, &ignored) == 1) + EXPECT_TRUE(sscanf(x.ToString().c_str(), "[%i]%c", &val, &ignored) == 1) << EscapeString(x); return val; } @@ -1558,8 +1563,8 @@ TEST(DBTest, CustomComparator) { new_options.filter_policy = nullptr; // Cannot use bloom filters new_options.write_buffer_size = 1000; // Compact more often DestroyAndReopen(&new_options); - ASSERT_OK(Put("[10]", "ten")); - ASSERT_OK(Put("[0x14]", "twenty")); + ASSERT_LEVELDB_OK(Put("[10]", "ten")); + ASSERT_LEVELDB_OK(Put("[0x14]", "twenty")); for (int i = 0; i < 2; i++) { ASSERT_EQ("ten", Get("[10]")); ASSERT_EQ("ten", Get("[0xa]")); @@ -1574,13 +1579,13 @@ TEST(DBTest, CustomComparator) { for (int i = 0; i < 1000; i++) { char buf[100]; snprintf(buf, sizeof(buf), "[%d]", i * 10); - ASSERT_OK(Put(buf, buf)); + ASSERT_LEVELDB_OK(Put(buf, buf)); } Compact("[0]", "[1000000]"); } } -TEST(DBTest, ManualCompaction) { +TEST_F(DBTest, ManualCompaction) { ASSERT_EQ(config::kMaxMemCompactLevel, 2) << "Need to update this test to match kMaxMemCompactLevel"; @@ -1614,8 +1619,8 @@ TEST(DBTest, ManualCompaction) { ASSERT_EQ("0,0,1", FilesPerLevel()); } -TEST(DBTest, DBOpen_Options) { - std::string dbname = test::TmpDir() + "/db_options_test"; +TEST_F(DBTest, DBOpen_Options) { + std::string dbname = testing::TempDir() + "db_options_test"; DestroyDB(dbname, Options()); // Does not exist, and create_if_missing == false: error @@ -1629,7 +1634,7 @@ TEST(DBTest, DBOpen_Options) { // Does not exist, and create_if_missing == true: OK opts.create_if_missing = true; s = DB::Open(opts, dbname, &db); - ASSERT_OK(s); + ASSERT_LEVELDB_OK(s); ASSERT_TRUE(db != nullptr); delete db; @@ -1646,15 +1651,15 @@ TEST(DBTest, DBOpen_Options) { opts.create_if_missing = true; opts.error_if_exists = false; s = DB::Open(opts, dbname, &db); - ASSERT_OK(s); + ASSERT_LEVELDB_OK(s); ASSERT_TRUE(db != nullptr); delete db; db = nullptr; } -TEST(DBTest, DestroyEmptyDir) { - std::string dbname = test::TmpDir() + "/db_empty_dir"; +TEST_F(DBTest, DestroyEmptyDir) { + std::string dbname = testing::TempDir() + "db_empty_dir"; TestEnv env(Env::Default()); env.DeleteDir(dbname); ASSERT_TRUE(!env.FileExists(dbname)); @@ -1662,34 +1667,34 @@ TEST(DBTest, DestroyEmptyDir) { Options opts; opts.env = &env; - ASSERT_OK(env.CreateDir(dbname)); + ASSERT_LEVELDB_OK(env.CreateDir(dbname)); ASSERT_TRUE(env.FileExists(dbname)); std::vector children; - ASSERT_OK(env.GetChildren(dbname, &children)); + ASSERT_LEVELDB_OK(env.GetChildren(dbname, &children)); // The stock Env's do not filter out '.' and '..' special files. ASSERT_EQ(2, children.size()); - ASSERT_OK(DestroyDB(dbname, opts)); + ASSERT_LEVELDB_OK(DestroyDB(dbname, opts)); ASSERT_TRUE(!env.FileExists(dbname)); // Should also be destroyed if Env is filtering out dot files. 
env.SetIgnoreDotFiles(true); - ASSERT_OK(env.CreateDir(dbname)); + ASSERT_LEVELDB_OK(env.CreateDir(dbname)); ASSERT_TRUE(env.FileExists(dbname)); - ASSERT_OK(env.GetChildren(dbname, &children)); + ASSERT_LEVELDB_OK(env.GetChildren(dbname, &children)); ASSERT_EQ(0, children.size()); - ASSERT_OK(DestroyDB(dbname, opts)); + ASSERT_LEVELDB_OK(DestroyDB(dbname, opts)); ASSERT_TRUE(!env.FileExists(dbname)); } -TEST(DBTest, DestroyOpenDB) { - std::string dbname = test::TmpDir() + "/open_db_dir"; +TEST_F(DBTest, DestroyOpenDB) { + std::string dbname = testing::TempDir() + "open_db_dir"; env_->DeleteDir(dbname); ASSERT_TRUE(!env_->FileExists(dbname)); Options opts; opts.create_if_missing = true; DB* db = nullptr; - ASSERT_OK(DB::Open(opts, dbname, &db)); + ASSERT_LEVELDB_OK(DB::Open(opts, dbname, &db)); ASSERT_TRUE(db != nullptr); // Must fail to destroy an open db. @@ -1701,23 +1706,23 @@ TEST(DBTest, DestroyOpenDB) { db = nullptr; // Should succeed destroying a closed db. - ASSERT_OK(DestroyDB(dbname, Options())); + ASSERT_LEVELDB_OK(DestroyDB(dbname, Options())); ASSERT_TRUE(!env_->FileExists(dbname)); } -TEST(DBTest, Locking) { +TEST_F(DBTest, Locking) { DB* db2 = nullptr; Status s = DB::Open(CurrentOptions(), dbname_, &db2); ASSERT_TRUE(!s.ok()) << "Locking did not prevent re-opening db"; } // Check that number of files does not grow when we are out of space -TEST(DBTest, NoSpace) { +TEST_F(DBTest, NoSpace) { Options options = CurrentOptions(); options.env = env_; Reopen(&options); - ASSERT_OK(Put("foo", "v1")); + ASSERT_LEVELDB_OK(Put("foo", "v1")); ASSERT_EQ("v1", Get("foo")); Compact("a", "z"); const int num_files = CountFiles(); @@ -1732,12 +1737,12 @@ TEST(DBTest, NoSpace) { ASSERT_LT(CountFiles(), num_files + 3); } -TEST(DBTest, NonWritableFileSystem) { +TEST_F(DBTest, NonWritableFileSystem) { Options options = CurrentOptions(); options.write_buffer_size = 1000; options.env = env_; Reopen(&options); - ASSERT_OK(Put("foo", "v1")); + ASSERT_LEVELDB_OK(Put("foo", "v1")); // Force errors for new files. env_->non_writable_.store(true, std::memory_order_release); std::string big(100000, 'x'); @@ -1753,7 +1758,7 @@ TEST(DBTest, NonWritableFileSystem) { env_->non_writable_.store(false, std::memory_order_release); } -TEST(DBTest, WriteSyncError) { +TEST_F(DBTest, WriteSyncError) { // Check that log sync errors cause the DB to disallow future writes. // (a) Cause log sync calls to fail @@ -1764,7 +1769,7 @@ TEST(DBTest, WriteSyncError) { // (b) Normal write should succeed WriteOptions w; - ASSERT_OK(db_->Put(w, "k1", "v1")); + ASSERT_LEVELDB_OK(db_->Put(w, "k1", "v1")); ASSERT_EQ("v1", Get("k1")); // (c) Do a sync write; should fail @@ -1784,7 +1789,7 @@ TEST(DBTest, WriteSyncError) { ASSERT_EQ("NOT_FOUND", Get("k3")); } -TEST(DBTest, ManifestWriteError) { +TEST_F(DBTest, ManifestWriteError) { // Test for the following problem: // (a) Compaction produces file F // (b) Log record containing F is written to MANIFEST file, but Sync() fails @@ -1803,7 +1808,7 @@ TEST(DBTest, ManifestWriteError) { options.create_if_missing = true; options.error_if_exists = false; DestroyAndReopen(&options); - ASSERT_OK(Put("foo", "bar")); + ASSERT_LEVELDB_OK(Put("foo", "bar")); ASSERT_EQ("bar", Get("foo")); // Memtable compaction (will succeed) @@ -1824,8 +1829,8 @@ TEST(DBTest, ManifestWriteError) { } } -TEST(DBTest, MissingSSTFile) { - ASSERT_OK(Put("foo", "bar")); +TEST_F(DBTest, MissingSSTFile) { + ASSERT_LEVELDB_OK(Put("foo", "bar")); ASSERT_EQ("bar", Get("foo")); // Dump the memtable to disk. 
@@ -1841,8 +1846,8 @@ TEST(DBTest, MissingSSTFile) { ASSERT_TRUE(s.ToString().find("issing") != std::string::npos) << s.ToString(); } -TEST(DBTest, StillReadSST) { - ASSERT_OK(Put("foo", "bar")); +TEST_F(DBTest, StillReadSST) { + ASSERT_LEVELDB_OK(Put("foo", "bar")); ASSERT_EQ("bar", Get("foo")); // Dump the memtable to disk. @@ -1857,18 +1862,18 @@ TEST(DBTest, StillReadSST) { ASSERT_EQ("bar", Get("foo")); } -TEST(DBTest, FilesDeletedAfterCompaction) { - ASSERT_OK(Put("foo", "v2")); +TEST_F(DBTest, FilesDeletedAfterCompaction) { + ASSERT_LEVELDB_OK(Put("foo", "v2")); Compact("a", "z"); const int num_files = CountFiles(); for (int i = 0; i < 10; i++) { - ASSERT_OK(Put("foo", "v2")); + ASSERT_LEVELDB_OK(Put("foo", "v2")); Compact("a", "z"); } ASSERT_EQ(CountFiles(), num_files); } -TEST(DBTest, BloomFilter) { +TEST_F(DBTest, BloomFilter) { env_->count_random_reads_ = true; Options options = CurrentOptions(); options.env = env_; @@ -1879,11 +1884,11 @@ TEST(DBTest, BloomFilter) { // Populate multiple layers const int N = 10000; for (int i = 0; i < N; i++) { - ASSERT_OK(Put(Key(i), Key(i))); + ASSERT_LEVELDB_OK(Put(Key(i), Key(i))); } Compact("a", "z"); for (int i = 0; i < N; i += 100) { - ASSERT_OK(Put(Key(i), Key(i))); + ASSERT_LEVELDB_OK(Put(Key(i), Key(i))); } dbfull()->TEST_CompactMemTable(); @@ -1955,7 +1960,7 @@ static void MTThreadBody(void* arg) { // We add some padding for force compactions. snprintf(valbuf, sizeof(valbuf), "%d.%d.%-1000d", key, id, static_cast(counter)); - ASSERT_OK(db->Put(WriteOptions(), Slice(keybuf), Slice(valbuf))); + ASSERT_LEVELDB_OK(db->Put(WriteOptions(), Slice(keybuf), Slice(valbuf))); } else { // Read a value and verify that it matches the pattern written above. Status s = db->Get(ReadOptions(), Slice(keybuf), &value); @@ -1963,7 +1968,7 @@ static void MTThreadBody(void* arg) { // Key has not yet been written } else { // Check that the writer thread counter is >= the counter in the value - ASSERT_OK(s); + ASSERT_LEVELDB_OK(s); int k, w, c; ASSERT_EQ(3, sscanf(value.c_str(), "%d.%d.%d", &k, &w, &c)) << value; ASSERT_EQ(k, key); @@ -1980,7 +1985,7 @@ static void MTThreadBody(void* arg) { } // namespace -TEST(DBTest, MultiThreaded) { +TEST_F(DBTest, MultiThreaded) { do { // Initialize state MTState mt; @@ -2158,7 +2163,7 @@ static bool CompareIterators(int step, DB* model, DB* db, return ok; } -TEST(DBTest, Randomized) { +TEST_F(DBTest, Randomized) { Random rnd(test::RandomSeed()); do { ModelDB model(CurrentOptions()); @@ -2176,13 +2181,13 @@ TEST(DBTest, Randomized) { k = RandomKey(&rnd); v = RandomString( &rnd, rnd.OneIn(20) ? 
100 + rnd.Uniform(100) : rnd.Uniform(8)); - ASSERT_OK(model.Put(WriteOptions(), k, v)); - ASSERT_OK(db_->Put(WriteOptions(), k, v)); + ASSERT_LEVELDB_OK(model.Put(WriteOptions(), k, v)); + ASSERT_LEVELDB_OK(db_->Put(WriteOptions(), k, v)); } else if (p < 90) { // Delete k = RandomKey(&rnd); - ASSERT_OK(model.Delete(WriteOptions(), k)); - ASSERT_OK(db_->Delete(WriteOptions(), k)); + ASSERT_LEVELDB_OK(model.Delete(WriteOptions(), k)); + ASSERT_LEVELDB_OK(db_->Delete(WriteOptions(), k)); } else { // Multi-element batch WriteBatch b; @@ -2201,8 +2206,8 @@ TEST(DBTest, Randomized) { b.Delete(k); } } - ASSERT_OK(model.Write(WriteOptions(), &b)); - ASSERT_OK(db_->Write(WriteOptions(), &b)); + ASSERT_LEVELDB_OK(model.Write(WriteOptions(), &b)); + ASSERT_LEVELDB_OK(db_->Write(WriteOptions(), &b)); } if ((step % 100) == 0) { @@ -2233,14 +2238,14 @@ std::string MakeKey(unsigned int num) { } void BM_LogAndApply(int iters, int num_base_files) { - std::string dbname = test::TmpDir() + "/leveldb_test_benchmark"; + std::string dbname = testing::TempDir() + "leveldb_test_benchmark"; DestroyDB(dbname, Options()); DB* db = nullptr; Options opts; opts.create_if_missing = true; Status s = DB::Open(opts, dbname, &db); - ASSERT_OK(s); + ASSERT_LEVELDB_OK(s); ASSERT_TRUE(db != nullptr); delete db; @@ -2255,7 +2260,7 @@ void BM_LogAndApply(int iters, int num_base_files) { Options options; VersionSet vset(dbname, &options, nullptr, &cmp); bool save_manifest; - ASSERT_OK(vset.Recover(&save_manifest)); + ASSERT_LEVELDB_OK(vset.Recover(&save_manifest)); VersionEdit vbase; uint64_t fnum = 1; for (int i = 0; i < num_base_files; i++) { @@ -2263,7 +2268,7 @@ void BM_LogAndApply(int iters, int num_base_files) { InternalKey limit(MakeKey(2 * fnum + 1), 1, kTypeDeletion); vbase.AddFile(2, fnum++, 1 /* file size */, start, limit); } - ASSERT_OK(vset.LogAndApply(&vbase, &mu)); + ASSERT_LEVELDB_OK(vset.LogAndApply(&vbase, &mu)); uint64_t start_micros = env->NowMicros(); @@ -2295,5 +2300,6 @@ int main(int argc, char** argv) { return 0; } - return leveldb::test::RunAllTests(); + testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); } diff --git a/db/dbformat_test.cc b/db/dbformat_test.cc index 1209369..ca49e0a 100644 --- a/db/dbformat_test.cc +++ b/db/dbformat_test.cc @@ -3,8 +3,9 @@ // found in the LICENSE file. See the AUTHORS file for names of contributors. 
#include "db/dbformat.h" + +#include "third_party/googletest/googletest/include/gtest/gtest.h" #include "util/logging.h" -#include "util/testharness.h" namespace leveldb { @@ -41,8 +42,6 @@ static void TestKey(const std::string& key, uint64_t seq, ValueType vt) { ASSERT_TRUE(!ParseInternalKey(Slice("bar"), &decoded)); } -class FormatTest {}; - TEST(FormatTest, InternalKey_EncodeDecode) { const char* keys[] = {"", "k", "hello", "longggggggggggggggggggggg"}; const uint64_t seq[] = {1, @@ -128,4 +127,7 @@ TEST(FormatTest, InternalKeyDebugString) { } // namespace leveldb -int main(int argc, char** argv) { return leveldb::test::RunAllTests(); } +int main(int argc, char** argv) { + testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} diff --git a/db/fault_injection_test.cc b/db/fault_injection_test.cc index 5b31bb8..80b8f12 100644 --- a/db/fault_injection_test.cc +++ b/db/fault_injection_test.cc @@ -9,6 +9,7 @@ #include #include +#include "third_party/googletest/googletest/include/gtest/gtest.h" #include "db/db_impl.h" #include "db/filename.h" #include "db/log_format.h" @@ -22,7 +23,6 @@ #include "port/thread_annotations.h" #include "util/logging.h" #include "util/mutexlock.h" -#include "util/testharness.h" #include "util/testutil.h" namespace leveldb { @@ -300,7 +300,7 @@ void FaultInjectionTestEnv::UntrackFile(const std::string& f) { Status FaultInjectionTestEnv::DeleteFile(const std::string& f) { Status s = EnvWrapper::DeleteFile(f); - ASSERT_OK(s); + EXPECT_LEVELDB_OK(s); if (s.ok()) { UntrackFile(f); } @@ -361,7 +361,7 @@ Status FileState::DropUnsyncedData() const { return Truncate(filename_, sync_pos); } -class FaultInjectionTest { +class FaultInjectionTest : public testing::Test { public: enum ExpectedVerifResult { VAL_EXPECT_NO_ERROR, VAL_EXPECT_ERROR }; enum ResetMethod { RESET_DROP_UNSYNCED_DATA, RESET_DELETE_UNSYNCED_FILES }; @@ -376,7 +376,7 @@ class FaultInjectionTest { : env_(new FaultInjectionTestEnv), tiny_cache_(NewLRUCache(100)), db_(nullptr) { - dbname_ = test::TmpDir() + "/fault_test"; + dbname_ = testing::TempDir() + "fault_test"; DestroyDB(dbname_, Options()); // Destroy any db from earlier run options_.reuse_logs = true; options_.env = env_; @@ -402,7 +402,7 @@ class FaultInjectionTest { batch.Clear(); batch.Put(key, Value(i, &value_space)); WriteOptions options; - ASSERT_OK(db_->Write(options, &batch)); + ASSERT_LEVELDB_OK(db_->Write(options, &batch)); } } @@ -424,7 +424,7 @@ class FaultInjectionTest { s = ReadValue(i, &val); if (expected == VAL_EXPECT_NO_ERROR) { if (s.ok()) { - ASSERT_EQ(value_space, val); + EXPECT_EQ(value_space, val); } } else if (s.ok()) { fprintf(stderr, "Expected an error at %d, but was OK\n", i); @@ -465,7 +465,7 @@ class FaultInjectionTest { void DeleteAllData() { Iterator* iter = db_->NewIterator(ReadOptions()); for (iter->SeekToFirst(); iter->Valid(); iter->Next()) { - ASSERT_OK(db_->Delete(WriteOptions(), iter->key())); + ASSERT_LEVELDB_OK(db_->Delete(WriteOptions(), iter->key())); } delete iter; @@ -474,10 +474,10 @@ class FaultInjectionTest { void ResetDBState(ResetMethod reset_method) { switch (reset_method) { case RESET_DROP_UNSYNCED_DATA: - ASSERT_OK(env_->DropUnsyncedFileData()); + ASSERT_LEVELDB_OK(env_->DropUnsyncedFileData()); break; case RESET_DELETE_UNSYNCED_FILES: - ASSERT_OK(env_->DeleteFilesCreatedAfterLastDirSync()); + ASSERT_LEVELDB_OK(env_->DeleteFilesCreatedAfterLastDirSync()); break; default: assert(false); @@ -496,10 +496,11 @@ class FaultInjectionTest { env_->SetFilesystemActive(false); CloseDB(); 
ResetDBState(reset_method); - ASSERT_OK(OpenDB()); - ASSERT_OK(Verify(0, num_pre_sync, FaultInjectionTest::VAL_EXPECT_NO_ERROR)); - ASSERT_OK(Verify(num_pre_sync, num_post_sync, - FaultInjectionTest::VAL_EXPECT_ERROR)); + ASSERT_LEVELDB_OK(OpenDB()); + ASSERT_LEVELDB_OK( + Verify(0, num_pre_sync, FaultInjectionTest::VAL_EXPECT_NO_ERROR)); + ASSERT_LEVELDB_OK(Verify(num_pre_sync, num_post_sync, + FaultInjectionTest::VAL_EXPECT_ERROR)); } void NoWriteTestPreFault() {} @@ -507,12 +508,12 @@ class FaultInjectionTest { void NoWriteTestReopenWithFault(ResetMethod reset_method) { CloseDB(); ResetDBState(reset_method); - ASSERT_OK(OpenDB()); + ASSERT_LEVELDB_OK(OpenDB()); } void DoTest() { Random rnd(0); - ASSERT_OK(OpenDB()); + ASSERT_LEVELDB_OK(OpenDB()); for (size_t idx = 0; idx < kNumIterations; idx++) { int num_pre_sync = rnd.Uniform(kMaxNumValues); int num_post_sync = rnd.Uniform(kMaxNumValues); @@ -536,16 +537,19 @@ class FaultInjectionTest { } }; -TEST(FaultInjectionTest, FaultTestNoLogReuse) { +TEST_F(FaultInjectionTest, FaultTestNoLogReuse) { ReuseLogs(false); DoTest(); } -TEST(FaultInjectionTest, FaultTestWithLogReuse) { +TEST_F(FaultInjectionTest, FaultTestWithLogReuse) { ReuseLogs(true); DoTest(); } } // namespace leveldb -int main(int argc, char** argv) { return leveldb::test::RunAllTests(); } +int main(int argc, char** argv) { + testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} diff --git a/db/filename_test.cc b/db/filename_test.cc index 952f320..ad0bc73 100644 --- a/db/filename_test.cc +++ b/db/filename_test.cc @@ -4,15 +4,13 @@ #include "db/filename.h" +#include "third_party/googletest/googletest/include/gtest/gtest.h" #include "db/dbformat.h" #include "port/port.h" #include "util/logging.h" -#include "util/testharness.h" namespace leveldb { -class FileNameTest {}; - TEST(FileNameTest, Parse) { Slice db; FileType type; @@ -128,4 +126,7 @@ TEST(FileNameTest, Construction) { } // namespace leveldb -int main(int argc, char** argv) { return leveldb::test::RunAllTests(); } +int main(int argc, char** argv) { + testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} diff --git a/db/log_test.cc b/db/log_test.cc index 0e31648..680f267 100644 --- a/db/log_test.cc +++ b/db/log_test.cc @@ -2,13 +2,13 @@ // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. See the AUTHORS file for names of contributors. 
+#include "third_party/googletest/googletest/include/gtest/gtest.h" #include "db/log_reader.h" #include "db/log_writer.h" #include "leveldb/env.h" #include "util/coding.h" #include "util/crc32c.h" #include "util/random.h" -#include "util/testharness.h" namespace leveldb { namespace log { @@ -36,7 +36,7 @@ static std::string RandomSkewedString(int i, Random* rnd) { return BigString(NumberString(i), rnd->Skewed(17)); } -class LogTest { +class LogTest : public testing::Test { public: LogTest() : reading_(false), @@ -177,7 +177,7 @@ class LogTest { StringSource() : force_error_(false), returned_partial_(false) {} Status Read(size_t n, Slice* result, char* scratch) override { - ASSERT_TRUE(!returned_partial_) << "must not Read() after eof/error"; + EXPECT_TRUE(!returned_partial_) << "must not Read() after eof/error"; if (force_error_) { force_error_ = false; @@ -258,9 +258,9 @@ uint64_t LogTest::initial_offset_last_record_offsets_[] = { int LogTest::num_initial_offset_records_ = sizeof(LogTest::initial_offset_last_record_offsets_) / sizeof(uint64_t); -TEST(LogTest, Empty) { ASSERT_EQ("EOF", Read()); } +TEST_F(LogTest, Empty) { ASSERT_EQ("EOF", Read()); } -TEST(LogTest, ReadWrite) { +TEST_F(LogTest, ReadWrite) { Write("foo"); Write("bar"); Write(""); @@ -273,7 +273,7 @@ TEST(LogTest, ReadWrite) { ASSERT_EQ("EOF", Read()); // Make sure reads at eof work } -TEST(LogTest, ManyBlocks) { +TEST_F(LogTest, ManyBlocks) { for (int i = 0; i < 100000; i++) { Write(NumberString(i)); } @@ -283,7 +283,7 @@ TEST(LogTest, ManyBlocks) { ASSERT_EQ("EOF", Read()); } -TEST(LogTest, Fragmentation) { +TEST_F(LogTest, Fragmentation) { Write("small"); Write(BigString("medium", 50000)); Write(BigString("large", 100000)); @@ -293,7 +293,7 @@ TEST(LogTest, Fragmentation) { ASSERT_EQ("EOF", Read()); } -TEST(LogTest, MarginalTrailer) { +TEST_F(LogTest, MarginalTrailer) { // Make a trailer that is exactly the same length as an empty record. const int n = kBlockSize - 2 * kHeaderSize; Write(BigString("foo", n)); @@ -306,7 +306,7 @@ TEST(LogTest, MarginalTrailer) { ASSERT_EQ("EOF", Read()); } -TEST(LogTest, MarginalTrailer2) { +TEST_F(LogTest, MarginalTrailer2) { // Make a trailer that is exactly the same length as an empty record. 
const int n = kBlockSize - 2 * kHeaderSize; Write(BigString("foo", n)); @@ -319,7 +319,7 @@ TEST(LogTest, MarginalTrailer2) { ASSERT_EQ("", ReportMessage()); } -TEST(LogTest, ShortTrailer) { +TEST_F(LogTest, ShortTrailer) { const int n = kBlockSize - 2 * kHeaderSize + 4; Write(BigString("foo", n)); ASSERT_EQ(kBlockSize - kHeaderSize + 4, WrittenBytes()); @@ -331,7 +331,7 @@ TEST(LogTest, ShortTrailer) { ASSERT_EQ("EOF", Read()); } -TEST(LogTest, AlignedEof) { +TEST_F(LogTest, AlignedEof) { const int n = kBlockSize - 2 * kHeaderSize + 4; Write(BigString("foo", n)); ASSERT_EQ(kBlockSize - kHeaderSize + 4, WrittenBytes()); @@ -339,7 +339,7 @@ TEST(LogTest, AlignedEof) { ASSERT_EQ("EOF", Read()); } -TEST(LogTest, OpenForAppend) { +TEST_F(LogTest, OpenForAppend) { Write("hello"); ReopenForAppend(); Write("world"); @@ -348,7 +348,7 @@ TEST(LogTest, OpenForAppend) { ASSERT_EQ("EOF", Read()); } -TEST(LogTest, RandomRead) { +TEST_F(LogTest, RandomRead) { const int N = 500; Random write_rnd(301); for (int i = 0; i < N; i++) { @@ -363,7 +363,7 @@ TEST(LogTest, RandomRead) { // Tests of all the error paths in log_reader.cc follow: -TEST(LogTest, ReadError) { +TEST_F(LogTest, ReadError) { Write("foo"); ForceError(); ASSERT_EQ("EOF", Read()); @@ -371,7 +371,7 @@ TEST(LogTest, ReadError) { ASSERT_EQ("OK", MatchError("read error")); } -TEST(LogTest, BadRecordType) { +TEST_F(LogTest, BadRecordType) { Write("foo"); // Type is stored in header[6] IncrementByte(6, 100); @@ -381,7 +381,7 @@ TEST(LogTest, BadRecordType) { ASSERT_EQ("OK", MatchError("unknown record type")); } -TEST(LogTest, TruncatedTrailingRecordIsIgnored) { +TEST_F(LogTest, TruncatedTrailingRecordIsIgnored) { Write("foo"); ShrinkSize(4); // Drop all payload as well as a header byte ASSERT_EQ("EOF", Read()); @@ -390,7 +390,7 @@ TEST(LogTest, TruncatedTrailingRecordIsIgnored) { ASSERT_EQ("", ReportMessage()); } -TEST(LogTest, BadLength) { +TEST_F(LogTest, BadLength) { const int kPayloadSize = kBlockSize - kHeaderSize; Write(BigString("bar", kPayloadSize)); Write("foo"); @@ -401,7 +401,7 @@ TEST(LogTest, BadLength) { ASSERT_EQ("OK", MatchError("bad record length")); } -TEST(LogTest, BadLengthAtEndIsIgnored) { +TEST_F(LogTest, BadLengthAtEndIsIgnored) { Write("foo"); ShrinkSize(1); ASSERT_EQ("EOF", Read()); @@ -409,7 +409,7 @@ TEST(LogTest, BadLengthAtEndIsIgnored) { ASSERT_EQ("", ReportMessage()); } -TEST(LogTest, ChecksumMismatch) { +TEST_F(LogTest, ChecksumMismatch) { Write("foo"); IncrementByte(0, 10); ASSERT_EQ("EOF", Read()); @@ -417,7 +417,7 @@ TEST(LogTest, ChecksumMismatch) { ASSERT_EQ("OK", MatchError("checksum mismatch")); } -TEST(LogTest, UnexpectedMiddleType) { +TEST_F(LogTest, UnexpectedMiddleType) { Write("foo"); SetByte(6, kMiddleType); FixChecksum(0, 3); @@ -426,7 +426,7 @@ TEST(LogTest, UnexpectedMiddleType) { ASSERT_EQ("OK", MatchError("missing start")); } -TEST(LogTest, UnexpectedLastType) { +TEST_F(LogTest, UnexpectedLastType) { Write("foo"); SetByte(6, kLastType); FixChecksum(0, 3); @@ -435,7 +435,7 @@ TEST(LogTest, UnexpectedLastType) { ASSERT_EQ("OK", MatchError("missing start")); } -TEST(LogTest, UnexpectedFullType) { +TEST_F(LogTest, UnexpectedFullType) { Write("foo"); Write("bar"); SetByte(6, kFirstType); @@ -446,7 +446,7 @@ TEST(LogTest, UnexpectedFullType) { ASSERT_EQ("OK", MatchError("partial record without end")); } -TEST(LogTest, UnexpectedFirstType) { +TEST_F(LogTest, UnexpectedFirstType) { Write("foo"); Write(BigString("bar", 100000)); SetByte(6, kFirstType); @@ -457,7 +457,7 @@ TEST(LogTest, UnexpectedFirstType) 
{ ASSERT_EQ("OK", MatchError("partial record without end")); } -TEST(LogTest, MissingLastIsIgnored) { +TEST_F(LogTest, MissingLastIsIgnored) { Write(BigString("bar", kBlockSize)); // Remove the LAST block, including header. ShrinkSize(14); @@ -466,7 +466,7 @@ TEST(LogTest, MissingLastIsIgnored) { ASSERT_EQ(0, DroppedBytes()); } -TEST(LogTest, PartialLastIsIgnored) { +TEST_F(LogTest, PartialLastIsIgnored) { Write(BigString("bar", kBlockSize)); // Cause a bad record length in the LAST block. ShrinkSize(1); @@ -475,7 +475,7 @@ TEST(LogTest, PartialLastIsIgnored) { ASSERT_EQ(0, DroppedBytes()); } -TEST(LogTest, SkipIntoMultiRecord) { +TEST_F(LogTest, SkipIntoMultiRecord) { // Consider a fragmented record: // first(R1), middle(R1), last(R1), first(R2) // If initial_offset points to a record after first(R1) but before first(R2) @@ -491,7 +491,7 @@ TEST(LogTest, SkipIntoMultiRecord) { ASSERT_EQ("EOF", Read()); } -TEST(LogTest, ErrorJoinsRecords) { +TEST_F(LogTest, ErrorJoinsRecords) { // Consider two fragmented records: // first(R1) last(R1) first(R2) last(R2) // where the middle two fragments disappear. We do not want @@ -514,47 +514,50 @@ TEST(LogTest, ErrorJoinsRecords) { ASSERT_GE(dropped, 2 * kBlockSize); } -TEST(LogTest, ReadStart) { CheckInitialOffsetRecord(0, 0); } +TEST_F(LogTest, ReadStart) { CheckInitialOffsetRecord(0, 0); } -TEST(LogTest, ReadSecondOneOff) { CheckInitialOffsetRecord(1, 1); } +TEST_F(LogTest, ReadSecondOneOff) { CheckInitialOffsetRecord(1, 1); } -TEST(LogTest, ReadSecondTenThousand) { CheckInitialOffsetRecord(10000, 1); } +TEST_F(LogTest, ReadSecondTenThousand) { CheckInitialOffsetRecord(10000, 1); } -TEST(LogTest, ReadSecondStart) { CheckInitialOffsetRecord(10007, 1); } +TEST_F(LogTest, ReadSecondStart) { CheckInitialOffsetRecord(10007, 1); } -TEST(LogTest, ReadThirdOneOff) { CheckInitialOffsetRecord(10008, 2); } +TEST_F(LogTest, ReadThirdOneOff) { CheckInitialOffsetRecord(10008, 2); } -TEST(LogTest, ReadThirdStart) { CheckInitialOffsetRecord(20014, 2); } +TEST_F(LogTest, ReadThirdStart) { CheckInitialOffsetRecord(20014, 2); } -TEST(LogTest, ReadFourthOneOff) { CheckInitialOffsetRecord(20015, 3); } +TEST_F(LogTest, ReadFourthOneOff) { CheckInitialOffsetRecord(20015, 3); } -TEST(LogTest, ReadFourthFirstBlockTrailer) { +TEST_F(LogTest, ReadFourthFirstBlockTrailer) { CheckInitialOffsetRecord(log::kBlockSize - 4, 3); } -TEST(LogTest, ReadFourthMiddleBlock) { +TEST_F(LogTest, ReadFourthMiddleBlock) { CheckInitialOffsetRecord(log::kBlockSize + 1, 3); } -TEST(LogTest, ReadFourthLastBlock) { +TEST_F(LogTest, ReadFourthLastBlock) { CheckInitialOffsetRecord(2 * log::kBlockSize + 1, 3); } -TEST(LogTest, ReadFourthStart) { +TEST_F(LogTest, ReadFourthStart) { CheckInitialOffsetRecord( 2 * (kHeaderSize + 1000) + (2 * log::kBlockSize - 1000) + 3 * kHeaderSize, 3); } -TEST(LogTest, ReadInitialOffsetIntoBlockPadding) { +TEST_F(LogTest, ReadInitialOffsetIntoBlockPadding) { CheckInitialOffsetRecord(3 * log::kBlockSize - 3, 5); } -TEST(LogTest, ReadEnd) { CheckOffsetPastEndReturnsNoRecords(0); } +TEST_F(LogTest, ReadEnd) { CheckOffsetPastEndReturnsNoRecords(0); } -TEST(LogTest, ReadPastEnd) { CheckOffsetPastEndReturnsNoRecords(5); } +TEST_F(LogTest, ReadPastEnd) { CheckOffsetPastEndReturnsNoRecords(5); } } // namespace log } // namespace leveldb -int main(int argc, char** argv) { return leveldb::test::RunAllTests(); } +int main(int argc, char** argv) { + testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} diff --git a/db/recovery_test.cc b/db/recovery_test.cc index 
547a959..0657743 100644 --- a/db/recovery_test.cc +++ b/db/recovery_test.cc @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. See the AUTHORS file for names of contributors. +#include "third_party/googletest/googletest/include/gtest/gtest.h" #include "db/db_impl.h" #include "db/filename.h" #include "db/version_set.h" @@ -10,15 +11,14 @@ #include "leveldb/env.h" #include "leveldb/write_batch.h" #include "util/logging.h" -#include "util/testharness.h" #include "util/testutil.h" namespace leveldb { -class RecoveryTest { +class RecoveryTest : public testing::Test { public: RecoveryTest() : env_(Env::Default()), db_(nullptr) { - dbname_ = test::TmpDir() + "/recovery_test"; + dbname_ = testing::TempDir() + "/recovery_test"; DestroyDB(dbname_, Options()); Open(); } @@ -63,7 +63,7 @@ class RecoveryTest { } void Open(Options* options = nullptr) { - ASSERT_OK(OpenWithStatus(options)); + ASSERT_LEVELDB_OK(OpenWithStatus(options)); ASSERT_EQ(1, NumLogs()); } @@ -84,7 +84,8 @@ class RecoveryTest { std::string ManifestFileName() { std::string current; - ASSERT_OK(ReadFileToString(env_, CurrentFileName(dbname_), ¤t)); + EXPECT_LEVELDB_OK( + ReadFileToString(env_, CurrentFileName(dbname_), ¤t)); size_t len = current.size(); if (len > 0 && current[len - 1] == '\n') { current.resize(len - 1); @@ -100,18 +101,20 @@ class RecoveryTest { Close(); std::vector logs = GetFiles(kLogFile); for (size_t i = 0; i < logs.size(); i++) { - ASSERT_OK(env_->DeleteFile(LogName(logs[i]))) << LogName(logs[i]); + EXPECT_LEVELDB_OK(env_->DeleteFile(LogName(logs[i]))) << LogName(logs[i]); } return logs.size(); } - void DeleteManifestFile() { ASSERT_OK(env_->DeleteFile(ManifestFileName())); } + void DeleteManifestFile() { + ASSERT_LEVELDB_OK(env_->DeleteFile(ManifestFileName())); + } uint64_t FirstLogFile() { return GetFiles(kLogFile)[0]; } std::vector GetFiles(FileType t) { std::vector filenames; - ASSERT_OK(env_->GetChildren(dbname_, &filenames)); + EXPECT_LEVELDB_OK(env_->GetChildren(dbname_, &filenames)); std::vector result; for (size_t i = 0; i < filenames.size(); i++) { uint64_t number; @@ -129,7 +132,7 @@ class RecoveryTest { uint64_t FileSize(const std::string& fname) { uint64_t result; - ASSERT_OK(env_->GetFileSize(fname, &result)) << fname; + EXPECT_LEVELDB_OK(env_->GetFileSize(fname, &result)) << fname; return result; } @@ -139,13 +142,13 @@ class RecoveryTest { void MakeLogFile(uint64_t lognum, SequenceNumber seq, Slice key, Slice val) { std::string fname = LogFileName(dbname_, lognum); WritableFile* file; - ASSERT_OK(env_->NewWritableFile(fname, &file)); + ASSERT_LEVELDB_OK(env_->NewWritableFile(fname, &file)); log::Writer writer(file); WriteBatch batch; batch.Put(key, val); WriteBatchInternal::SetSequence(&batch, seq); - ASSERT_OK(writer.AddRecord(WriteBatchInternal::Contents(&batch))); - ASSERT_OK(file->Flush()); + ASSERT_LEVELDB_OK(writer.AddRecord(WriteBatchInternal::Contents(&batch))); + ASSERT_LEVELDB_OK(file->Flush()); delete file; } @@ -155,12 +158,12 @@ class RecoveryTest { DB* db_; }; -TEST(RecoveryTest, ManifestReused) { +TEST_F(RecoveryTest, ManifestReused) { if (!CanAppend()) { fprintf(stderr, "skipping test because env does not support appending\n"); return; } - ASSERT_OK(Put("foo", "bar")); + ASSERT_LEVELDB_OK(Put("foo", "bar")); Close(); std::string old_manifest = ManifestFileName(); Open(); @@ -171,12 +174,12 @@ TEST(RecoveryTest, ManifestReused) { ASSERT_EQ("bar", Get("foo")); } -TEST(RecoveryTest, LargeManifestCompacted) { 
+TEST_F(RecoveryTest, LargeManifestCompacted) { if (!CanAppend()) { fprintf(stderr, "skipping test because env does not support appending\n"); return; } - ASSERT_OK(Put("foo", "bar")); + ASSERT_LEVELDB_OK(Put("foo", "bar")); Close(); std::string old_manifest = ManifestFileName(); @@ -184,10 +187,10 @@ TEST(RecoveryTest, LargeManifestCompacted) { { uint64_t len = FileSize(old_manifest); WritableFile* file; - ASSERT_OK(env()->NewAppendableFile(old_manifest, &file)); + ASSERT_LEVELDB_OK(env()->NewAppendableFile(old_manifest, &file)); std::string zeroes(3 * 1048576 - static_cast(len), 0); - ASSERT_OK(file->Append(zeroes)); - ASSERT_OK(file->Flush()); + ASSERT_LEVELDB_OK(file->Append(zeroes)); + ASSERT_LEVELDB_OK(file->Flush()); delete file; } @@ -202,8 +205,8 @@ TEST(RecoveryTest, LargeManifestCompacted) { ASSERT_EQ("bar", Get("foo")); } -TEST(RecoveryTest, NoLogFiles) { - ASSERT_OK(Put("foo", "bar")); +TEST_F(RecoveryTest, NoLogFiles) { + ASSERT_LEVELDB_OK(Put("foo", "bar")); ASSERT_EQ(1, DeleteLogFiles()); Open(); ASSERT_EQ("NOT_FOUND", Get("foo")); @@ -211,13 +214,13 @@ TEST(RecoveryTest, NoLogFiles) { ASSERT_EQ("NOT_FOUND", Get("foo")); } -TEST(RecoveryTest, LogFileReuse) { +TEST_F(RecoveryTest, LogFileReuse) { if (!CanAppend()) { fprintf(stderr, "skipping test because env does not support appending\n"); return; } for (int i = 0; i < 2; i++) { - ASSERT_OK(Put("foo", "bar")); + ASSERT_LEVELDB_OK(Put("foo", "bar")); if (i == 0) { // Compact to ensure current log is empty CompactMemTable(); @@ -241,13 +244,13 @@ TEST(RecoveryTest, LogFileReuse) { } } -TEST(RecoveryTest, MultipleMemTables) { +TEST_F(RecoveryTest, MultipleMemTables) { // Make a large log. const int kNum = 1000; for (int i = 0; i < kNum; i++) { char buf[100]; snprintf(buf, sizeof(buf), "%050d", i); - ASSERT_OK(Put(buf, buf)); + ASSERT_LEVELDB_OK(Put(buf, buf)); } ASSERT_EQ(0, NumTables()); Close(); @@ -270,8 +273,8 @@ TEST(RecoveryTest, MultipleMemTables) { } } -TEST(RecoveryTest, MultipleLogFiles) { - ASSERT_OK(Put("foo", "bar")); +TEST_F(RecoveryTest, MultipleLogFiles) { + ASSERT_LEVELDB_OK(Put("foo", "bar")); Close(); ASSERT_EQ(1, NumLogs()); @@ -316,8 +319,8 @@ TEST(RecoveryTest, MultipleLogFiles) { ASSERT_EQ("there", Get("hi")); } -TEST(RecoveryTest, ManifestMissing) { - ASSERT_OK(Put("foo", "bar")); +TEST_F(RecoveryTest, ManifestMissing) { + ASSERT_LEVELDB_OK(Put("foo", "bar")); Close(); DeleteManifestFile(); @@ -327,4 +330,7 @@ TEST(RecoveryTest, ManifestMissing) { } // namespace leveldb -int main(int argc, char** argv) { return leveldb::test::RunAllTests(); } +int main(int argc, char** argv) { + testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} diff --git a/db/skiplist_test.cc b/db/skiplist_test.cc index 9fa2d96..04b9fa7 100644 --- a/db/skiplist_test.cc +++ b/db/skiplist_test.cc @@ -7,13 +7,14 @@ #include #include +#include "third_party/googletest/googletest/include/gtest/gtest.h" #include "leveldb/env.h" #include "port/port.h" #include "port/thread_annotations.h" #include "util/arena.h" #include "util/hash.h" #include "util/random.h" -#include "util/testharness.h" +#include "util/testutil.h" namespace leveldb { @@ -31,8 +32,6 @@ struct Comparator { } }; -class SkipTest {}; - TEST(SkipTest, Empty) { Arena arena; Comparator cmp; @@ -366,4 +365,7 @@ TEST(SkipTest, Concurrent5) { RunConcurrent(5); } } // namespace leveldb -int main(int argc, char** argv) { return leveldb::test::RunAllTests(); } +int main(int argc, char** argv) { + testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} diff --git 
a/db/version_edit_test.cc b/db/version_edit_test.cc index 0b7cda8..228fa3b 100644 --- a/db/version_edit_test.cc +++ b/db/version_edit_test.cc @@ -3,7 +3,8 @@ // found in the LICENSE file. See the AUTHORS file for names of contributors. #include "db/version_edit.h" -#include "util/testharness.h" + +#include "third_party/googletest/googletest/include/gtest/gtest.h" namespace leveldb { @@ -17,8 +18,6 @@ static void TestEncodeDecode(const VersionEdit& edit) { ASSERT_EQ(encoded, encoded2); } -class VersionEditTest {}; - TEST(VersionEditTest, EncodeDecode) { static const uint64_t kBig = 1ull << 50; @@ -41,4 +40,7 @@ TEST(VersionEditTest, EncodeDecode) { } // namespace leveldb -int main(int argc, char** argv) { return leveldb::test::RunAllTests(); } +int main(int argc, char** argv) { + testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} diff --git a/db/version_set_test.cc b/db/version_set_test.cc index c1056a1..71b19a7 100644 --- a/db/version_set_test.cc +++ b/db/version_set_test.cc @@ -3,13 +3,14 @@ // found in the LICENSE file. See the AUTHORS file for names of contributors. #include "db/version_set.h" + +#include "third_party/googletest/googletest/include/gtest/gtest.h" #include "util/logging.h" -#include "util/testharness.h" #include "util/testutil.h" namespace leveldb { -class FindFileTest { +class FindFileTest : public testing::Test { public: FindFileTest() : disjoint_sorted_files_(true) {} @@ -50,7 +51,7 @@ class FindFileTest { std::vector files_; }; -TEST(FindFileTest, Empty) { +TEST_F(FindFileTest, Empty) { ASSERT_EQ(0, Find("foo")); ASSERT_TRUE(!Overlaps("a", "z")); ASSERT_TRUE(!Overlaps(nullptr, "z")); @@ -58,7 +59,7 @@ TEST(FindFileTest, Empty) { ASSERT_TRUE(!Overlaps(nullptr, nullptr)); } -TEST(FindFileTest, Single) { +TEST_F(FindFileTest, Single) { Add("p", "q"); ASSERT_EQ(0, Find("a")); ASSERT_EQ(0, Find("p")); @@ -88,7 +89,7 @@ TEST(FindFileTest, Single) { ASSERT_TRUE(Overlaps(nullptr, nullptr)); } -TEST(FindFileTest, Multiple) { +TEST_F(FindFileTest, Multiple) { Add("150", "200"); Add("200", "250"); Add("300", "350"); @@ -126,7 +127,7 @@ TEST(FindFileTest, Multiple) { ASSERT_TRUE(Overlaps("450", "500")); } -TEST(FindFileTest, MultipleNullBoundaries) { +TEST_F(FindFileTest, MultipleNullBoundaries) { Add("150", "200"); Add("200", "250"); Add("300", "350"); @@ -146,7 +147,7 @@ TEST(FindFileTest, MultipleNullBoundaries) { ASSERT_TRUE(Overlaps("450", nullptr)); } -TEST(FindFileTest, OverlapSequenceChecks) { +TEST_F(FindFileTest, OverlapSequenceChecks) { Add("200", "200", 5000, 3000); ASSERT_TRUE(!Overlaps("199", "199")); ASSERT_TRUE(!Overlaps("201", "300")); @@ -155,7 +156,7 @@ TEST(FindFileTest, OverlapSequenceChecks) { ASSERT_TRUE(Overlaps("200", "210")); } -TEST(FindFileTest, OverlappingFiles) { +TEST_F(FindFileTest, OverlappingFiles) { Add("150", "600"); Add("400", "500"); disjoint_sorted_files_ = false; @@ -177,7 +178,7 @@ void AddBoundaryInputs(const InternalKeyComparator& icmp, const std::vector& level_files, std::vector* compaction_files); -class AddBoundaryInputsTest { +class AddBoundaryInputsTest : public testing::Test { public: std::vector level_files_; std::vector compaction_files_; @@ -204,13 +205,13 @@ class AddBoundaryInputsTest { } }; -TEST(AddBoundaryInputsTest, TestEmptyFileSets) { +TEST_F(AddBoundaryInputsTest, TestEmptyFileSets) { AddBoundaryInputs(icmp_, level_files_, &compaction_files_); ASSERT_TRUE(compaction_files_.empty()); ASSERT_TRUE(level_files_.empty()); } -TEST(AddBoundaryInputsTest, TestEmptyLevelFiles) { +TEST_F(AddBoundaryInputsTest, 
TestEmptyLevelFiles) { FileMetaData* f1 = CreateFileMetaData(1, InternalKey("100", 2, kTypeValue), InternalKey(InternalKey("100", 1, kTypeValue))); @@ -222,7 +223,7 @@ TEST(AddBoundaryInputsTest, TestEmptyLevelFiles) { ASSERT_TRUE(level_files_.empty()); } -TEST(AddBoundaryInputsTest, TestEmptyCompactionFiles) { +TEST_F(AddBoundaryInputsTest, TestEmptyCompactionFiles) { FileMetaData* f1 = CreateFileMetaData(1, InternalKey("100", 2, kTypeValue), InternalKey(InternalKey("100", 1, kTypeValue))); @@ -234,7 +235,7 @@ TEST(AddBoundaryInputsTest, TestEmptyCompactionFiles) { ASSERT_EQ(f1, level_files_[0]); } -TEST(AddBoundaryInputsTest, TestNoBoundaryFiles) { +TEST_F(AddBoundaryInputsTest, TestNoBoundaryFiles) { FileMetaData* f1 = CreateFileMetaData(1, InternalKey("100", 2, kTypeValue), InternalKey(InternalKey("100", 1, kTypeValue))); @@ -255,7 +256,7 @@ TEST(AddBoundaryInputsTest, TestNoBoundaryFiles) { ASSERT_EQ(2, compaction_files_.size()); } -TEST(AddBoundaryInputsTest, TestOneBoundaryFiles) { +TEST_F(AddBoundaryInputsTest, TestOneBoundaryFiles) { FileMetaData* f1 = CreateFileMetaData(1, InternalKey("100", 3, kTypeValue), InternalKey(InternalKey("100", 2, kTypeValue))); @@ -277,7 +278,7 @@ TEST(AddBoundaryInputsTest, TestOneBoundaryFiles) { ASSERT_EQ(f2, compaction_files_[1]); } -TEST(AddBoundaryInputsTest, TestTwoBoundaryFiles) { +TEST_F(AddBoundaryInputsTest, TestTwoBoundaryFiles) { FileMetaData* f1 = CreateFileMetaData(1, InternalKey("100", 6, kTypeValue), InternalKey(InternalKey("100", 5, kTypeValue))); @@ -300,7 +301,7 @@ TEST(AddBoundaryInputsTest, TestTwoBoundaryFiles) { ASSERT_EQ(f2, compaction_files_[2]); } -TEST(AddBoundaryInputsTest, TestDisjoinFilePointers) { +TEST_F(AddBoundaryInputsTest, TestDisjoinFilePointers) { FileMetaData* f1 = CreateFileMetaData(1, InternalKey("100", 6, kTypeValue), InternalKey(InternalKey("100", 5, kTypeValue))); @@ -329,4 +330,7 @@ TEST(AddBoundaryInputsTest, TestDisjoinFilePointers) { } // namespace leveldb -int main(int argc, char** argv) { return leveldb::test::RunAllTests(); } +int main(int argc, char** argv) { + testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} diff --git a/db/write_batch_test.cc b/db/write_batch_test.cc index c32317f..b33993a 100644 --- a/db/write_batch_test.cc +++ b/db/write_batch_test.cc @@ -2,13 +2,12 @@ // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. See the AUTHORS file for names of contributors. 
-#include "leveldb/db.h" - +#include "third_party/googletest/googletest/include/gtest/gtest.h" #include "db/memtable.h" #include "db/write_batch_internal.h" +#include "leveldb/db.h" #include "leveldb/env.h" #include "util/logging.h" -#include "util/testharness.h" namespace leveldb { @@ -22,7 +21,7 @@ static std::string PrintContents(WriteBatch* b) { Iterator* iter = mem->NewIterator(); for (iter->SeekToFirst(); iter->Valid(); iter->Next()) { ParsedInternalKey ikey; - ASSERT_TRUE(ParseInternalKey(iter->key(), &ikey)); + EXPECT_TRUE(ParseInternalKey(iter->key(), &ikey)); switch (ikey.type) { case kTypeValue: state.append("Put("); @@ -52,8 +51,6 @@ static std::string PrintContents(WriteBatch* b) { return state; } -class WriteBatchTest {}; - TEST(WriteBatchTest, Empty) { WriteBatch batch; ASSERT_EQ("", PrintContents(&batch)); @@ -134,4 +131,7 @@ TEST(WriteBatchTest, ApproximateSize) { } // namespace leveldb -int main(int argc, char** argv) { return leveldb::test::RunAllTests(); } +int main(int argc, char** argv) { + testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} diff --git a/helpers/memenv/memenv_test.cc b/helpers/memenv/memenv_test.cc index 94ad06b..72e22da 100644 --- a/helpers/memenv/memenv_test.cc +++ b/helpers/memenv/memenv_test.cc @@ -7,14 +7,15 @@ #include #include +#include "third_party/googletest/googletest/include/gtest/gtest.h" #include "db/db_impl.h" #include "leveldb/db.h" #include "leveldb/env.h" -#include "util/testharness.h" +#include "util/testutil.h" namespace leveldb { -class MemEnvTest { +class MemEnvTest : public testing::Test { public: MemEnvTest() : env_(NewMemEnv(Env::Default())) {} ~MemEnvTest() { delete env_; } @@ -22,55 +23,55 @@ class MemEnvTest { Env* env_; }; -TEST(MemEnvTest, Basics) { +TEST_F(MemEnvTest, Basics) { uint64_t file_size; WritableFile* writable_file; std::vector children; - ASSERT_OK(env_->CreateDir("/dir")); + ASSERT_LEVELDB_OK(env_->CreateDir("/dir")); // Check that the directory is empty. ASSERT_TRUE(!env_->FileExists("/dir/non_existent")); ASSERT_TRUE(!env_->GetFileSize("/dir/non_existent", &file_size).ok()); - ASSERT_OK(env_->GetChildren("/dir", &children)); + ASSERT_LEVELDB_OK(env_->GetChildren("/dir", &children)); ASSERT_EQ(0, children.size()); // Create a file. - ASSERT_OK(env_->NewWritableFile("/dir/f", &writable_file)); - ASSERT_OK(env_->GetFileSize("/dir/f", &file_size)); + ASSERT_LEVELDB_OK(env_->NewWritableFile("/dir/f", &writable_file)); + ASSERT_LEVELDB_OK(env_->GetFileSize("/dir/f", &file_size)); ASSERT_EQ(0, file_size); delete writable_file; // Check that the file exists. ASSERT_TRUE(env_->FileExists("/dir/f")); - ASSERT_OK(env_->GetFileSize("/dir/f", &file_size)); + ASSERT_LEVELDB_OK(env_->GetFileSize("/dir/f", &file_size)); ASSERT_EQ(0, file_size); - ASSERT_OK(env_->GetChildren("/dir", &children)); + ASSERT_LEVELDB_OK(env_->GetChildren("/dir", &children)); ASSERT_EQ(1, children.size()); ASSERT_EQ("f", children[0]); // Write to the file. - ASSERT_OK(env_->NewWritableFile("/dir/f", &writable_file)); - ASSERT_OK(writable_file->Append("abc")); + ASSERT_LEVELDB_OK(env_->NewWritableFile("/dir/f", &writable_file)); + ASSERT_LEVELDB_OK(writable_file->Append("abc")); delete writable_file; // Check that append works. 
- ASSERT_OK(env_->NewAppendableFile("/dir/f", &writable_file)); - ASSERT_OK(env_->GetFileSize("/dir/f", &file_size)); + ASSERT_LEVELDB_OK(env_->NewAppendableFile("/dir/f", &writable_file)); + ASSERT_LEVELDB_OK(env_->GetFileSize("/dir/f", &file_size)); ASSERT_EQ(3, file_size); - ASSERT_OK(writable_file->Append("hello")); + ASSERT_LEVELDB_OK(writable_file->Append("hello")); delete writable_file; // Check for expected size. - ASSERT_OK(env_->GetFileSize("/dir/f", &file_size)); + ASSERT_LEVELDB_OK(env_->GetFileSize("/dir/f", &file_size)); ASSERT_EQ(8, file_size); // Check that renaming works. ASSERT_TRUE(!env_->RenameFile("/dir/non_existent", "/dir/g").ok()); - ASSERT_OK(env_->RenameFile("/dir/f", "/dir/g")); + ASSERT_LEVELDB_OK(env_->RenameFile("/dir/f", "/dir/g")); ASSERT_TRUE(!env_->FileExists("/dir/f")); ASSERT_TRUE(env_->FileExists("/dir/g")); - ASSERT_OK(env_->GetFileSize("/dir/g", &file_size)); + ASSERT_LEVELDB_OK(env_->GetFileSize("/dir/g", &file_size)); ASSERT_EQ(8, file_size); // Check that opening non-existent file fails. @@ -83,48 +84,49 @@ TEST(MemEnvTest, Basics) { // Check that deleting works. ASSERT_TRUE(!env_->DeleteFile("/dir/non_existent").ok()); - ASSERT_OK(env_->DeleteFile("/dir/g")); + ASSERT_LEVELDB_OK(env_->DeleteFile("/dir/g")); ASSERT_TRUE(!env_->FileExists("/dir/g")); - ASSERT_OK(env_->GetChildren("/dir", &children)); + ASSERT_LEVELDB_OK(env_->GetChildren("/dir", &children)); ASSERT_EQ(0, children.size()); - ASSERT_OK(env_->DeleteDir("/dir")); + ASSERT_LEVELDB_OK(env_->DeleteDir("/dir")); } -TEST(MemEnvTest, ReadWrite) { +TEST_F(MemEnvTest, ReadWrite) { WritableFile* writable_file; SequentialFile* seq_file; RandomAccessFile* rand_file; Slice result; char scratch[100]; - ASSERT_OK(env_->CreateDir("/dir")); + ASSERT_LEVELDB_OK(env_->CreateDir("/dir")); - ASSERT_OK(env_->NewWritableFile("/dir/f", &writable_file)); - ASSERT_OK(writable_file->Append("hello ")); - ASSERT_OK(writable_file->Append("world")); + ASSERT_LEVELDB_OK(env_->NewWritableFile("/dir/f", &writable_file)); + ASSERT_LEVELDB_OK(writable_file->Append("hello ")); + ASSERT_LEVELDB_OK(writable_file->Append("world")); delete writable_file; // Read sequentially. - ASSERT_OK(env_->NewSequentialFile("/dir/f", &seq_file)); - ASSERT_OK(seq_file->Read(5, &result, scratch)); // Read "hello". + ASSERT_LEVELDB_OK(env_->NewSequentialFile("/dir/f", &seq_file)); + ASSERT_LEVELDB_OK(seq_file->Read(5, &result, scratch)); // Read "hello". ASSERT_EQ(0, result.compare("hello")); - ASSERT_OK(seq_file->Skip(1)); - ASSERT_OK(seq_file->Read(1000, &result, scratch)); // Read "world". + ASSERT_LEVELDB_OK(seq_file->Skip(1)); + ASSERT_LEVELDB_OK(seq_file->Read(1000, &result, scratch)); // Read "world". ASSERT_EQ(0, result.compare("world")); - ASSERT_OK(seq_file->Read(1000, &result, scratch)); // Try reading past EOF. + ASSERT_LEVELDB_OK( + seq_file->Read(1000, &result, scratch)); // Try reading past EOF. ASSERT_EQ(0, result.size()); - ASSERT_OK(seq_file->Skip(100)); // Try to skip past end of file. - ASSERT_OK(seq_file->Read(1000, &result, scratch)); + ASSERT_LEVELDB_OK(seq_file->Skip(100)); // Try to skip past end of file. + ASSERT_LEVELDB_OK(seq_file->Read(1000, &result, scratch)); ASSERT_EQ(0, result.size()); delete seq_file; // Random reads. - ASSERT_OK(env_->NewRandomAccessFile("/dir/f", &rand_file)); - ASSERT_OK(rand_file->Read(6, 5, &result, scratch)); // Read "world". + ASSERT_LEVELDB_OK(env_->NewRandomAccessFile("/dir/f", &rand_file)); + ASSERT_LEVELDB_OK(rand_file->Read(6, 5, &result, scratch)); // Read "world". 
ASSERT_EQ(0, result.compare("world")); - ASSERT_OK(rand_file->Read(0, 5, &result, scratch)); // Read "hello". + ASSERT_LEVELDB_OK(rand_file->Read(0, 5, &result, scratch)); // Read "hello". ASSERT_EQ(0, result.compare("hello")); - ASSERT_OK(rand_file->Read(10, 100, &result, scratch)); // Read "d". + ASSERT_LEVELDB_OK(rand_file->Read(10, 100, &result, scratch)); // Read "d". ASSERT_EQ(0, result.compare("d")); // Too high offset. @@ -132,30 +134,30 @@ TEST(MemEnvTest, ReadWrite) { delete rand_file; } -TEST(MemEnvTest, Locks) { +TEST_F(MemEnvTest, Locks) { FileLock* lock; // These are no-ops, but we test they return success. - ASSERT_OK(env_->LockFile("some file", &lock)); - ASSERT_OK(env_->UnlockFile(lock)); + ASSERT_LEVELDB_OK(env_->LockFile("some file", &lock)); + ASSERT_LEVELDB_OK(env_->UnlockFile(lock)); } -TEST(MemEnvTest, Misc) { +TEST_F(MemEnvTest, Misc) { std::string test_dir; - ASSERT_OK(env_->GetTestDirectory(&test_dir)); + ASSERT_LEVELDB_OK(env_->GetTestDirectory(&test_dir)); ASSERT_TRUE(!test_dir.empty()); WritableFile* writable_file; - ASSERT_OK(env_->NewWritableFile("/a/b", &writable_file)); + ASSERT_LEVELDB_OK(env_->NewWritableFile("/a/b", &writable_file)); // These are no-ops, but we test they return success. - ASSERT_OK(writable_file->Sync()); - ASSERT_OK(writable_file->Flush()); - ASSERT_OK(writable_file->Close()); + ASSERT_LEVELDB_OK(writable_file->Sync()); + ASSERT_LEVELDB_OK(writable_file->Flush()); + ASSERT_LEVELDB_OK(writable_file->Close()); delete writable_file; } -TEST(MemEnvTest, LargeWrite) { +TEST_F(MemEnvTest, LargeWrite) { const size_t kWriteSize = 300 * 1024; char* scratch = new char[kWriteSize * 2]; @@ -165,21 +167,21 @@ TEST(MemEnvTest, LargeWrite) { } WritableFile* writable_file; - ASSERT_OK(env_->NewWritableFile("/dir/f", &writable_file)); - ASSERT_OK(writable_file->Append("foo")); - ASSERT_OK(writable_file->Append(write_data)); + ASSERT_LEVELDB_OK(env_->NewWritableFile("/dir/f", &writable_file)); + ASSERT_LEVELDB_OK(writable_file->Append("foo")); + ASSERT_LEVELDB_OK(writable_file->Append(write_data)); delete writable_file; SequentialFile* seq_file; Slice result; - ASSERT_OK(env_->NewSequentialFile("/dir/f", &seq_file)); - ASSERT_OK(seq_file->Read(3, &result, scratch)); // Read "foo". + ASSERT_LEVELDB_OK(env_->NewSequentialFile("/dir/f", &seq_file)); + ASSERT_LEVELDB_OK(seq_file->Read(3, &result, scratch)); // Read "foo". 
ASSERT_EQ(0, result.compare("foo")); size_t read = 0; std::string read_data; while (read < kWriteSize) { - ASSERT_OK(seq_file->Read(kWriteSize - read, &result, scratch)); + ASSERT_LEVELDB_OK(seq_file->Read(kWriteSize - read, &result, scratch)); read_data.append(result.data(), result.size()); read += result.size(); } @@ -188,30 +190,30 @@ TEST(MemEnvTest, LargeWrite) { delete[] scratch; } -TEST(MemEnvTest, OverwriteOpenFile) { +TEST_F(MemEnvTest, OverwriteOpenFile) { const char kWrite1Data[] = "Write #1 data"; const size_t kFileDataLen = sizeof(kWrite1Data) - 1; - const std::string kTestFileName = test::TmpDir() + "/leveldb-TestFile.dat"; + const std::string kTestFileName = testing::TempDir() + "leveldb-TestFile.dat"; - ASSERT_OK(WriteStringToFile(env_, kWrite1Data, kTestFileName)); + ASSERT_LEVELDB_OK(WriteStringToFile(env_, kWrite1Data, kTestFileName)); RandomAccessFile* rand_file; - ASSERT_OK(env_->NewRandomAccessFile(kTestFileName, &rand_file)); + ASSERT_LEVELDB_OK(env_->NewRandomAccessFile(kTestFileName, &rand_file)); const char kWrite2Data[] = "Write #2 data"; - ASSERT_OK(WriteStringToFile(env_, kWrite2Data, kTestFileName)); + ASSERT_LEVELDB_OK(WriteStringToFile(env_, kWrite2Data, kTestFileName)); // Verify that overwriting an open file will result in the new file data // being read from files opened before the write. Slice result; char scratch[kFileDataLen]; - ASSERT_OK(rand_file->Read(0, kFileDataLen, &result, scratch)); + ASSERT_LEVELDB_OK(rand_file->Read(0, kFileDataLen, &result, scratch)); ASSERT_EQ(0, result.compare(kWrite2Data)); delete rand_file; } -TEST(MemEnvTest, DBTest) { +TEST_F(MemEnvTest, DBTest) { Options options; options.create_if_missing = true; options.env = env_; @@ -220,14 +222,14 @@ TEST(MemEnvTest, DBTest) { const Slice keys[] = {Slice("aaa"), Slice("bbb"), Slice("ccc")}; const Slice vals[] = {Slice("foo"), Slice("bar"), Slice("baz")}; - ASSERT_OK(DB::Open(options, "/dir/db", &db)); + ASSERT_LEVELDB_OK(DB::Open(options, "/dir/db", &db)); for (size_t i = 0; i < 3; ++i) { - ASSERT_OK(db->Put(WriteOptions(), keys[i], vals[i])); + ASSERT_LEVELDB_OK(db->Put(WriteOptions(), keys[i], vals[i])); } for (size_t i = 0; i < 3; ++i) { std::string res; - ASSERT_OK(db->Get(ReadOptions(), keys[i], &res)); + ASSERT_LEVELDB_OK(db->Get(ReadOptions(), keys[i], &res)); ASSERT_TRUE(res == vals[i]); } @@ -243,11 +245,11 @@ TEST(MemEnvTest, DBTest) { delete iterator; DBImpl* dbi = reinterpret_cast(db); - ASSERT_OK(dbi->TEST_CompactMemTable()); + ASSERT_LEVELDB_OK(dbi->TEST_CompactMemTable()); for (size_t i = 0; i < 3; ++i) { std::string res; - ASSERT_OK(db->Get(ReadOptions(), keys[i], &res)); + ASSERT_LEVELDB_OK(db->Get(ReadOptions(), keys[i], &res)); ASSERT_TRUE(res == vals[i]); } @@ -256,4 +258,7 @@ TEST(MemEnvTest, DBTest) { } // namespace leveldb -int main(int argc, char** argv) { return leveldb::test::RunAllTests(); } +int main(int argc, char** argv) { + testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} diff --git a/issues/issue178_test.cc b/issues/issue178_test.cc index d50ffeb..4a52a1b 100644 --- a/issues/issue178_test.cc +++ b/issues/issue178_test.cc @@ -7,9 +7,10 @@ #include #include +#include "third_party/googletest/googletest/include/gtest/gtest.h" #include "leveldb/db.h" #include "leveldb/write_batch.h" -#include "util/testharness.h" +#include "util/testutil.h" namespace { @@ -23,11 +24,9 @@ std::string Key1(int i) { std::string Key2(int i) { return Key1(i) + "_xxx"; } -class Issue178 {}; - TEST(Issue178, Test) { // Get rid of any state from an old run. 
- std::string dbpath = leveldb::test::TmpDir() + "/leveldb_cbug_test"; + std::string dbpath = testing::TempDir() + "leveldb_cbug_test"; DestroyDB(dbpath, leveldb::Options()); // Open database. Disable compression since it affects the creation @@ -37,28 +36,28 @@ TEST(Issue178, Test) { leveldb::Options db_options; db_options.create_if_missing = true; db_options.compression = leveldb::kNoCompression; - ASSERT_OK(leveldb::DB::Open(db_options, dbpath, &db)); + ASSERT_LEVELDB_OK(leveldb::DB::Open(db_options, dbpath, &db)); // create first key range leveldb::WriteBatch batch; for (size_t i = 0; i < kNumKeys; i++) { batch.Put(Key1(i), "value for range 1 key"); } - ASSERT_OK(db->Write(leveldb::WriteOptions(), &batch)); + ASSERT_LEVELDB_OK(db->Write(leveldb::WriteOptions(), &batch)); // create second key range batch.Clear(); for (size_t i = 0; i < kNumKeys; i++) { batch.Put(Key2(i), "value for range 2 key"); } - ASSERT_OK(db->Write(leveldb::WriteOptions(), &batch)); + ASSERT_LEVELDB_OK(db->Write(leveldb::WriteOptions(), &batch)); // delete second key range batch.Clear(); for (size_t i = 0; i < kNumKeys; i++) { batch.Delete(Key2(i)); } - ASSERT_OK(db->Write(leveldb::WriteOptions(), &batch)); + ASSERT_LEVELDB_OK(db->Write(leveldb::WriteOptions(), &batch)); // compact database std::string start_key = Key1(0); @@ -85,4 +84,7 @@ TEST(Issue178, Test) { } // anonymous namespace -int main(int argc, char** argv) { return leveldb::test::RunAllTests(); } +int main(int argc, char** argv) { + testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} diff --git a/issues/issue200_test.cc b/issues/issue200_test.cc index 877b2af..ee08bc6 100644 --- a/issues/issue200_test.cc +++ b/issues/issue200_test.cc @@ -6,35 +6,34 @@ // to forward, the current key can be yielded unexpectedly if a new // mutation has been added just before the current key. +#include "third_party/googletest/googletest/include/gtest/gtest.h" #include "leveldb/db.h" -#include "util/testharness.h" +#include "util/testutil.h" namespace leveldb { -class Issue200 {}; - TEST(Issue200, Test) { // Get rid of any state from an old run. - std::string dbpath = test::TmpDir() + "/leveldb_issue200_test"; + std::string dbpath = testing::TempDir() + "leveldb_issue200_test"; DestroyDB(dbpath, Options()); DB* db; Options options; options.create_if_missing = true; - ASSERT_OK(DB::Open(options, dbpath, &db)); + ASSERT_LEVELDB_OK(DB::Open(options, dbpath, &db)); WriteOptions write_options; - ASSERT_OK(db->Put(write_options, "1", "b")); - ASSERT_OK(db->Put(write_options, "2", "c")); - ASSERT_OK(db->Put(write_options, "3", "d")); - ASSERT_OK(db->Put(write_options, "4", "e")); - ASSERT_OK(db->Put(write_options, "5", "f")); + ASSERT_LEVELDB_OK(db->Put(write_options, "1", "b")); + ASSERT_LEVELDB_OK(db->Put(write_options, "2", "c")); + ASSERT_LEVELDB_OK(db->Put(write_options, "3", "d")); + ASSERT_LEVELDB_OK(db->Put(write_options, "4", "e")); + ASSERT_LEVELDB_OK(db->Put(write_options, "5", "f")); ReadOptions read_options; Iterator* iter = db->NewIterator(read_options); // Add an element that should not be reflected in the iterator. 
- ASSERT_OK(db->Put(write_options, "25", "cd")); + ASSERT_LEVELDB_OK(db->Put(write_options, "25", "cd")); iter->Seek("5"); ASSERT_EQ(iter->key().ToString(), "5"); @@ -54,4 +53,7 @@ TEST(Issue200, Test) { } // namespace leveldb -int main(int argc, char** argv) { return leveldb::test::RunAllTests(); } +int main(int argc, char** argv) { + testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} diff --git a/issues/issue320_test.cc b/issues/issue320_test.cc index c5fcbfc..c289ab4 100644 --- a/issues/issue320_test.cc +++ b/issues/issue320_test.cc @@ -9,9 +9,10 @@ #include #include +#include "third_party/googletest/googletest/include/gtest/gtest.h" #include "leveldb/db.h" #include "leveldb/write_batch.h" -#include "util/testharness.h" +#include "util/testutil.h" namespace leveldb { @@ -37,8 +38,6 @@ std::string CreateRandomString(int32_t index) { } // namespace -class Issue320 {}; - TEST(Issue320, Test) { std::srand(0); @@ -53,8 +52,8 @@ TEST(Issue320, Test) { Options options; options.create_if_missing = true; - std::string dbpath = test::TmpDir() + "/leveldb_issue320_test"; - ASSERT_OK(DB::Open(options, dbpath, &db)); + std::string dbpath = testing::TempDir() + "leveldb_issue320_test"; + ASSERT_LEVELDB_OK(DB::Open(options, dbpath, &db)); uint32_t target_size = 10000; uint32_t num_items = 0; @@ -78,7 +77,8 @@ TEST(Issue320, Test) { CreateRandomString(index), CreateRandomString(index))); batch.Put(test_map[index]->first, test_map[index]->second); } else { - ASSERT_OK(db->Get(readOptions, test_map[index]->first, &old_value)); + ASSERT_LEVELDB_OK( + db->Get(readOptions, test_map[index]->first, &old_value)); if (old_value != test_map[index]->second) { std::cout << "ERROR incorrect value returned by Get" << std::endl; std::cout << " count=" << count << std::endl; @@ -102,7 +102,7 @@ TEST(Issue320, Test) { } } - ASSERT_OK(db->Write(writeOptions, &batch)); + ASSERT_LEVELDB_OK(db->Write(writeOptions, &batch)); if (keep_snapshots && GenerateRandomNumber(10) == 0) { int i = GenerateRandomNumber(snapshots.size()); @@ -125,4 +125,7 @@ TEST(Issue320, Test) { } // namespace leveldb -int main(int argc, char** argv) { return leveldb::test::RunAllTests(); } +int main(int argc, char** argv) { + testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} diff --git a/table/filter_block_test.cc b/table/filter_block_test.cc index 8b33bbd..53be948 100644 --- a/table/filter_block_test.cc +++ b/table/filter_block_test.cc @@ -4,11 +4,11 @@ #include "table/filter_block.h" +#include "third_party/googletest/googletest/include/gtest/gtest.h" #include "leveldb/filter_policy.h" #include "util/coding.h" #include "util/hash.h" #include "util/logging.h" -#include "util/testharness.h" #include "util/testutil.h" namespace leveldb { @@ -36,12 +36,12 @@ class TestHashFilter : public FilterPolicy { } }; -class FilterBlockTest { +class FilterBlockTest : public testing::Test { public: TestHashFilter policy_; }; -TEST(FilterBlockTest, EmptyBuilder) { +TEST_F(FilterBlockTest, EmptyBuilder) { FilterBlockBuilder builder(&policy_); Slice block = builder.Finish(); ASSERT_EQ("\\x00\\x00\\x00\\x00\\x0b", EscapeString(block)); @@ -50,7 +50,7 @@ TEST(FilterBlockTest, EmptyBuilder) { ASSERT_TRUE(reader.KeyMayMatch(100000, "foo")); } -TEST(FilterBlockTest, SingleChunk) { +TEST_F(FilterBlockTest, SingleChunk) { FilterBlockBuilder builder(&policy_); builder.StartBlock(100); builder.AddKey("foo"); @@ -71,7 +71,7 @@ TEST(FilterBlockTest, SingleChunk) { ASSERT_TRUE(!reader.KeyMayMatch(100, "other")); } -TEST(FilterBlockTest, 
MultiChunk) { +TEST_F(FilterBlockTest, MultiChunk) { FilterBlockBuilder builder(&policy_); // First filter @@ -121,4 +121,7 @@ TEST(FilterBlockTest, MultiChunk) { } // namespace leveldb -int main(int argc, char** argv) { return leveldb::test::RunAllTests(); } +int main(int argc, char** argv) { + testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} diff --git a/table/table_test.cc b/table/table_test.cc index f689a27..09d1b5d 100644 --- a/table/table_test.cc +++ b/table/table_test.cc @@ -7,6 +7,7 @@ #include #include +#include "third_party/googletest/googletest/include/gtest/gtest.h" #include "db/dbformat.h" #include "db/memtable.h" #include "db/write_batch_internal.h" @@ -18,7 +19,6 @@ #include "table/block_builder.h" #include "table/format.h" #include "util/random.h" -#include "util/testharness.h" #include "util/testutil.h" namespace leveldb { @@ -219,12 +219,12 @@ class TableConstructor : public Constructor { for (const auto& kvp : data) { builder.Add(kvp.first, kvp.second); - ASSERT_TRUE(builder.status().ok()); + EXPECT_LEVELDB_OK(builder.status()); } Status s = builder.Finish(); - ASSERT_TRUE(s.ok()) << s.ToString(); + EXPECT_LEVELDB_OK(s); - ASSERT_EQ(sink.contents().size(), builder.FileSize()); + EXPECT_EQ(sink.contents().size(), builder.FileSize()); // Open the table source_ = new StringSource(sink.contents()); @@ -340,7 +340,7 @@ class DBConstructor : public Constructor { for (const auto& kvp : data) { WriteBatch batch; batch.Put(kvp.first, kvp.second); - ASSERT_TRUE(db_->Write(WriteOptions(), &batch).ok()); + EXPECT_TRUE(db_->Write(WriteOptions(), &batch).ok()); } return Status::OK(); } @@ -352,7 +352,7 @@ class DBConstructor : public Constructor { private: void NewDB() { - std::string name = test::TmpDir() + "/table_testdb"; + std::string name = testing::TempDir() + "table_testdb"; Options options; options.comparator = comparator_; @@ -403,7 +403,7 @@ static const TestArgs kTestArgList[] = { }; static const int kNumTestArgs = sizeof(kTestArgList) / sizeof(kTestArgList[0]); -class Harness { +class Harness : public testing::Test { public: Harness() : constructor_(nullptr) {} @@ -609,7 +609,7 @@ class Harness { }; // Test empty table/block. -TEST(Harness, Empty) { +TEST_F(Harness, Empty) { for (int i = 0; i < kNumTestArgs; i++) { Init(kTestArgList[i]); Random rnd(test::RandomSeed() + 1); @@ -620,7 +620,7 @@ TEST(Harness, Empty) { // Special test for a block with no restart entries. The C++ leveldb // code never generates such blocks, but the Java version of leveldb // seems to. 
-TEST(Harness, ZeroRestartPointsInBlock) { +TEST_F(Harness, ZeroRestartPointsInBlock) { char data[sizeof(uint32_t)]; memset(data, 0, sizeof(data)); BlockContents contents; @@ -639,7 +639,7 @@ TEST(Harness, ZeroRestartPointsInBlock) { } // Test the empty key -TEST(Harness, SimpleEmptyKey) { +TEST_F(Harness, SimpleEmptyKey) { for (int i = 0; i < kNumTestArgs; i++) { Init(kTestArgList[i]); Random rnd(test::RandomSeed() + 1); @@ -648,7 +648,7 @@ TEST(Harness, SimpleEmptyKey) { } } -TEST(Harness, SimpleSingle) { +TEST_F(Harness, SimpleSingle) { for (int i = 0; i < kNumTestArgs; i++) { Init(kTestArgList[i]); Random rnd(test::RandomSeed() + 2); @@ -657,7 +657,7 @@ TEST(Harness, SimpleSingle) { } } -TEST(Harness, SimpleMulti) { +TEST_F(Harness, SimpleMulti) { for (int i = 0; i < kNumTestArgs; i++) { Init(kTestArgList[i]); Random rnd(test::RandomSeed() + 3); @@ -668,7 +668,7 @@ TEST(Harness, SimpleMulti) { } } -TEST(Harness, SimpleSpecialKey) { +TEST_F(Harness, SimpleSpecialKey) { for (int i = 0; i < kNumTestArgs; i++) { Init(kTestArgList[i]); Random rnd(test::RandomSeed() + 4); @@ -677,7 +677,7 @@ TEST(Harness, SimpleSpecialKey) { } } -TEST(Harness, Randomized) { +TEST_F(Harness, Randomized) { for (int i = 0; i < kNumTestArgs; i++) { Init(kTestArgList[i]); Random rnd(test::RandomSeed() + 5); @@ -697,7 +697,7 @@ TEST(Harness, Randomized) { } } -TEST(Harness, RandomizedLongDB) { +TEST_F(Harness, RandomizedLongDB) { Random rnd(test::RandomSeed()); TestArgs args = {DB_TEST, false, 16}; Init(args); @@ -721,8 +721,6 @@ TEST(Harness, RandomizedLongDB) { ASSERT_GT(files, 0); } -class MemTableTest {}; - TEST(MemTableTest, Simple) { InternalKeyComparator cmp(BytewiseComparator()); MemTable* memtable = new MemTable(cmp); @@ -757,8 +755,6 @@ static bool Between(uint64_t val, uint64_t low, uint64_t high) { return result; } -class TableTest {}; - TEST(TableTest, ApproximateOffsetOfPlain) { TableConstructor c(BytewiseComparator()); c.Add("k01", "hello"); @@ -832,4 +828,7 @@ TEST(TableTest, ApproximateOffsetOfCompressed) { } // namespace leveldb -int main(int argc, char** argv) { return leveldb::test::RunAllTests(); } +int main(int argc, char** argv) { + testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} diff --git a/util/arena_test.cc b/util/arena_test.cc index e917228..3f8855b 100644 --- a/util/arena_test.cc +++ b/util/arena_test.cc @@ -4,13 +4,11 @@ #include "util/arena.h" +#include "third_party/googletest/googletest/include/gtest/gtest.h" #include "util/random.h" -#include "util/testharness.h" namespace leveldb { -class ArenaTest {}; - TEST(ArenaTest, Empty) { Arena arena; } TEST(ArenaTest, Simple) { @@ -62,4 +60,7 @@ TEST(ArenaTest, Simple) { } // namespace leveldb -int main(int argc, char** argv) { return leveldb::test::RunAllTests(); } +int main(int argc, char** argv) { + testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} diff --git a/util/bloom_test.cc b/util/bloom_test.cc index 436daa9..bcbd7f6 100644 --- a/util/bloom_test.cc +++ b/util/bloom_test.cc @@ -2,11 +2,10 @@ // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. See the AUTHORS file for names of contributors. 
+#include "third_party/googletest/googletest/include/gtest/gtest.h" #include "leveldb/filter_policy.h" - #include "util/coding.h" #include "util/logging.h" -#include "util/testharness.h" #include "util/testutil.h" namespace leveldb { @@ -18,7 +17,7 @@ static Slice Key(int i, char* buffer) { return Slice(buffer, sizeof(uint32_t)); } -class BloomTest { +class BloomTest : public testing::Test { public: BloomTest() : policy_(NewBloomFilterPolicy(10)) {} @@ -80,12 +79,12 @@ class BloomTest { std::vector keys_; }; -TEST(BloomTest, EmptyFilter) { +TEST_F(BloomTest, EmptyFilter) { ASSERT_TRUE(!Matches("hello")); ASSERT_TRUE(!Matches("world")); } -TEST(BloomTest, Small) { +TEST_F(BloomTest, Small) { Add("hello"); Add("world"); ASSERT_TRUE(Matches("hello")); @@ -107,7 +106,7 @@ static int NextLength(int length) { return length; } -TEST(BloomTest, VaryingLengths) { +TEST_F(BloomTest, VaryingLengths) { char buffer[sizeof(int)]; // Count number of filters that significantly exceed the false positive rate @@ -153,4 +152,7 @@ TEST(BloomTest, VaryingLengths) { } // namespace leveldb -int main(int argc, char** argv) { return leveldb::test::RunAllTests(); } +int main(int argc, char** argv) { + testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} diff --git a/util/cache_test.cc b/util/cache_test.cc index 974334b..8ce9463 100644 --- a/util/cache_test.cc +++ b/util/cache_test.cc @@ -5,8 +5,9 @@ #include "leveldb/cache.h" #include + +#include "third_party/googletest/googletest/include/gtest/gtest.h" #include "util/coding.h" -#include "util/testharness.h" namespace leveldb { @@ -23,7 +24,7 @@ static int DecodeKey(const Slice& k) { static void* EncodeValue(uintptr_t v) { return reinterpret_cast(v); } static int DecodeValue(void* v) { return reinterpret_cast(v); } -class CacheTest { +class CacheTest : public testing::Test { public: static void Deleter(const Slice& key, void* v) { current_->deleted_keys_.push_back(DecodeKey(key)); @@ -59,12 +60,11 @@ class CacheTest { } void Erase(int key) { cache_->Erase(EncodeKey(key)); } - static CacheTest* current_; }; CacheTest* CacheTest::current_; -TEST(CacheTest, HitAndMiss) { +TEST_F(CacheTest, HitAndMiss) { ASSERT_EQ(-1, Lookup(100)); Insert(100, 101); @@ -87,7 +87,7 @@ TEST(CacheTest, HitAndMiss) { ASSERT_EQ(101, deleted_values_[0]); } -TEST(CacheTest, Erase) { +TEST_F(CacheTest, Erase) { Erase(200); ASSERT_EQ(0, deleted_keys_.size()); @@ -106,7 +106,7 @@ TEST(CacheTest, Erase) { ASSERT_EQ(1, deleted_keys_.size()); } -TEST(CacheTest, EntriesArePinned) { +TEST_F(CacheTest, EntriesArePinned) { Insert(100, 101); Cache::Handle* h1 = cache_->Lookup(EncodeKey(100)); ASSERT_EQ(101, DecodeValue(cache_->Value(h1))); @@ -131,7 +131,7 @@ TEST(CacheTest, EntriesArePinned) { ASSERT_EQ(102, deleted_values_[1]); } -TEST(CacheTest, EvictionPolicy) { +TEST_F(CacheTest, EvictionPolicy) { Insert(100, 101); Insert(200, 201); Insert(300, 301); @@ -150,7 +150,7 @@ TEST(CacheTest, EvictionPolicy) { cache_->Release(h); } -TEST(CacheTest, UseExceedsCacheSize) { +TEST_F(CacheTest, UseExceedsCacheSize) { // Overfill the cache, keeping handles on all inserted entries. std::vector h; for (int i = 0; i < kCacheSize + 100; i++) { @@ -167,7 +167,7 @@ TEST(CacheTest, UseExceedsCacheSize) { } } -TEST(CacheTest, HeavyEntries) { +TEST_F(CacheTest, HeavyEntries) { // Add a bunch of light and heavy entries and then count the combined // size of items still in the cache, which must be approximately the // same as the total capacity. 
@@ -194,13 +194,13 @@ TEST(CacheTest, HeavyEntries) { ASSERT_LE(cached_weight, kCacheSize + kCacheSize / 10); } -TEST(CacheTest, NewId) { +TEST_F(CacheTest, NewId) { uint64_t a = cache_->NewId(); uint64_t b = cache_->NewId(); ASSERT_NE(a, b); } -TEST(CacheTest, Prune) { +TEST_F(CacheTest, Prune) { Insert(1, 100); Insert(2, 200); @@ -213,7 +213,7 @@ TEST(CacheTest, Prune) { ASSERT_EQ(-1, Lookup(2)); } -TEST(CacheTest, ZeroSizeCache) { +TEST_F(CacheTest, ZeroSizeCache) { delete cache_; cache_ = NewLRUCache(0); @@ -223,4 +223,7 @@ TEST(CacheTest, ZeroSizeCache) { } // namespace leveldb -int main(int argc, char** argv) { return leveldb::test::RunAllTests(); } +int main(int argc, char** argv) { + testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} diff --git a/util/coding_test.cc b/util/coding_test.cc index 0d2a0c5..db83367 100644 --- a/util/coding_test.cc +++ b/util/coding_test.cc @@ -2,15 +2,14 @@ // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. See the AUTHORS file for names of contributors. +#include "util/coding.h" + #include -#include "util/coding.h" -#include "util/testharness.h" +#include "third_party/googletest/googletest/include/gtest/gtest.h" namespace leveldb { -class Coding {}; - TEST(Coding, Fixed32) { std::string s; for (uint32_t v = 0; v < 100000; v++) { @@ -193,4 +192,7 @@ TEST(Coding, Strings) { } // namespace leveldb -int main(int argc, char** argv) { return leveldb::test::RunAllTests(); } +int main(int argc, char** argv) { + testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} diff --git a/util/crc32c_test.cc b/util/crc32c_test.cc index 18a8494..1e2aae7 100644 --- a/util/crc32c_test.cc +++ b/util/crc32c_test.cc @@ -3,13 +3,12 @@ // found in the LICENSE file. See the AUTHORS file for names of contributors. #include "util/crc32c.h" -#include "util/testharness.h" + +#include "third_party/googletest/googletest/include/gtest/gtest.h" namespace leveldb { namespace crc32c { -class CRC {}; - TEST(CRC, StandardResults) { // From rfc3720 section B.4. char buf[32]; @@ -56,4 +55,7 @@ TEST(CRC, Mask) { } // namespace crc32c } // namespace leveldb -int main(int argc, char** argv) { return leveldb::test::RunAllTests(); } +int main(int argc, char** argv) { + testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} diff --git a/util/env_posix_test.cc b/util/env_posix_test.cc index 9675d73..5ee2248 100644 --- a/util/env_posix_test.cc +++ b/util/env_posix_test.cc @@ -13,10 +13,11 @@ #include #include +#include "third_party/googletest/googletest/include/gtest/gtest.h" #include "leveldb/env.h" #include "port/port.h" #include "util/env_posix_test_helper.h" -#include "util/testharness.h" +#include "util/testutil.h" #if HAVE_O_CLOEXEC @@ -168,7 +169,7 @@ namespace leveldb { static const int kReadOnlyFileLimit = 4; static const int kMMapLimit = 4; -class EnvPosixTest { +class EnvPosixTest : public testing::Test { public: static void SetFileLimits(int read_only_file_limit, int mmap_limit) { EnvPosixTestHelper::SetReadOnlyFDLimit(read_only_file_limit); @@ -180,10 +181,10 @@ class EnvPosixTest { Env* env_; }; -TEST(EnvPosixTest, TestOpenOnRead) { +TEST_F(EnvPosixTest, TestOpenOnRead) { // Write some test data to a single file that will be opened |n| times. 
std::string test_dir; - ASSERT_OK(env_->GetTestDirectory(&test_dir)); + ASSERT_LEVELDB_OK(env_->GetTestDirectory(&test_dir)); std::string test_file = test_dir + "/open_on_read.txt"; FILE* f = fopen(test_file.c_str(), "we"); @@ -197,133 +198,133 @@ TEST(EnvPosixTest, TestOpenOnRead) { const int kNumFiles = kReadOnlyFileLimit + kMMapLimit + 5; leveldb::RandomAccessFile* files[kNumFiles] = {0}; for (int i = 0; i < kNumFiles; i++) { - ASSERT_OK(env_->NewRandomAccessFile(test_file, &files[i])); + ASSERT_LEVELDB_OK(env_->NewRandomAccessFile(test_file, &files[i])); } char scratch; Slice read_result; for (int i = 0; i < kNumFiles; i++) { - ASSERT_OK(files[i]->Read(i, 1, &read_result, &scratch)); + ASSERT_LEVELDB_OK(files[i]->Read(i, 1, &read_result, &scratch)); ASSERT_EQ(kFileData[i], read_result[0]); } for (int i = 0; i < kNumFiles; i++) { delete files[i]; } - ASSERT_OK(env_->DeleteFile(test_file)); + ASSERT_LEVELDB_OK(env_->DeleteFile(test_file)); } #if HAVE_O_CLOEXEC -TEST(EnvPosixTest, TestCloseOnExecSequentialFile) { +TEST_F(EnvPosixTest, TestCloseOnExecSequentialFile) { std::unordered_set open_fds; GetOpenFileDescriptors(&open_fds); std::string test_dir; - ASSERT_OK(env_->GetTestDirectory(&test_dir)); + ASSERT_LEVELDB_OK(env_->GetTestDirectory(&test_dir)); std::string file_path = test_dir + "/close_on_exec_sequential.txt"; - ASSERT_OK(WriteStringToFile(env_, "0123456789", file_path)); + ASSERT_LEVELDB_OK(WriteStringToFile(env_, "0123456789", file_path)); leveldb::SequentialFile* file = nullptr; - ASSERT_OK(env_->NewSequentialFile(file_path, &file)); + ASSERT_LEVELDB_OK(env_->NewSequentialFile(file_path, &file)); CheckCloseOnExecDoesNotLeakFDs(open_fds); delete file; - ASSERT_OK(env_->DeleteFile(file_path)); + ASSERT_LEVELDB_OK(env_->DeleteFile(file_path)); } -TEST(EnvPosixTest, TestCloseOnExecRandomAccessFile) { +TEST_F(EnvPosixTest, TestCloseOnExecRandomAccessFile) { std::unordered_set open_fds; GetOpenFileDescriptors(&open_fds); std::string test_dir; - ASSERT_OK(env_->GetTestDirectory(&test_dir)); + ASSERT_LEVELDB_OK(env_->GetTestDirectory(&test_dir)); std::string file_path = test_dir + "/close_on_exec_random_access.txt"; - ASSERT_OK(WriteStringToFile(env_, "0123456789", file_path)); + ASSERT_LEVELDB_OK(WriteStringToFile(env_, "0123456789", file_path)); // Exhaust the RandomAccessFile mmap limit. This way, the test // RandomAccessFile instance below is backed by a file descriptor, not by an // mmap region. 
leveldb::RandomAccessFile* mmapped_files[kReadOnlyFileLimit] = {nullptr}; for (int i = 0; i < kReadOnlyFileLimit; i++) { - ASSERT_OK(env_->NewRandomAccessFile(file_path, &mmapped_files[i])); + ASSERT_LEVELDB_OK(env_->NewRandomAccessFile(file_path, &mmapped_files[i])); } leveldb::RandomAccessFile* file = nullptr; - ASSERT_OK(env_->NewRandomAccessFile(file_path, &file)); + ASSERT_LEVELDB_OK(env_->NewRandomAccessFile(file_path, &file)); CheckCloseOnExecDoesNotLeakFDs(open_fds); delete file; for (int i = 0; i < kReadOnlyFileLimit; i++) { delete mmapped_files[i]; } - ASSERT_OK(env_->DeleteFile(file_path)); + ASSERT_LEVELDB_OK(env_->DeleteFile(file_path)); } -TEST(EnvPosixTest, TestCloseOnExecWritableFile) { +TEST_F(EnvPosixTest, TestCloseOnExecWritableFile) { std::unordered_set open_fds; GetOpenFileDescriptors(&open_fds); std::string test_dir; - ASSERT_OK(env_->GetTestDirectory(&test_dir)); + ASSERT_LEVELDB_OK(env_->GetTestDirectory(&test_dir)); std::string file_path = test_dir + "/close_on_exec_writable.txt"; - ASSERT_OK(WriteStringToFile(env_, "0123456789", file_path)); + ASSERT_LEVELDB_OK(WriteStringToFile(env_, "0123456789", file_path)); leveldb::WritableFile* file = nullptr; - ASSERT_OK(env_->NewWritableFile(file_path, &file)); + ASSERT_LEVELDB_OK(env_->NewWritableFile(file_path, &file)); CheckCloseOnExecDoesNotLeakFDs(open_fds); delete file; - ASSERT_OK(env_->DeleteFile(file_path)); + ASSERT_LEVELDB_OK(env_->DeleteFile(file_path)); } -TEST(EnvPosixTest, TestCloseOnExecAppendableFile) { +TEST_F(EnvPosixTest, TestCloseOnExecAppendableFile) { std::unordered_set open_fds; GetOpenFileDescriptors(&open_fds); std::string test_dir; - ASSERT_OK(env_->GetTestDirectory(&test_dir)); + ASSERT_LEVELDB_OK(env_->GetTestDirectory(&test_dir)); std::string file_path = test_dir + "/close_on_exec_appendable.txt"; - ASSERT_OK(WriteStringToFile(env_, "0123456789", file_path)); + ASSERT_LEVELDB_OK(WriteStringToFile(env_, "0123456789", file_path)); leveldb::WritableFile* file = nullptr; - ASSERT_OK(env_->NewAppendableFile(file_path, &file)); + ASSERT_LEVELDB_OK(env_->NewAppendableFile(file_path, &file)); CheckCloseOnExecDoesNotLeakFDs(open_fds); delete file; - ASSERT_OK(env_->DeleteFile(file_path)); + ASSERT_LEVELDB_OK(env_->DeleteFile(file_path)); } -TEST(EnvPosixTest, TestCloseOnExecLockFile) { +TEST_F(EnvPosixTest, TestCloseOnExecLockFile) { std::unordered_set open_fds; GetOpenFileDescriptors(&open_fds); std::string test_dir; - ASSERT_OK(env_->GetTestDirectory(&test_dir)); + ASSERT_LEVELDB_OK(env_->GetTestDirectory(&test_dir)); std::string file_path = test_dir + "/close_on_exec_lock.txt"; - ASSERT_OK(WriteStringToFile(env_, "0123456789", file_path)); + ASSERT_LEVELDB_OK(WriteStringToFile(env_, "0123456789", file_path)); leveldb::FileLock* lock = nullptr; - ASSERT_OK(env_->LockFile(file_path, &lock)); + ASSERT_LEVELDB_OK(env_->LockFile(file_path, &lock)); CheckCloseOnExecDoesNotLeakFDs(open_fds); - ASSERT_OK(env_->UnlockFile(lock)); + ASSERT_LEVELDB_OK(env_->UnlockFile(lock)); - ASSERT_OK(env_->DeleteFile(file_path)); + ASSERT_LEVELDB_OK(env_->DeleteFile(file_path)); } -TEST(EnvPosixTest, TestCloseOnExecLogger) { +TEST_F(EnvPosixTest, TestCloseOnExecLogger) { std::unordered_set open_fds; GetOpenFileDescriptors(&open_fds); std::string test_dir; - ASSERT_OK(env_->GetTestDirectory(&test_dir)); + ASSERT_LEVELDB_OK(env_->GetTestDirectory(&test_dir)); std::string file_path = test_dir + "/close_on_exec_logger.txt"; - ASSERT_OK(WriteStringToFile(env_, "0123456789", file_path)); + 
ASSERT_LEVELDB_OK(WriteStringToFile(env_, "0123456789", file_path)); leveldb::Logger* file = nullptr; - ASSERT_OK(env_->NewLogger(file_path, &file)); + ASSERT_LEVELDB_OK(env_->NewLogger(file_path, &file)); CheckCloseOnExecDoesNotLeakFDs(open_fds); delete file; - ASSERT_OK(env_->DeleteFile(file_path)); + ASSERT_LEVELDB_OK(env_->DeleteFile(file_path)); } #endif // HAVE_O_CLOEXEC @@ -346,5 +347,7 @@ int main(int argc, char** argv) { // All tests currently run with the same read-only file limits. leveldb::EnvPosixTest::SetFileLimits(leveldb::kReadOnlyFileLimit, leveldb::kMMapLimit); - return leveldb::test::RunAllTests(); + + testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); } diff --git a/util/env_test.cc b/util/env_test.cc index 7db03fc..2a1f73b 100644 --- a/util/env_test.cc +++ b/util/env_test.cc @@ -6,32 +6,32 @@ #include +#include "third_party/googletest/googletest/include/gtest/gtest.h" #include "port/port.h" #include "port/thread_annotations.h" #include "util/mutexlock.h" -#include "util/testharness.h" #include "util/testutil.h" namespace leveldb { static const int kDelayMicros = 100000; -class EnvTest { +class EnvTest : public testing::Test { public: EnvTest() : env_(Env::Default()) {} Env* env_; }; -TEST(EnvTest, ReadWrite) { +TEST_F(EnvTest, ReadWrite) { Random rnd(test::RandomSeed()); // Get file to use for testing. std::string test_dir; - ASSERT_OK(env_->GetTestDirectory(&test_dir)); + ASSERT_LEVELDB_OK(env_->GetTestDirectory(&test_dir)); std::string test_file_name = test_dir + "/open_on_read.txt"; WritableFile* writable_file; - ASSERT_OK(env_->NewWritableFile(test_file_name, &writable_file)); + ASSERT_LEVELDB_OK(env_->NewWritableFile(test_file_name, &writable_file)); // Fill a file with data generated via a sequence of randomly sized writes. static const size_t kDataSize = 10 * 1048576; @@ -40,26 +40,26 @@ TEST(EnvTest, ReadWrite) { int len = rnd.Skewed(18); // Up to 2^18 - 1, but typically much smaller std::string r; test::RandomString(&rnd, len, &r); - ASSERT_OK(writable_file->Append(r)); + ASSERT_LEVELDB_OK(writable_file->Append(r)); data += r; if (rnd.OneIn(10)) { - ASSERT_OK(writable_file->Flush()); + ASSERT_LEVELDB_OK(writable_file->Flush()); } } - ASSERT_OK(writable_file->Sync()); - ASSERT_OK(writable_file->Close()); + ASSERT_LEVELDB_OK(writable_file->Sync()); + ASSERT_LEVELDB_OK(writable_file->Close()); delete writable_file; // Read all data using a sequence of randomly sized reads. 
SequentialFile* sequential_file; - ASSERT_OK(env_->NewSequentialFile(test_file_name, &sequential_file)); + ASSERT_LEVELDB_OK(env_->NewSequentialFile(test_file_name, &sequential_file)); std::string read_result; std::string scratch; while (read_result.size() < data.size()) { int len = std::min(rnd.Skewed(18), data.size() - read_result.size()); scratch.resize(std::max(len, 1)); // at least 1 so &scratch[0] is legal Slice read; - ASSERT_OK(sequential_file->Read(len, &read, &scratch[0])); + ASSERT_LEVELDB_OK(sequential_file->Read(len, &read, &scratch[0])); if (len > 0) { ASSERT_GT(read.size(), 0); } @@ -70,7 +70,7 @@ TEST(EnvTest, ReadWrite) { delete sequential_file; } -TEST(EnvTest, RunImmediately) { +TEST_F(EnvTest, RunImmediately) { struct RunState { port::Mutex mu; port::CondVar cvar{&mu}; @@ -94,7 +94,7 @@ TEST(EnvTest, RunImmediately) { } } -TEST(EnvTest, RunMany) { +TEST_F(EnvTest, RunMany) { struct RunState { port::Mutex mu; port::CondVar cvar{&mu}; @@ -153,7 +153,7 @@ static void ThreadBody(void* arg) { s->mu.Unlock(); } -TEST(EnvTest, StartThread) { +TEST_F(EnvTest, StartThread) { State state(0, 3); for (int i = 0; i < 3; i++) { env_->StartThread(&ThreadBody, &state); @@ -166,10 +166,10 @@ TEST(EnvTest, StartThread) { ASSERT_EQ(state.val, 3); } -TEST(EnvTest, TestOpenNonExistentFile) { +TEST_F(EnvTest, TestOpenNonExistentFile) { // Write some test data to a single file that will be opened |n| times. std::string test_dir; - ASSERT_OK(env_->GetTestDirectory(&test_dir)); + ASSERT_LEVELDB_OK(env_->GetTestDirectory(&test_dir)); std::string non_existent_file = test_dir + "/non_existent_file"; ASSERT_TRUE(!env_->FileExists(non_existent_file)); @@ -184,54 +184,57 @@ TEST(EnvTest, TestOpenNonExistentFile) { ASSERT_TRUE(status.IsNotFound()); } -TEST(EnvTest, ReopenWritableFile) { +TEST_F(EnvTest, ReopenWritableFile) { std::string test_dir; - ASSERT_OK(env_->GetTestDirectory(&test_dir)); + ASSERT_LEVELDB_OK(env_->GetTestDirectory(&test_dir)); std::string test_file_name = test_dir + "/reopen_writable_file.txt"; env_->DeleteFile(test_file_name); WritableFile* writable_file; - ASSERT_OK(env_->NewWritableFile(test_file_name, &writable_file)); + ASSERT_LEVELDB_OK(env_->NewWritableFile(test_file_name, &writable_file)); std::string data("hello world!"); - ASSERT_OK(writable_file->Append(data)); - ASSERT_OK(writable_file->Close()); + ASSERT_LEVELDB_OK(writable_file->Append(data)); + ASSERT_LEVELDB_OK(writable_file->Close()); delete writable_file; - ASSERT_OK(env_->NewWritableFile(test_file_name, &writable_file)); + ASSERT_LEVELDB_OK(env_->NewWritableFile(test_file_name, &writable_file)); data = "42"; - ASSERT_OK(writable_file->Append(data)); - ASSERT_OK(writable_file->Close()); + ASSERT_LEVELDB_OK(writable_file->Append(data)); + ASSERT_LEVELDB_OK(writable_file->Close()); delete writable_file; - ASSERT_OK(ReadFileToString(env_, test_file_name, &data)); + ASSERT_LEVELDB_OK(ReadFileToString(env_, test_file_name, &data)); ASSERT_EQ(std::string("42"), data); env_->DeleteFile(test_file_name); } -TEST(EnvTest, ReopenAppendableFile) { +TEST_F(EnvTest, ReopenAppendableFile) { std::string test_dir; - ASSERT_OK(env_->GetTestDirectory(&test_dir)); + ASSERT_LEVELDB_OK(env_->GetTestDirectory(&test_dir)); std::string test_file_name = test_dir + "/reopen_appendable_file.txt"; env_->DeleteFile(test_file_name); WritableFile* appendable_file; - ASSERT_OK(env_->NewAppendableFile(test_file_name, &appendable_file)); + ASSERT_LEVELDB_OK(env_->NewAppendableFile(test_file_name, &appendable_file)); std::string data("hello 
world!"); - ASSERT_OK(appendable_file->Append(data)); - ASSERT_OK(appendable_file->Close()); + ASSERT_LEVELDB_OK(appendable_file->Append(data)); + ASSERT_LEVELDB_OK(appendable_file->Close()); delete appendable_file; - ASSERT_OK(env_->NewAppendableFile(test_file_name, &appendable_file)); + ASSERT_LEVELDB_OK(env_->NewAppendableFile(test_file_name, &appendable_file)); data = "42"; - ASSERT_OK(appendable_file->Append(data)); - ASSERT_OK(appendable_file->Close()); + ASSERT_LEVELDB_OK(appendable_file->Append(data)); + ASSERT_LEVELDB_OK(appendable_file->Close()); delete appendable_file; - ASSERT_OK(ReadFileToString(env_, test_file_name, &data)); + ASSERT_LEVELDB_OK(ReadFileToString(env_, test_file_name, &data)); ASSERT_EQ(std::string("hello world!42"), data); env_->DeleteFile(test_file_name); } } // namespace leveldb -int main(int argc, char** argv) { return leveldb::test::RunAllTests(); } +int main(int argc, char** argv) { + testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} diff --git a/util/env_windows_test.cc b/util/env_windows_test.cc index 3c22133..b926107 100644 --- a/util/env_windows_test.cc +++ b/util/env_windows_test.cc @@ -2,17 +2,17 @@ // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. See the AUTHORS file for names of contributors. +#include "third_party/googletest/googletest/include/gtest/gtest.h" #include "leveldb/env.h" - #include "port/port.h" #include "util/env_windows_test_helper.h" -#include "util/testharness.h" +#include "util/testutil.h" namespace leveldb { static const int kMMapLimit = 4; -class EnvWindowsTest { +class EnvWindowsTest : public testing::Test { public: static void SetFileLimits(int mmap_limit) { EnvWindowsTestHelper::SetReadOnlyMMapLimit(mmap_limit); @@ -23,10 +23,10 @@ class EnvWindowsTest { Env* env_; }; -TEST(EnvWindowsTest, TestOpenOnRead) { +TEST_F(EnvWindowsTest, TestOpenOnRead) { // Write some test data to a single file that will be opened |n| times. std::string test_dir; - ASSERT_OK(env_->GetTestDirectory(&test_dir)); + ASSERT_LEVELDB_OK(env_->GetTestDirectory(&test_dir)); std::string test_file = test_dir + "/open_on_read.txt"; FILE* f = fopen(test_file.c_str(), "w"); @@ -41,18 +41,18 @@ TEST(EnvWindowsTest, TestOpenOnRead) { const int kNumFiles = kMMapLimit + 5; leveldb::RandomAccessFile* files[kNumFiles] = {0}; for (int i = 0; i < kNumFiles; i++) { - ASSERT_OK(env_->NewRandomAccessFile(test_file, &files[i])); + ASSERT_LEVELDB_OK(env_->NewRandomAccessFile(test_file, &files[i])); } char scratch; Slice read_result; for (int i = 0; i < kNumFiles; i++) { - ASSERT_OK(files[i]->Read(i, 1, &read_result, &scratch)); + ASSERT_LEVELDB_OK(files[i]->Read(i, 1, &read_result, &scratch)); ASSERT_EQ(kFileData[i], read_result[0]); } for (int i = 0; i < kNumFiles; i++) { delete files[i]; } - ASSERT_OK(env_->DeleteFile(test_file)); + ASSERT_LEVELDB_OK(env_->DeleteFile(test_file)); } } // namespace leveldb @@ -60,5 +60,6 @@ TEST(EnvWindowsTest, TestOpenOnRead) { int main(int argc, char** argv) { // All tests currently run with the same read-only file limits. leveldb::EnvWindowsTest::SetFileLimits(leveldb::kMMapLimit); - return leveldb::test::RunAllTests(); + testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); } diff --git a/util/hash_test.cc b/util/hash_test.cc index 21f8171..e970c1e 100644 --- a/util/hash_test.cc +++ b/util/hash_test.cc @@ -3,11 +3,10 @@ // found in the LICENSE file. See the AUTHORS file for names of contributors. 
#include "util/hash.h" -#include "util/testharness.h" -namespace leveldb { +#include "third_party/googletest/googletest/include/gtest/gtest.h" -class HASH {}; +namespace leveldb { TEST(HASH, SignedUnsignedIssue) { const uint8_t data1[1] = {0x62}; @@ -41,4 +40,7 @@ TEST(HASH, SignedUnsignedIssue) { } // namespace leveldb -int main(int argc, char** argv) { return leveldb::test::RunAllTests(); } +int main(int argc, char** argv) { + testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} diff --git a/util/logging_test.cc b/util/logging_test.cc index 389cbeb..92417aa 100644 --- a/util/logging_test.cc +++ b/util/logging_test.cc @@ -2,17 +2,16 @@ // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. See the AUTHORS file for names of contributors. +#include "util/logging.h" + #include #include +#include "third_party/googletest/googletest/include/gtest/gtest.h" #include "leveldb/slice.h" -#include "util/logging.h" -#include "util/testharness.h" namespace leveldb { -class Logging {}; - TEST(Logging, NumberToString) { ASSERT_EQ("0", NumberToString(0)); ASSERT_EQ("1", NumberToString(1)); @@ -140,4 +139,7 @@ TEST(Logging, ConsumeDecimalNumberNoDigits) { } // namespace leveldb -int main(int argc, char** argv) { return leveldb::test::RunAllTests(); } +int main(int argc, char** argv) { + testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} diff --git a/util/no_destructor_test.cc b/util/no_destructor_test.cc index b41caca..edafb08 100644 --- a/util/no_destructor_test.cc +++ b/util/no_destructor_test.cc @@ -2,12 +2,13 @@ // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. See the AUTHORS file for names of contributors. +#include "util/no_destructor.h" + #include #include #include -#include "util/no_destructor.h" -#include "util/testharness.h" +#include "third_party/googletest/googletest/include/gtest/gtest.h" namespace leveldb { @@ -28,8 +29,6 @@ constexpr const uint64_t kGoldenB = 0xaabbccddeeffaabb; } // namespace -class NoDestructorTest {}; - TEST(NoDestructorTest, StackInstance) { NoDestructor instance(kGoldenA, kGoldenB); ASSERT_EQ(kGoldenA, instance.get()->a); @@ -44,4 +43,7 @@ TEST(NoDestructorTest, StaticInstance) { } // namespace leveldb -int main(int argc, char** argv) { return leveldb::test::RunAllTests(); } +int main(int argc, char** argv) { + testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} diff --git a/util/status_test.cc b/util/status_test.cc index 2842319..b7e2444 100644 --- a/util/status_test.cc +++ b/util/status_test.cc @@ -2,11 +2,12 @@ // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. See the AUTHORS file for names of contributors. +#include "leveldb/status.h" + #include +#include "third_party/googletest/googletest/include/gtest/gtest.h" #include "leveldb/slice.h" -#include "leveldb/status.h" -#include "util/testharness.h" namespace leveldb { @@ -37,4 +38,7 @@ TEST(Status, MoveConstructor) { } // namespace leveldb -int main(int argc, char** argv) { return leveldb::test::RunAllTests(); } +int main(int argc, char** argv) { + testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} diff --git a/util/testharness.cc b/util/testharness.cc deleted file mode 100644 index 318ecfa..0000000 --- a/util/testharness.cc +++ /dev/null @@ -1,81 +0,0 @@ -// Copyright (c) 2011 The LevelDB Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. See the AUTHORS file for names of contributors. - -#include "util/testharness.h" - -#include -#include -#include - -#include -#include - -#include "leveldb/env.h" - -namespace leveldb { -namespace test { - -namespace { -struct Test { - const char* base; - const char* name; - void (*func)(); -}; -std::vector* tests; -} // namespace - -bool RegisterTest(const char* base, const char* name, void (*func)()) { - if (tests == nullptr) { - tests = new std::vector; - } - Test t; - t.base = base; - t.name = name; - t.func = func; - tests->push_back(t); - return true; -} - -int RunAllTests() { - const char* matcher = getenv("LEVELDB_TESTS"); - - int num = 0; - if (tests != nullptr) { - for (size_t i = 0; i < tests->size(); i++) { - const Test& t = (*tests)[i]; - if (matcher != nullptr) { - std::string name = t.base; - name.push_back('.'); - name.append(t.name); - if (strstr(name.c_str(), matcher) == nullptr) { - continue; - } - } - fprintf(stderr, "==== Test %s.%s\n", t.base, t.name); - (*t.func)(); - ++num; - } - } - fprintf(stderr, "==== PASSED %d tests\n", num); - return 0; -} - -std::string TmpDir() { - std::string dir; - Status s = Env::Default()->GetTestDirectory(&dir); - ASSERT_TRUE(s.ok()) << s.ToString(); - return dir; -} - -int RandomSeed() { - const char* env = getenv("TEST_RANDOM_SEED"); - int result = (env != nullptr ? atoi(env) : 301); - if (result <= 0) { - result = 301; - } - return result; -} - -} // namespace test -} // namespace leveldb diff --git a/util/testharness.h b/util/testharness.h deleted file mode 100644 index 72cd162..0000000 --- a/util/testharness.h +++ /dev/null @@ -1,141 +0,0 @@ -// Copyright (c) 2011 The LevelDB Authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. See the AUTHORS file for names of contributors. - -#ifndef STORAGE_LEVELDB_UTIL_TESTHARNESS_H_ -#define STORAGE_LEVELDB_UTIL_TESTHARNESS_H_ - -#include -#include - -#include - -#include "leveldb/status.h" - -namespace leveldb { -namespace test { - -// Run some of the tests registered by the TEST() macro. If the -// environment variable "LEVELDB_TESTS" is not set, runs all tests. -// Otherwise, runs only the tests whose name contains the value of -// "LEVELDB_TESTS" as a substring. E.g., suppose the tests are: -// TEST(Foo, Hello) { ... } -// TEST(Foo, World) { ... } -// LEVELDB_TESTS=Hello will run the first test -// LEVELDB_TESTS=o will run both tests -// LEVELDB_TESTS=Junk will run no tests -// -// Returns 0 if all tests pass. -// Dies or returns a non-zero value if some test fails. -int RunAllTests(); - -// Return the directory to use for temporary storage. -std::string TmpDir(); - -// Return a randomization seed for this run. Typically returns the -// same number on repeated invocations of this binary, but automated -// runs may be able to vary the seed. -int RandomSeed(); - -// An instance of Tester is allocated to hold temporary state during -// the execution of an assertion. 
-class Tester { - private: - bool ok_; - const char* fname_; - int line_; - std::stringstream ss_; - - public: - Tester(const char* f, int l) : ok_(true), fname_(f), line_(l) {} - - ~Tester() { - if (!ok_) { - fprintf(stderr, "%s:%d:%s\n", fname_, line_, ss_.str().c_str()); - exit(1); - } - } - - Tester& Is(bool b, const char* msg) { - if (!b) { - ss_ << " Assertion failure " << msg; - ok_ = false; - } - return *this; - } - - Tester& IsOk(const Status& s) { - if (!s.ok()) { - ss_ << " " << s.ToString(); - ok_ = false; - } - return *this; - } - -#define BINARY_OP(name, op) \ - template \ - Tester& name(const X& x, const Y& y) { \ - if (!(x op y)) { \ - ss_ << " failed: " << x << (" " #op " ") << y; \ - ok_ = false; \ - } \ - return *this; \ - } - - BINARY_OP(IsEq, ==) - BINARY_OP(IsNe, !=) - BINARY_OP(IsGe, >=) - BINARY_OP(IsGt, >) - BINARY_OP(IsLe, <=) - BINARY_OP(IsLt, <) -#undef BINARY_OP - - // Attach the specified value to the error message if an error has occurred - template - Tester& operator<<(const V& value) { - if (!ok_) { - ss_ << " " << value; - } - return *this; - } -}; - -#define ASSERT_TRUE(c) ::leveldb::test::Tester(__FILE__, __LINE__).Is((c), #c) -#define ASSERT_OK(s) ::leveldb::test::Tester(__FILE__, __LINE__).IsOk((s)) -#define ASSERT_EQ(a, b) \ - ::leveldb::test::Tester(__FILE__, __LINE__).IsEq((a), (b)) -#define ASSERT_NE(a, b) \ - ::leveldb::test::Tester(__FILE__, __LINE__).IsNe((a), (b)) -#define ASSERT_GE(a, b) \ - ::leveldb::test::Tester(__FILE__, __LINE__).IsGe((a), (b)) -#define ASSERT_GT(a, b) \ - ::leveldb::test::Tester(__FILE__, __LINE__).IsGt((a), (b)) -#define ASSERT_LE(a, b) \ - ::leveldb::test::Tester(__FILE__, __LINE__).IsLe((a), (b)) -#define ASSERT_LT(a, b) \ - ::leveldb::test::Tester(__FILE__, __LINE__).IsLt((a), (b)) - -#define TCONCAT(a, b) TCONCAT1(a, b) -#define TCONCAT1(a, b) a##b - -#define TEST(base, name) \ - class TCONCAT(_Test_, name) : public base { \ - public: \ - void _Run(); \ - static void _RunIt() { \ - TCONCAT(_Test_, name) t; \ - t._Run(); \ - } \ - }; \ - bool TCONCAT(_Test_ignored_, name) = ::leveldb::test::RegisterTest( \ - #base, #name, &TCONCAT(_Test_, name)::_RunIt); \ - void TCONCAT(_Test_, name)::_Run() - -// Register the specified test. Typically not used directly, but -// invoked via the macro expansion of TEST. -bool RegisterTest(const char* base, const char* name, void (*func)()); - -} // namespace test -} // namespace leveldb - -#endif // STORAGE_LEVELDB_UTIL_TESTHARNESS_H_ diff --git a/util/testutil.cc b/util/testutil.cc index 6b151b9..5f77b08 100644 --- a/util/testutil.cc +++ b/util/testutil.cc @@ -4,6 +4,8 @@ #include "util/testutil.h" +#include + #include "util/random.h" namespace leveldb { diff --git a/util/testutil.h b/util/testutil.h index bb4051b..5765afb 100644 --- a/util/testutil.h +++ b/util/testutil.h @@ -5,6 +5,8 @@ #ifndef STORAGE_LEVELDB_UTIL_TESTUTIL_H_ #define STORAGE_LEVELDB_UTIL_TESTUTIL_H_ +#include "third_party/googletest/googlemock/include/gmock/gmock.h" +#include "third_party/googletest/googletest/include/gtest/gtest.h" #include "helpers/memenv/memenv.h" #include "leveldb/env.h" #include "leveldb/slice.h" @@ -13,6 +15,20 @@ namespace leveldb { namespace test { +MATCHER(IsOK, "") { return arg.ok(); } + +// Macros for testing the results of functions that return leveldb::Status or +// util::StatusOr (for any type T). 
+#define EXPECT_LEVELDB_OK(expression) \ + EXPECT_THAT(expression, leveldb::test::IsOK()) +#define ASSERT_LEVELDB_OK(expression) \ + ASSERT_THAT(expression, leveldb::test::IsOK()) + +// Returns the random seed used at the start of the current test run. +inline int RandomSeed() { + return testing::UnitTest::GetInstance()->random_seed(); +} + // Store in *dst a random string of length "len" and return a Slice that // references the generated data. Slice RandomString(Random* rnd, int len, std::string* dst); From db8352187b2c2d037b6fe215b7f82415789ec71f Mon Sep 17 00:00:00 2001 From: Victor Costan Date: Mon, 25 Nov 2019 07:22:35 -0800 Subject: [PATCH 10/68] Fixup for adding the third_party/googletest submodule. (#754) --- third_party/googletest | 1 + 1 file changed, 1 insertion(+) create mode 160000 third_party/googletest diff --git a/third_party/googletest b/third_party/googletest new file mode 160000 index 0000000..c27aceb --- /dev/null +++ b/third_party/googletest @@ -0,0 +1 @@ +Subproject commit c27acebba3b3c7d94209e0467b0a801db4af73ed From 583a42b5961dfd1804b5588a0ad2723becc8cc6a Mon Sep 17 00:00:00 2001 From: leveldb Team Date: Mon, 25 Nov 2019 09:29:06 -0800 Subject: [PATCH 11/68] Internal change. PiperOrigin-RevId: 282373286 --- db/autocompact_test.cc | 2 +- db/corruption_test.cc | 2 +- db/db_test.cc | 2 +- db/dbformat_test.cc | 2 +- db/fault_injection_test.cc | 2 +- db/filename_test.cc | 2 +- db/log_test.cc | 2 +- db/recovery_test.cc | 2 +- db/skiplist_test.cc | 2 +- db/version_edit_test.cc | 2 +- db/version_set_test.cc | 2 +- db/write_batch_test.cc | 2 +- helpers/memenv/memenv_test.cc | 2 +- issues/issue178_test.cc | 2 +- issues/issue200_test.cc | 2 +- issues/issue320_test.cc | 2 +- table/filter_block_test.cc | 2 +- table/table_test.cc | 2 +- third_party/googletest | 1 - util/arena_test.cc | 2 +- util/bloom_test.cc | 2 +- util/cache_test.cc | 2 +- util/coding_test.cc | 2 +- util/crc32c_test.cc | 2 +- util/env_posix_test.cc | 2 +- util/env_test.cc | 2 +- util/env_windows_test.cc | 2 +- util/hash_test.cc | 2 +- util/logging_test.cc | 2 +- util/no_destructor_test.cc | 2 +- util/status_test.cc | 2 +- util/testutil.h | 4 ++-- 32 files changed, 32 insertions(+), 33 deletions(-) delete mode 160000 third_party/googletest diff --git a/db/autocompact_test.cc b/db/autocompact_test.cc index d4caf71..9779c95 100644 --- a/db/autocompact_test.cc +++ b/db/autocompact_test.cc @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. See the AUTHORS file for names of contributors. 
-#include "third_party/googletest/googletest/include/gtest/gtest.h" +#include "gtest/gtest.h" #include "db/db_impl.h" #include "leveldb/cache.h" #include "leveldb/db.h" diff --git a/db/corruption_test.cc b/db/corruption_test.cc index 4d20946..b22f9e7 100644 --- a/db/corruption_test.cc +++ b/db/corruption_test.cc @@ -4,7 +4,7 @@ #include -#include "third_party/googletest/googletest/include/gtest/gtest.h" +#include "gtest/gtest.h" #include "db/db_impl.h" #include "db/filename.h" #include "db/log_format.h" diff --git a/db/db_test.cc b/db/db_test.cc index e8e3495..1bd5afc 100644 --- a/db/db_test.cc +++ b/db/db_test.cc @@ -7,7 +7,7 @@ #include #include -#include "third_party/googletest/googletest/include/gtest/gtest.h" +#include "gtest/gtest.h" #include "db/db_impl.h" #include "db/filename.h" #include "db/version_set.h" diff --git a/db/dbformat_test.cc b/db/dbformat_test.cc index ca49e0a..4a11c4a 100644 --- a/db/dbformat_test.cc +++ b/db/dbformat_test.cc @@ -4,7 +4,7 @@ #include "db/dbformat.h" -#include "third_party/googletest/googletest/include/gtest/gtest.h" +#include "gtest/gtest.h" #include "util/logging.h" namespace leveldb { diff --git a/db/fault_injection_test.cc b/db/fault_injection_test.cc index 80b8f12..db8580c 100644 --- a/db/fault_injection_test.cc +++ b/db/fault_injection_test.cc @@ -9,7 +9,7 @@ #include #include -#include "third_party/googletest/googletest/include/gtest/gtest.h" +#include "gtest/gtest.h" #include "db/db_impl.h" #include "db/filename.h" #include "db/log_format.h" diff --git a/db/filename_test.cc b/db/filename_test.cc index ad0bc73..f291d72 100644 --- a/db/filename_test.cc +++ b/db/filename_test.cc @@ -4,7 +4,7 @@ #include "db/filename.h" -#include "third_party/googletest/googletest/include/gtest/gtest.h" +#include "gtest/gtest.h" #include "db/dbformat.h" #include "port/port.h" #include "util/logging.h" diff --git a/db/log_test.cc b/db/log_test.cc index 680f267..c765e93 100644 --- a/db/log_test.cc +++ b/db/log_test.cc @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. See the AUTHORS file for names of contributors. -#include "third_party/googletest/googletest/include/gtest/gtest.h" +#include "gtest/gtest.h" #include "db/log_reader.h" #include "db/log_writer.h" #include "leveldb/env.h" diff --git a/db/recovery_test.cc b/db/recovery_test.cc index 0657743..cf6574e 100644 --- a/db/recovery_test.cc +++ b/db/recovery_test.cc @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. See the AUTHORS file for names of contributors. 
-#include "third_party/googletest/googletest/include/gtest/gtest.h" +#include "gtest/gtest.h" #include "db/db_impl.h" #include "db/filename.h" #include "db/version_set.h" diff --git a/db/skiplist_test.cc b/db/skiplist_test.cc index 04b9fa7..7c5d09b 100644 --- a/db/skiplist_test.cc +++ b/db/skiplist_test.cc @@ -7,7 +7,7 @@ #include #include -#include "third_party/googletest/googletest/include/gtest/gtest.h" +#include "gtest/gtest.h" #include "leveldb/env.h" #include "port/port.h" #include "port/thread_annotations.h" diff --git a/db/version_edit_test.cc b/db/version_edit_test.cc index 228fa3b..39ea8b7 100644 --- a/db/version_edit_test.cc +++ b/db/version_edit_test.cc @@ -4,7 +4,7 @@ #include "db/version_edit.h" -#include "third_party/googletest/googletest/include/gtest/gtest.h" +#include "gtest/gtest.h" namespace leveldb { diff --git a/db/version_set_test.cc b/db/version_set_test.cc index 71b19a7..dee6b4c 100644 --- a/db/version_set_test.cc +++ b/db/version_set_test.cc @@ -4,7 +4,7 @@ #include "db/version_set.h" -#include "third_party/googletest/googletest/include/gtest/gtest.h" +#include "gtest/gtest.h" #include "util/logging.h" #include "util/testutil.h" diff --git a/db/write_batch_test.cc b/db/write_batch_test.cc index b33993a..64df9b8 100644 --- a/db/write_batch_test.cc +++ b/db/write_batch_test.cc @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. See the AUTHORS file for names of contributors. -#include "third_party/googletest/googletest/include/gtest/gtest.h" +#include "gtest/gtest.h" #include "db/memtable.h" #include "db/write_batch_internal.h" #include "leveldb/db.h" diff --git a/helpers/memenv/memenv_test.cc b/helpers/memenv/memenv_test.cc index 72e22da..2001101 100644 --- a/helpers/memenv/memenv_test.cc +++ b/helpers/memenv/memenv_test.cc @@ -7,7 +7,7 @@ #include #include -#include "third_party/googletest/googletest/include/gtest/gtest.h" +#include "gtest/gtest.h" #include "db/db_impl.h" #include "leveldb/db.h" #include "leveldb/env.h" diff --git a/issues/issue178_test.cc b/issues/issue178_test.cc index 4a52a1b..7fc43ea 100644 --- a/issues/issue178_test.cc +++ b/issues/issue178_test.cc @@ -7,7 +7,7 @@ #include #include -#include "third_party/googletest/googletest/include/gtest/gtest.h" +#include "gtest/gtest.h" #include "leveldb/db.h" #include "leveldb/write_batch.h" #include "util/testutil.h" diff --git a/issues/issue200_test.cc b/issues/issue200_test.cc index ee08bc6..4eba23a 100644 --- a/issues/issue200_test.cc +++ b/issues/issue200_test.cc @@ -6,7 +6,7 @@ // to forward, the current key can be yielded unexpectedly if a new // mutation has been added just before the current key. 
-#include "third_party/googletest/googletest/include/gtest/gtest.h" +#include "gtest/gtest.h" #include "leveldb/db.h" #include "util/testutil.h" diff --git a/issues/issue320_test.cc b/issues/issue320_test.cc index c289ab4..c08296a 100644 --- a/issues/issue320_test.cc +++ b/issues/issue320_test.cc @@ -9,7 +9,7 @@ #include #include -#include "third_party/googletest/googletest/include/gtest/gtest.h" +#include "gtest/gtest.h" #include "leveldb/db.h" #include "leveldb/write_batch.h" #include "util/testutil.h" diff --git a/table/filter_block_test.cc b/table/filter_block_test.cc index 53be948..91a6be2 100644 --- a/table/filter_block_test.cc +++ b/table/filter_block_test.cc @@ -4,7 +4,7 @@ #include "table/filter_block.h" -#include "third_party/googletest/googletest/include/gtest/gtest.h" +#include "gtest/gtest.h" #include "leveldb/filter_policy.h" #include "util/coding.h" #include "util/hash.h" diff --git a/table/table_test.cc b/table/table_test.cc index 09d1b5d..713b63e 100644 --- a/table/table_test.cc +++ b/table/table_test.cc @@ -7,7 +7,7 @@ #include #include -#include "third_party/googletest/googletest/include/gtest/gtest.h" +#include "gtest/gtest.h" #include "db/dbformat.h" #include "db/memtable.h" #include "db/write_batch_internal.h" diff --git a/third_party/googletest b/third_party/googletest deleted file mode 160000 index c27aceb..0000000 --- a/third_party/googletest +++ /dev/null @@ -1 +0,0 @@ -Subproject commit c27acebba3b3c7d94209e0467b0a801db4af73ed diff --git a/util/arena_test.cc b/util/arena_test.cc index 3f8855b..90226fe 100644 --- a/util/arena_test.cc +++ b/util/arena_test.cc @@ -4,7 +4,7 @@ #include "util/arena.h" -#include "third_party/googletest/googletest/include/gtest/gtest.h" +#include "gtest/gtest.h" #include "util/random.h" namespace leveldb { diff --git a/util/bloom_test.cc b/util/bloom_test.cc index bcbd7f6..bcf14dc 100644 --- a/util/bloom_test.cc +++ b/util/bloom_test.cc @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. See the AUTHORS file for names of contributors. 
-#include "third_party/googletest/googletest/include/gtest/gtest.h" +#include "gtest/gtest.h" #include "leveldb/filter_policy.h" #include "util/coding.h" #include "util/logging.h" diff --git a/util/cache_test.cc b/util/cache_test.cc index 8ce9463..b5d9873 100644 --- a/util/cache_test.cc +++ b/util/cache_test.cc @@ -6,7 +6,7 @@ #include -#include "third_party/googletest/googletest/include/gtest/gtest.h" +#include "gtest/gtest.h" #include "util/coding.h" namespace leveldb { diff --git a/util/coding_test.cc b/util/coding_test.cc index db83367..aa6c748 100644 --- a/util/coding_test.cc +++ b/util/coding_test.cc @@ -6,7 +6,7 @@ #include -#include "third_party/googletest/googletest/include/gtest/gtest.h" +#include "gtest/gtest.h" namespace leveldb { diff --git a/util/crc32c_test.cc b/util/crc32c_test.cc index 1e2aae7..647e561 100644 --- a/util/crc32c_test.cc +++ b/util/crc32c_test.cc @@ -4,7 +4,7 @@ #include "util/crc32c.h" -#include "third_party/googletest/googletest/include/gtest/gtest.h" +#include "gtest/gtest.h" namespace leveldb { namespace crc32c { diff --git a/util/env_posix_test.cc b/util/env_posix_test.cc index 5ee2248..ed4ac96 100644 --- a/util/env_posix_test.cc +++ b/util/env_posix_test.cc @@ -13,7 +13,7 @@ #include #include -#include "third_party/googletest/googletest/include/gtest/gtest.h" +#include "gtest/gtest.h" #include "leveldb/env.h" #include "port/port.h" #include "util/env_posix_test_helper.h" diff --git a/util/env_test.cc b/util/env_test.cc index 2a1f73b..b35ba05 100644 --- a/util/env_test.cc +++ b/util/env_test.cc @@ -6,7 +6,7 @@ #include -#include "third_party/googletest/googletest/include/gtest/gtest.h" +#include "gtest/gtest.h" #include "port/port.h" #include "port/thread_annotations.h" #include "util/mutexlock.h" diff --git a/util/env_windows_test.cc b/util/env_windows_test.cc index b926107..c75ca7b 100644 --- a/util/env_windows_test.cc +++ b/util/env_windows_test.cc @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. See the AUTHORS file for names of contributors. 
-#include "third_party/googletest/googletest/include/gtest/gtest.h" +#include "gtest/gtest.h" #include "leveldb/env.h" #include "port/port.h" #include "util/env_windows_test_helper.h" diff --git a/util/hash_test.cc b/util/hash_test.cc index e970c1e..6d6771f 100644 --- a/util/hash_test.cc +++ b/util/hash_test.cc @@ -4,7 +4,7 @@ #include "util/hash.h" -#include "third_party/googletest/googletest/include/gtest/gtest.h" +#include "gtest/gtest.h" namespace leveldb { diff --git a/util/logging_test.cc b/util/logging_test.cc index 92417aa..24e1fe9 100644 --- a/util/logging_test.cc +++ b/util/logging_test.cc @@ -7,7 +7,7 @@ #include #include -#include "third_party/googletest/googletest/include/gtest/gtest.h" +#include "gtest/gtest.h" #include "leveldb/slice.h" namespace leveldb { diff --git a/util/no_destructor_test.cc b/util/no_destructor_test.cc index edafb08..68fdfee 100644 --- a/util/no_destructor_test.cc +++ b/util/no_destructor_test.cc @@ -8,7 +8,7 @@ #include #include -#include "third_party/googletest/googletest/include/gtest/gtest.h" +#include "gtest/gtest.h" namespace leveldb { diff --git a/util/status_test.cc b/util/status_test.cc index b7e2444..914b386 100644 --- a/util/status_test.cc +++ b/util/status_test.cc @@ -6,7 +6,7 @@ #include -#include "third_party/googletest/googletest/include/gtest/gtest.h" +#include "gtest/gtest.h" #include "leveldb/slice.h" namespace leveldb { diff --git a/util/testutil.h b/util/testutil.h index 5765afb..cc67d96 100644 --- a/util/testutil.h +++ b/util/testutil.h @@ -5,8 +5,8 @@ #ifndef STORAGE_LEVELDB_UTIL_TESTUTIL_H_ #define STORAGE_LEVELDB_UTIL_TESTUTIL_H_ -#include "third_party/googletest/googlemock/include/gmock/gmock.h" -#include "third_party/googletest/googletest/include/gtest/gtest.h" +#include "gmock/gmock.h" +#include "gtest/gtest.h" #include "helpers/memenv/memenv.h" #include "leveldb/env.h" #include "leveldb/slice.h" From e36b831851fd2ff33ca0d9bee65cde7f395da10b Mon Sep 17 00:00:00 2001 From: Victor Costan Date: Mon, 2 Dec 2019 12:18:34 -0800 Subject: [PATCH 12/68] Fixup for adding the third_party/googletest submodule. --- third_party/googletest | 1 + 1 file changed, 1 insertion(+) create mode 160000 third_party/googletest diff --git a/third_party/googletest b/third_party/googletest new file mode 160000 index 0000000..c27aceb --- /dev/null +++ b/third_party/googletest @@ -0,0 +1 @@ +Subproject commit c27acebba3b3c7d94209e0467b0a801db4af73ed From 58a89bbcb28d02d5704c5fff7aeb6e72f7ca2431 Mon Sep 17 00:00:00 2001 From: Victor Costan Date: Mon, 2 Dec 2019 13:37:34 -0800 Subject: [PATCH 13/68] Add WITHOUT ROWID to SQLite benchmark. The SQLite-specific schema feature is documented at https://www.sqlite.org/withoutrowid.html and https://www.sqlite.org/rowidtable.html. By default, SQLite stores each table in a B-tree keyed by an integer, called the ROWID. Any index, including the PRIMARY KEY index, is a separate B-tree mapping index keys to ROWIDs. Tables without ROWIDs are stored in a B-tree keyed by the primary key. Additional indexes (the PRIMARY KEY index is implicitly built into the table) are stored as B-trees mapping index keys to row primary keys. This CL introduces a boolean --use-rowids flag to db_bench_sqlite. When the flag is false (default), the schema of the test table includes WITHOUT ROWID. The test table uses a primary key, so adding WITHOUT ROWID to the schema reduces the number of B-trees used by the benchmark from 2 to 1. This brings SQLite's disk usage closer to LevelDB. 
When WITHOUT ROWID is used, SQLite fares better (than today) on benchmarks with small (16-byte) keys, and worse on benchmarks with large (100kb) keys. Baseline results: fillseq : 21.310 micros/op; 5.2 MB/s fillseqsync : 146.377 micros/op; 0.8 MB/s (10000 ops) fillseqbatch : 2.065 micros/op; 53.6 MB/s fillrandom : 34.767 micros/op; 3.2 MB/s fillrandsync : 159.943 micros/op; 0.7 MB/s (10000 ops) fillrandbatch : 15.055 micros/op; 7.3 MB/s overwrite : 43.660 micros/op; 2.5 MB/s overwritebatch : 27.691 micros/op; 4.0 MB/s readrandom : 12.725 micros/op; readseq : 2.602 micros/op; 36.7 MB/s fillrand100K : 606.333 micros/op; 157.3 MB/s (1000 ops) fillseq100K : 657.457 micros/op; 145.1 MB/s (1000 ops) readseq : 46.523 micros/op; 2049.9 MB/s readrand100K : 54.943 micros/op; Results after this CL: fillseq : 16.231 micros/op; 6.8 MB/s fillseqsync : 147.460 micros/op; 0.8 MB/s (10000 ops) fillseqbatch : 2.294 micros/op; 48.2 MB/s fillrandom : 27.871 micros/op; 4.0 MB/s fillrandsync : 141.979 micros/op; 0.8 MB/s (10000 ops) fillrandbatch : 16.087 micros/op; 6.9 MB/s overwrite : 26.829 micros/op; 4.1 MB/s overwritebatch : 19.014 micros/op; 5.8 MB/s readrandom : 11.657 micros/op; readseq : 0.155 micros/op; 615.0 MB/s fillrand100K : 816.812 micros/op; 116.8 MB/s (1000 ops) fillseq100K : 754.689 micros/op; 126.4 MB/s (1000 ops) readseq : 47.112 micros/op; 2024.3 MB/s readrand100K : 287.679 micros/op; Results after this CL, with --use-rowids=1 fillseq : 20.655 micros/op; 5.4 MB/s fillseqsync : 146.408 micros/op; 0.8 MB/s (10000 ops) fillseqbatch : 2.045 micros/op; 54.1 MB/s fillrandom : 34.080 micros/op; 3.2 MB/s fillrandsync : 154.582 micros/op; 0.7 MB/s (10000 ops) fillrandbatch : 14.404 micros/op; 7.7 MB/s overwrite : 42.928 micros/op; 2.6 MB/s overwritebatch : 27.829 micros/op; 4.0 MB/s readrandom : 12.835 micros/op; readseq : 2.483 micros/op; 38.4 MB/s fillrand100K : 603.265 micros/op; 158.1 MB/s (1000 ops) fillseq100K : 662.473 micros/op; 144.0 MB/s (1000 ops) readseq : 45.478 micros/op; 2097.0 MB/s readrand100K : 54.439 micros/op; PiperOrigin-RevId: 283407101 --- benchmarks/db_bench_sqlite3.cc | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/benchmarks/db_bench_sqlite3.cc b/benchmarks/db_bench_sqlite3.cc index f183f4f..d3fe339 100644 --- a/benchmarks/db_bench_sqlite3.cc +++ b/benchmarks/db_bench_sqlite3.cc @@ -69,6 +69,9 @@ static int FLAGS_num_pages = 4096; // benchmark will fail. static bool FLAGS_use_existing_db = false; +// If true, the SQLite table has ROWIDs. 
+static bool FLAGS_use_rowids = false; + // If true, we allow batch writes to occur static bool FLAGS_transaction = true; @@ -462,6 +465,7 @@ class Benchmark { std::string locking_stmt = "PRAGMA locking_mode = EXCLUSIVE"; std::string create_stmt = "CREATE TABLE test (key blob, value blob, PRIMARY KEY(key))"; + if (!FLAGS_use_rowids) create_stmt += " WITHOUT ROWID"; std::string stmt_array[] = {locking_stmt, create_stmt}; int stmt_array_length = sizeof(stmt_array) / sizeof(std::string); for (int i = 0; i < stmt_array_length; i++) { @@ -678,6 +682,9 @@ int main(int argc, char** argv) { } else if (sscanf(argv[i], "--use_existing_db=%d%c", &n, &junk) == 1 && (n == 0 || n == 1)) { FLAGS_use_existing_db = n; + } else if (sscanf(argv[i], "--use_rowids=%d%c", &n, &junk) == 1 && + (n == 0 || n == 1)) { + FLAGS_use_rowids = n; } else if (sscanf(argv[i], "--num=%d%c", &n, &junk) == 1) { FLAGS_num = n; } else if (sscanf(argv[i], "--reads=%d%c", &n, &junk) == 1) { From d152b23f3b787f67a0ac3a40498e13831f3778d7 Mon Sep 17 00:00:00 2001 From: leveldb Team Date: Tue, 3 Dec 2019 13:15:21 -0800 Subject: [PATCH 14/68] Defend against inclusion of windows.h in tests that invoke Env::DeleteFile. PiperOrigin-RevId: 283607548 --- benchmarks/db_bench.cc | 5 +++++ db/db_test.cc | 5 +++++ db/fault_injection_test.cc | 5 +++++ db/recovery_test.cc | 5 +++++ helpers/memenv/memenv_test.cc | 5 +++++ util/env_test.cc | 5 +++++ 6 files changed, 30 insertions(+) diff --git a/benchmarks/db_bench.cc b/benchmarks/db_bench.cc index 3696023..397e23f 100644 --- a/benchmarks/db_bench.cc +++ b/benchmarks/db_bench.cc @@ -18,6 +18,11 @@ #include "util/random.h" #include "util/testutil.h" +#if defined(_WIN32) && defined(DeleteFile) +// See rationale in env.h +#undef DeleteFile +#endif + // Comma-separated list of operations to run in the specified order // Actual benchmarks: // fillseq -- write N values in sequential key order in async mode diff --git a/db/db_test.cc b/db/db_test.cc index 1bd5afc..3f41c36 100644 --- a/db/db_test.cc +++ b/db/db_test.cc @@ -23,6 +23,11 @@ #include "util/mutexlock.h" #include "util/testutil.h" +#if defined(_WIN32) && defined(DeleteFile) +// See rationale in env.h +#undef DeleteFile +#endif + namespace leveldb { static std::string RandomString(Random* rnd, int len) { diff --git a/db/fault_injection_test.cc b/db/fault_injection_test.cc index db8580c..b2d2adb 100644 --- a/db/fault_injection_test.cc +++ b/db/fault_injection_test.cc @@ -25,6 +25,11 @@ #include "util/mutexlock.h" #include "util/testutil.h" +#if defined(_WIN32) && defined(DeleteFile) +// See rationale in env.h +#undef DeleteFile +#endif + namespace leveldb { static const int kValueSize = 1000; diff --git a/db/recovery_test.cc b/db/recovery_test.cc index cf6574e..ea137e6 100644 --- a/db/recovery_test.cc +++ b/db/recovery_test.cc @@ -13,6 +13,11 @@ #include "util/logging.h" #include "util/testutil.h" +#if defined(_WIN32) && defined(DeleteFile) +// See rationale in env.h +#undef DeleteFile +#endif + namespace leveldb { class RecoveryTest : public testing::Test { diff --git a/helpers/memenv/memenv_test.cc b/helpers/memenv/memenv_test.cc index 2001101..619fe51 100644 --- a/helpers/memenv/memenv_test.cc +++ b/helpers/memenv/memenv_test.cc @@ -13,6 +13,11 @@ #include "leveldb/env.h" #include "util/testutil.h" +#if defined(_WIN32) && defined(DeleteFile) +// See rationale in env.h +#undef DeleteFile +#endif + namespace leveldb { class MemEnvTest : public testing::Test { diff --git a/util/env_test.cc b/util/env_test.cc index b35ba05..09e9d39 100644 --- 
a/util/env_test.cc +++ b/util/env_test.cc @@ -12,6 +12,11 @@ #include "util/mutexlock.h" #include "util/testutil.h" +#if defined(_WIN32) && defined(DeleteFile) +// See rationale in env.h +#undef DeleteFile +#endif + namespace leveldb { static const int kDelayMicros = 100000; From a0191e5563b7a6c24b39edcbdbff29e602e0acfc Mon Sep 17 00:00:00 2001 From: Victor Costan Date: Wed, 8 Jan 2020 09:14:53 -0800 Subject: [PATCH 15/68] Add Env::Remove{File,Dir} which obsolete Env::Delete{File,Dir}. The "DeleteFile" method name causes pain for Windows developers, because #defines a DeleteFile macro to DeleteFileW or DeleteFileA. Current code uses workarounds, like #undefining DeleteFile everywhere an Env is declared, implemented, or used. This CL removes the need for workarounds by renaming Env::DeleteFile to Env::RemoveFile. For consistency, Env::DeleteDir is also renamed to Env::RemoveDir. A few internal methods are also renamed for consistency. Software that supports Windows is expected to migrate any Env implementations and usage to Remove{File,Dir}, and never use the name Env::Delete{File,Dir} in its code. The renaming is done in a backwards-compatible way, at the risk of making it slightly more difficult to build a new correct Env implementation. The backwards compatibility is achieved using the following hacks: 1) Env::Remove{File,Dir} methods are added, with a default implementation that calls into Env::Delete{File,Dir}. This makes old Env implementations compatible with code that calls into the updated API. 2) The Env::Delete{File,Dir} methods are no longer pure virtuals. Instead, they gain a default implementation that calls into Env::Remove{File,Dir}. This makes updated Env implementations compatible with code that calls into the old API. The cost of this approach is that it's possible to write an Env without overriding either Rename{File,Dir} or Delete{File,Dir}, without getting a compiler warning. However, attempting to run the test suite will immediately fail with an infinite call stack ending in {Remove,Delete}{File,Dir}, making developers aware of the problem. 
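To make the mutual-default trick above concrete, here is a small self-contained sketch of the pattern. The class and method names are shortened and the int return type stands in for Status, so this illustrates the idea rather than reproducing the actual leveldb::Env code.

    #include <cstdio>
    #include <string>

    class Env {
     public:
      virtual ~Env() = default;
      // New name: the default forwards to the old name, so a legacy subclass
      // that only overrides DeleteFile keeps working with new call sites.
      virtual int RemoveFile(const std::string& f) { return DeleteFile(f); }
      // Old name: the default forwards to the new name, so an updated subclass
      // that only overrides RemoveFile keeps working with old call sites.
      virtual int DeleteFile(const std::string& f) { return RemoveFile(f); }
    };

    class LegacyEnv : public Env {  // Only knows the old API.
     public:
      int DeleteFile(const std::string& f) override {
        std::printf("legacy remove %s\n", f.c_str());
        return 0;
      }
    };

    class ModernEnv : public Env {  // Only knows the new API.
     public:
      int RemoveFile(const std::string& f) override {
        std::printf("modern remove %s\n", f.c_str());
        return 0;
      }
    };

    int main() {
      LegacyEnv legacy;
      ModernEnv modern;
      legacy.RemoveFile("a");  // Resolves to the legacy DeleteFile override.
      modern.DeleteFile("b");  // Resolves to the modern RemoveFile override.
      // A subclass that overrides neither method bounces between the two
      // defaults until the stack overflows, which is the failure mode noted
      // above.
      return 0;
    }
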
PiperOrigin-RevId: 288710907 --- benchmarks/db_bench.cc | 4 +-- benchmarks/db_bench_sqlite3.cc | 2 +- benchmarks/db_bench_tree_db.cc | 2 +- db/builder.cc | 2 +- db/db_impl.cc | 20 ++++++------ db/db_impl.h | 2 +- db/db_test.cc | 8 ++--- db/fault_injection_test.cc | 22 ++++++------- db/filename.cc | 2 +- db/recovery_test.cc | 12 +++---- db/repair.cc | 6 ++-- db/version_edit.cc | 2 +- db/version_edit.h | 2 +- db/version_edit_test.cc | 2 +- db/version_set.cc | 4 +-- doc/impl.md | 2 +- helpers/memenv/memenv.cc | 10 +++--- helpers/memenv/memenv_test.cc | 6 ++-- include/leveldb/env.h | 73 ++++++++++++++++++++++++++++++------------ util/env.cc | 16 ++++++++- util/env_posix.cc | 4 +-- util/env_posix_test.cc | 14 ++++---- util/env_test.cc | 8 ++--- util/env_windows.cc | 8 ++--- util/env_windows_test.cc | 2 +- 25 files changed, 138 insertions(+), 97 deletions(-) diff --git a/benchmarks/db_bench.cc b/benchmarks/db_bench.cc index 397e23f..03da9d8 100644 --- a/benchmarks/db_bench.cc +++ b/benchmarks/db_bench.cc @@ -414,7 +414,7 @@ class Benchmark { g_env->GetChildren(FLAGS_db, &files); for (size_t i = 0; i < files.size(); i++) { if (Slice(files[i]).starts_with("heap-")) { - g_env->DeleteFile(std::string(FLAGS_db) + "/" + files[i]); + g_env->RemoveFile(std::string(FLAGS_db) + "/" + files[i]); } } if (!FLAGS_use_existing_db) { @@ -912,7 +912,7 @@ class Benchmark { delete file; if (!ok) { fprintf(stderr, "heap profiling not supported\n"); - g_env->DeleteFile(fname); + g_env->RemoveFile(fname); } } }; diff --git a/benchmarks/db_bench_sqlite3.cc b/benchmarks/db_bench_sqlite3.cc index d3fe339..9c32a2d 100644 --- a/benchmarks/db_bench_sqlite3.cc +++ b/benchmarks/db_bench_sqlite3.cc @@ -328,7 +328,7 @@ class Benchmark { std::string file_name(test_dir); file_name += "/"; file_name += files[i]; - Env::Default()->DeleteFile(file_name.c_str()); + Env::Default()->RemoveFile(file_name.c_str()); } } } diff --git a/benchmarks/db_bench_tree_db.cc b/benchmarks/db_bench_tree_db.cc index b2f6646..43f0f65 100644 --- a/benchmarks/db_bench_tree_db.cc +++ b/benchmarks/db_bench_tree_db.cc @@ -301,7 +301,7 @@ class Benchmark { std::string file_name(test_dir); file_name += "/"; file_name += files[i]; - Env::Default()->DeleteFile(file_name.c_str()); + Env::Default()->RemoveFile(file_name.c_str()); } } } diff --git a/db/builder.cc b/db/builder.cc index 9520ee4..943e857 100644 --- a/db/builder.cc +++ b/db/builder.cc @@ -71,7 +71,7 @@ Status BuildTable(const std::string& dbname, Env* env, const Options& options, if (s.ok() && meta->file_size > 0) { // Keep it } else { - env->DeleteFile(fname); + env->RemoveFile(fname); } return s; } diff --git a/db/db_impl.cc b/db/db_impl.cc index 95e2bb4..ba0a46d 100644 --- a/db/db_impl.cc +++ b/db/db_impl.cc @@ -206,7 +206,7 @@ Status DBImpl::NewDB() { // Make "CURRENT" file that points to the new manifest file. s = SetCurrentFile(env_, dbname_, 1); } else { - env_->DeleteFile(manifest); + env_->RemoveFile(manifest); } return s; } @@ -220,7 +220,7 @@ void DBImpl::MaybeIgnoreError(Status* s) const { } } -void DBImpl::DeleteObsoleteFiles() { +void DBImpl::RemoveObsoleteFiles() { mutex_.AssertHeld(); if (!bg_error_.ok()) { @@ -282,7 +282,7 @@ void DBImpl::DeleteObsoleteFiles() { // are therefore safe to delete while allowing other threads to proceed. 
mutex_.Unlock(); for (const std::string& filename : files_to_delete) { - env_->DeleteFile(dbname_ + "/" + filename); + env_->RemoveFile(dbname_ + "/" + filename); } mutex_.Lock(); } @@ -569,7 +569,7 @@ void DBImpl::CompactMemTable() { imm_->Unref(); imm_ = nullptr; has_imm_.store(false, std::memory_order_release); - DeleteObsoleteFiles(); + RemoveObsoleteFiles(); } else { RecordBackgroundError(s); } @@ -729,7 +729,7 @@ void DBImpl::BackgroundCompaction() { // Move file to next level assert(c->num_input_files(0) == 1); FileMetaData* f = c->input(0, 0); - c->edit()->DeleteFile(c->level(), f->number); + c->edit()->RemoveFile(c->level(), f->number); c->edit()->AddFile(c->level() + 1, f->number, f->file_size, f->smallest, f->largest); status = versions_->LogAndApply(c->edit(), &mutex_); @@ -749,7 +749,7 @@ void DBImpl::BackgroundCompaction() { } CleanupCompaction(compact); c->ReleaseInputs(); - DeleteObsoleteFiles(); + RemoveObsoleteFiles(); } delete c; @@ -1506,7 +1506,7 @@ Status DB::Open(const Options& options, const std::string& dbname, DB** dbptr) { s = impl->versions_->LogAndApply(&edit, &impl->mutex_); } if (s.ok()) { - impl->DeleteObsoleteFiles(); + impl->RemoveObsoleteFiles(); impl->MaybeScheduleCompaction(); } impl->mutex_.Unlock(); @@ -1539,15 +1539,15 @@ Status DestroyDB(const std::string& dbname, const Options& options) { for (size_t i = 0; i < filenames.size(); i++) { if (ParseFileName(filenames[i], &number, &type) && type != kDBLockFile) { // Lock file will be deleted at end - Status del = env->DeleteFile(dbname + "/" + filenames[i]); + Status del = env->RemoveFile(dbname + "/" + filenames[i]); if (result.ok() && !del.ok()) { result = del; } } } env->UnlockFile(lock); // Ignore error since state is already gone - env->DeleteFile(lockname); - env->DeleteDir(dbname); // Ignore error in case dir contains other files + env->RemoveFile(lockname); + env->RemoveDir(dbname); // Ignore error in case dir contains other files } return result; } diff --git a/db/db_impl.h b/db/db_impl.h index 685735c..c7b0172 100644 --- a/db/db_impl.h +++ b/db/db_impl.h @@ -116,7 +116,7 @@ class DBImpl : public DB { void MaybeIgnoreError(Status* s) const; // Delete any unneeded files and stale in-memory entries. - void DeleteObsoleteFiles() EXCLUSIVE_LOCKS_REQUIRED(mutex_); + void RemoveObsoleteFiles() EXCLUSIVE_LOCKS_REQUIRED(mutex_); // Compact the in-memory write buffer to disk. Switches to a new // log-file/memtable and writes a new descriptor iff successful. 
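The hunk above also shows the locking discipline around file removal: the doomed file names are collected while holding mutex_, and the mutex is released for the slow filesystem calls. A stripped-down sketch of that pattern follows; the FileGc class and its members are invented for illustration and are not the real DBImpl declaration.

    #include <cstdio>
    #include <mutex>
    #include <string>
    #include <vector>

    class FileGc {
     public:
      void RemoveObsoleteFiles() {
        std::vector<std::string> files_to_delete;
        {
          std::lock_guard<std::mutex> lock(mutex_);
          files_to_delete.swap(obsolete_);  // Snapshot state under the lock.
        }
        // The filesystem calls run without the lock held, so other threads
        // are not blocked behind disk latency.
        for (const std::string& name : files_to_delete) {
          std::remove(name.c_str());
        }
      }

      void MarkObsolete(const std::string& name) {
        std::lock_guard<std::mutex> lock(mutex_);
        obsolete_.push_back(name);
      }

     private:
      std::mutex mutex_;
      std::vector<std::string> obsolete_;
    };
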
diff --git a/db/db_test.cc b/db/db_test.cc index 3f41c36..2ee6761 100644 --- a/db/db_test.cc +++ b/db/db_test.cc @@ -509,7 +509,7 @@ class DBTest : public testing::Test { FileType type; for (size_t i = 0; i < filenames.size(); i++) { if (ParseFileName(filenames[i], &number, &type) && type == kTableFile) { - EXPECT_LEVELDB_OK(env_->DeleteFile(TableFileName(dbname_, number))); + EXPECT_LEVELDB_OK(env_->RemoveFile(TableFileName(dbname_, number))); return true; } } @@ -1666,7 +1666,7 @@ TEST_F(DBTest, DBOpen_Options) { TEST_F(DBTest, DestroyEmptyDir) { std::string dbname = testing::TempDir() + "db_empty_dir"; TestEnv env(Env::Default()); - env.DeleteDir(dbname); + env.RemoveDir(dbname); ASSERT_TRUE(!env.FileExists(dbname)); Options opts; @@ -1693,7 +1693,7 @@ TEST_F(DBTest, DestroyEmptyDir) { TEST_F(DBTest, DestroyOpenDB) { std::string dbname = testing::TempDir() + "open_db_dir"; - env_->DeleteDir(dbname); + env_->RemoveDir(dbname); ASSERT_TRUE(!env_->FileExists(dbname)); Options opts; @@ -2279,7 +2279,7 @@ void BM_LogAndApply(int iters, int num_base_files) { for (int i = 0; i < iters; i++) { VersionEdit vedit; - vedit.DeleteFile(2, fnum); + vedit.RemoveFile(2, fnum); InternalKey start(MakeKey(2 * fnum), 1, kTypeValue); InternalKey limit(MakeKey(2 * fnum + 1), 1, kTypeDeletion); vedit.AddFile(2, fnum++, 1 /* file size */, start, limit); diff --git a/db/fault_injection_test.cc b/db/fault_injection_test.cc index b2d2adb..60e4631 100644 --- a/db/fault_injection_test.cc +++ b/db/fault_injection_test.cc @@ -77,7 +77,7 @@ Status Truncate(const std::string& filename, uint64_t length) { if (s.ok()) { s = env->RenameFile(tmp_name, filename); } else { - env->DeleteFile(tmp_name); + env->RemoveFile(tmp_name); } } } @@ -138,12 +138,12 @@ class FaultInjectionTestEnv : public EnvWrapper { WritableFile** result) override; Status NewAppendableFile(const std::string& fname, WritableFile** result) override; - Status DeleteFile(const std::string& f) override; + Status RemoveFile(const std::string& f) override; Status RenameFile(const std::string& s, const std::string& t) override; void WritableFileClosed(const FileState& state); Status DropUnsyncedFileData(); - Status DeleteFilesCreatedAfterLastDirSync(); + Status RemoveFilesCreatedAfterLastDirSync(); void DirWasSynced(); bool IsFileCreatedSinceLastDirSync(const std::string& filename); void ResetState(); @@ -303,8 +303,8 @@ void FaultInjectionTestEnv::UntrackFile(const std::string& f) { new_files_since_last_dir_sync_.erase(f); } -Status FaultInjectionTestEnv::DeleteFile(const std::string& f) { - Status s = EnvWrapper::DeleteFile(f); +Status FaultInjectionTestEnv::RemoveFile(const std::string& f) { + Status s = EnvWrapper::RemoveFile(f); EXPECT_LEVELDB_OK(s); if (s.ok()) { UntrackFile(f); @@ -340,17 +340,17 @@ void FaultInjectionTestEnv::ResetState() { SetFilesystemActive(true); } -Status FaultInjectionTestEnv::DeleteFilesCreatedAfterLastDirSync() { - // Because DeleteFile access this container make a copy to avoid deadlock +Status FaultInjectionTestEnv::RemoveFilesCreatedAfterLastDirSync() { + // Because RemoveFile access this container make a copy to avoid deadlock mutex_.Lock(); std::set new_files(new_files_since_last_dir_sync_.begin(), new_files_since_last_dir_sync_.end()); mutex_.Unlock(); Status status; for (const auto& new_file : new_files) { - Status delete_status = DeleteFile(new_file); - if (!delete_status.ok() && status.ok()) { - status = std::move(delete_status); + Status remove_status = RemoveFile(new_file); + if (!remove_status.ok() && status.ok()) 
{ + status = std::move(remove_status); } } return status; @@ -482,7 +482,7 @@ class FaultInjectionTest : public testing::Test { ASSERT_LEVELDB_OK(env_->DropUnsyncedFileData()); break; case RESET_DELETE_UNSYNCED_FILES: - ASSERT_LEVELDB_OK(env_->DeleteFilesCreatedAfterLastDirSync()); + ASSERT_LEVELDB_OK(env_->RemoveFilesCreatedAfterLastDirSync()); break; default: assert(false); diff --git a/db/filename.cc b/db/filename.cc index 85de45c..9b451fc 100644 --- a/db/filename.cc +++ b/db/filename.cc @@ -133,7 +133,7 @@ Status SetCurrentFile(Env* env, const std::string& dbname, s = env->RenameFile(tmp, CurrentFileName(dbname)); } if (!s.ok()) { - env->DeleteFile(tmp); + env->RemoveFile(tmp); } return s; } diff --git a/db/recovery_test.cc b/db/recovery_test.cc index ea137e6..04b39ae 100644 --- a/db/recovery_test.cc +++ b/db/recovery_test.cc @@ -100,19 +100,19 @@ class RecoveryTest : public testing::Test { std::string LogName(uint64_t number) { return LogFileName(dbname_, number); } - size_t DeleteLogFiles() { + size_t RemoveLogFiles() { // Linux allows unlinking open files, but Windows does not. // Closing the db allows for file deletion. Close(); std::vector logs = GetFiles(kLogFile); for (size_t i = 0; i < logs.size(); i++) { - EXPECT_LEVELDB_OK(env_->DeleteFile(LogName(logs[i]))) << LogName(logs[i]); + EXPECT_LEVELDB_OK(env_->RemoveFile(LogName(logs[i]))) << LogName(logs[i]); } return logs.size(); } - void DeleteManifestFile() { - ASSERT_LEVELDB_OK(env_->DeleteFile(ManifestFileName())); + void RemoveManifestFile() { + ASSERT_LEVELDB_OK(env_->RemoveFile(ManifestFileName())); } uint64_t FirstLogFile() { return GetFiles(kLogFile)[0]; } @@ -212,7 +212,7 @@ TEST_F(RecoveryTest, LargeManifestCompacted) { TEST_F(RecoveryTest, NoLogFiles) { ASSERT_LEVELDB_OK(Put("foo", "bar")); - ASSERT_EQ(1, DeleteLogFiles()); + ASSERT_EQ(1, RemoveLogFiles()); Open(); ASSERT_EQ("NOT_FOUND", Get("foo")); Open(); @@ -327,7 +327,7 @@ TEST_F(RecoveryTest, MultipleLogFiles) { TEST_F(RecoveryTest, ManifestMissing) { ASSERT_LEVELDB_OK(Put("foo", "bar")); Close(); - DeleteManifestFile(); + RemoveManifestFile(); Status status = OpenWithStatus(); ASSERT_TRUE(status.IsCorruption()); diff --git a/db/repair.cc b/db/repair.cc index d9d12ba..d2a495e 100644 --- a/db/repair.cc +++ b/db/repair.cc @@ -341,7 +341,7 @@ class Repairer { } } if (!s.ok()) { - env_->DeleteFile(copy); + env_->RemoveFile(copy); } } @@ -386,7 +386,7 @@ class Repairer { file = nullptr; if (!status.ok()) { - env_->DeleteFile(tmp); + env_->RemoveFile(tmp); } else { // Discard older manifests for (size_t i = 0; i < manifests_.size(); i++) { @@ -398,7 +398,7 @@ class Repairer { if (status.ok()) { status = SetCurrentFile(env_, dbname_, 1); } else { - env_->DeleteFile(tmp); + env_->RemoveFile(tmp); } } return status; diff --git a/db/version_edit.cc b/db/version_edit.cc index cd770ef..3e9012f 100644 --- a/db/version_edit.cc +++ b/db/version_edit.cc @@ -232,7 +232,7 @@ std::string VersionEdit::DebugString() const { r.append(compact_pointers_[i].second.DebugString()); } for (const auto& deleted_files_kvp : deleted_files_) { - r.append("\n DeleteFile: "); + r.append("\n RemoveFile: "); AppendNumberTo(&r, deleted_files_kvp.first); r.append(" "); AppendNumberTo(&r, deleted_files_kvp.second); diff --git a/db/version_edit.h b/db/version_edit.h index 0de4531..137b4b1 100644 --- a/db/version_edit.h +++ b/db/version_edit.h @@ -71,7 +71,7 @@ class VersionEdit { } // Delete the specified "file" from the specified "level". 
- void DeleteFile(int level, uint64_t file) { + void RemoveFile(int level, uint64_t file) { deleted_files_.insert(std::make_pair(level, file)); } diff --git a/db/version_edit_test.cc b/db/version_edit_test.cc index 39ea8b7..acafab0 100644 --- a/db/version_edit_test.cc +++ b/db/version_edit_test.cc @@ -27,7 +27,7 @@ TEST(VersionEditTest, EncodeDecode) { edit.AddFile(3, kBig + 300 + i, kBig + 400 + i, InternalKey("foo", kBig + 500 + i, kTypeValue), InternalKey("zoo", kBig + 600 + i, kTypeDeletion)); - edit.DeleteFile(4, kBig + 700 + i); + edit.RemoveFile(4, kBig + 700 + i); edit.SetCompactPointer(i, InternalKey("x", kBig + 900 + i, kTypeValue)); } diff --git a/db/version_set.cc b/db/version_set.cc index cd07346..2d5e51a 100644 --- a/db/version_set.cc +++ b/db/version_set.cc @@ -853,7 +853,7 @@ Status VersionSet::LogAndApply(VersionEdit* edit, port::Mutex* mu) { delete descriptor_file_; descriptor_log_ = nullptr; descriptor_file_ = nullptr; - env_->DeleteFile(new_manifest_file); + env_->RemoveFile(new_manifest_file); } } @@ -1502,7 +1502,7 @@ bool Compaction::IsTrivialMove() const { void Compaction::AddInputDeletions(VersionEdit* edit) { for (int which = 0; which < 2; which++) { for (size_t i = 0; i < inputs_[which].size(); i++) { - edit->DeleteFile(level_ + which, inputs_[which][i]->number); + edit->RemoveFile(level_ + which, inputs_[which][i]->number); } } } diff --git a/doc/impl.md b/doc/impl.md index cacabb9..45187a2 100644 --- a/doc/impl.md +++ b/doc/impl.md @@ -166,7 +166,7 @@ So maybe even the sharding is not necessary on modern filesystems? ## Garbage collection of files -`DeleteObsoleteFiles()` is called at the end of every compaction and at the end +`RemoveObsoleteFiles()` is called at the end of every compaction and at the end of recovery. It finds the names of all files in the database. It deletes all log files that are not the current log file. It deletes all table files that are not referenced from some level and are not the output of an active compaction. 
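The impl.md paragraph above states the rule that RemoveObsoleteFiles() applies. A rough sketch of that keep-or-remove decision follows; the FileInfo struct and the live_tables parameter are invented for this example, and the live set is assumed to already include tables referenced by some level as well as outputs of any active compaction.

    #include <cstdint>
    #include <set>
    #include <string>
    #include <vector>

    enum class FileType { kLogFile, kTableFile, kOther };

    struct FileInfo {
      std::string name;
      FileType type;
      uint64_t number;
    };

    std::vector<std::string> FilesToRemove(
        const std::vector<FileInfo>& dir_listing, uint64_t current_log_number,
        const std::set<uint64_t>& live_tables) {
      std::vector<std::string> doomed;
      for (const FileInfo& f : dir_listing) {
        bool keep = true;
        if (f.type == FileType::kLogFile) {
          keep = (f.number == current_log_number);  // Only the current log.
        } else if (f.type == FileType::kTableFile) {
          keep = (live_tables.count(f.number) != 0);  // Still referenced.
        }
        if (!keep) doomed.push_back(f.name);
      }
      return doomed;
    }
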
diff --git a/helpers/memenv/memenv.cc b/helpers/memenv/memenv.cc index 31d2bc0..383c78b 100644 --- a/helpers/memenv/memenv.cc +++ b/helpers/memenv/memenv.cc @@ -309,7 +309,7 @@ class InMemoryEnv : public EnvWrapper { return Status::OK(); } - void DeleteFileInternal(const std::string& fname) + void RemoveFileInternal(const std::string& fname) EXCLUSIVE_LOCKS_REQUIRED(mutex_) { if (file_map_.find(fname) == file_map_.end()) { return; @@ -319,19 +319,19 @@ class InMemoryEnv : public EnvWrapper { file_map_.erase(fname); } - Status DeleteFile(const std::string& fname) override { + Status RemoveFile(const std::string& fname) override { MutexLock lock(&mutex_); if (file_map_.find(fname) == file_map_.end()) { return Status::IOError(fname, "File not found"); } - DeleteFileInternal(fname); + RemoveFileInternal(fname); return Status::OK(); } Status CreateDir(const std::string& dirname) override { return Status::OK(); } - Status DeleteDir(const std::string& dirname) override { return Status::OK(); } + Status RemoveDir(const std::string& dirname) override { return Status::OK(); } Status GetFileSize(const std::string& fname, uint64_t* file_size) override { MutexLock lock(&mutex_); @@ -350,7 +350,7 @@ class InMemoryEnv : public EnvWrapper { return Status::IOError(src, "File not found"); } - DeleteFileInternal(target); + RemoveFileInternal(target); file_map_[target] = file_map_[src]; file_map_.erase(src); return Status::OK(); diff --git a/helpers/memenv/memenv_test.cc b/helpers/memenv/memenv_test.cc index 619fe51..93186ab 100644 --- a/helpers/memenv/memenv_test.cc +++ b/helpers/memenv/memenv_test.cc @@ -88,12 +88,12 @@ TEST_F(MemEnvTest, Basics) { ASSERT_TRUE(!rand_file); // Check that deleting works. - ASSERT_TRUE(!env_->DeleteFile("/dir/non_existent").ok()); - ASSERT_LEVELDB_OK(env_->DeleteFile("/dir/g")); + ASSERT_TRUE(!env_->RemoveFile("/dir/non_existent").ok()); + ASSERT_LEVELDB_OK(env_->RemoveFile("/dir/g")); ASSERT_TRUE(!env_->FileExists("/dir/g")); ASSERT_LEVELDB_OK(env_->GetChildren("/dir", &children)); ASSERT_EQ(0, children.size()); - ASSERT_LEVELDB_OK(env_->DeleteDir("/dir")); + ASSERT_LEVELDB_OK(env_->RemoveDir("/dir")); } TEST_F(MemEnvTest, ReadWrite) { diff --git a/include/leveldb/env.h b/include/leveldb/env.h index 112fe96..6501fa4 100644 --- a/include/leveldb/env.h +++ b/include/leveldb/env.h @@ -22,21 +22,18 @@ #include "leveldb/export.h" #include "leveldb/status.h" +// This workaround can be removed when leveldb::Env::DeleteFile is removed. #if defined(_WIN32) -// The leveldb::Env class below contains a DeleteFile method. -// At the same time, , a fairly popular header -// file for Windows applications, defines a DeleteFile macro. +// On Windows, the method name DeleteFile (below) introduces the risk of +// triggering undefined behavior by exposing the compiler to different +// declarations of the Env class in different translation units. // -// Without any intervention on our part, the result of this -// unfortunate coincidence is that the name of the -// leveldb::Env::DeleteFile method seen by the compiler depends on -// whether was included before or after the LevelDB -// headers. +// This is because , a fairly popular header file for Windows +// applications, defines a DeleteFile macro. So, files that include the Windows +// header before this header will contain an altered Env declaration. // -// To avoid headaches, we undefined DeleteFile (if defined) and -// redefine it at the bottom of this file. 
This way -// can be included before this file (or not at all) and the -// exported method will always be leveldb::Env::DeleteFile. +// This workaround ensures that the compiler sees the same Env declaration, +// independently of whether was included. #if defined(DeleteFile) #undef DeleteFile #define LEVELDB_DELETEFILE_UNDEFINED @@ -54,7 +51,7 @@ class WritableFile; class LEVELDB_EXPORT Env { public: - Env() = default; + Env(); Env(const Env&) = delete; Env& operator=(const Env&) = delete; @@ -122,15 +119,48 @@ class LEVELDB_EXPORT Env { // Original contents of *results are dropped. virtual Status GetChildren(const std::string& dir, std::vector* result) = 0; - // Delete the named file. - virtual Status DeleteFile(const std::string& fname) = 0; + // + // The default implementation calls DeleteFile, to support legacy Env + // implementations. Updated Env implementations must override RemoveFile and + // ignore the existence of DeleteFile. Updated code calling into the Env API + // must call RemoveFile instead of DeleteFile. + // + // A future release will remove DeleteDir and the default implementation of + // RemoveDir. + virtual Status RemoveFile(const std::string& fname); + + // DEPRECATED: Modern Env implementations should override RemoveFile instead. + // + // The default implementation calls RemoveFile, to support legacy Env user + // code that calls this method on modern Env implementations. Modern Env user + // code should call RemoveFile. + // + // A future release will remove this method. + virtual Status DeleteFile(const std::string& fname); // Create the specified directory. virtual Status CreateDir(const std::string& dirname) = 0; // Delete the specified directory. - virtual Status DeleteDir(const std::string& dirname) = 0; + // + // The default implementation calls DeleteDir, to support legacy Env + // implementations. Updated Env implementations must override RemoveDir and + // ignore the existence of DeleteDir. Modern code calling into the Env API + // must call RemoveDir instead of DeleteDir. + // + // A future release will remove DeleteDir and the default implementation of + // RemoveDir. + virtual Status RemoveDir(const std::string& dirname); + + // DEPRECATED: Modern Env implementations should override RemoveDir instead. + // + // The default implementation calls RemoveDir, to support legacy Env user + // code that calls this method on modern Env implementations. Modern Env user + // code should call RemoveDir. + // + // A future release will remove this method. + virtual Status DeleteDir(const std::string& dirname); // Store the size of fname in *file_size. virtual Status GetFileSize(const std::string& fname, uint64_t* file_size) = 0; @@ -333,14 +363,14 @@ class LEVELDB_EXPORT EnvWrapper : public Env { std::vector* r) override { return target_->GetChildren(dir, r); } - Status DeleteFile(const std::string& f) override { - return target_->DeleteFile(f); + Status RemoveFile(const std::string& f) override { + return target_->RemoveFile(f); } Status CreateDir(const std::string& d) override { return target_->CreateDir(d); } - Status DeleteDir(const std::string& d) override { - return target_->DeleteDir(d); + Status RemoveDir(const std::string& d) override { + return target_->RemoveDir(d); } Status GetFileSize(const std::string& f, uint64_t* s) override { return target_->GetFileSize(f, s); @@ -375,7 +405,8 @@ class LEVELDB_EXPORT EnvWrapper : public Env { } // namespace leveldb -// Redefine DeleteFile if necessary. 
+// This workaround can be removed when leveldb::Env::DeleteFile is removed. +// Redefine DeleteFile if it was undefined earlier. #if defined(_WIN32) && defined(LEVELDB_DELETEFILE_UNDEFINED) #if defined(UNICODE) #define DeleteFile DeleteFileW diff --git a/util/env.cc b/util/env.cc index d2f0aef..40e6071 100644 --- a/util/env.cc +++ b/util/env.cc @@ -4,14 +4,28 @@ #include "leveldb/env.h" +// This workaround can be removed when leveldb::Env::DeleteFile is removed. +// See env.h for justification. +#if defined(_WIN32) && defined(LEVELDB_DELETEFILE_UNDEFINED) +#undef DeleteFile +#endif + namespace leveldb { +Env::Env() = default; + Env::~Env() = default; Status Env::NewAppendableFile(const std::string& fname, WritableFile** result) { return Status::NotSupported("NewAppendableFile", fname); } +Status Env::RemoveDir(const std::string& dirname) { return DeleteDir(dirname); } +Status Env::DeleteDir(const std::string& dirname) { return RemoveDir(dirname); } + +Status Env::RemoveFile(const std::string& fname) { return DeleteFile(fname); } +Status Env::DeleteFile(const std::string& fname) { return RemoveFile(fname); } + SequentialFile::~SequentialFile() = default; RandomAccessFile::~RandomAccessFile() = default; @@ -47,7 +61,7 @@ static Status DoWriteStringToFile(Env* env, const Slice& data, } delete file; // Will auto-close if we did not close above if (!s.ok()) { - env->DeleteFile(fname); + env->RemoveFile(fname); } return s; } diff --git a/util/env_posix.cc b/util/env_posix.cc index 00ca9ae..d84cd1e 100644 --- a/util/env_posix.cc +++ b/util/env_posix.cc @@ -587,7 +587,7 @@ class PosixEnv : public Env { return Status::OK(); } - Status DeleteFile(const std::string& filename) override { + Status RemoveFile(const std::string& filename) override { if (::unlink(filename.c_str()) != 0) { return PosixError(filename, errno); } @@ -601,7 +601,7 @@ class PosixEnv : public Env { return Status::OK(); } - Status DeleteDir(const std::string& dirname) override { + Status RemoveDir(const std::string& dirname) override { if (::rmdir(dirname.c_str()) != 0) { return PosixError(dirname, errno); } diff --git a/util/env_posix_test.cc b/util/env_posix_test.cc index ed4ac96..36f226f 100644 --- a/util/env_posix_test.cc +++ b/util/env_posix_test.cc @@ -209,7 +209,7 @@ TEST_F(EnvPosixTest, TestOpenOnRead) { for (int i = 0; i < kNumFiles; i++) { delete files[i]; } - ASSERT_LEVELDB_OK(env_->DeleteFile(test_file)); + ASSERT_LEVELDB_OK(env_->RemoveFile(test_file)); } #if HAVE_O_CLOEXEC @@ -228,7 +228,7 @@ TEST_F(EnvPosixTest, TestCloseOnExecSequentialFile) { CheckCloseOnExecDoesNotLeakFDs(open_fds); delete file; - ASSERT_LEVELDB_OK(env_->DeleteFile(file_path)); + ASSERT_LEVELDB_OK(env_->RemoveFile(file_path)); } TEST_F(EnvPosixTest, TestCloseOnExecRandomAccessFile) { @@ -256,7 +256,7 @@ TEST_F(EnvPosixTest, TestCloseOnExecRandomAccessFile) { for (int i = 0; i < kReadOnlyFileLimit; i++) { delete mmapped_files[i]; } - ASSERT_LEVELDB_OK(env_->DeleteFile(file_path)); + ASSERT_LEVELDB_OK(env_->RemoveFile(file_path)); } TEST_F(EnvPosixTest, TestCloseOnExecWritableFile) { @@ -273,7 +273,7 @@ TEST_F(EnvPosixTest, TestCloseOnExecWritableFile) { CheckCloseOnExecDoesNotLeakFDs(open_fds); delete file; - ASSERT_LEVELDB_OK(env_->DeleteFile(file_path)); + ASSERT_LEVELDB_OK(env_->RemoveFile(file_path)); } TEST_F(EnvPosixTest, TestCloseOnExecAppendableFile) { @@ -290,7 +290,7 @@ TEST_F(EnvPosixTest, TestCloseOnExecAppendableFile) { CheckCloseOnExecDoesNotLeakFDs(open_fds); delete file; - ASSERT_LEVELDB_OK(env_->DeleteFile(file_path)); + 
ASSERT_LEVELDB_OK(env_->RemoveFile(file_path)); } TEST_F(EnvPosixTest, TestCloseOnExecLockFile) { @@ -307,7 +307,7 @@ TEST_F(EnvPosixTest, TestCloseOnExecLockFile) { CheckCloseOnExecDoesNotLeakFDs(open_fds); ASSERT_LEVELDB_OK(env_->UnlockFile(lock)); - ASSERT_LEVELDB_OK(env_->DeleteFile(file_path)); + ASSERT_LEVELDB_OK(env_->RemoveFile(file_path)); } TEST_F(EnvPosixTest, TestCloseOnExecLogger) { @@ -324,7 +324,7 @@ TEST_F(EnvPosixTest, TestCloseOnExecLogger) { CheckCloseOnExecDoesNotLeakFDs(open_fds); delete file; - ASSERT_LEVELDB_OK(env_->DeleteFile(file_path)); + ASSERT_LEVELDB_OK(env_->RemoveFile(file_path)); } #endif // HAVE_O_CLOEXEC diff --git a/util/env_test.cc b/util/env_test.cc index 09e9d39..223090e 100644 --- a/util/env_test.cc +++ b/util/env_test.cc @@ -193,7 +193,7 @@ TEST_F(EnvTest, ReopenWritableFile) { std::string test_dir; ASSERT_LEVELDB_OK(env_->GetTestDirectory(&test_dir)); std::string test_file_name = test_dir + "/reopen_writable_file.txt"; - env_->DeleteFile(test_file_name); + env_->RemoveFile(test_file_name); WritableFile* writable_file; ASSERT_LEVELDB_OK(env_->NewWritableFile(test_file_name, &writable_file)); @@ -210,14 +210,14 @@ TEST_F(EnvTest, ReopenWritableFile) { ASSERT_LEVELDB_OK(ReadFileToString(env_, test_file_name, &data)); ASSERT_EQ(std::string("42"), data); - env_->DeleteFile(test_file_name); + env_->RemoveFile(test_file_name); } TEST_F(EnvTest, ReopenAppendableFile) { std::string test_dir; ASSERT_LEVELDB_OK(env_->GetTestDirectory(&test_dir)); std::string test_file_name = test_dir + "/reopen_appendable_file.txt"; - env_->DeleteFile(test_file_name); + env_->RemoveFile(test_file_name); WritableFile* appendable_file; ASSERT_LEVELDB_OK(env_->NewAppendableFile(test_file_name, &appendable_file)); @@ -234,7 +234,7 @@ TEST_F(EnvTest, ReopenAppendableFile) { ASSERT_LEVELDB_OK(ReadFileToString(env_, test_file_name, &data)); ASSERT_EQ(std::string("hello world!42"), data); - env_->DeleteFile(test_file_name); + env_->RemoveFile(test_file_name); } } // namespace leveldb diff --git a/util/env_windows.cc b/util/env_windows.cc index 2dd7794..449f564 100644 --- a/util/env_windows.cc +++ b/util/env_windows.cc @@ -33,10 +33,6 @@ #include "util/mutexlock.h" #include "util/windows_logger.h" -#if defined(DeleteFile) -#undef DeleteFile -#endif // defined(DeleteFile) - namespace leveldb { namespace { @@ -505,7 +501,7 @@ class WindowsEnv : public Env { return Status::OK(); } - Status DeleteFile(const std::string& filename) override { + Status RemoveFile(const std::string& filename) override { if (!::DeleteFileA(filename.c_str())) { return WindowsError(filename, ::GetLastError()); } @@ -519,7 +515,7 @@ class WindowsEnv : public Env { return Status::OK(); } - Status DeleteDir(const std::string& dirname) override { + Status RemoveDir(const std::string& dirname) override { if (!::RemoveDirectoryA(dirname.c_str())) { return WindowsError(dirname, ::GetLastError()); } diff --git a/util/env_windows_test.cc b/util/env_windows_test.cc index c75ca7b..15c0274 100644 --- a/util/env_windows_test.cc +++ b/util/env_windows_test.cc @@ -52,7 +52,7 @@ TEST_F(EnvWindowsTest, TestOpenOnRead) { for (int i = 0; i < kNumFiles; i++) { delete files[i]; } - ASSERT_LEVELDB_OK(env_->DeleteFile(test_file)); + ASSERT_LEVELDB_OK(env_->RemoveFile(test_file)); } } // namespace leveldb From 5903e7a1125cacaa1d44367b5b84fe9208e42884 Mon Sep 17 00:00:00 2001 From: Victor Costan Date: Fri, 10 Jan 2020 10:45:16 -0800 Subject: [PATCH 16/68] Remove Windows workarounds in some tests. 
leveldb::Env::DeleteFile was replaced with leveldb::Env::RemoveFile in all tests. This allows us to remove workarounds for windows.h #defining DeleteFile. PiperOrigin-RevId: 289121105 --- benchmarks/db_bench.cc | 5 ----- db/db_test.cc | 5 ----- db/fault_injection_test.cc | 5 ----- db/recovery_test.cc | 5 ----- helpers/memenv/memenv_test.cc | 5 ----- util/env_test.cc | 5 ----- 6 files changed, 30 deletions(-) diff --git a/benchmarks/db_bench.cc b/benchmarks/db_bench.cc index 03da9d8..82ed892 100644 --- a/benchmarks/db_bench.cc +++ b/benchmarks/db_bench.cc @@ -18,11 +18,6 @@ #include "util/random.h" #include "util/testutil.h" -#if defined(_WIN32) && defined(DeleteFile) -// See rationale in env.h -#undef DeleteFile -#endif - // Comma-separated list of operations to run in the specified order // Actual benchmarks: // fillseq -- write N values in sequential key order in async mode diff --git a/db/db_test.cc b/db/db_test.cc index 2ee6761..8cd90f3 100644 --- a/db/db_test.cc +++ b/db/db_test.cc @@ -23,11 +23,6 @@ #include "util/mutexlock.h" #include "util/testutil.h" -#if defined(_WIN32) && defined(DeleteFile) -// See rationale in env.h -#undef DeleteFile -#endif - namespace leveldb { static std::string RandomString(Random* rnd, int len) { diff --git a/db/fault_injection_test.cc b/db/fault_injection_test.cc index 60e4631..8f2b647 100644 --- a/db/fault_injection_test.cc +++ b/db/fault_injection_test.cc @@ -25,11 +25,6 @@ #include "util/mutexlock.h" #include "util/testutil.h" -#if defined(_WIN32) && defined(DeleteFile) -// See rationale in env.h -#undef DeleteFile -#endif - namespace leveldb { static const int kValueSize = 1000; diff --git a/db/recovery_test.cc b/db/recovery_test.cc index 04b39ae..e5cc916 100644 --- a/db/recovery_test.cc +++ b/db/recovery_test.cc @@ -13,11 +13,6 @@ #include "util/logging.h" #include "util/testutil.h" -#if defined(_WIN32) && defined(DeleteFile) -// See rationale in env.h -#undef DeleteFile -#endif - namespace leveldb { class RecoveryTest : public testing::Test { diff --git a/helpers/memenv/memenv_test.cc b/helpers/memenv/memenv_test.cc index 93186ab..3f03cb6 100644 --- a/helpers/memenv/memenv_test.cc +++ b/helpers/memenv/memenv_test.cc @@ -13,11 +13,6 @@ #include "leveldb/env.h" #include "util/testutil.h" -#if defined(_WIN32) && defined(DeleteFile) -// See rationale in env.h -#undef DeleteFile -#endif - namespace leveldb { class MemEnvTest : public testing::Test { diff --git a/util/env_test.cc b/util/env_test.cc index 223090e..491ef43 100644 --- a/util/env_test.cc +++ b/util/env_test.cc @@ -12,11 +12,6 @@ #include "util/mutexlock.h" #include "util/testutil.h" -#if defined(_WIN32) && defined(DeleteFile) -// See rationale in env.h -#undef DeleteFile -#endif - namespace leveldb { static const int kDelayMicros = 100000; From ba369ddbaffcfe635dd620d1aa68473b56267065 Mon Sep 17 00:00:00 2001 From: Victor Costan Date: Mon, 13 Apr 2020 15:21:41 +0000 Subject: [PATCH 17/68] Use LLVM 10 on Travis CI. 
PiperOrigin-RevId: 306236199 --- .travis.yml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/.travis.yml b/.travis.yml index 42cbe64..766fdc9 100644 --- a/.travis.yml +++ b/.travis.yml @@ -20,11 +20,11 @@ env: addons: apt: sources: - - sourceline: 'deb http://apt.llvm.org/bionic/ llvm-toolchain-bionic-9 main' + - sourceline: 'deb http://apt.llvm.org/bionic/ llvm-toolchain-bionic-10 main' key_url: 'https://apt.llvm.org/llvm-snapshot.gpg.key' - sourceline: 'ppa:ubuntu-toolchain-r/test' packages: - - clang-9 + - clang-10 - cmake - gcc-9 - g++-9 @@ -40,7 +40,7 @@ addons: - gcc@9 - gperftools - kyoto-cabinet - - llvm@9 + - llvm@10 - ninja - snappy - sqlite3 @@ -60,7 +60,7 @@ install: # below don't work on macOS. Fortunately, the path change above makes the # default values (clang and clang++) resolve to the correct compiler on macOS. - if [ "$TRAVIS_OS_NAME" = "linux" ]; then - if [ "$CXX" = "clang++" ]; then export CXX="clang++-9" CC="clang-9"; fi; + if [ "$CXX" = "clang++" ]; then export CXX="clang++-10" CC="clang-10"; fi; fi - echo ${CC} - echo ${CXX} From 201f52201f5dd9701e7a8ceaa0ec4d344e69e022 Mon Sep 17 00:00:00 2001 From: Victor Costan Date: Mon, 13 Apr 2020 23:18:12 +0000 Subject: [PATCH 18/68] Remove leveldb::port::kLittleEndian. Clang 10 includes the optimizations described in https://bugs.llvm.org/show_bug.cgi?id=41761. This means that the platform-independent implementations of {Decode,Encode}Fixed{32,64}() compile to one instruction on the most recent Clang and GCC. PiperOrigin-RevId: 306330166 --- CMakeLists.txt | 3 --- port/port_config.h.in | 6 ------ port/port_example.h | 4 ---- port/port_stdcxx.h | 2 -- util/coding.h | 50 ++++---------------------------------------------- 5 files changed, 4 insertions(+), 61 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index be41ba4..ae9b0f7 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -34,9 +34,6 @@ option(LEVELDB_BUILD_TESTS "Build LevelDB's unit tests" ON) option(LEVELDB_BUILD_BENCHMARKS "Build LevelDB's benchmarks" ON) option(LEVELDB_INSTALL "Install LevelDB's header and library" ON) -include(TestBigEndian) -test_big_endian(LEVELDB_IS_BIG_ENDIAN) - include(CheckIncludeFile) check_include_file("unistd.h" HAVE_UNISTD_H) diff --git a/port/port_config.h.in b/port/port_config.h.in index 2127315..272671d 100644 --- a/port/port_config.h.in +++ b/port/port_config.h.in @@ -30,10 +30,4 @@ #cmakedefine01 HAVE_SNAPPY #endif // !defined(HAVE_SNAPPY) -// Define to 1 if your processor stores words with the most significant byte -// first (like Motorola and SPARC, unlike Intel and VAX). -#if !defined(LEVELDB_IS_BIG_ENDIAN) -#cmakedefine01 LEVELDB_IS_BIG_ENDIAN -#endif // !defined(LEVELDB_IS_BIG_ENDIAN) - #endif // STORAGE_LEVELDB_PORT_PORT_CONFIG_H_ \ No newline at end of file diff --git a/port/port_example.h b/port/port_example.h index 1a8fca2..a665910 100644 --- a/port/port_example.h +++ b/port/port_example.h @@ -18,10 +18,6 @@ namespace port { // TODO(jorlow): Many of these belong more in the environment class rather than // here. We should try moving them and see if it affects perf. -// The following boolean constant must be true on a little-endian machine -// and false otherwise. -static const bool kLittleEndian = true /* or some other expression */; - // ------------------ Threading ------------------- // A Mutex represents an exclusive lock. 
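As a round-trip illustration of the point made in the commit message above (the portable byte-wise {Encode,Decode}Fixed32 code is endian-independent and compiles to a single load or store on recent clang and gcc), here is a standalone sketch. The functions mirror util/coding.h, but this copy exists only for illustration.

    #include <cassert>
    #include <cstdint>

    void EncodeFixed32(char* dst, uint32_t value) {
      uint8_t* const buffer = reinterpret_cast<uint8_t*>(dst);
      // Always emits little-endian byte order regardless of the host CPU;
      // recent clang and gcc turn this into one mov (x86-64) / str (ARM).
      buffer[0] = static_cast<uint8_t>(value);
      buffer[1] = static_cast<uint8_t>(value >> 8);
      buffer[2] = static_cast<uint8_t>(value >> 16);
      buffer[3] = static_cast<uint8_t>(value >> 24);
    }

    uint32_t DecodeFixed32(const char* ptr) {
      const uint8_t* const buffer = reinterpret_cast<const uint8_t*>(ptr);
      return (static_cast<uint32_t>(buffer[0])) |
             (static_cast<uint32_t>(buffer[1]) << 8) |
             (static_cast<uint32_t>(buffer[2]) << 16) |
             (static_cast<uint32_t>(buffer[3]) << 24);
    }

    int main() {
      char buf[4];
      EncodeFixed32(buf, 0x01020304u);
      assert(buf[0] == 0x04 && buf[3] == 0x01);  // On-disk byte order is fixed.
      assert(DecodeFixed32(buf) == 0x01020304u);
      return 0;
    }
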
diff --git a/port/port_stdcxx.h b/port/port_stdcxx.h index e9cb0e5..2bda48d 100644 --- a/port/port_stdcxx.h +++ b/port/port_stdcxx.h @@ -41,8 +41,6 @@ namespace leveldb { namespace port { -static const bool kLittleEndian = !LEVELDB_IS_BIG_ENDIAN; - class CondVar; // Thinly wraps std::mutex. diff --git a/util/coding.h b/util/coding.h index 1983ae7..f0bb57b 100644 --- a/util/coding.h +++ b/util/coding.h @@ -48,29 +48,13 @@ int VarintLength(uint64_t v); char* EncodeVarint32(char* dst, uint32_t value); char* EncodeVarint64(char* dst, uint64_t value); -// TODO(costan): Remove port::kLittleEndian and the fast paths based on -// std::memcpy when clang learns to optimize the generic code, as -// described in https://bugs.llvm.org/show_bug.cgi?id=41761 -// -// The platform-independent code in DecodeFixed{32,64}() gets optimized to mov -// on x86 and ldr on ARM64, by both clang and gcc. However, only gcc optimizes -// the platform-independent code in EncodeFixed{32,64}() to mov / str. - // Lower-level versions of Put... that write directly into a character buffer // REQUIRES: dst has enough space for the value being written inline void EncodeFixed32(char* dst, uint32_t value) { uint8_t* const buffer = reinterpret_cast(dst); - if (port::kLittleEndian) { - // Fast path for little-endian CPUs. All major compilers optimize this to a - // single mov (x86_64) / str (ARM) instruction. - std::memcpy(buffer, &value, sizeof(uint32_t)); - return; - } - - // Platform-independent code. - // Currently, only gcc optimizes this to a single mov / str instruction. + // Recent clang and gcc optimize this to a single mov / str instruction. buffer[0] = static_cast(value); buffer[1] = static_cast(value >> 8); buffer[2] = static_cast(value >> 16); @@ -80,15 +64,7 @@ inline void EncodeFixed32(char* dst, uint32_t value) { inline void EncodeFixed64(char* dst, uint64_t value) { uint8_t* const buffer = reinterpret_cast(dst); - if (port::kLittleEndian) { - // Fast path for little-endian CPUs. All major compilers optimize this to a - // single mov (x86_64) / str (ARM) instruction. - std::memcpy(buffer, &value, sizeof(uint64_t)); - return; - } - - // Platform-independent code. - // Currently, only gcc optimizes this to a single mov / str instruction. + // Recent clang and gcc optimize this to a single mov / str instruction. buffer[0] = static_cast(value); buffer[1] = static_cast(value >> 8); buffer[2] = static_cast(value >> 16); @@ -105,16 +81,7 @@ inline void EncodeFixed64(char* dst, uint64_t value) { inline uint32_t DecodeFixed32(const char* ptr) { const uint8_t* const buffer = reinterpret_cast(ptr); - if (port::kLittleEndian) { - // Fast path for little-endian CPUs. All major compilers optimize this to a - // single mov (x86_64) / ldr (ARM) instruction. - uint32_t result; - std::memcpy(&result, buffer, sizeof(uint32_t)); - return result; - } - - // Platform-independent code. - // Clang and gcc optimize this to a single mov / ldr instruction. + // Recent clang and gcc optimize this to a single mov / ldr instruction. return (static_cast(buffer[0])) | (static_cast(buffer[1]) << 8) | (static_cast(buffer[2]) << 16) | @@ -124,16 +91,7 @@ inline uint32_t DecodeFixed32(const char* ptr) { inline uint64_t DecodeFixed64(const char* ptr) { const uint8_t* const buffer = reinterpret_cast(ptr); - if (port::kLittleEndian) { - // Fast path for little-endian CPUs. All major compilers optimize this to a - // single mov (x86_64) / ldr (ARM) instruction. 
- uint64_t result; - std::memcpy(&result, buffer, sizeof(uint64_t)); - return result; - } - - // Platform-independent code. - // Clang and gcc optimize this to a single mov / ldr instruction. + // Recent clang and gcc optimize this to a single mov / ldr instruction. return (static_cast(buffer[0])) | (static_cast(buffer[1]) << 8) | (static_cast(buffer[2]) << 16) | From 10bc0f2595b8672c0c1756f22051ec420036fdf2 Mon Sep 17 00:00:00 2001 From: lntotk Date: Fri, 24 Apr 2020 02:00:12 +0000 Subject: [PATCH 19/68] remove unnessary status judge --- table/table.cc | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/table/table.cc b/table/table.cc index b07bc88..29e835f 100644 --- a/table/table.cc +++ b/table/table.cc @@ -54,13 +54,11 @@ Status Table::Open(const Options& options, RandomAccessFile* file, // Read the index block BlockContents index_block_contents; - if (s.ok()) { - ReadOptions opt; - if (options.paranoid_checks) { - opt.verify_checksums = true; - } - s = ReadBlock(file, opt, footer.index_handle(), &index_block_contents); + ReadOptions opt; + if (options.paranoid_checks) { + opt.verify_checksums = true; } + s = ReadBlock(file, opt, footer.index_handle(), &index_block_contents); if (s.ok()) { // We've successfully read the footer and the index block: we're From 98a3b8cf6531220c5ecfe124ebfe7d29deb1251b Mon Sep 17 00:00:00 2001 From: leveldb Team Date: Fri, 17 Apr 2020 21:19:50 +0000 Subject: [PATCH 20/68] change const to constexpr PiperOrigin-RevId: 307113877 --- db/skiplist_test.cc | 3 +-- util/cache_test.cc | 2 +- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/db/skiplist_test.cc b/db/skiplist_test.cc index 7c5d09b..9c70c5b 100644 --- a/db/skiplist_test.cc +++ b/db/skiplist_test.cc @@ -151,7 +151,7 @@ TEST(SkipTest, InsertAndLookup) { // been concurrently added since the iterator started. class ConcurrentTest { private: - static const uint32_t K = 4; + static constexpr uint32_t K = 4; static uint64_t key(Key key) { return (key >> 40); } static uint64_t gen(Key key) { return (key >> 8) & 0xffffffffu; } @@ -280,7 +280,6 @@ class ConcurrentTest { } } }; -const uint32_t ConcurrentTest::K; // Simple test that does single-threaded testing of the ConcurrentTest // scaffolding. diff --git a/util/cache_test.cc b/util/cache_test.cc index b5d9873..79cfc27 100644 --- a/util/cache_test.cc +++ b/util/cache_test.cc @@ -31,7 +31,7 @@ class CacheTest : public testing::Test { current_->deleted_values_.push_back(DecodeValue(v)); } - static const int kCacheSize = 1000; + static constexpr int kCacheSize = 1000; std::vector deleted_keys_; std::vector deleted_values_; Cache* cache_; From 23d67e7c1f4396919bd0c73c0eced13a0dac37f3 Mon Sep 17 00:00:00 2001 From: Victor Costan Date: Tue, 28 Apr 2020 16:41:33 +0000 Subject: [PATCH 21/68] Fix C++11 build. PiperOrigin-RevId: 308839805 --- db/skiplist_test.cc | 3 +++ 1 file changed, 3 insertions(+) diff --git a/db/skiplist_test.cc b/db/skiplist_test.cc index 9c70c5b..b548017 100644 --- a/db/skiplist_test.cc +++ b/db/skiplist_test.cc @@ -281,6 +281,9 @@ class ConcurrentTest { } }; +// Needed when building in C++11 mode. +constexpr uint32_t ConcurrentTest::K; + // Simple test that does single-threaded testing of the ConcurrentTest // scaffolding. TEST(SkipTest, ConcurrentWithoutThreads) { From 3f934e3705444a3df80b128ddefc4cf440441ffe Mon Sep 17 00:00:00 2001 From: Victor Costan Date: Wed, 29 Apr 2020 19:59:39 +0000 Subject: [PATCH 22/68] Switch from C headers to C++ headers. This CL makes the following substitutions. 
* assert.h -> cassert * math.h -> cmath * stdarg.h -> cstdarg * stddef.h -> cstddef * stdint.h -> cstdint * stdio.h -> cstdio * stdlib.h -> cstdlib * string.h -> cstring PiperOrigin-RevId: 309080151 --- benchmarks/db_bench.cc | 5 +++-- benchmarks/db_bench_sqlite3.cc | 5 +++-- benchmarks/db_bench_tree_db.cc | 5 +++-- db/db_impl.cc | 5 ++--- db/db_iter.h | 2 +- db/dbformat.cc | 3 +-- db/dumpfile.cc | 2 +- db/filename.cc | 4 ++-- db/filename.h | 3 +-- db/leveldbutil.cc | 2 +- db/log_reader.cc | 2 +- db/log_reader.h | 2 +- db/log_writer.cc | 2 +- db/log_writer.h | 2 +- db/table_cache.h | 3 +-- db/version_set.cc | 3 +-- helpers/memenv/memenv.cc | 3 +-- include/leveldb/cache.h | 2 +- include/leveldb/db.h | 4 ++-- include/leveldb/env.h | 5 ++--- include/leveldb/options.h | 2 +- include/leveldb/slice.h | 7 +++---- include/leveldb/table.h | 2 +- include/leveldb/table_builder.h | 2 +- table/block.h | 4 ++-- table/block_builder.cc | 3 +-- table/block_builder.h | 3 +-- table/filter_block.h | 5 ++--- table/format.h | 3 +-- table/table_builder.cc | 2 +- util/cache.cc | 9 +++++---- util/crc32c.cc | 4 ++-- util/crc32c.h | 4 ++-- util/hash.cc | 2 +- util/hash.h | 4 ++-- util/histogram.cc | 4 ++-- util/logging.cc | 8 +++----- util/logging.h | 5 ++--- util/random.h | 2 +- util/status.cc | 2 +- 40 files changed, 65 insertions(+), 76 deletions(-) diff --git a/benchmarks/db_bench.cc b/benchmarks/db_bench.cc index 82ed892..3dcd751 100644 --- a/benchmarks/db_bench.cc +++ b/benchmarks/db_bench.cc @@ -2,10 +2,11 @@ // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. See the AUTHORS file for names of contributors. -#include -#include #include +#include +#include + #include "leveldb/cache.h" #include "leveldb/db.h" #include "leveldb/env.h" diff --git a/benchmarks/db_bench_sqlite3.cc b/benchmarks/db_bench_sqlite3.cc index 9c32a2d..2563481 100644 --- a/benchmarks/db_bench_sqlite3.cc +++ b/benchmarks/db_bench_sqlite3.cc @@ -3,8 +3,9 @@ // found in the LICENSE file. See the AUTHORS file for names of contributors. #include -#include -#include + +#include +#include #include "util/histogram.h" #include "util/random.h" diff --git a/benchmarks/db_bench_tree_db.cc b/benchmarks/db_bench_tree_db.cc index 43f0f65..60ab3b0 100644 --- a/benchmarks/db_bench_tree_db.cc +++ b/benchmarks/db_bench_tree_db.cc @@ -3,8 +3,9 @@ // found in the LICENSE file. See the AUTHORS file for names of contributors. 
#include -#include -#include + +#include +#include #include "util/histogram.h" #include "util/random.h" diff --git a/db/db_impl.cc b/db/db_impl.cc index ba0a46d..ca53485 100644 --- a/db/db_impl.cc +++ b/db/db_impl.cc @@ -4,11 +4,10 @@ #include "db/db_impl.h" -#include -#include - #include #include +#include +#include #include #include #include diff --git a/db/db_iter.h b/db/db_iter.h index fd93e91..5977fc8 100644 --- a/db/db_iter.h +++ b/db/db_iter.h @@ -5,7 +5,7 @@ #ifndef STORAGE_LEVELDB_DB_DB_ITER_H_ #define STORAGE_LEVELDB_DB_DB_ITER_H_ -#include +#include #include "db/dbformat.h" #include "leveldb/db.h" diff --git a/db/dbformat.cc b/db/dbformat.cc index 459eddf..019aa92 100644 --- a/db/dbformat.cc +++ b/db/dbformat.cc @@ -4,8 +4,7 @@ #include "db/dbformat.h" -#include - +#include #include #include "port/port.h" diff --git a/db/dumpfile.cc b/db/dumpfile.cc index 77d5900..6085475 100644 --- a/db/dumpfile.cc +++ b/db/dumpfile.cc @@ -4,7 +4,7 @@ #include "leveldb/dumpfile.h" -#include +#include #include "db/dbformat.h" #include "db/filename.h" diff --git a/db/filename.cc b/db/filename.cc index 9b451fc..f6bec00 100644 --- a/db/filename.cc +++ b/db/filename.cc @@ -4,8 +4,8 @@ #include "db/filename.h" -#include -#include +#include +#include #include "db/dbformat.h" #include "leveldb/env.h" diff --git a/db/filename.h b/db/filename.h index 524e813..563c6d8 100644 --- a/db/filename.h +++ b/db/filename.h @@ -7,8 +7,7 @@ #ifndef STORAGE_LEVELDB_DB_FILENAME_H_ #define STORAGE_LEVELDB_DB_FILENAME_H_ -#include - +#include #include #include "leveldb/slice.h" diff --git a/db/leveldbutil.cc b/db/leveldbutil.cc index 55cdcc5..8e94abd 100644 --- a/db/leveldbutil.cc +++ b/db/leveldbutil.cc @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. See the AUTHORS file for names of contributors. 
-#include +#include #include "leveldb/dumpfile.h" #include "leveldb/env.h" diff --git a/db/log_reader.cc b/db/log_reader.cc index b770fee..dcd4b75 100644 --- a/db/log_reader.cc +++ b/db/log_reader.cc @@ -4,7 +4,7 @@ #include "db/log_reader.h" -#include +#include #include "leveldb/env.h" #include "util/coding.h" diff --git a/db/log_reader.h b/db/log_reader.h index 001da89..75d53f7 100644 --- a/db/log_reader.h +++ b/db/log_reader.h @@ -5,7 +5,7 @@ #ifndef STORAGE_LEVELDB_DB_LOG_READER_H_ #define STORAGE_LEVELDB_DB_LOG_READER_H_ -#include +#include #include "db/log_format.h" #include "leveldb/slice.h" diff --git a/db/log_writer.cc b/db/log_writer.cc index bfb16fb..ad66bfb 100644 --- a/db/log_writer.cc +++ b/db/log_writer.cc @@ -4,7 +4,7 @@ #include "db/log_writer.h" -#include +#include #include "leveldb/env.h" #include "util/coding.h" diff --git a/db/log_writer.h b/db/log_writer.h index c0a2114..ad36794 100644 --- a/db/log_writer.h +++ b/db/log_writer.h @@ -5,7 +5,7 @@ #ifndef STORAGE_LEVELDB_DB_LOG_WRITER_H_ #define STORAGE_LEVELDB_DB_LOG_WRITER_H_ -#include +#include #include "db/log_format.h" #include "leveldb/slice.h" diff --git a/db/table_cache.h b/db/table_cache.h index 93069c8..aac9bfc 100644 --- a/db/table_cache.h +++ b/db/table_cache.h @@ -7,8 +7,7 @@ #ifndef STORAGE_LEVELDB_DB_TABLE_CACHE_H_ #define STORAGE_LEVELDB_DB_TABLE_CACHE_H_ -#include - +#include #include #include "db/dbformat.h" diff --git a/db/version_set.cc b/db/version_set.cc index 2d5e51a..f23ae14 100644 --- a/db/version_set.cc +++ b/db/version_set.cc @@ -4,9 +4,8 @@ #include "db/version_set.h" -#include - #include +#include #include "db/filename.h" #include "db/log_reader.h" diff --git a/helpers/memenv/memenv.cc b/helpers/memenv/memenv.cc index 383c78b..0da4e76 100644 --- a/helpers/memenv/memenv.cc +++ b/helpers/memenv/memenv.cc @@ -4,8 +4,7 @@ #include "helpers/memenv/memenv.h" -#include - +#include #include #include #include diff --git a/include/leveldb/cache.h b/include/leveldb/cache.h index 7d1a221..98c95ac 100644 --- a/include/leveldb/cache.h +++ b/include/leveldb/cache.h @@ -18,7 +18,7 @@ #ifndef STORAGE_LEVELDB_INCLUDE_CACHE_H_ #define STORAGE_LEVELDB_INCLUDE_CACHE_H_ -#include +#include #include "leveldb/export.h" #include "leveldb/slice.h" diff --git a/include/leveldb/db.h b/include/leveldb/db.h index b73014a..2a995ec 100644 --- a/include/leveldb/db.h +++ b/include/leveldb/db.h @@ -5,8 +5,8 @@ #ifndef STORAGE_LEVELDB_INCLUDE_DB_H_ #define STORAGE_LEVELDB_INCLUDE_DB_H_ -#include -#include +#include +#include #include "leveldb/export.h" #include "leveldb/iterator.h" diff --git a/include/leveldb/env.h b/include/leveldb/env.h index 6501fa4..3ef0393 100644 --- a/include/leveldb/env.h +++ b/include/leveldb/env.h @@ -13,9 +13,8 @@ #ifndef STORAGE_LEVELDB_INCLUDE_ENV_H_ #define STORAGE_LEVELDB_INCLUDE_ENV_H_ -#include -#include - +#include +#include #include #include diff --git a/include/leveldb/options.h b/include/leveldb/options.h index b748772..0f285bc 100644 --- a/include/leveldb/options.h +++ b/include/leveldb/options.h @@ -5,7 +5,7 @@ #ifndef STORAGE_LEVELDB_INCLUDE_OPTIONS_H_ #define STORAGE_LEVELDB_INCLUDE_OPTIONS_H_ -#include +#include #include "leveldb/export.h" diff --git a/include/leveldb/slice.h b/include/leveldb/slice.h index 2df417d..37cb821 100644 --- a/include/leveldb/slice.h +++ b/include/leveldb/slice.h @@ -15,10 +15,9 @@ #ifndef STORAGE_LEVELDB_INCLUDE_SLICE_H_ #define STORAGE_LEVELDB_INCLUDE_SLICE_H_ -#include -#include -#include - +#include +#include +#include #include #include "leveldb/export.h" 
diff --git a/include/leveldb/table.h b/include/leveldb/table.h index 25c6013..a30e903 100644 --- a/include/leveldb/table.h +++ b/include/leveldb/table.h @@ -5,7 +5,7 @@ #ifndef STORAGE_LEVELDB_INCLUDE_TABLE_H_ #define STORAGE_LEVELDB_INCLUDE_TABLE_H_ -#include +#include #include "leveldb/export.h" #include "leveldb/iterator.h" diff --git a/include/leveldb/table_builder.h b/include/leveldb/table_builder.h index 7d8896b..85710c3 100644 --- a/include/leveldb/table_builder.h +++ b/include/leveldb/table_builder.h @@ -13,7 +13,7 @@ #ifndef STORAGE_LEVELDB_INCLUDE_TABLE_BUILDER_H_ #define STORAGE_LEVELDB_INCLUDE_TABLE_BUILDER_H_ -#include +#include #include "leveldb/export.h" #include "leveldb/options.h" diff --git a/table/block.h b/table/block.h index c8f1f7b..5224108 100644 --- a/table/block.h +++ b/table/block.h @@ -5,8 +5,8 @@ #ifndef STORAGE_LEVELDB_TABLE_BLOCK_H_ #define STORAGE_LEVELDB_TABLE_BLOCK_H_ -#include -#include +#include +#include #include "leveldb/iterator.h" diff --git a/table/block_builder.cc b/table/block_builder.cc index 919cff5..37d4008 100644 --- a/table/block_builder.cc +++ b/table/block_builder.cc @@ -28,9 +28,8 @@ #include "table/block_builder.h" -#include - #include +#include #include "leveldb/comparator.h" #include "leveldb/options.h" diff --git a/table/block_builder.h b/table/block_builder.h index f91f5e6..7a481cd 100644 --- a/table/block_builder.h +++ b/table/block_builder.h @@ -5,8 +5,7 @@ #ifndef STORAGE_LEVELDB_TABLE_BLOCK_BUILDER_H_ #define STORAGE_LEVELDB_TABLE_BLOCK_BUILDER_H_ -#include - +#include #include #include "leveldb/slice.h" diff --git a/table/filter_block.h b/table/filter_block.h index 73b5399..25ab75b 100644 --- a/table/filter_block.h +++ b/table/filter_block.h @@ -9,9 +9,8 @@ #ifndef STORAGE_LEVELDB_TABLE_FILTER_BLOCK_H_ #define STORAGE_LEVELDB_TABLE_FILTER_BLOCK_H_ -#include -#include - +#include +#include #include #include diff --git a/table/format.h b/table/format.h index e49dfdc..f6ea304 100644 --- a/table/format.h +++ b/table/format.h @@ -5,8 +5,7 @@ #ifndef STORAGE_LEVELDB_TABLE_FORMAT_H_ #define STORAGE_LEVELDB_TABLE_FORMAT_H_ -#include - +#include #include #include "leveldb/slice.h" diff --git a/table/table_builder.cc b/table/table_builder.cc index 278febf..29a619d 100644 --- a/table/table_builder.cc +++ b/table/table_builder.cc @@ -4,7 +4,7 @@ #include "leveldb/table_builder.h" -#include +#include #include "leveldb/comparator.h" #include "leveldb/env.h" diff --git a/util/cache.cc b/util/cache.cc index 12de306..509e5eb 100644 --- a/util/cache.cc +++ b/util/cache.cc @@ -2,11 +2,12 @@ // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. See the AUTHORS file for names of contributors. 
-#include -#include -#include - #include "leveldb/cache.h" + +#include +#include +#include + #include "port/port.h" #include "port/thread_annotations.h" #include "util/hash.h" diff --git a/util/crc32c.cc b/util/crc32c.cc index c2e61f7..3f18908 100644 --- a/util/crc32c.cc +++ b/util/crc32c.cc @@ -6,8 +6,8 @@ #include "util/crc32c.h" -#include -#include +#include +#include #include "port/port.h" #include "util/coding.h" diff --git a/util/crc32c.h b/util/crc32c.h index 98fabb0..b420b5f 100644 --- a/util/crc32c.h +++ b/util/crc32c.h @@ -5,8 +5,8 @@ #ifndef STORAGE_LEVELDB_UTIL_CRC32C_H_ #define STORAGE_LEVELDB_UTIL_CRC32C_H_ -#include -#include +#include +#include namespace leveldb { namespace crc32c { diff --git a/util/hash.cc b/util/hash.cc index dd47c11..8122fa8 100644 --- a/util/hash.cc +++ b/util/hash.cc @@ -4,7 +4,7 @@ #include "util/hash.h" -#include +#include #include "util/coding.h" diff --git a/util/hash.h b/util/hash.h index 74bdb6e..87ab279 100644 --- a/util/hash.h +++ b/util/hash.h @@ -7,8 +7,8 @@ #ifndef STORAGE_LEVELDB_UTIL_HASH_H_ #define STORAGE_LEVELDB_UTIL_HASH_H_ -#include -#include +#include +#include namespace leveldb { diff --git a/util/histogram.cc b/util/histogram.cc index 65092c8..d110d28 100644 --- a/util/histogram.cc +++ b/util/histogram.cc @@ -4,8 +4,8 @@ #include "util/histogram.h" -#include -#include +#include +#include #include "port/port.h" diff --git a/util/logging.cc b/util/logging.cc index 75e9d03..39d8551 100644 --- a/util/logging.cc +++ b/util/logging.cc @@ -4,11 +4,9 @@ #include "util/logging.h" -#include -#include -#include -#include - +#include +#include +#include #include #include "leveldb/env.h" diff --git a/util/logging.h b/util/logging.h index 8ff2da8..a0394b2 100644 --- a/util/logging.h +++ b/util/logging.h @@ -8,9 +8,8 @@ #ifndef STORAGE_LEVELDB_UTIL_LOGGING_H_ #define STORAGE_LEVELDB_UTIL_LOGGING_H_ -#include -#include - +#include +#include #include #include "port/port.h" diff --git a/util/random.h b/util/random.h index 76f7daf..fe76ab4 100644 --- a/util/random.h +++ b/util/random.h @@ -5,7 +5,7 @@ #ifndef STORAGE_LEVELDB_UTIL_RANDOM_H_ #define STORAGE_LEVELDB_UTIL_RANDOM_H_ -#include +#include namespace leveldb { diff --git a/util/status.cc b/util/status.cc index 15ce747..6b6528b 100644 --- a/util/status.cc +++ b/util/status.cc @@ -4,7 +4,7 @@ #include "leveldb/status.h" -#include +#include #include "port/port.h" From a6b3a2012e9c598258a295aef74d88b796c47a2b Mon Sep 17 00:00:00 2001 From: Victor Costan Date: Wed, 29 Apr 2020 22:31:41 +0000 Subject: [PATCH 23/68] Add some std:: qualifiers to types and functions. 
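The C standard library wrapper headers (<cstdio>, <cstring>, <cstdlib>, ...) are
only guaranteed to declare these functions in namespace std; whether the
unqualified global names are also visible is unspecified. Qualifying the calls
keeps the code well-defined regardless of which header form a translation unit
ends up including. A minimal illustrative sketch of the pattern (the function
and values below are made up for illustration, not taken from the diff that
follows):

    #include <cstdio>  // C++ form of <stdio.h>; declares std::fprintf and std::fflush

    void ReportProgress(int done) {
      // std::fprintf is always declared by <cstdio>; plain ::fprintf may not be.
      std::fprintf(stderr, "... finished %d ops\r", done);
      std::fflush(stderr);
    }
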
PiperOrigin-RevId: 309110431 --- benchmarks/db_bench.cc | 136 ++++++++++++++++++++++------------------- benchmarks/db_bench_sqlite3.cc | 98 +++++++++++++++-------------- benchmarks/db_bench_tree_db.cc | 98 +++++++++++++++-------------- db/autocompact_test.cc | 6 +- db/c.cc | 4 +- db/corruption_test.cc | 11 ++-- db/db_impl.cc | 30 ++++----- db/db_iter.cc | 4 +- db/db_test.cc | 73 +++++++++++----------- db/dbformat.cc | 2 +- db/fault_injection_test.cc | 4 +- db/filename.cc | 8 +-- db/leveldbutil.cc | 9 +-- db/log_reader.cc | 2 +- db/log_test.cc | 2 +- db/memtable.cc | 4 +- db/recovery_test.cc | 13 ++-- db/repair.cc | 3 +- db/skiplist_test.cc | 2 +- db/version_set.cc | 19 +++--- helpers/memenv/memenv.cc | 6 +- include/leveldb/env.h | 2 +- issues/issue178_test.cc | 2 +- table/table_test.cc | 32 +++++----- util/bloom_test.cc | 15 ++--- util/cache.cc | 2 +- util/env.cc | 4 +- util/env_posix_test.cc | 6 +- util/env_windows_test.cc | 4 +- util/histogram.cc | 20 +++--- util/logging.cc | 6 +- util/posix_logger.h | 8 +-- util/status.cc | 16 ++--- util/windows_logger.h | 8 +-- 34 files changed, 345 insertions(+), 314 deletions(-) diff --git a/benchmarks/db_bench.cc b/benchmarks/db_bench.cc index 3dcd751..288b119 100644 --- a/benchmarks/db_bench.cc +++ b/benchmarks/db_bench.cc @@ -221,8 +221,8 @@ class Stats { double micros = now - last_op_finish_; hist_.Add(micros); if (micros > 20000) { - fprintf(stderr, "long op: %.1f micros%30s\r", micros, ""); - fflush(stderr); + std::fprintf(stderr, "long op: %.1f micros%30s\r", micros, ""); + std::fflush(stderr); } last_op_finish_ = now; } @@ -243,8 +243,8 @@ class Stats { next_report_ += 50000; else next_report_ += 100000; - fprintf(stderr, "... finished %d ops%30s\r", done_, ""); - fflush(stderr); + std::fprintf(stderr, "... finished %d ops%30s\r", done_, ""); + std::fflush(stderr); } } @@ -261,18 +261,20 @@ class Stats { // elapsed times. double elapsed = (finish_ - start_) * 1e-6; char rate[100]; - snprintf(rate, sizeof(rate), "%6.1f MB/s", - (bytes_ / 1048576.0) / elapsed); + std::snprintf(rate, sizeof(rate), "%6.1f MB/s", + (bytes_ / 1048576.0) / elapsed); extra = rate; } AppendWithSpace(&extra, message_); - fprintf(stdout, "%-12s : %11.3f micros/op;%s%s\n", name.ToString().c_str(), - seconds_ * 1e6 / done_, (extra.empty() ? "" : " "), extra.c_str()); + std::fprintf(stdout, "%-12s : %11.3f micros/op;%s%s\n", + name.ToString().c_str(), seconds_ * 1e6 / done_, + (extra.empty() ? 
"" : " "), extra.c_str()); if (FLAGS_histogram) { - fprintf(stdout, "Microseconds per op:\n%s\n", hist_.ToString().c_str()); + std::fprintf(stdout, "Microseconds per op:\n%s\n", + hist_.ToString().c_str()); } - fflush(stdout); + std::fflush(stdout); } }; @@ -323,51 +325,55 @@ class Benchmark { void PrintHeader() { const int kKeySize = 16; PrintEnvironment(); - fprintf(stdout, "Keys: %d bytes each\n", kKeySize); - fprintf(stdout, "Values: %d bytes each (%d bytes after compression)\n", - FLAGS_value_size, - static_cast(FLAGS_value_size * FLAGS_compression_ratio + 0.5)); - fprintf(stdout, "Entries: %d\n", num_); - fprintf(stdout, "RawSize: %.1f MB (estimated)\n", - ((static_cast(kKeySize + FLAGS_value_size) * num_) / - 1048576.0)); - fprintf(stdout, "FileSize: %.1f MB (estimated)\n", - (((kKeySize + FLAGS_value_size * FLAGS_compression_ratio) * num_) / - 1048576.0)); + std::fprintf(stdout, "Keys: %d bytes each\n", kKeySize); + std::fprintf( + stdout, "Values: %d bytes each (%d bytes after compression)\n", + FLAGS_value_size, + static_cast(FLAGS_value_size * FLAGS_compression_ratio + 0.5)); + std::fprintf(stdout, "Entries: %d\n", num_); + std::fprintf(stdout, "RawSize: %.1f MB (estimated)\n", + ((static_cast(kKeySize + FLAGS_value_size) * num_) / + 1048576.0)); + std::fprintf( + stdout, "FileSize: %.1f MB (estimated)\n", + (((kKeySize + FLAGS_value_size * FLAGS_compression_ratio) * num_) / + 1048576.0)); PrintWarnings(); - fprintf(stdout, "------------------------------------------------\n"); + std::fprintf(stdout, "------------------------------------------------\n"); } void PrintWarnings() { #if defined(__GNUC__) && !defined(__OPTIMIZE__) - fprintf( + std::fprintf( stdout, "WARNING: Optimization is disabled: benchmarks unnecessarily slow\n"); #endif #ifndef NDEBUG - fprintf(stdout, - "WARNING: Assertions are enabled; benchmarks unnecessarily slow\n"); + std::fprintf( + stdout, + "WARNING: Assertions are enabled; benchmarks unnecessarily slow\n"); #endif // See if snappy is working by attempting to compress a compressible string const char text[] = "yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy"; std::string compressed; if (!port::Snappy_Compress(text, sizeof(text), &compressed)) { - fprintf(stdout, "WARNING: Snappy compression is not enabled\n"); + std::fprintf(stdout, "WARNING: Snappy compression is not enabled\n"); } else if (compressed.size() >= sizeof(text)) { - fprintf(stdout, "WARNING: Snappy compression is not effective\n"); + std::fprintf(stdout, "WARNING: Snappy compression is not effective\n"); } } void PrintEnvironment() { - fprintf(stderr, "LevelDB: version %d.%d\n", kMajorVersion, - kMinorVersion); + std::fprintf(stderr, "LevelDB: version %d.%d\n", kMajorVersion, + kMinorVersion); #if defined(__linux) time_t now = time(nullptr); - fprintf(stderr, "Date: %s", ctime(&now)); // ctime() adds newline + std::fprintf(stderr, "Date: %s", + ctime(&now)); // ctime() adds newline - FILE* cpuinfo = fopen("/proc/cpuinfo", "r"); + FILE* cpuinfo = std::fopen("/proc/cpuinfo", "r"); if (cpuinfo != nullptr) { char line[1000]; int num_cpus = 0; @@ -387,9 +393,9 @@ class Benchmark { cache_size = val.ToString(); } } - fclose(cpuinfo); - fprintf(stderr, "CPU: %d * %s\n", num_cpus, cpu_type.c_str()); - fprintf(stderr, "CPUCache: %s\n", cache_size.c_str()); + std::fclose(cpuinfo); + std::fprintf(stderr, "CPU: %d * %s\n", num_cpus, cpu_type.c_str()); + std::fprintf(stderr, "CPUCache: %s\n", cache_size.c_str()); } #endif } @@ -516,14 +522,15 @@ class Benchmark { PrintStats("leveldb.sstables"); } else { if 
(!name.empty()) { // No error message for empty name - fprintf(stderr, "unknown benchmark '%s'\n", name.ToString().c_str()); + std::fprintf(stderr, "unknown benchmark '%s'\n", + name.ToString().c_str()); } } if (fresh_db) { if (FLAGS_use_existing_db) { - fprintf(stdout, "%-12s : skipped (--use_existing_db is true)\n", - name.ToString().c_str()); + std::fprintf(stdout, "%-12s : skipped (--use_existing_db is true)\n", + name.ToString().c_str()); method = nullptr; } else { delete db_; @@ -625,7 +632,7 @@ class Benchmark { bytes += size; } // Print so result is not dead - fprintf(stderr, "... crc=0x%x\r", static_cast(crc)); + std::fprintf(stderr, "... crc=0x%x\r", static_cast(crc)); thread->stats.AddBytes(bytes); thread->stats.AddMessage(label); @@ -649,8 +656,8 @@ class Benchmark { thread->stats.AddMessage("(snappy failure)"); } else { char buf[100]; - snprintf(buf, sizeof(buf), "(output: %.1f%%)", - (produced * 100.0) / bytes); + std::snprintf(buf, sizeof(buf), "(output: %.1f%%)", + (produced * 100.0) / bytes); thread->stats.AddMessage(buf); thread->stats.AddBytes(bytes); } @@ -692,8 +699,8 @@ class Benchmark { options.reuse_logs = FLAGS_reuse_logs; Status s = DB::Open(options, FLAGS_db, &db_); if (!s.ok()) { - fprintf(stderr, "open error: %s\n", s.ToString().c_str()); - exit(1); + std::fprintf(stderr, "open error: %s\n", s.ToString().c_str()); + std::exit(1); } } @@ -712,7 +719,7 @@ class Benchmark { void DoWrite(ThreadState* thread, bool seq) { if (num_ != FLAGS_num) { char msg[100]; - snprintf(msg, sizeof(msg), "(%d ops)", num_); + std::snprintf(msg, sizeof(msg), "(%d ops)", num_); thread->stats.AddMessage(msg); } @@ -725,15 +732,15 @@ class Benchmark { for (int j = 0; j < entries_per_batch_; j++) { const int k = seq ? i + j : (thread->rand.Next() % FLAGS_num); char key[100]; - snprintf(key, sizeof(key), "%016d", k); + std::snprintf(key, sizeof(key), "%016d", k); batch.Put(key, gen.Generate(value_size_)); bytes += value_size_ + strlen(key); thread->stats.FinishedSingleOp(); } s = db_->Write(write_options_, &batch); if (!s.ok()) { - fprintf(stderr, "put error: %s\n", s.ToString().c_str()); - exit(1); + std::fprintf(stderr, "put error: %s\n", s.ToString().c_str()); + std::exit(1); } } thread->stats.AddBytes(bytes); @@ -772,14 +779,14 @@ class Benchmark { for (int i = 0; i < reads_; i++) { char key[100]; const int k = thread->rand.Next() % FLAGS_num; - snprintf(key, sizeof(key), "%016d", k); + std::snprintf(key, sizeof(key), "%016d", k); if (db_->Get(options, key, &value).ok()) { found++; } thread->stats.FinishedSingleOp(); } char msg[100]; - snprintf(msg, sizeof(msg), "(%d of %d found)", found, num_); + std::snprintf(msg, sizeof(msg), "(%d of %d found)", found, num_); thread->stats.AddMessage(msg); } @@ -789,7 +796,7 @@ class Benchmark { for (int i = 0; i < reads_; i++) { char key[100]; const int k = thread->rand.Next() % FLAGS_num; - snprintf(key, sizeof(key), "%016d.", k); + std::snprintf(key, sizeof(key), "%016d.", k); db_->Get(options, key, &value); thread->stats.FinishedSingleOp(); } @@ -802,7 +809,7 @@ class Benchmark { for (int i = 0; i < reads_; i++) { char key[100]; const int k = thread->rand.Next() % range; - snprintf(key, sizeof(key), "%016d", k); + std::snprintf(key, sizeof(key), "%016d", k); db_->Get(options, key, &value); thread->stats.FinishedSingleOp(); } @@ -815,14 +822,14 @@ class Benchmark { Iterator* iter = db_->NewIterator(options); char key[100]; const int k = thread->rand.Next() % FLAGS_num; - snprintf(key, sizeof(key), "%016d", k); + std::snprintf(key, sizeof(key), 
"%016d", k); iter->Seek(key); if (iter->Valid() && iter->key() == key) found++; delete iter; thread->stats.FinishedSingleOp(); } char msg[100]; - snprintf(msg, sizeof(msg), "(%d of %d found)", found, num_); + std::snprintf(msg, sizeof(msg), "(%d of %d found)", found, num_); thread->stats.AddMessage(msg); } @@ -835,14 +842,14 @@ class Benchmark { for (int j = 0; j < entries_per_batch_; j++) { const int k = seq ? i + j : (thread->rand.Next() % FLAGS_num); char key[100]; - snprintf(key, sizeof(key), "%016d", k); + std::snprintf(key, sizeof(key), "%016d", k); batch.Delete(key); thread->stats.FinishedSingleOp(); } s = db_->Write(write_options_, &batch); if (!s.ok()) { - fprintf(stderr, "del error: %s\n", s.ToString().c_str()); - exit(1); + std::fprintf(stderr, "del error: %s\n", s.ToString().c_str()); + std::exit(1); } } } @@ -868,11 +875,11 @@ class Benchmark { const int k = thread->rand.Next() % FLAGS_num; char key[100]; - snprintf(key, sizeof(key), "%016d", k); + std::snprintf(key, sizeof(key), "%016d", k); Status s = db_->Put(write_options_, key, gen.Generate(value_size_)); if (!s.ok()) { - fprintf(stderr, "put error: %s\n", s.ToString().c_str()); - exit(1); + std::fprintf(stderr, "put error: %s\n", s.ToString().c_str()); + std::exit(1); } } @@ -888,7 +895,7 @@ class Benchmark { if (!db_->GetProperty(key, &stats)) { stats = "(failed)"; } - fprintf(stdout, "\n%s\n", stats.c_str()); + std::fprintf(stdout, "\n%s\n", stats.c_str()); } static void WriteToFile(void* arg, const char* buf, int n) { @@ -897,17 +904,18 @@ class Benchmark { void HeapProfile() { char fname[100]; - snprintf(fname, sizeof(fname), "%s/heap-%04d", FLAGS_db, ++heap_counter_); + std::snprintf(fname, sizeof(fname), "%s/heap-%04d", FLAGS_db, + ++heap_counter_); WritableFile* file; Status s = g_env->NewWritableFile(fname, &file); if (!s.ok()) { - fprintf(stderr, "%s\n", s.ToString().c_str()); + std::fprintf(stderr, "%s\n", s.ToString().c_str()); return; } bool ok = port::GetHeapProfile(WriteToFile, file); delete file; if (!ok) { - fprintf(stderr, "heap profiling not supported\n"); + std::fprintf(stderr, "heap profiling not supported\n"); g_env->RemoveFile(fname); } } @@ -962,8 +970,8 @@ int main(int argc, char** argv) { } else if (strncmp(argv[i], "--db=", 5) == 0) { FLAGS_db = argv[i] + 5; } else { - fprintf(stderr, "Invalid flag '%s'\n", argv[i]); - exit(1); + std::fprintf(stderr, "Invalid flag '%s'\n", argv[i]); + std::exit(1); } } diff --git a/benchmarks/db_bench_sqlite3.cc b/benchmarks/db_bench_sqlite3.cc index 2563481..c9be652 100644 --- a/benchmarks/db_bench_sqlite3.cc +++ b/benchmarks/db_bench_sqlite3.cc @@ -84,23 +84,23 @@ static const char* FLAGS_db = nullptr; inline static void ExecErrorCheck(int status, char* err_msg) { if (status != SQLITE_OK) { - fprintf(stderr, "SQL error: %s\n", err_msg); + std::fprintf(stderr, "SQL error: %s\n", err_msg); sqlite3_free(err_msg); - exit(1); + std::exit(1); } } inline static void StepErrorCheck(int status) { if (status != SQLITE_DONE) { - fprintf(stderr, "SQL step error: status = %d\n", status); - exit(1); + std::fprintf(stderr, "SQL step error: status = %d\n", status); + std::exit(1); } } inline static void ErrorCheck(int status) { if (status != SQLITE_OK) { - fprintf(stderr, "sqlite3 error: status = %d\n", status); - exit(1); + std::fprintf(stderr, "sqlite3 error: status = %d\n", status); + std::exit(1); } } @@ -182,36 +182,38 @@ class Benchmark { void PrintHeader() { const int kKeySize = 16; PrintEnvironment(); - fprintf(stdout, "Keys: %d bytes each\n", kKeySize); - 
fprintf(stdout, "Values: %d bytes each\n", FLAGS_value_size); - fprintf(stdout, "Entries: %d\n", num_); - fprintf(stdout, "RawSize: %.1f MB (estimated)\n", - ((static_cast(kKeySize + FLAGS_value_size) * num_) / - 1048576.0)); + std::fprintf(stdout, "Keys: %d bytes each\n", kKeySize); + std::fprintf(stdout, "Values: %d bytes each\n", FLAGS_value_size); + std::fprintf(stdout, "Entries: %d\n", num_); + std::fprintf(stdout, "RawSize: %.1f MB (estimated)\n", + ((static_cast(kKeySize + FLAGS_value_size) * num_) / + 1048576.0)); PrintWarnings(); - fprintf(stdout, "------------------------------------------------\n"); + std::fprintf(stdout, "------------------------------------------------\n"); } void PrintWarnings() { #if defined(__GNUC__) && !defined(__OPTIMIZE__) - fprintf( + std::fprintf( stdout, "WARNING: Optimization is disabled: benchmarks unnecessarily slow\n"); #endif #ifndef NDEBUG - fprintf(stdout, - "WARNING: Assertions are enabled; benchmarks unnecessarily slow\n"); + std::fprintf( + stdout, + "WARNING: Assertions are enabled; benchmarks unnecessarily slow\n"); #endif } void PrintEnvironment() { - fprintf(stderr, "SQLite: version %s\n", SQLITE_VERSION); + std::fprintf(stderr, "SQLite: version %s\n", SQLITE_VERSION); #if defined(__linux) time_t now = time(nullptr); - fprintf(stderr, "Date: %s", ctime(&now)); // ctime() adds newline + std::fprintf(stderr, "Date: %s", + ctime(&now)); // ctime() adds newline - FILE* cpuinfo = fopen("/proc/cpuinfo", "r"); + FILE* cpuinfo = std::fopen("/proc/cpuinfo", "r"); if (cpuinfo != nullptr) { char line[1000]; int num_cpus = 0; @@ -231,9 +233,9 @@ class Benchmark { cache_size = val.ToString(); } } - fclose(cpuinfo); - fprintf(stderr, "CPU: %d * %s\n", num_cpus, cpu_type.c_str()); - fprintf(stderr, "CPUCache: %s\n", cache_size.c_str()); + std::fclose(cpuinfo); + std::fprintf(stderr, "CPU: %d * %s\n", num_cpus, cpu_type.c_str()); + std::fprintf(stderr, "CPUCache: %s\n", cache_size.c_str()); } #endif } @@ -254,8 +256,8 @@ class Benchmark { double micros = (now - last_op_finish_) * 1e6; hist_.Add(micros); if (micros > 20000) { - fprintf(stderr, "long op: %.1f micros%30s\r", micros, ""); - fflush(stderr); + std::fprintf(stderr, "long op: %.1f micros%30s\r", micros, ""); + std::fflush(stderr); } last_op_finish_ = now; } @@ -276,8 +278,8 @@ class Benchmark { next_report_ += 50000; else next_report_ += 100000; - fprintf(stderr, "... finished %d ops%30s\r", done_, ""); - fflush(stderr); + std::fprintf(stderr, "... finished %d ops%30s\r", done_, ""); + std::fflush(stderr); } } @@ -290,8 +292,8 @@ class Benchmark { if (bytes_ > 0) { char rate[100]; - snprintf(rate, sizeof(rate), "%6.1f MB/s", - (bytes_ / 1048576.0) / (finish - start_)); + std::snprintf(rate, sizeof(rate), "%6.1f MB/s", + (bytes_ / 1048576.0) / (finish - start_)); if (!message_.empty()) { message_ = std::string(rate) + " " + message_; } else { @@ -299,13 +301,14 @@ class Benchmark { } } - fprintf(stdout, "%-12s : %11.3f micros/op;%s%s\n", name.ToString().c_str(), - (finish - start_) * 1e6 / done_, (message_.empty() ? "" : " "), - message_.c_str()); + std::fprintf(stdout, "%-12s : %11.3f micros/op;%s%s\n", + name.ToString().c_str(), (finish - start_) * 1e6 / done_, + (message_.empty() ? 
"" : " "), message_.c_str()); if (FLAGS_histogram) { - fprintf(stdout, "Microseconds per op:\n%s\n", hist_.ToString().c_str()); + std::fprintf(stdout, "Microseconds per op:\n%s\n", + hist_.ToString().c_str()); } - fflush(stdout); + std::fflush(stdout); } public: @@ -405,7 +408,8 @@ class Benchmark { } else { known = false; if (name != Slice()) { // No error message for empty name - fprintf(stderr, "unknown benchmark '%s'\n", name.ToString().c_str()); + std::fprintf(stderr, "unknown benchmark '%s'\n", + name.ToString().c_str()); } } if (known) { @@ -425,26 +429,26 @@ class Benchmark { // Open database std::string tmp_dir; Env::Default()->GetTestDirectory(&tmp_dir); - snprintf(file_name, sizeof(file_name), "%s/dbbench_sqlite3-%d.db", - tmp_dir.c_str(), db_num_); + std::snprintf(file_name, sizeof(file_name), "%s/dbbench_sqlite3-%d.db", + tmp_dir.c_str(), db_num_); status = sqlite3_open(file_name, &db_); if (status) { - fprintf(stderr, "open error: %s\n", sqlite3_errmsg(db_)); - exit(1); + std::fprintf(stderr, "open error: %s\n", sqlite3_errmsg(db_)); + std::exit(1); } // Change SQLite cache size char cache_size[100]; - snprintf(cache_size, sizeof(cache_size), "PRAGMA cache_size = %d", - FLAGS_num_pages); + std::snprintf(cache_size, sizeof(cache_size), "PRAGMA cache_size = %d", + FLAGS_num_pages); status = sqlite3_exec(db_, cache_size, nullptr, nullptr, &err_msg); ExecErrorCheck(status, err_msg); // FLAGS_page_size is defaulted to 1024 if (FLAGS_page_size != 1024) { char page_size[100]; - snprintf(page_size, sizeof(page_size), "PRAGMA page_size = %d", - FLAGS_page_size); + std::snprintf(page_size, sizeof(page_size), "PRAGMA page_size = %d", + FLAGS_page_size); status = sqlite3_exec(db_, page_size, nullptr, nullptr, &err_msg); ExecErrorCheck(status, err_msg); } @@ -492,7 +496,7 @@ class Benchmark { if (num_entries != num_) { char msg[100]; - snprintf(msg, sizeof(msg), "(%d ops)", num_entries); + std::snprintf(msg, sizeof(msg), "(%d ops)", num_entries); message_ = msg; } @@ -539,7 +543,7 @@ class Benchmark { const int k = (order == SEQUENTIAL) ? i + j : (rand_.Next() % num_entries); char key[100]; - snprintf(key, sizeof(key), "%016d", k); + std::snprintf(key, sizeof(key), "%016d", k); // Bind KV values into replace_stmt status = sqlite3_bind_blob(replace_stmt, 1, key, 16, SQLITE_STATIC); @@ -612,7 +616,7 @@ class Benchmark { // Create key value char key[100]; int k = (order == SEQUENTIAL) ? 
i + j : (rand_.Next() % reads_); - snprintf(key, sizeof(key), "%016d", k); + std::snprintf(key, sizeof(key), "%016d", k); // Bind key value into read_stmt status = sqlite3_bind_blob(read_stmt, 1, key, 16, SQLITE_STATIC); @@ -704,8 +708,8 @@ int main(int argc, char** argv) { } else if (strncmp(argv[i], "--db=", 5) == 0) { FLAGS_db = argv[i] + 5; } else { - fprintf(stderr, "Invalid flag '%s'\n", argv[i]); - exit(1); + std::fprintf(stderr, "Invalid flag '%s'\n", argv[i]); + std::exit(1); } } diff --git a/benchmarks/db_bench_tree_db.cc b/benchmarks/db_bench_tree_db.cc index 60ab3b0..533600b 100644 --- a/benchmarks/db_bench_tree_db.cc +++ b/benchmarks/db_bench_tree_db.cc @@ -75,7 +75,7 @@ static const char* FLAGS_db = nullptr; inline static void DBSynchronize(kyotocabinet::TreeDB* db_) { // Synchronize will flush writes to disk if (!db_->synchronize()) { - fprintf(stderr, "synchronize error: %s\n", db_->error().name()); + std::fprintf(stderr, "synchronize error: %s\n", db_->error().name()); } } @@ -150,42 +150,47 @@ class Benchmark { void PrintHeader() { const int kKeySize = 16; PrintEnvironment(); - fprintf(stdout, "Keys: %d bytes each\n", kKeySize); - fprintf(stdout, "Values: %d bytes each (%d bytes after compression)\n", - FLAGS_value_size, - static_cast(FLAGS_value_size * FLAGS_compression_ratio + 0.5)); - fprintf(stdout, "Entries: %d\n", num_); - fprintf(stdout, "RawSize: %.1f MB (estimated)\n", - ((static_cast(kKeySize + FLAGS_value_size) * num_) / - 1048576.0)); - fprintf(stdout, "FileSize: %.1f MB (estimated)\n", - (((kKeySize + FLAGS_value_size * FLAGS_compression_ratio) * num_) / - 1048576.0)); + std::fprintf(stdout, "Keys: %d bytes each\n", kKeySize); + std::fprintf( + stdout, "Values: %d bytes each (%d bytes after compression)\n", + FLAGS_value_size, + static_cast(FLAGS_value_size * FLAGS_compression_ratio + 0.5)); + std::fprintf(stdout, "Entries: %d\n", num_); + std::fprintf(stdout, "RawSize: %.1f MB (estimated)\n", + ((static_cast(kKeySize + FLAGS_value_size) * num_) / + 1048576.0)); + std::fprintf( + stdout, "FileSize: %.1f MB (estimated)\n", + (((kKeySize + FLAGS_value_size * FLAGS_compression_ratio) * num_) / + 1048576.0)); PrintWarnings(); - fprintf(stdout, "------------------------------------------------\n"); + std::fprintf(stdout, "------------------------------------------------\n"); } void PrintWarnings() { #if defined(__GNUC__) && !defined(__OPTIMIZE__) - fprintf( + std::fprintf( stdout, "WARNING: Optimization is disabled: benchmarks unnecessarily slow\n"); #endif #ifndef NDEBUG - fprintf(stdout, - "WARNING: Assertions are enabled; benchmarks unnecessarily slow\n"); + std::fprintf( + stdout, + "WARNING: Assertions are enabled; benchmarks unnecessarily slow\n"); #endif } void PrintEnvironment() { - fprintf(stderr, "Kyoto Cabinet: version %s, lib ver %d, lib rev %d\n", - kyotocabinet::VERSION, kyotocabinet::LIBVER, kyotocabinet::LIBREV); + std::fprintf( + stderr, "Kyoto Cabinet: version %s, lib ver %d, lib rev %d\n", + kyotocabinet::VERSION, kyotocabinet::LIBVER, kyotocabinet::LIBREV); #if defined(__linux) time_t now = time(nullptr); - fprintf(stderr, "Date: %s", ctime(&now)); // ctime() adds newline + std::fprintf(stderr, "Date: %s", + ctime(&now)); // ctime() adds newline - FILE* cpuinfo = fopen("/proc/cpuinfo", "r"); + FILE* cpuinfo = std::fopen("/proc/cpuinfo", "r"); if (cpuinfo != nullptr) { char line[1000]; int num_cpus = 0; @@ -205,9 +210,10 @@ class Benchmark { cache_size = val.ToString(); } } - fclose(cpuinfo); - fprintf(stderr, "CPU: %d * %s\n", num_cpus, 
cpu_type.c_str()); - fprintf(stderr, "CPUCache: %s\n", cache_size.c_str()); + std::fclose(cpuinfo); + std::fprintf(stderr, "CPU: %d * %s\n", num_cpus, + cpu_type.c_str()); + std::fprintf(stderr, "CPUCache: %s\n", cache_size.c_str()); } #endif } @@ -228,8 +234,8 @@ class Benchmark { double micros = (now - last_op_finish_) * 1e6; hist_.Add(micros); if (micros > 20000) { - fprintf(stderr, "long op: %.1f micros%30s\r", micros, ""); - fflush(stderr); + std::fprintf(stderr, "long op: %.1f micros%30s\r", micros, ""); + std::fflush(stderr); } last_op_finish_ = now; } @@ -250,8 +256,8 @@ class Benchmark { next_report_ += 50000; else next_report_ += 100000; - fprintf(stderr, "... finished %d ops%30s\r", done_, ""); - fflush(stderr); + std::fprintf(stderr, "... finished %d ops%30s\r", done_, ""); + std::fflush(stderr); } } @@ -264,8 +270,8 @@ class Benchmark { if (bytes_ > 0) { char rate[100]; - snprintf(rate, sizeof(rate), "%6.1f MB/s", - (bytes_ / 1048576.0) / (finish - start_)); + std::snprintf(rate, sizeof(rate), "%6.1f MB/s", + (bytes_ / 1048576.0) / (finish - start_)); if (!message_.empty()) { message_ = std::string(rate) + " " + message_; } else { @@ -273,13 +279,14 @@ class Benchmark { } } - fprintf(stdout, "%-12s : %11.3f micros/op;%s%s\n", name.ToString().c_str(), - (finish - start_) * 1e6 / done_, (message_.empty() ? "" : " "), - message_.c_str()); + std::fprintf(stdout, "%-12s : %11.3f micros/op;%s%s\n", + name.ToString().c_str(), (finish - start_) * 1e6 / done_, + (message_.empty() ? "" : " "), message_.c_str()); if (FLAGS_histogram) { - fprintf(stdout, "Microseconds per op:\n%s\n", hist_.ToString().c_str()); + std::fprintf(stdout, "Microseconds per op:\n%s\n", + hist_.ToString().c_str()); } - fflush(stdout); + std::fflush(stdout); } public: @@ -310,7 +317,7 @@ class Benchmark { ~Benchmark() { if (!db_->close()) { - fprintf(stderr, "close error: %s\n", db_->error().name()); + std::fprintf(stderr, "close error: %s\n", db_->error().name()); } } @@ -374,7 +381,8 @@ class Benchmark { } else { known = false; if (name != Slice()) { // No error message for empty name - fprintf(stderr, "unknown benchmark '%s'\n", name.ToString().c_str()); + std::fprintf(stderr, "unknown benchmark '%s'\n", + name.ToString().c_str()); } } if (known) { @@ -393,8 +401,8 @@ class Benchmark { db_num_++; std::string test_dir; Env::Default()->GetTestDirectory(&test_dir); - snprintf(file_name, sizeof(file_name), "%s/dbbench_polyDB-%d.kct", - test_dir.c_str(), db_num_); + std::snprintf(file_name, sizeof(file_name), "%s/dbbench_polyDB-%d.kct", + test_dir.c_str(), db_num_); // Create tuning options and open the database int open_options = @@ -413,7 +421,7 @@ class Benchmark { open_options |= kyotocabinet::PolyDB::OAUTOSYNC; } if (!db_->open(file_name, open_options)) { - fprintf(stderr, "open error: %s\n", db_->error().name()); + std::fprintf(stderr, "open error: %s\n", db_->error().name()); } } @@ -433,7 +441,7 @@ class Benchmark { if (num_entries != num_) { char msg[100]; - snprintf(msg, sizeof(msg), "(%d ops)", num_entries); + std::snprintf(msg, sizeof(msg), "(%d ops)", num_entries); message_ = msg; } @@ -441,11 +449,11 @@ class Benchmark { for (int i = 0; i < num_entries; i++) { const int k = (order == SEQUENTIAL) ? 
i : (rand_.Next() % num_entries); char key[100]; - snprintf(key, sizeof(key), "%016d", k); + std::snprintf(key, sizeof(key), "%016d", k); bytes_ += value_size + strlen(key); std::string cpp_key = key; if (!db_->set(cpp_key, gen_.Generate(value_size).ToString())) { - fprintf(stderr, "set error: %s\n", db_->error().name()); + std::fprintf(stderr, "set error: %s\n", db_->error().name()); } FinishedSingleOp(); } @@ -467,7 +475,7 @@ class Benchmark { for (int i = 0; i < reads_; i++) { char key[100]; const int k = rand_.Next() % reads_; - snprintf(key, sizeof(key), "%016d", k); + std::snprintf(key, sizeof(key), "%016d", k); db_->get(key, &value); FinishedSingleOp(); } @@ -505,8 +513,8 @@ int main(int argc, char** argv) { } else if (strncmp(argv[i], "--db=", 5) == 0) { FLAGS_db = argv[i] + 5; } else { - fprintf(stderr, "Invalid flag '%s'\n", argv[i]); - exit(1); + std::fprintf(stderr, "Invalid flag '%s'\n", argv[i]); + std::exit(1); } } diff --git a/db/autocompact_test.cc b/db/autocompact_test.cc index 9779c95..3b7241b 100644 --- a/db/autocompact_test.cc +++ b/db/autocompact_test.cc @@ -30,7 +30,7 @@ class AutoCompactTest : public testing::Test { std::string Key(int i) { char buf[100]; - snprintf(buf, sizeof(buf), "key%06d", i); + std::snprintf(buf, sizeof(buf), "key%06d", i); return std::string(buf); } @@ -89,8 +89,8 @@ void AutoCompactTest::DoReads(int n) { // Wait a little bit to allow any triggered compactions to complete. Env::Default()->SleepForMicroseconds(1000000); uint64_t size = Size(Key(0), Key(n)); - fprintf(stderr, "iter %3d => %7.3f MB [other %7.3f MB]\n", read + 1, - size / 1048576.0, Size(Key(n), Key(kCount)) / 1048576.0); + std::fprintf(stderr, "iter %3d => %7.3f MB [other %7.3f MB]\n", read + 1, + size / 1048576.0, Size(Key(n), Key(kCount)) / 1048576.0); if (size <= initial_size / 10) { break; } diff --git a/db/c.cc b/db/c.cc index 3a492f9..b5c9251 100644 --- a/db/c.cc +++ b/db/c.cc @@ -158,7 +158,7 @@ static bool SaveError(char** errptr, const Status& s) { static char* CopyString(const std::string& str) { char* result = reinterpret_cast(malloc(sizeof(char) * str.size())); - memcpy(result, str.data(), sizeof(char) * str.size()); + std::memcpy(result, str.data(), sizeof(char) * str.size()); return result; } @@ -548,7 +548,7 @@ char* leveldb_env_get_test_directory(leveldb_env_t* env) { } char* buffer = static_cast(malloc(result.size() + 1)); - memcpy(buffer, result.data(), result.size()); + std::memcpy(buffer, result.data(), result.size()); buffer[result.size()] = '\0'; return buffer; } diff --git a/db/corruption_test.cc b/db/corruption_test.cc index b22f9e7..a31f448 100644 --- a/db/corruption_test.cc +++ b/db/corruption_test.cc @@ -58,7 +58,7 @@ class CorruptionTest : public testing::Test { std::string key_space, value_space; WriteBatch batch; for (int i = 0; i < n; i++) { - // if ((i % 100) == 0) fprintf(stderr, "@ %d of %d\n", i, n); + // if ((i % 100) == 0) std::fprintf(stderr, "@ %d of %d\n", i, n); Slice key = Key(i, &key_space); batch.Clear(); batch.Put(key, Value(i, &value_space)); @@ -102,9 +102,10 @@ class CorruptionTest : public testing::Test { } delete iter; - fprintf(stderr, - "expected=%d..%d; got=%d; bad_keys=%d; bad_values=%d; missed=%d\n", - min_expected, max_expected, correct, bad_keys, bad_values, missed); + std::fprintf( + stderr, + "expected=%d..%d; got=%d; bad_keys=%d; bad_values=%d; missed=%d\n", + min_expected, max_expected, correct, bad_keys, bad_values, missed); ASSERT_LE(min_expected, correct); ASSERT_GE(max_expected, correct); } @@ -169,7 +170,7 @@ 
class CorruptionTest : public testing::Test { // Return the ith key Slice Key(int i, std::string* storage) { char buf[100]; - snprintf(buf, sizeof(buf), "%016d", i); + std::snprintf(buf, sizeof(buf), "%016d", i); storage->assign(buf, strlen(buf)); return Slice(*storage); } diff --git a/db/db_impl.cc b/db/db_impl.cc index ca53485..59b834f 100644 --- a/db/db_impl.cc +++ b/db/db_impl.cc @@ -350,8 +350,8 @@ Status DBImpl::Recover(VersionEdit* edit, bool* save_manifest) { } if (!expected.empty()) { char buf[50]; - snprintf(buf, sizeof(buf), "%d missing files; e.g.", - static_cast(expected.size())); + std::snprintf(buf, sizeof(buf), "%d missing files; e.g.", + static_cast(expected.size())); return Status::Corruption(buf, TableFileName(dbname_, *(expected.begin()))); } @@ -1396,26 +1396,26 @@ bool DBImpl::GetProperty(const Slice& property, std::string* value) { return false; } else { char buf[100]; - snprintf(buf, sizeof(buf), "%d", - versions_->NumLevelFiles(static_cast(level))); + std::snprintf(buf, sizeof(buf), "%d", + versions_->NumLevelFiles(static_cast(level))); *value = buf; return true; } } else if (in == "stats") { char buf[200]; - snprintf(buf, sizeof(buf), - " Compactions\n" - "Level Files Size(MB) Time(sec) Read(MB) Write(MB)\n" - "--------------------------------------------------\n"); + std::snprintf(buf, sizeof(buf), + " Compactions\n" + "Level Files Size(MB) Time(sec) Read(MB) Write(MB)\n" + "--------------------------------------------------\n"); value->append(buf); for (int level = 0; level < config::kNumLevels; level++) { int files = versions_->NumLevelFiles(level); if (stats_[level].micros > 0 || files > 0) { - snprintf(buf, sizeof(buf), "%3d %8d %8.0f %9.0f %8.0f %9.0f\n", level, - files, versions_->NumLevelBytes(level) / 1048576.0, - stats_[level].micros / 1e6, - stats_[level].bytes_read / 1048576.0, - stats_[level].bytes_written / 1048576.0); + std::snprintf(buf, sizeof(buf), "%3d %8d %8.0f %9.0f %8.0f %9.0f\n", + level, files, versions_->NumLevelBytes(level) / 1048576.0, + stats_[level].micros / 1e6, + stats_[level].bytes_read / 1048576.0, + stats_[level].bytes_written / 1048576.0); value->append(buf); } } @@ -1432,8 +1432,8 @@ bool DBImpl::GetProperty(const Slice& property, std::string* value) { total_usage += imm_->ApproximateMemoryUsage(); } char buf[50]; - snprintf(buf, sizeof(buf), "%llu", - static_cast(total_usage)); + std::snprintf(buf, sizeof(buf), "%llu", + static_cast(total_usage)); value->append(buf); return true; } diff --git a/db/db_iter.cc b/db/db_iter.cc index 98715a9..532c2db 100644 --- a/db/db_iter.cc +++ b/db/db_iter.cc @@ -21,9 +21,9 @@ static void DumpInternalIter(Iterator* iter) { for (iter->SeekToFirst(); iter->Valid(); iter->Next()) { ParsedInternalKey k; if (!ParseInternalKey(iter->key(), &k)) { - fprintf(stderr, "Corrupt '%s'\n", EscapeString(iter->key()).c_str()); + std::fprintf(stderr, "Corrupt '%s'\n", EscapeString(iter->key()).c_str()); } else { - fprintf(stderr, "@ '%s'\n", k.DebugString().c_str()); + std::fprintf(stderr, "@ '%s'\n", k.DebugString().c_str()); } } } diff --git a/db/db_test.cc b/db/db_test.cc index 8cd90f3..3a45731 100644 --- a/db/db_test.cc +++ b/db/db_test.cc @@ -424,7 +424,7 @@ class DBTest : public testing::Test { for (int level = 0; level < config::kNumLevels; level++) { int f = NumTableFilesAtLevel(level); char buf[100]; - snprintf(buf, sizeof(buf), "%s%d", (level ? "," : ""), f); + std::snprintf(buf, sizeof(buf), "%s%d", (level ? 
"," : ""), f); result += buf; if (f > 0) { last_non_zero_offset = result.size(); @@ -469,14 +469,14 @@ class DBTest : public testing::Test { } void DumpFileCounts(const char* label) { - fprintf(stderr, "---\n%s:\n", label); - fprintf( + std::fprintf(stderr, "---\n%s:\n", label); + std::fprintf( stderr, "maxoverlap: %lld\n", static_cast(dbfull()->TEST_MaxNextLevelOverlappingBytes())); for (int level = 0; level < config::kNumLevels; level++) { int num = NumTableFilesAtLevel(level); if (num > 0) { - fprintf(stderr, " level %3d : %d files\n", level, num); + std::fprintf(stderr, " level %3d : %d files\n", level, num); } } } @@ -1024,7 +1024,7 @@ TEST_F(DBTest, RecoverDuringMemtableCompaction) { static std::string Key(int i) { char buf[100]; - snprintf(buf, sizeof(buf), "key%06d", i); + std::snprintf(buf, sizeof(buf), "key%06d", i); return std::string(buf); } @@ -1118,7 +1118,7 @@ TEST_F(DBTest, RepeatedWritesToSameKey) { for (int i = 0; i < 5 * kMaxFiles; i++) { Put("key", value); ASSERT_LE(TotalTableFiles(), kMaxFiles); - fprintf(stderr, "after %d: %d files\n", i + 1, TotalTableFiles()); + std::fprintf(stderr, "after %d: %d files\n", i + 1, TotalTableFiles()); } } @@ -1140,7 +1140,7 @@ TEST_F(DBTest, SparseMerge) { // Write approximately 100MB of "B" values for (int i = 0; i < 100000; i++) { char key[100]; - snprintf(key, sizeof(key), "B%010d", i); + std::snprintf(key, sizeof(key), "B%010d", i); Put(key, value); } Put("C", "vc"); @@ -1165,9 +1165,9 @@ TEST_F(DBTest, SparseMerge) { static bool Between(uint64_t val, uint64_t low, uint64_t high) { bool result = (val >= low) && (val <= high); if (!result) { - fprintf(stderr, "Value %llu is not in range [%llu, %llu]\n", - (unsigned long long)(val), (unsigned long long)(low), - (unsigned long long)(high)); + std::fprintf(stderr, "Value %llu is not in range [%llu, %llu]\n", + (unsigned long long)(val), (unsigned long long)(low), + (unsigned long long)(high)); } return result; } @@ -1501,7 +1501,7 @@ TEST_F(DBTest, Fflush_Issue474) { static const int kNum = 100000; Random rnd(test::RandomSeed()); for (int i = 0; i < kNum; i++) { - fflush(nullptr); + std::fflush(nullptr); ASSERT_LEVELDB_OK(Put(RandomKey(&rnd), RandomString(&rnd, 100))); } } @@ -1578,7 +1578,7 @@ TEST_F(DBTest, CustomComparator) { for (int run = 0; run < 2; run++) { for (int i = 0; i < 1000; i++) { char buf[100]; - snprintf(buf, sizeof(buf), "[%d]", i * 10); + std::snprintf(buf, sizeof(buf), "[%d]", i * 10); ASSERT_LEVELDB_OK(Put(buf, buf)); } Compact("[0]", "[1000000]"); @@ -1748,7 +1748,7 @@ TEST_F(DBTest, NonWritableFileSystem) { std::string big(100000, 'x'); int errors = 0; for (int i = 0; i < 20; i++) { - fprintf(stderr, "iter %d; errors %d\n", i, errors); + std::fprintf(stderr, "iter %d; errors %d\n", i, errors); if (!Put("foo", big).ok()) { errors++; DelayMilliseconds(100); @@ -1901,7 +1901,7 @@ TEST_F(DBTest, BloomFilter) { ASSERT_EQ(Key(i), Get(Key(i))); } int reads = env_->random_read_counter_.Read(); - fprintf(stderr, "%d present => %d reads\n", N, reads); + std::fprintf(stderr, "%d present => %d reads\n", N, reads); ASSERT_GE(reads, N); ASSERT_LE(reads, N + 2 * N / 100); @@ -1911,7 +1911,7 @@ TEST_F(DBTest, BloomFilter) { ASSERT_EQ("NOT_FOUND", Get(Key(i) + ".missing")); } reads = env_->random_read_counter_.Read(); - fprintf(stderr, "%d missing => %d reads\n", N, reads); + std::fprintf(stderr, "%d missing => %d reads\n", N, reads); ASSERT_LE(reads, 3 * N / 100); env_->delay_data_sync_.store(false, std::memory_order_release); @@ -1944,7 +1944,7 @@ static void 
MTThreadBody(void* arg) { int id = t->id; DB* db = t->state->test->db_; int counter = 0; - fprintf(stderr, "... starting thread %d\n", id); + std::fprintf(stderr, "... starting thread %d\n", id); Random rnd(1000 + id); std::string value; char valbuf[1500]; @@ -1953,13 +1953,13 @@ static void MTThreadBody(void* arg) { int key = rnd.Uniform(kNumKeys); char keybuf[20]; - snprintf(keybuf, sizeof(keybuf), "%016d", key); + std::snprintf(keybuf, sizeof(keybuf), "%016d", key); if (rnd.OneIn(2)) { // Write values of the form . // We add some padding for force compactions. - snprintf(valbuf, sizeof(valbuf), "%d.%d.%-1000d", key, id, - static_cast(counter)); + std::snprintf(valbuf, sizeof(valbuf), "%d.%d.%-1000d", key, id, + static_cast(counter)); ASSERT_LEVELDB_OK(db->Put(WriteOptions(), Slice(keybuf), Slice(valbuf))); } else { // Read a value and verify that it matches the pattern written above. @@ -1980,7 +1980,7 @@ static void MTThreadBody(void* arg) { counter++; } t->state->thread_done[id].store(true, std::memory_order_release); - fprintf(stderr, "... stopping thread %d after %d ops\n", id, counter); + std::fprintf(stderr, "... stopping thread %d after %d ops\n", id, counter); } } // namespace @@ -2134,30 +2134,31 @@ static bool CompareIterators(int step, DB* model, DB* db, ok && miter->Valid() && dbiter->Valid(); miter->Next(), dbiter->Next()) { count++; if (miter->key().compare(dbiter->key()) != 0) { - fprintf(stderr, "step %d: Key mismatch: '%s' vs. '%s'\n", step, - EscapeString(miter->key()).c_str(), - EscapeString(dbiter->key()).c_str()); + std::fprintf(stderr, "step %d: Key mismatch: '%s' vs. '%s'\n", step, + EscapeString(miter->key()).c_str(), + EscapeString(dbiter->key()).c_str()); ok = false; break; } if (miter->value().compare(dbiter->value()) != 0) { - fprintf(stderr, "step %d: Value mismatch for key '%s': '%s' vs. '%s'\n", - step, EscapeString(miter->key()).c_str(), - EscapeString(miter->value()).c_str(), - EscapeString(miter->value()).c_str()); + std::fprintf(stderr, + "step %d: Value mismatch for key '%s': '%s' vs. '%s'\n", + step, EscapeString(miter->key()).c_str(), + EscapeString(miter->value()).c_str(), + EscapeString(miter->value()).c_str()); ok = false; } } if (ok) { if (miter->Valid() != dbiter->Valid()) { - fprintf(stderr, "step %d: Mismatch at end of iterators: %d vs. %d\n", - step, miter->Valid(), dbiter->Valid()); + std::fprintf(stderr, "step %d: Mismatch at end of iterators: %d vs. 
%d\n", + step, miter->Valid(), dbiter->Valid()); ok = false; } } - fprintf(stderr, "%d entries compared: ok=%d\n", count, ok); + std::fprintf(stderr, "%d entries compared: ok=%d\n", count, ok); delete miter; delete dbiter; return ok; @@ -2173,7 +2174,7 @@ TEST_F(DBTest, Randomized) { std::string k, v; for (int step = 0; step < N; step++) { if (step % 100 == 0) { - fprintf(stderr, "Step %d of %d\n", step, N); + std::fprintf(stderr, "Step %d of %d\n", step, N); } // TODO(sanjay): Test Get() works int p = rnd.Uniform(100); @@ -2233,7 +2234,7 @@ TEST_F(DBTest, Randomized) { std::string MakeKey(unsigned int num) { char buf[30]; - snprintf(buf, sizeof(buf), "%016u", num); + std::snprintf(buf, sizeof(buf), "%016u", num); return std::string(buf); } @@ -2283,10 +2284,10 @@ void BM_LogAndApply(int iters, int num_base_files) { uint64_t stop_micros = env->NowMicros(); unsigned int us = stop_micros - start_micros; char buf[16]; - snprintf(buf, sizeof(buf), "%d", num_base_files); - fprintf(stderr, - "BM_LogAndApply/%-6s %8d iters : %9u us (%7.0f us / iter)\n", buf, - iters, us, ((float)us) / iters); + std::snprintf(buf, sizeof(buf), "%d", num_base_files); + std::fprintf(stderr, + "BM_LogAndApply/%-6s %8d iters : %9u us (%7.0f us / iter)\n", + buf, iters, us, ((float)us) / iters); } } // namespace leveldb diff --git a/db/dbformat.cc b/db/dbformat.cc index 019aa92..2a5749f 100644 --- a/db/dbformat.cc +++ b/db/dbformat.cc @@ -126,7 +126,7 @@ LookupKey::LookupKey(const Slice& user_key, SequenceNumber s) { start_ = dst; dst = EncodeVarint32(dst, usize + 8); kstart_ = dst; - memcpy(dst, user_key.data(), usize); + std::memcpy(dst, user_key.data(), usize); dst += usize; EncodeFixed64(dst, PackSequenceAndType(s, kValueTypeForSeek)); dst += 8; diff --git a/db/fault_injection_test.cc b/db/fault_injection_test.cc index 8f2b647..6eebafa 100644 --- a/db/fault_injection_test.cc +++ b/db/fault_injection_test.cc @@ -427,7 +427,7 @@ class FaultInjectionTest : public testing::Test { EXPECT_EQ(value_space, val); } } else if (s.ok()) { - fprintf(stderr, "Expected an error at %d, but was OK\n", i); + std::fprintf(stderr, "Expected an error at %d, but was OK\n", i); s = Status::IOError(dbname_, "Expected value error:"); } else { s = Status::OK(); // An expected error @@ -439,7 +439,7 @@ class FaultInjectionTest : public testing::Test { // Return the ith key Slice Key(int i, std::string* storage) const { char buf[100]; - snprintf(buf, sizeof(buf), "%016d", i); + std::snprintf(buf, sizeof(buf), "%016d", i); storage->assign(buf, strlen(buf)); return Slice(*storage); } diff --git a/db/filename.cc b/db/filename.cc index f6bec00..e526249 100644 --- a/db/filename.cc +++ b/db/filename.cc @@ -20,8 +20,8 @@ Status WriteStringToFileSync(Env* env, const Slice& data, static std::string MakeFileName(const std::string& dbname, uint64_t number, const char* suffix) { char buf[100]; - snprintf(buf, sizeof(buf), "/%06llu.%s", - static_cast(number), suffix); + std::snprintf(buf, sizeof(buf), "/%06llu.%s", + static_cast(number), suffix); return dbname + buf; } @@ -43,8 +43,8 @@ std::string SSTTableFileName(const std::string& dbname, uint64_t number) { std::string DescriptorFileName(const std::string& dbname, uint64_t number) { assert(number > 0); char buf[100]; - snprintf(buf, sizeof(buf), "/MANIFEST-%06llu", - static_cast(number)); + std::snprintf(buf, sizeof(buf), "/MANIFEST-%06llu", + static_cast(number)); return dbname + buf; } diff --git a/db/leveldbutil.cc b/db/leveldbutil.cc index 8e94abd..95ee897 100644 --- a/db/leveldbutil.cc +++ 
b/db/leveldbutil.cc @@ -28,7 +28,7 @@ bool HandleDumpCommand(Env* env, char** files, int num) { for (int i = 0; i < num; i++) { Status s = DumpFile(env, files[i], &printer); if (!s.ok()) { - fprintf(stderr, "%s\n", s.ToString().c_str()); + std::fprintf(stderr, "%s\n", s.ToString().c_str()); ok = false; } } @@ -39,9 +39,10 @@ bool HandleDumpCommand(Env* env, char** files, int num) { } // namespace leveldb static void Usage() { - fprintf(stderr, - "Usage: leveldbutil command...\n" - " dump files... -- dump contents of specified files\n"); + std::fprintf( + stderr, + "Usage: leveldbutil command...\n" + " dump files... -- dump contents of specified files\n"); } int main(int argc, char** argv) { diff --git a/db/log_reader.cc b/db/log_reader.cc index dcd4b75..9880279 100644 --- a/db/log_reader.cc +++ b/db/log_reader.cc @@ -160,7 +160,7 @@ bool Reader::ReadRecord(Slice* record, std::string* scratch) { default: { char buf[40]; - snprintf(buf, sizeof(buf), "unknown record type %u", record_type); + std::snprintf(buf, sizeof(buf), "unknown record type %u", record_type); ReportCorruption( (fragment.size() + (in_fragmented_record ? scratch->size() : 0)), buf); diff --git a/db/log_test.cc b/db/log_test.cc index c765e93..346b19c 100644 --- a/db/log_test.cc +++ b/db/log_test.cc @@ -27,7 +27,7 @@ static std::string BigString(const std::string& partial_string, size_t n) { // Construct a string from a number static std::string NumberString(int n) { char buf[50]; - snprintf(buf, sizeof(buf), "%d.", n); + std::snprintf(buf, sizeof(buf), "%d.", n); return std::string(buf); } diff --git a/db/memtable.cc b/db/memtable.cc index 00931d4..f42774d 100644 --- a/db/memtable.cc +++ b/db/memtable.cc @@ -88,12 +88,12 @@ void MemTable::Add(SequenceNumber s, ValueType type, const Slice& key, val_size; char* buf = arena_.Allocate(encoded_len); char* p = EncodeVarint32(buf, internal_key_size); - memcpy(p, key.data(), key_size); + std::memcpy(p, key.data(), key_size); p += key_size; EncodeFixed64(p, (s << 8) | type); p += 8; p = EncodeVarint32(p, val_size); - memcpy(p, value.data(), val_size); + std::memcpy(p, value.data(), val_size); assert(p + val_size == buf + encoded_len); table_.Insert(buf); } diff --git a/db/recovery_test.cc b/db/recovery_test.cc index e5cc916..3db817e 100644 --- a/db/recovery_test.cc +++ b/db/recovery_test.cc @@ -160,7 +160,8 @@ class RecoveryTest : public testing::Test { TEST_F(RecoveryTest, ManifestReused) { if (!CanAppend()) { - fprintf(stderr, "skipping test because env does not support appending\n"); + std::fprintf(stderr, + "skipping test because env does not support appending\n"); return; } ASSERT_LEVELDB_OK(Put("foo", "bar")); @@ -176,7 +177,8 @@ TEST_F(RecoveryTest, ManifestReused) { TEST_F(RecoveryTest, LargeManifestCompacted) { if (!CanAppend()) { - fprintf(stderr, "skipping test because env does not support appending\n"); + std::fprintf(stderr, + "skipping test because env does not support appending\n"); return; } ASSERT_LEVELDB_OK(Put("foo", "bar")); @@ -216,7 +218,8 @@ TEST_F(RecoveryTest, NoLogFiles) { TEST_F(RecoveryTest, LogFileReuse) { if (!CanAppend()) { - fprintf(stderr, "skipping test because env does not support appending\n"); + std::fprintf(stderr, + "skipping test because env does not support appending\n"); return; } for (int i = 0; i < 2; i++) { @@ -249,7 +252,7 @@ TEST_F(RecoveryTest, MultipleMemTables) { const int kNum = 1000; for (int i = 0; i < kNum; i++) { char buf[100]; - snprintf(buf, sizeof(buf), "%050d", i); + std::snprintf(buf, sizeof(buf), "%050d", i); 
ASSERT_LEVELDB_OK(Put(buf, buf)); } ASSERT_EQ(0, NumTables()); @@ -268,7 +271,7 @@ TEST_F(RecoveryTest, MultipleMemTables) { ASSERT_NE(old_log_file, FirstLogFile()) << "must not reuse log"; for (int i = 0; i < kNum; i++) { char buf[100]; - snprintf(buf, sizeof(buf), "%050d", i); + std::snprintf(buf, sizeof(buf), "%050d", i); ASSERT_EQ(buf, Get(buf)); } } diff --git a/db/repair.cc b/db/repair.cc index d2a495e..97a27c6 100644 --- a/db/repair.cc +++ b/db/repair.cc @@ -372,7 +372,8 @@ class Repairer { t.meta.largest); } - // fprintf(stderr, "NewDescriptor:\n%s\n", edit_.DebugString().c_str()); + // std::fprintf(stderr, + // "NewDescriptor:\n%s\n", edit_.DebugString().c_str()); { log::Writer log(file); std::string record; diff --git a/db/skiplist_test.cc b/db/skiplist_test.cc index b548017..79a5b86 100644 --- a/db/skiplist_test.cc +++ b/db/skiplist_test.cc @@ -346,7 +346,7 @@ static void RunConcurrent(int run) { const int kSize = 1000; for (int i = 0; i < N; i++) { if ((i % 100) == 0) { - fprintf(stderr, "Run %d of %d\n", i, N); + std::fprintf(stderr, "Run %d of %d\n", i, N); } TestState state(seed + 1); Env::Default()->Schedule(ConcurrentReader, &state); diff --git a/db/version_set.cc b/db/version_set.cc index f23ae14..a459587 100644 --- a/db/version_set.cc +++ b/db/version_set.cc @@ -703,10 +703,10 @@ class VersionSet::Builder { const InternalKey& prev_end = v->files_[level][i - 1]->largest; const InternalKey& this_begin = v->files_[level][i]->smallest; if (vset_->icmp_.Compare(prev_end, this_begin) >= 0) { - fprintf(stderr, "overlapping ranges in same level %s vs. %s\n", - prev_end.DebugString().c_str(), - this_begin.DebugString().c_str()); - abort(); + std::fprintf(stderr, "overlapping ranges in same level %s vs. %s\n", + prev_end.DebugString().c_str(), + this_begin.DebugString().c_str()); + std::abort(); } } } @@ -1100,11 +1100,12 @@ int VersionSet::NumLevelFiles(int level) const { const char* VersionSet::LevelSummary(LevelSummaryStorage* scratch) const { // Update code if kNumLevels changes static_assert(config::kNumLevels == 7, ""); - snprintf(scratch->buffer, sizeof(scratch->buffer), - "files[ %d %d %d %d %d %d %d ]", int(current_->files_[0].size()), - int(current_->files_[1].size()), int(current_->files_[2].size()), - int(current_->files_[3].size()), int(current_->files_[4].size()), - int(current_->files_[5].size()), int(current_->files_[6].size())); + std::snprintf( + scratch->buffer, sizeof(scratch->buffer), "files[ %d %d %d %d %d %d %d ]", + int(current_->files_[0].size()), int(current_->files_[1].size()), + int(current_->files_[2].size()), int(current_->files_[3].size()), + int(current_->files_[4].size()), int(current_->files_[5].size()), + int(current_->files_[6].size())); return scratch->buffer; } diff --git a/helpers/memenv/memenv.cc b/helpers/memenv/memenv.cc index 0da4e76..e476613 100644 --- a/helpers/memenv/memenv.cc +++ b/helpers/memenv/memenv.cc @@ -93,7 +93,7 @@ class FileState { if (avail > bytes_to_copy) { avail = bytes_to_copy; } - memcpy(dst, blocks_[block] + block_offset, avail); + std::memcpy(dst, blocks_[block] + block_offset, avail); bytes_to_copy -= avail; dst += avail; @@ -126,7 +126,7 @@ class FileState { if (avail > src_len) { avail = src_len; } - memcpy(blocks_.back() + offset, src, avail); + std::memcpy(blocks_.back() + offset, src, avail); src_len -= avail; src += avail; size_ += avail; @@ -215,7 +215,7 @@ class WritableFileImpl : public WritableFile { class NoOpLogger : public Logger { public: - void Logv(const char* format, va_list ap) override {} + void 
Logv(const char* format, std::va_list ap) override {} }; class InMemoryEnv : public EnvWrapper { diff --git a/include/leveldb/env.h b/include/leveldb/env.h index 3ef0393..e00895a 100644 --- a/include/leveldb/env.h +++ b/include/leveldb/env.h @@ -300,7 +300,7 @@ class LEVELDB_EXPORT Logger { virtual ~Logger(); // Write an entry to the log file with the specified format. - virtual void Logv(const char* format, va_list ap) = 0; + virtual void Logv(const char* format, std::va_list ap) = 0; }; // Identifies a locked file. diff --git a/issues/issue178_test.cc b/issues/issue178_test.cc index 7fc43ea..8fa5bb9 100644 --- a/issues/issue178_test.cc +++ b/issues/issue178_test.cc @@ -18,7 +18,7 @@ const int kNumKeys = 1100000; std::string Key1(int i) { char buf[100]; - snprintf(buf, sizeof(buf), "my_key_%d", i); + std::snprintf(buf, sizeof(buf), "my_key_%d", i); return buf; } diff --git a/table/table_test.cc b/table/table_test.cc index 713b63e..190dd0f 100644 --- a/table/table_test.cc +++ b/table/table_test.cc @@ -123,7 +123,7 @@ class StringSource : public RandomAccessFile { if (offset + n > contents_.size()) { n = contents_.size() - offset; } - memcpy(scratch, &contents_[offset], n); + std::memcpy(scratch, &contents_[offset], n); *result = Slice(scratch, n); return Status::OK(); } @@ -485,13 +485,13 @@ class Harness : public testing::Test { Iterator* iter = constructor_->NewIterator(); ASSERT_TRUE(!iter->Valid()); KVMap::const_iterator model_iter = data.begin(); - if (kVerbose) fprintf(stderr, "---\n"); + if (kVerbose) std::fprintf(stderr, "---\n"); for (int i = 0; i < 200; i++) { const int toss = rnd->Uniform(5); switch (toss) { case 0: { if (iter->Valid()) { - if (kVerbose) fprintf(stderr, "Next\n"); + if (kVerbose) std::fprintf(stderr, "Next\n"); iter->Next(); ++model_iter; ASSERT_EQ(ToString(data, model_iter), ToString(iter)); @@ -500,7 +500,7 @@ class Harness : public testing::Test { } case 1: { - if (kVerbose) fprintf(stderr, "SeekToFirst\n"); + if (kVerbose) std::fprintf(stderr, "SeekToFirst\n"); iter->SeekToFirst(); model_iter = data.begin(); ASSERT_EQ(ToString(data, model_iter), ToString(iter)); @@ -511,7 +511,7 @@ class Harness : public testing::Test { std::string key = PickRandomKey(rnd, keys); model_iter = data.lower_bound(key); if (kVerbose) - fprintf(stderr, "Seek '%s'\n", EscapeString(key).c_str()); + std::fprintf(stderr, "Seek '%s'\n", EscapeString(key).c_str()); iter->Seek(Slice(key)); ASSERT_EQ(ToString(data, model_iter), ToString(iter)); break; @@ -519,7 +519,7 @@ class Harness : public testing::Test { case 3: { if (iter->Valid()) { - if (kVerbose) fprintf(stderr, "Prev\n"); + if (kVerbose) std::fprintf(stderr, "Prev\n"); iter->Prev(); if (model_iter == data.begin()) { model_iter = data.end(); // Wrap around to invalid value @@ -532,7 +532,7 @@ class Harness : public testing::Test { } case 4: { - if (kVerbose) fprintf(stderr, "SeekToLast\n"); + if (kVerbose) std::fprintf(stderr, "SeekToLast\n"); iter->SeekToLast(); if (keys.empty()) { model_iter = data.end(); @@ -684,8 +684,8 @@ TEST_F(Harness, Randomized) { for (int num_entries = 0; num_entries < 2000; num_entries += (num_entries < 50 ? 
1 : 200)) { if ((num_entries % 10) == 0) { - fprintf(stderr, "case %d of %d: num_entries = %d\n", (i + 1), - int(kNumTestArgs), num_entries); + std::fprintf(stderr, "case %d of %d: num_entries = %d\n", (i + 1), + int(kNumTestArgs), num_entries); } for (int e = 0; e < num_entries; e++) { std::string v; @@ -714,7 +714,7 @@ TEST_F(Harness, RandomizedLongDB) { for (int level = 0; level < config::kNumLevels; level++) { std::string value; char name[100]; - snprintf(name, sizeof(name), "leveldb.num-files-at-level%d", level); + std::snprintf(name, sizeof(name), "leveldb.num-files-at-level%d", level); ASSERT_TRUE(db()->GetProperty(name, &value)); files += atoi(value.c_str()); } @@ -736,8 +736,8 @@ TEST(MemTableTest, Simple) { Iterator* iter = memtable->NewIterator(); iter->SeekToFirst(); while (iter->Valid()) { - fprintf(stderr, "key: '%s' -> '%s'\n", iter->key().ToString().c_str(), - iter->value().ToString().c_str()); + std::fprintf(stderr, "key: '%s' -> '%s'\n", iter->key().ToString().c_str(), + iter->value().ToString().c_str()); iter->Next(); } @@ -748,9 +748,9 @@ TEST(MemTableTest, Simple) { static bool Between(uint64_t val, uint64_t low, uint64_t high) { bool result = (val >= low) && (val <= high); if (!result) { - fprintf(stderr, "Value %llu is not in range [%llu, %llu]\n", - (unsigned long long)(val), (unsigned long long)(low), - (unsigned long long)(high)); + std::fprintf(stderr, "Value %llu is not in range [%llu, %llu]\n", + (unsigned long long)(val), (unsigned long long)(low), + (unsigned long long)(high)); } return result; } @@ -792,7 +792,7 @@ static bool SnappyCompressionSupported() { TEST(TableTest, ApproximateOffsetOfCompressed) { if (!SnappyCompressionSupported()) { - fprintf(stderr, "skipping compression tests\n"); + std::fprintf(stderr, "skipping compression tests\n"); return; } diff --git a/util/bloom_test.cc b/util/bloom_test.cc index bcf14dc..520473e 100644 --- a/util/bloom_test.cc +++ b/util/bloom_test.cc @@ -45,14 +45,14 @@ class BloomTest : public testing::Test { size_t FilterSize() const { return filter_.size(); } void DumpFilter() { - fprintf(stderr, "F("); + std::fprintf(stderr, "F("); for (size_t i = 0; i + 1 < filter_.size(); i++) { const unsigned int c = static_cast(filter_[i]); for (int j = 0; j < 8; j++) { - fprintf(stderr, "%c", (c & (1 << j)) ? '1' : '.'); + std::fprintf(stderr, "%c", (c & (1 << j)) ? 
'1' : '.'); } } - fprintf(stderr, ")\n"); + std::fprintf(stderr, ")\n"); } bool Matches(const Slice& s) { @@ -132,8 +132,9 @@ TEST_F(BloomTest, VaryingLengths) { // Check false positive rate double rate = FalsePositiveRate(); if (kVerbose >= 1) { - fprintf(stderr, "False positives: %5.2f%% @ length = %6d ; bytes = %6d\n", - rate * 100.0, length, static_cast(FilterSize())); + std::fprintf(stderr, + "False positives: %5.2f%% @ length = %6d ; bytes = %6d\n", + rate * 100.0, length, static_cast(FilterSize())); } ASSERT_LE(rate, 0.02); // Must not be over 2% if (rate > 0.0125) @@ -142,8 +143,8 @@ TEST_F(BloomTest, VaryingLengths) { good_filters++; } if (kVerbose >= 1) { - fprintf(stderr, "Filters: %d good, %d mediocre\n", good_filters, - mediocre_filters); + std::fprintf(stderr, "Filters: %d good, %d mediocre\n", good_filters, + mediocre_filters); } ASSERT_LE(mediocre_filters, good_filters / 5); } diff --git a/util/cache.cc b/util/cache.cc index 509e5eb..ad1e9a2 100644 --- a/util/cache.cc +++ b/util/cache.cc @@ -279,7 +279,7 @@ Cache::Handle* LRUCache::Insert(const Slice& key, uint32_t hash, void* value, e->hash = hash; e->in_cache = false; e->refs = 1; // for the returned handle. - memcpy(e->key_data, key.data(), key.size()); + std::memcpy(e->key_data, key.data(), key.size()); if (capacity_ > 0) { e->refs++; // for the cache's reference. diff --git a/util/env.cc b/util/env.cc index 40e6071..a53b230 100644 --- a/util/env.cc +++ b/util/env.cc @@ -4,6 +4,8 @@ #include "leveldb/env.h" +#include + // This workaround can be removed when leveldb::Env::DeleteFile is removed. // See env.h for justification. #if defined(_WIN32) && defined(LEVELDB_DELETEFILE_UNDEFINED) @@ -38,7 +40,7 @@ FileLock::~FileLock() = default; void Log(Logger* info_log, const char* format, ...) { if (info_log != nullptr) { - va_list ap; + std::va_list ap; va_start(ap, format); info_log->Logv(format, ap); va_end(ap); diff --git a/util/env_posix_test.cc b/util/env_posix_test.cc index 36f226f..29f973f 100644 --- a/util/env_posix_test.cc +++ b/util/env_posix_test.cc @@ -149,7 +149,7 @@ void CheckCloseOnExecDoesNotLeakFDs( if (child_pid == kForkInChildProcessReturnValue) { ::execv(child_argv[0], child_argv); std::fprintf(stderr, "Error spawning child process: %s\n", strerror(errno)); - std::exit(kTextCloseOnExecHelperExecFailedCode); + std::std::exit(kTextCloseOnExecHelperExecFailedCode); } int child_status = 0; @@ -187,11 +187,11 @@ TEST_F(EnvPosixTest, TestOpenOnRead) { ASSERT_LEVELDB_OK(env_->GetTestDirectory(&test_dir)); std::string test_file = test_dir + "/open_on_read.txt"; - FILE* f = fopen(test_file.c_str(), "we"); + FILE* f = std::fopen(test_file.c_str(), "we"); ASSERT_TRUE(f != nullptr); const char kFileData[] = "abcdefghijklmnopqrstuvwxyz"; fputs(kFileData, f); - fclose(f); + std::fclose(f); // Open test file some number above the sum of the two limits to force // open-on-read behavior of POSIX Env leveldb::RandomAccessFile. 
diff --git a/util/env_windows_test.cc b/util/env_windows_test.cc index 15c0274..d6822d2 100644 --- a/util/env_windows_test.cc +++ b/util/env_windows_test.cc @@ -29,11 +29,11 @@ TEST_F(EnvWindowsTest, TestOpenOnRead) { ASSERT_LEVELDB_OK(env_->GetTestDirectory(&test_dir)); std::string test_file = test_dir + "/open_on_read.txt"; - FILE* f = fopen(test_file.c_str(), "w"); + FILE* f = std::fopen(test_file.c_str(), "w"); ASSERT_TRUE(f != nullptr); const char kFileData[] = "abcdefghijklmnopqrstuvwxyz"; fputs(kFileData, f); - fclose(f); + std::fclose(f); // Open test file some number above the sum of the two limits to force // leveldb::WindowsEnv to switch from mapping the file into memory diff --git a/util/histogram.cc b/util/histogram.cc index d110d28..7af4030 100644 --- a/util/histogram.cc +++ b/util/histogram.cc @@ -241,11 +241,11 @@ double Histogram::StandardDeviation() const { std::string Histogram::ToString() const { std::string r; char buf[200]; - snprintf(buf, sizeof(buf), "Count: %.0f Average: %.4f StdDev: %.2f\n", num_, - Average(), StandardDeviation()); + std::snprintf(buf, sizeof(buf), "Count: %.0f Average: %.4f StdDev: %.2f\n", + num_, Average(), StandardDeviation()); r.append(buf); - snprintf(buf, sizeof(buf), "Min: %.4f Median: %.4f Max: %.4f\n", - (num_ == 0.0 ? 0.0 : min_), Median(), max_); + std::snprintf(buf, sizeof(buf), "Min: %.4f Median: %.4f Max: %.4f\n", + (num_ == 0.0 ? 0.0 : min_), Median(), max_); r.append(buf); r.append("------------------------------------------------------\n"); const double mult = 100.0 / num_; @@ -253,12 +253,12 @@ std::string Histogram::ToString() const { for (int b = 0; b < kNumBuckets; b++) { if (buckets_[b] <= 0.0) continue; sum += buckets_[b]; - snprintf(buf, sizeof(buf), "[ %7.0f, %7.0f ) %7.0f %7.3f%% %7.3f%% ", - ((b == 0) ? 0.0 : kBucketLimit[b - 1]), // left - kBucketLimit[b], // right - buckets_[b], // count - mult * buckets_[b], // percentage - mult * sum); // cumulative percentage + std::snprintf(buf, sizeof(buf), "[ %7.0f, %7.0f ) %7.0f %7.3f%% %7.3f%% ", + ((b == 0) ? 0.0 : kBucketLimit[b - 1]), // left + kBucketLimit[b], // right + buckets_[b], // count + mult * buckets_[b], // percentage + mult * sum); // cumulative percentage r.append(buf); // Add hash marks based on percentage; 20 marks for 100%. diff --git a/util/logging.cc b/util/logging.cc index 39d8551..8d6fb5b 100644 --- a/util/logging.cc +++ b/util/logging.cc @@ -16,7 +16,7 @@ namespace leveldb { void AppendNumberTo(std::string* str, uint64_t num) { char buf[30]; - snprintf(buf, sizeof(buf), "%llu", (unsigned long long)num); + std::snprintf(buf, sizeof(buf), "%llu", static_cast(num)); str->append(buf); } @@ -27,8 +27,8 @@ void AppendEscapedStringTo(std::string* str, const Slice& value) { str->push_back(c); } else { char buf[10]; - snprintf(buf, sizeof(buf), "\\x%02x", - static_cast(c) & 0xff); + std::snprintf(buf, sizeof(buf), "\\x%02x", + static_cast(c) & 0xff); str->append(buf); } } diff --git a/util/posix_logger.h b/util/posix_logger.h index 28e15d1..6bbc1a0 100644 --- a/util/posix_logger.h +++ b/util/posix_logger.h @@ -30,7 +30,7 @@ class PosixLogger final : public Logger { ~PosixLogger() override { std::fclose(fp_); } - void Logv(const char* format, va_list arguments) override { + void Logv(const char* format, std::va_list arguments) override { // Record the time as close to the Logv() call as possible. struct ::timeval now_timeval; ::gettimeofday(&now_timeval, nullptr); @@ -62,7 +62,7 @@ class PosixLogger final : public Logger { (iteration == 0) ? 
stack_buffer : new char[dynamic_buffer_size]; // Print the header into the buffer. - int buffer_offset = snprintf( + int buffer_offset = std::snprintf( buffer, buffer_size, "%04d/%02d/%02d-%02d:%02d:%02d.%06d %s ", now_components.tm_year + 1900, now_components.tm_mon + 1, now_components.tm_mday, now_components.tm_hour, now_components.tm_min, @@ -98,8 +98,8 @@ class PosixLogger final : public Logger { } // The dynamically-allocated buffer was incorrectly sized. This should - // not happen, assuming a correct implementation of (v)snprintf. Fail - // in tests, recover by truncating the log message in production. + // not happen, assuming a correct implementation of std::(v)snprintf. + // Fail in tests, recover by truncating the log message in production. assert(false); buffer_offset = buffer_size - 1; } diff --git a/util/status.cc b/util/status.cc index 6b6528b..0559f5b 100644 --- a/util/status.cc +++ b/util/status.cc @@ -12,9 +12,9 @@ namespace leveldb { const char* Status::CopyState(const char* state) { uint32_t size; - memcpy(&size, state, sizeof(size)); + std::memcpy(&size, state, sizeof(size)); char* result = new char[size + 5]; - memcpy(result, state, size + 5); + std::memcpy(result, state, size + 5); return result; } @@ -24,13 +24,13 @@ Status::Status(Code code, const Slice& msg, const Slice& msg2) { const uint32_t len2 = static_cast(msg2.size()); const uint32_t size = len1 + (len2 ? (2 + len2) : 0); char* result = new char[size + 5]; - memcpy(result, &size, sizeof(size)); + std::memcpy(result, &size, sizeof(size)); result[4] = static_cast(code); - memcpy(result + 5, msg.data(), len1); + std::memcpy(result + 5, msg.data(), len1); if (len2) { result[5 + len1] = ':'; result[6 + len1] = ' '; - memcpy(result + 7 + len1, msg2.data(), len2); + std::memcpy(result + 7 + len1, msg2.data(), len2); } state_ = result; } @@ -61,14 +61,14 @@ std::string Status::ToString() const { type = "IO error: "; break; default: - snprintf(tmp, sizeof(tmp), - "Unknown code(%d): ", static_cast(code())); + std::snprintf(tmp, sizeof(tmp), + "Unknown code(%d): ", static_cast(code())); type = tmp; break; } std::string result(type); uint32_t length; - memcpy(&length, state_, sizeof(length)); + std::memcpy(&length, state_, sizeof(length)); result.append(state_ + 5, length); return result; } diff --git a/util/windows_logger.h b/util/windows_logger.h index 9296063..26e6c7b 100644 --- a/util/windows_logger.h +++ b/util/windows_logger.h @@ -27,7 +27,7 @@ class WindowsLogger final : public Logger { ~WindowsLogger() override { std::fclose(fp_); } - void Logv(const char* format, va_list arguments) override { + void Logv(const char* format, std::va_list arguments) override { // Record the time as close to the Logv() call as possible. SYSTEMTIME now_components; ::GetLocalTime(&now_components); @@ -56,7 +56,7 @@ class WindowsLogger final : public Logger { (iteration == 0) ? stack_buffer : new char[dynamic_buffer_size]; // Print the header into the buffer. - int buffer_offset = snprintf( + int buffer_offset = std::snprintf( buffer, buffer_size, "%04d/%02d/%02d-%02d:%02d:%02d.%06d %s ", now_components.wYear, now_components.wMonth, now_components.wDay, now_components.wHour, now_components.wMinute, now_components.wSecond, @@ -92,8 +92,8 @@ class WindowsLogger final : public Logger { } // The dynamically-allocated buffer was incorrectly sized. This should - // not happen, assuming a correct implementation of (v)snprintf. Fail - // in tests, recover by truncating the log message in production. 
+ // not happen, assuming a correct implementation of std::(v)snprintf. + // Fail in tests, recover by truncating the log message in production. assert(false); buffer_offset = buffer_size - 1; } From 5c6dd75897adc9e542a55d983e4b57406fbfb0a0 Mon Sep 17 00:00:00 2001 From: Victor Costan Date: Thu, 30 Apr 2020 01:03:12 +0000 Subject: [PATCH 24/68] Fix accidental double std:: qualifiers. PiperOrigin-RevId: 309136120 --- util/env_posix_test.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/util/env_posix_test.cc b/util/env_posix_test.cc index 29f973f..da264f0 100644 --- a/util/env_posix_test.cc +++ b/util/env_posix_test.cc @@ -149,7 +149,7 @@ void CheckCloseOnExecDoesNotLeakFDs( if (child_pid == kForkInChildProcessReturnValue) { ::execv(child_argv[0], child_argv); std::fprintf(stderr, "Error spawning child process: %s\n", strerror(errno)); - std::std::exit(kTextCloseOnExecHelperExecFailedCode); + std::exit(kTextCloseOnExecHelperExecFailedCode); } int child_status = 0; From 23b6337f69a39d16570f8a66db69b55535d59a51 Mon Sep 17 00:00:00 2001 From: Victor Costan Date: Thu, 30 Apr 2020 01:18:25 +0000 Subject: [PATCH 25/68] Fix Travis CI build. PiperOrigin-RevId: 309138195 --- .travis.yml | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index 766fdc9..56c772d 100644 --- a/.travis.yml +++ b/.travis.yml @@ -4,7 +4,7 @@ language: cpp dist: bionic -osx_image: xcode10.3 +osx_image: xcode11.3 compiler: - gcc @@ -17,6 +17,12 @@ env: - BUILD_TYPE=Debug - BUILD_TYPE=RelWithDebInfo +jobs: + allow_failures: + # Homebrew's GCC is currently broken on XCode 11. + - compiler: gcc + os: osx + addons: apt: sources: From 28602d36254263127c0e1b90334614abc1ee0c83 Mon Sep 17 00:00:00 2001 From: wzk784533 Date: Sat, 11 Jul 2020 13:44:11 +0800 Subject: [PATCH 26/68] avoid unnecessary memory copy --- db/builder.cc | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/db/builder.cc b/db/builder.cc index 943e857..fe5cde1 100644 --- a/db/builder.cc +++ b/db/builder.cc @@ -30,11 +30,14 @@ Status BuildTable(const std::string& dbname, Env* env, const Options& options, TableBuilder* builder = new TableBuilder(options, file); meta->smallest.DecodeFrom(iter->key()); + Slice key; for (; iter->Valid(); iter->Next()) { - Slice key = iter->key(); - meta->largest.DecodeFrom(key); + key = iter->key(); builder->Add(key, iter->value()); } + if(!key.empty()) { + meta->largest.DecodeFrom(key); + } // Finish and check for builder errors s = builder->Finish(); From 1754c12c54d3544678205930a09b142418e34181 Mon Sep 17 00:00:00 2001 From: jl0x61 Date: Tue, 14 Jul 2020 19:32:03 +0800 Subject: [PATCH 27/68] update index.md remove return value of GetApproximateSizes in index.md --- doc/index.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/index.md b/doc/index.md index 3d9a258..4e7c5ef 100644 --- a/doc/index.md +++ b/doc/index.md @@ -478,7 +478,7 @@ leveldb::Range ranges[2]; ranges[0] = leveldb::Range("a", "c"); ranges[1] = leveldb::Range("x", "z"); uint64_t sizes[2]; -leveldb::Status s = db->GetApproximateSizes(ranges, 2, sizes); +db->GetApproximateSizes(ranges, 2, sizes); ``` The preceding call will set `sizes[0]` to the approximate number of bytes of From b7d302326961fb809d92a95ce813e2d26fe2e16e Mon Sep 17 00:00:00 2001 From: leveldb Team Date: Wed, 2 Sep 2020 15:45:40 +0000 Subject: [PATCH 28/68] Internal cleanup migrating StatusOr. 
PiperOrigin-RevId: 329720018 --- util/testutil.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/util/testutil.h b/util/testutil.h index cc67d96..e0e2d64 100644 --- a/util/testutil.h +++ b/util/testutil.h @@ -18,7 +18,7 @@ namespace test { MATCHER(IsOK, "") { return arg.ok(); } // Macros for testing the results of functions that return leveldb::Status or -// util::StatusOr (for any type T). +// absl::StatusOr (for any type T). #define EXPECT_LEVELDB_OK(expression) \ EXPECT_THAT(expression, leveldb::test::IsOK()) #define ASSERT_LEVELDB_OK(expression) \ From ed781070b42f368ea2c914158528848143f92684 Mon Sep 17 00:00:00 2001 From: leveldb Team Date: Tue, 27 Oct 2020 11:09:49 -0700 Subject: [PATCH 29/68] Internal test cleanup PiperOrigin-RevId: 339287832 --- db/db_test.cc | 21 +++++++++------------ 1 file changed, 9 insertions(+), 12 deletions(-) diff --git a/db/db_test.cc b/db/db_test.cc index 3a45731..22ac292 100644 --- a/db/db_test.cc +++ b/db/db_test.cc @@ -7,6 +7,7 @@ #include #include +#include "testing/base/public/benchmark.h" #include "gtest/gtest.h" #include "db/db_impl.h" #include "db/filename.h" @@ -2238,7 +2239,9 @@ std::string MakeKey(unsigned int num) { return std::string(buf); } -void BM_LogAndApply(int iters, int num_base_files) { +static void BM_LogAndApply(benchmark::State& state) { + const int num_base_files = state.range(0); + std::string dbname = testing::TempDir() + "leveldb_test_benchmark"; DestroyDB(dbname, Options()); @@ -2273,7 +2276,7 @@ void BM_LogAndApply(int iters, int num_base_files) { uint64_t start_micros = env->NowMicros(); - for (int i = 0; i < iters; i++) { + for (auto st : state) { VersionEdit vedit; vedit.RemoveFile(2, fnum); InternalKey start(MakeKey(2 * fnum), 1, kTypeValue); @@ -2286,21 +2289,15 @@ void BM_LogAndApply(int iters, int num_base_files) { char buf[16]; std::snprintf(buf, sizeof(buf), "%d", num_base_files); std::fprintf(stderr, - "BM_LogAndApply/%-6s %8d iters : %9u us (%7.0f us / iter)\n", - buf, iters, us, ((float)us) / iters); + "BM_LogAndApply/%-6s %8zu iters : %9u us (%7.0f us / iter)\n", + buf, state.iterations(), us, ((float)us) / state.iterations()); } +BENCHMARK(BM_LogAndApply)->Arg(1)->Arg(100)->Arg(10000)->Arg(100000); } // namespace leveldb int main(int argc, char** argv) { - if (argc > 1 && std::string(argv[1]) == "--benchmark") { - leveldb::BM_LogAndApply(1000, 1); - leveldb::BM_LogAndApply(1000, 100); - leveldb::BM_LogAndApply(1000, 10000); - leveldb::BM_LogAndApply(100, 100000); - return 0; - } - testing::InitGoogleTest(&argc, argv); + RunSpecifiedBenchmarks(); return RUN_ALL_TESTS(); } From 99ab4730d62444099dbd1ea9c402e15f4aad0728 Mon Sep 17 00:00:00 2001 From: leveldb Team Date: Tue, 27 Oct 2020 12:59:41 -0700 Subject: [PATCH 30/68] Use external benchmark API header PiperOrigin-RevId: 339310928 --- db/db_test.cc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/db/db_test.cc b/db/db_test.cc index 22ac292..8cab018 100644 --- a/db/db_test.cc +++ b/db/db_test.cc @@ -7,8 +7,8 @@ #include #include -#include "testing/base/public/benchmark.h" #include "gtest/gtest.h" +#include "third_party/benchmark/include/benchmark/benchmark.h" #include "db/db_impl.h" #include "db/filename.h" #include "db/version_set.h" @@ -2298,6 +2298,6 @@ BENCHMARK(BM_LogAndApply)->Arg(1)->Arg(100)->Arg(10000)->Arg(100000); int main(int argc, char** argv) { testing::InitGoogleTest(&argc, argv); - RunSpecifiedBenchmarks(); + benchmark::RunSpecifiedBenchmarks(); return RUN_ALL_TESTS(); } From 
2802398c94b3b5708f111dae58ac1b738613bbf8 Mon Sep 17 00:00:00 2001 From: Sanjay Ghemawat Date: Mon, 30 Nov 2020 08:32:50 -0800 Subject: [PATCH 31/68] Fix bug in filter policy documentation example. PiperOrigin-RevId: 344817715 --- doc/index.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/index.md b/doc/index.md index 4e7c5ef..01693ad 100644 --- a/doc/index.md +++ b/doc/index.md @@ -438,7 +438,7 @@ class CustomFilterPolicy : public leveldb::FilterPolicy { for (int i = 0; i < n; i++) { trimmed[i] = RemoveTrailingSpaces(keys[i]); } - return builtin_policy_->CreateFilter(&trimmed[i], n, dst); + return builtin_policy_->CreateFilter(trimmed.data(), n, dst); } }; ``` From 37d36c92f8622595aa791867775d2f4d82e45be7 Mon Sep 17 00:00:00 2001 From: Chris Mumford Date: Mon, 30 Nov 2020 09:57:27 -0800 Subject: [PATCH 32/68] Added google/benchmark submodule. --- .gitmodules | 3 +++ CMakeLists.txt | 4 +++- third_party/benchmark | 1 + 3 files changed, 7 insertions(+), 1 deletion(-) create mode 160000 third_party/benchmark diff --git a/.gitmodules b/.gitmodules index 5a4e85a..6e6d3f0 100644 --- a/.gitmodules +++ b/.gitmodules @@ -1,3 +1,6 @@ [submodule "third_party/googletest"] path = third_party/googletest url = https://github.com/google/googletest.git +[submodule "third_party/benchmark"] + path = third_party/benchmark + url = https://github.com/google/benchmark diff --git a/CMakeLists.txt b/CMakeLists.txt index ae9b0f7..2cb2296 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -298,6 +298,8 @@ if(LEVELDB_BUILD_TESTS) # This project is tested using GoogleTest. add_subdirectory("third_party/googletest") + add_subdirectory("third_party/benchmark") + # GoogleTest triggers a missing field initializers warning. if(LEVELDB_HAVE_NO_MISSING_FIELD_INITIALIZERS) set_property(TARGET gtest @@ -318,7 +320,7 @@ if(LEVELDB_BUILD_TESTS) "${test_file}" ) - target_link_libraries("${test_target_name}" leveldb gmock gtest) + target_link_libraries("${test_target_name}" leveldb gmock gtest benchmark) target_compile_definitions("${test_target_name}" PRIVATE ${LEVELDB_PLATFORM_NAME}=1 diff --git a/third_party/benchmark b/third_party/benchmark new file mode 160000 index 0000000..bf585a2 --- /dev/null +++ b/third_party/benchmark @@ -0,0 +1 @@ +Subproject commit bf585a2789e30585b4e3ce6baf11ef2750b54677 From b754fdca72e9382edd457c0fd81de6e1b644d789 Mon Sep 17 00:00:00 2001 From: Chris Mumford Date: Mon, 30 Nov 2020 10:43:24 -0800 Subject: [PATCH 33/68] Fixed fprintf of 64-bit value. --- db/db_test.cc | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/db/db_test.cc b/db/db_test.cc index 8cab018..5c364a3 100644 --- a/db/db_test.cc +++ b/db/db_test.cc @@ -5,6 +5,7 @@ #include "leveldb/db.h" #include +#include #include #include "gtest/gtest.h" @@ -2289,7 +2290,8 @@ static void BM_LogAndApply(benchmark::State& state) { char buf[16]; std::snprintf(buf, sizeof(buf), "%d", num_base_files); std::fprintf(stderr, - "BM_LogAndApply/%-6s %8zu iters : %9u us (%7.0f us / iter)\n", + "BM_LogAndApply/%-6s %8" PRIu64 + " iters : %9u us (%7.0f us / iter)\n", buf, state.iterations(), us, ((float)us) / state.iterations()); } From c3b52f7db6dba54bb8c17fa0dee9e2c0d066fa92 Mon Sep 17 00:00:00 2001 From: Chris Mumford Date: Mon, 30 Nov 2020 09:57:27 -0800 Subject: [PATCH 34/68] Fixup for adding the third_party/benchmark submodule. 
--- third_party/benchmark | 1 + 1 file changed, 1 insertion(+) create mode 160000 third_party/benchmark diff --git a/third_party/benchmark b/third_party/benchmark new file mode 160000 index 0000000..bf585a2 --- /dev/null +++ b/third_party/benchmark @@ -0,0 +1 @@ +Subproject commit bf585a2789e30585b4e3ce6baf11ef2750b54677 From 6721eda0b46654d3531b4a0a00c90dc659b337d6 Mon Sep 17 00:00:00 2001 From: Victor Costan Date: Mon, 14 Dec 2020 08:34:41 -0800 Subject: [PATCH 35/68] Update Travis CI config. PiperOrigin-RevId: 347391876 --- .travis.yml | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/.travis.yml b/.travis.yml index 56c772d..e34a67e 100644 --- a/.travis.yml +++ b/.travis.yml @@ -4,7 +4,7 @@ language: cpp dist: bionic -osx_image: xcode11.3 +osx_image: xcode12.2 compiler: - gcc @@ -32,8 +32,8 @@ addons: packages: - clang-10 - cmake - - gcc-9 - - g++-9 + - gcc-10 + - g++-10 - libgoogle-perftools-dev - libkyotocabinet-dev - libsnappy-dev @@ -43,7 +43,7 @@ addons: packages: - cmake - crc32c - - gcc@9 + - gcc@10 - gperftools - kyoto-cabinet - llvm@10 @@ -59,7 +59,7 @@ install: export PATH="$(brew --prefix llvm)/bin:$PATH"; fi # /usr/bin/gcc points to an older compiler on both Linux and macOS. -- if [ "$CXX" = "g++" ]; then export CXX="g++-9" CC="gcc-9"; fi +- if [ "$CXX" = "g++" ]; then export CXX="g++-10" CC="gcc-10"; fi # /usr/bin/clang points to an older compiler on both Linux and macOS. # # Homebrew's llvm package doesn't ship a versioned clang++ binary, so the values From 532be8530678a95da46354037d61a504c290403d Mon Sep 17 00:00:00 2001 From: Dimitris Apostolou Date: Thu, 17 Dec 2020 09:05:33 +0200 Subject: [PATCH 36/68] Fix insecure links --- CONTRIBUTING.md | 2 +- README.md | 2 +- doc/benchmark.html | 10 +++++----- doc/impl.md | 2 +- 4 files changed, 8 insertions(+), 8 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index a74572a..7ede021 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -32,5 +32,5 @@ the CLA. ## Writing Code ## If your contribution contains code, please make sure that it follows -[the style guide](http://google.github.io/styleguide/cppguide.html). +[the style guide](https://google.github.io/styleguide/cppguide.html). Otherwise we will have to ask you to make changes, and that's no fun for anyone. diff --git a/README.md b/README.md index 28d29c1..81144dd 100644 --- a/README.md +++ b/README.md @@ -14,7 +14,7 @@ Authors: Sanjay Ghemawat (sanjay@google.com) and Jeff Dean (jeff@google.com) * Multiple changes can be made in one atomic batch. * Users can create a transient snapshot to get a consistent view of data. * Forward and backward iteration is supported over the data. - * Data is automatically compressed using the [Snappy compression library](http://google.github.io/snappy/). + * Data is automatically compressed using the [Snappy compression library](https://google.github.io/snappy/). * External activity (file system operations etc.) is relayed through a virtual interface so users can customize the operating system interactions. # Documentation diff --git a/doc/benchmark.html b/doc/benchmark.html index f3fd771..1e0b4ef 100644 --- a/doc/benchmark.html +++ b/doc/benchmark.html @@ -83,7 +83,7 @@ div.bsql {

Google, July 2011


In order to test LevelDB's performance, we benchmark it against other well-established database implementations. We compare LevelDB (revision 39) against SQLite3 (version 3.7.6.3) and Kyoto Cabinet's (version 1.2.67) TreeDB (a B+Tree based key-value store). We would like to acknowledge Scott Hess and Mikio Hirabayashi for their suggestions and contributions to the SQLite3 and Kyoto Cabinet benchmarks, respectively.

Benchmarks were all performed on a six-core Intel(R) Xeon(R) CPU X5650 @ 2.67GHz, with 12288 KB of total L3 cache and 12 GB of DDR3 RAM at 1333 MHz. (Note that LevelDB uses at most two CPUs since the benchmarks are single threaded: one to run the benchmark, and one for background compactions.) We ran the benchmarks on two machines (with identical processors), one with an Ext3 file system and one with an Ext4 file system. The machine with the Ext3 file system has a SATA Hitachi HDS721050CLA362 hard drive. The machine with the Ext4 file system has a SATA Samsung HD502HJ hard drive. Both hard drives spin at 7200 RPM and have hard drive write-caching enabled (using `hdparm -W 1 [device]`). The numbers reported below are the median of three measurements.

@@ -97,9 +97,9 @@ div.bsql {

Custom Build Specifications

  • LevelDB: LevelDB was compiled with the tcmalloc library and the Snappy compression library (revision 33). Assertions were disabled.
  • TreeDB: TreeDB was compiled using the LZO compression library (version 2.03). Furthermore, we enabled the TSMALL and TLINEAR options when opening the database in order to reduce the footprint of each record.
  • SQLite: We tuned SQLite's performance, by setting its locking mode to exclusive. We also enabled SQLite's write-ahead logging.

1. Baseline Performance

@@ -451,7 +451,7 @@ performance may very well be better with compression if it allows more of the working set to fit in memory.

Note about Ext4 Filesystems

The preceding numbers are for an ext3 file system. Synchronous writes are much slower under ext4 (LevelDB drops to ~31 writes / second and TreeDB drops to ~5 writes / second; SQLite3's synchronous writes do not noticeably drop) due to ext4's different handling of fsync / msync calls. Even LevelDB's asynchronous write performance drops somewhat since it spreads its storage across multiple files and issues fsync calls when switching to a new file.

Acknowledgements

Jeff Dean and Sanjay Ghemawat wrote LevelDB. Kevin Tseng wrote and compiled these benchmarks. Mikio Hirabayashi, Scott Hess, and Gabor Cselle provided help and advice.

diff --git a/doc/impl.md b/doc/impl.md index 45187a2..c9bb621 100644 --- a/doc/impl.md +++ b/doc/impl.md @@ -1,7 +1,7 @@ ## Files The implementation of leveldb is similar in spirit to the representation of a -single [Bigtable tablet (section 5.3)](http://research.google.com/archive/bigtable.html). +single [Bigtable tablet (section 5.3)](https://research.google/pubs/pub27898/). However the organization of the files that make up the representation is somewhat different and is explained below. From 8cce47e450b365347769959c53b8836ef0216df9 Mon Sep 17 00:00:00 2001 From: leveldb Team Date: Mon, 11 Jan 2021 15:32:34 +0000 Subject: [PATCH 37/68] Optimize leveldb block seeks to utilize the current iterator location. This is beneficial when iterators are reused and seeks are not random but increasing. It is additionally beneficial with larger block sizes and keys with common prefixes. Add a benchmark "seekordered" to db_bench that reuses iterators across increasing seeks. Add support to the benchmark to count comparisons made and to support common key prefix length. Change benchmark random seeds to be reproducible for entire benchmark suite executions but unique for threads in different benchmarks runs. This changes a benchmark suite of readrandom,seekrandom from having a 100% found ratio as previously it had the same seed used for fillrandom. ./db_bench --benchmarks=fillrandom,compact,seekordered --block_size=262144 --comparisons=1 --key_prefix=100 without this change (though with benchmark changes): seekrandom : 55.309 micros/op; (631820 of 1000000 found) Comparisons: 27001049 seekordered : 1.732 micros/op; (631882 of 1000000 found) Comparisons: 26998402 with this change: seekrandom : 55.866 micros/op; (631820 of 1000000 found) Comparisons: 26952143 seekordered : 1.686 micros/op; (631882 of 1000000 found) Comparisons: 25549369 For ordered seeking, this is a reduction of 5% comparisons and a 3% speedup. For random seeking (with single use iterators) the comparisons and speed are less than 1% and likely noise. PiperOrigin-RevId: 351149832 --- benchmarks/db_bench.cc | 168 +++++++++++++++++++++++++++++++++++++++---------- db/db_test.cc | 61 ++++++++++++++++++ table/block.cc | 27 +++++++- 3 files changed, 221 insertions(+), 35 deletions(-) diff --git a/benchmarks/db_bench.cc b/benchmarks/db_bench.cc index 288b119..7c474d8 100644 --- a/benchmarks/db_bench.cc +++ b/benchmarks/db_bench.cc @@ -4,10 +4,12 @@ #include +#include #include #include #include "leveldb/cache.h" +#include "leveldb/comparator.h" #include "leveldb/db.h" #include "leveldb/env.h" #include "leveldb/filter_policy.h" @@ -34,6 +36,7 @@ // readmissing -- read N missing keys in random order // readhot -- read N times in random order from 1% section of DB // seekrandom -- N random seeks +// seekordered -- N ordered seeks // open -- cost of opening a DB // crc32c -- repeated crc32c of 4K of data // Meta operations: @@ -78,6 +81,9 @@ static double FLAGS_compression_ratio = 0.5; // Print histogram of operation timings static bool FLAGS_histogram = false; +// Count the number of string comparisons performed +static bool FLAGS_comparisons = false; + // Number of bytes to buffer in memtable before compacting // (initialized to default value by "main") static int FLAGS_write_buffer_size = 0; @@ -101,6 +107,9 @@ static int FLAGS_open_files = 0; // Negative means use default settings. static int FLAGS_bloom_bits = -1; +// Common key prefix length. +static int FLAGS_key_prefix = 0; + // If true, do not destroy the existing database. 
If you set this // flag and also specify a benchmark that wants a fresh database, that // benchmark will fail. @@ -117,6 +126,33 @@ namespace leveldb { namespace { leveldb::Env* g_env = nullptr; +class CountComparator : public Comparator { + public: + CountComparator(const Comparator* wrapped) : wrapped_(wrapped) {} + ~CountComparator() override {} + int Compare(const Slice& a, const Slice& b) const { + count_.fetch_add(1, std::memory_order_relaxed); + return wrapped_->Compare(a, b); + } + const char* Name() const override { return wrapped_->Name(); } + void FindShortestSeparator(std::string* start, + const Slice& limit) const override { + wrapped_->FindShortestSeparator(start, limit); + } + + void FindShortSuccessor(std::string* key) const override { + return wrapped_->FindShortSuccessor(key); + } + + size_t comparisons() const { return count_.load(std::memory_order_relaxed); } + + void reset() { count_.store(0, std::memory_order_relaxed); } + + private: + mutable std::atomic count_ = 0; + const Comparator* const wrapped_; +}; + // Helper for quickly generating random data. class RandomGenerator { private: @@ -149,6 +185,26 @@ class RandomGenerator { } }; +class KeyBuffer { + public: + KeyBuffer() { + assert(FLAGS_key_prefix < sizeof(buffer_)); + memset(buffer_, 'a', FLAGS_key_prefix); + } + KeyBuffer& operator=(KeyBuffer& other) = delete; + KeyBuffer(KeyBuffer& other) = delete; + + void Set(int k) { + std::snprintf(buffer_ + FLAGS_key_prefix, + sizeof(buffer_) - FLAGS_key_prefix, "%016d", k); + } + + Slice slice() const { return Slice(buffer_, FLAGS_key_prefix + 16); } + + private: + char buffer_[1024]; +}; + #if defined(__linux) static Slice TrimSpace(Slice s) { size_t start = 0; @@ -305,7 +361,7 @@ struct ThreadState { Stats stats; SharedState* shared; - ThreadState(int index) : tid(index), rand(1000 + index), shared(nullptr) {} + ThreadState(int index, int seed) : tid(index), rand(seed), shared(nullptr) {} }; } // namespace @@ -321,9 +377,11 @@ class Benchmark { WriteOptions write_options_; int reads_; int heap_counter_; + CountComparator count_comparator_; + int total_thread_count_; void PrintHeader() { - const int kKeySize = 16; + const int kKeySize = 16 + FLAGS_key_prefix; PrintEnvironment(); std::fprintf(stdout, "Keys: %d bytes each\n", kKeySize); std::fprintf( @@ -411,7 +469,9 @@ class Benchmark { value_size_(FLAGS_value_size), entries_per_batch_(1), reads_(FLAGS_reads < 0 ? FLAGS_num : FLAGS_reads), - heap_counter_(0) { + heap_counter_(0), + count_comparator_(BytewiseComparator()), + total_thread_count_(0) { std::vector files; g_env->GetChildren(FLAGS_db, &files); for (size_t i = 0; i < files.size(); i++) { @@ -494,6 +554,8 @@ class Benchmark { method = &Benchmark::ReadMissing; } else if (name == Slice("seekrandom")) { method = &Benchmark::SeekRandom; + } else if (name == Slice("seekordered")) { + method = &Benchmark::SeekOrdered; } else if (name == Slice("readhot")) { method = &Benchmark::ReadHot; } else if (name == Slice("readrandomsmall")) { @@ -591,7 +653,11 @@ class Benchmark { arg[i].bm = this; arg[i].method = method; arg[i].shared = &shared; - arg[i].thread = new ThreadState(i); + ++total_thread_count_; + // Seed the thread's random state deterministically based upon thread + // creation across all benchmarks. This ensures that the seeds are unique + // but reproducible when rerunning the same set of benchmarks. 
+ arg[i].thread = new ThreadState(i, /*seed=*/1000 + total_thread_count_); arg[i].thread->shared = &shared; g_env->StartThread(ThreadBody, &arg[i]); } @@ -612,6 +678,11 @@ class Benchmark { arg[0].thread->stats.Merge(arg[i].thread->stats); } arg[0].thread->stats.Report(name); + if (FLAGS_comparisons) { + fprintf(stdout, "Comparisons: %ld\n", count_comparator_.comparisons()); + count_comparator_.reset(); + fflush(stdout); + } for (int i = 0; i < n; i++) { delete arg[i].thread; @@ -694,6 +765,9 @@ class Benchmark { options.write_buffer_size = FLAGS_write_buffer_size; options.max_file_size = FLAGS_max_file_size; options.block_size = FLAGS_block_size; + if (FLAGS_comparisons) { + options.comparator = &count_comparator_; + } options.max_open_files = FLAGS_open_files; options.filter_policy = filter_policy_; options.reuse_logs = FLAGS_reuse_logs; @@ -727,14 +801,14 @@ class Benchmark { WriteBatch batch; Status s; int64_t bytes = 0; + KeyBuffer key; for (int i = 0; i < num_; i += entries_per_batch_) { batch.Clear(); for (int j = 0; j < entries_per_batch_; j++) { - const int k = seq ? i + j : (thread->rand.Next() % FLAGS_num); - char key[100]; - std::snprintf(key, sizeof(key), "%016d", k); - batch.Put(key, gen.Generate(value_size_)); - bytes += value_size_ + strlen(key); + const int k = seq ? i + j : thread->rand.Uniform(FLAGS_num); + key.Set(k); + batch.Put(key.slice(), gen.Generate(value_size_)); + bytes += value_size_ + key.slice().size(); thread->stats.FinishedSingleOp(); } s = db_->Write(write_options_, &batch); @@ -776,11 +850,11 @@ class Benchmark { ReadOptions options; std::string value; int found = 0; + KeyBuffer key; for (int i = 0; i < reads_; i++) { - char key[100]; - const int k = thread->rand.Next() % FLAGS_num; - std::snprintf(key, sizeof(key), "%016d", k); - if (db_->Get(options, key, &value).ok()) { + const int k = thread->rand.Uniform(FLAGS_num); + key.Set(k); + if (db_->Get(options, key.slice(), &value).ok()) { found++; } thread->stats.FinishedSingleOp(); @@ -793,11 +867,12 @@ class Benchmark { void ReadMissing(ThreadState* thread) { ReadOptions options; std::string value; + KeyBuffer key; for (int i = 0; i < reads_; i++) { - char key[100]; - const int k = thread->rand.Next() % FLAGS_num; - std::snprintf(key, sizeof(key), "%016d.", k); - db_->Get(options, key, &value); + const int k = thread->rand.Uniform(FLAGS_num); + key.Set(k); + Slice s = Slice(key.slice().data(), key.slice().size() - 1); + db_->Get(options, s, &value); thread->stats.FinishedSingleOp(); } } @@ -806,11 +881,11 @@ class Benchmark { ReadOptions options; std::string value; const int range = (FLAGS_num + 99) / 100; + KeyBuffer key; for (int i = 0; i < reads_; i++) { - char key[100]; - const int k = thread->rand.Next() % range; - std::snprintf(key, sizeof(key), "%016d", k); - db_->Get(options, key, &value); + const int k = thread->rand.Uniform(range); + key.Set(k); + db_->Get(options, key.slice(), &value); thread->stats.FinishedSingleOp(); } } @@ -818,17 +893,36 @@ class Benchmark { void SeekRandom(ThreadState* thread) { ReadOptions options; int found = 0; + KeyBuffer key; for (int i = 0; i < reads_; i++) { Iterator* iter = db_->NewIterator(options); - char key[100]; - const int k = thread->rand.Next() % FLAGS_num; - std::snprintf(key, sizeof(key), "%016d", k); - iter->Seek(key); - if (iter->Valid() && iter->key() == key) found++; + const int k = thread->rand.Uniform(FLAGS_num); + key.Set(k); + iter->Seek(key.slice()); + if (iter->Valid() && iter->key() == key.slice()) found++; delete iter; 
thread->stats.FinishedSingleOp(); } char msg[100]; + snprintf(msg, sizeof(msg), "(%d of %d found)", found, num_); + thread->stats.AddMessage(msg); + } + + void SeekOrdered(ThreadState* thread) { + ReadOptions options; + Iterator* iter = db_->NewIterator(options); + int found = 0; + int k = 0; + KeyBuffer key; + for (int i = 0; i < reads_; i++) { + k = (k + (thread->rand.Uniform(100))) % FLAGS_num; + key.Set(k); + iter->Seek(key.slice()); + if (iter->Valid() && iter->key() == key.slice()) found++; + thread->stats.FinishedSingleOp(); + } + delete iter; + char msg[100]; std::snprintf(msg, sizeof(msg), "(%d of %d found)", found, num_); thread->stats.AddMessage(msg); } @@ -837,13 +931,13 @@ class Benchmark { RandomGenerator gen; WriteBatch batch; Status s; + KeyBuffer key; for (int i = 0; i < num_; i += entries_per_batch_) { batch.Clear(); for (int j = 0; j < entries_per_batch_; j++) { - const int k = seq ? i + j : (thread->rand.Next() % FLAGS_num); - char key[100]; - std::snprintf(key, sizeof(key), "%016d", k); - batch.Delete(key); + const int k = seq ? i + j : (thread->rand.Uniform(FLAGS_num)); + key.Set(k); + batch.Delete(key.slice()); thread->stats.FinishedSingleOp(); } s = db_->Write(write_options_, &batch); @@ -864,6 +958,7 @@ class Benchmark { } else { // Special thread that keeps writing until other threads are done. RandomGenerator gen; + KeyBuffer key; while (true) { { MutexLock l(&thread->shared->mu); @@ -873,10 +968,10 @@ class Benchmark { } } - const int k = thread->rand.Next() % FLAGS_num; - char key[100]; - std::snprintf(key, sizeof(key), "%016d", k); - Status s = db_->Put(write_options_, key, gen.Generate(value_size_)); + const int k = thread->rand.Uniform(FLAGS_num); + key.Set(k); + Status s = + db_->Put(write_options_, key.slice(), gen.Generate(value_size_)); if (!s.ok()) { std::fprintf(stderr, "put error: %s\n", s.ToString().c_str()); std::exit(1); @@ -941,6 +1036,9 @@ int main(int argc, char** argv) { } else if (sscanf(argv[i], "--histogram=%d%c", &n, &junk) == 1 && (n == 0 || n == 1)) { FLAGS_histogram = n; + } else if (sscanf(argv[i], "--comparisons=%d%c", &n, &junk) == 1 && + (n == 0 || n == 1)) { + FLAGS_comparisons = n; } else if (sscanf(argv[i], "--use_existing_db=%d%c", &n, &junk) == 1 && (n == 0 || n == 1)) { FLAGS_use_existing_db = n; @@ -961,6 +1059,8 @@ int main(int argc, char** argv) { FLAGS_max_file_size = n; } else if (sscanf(argv[i], "--block_size=%d%c", &n, &junk) == 1) { FLAGS_block_size = n; + } else if (sscanf(argv[i], "--key_prefix=%d%c", &n, &junk) == 1) { + FLAGS_key_prefix = n; } else if (sscanf(argv[i], "--cache_size=%d%c", &n, &junk) == 1) { FLAGS_cache_size = n; } else if (sscanf(argv[i], "--bloom_bits=%d%c", &n, &junk) == 1) { diff --git a/db/db_test.cc b/db/db_test.cc index 5c364a3..eb8d60c 100644 --- a/db/db_test.cc +++ b/db/db_test.cc @@ -965,6 +965,26 @@ TEST_F(DBTest, IterMultiWithDelete) { } while (ChangeOptions()); } +TEST_F(DBTest, IterMultiWithDeleteAndCompaction) { + do { + ASSERT_LEVELDB_OK(Put("b", "vb")); + ASSERT_LEVELDB_OK(Put("c", "vc")); + ASSERT_LEVELDB_OK(Put("a", "va")); + dbfull()->TEST_CompactMemTable(); + ASSERT_LEVELDB_OK(Delete("b")); + ASSERT_EQ("NOT_FOUND", Get("b")); + + Iterator* iter = db_->NewIterator(ReadOptions()); + iter->Seek("c"); + ASSERT_EQ(IterStatus(iter), "c->vc"); + iter->Prev(); + ASSERT_EQ(IterStatus(iter), "a->va"); + iter->Seek("b"); + ASSERT_EQ(IterStatus(iter), "c->vc"); + delete iter; + } while (ChangeOptions()); +} + TEST_F(DBTest, Recover) { do { ASSERT_LEVELDB_OK(Put("foo", "v1")); @@ -2132,6 
+2152,9 @@ static bool CompareIterators(int step, DB* model, DB* db, Iterator* dbiter = db->NewIterator(options); bool ok = true; int count = 0; + std::vector seek_keys; + // Compare equality of all elements using Next(). Save some of the keys for + // comparing Seek equality. for (miter->SeekToFirst(), dbiter->SeekToFirst(); ok && miter->Valid() && dbiter->Valid(); miter->Next(), dbiter->Next()) { count++; @@ -2150,6 +2173,11 @@ static bool CompareIterators(int step, DB* model, DB* db, EscapeString(miter->value()).c_str(), EscapeString(miter->value()).c_str()); ok = false; + break; + } + + if (count % 10 == 0) { + seek_keys.push_back(miter->key().ToString()); } } @@ -2160,6 +2188,39 @@ static bool CompareIterators(int step, DB* model, DB* db, ok = false; } } + + if (ok) { + // Validate iterator equality when performing seeks. + for (auto kiter = seek_keys.begin(); ok && kiter != seek_keys.end(); + ++kiter) { + miter->Seek(*kiter); + dbiter->Seek(*kiter); + if (!miter->Valid() || !dbiter->Valid()) { + std::fprintf(stderr, "step %d: Seek iterators invalid: %d vs. %d\n", + step, miter->Valid(), dbiter->Valid()); + ok = false; + } + if (miter->key().compare(dbiter->key()) != 0) { + std::fprintf(stderr, "step %d: Seek key mismatch: '%s' vs. '%s'\n", + step, EscapeString(miter->key()).c_str(), + EscapeString(dbiter->key()).c_str()); + ok = false; + break; + } + + if (miter->value().compare(dbiter->value()) != 0) { + std::fprintf( + stderr, + "step %d: Seek value mismatch for key '%s': '%s' vs. '%s'\n", step, + EscapeString(miter->key()).c_str(), + EscapeString(miter->value()).c_str(), + EscapeString(miter->value()).c_str()); + ok = false; + break; + } + } + } + std::fprintf(stderr, "%d entries compared: ok=%d\n", count, ok); delete miter; delete dbiter; diff --git a/table/block.cc b/table/block.cc index 2fe89ea..3b15257 100644 --- a/table/block.cc +++ b/table/block.cc @@ -166,6 +166,24 @@ class Block::Iter : public Iterator { // with a key < target uint32_t left = 0; uint32_t right = num_restarts_ - 1; + int current_key_compare = 0; + + if (Valid()) { + // If we're already scanning, use the current position as a starting + // point. This is beneficial if the key we're seeking to is ahead of the + // current position. + current_key_compare = Compare(key_, target); + if (current_key_compare < 0) { + // key_ is smaller than target + left = restart_index_; + } else if (current_key_compare > 0) { + right = restart_index_; + } else { + // We're seeking to the key we're already at. + return; + } + } + while (left < right) { uint32_t mid = (left + right + 1) / 2; uint32_t region_offset = GetRestartPoint(mid); @@ -189,8 +207,15 @@ class Block::Iter : public Iterator { } } + // We might be able to use our current position within the restart block. + // This is true if we determined the key we desire is in the current block + // and is after than the current key. + assert(current_key_compare == 0 || Valid()); + bool skip_seek = left == restart_index_ && current_key_compare < 0; + if (!skip_seek) { + SeekToRestartPoint(left); + } // Linear search (within restart block) for first key >= target - SeekToRestartPoint(left); while (true) { if (!ParseNextKey()) { return; From 8f1861462b27727dfc5b2c4687112108e6ba88eb Mon Sep 17 00:00:00 2001 From: leveldb Team Date: Tue, 12 Jan 2021 21:08:52 +0000 Subject: [PATCH 38/68] Sync MANIFEST before closing in db_impl when creating a new DB. Add logging with debugging information when failing to load a version set. 
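For context, Close() alone does not guarantee that the freshly written MANIFEST reaches stable storage, which is why a Sync() call is added before it. Below is a minimal sketch of that ordering, written against the WritableFile interface from include/leveldb/env.h; the helper name and wrapper are illustrative and not part of the patch.

```
#include "leveldb/env.h"
#include "leveldb/slice.h"
#include "leveldb/status.h"

namespace leveldb {

// Illustrative helper (not in the patch): write a record durably by flushing,
// syncing, and only then closing the file, mirroring DBImpl::NewDB().
Status WriteRecordDurably(WritableFile* file, const Slice& record) {
  Status s = file->Append(record);
  if (s.ok()) s = file->Flush();  // drain user-space buffers
  if (s.ok()) s = file->Sync();   // persist to stable storage
  if (s.ok()) s = file->Close();  // close only after the data is durable
  return s;
}

}  // namespace leveldb
```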
PiperOrigin-RevId: 351432332 --- db/db_impl.cc | 5 +++++ db/version_set.cc | 6 ++++++ 2 files changed, 11 insertions(+) diff --git a/db/db_impl.cc b/db/db_impl.cc index 59b834f..1a4e459 100644 --- a/db/db_impl.cc +++ b/db/db_impl.cc @@ -197,6 +197,9 @@ Status DBImpl::NewDB() { new_db.EncodeTo(&record); s = log.AddRecord(record); if (s.ok()) { + s = file->Sync(); + } + if (s.ok()) { s = file->Close(); } } @@ -301,6 +304,8 @@ Status DBImpl::Recover(VersionEdit* edit, bool* save_manifest) { if (!env_->FileExists(CurrentFileName(dbname_))) { if (options_.create_if_missing) { + Log(options_.info_log, "Creating DB %s since it was missing.", + dbname_.c_str()); s = NewDB(); if (!s.ok()) { return s; diff --git a/db/version_set.cc b/db/version_set.cc index a459587..1963353 100644 --- a/db/version_set.cc +++ b/db/version_set.cc @@ -898,6 +898,7 @@ Status VersionSet::Recover(bool* save_manifest) { uint64_t log_number = 0; uint64_t prev_log_number = 0; Builder builder(this, current_); + int read_records = 0; { LogReporter reporter; @@ -907,6 +908,7 @@ Status VersionSet::Recover(bool* save_manifest) { Slice record; std::string scratch; while (reader.ReadRecord(&record, &scratch) && s.ok()) { + ++read_records; VersionEdit edit; s = edit.DecodeFrom(record); if (s.ok()) { @@ -981,6 +983,10 @@ Status VersionSet::Recover(bool* save_manifest) { } else { *save_manifest = true; } + } else { + std::string error = s.ToString(); + Log(options_->info_log, "Error recovering version set with %d records: %s", + read_records, error.c_str()); } return s; From 1998c0ef15f0fb64994e165230473337f041fd8c Mon Sep 17 00:00:00 2001 From: Victor Costan Date: Tue, 12 Jan 2021 21:54:35 +0000 Subject: [PATCH 39/68] Fix build errors. PiperOrigin-RevId: 351442409 --- benchmarks/db_bench.cc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/benchmarks/db_bench.cc b/benchmarks/db_bench.cc index 7c474d8..b362569 100644 --- a/benchmarks/db_bench.cc +++ b/benchmarks/db_bench.cc @@ -130,7 +130,7 @@ class CountComparator : public Comparator { public: CountComparator(const Comparator* wrapped) : wrapped_(wrapped) {} ~CountComparator() override {} - int Compare(const Slice& a, const Slice& b) const { + int Compare(const Slice& a, const Slice& b) const override { count_.fetch_add(1, std::memory_order_relaxed); return wrapped_->Compare(a, b); } @@ -149,7 +149,7 @@ class CountComparator : public Comparator { void reset() { count_.store(0, std::memory_order_relaxed); } private: - mutable std::atomic count_ = 0; + mutable std::atomic count_{0}; const Comparator* const wrapped_; }; From 4a919ea4f7cf2b78bb1403e40b4888f592610c5a Mon Sep 17 00:00:00 2001 From: Victor Costan Date: Mon, 25 Jan 2021 17:02:58 +0000 Subject: [PATCH 40/68] IWYU fixes in db/c.cc. Fixes https://github.com/google/leveldb/issues/872 PiperOrigin-RevId: 353657701 --- db/c.cc | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/db/c.cc b/db/c.cc index b5c9251..8bdde38 100644 --- a/db/c.cc +++ b/db/c.cc @@ -4,6 +4,8 @@ #include "leveldb/c.h" +#include + #include #include @@ -119,7 +121,7 @@ struct leveldb_filterpolicy_t : public FilterPolicy { size_t len; char* filter = (*create_)(state_, &key_pointers[0], &key_sizes[0], n, &len); dst->append(filter, len); - free(filter); + std::free(filter); } bool KeyMayMatch(const Slice& key, const Slice& filter) const override { @@ -150,14 +152,15 @@ static bool SaveError(char** errptr, const Status& s) { *errptr = strdup(s.ToString().c_str()); } else { // TODO(sanjay): Merge with existing error? 
- free(*errptr); + std::free(*errptr); *errptr = strdup(s.ToString().c_str()); } return true; } static char* CopyString(const std::string& str) { - char* result = reinterpret_cast(malloc(sizeof(char) * str.size())); + char* result = + reinterpret_cast(std::malloc(sizeof(char) * str.size())); std::memcpy(result, str.data(), sizeof(char) * str.size()); return result; } @@ -547,13 +550,13 @@ char* leveldb_env_get_test_directory(leveldb_env_t* env) { return nullptr; } - char* buffer = static_cast(malloc(result.size() + 1)); + char* buffer = static_cast(std::malloc(result.size() + 1)); std::memcpy(buffer, result.data(), result.size()); buffer[result.size()] = '\0'; return buffer; } -void leveldb_free(void* ptr) { free(ptr); } +void leveldb_free(void* ptr) { std::free(ptr); } int leveldb_major_version() { return kMajorVersion; } From 2a47801868e223fd10af272ed9fbd8b699711aae Mon Sep 17 00:00:00 2001 From: Chris Mumford Date: Tue, 16 Feb 2021 15:38:44 -0800 Subject: [PATCH 41/68] Use partial path to benchmark/benchmark.h. Using the partial path offers more flexibility to projects which may checkout google/benchmark to a different location. PiperOrigin-RevId: 357819911 --- db/db_test.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/db/db_test.cc b/db/db_test.cc index eb8d60c..908b41d 100644 --- a/db/db_test.cc +++ b/db/db_test.cc @@ -9,7 +9,7 @@ #include #include "gtest/gtest.h" -#include "third_party/benchmark/include/benchmark/benchmark.h" +#include "benchmark/benchmark.h" #include "db/db_impl.h" #include "db/filename.h" #include "db/version_set.h" From 37aaf2fccd8db3b18bb303bfdb25b455f5c75c51 Mon Sep 17 00:00:00 2001 From: Chris Mumford Date: Wed, 17 Feb 2021 10:18:12 -0800 Subject: [PATCH 42/68] Fix fprintf format string. Using %zu for size_t instead of %ld. PiperOrigin-RevId: 357976882 --- benchmarks/db_bench.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/benchmarks/db_bench.cc b/benchmarks/db_bench.cc index b362569..429a61a 100644 --- a/benchmarks/db_bench.cc +++ b/benchmarks/db_bench.cc @@ -679,7 +679,7 @@ class Benchmark { } arg[0].thread->stats.Report(name); if (FLAGS_comparisons) { - fprintf(stdout, "Comparisons: %ld\n", count_comparator_.comparisons()); + fprintf(stdout, "Comparisons: %zu\n", count_comparator_.comparisons()); count_comparator_.reset(); fflush(stdout); } From 99b3c03b3284f5886f9ef9a4ef703d57373e61be Mon Sep 17 00:00:00 2001 From: Chris Mumford Date: Tue, 23 Feb 2021 12:51:40 -0800 Subject: [PATCH 43/68] Change version to 1.23. PiperOrigin-RevId: 359111035 --- CMakeLists.txt | 2 +- include/leveldb/db.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 7ecf317..f8285b8 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -4,7 +4,7 @@ cmake_minimum_required(VERSION 3.9) # Keep the version below in sync with the one in db.h -project(leveldb VERSION 1.22.0 LANGUAGES C CXX) +project(leveldb VERSION 1.23.0 LANGUAGES C CXX) # C standard can be overridden when this is used as a sub-project. 
if(NOT CMAKE_C_STANDARD) diff --git a/include/leveldb/db.h b/include/leveldb/db.h index 2a995ec..a13d147 100644 --- a/include/leveldb/db.h +++ b/include/leveldb/db.h @@ -16,7 +16,7 @@ namespace leveldb { // Update CMakeLists.txt if you change these static const int kMajorVersion = 1; -static const int kMinorVersion = 22; +static const int kMinorVersion = 23; struct Options; struct ReadOptions; From 24bcf7f7ceeb4534e8e3fd6a7e543e41b568e251 Mon Sep 17 00:00:00 2001 From: Paul Beusterien Date: Fri, 19 Feb 2021 13:17:38 -0800 Subject: [PATCH 44/68] Don't include C++ headers in extern C --- include/leveldb/c.h | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/include/leveldb/c.h b/include/leveldb/c.h index 02c79ba..62e1f64 100644 --- a/include/leveldb/c.h +++ b/include/leveldb/c.h @@ -40,16 +40,16 @@ #ifndef STORAGE_LEVELDB_INCLUDE_C_H_ #define STORAGE_LEVELDB_INCLUDE_C_H_ -#ifdef __cplusplus -extern "C" { -#endif - #include #include #include #include "leveldb/export.h" +#ifdef __cplusplus +extern "C" { +#endif + /* Exported types */ typedef struct leveldb_t leveldb_t; From f6d094e994d54d8e536cf7c36fafd0f5f1af61f9 Mon Sep 17 00:00:00 2001 From: Raynol Menezes <62543741+raynolmenezes@users.noreply.github.com> Date: Fri, 16 Apr 2021 13:00:59 +0530 Subject: [PATCH 45/68] Update log_reader.h --- db/log_reader.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/db/log_reader.h b/db/log_reader.h index 75d53f7..ba711f8 100644 --- a/db/log_reader.h +++ b/db/log_reader.h @@ -24,7 +24,7 @@ class Reader { public: virtual ~Reporter(); - // Some corruption was detected. "size" is the approximate number + // Some corruption was detected. "bytes" is the approximate number // of bytes dropped due to the corruption. virtual void Corruption(size_t bytes, const Status& status) = 0; }; From 1ca4f5b466c84063d61c350abe2c04f88d656e33 Mon Sep 17 00:00:00 2001 From: mwish Date: Sun, 2 May 2021 12:31:40 +0800 Subject: [PATCH 46/68] [Init] initial commit --- util/env_posix.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/util/env_posix.cc b/util/env_posix.cc index d84cd1e..e6a5743 100644 --- a/util/env_posix.cc +++ b/util/env_posix.cc @@ -108,7 +108,7 @@ class Limiter { class PosixSequentialFile final : public SequentialFile { public: PosixSequentialFile(std::string filename, int fd) - : fd_(fd), filename_(filename) {} + : fd_(fd), filename_(std::move(filename)) {} ~PosixSequentialFile() override { close(fd_); } Status Read(size_t n, Slice* result, char* scratch) override { From dbf24d9a0c3e91345281d1c8c9263e31fefadc36 Mon Sep 17 00:00:00 2001 From: ehds Date: Sat, 8 May 2021 13:48:39 +0800 Subject: [PATCH 47/68] Make table cache non-copyable --- db/table_cache.h | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/db/table_cache.h b/db/table_cache.h index aac9bfc..db8a123 100644 --- a/db/table_cache.h +++ b/db/table_cache.h @@ -22,6 +22,10 @@ class Env; class TableCache { public: TableCache(const std::string& dbname, const Options& options, int entries); + + TableCache(const TableCache&) = delete; + TableCache& operator=(const TableCache&) = delete; + ~TableCache(); // Return an iterator for the specified file number (the corresponding From 3806fbc23c6b4a84b2abe26bb650e1b3d059438f Mon Sep 17 00:00:00 2001 From: LazyWolfLin Date: Thu, 18 Feb 2021 11:04:10 +0800 Subject: [PATCH 48/68] Small fix. Use function instead of original expression. 
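The new expression is behavior-preserving as long as Random::OneIn(n) performs the usual (Next() % n) == 0 test. The sketch below is an illustration of that helper rather than a copy of util/random.h; the generator body is a placeholder.

```
#include <cstdint>

// Illustrative stand-in for leveldb::Random showing only the piece that
// SkipList::RandomHeight() relies on.
class Random {
 public:
  explicit Random(uint32_t s) : seed_(s == 0 ? 1u : s) {}

  uint32_t Next() {
    // Placeholder pseudo-random sequence; the real class uses a different
    // generator, which does not matter for this sketch.
    seed_ = seed_ * 1103515245u + 12345u;
    return seed_ >> 1;
  }

  // True with probability roughly 1/n, i.e. the same test as the original
  // (rnd_.Next() % kBranching) == 0 expression.
  bool OneIn(int n) { return (Next() % n) == 0; }

 private:
  uint32_t seed_;
};
```

With such a helper, the loop becomes while (height < kMaxHeight && rnd_.OneIn(kBranching)) height++;, which is the line the diff below introduces.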
--- db/skiplist.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/db/skiplist.h b/db/skiplist.h index a59b45b..f716834 100644 --- a/db/skiplist.h +++ b/db/skiplist.h @@ -243,7 +243,7 @@ int SkipList::RandomHeight() { // Increase height with probability 1 in kBranching static const unsigned int kBranching = 4; int height = 1; - while (height < kMaxHeight && ((rnd_.Next() % kBranching) == 0)) { + while (height < kMaxHeight && rnd_.OneIn(kBranching)) { height++; } assert(height > 0); From f6fe2ec5616823da11d3a36674e94131047f9210 Mon Sep 17 00:00:00 2001 From: Victor Costan Date: Mon, 17 May 2021 18:11:07 -0700 Subject: [PATCH 49/68] Roll third-party dependencies. --- third_party/benchmark | 2 +- third_party/googletest | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/third_party/benchmark b/third_party/benchmark index bf585a2..7d0d906 160000 --- a/third_party/benchmark +++ b/third_party/benchmark @@ -1 +1 @@ -Subproject commit bf585a2789e30585b4e3ce6baf11ef2750b54677 +Subproject commit 7d0d9061d83b663ce05d9de5da3d5865a3845b79 diff --git a/third_party/googletest b/third_party/googletest index c27aceb..662fe38 160000 --- a/third_party/googletest +++ b/third_party/googletest @@ -1 +1 @@ -Subproject commit c27acebba3b3c7d94209e0467b0a801db4af73ed +Subproject commit 662fe38e44900c007eccb65a5d2ea19df7bd520e From 13e3c4efc66b8d7317c7648766a930b5d7e48aa7 Mon Sep 17 00:00:00 2001 From: Sanjay Ghemawat Date: Thu, 20 May 2021 19:02:41 +0000 Subject: [PATCH 50/68] Fix compactions that could end up breaking a run of the same user key across multiple files. As reported in Github issue #339, it is incorrect to split the same user key across multiple compacted files since it causes tombstones/newer-versions to be dropped, thereby exposing obsolete data. There was a fix for #339, but it ended up not fully fixing the problem. (It checked for boundary problems in the first level being compacted, but not the second). This problem was revealed by Github issue 887. We now adjust boundaries to avoid splitting user keys in both the first level and the second level. PiperOrigin-RevId: 374921082 --- db/version_set.cc | 2 ++ 1 file changed, 2 insertions(+) diff --git a/db/version_set.cc b/db/version_set.cc index 1963353..8d85fce 100644 --- a/db/version_set.cc +++ b/db/version_set.cc @@ -1392,6 +1392,7 @@ void VersionSet::SetupOtherInputs(Compaction* c) { current_->GetOverlappingInputs(level + 1, &smallest, &largest, &c->inputs_[1]); + AddBoundaryInputs(icmp_, current_->files_[level + 1], &c->inputs_[1]); // Get entire range covered by compaction InternalKey all_start, all_limit; @@ -1414,6 +1415,7 @@ void VersionSet::SetupOtherInputs(Compaction* c) { std::vector expanded1; current_->GetOverlappingInputs(level + 1, &new_start, &new_limit, &expanded1); + AddBoundaryInputs(icmp_, current_->files_[level + 1], &expanded1); if (expanded1.size() == c->inputs_[1].size()) { Log(options_->info_log, "Expanding@%d %d+%d (%ld+%ld bytes) to %d+%d (%ld+%ld bytes)\n", From 5d94ad4d95c09d3ac203ddaf9922e55e730706a8 Mon Sep 17 00:00:00 2001 From: Victor Costan Date: Mon, 24 May 2021 23:28:59 +0000 Subject: [PATCH 51/68] Update Travis CI config. 
Xcode (drives macOS image) : 12.2 => 12.5 Clang : 10 => 12 GCC : 10 => 11 PiperOrigin-RevId: 375582717 --- .travis.yml | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/.travis.yml b/.travis.yml index e34a67e..ad59b19 100644 --- a/.travis.yml +++ b/.travis.yml @@ -4,7 +4,7 @@ language: cpp dist: bionic -osx_image: xcode12.2 +osx_image: xcode12.5 compiler: - gcc @@ -26,14 +26,14 @@ jobs: addons: apt: sources: - - sourceline: 'deb http://apt.llvm.org/bionic/ llvm-toolchain-bionic-10 main' + - sourceline: 'deb http://apt.llvm.org/bionic/ llvm-toolchain-bionic-12 main' key_url: 'https://apt.llvm.org/llvm-snapshot.gpg.key' - sourceline: 'ppa:ubuntu-toolchain-r/test' packages: - - clang-10 + - clang-12 - cmake - - gcc-10 - - g++-10 + - gcc-11 + - g++-11 - libgoogle-perftools-dev - libkyotocabinet-dev - libsnappy-dev @@ -43,10 +43,10 @@ addons: packages: - cmake - crc32c - - gcc@10 + - gcc@11 - gperftools - kyoto-cabinet - - llvm@10 + - llvm@12 - ninja - snappy - sqlite3 @@ -59,14 +59,14 @@ install: export PATH="$(brew --prefix llvm)/bin:$PATH"; fi # /usr/bin/gcc points to an older compiler on both Linux and macOS. -- if [ "$CXX" = "g++" ]; then export CXX="g++-10" CC="gcc-10"; fi +- if [ "$CXX" = "g++" ]; then export CXX="g++-11" CC="gcc-11"; fi # /usr/bin/clang points to an older compiler on both Linux and macOS. # # Homebrew's llvm package doesn't ship a versioned clang++ binary, so the values # below don't work on macOS. Fortunately, the path change above makes the # default values (clang and clang++) resolve to the correct compiler on macOS. - if [ "$TRAVIS_OS_NAME" = "linux" ]; then - if [ "$CXX" = "clang++" ]; then export CXX="clang++-10" CC="clang-10"; fi; + if [ "$CXX" = "clang++" ]; then export CXX="clang++-12" CC="clang-12"; fi; fi - echo ${CC} - echo ${CXX} From 8949158f5d7264444e5b04530c92e9cc524499c4 Mon Sep 17 00:00:00 2001 From: wineway Date: Thu, 1 Jul 2021 20:52:01 +0800 Subject: [PATCH 52/68] fixed random access file exhaust random mmap file use wrong limit count --- util/env_posix_test.cc | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/util/env_posix_test.cc b/util/env_posix_test.cc index da264f0..34bda62 100644 --- a/util/env_posix_test.cc +++ b/util/env_posix_test.cc @@ -243,8 +243,8 @@ TEST_F(EnvPosixTest, TestCloseOnExecRandomAccessFile) { // Exhaust the RandomAccessFile mmap limit. This way, the test // RandomAccessFile instance below is backed by a file descriptor, not by an // mmap region. - leveldb::RandomAccessFile* mmapped_files[kReadOnlyFileLimit] = {nullptr}; - for (int i = 0; i < kReadOnlyFileLimit; i++) { + leveldb::RandomAccessFile* mmapped_files[kMMapLimit]; + for (int i = 0; i < kMMapLimit; i++) { ASSERT_LEVELDB_OK(env_->NewRandomAccessFile(file_path, &mmapped_files[i])); } @@ -253,7 +253,7 @@ TEST_F(EnvPosixTest, TestCloseOnExecRandomAccessFile) { CheckCloseOnExecDoesNotLeakFDs(open_fds); delete file; - for (int i = 0; i < kReadOnlyFileLimit; i++) { + for (int i = 0; i < kMMapLimit; i++) { delete mmapped_files[i]; } ASSERT_LEVELDB_OK(env_->RemoveFile(file_path)); From 8e62cc51246612ff8ea30c3eeffb2407807e5525 Mon Sep 17 00:00:00 2001 From: leveldb Team Date: Tue, 3 Aug 2021 00:25:26 +0000 Subject: [PATCH 53/68] Remove the `/` prefix from the recovery_test test file to prevent a double `/`. 
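
The fix relies on testing::TempDir() already ending in a path separator, so prepending
another `/` produced a doubled slash. A minimal sketch, assuming TempDir() returns "/tmp/":

    std::string before = testing::TempDir() + "/recovery_test";  // "/tmp//recovery_test"
    std::string after  = testing::TempDir() + "recovery_test";   // "/tmp/recovery_test"
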
PiperOrigin-RevId: 388341429 --- db/recovery_test.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/db/recovery_test.cc b/db/recovery_test.cc index 3db817e..6c5d42e 100644 --- a/db/recovery_test.cc +++ b/db/recovery_test.cc @@ -18,7 +18,7 @@ namespace leveldb { class RecoveryTest : public testing::Test { public: RecoveryTest() : env_(Env::Default()), db_(nullptr) { - dbname_ = testing::TempDir() + "/recovery_test"; + dbname_ = testing::TempDir() + "recovery_test"; DestroyDB(dbname_, Options()); Open(); } From 54340b4a1020737e17ae4efacc31afeb53022be9 Mon Sep 17 00:00:00 2001 From: ehds Date: Sun, 8 Aug 2021 22:24:19 +0800 Subject: [PATCH 54/68] Fix comments position --- db/version_set.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/db/version_set.h b/db/version_set.h index 69f3d70..ea0c925 100644 --- a/db/version_set.h +++ b/db/version_set.h @@ -59,9 +59,6 @@ bool SomeFileOverlapsRange(const InternalKeyComparator& icmp, class Version { public: - // Lookup the value for key. If found, store it in *val and - // return OK. Else return a non-OK status. Fills *stats. - // REQUIRES: lock is not held struct GetStats { FileMetaData* seek_file; int seek_file_level; @@ -72,6 +69,9 @@ class Version { // REQUIRES: This version has been saved (see VersionSet::SaveTo) void AddIterators(const ReadOptions&, std::vector* iters); + // Lookup the value for key. If found, store it in *val and + // return OK. Else return a non-OK status. Fills *stats. + // REQUIRES: lock is not held Status Get(const ReadOptions&, const LookupKey& key, std::string* val, GetStats* stats); From 5783a79309bfcd2089147bd474f796347e4a2d1e Mon Sep 17 00:00:00 2001 From: Victor Costan Date: Thu, 2 Sep 2021 21:25:03 +0000 Subject: [PATCH 55/68] Switch CI to GitHub Actions. PiperOrigin-RevId: 394542401 --- .appveyor.yml | 36 ---------------- .github/workflows/build.yml | 101 ++++++++++++++++++++++++++++++++++++++++++++ .travis.yml | 88 -------------------------------------- README.md | 3 +- 4 files changed, 102 insertions(+), 126 deletions(-) delete mode 100644 .appveyor.yml create mode 100644 .github/workflows/build.yml delete mode 100644 .travis.yml diff --git a/.appveyor.yml b/.appveyor.yml deleted file mode 100644 index 448f183..0000000 --- a/.appveyor.yml +++ /dev/null @@ -1,36 +0,0 @@ -# Build matrix / environment variables are explained on: -# https://www.appveyor.com/docs/appveyor-yml/ -# This file can be validated on: https://ci.appveyor.com/tools/validate-yaml - -version: "{build}" - -environment: - matrix: - # AppVeyor currently has no custom job name feature. - # http://help.appveyor.com/discussions/questions/1623-can-i-provide-a-friendly-name-for-jobs - - JOB: Visual Studio 2019 - APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2019 - CMAKE_GENERATOR: Visual Studio 16 2019 - -platform: - - x86 - - x64 - -configuration: - - RelWithDebInfo - - Debug - -build_script: - - git submodule update --init --recursive - - mkdir build - - cd build - - if "%platform%"=="x86" (set CMAKE_GENERATOR_PLATFORM="Win32") - else (set CMAKE_GENERATOR_PLATFORM="%platform%") - - cmake --version - - cmake .. -G "%CMAKE_GENERATOR%" -A "%CMAKE_GENERATOR_PLATFORM%" - -DCMAKE_CONFIGURATION_TYPES="%CONFIGURATION%" - - cmake --build . --config "%CONFIGURATION%" - - cd .. - -test_script: - - cd build && ctest --verbose --build-config "%CONFIGURATION%" && cd .. 
diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml new file mode 100644 index 0000000..efb81ee --- /dev/null +++ b/.github/workflows/build.yml @@ -0,0 +1,101 @@ +# Copyright 2021 The LevelDB Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. See the AUTHORS file for names of contributors. + +name: ci +on: [push, pull_request] + +permissions: + contents: read + +jobs: + build-and-test: + name: >- + CI + ${{ matrix.os }} + ${{ matrix.compiler }} + ${{ matrix.optimized && 'release' || 'debug' }} + runs-on: ${{ matrix.os }} + strategy: + fail-fast: false + matrix: + compiler: [clang, gcc, msvc] + os: [ubuntu-latest, macos-latest, windows-latest] + optimized: [true, false] + exclude: + # MSVC only works on Windows. + - os: ubuntu-latest + compiler: msvc + - os: macos-latest + compiler: msvc + # Not testing with GCC on macOS. + - os: macos-latest + compiler: gcc + # Only testing with MSVC on Windows. + - os: windows-latest + compiler: clang + - os: windows-latest + compiler: gcc + include: + - compiler: clang + CC: clang + CXX: clang++ + - compiler: gcc + CC: gcc + CXX: g++ + - compiler: msvc + CC: + CXX: + + env: + CMAKE_BUILD_DIR: ${{ github.workspace }}/build + CMAKE_BUILD_TYPE: ${{ matrix.optimized && 'RelWithDebInfo' || 'Debug' }} + CC: ${{ matrix.CC }} + CXX: ${{ matrix.CXX }} + BINARY_SUFFIX: ${{ startsWith(matrix.os, 'windows') && '.exe' || '' }} + BINARY_PATH: >- + ${{ format( + startsWith(matrix.os, 'windows') && '{0}\build\{1}\' || '{0}/build/', + github.workspace, + matrix.optimized && 'RelWithDebInfo' || 'Debug') }} + + steps: + - uses: actions/checkout@v2 + with: + submodules: true + + - name: Install dependencies on Linux + if: ${{ runner.os == 'Linux' }} + run: | + sudo apt-get update + sudo apt-get install libgoogle-perftools-dev libkyotocabinet-dev \ + libsnappy-dev libsqlite3-dev + + - name: Generate build config + run: >- + cmake -S "${{ github.workspace }}" -B "${{ env.CMAKE_BUILD_DIR }}" + -DCMAKE_BUILD_TYPE=${{ env.CMAKE_BUILD_TYPE }} + -DCMAKE_INSTALL_PREFIX=${{ runner.temp }}/install_test/ + + - name: Build + run: >- + cmake --build "${{ env.CMAKE_BUILD_DIR }}" + --config "${{ env.CMAKE_BUILD_TYPE }}" + + - name: Run Tests + working-directory: ${{ github.workspace }}/build + run: ctest -C "${{ env.CMAKE_BUILD_TYPE }}" --verbose + + - name: Run LevelDB Benchmarks + run: ${{ env.BINARY_PATH }}db_bench${{ env.BINARY_SUFFIX }} + + - name: Run SQLite Benchmarks + if: ${{ runner.os != 'Windows' }} + run: ${{ env.BINARY_PATH }}db_bench_sqlite3${{ env.BINARY_SUFFIX }} + + - name: Run Kyoto Cabinet Benchmarks + if: ${{ runner.os == 'Linux' && matrix.compiler == 'clang' }} + run: ${{ env.BINARY_PATH }}db_bench_tree_db${{ env.BINARY_SUFFIX }} + + - name: Test CMake installation + run: cmake --build "${{ env.CMAKE_BUILD_DIR }}" --target install diff --git a/.travis.yml b/.travis.yml deleted file mode 100644 index ad59b19..0000000 --- a/.travis.yml +++ /dev/null @@ -1,88 +0,0 @@ -# Build matrix / environment variables are explained on: -# http://about.travis-ci.org/docs/user/build-configuration/ -# This file can be validated on: http://lint.travis-ci.org/ - -language: cpp -dist: bionic -osx_image: xcode12.5 - -compiler: -- gcc -- clang -os: -- linux -- osx - -env: -- BUILD_TYPE=Debug -- BUILD_TYPE=RelWithDebInfo - -jobs: - allow_failures: - # Homebrew's GCC is currently broken on XCode 11. 
- - compiler: gcc - os: osx - -addons: - apt: - sources: - - sourceline: 'deb http://apt.llvm.org/bionic/ llvm-toolchain-bionic-12 main' - key_url: 'https://apt.llvm.org/llvm-snapshot.gpg.key' - - sourceline: 'ppa:ubuntu-toolchain-r/test' - packages: - - clang-12 - - cmake - - gcc-11 - - g++-11 - - libgoogle-perftools-dev - - libkyotocabinet-dev - - libsnappy-dev - - libsqlite3-dev - - ninja-build - homebrew: - packages: - - cmake - - crc32c - - gcc@11 - - gperftools - - kyoto-cabinet - - llvm@12 - - ninja - - snappy - - sqlite3 - update: true - -install: -# The following Homebrew packages aren't linked by default, and need to be -# prepended to the path explicitly. -- if [ "$TRAVIS_OS_NAME" = "osx" ]; then - export PATH="$(brew --prefix llvm)/bin:$PATH"; - fi -# /usr/bin/gcc points to an older compiler on both Linux and macOS. -- if [ "$CXX" = "g++" ]; then export CXX="g++-11" CC="gcc-11"; fi -# /usr/bin/clang points to an older compiler on both Linux and macOS. -# -# Homebrew's llvm package doesn't ship a versioned clang++ binary, so the values -# below don't work on macOS. Fortunately, the path change above makes the -# default values (clang and clang++) resolve to the correct compiler on macOS. -- if [ "$TRAVIS_OS_NAME" = "linux" ]; then - if [ "$CXX" = "clang++" ]; then export CXX="clang++-12" CC="clang-12"; fi; - fi -- echo ${CC} -- echo ${CXX} -- ${CXX} --version -- cmake --version - -before_script: -- mkdir -p build && cd build -- cmake .. -G Ninja -DCMAKE_BUILD_TYPE=$BUILD_TYPE - -DCMAKE_INSTALL_PREFIX=$HOME/.local -- cmake --build . -- cd .. - -script: -- cd build && ctest --verbose && cd .. -- "if [ -f build/db_bench ] ; then build/db_bench ; fi" -- "if [ -f build/db_bench_sqlite3 ] ; then build/db_bench_sqlite3 ; fi" -- "if [ -f build/db_bench_tree_db ] ; then build/db_bench_tree_db ; fi" -- cd build && cmake --build . --target install diff --git a/README.md b/README.md index 81144dd..3c4d14d 100644 --- a/README.md +++ b/README.md @@ -1,7 +1,6 @@ **LevelDB is a fast key-value storage library written at Google that provides an ordered mapping from string keys to string values.** -[![Build Status](https://travis-ci.org/google/leveldb.svg?branch=master)](https://travis-ci.org/google/leveldb) -[![Build status](https://ci.appveyor.com/api/projects/status/g2j5j4rfkda6eyw5/branch/master?svg=true)](https://ci.appveyor.com/project/pwnall/leveldb) +[![ci](https://github.com/google/leveldb/actions/workflows/build.yml/badge.svg)](https://github.com/google/leveldb/actions/workflows/build.yml) Authors: Sanjay Ghemawat (sanjay@google.com) and Jeff Dean (jeff@google.com) From 11aafab31f96ba0a07ac5b2d5275f289e17c7814 Mon Sep 17 00:00:00 2001 From: zzt Date: Fri, 3 Sep 2021 11:18:31 +0800 Subject: [PATCH 56/68] Fix version_set.cc comments typo Fix typo of comment of FindLargestKey function --- db/version_set.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/db/version_set.cc b/db/version_set.cc index 8d85fce..da38bbb 100644 --- a/db/version_set.cc +++ b/db/version_set.cc @@ -1304,7 +1304,7 @@ Compaction* VersionSet::PickCompaction() { return c; } -// Finds the largest key in a vector of files. Returns true if files it not +// Finds the largest key in a vector of files. Returns true if files is not // empty. bool FindLargestKey(const InternalKeyComparator& icmp, const std::vector& files, From c5d5174a66f02e66d8e30c21ff4761214d8e4d6d Mon Sep 17 00:00:00 2001 From: leveldb Team Date: Fri, 10 Sep 2021 00:45:26 +0000 Subject: [PATCH 57/68] Get env_posix.cc building under Fuchsia. 
PiperOrigin-RevId: 395824737 --- util/env_posix.cc | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/util/env_posix.cc b/util/env_posix.cc index d84cd1e..24b1c4c 100644 --- a/util/env_posix.cc +++ b/util/env_posix.cc @@ -6,7 +6,9 @@ #include #include #include +#ifndef __Fuchsia__ #include +#endif #include #include #include @@ -757,6 +759,10 @@ int MaxOpenFiles() { if (g_open_read_only_file_limit >= 0) { return g_open_read_only_file_limit; } +#ifdef __Fuchsia__ + // Fuchsia doesn't implement getrlimit. + g_open_read_only_file_limit = 50; +#else struct ::rlimit rlim; if (::getrlimit(RLIMIT_NOFILE, &rlim)) { // getrlimit failed, fallback to hard-coded default. @@ -767,6 +773,7 @@ int MaxOpenFiles() { // Allow use of 20% of available file descriptors for read-only files. g_open_read_only_file_limit = rlim.rlim_cur / 5; } +#endif return g_open_read_only_file_limit; } From 68d14a723a23eac5e53d4643890f27651eb2df28 Mon Sep 17 00:00:00 2001 From: "Dylan K. Taylor" Date: Sat, 9 Oct 2021 16:21:57 +0100 Subject: [PATCH 58/68] Prevent handle used for LOG from being inherited by subprocesses I recently encountered a problem with this because Windows doesn't allow files to be deleted when there's open handles to them. Other files opened by leveldb are not affected because by and large they are using CreateFileA, which does not allow inheritance when lpSecurityAttributes is null (ref: https://docs.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-createfilea) However, fopen() _does_ allow inheritance, and it needs to be expressly disabled. https://docs.microsoft.com/en-us/cpp/c-runtime-library/reference/fopen-wfopen?view=msvc-160 --- util/env_windows.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/util/env_windows.cc b/util/env_windows.cc index 449f564..84905df 100644 --- a/util/env_windows.cc +++ b/util/env_windows.cc @@ -622,7 +622,7 @@ class WindowsEnv : public Env { } Status NewLogger(const std::string& filename, Logger** result) override { - std::FILE* fp = std::fopen(filename.c_str(), "w"); + std::FILE* fp = std::fopen(filename.c_str(), "wN"); if (fp == nullptr) { *result = nullptr; return WindowsError(filename, ::GetLastError()); From d7da5d9d353cf3d865109fc1aac8e587f6086ef5 Mon Sep 17 00:00:00 2001 From: xiong-ang Date: Fri, 22 Oct 2021 18:00:57 +0800 Subject: [PATCH 59/68] fix some trifling points --- doc/index.md | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/doc/index.md b/doc/index.md index 01693ad..56967c7 100644 --- a/doc/index.md +++ b/doc/index.md @@ -345,7 +345,7 @@ non-NULL, it is used to cache frequently used uncompressed block contents. #include "leveldb/cache.h" leveldb::Options options; -options.block_cache = leveldb::NewLRUCache(100 * 1048576); // 100MB cache +options.block_cache = leveldb::NewLRUCache(100 * 1048576); // 100M cache capacity leveldb::DB* db; leveldb::DB::Open(options, name, &db); ... use the db ... @@ -369,6 +369,7 @@ leveldb::Iterator* it = db->NewIterator(options); for (it->SeekToFirst(); it->Valid(); it->Next()) { ... } +delete it; ``` ### Key Layout @@ -424,21 +425,21 @@ spaces. 
For example: ```c++ class CustomFilterPolicy : public leveldb::FilterPolicy { private: - FilterPolicy* builtin_policy_; + leveldb::FilterPolicy* builtin_policy_; public: - CustomFilterPolicy() : builtin_policy_(NewBloomFilterPolicy(10)) {} + CustomFilterPolicy() : builtin_policy_(leveldb::NewBloomFilterPolicy(10)) {} ~CustomFilterPolicy() { delete builtin_policy_; } const char* Name() const { return "IgnoreTrailingSpacesFilter"; } - void CreateFilter(const Slice* keys, int n, std::string* dst) const { + void CreateFilter(const leveldb::Slice* keys, int n, std::string* dst) const { // Use builtin bloom filter code after removing trailing spaces - std::vector trimmed(n); + std::vector trimmed(n); for (int i = 0; i < n; i++) { trimmed[i] = RemoveTrailingSpaces(keys[i]); } - return builtin_policy_->CreateFilter(trimmed.data(), n, dst); + builtin_policy_->CreateFilter(trimmed.data(), n, dst); } }; ``` From dd6658754f85f54058be416e1b43150e39d8ffa5 Mon Sep 17 00:00:00 2001 From: Felipe Oliveira Carvalho Date: Mon, 15 Nov 2021 00:36:57 +0100 Subject: [PATCH 60/68] Remove include and find_package() from build files --- util/env_posix.cc | 1 - 1 file changed, 1 deletion(-) diff --git a/util/env_posix.cc b/util/env_posix.cc index 24b1c4c..9ac03f8 100644 --- a/util/env_posix.cc +++ b/util/env_posix.cc @@ -4,7 +4,6 @@ #include #include -#include #include #ifndef __Fuchsia__ #include From 42d00a80cc0bc776f19325e9043fcf4e7892ec81 Mon Sep 17 00:00:00 2001 From: Eric Wang Date: Sun, 5 Dec 2021 11:44:55 +0800 Subject: [PATCH 61/68] rm redundant code: SetNextFile has already been called before in this function --- db/version_set.cc | 1 - 1 file changed, 1 deletion(-) diff --git a/db/version_set.cc b/db/version_set.cc index 8d85fce..f457e26 100644 --- a/db/version_set.cc +++ b/db/version_set.cc @@ -806,7 +806,6 @@ Status VersionSet::LogAndApply(VersionEdit* edit, port::Mutex* mu) { // first call to LogAndApply (when opening the database). assert(descriptor_file_ == nullptr); new_manifest_file = DescriptorFileName(dbname_, manifest_file_number_); - edit->SetNextFile(next_file_number_); s = env_->NewWritableFile(new_manifest_file, &descriptor_file_); if (s.ok()) { descriptor_log_ = new log::Writer(descriptor_file_); From 335876a1335c765f818ae10d9c4d18f563cdfce5 Mon Sep 17 00:00:00 2001 From: Victor Costan Date: Wed, 22 Dec 2021 19:12:56 +0000 Subject: [PATCH 62/68] Add invariant checks to Limiter in Env implementations. PiperOrigin-RevId: 417853172 --- util/env_posix.cc | 33 ++++++++++++++++++++++++++++++--- util/env_windows.cc | 24 ++++++++++++++++++++++-- 2 files changed, 52 insertions(+), 5 deletions(-) diff --git a/util/env_posix.cc b/util/env_posix.cc index 9ac03f8..8b8d9c8 100644 --- a/util/env_posix.cc +++ b/util/env_posix.cc @@ -73,7 +73,14 @@ Status PosixError(const std::string& context, int error_number) { class Limiter { public: // Limit maximum number of resources to |max_acquires|. 
- Limiter(int max_acquires) : acquires_allowed_(max_acquires) {} + Limiter(int max_acquires) + : +#if !defined(NDEBUG) + max_acquires_(max_acquires), +#endif // !defined(NDEBUG) + acquires_allowed_(max_acquires) { + assert(max_acquires >= 0); + } Limiter(const Limiter&) = delete; Limiter operator=(const Limiter&) = delete; @@ -86,15 +93,35 @@ class Limiter { if (old_acquires_allowed > 0) return true; - acquires_allowed_.fetch_add(1, std::memory_order_relaxed); + int pre_increment_acquires_allowed = + acquires_allowed_.fetch_add(1, std::memory_order_relaxed); + + // Silence compiler warnings about unused arguments when NDEBUG is defined. + (void)pre_increment_acquires_allowed; + // If the check below fails, Release() was called more times than acquire. + assert(pre_increment_acquires_allowed < max_acquires_); + return false; } // Release a resource acquired by a previous call to Acquire() that returned // true. - void Release() { acquires_allowed_.fetch_add(1, std::memory_order_relaxed); } + void Release() { + int old_acquires_allowed = + acquires_allowed_.fetch_add(1, std::memory_order_relaxed); + + // Silence compiler warnings about unused arguments when NDEBUG is defined. + (void)old_acquires_allowed; + // If the check below fails, Release() was called more times than acquire. + assert(old_acquires_allowed < max_acquires_); + } private: +#if !defined(NDEBUG) + // Catches an excessive number of Release() calls. + const int max_acquires_; +#endif // !defined(NDEBUG) + // The number of available resources. // // This is a counter and is not tied to the invariants of any other class, so diff --git a/util/env_windows.cc b/util/env_windows.cc index 84905df..9ffcd07 100644 --- a/util/env_windows.cc +++ b/util/env_windows.cc @@ -114,7 +114,14 @@ class ScopedHandle { class Limiter { public: // Limit maximum number of resources to |max_acquires|. - Limiter(int max_acquires) : acquires_allowed_(max_acquires) {} + Limiter(int max_acquires) + : +#if !defined(NDEBUG) + max_acquires_(max_acquires), +#endif // !defined(NDEBUG) + acquires_allowed_(max_acquires) { + assert(max_acquires >= 0); + } Limiter(const Limiter&) = delete; Limiter operator=(const Limiter&) = delete; @@ -133,9 +140,22 @@ class Limiter { // Release a resource acquired by a previous call to Acquire() that returned // true. - void Release() { acquires_allowed_.fetch_add(1, std::memory_order_relaxed); } + void Release() { + int old_acquires_allowed = + acquires_allowed_.fetch_add(1, std::memory_order_relaxed); + + // Silence compiler warnings about unused arguments when NDEBUG is defined. + (void)old_acquires_allowed; + // If the check below fails, Release() was called more times than acquire. + assert(old_acquires_allowed < max_acquires_); + } private: +#if !defined(NDEBUG) + // Catches an excessive number of Release() calls. + const int max_acquires_; +#endif // !defined(NDEBUG) + // The number of available resources. // // This is a counter and is not tied to the invariants of any other class, so From b2801ee1a0e3a5c0c393dc04eef63691f79ed694 Mon Sep 17 00:00:00 2001 From: Victor Costan Date: Wed, 29 Dec 2021 03:48:42 +0000 Subject: [PATCH 63/68] Extract benchmark from db_test.cc. The benchmark in db/db_test.cc is extracted to its own file, benchmarks/db_bench_log.cc. 
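
The extracted file is a standalone Google Benchmark binary. A condensed sketch of the
harness shape it follows, with a placeholder body (the full version is in the new file below):

    #include "benchmark/benchmark.h"

    static void BM_LogAndApply(benchmark::State& state) {
      const int num_base_files = state.range(0);   // set via Arg() below
      for (auto st : state) {
        benchmark::DoNotOptimize(num_base_files);  // placeholder for the LogAndApply work
      }
    }
    BENCHMARK(BM_LogAndApply)->Arg(1)->Arg(100)->Arg(10000)->Arg(100000);
    BENCHMARK_MAIN();
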
PiperOrigin-RevId: 418713499 --- CMakeLists.txt | 14 +++---- benchmarks/db_bench_log.cc | 92 ++++++++++++++++++++++++++++++++++++++++++++++ db/db_test.cc | 64 -------------------------------- 3 files changed, 99 insertions(+), 71 deletions(-) create mode 100644 benchmarks/db_bench_log.cc diff --git a/CMakeLists.txt b/CMakeLists.txt index f8285b8..7690302 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -298,11 +298,6 @@ if(LEVELDB_BUILD_TESTS) # This project is tested using GoogleTest. add_subdirectory("third_party/googletest") - # This project uses Google benchmark for benchmarking. - set(BENCHMARK_ENABLE_TESTING OFF CACHE BOOL "" FORCE) - set(BENCHMARK_ENABLE_EXCEPTIONS OFF CACHE BOOL "" FORCE) - add_subdirectory("third_party/benchmark") - # GoogleTest triggers a missing field initializers warning. if(LEVELDB_HAVE_NO_MISSING_FIELD_INITIALIZERS) set_property(TARGET gtest @@ -323,7 +318,7 @@ if(LEVELDB_BUILD_TESTS) "${test_file}" ) - target_link_libraries("${test_target_name}" leveldb gmock gtest benchmark) + target_link_libraries("${test_target_name}" leveldb gmock gtest) target_compile_definitions("${test_target_name}" PRIVATE ${LEVELDB_PLATFORM_NAME}=1 @@ -386,6 +381,11 @@ if(LEVELDB_BUILD_TESTS) endif(LEVELDB_BUILD_TESTS) if(LEVELDB_BUILD_BENCHMARKS) + # This project uses Google benchmark for benchmarking. + set(BENCHMARK_ENABLE_TESTING OFF CACHE BOOL "" FORCE) + set(BENCHMARK_ENABLE_EXCEPTIONS OFF CACHE BOOL "" FORCE) + add_subdirectory("third_party/benchmark") + function(leveldb_benchmark bench_file) get_filename_component(bench_target_name "${bench_file}" NAME_WE) @@ -400,7 +400,7 @@ if(LEVELDB_BUILD_BENCHMARKS) "${bench_file}" ) - target_link_libraries("${bench_target_name}" leveldb gmock gtest) + target_link_libraries("${bench_target_name}" leveldb gmock gtest benchmark) target_compile_definitions("${bench_target_name}" PRIVATE ${LEVELDB_PLATFORM_NAME}=1 diff --git a/benchmarks/db_bench_log.cc b/benchmarks/db_bench_log.cc new file mode 100644 index 0000000..a1845bf --- /dev/null +++ b/benchmarks/db_bench_log.cc @@ -0,0 +1,92 @@ +// Copyright (c) 2019 The LevelDB Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. See the AUTHORS file for names of contributors. 
+ +#include +#include +#include + +#include "gtest/gtest.h" +#include "benchmark/benchmark.h" +#include "db/version_set.h" +#include "leveldb/comparator.h" +#include "leveldb/db.h" +#include "leveldb/env.h" +#include "leveldb/options.h" +#include "port/port.h" +#include "util/mutexlock.h" +#include "util/testutil.h" + +namespace leveldb { + +namespace { + +std::string MakeKey(unsigned int num) { + char buf[30]; + std::snprintf(buf, sizeof(buf), "%016u", num); + return std::string(buf); +} + +void BM_LogAndApply(benchmark::State& state) { + const int num_base_files = state.range(0); + + std::string dbname = testing::TempDir() + "leveldb_test_benchmark"; + DestroyDB(dbname, Options()); + + DB* db = nullptr; + Options opts; + opts.create_if_missing = true; + Status s = DB::Open(opts, dbname, &db); + ASSERT_LEVELDB_OK(s); + ASSERT_TRUE(db != nullptr); + + delete db; + db = nullptr; + + Env* env = Env::Default(); + + port::Mutex mu; + MutexLock l(&mu); + + InternalKeyComparator cmp(BytewiseComparator()); + Options options; + VersionSet vset(dbname, &options, nullptr, &cmp); + bool save_manifest; + ASSERT_LEVELDB_OK(vset.Recover(&save_manifest)); + VersionEdit vbase; + uint64_t fnum = 1; + for (int i = 0; i < num_base_files; i++) { + InternalKey start(MakeKey(2 * fnum), 1, kTypeValue); + InternalKey limit(MakeKey(2 * fnum + 1), 1, kTypeDeletion); + vbase.AddFile(2, fnum++, 1 /* file size */, start, limit); + } + ASSERT_LEVELDB_OK(vset.LogAndApply(&vbase, &mu)); + + uint64_t start_micros = env->NowMicros(); + + for (auto st : state) { + VersionEdit vedit; + vedit.RemoveFile(2, fnum); + InternalKey start(MakeKey(2 * fnum), 1, kTypeValue); + InternalKey limit(MakeKey(2 * fnum + 1), 1, kTypeDeletion); + vedit.AddFile(2, fnum++, 1 /* file size */, start, limit); + vset.LogAndApply(&vedit, &mu); + } + + uint64_t stop_micros = env->NowMicros(); + unsigned int us = stop_micros - start_micros; + char buf[16]; + std::snprintf(buf, sizeof(buf), "%d", num_base_files); + std::fprintf(stderr, + "BM_LogAndApply/%-6s %8" PRIu64 + " iters : %9u us (%7.0f us / iter)\n", + buf, state.iterations(), us, ((float)us) / state.iterations()); +} + +BENCHMARK(BM_LogAndApply)->Arg(1)->Arg(100)->Arg(10000)->Arg(100000); + +} // namespace + +} // namespace leveldb + +BENCHMARK_MAIN(); diff --git a/db/db_test.cc b/db/db_test.cc index 908b41d..7f22688 100644 --- a/db/db_test.cc +++ b/db/db_test.cc @@ -9,7 +9,6 @@ #include #include "gtest/gtest.h" -#include "benchmark/benchmark.h" #include "db/db_impl.h" #include "db/filename.h" #include "db/version_set.h" @@ -2295,72 +2294,9 @@ TEST_F(DBTest, Randomized) { } while (ChangeOptions()); } -std::string MakeKey(unsigned int num) { - char buf[30]; - std::snprintf(buf, sizeof(buf), "%016u", num); - return std::string(buf); -} - -static void BM_LogAndApply(benchmark::State& state) { - const int num_base_files = state.range(0); - - std::string dbname = testing::TempDir() + "leveldb_test_benchmark"; - DestroyDB(dbname, Options()); - - DB* db = nullptr; - Options opts; - opts.create_if_missing = true; - Status s = DB::Open(opts, dbname, &db); - ASSERT_LEVELDB_OK(s); - ASSERT_TRUE(db != nullptr); - - delete db; - db = nullptr; - - Env* env = Env::Default(); - - port::Mutex mu; - MutexLock l(&mu); - - InternalKeyComparator cmp(BytewiseComparator()); - Options options; - VersionSet vset(dbname, &options, nullptr, &cmp); - bool save_manifest; - ASSERT_LEVELDB_OK(vset.Recover(&save_manifest)); - VersionEdit vbase; - uint64_t fnum = 1; - for (int i = 0; i < num_base_files; i++) { - InternalKey 
start(MakeKey(2 * fnum), 1, kTypeValue); - InternalKey limit(MakeKey(2 * fnum + 1), 1, kTypeDeletion); - vbase.AddFile(2, fnum++, 1 /* file size */, start, limit); - } - ASSERT_LEVELDB_OK(vset.LogAndApply(&vbase, &mu)); - - uint64_t start_micros = env->NowMicros(); - - for (auto st : state) { - VersionEdit vedit; - vedit.RemoveFile(2, fnum); - InternalKey start(MakeKey(2 * fnum), 1, kTypeValue); - InternalKey limit(MakeKey(2 * fnum + 1), 1, kTypeDeletion); - vedit.AddFile(2, fnum++, 1 /* file size */, start, limit); - vset.LogAndApply(&vedit, &mu); - } - uint64_t stop_micros = env->NowMicros(); - unsigned int us = stop_micros - start_micros; - char buf[16]; - std::snprintf(buf, sizeof(buf), "%d", num_base_files); - std::fprintf(stderr, - "BM_LogAndApply/%-6s %8" PRIu64 - " iters : %9u us (%7.0f us / iter)\n", - buf, state.iterations(), us, ((float)us) / state.iterations()); -} - -BENCHMARK(BM_LogAndApply)->Arg(1)->Arg(100)->Arg(10000)->Arg(100000); } // namespace leveldb int main(int argc, char** argv) { testing::InitGoogleTest(&argc, argv); - benchmark::RunSpecifiedBenchmarks(); return RUN_ALL_TESTS(); } From 7a2f64ed504f510425183fa225ae80c671f0145f Mon Sep 17 00:00:00 2001 From: Shawn Zhong Date: Thu, 30 Dec 2021 18:33:55 -0600 Subject: [PATCH 64/68] Update env_posix.cc --- util/env_posix.cc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/util/env_posix.cc b/util/env_posix.cc index 8b8d9c8..d8d793a 100644 --- a/util/env_posix.cc +++ b/util/env_posix.cc @@ -870,7 +870,7 @@ class SingletonEnv { public: SingletonEnv() { #if !defined(NDEBUG) - env_initialized_.store(true, std::memory_order::memory_order_relaxed); + env_initialized_.store(true, std::memory_order_relaxed); #endif // !defined(NDEBUG) static_assert(sizeof(env_storage_) >= sizeof(EnvType), "env_storage_ will not fit the Env"); @@ -887,7 +887,7 @@ class SingletonEnv { static void AssertEnvNotInitialized() { #if !defined(NDEBUG) - assert(!env_initialized_.load(std::memory_order::memory_order_relaxed)); + assert(!env_initialized_.load(std::memory_order_relaxed)); #endif // !defined(NDEBUG) } From 8f464e7f68fd9d50ed39b2866ef8dac9c837439d Mon Sep 17 00:00:00 2001 From: Victor Costan Date: Mon, 3 Jan 2022 20:57:56 +0000 Subject: [PATCH 65/68] Remove main() from most tests. This gives some flexibility to embedders. Currently, embedders have to build a binary for each test file. After this CL, embedders can still choose to have a binary for each test file, by linking each test file with a googletest target that includes main() (usually "gtest_main"). Embedders can also choose to build a single binary for almost all test files, and link with a googletest target that includes main(). The latter is more convenient for projects that have very few test binaries, like Chromium. 
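
The main() dropped from each test file below is the stock googletest entry point; linking
the consolidated leveldb_tests binary against gtest_main supplies the same thing once:

    int main(int argc, char** argv) {
      testing::InitGoogleTest(&argc, argv);
      return RUN_ALL_TESTS();
    }
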
PiperOrigin-RevId: 419470798 --- CMakeLists.txt | 88 ++++++++++++++++++++++++++----------------- db/autocompact_test.cc | 5 --- db/corruption_test.cc | 5 --- db/db_test.cc | 5 --- db/dbformat_test.cc | 5 --- db/fault_injection_test.cc | 5 --- db/filename_test.cc | 5 --- db/log_test.cc | 5 --- db/recovery_test.cc | 5 --- db/skiplist_test.cc | 5 --- db/version_edit_test.cc | 5 --- db/version_set_test.cc | 5 --- db/write_batch_test.cc | 5 --- helpers/memenv/memenv_test.cc | 5 --- issues/issue178_test.cc | 5 --- issues/issue200_test.cc | 5 --- issues/issue320_test.cc | 5 --- table/filter_block_test.cc | 5 --- table/table_test.cc | 5 --- util/arena_test.cc | 5 --- util/bloom_test.cc | 5 --- util/cache_test.cc | 5 --- util/coding_test.cc | 5 --- util/crc32c_test.cc | 5 --- util/env_test.cc | 5 --- util/hash_test.cc | 5 --- util/logging_test.cc | 5 --- util/no_destructor_test.cc | 5 --- util/status_test.cc | 5 --- 29 files changed, 54 insertions(+), 174 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 7690302..b829c94 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -306,6 +306,60 @@ if(LEVELDB_BUILD_TESTS) APPEND PROPERTY COMPILE_OPTIONS -Wno-missing-field-initializers) endif(LEVELDB_HAVE_NO_MISSING_FIELD_INITIALIZERS) + add_executable(leveldb_tests "") + target_sources(leveldb_tests + PRIVATE + # "db/fault_injection_test.cc" + # "issues/issue178_test.cc" + # "issues/issue200_test.cc" + # "issues/issue320_test.cc" + "${PROJECT_BINARY_DIR}/${LEVELDB_PORT_CONFIG_DIR}/port_config.h" + # "util/env_test.cc" + "util/status_test.cc" + "util/no_destructor_test.cc" + "util/testutil.cc" + "util/testutil.h" + ) + if(NOT BUILD_SHARED_LIBS) + target_sources(leveldb_tests + PRIVATE + "db/autocompact_test.cc" + "db/corruption_test.cc" + "db/db_test.cc" + "db/dbformat_test.cc" + "db/filename_test.cc" + "db/log_test.cc" + "db/recovery_test.cc" + "db/skiplist_test.cc" + "db/version_edit_test.cc" + "db/version_set_test.cc" + "db/write_batch_test.cc" + "helpers/memenv/memenv_test.cc" + "table/filter_block_test.cc" + "table/table_test.cc" + "util/arena_test.cc" + "util/bloom_test.cc" + "util/cache_test.cc" + "util/coding_test.cc" + "util/crc32c_test.cc" + "util/hash_test.cc" + "util/logging_test.cc" + ) + endif(NOT BUILD_SHARED_LIBS) + target_link_libraries(leveldb_tests leveldb gmock gtest gtest_main) + target_compile_definitions(leveldb_tests + PRIVATE + ${LEVELDB_PLATFORM_NAME}=1 + ) + if (NOT HAVE_CXX17_HAS_INCLUDE) + target_compile_definitions(leveldb_tests + PRIVATE + LEVELDB_HAS_PORT_CONFIG_H=1 + ) + endif(NOT HAVE_CXX17_HAS_INCLUDE) + + add_test(NAME "leveldb_tests" COMMAND "leveldb_tests") + function(leveldb_test test_file) get_filename_component(test_target_name "${test_file}" NAME_WE) @@ -334,42 +388,8 @@ if(LEVELDB_BUILD_TESTS) endfunction(leveldb_test) leveldb_test("db/c_test.c") - leveldb_test("db/fault_injection_test.cc") - - leveldb_test("issues/issue178_test.cc") - leveldb_test("issues/issue200_test.cc") - leveldb_test("issues/issue320_test.cc") - - leveldb_test("util/env_test.cc") - leveldb_test("util/status_test.cc") - leveldb_test("util/no_destructor_test.cc") if(NOT BUILD_SHARED_LIBS) - leveldb_test("db/autocompact_test.cc") - leveldb_test("db/corruption_test.cc") - leveldb_test("db/db_test.cc") - leveldb_test("db/dbformat_test.cc") - leveldb_test("db/filename_test.cc") - leveldb_test("db/log_test.cc") - leveldb_test("db/recovery_test.cc") - leveldb_test("db/skiplist_test.cc") - leveldb_test("db/version_edit_test.cc") - leveldb_test("db/version_set_test.cc") - 
leveldb_test("db/write_batch_test.cc") - - leveldb_test("helpers/memenv/memenv_test.cc") - - leveldb_test("table/filter_block_test.cc") - leveldb_test("table/table_test.cc") - - leveldb_test("util/arena_test.cc") - leveldb_test("util/bloom_test.cc") - leveldb_test("util/cache_test.cc") - leveldb_test("util/coding_test.cc") - leveldb_test("util/crc32c_test.cc") - leveldb_test("util/hash_test.cc") - leveldb_test("util/logging_test.cc") - # TODO(costan): This test also uses # "util/env_{posix|windows}_test_helper.h" if (WIN32) diff --git a/db/autocompact_test.cc b/db/autocompact_test.cc index 3b7241b..69341e3 100644 --- a/db/autocompact_test.cc +++ b/db/autocompact_test.cc @@ -108,8 +108,3 @@ TEST_F(AutoCompactTest, ReadAll) { DoReads(kCount); } TEST_F(AutoCompactTest, ReadHalf) { DoReads(kCount / 2); } } // namespace leveldb - -int main(int argc, char** argv) { - testing::InitGoogleTest(&argc, argv); - return RUN_ALL_TESTS(); -} diff --git a/db/corruption_test.cc b/db/corruption_test.cc index a31f448..dc7da76 100644 --- a/db/corruption_test.cc +++ b/db/corruption_test.cc @@ -360,8 +360,3 @@ TEST_F(CorruptionTest, UnrelatedKeys) { } } // namespace leveldb - -int main(int argc, char** argv) { - testing::InitGoogleTest(&argc, argv); - return RUN_ALL_TESTS(); -} diff --git a/db/db_test.cc b/db/db_test.cc index 7f22688..9bd6e14 100644 --- a/db/db_test.cc +++ b/db/db_test.cc @@ -2295,8 +2295,3 @@ TEST_F(DBTest, Randomized) { } } // namespace leveldb - -int main(int argc, char** argv) { - testing::InitGoogleTest(&argc, argv); - return RUN_ALL_TESTS(); -} diff --git a/db/dbformat_test.cc b/db/dbformat_test.cc index 4a11c4a..7f3f81a 100644 --- a/db/dbformat_test.cc +++ b/db/dbformat_test.cc @@ -126,8 +126,3 @@ TEST(FormatTest, InternalKeyDebugString) { } } // namespace leveldb - -int main(int argc, char** argv) { - testing::InitGoogleTest(&argc, argv); - return RUN_ALL_TESTS(); -} diff --git a/db/fault_injection_test.cc b/db/fault_injection_test.cc index 6eebafa..ef864a4 100644 --- a/db/fault_injection_test.cc +++ b/db/fault_injection_test.cc @@ -548,8 +548,3 @@ TEST_F(FaultInjectionTest, FaultTestWithLogReuse) { } } // namespace leveldb - -int main(int argc, char** argv) { - testing::InitGoogleTest(&argc, argv); - return RUN_ALL_TESTS(); -} diff --git a/db/filename_test.cc b/db/filename_test.cc index f291d72..9ac0111 100644 --- a/db/filename_test.cc +++ b/db/filename_test.cc @@ -125,8 +125,3 @@ TEST(FileNameTest, Construction) { } } // namespace leveldb - -int main(int argc, char** argv) { - testing::InitGoogleTest(&argc, argv); - return RUN_ALL_TESTS(); -} diff --git a/db/log_test.cc b/db/log_test.cc index 346b19c..d55d4dd 100644 --- a/db/log_test.cc +++ b/db/log_test.cc @@ -556,8 +556,3 @@ TEST_F(LogTest, ReadPastEnd) { CheckOffsetPastEndReturnsNoRecords(5); } } // namespace log } // namespace leveldb - -int main(int argc, char** argv) { - testing::InitGoogleTest(&argc, argv); - return RUN_ALL_TESTS(); -} diff --git a/db/recovery_test.cc b/db/recovery_test.cc index 6c5d42e..1d9f621 100644 --- a/db/recovery_test.cc +++ b/db/recovery_test.cc @@ -332,8 +332,3 @@ TEST_F(RecoveryTest, ManifestMissing) { } } // namespace leveldb - -int main(int argc, char** argv) { - testing::InitGoogleTest(&argc, argv); - return RUN_ALL_TESTS(); -} diff --git a/db/skiplist_test.cc b/db/skiplist_test.cc index 79a5b86..1d355cb 100644 --- a/db/skiplist_test.cc +++ b/db/skiplist_test.cc @@ -366,8 +366,3 @@ TEST(SkipTest, Concurrent4) { RunConcurrent(4); } TEST(SkipTest, Concurrent5) { RunConcurrent(5); } } // namespace 
leveldb - -int main(int argc, char** argv) { - testing::InitGoogleTest(&argc, argv); - return RUN_ALL_TESTS(); -} diff --git a/db/version_edit_test.cc b/db/version_edit_test.cc index acafab0..a108c15 100644 --- a/db/version_edit_test.cc +++ b/db/version_edit_test.cc @@ -39,8 +39,3 @@ TEST(VersionEditTest, EncodeDecode) { } } // namespace leveldb - -int main(int argc, char** argv) { - testing::InitGoogleTest(&argc, argv); - return RUN_ALL_TESTS(); -} diff --git a/db/version_set_test.cc b/db/version_set_test.cc index dee6b4c..64bb983 100644 --- a/db/version_set_test.cc +++ b/db/version_set_test.cc @@ -329,8 +329,3 @@ TEST_F(AddBoundaryInputsTest, TestDisjoinFilePointers) { } } // namespace leveldb - -int main(int argc, char** argv) { - testing::InitGoogleTest(&argc, argv); - return RUN_ALL_TESTS(); -} diff --git a/db/write_batch_test.cc b/db/write_batch_test.cc index 64df9b8..1a3ea8f 100644 --- a/db/write_batch_test.cc +++ b/db/write_batch_test.cc @@ -130,8 +130,3 @@ TEST(WriteBatchTest, ApproximateSize) { } } // namespace leveldb - -int main(int argc, char** argv) { - testing::InitGoogleTest(&argc, argv); - return RUN_ALL_TESTS(); -} diff --git a/helpers/memenv/memenv_test.cc b/helpers/memenv/memenv_test.cc index 3f03cb6..909a0ca 100644 --- a/helpers/memenv/memenv_test.cc +++ b/helpers/memenv/memenv_test.cc @@ -257,8 +257,3 @@ TEST_F(MemEnvTest, DBTest) { } } // namespace leveldb - -int main(int argc, char** argv) { - testing::InitGoogleTest(&argc, argv); - return RUN_ALL_TESTS(); -} diff --git a/issues/issue178_test.cc b/issues/issue178_test.cc index 8fa5bb9..5cd5862 100644 --- a/issues/issue178_test.cc +++ b/issues/issue178_test.cc @@ -83,8 +83,3 @@ TEST(Issue178, Test) { } } // anonymous namespace - -int main(int argc, char** argv) { - testing::InitGoogleTest(&argc, argv); - return RUN_ALL_TESTS(); -} diff --git a/issues/issue200_test.cc b/issues/issue200_test.cc index 4eba23a..959b371 100644 --- a/issues/issue200_test.cc +++ b/issues/issue200_test.cc @@ -52,8 +52,3 @@ TEST(Issue200, Test) { } } // namespace leveldb - -int main(int argc, char** argv) { - testing::InitGoogleTest(&argc, argv); - return RUN_ALL_TESTS(); -} diff --git a/issues/issue320_test.cc b/issues/issue320_test.cc index c08296a..9d7fa7b 100644 --- a/issues/issue320_test.cc +++ b/issues/issue320_test.cc @@ -124,8 +124,3 @@ TEST(Issue320, Test) { } } // namespace leveldb - -int main(int argc, char** argv) { - testing::InitGoogleTest(&argc, argv); - return RUN_ALL_TESTS(); -} diff --git a/table/filter_block_test.cc b/table/filter_block_test.cc index 91a6be2..3ee41cf 100644 --- a/table/filter_block_test.cc +++ b/table/filter_block_test.cc @@ -120,8 +120,3 @@ TEST_F(FilterBlockTest, MultiChunk) { } } // namespace leveldb - -int main(int argc, char** argv) { - testing::InitGoogleTest(&argc, argv); - return RUN_ALL_TESTS(); -} diff --git a/table/table_test.cc b/table/table_test.cc index 190dd0f..7f0f998 100644 --- a/table/table_test.cc +++ b/table/table_test.cc @@ -827,8 +827,3 @@ TEST(TableTest, ApproximateOffsetOfCompressed) { } } // namespace leveldb - -int main(int argc, char** argv) { - testing::InitGoogleTest(&argc, argv); - return RUN_ALL_TESTS(); -} diff --git a/util/arena_test.cc b/util/arena_test.cc index 90226fe..3e2011e 100644 --- a/util/arena_test.cc +++ b/util/arena_test.cc @@ -59,8 +59,3 @@ TEST(ArenaTest, Simple) { } } // namespace leveldb - -int main(int argc, char** argv) { - testing::InitGoogleTest(&argc, argv); - return RUN_ALL_TESTS(); -} diff --git a/util/bloom_test.cc b/util/bloom_test.cc index 
520473e..9f11108 100644 --- a/util/bloom_test.cc +++ b/util/bloom_test.cc @@ -152,8 +152,3 @@ TEST_F(BloomTest, VaryingLengths) { // Different bits-per-byte } // namespace leveldb - -int main(int argc, char** argv) { - testing::InitGoogleTest(&argc, argv); - return RUN_ALL_TESTS(); -} diff --git a/util/cache_test.cc b/util/cache_test.cc index 79cfc27..e68da34 100644 --- a/util/cache_test.cc +++ b/util/cache_test.cc @@ -222,8 +222,3 @@ TEST_F(CacheTest, ZeroSizeCache) { } } // namespace leveldb - -int main(int argc, char** argv) { - testing::InitGoogleTest(&argc, argv); - return RUN_ALL_TESTS(); -} diff --git a/util/coding_test.cc b/util/coding_test.cc index aa6c748..cceda14 100644 --- a/util/coding_test.cc +++ b/util/coding_test.cc @@ -191,8 +191,3 @@ TEST(Coding, Strings) { } } // namespace leveldb - -int main(int argc, char** argv) { - testing::InitGoogleTest(&argc, argv); - return RUN_ALL_TESTS(); -} diff --git a/util/crc32c_test.cc b/util/crc32c_test.cc index 647e561..2fe1c41 100644 --- a/util/crc32c_test.cc +++ b/util/crc32c_test.cc @@ -54,8 +54,3 @@ TEST(CRC, Mask) { } // namespace crc32c } // namespace leveldb - -int main(int argc, char** argv) { - testing::InitGoogleTest(&argc, argv); - return RUN_ALL_TESTS(); -} diff --git a/util/env_test.cc b/util/env_test.cc index 491ef43..fc69d71 100644 --- a/util/env_test.cc +++ b/util/env_test.cc @@ -233,8 +233,3 @@ TEST_F(EnvTest, ReopenAppendableFile) { } } // namespace leveldb - -int main(int argc, char** argv) { - testing::InitGoogleTest(&argc, argv); - return RUN_ALL_TESTS(); -} diff --git a/util/hash_test.cc b/util/hash_test.cc index 6d6771f..0ea5977 100644 --- a/util/hash_test.cc +++ b/util/hash_test.cc @@ -39,8 +39,3 @@ TEST(HASH, SignedUnsignedIssue) { } } // namespace leveldb - -int main(int argc, char** argv) { - testing::InitGoogleTest(&argc, argv); - return RUN_ALL_TESTS(); -} diff --git a/util/logging_test.cc b/util/logging_test.cc index 24e1fe9..1746c57 100644 --- a/util/logging_test.cc +++ b/util/logging_test.cc @@ -138,8 +138,3 @@ TEST(Logging, ConsumeDecimalNumberNoDigits) { } } // namespace leveldb - -int main(int argc, char** argv) { - testing::InitGoogleTest(&argc, argv); - return RUN_ALL_TESTS(); -} diff --git a/util/no_destructor_test.cc b/util/no_destructor_test.cc index 68fdfee..e3602cc 100644 --- a/util/no_destructor_test.cc +++ b/util/no_destructor_test.cc @@ -42,8 +42,3 @@ TEST(NoDestructorTest, StaticInstance) { } } // namespace leveldb - -int main(int argc, char** argv) { - testing::InitGoogleTest(&argc, argv); - return RUN_ALL_TESTS(); -} diff --git a/util/status_test.cc b/util/status_test.cc index 914b386..dbf5faa 100644 --- a/util/status_test.cc +++ b/util/status_test.cc @@ -37,8 +37,3 @@ TEST(Status, MoveConstructor) { } } // namespace leveldb - -int main(int argc, char** argv) { - testing::InitGoogleTest(&argc, argv); - return RUN_ALL_TESTS(); -} From 0e8aa26c4e9325f04e186defca123f7d4837791f Mon Sep 17 00:00:00 2001 From: Dimitris Apostolou Date: Wed, 5 Jan 2022 11:04:16 +0200 Subject: [PATCH 66/68] Fix typos --- db/snapshot.h | 2 +- util/env_posix.cc | 4 ++-- util/env_windows.cc | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/db/snapshot.h b/db/snapshot.h index 9f1d664..817bb7b 100644 --- a/db/snapshot.h +++ b/db/snapshot.h @@ -25,7 +25,7 @@ class SnapshotImpl : public Snapshot { friend class SnapshotList; // SnapshotImpl is kept in a doubly-linked circular list. The SnapshotList - // implementation operates on the next/previous fields direcly. 
+ // implementation operates on the next/previous fields directly. SnapshotImpl* prev_; SnapshotImpl* next_; diff --git a/util/env_posix.cc b/util/env_posix.cc index 8b8d9c8..8405909 100644 --- a/util/env_posix.cc +++ b/util/env_posix.cc @@ -242,7 +242,7 @@ class PosixMmapReadableFile final : public RandomAccessFile { // over the ownership of the region. // // |mmap_limiter| must outlive this instance. The caller must have already - // aquired the right to use one mmap region, which will be released when this + // acquired the right to use one mmap region, which will be released when this // instance is destroyed. PosixMmapReadableFile(std::string filename, char* mmap_base, size_t length, Limiter* mmap_limiter) @@ -756,7 +756,7 @@ class PosixEnv : public Env { // Instances are constructed on the thread calling Schedule() and used on the // background thread. // - // This structure is thread-safe beacuse it is immutable. + // This structure is thread-safe because it is immutable. struct BackgroundWorkItem { explicit BackgroundWorkItem(void (*function)(void* arg), void* arg) : function(function), arg(arg) {} diff --git a/util/env_windows.cc b/util/env_windows.cc index 9ffcd07..c6d439c 100644 --- a/util/env_windows.cc +++ b/util/env_windows.cc @@ -681,7 +681,7 @@ class WindowsEnv : public Env { // Instances are constructed on the thread calling Schedule() and used on the // background thread. // - // This structure is thread-safe beacuse it is immutable. + // This structure is thread-safe because it is immutable. struct BackgroundWorkItem { explicit BackgroundWorkItem(void (*function)(void* arg), void* arg) : function(function), arg(arg) {} From 87b3a371b1a12ed17dae3b80239a84b6bbecd570 Mon Sep 17 00:00:00 2001 From: xindubawukong Date: Thu, 6 Jan 2022 03:11:11 +0800 Subject: [PATCH 67/68] remove useless code in cache.h --- include/leveldb/cache.h | 8 -------- 1 file changed, 8 deletions(-) diff --git a/include/leveldb/cache.h b/include/leveldb/cache.h index 98c95ac..a94c683 100644 --- a/include/leveldb/cache.h +++ b/include/leveldb/cache.h @@ -96,14 +96,6 @@ class LEVELDB_EXPORT Cache { // Return an estimate of the combined charges of all elements stored in the // cache. virtual size_t TotalCharge() const = 0; - - private: - void LRU_Remove(Handle* e); - void LRU_Append(Handle* e); - void Unref(Handle* e); - - struct Rep; - Rep* rep_; }; } // namespace leveldb From 7ee3889a6137075560e64a7ad4289c10d4cdafc9 Mon Sep 17 00:00:00 2001 From: Victor Costan Date: Sun, 9 Jan 2022 03:04:29 +0000 Subject: [PATCH 68/68] VersionSet::Builder::Apply() does not mutate its argument. PiperOrigin-RevId: 420533763 --- db/version_set.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/db/version_set.cc b/db/version_set.cc index 8d85fce..597e226 100644 --- a/db/version_set.cc +++ b/db/version_set.cc @@ -626,7 +626,7 @@ class VersionSet::Builder { } // Apply all of the edits in *edit to the current state. - void Apply(VersionEdit* edit) { + void Apply(const VersionEdit* edit) { // Update compaction pointers for (size_t i = 0; i < edit->compact_pointers_.size(); i++) { const int level = edit->compact_pointers_[i].first;