diff --git a/.clang-format b/.clang-format
new file mode 100644
index 0000000..75f3401
--- /dev/null
+++ b/.clang-format
@@ -0,0 +1,18 @@
+# Run manually to reformat a file:
+# clang-format -i --style=file <file>
+# find . -iname '*.cc' -o -iname '*.h' -o -iname '*.h.in' | xargs clang-format -i --style=file
+BasedOnStyle: Google
+DerivePointerAlignment: false
+
+# Public headers are in a different location in the internal Google repository.
+# Order them so that when imported to the authoritative repository they will be
+# in correct alphabetical order.
+IncludeCategories:
+  - Regex:           '^(<|"(db|helpers)/)'
+    Priority:        1
+  - Regex:           '^"(leveldb)/'
+    Priority:        2
+  - Regex:           '^(<|"(issues|port|table|third_party|util)/)'
+    Priority:        3
+  - Regex:           '.*'
+    Priority:        4
diff --git a/README.md b/README.md
index 537cab1..0b660ae 100644
--- a/README.md
+++ b/README.md
@@ -86,6 +86,14 @@ Contribution requirements:
 3. **Tests**: All changes must be accompanied by a new (or changed) test, or
    a sufficient explanation as to why a new (or changed) test is not required.
 
+4. **Consistent Style**: This project conforms to the
+   [Google C++ Style Guide](https://google.github.io/styleguide/cppguide.html).
+   To ensure your changes are properly formatted, please run:
+
+   ```
+   clang-format -i --style=file <file>
+   ```
+
 ## Submitting a Pull Request
 
 Before any pull request will be accepted the author must first sign a
diff --git a/db/autocompact_test.cc b/db/autocompact_test.cc
index d20a236..00e3672 100644
--- a/db/autocompact_test.cc
+++ b/db/autocompact_test.cc
@@ -2,9 +2,9 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file. See the AUTHORS file for names of contributors.
 
-#include "leveldb/db.h"
 #include "db/db_impl.h"
 #include "leveldb/cache.h"
+#include "leveldb/db.h"
 #include "util/testharness.h"
 #include "util/testutil.h"
 
@@ -81,17 +81,16 @@ void AutoCompactTest::DoReads(int n) {
     ASSERT_LT(read, 100) << "Taking too long to compact";
     Iterator* iter = db_->NewIterator(ReadOptions());
     for (iter->SeekToFirst();
-         iter->Valid() && iter->key().ToString() < limit_key;
-         iter->Next()) {
+         iter->Valid() && iter->key().ToString() < limit_key; iter->Next()) {
       // Drop data
     }
     delete iter;
     // Wait a little bit to allow any triggered compactions to complete.
     Env::Default()->SleepForMicroseconds(1000000);
     uint64_t size = Size(Key(0), Key(n));
-    fprintf(stderr, "iter %3d => %7.3f MB [other %7.3f MB]\n",
-            read+1, size/1048576.0, Size(Key(n), Key(kCount))/1048576.0);
-    if (size <= initial_size/10) {
+    fprintf(stderr, "iter %3d => %7.3f MB [other %7.3f MB]\n", read + 1,
+            size / 1048576.0, Size(Key(n), Key(kCount)) / 1048576.0);
+    if (size <= initial_size / 10) {
       break;
     }
   }
@@ -100,19 +99,13 @@ void AutoCompactTest::DoReads(int n) {
   // is pretty much unchanged.
   const int64_t final_other_size = Size(Key(n), Key(kCount));
   ASSERT_LE(final_other_size, initial_other_size + 1048576);
-  ASSERT_GE(final_other_size, initial_other_size/5 - 1048576);
+  ASSERT_GE(final_other_size, initial_other_size / 5 - 1048576);
 }
 
-TEST(AutoCompactTest, ReadAll) {
-  DoReads(kCount);
-}
+TEST(AutoCompactTest, ReadAll) { DoReads(kCount); }
 
-TEST(AutoCompactTest, ReadHalf) {
-  DoReads(kCount/2);
-}
+TEST(AutoCompactTest, ReadHalf) { DoReads(kCount / 2); }
 
 }  // namespace leveldb
 
-int main(int argc, char** argv) {
-  return leveldb::test::RunAllTests();
-}
+int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
diff --git a/db/builder.cc b/db/builder.cc
index 5fa405d..9520ee4 100644
--- a/db/builder.cc
+++ b/db/builder.cc
@@ -4,8 +4,8 @@
 
 #include "db/builder.h"
 
-#include "db/filename.h"
 #include "db/dbformat.h"
+#include "db/filename.h"
 #include "db/table_cache.h"
 #include "db/version_edit.h"
 #include "leveldb/db.h"
@@ -14,12 +14,8 @@
 
 namespace leveldb {
 
-Status BuildTable(const std::string& dbname,
-                  Env* env,
-                  const Options& options,
-                  TableCache* table_cache,
-                  Iterator* iter,
-                  FileMetaData* meta) {
+Status BuildTable(const std::string& dbname, Env* env, const Options& options,
+                  TableCache* table_cache, Iterator* iter, FileMetaData* meta) {
   Status s;
   meta->file_size = 0;
   iter->SeekToFirst();
@@ -60,8 +56,7 @@ Status BuildTable(const std::string& dbname,
 
     if (s.ok()) {
       // Verify that the table is usable
-      Iterator* it = table_cache->NewIterator(ReadOptions(),
-                                              meta->number,
+      Iterator* it = table_cache->NewIterator(ReadOptions(), meta->number,
                                               meta->file_size);
       s = it->status();
       delete it;
diff --git a/db/builder.h b/db/builder.h
index 0289730..7bd0b80 100644
--- a/db/builder.h
+++ b/db/builder.h
@@ -22,12 +22,8 @@ class VersionEdit;
 // *meta will be filled with metadata about the generated table.
 // If no data is present in *iter, meta->file_size will be set to
 // zero, and no Table file will be produced.
-Status BuildTable(const std::string& dbname,
-                  Env* env,
-                  const Options& options,
-                  TableCache* table_cache,
-                  Iterator* iter,
-                  FileMetaData* meta);
+Status BuildTable(const std::string& dbname, Env* env, const Options& options,
+                  TableCache* table_cache, Iterator* iter, FileMetaData* meta);
 
 }  // namespace leveldb
 
diff --git a/db/c.cc b/db/c.cc
index 7756ea3..72f6daa 100644
--- a/db/c.cc
+++ b/db/c.cc
@@ -5,6 +5,7 @@
 #include "leveldb/c.h"
 
 #include <stdlib.h>
+
 #include "leveldb/cache.h"
 #include "leveldb/comparator.h"
 #include "leveldb/db.h"
@@ -42,67 +43,79 @@ using leveldb::WriteOptions;
 
 extern "C" {
 
-struct leveldb_t              { DB*               rep; };
-struct leveldb_iterator_t     { Iterator*         rep; };
-struct leveldb_writebatch_t   { WriteBatch        rep; };
-struct leveldb_snapshot_t     { const Snapshot*   rep; };
-struct leveldb_readoptions_t  { ReadOptions       rep; };
-struct leveldb_writeoptions_t { WriteOptions      rep; };
-struct leveldb_options_t      { Options           rep; };
-struct leveldb_cache_t        { Cache*            rep; };
-struct leveldb_seqfile_t      { SequentialFile*   rep; };
-struct leveldb_randomfile_t   { RandomAccessFile* rep; };
-struct leveldb_writablefile_t { WritableFile*     rep; };
-struct leveldb_logger_t       { Logger*           rep; };
-struct leveldb_filelock_t     { FileLock*         rep; };
+struct leveldb_t {
+  DB* rep;
+};
+struct leveldb_iterator_t {
+  Iterator* rep;
+};
+struct leveldb_writebatch_t {
+  WriteBatch rep;
+};
+struct leveldb_snapshot_t {
+  const Snapshot* rep;
+};
+struct leveldb_readoptions_t {
+  ReadOptions rep;
+};
+struct leveldb_writeoptions_t {
+  WriteOptions rep;
+};
+struct leveldb_options_t {
+  Options rep;
+};
+struct leveldb_cache_t {
+  Cache* rep;
+};
+struct leveldb_seqfile_t {
+  SequentialFile* rep;
+};
+struct leveldb_randomfile_t {
+  RandomAccessFile* rep;
+};
+struct leveldb_writablefile_t {
+  WritableFile* rep;
+};
+struct leveldb_logger_t {
+  Logger* rep;
+};
+struct leveldb_filelock_t {
+  FileLock* rep;
+};
 
 struct leveldb_comparator_t : public Comparator {
   void* state_;
   void (*destructor_)(void*);
-  int (*compare_)(
-      void*,
-      const char* a, size_t alen,
-      const char* b, size_t blen);
+  int (*compare_)(void*, const char* a, size_t alen, const char* b,
+                  size_t blen);
   const char* (*name_)(void*);
 
-  virtual ~leveldb_comparator_t() {
-    (*destructor_)(state_);
-  }
+  virtual ~leveldb_comparator_t() { (*destructor_)(state_); }
 
   virtual int Compare(const Slice& a, const Slice& b) const {
     return (*compare_)(state_, a.data(), a.size(), b.data(), b.size());
   }
 
-  virtual const char* Name() const {
-    return (*name_)(state_);
-  }
+  virtual const char* Name() const { return (*name_)(state_); }
 
   // No-ops since the C binding does not support key shortening methods.
-  virtual void FindShortestSeparator(std::string*, const Slice&) const { }
-  virtual void FindShortSuccessor(std::string* key) const { }
+  virtual void FindShortestSeparator(std::string*, const Slice&) const {}
+  virtual void FindShortSuccessor(std::string* key) const {}
 };
 
 struct leveldb_filterpolicy_t : public FilterPolicy {
   void* state_;
   void (*destructor_)(void*);
   const char* (*name_)(void*);
-  char* (*create_)(
-      void*,
-      const char* const* key_array, const size_t* key_length_array,
-      int num_keys,
-      size_t* filter_length);
-  unsigned char (*key_match_)(
-      void*,
-      const char* key, size_t length,
-      const char* filter, size_t filter_length);
-
-  virtual ~leveldb_filterpolicy_t() {
-    (*destructor_)(state_);
-  }
+  char* (*create_)(void*, const char* const* key_array,
+                   const size_t* key_length_array, int num_keys,
+                   size_t* filter_length);
+  unsigned char (*key_match_)(void*, const char* key, size_t length,
+                              const char* filter, size_t filter_length);
 
-  virtual const char* Name() const {
-    return (*name_)(state_);
-  }
+  virtual ~leveldb_filterpolicy_t() { (*destructor_)(state_); }
+
+  virtual const char* Name() const { return (*name_)(state_); }
 
   virtual void CreateFilter(const Slice* keys, int n, std::string* dst) const {
     std::vector<const char*> key_pointers(n);
@@ -118,8 +131,8 @@ struct leveldb_filterpolicy_t : public FilterPolicy {
   }
 
   virtual bool KeyMayMatch(const Slice& key, const Slice& filter) const {
-    return (*key_match_)(state_, key.data(), key.size(),
-                         filter.data(), filter.size());
+    return (*key_match_)(state_, key.data(), key.size(), filter.data(),
+                         filter.size());
   }
 };
 
@@ -148,10 +161,8 @@ static char* CopyString(const std::string& str) {
   return result;
 }
 
-leveldb_t* leveldb_open(
-    const leveldb_options_t* options,
-    const char* name,
-    char** errptr) {
+leveldb_t* leveldb_open(const leveldb_options_t* options, const char* name,
+                        char** errptr) {
   DB* db;
   if (SaveError(errptr, DB::Open(options->rep, std::string(name), &db))) {
     return nullptr;
@@ -166,39 +177,26 @@ void leveldb_close(leveldb_t* db) {
   delete db;
 }
 
-void leveldb_put(
-    leveldb_t* db,
-    const leveldb_writeoptions_t* options,
-    const char* key, size_t keylen,
-    const char* val, size_t vallen,
-    char** errptr) {
+void leveldb_put(leveldb_t* db, const leveldb_writeoptions_t* options,
+                 const char* key, size_t keylen, const char* val, size_t vallen,
+                 char** errptr) {
   SaveError(errptr,
             db->rep->Put(options->rep, Slice(key, keylen), Slice(val, vallen)));
 }
 
-void leveldb_delete(
-    leveldb_t* db,
-    const leveldb_writeoptions_t* options,
-    const char* key, size_t keylen,
-    char** errptr) {
+void leveldb_delete(leveldb_t* db, const leveldb_writeoptions_t* options,
+                    const char* key, size_t keylen, char** errptr) {
   SaveError(errptr, db->rep->Delete(options->rep, Slice(key, keylen)));
 }
 
-
-void leveldb_write(
-    leveldb_t* db,
-    const leveldb_writeoptions_t* options,
-    leveldb_writebatch_t* batch,
-    char** errptr) {
+void leveldb_write(leveldb_t* db, const leveldb_writeoptions_t* options,
+                   leveldb_writebatch_t* batch, char** errptr) {
   SaveError(errptr, db->rep->Write(options->rep, &batch->rep));
 }
 
-char* leveldb_get(
-    leveldb_t* db,
-    const leveldb_readoptions_t* options,
-    const char* key, size_t keylen,
-    size_t* vallen,
-    char** errptr) {
+char* leveldb_get(leveldb_t* db, const leveldb_readoptions_t* options,
+                  const char* key, size_t keylen, size_t* vallen,
+                  char** errptr) {
   char* result = nullptr;
   std::string tmp;
   Status s = db->rep->Get(options->rep, Slice(key, keylen), &tmp);
@@ -215,30 +213,25 @@ char* leveldb_get(
 }
 
 leveldb_iterator_t* leveldb_create_iterator(
-    leveldb_t* db,
-    const leveldb_readoptions_t* options) {
+    leveldb_t* db, const leveldb_readoptions_t* options) {
   leveldb_iterator_t* result = new leveldb_iterator_t;
   result->rep = db->rep->NewIterator(options->rep);
   return result;
 }
 
-const leveldb_snapshot_t* leveldb_create_snapshot(
-    leveldb_t* db) {
+const leveldb_snapshot_t* leveldb_create_snapshot(leveldb_t* db) {
   leveldb_snapshot_t* result = new leveldb_snapshot_t;
   result->rep = db->rep->GetSnapshot();
   return result;
 }
 
-void leveldb_release_snapshot(
-    leveldb_t* db,
-    const leveldb_snapshot_t* snapshot) {
+void leveldb_release_snapshot(leveldb_t* db,
+                              const leveldb_snapshot_t* snapshot) {
   db->rep->ReleaseSnapshot(snapshot->rep);
   delete snapshot;
 }
 
-char* leveldb_property_value(
-    leveldb_t* db,
-    const char* propname) {
+char* leveldb_property_value(leveldb_t* db, const char* propname) {
   std::string tmp;
   if (db->rep->GetProperty(Slice(propname), &tmp)) {
     // We use strdup() since we expect human readable output.
@@ -248,12 +241,12 @@ char* leveldb_property_value(
   }
 }
 
-void leveldb_approximate_sizes(
-    leveldb_t* db,
-    int num_ranges,
-    const char* const* range_start_key, const size_t* range_start_key_len,
-    const char* const* range_limit_key, const size_t* range_limit_key_len,
-    uint64_t* sizes) {
+void leveldb_approximate_sizes(leveldb_t* db, int num_ranges,
+                               const char* const* range_start_key,
+                               const size_t* range_start_key_len,
+                               const char* const* range_limit_key,
+                               const size_t* range_limit_key_len,
+                               uint64_t* sizes) {
   Range* ranges = new Range[num_ranges];
   for (int i = 0; i < num_ranges; i++) {
     ranges[i].start = Slice(range_start_key[i], range_start_key_len[i]);
@@ -263,10 +256,9 @@ void leveldb_approximate_sizes(
   delete[] ranges;
 }
 
-void leveldb_compact_range(
-    leveldb_t* db,
-    const char* start_key, size_t start_key_len,
-    const char* limit_key, size_t limit_key_len) {
+void leveldb_compact_range(leveldb_t* db, const char* start_key,
+                           size_t start_key_len, const char* limit_key,
+                           size_t limit_key_len) {
   Slice a, b;
   db->rep->CompactRange(
       // Pass null Slice if corresponding "const char*" is null
@@ -274,17 +266,13 @@ void leveldb_compact_range(
       (limit_key ? (b = Slice(limit_key, limit_key_len), &b) : nullptr));
 }
 
-void leveldb_destroy_db(
-    const leveldb_options_t* options,
-    const char* name,
-    char** errptr) {
+void leveldb_destroy_db(const leveldb_options_t* options, const char* name,
+                        char** errptr) {
   SaveError(errptr, DestroyDB(name, options->rep));
 }
 
-void leveldb_repair_db(
-    const leveldb_options_t* options,
-    const char* name,
-    char** errptr) {
+void leveldb_repair_db(const leveldb_options_t* options, const char* name,
+                       char** errptr) {
   SaveError(errptr, RepairDB(name, options->rep));
 }
 
@@ -309,13 +297,9 @@ void leveldb_iter_seek(leveldb_iterator_t* iter, const char* k, size_t klen) {
   iter->rep->Seek(Slice(k, klen));
 }
 
-void leveldb_iter_next(leveldb_iterator_t* iter) {
-  iter->rep->Next();
-}
+void leveldb_iter_next(leveldb_iterator_t* iter) { iter->rep->Next(); }
 
-void leveldb_iter_prev(leveldb_iterator_t* iter) {
-  iter->rep->Prev();
-}
+void leveldb_iter_prev(leveldb_iterator_t* iter) { iter->rep->Prev(); }
 
 const char* leveldb_iter_key(const leveldb_iterator_t* iter, size_t* klen) {
   Slice s = iter->rep->key();
@@ -337,32 +321,25 @@ leveldb_writebatch_t* leveldb_writebatch_create() {
   return new leveldb_writebatch_t;
 }
 
-void leveldb_writebatch_destroy(leveldb_writebatch_t* b) {
-  delete b;
-}
+void leveldb_writebatch_destroy(leveldb_writebatch_t* b) { delete b; }
 
-void leveldb_writebatch_clear(leveldb_writebatch_t* b) {
-  b->rep.Clear();
-}
+void leveldb_writebatch_clear(leveldb_writebatch_t* b) { b->rep.Clear(); }
 
-void leveldb_writebatch_put(
-    leveldb_writebatch_t* b,
-    const char* key, size_t klen,
-    const char* val, size_t vlen) {
+void leveldb_writebatch_put(leveldb_writebatch_t* b, const char* key,
+                            size_t klen, const char* val, size_t vlen) {
   b->rep.Put(Slice(key, klen), Slice(val, vlen));
 }
 
-void leveldb_writebatch_delete(
-    leveldb_writebatch_t* b,
-    const char* key, size_t klen) {
+void leveldb_writebatch_delete(leveldb_writebatch_t* b, const char* key,
+                               size_t klen) {
   b->rep.Delete(Slice(key, klen));
 }
 
-void leveldb_writebatch_iterate(
-    const leveldb_writebatch_t* b,
-    void* state,
-    void (*put)(void*, const char* k, size_t klen, const char* v, size_t vlen),
-    void (*deleted)(void*, const char* k, size_t klen)) {
+void leveldb_writebatch_iterate(const leveldb_writebatch_t* b, void* state,
+                                void (*put)(void*, const char* k, size_t klen,
+                                            const char* v, size_t vlen),
+                                void (*deleted)(void*, const char* k,
+                                                size_t klen)) {
   class H : public WriteBatch::Handler {
    public:
     void* state_;
@@ -382,43 +359,37 @@ void leveldb_writebatch_iterate(
   b->rep.Iterate(&handler);
 }
 
-void leveldb_writebatch_append(leveldb_writebatch_t *destination,
-                               const leveldb_writebatch_t *source) {
+void leveldb_writebatch_append(leveldb_writebatch_t* destination,
+                               const leveldb_writebatch_t* source) {
   destination->rep.Append(source->rep);
 }
 
-leveldb_options_t* leveldb_options_create() {
-  return new leveldb_options_t;
-}
+leveldb_options_t* leveldb_options_create() { return new leveldb_options_t; }
 
-void leveldb_options_destroy(leveldb_options_t* options) {
-  delete options;
-}
+void leveldb_options_destroy(leveldb_options_t* options) { delete options; }
 
-void leveldb_options_set_comparator(
-    leveldb_options_t* opt,
-    leveldb_comparator_t* cmp) {
+void leveldb_options_set_comparator(leveldb_options_t* opt,
+                                    leveldb_comparator_t* cmp) {
   opt->rep.comparator = cmp;
 }
 
-void leveldb_options_set_filter_policy(
-    leveldb_options_t* opt,
-    leveldb_filterpolicy_t* policy) {
+void leveldb_options_set_filter_policy(leveldb_options_t* opt,
+                                       leveldb_filterpolicy_t* policy) {
   opt->rep.filter_policy = policy;
 }
 
-void leveldb_options_set_create_if_missing(
-    leveldb_options_t* opt, unsigned char v) {
+void leveldb_options_set_create_if_missing(leveldb_options_t* opt,
+                                           unsigned char v) {
   opt->rep.create_if_missing = v;
 }
 
-void leveldb_options_set_error_if_exists(
-    leveldb_options_t* opt, unsigned char v) {
+void leveldb_options_set_error_if_exists(leveldb_options_t* opt,
+                                         unsigned char v) {
   opt->rep.error_if_exists = v;
 }
 
-void leveldb_options_set_paranoid_checks(
-    leveldb_options_t* opt, unsigned char v) {
+void leveldb_options_set_paranoid_checks(leveldb_options_t* opt,
+                                         unsigned char v) {
   opt->rep.paranoid_checks = v;
 }
 
@@ -459,12 +430,9 @@ void leveldb_options_set_compression(leveldb_options_t* opt, int t) {
 }
 
 leveldb_comparator_t* leveldb_comparator_create(
-    void* state,
-    void (*destructor)(void*),
-    int (*compare)(
-        void*,
-        const char* a, size_t alen,
-        const char* b, size_t blen),
+    void* state, void (*destructor)(void*),
+    int (*compare)(void*, const char* a, size_t alen, const char* b,
+                   size_t blen),
     const char* (*name)(void*)) {
   leveldb_comparator_t* result = new leveldb_comparator_t;
   result->state_ = state;
@@ -474,22 +442,15 @@ leveldb_comparator_t* leveldb_comparator_create(
   return result;
 }
 
-void leveldb_comparator_destroy(leveldb_comparator_t* cmp) {
-  delete cmp;
-}
+void leveldb_comparator_destroy(leveldb_comparator_t* cmp) { delete cmp; }
 
 leveldb_filterpolicy_t* leveldb_filterpolicy_create(
-    void* state,
-    void (*destructor)(void*),
-    char* (*create_filter)(
-        void*,
-        const char* const* key_array, const size_t* key_length_array,
-        int num_keys,
-        size_t* filter_length),
-    unsigned char (*key_may_match)(
-        void*,
-        const char* key, size_t length,
-        const char* filter, size_t filter_length),
+    void* state, void (*destructor)(void*),
+    char* (*create_filter)(void*, const char* const* key_array,
+                           const size_t* key_length_array, int num_keys,
+                           size_t* filter_length),
+    unsigned char (*key_may_match)(void*, const char* key, size_t length,
+                                   const char* filter, size_t filter_length),
     const char* (*name)(void*)) {
   leveldb_filterpolicy_t* result = new leveldb_filterpolicy_t;
   result->state_ = state;
@@ -518,7 +479,7 @@ leveldb_filterpolicy_t* leveldb_filterpolicy_create_bloom(int bits_per_key) {
     bool KeyMayMatch(const Slice& key, const Slice& filter) const {
       return rep_->KeyMayMatch(key, filter);
     }
-    static void DoNothing(void*) { }
+    static void DoNothing(void*) {}
   };
   Wrapper* wrapper = new Wrapper;
   wrapper->rep_ = NewBloomFilterPolicy(bits_per_key);
@@ -531,24 +492,20 @@ leveldb_readoptions_t* leveldb_readoptions_create() {
   return new leveldb_readoptions_t;
 }
 
-void leveldb_readoptions_destroy(leveldb_readoptions_t* opt) {
-  delete opt;
-}
+void leveldb_readoptions_destroy(leveldb_readoptions_t* opt) { delete opt; }
 
-void leveldb_readoptions_set_verify_checksums(
-    leveldb_readoptions_t* opt,
-    unsigned char v) {
+void leveldb_readoptions_set_verify_checksums(leveldb_readoptions_t* opt,
+                                              unsigned char v) {
   opt->rep.verify_checksums = v;
 }
 
-void leveldb_readoptions_set_fill_cache(
-    leveldb_readoptions_t* opt, unsigned char v) {
+void leveldb_readoptions_set_fill_cache(leveldb_readoptions_t* opt,
+                                        unsigned char v) {
   opt->rep.fill_cache = v;
 }
 
-void leveldb_readoptions_set_snapshot(
-    leveldb_readoptions_t* opt,
-    const leveldb_snapshot_t* snap) {
+void leveldb_readoptions_set_snapshot(leveldb_readoptions_t* opt,
+                                      const leveldb_snapshot_t* snap) {
   opt->rep.snapshot = (snap ? snap->rep : nullptr);
 }
 
@@ -556,12 +513,10 @@ leveldb_writeoptions_t* leveldb_writeoptions_create() {
   return new leveldb_writeoptions_t;
 }
 
-void leveldb_writeoptions_destroy(leveldb_writeoptions_t* opt) {
-  delete opt;
-}
+void leveldb_writeoptions_destroy(leveldb_writeoptions_t* opt) { delete opt; }
 
-void leveldb_writeoptions_set_sync(
-    leveldb_writeoptions_t* opt, unsigned char v) {
+void leveldb_writeoptions_set_sync(leveldb_writeoptions_t* opt,
+                                   unsigned char v) {
   opt->rep.sync = v;
 }
 
@@ -600,16 +555,10 @@ char* leveldb_env_get_test_directory(leveldb_env_t* env) {
   return buffer;
 }
 
-void leveldb_free(void* ptr) {
-  free(ptr);
-}
+void leveldb_free(void* ptr) { free(ptr); }
 
-int leveldb_major_version() {
-  return kMajorVersion;
-}
+int leveldb_major_version() { return kMajorVersion; }
 
-int leveldb_minor_version() {
-  return kMinorVersion;
-}
+int leveldb_minor_version() { return kMinorVersion; }
 
 }  // end extern "C"
diff --git a/db/corruption_test.cc b/db/corruption_test.cc
index d50785a..e6f64ee 100644
--- a/db/corruption_test.cc
+++ b/db/corruption_test.cc
@@ -2,16 +2,16 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file. See the AUTHORS file for names of contributors.
 
-#include "leveldb/db.h"
-
 #include <sys/types.h>
-#include "leveldb/cache.h"
-#include "leveldb/table.h"
-#include "leveldb/write_batch.h"
+
 #include "db/db_impl.h"
 #include "db/filename.h"
 #include "db/log_format.h"
 #include "db/version_set.h"
+#include "leveldb/cache.h"
+#include "leveldb/db.h"
+#include "leveldb/table.h"
+#include "leveldb/write_batch.h"
 #include "util/logging.h"
 #include "util/testharness.h"
 #include "util/testutil.h"
@@ -42,8 +42,8 @@ class CorruptionTest {
   }
 
   ~CorruptionTest() {
-     delete db_;
-     delete tiny_cache_;
+    delete db_;
+    delete tiny_cache_;
   }
 
   Status TryReopen() {
@@ -52,9 +52,7 @@ class CorruptionTest {
     return DB::Open(options_, dbname_, &db_);
   }
 
-  void Reopen() {
-    ASSERT_OK(TryReopen());
-  }
+  void Reopen() { ASSERT_OK(TryReopen()); }
 
   void RepairDB() {
     delete db_;
@@ -66,7 +64,7 @@ class CorruptionTest {
     std::string key_space, value_space;
     WriteBatch batch;
     for (int i = 0; i < n; i++) {
-      //if ((i % 100) == 0) fprintf(stderr, "@ %d of %d\n", i, n);
+      // if ((i % 100) == 0) fprintf(stderr, "@ %d of %d\n", i, n);
       Slice key = Key(i, &key_space);
       batch.Clear();
       batch.Put(key, Value(i, &value_space));
@@ -95,8 +93,7 @@ class CorruptionTest {
         // Ignore boundary keys.
         continue;
       }
-      if (!ConsumeDecimalNumber(&in, &key) ||
-          !in.empty() ||
+      if (!ConsumeDecimalNumber(&in, &key) || !in.empty() ||
           key < next_expected) {
         bad_keys++;
         continue;
@@ -127,8 +124,7 @@ class CorruptionTest {
     std::string fname;
     int picked_number = -1;
     for (size_t i = 0; i < filenames.size(); i++) {
-      if (ParseFileName(filenames[i], &number, &type) &&
-          type == filetype &&
+      if (ParseFileName(filenames[i], &number, &type) && type == filetype &&
           int(number) > picked_number) {  // Pick latest file
         fname = dbname_ + "/" + filenames[i];
         picked_number = number;
@@ -194,7 +190,7 @@ class CorruptionTest {
 TEST(CorruptionTest, Recovery) {
   Build(100);
   Check(100, 100);
-  Corrupt(kLogFile, 19, 1);      // WriteBatch tag for first record
+  Corrupt(kLogFile, 19, 1);  // WriteBatch tag for first record
   Corrupt(kLogFile, log::kBlockSize + 1000, 1);  // Somewhere in second block
   Reopen();
 
@@ -361,6 +357,4 @@ TEST(CorruptionTest, UnrelatedKeys) {
 
 }  // namespace leveldb
 
-int main(int argc, char** argv) {
-  return leveldb::test::RunAllTests();
-}
+int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
diff --git a/db/db_bench.cc b/db/db_bench.cc
index 41e903b..3090b43 100644
--- a/db/db_bench.cc
+++ b/db/db_bench.cc
@@ -2,9 +2,10 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file. See the AUTHORS file for names of contributors.
 
-#include <sys/types.h>
 #include <stdio.h>
 #include <stdlib.h>
+#include <sys/types.h>
+
 #include "leveldb/cache.h"
 #include "leveldb/db.h"
 #include "leveldb/env.h"
@@ -55,8 +56,7 @@ static const char* FLAGS_benchmarks =
     "fill100K,"
     "crc32c,"
     "snappycomp,"
-    "snappyuncomp,"
-    ;
+    "snappyuncomp,";
 
 // Number of key/values to place in database
 static int FLAGS_num = 1000000;
@@ -155,7 +155,7 @@ static Slice TrimSpace(Slice s) {
     start++;
   }
   size_t limit = s.size();
-  while (limit > start && isspace(s[limit-1])) {
+  while (limit > start && isspace(s[limit - 1])) {
     limit--;
   }
   return Slice(s.data() + start, limit - start);
@@ -214,9 +214,7 @@ class Stats {
     seconds_ = (finish_ - start_) * 1e-6;
   }
 
-  void AddMessage(Slice msg) {
-    AppendWithSpace(&message_, msg);
-  }
+  void AddMessage(Slice msg) { AppendWithSpace(&message_, msg); }
 
   void FinishedSingleOp() {
     if (FLAGS_histogram) {
@@ -232,21 +230,26 @@ class Stats {
 
     done_++;
     if (done_ >= next_report_) {
-      if      (next_report_ < 1000)   next_report_ += 100;
-      else if (next_report_ < 5000)   next_report_ += 500;
-      else if (next_report_ < 10000)  next_report_ += 1000;
-      else if (next_report_ < 50000)  next_report_ += 5000;
-      else if (next_report_ < 100000) next_report_ += 10000;
-      else if (next_report_ < 500000) next_report_ += 50000;
-      else                            next_report_ += 100000;
+      if (next_report_ < 1000)
+        next_report_ += 100;
+      else if (next_report_ < 5000)
+        next_report_ += 500;
+      else if (next_report_ < 10000)
+        next_report_ += 1000;
+      else if (next_report_ < 50000)
+        next_report_ += 5000;
+      else if (next_report_ < 100000)
+        next_report_ += 10000;
+      else if (next_report_ < 500000)
+        next_report_ += 50000;
+      else
+        next_report_ += 100000;
       fprintf(stderr, "... finished %d ops%30s\r", done_, "");
       fflush(stderr);
     }
   }
 
-  void AddBytes(int64_t n) {
-    bytes_ += n;
-  }
+  void AddBytes(int64_t n) { bytes_ += n; }
 
   void Report(const Slice& name) {
     // Pretend at least one op was done in case we are running a benchmark
@@ -265,11 +268,8 @@ class Stats {
     }
     AppendWithSpace(&extra, message_);
 
-    fprintf(stdout, "%-12s : %11.3f micros/op;%s%s\n",
-            name.ToString().c_str(),
-            seconds_ * 1e6 / done_,
-            (extra.empty() ? "" : " "),
-            extra.c_str());
+    fprintf(stdout, "%-12s : %11.3f micros/op;%s%s\n", name.ToString().c_str(),
+            seconds_ * 1e6 / done_, (extra.empty() ? "" : " "), extra.c_str());
     if (FLAGS_histogram) {
       fprintf(stdout, "Microseconds per op:\n%s\n", hist_.ToString().c_str());
     }
@@ -294,13 +294,13 @@ struct SharedState {
   bool start GUARDED_BY(mu);
 
   SharedState(int total)
-      : cv(&mu), total(total), num_initialized(0), num_done(0), start(false) { }
+      : cv(&mu), total(total), num_initialized(0), num_done(0), start(false) {}
 };
 
 // Per-thread state for concurrent executions of the same benchmark.
 struct ThreadState {
-  int tid;             // 0..n-1 when running in n threads
-  Random rand;         // Has different seeds for different threads
+  int tid;      // 0..n-1 when running in n threads
+  Random rand;  // Has different seeds for different threads
   Stats stats;
   SharedState* shared;
 
@@ -330,20 +330,20 @@ class Benchmark {
             static_cast<int>(FLAGS_value_size * FLAGS_compression_ratio + 0.5));
     fprintf(stdout, "Entries:    %d\n", num_);
     fprintf(stdout, "RawSize:    %.1f MB (estimated)\n",
-            ((static_cast<int64_t>(kKeySize + FLAGS_value_size) * num_)
-             / 1048576.0));
+            ((static_cast<int64_t>(kKeySize + FLAGS_value_size) * num_) /
+             1048576.0));
     fprintf(stdout, "FileSize:   %.1f MB (estimated)\n",
-            (((kKeySize + FLAGS_value_size * FLAGS_compression_ratio) * num_)
-             / 1048576.0));
+            (((kKeySize + FLAGS_value_size * FLAGS_compression_ratio) * num_) /
+             1048576.0));
     PrintWarnings();
     fprintf(stdout, "------------------------------------------------\n");
   }
 
   void PrintWarnings() {
 #if defined(__GNUC__) && !defined(__OPTIMIZE__)
-    fprintf(stdout,
-            "WARNING: Optimization is disabled: benchmarks unnecessarily slow\n"
-            );
+    fprintf(
+        stdout,
+        "WARNING: Optimization is disabled: benchmarks unnecessarily slow\n");
 #endif
 #ifndef NDEBUG
     fprintf(stdout,
@@ -361,8 +361,8 @@ class Benchmark {
   }
 
   void PrintEnvironment() {
-    fprintf(stderr, "LevelDB:    version %d.%d\n",
-            kMajorVersion, kMinorVersion);
+    fprintf(stderr, "LevelDB:    version %d.%d\n", kMajorVersion,
+            kMinorVersion);
 
 #if defined(__linux)
     time_t now = time(nullptr);
@@ -397,16 +397,16 @@ class Benchmark {
 
  public:
   Benchmark()
-  : cache_(FLAGS_cache_size >= 0 ? NewLRUCache(FLAGS_cache_size) : nullptr),
-    filter_policy_(FLAGS_bloom_bits >= 0
-                   ? NewBloomFilterPolicy(FLAGS_bloom_bits)
-                   : nullptr),
-    db_(nullptr),
-    num_(FLAGS_num),
-    value_size_(FLAGS_value_size),
-    entries_per_batch_(1),
-    reads_(FLAGS_reads < 0 ? FLAGS_num : FLAGS_reads),
-    heap_counter_(0) {
+      : cache_(FLAGS_cache_size >= 0 ? NewLRUCache(FLAGS_cache_size) : nullptr),
+        filter_policy_(FLAGS_bloom_bits >= 0
+                           ? NewBloomFilterPolicy(FLAGS_bloom_bits)
+                           : nullptr),
+        db_(nullptr),
+        num_(FLAGS_num),
+        value_size_(FLAGS_value_size),
+        entries_per_batch_(1),
+        reads_(FLAGS_reads < 0 ? FLAGS_num : FLAGS_reads),
+        heap_counter_(0) {
     std::vector<std::string> files;
     g_env->GetChildren(FLAGS_db, &files);
     for (size_t i = 0; i < files.size(); i++) {
@@ -516,7 +516,7 @@ class Benchmark {
       } else if (name == Slice("sstables")) {
         PrintStats("leveldb.sstables");
       } else {
-        if (name != Slice()) {  // No error message for empty name
+        if (!name.empty()) {  // No error message for empty name
           fprintf(stderr, "unknown benchmark '%s'\n", name.ToString().c_str());
         }
       }
@@ -665,8 +665,8 @@ class Benchmark {
     int64_t bytes = 0;
     char* uncompressed = new char[input.size()];
     while (ok && bytes < 1024 * 1048576) {  // Compress 1G
-      ok =  port::Snappy_Uncompress(compressed.data(), compressed.size(),
-                                    uncompressed);
+      ok = port::Snappy_Uncompress(compressed.data(), compressed.size(),
+                                   uncompressed);
       bytes += input.size();
       thread->stats.FinishedSingleOp();
     }
@@ -706,13 +706,9 @@ class Benchmark {
     }
   }
 
-  void WriteSeq(ThreadState* thread) {
-    DoWrite(thread, true);
-  }
+  void WriteSeq(ThreadState* thread) { DoWrite(thread, true); }
 
-  void WriteRandom(ThreadState* thread) {
-    DoWrite(thread, false);
-  }
+  void WriteRandom(ThreadState* thread) { DoWrite(thread, false); }
 
   void DoWrite(ThreadState* thread, bool seq) {
     if (num_ != FLAGS_num) {
@@ -728,7 +724,7 @@ class Benchmark {
     for (int i = 0; i < num_; i += entries_per_batch_) {
       batch.Clear();
       for (int j = 0; j < entries_per_batch_; j++) {
-        const int k = seq ? i+j : (thread->rand.Next() % FLAGS_num);
+        const int k = seq ? i + j : (thread->rand.Next() % FLAGS_num);
         char key[100];
         snprintf(key, sizeof(key), "%016d", k);
         batch.Put(key, gen.Generate(value_size_));
@@ -838,7 +834,7 @@ class Benchmark {
     for (int i = 0; i < num_; i += entries_per_batch_) {
       batch.Clear();
       for (int j = 0; j < entries_per_batch_; j++) {
-        const int k = seq ? i+j : (thread->rand.Next() % FLAGS_num);
+        const int k = seq ? i + j : (thread->rand.Next() % FLAGS_num);
         char key[100];
         snprintf(key, sizeof(key), "%016d", k);
         batch.Delete(key);
@@ -852,13 +848,9 @@ class Benchmark {
     }
   }
 
-  void DeleteSeq(ThreadState* thread) {
-    DoDelete(thread, true);
-  }
+  void DeleteSeq(ThreadState* thread) { DoDelete(thread, true); }
 
-  void DeleteRandom(ThreadState* thread) {
-    DoDelete(thread, false);
-  }
+  void DeleteRandom(ThreadState* thread) { DoDelete(thread, false); }
 
   void ReadWhileWriting(ThreadState* thread) {
     if (thread->tid > 0) {
@@ -890,9 +882,7 @@ class Benchmark {
     }
   }
 
-  void Compact(ThreadState* thread) {
-    db_->CompactRange(nullptr, nullptr);
-  }
+  void Compact(ThreadState* thread) { db_->CompactRange(nullptr, nullptr); }
 
   void PrintStats(const char* key) {
     std::string stats;
@@ -982,9 +972,9 @@ int main(int argc, char** argv) {
 
   // Choose a location for the test database if none given with --db=<path>
   if (FLAGS_db == nullptr) {
-      leveldb::g_env->GetTestDirectory(&default_db_path);
-      default_db_path += "/dbbench";
-      FLAGS_db = default_db_path.c_str();
+    leveldb::g_env->GetTestDirectory(&default_db_path);
+    default_db_path += "/dbbench";
+    FLAGS_db = default_db_path.c_str();
   }
 
   leveldb::Benchmark benchmark;
diff --git a/db/db_impl.cc b/db/db_impl.cc
index caef2b1..bff2d62 100644
--- a/db/db_impl.cc
+++ b/db/db_impl.cc
@@ -75,7 +75,7 @@ struct DBImpl::CompactionState {
 
   uint64_t total_bytes;
 
-  Output* current_output() { return &outputs[outputs.size()-1]; }
+  Output* current_output() { return &outputs[outputs.size() - 1]; }
 
   explicit CompactionState(Compaction* c)
       : compaction(c),
@@ -98,10 +98,10 @@ Options SanitizeOptions(const std::string& dbname,
   Options result = src;
   result.comparator = icmp;
   result.filter_policy = (src.filter_policy != nullptr) ? ipolicy : nullptr;
-  ClipToRange(&result.max_open_files,    64 + kNumNonTableCacheFiles, 50000);
-  ClipToRange(&result.write_buffer_size, 64<<10,                      1<<30);
-  ClipToRange(&result.max_file_size,     1<<20,                       1<<30);
-  ClipToRange(&result.block_size,        1<<10,                       4<<20);
+  ClipToRange(&result.max_open_files, 64 + kNumNonTableCacheFiles, 50000);
+  ClipToRange(&result.write_buffer_size, 64 << 10, 1 << 30);
+  ClipToRange(&result.max_file_size, 1 << 20, 1 << 30);
+  ClipToRange(&result.block_size, 1 << 10, 4 << 20);
   if (result.info_log == nullptr) {
     // Open a log file in the same directory as the db
     src.env->CreateDir(dbname);  // In case it does not exist
@@ -268,8 +268,7 @@ void DBImpl::DeleteObsoleteFiles() {
         if (type == kTableFile) {
           table_cache_->Evict(number);
         }
-        Log(options_.info_log, "Delete type=%d #%lld\n",
-            static_cast<int>(type),
+        Log(options_.info_log, "Delete type=%d #%lld\n", static_cast<int>(type),
             static_cast<unsigned long long>(number));
         env_->DeleteFile(dbname_ + "/" + filenames[i]);
       }
@@ -277,7 +276,7 @@ void DBImpl::DeleteObsoleteFiles() {
   }
 }
 
-Status DBImpl::Recover(VersionEdit* edit, bool *save_manifest) {
+Status DBImpl::Recover(VersionEdit* edit, bool* save_manifest) {
   mutex_.AssertHeld();
 
   // Ignore error from CreateDir since the creation of the DB is
@@ -302,8 +301,8 @@ Status DBImpl::Recover(VersionEdit* edit, bool *save_manifest) {
     }
   } else {
     if (options_.error_if_exists) {
-      return Status::InvalidArgument(
-          dbname_, "exists (error_if_exists is true)");
+      return Status::InvalidArgument(dbname_,
+                                     "exists (error_if_exists is true)");
     }
   }
 
@@ -378,8 +377,8 @@ Status DBImpl::RecoverLogFile(uint64_t log_number, bool last_log,
     Status* status;  // null if options_.paranoid_checks==false
     virtual void Corruption(size_t bytes, const Status& s) {
       Log(info_log, "%s%s: dropping %d bytes; %s",
-          (this->status == nullptr ? "(ignoring error) " : ""),
-          fname, static_cast<int>(bytes), s.ToString().c_str());
+          (this->status == nullptr ? "(ignoring error) " : ""), fname,
+          static_cast<int>(bytes), s.ToString().c_str());
       if (this->status != nullptr && this->status->ok()) *this->status = s;
     }
   };
@@ -405,10 +404,9 @@ Status DBImpl::RecoverLogFile(uint64_t log_number, bool last_log,
   // paranoid_checks==false so that corruptions cause entire commits
   // to be skipped instead of propagating bad information (like overly
   // large sequence numbers).
-  log::Reader reader(file, &reporter, true/*checksum*/,
-                     0/*initial_offset*/);
+  log::Reader reader(file, &reporter, true /*checksum*/, 0 /*initial_offset*/);
   Log(options_.info_log, "Recovering log #%llu",
-      (unsigned long long) log_number);
+      (unsigned long long)log_number);
 
   // Read all the records and add to a memtable
   std::string scratch;
@@ -416,11 +414,10 @@ Status DBImpl::RecoverLogFile(uint64_t log_number, bool last_log,
   WriteBatch batch;
   int compactions = 0;
   MemTable* mem = nullptr;
-  while (reader.ReadRecord(&record, &scratch) &&
-         status.ok()) {
+  while (reader.ReadRecord(&record, &scratch) && status.ok()) {
     if (record.size() < 12) {
-      reporter.Corruption(
-          record.size(), Status::Corruption("log record too small"));
+      reporter.Corruption(record.size(),
+                          Status::Corruption("log record too small"));
       continue;
     }
     WriteBatchInternal::SetContents(&batch, record);
@@ -434,9 +431,8 @@ Status DBImpl::RecoverLogFile(uint64_t log_number, bool last_log,
     if (!status.ok()) {
       break;
     }
-    const SequenceNumber last_seq =
-        WriteBatchInternal::Sequence(&batch) +
-        WriteBatchInternal::Count(&batch) - 1;
+    const SequenceNumber last_seq = WriteBatchInternal::Sequence(&batch) +
+                                    WriteBatchInternal::Count(&batch) - 1;
     if (last_seq > *max_sequence) {
       *max_sequence = last_seq;
     }
@@ -500,7 +496,7 @@ Status DBImpl::WriteLevel0Table(MemTable* mem, VersionEdit* edit,
   pending_outputs_.insert(meta.number);
   Iterator* iter = mem->NewIterator();
   Log(options_.info_log, "Level-0 table #%llu: started",
-      (unsigned long long) meta.number);
+      (unsigned long long)meta.number);
 
   Status s;
   {
@@ -510,13 +506,11 @@ Status DBImpl::WriteLevel0Table(MemTable* mem, VersionEdit* edit,
   }
 
   Log(options_.info_log, "Level-0 table #%llu: %lld bytes %s",
-      (unsigned long long) meta.number,
-      (unsigned long long) meta.file_size,
+      (unsigned long long)meta.number, (unsigned long long)meta.file_size,
       s.ToString().c_str());
   delete iter;
   pending_outputs_.erase(meta.number);
 
-
   // Note that if file_size is zero, the file has been deleted and
   // should not be added to the manifest.
   int level = 0;
@@ -526,8 +520,8 @@ Status DBImpl::WriteLevel0Table(MemTable* mem, VersionEdit* edit,
     if (base != nullptr) {
       level = base->PickLevelForMemTableOutput(min_user_key, max_user_key);
     }
-    edit->AddFile(level, meta.number, meta.file_size,
-                  meta.smallest, meta.largest);
+    edit->AddFile(level, meta.number, meta.file_size, meta.smallest,
+                  meta.largest);
   }
 
   CompactionStats stats;
@@ -658,8 +652,7 @@ void DBImpl::MaybeScheduleCompaction() {
     // DB is being deleted; no more background compactions
   } else if (!bg_error_.ok()) {
     // Already got an error; no more changes
-  } else if (imm_ == nullptr &&
-             manual_compaction_ == nullptr &&
+  } else if (imm_ == nullptr && manual_compaction_ == nullptr &&
              !versions_->NeedsCompaction()) {
     // No work to be done
   } else {
@@ -711,8 +704,7 @@ void DBImpl::BackgroundCompaction() {
     }
     Log(options_.info_log,
         "Manual compaction at level-%d from %s .. %s; will stop at %s\n",
-        m->level,
-        (m->begin ? m->begin->DebugString().c_str() : "(begin)"),
+        m->level, (m->begin ? m->begin->DebugString().c_str() : "(begin)"),
         (m->end ? m->end->DebugString().c_str() : "(end)"),
         (m->done ? "(end)" : manual_end.DebugString().c_str()));
   } else {
@@ -727,19 +719,17 @@ void DBImpl::BackgroundCompaction() {
     assert(c->num_input_files(0) == 1);
     FileMetaData* f = c->input(0, 0);
     c->edit()->DeleteFile(c->level(), f->number);
-    c->edit()->AddFile(c->level() + 1, f->number, f->file_size,
-                       f->smallest, f->largest);
+    c->edit()->AddFile(c->level() + 1, f->number, f->file_size, f->smallest,
+                       f->largest);
     status = versions_->LogAndApply(c->edit(), &mutex_);
     if (!status.ok()) {
       RecordBackgroundError(status);
     }
     VersionSet::LevelSummaryStorage tmp;
     Log(options_.info_log, "Moved #%lld to level-%d %lld bytes %s: %s\n",
-        static_cast<unsigned long long>(f->number),
-        c->level() + 1,
+        static_cast<unsigned long long>(f->number), c->level() + 1,
         static_cast<unsigned long long>(f->file_size),
-        status.ToString().c_str(),
-        versions_->LevelSummary(&tmp));
+        status.ToString().c_str(), versions_->LevelSummary(&tmp));
   } else {
     CompactionState* compact = new CompactionState(c);
     status = DoCompactionWork(compact);
@@ -757,8 +747,7 @@ void DBImpl::BackgroundCompaction() {
   } else if (shutting_down_.load(std::memory_order_acquire)) {
     // Ignore compaction errors found during shutting down
   } else {
-    Log(options_.info_log,
-        "Compaction error: %s", status.ToString().c_str());
+    Log(options_.info_log, "Compaction error: %s", status.ToString().c_str());
   }
 
   if (is_manual) {
@@ -853,31 +842,25 @@ Status DBImpl::FinishCompactionOutputFile(CompactionState* compact,
 
   if (s.ok() && current_entries > 0) {
     // Verify that the table is usable
-    Iterator* iter = table_cache_->NewIterator(ReadOptions(),
-                                               output_number,
-                                               current_bytes);
+    Iterator* iter =
+        table_cache_->NewIterator(ReadOptions(), output_number, current_bytes);
     s = iter->status();
     delete iter;
     if (s.ok()) {
-      Log(options_.info_log,
-          "Generated table #%llu@%d: %lld keys, %lld bytes",
-          (unsigned long long) output_number,
-          compact->compaction->level(),
-          (unsigned long long) current_entries,
-          (unsigned long long) current_bytes);
+      Log(options_.info_log, "Generated table #%llu@%d: %lld keys, %lld bytes",
+          (unsigned long long)output_number, compact->compaction->level(),
+          (unsigned long long)current_entries,
+          (unsigned long long)current_bytes);
     }
   }
   return s;
 }
 
-
 Status DBImpl::InstallCompactionResults(CompactionState* compact) {
   mutex_.AssertHeld();
-  Log(options_.info_log,  "Compacted %d@%d + %d@%d files => %lld bytes",
-      compact->compaction->num_input_files(0),
-      compact->compaction->level(),
-      compact->compaction->num_input_files(1),
-      compact->compaction->level() + 1,
+  Log(options_.info_log, "Compacted %d@%d + %d@%d files => %lld bytes",
+      compact->compaction->num_input_files(0), compact->compaction->level(),
+      compact->compaction->num_input_files(1), compact->compaction->level() + 1,
       static_cast<long long>(compact->total_bytes));
 
   // Add compaction outputs
@@ -885,9 +868,8 @@ Status DBImpl::InstallCompactionResults(CompactionState* compact) {
   const int level = compact->compaction->level();
   for (size_t i = 0; i < compact->outputs.size(); i++) {
     const CompactionState::Output& out = compact->outputs[i];
-    compact->compaction->edit()->AddFile(
-        level + 1,
-        out.number, out.file_size, out.smallest, out.largest);
+    compact->compaction->edit()->AddFile(level + 1, out.number, out.file_size,
+                                         out.smallest, out.largest);
   }
   return versions_->LogAndApply(compact->compaction->edit(), &mutex_);
 }
@@ -896,9 +878,8 @@ Status DBImpl::DoCompactionWork(CompactionState* compact) {
   const uint64_t start_micros = env_->NowMicros();
   int64_t imm_micros = 0;  // Micros spent doing imm_ compactions
 
-  Log(options_.info_log,  "Compacting %d@%d + %d@%d files",
-      compact->compaction->num_input_files(0),
-      compact->compaction->level(),
+  Log(options_.info_log, "Compacting %d@%d + %d@%d files",
+      compact->compaction->num_input_files(0), compact->compaction->level(),
       compact->compaction->num_input_files(1),
       compact->compaction->level() + 1);
 
@@ -921,7 +902,7 @@ Status DBImpl::DoCompactionWork(CompactionState* compact) {
   std::string current_user_key;
   bool has_current_user_key = false;
   SequenceNumber last_sequence_for_key = kMaxSequenceNumber;
-  for (; input->Valid() && !shutting_down_.load(std::memory_order_acquire); ) {
+  for (; input->Valid() && !shutting_down_.load(std::memory_order_acquire);) {
     // Prioritize immutable compaction work
     if (has_imm_.load(std::memory_order_relaxed)) {
       const uint64_t imm_start = env_->NowMicros();
@@ -953,8 +934,8 @@ Status DBImpl::DoCompactionWork(CompactionState* compact) {
       last_sequence_for_key = kMaxSequenceNumber;
     } else {
       if (!has_current_user_key ||
-          user_comparator()->Compare(ikey.user_key,
-                                     Slice(current_user_key)) != 0) {
+          user_comparator()->Compare(ikey.user_key, Slice(current_user_key)) !=
+              0) {
         // First occurrence of this user key
         current_user_key.assign(ikey.user_key.data(), ikey.user_key.size());
         has_current_user_key = true;
@@ -963,7 +944,7 @@ Status DBImpl::DoCompactionWork(CompactionState* compact) {
 
       if (last_sequence_for_key <= compact->smallest_snapshot) {
         // Hidden by an newer entry for same user key
-        drop = true;    // (A)
+        drop = true;  // (A)
       } else if (ikey.type == kTypeDeletion &&
                  ikey.sequence <= compact->smallest_snapshot &&
                  compact->compaction->IsBaseLevelForKey(ikey.user_key)) {
@@ -1049,8 +1030,7 @@ Status DBImpl::DoCompactionWork(CompactionState* compact) {
     RecordBackgroundError(status);
   }
   VersionSet::LevelSummaryStorage tmp;
-  Log(options_.info_log,
-      "compacted to: %s", versions_->LevelSummary(&tmp));
+  Log(options_.info_log, "compacted to: %s", versions_->LevelSummary(&tmp));
   return status;
 }
 
@@ -1063,7 +1043,7 @@ struct IterState {
   MemTable* const imm GUARDED_BY(mu);
 
   IterState(port::Mutex* mutex, MemTable* mem, MemTable* imm, Version* version)
-      : mu(mutex), version(version), mem(mem), imm(imm) { }
+      : mu(mutex), version(version), mem(mem), imm(imm) {}
 };
 
 static void CleanupIteratorState(void* arg1, void* arg2) {
@@ -1116,8 +1096,7 @@ int64_t DBImpl::TEST_MaxNextLevelOverlappingBytes() {
   return versions_->MaxNextLevelOverlappingBytes();
 }
 
-Status DBImpl::Get(const ReadOptions& options,
-                   const Slice& key,
+Status DBImpl::Get(const ReadOptions& options, const Slice& key,
                    std::string* value) {
   Status s;
   MutexLock l(&mutex_);
@@ -1168,12 +1147,12 @@ Iterator* DBImpl::NewIterator(const ReadOptions& options) {
   SequenceNumber latest_snapshot;
   uint32_t seed;
   Iterator* iter = NewInternalIterator(options, &latest_snapshot, &seed);
-  return NewDBIterator(
-      this, user_comparator(), iter,
-      (options.snapshot != nullptr
-       ? static_cast<const SnapshotImpl*>(options.snapshot)->sequence_number()
-       : latest_snapshot),
-      seed);
+  return NewDBIterator(this, user_comparator(), iter,
+                       (options.snapshot != nullptr
+                            ? static_cast<const SnapshotImpl*>(options.snapshot)
+                                  ->sequence_number()
+                            : latest_snapshot),
+                       seed);
 }
 
 void DBImpl::RecordReadSample(Slice key) {
@@ -1202,9 +1181,9 @@ Status DBImpl::Delete(const WriteOptions& options, const Slice& key) {
   return DB::Delete(options, key);
 }
 
-Status DBImpl::Write(const WriteOptions& options, WriteBatch* my_batch) {
+Status DBImpl::Write(const WriteOptions& options, WriteBatch* updates) {
   Writer w(&mutex_);
-  w.batch = my_batch;
+  w.batch = updates;
   w.sync = options.sync;
   w.done = false;
 
@@ -1218,10 +1197,10 @@ Status DBImpl::Write(const WriteOptions& options, WriteBatch* my_batch) {
   }
 
   // May temporarily unlock and wait.
-  Status status = MakeRoomForWrite(my_batch == nullptr);
+  Status status = MakeRoomForWrite(updates == nullptr);
   uint64_t last_sequence = versions_->LastSequence();
   Writer* last_writer = &w;
-  if (status.ok() && my_batch != nullptr) {  // nullptr batch is for compactions
+  if (status.ok() && updates != nullptr) {  // nullptr batch is for compactions
     WriteBatch* updates = BuildBatchGroup(&last_writer);
     WriteBatchInternal::SetSequence(updates, last_sequence + 1);
     last_sequence += WriteBatchInternal::Count(updates);
@@ -1290,8 +1269,8 @@ WriteBatch* DBImpl::BuildBatchGroup(Writer** last_writer) {
   // original write is small, limit the growth so we do not slow
   // down the small write too much.
   size_t max_size = 1 << 20;
-  if (size <= (128<<10)) {
-    max_size = size + (128<<10);
+  if (size <= (128 << 10)) {
+    max_size = size + (128 << 10);
   }
 
   *last_writer = first;
@@ -1337,9 +1316,8 @@ Status DBImpl::MakeRoomForWrite(bool force) {
       // Yield previous error
       s = bg_error_;
       break;
-    } else if (
-        allow_delay &&
-        versions_->NumLevelFiles(0) >= config::kL0_SlowdownWritesTrigger) {
+    } else if (allow_delay && versions_->NumLevelFiles(0) >=
+                                  config::kL0_SlowdownWritesTrigger) {
       // We are getting close to hitting a hard limit on the number of
       // L0 files.  Rather than delaying a single write by several
       // seconds when we hit the hard limit, start delaying each
@@ -1383,7 +1361,7 @@ Status DBImpl::MakeRoomForWrite(bool force) {
       has_imm_.store(true, std::memory_order_release);
       mem_ = new MemTable(internal_comparator_);
       mem_->Ref();
-      force = false;   // Do not force another compaction if have room
+      force = false;  // Do not force another compaction if have room
       MaybeScheduleCompaction();
     }
   }
@@ -1417,21 +1395,16 @@ bool DBImpl::GetProperty(const Slice& property, std::string* value) {
     snprintf(buf, sizeof(buf),
              "                               Compactions\n"
              "Level  Files Size(MB) Time(sec) Read(MB) Write(MB)\n"
-             "--------------------------------------------------\n"
-             );
+             "--------------------------------------------------\n");
     value->append(buf);
     for (int level = 0; level < config::kNumLevels; level++) {
       int files = versions_->NumLevelFiles(level);
       if (stats_[level].micros > 0 || files > 0) {
-        snprintf(
-            buf, sizeof(buf),
-            "%3d %8d %8.0f %9.0f %8.0f %9.0f\n",
-            level,
-            files,
-            versions_->NumLevelBytes(level) / 1048576.0,
-            stats_[level].micros / 1e6,
-            stats_[level].bytes_read / 1048576.0,
-            stats_[level].bytes_written / 1048576.0);
+        snprintf(buf, sizeof(buf), "%3d %8d %8.0f %9.0f %8.0f %9.0f\n", level,
+                 files, versions_->NumLevelBytes(level) / 1048576.0,
+                 stats_[level].micros / 1e6,
+                 stats_[level].bytes_read / 1048576.0,
+                 stats_[level].bytes_written / 1048576.0);
         value->append(buf);
       }
     }
@@ -1457,9 +1430,7 @@ bool DBImpl::GetProperty(const Slice& property, std::string* value) {
   return false;
 }
 
-void DBImpl::GetApproximateSizes(
-    const Range* range, int n,
-    uint64_t* sizes) {
+void DBImpl::GetApproximateSizes(const Range* range, int n, uint64_t* sizes) {
   // TODO(opt): better implementation
   Version* v;
   {
@@ -1497,10 +1468,9 @@ Status DB::Delete(const WriteOptions& opt, const Slice& key) {
   return Write(opt, &batch);
 }
 
-DB::~DB() { }
+DB::~DB() {}
 
-Status DB::Open(const Options& options, const std::string& dbname,
-                DB** dbptr) {
+Status DB::Open(const Options& options, const std::string& dbname, DB** dbptr) {
   *dbptr = nullptr;
 
   DBImpl* impl = new DBImpl(options, dbname);
@@ -1543,8 +1513,7 @@ Status DB::Open(const Options& options, const std::string& dbname,
   return s;
 }
 
-Snapshot::~Snapshot() {
-}
+Snapshot::~Snapshot() {}
 
 Status DestroyDB(const std::string& dbname, const Options& options) {
   Env* env = options.env;
diff --git a/db/db_impl.h b/db/db_impl.h
index ca00d42..c895952 100644
--- a/db/db_impl.h
+++ b/db/db_impl.h
@@ -35,8 +35,7 @@ class DBImpl : public DB {
   virtual Status Put(const WriteOptions&, const Slice& key, const Slice& value);
   virtual Status Delete(const WriteOptions&, const Slice& key);
   virtual Status Write(const WriteOptions& options, WriteBatch* updates);
-  virtual Status Get(const ReadOptions& options,
-                     const Slice& key,
+  virtual Status Get(const ReadOptions& options, const Slice& key,
                      std::string* value);
   virtual Iterator* NewIterator(const ReadOptions&);
   virtual const Snapshot* GetSnapshot();
@@ -166,9 +165,9 @@ class DBImpl : public DB {
   struct ManualCompaction {
     int level;
     bool done;
-    const InternalKey* begin;   // null means beginning of key range
-    const InternalKey* end;     // null means end of key range
-    InternalKey tmp_storage;    // Used to keep track of compaction progress
+    const InternalKey* begin;  // null means beginning of key range
+    const InternalKey* end;    // null means end of key range
+    InternalKey tmp_storage;   // Used to keep track of compaction progress
   };
   ManualCompaction* manual_compaction_ GUARDED_BY(mutex_);
 
@@ -184,7 +183,7 @@ class DBImpl : public DB {
     int64_t bytes_read;
     int64_t bytes_written;
 
-    CompactionStats() : micros(0), bytes_read(0), bytes_written(0) { }
+    CompactionStats() : micros(0), bytes_read(0), bytes_written(0) {}
 
     void Add(const CompactionStats& c) {
       this->micros += c.micros;
diff --git a/db/db_iter.cc b/db/db_iter.cc
index 4d0f42e..1e5b5e2 100644
--- a/db/db_iter.cc
+++ b/db/db_iter.cc
@@ -4,9 +4,9 @@
 
 #include "db/db_iter.h"
 
-#include "db/filename.h"
 #include "db/db_impl.h"
 #include "db/dbformat.h"
+#include "db/filename.h"
 #include "leveldb/env.h"
 #include "leveldb/iterator.h"
 #include "port/port.h"
@@ -36,17 +36,14 @@ namespace {
 // combines multiple entries for the same userkey found in the DB
 // representation into a single entry while accounting for sequence
 // numbers, deletion markers, overwrites, etc.
-class DBIter: public Iterator {
+class DBIter : public Iterator {
  public:
   // Which direction is the iterator currently moving?
   // (1) When moving forward, the internal iterator is positioned at
   //     the exact entry that yields this->key(), this->value()
   // (2) When moving backwards, the internal iterator is positioned
   //     just before all entries whose user key == this->key().
-  enum Direction {
-    kForward,
-    kReverse
-  };
+  enum Direction { kForward, kReverse };
 
   DBIter(DBImpl* db, const Comparator* cmp, Iterator* iter, SequenceNumber s,
          uint32_t seed)
@@ -57,11 +54,8 @@ class DBIter: public Iterator {
         direction_(kForward),
         valid_(false),
         rnd_(seed),
-        bytes_until_read_sampling_(RandomCompactionPeriod()) {
-  }
-  virtual ~DBIter() {
-    delete iter_;
-  }
+        bytes_until_read_sampling_(RandomCompactionPeriod()) {}
+  virtual ~DBIter() { delete iter_; }
   virtual bool Valid() const { return valid_; }
   virtual Slice key() const {
     assert(valid_);
@@ -105,7 +99,7 @@ class DBIter: public Iterator {
 
   // Picks the number of bytes that can be read until a compaction is scheduled.
   size_t RandomCompactionPeriod() {
-    return rnd_.Uniform(2*config::kReadBytesPeriod);
+    return rnd_.Uniform(2 * config::kReadBytesPeriod);
   }
 
   DBImpl* db_;
@@ -114,8 +108,8 @@ class DBIter: public Iterator {
   SequenceNumber const sequence_;
 
   Status status_;
-  std::string saved_key_;     // == current key when direction_==kReverse
-  std::string saved_value_;   // == current raw value when direction_==kReverse
+  std::string saved_key_;    // == current key when direction_==kReverse
+  std::string saved_value_;  // == current raw value when direction_==kReverse
   Direction direction_;
   bool valid_;
 
@@ -221,8 +215,8 @@ void DBIter::Prev() {
         ClearSavedValue();
         return;
       }
-      if (user_comparator_->Compare(ExtractUserKey(iter_->key()),
-                                    saved_key_) < 0) {
+      if (user_comparator_->Compare(ExtractUserKey(iter_->key()), saved_key_) <
+          0) {
         break;
       }
     }
@@ -278,8 +272,8 @@ void DBIter::Seek(const Slice& target) {
   direction_ = kForward;
   ClearSavedValue();
   saved_key_.clear();
-  AppendInternalKey(
-      &saved_key_, ParsedInternalKey(target, sequence_, kValueTypeForSeek));
+  AppendInternalKey(&saved_key_,
+                    ParsedInternalKey(target, sequence_, kValueTypeForSeek));
   iter_->Seek(saved_key_);
   if (iter_->Valid()) {
     FindNextUserEntry(false, &saved_key_ /* temporary storage */);
@@ -308,12 +302,9 @@ void DBIter::SeekToLast() {
 
 }  // anonymous namespace
 
-Iterator* NewDBIterator(
-    DBImpl* db,
-    const Comparator* user_key_comparator,
-    Iterator* internal_iter,
-    SequenceNumber sequence,
-    uint32_t seed) {
+Iterator* NewDBIterator(DBImpl* db, const Comparator* user_key_comparator,
+                        Iterator* internal_iter, SequenceNumber sequence,
+                        uint32_t seed) {
   return new DBIter(db, user_key_comparator, internal_iter, sequence, seed);
 }
 
diff --git a/db/db_iter.h b/db/db_iter.h
index 262840e..fd93e91 100644
--- a/db/db_iter.h
+++ b/db/db_iter.h
@@ -6,8 +6,9 @@
 #define STORAGE_LEVELDB_DB_DB_ITER_H_
 
 #include <stdint.h>
-#include "leveldb/db.h"
+
 #include "db/dbformat.h"
+#include "leveldb/db.h"
 
 namespace leveldb {
 
@@ -16,10 +17,8 @@ class DBImpl;
 // Return a new iterator that converts internal keys (yielded by
 // "*internal_iter") that were live at the specified "sequence" number
 // into appropriate user keys.
-Iterator* NewDBIterator(DBImpl* db,
-                        const Comparator* user_key_comparator,
-                        Iterator* internal_iter,
-                        SequenceNumber sequence,
+Iterator* NewDBIterator(DBImpl* db, const Comparator* user_key_comparator,
+                        Iterator* internal_iter, SequenceNumber sequence,
                         uint32_t seed);
 
 }  // namespace leveldb
diff --git a/db/db_test.cc b/db/db_test.cc
index 3ab4aee..4343216 100644
--- a/db/db_test.cc
+++ b/db/db_test.cc
@@ -2,17 +2,18 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file. See the AUTHORS file for names of contributors.
 
+#include "leveldb/db.h"
+
 #include <atomic>
 #include <string>
 
-#include "leveldb/db.h"
-#include "leveldb/filter_policy.h"
 #include "db/db_impl.h"
 #include "db/filename.h"
 #include "db/version_set.h"
 #include "db/write_batch_internal.h"
 #include "leveldb/cache.h"
 #include "leveldb/env.h"
+#include "leveldb/filter_policy.h"
 #include "leveldb/table.h"
 #include "port/port.h"
 #include "port/thread_annotations.h"
@@ -31,9 +32,9 @@ static std::string RandomString(Random* rnd, int len) {
 }
 
 static std::string RandomKey(Random* rnd) {
-  int len = (rnd->OneIn(3)
-             ? 1                // Short sometimes to encourage collisions
-             : (rnd->OneIn(100) ? rnd->Skewed(10) : rnd->Uniform(10)));
+  int len =
+      (rnd->OneIn(3) ? 1  // Short sometimes to encourage collisions
+                     : (rnd->OneIn(100) ? rnd->Skewed(10) : rnd->Uniform(10)));
   return test::RandomKey(rnd, len);
 }
 
@@ -42,11 +43,10 @@ class AtomicCounter {
  private:
   port::Mutex mu_;
   int count_ GUARDED_BY(mu_);
+
  public:
-  AtomicCounter() : count_(0) { }
-  void Increment() {
-    IncrementBy(1);
-  }
+  AtomicCounter() : count_(0) {}
+  void Increment() { IncrementBy(1); }
   void IncrementBy(int count) LOCKS_EXCLUDED(mu_) {
     MutexLock l(&mu_);
     count_ += count;
@@ -120,15 +120,15 @@ class SpecialEnv : public EnvWrapper {
   bool count_random_reads_;
   AtomicCounter random_read_counter_;
 
-  explicit SpecialEnv(Env* base) : EnvWrapper(base),
-    delay_data_sync_(false),
-    data_sync_error_(false),
-    no_space_(false),
-    non_writable_(false),
-    manifest_sync_error_(false),
-    manifest_write_error_(false),
-    count_random_reads_(false) {
-  }
+  explicit SpecialEnv(Env* base)
+      : EnvWrapper(base),
+        delay_data_sync_(false),
+        data_sync_error_(false),
+        no_space_(false),
+        non_writable_(false),
+        manifest_sync_error_(false),
+        manifest_write_error_(false),
+        count_random_reads_(false) {}
 
   Status NewWritableFile(const std::string& f, WritableFile** r) {
     class DataFile : public WritableFile {
@@ -137,10 +137,7 @@ class SpecialEnv : public EnvWrapper {
       WritableFile* const base_;
 
      public:
-      DataFile(SpecialEnv* env, WritableFile* base)
-          : env_(env),
-            base_(base) {
-      }
+      DataFile(SpecialEnv* env, WritableFile* base) : env_(env), base_(base) {}
       ~DataFile() { delete base_; }
       Status Append(const Slice& data) {
         if (env_->no_space_.load(std::memory_order_acquire)) {
@@ -166,8 +163,9 @@ class SpecialEnv : public EnvWrapper {
      private:
       SpecialEnv* env_;
       WritableFile* base_;
+
      public:
-      ManifestFile(SpecialEnv* env, WritableFile* b) : env_(env), base_(b) { }
+      ManifestFile(SpecialEnv* env, WritableFile* b) : env_(env), base_(b) {}
       ~ManifestFile() { delete base_; }
       Status Append(const Slice& data) {
         if (env_->manifest_write_error_.load(std::memory_order_acquire)) {
@@ -208,10 +206,10 @@ class SpecialEnv : public EnvWrapper {
      private:
       RandomAccessFile* target_;
       AtomicCounter* counter_;
+
      public:
       CountingFile(RandomAccessFile* target, AtomicCounter* counter)
-          : target_(target), counter_(counter) {
-      }
+          : target_(target), counter_(counter) {}
       virtual ~CountingFile() { delete target_; }
       virtual Status Read(uint64_t offset, size_t n, Slice* result,
                           char* scratch) const {
@@ -233,13 +231,7 @@ class DBTest {
   const FilterPolicy* filter_policy_;
 
   // Sequence of option configurations to try
-  enum OptionConfig {
-    kDefault,
-    kReuse,
-    kFilter,
-    kUncompressed,
-    kEnd
-  };
+  enum OptionConfig { kDefault, kReuse, kFilter, kUncompressed, kEnd };
   int option_config_;
 
  public:
@@ -249,8 +241,7 @@ class DBTest {
 
   Options last_options_;
 
-  DBTest() : option_config_(kDefault),
-             env_(new SpecialEnv(Env::Default())) {
+  DBTest() : option_config_(kDefault), env_(new SpecialEnv(Env::Default())) {
     filter_policy_ = NewBloomFilterPolicy(10);
     dbname_ = test::TmpDir() + "/db_test";
     DestroyDB(dbname_, Options());
@@ -297,13 +288,9 @@ class DBTest {
     return options;
   }
 
-  DBImpl* dbfull() {
-    return reinterpret_cast<DBImpl*>(db_);
-  }
+  DBImpl* dbfull() { return reinterpret_cast<DBImpl*>(db_); }
 
-  void Reopen(Options* options = nullptr) {
-    ASSERT_OK(TryReopen(options));
-  }
+  void Reopen(Options* options = nullptr) { ASSERT_OK(TryReopen(options)); }
 
   void Close() {
     delete db_;
@@ -336,9 +323,7 @@ class DBTest {
     return db_->Put(WriteOptions(), k, v);
   }
 
-  Status Delete(const std::string& k) {
-    return db_->Delete(WriteOptions(), k);
-  }
+  Status Delete(const std::string& k) { return db_->Delete(WriteOptions(), k); }
 
   std::string Get(const std::string& k, const Snapshot* snapshot = nullptr) {
     ReadOptions options;
@@ -424,9 +409,8 @@ class DBTest {
 
   int NumTableFilesAtLevel(int level) {
     std::string property;
-    ASSERT_TRUE(
-        db_->GetProperty("leveldb.num-files-at-level" + NumberToString(level),
-                         &property));
+    ASSERT_TRUE(db_->GetProperty(
+        "leveldb.num-files-at-level" + NumberToString(level), &property));
     return std::stoi(property);
   }
 
@@ -491,9 +475,9 @@ class DBTest {
 
   void DumpFileCounts(const char* label) {
     fprintf(stderr, "---\n%s:\n", label);
-    fprintf(stderr, "maxoverlap: %lld\n",
-            static_cast<long long>(
-                dbfull()->TEST_MaxNextLevelOverlappingBytes()));
+    fprintf(
+        stderr, "maxoverlap: %lld\n",
+        static_cast<long long>(dbfull()->TEST_MaxNextLevelOverlappingBytes()));
     for (int level = 0; level < config::kNumLevels; level++) {
       int num = NumTableFilesAtLevel(level);
       if (num > 0) {
@@ -612,8 +596,8 @@ TEST(DBTest, GetFromImmutableLayer) {
 
     // Block sync calls.
     env_->delay_data_sync_.store(true, std::memory_order_release);
-    Put("k1", std::string(100000, 'x'));             // Fill memtable.
-    Put("k2", std::string(100000, 'y'));             // Trigger compaction.
+    Put("k1", std::string(100000, 'x'));  // Fill memtable.
+    Put("k2", std::string(100000, 'y'));  // Trigger compaction.
     ASSERT_EQ("v1", Get("foo"));
     // Release sync calls.
     env_->delay_data_sync_.store(false, std::memory_order_release);
@@ -635,7 +619,7 @@ TEST(DBTest, GetMemUsage) {
     ASSERT_TRUE(db_->GetProperty("leveldb.approximate-memory-usage", &val));
     int mem_usage = std::stoi(val);
     ASSERT_GT(mem_usage, 0);
-    ASSERT_LT(mem_usage, 5*1024*1024);
+    ASSERT_LT(mem_usage, 5 * 1024 * 1024);
   } while (ChangeOptions());
 }
 
@@ -760,8 +744,7 @@ TEST(DBTest, GetEncountersEmptyLevel) {
 
     // Step 1: First place sstables in levels 0 and 2
     int compaction_count = 0;
-    while (NumTableFilesAtLevel(0) == 0 ||
-           NumTableFilesAtLevel(2) == 0) {
+    while (NumTableFilesAtLevel(0) == 0 || NumTableFilesAtLevel(2) == 0) {
       ASSERT_LE(compaction_count, 100) << "could not fill levels 0 and 2";
       compaction_count++;
       Put("a", "begin");
@@ -898,10 +881,10 @@ TEST(DBTest, IterMulti) {
   ASSERT_EQ(IterStatus(iter), "b->vb");
 
   // Make sure iter stays at snapshot
-  ASSERT_OK(Put("a",  "va2"));
+  ASSERT_OK(Put("a", "va2"));
   ASSERT_OK(Put("a2", "va3"));
-  ASSERT_OK(Put("b",  "vb2"));
-  ASSERT_OK(Put("c",  "vc2"));
+  ASSERT_OK(Put("b", "vb2"));
+  ASSERT_OK(Put("c", "vc2"));
   ASSERT_OK(Delete("b"));
   iter->SeekToFirst();
   ASSERT_EQ(IterStatus(iter), "a->va");
@@ -1092,7 +1075,7 @@ TEST(DBTest, RecoverWithLargeLog) {
 
 TEST(DBTest, CompactionsGenerateMultipleFiles) {
   Options options = CurrentOptions();
-  options.write_buffer_size = 100000000;        // Large write buffer
+  options.write_buffer_size = 100000000;  // Large write buffer
   Reopen(&options);
 
   Random rnd(301);
@@ -1161,26 +1144,25 @@ TEST(DBTest, SparseMerge) {
   dbfull()->TEST_CompactRange(0, nullptr, nullptr);
 
   // Make sparse update
-  Put("A",    "va2");
+  Put("A", "va2");
   Put("B100", "bvalue2");
-  Put("C",    "vc2");
+  Put("C", "vc2");
   dbfull()->TEST_CompactMemTable();
 
   // Compactions should not cause us to create a situation where
   // a file overlaps too much data at the next level.
-  ASSERT_LE(dbfull()->TEST_MaxNextLevelOverlappingBytes(), 20*1048576);
+  ASSERT_LE(dbfull()->TEST_MaxNextLevelOverlappingBytes(), 20 * 1048576);
   dbfull()->TEST_CompactRange(0, nullptr, nullptr);
-  ASSERT_LE(dbfull()->TEST_MaxNextLevelOverlappingBytes(), 20*1048576);
+  ASSERT_LE(dbfull()->TEST_MaxNextLevelOverlappingBytes(), 20 * 1048576);
   dbfull()->TEST_CompactRange(1, nullptr, nullptr);
-  ASSERT_LE(dbfull()->TEST_MaxNextLevelOverlappingBytes(), 20*1048576);
+  ASSERT_LE(dbfull()->TEST_MaxNextLevelOverlappingBytes(), 20 * 1048576);
 }
 
 static bool Between(uint64_t val, uint64_t low, uint64_t high) {
   bool result = (val >= low) && (val <= high);
   if (!result) {
     fprintf(stderr, "Value %llu is not in range [%llu, %llu]\n",
-            (unsigned long long)(val),
-            (unsigned long long)(low),
+            (unsigned long long)(val), (unsigned long long)(low),
             (unsigned long long)(high));
   }
   return result;
@@ -1189,7 +1171,7 @@ static bool Between(uint64_t val, uint64_t low, uint64_t high) {
 TEST(DBTest, ApproximateSizes) {
   do {
     Options options = CurrentOptions();
-    options.write_buffer_size = 100000000;        // Large write buffer
+    options.write_buffer_size = 100000000;  // Large write buffer
     options.compression = kNoCompression;
     DestroyAndReopen();
 
@@ -1224,12 +1206,13 @@ TEST(DBTest, ApproximateSizes) {
 
       for (int compact_start = 0; compact_start < N; compact_start += 10) {
         for (int i = 0; i < N; i += 10) {
-          ASSERT_TRUE(Between(Size("", Key(i)), S1*i, S2*i));
-          ASSERT_TRUE(Between(Size("", Key(i)+".suffix"), S1*(i+1), S2*(i+1)));
-          ASSERT_TRUE(Between(Size(Key(i), Key(i+10)), S1*10, S2*10));
+          ASSERT_TRUE(Between(Size("", Key(i)), S1 * i, S2 * i));
+          ASSERT_TRUE(Between(Size("", Key(i) + ".suffix"), S1 * (i + 1),
+                              S2 * (i + 1)));
+          ASSERT_TRUE(Between(Size(Key(i), Key(i + 10)), S1 * 10, S2 * 10));
         }
-        ASSERT_TRUE(Between(Size("", Key(50)), S1*50, S2*50));
-        ASSERT_TRUE(Between(Size("", Key(50)+".suffix"), S1*50, S2*50));
+        ASSERT_TRUE(Between(Size("", Key(50)), S1 * 50, S2 * 50));
+        ASSERT_TRUE(Between(Size("", Key(50) + ".suffix"), S1 * 50, S2 * 50));
 
         std::string cstart_str = Key(compact_start);
         std::string cend_str = Key(compact_start + 9);
@@ -1348,7 +1331,7 @@ TEST(DBTest, HiddenValuesAreRemoved) {
     Put("pastfoo", "v");
     const Snapshot* snapshot = db_->GetSnapshot();
     Put("foo", "tiny");
-    Put("pastfoo2", "v2");        // Advance sequence number one more
+    Put("pastfoo2", "v2");  // Advance sequence number one more
 
     ASSERT_OK(dbfull()->TEST_CompactMemTable());
     ASSERT_GT(NumTableFilesAtLevel(0), 0);
@@ -1373,14 +1356,14 @@ TEST(DBTest, DeletionMarkers1) {
   Put("foo", "v1");
   ASSERT_OK(dbfull()->TEST_CompactMemTable());
   const int last = config::kMaxMemCompactLevel;
-  ASSERT_EQ(NumTableFilesAtLevel(last), 1);   // foo => v1 is now in last level
+  ASSERT_EQ(NumTableFilesAtLevel(last), 1);  // foo => v1 is now in last level
 
   // Place a table at level last-1 to prevent merging with preceding mutation
   Put("a", "begin");
   Put("z", "end");
   dbfull()->TEST_CompactMemTable();
   ASSERT_EQ(NumTableFilesAtLevel(last), 1);
-  ASSERT_EQ(NumTableFilesAtLevel(last-1), 1);
+  ASSERT_EQ(NumTableFilesAtLevel(last - 1), 1);
 
   Delete("foo");
   Put("foo", "v2");
@@ -1388,11 +1371,11 @@ TEST(DBTest, DeletionMarkers1) {
   ASSERT_OK(dbfull()->TEST_CompactMemTable());  // Moves to level last-2
   ASSERT_EQ(AllEntriesFor("foo"), "[ v2, DEL, v1 ]");
   Slice z("z");
-  dbfull()->TEST_CompactRange(last-2, nullptr, &z);
+  dbfull()->TEST_CompactRange(last - 2, nullptr, &z);
   // DEL eliminated, but v1 remains because we aren't compacting that level
   // (DEL can be eliminated because v2 hides v1).
   ASSERT_EQ(AllEntriesFor("foo"), "[ v2, v1 ]");
-  dbfull()->TEST_CompactRange(last-1, nullptr, nullptr);
+  dbfull()->TEST_CompactRange(last - 1, nullptr, nullptr);
   // Merging last-1 w/ last, so we are the base level for "foo", so
   // DEL is removed.  (as is v1).
   ASSERT_EQ(AllEntriesFor("foo"), "[ v2 ]");
@@ -1402,23 +1385,23 @@ TEST(DBTest, DeletionMarkers2) {
   Put("foo", "v1");
   ASSERT_OK(dbfull()->TEST_CompactMemTable());
   const int last = config::kMaxMemCompactLevel;
-  ASSERT_EQ(NumTableFilesAtLevel(last), 1);   // foo => v1 is now in last level
+  ASSERT_EQ(NumTableFilesAtLevel(last), 1);  // foo => v1 is now in last level
 
   // Place a table at level last-1 to prevent merging with preceding mutation
   Put("a", "begin");
   Put("z", "end");
   dbfull()->TEST_CompactMemTable();
   ASSERT_EQ(NumTableFilesAtLevel(last), 1);
-  ASSERT_EQ(NumTableFilesAtLevel(last-1), 1);
+  ASSERT_EQ(NumTableFilesAtLevel(last - 1), 1);
 
   Delete("foo");
   ASSERT_EQ(AllEntriesFor("foo"), "[ DEL, v1 ]");
   ASSERT_OK(dbfull()->TEST_CompactMemTable());  // Moves to level last-2
   ASSERT_EQ(AllEntriesFor("foo"), "[ DEL, v1 ]");
-  dbfull()->TEST_CompactRange(last-2, nullptr, nullptr);
+  dbfull()->TEST_CompactRange(last - 2, nullptr, nullptr);
   // DEL kept: "last" file overlaps
   ASSERT_EQ(AllEntriesFor("foo"), "[ DEL, v1 ]");
-  dbfull()->TEST_CompactRange(last-1, nullptr, nullptr);
+  dbfull()->TEST_CompactRange(last - 1, nullptr, nullptr);
   // Merging last-1 w/ last, so we are the base level for "foo", so
   // DEL is removed.  (as is v1).
   ASSERT_EQ(AllEntriesFor("foo"), "[ ]");
@@ -1428,7 +1411,8 @@ TEST(DBTest, OverlapInLevel0) {
   do {
     ASSERT_EQ(config::kMaxMemCompactLevel, 2) << "Fix test to match config";
 
-    // Fill levels 1 and 2 to disable the pushing of new memtables to levels > 0.
+    // Fill levels 1 and 2 to disable the pushing of new memtables to levels >
+    // 0.
     ASSERT_OK(Put("100", "v100"));
     ASSERT_OK(Put("999", "v999"));
     dbfull()->TEST_CompactMemTable();
@@ -1548,16 +1532,17 @@ TEST(DBTest, CustomComparator) {
       return ToNumber(a) - ToNumber(b);
     }
     virtual void FindShortestSeparator(std::string* s, const Slice& l) const {
-      ToNumber(*s);     // Check format
-      ToNumber(l);      // Check format
+      ToNumber(*s);  // Check format
+      ToNumber(l);   // Check format
     }
     virtual void FindShortSuccessor(std::string* key) const {
-      ToNumber(*key);   // Check format
+      ToNumber(*key);  // Check format
     }
+
    private:
     static int ToNumber(const Slice& x) {
       // Check that there are no extra characters.
-      ASSERT_TRUE(x.size() >= 2 && x[0] == '[' && x[x.size()-1] == ']')
+      ASSERT_TRUE(x.size() >= 2 && x[0] == '[' && x[x.size() - 1] == ']')
           << EscapeString(x);
       int val;
       char ignored;
@@ -1570,7 +1555,7 @@ TEST(DBTest, CustomComparator) {
   Options new_options = CurrentOptions();
   new_options.create_if_missing = true;
   new_options.comparator = &cmp;
-  new_options.filter_policy = nullptr;     // Cannot use bloom filters
+  new_options.filter_policy = nullptr;   // Cannot use bloom filters
   new_options.write_buffer_size = 1000;  // Compact more often
   DestroyAndReopen(&new_options);
   ASSERT_OK(Put("[10]", "ten"));
@@ -1588,7 +1573,7 @@ TEST(DBTest, CustomComparator) {
   for (int run = 0; run < 2; run++) {
     for (int i = 0; i < 1000; i++) {
       char buf[100];
-      snprintf(buf, sizeof(buf), "[%d]", i*10);
+      snprintf(buf, sizeof(buf), "[%d]", i * 10);
       ASSERT_OK(Put(buf, buf));
     }
     Compact("[0]", "[1000000]");
@@ -1739,7 +1724,7 @@ TEST(DBTest, NoSpace) {
   // Force out-of-space errors.
   env_->no_space_.store(true, std::memory_order_release);
   for (int i = 0; i < 10; i++) {
-    for (int level = 0; level < config::kNumLevels-1; level++) {
+    for (int level = 0; level < config::kNumLevels - 1; level++) {
       dbfull()->TEST_CompactRange(level, nullptr, nullptr);
     }
   }
@@ -1809,9 +1794,8 @@ TEST(DBTest, ManifestWriteError) {
   // We iterate twice.  In the second iteration, everything is the
   // same except the log record never makes it to the MANIFEST file.
   for (int iter = 0; iter < 2; iter++) {
-    std::atomic<bool>* error_type = (iter == 0)
-        ? &env_->manifest_sync_error_
-        : &env_->manifest_write_error_;
+    std::atomic<bool>* error_type = (iter == 0) ? &env_->manifest_sync_error_
+                                                : &env_->manifest_write_error_;
 
     // Insert foo=>bar mapping
     Options options = CurrentOptions();
@@ -1826,7 +1810,7 @@ TEST(DBTest, ManifestWriteError) {
     dbfull()->TEST_CompactMemTable();
     ASSERT_EQ("bar", Get("foo"));
     const int last = config::kMaxMemCompactLevel;
-    ASSERT_EQ(NumTableFilesAtLevel(last), 1);   // foo=>bar is now in last level
+    ASSERT_EQ(NumTableFilesAtLevel(last), 1);  // foo=>bar is now in last level
 
     // Merging compaction (will fail)
     error_type->store(true, std::memory_order_release);
@@ -1854,8 +1838,7 @@ TEST(DBTest, MissingSSTFile) {
   options.paranoid_checks = true;
   Status s = TryReopen(&options);
   ASSERT_TRUE(!s.ok());
-  ASSERT_TRUE(s.ToString().find("issing") != std::string::npos)
-      << s.ToString();
+  ASSERT_TRUE(s.ToString().find("issing") != std::string::npos) << s.ToString();
 }
 
 TEST(DBTest, StillReadSST) {
@@ -1915,7 +1898,7 @@ TEST(DBTest, BloomFilter) {
   int reads = env_->random_read_counter_.Read();
   fprintf(stderr, "%d present => %d reads\n", N, reads);
   ASSERT_GE(reads, N);
-  ASSERT_LE(reads, N + 2*N/100);
+  ASSERT_LE(reads, N + 2 * N / 100);
 
   // Lookup present keys.  Should rarely read from either sstable.
   env_->random_read_counter_.Reset();
@@ -1924,7 +1907,7 @@ TEST(DBTest, BloomFilter) {
   }
   reads = env_->random_read_counter_.Read();
   fprintf(stderr, "%d missing => %d reads\n", N, reads);
-  ASSERT_LE(reads, 3*N/100);
+  ASSERT_LE(reads, 3 * N / 100);
 
   env_->delay_data_sync_.store(false, std::memory_order_release);
   Close();
@@ -1970,8 +1953,8 @@ static void MTThreadBody(void* arg) {
     if (rnd.OneIn(2)) {
       // Write values of the form <key, my id, counter>.
       // We add some padding for force compactions.
-      snprintf(valbuf, sizeof(valbuf), "%d.%d.%-1000d",
-               key, id, static_cast<int>(counter));
+      snprintf(valbuf, sizeof(valbuf), "%d.%d.%-1000d", key, id,
+               static_cast<int>(counter));
       ASSERT_OK(db->Put(WriteOptions(), Slice(keybuf), Slice(valbuf)));
     } else {
       // Read a value and verify that it matches the pattern written above.
@@ -2033,24 +2016,24 @@ namespace {
 typedef std::map<std::string, std::string> KVMap;
 }
 
-class ModelDB: public DB {
+class ModelDB : public DB {
  public:
   class ModelSnapshot : public Snapshot {
    public:
     KVMap map_;
   };
 
-  explicit ModelDB(const Options& options): options_(options) { }
-  ~ModelDB() { }
+  explicit ModelDB(const Options& options) : options_(options) {}
+  ~ModelDB() {}
   virtual Status Put(const WriteOptions& o, const Slice& k, const Slice& v) {
     return DB::Put(o, k, v);
   }
   virtual Status Delete(const WriteOptions& o, const Slice& key) {
     return DB::Delete(o, key);
   }
-  virtual Status Get(const ReadOptions& options,
-                     const Slice& key, std::string* value) {
-    assert(false);      // Not implemented
+  virtual Status Get(const ReadOptions& options, const Slice& key,
+                     std::string* value) {
+    assert(false);  // Not implemented
     return Status::NotFound(key);
   }
   virtual Iterator* NewIterator(const ReadOptions& options) {
@@ -2080,9 +2063,7 @@ class ModelDB: public DB {
       virtual void Put(const Slice& key, const Slice& value) {
         (*map_)[key.ToString()] = value.ToString();
       }
-      virtual void Delete(const Slice& key) {
-        map_->erase(key.ToString());
-      }
+      virtual void Delete(const Slice& key) { map_->erase(key.ToString()); }
     };
     Handler handler;
     handler.map_ = &map_;
@@ -2097,15 +2078,13 @@ class ModelDB: public DB {
       sizes[i] = 0;
     }
   }
-  virtual void CompactRange(const Slice* start, const Slice* end) {
-  }
+  virtual void CompactRange(const Slice* start, const Slice* end) {}
 
  private:
-  class ModelIter: public Iterator {
+  class ModelIter : public Iterator {
    public:
     ModelIter(const KVMap* map, bool owned)
-        : map_(map), owned_(owned), iter_(map_->end()) {
-    }
+        : map_(map), owned_(owned), iter_(map_->end()) {}
     ~ModelIter() {
       if (owned_) delete map_;
     }
@@ -2136,9 +2115,7 @@ class ModelDB: public DB {
   KVMap map_;
 };
 
-static bool CompareIterators(int step,
-                             DB* model,
-                             DB* db,
+static bool CompareIterators(int step, DB* model, DB* db,
                              const Snapshot* model_snap,
                              const Snapshot* db_snap) {
   ReadOptions options;
@@ -2149,12 +2126,10 @@ static bool CompareIterators(int step,
   bool ok = true;
   int count = 0;
   for (miter->SeekToFirst(), dbiter->SeekToFirst();
-       ok && miter->Valid() && dbiter->Valid();
-       miter->Next(), dbiter->Next()) {
+       ok && miter->Valid() && dbiter->Valid(); miter->Next(), dbiter->Next()) {
     count++;
     if (miter->key().compare(dbiter->key()) != 0) {
-      fprintf(stderr, "step %d: Key mismatch: '%s' vs. '%s'\n",
-              step,
+      fprintf(stderr, "step %d: Key mismatch: '%s' vs. '%s'\n", step,
               EscapeString(miter->key()).c_str(),
               EscapeString(dbiter->key()).c_str());
       ok = false;
@@ -2163,8 +2138,7 @@ static bool CompareIterators(int step,
 
     if (miter->value().compare(dbiter->value()) != 0) {
       fprintf(stderr, "step %d: Value mismatch for key '%s': '%s' vs. '%s'\n",
-              step,
-              EscapeString(miter->key()).c_str(),
+              step, EscapeString(miter->key()).c_str(),
               EscapeString(miter->value()).c_str(),
               EscapeString(miter->value()).c_str());
       ok = false;
@@ -2198,22 +2172,19 @@ TEST(DBTest, Randomized) {
       }
       // TODO(sanjay): Test Get() works
       int p = rnd.Uniform(100);
-      if (p < 45) {                               // Put
+      if (p < 45) {  // Put
         k = RandomKey(&rnd);
-        v = RandomString(&rnd,
-                         rnd.OneIn(20)
-                         ? 100 + rnd.Uniform(100)
-                         : rnd.Uniform(8));
+        v = RandomString(
+            &rnd, rnd.OneIn(20) ? 100 + rnd.Uniform(100) : rnd.Uniform(8));
         ASSERT_OK(model.Put(WriteOptions(), k, v));
         ASSERT_OK(db_->Put(WriteOptions(), k, v));
 
-      } else if (p < 90) {                        // Delete
+      } else if (p < 90) {  // Delete
         k = RandomKey(&rnd);
         ASSERT_OK(model.Delete(WriteOptions(), k));
         ASSERT_OK(db_->Delete(WriteOptions(), k));
 
-
-      } else {                                    // Multi-element batch
+      } else {  // Multi-element batch
         WriteBatch b;
         const int num = rnd.Uniform(8);
         for (int i = 0; i < num; i++) {
@@ -2288,8 +2259,8 @@ void BM_LogAndApply(int iters, int num_base_files) {
   VersionEdit vbase;
   uint64_t fnum = 1;
   for (int i = 0; i < num_base_files; i++) {
-    InternalKey start(MakeKey(2*fnum), 1, kTypeValue);
-    InternalKey limit(MakeKey(2*fnum+1), 1, kTypeDeletion);
+    InternalKey start(MakeKey(2 * fnum), 1, kTypeValue);
+    InternalKey limit(MakeKey(2 * fnum + 1), 1, kTypeDeletion);
     vbase.AddFile(2, fnum++, 1 /* file size */, start, limit);
   }
   ASSERT_OK(vset.LogAndApply(&vbase, &mu));
@@ -2299,8 +2270,8 @@ void BM_LogAndApply(int iters, int num_base_files) {
   for (int i = 0; i < iters; i++) {
     VersionEdit vedit;
     vedit.DeleteFile(2, fnum);
-    InternalKey start(MakeKey(2*fnum), 1, kTypeValue);
-    InternalKey limit(MakeKey(2*fnum+1), 1, kTypeDeletion);
+    InternalKey start(MakeKey(2 * fnum), 1, kTypeValue);
+    InternalKey limit(MakeKey(2 * fnum + 1), 1, kTypeDeletion);
     vedit.AddFile(2, fnum++, 1 /* file size */, start, limit);
     vset.LogAndApply(&vedit, &mu);
   }
@@ -2309,8 +2280,8 @@ void BM_LogAndApply(int iters, int num_base_files) {
   char buf[16];
   snprintf(buf, sizeof(buf), "%d", num_base_files);
   fprintf(stderr,
-          "BM_LogAndApply/%-6s   %8d iters : %9u us (%7.0f us / iter)\n",
-          buf, iters, us, ((float)us) / iters);
+          "BM_LogAndApply/%-6s   %8d iters : %9u us (%7.0f us / iter)\n", buf,
+          iters, us, ((float)us) / iters);
 }
 
 }  // namespace leveldb
diff --git a/db/dbformat.cc b/db/dbformat.cc
index 20a7ca4..69e8dc6 100644
--- a/db/dbformat.cc
+++ b/db/dbformat.cc
@@ -2,8 +2,10 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file. See the AUTHORS file for names of contributors.
 
-#include <stdio.h>
 #include "db/dbformat.h"
+
+#include <stdio.h>
+
 #include "port/port.h"
 #include "util/coding.h"
 
@@ -22,8 +24,7 @@ void AppendInternalKey(std::string* result, const ParsedInternalKey& key) {
 
 std::string ParsedInternalKey::DebugString() const {
   char buf[50];
-  snprintf(buf, sizeof(buf), "' @ %llu : %d",
-           (unsigned long long) sequence,
+  snprintf(buf, sizeof(buf), "' @ %llu : %d", (unsigned long long)sequence,
            int(type));
   std::string result = "'";
   result += EscapeString(user_key.ToString());
@@ -65,9 +66,8 @@ int InternalKeyComparator::Compare(const Slice& akey, const Slice& bkey) const {
   return r;
 }
 
-void InternalKeyComparator::FindShortestSeparator(
-      std::string* start,
-      const Slice& limit) const {
+void InternalKeyComparator::FindShortestSeparator(std::string* start,
+                                                  const Slice& limit) const {
   // Attempt to shorten the user portion of the key
   Slice user_start = ExtractUserKey(*start);
   Slice user_limit = ExtractUserKey(limit);
@@ -77,7 +77,8 @@ void InternalKeyComparator::FindShortestSeparator(
       user_comparator_->Compare(user_start, tmp) < 0) {
     // User key has become shorter physically, but larger logically.
     // Tack on the earliest possible number to the shortened user key.
-    PutFixed64(&tmp, PackSequenceAndType(kMaxSequenceNumber,kValueTypeForSeek));
+    PutFixed64(&tmp,
+               PackSequenceAndType(kMaxSequenceNumber, kValueTypeForSeek));
     assert(this->Compare(*start, tmp) < 0);
     assert(this->Compare(tmp, limit) < 0);
     start->swap(tmp);
@@ -92,15 +93,14 @@ void InternalKeyComparator::FindShortSuccessor(std::string* key) const {
       user_comparator_->Compare(user_key, tmp) < 0) {
     // User key has become shorter physically, but larger logically.
     // Tack on the earliest possible number to the shortened user key.
-    PutFixed64(&tmp, PackSequenceAndType(kMaxSequenceNumber,kValueTypeForSeek));
+    PutFixed64(&tmp,
+               PackSequenceAndType(kMaxSequenceNumber, kValueTypeForSeek));
     assert(this->Compare(*key, tmp) < 0);
     key->swap(tmp);
   }
 }
 
-const char* InternalFilterPolicy::Name() const {
-  return user_policy_->Name();
-}
+const char* InternalFilterPolicy::Name() const { return user_policy_->Name(); }
 
 void InternalFilterPolicy::CreateFilter(const Slice* keys, int n,
                                         std::string* dst) const {
diff --git a/db/dbformat.h b/db/dbformat.h
index c4d9575..bdc23b8 100644
--- a/db/dbformat.h
+++ b/db/dbformat.h
@@ -6,6 +6,7 @@
 #define STORAGE_LEVELDB_DB_DBFORMAT_H_
 
 #include <stdio.h>
+
 #include "leveldb/comparator.h"
 #include "leveldb/db.h"
 #include "leveldb/filter_policy.h"
@@ -48,10 +49,7 @@ class InternalKey;
 // Value types encoded as the last component of internal keys.
 // DO NOT CHANGE THESE ENUM VALUES: they are embedded in the on-disk
 // data structures.
-enum ValueType {
-  kTypeDeletion = 0x0,
-  kTypeValue = 0x1
-};
+enum ValueType { kTypeDeletion = 0x0, kTypeValue = 0x1 };
 // kValueTypeForSeek defines the ValueType that should be passed when
 // constructing a ParsedInternalKey object for seeking to a particular
 // sequence number (since we sort sequence numbers in decreasing order
@@ -64,17 +62,16 @@ typedef uint64_t SequenceNumber;
 
 // We leave eight bits empty at the bottom so a type and sequence#
 // can be packed together into 64-bits.
-static const SequenceNumber kMaxSequenceNumber =
-    ((0x1ull << 56) - 1);
+static const SequenceNumber kMaxSequenceNumber = ((0x1ull << 56) - 1);
 
 struct ParsedInternalKey {
   Slice user_key;
   SequenceNumber sequence;
   ValueType type;
 
-  ParsedInternalKey() { }  // Intentionally left uninitialized (for speed)
+  ParsedInternalKey() {}  // Intentionally left uninitialized (for speed)
   ParsedInternalKey(const Slice& u, const SequenceNumber& seq, ValueType t)
-      : user_key(u), sequence(seq), type(t) { }
+      : user_key(u), sequence(seq), type(t) {}
   std::string DebugString() const;
 };
 
@@ -103,13 +100,13 @@ inline Slice ExtractUserKey(const Slice& internal_key) {
 class InternalKeyComparator : public Comparator {
  private:
   const Comparator* user_comparator_;
+
  public:
-  explicit InternalKeyComparator(const Comparator* c) : user_comparator_(c) { }
+  explicit InternalKeyComparator(const Comparator* c) : user_comparator_(c) {}
   virtual const char* Name() const;
   virtual int Compare(const Slice& a, const Slice& b) const;
-  virtual void FindShortestSeparator(
-      std::string* start,
-      const Slice& limit) const;
+  virtual void FindShortestSeparator(std::string* start,
+                                     const Slice& limit) const;
   virtual void FindShortSuccessor(std::string* key) const;
 
   const Comparator* user_comparator() const { return user_comparator_; }
@@ -121,8 +118,9 @@ class InternalKeyComparator : public Comparator {
 class InternalFilterPolicy : public FilterPolicy {
  private:
   const FilterPolicy* const user_policy_;
+
  public:
-  explicit InternalFilterPolicy(const FilterPolicy* p) : user_policy_(p) { }
+  explicit InternalFilterPolicy(const FilterPolicy* p) : user_policy_(p) {}
   virtual const char* Name() const;
   virtual void CreateFilter(const Slice* keys, int n, std::string* dst) const;
   virtual bool KeyMayMatch(const Slice& key, const Slice& filter) const;
@@ -134,8 +132,9 @@ class InternalFilterPolicy : public FilterPolicy {
 class InternalKey {
  private:
   std::string rep_;
+
  public:
-  InternalKey() { }   // Leave rep_ as empty to indicate it is invalid
+  InternalKey() {}  // Leave rep_ as empty to indicate it is invalid
   InternalKey(const Slice& user_key, SequenceNumber s, ValueType t) {
     AppendInternalKey(&rep_, ParsedInternalKey(user_key, s, t));
   }
@@ -158,8 +157,8 @@ class InternalKey {
   std::string DebugString() const;
 };
 
-inline int InternalKeyComparator::Compare(
-    const InternalKey& a, const InternalKey& b) const {
+inline int InternalKeyComparator::Compare(const InternalKey& a,
+                                          const InternalKey& b) const {
   return Compare(a.Encode(), b.Encode());
 }
 
@@ -204,7 +203,7 @@ class LookupKey {
   const char* start_;
   const char* kstart_;
   const char* end_;
-  char space_[200];      // Avoid allocation for short keys
+  char space_[200];  // Avoid allocation for short keys
 
   // No copying allowed
   LookupKey(const LookupKey&);
diff --git a/db/dbformat_test.cc b/db/dbformat_test.cc
index 5d82f5d..87e6aae 100644
--- a/db/dbformat_test.cc
+++ b/db/dbformat_test.cc
@@ -8,8 +8,7 @@
 
 namespace leveldb {
 
-static std::string IKey(const std::string& user_key,
-                        uint64_t seq,
+static std::string IKey(const std::string& user_key, uint64_t seq,
                         ValueType vt) {
   std::string encoded;
   AppendInternalKey(&encoded, ParsedInternalKey(user_key, seq, vt));
@@ -28,9 +27,7 @@ static std::string ShortSuccessor(const std::string& s) {
   return result;
 }
 
-static void TestKey(const std::string& key,
-                    uint64_t seq,
-                    ValueType vt) {
+static void TestKey(const std::string& key, uint64_t seq, ValueType vt) {
   std::string encoded = IKey(key, seq, vt);
 
   Slice in(encoded);
@@ -44,16 +41,22 @@ static void TestKey(const std::string& key,
   ASSERT_TRUE(!ParseInternalKey(Slice("bar"), &decoded));
 }
 
-class FormatTest { };
+class FormatTest {};
 
 TEST(FormatTest, InternalKey_EncodeDecode) {
-  const char* keys[] = { "", "k", "hello", "longggggggggggggggggggggg" };
-  const uint64_t seq[] = {
-    1, 2, 3,
-    (1ull << 8) - 1, 1ull << 8, (1ull << 8) + 1,
-    (1ull << 16) - 1, 1ull << 16, (1ull << 16) + 1,
-    (1ull << 32) - 1, 1ull << 32, (1ull << 32) + 1
-  };
+  const char* keys[] = {"", "k", "hello", "longggggggggggggggggggggg"};
+  const uint64_t seq[] = {1,
+                          2,
+                          3,
+                          (1ull << 8) - 1,
+                          1ull << 8,
+                          (1ull << 8) + 1,
+                          (1ull << 16) - 1,
+                          1ull << 16,
+                          (1ull << 16) + 1,
+                          (1ull << 32) - 1,
+                          1ull << 32,
+                          (1ull << 32) + 1};
   for (int k = 0; k < sizeof(keys) / sizeof(keys[0]); k++) {
     for (int s = 0; s < sizeof(seq) / sizeof(seq[0]); s++) {
       TestKey(keys[k], seq[s], kTypeValue);
@@ -65,37 +68,35 @@ TEST(FormatTest, InternalKey_EncodeDecode) {
 TEST(FormatTest, InternalKeyShortSeparator) {
   // When user keys are same
   ASSERT_EQ(IKey("foo", 100, kTypeValue),
-            Shorten(IKey("foo", 100, kTypeValue),
-                    IKey("foo", 99, kTypeValue)));
-  ASSERT_EQ(IKey("foo", 100, kTypeValue),
-            Shorten(IKey("foo", 100, kTypeValue),
-                    IKey("foo", 101, kTypeValue)));
-  ASSERT_EQ(IKey("foo", 100, kTypeValue),
-            Shorten(IKey("foo", 100, kTypeValue),
-                    IKey("foo", 100, kTypeValue)));
-  ASSERT_EQ(IKey("foo", 100, kTypeValue),
-            Shorten(IKey("foo", 100, kTypeValue),
-                    IKey("foo", 100, kTypeDeletion)));
+            Shorten(IKey("foo", 100, kTypeValue), IKey("foo", 99, kTypeValue)));
+  ASSERT_EQ(
+      IKey("foo", 100, kTypeValue),
+      Shorten(IKey("foo", 100, kTypeValue), IKey("foo", 101, kTypeValue)));
+  ASSERT_EQ(
+      IKey("foo", 100, kTypeValue),
+      Shorten(IKey("foo", 100, kTypeValue), IKey("foo", 100, kTypeValue)));
+  ASSERT_EQ(
+      IKey("foo", 100, kTypeValue),
+      Shorten(IKey("foo", 100, kTypeValue), IKey("foo", 100, kTypeDeletion)));
 
   // When user keys are misordered
   ASSERT_EQ(IKey("foo", 100, kTypeValue),
-            Shorten(IKey("foo", 100, kTypeValue),
-                    IKey("bar", 99, kTypeValue)));
+            Shorten(IKey("foo", 100, kTypeValue), IKey("bar", 99, kTypeValue)));
 
   // When user keys are different, but correctly ordered
-  ASSERT_EQ(IKey("g", kMaxSequenceNumber, kValueTypeForSeek),
-            Shorten(IKey("foo", 100, kTypeValue),
-                    IKey("hello", 200, kTypeValue)));
+  ASSERT_EQ(
+      IKey("g", kMaxSequenceNumber, kValueTypeForSeek),
+      Shorten(IKey("foo", 100, kTypeValue), IKey("hello", 200, kTypeValue)));
 
   // When start user key is prefix of limit user key
-  ASSERT_EQ(IKey("foo", 100, kTypeValue),
-            Shorten(IKey("foo", 100, kTypeValue),
-                    IKey("foobar", 200, kTypeValue)));
+  ASSERT_EQ(
+      IKey("foo", 100, kTypeValue),
+      Shorten(IKey("foo", 100, kTypeValue), IKey("foobar", 200, kTypeValue)));
 
   // When limit user key is prefix of start user key
-  ASSERT_EQ(IKey("foobar", 100, kTypeValue),
-            Shorten(IKey("foobar", 100, kTypeValue),
-                    IKey("foo", 200, kTypeValue)));
+  ASSERT_EQ(
+      IKey("foobar", 100, kTypeValue),
+      Shorten(IKey("foobar", 100, kTypeValue), IKey("foo", 200, kTypeValue)));
 }
 
 TEST(FormatTest, InternalKeyShortestSuccessor) {
@@ -107,6 +108,4 @@ TEST(FormatTest, InternalKeyShortestSuccessor) {
 
 }  // namespace leveldb
 
-int main(int argc, char** argv) {
-  return leveldb::test::RunAllTests();
-}
+int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
diff --git a/db/dumpfile.cc b/db/dumpfile.cc
index 941988b..1dbff5e 100644
--- a/db/dumpfile.cc
+++ b/db/dumpfile.cc
@@ -90,7 +90,6 @@ class WriteBatchItemPrinter : public WriteBatch::Handler {
   }
 };
 
-
 // Called on every log record (each one of which is a WriteBatch)
 // found in a kLogFile.
 static void WriteBatchPrinter(uint64_t pos, Slice record, WritableFile* dst) {
@@ -216,9 +215,12 @@ Status DumpFile(Env* env, const std::string& fname, WritableFile* dst) {
     return Status::InvalidArgument(fname + ": unknown file type");
   }
   switch (ftype) {
-    case kLogFile:         return DumpLog(env, fname, dst);
-    case kDescriptorFile:  return DumpDescriptor(env, fname, dst);
-    case kTableFile:       return DumpTable(env, fname, dst);
+    case kLogFile:
+      return DumpLog(env, fname, dst);
+    case kDescriptorFile:
+      return DumpDescriptor(env, fname, dst);
+    case kTableFile:
+      return DumpTable(env, fname, dst);
     default:
       break;
   }
diff --git a/db/fault_injection_test.cc b/db/fault_injection_test.cc
index 1f72984..7088ea7 100644
--- a/db/fault_injection_test.cc
+++ b/db/fault_injection_test.cc
@@ -9,12 +9,12 @@
 #include <map>
 #include <set>
 
-#include "leveldb/db.h"
 #include "db/db_impl.h"
 #include "db/filename.h"
 #include "db/log_format.h"
 #include "db/version_set.h"
 #include "leveldb/cache.h"
+#include "leveldb/db.h"
 #include "leveldb/env.h"
 #include "leveldb/table.h"
 #include "leveldb/write_batch.h"
@@ -56,8 +56,7 @@ Status Truncate(const std::string& filename, uint64_t length) {
 
   SequentialFile* orig_file;
   Status s = env->NewSequentialFile(filename, &orig_file);
-  if (!s.ok())
-    return s;
+  if (!s.ok()) return s;
 
   char* scratch = new char[length];
   leveldb::Slice result;
@@ -93,7 +92,7 @@ struct FileState {
       : filename_(filename),
         pos_(-1),
         pos_at_last_sync_(-1),
-        pos_at_last_flush_(-1) { }
+        pos_at_last_flush_(-1) {}
 
   FileState() : pos_(-1), pos_at_last_sync_(-1), pos_at_last_flush_(-1) {}
 
@@ -108,8 +107,7 @@ struct FileState {
 // is written to or sync'ed.
 class TestWritableFile : public WritableFile {
  public:
-  TestWritableFile(const FileState& state,
-                   WritableFile* f,
+  TestWritableFile(const FileState& state, WritableFile* f,
                    FaultInjectionTestEnv* env);
   virtual ~TestWritableFile();
   virtual Status Append(const Slice& data);
@@ -130,7 +128,7 @@ class FaultInjectionTestEnv : public EnvWrapper {
  public:
   FaultInjectionTestEnv()
       : EnvWrapper(Env::Default()), filesystem_active_(true) {}
-  virtual ~FaultInjectionTestEnv() { }
+  virtual ~FaultInjectionTestEnv() {}
   virtual Status NewWritableFile(const std::string& fname,
                                  WritableFile** result);
   virtual Status NewAppendableFile(const std::string& fname,
@@ -165,13 +163,9 @@ class FaultInjectionTestEnv : public EnvWrapper {
   bool filesystem_active_ GUARDED_BY(mutex_);  // Record flushes, syncs, writes
 };
 
-TestWritableFile::TestWritableFile(const FileState& state,
-                                   WritableFile* f,
+TestWritableFile::TestWritableFile(const FileState& state, WritableFile* f,
                                    FaultInjectionTestEnv* env)
-    : state_(state),
-      target_(f),
-      writable_file_opened_(true),
-      env_(env) {
+    : state_(state), target_(f), writable_file_opened_(true), env_(env) {
   assert(f != nullptr);
 }
 
@@ -395,9 +389,7 @@ class FaultInjectionTest {
     delete env_;
   }
 
-  void ReuseLogs(bool reuse) {
-    options_.reuse_logs = reuse;
-  }
+  void ReuseLogs(bool reuse) { options_.reuse_logs = reuse; }
 
   void Build(int start_idx, int num_vals) {
     std::string key_space, value_space;
@@ -497,18 +489,17 @@ class FaultInjectionTest {
   }
 
   void PartialCompactTestReopenWithFault(ResetMethod reset_method,
-                                         int num_pre_sync,
-                                         int num_post_sync) {
+                                         int num_pre_sync, int num_post_sync) {
     env_->SetFilesystemActive(false);
     CloseDB();
     ResetDBState(reset_method);
     ASSERT_OK(OpenDB());
     ASSERT_OK(Verify(0, num_pre_sync, FaultInjectionTest::VAL_EXPECT_NO_ERROR));
-    ASSERT_OK(Verify(num_pre_sync, num_post_sync, FaultInjectionTest::VAL_EXPECT_ERROR));
+    ASSERT_OK(Verify(num_pre_sync, num_post_sync,
+                     FaultInjectionTest::VAL_EXPECT_ERROR));
   }
 
-  void NoWriteTestPreFault() {
-  }
+  void NoWriteTestPreFault() {}
 
   void NoWriteTestReopenWithFault(ResetMethod reset_method) {
     CloseDB();
@@ -524,8 +515,7 @@ class FaultInjectionTest {
       int num_post_sync = rnd.Uniform(kMaxNumValues);
 
       PartialCompactTestPreFault(num_pre_sync, num_post_sync);
-      PartialCompactTestReopenWithFault(RESET_DROP_UNSYNCED_DATA,
-                                        num_pre_sync,
+      PartialCompactTestReopenWithFault(RESET_DROP_UNSYNCED_DATA, num_pre_sync,
                                         num_post_sync);
 
       NoWriteTestPreFault();
@@ -535,8 +525,7 @@ class FaultInjectionTest {
       // No new files created so we expect all values since no files will be
       // dropped.
       PartialCompactTestReopenWithFault(RESET_DELETE_UNSYNCED_FILES,
-                                        num_pre_sync + num_post_sync,
-                                        0);
+                                        num_pre_sync + num_post_sync, 0);
 
       NoWriteTestPreFault();
       NoWriteTestReopenWithFault(RESET_DELETE_UNSYNCED_FILES);
@@ -556,6 +545,4 @@ TEST(FaultInjectionTest, FaultTestWithLogReuse) {
 
 }  // namespace leveldb
 
-int main(int argc, char** argv) {
-  return leveldb::test::RunAllTests();
-}
+int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
diff --git a/db/filename.cc b/db/filename.cc
index 6539bbe..85de45c 100644
--- a/db/filename.cc
+++ b/db/filename.cc
@@ -2,9 +2,11 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file. See the AUTHORS file for names of contributors.
 
+#include "db/filename.h"
+
 #include <ctype.h>
 #include <stdio.h>
-#include "db/filename.h"
+
 #include "db/dbformat.h"
 #include "leveldb/env.h"
 #include "util/logging.h"
@@ -19,8 +21,7 @@ static std::string MakeFileName(const std::string& dbname, uint64_t number,
                                 const char* suffix) {
   char buf[100];
   snprintf(buf, sizeof(buf), "/%06llu.%s",
-           static_cast<unsigned long long>(number),
-           suffix);
+           static_cast<unsigned long long>(number), suffix);
   return dbname + buf;
 }
 
@@ -51,9 +52,7 @@ std::string CurrentFileName(const std::string& dbname) {
   return dbname + "/CURRENT";
 }
 
-std::string LockFileName(const std::string& dbname) {
-  return dbname + "/LOCK";
-}
+std::string LockFileName(const std::string& dbname) { return dbname + "/LOCK"; }
 
 std::string TempFileName(const std::string& dbname, uint64_t number) {
   assert(number > 0);
@@ -69,7 +68,6 @@ std::string OldInfoLogFileName(const std::string& dbname) {
   return dbname + "/LOG.old";
 }
 
-
 // Owned filenames have the form:
 //    dbname/CURRENT
 //    dbname/LOCK
@@ -77,8 +75,7 @@ std::string OldInfoLogFileName(const std::string& dbname) {
 //    dbname/LOG.old
 //    dbname/MANIFEST-[0-9]+
 //    dbname/[0-9]+.(log|sst|ldb)
-bool ParseFileName(const std::string& filename,
-                   uint64_t* number,
+bool ParseFileName(const std::string& filename, uint64_t* number,
                    FileType* type) {
   Slice rest(filename);
   if (rest == "CURRENT") {
diff --git a/db/filename.h b/db/filename.h
index 62cb3ef..524e813 100644
--- a/db/filename.h
+++ b/db/filename.h
@@ -8,7 +8,9 @@
 #define STORAGE_LEVELDB_DB_FILENAME_H_
 
 #include <stdint.h>
+
 #include <string>
+
 #include "leveldb/slice.h"
 #include "leveldb/status.h"
 #include "port/port.h"
@@ -69,8 +71,7 @@ std::string OldInfoLogFileName(const std::string& dbname);
 // If filename is a leveldb file, store the type of the file in *type.
 // The number encoded in the filename is stored in *number.  If the
 // filename was successfully parsed, returns true.  Else return false.
-bool ParseFileName(const std::string& filename,
-                   uint64_t* number,
+bool ParseFileName(const std::string& filename, uint64_t* number,
                    FileType* type);
 
 // Make the CURRENT file point to the descriptor file with the
diff --git a/db/filename_test.cc b/db/filename_test.cc
index 0bde538..952f320 100644
--- a/db/filename_test.cc
+++ b/db/filename_test.cc
@@ -11,7 +11,7 @@
 
 namespace leveldb {
 
-class FileNameTest { };
+class FileNameTest {};
 
 TEST(FileNameTest, Parse) {
   Slice db;
@@ -24,17 +24,17 @@ TEST(FileNameTest, Parse) {
     uint64_t number;
     FileType type;
   } cases[] = {
-    { "100.log",            100,   kLogFile },
-    { "0.log",              0,     kLogFile },
-    { "0.sst",              0,     kTableFile },
-    { "0.ldb",              0,     kTableFile },
-    { "CURRENT",            0,     kCurrentFile },
-    { "LOCK",               0,     kDBLockFile },
-    { "MANIFEST-2",         2,     kDescriptorFile },
-    { "MANIFEST-7",         7,     kDescriptorFile },
-    { "LOG",                0,     kInfoLogFile },
-    { "LOG.old",            0,     kInfoLogFile },
-    { "18446744073709551615.log", 18446744073709551615ull, kLogFile },
+      {"100.log", 100, kLogFile},
+      {"0.log", 0, kLogFile},
+      {"0.sst", 0, kTableFile},
+      {"0.ldb", 0, kTableFile},
+      {"CURRENT", 0, kCurrentFile},
+      {"LOCK", 0, kDBLockFile},
+      {"MANIFEST-2", 2, kDescriptorFile},
+      {"MANIFEST-7", 7, kDescriptorFile},
+      {"LOG", 0, kInfoLogFile},
+      {"LOG.old", 0, kInfoLogFile},
+      {"18446744073709551615.log", 18446744073709551615ull, kLogFile},
   };
   for (int i = 0; i < sizeof(cases) / sizeof(cases[0]); i++) {
     std::string f = cases[i].fname;
@@ -44,30 +44,28 @@ TEST(FileNameTest, Parse) {
   }
 
   // Errors
-  static const char* errors[] = {
-    "",
-    "foo",
-    "foo-dx-100.log",
-    ".log",
-    "",
-    "manifest",
-    "CURREN",
-    "CURRENTX",
-    "MANIFES",
-    "MANIFEST",
-    "MANIFEST-",
-    "XMANIFEST-3",
-    "MANIFEST-3x",
-    "LOC",
-    "LOCKx",
-    "LO",
-    "LOGx",
-    "18446744073709551616.log",
-    "184467440737095516150.log",
-    "100",
-    "100.",
-    "100.lop"
-  };
+  static const char* errors[] = {"",
+                                 "foo",
+                                 "foo-dx-100.log",
+                                 ".log",
+                                 "",
+                                 "manifest",
+                                 "CURREN",
+                                 "CURRENTX",
+                                 "MANIFES",
+                                 "MANIFEST",
+                                 "MANIFEST-",
+                                 "XMANIFEST-3",
+                                 "MANIFEST-3x",
+                                 "LOC",
+                                 "LOCKx",
+                                 "LO",
+                                 "LOGx",
+                                 "18446744073709551616.log",
+                                 "184467440737095516150.log",
+                                 "100",
+                                 "100.",
+                                 "100.lop"};
   for (int i = 0; i < sizeof(errors) / sizeof(errors[0]); i++) {
     std::string f = errors[i];
     ASSERT_TRUE(!ParseFileName(f, &number, &type)) << f;
@@ -130,6 +128,4 @@ TEST(FileNameTest, Construction) {
 
 }  // namespace leveldb
 
-int main(int argc, char** argv) {
-  return leveldb::test::RunAllTests();
-}
+int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
diff --git a/db/leveldbutil.cc b/db/leveldbutil.cc
index 9f4b7dd..b21cf8e 100644
--- a/db/leveldbutil.cc
+++ b/db/leveldbutil.cc
@@ -3,6 +3,7 @@
 // found in the LICENSE file. See the AUTHORS file for names of contributors.
 
 #include <stdio.h>
+
 #include "leveldb/dumpfile.h"
 #include "leveldb/env.h"
 #include "leveldb/status.h"
@@ -38,11 +39,9 @@ bool HandleDumpCommand(Env* env, char** files, int num) {
 }  // namespace leveldb
 
 static void Usage() {
-  fprintf(
-      stderr,
-      "Usage: leveldbutil command...\n"
-      "   dump files...         -- dump contents of specified files\n"
-      );
+  fprintf(stderr,
+          "Usage: leveldbutil command...\n"
+          "   dump files...         -- dump contents of specified files\n");
 }
 
 int main(int argc, char** argv) {
@@ -54,7 +53,7 @@ int main(int argc, char** argv) {
   } else {
     std::string command = argv[1];
     if (command == "dump") {
-      ok = leveldb::HandleDumpCommand(env, argv+2, argc-2);
+      ok = leveldb::HandleDumpCommand(env, argv + 2, argc - 2);
     } else {
       Usage();
       ok = false;
diff --git a/db/log_reader.cc b/db/log_reader.cc
index 19c4df6..f472723 100644
--- a/db/log_reader.cc
+++ b/db/log_reader.cc
@@ -5,6 +5,7 @@
 #include "db/log_reader.h"
 
 #include <stdio.h>
+
 #include "leveldb/env.h"
 #include "util/coding.h"
 #include "util/crc32c.h"
@@ -12,8 +13,7 @@
 namespace leveldb {
 namespace log {
 
-Reader::Reporter::~Reporter() {
-}
+Reader::Reporter::~Reporter() {}
 
 Reader::Reader(SequentialFile* file, Reporter* reporter, bool checksum,
                uint64_t initial_offset)
@@ -26,12 +26,9 @@ Reader::Reader(SequentialFile* file, Reporter* reporter, bool checksum,
       last_record_offset_(0),
       end_of_buffer_offset_(0),
       initial_offset_(initial_offset),
-      resyncing_(initial_offset > 0) {
-}
+      resyncing_(initial_offset > 0) {}
 
-Reader::~Reader() {
-  delete[] backing_store_;
-}
+Reader::~Reader() { delete[] backing_store_; }
 
 bool Reader::SkipToInitialBlock() {
   const size_t offset_in_block = initial_offset_ % kBlockSize;
@@ -176,9 +173,7 @@ bool Reader::ReadRecord(Slice* record, std::string* scratch) {
   return false;
 }
 
-uint64_t Reader::LastRecordOffset() {
-  return last_record_offset_;
-}
+uint64_t Reader::LastRecordOffset() { return last_record_offset_; }
 
 void Reader::ReportCorruption(uint64_t bytes, const char* reason) {
   ReportDrop(bytes, Status::Corruption(reason));
diff --git a/db/log_reader.h b/db/log_reader.h
index 7dcce8e..b27c164 100644
--- a/db/log_reader.h
+++ b/db/log_reader.h
@@ -63,7 +63,7 @@ class Reader {
   bool const checksum_;
   char* const backing_store_;
   Slice buffer_;
-  bool eof_;   // Last Read() indicated EOF by returning < kBlockSize
+  bool eof_;  // Last Read() indicated EOF by returning < kBlockSize
 
   // Offset of the last record returned by ReadRecord.
   uint64_t last_record_offset_;
diff --git a/db/log_test.cc b/db/log_test.cc
index 48a5928..3acaa33 100644
--- a/db/log_test.cc
+++ b/db/log_test.cc
@@ -56,7 +56,7 @@ class LogTest {
     Slice contents_;
     bool force_error_;
     bool returned_partial_;
-    StringSource() : force_error_(false), returned_partial_(false) { }
+    StringSource() : force_error_(false), returned_partial_(false) {}
 
     virtual Status Read(size_t n, Slice* result, char* scratch) {
       ASSERT_TRUE(!returned_partial_) << "must not Read() after eof/error";
@@ -93,7 +93,7 @@ class LogTest {
     size_t dropped_bytes_;
     std::string message_;
 
-    ReportCollector() : dropped_bytes_(0) { }
+    ReportCollector() : dropped_bytes_(0) {}
     virtual void Corruption(size_t bytes, const Status& status) {
       dropped_bytes_ += bytes;
       message_.append(status.ToString());
@@ -113,11 +113,11 @@ class LogTest {
   static int num_initial_offset_records_;
 
  public:
-  LogTest() : reading_(false),
-              writer_(new Writer(&dest_)),
-              reader_(new Reader(&source_, &report_, true/*checksum*/,
-                      0/*initial_offset*/)) {
-  }
+  LogTest()
+      : reading_(false),
+        writer_(new Writer(&dest_)),
+        reader_(new Reader(&source_, &report_, true /*checksum*/,
+                           0 /*initial_offset*/)) {}
 
   ~LogTest() {
     delete writer_;
@@ -134,9 +134,7 @@ class LogTest {
     writer_->AddRecord(Slice(msg));
   }
 
-  size_t WrittenBytes() const {
-    return dest_.contents_.size();
-  }
+  size_t WrittenBytes() const { return dest_.contents_.size(); }
 
   std::string Read() {
     if (!reading_) {
@@ -166,22 +164,16 @@ class LogTest {
 
   void FixChecksum(int header_offset, int len) {
     // Compute crc of type/len/data
-    uint32_t crc = crc32c::Value(&dest_.contents_[header_offset+6], 1 + len);
+    uint32_t crc = crc32c::Value(&dest_.contents_[header_offset + 6], 1 + len);
     crc = crc32c::Mask(crc);
     EncodeFixed32(&dest_.contents_[header_offset], crc);
   }
 
-  void ForceError() {
-    source_.force_error_ = true;
-  }
+  void ForceError() { source_.force_error_ = true; }
 
-  size_t DroppedBytes() const {
-    return report_.dropped_bytes_;
-  }
+  size_t DroppedBytes() const { return report_.dropped_bytes_; }
 
-  std::string ReportMessage() const {
-    return report_.message_;
-  }
+  std::string ReportMessage() const { return report_.message_; }
 
   // Returns OK iff recorded error message contains "msg"
   std::string MatchError(const std::string& msg) const {
@@ -202,14 +194,14 @@ class LogTest {
 
   void StartReadingAt(uint64_t initial_offset) {
     delete reader_;
-    reader_ = new Reader(&source_, &report_, true/*checksum*/, initial_offset);
+    reader_ = new Reader(&source_, &report_, true /*checksum*/, initial_offset);
   }
 
   void CheckOffsetPastEndReturnsNoRecords(uint64_t offset_past_end) {
     WriteInitialOffsetLog();
     reading_ = true;
     source_.contents_ = Slice(dest_.contents_);
-    Reader* offset_reader = new Reader(&source_, &report_, true/*checksum*/,
+    Reader* offset_reader = new Reader(&source_, &report_, true /*checksum*/,
                                        WrittenBytes() + offset_past_end);
     Slice record;
     std::string scratch;
@@ -222,8 +214,8 @@ class LogTest {
     WriteInitialOffsetLog();
     reading_ = true;
     source_.contents_ = Slice(dest_.contents_);
-    Reader* offset_reader = new Reader(&source_, &report_, true/*checksum*/,
-                                       initial_offset);
+    Reader* offset_reader =
+        new Reader(&source_, &report_, true /*checksum*/, initial_offset);
 
     // Read all records from expected_record_offset through the last one.
     ASSERT_LT(expected_record_offset, num_initial_offset_records_);
@@ -242,34 +234,30 @@ class LogTest {
   }
 };
 
-size_t LogTest::initial_offset_record_sizes_[] =
-    {10000,  // Two sizable records in first block
-     10000,
-     2 * log::kBlockSize - 1000,  // Span three blocks
-     1,
-     13716,  // Consume all but two bytes of block 3.
-     log::kBlockSize - kHeaderSize, // Consume the entirety of block 4.
-    };
-
-uint64_t LogTest::initial_offset_last_record_offsets_[] =
-    {0,
-     kHeaderSize + 10000,
-     2 * (kHeaderSize + 10000),
-     2 * (kHeaderSize + 10000) +
-         (2 * log::kBlockSize - 1000) + 3 * kHeaderSize,
-     2 * (kHeaderSize + 10000) +
-         (2 * log::kBlockSize - 1000) + 3 * kHeaderSize
-         + kHeaderSize + 1,
-     3 * log::kBlockSize,
-    };
+size_t LogTest::initial_offset_record_sizes_[] = {
+    10000,  // Two sizable records in first block
+    10000,
+    2 * log::kBlockSize - 1000,  // Span three blocks
+    1,
+    13716,                          // Consume all but two bytes of block 3.
+    log::kBlockSize - kHeaderSize,  // Consume the entirety of block 4.
+};
+
+uint64_t LogTest::initial_offset_last_record_offsets_[] = {
+    0,
+    kHeaderSize + 10000,
+    2 * (kHeaderSize + 10000),
+    2 * (kHeaderSize + 10000) + (2 * log::kBlockSize - 1000) + 3 * kHeaderSize,
+    2 * (kHeaderSize + 10000) + (2 * log::kBlockSize - 1000) + 3 * kHeaderSize +
+        kHeaderSize + 1,
+    3 * log::kBlockSize,
+};
 
 // LogTest::initial_offset_last_record_offsets_ must be defined before this.
 int LogTest::num_initial_offset_records_ =
-    sizeof(LogTest::initial_offset_last_record_offsets_)/sizeof(uint64_t);
+    sizeof(LogTest::initial_offset_last_record_offsets_) / sizeof(uint64_t);
 
-TEST(LogTest, Empty) {
-  ASSERT_EQ("EOF", Read());
-}
+TEST(LogTest, Empty) { ASSERT_EQ("EOF", Read()); }
 
 TEST(LogTest, ReadWrite) {
   Write("foo");
@@ -306,7 +294,7 @@ TEST(LogTest, Fragmentation) {
 
 TEST(LogTest, MarginalTrailer) {
   // Make a trailer that is exactly the same length as an empty record.
-  const int n = kBlockSize - 2*kHeaderSize;
+  const int n = kBlockSize - 2 * kHeaderSize;
   Write(BigString("foo", n));
   ASSERT_EQ(kBlockSize - kHeaderSize, WrittenBytes());
   Write("");
@@ -319,7 +307,7 @@ TEST(LogTest, MarginalTrailer) {
 
 TEST(LogTest, MarginalTrailer2) {
   // Make a trailer that is exactly the same length as an empty record.
-  const int n = kBlockSize - 2*kHeaderSize;
+  const int n = kBlockSize - 2 * kHeaderSize;
   Write(BigString("foo", n));
   ASSERT_EQ(kBlockSize - kHeaderSize, WrittenBytes());
   Write("bar");
@@ -331,7 +319,7 @@ TEST(LogTest, MarginalTrailer2) {
 }
 
 TEST(LogTest, ShortTrailer) {
-  const int n = kBlockSize - 2*kHeaderSize + 4;
+  const int n = kBlockSize - 2 * kHeaderSize + 4;
   Write(BigString("foo", n));
   ASSERT_EQ(kBlockSize - kHeaderSize + 4, WrittenBytes());
   Write("");
@@ -343,7 +331,7 @@ TEST(LogTest, ShortTrailer) {
 }
 
 TEST(LogTest, AlignedEof) {
-  const int n = kBlockSize - 2*kHeaderSize + 4;
+  const int n = kBlockSize - 2 * kHeaderSize + 4;
   Write(BigString("foo", n));
   ASSERT_EQ(kBlockSize - kHeaderSize + 4, WrittenBytes());
   ASSERT_EQ(BigString("foo", n), Read());
@@ -394,7 +382,7 @@ TEST(LogTest, BadRecordType) {
 
 TEST(LogTest, TruncatedTrailingRecordIsIgnored) {
   Write("foo");
-  ShrinkSize(4);   // Drop all payload as well as a header byte
+  ShrinkSize(4);  // Drop all payload as well as a header byte
   ASSERT_EQ("EOF", Read());
   // Truncated last record is ignored, not treated as an error.
   ASSERT_EQ(0, DroppedBytes());
@@ -492,7 +480,7 @@ TEST(LogTest, SkipIntoMultiRecord) {
   // If initial_offset points to a record after first(R1) but before first(R2)
   // incomplete fragment errors are not actual errors, and must be suppressed
   // until a new first or full record is encountered.
-  Write(BigString("foo", 3*kBlockSize));
+  Write(BigString("foo", 3 * kBlockSize));
   Write("correct");
   StartReadingAt(kBlockSize);
 
@@ -514,44 +502,30 @@ TEST(LogTest, ErrorJoinsRecords) {
   Write("correct");
 
   // Wipe the middle block
-  for (int offset = kBlockSize; offset < 2*kBlockSize; offset++) {
+  for (int offset = kBlockSize; offset < 2 * kBlockSize; offset++) {
     SetByte(offset, 'x');
   }
 
   ASSERT_EQ("correct", Read());
   ASSERT_EQ("EOF", Read());
   const size_t dropped = DroppedBytes();
-  ASSERT_LE(dropped, 2*kBlockSize + 100);
-  ASSERT_GE(dropped, 2*kBlockSize);
+  ASSERT_LE(dropped, 2 * kBlockSize + 100);
+  ASSERT_GE(dropped, 2 * kBlockSize);
 }
 
-TEST(LogTest, ReadStart) {
-  CheckInitialOffsetRecord(0, 0);
-}
+TEST(LogTest, ReadStart) { CheckInitialOffsetRecord(0, 0); }
 
-TEST(LogTest, ReadSecondOneOff) {
-  CheckInitialOffsetRecord(1, 1);
-}
+TEST(LogTest, ReadSecondOneOff) { CheckInitialOffsetRecord(1, 1); }
 
-TEST(LogTest, ReadSecondTenThousand) {
-  CheckInitialOffsetRecord(10000, 1);
-}
+TEST(LogTest, ReadSecondTenThousand) { CheckInitialOffsetRecord(10000, 1); }
 
-TEST(LogTest, ReadSecondStart) {
-  CheckInitialOffsetRecord(10007, 1);
-}
+TEST(LogTest, ReadSecondStart) { CheckInitialOffsetRecord(10007, 1); }
 
-TEST(LogTest, ReadThirdOneOff) {
-  CheckInitialOffsetRecord(10008, 2);
-}
+TEST(LogTest, ReadThirdOneOff) { CheckInitialOffsetRecord(10008, 2); }
 
-TEST(LogTest, ReadThirdStart) {
-  CheckInitialOffsetRecord(20014, 2);
-}
+TEST(LogTest, ReadThirdStart) { CheckInitialOffsetRecord(20014, 2); }
 
-TEST(LogTest, ReadFourthOneOff) {
-  CheckInitialOffsetRecord(20015, 3);
-}
+TEST(LogTest, ReadFourthOneOff) { CheckInitialOffsetRecord(20015, 3); }
 
 TEST(LogTest, ReadFourthFirstBlockTrailer) {
   CheckInitialOffsetRecord(log::kBlockSize - 4, 3);
@@ -575,17 +549,11 @@ TEST(LogTest, ReadInitialOffsetIntoBlockPadding) {
   CheckInitialOffsetRecord(3 * log::kBlockSize - 3, 5);
 }
 
-TEST(LogTest, ReadEnd) {
-  CheckOffsetPastEndReturnsNoRecords(0);
-}
+TEST(LogTest, ReadEnd) { CheckOffsetPastEndReturnsNoRecords(0); }
 
-TEST(LogTest, ReadPastEnd) {
-  CheckOffsetPastEndReturnsNoRecords(5);
-}
+TEST(LogTest, ReadPastEnd) { CheckOffsetPastEndReturnsNoRecords(5); }
 
 }  // namespace log
 }  // namespace leveldb
 
-int main(int argc, char** argv) {
-  return leveldb::test::RunAllTests();
-}
+int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
diff --git a/db/log_writer.cc b/db/log_writer.cc
index 74a0327..5e83f6a 100644
--- a/db/log_writer.cc
+++ b/db/log_writer.cc
@@ -5,6 +5,7 @@
 #include "db/log_writer.h"
 
 #include <stdint.h>
+
 #include "leveldb/env.h"
 #include "util/coding.h"
 #include "util/crc32c.h"
@@ -19,9 +20,7 @@ static void InitTypeCrc(uint32_t* type_crc) {
   }
 }
 
-Writer::Writer(WritableFile* dest)
-    : dest_(dest),
-      block_offset_(0) {
+Writer::Writer(WritableFile* dest) : dest_(dest), block_offset_(0) {
   InitTypeCrc(type_crc_);
 }
 
@@ -30,8 +29,7 @@ Writer::Writer(WritableFile* dest, uint64_t dest_length)
   InitTypeCrc(type_crc_);
 }
 
-Writer::~Writer() {
-}
+Writer::~Writer() {}
 
 Status Writer::AddRecord(const Slice& slice) {
   const char* ptr = slice.data();
@@ -49,7 +47,7 @@ Status Writer::AddRecord(const Slice& slice) {
       // Switch to a new block
       if (leftover > 0) {
         // Fill the trailer (literal below relies on kHeaderSize being 7)
-        assert(kHeaderSize == 7);
+        static_assert(kHeaderSize == 7, "");
         dest_->Append(Slice("\x00\x00\x00\x00\x00\x00", leftover));
       }
       block_offset_ = 0;
@@ -81,30 +79,31 @@ Status Writer::AddRecord(const Slice& slice) {
   return s;
 }
 
-Status Writer::EmitPhysicalRecord(RecordType t, const char* ptr, size_t n) {
-  assert(n <= 0xffff);  // Must fit in two bytes
-  assert(block_offset_ + kHeaderSize + n <= kBlockSize);
+Status Writer::EmitPhysicalRecord(RecordType t, const char* ptr,
+                                  size_t length) {
+  assert(length <= 0xffff);  // Must fit in two bytes
+  assert(block_offset_ + kHeaderSize + length <= kBlockSize);
 
   // Format the header
   char buf[kHeaderSize];
-  buf[4] = static_cast<char>(n & 0xff);
-  buf[5] = static_cast<char>(n >> 8);
+  buf[4] = static_cast<char>(length & 0xff);
+  buf[5] = static_cast<char>(length >> 8);
   buf[6] = static_cast<char>(t);
 
   // Compute the crc of the record type and the payload.
-  uint32_t crc = crc32c::Extend(type_crc_[t], ptr, n);
-  crc = crc32c::Mask(crc);                 // Adjust for storage
+  uint32_t crc = crc32c::Extend(type_crc_[t], ptr, length);
+  crc = crc32c::Mask(crc);  // Adjust for storage
   EncodeFixed32(buf, crc);
 
   // Write the header and the payload
   Status s = dest_->Append(Slice(buf, kHeaderSize));
   if (s.ok()) {
-    s = dest_->Append(Slice(ptr, n));
+    s = dest_->Append(Slice(ptr, length));
     if (s.ok()) {
       s = dest_->Flush();
     }
   }
-  block_offset_ += kHeaderSize + n;
+  block_offset_ += kHeaderSize + length;
   return s;
 }
 
diff --git a/db/log_writer.h b/db/log_writer.h
index 9e7cc47..840809d 100644
--- a/db/log_writer.h
+++ b/db/log_writer.h
@@ -6,6 +6,7 @@
 #define STORAGE_LEVELDB_DB_LOG_WRITER_H_
 
 #include <stdint.h>
+
 #include "db/log_format.h"
 #include "leveldb/slice.h"
 #include "leveldb/status.h"
@@ -34,7 +35,7 @@ class Writer {
 
  private:
   WritableFile* dest_;
-  int block_offset_;       // Current offset in block
+  int block_offset_;  // Current offset in block
 
   // crc32c values for all supported record types.  These are
   // pre-computed to reduce the overhead of computing the crc of the
diff --git a/db/memtable.cc b/db/memtable.cc
index 287afdb..c91405c 100644
--- a/db/memtable.cc
+++ b/db/memtable.cc
@@ -18,20 +18,15 @@ static Slice GetLengthPrefixedSlice(const char* data) {
   return Slice(p, len);
 }
 
-MemTable::MemTable(const InternalKeyComparator& cmp)
-    : comparator_(cmp),
-      refs_(0),
-      table_(comparator_, &arena_) {
-}
+MemTable::MemTable(const InternalKeyComparator& comparator)
+    : comparator_(comparator), refs_(0), table_(comparator_, &arena_) {}
 
-MemTable::~MemTable() {
-  assert(refs_ == 0);
-}
+MemTable::~MemTable() { assert(refs_ == 0); }
 
 size_t MemTable::ApproximateMemoryUsage() { return arena_.MemoryUsage(); }
 
-int MemTable::KeyComparator::operator()(const char* aptr, const char* bptr)
-    const {
+int MemTable::KeyComparator::operator()(const char* aptr,
+                                        const char* bptr) const {
   // Internal keys are encoded as length-prefixed strings.
   Slice a = GetLengthPrefixedSlice(aptr);
   Slice b = GetLengthPrefixedSlice(bptr);
@@ -48,9 +43,9 @@ static const char* EncodeKey(std::string* scratch, const Slice& target) {
   return scratch->data();
 }
 
-class MemTableIterator: public Iterator {
+class MemTableIterator : public Iterator {
  public:
-  explicit MemTableIterator(MemTable::Table* table) : iter_(table) { }
+  explicit MemTableIterator(MemTable::Table* table) : iter_(table) {}
 
   virtual bool Valid() const { return iter_.Valid(); }
   virtual void Seek(const Slice& k) { iter_.Seek(EncodeKey(&tmp_, k)); }
@@ -68,19 +63,16 @@ class MemTableIterator: public Iterator {
 
  private:
   MemTable::Table::Iterator iter_;
-  std::string tmp_;       // For passing to EncodeKey
+  std::string tmp_;  // For passing to EncodeKey
 
   // No copying allowed
   MemTableIterator(const MemTableIterator&);
   void operator=(const MemTableIterator&);
 };
 
-Iterator* MemTable::NewIterator() {
-  return new MemTableIterator(&table_);
-}
+Iterator* MemTable::NewIterator() { return new MemTableIterator(&table_); }
 
-void MemTable::Add(SequenceNumber s, ValueType type,
-                   const Slice& key,
+void MemTable::Add(SequenceNumber s, ValueType type, const Slice& key,
                    const Slice& value) {
   // Format of an entry is concatenation of:
   //  key_size     : varint32 of internal_key.size()
@@ -90,9 +82,9 @@ void MemTable::Add(SequenceNumber s, ValueType type,
   size_t key_size = key.size();
   size_t val_size = value.size();
   size_t internal_key_size = key_size + 8;
-  const size_t encoded_len =
-      VarintLength(internal_key_size) + internal_key_size +
-      VarintLength(val_size) + val_size;
+  const size_t encoded_len = VarintLength(internal_key_size) +
+                             internal_key_size + VarintLength(val_size) +
+                             val_size;
   char* buf = arena_.Allocate(encoded_len);
   char* p = EncodeVarint32(buf, internal_key_size);
   memcpy(p, key.data(), key_size);
@@ -121,10 +113,9 @@ bool MemTable::Get(const LookupKey& key, std::string* value, Status* s) {
     // all entries with overly large sequence numbers.
     const char* entry = iter.key();
     uint32_t key_length;
-    const char* key_ptr = GetVarint32Ptr(entry, entry+5, &key_length);
+    const char* key_ptr = GetVarint32Ptr(entry, entry + 5, &key_length);
     if (comparator_.comparator.user_comparator()->Compare(
-            Slice(key_ptr, key_length - 8),
-            key.user_key()) == 0) {
+            Slice(key_ptr, key_length - 8), key.user_key()) == 0) {
       // Correct user key
       const uint64_t tag = DecodeFixed64(key_ptr + key_length - 8);
       switch (static_cast<ValueType>(tag & 0xff)) {
diff --git a/db/memtable.h b/db/memtable.h
index f2a6736..ef18bb5 100644
--- a/db/memtable.h
+++ b/db/memtable.h
@@ -6,9 +6,10 @@
 #define STORAGE_LEVELDB_DB_MEMTABLE_H_
 
 #include <string>
-#include "leveldb/db.h"
+
 #include "db/dbformat.h"
 #include "db/skiplist.h"
+#include "leveldb/db.h"
 #include "util/arena.h"
 
 namespace leveldb {
@@ -49,8 +50,7 @@ class MemTable {
   // Add an entry into memtable that maps key to value at the
   // specified sequence number and with the specified type.
   // Typically value will be empty if type==kTypeDeletion.
-  void Add(SequenceNumber seq, ValueType type,
-           const Slice& key,
+  void Add(SequenceNumber seq, ValueType type, const Slice& key,
            const Slice& value);
 
   // If memtable contains a value for key, store it in *value and return true.
@@ -64,7 +64,7 @@ class MemTable {
 
   struct KeyComparator {
     const InternalKeyComparator comparator;
-    explicit KeyComparator(const InternalKeyComparator& c) : comparator(c) { }
+    explicit KeyComparator(const InternalKeyComparator& c) : comparator(c) {}
     int operator()(const char* a, const char* b) const;
   };
   friend class MemTableIterator;
diff --git a/db/recovery_test.cc b/db/recovery_test.cc
index 87bd53c..547a959 100644
--- a/db/recovery_test.cc
+++ b/db/recovery_test.cc
@@ -86,15 +86,13 @@ class RecoveryTest {
     std::string current;
     ASSERT_OK(ReadFileToString(env_, CurrentFileName(dbname_), &current));
     size_t len = current.size();
-    if (len > 0 && current[len-1] == '\n') {
+    if (len > 0 && current[len - 1] == '\n') {
       current.resize(len - 1);
     }
     return dbname_ + "/" + current;
   }
 
-  std::string LogName(uint64_t number) {
-    return LogFileName(dbname_, number);
-  }
+  std::string LogName(uint64_t number) { return LogFileName(dbname_, number); }
 
   size_t DeleteLogFiles() {
     // Linux allows unlinking open files, but Windows does not.
@@ -107,13 +105,9 @@ class RecoveryTest {
     return logs.size();
   }
 
-  void DeleteManifestFile() {
-    ASSERT_OK(env_->DeleteFile(ManifestFileName()));
-  }
+  void DeleteManifestFile() { ASSERT_OK(env_->DeleteFile(ManifestFileName())); }
 
-  uint64_t FirstLogFile() {
-    return GetFiles(kLogFile)[0];
-  }
+  uint64_t FirstLogFile() { return GetFiles(kLogFile)[0]; }
 
   std::vector<uint64_t> GetFiles(FileType t) {
     std::vector<std::string> filenames;
@@ -129,13 +123,9 @@ class RecoveryTest {
     return result;
   }
 
-  int NumLogs() {
-    return GetFiles(kLogFile).size();
-  }
+  int NumLogs() { return GetFiles(kLogFile).size(); }
 
-  int NumTables() {
-    return GetFiles(kTableFile).size();
-  }
+  int NumTables() { return GetFiles(kTableFile).size(); }
 
   uint64_t FileSize(const std::string& fname) {
     uint64_t result;
@@ -143,9 +133,7 @@ class RecoveryTest {
     return result;
   }
 
-  void CompactMemTable() {
-    dbfull()->TEST_CompactMemTable();
-  }
+  void CompactMemTable() { dbfull()->TEST_CompactMemTable(); }
 
   // Directly construct a log file that sets key to val.
   void MakeLogFile(uint64_t lognum, SequenceNumber seq, Slice key, Slice val) {
@@ -197,7 +185,7 @@ TEST(RecoveryTest, LargeManifestCompacted) {
     uint64_t len = FileSize(old_manifest);
     WritableFile* file;
     ASSERT_OK(env()->NewAppendableFile(old_manifest, &file));
-    std::string zeroes(3*1048576 - static_cast<size_t>(len), 0);
+    std::string zeroes(3 * 1048576 - static_cast<size_t>(len), 0);
     ASSERT_OK(file->Append(zeroes));
     ASSERT_OK(file->Flush());
     delete file;
@@ -270,7 +258,7 @@ TEST(RecoveryTest, MultipleMemTables) {
   // Force creation of multiple memtables by reducing the write buffer size.
   Options opt;
   opt.reuse_logs = true;
-  opt.write_buffer_size = (kNum*100) / 2;
+  opt.write_buffer_size = (kNum * 100) / 2;
   Open(&opt);
   ASSERT_LE(2, NumTables());
   ASSERT_EQ(1, NumLogs());
@@ -289,16 +277,16 @@ TEST(RecoveryTest, MultipleLogFiles) {
 
   // Make a bunch of uncompacted log files.
   uint64_t old_log = FirstLogFile();
-  MakeLogFile(old_log+1, 1000, "hello", "world");
-  MakeLogFile(old_log+2, 1001, "hi", "there");
-  MakeLogFile(old_log+3, 1002, "foo", "bar2");
+  MakeLogFile(old_log + 1, 1000, "hello", "world");
+  MakeLogFile(old_log + 2, 1001, "hi", "there");
+  MakeLogFile(old_log + 3, 1002, "foo", "bar2");
 
   // Recover and check that all log files were processed.
   Open();
   ASSERT_LE(1, NumTables());
   ASSERT_EQ(1, NumLogs());
   uint64_t new_log = FirstLogFile();
-  ASSERT_LE(old_log+3, new_log);
+  ASSERT_LE(old_log + 3, new_log);
   ASSERT_EQ("bar2", Get("foo"));
   ASSERT_EQ("world", Get("hello"));
   ASSERT_EQ("there", Get("hi"));
@@ -316,7 +304,7 @@ TEST(RecoveryTest, MultipleLogFiles) {
 
   // Check that introducing an older log file does not cause it to be re-read.
   Close();
-  MakeLogFile(old_log+1, 2000, "hello", "stale write");
+  MakeLogFile(old_log + 1, 2000, "hello", "stale write");
   Open();
   ASSERT_LE(1, NumTables());
   ASSERT_EQ(1, NumLogs());
@@ -339,6 +327,4 @@ TEST(RecoveryTest, ManifestMissing) {
 
 }  // namespace leveldb
 
-int main(int argc, char** argv) {
-  return leveldb::test::RunAllTests();
-}
+int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
diff --git a/db/repair.cc b/db/repair.cc
index df8dcd2..d5ecc45 100644
--- a/db/repair.cc
+++ b/db/repair.cc
@@ -84,9 +84,7 @@ class Repairer {
           "recovered %d files; %llu bytes. "
           "Some data may have been lost. "
           "****",
-          dbname_.c_str(),
-          static_cast<int>(tables_.size()),
-          bytes);
+          dbname_.c_str(), static_cast<int>(tables_.size()), bytes);
     }
     return status;
   }
@@ -152,8 +150,7 @@ class Repairer {
       Status status = ConvertLogToTable(logs_[i]);
       if (!status.ok()) {
         Log(options_.info_log, "Log #%llu: ignoring conversion error: %s",
-            (unsigned long long) logs_[i],
-            status.ToString().c_str());
+            (unsigned long long)logs_[i], status.ToString().c_str());
       }
       ArchiveFile(logname);
     }
@@ -167,8 +164,7 @@ class Repairer {
       virtual void Corruption(size_t bytes, const Status& s) {
         // We print error messages for corruption, but continue repairing.
         Log(info_log, "Log #%llu: dropping %d bytes; %s",
-            (unsigned long long) lognum,
-            static_cast<int>(bytes),
+            (unsigned long long)lognum, static_cast<int>(bytes),
             s.ToString().c_str());
       }
     };
@@ -190,8 +186,8 @@ class Repairer {
     // corruptions cause entire commits to be skipped instead of
     // propagating bad information (like overly large sequence
     // numbers).
-    log::Reader reader(lfile, &reporter, false/*do not checksum*/,
-                       0/*initial_offset*/);
+    log::Reader reader(lfile, &reporter, false /*do not checksum*/,
+                       0 /*initial_offset*/);
 
     // Read all the records and add to a memtable
     std::string scratch;
@@ -202,8 +198,8 @@ class Repairer {
     int counter = 0;
     while (reader.ReadRecord(&record, &scratch)) {
       if (record.size() < 12) {
-        reporter.Corruption(
-            record.size(), Status::Corruption("log record too small"));
+        reporter.Corruption(record.size(),
+                            Status::Corruption("log record too small"));
         continue;
       }
       WriteBatchInternal::SetContents(&batch, record);
@@ -212,8 +208,7 @@ class Repairer {
         counter += WriteBatchInternal::Count(&batch);
       } else {
         Log(options_.info_log, "Log #%llu: ignoring %s",
-            (unsigned long long) log,
-            status.ToString().c_str());
+            (unsigned long long)log, status.ToString().c_str());
         status = Status::OK();  // Keep going with rest of file
       }
     }
@@ -234,9 +229,7 @@ class Repairer {
       }
     }
     Log(options_.info_log, "Log #%llu: %d ops saved to Table #%llu %s",
-        (unsigned long long) log,
-        counter,
-        (unsigned long long) meta.number,
+        (unsigned long long)log, counter, (unsigned long long)meta.number,
         status.ToString().c_str());
     return status;
   }
@@ -272,8 +265,7 @@ class Repairer {
       ArchiveFile(TableFileName(dbname_, number));
       ArchiveFile(SSTTableFileName(dbname_, number));
       Log(options_.info_log, "Table #%llu: dropped: %s",
-          (unsigned long long) t.meta.number,
-          status.ToString().c_str());
+          (unsigned long long)t.meta.number, status.ToString().c_str());
       return;
     }
 
@@ -287,8 +279,7 @@ class Repairer {
       Slice key = iter->key();
       if (!ParseInternalKey(key, &parsed)) {
         Log(options_.info_log, "Table #%llu: unparsable key %s",
-            (unsigned long long) t.meta.number,
-            EscapeString(key).c_str());
+            (unsigned long long)t.meta.number, EscapeString(key).c_str());
         continue;
       }
 
@@ -307,9 +298,7 @@ class Repairer {
     }
     delete iter;
     Log(options_.info_log, "Table #%llu: %d entries %s",
-        (unsigned long long) t.meta.number,
-        counter,
-        status.ToString().c_str());
+        (unsigned long long)t.meta.number, counter, status.ToString().c_str());
 
     if (status.ok()) {
       tables_.push_back(t);
@@ -363,7 +352,7 @@ class Repairer {
       s = env_->RenameFile(copy, orig);
       if (s.ok()) {
         Log(options_.info_log, "Table #%llu: %d entries repaired",
-            (unsigned long long) t.meta.number, counter);
+            (unsigned long long)t.meta.number, counter);
         tables_.push_back(t);
       }
     }
@@ -395,11 +384,11 @@ class Repairer {
     for (size_t i = 0; i < tables_.size(); i++) {
       // TODO(opt): separate out into multiple levels
       const TableInfo& t = tables_[i];
-      edit_.AddFile(0, t.meta.number, t.meta.file_size,
-                    t.meta.smallest, t.meta.largest);
+      edit_.AddFile(0, t.meta.number, t.meta.file_size, t.meta.smallest,
+                    t.meta.largest);
     }
 
-    //fprintf(stderr, "NewDescriptor:\n%s\n", edit_.DebugString().c_str());
+    // fprintf(stderr, "NewDescriptor:\n%s\n", edit_.DebugString().c_str());
     {
       log::Writer log(file);
       std::string record;
@@ -447,8 +436,8 @@ class Repairer {
     new_file.append("/");
     new_file.append((slash == nullptr) ? fname.c_str() : slash + 1);
     Status s = env_->RenameFile(fname, new_file);
-    Log(options_.info_log, "Archiving %s: %s\n",
-        fname.c_str(), s.ToString().c_str());
+    Log(options_.info_log, "Archiving %s: %s\n", fname.c_str(),
+        s.ToString().c_str());
   }
 };
 }  // namespace
diff --git a/db/skiplist.h b/db/skiplist.h
index 7ac914b..05e5733 100644
--- a/db/skiplist.h
+++ b/db/skiplist.h
@@ -38,7 +38,7 @@ namespace leveldb {
 
 class Arena;
 
-template<typename Key, class Comparator>
+template <typename Key, class Comparator>
 class SkipList {
  private:
   struct Node;
@@ -100,13 +100,13 @@ class SkipList {
 
   // Immutable after construction
   Comparator const compare_;
-  Arena* const arena_;    // Arena used for allocations of nodes
+  Arena* const arena_;  // Arena used for allocations of nodes
 
   Node* const head_;
 
   // Modified only by Insert().  Read racily by readers, but stale
   // values are ok.
-  std::atomic<int> max_height_;   // Height of the entire list
+  std::atomic<int> max_height_;  // Height of the entire list
 
   inline int GetMaxHeight() const {
     return max_height_.load(std::memory_order_relaxed);
@@ -143,9 +143,9 @@ class SkipList {
 };
 
 // Implementation details follow
-template<typename Key, class Comparator>
+template <typename Key, class Comparator>
 struct SkipList<Key, Comparator>::Node {
-  explicit Node(const Key& k) : key(k) { }
+  explicit Node(const Key& k) : key(k) {}
 
   Key const key;
 
@@ -179,38 +179,38 @@ struct SkipList<Key, Comparator>::Node {
   std::atomic<Node*> next_[1];
 };
 
-template<typename Key, class Comparator>
-typename SkipList<Key, Comparator>::Node*
-SkipList<Key, Comparator>::NewNode(const Key& key, int height) {
+template <typename Key, class Comparator>
+typename SkipList<Key, Comparator>::Node* SkipList<Key, Comparator>::NewNode(
+    const Key& key, int height) {
   char* const node_memory = arena_->AllocateAligned(
       sizeof(Node) + sizeof(std::atomic<Node*>) * (height - 1));
   return new (node_memory) Node(key);
 }
 
-template<typename Key, class Comparator>
+template <typename Key, class Comparator>
 inline SkipList<Key, Comparator>::Iterator::Iterator(const SkipList* list) {
   list_ = list;
   node_ = nullptr;
 }
 
-template<typename Key, class Comparator>
+template <typename Key, class Comparator>
 inline bool SkipList<Key, Comparator>::Iterator::Valid() const {
   return node_ != nullptr;
 }
 
-template<typename Key, class Comparator>
+template <typename Key, class Comparator>
 inline const Key& SkipList<Key, Comparator>::Iterator::key() const {
   assert(Valid());
   return node_->key;
 }
 
-template<typename Key, class Comparator>
+template <typename Key, class Comparator>
 inline void SkipList<Key, Comparator>::Iterator::Next() {
   assert(Valid());
   node_ = node_->Next(0);
 }
 
-template<typename Key, class Comparator>
+template <typename Key, class Comparator>
 inline void SkipList<Key, Comparator>::Iterator::Prev() {
   // Instead of using explicit "prev" links, we just search for the
   // last node that falls before key.
@@ -221,17 +221,17 @@ inline void SkipList<Key, Comparator>::Iterator::Prev() {
   }
 }
 
-template<typename Key, class Comparator>
+template <typename Key, class Comparator>
 inline void SkipList<Key, Comparator>::Iterator::Seek(const Key& target) {
   node_ = list_->FindGreaterOrEqual(target, nullptr);
 }
 
-template<typename Key, class Comparator>
+template <typename Key, class Comparator>
 inline void SkipList<Key, Comparator>::Iterator::SeekToFirst() {
   node_ = list_->head_->Next(0);
 }
 
-template<typename Key, class Comparator>
+template <typename Key, class Comparator>
 inline void SkipList<Key, Comparator>::Iterator::SeekToLast() {
   node_ = list_->FindLast();
   if (node_ == list_->head_) {
@@ -239,7 +239,7 @@ inline void SkipList<Key, Comparator>::Iterator::SeekToLast() {
   }
 }
 
-template<typename Key, class Comparator>
+template <typename Key, class Comparator>
 int SkipList<Key, Comparator>::RandomHeight() {
   // Increase height with probability 1 in kBranching
   static const unsigned int kBranching = 4;
@@ -252,13 +252,13 @@ int SkipList<Key, Comparator>::RandomHeight() {
   return height;
 }
 
-template<typename Key, class Comparator>
+template <typename Key, class Comparator>
 bool SkipList<Key, Comparator>::KeyIsAfterNode(const Key& key, Node* n) const {
   // null n is considered infinite
   return (n != nullptr) && (compare_(n->key, key) < 0);
 }
 
-template<typename Key, class Comparator>
+template <typename Key, class Comparator>
 typename SkipList<Key, Comparator>::Node*
 SkipList<Key, Comparator>::FindGreaterOrEqual(const Key& key,
                                               Node** prev) const {
@@ -281,7 +281,7 @@ SkipList<Key, Comparator>::FindGreaterOrEqual(const Key& key,
   }
 }
 
-template<typename Key, class Comparator>
+template <typename Key, class Comparator>
 typename SkipList<Key, Comparator>::Node*
 SkipList<Key, Comparator>::FindLessThan(const Key& key) const {
   Node* x = head_;
@@ -302,7 +302,7 @@ SkipList<Key, Comparator>::FindLessThan(const Key& key) const {
   }
 }
 
-template<typename Key, class Comparator>
+template <typename Key, class Comparator>
 typename SkipList<Key, Comparator>::Node* SkipList<Key, Comparator>::FindLast()
     const {
   Node* x = head_;
@@ -322,7 +322,7 @@ typename SkipList<Key, Comparator>::Node* SkipList<Key, Comparator>::FindLast()
   }
 }
 
-template<typename Key, class Comparator>
+template <typename Key, class Comparator>
 SkipList<Key, Comparator>::SkipList(Comparator cmp, Arena* arena)
     : compare_(cmp),
       arena_(arena),
@@ -334,7 +334,7 @@ SkipList<Key, Comparator>::SkipList(Comparator cmp, Arena* arena)
   }
 }
 
-template<typename Key, class Comparator>
+template <typename Key, class Comparator>
 void SkipList<Key, Comparator>::Insert(const Key& key) {
   // TODO(opt): We can use a barrier-free variant of FindGreaterOrEqual()
   // here since Insert() is externally synchronized.
@@ -368,7 +368,7 @@ void SkipList<Key, Comparator>::Insert(const Key& key) {
   }
 }
 
-template<typename Key, class Comparator>
+template <typename Key, class Comparator>
 bool SkipList<Key, Comparator>::Contains(const Key& key) const {
   Node* x = FindGreaterOrEqual(key, nullptr);
   if (x != nullptr && Equal(key, x->key)) {
diff --git a/db/skiplist_test.cc b/db/skiplist_test.cc
index 38c1941..9fa2d96 100644
--- a/db/skiplist_test.cc
+++ b/db/skiplist_test.cc
@@ -31,7 +31,7 @@ struct Comparator {
   }
 };
 
-class SkipTest { };
+class SkipTest {};
 
 TEST(SkipTest, Empty) {
   Arena arena;
@@ -117,8 +117,7 @@ TEST(SkipTest, InsertAndLookup) {
 
     // Compare against model iterator
     for (std::set<Key>::reverse_iterator model_iter = keys.rbegin();
-         model_iter != keys.rend();
-         ++model_iter) {
+         model_iter != keys.rend(); ++model_iter) {
       ASSERT_TRUE(iter.Valid());
       ASSERT_EQ(*model_iter, iter.key());
       iter.Prev();
@@ -160,12 +159,12 @@ class ConcurrentTest {
   static uint64_t hash(Key key) { return key & 0xff; }
 
   static uint64_t HashNumbers(uint64_t k, uint64_t g) {
-    uint64_t data[2] = { k, g };
+    uint64_t data[2] = {k, g};
     return Hash(reinterpret_cast<char*>(data), sizeof(data), 0);
   }
 
   static Key MakeKey(uint64_t k, uint64_t g) {
-    assert(sizeof(Key) == sizeof(uint64_t));
+    static_assert(sizeof(Key) == sizeof(uint64_t), "");
     assert(k <= K);  // We sometimes pass K to seek to the end of the skiplist
     assert(g <= 0xffffffffu);
     return ((k << 40) | (g << 8) | (HashNumbers(k, g) & 0xff));
@@ -195,9 +194,7 @@ class ConcurrentTest {
     void Set(int k, int v) {
       generation[k].store(v, std::memory_order_release);
     }
-    int Get(int k) {
-      return generation[k].load(std::memory_order_acquire);
-    }
+    int Get(int k) { return generation[k].load(std::memory_order_acquire); }
 
     State() {
       for (int k = 0; k < K; k++) {
@@ -216,7 +213,7 @@ class ConcurrentTest {
   SkipList<Key, Comparator> list_;
 
  public:
-  ConcurrentTest() : list_(Comparator(), &arena_) { }
+  ConcurrentTest() : list_(Comparator(), &arena_) {}
 
   // REQUIRES: External synchronization
   void WriteStep(Random* rnd) {
@@ -255,11 +252,9 @@ class ConcurrentTest {
         // Note that generation 0 is never inserted, so it is ok if
         // <*,0,*> is missing.
         ASSERT_TRUE((gen(pos) == 0) ||
-                    (gen(pos) > static_cast<Key>(initial_state.Get(key(pos))))
-                    ) << "key: " << key(pos)
-                      << "; gen: " << gen(pos)
-                      << "; initgen: "
-                      << initial_state.Get(key(pos));
+                    (gen(pos) > static_cast<Key>(initial_state.Get(key(pos)))))
+            << "key: " << key(pos) << "; gen: " << gen(pos)
+            << "; initgen: " << initial_state.Get(key(pos));
 
         // Advance to next key in the valid key space
         if (key(pos) < key(current)) {
@@ -305,17 +300,10 @@ class TestState {
   int seed_;
   std::atomic<bool> quit_flag_;
 
-  enum ReaderState {
-    STARTING,
-    RUNNING,
-    DONE
-  };
+  enum ReaderState { STARTING, RUNNING, DONE };
 
   explicit TestState(int s)
-      : seed_(s),
-        quit_flag_(false),
-        state_(STARTING),
-        state_cv_(&mu_) {}
+      : seed_(s), quit_flag_(false), state_(STARTING), state_cv_(&mu_) {}
 
   void Wait(ReaderState s) LOCKS_EXCLUDED(mu_) {
     mu_.Lock();
@@ -378,6 +366,4 @@ TEST(SkipTest, Concurrent5) { RunConcurrent(5); }
 
 }  // namespace leveldb
 
-int main(int argc, char** argv) {
-  return leveldb::test::RunAllTests();
-}
+int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
diff --git a/db/snapshot.h b/db/snapshot.h
index c43d9f9..9f1d664 100644
--- a/db/snapshot.h
+++ b/db/snapshot.h
@@ -44,8 +44,14 @@ class SnapshotList {
   }
 
   bool empty() const { return head_.next_ == &head_; }
-  SnapshotImpl* oldest() const { assert(!empty()); return head_.next_; }
-  SnapshotImpl* newest() const { assert(!empty()); return head_.prev_; }
+  SnapshotImpl* oldest() const {
+    assert(!empty());
+    return head_.next_;
+  }
+  SnapshotImpl* newest() const {
+    assert(!empty());
+    return head_.prev_;
+  }
 
   // Creates a SnapshotImpl and appends it to the end of the list.
   SnapshotImpl* New(SequenceNumber sequence_number) {
diff --git a/db/table_cache.cc b/db/table_cache.cc
index 7226d3b..73f05fd 100644
--- a/db/table_cache.cc
+++ b/db/table_cache.cc
@@ -29,18 +29,14 @@ static void UnrefEntry(void* arg1, void* arg2) {
   cache->Release(h);
 }
 
-TableCache::TableCache(const std::string& dbname,
-                       const Options& options,
+TableCache::TableCache(const std::string& dbname, const Options& options,
                        int entries)
     : env_(options.env),
       dbname_(dbname),
       options_(options),
-      cache_(NewLRUCache(entries)) {
-}
+      cache_(NewLRUCache(entries)) {}
 
-TableCache::~TableCache() {
-  delete cache_;
-}
+TableCache::~TableCache() { delete cache_; }
 
 Status TableCache::FindTable(uint64_t file_number, uint64_t file_size,
                              Cache::Handle** handle) {
@@ -80,8 +76,7 @@ Status TableCache::FindTable(uint64_t file_number, uint64_t file_size,
 }
 
 Iterator* TableCache::NewIterator(const ReadOptions& options,
-                                  uint64_t file_number,
-                                  uint64_t file_size,
+                                  uint64_t file_number, uint64_t file_size,
                                   Table** tableptr) {
   if (tableptr != nullptr) {
     *tableptr = nullptr;
@@ -102,17 +97,15 @@ Iterator* TableCache::NewIterator(const ReadOptions& options,
   return result;
 }
 
-Status TableCache::Get(const ReadOptions& options,
-                       uint64_t file_number,
-                       uint64_t file_size,
-                       const Slice& k,
-                       void* arg,
-                       void (*saver)(void*, const Slice&, const Slice&)) {
+Status TableCache::Get(const ReadOptions& options, uint64_t file_number,
+                       uint64_t file_size, const Slice& k, void* arg,
+                       void (*handle_result)(void*, const Slice&,
+                                             const Slice&)) {
   Cache::Handle* handle = nullptr;
   Status s = FindTable(file_number, file_size, &handle);
   if (s.ok()) {
     Table* t = reinterpret_cast<TableAndFile*>(cache_->Value(handle))->table;
-    s = t->InternalGet(options, k, arg, saver);
+    s = t->InternalGet(options, k, arg, handle_result);
     cache_->Release(handle);
   }
   return s;
diff --git a/db/table_cache.h b/db/table_cache.h
index ae8bee5..21ae92d 100644
--- a/db/table_cache.h
+++ b/db/table_cache.h
@@ -7,8 +7,10 @@
 #ifndef STORAGE_LEVELDB_DB_TABLE_CACHE_H_
 #define STORAGE_LEVELDB_DB_TABLE_CACHE_H_
 
-#include <string>
 #include <stdint.h>
+
+#include <string>
+
 #include "db/dbformat.h"
 #include "leveldb/cache.h"
 #include "leveldb/table.h"
@@ -30,18 +32,13 @@ class TableCache {
   // underlies the returned iterator.  The returned "*tableptr" object is owned
   // by the cache and should not be deleted, and is valid for as long as the
   // returned iterator is live.
-  Iterator* NewIterator(const ReadOptions& options,
-                        uint64_t file_number,
-                        uint64_t file_size,
-                        Table** tableptr = nullptr);
+  Iterator* NewIterator(const ReadOptions& options, uint64_t file_number,
+                        uint64_t file_size, Table** tableptr = nullptr);
 
   // If a seek to internal key "k" in specified file finds an entry,
   // call (*handle_result)(arg, found_key, found_value).
-  Status Get(const ReadOptions& options,
-             uint64_t file_number,
-             uint64_t file_size,
-             const Slice& k,
-             void* arg,
+  Status Get(const ReadOptions& options, uint64_t file_number,
+             uint64_t file_size, const Slice& k, void* arg,
              void (*handle_result)(void*, const Slice&, const Slice&));
 
   // Evict any entry for the specified file number
diff --git a/db/version_edit.cc b/db/version_edit.cc
index b7a366d..44a4d02 100644
--- a/db/version_edit.cc
+++ b/db/version_edit.cc
@@ -12,15 +12,15 @@ namespace leveldb {
 // Tag numbers for serialized VersionEdit.  These numbers are written to
 // disk and should not be changed.
 enum Tag {
-  kComparator           = 1,
-  kLogNumber            = 2,
-  kNextFileNumber       = 3,
-  kLastSequence         = 4,
-  kCompactPointer       = 5,
-  kDeletedFile          = 6,
-  kNewFile              = 7,
+  kComparator = 1,
+  kLogNumber = 2,
+  kNextFileNumber = 3,
+  kLastSequence = 4,
+  kCompactPointer = 5,
+  kDeletedFile = 6,
+  kNewFile = 7,
   // 8 was used for large value refs
-  kPrevLogNumber        = 9
+  kPrevLogNumber = 9
 };
 
 void VersionEdit::Clear() {
@@ -67,8 +67,7 @@ void VersionEdit::EncodeTo(std::string* dst) const {
   }
 
   for (DeletedFileSet::const_iterator iter = deleted_files_.begin();
-       iter != deleted_files_.end();
-       ++iter) {
+       iter != deleted_files_.end(); ++iter) {
     PutVarint32(dst, kDeletedFile);
     PutVarint32(dst, iter->first);   // level
     PutVarint64(dst, iter->second);  // file number
@@ -97,8 +96,7 @@ static bool GetInternalKey(Slice* input, InternalKey* dst) {
 
 static bool GetLevel(Slice* input, int* level) {
   uint32_t v;
-  if (GetVarint32(input, &v) &&
-      v < config::kNumLevels) {
+  if (GetVarint32(input, &v) && v < config::kNumLevels) {
     *level = v;
     return true;
   } else {
@@ -163,8 +161,7 @@ Status VersionEdit::DecodeFrom(const Slice& src) {
         break;
 
       case kCompactPointer:
-        if (GetLevel(&input, &level) &&
-            GetInternalKey(&input, &key)) {
+        if (GetLevel(&input, &level) && GetInternalKey(&input, &key)) {
           compact_pointers_.push_back(std::make_pair(level, key));
         } else {
           msg = "compaction pointer";
@@ -172,8 +169,7 @@ Status VersionEdit::DecodeFrom(const Slice& src) {
         break;
 
       case kDeletedFile:
-        if (GetLevel(&input, &level) &&
-            GetVarint64(&input, &number)) {
+        if (GetLevel(&input, &level) && GetVarint64(&input, &number)) {
           deleted_files_.insert(std::make_pair(level, number));
         } else {
           msg = "deleted file";
@@ -181,8 +177,7 @@ Status VersionEdit::DecodeFrom(const Slice& src) {
         break;
 
       case kNewFile:
-        if (GetLevel(&input, &level) &&
-            GetVarint64(&input, &f.number) &&
+        if (GetLevel(&input, &level) && GetVarint64(&input, &f.number) &&
             GetVarint64(&input, &f.file_size) &&
             GetInternalKey(&input, &f.smallest) &&
             GetInternalKey(&input, &f.largest)) {
@@ -239,8 +234,7 @@ std::string VersionEdit::DebugString() const {
     r.append(compact_pointers_[i].second.DebugString());
   }
   for (DeletedFileSet::const_iterator iter = deleted_files_.begin();
-       iter != deleted_files_.end();
-       ++iter) {
+       iter != deleted_files_.end(); ++iter) {
     r.append("\n  DeleteFile: ");
     AppendNumberTo(&r, iter->first);
     r.append(" ");
diff --git a/db/version_edit.h b/db/version_edit.h
index eaef77b..3daf4ef 100644
--- a/db/version_edit.h
+++ b/db/version_edit.h
@@ -8,6 +8,7 @@
 #include <set>
 #include <utility>
 #include <vector>
+
 #include "db/dbformat.h"
 
 namespace leveldb {
@@ -16,19 +17,19 @@ class VersionSet;
 
 struct FileMetaData {
   int refs;
-  int allowed_seeks;          // Seeks allowed until compaction
+  int allowed_seeks;  // Seeks allowed until compaction
   uint64_t number;
-  uint64_t file_size;         // File size in bytes
-  InternalKey smallest;       // Smallest internal key served by table
-  InternalKey largest;        // Largest internal key served by table
+  uint64_t file_size;    // File size in bytes
+  InternalKey smallest;  // Smallest internal key served by table
+  InternalKey largest;   // Largest internal key served by table
 
-  FileMetaData() : refs(0), allowed_seeks(1 << 30), file_size(0) { }
+  FileMetaData() : refs(0), allowed_seeks(1 << 30), file_size(0) {}
 };
 
 class VersionEdit {
  public:
   VersionEdit() { Clear(); }
-  ~VersionEdit() { }
+  ~VersionEdit() {}
 
   void Clear();
 
@@ -59,10 +60,8 @@ class VersionEdit {
   // Add the specified file at the specified number.
   // REQUIRES: This version has not been saved (see VersionSet::SaveTo)
   // REQUIRES: "smallest" and "largest" are smallest and largest keys in file
-  void AddFile(int level, uint64_t file,
-               uint64_t file_size,
-               const InternalKey& smallest,
-               const InternalKey& largest) {
+  void AddFile(int level, uint64_t file, uint64_t file_size,
+               const InternalKey& smallest, const InternalKey& largest) {
     FileMetaData f;
     f.number = file;
     f.file_size = file_size;
@@ -84,7 +83,7 @@ class VersionEdit {
  private:
   friend class VersionSet;
 
-  typedef std::set< std::pair<int, uint64_t> > DeletedFileSet;
+  typedef std::set<std::pair<int, uint64_t> > DeletedFileSet;
 
   std::string comparator_;
   uint64_t log_number_;
@@ -97,9 +96,9 @@ class VersionEdit {
   bool has_next_file_number_;
   bool has_last_sequence_;
 
-  std::vector< std::pair<int, InternalKey> > compact_pointers_;
+  std::vector<std::pair<int, InternalKey> > compact_pointers_;
   DeletedFileSet deleted_files_;
-  std::vector< std::pair<int, FileMetaData> > new_files_;
+  std::vector<std::pair<int, FileMetaData> > new_files_;
 };
 
 }  // namespace leveldb
diff --git a/db/version_edit_test.cc b/db/version_edit_test.cc
index 280310b..0b7cda8 100644
--- a/db/version_edit_test.cc
+++ b/db/version_edit_test.cc
@@ -17,7 +17,7 @@ static void TestEncodeDecode(const VersionEdit& edit) {
   ASSERT_EQ(encoded, encoded2);
 }
 
-class VersionEditTest { };
+class VersionEditTest {};
 
 TEST(VersionEditTest, EncodeDecode) {
   static const uint64_t kBig = 1ull << 50;
@@ -41,6 +41,4 @@ TEST(VersionEditTest, EncodeDecode) {
 
 }  // namespace leveldb
 
-int main(int argc, char** argv) {
-  return leveldb::test::RunAllTests();
-}
+int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
diff --git a/db/version_set.cc b/db/version_set.cc
index 56493ac..96a92cc 100644
--- a/db/version_set.cc
+++ b/db/version_set.cc
@@ -4,8 +4,10 @@
 
 #include "db/version_set.h"
 
-#include <algorithm>
 #include <stdio.h>
+
+#include <algorithm>
+
 #include "db/filename.h"
 #include "db/log_reader.h"
 #include "db/log_writer.h"
@@ -84,8 +86,7 @@ Version::~Version() {
 }
 
 int FindFile(const InternalKeyComparator& icmp,
-             const std::vector<FileMetaData*>& files,
-             const Slice& key) {
+             const std::vector<FileMetaData*>& files, const Slice& key) {
   uint32_t left = 0;
   uint32_t right = files.size();
   while (left < right) {
@@ -104,26 +105,25 @@ int FindFile(const InternalKeyComparator& icmp,
   return right;
 }
 
-static bool AfterFile(const Comparator* ucmp,
-                      const Slice* user_key, const FileMetaData* f) {
+static bool AfterFile(const Comparator* ucmp, const Slice* user_key,
+                      const FileMetaData* f) {
   // null user_key occurs before all keys and is therefore never after *f
   return (user_key != nullptr &&
           ucmp->Compare(*user_key, f->largest.user_key()) > 0);
 }
 
-static bool BeforeFile(const Comparator* ucmp,
-                       const Slice* user_key, const FileMetaData* f) {
+static bool BeforeFile(const Comparator* ucmp, const Slice* user_key,
+                       const FileMetaData* f) {
   // null user_key occurs after all keys and is therefore never before *f
   return (user_key != nullptr &&
           ucmp->Compare(*user_key, f->smallest.user_key()) < 0);
 }
 
-bool SomeFileOverlapsRange(
-    const InternalKeyComparator& icmp,
-    bool disjoint_sorted_files,
-    const std::vector<FileMetaData*>& files,
-    const Slice* smallest_user_key,
-    const Slice* largest_user_key) {
+bool SomeFileOverlapsRange(const InternalKeyComparator& icmp,
+                           bool disjoint_sorted_files,
+                           const std::vector<FileMetaData*>& files,
+                           const Slice* smallest_user_key,
+                           const Slice* largest_user_key) {
   const Comparator* ucmp = icmp.user_comparator();
   if (!disjoint_sorted_files) {
     // Need to check against all files
@@ -143,7 +143,8 @@ bool SomeFileOverlapsRange(
   uint32_t index = 0;
   if (smallest_user_key != nullptr) {
     // Find the earliest possible internal key for smallest_user_key
-    InternalKey small_key(*smallest_user_key, kMaxSequenceNumber,kValueTypeForSeek);
+    InternalKey small_key(*smallest_user_key, kMaxSequenceNumber,
+                          kValueTypeForSeek);
     index = FindFile(icmp, files, small_key.Encode());
   }
 
@@ -164,13 +165,9 @@ class Version::LevelFileNumIterator : public Iterator {
  public:
   LevelFileNumIterator(const InternalKeyComparator& icmp,
                        const std::vector<FileMetaData*>* flist)
-      : icmp_(icmp),
-        flist_(flist),
-        index_(flist->size()) {        // Marks as invalid
-  }
-  virtual bool Valid() const {
-    return index_ < flist_->size();
+      : icmp_(icmp), flist_(flist), index_(flist->size()) {  // Marks as invalid
   }
+  virtual bool Valid() const { return index_ < flist_->size(); }
   virtual void Seek(const Slice& target) {
     index_ = FindFile(icmp_, *flist_, target);
   }
@@ -197,10 +194,11 @@ class Version::LevelFileNumIterator : public Iterator {
   Slice value() const {
     assert(Valid());
     EncodeFixed64(value_buf_, (*flist_)[index_]->number);
-    EncodeFixed64(value_buf_+8, (*flist_)[index_]->file_size);
+    EncodeFixed64(value_buf_ + 8, (*flist_)[index_]->file_size);
     return Slice(value_buf_, sizeof(value_buf_));
   }
   virtual Status status() const { return Status::OK(); }
+
  private:
   const InternalKeyComparator icmp_;
   const std::vector<FileMetaData*>* const flist_;
@@ -210,16 +208,14 @@ class Version::LevelFileNumIterator : public Iterator {
   mutable char value_buf_[16];
 };
 
-static Iterator* GetFileIterator(void* arg,
-                                 const ReadOptions& options,
+static Iterator* GetFileIterator(void* arg, const ReadOptions& options,
                                  const Slice& file_value) {
   TableCache* cache = reinterpret_cast<TableCache*>(arg);
   if (file_value.size() != 16) {
     return NewErrorIterator(
         Status::Corruption("FileReader invoked with unexpected value"));
   } else {
-    return cache->NewIterator(options,
-                              DecodeFixed64(file_value.data()),
+    return cache->NewIterator(options, DecodeFixed64(file_value.data()),
                               DecodeFixed64(file_value.data() + 8));
   }
 }
@@ -227,17 +223,16 @@ static Iterator* GetFileIterator(void* arg,
 Iterator* Version::NewConcatenatingIterator(const ReadOptions& options,
                                             int level) const {
   return NewTwoLevelIterator(
-      new LevelFileNumIterator(vset_->icmp_, &files_[level]),
-      &GetFileIterator, vset_->table_cache_, options);
+      new LevelFileNumIterator(vset_->icmp_, &files_[level]), &GetFileIterator,
+      vset_->table_cache_, options);
 }
 
 void Version::AddIterators(const ReadOptions& options,
                            std::vector<Iterator*>* iters) {
   // Merge all level zero files together since they may overlap
   for (size_t i = 0; i < files_[0].size(); i++) {
-    iters->push_back(
-        vset_->table_cache_->NewIterator(
-            options, files_[0][i]->number, files_[0][i]->file_size));
+    iters->push_back(vset_->table_cache_->NewIterator(
+        options, files_[0][i]->number, files_[0][i]->file_size));
   }
 
   // For levels > 0, we can use a concatenating iterator that sequentially
@@ -264,7 +259,7 @@ struct Saver {
   Slice user_key;
   std::string* value;
 };
-}
+}  // namespace
 static void SaveValue(void* arg, const Slice& ikey, const Slice& v) {
   Saver* s = reinterpret_cast<Saver*>(arg);
   ParsedInternalKey parsed_key;
@@ -284,8 +279,7 @@ static bool NewestFirst(FileMetaData* a, FileMetaData* b) {
   return a->number > b->number;
 }
 
-void Version::ForEachOverlapping(Slice user_key, Slice internal_key,
-                                 void* arg,
+void Version::ForEachOverlapping(Slice user_key, Slice internal_key, void* arg,
                                  bool (*func)(void*, int, FileMetaData*)) {
   // TODO(sanjay): Change Version::Get() to use this function.
   const Comparator* ucmp = vset_->icmp_.user_comparator();
@@ -329,10 +323,8 @@ void Version::ForEachOverlapping(Slice user_key, Slice internal_key,
   }
 }
 
-Status Version::Get(const ReadOptions& options,
-                    const LookupKey& k,
-                    std::string* value,
-                    GetStats* stats) {
+Status Version::Get(const ReadOptions& options, const LookupKey& k,
+                    std::string* value, GetStats* stats) {
   Slice ikey = k.internal_key();
   Slice user_key = k.user_key();
   const Comparator* ucmp = vset_->icmp_.user_comparator();
@@ -405,14 +397,14 @@ Status Version::Get(const ReadOptions& options,
       saver.ucmp = ucmp;
       saver.user_key = user_key;
       saver.value = value;
-      s = vset_->table_cache_->Get(options, f->number, f->file_size,
-                                   ikey, &saver, SaveValue);
+      s = vset_->table_cache_->Get(options, f->number, f->file_size, ikey,
+                                   &saver, SaveValue);
       if (!s.ok()) {
         return s;
       }
       switch (saver.state) {
         case kNotFound:
-          break;      // Keep searching in other files
+          break;  // Keep searching in other files
         case kFound:
           return s;
         case kDeleted:
@@ -479,9 +471,7 @@ bool Version::RecordReadSample(Slice internal_key) {
   return false;
 }
 
-void Version::Ref() {
-  ++refs_;
-}
+void Version::Ref() { ++refs_; }
 
 void Version::Unref() {
   assert(this != &vset_->dummy_versions_);
@@ -492,16 +482,14 @@ void Version::Unref() {
   }
 }
 
-bool Version::OverlapInLevel(int level,
-                             const Slice* smallest_user_key,
+bool Version::OverlapInLevel(int level, const Slice* smallest_user_key,
                              const Slice* largest_user_key) {
   return SomeFileOverlapsRange(vset_->icmp_, (level > 0), files_[level],
                                smallest_user_key, largest_user_key);
 }
 
-int Version::PickLevelForMemTableOutput(
-    const Slice& smallest_user_key,
-    const Slice& largest_user_key) {
+int Version::PickLevelForMemTableOutput(const Slice& smallest_user_key,
+                                        const Slice& largest_user_key) {
   int level = 0;
   if (!OverlapInLevel(0, &smallest_user_key, &largest_user_key)) {
     // Push to next level if there is no overlap in next level,
@@ -528,11 +516,9 @@ int Version::PickLevelForMemTableOutput(
 }
 
 // Store in "*inputs" all files in "level" that overlap [begin,end]
-void Version::GetOverlappingInputs(
-    int level,
-    const InternalKey* begin,
-    const InternalKey* end,
-    std::vector<FileMetaData*>* inputs) {
+void Version::GetOverlappingInputs(int level, const InternalKey* begin,
+                                   const InternalKey* end,
+                                   std::vector<FileMetaData*>* inputs) {
   assert(level >= 0);
   assert(level < config::kNumLevels);
   inputs->clear();
@@ -544,7 +530,7 @@ void Version::GetOverlappingInputs(
     user_end = end->user_key();
   }
   const Comparator* user_cmp = vset_->icmp_.user_comparator();
-  for (size_t i = 0; i < files_[level].size(); ) {
+  for (size_t i = 0; i < files_[level].size();) {
     FileMetaData* f = files_[level][i++];
     const Slice file_start = f->smallest.user_key();
     const Slice file_limit = f->largest.user_key();
@@ -561,8 +547,8 @@ void Version::GetOverlappingInputs(
           user_begin = file_start;
           inputs->clear();
           i = 0;
-        } else if (end != nullptr && user_cmp->Compare(file_limit,
-                                                       user_end) > 0) {
+        } else if (end != nullptr &&
+                   user_cmp->Compare(file_limit, user_end) > 0) {
           user_end = file_limit;
           inputs->clear();
           i = 0;
@@ -630,9 +616,7 @@ class VersionSet::Builder {
 
  public:
   // Initialize a builder with the files from *base and other info from *vset
-  Builder(VersionSet* vset, Version* base)
-      : vset_(vset),
-        base_(base) {
+  Builder(VersionSet* vset, Version* base) : vset_(vset), base_(base) {
     base_->Ref();
     BySmallestKey cmp;
     cmp.internal_comparator = &vset_->icmp_;
@@ -646,8 +630,8 @@ class VersionSet::Builder {
       const FileSet* added = levels_[level].added_files;
       std::vector<FileMetaData*> to_unref;
       to_unref.reserve(added->size());
-      for (FileSet::const_iterator it = added->begin();
-          it != added->end(); ++it) {
+      for (FileSet::const_iterator it = added->begin(); it != added->end();
+           ++it) {
         to_unref.push_back(*it);
       }
       delete added;
@@ -674,8 +658,7 @@ class VersionSet::Builder {
     // Delete files
     const VersionEdit::DeletedFileSet& del = edit->deleted_files_;
     for (VersionEdit::DeletedFileSet::const_iterator iter = del.begin();
-         iter != del.end();
-         ++iter) {
+         iter != del.end(); ++iter) {
       const int level = iter->first;
       const uint64_t number = iter->second;
       levels_[level].deleted_files.insert(number);
@@ -721,13 +704,11 @@ class VersionSet::Builder {
       const FileSet* added = levels_[level].added_files;
       v->files_[level].reserve(base_files.size() + added->size());
       for (FileSet::const_iterator added_iter = added->begin();
-           added_iter != added->end();
-           ++added_iter) {
+           added_iter != added->end(); ++added_iter) {
         // Add all smaller files listed in base_
-        for (std::vector<FileMetaData*>::const_iterator bpos
-                 = std::upper_bound(base_iter, base_end, *added_iter, cmp);
-             base_iter != bpos;
-             ++base_iter) {
+        for (std::vector<FileMetaData*>::const_iterator bpos =
+                 std::upper_bound(base_iter, base_end, *added_iter, cmp);
+             base_iter != bpos; ++base_iter) {
           MaybeAddFile(v, level, *base_iter);
         }
 
@@ -743,7 +724,7 @@ class VersionSet::Builder {
       // Make sure there is no overlap in levels > 0
       if (level > 0) {
         for (uint32_t i = 1; i < v->files_[level].size(); i++) {
-          const InternalKey& prev_end = v->files_[level][i-1]->largest;
+          const InternalKey& prev_end = v->files_[level][i - 1]->largest;
           const InternalKey& this_begin = v->files_[level][i]->smallest;
           if (vset_->icmp_.Compare(prev_end, this_begin) >= 0) {
             fprintf(stderr, "overlapping ranges in same level %s vs. %s\n",
@@ -764,7 +745,7 @@ class VersionSet::Builder {
       std::vector<FileMetaData*>* files = &v->files_[level];
       if (level > 0 && !files->empty()) {
         // Must not overlap
-        assert(vset_->icmp_.Compare((*files)[files->size()-1]->largest,
+        assert(vset_->icmp_.Compare((*files)[files->size() - 1]->largest,
                                     f->smallest) < 0);
       }
       f->refs++;
@@ -773,8 +754,7 @@ class VersionSet::Builder {
   }
 };
 
-VersionSet::VersionSet(const std::string& dbname,
-                       const Options* options,
+VersionSet::VersionSet(const std::string& dbname, const Options* options,
                        TableCache* table_cache,
                        const InternalKeyComparator* cmp)
     : env_(options->env),
@@ -903,7 +883,7 @@ Status VersionSet::LogAndApply(VersionEdit* edit, port::Mutex* mu) {
   return s;
 }
 
-Status VersionSet::Recover(bool *save_manifest) {
+Status VersionSet::Recover(bool* save_manifest) {
   struct LogReporter : public log::Reader::Reporter {
     Status* status;
     virtual void Corruption(size_t bytes, const Status& s) {
@@ -917,7 +897,7 @@ Status VersionSet::Recover(bool *save_manifest) {
   if (!s.ok()) {
     return s;
   }
-  if (current.empty() || current[current.size()-1] != '\n') {
+  if (current.empty() || current[current.size() - 1] != '\n') {
     return Status::Corruption("CURRENT file does not end with newline");
   }
   current.resize(current.size() - 1);
@@ -927,8 +907,8 @@ Status VersionSet::Recover(bool *save_manifest) {
   s = env_->NewSequentialFile(dscname, &file);
   if (!s.ok()) {
     if (s.IsNotFound()) {
-      return Status::Corruption(
-            "CURRENT points to a non-existent file", s.ToString());
+      return Status::Corruption("CURRENT points to a non-existent file",
+                                s.ToString());
     }
     return s;
   }
@@ -946,7 +926,8 @@ Status VersionSet::Recover(bool *save_manifest) {
   {
     LogReporter reporter;
     reporter.status = &s;
-    log::Reader reader(file, &reporter, true/*checksum*/, 0/*initial_offset*/);
+    log::Reader reader(file, &reporter, true /*checksum*/,
+                       0 /*initial_offset*/);
     Slice record;
     std::string scratch;
     while (reader.ReadRecord(&record, &scratch) && s.ok()) {
@@ -1071,7 +1052,7 @@ void VersionSet::Finalize(Version* v) {
   int best_level = -1;
   double best_score = -1;
 
-  for (int level = 0; level < config::kNumLevels-1; level++) {
+  for (int level = 0; level < config::kNumLevels - 1; level++) {
     double score;
     if (level == 0) {
       // We treat level-0 specially by bounding the number of files
@@ -1086,7 +1067,7 @@ void VersionSet::Finalize(Version* v) {
       // setting, or very high compression ratios, or lots of
       // overwrites/deletions).
       score = v->files_[level].size() /
-          static_cast<double>(config::kL0_CompactionTrigger);
+              static_cast<double>(config::kL0_CompactionTrigger);
     } else {
       // Compute the ratio of current size to size limit.
       const uint64_t level_bytes = TotalFileSize(v->files_[level]);
@@ -1142,16 +1123,12 @@ int VersionSet::NumLevelFiles(int level) const {
 
 const char* VersionSet::LevelSummary(LevelSummaryStorage* scratch) const {
   // Update code if kNumLevels changes
-  assert(config::kNumLevels == 7);
+  static_assert(config::kNumLevels == 7, "");
   snprintf(scratch->buffer, sizeof(scratch->buffer),
-           "files[ %d %d %d %d %d %d %d ]",
-           int(current_->files_[0].size()),
-           int(current_->files_[1].size()),
-           int(current_->files_[2].size()),
-           int(current_->files_[3].size()),
-           int(current_->files_[4].size()),
-           int(current_->files_[5].size()),
-           int(current_->files_[6].size()));
+           "files[ %d %d %d %d %d %d %d ]", int(current_->files_[0].size()),
+           int(current_->files_[1].size()), int(current_->files_[2].size()),
+           int(current_->files_[3].size()), int(current_->files_[4].size()),
+           int(current_->files_[5].size()), int(current_->files_[6].size()));
   return scratch->buffer;
 }
 
@@ -1188,8 +1165,7 @@ uint64_t VersionSet::ApproximateOffsetOf(Version* v, const InternalKey& ikey) {
 }
 
 void VersionSet::AddLiveFiles(std::set<uint64_t>* live) {
-  for (Version* v = dummy_versions_.next_;
-       v != &dummy_versions_;
+  for (Version* v = dummy_versions_.next_; v != &dummy_versions_;
        v = v->next_) {
     for (int level = 0; level < config::kNumLevels; level++) {
       const std::vector<FileMetaData*>& files = v->files_[level];
@@ -1212,7 +1188,7 @@ int64_t VersionSet::MaxNextLevelOverlappingBytes() {
   for (int level = 1; level < config::kNumLevels - 1; level++) {
     for (size_t i = 0; i < current_->files_[level].size(); i++) {
       const FileMetaData* f = current_->files_[level][i];
-      current_->GetOverlappingInputs(level+1, &f->smallest, &f->largest,
+      current_->GetOverlappingInputs(level + 1, &f->smallest, &f->largest,
                                      &overlaps);
       const int64_t sum = TotalFileSize(overlaps);
       if (sum > result) {
@@ -1227,8 +1203,7 @@ int64_t VersionSet::MaxNextLevelOverlappingBytes() {
 // *smallest, *largest.
 // REQUIRES: inputs is not empty
 void VersionSet::GetRange(const std::vector<FileMetaData*>& inputs,
-                          InternalKey* smallest,
-                          InternalKey* largest) {
+                          InternalKey* smallest, InternalKey* largest) {
   assert(!inputs.empty());
   smallest->Clear();
   largest->Clear();
@@ -1253,8 +1228,7 @@ void VersionSet::GetRange(const std::vector<FileMetaData*>& inputs,
 // REQUIRES: inputs is not empty
 void VersionSet::GetRange2(const std::vector<FileMetaData*>& inputs1,
                            const std::vector<FileMetaData*>& inputs2,
-                           InternalKey* smallest,
-                           InternalKey* largest) {
+                           InternalKey* smallest, InternalKey* largest) {
   std::vector<FileMetaData*> all = inputs1;
   all.insert(all.end(), inputs2.begin(), inputs2.end());
   GetRange(all, smallest, largest);
@@ -1276,8 +1250,8 @@ Iterator* VersionSet::MakeInputIterator(Compaction* c) {
       if (c->level() + which == 0) {
         const std::vector<FileMetaData*>& files = c->inputs_[which];
         for (size_t i = 0; i < files.size(); i++) {
-          list[num++] = table_cache_->NewIterator(
-              options, files[i]->number, files[i]->file_size);
+          list[num++] = table_cache_->NewIterator(options, files[i]->number,
+                                                  files[i]->file_size);
         }
       } else {
         // Create concatenating iterator for the files from this level
@@ -1304,7 +1278,7 @@ Compaction* VersionSet::PickCompaction() {
   if (size_compaction) {
     level = current_->compaction_level_;
     assert(level >= 0);
-    assert(level+1 < config::kNumLevels);
+    assert(level + 1 < config::kNumLevels);
     c = new Compaction(options_, level);
 
     // Pick the first file that comes after compact_pointer_[level]
@@ -1433,7 +1407,8 @@ void VersionSet::SetupOtherInputs(Compaction* c) {
   AddBoundaryInputs(icmp_, current_->files_[level], &c->inputs_[0]);
   GetRange(c->inputs_[0], &smallest, &largest);
 
-  current_->GetOverlappingInputs(level+1, &smallest, &largest, &c->inputs_[1]);
+  current_->GetOverlappingInputs(level + 1, &smallest, &largest,
+                                 &c->inputs_[1]);
 
   // Get entire range covered by compaction
   InternalKey all_start, all_limit;
@@ -1454,18 +1429,14 @@ void VersionSet::SetupOtherInputs(Compaction* c) {
       InternalKey new_start, new_limit;
       GetRange(expanded0, &new_start, &new_limit);
       std::vector<FileMetaData*> expanded1;
-      current_->GetOverlappingInputs(level+1, &new_start, &new_limit,
+      current_->GetOverlappingInputs(level + 1, &new_start, &new_limit,
                                      &expanded1);
       if (expanded1.size() == c->inputs_[1].size()) {
         Log(options_->info_log,
             "Expanding@%d %d+%d (%ld+%ld bytes) to %d+%d (%ld+%ld bytes)\n",
-            level,
-            int(c->inputs_[0].size()),
-            int(c->inputs_[1].size()),
-            long(inputs0_size), long(inputs1_size),
-            int(expanded0.size()),
-            int(expanded1.size()),
-            long(expanded0_size), long(inputs1_size));
+            level, int(c->inputs_[0].size()), int(c->inputs_[1].size()),
+            long(inputs0_size), long(inputs1_size), int(expanded0.size()),
+            int(expanded1.size()), long(expanded0_size), long(inputs1_size));
         smallest = new_start;
         largest = new_limit;
         c->inputs_[0] = expanded0;
@@ -1490,10 +1461,8 @@ void VersionSet::SetupOtherInputs(Compaction* c) {
   c->edit_.SetCompactPointer(level, largest);
 }
 
-Compaction* VersionSet::CompactRange(
-    int level,
-    const InternalKey* begin,
-    const InternalKey* end) {
+Compaction* VersionSet::CompactRange(int level, const InternalKey* begin,
+                                     const InternalKey* end) {
   std::vector<FileMetaData*> inputs;
   current_->GetOverlappingInputs(level, begin, end, &inputs);
   if (inputs.empty()) {
@@ -1566,7 +1535,7 @@ bool Compaction::IsBaseLevelForKey(const Slice& user_key) {
   const Comparator* user_cmp = input_version_->vset_->icmp_.user_comparator();
   for (int lvl = level_ + 2; lvl < config::kNumLevels; lvl++) {
     const std::vector<FileMetaData*>& files = input_version_->files_[lvl];
-    for (; level_ptrs_[lvl] < files.size(); ) {
+    for (; level_ptrs_[lvl] < files.size();) {
       FileMetaData* f = files[level_ptrs_[lvl]];
       if (user_cmp->Compare(user_key, f->largest.user_key()) <= 0) {
         // We've advanced far enough
@@ -1587,8 +1556,9 @@ bool Compaction::ShouldStopBefore(const Slice& internal_key) {
   // Scan to find earliest grandparent file that contains key.
   const InternalKeyComparator* icmp = &vset->icmp_;
   while (grandparent_index_ < grandparents_.size() &&
-      icmp->Compare(internal_key,
-                    grandparents_[grandparent_index_]->largest.Encode()) > 0) {
+         icmp->Compare(internal_key,
+                       grandparents_[grandparent_index_]->largest.Encode()) >
+             0) {
     if (seen_key_) {
       overlapped_bytes_ += grandparents_[grandparent_index_]->file_size;
     }
diff --git a/db/version_set.h b/db/version_set.h
index 0beae4d..334ebd9 100644
--- a/db/version_set.h
+++ b/db/version_set.h
@@ -18,6 +18,7 @@
 #include <map>
 #include <set>
 #include <vector>
+
 #include "db/dbformat.h"
 #include "db/version_edit.h"
 #include "port/port.h"
@@ -25,7 +26,9 @@
 
 namespace leveldb {
 
-namespace log { class Writer; }
+namespace log {
+class Writer;
+}
 
 class Compaction;
 class Iterator;
@@ -40,8 +43,7 @@ class WritableFile;
 // Return files.size() if there is no such file.
 // REQUIRES: "files" contains a sorted list of non-overlapping files.
 int FindFile(const InternalKeyComparator& icmp,
-             const std::vector<FileMetaData*>& files,
-             const Slice& key);
+             const std::vector<FileMetaData*>& files, const Slice& key);
 
 // Returns true iff some file in "files" overlaps the user key range
 // [*smallest,*largest].
@@ -90,16 +92,15 @@ class Version {
 
   void GetOverlappingInputs(
       int level,
-      const InternalKey* begin,         // nullptr means before all keys
-      const InternalKey* end,           // nullptr means after all keys
+      const InternalKey* begin,  // nullptr means before all keys
+      const InternalKey* end,    // nullptr means after all keys
       std::vector<FileMetaData*>* inputs);
 
   // Returns true iff some file in the specified level overlaps
   // some part of [*smallest_user_key,*largest_user_key].
   // smallest_user_key==nullptr represents a key smaller than all the DB's keys.
   // largest_user_key==nullptr represents a key largest than all the DB's keys.
-  bool OverlapInLevel(int level,
-                      const Slice* smallest_user_key,
+  bool OverlapInLevel(int level, const Slice* smallest_user_key,
                       const Slice* largest_user_key);
 
   // Return the level at which we should place a new memtable compaction
@@ -124,14 +125,13 @@ class Version {
   // false, makes no more calls.
   //
   // REQUIRES: user portion of internal_key == user_key.
-  void ForEachOverlapping(Slice user_key, Slice internal_key,
-                          void* arg,
+  void ForEachOverlapping(Slice user_key, Slice internal_key, void* arg,
                           bool (*func)(void*, int, FileMetaData*));
 
-  VersionSet* vset_;            // VersionSet to which this Version belongs
-  Version* next_;               // Next version in linked list
-  Version* prev_;               // Previous version in linked list
-  int refs_;                    // Number of live refs to this version
+  VersionSet* vset_;  // VersionSet to which this Version belongs
+  Version* next_;     // Next version in linked list
+  Version* prev_;     // Previous version in linked list
+  int refs_;          // Number of live refs to this version
 
   // List of files per level
   std::vector<FileMetaData*> files_[config::kNumLevels];
@@ -147,12 +147,14 @@ class Version {
   int compaction_level_;
 
   explicit Version(VersionSet* vset)
-      : vset_(vset), next_(this), prev_(this), refs_(0),
+      : vset_(vset),
+        next_(this),
+        prev_(this),
+        refs_(0),
         file_to_compact_(nullptr),
         file_to_compact_level_(-1),
         compaction_score_(-1),
-        compaction_level_(-1) {
-  }
+        compaction_level_(-1) {}
 
   ~Version();
 
@@ -163,10 +165,8 @@ class Version {
 
 class VersionSet {
  public:
-  VersionSet(const std::string& dbname,
-             const Options* options,
-             TableCache* table_cache,
-             const InternalKeyComparator*);
+  VersionSet(const std::string& dbname, const Options* options,
+             TableCache* table_cache, const InternalKeyComparator*);
   ~VersionSet();
 
   // Apply *edit to the current version to form a new descriptor that
@@ -178,7 +178,7 @@ class VersionSet {
       EXCLUSIVE_LOCKS_REQUIRED(mu);
 
   // Recover the last saved descriptor from persistent storage.
-  Status Recover(bool *save_manifest);
+  Status Recover(bool* save_manifest);
 
   // Return the current version.
   Version* current() const { return current_; }
@@ -233,10 +233,8 @@ class VersionSet {
   // the specified level.  Returns nullptr if there is nothing in that
   // level that overlaps the specified range.  Caller should delete
   // the result.
-  Compaction* CompactRange(
-      int level,
-      const InternalKey* begin,
-      const InternalKey* end);
+  Compaction* CompactRange(int level, const InternalKey* begin,
+                           const InternalKey* end);
 
   // Return the maximum overlapping data (in bytes) at next level for any
   // file at a level >= 1.
@@ -277,14 +275,12 @@ class VersionSet {
 
   void Finalize(Version* v);
 
-  void GetRange(const std::vector<FileMetaData*>& inputs,
-                InternalKey* smallest,
+  void GetRange(const std::vector<FileMetaData*>& inputs, InternalKey* smallest,
                 InternalKey* largest);
 
   void GetRange2(const std::vector<FileMetaData*>& inputs1,
                  const std::vector<FileMetaData*>& inputs2,
-                 InternalKey* smallest,
-                 InternalKey* largest);
+                 InternalKey* smallest, InternalKey* largest);
 
   void SetupOtherInputs(Compaction* c);
 
@@ -373,7 +369,7 @@ class Compaction {
   VersionEdit edit_;
 
   // Each compaction reads inputs from "level_" and "level_+1"
-  std::vector<FileMetaData*> inputs_[2];      // The two sets of inputs
+  std::vector<FileMetaData*> inputs_[2];  // The two sets of inputs
 
   // State used to check for number of overlapping grandparent files
   // (parent == level_ + 1, grandparent == level_ + 2)
diff --git a/db/version_set_test.cc b/db/version_set_test.cc
index b32e2e5..43b51d8 100644
--- a/db/version_set_test.cc
+++ b/db/version_set_test.cc
@@ -14,7 +14,7 @@ class FindFileTest {
   std::vector<FileMetaData*> files_;
   bool disjoint_sorted_files_;
 
-  FindFileTest() : disjoint_sorted_files_(true) { }
+  FindFileTest() : disjoint_sorted_files_(true) {}
 
   ~FindFileTest() {
     for (int i = 0; i < files_.size(); i++) {
@@ -50,10 +50,10 @@ class FindFileTest {
 
 TEST(FindFileTest, Empty) {
   ASSERT_EQ(0, Find("foo"));
-  ASSERT_TRUE(! Overlaps("a", "z"));
-  ASSERT_TRUE(! Overlaps(nullptr, "z"));
-  ASSERT_TRUE(! Overlaps("a", nullptr));
-  ASSERT_TRUE(! Overlaps(nullptr, nullptr));
+  ASSERT_TRUE(!Overlaps("a", "z"));
+  ASSERT_TRUE(!Overlaps(nullptr, "z"));
+  ASSERT_TRUE(!Overlaps("a", nullptr));
+  ASSERT_TRUE(!Overlaps(nullptr, nullptr));
 }
 
 TEST(FindFileTest, Single) {
@@ -65,8 +65,8 @@ TEST(FindFileTest, Single) {
   ASSERT_EQ(1, Find("q1"));
   ASSERT_EQ(1, Find("z"));
 
-  ASSERT_TRUE(! Overlaps("a", "b"));
-  ASSERT_TRUE(! Overlaps("z1", "z2"));
+  ASSERT_TRUE(!Overlaps("a", "b"));
+  ASSERT_TRUE(!Overlaps("z1", "z2"));
   ASSERT_TRUE(Overlaps("a", "p"));
   ASSERT_TRUE(Overlaps("a", "q"));
   ASSERT_TRUE(Overlaps("a", "z"));
@@ -78,15 +78,14 @@ TEST(FindFileTest, Single) {
   ASSERT_TRUE(Overlaps("q", "q"));
   ASSERT_TRUE(Overlaps("q", "q1"));
 
-  ASSERT_TRUE(! Overlaps(nullptr, "j"));
-  ASSERT_TRUE(! Overlaps("r", nullptr));
+  ASSERT_TRUE(!Overlaps(nullptr, "j"));
+  ASSERT_TRUE(!Overlaps("r", nullptr));
   ASSERT_TRUE(Overlaps(nullptr, "p"));
   ASSERT_TRUE(Overlaps(nullptr, "p1"));
   ASSERT_TRUE(Overlaps("q", nullptr));
   ASSERT_TRUE(Overlaps(nullptr, nullptr));
 }
 
-
 TEST(FindFileTest, Multiple) {
   Add("150", "200");
   Add("200", "250");
@@ -110,10 +109,10 @@ TEST(FindFileTest, Multiple) {
   ASSERT_EQ(3, Find("450"));
   ASSERT_EQ(4, Find("451"));
 
-  ASSERT_TRUE(! Overlaps("100", "149"));
-  ASSERT_TRUE(! Overlaps("251", "299"));
-  ASSERT_TRUE(! Overlaps("451", "500"));
-  ASSERT_TRUE(! Overlaps("351", "399"));
+  ASSERT_TRUE(!Overlaps("100", "149"));
+  ASSERT_TRUE(!Overlaps("251", "299"));
+  ASSERT_TRUE(!Overlaps("451", "500"));
+  ASSERT_TRUE(!Overlaps("351", "399"));
 
   ASSERT_TRUE(Overlaps("100", "150"));
   ASSERT_TRUE(Overlaps("100", "200"));
@@ -130,8 +129,8 @@ TEST(FindFileTest, MultipleNullBoundaries) {
   Add("200", "250");
   Add("300", "350");
   Add("400", "450");
-  ASSERT_TRUE(! Overlaps(nullptr, "149"));
-  ASSERT_TRUE(! Overlaps("451", nullptr));
+  ASSERT_TRUE(!Overlaps(nullptr, "149"));
+  ASSERT_TRUE(!Overlaps("451", nullptr));
   ASSERT_TRUE(Overlaps(nullptr, nullptr));
   ASSERT_TRUE(Overlaps(nullptr, "150"));
   ASSERT_TRUE(Overlaps(nullptr, "199"));
@@ -147,8 +146,8 @@ TEST(FindFileTest, MultipleNullBoundaries) {
 
 TEST(FindFileTest, OverlapSequenceChecks) {
   Add("200", "200", 5000, 3000);
-  ASSERT_TRUE(! Overlaps("199", "199"));
-  ASSERT_TRUE(! Overlaps("201", "300"));
+  ASSERT_TRUE(!Overlaps("199", "199"));
+  ASSERT_TRUE(!Overlaps("201", "300"));
   ASSERT_TRUE(Overlaps("200", "200"));
   ASSERT_TRUE(Overlaps("190", "200"));
   ASSERT_TRUE(Overlaps("200", "210"));
@@ -158,8 +157,8 @@ TEST(FindFileTest, OverlappingFiles) {
   Add("150", "600");
   Add("400", "500");
   disjoint_sorted_files_ = false;
-  ASSERT_TRUE(! Overlaps("100", "149"));
-  ASSERT_TRUE(! Overlaps("601", "700"));
+  ASSERT_TRUE(!Overlaps("100", "149"));
+  ASSERT_TRUE(!Overlaps("601", "700"));
   ASSERT_TRUE(Overlaps("100", "150"));
   ASSERT_TRUE(Overlaps("100", "200"));
   ASSERT_TRUE(Overlaps("100", "300"));
diff --git a/db/write_batch.cc b/db/write_batch.cc
index 23eb00f..2dec642 100644
--- a/db/write_batch.cc
+++ b/db/write_batch.cc
@@ -15,10 +15,10 @@
 
 #include "leveldb/write_batch.h"
 
-#include "leveldb/db.h"
 #include "db/dbformat.h"
 #include "db/memtable.h"
 #include "db/write_batch_internal.h"
+#include "leveldb/db.h"
 #include "util/coding.h"
 
 namespace leveldb {
@@ -26,22 +26,18 @@ namespace leveldb {
 // WriteBatch header has an 8-byte sequence number followed by a 4-byte count.
 static const size_t kHeader = 12;
 
-WriteBatch::WriteBatch() {
-  Clear();
-}
+WriteBatch::WriteBatch() { Clear(); }
 
-WriteBatch::~WriteBatch() { }
+WriteBatch::~WriteBatch() {}
 
-WriteBatch::Handler::~Handler() { }
+WriteBatch::Handler::~Handler() {}
 
 void WriteBatch::Clear() {
   rep_.clear();
   rep_.resize(kHeader);
 }
 
-size_t WriteBatch::ApproximateSize() const {
-  return rep_.size();
-}
+size_t WriteBatch::ApproximateSize() const { return rep_.size(); }
 
 Status WriteBatch::Iterate(Handler* handler) const {
   Slice input(rep_);
@@ -112,7 +108,7 @@ void WriteBatch::Delete(const Slice& key) {
   PutLengthPrefixedSlice(&rep_, key);
 }
 
-void WriteBatch::Append(const WriteBatch &source) {
+void WriteBatch::Append(const WriteBatch& source) {
   WriteBatchInternal::Append(this, &source);
 }
 
@@ -133,8 +129,7 @@ class MemTableInserter : public WriteBatch::Handler {
 };
 }  // namespace
 
-Status WriteBatchInternal::InsertInto(const WriteBatch* b,
-                                      MemTable* memtable) {
+Status WriteBatchInternal::InsertInto(const WriteBatch* b, MemTable* memtable) {
   MemTableInserter inserter;
   inserter.sequence_ = WriteBatchInternal::Sequence(b);
   inserter.mem_ = memtable;
diff --git a/db/write_batch_internal.h b/db/write_batch_internal.h
index 9448ef7..fce86e3 100644
--- a/db/write_batch_internal.h
+++ b/db/write_batch_internal.h
@@ -29,13 +29,9 @@ class WriteBatchInternal {
   // this batch.
   static void SetSequence(WriteBatch* batch, SequenceNumber seq);
 
-  static Slice Contents(const WriteBatch* batch) {
-    return Slice(batch->rep_);
-  }
+  static Slice Contents(const WriteBatch* batch) { return Slice(batch->rep_); }
 
-  static size_t ByteSize(const WriteBatch* batch) {
-    return batch->rep_.size();
-  }
+  static size_t ByteSize(const WriteBatch* batch) { return batch->rep_.size(); }
 
   static void SetContents(WriteBatch* batch, const Slice& contents);
 
@@ -46,5 +42,4 @@ class WriteBatchInternal {
 
 }  // namespace leveldb
 
-
 #endif  // STORAGE_LEVELDB_DB_WRITE_BATCH_INTERNAL_H_
diff --git a/db/write_batch_test.cc b/db/write_batch_test.cc
index 49c178d..c32317f 100644
--- a/db/write_batch_test.cc
+++ b/db/write_batch_test.cc
@@ -52,7 +52,7 @@ static std::string PrintContents(WriteBatch* b) {
   return state;
 }
 
-class WriteBatchTest { };
+class WriteBatchTest {};
 
 TEST(WriteBatchTest, Empty) {
   WriteBatch batch;
@@ -68,10 +68,11 @@ TEST(WriteBatchTest, Multiple) {
   WriteBatchInternal::SetSequence(&batch, 100);
   ASSERT_EQ(100, WriteBatchInternal::Sequence(&batch));
   ASSERT_EQ(3, WriteBatchInternal::Count(&batch));
-  ASSERT_EQ("Put(baz, boo)@102"
-            "Delete(box)@101"
-            "Put(foo, bar)@100",
-            PrintContents(&batch));
+  ASSERT_EQ(
+      "Put(baz, boo)@102"
+      "Delete(box)@101"
+      "Put(foo, bar)@100",
+      PrintContents(&batch));
 }
 
 TEST(WriteBatchTest, Corruption) {
@@ -81,10 +82,11 @@ TEST(WriteBatchTest, Corruption) {
   WriteBatchInternal::SetSequence(&batch, 200);
   Slice contents = WriteBatchInternal::Contents(&batch);
   WriteBatchInternal::SetContents(&batch,
-                                  Slice(contents.data(),contents.size()-1));
-  ASSERT_EQ("Put(foo, bar)@200"
-            "ParseError()",
-            PrintContents(&batch));
+                                  Slice(contents.data(), contents.size() - 1));
+  ASSERT_EQ(
+      "Put(foo, bar)@200"
+      "ParseError()",
+      PrintContents(&batch));
 }
 
 TEST(WriteBatchTest, Append) {
@@ -92,25 +94,25 @@ TEST(WriteBatchTest, Append) {
   WriteBatchInternal::SetSequence(&b1, 200);
   WriteBatchInternal::SetSequence(&b2, 300);
   b1.Append(b2);
-  ASSERT_EQ("",
-            PrintContents(&b1));
+  ASSERT_EQ("", PrintContents(&b1));
   b2.Put("a", "va");
   b1.Append(b2);
-  ASSERT_EQ("Put(a, va)@200",
-            PrintContents(&b1));
+  ASSERT_EQ("Put(a, va)@200", PrintContents(&b1));
   b2.Clear();
   b2.Put("b", "vb");
   b1.Append(b2);
-  ASSERT_EQ("Put(a, va)@200"
-            "Put(b, vb)@201",
-            PrintContents(&b1));
+  ASSERT_EQ(
+      "Put(a, va)@200"
+      "Put(b, vb)@201",
+      PrintContents(&b1));
   b2.Delete("foo");
   b1.Append(b2);
-  ASSERT_EQ("Put(a, va)@200"
-            "Put(b, vb)@202"
-            "Put(b, vb)@201"
-            "Delete(foo)@203",
-            PrintContents(&b1));
+  ASSERT_EQ(
+      "Put(a, va)@200"
+      "Put(b, vb)@202"
+      "Put(b, vb)@201"
+      "Delete(foo)@203",
+      PrintContents(&b1));
 }
 
 TEST(WriteBatchTest, ApproximateSize) {
@@ -132,6 +134,4 @@ TEST(WriteBatchTest, ApproximateSize) {
 
 }  // namespace leveldb
 
-int main(int argc, char** argv) {
-  return leveldb::test::RunAllTests();
-}
+int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
diff --git a/doc/bench/db_bench_sqlite3.cc b/doc/bench/db_bench_sqlite3.cc
index 7e05de2..f183f4f 100644
--- a/doc/bench/db_bench_sqlite3.cc
+++ b/doc/bench/db_bench_sqlite3.cc
@@ -2,9 +2,10 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file. See the AUTHORS file for names of contributors.
 
+#include <sqlite3.h>
 #include <stdio.h>
 #include <stdlib.h>
-#include <sqlite3.h>
+
 #include "util/histogram.h"
 #include "util/random.h"
 #include "util/testutil.h"
@@ -38,8 +39,7 @@ static const char* FLAGS_benchmarks =
     "fillrand100K,"
     "fillseq100K,"
     "readseq,"
-    "readrand100K,"
-    ;
+    "readrand100K,";
 
 // Number of key/values to place in database
 static int FLAGS_num = 1000000;
@@ -78,8 +78,7 @@ static bool FLAGS_WAL_enabled = true;
 // Use the db with the following name.
 static const char* FLAGS_db = nullptr;
 
-inline
-static void ExecErrorCheck(int status, char *err_msg) {
+inline static void ExecErrorCheck(int status, char* err_msg) {
   if (status != SQLITE_OK) {
     fprintf(stderr, "SQL error: %s\n", err_msg);
     sqlite3_free(err_msg);
@@ -87,24 +86,21 @@ static void ExecErrorCheck(int status, char *err_msg) {
   }
 }
 
-inline
-static void StepErrorCheck(int status) {
+inline static void StepErrorCheck(int status) {
   if (status != SQLITE_DONE) {
     fprintf(stderr, "SQL step error: status = %d\n", status);
     exit(1);
   }
 }
 
-inline
-static void ErrorCheck(int status) {
+inline static void ErrorCheck(int status) {
   if (status != SQLITE_OK) {
     fprintf(stderr, "sqlite3 error: status = %d\n", status);
     exit(1);
   }
 }
 
-inline
-static void WalCheckpoint(sqlite3* db_) {
+inline static void WalCheckpoint(sqlite3* db_) {
   // Flush all writes to disk
   if (FLAGS_WAL_enabled) {
     sqlite3_wal_checkpoint_v2(db_, nullptr, SQLITE_CHECKPOINT_FULL, nullptr,
@@ -153,7 +149,7 @@ static Slice TrimSpace(Slice s) {
     start++;
   }
   int limit = s.size();
-  while (limit > start && isspace(s[limit-1])) {
+  while (limit > start && isspace(s[limit - 1])) {
     limit--;
   }
   return Slice(s.data() + start, limit - start);
@@ -177,7 +173,7 @@ class Benchmark {
 
   // State kept for progress messages
   int done_;
-  int next_report_;     // When to report next
+  int next_report_;  // When to report next
 
   void PrintHeader() {
     const int kKeySize = 16;
@@ -186,17 +182,17 @@ class Benchmark {
     fprintf(stdout, "Values:     %d bytes each\n", FLAGS_value_size);
     fprintf(stdout, "Entries:    %d\n", num_);
     fprintf(stdout, "RawSize:    %.1f MB (estimated)\n",
-            ((static_cast<int64_t>(kKeySize + FLAGS_value_size) * num_)
-             / 1048576.0));
+            ((static_cast<int64_t>(kKeySize + FLAGS_value_size) * num_) /
+             1048576.0));
     PrintWarnings();
     fprintf(stdout, "------------------------------------------------\n");
   }
 
   void PrintWarnings() {
 #if defined(__GNUC__) && !defined(__OPTIMIZE__)
-    fprintf(stdout,
-            "WARNING: Optimization is disabled: benchmarks unnecessarily slow\n"
-            );
+    fprintf(
+        stdout,
+        "WARNING: Optimization is disabled: benchmarks unnecessarily slow\n");
 #endif
 #ifndef NDEBUG
     fprintf(stdout,
@@ -262,13 +258,20 @@ class Benchmark {
 
     done_++;
     if (done_ >= next_report_) {
-      if      (next_report_ < 1000)   next_report_ += 100;
-      else if (next_report_ < 5000)   next_report_ += 500;
-      else if (next_report_ < 10000)  next_report_ += 1000;
-      else if (next_report_ < 50000)  next_report_ += 5000;
-      else if (next_report_ < 100000) next_report_ += 10000;
-      else if (next_report_ < 500000) next_report_ += 50000;
-      else                            next_report_ += 100000;
+      if (next_report_ < 1000)
+        next_report_ += 100;
+      else if (next_report_ < 5000)
+        next_report_ += 500;
+      else if (next_report_ < 10000)
+        next_report_ += 1000;
+      else if (next_report_ < 50000)
+        next_report_ += 5000;
+      else if (next_report_ < 100000)
+        next_report_ += 10000;
+      else if (next_report_ < 500000)
+        next_report_ += 50000;
+      else
+        next_report_ += 100000;
       fprintf(stderr, "... finished %d ops%30s\r", done_, "");
       fflush(stderr);
     }
@@ -286,16 +289,14 @@ class Benchmark {
       snprintf(rate, sizeof(rate), "%6.1f MB/s",
                (bytes_ / 1048576.0) / (finish - start_));
       if (!message_.empty()) {
-        message_  = std::string(rate) + " " + message_;
+        message_ = std::string(rate) + " " + message_;
       } else {
         message_ = rate;
       }
     }
 
-    fprintf(stdout, "%-12s : %11.3f micros/op;%s%s\n",
-            name.ToString().c_str(),
-            (finish - start_) * 1e6 / done_,
-            (message_.empty() ? "" : " "),
+    fprintf(stdout, "%-12s : %11.3f micros/op;%s%s\n", name.ToString().c_str(),
+            (finish - start_) * 1e6 / done_, (message_.empty() ? "" : " "),
             message_.c_str());
     if (FLAGS_histogram) {
       fprintf(stdout, "Microseconds per op:\n%s\n", hist_.ToString().c_str());
@@ -304,22 +305,16 @@ class Benchmark {
   }
 
  public:
-  enum Order {
-    SEQUENTIAL,
-    RANDOM
-  };
-  enum DBState {
-    FRESH,
-    EXISTING
-  };
+  enum Order { SEQUENTIAL, RANDOM };
+  enum DBState { FRESH, EXISTING };
 
   Benchmark()
-  : db_(nullptr),
-    db_num_(0),
-    num_(FLAGS_num),
-    reads_(FLAGS_reads < 0 ? FLAGS_num : FLAGS_reads),
-    bytes_(0),
-    rand_(301) {
+      : db_(nullptr),
+        db_num_(0),
+        num_(FLAGS_num),
+        reads_(FLAGS_reads < 0 ? FLAGS_num : FLAGS_reads),
+        bytes_(0),
+        rand_(301) {
     std::vector<std::string> files;
     std::string test_dir;
     Env::Default()->GetTestDirectory(&test_dir);
@@ -426,10 +421,8 @@ class Benchmark {
     // Open database
     std::string tmp_dir;
     Env::Default()->GetTestDirectory(&tmp_dir);
-    snprintf(file_name, sizeof(file_name),
-             "%s/dbbench_sqlite3-%d.db",
-             tmp_dir.c_str(),
-             db_num_);
+    snprintf(file_name, sizeof(file_name), "%s/dbbench_sqlite3-%d.db",
+             tmp_dir.c_str(), db_num_);
     status = sqlite3_open(file_name, &db_);
     if (status) {
       fprintf(stderr, "open error: %s\n", sqlite3_errmsg(db_));
@@ -460,26 +453,26 @@ class Benchmark {
       std::string WAL_checkpoint = "PRAGMA wal_autocheckpoint = 4096";
       status = sqlite3_exec(db_, WAL_stmt.c_str(), nullptr, nullptr, &err_msg);
       ExecErrorCheck(status, err_msg);
-      status = sqlite3_exec(db_, WAL_checkpoint.c_str(), nullptr, nullptr,
-                            &err_msg);
+      status =
+          sqlite3_exec(db_, WAL_checkpoint.c_str(), nullptr, nullptr, &err_msg);
       ExecErrorCheck(status, err_msg);
     }
 
     // Change locking mode to exclusive and create tables/index for database
     std::string locking_stmt = "PRAGMA locking_mode = EXCLUSIVE";
     std::string create_stmt =
-          "CREATE TABLE test (key blob, value blob, PRIMARY KEY(key))";
-    std::string stmt_array[] = { locking_stmt, create_stmt };
+        "CREATE TABLE test (key blob, value blob, PRIMARY KEY(key))";
+    std::string stmt_array[] = {locking_stmt, create_stmt};
     int stmt_array_length = sizeof(stmt_array) / sizeof(std::string);
     for (int i = 0; i < stmt_array_length; i++) {
-      status = sqlite3_exec(db_, stmt_array[i].c_str(), nullptr, nullptr,
-                            &err_msg);
+      status =
+          sqlite3_exec(db_, stmt_array[i].c_str(), nullptr, nullptr, &err_msg);
       ExecErrorCheck(status, err_msg);
     }
   }
 
-  void Write(bool write_sync, Order order, DBState state,
-             int num_entries, int value_size, int entries_per_batch) {
+  void Write(bool write_sync, Order order, DBState state, int num_entries,
+             int value_size, int entries_per_batch) {
     // Create new database if state == FRESH
     if (state == FRESH) {
       if (FLAGS_use_existing_db) {
@@ -507,20 +500,20 @@ class Benchmark {
     std::string end_trans_str = "END TRANSACTION;";
 
     // Check for synchronous flag in options
-    std::string sync_stmt = (write_sync) ? "PRAGMA synchronous = FULL" :
-                                           "PRAGMA synchronous = OFF";
+    std::string sync_stmt =
+        (write_sync) ? "PRAGMA synchronous = FULL" : "PRAGMA synchronous = OFF";
     status = sqlite3_exec(db_, sync_stmt.c_str(), nullptr, nullptr, &err_msg);
     ExecErrorCheck(status, err_msg);
 
     // Preparing sqlite3 statements
-    status = sqlite3_prepare_v2(db_, replace_str.c_str(), -1,
-                                &replace_stmt, nullptr);
+    status = sqlite3_prepare_v2(db_, replace_str.c_str(), -1, &replace_stmt,
+                                nullptr);
     ErrorCheck(status);
     status = sqlite3_prepare_v2(db_, begin_trans_str.c_str(), -1,
                                 &begin_trans_stmt, nullptr);
     ErrorCheck(status);
-    status = sqlite3_prepare_v2(db_, end_trans_str.c_str(), -1,
-                                &end_trans_stmt, nullptr);
+    status = sqlite3_prepare_v2(db_, end_trans_str.c_str(), -1, &end_trans_stmt,
+                                nullptr);
     ErrorCheck(status);
 
     bool transaction = (entries_per_batch > 1);
@@ -538,16 +531,16 @@ class Benchmark {
         const char* value = gen_.Generate(value_size).data();
 
         // Create values for key-value pair
-        const int k = (order == SEQUENTIAL) ? i + j :
-                      (rand_.Next() % num_entries);
+        const int k =
+            (order == SEQUENTIAL) ? i + j : (rand_.Next() % num_entries);
         char key[100];
         snprintf(key, sizeof(key), "%016d", k);
 
         // Bind KV values into replace_stmt
         status = sqlite3_bind_blob(replace_stmt, 1, key, 16, SQLITE_STATIC);
         ErrorCheck(status);
-        status = sqlite3_bind_blob(replace_stmt, 2, value,
-                                   value_size, SQLITE_STATIC);
+        status = sqlite3_bind_blob(replace_stmt, 2, value, value_size,
+                                   SQLITE_STATIC);
         ErrorCheck(status);
 
         // Execute replace_stmt
@@ -593,8 +586,8 @@ class Benchmark {
     status = sqlite3_prepare_v2(db_, begin_trans_str.c_str(), -1,
                                 &begin_trans_stmt, nullptr);
     ErrorCheck(status);
-    status = sqlite3_prepare_v2(db_, end_trans_str.c_str(), -1,
-                                &end_trans_stmt, nullptr);
+    status = sqlite3_prepare_v2(db_, end_trans_str.c_str(), -1, &end_trans_stmt,
+                                nullptr);
     ErrorCheck(status);
     status = sqlite3_prepare_v2(db_, read_str.c_str(), -1, &read_stmt, nullptr);
     ErrorCheck(status);
@@ -621,7 +614,8 @@ class Benchmark {
         ErrorCheck(status);
 
         // Execute read statement
-        while ((status = sqlite3_step(read_stmt)) == SQLITE_ROW) {}
+        while ((status = sqlite3_step(read_stmt)) == SQLITE_ROW) {
+        }
         StepErrorCheck(status);
 
         // Reset SQLite statement for another use
@@ -651,7 +645,7 @@ class Benchmark {
 
   void ReadSequential() {
     int status;
-    sqlite3_stmt *pStmt;
+    sqlite3_stmt* pStmt;
     std::string read_str = "SELECT * FROM test ORDER BY key";
 
     status = sqlite3_prepare_v2(db_, read_str.c_str(), -1, &pStmt, nullptr);
@@ -664,7 +658,6 @@ class Benchmark {
     status = sqlite3_finalize(pStmt);
     ErrorCheck(status);
   }
-
 };
 
 }  // namespace leveldb
@@ -710,9 +703,9 @@ int main(int argc, char** argv) {
 
   // Choose a location for the test database if none given with --db=<path>
   if (FLAGS_db == nullptr) {
-      leveldb::Env::Default()->GetTestDirectory(&default_db_path);
-      default_db_path += "/dbbench";
-      FLAGS_db = default_db_path.c_str();
+    leveldb::Env::Default()->GetTestDirectory(&default_db_path);
+    default_db_path += "/dbbench";
+    FLAGS_db = default_db_path.c_str();
   }
 
   leveldb::Benchmark benchmark;
diff --git a/doc/bench/db_bench_tree_db.cc b/doc/bench/db_bench_tree_db.cc
index 9f8fb90..b2f6646 100644
--- a/doc/bench/db_bench_tree_db.cc
+++ b/doc/bench/db_bench_tree_db.cc
@@ -2,9 +2,10 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file. See the AUTHORS file for names of contributors.
 
+#include <kcpolydb.h>
 #include <stdio.h>
 #include <stdlib.h>
-#include <kcpolydb.h>
+
 #include "util/histogram.h"
 #include "util/random.h"
 #include "util/testutil.h"
@@ -34,8 +35,7 @@ static const char* FLAGS_benchmarks =
     "fillrand100K,"
     "fillseq100K,"
     "readseq100K,"
-    "readrand100K,"
-    ;
+    "readrand100K,";
 
 // Number of key/values to place in database
 static int FLAGS_num = 1000000;
@@ -71,9 +71,7 @@ static bool FLAGS_compression = true;
 // Use the db with the following name.
 static const char* FLAGS_db = nullptr;
 
-inline
-static void DBSynchronize(kyotocabinet::TreeDB* db_)
-{
+inline static void DBSynchronize(kyotocabinet::TreeDB* db_) {
   // Synchronize will flush writes to disk
   if (!db_->synchronize()) {
     fprintf(stderr, "synchronize error: %s\n", db_->error().name());
@@ -121,7 +119,7 @@ static Slice TrimSpace(Slice s) {
     start++;
   }
   int limit = s.size();
-  while (limit > start && isspace(s[limit-1])) {
+  while (limit > start && isspace(s[limit - 1])) {
     limit--;
   }
   return Slice(s.data() + start, limit - start);
@@ -146,7 +144,7 @@ class Benchmark {
 
   // State kept for progress messages
   int done_;
-  int next_report_;     // When to report next
+  int next_report_;  // When to report next
 
   void PrintHeader() {
     const int kKeySize = 16;
@@ -157,20 +155,20 @@ class Benchmark {
             static_cast<int>(FLAGS_value_size * FLAGS_compression_ratio + 0.5));
     fprintf(stdout, "Entries:    %d\n", num_);
     fprintf(stdout, "RawSize:    %.1f MB (estimated)\n",
-            ((static_cast<int64_t>(kKeySize + FLAGS_value_size) * num_)
-             / 1048576.0));
+            ((static_cast<int64_t>(kKeySize + FLAGS_value_size) * num_) /
+             1048576.0));
     fprintf(stdout, "FileSize:   %.1f MB (estimated)\n",
-            (((kKeySize + FLAGS_value_size * FLAGS_compression_ratio) * num_)
-             / 1048576.0));
+            (((kKeySize + FLAGS_value_size * FLAGS_compression_ratio) * num_) /
+             1048576.0));
     PrintWarnings();
     fprintf(stdout, "------------------------------------------------\n");
   }
 
   void PrintWarnings() {
 #if defined(__GNUC__) && !defined(__OPTIMIZE__)
-    fprintf(stdout,
-            "WARNING: Optimization is disabled: benchmarks unnecessarily slow\n"
-            );
+    fprintf(
+        stdout,
+        "WARNING: Optimization is disabled: benchmarks unnecessarily slow\n");
 #endif
 #ifndef NDEBUG
     fprintf(stdout,
@@ -237,13 +235,20 @@ class Benchmark {
 
     done_++;
     if (done_ >= next_report_) {
-      if      (next_report_ < 1000)   next_report_ += 100;
-      else if (next_report_ < 5000)   next_report_ += 500;
-      else if (next_report_ < 10000)  next_report_ += 1000;
-      else if (next_report_ < 50000)  next_report_ += 5000;
-      else if (next_report_ < 100000) next_report_ += 10000;
-      else if (next_report_ < 500000) next_report_ += 50000;
-      else                            next_report_ += 100000;
+      if (next_report_ < 1000)
+        next_report_ += 100;
+      else if (next_report_ < 5000)
+        next_report_ += 500;
+      else if (next_report_ < 10000)
+        next_report_ += 1000;
+      else if (next_report_ < 50000)
+        next_report_ += 5000;
+      else if (next_report_ < 100000)
+        next_report_ += 10000;
+      else if (next_report_ < 500000)
+        next_report_ += 50000;
+      else
+        next_report_ += 100000;
       fprintf(stderr, "... finished %d ops%30s\r", done_, "");
       fflush(stderr);
     }
@@ -261,16 +266,14 @@ class Benchmark {
       snprintf(rate, sizeof(rate), "%6.1f MB/s",
                (bytes_ / 1048576.0) / (finish - start_));
       if (!message_.empty()) {
-        message_  = std::string(rate) + " " + message_;
+        message_ = std::string(rate) + " " + message_;
       } else {
         message_ = rate;
       }
     }
 
-    fprintf(stdout, "%-12s : %11.3f micros/op;%s%s\n",
-            name.ToString().c_str(),
-            (finish - start_) * 1e6 / done_,
-            (message_.empty() ? "" : " "),
+    fprintf(stdout, "%-12s : %11.3f micros/op;%s%s\n", name.ToString().c_str(),
+            (finish - start_) * 1e6 / done_, (message_.empty() ? "" : " "),
             message_.c_str());
     if (FLAGS_histogram) {
       fprintf(stdout, "Microseconds per op:\n%s\n", hist_.ToString().c_str());
@@ -279,21 +282,15 @@ class Benchmark {
   }
 
  public:
-  enum Order {
-    SEQUENTIAL,
-    RANDOM
-  };
-  enum DBState {
-    FRESH,
-    EXISTING
-  };
+  enum Order { SEQUENTIAL, RANDOM };
+  enum DBState { FRESH, EXISTING };
 
   Benchmark()
-  : db_(nullptr),
-    num_(FLAGS_num),
-    reads_(FLAGS_reads < 0 ? FLAGS_num : FLAGS_reads),
-    bytes_(0),
-    rand_(301) {
+      : db_(nullptr),
+        num_(FLAGS_num),
+        reads_(FLAGS_reads < 0 ? FLAGS_num : FLAGS_reads),
+        bytes_(0),
+        rand_(301) {
     std::vector<std::string> files;
     std::string test_dir;
     Env::Default()->GetTestDirectory(&test_dir);
@@ -386,7 +383,7 @@ class Benchmark {
   }
 
  private:
-    void Open(bool sync) {
+  void Open(bool sync) {
     assert(db_ == nullptr);
 
     // Initialize db_
@@ -395,16 +392,14 @@ class Benchmark {
     db_num_++;
     std::string test_dir;
     Env::Default()->GetTestDirectory(&test_dir);
-    snprintf(file_name, sizeof(file_name),
-             "%s/dbbench_polyDB-%d.kct",
-             test_dir.c_str(),
-             db_num_);
+    snprintf(file_name, sizeof(file_name), "%s/dbbench_polyDB-%d.kct",
+             test_dir.c_str(), db_num_);
 
     // Create tuning options and open the database
-    int open_options = kyotocabinet::PolyDB::OWRITER |
-                       kyotocabinet::PolyDB::OCREATE;
-    int tune_options = kyotocabinet::TreeDB::TSMALL |
-        kyotocabinet::TreeDB::TLINEAR;
+    int open_options =
+        kyotocabinet::PolyDB::OWRITER | kyotocabinet::PolyDB::OCREATE;
+    int tune_options =
+        kyotocabinet::TreeDB::TSMALL | kyotocabinet::TreeDB::TLINEAR;
     if (FLAGS_compression) {
       tune_options |= kyotocabinet::TreeDB::TCOMPRESS;
       db_->tune_compressor(&comp_);
@@ -412,7 +407,7 @@ class Benchmark {
     db_->tune_options(tune_options);
     db_->tune_page_cache(FLAGS_cache_size);
     db_->tune_page(FLAGS_page_size);
-    db_->tune_map(256LL<<20);
+    db_->tune_map(256LL << 20);
     if (sync) {
       open_options |= kyotocabinet::PolyDB::OAUTOSYNC;
     }
@@ -421,8 +416,8 @@ class Benchmark {
     }
   }
 
-  void Write(bool sync, Order order, DBState state,
-             int num_entries, int value_size, int entries_per_batch) {
+  void Write(bool sync, Order order, DBState state, int num_entries,
+             int value_size, int entries_per_batch) {
     // Create new database if state == FRESH
     if (state == FRESH) {
       if (FLAGS_use_existing_db) {
@@ -442,8 +437,7 @@ class Benchmark {
     }
 
     // Write to database
-    for (int i = 0; i < num_entries; i++)
-    {
+    for (int i = 0; i < num_entries; i++) {
       const int k = (order == SEQUENTIAL) ? i : (rand_.Next() % num_entries);
       char key[100];
       snprintf(key, sizeof(key), "%016d", k);
@@ -517,9 +511,9 @@ int main(int argc, char** argv) {
 
   // Choose a location for the test database if none given with --db=<path>
   if (FLAGS_db == nullptr) {
-      leveldb::Env::Default()->GetTestDirectory(&default_db_path);
-      default_db_path += "/dbbench";
-      FLAGS_db = default_db_path.c_str();
+    leveldb::Env::Default()->GetTestDirectory(&default_db_path);
+    default_db_path += "/dbbench";
+    FLAGS_db = default_db_path.c_str();
   }
 
   leveldb::Benchmark benchmark;
diff --git a/helpers/memenv/memenv.cc b/helpers/memenv/memenv.cc
index ff384e4..58dc538 100644
--- a/helpers/memenv/memenv.cc
+++ b/helpers/memenv/memenv.cc
@@ -134,9 +134,7 @@ class FileState {
 
  private:
   // Private since only Unref() should be used to delete it.
-  ~FileState() {
-    Truncate();
-  }
+  ~FileState() { Truncate(); }
 
   // No copying allowed.
   FileState(const FileState&);
@@ -158,9 +156,7 @@ class SequentialFileImpl : public SequentialFile {
     file_->Ref();
   }
 
-  ~SequentialFileImpl() {
-    file_->Unref();
-  }
+  ~SequentialFileImpl() { file_->Unref(); }
 
   virtual Status Read(size_t n, Slice* result, char* scratch) {
     Status s = file_->Read(pos_, n, result, scratch);
@@ -189,13 +185,9 @@ class SequentialFileImpl : public SequentialFile {
 
 class RandomAccessFileImpl : public RandomAccessFile {
  public:
-  explicit RandomAccessFileImpl(FileState* file) : file_(file) {
-    file_->Ref();
-  }
+  explicit RandomAccessFileImpl(FileState* file) : file_(file) { file_->Ref(); }
 
-  ~RandomAccessFileImpl() {
-    file_->Unref();
-  }
+  ~RandomAccessFileImpl() { file_->Unref(); }
 
   virtual Status Read(uint64_t offset, size_t n, Slice* result,
                       char* scratch) const {
@@ -208,17 +200,11 @@ class RandomAccessFileImpl : public RandomAccessFile {
 
 class WritableFileImpl : public WritableFile {
  public:
-  WritableFileImpl(FileState* file) : file_(file) {
-    file_->Ref();
-  }
+  WritableFileImpl(FileState* file) : file_(file) { file_->Ref(); }
 
-  ~WritableFileImpl() {
-    file_->Unref();
-  }
+  ~WritableFileImpl() { file_->Unref(); }
 
-  virtual Status Append(const Slice& data) {
-    return file_->Append(data);
-  }
+  virtual Status Append(const Slice& data) { return file_->Append(data); }
 
   virtual Status Close() { return Status::OK(); }
   virtual Status Flush() { return Status::OK(); }
@@ -230,15 +216,16 @@ class WritableFileImpl : public WritableFile {
 
 class NoOpLogger : public Logger {
  public:
-  virtual void Logv(const char* format, va_list ap) { }
+  virtual void Logv(const char* format, va_list ap) {}
 };
 
 class InMemoryEnv : public EnvWrapper {
  public:
-  explicit InMemoryEnv(Env* base_env) : EnvWrapper(base_env) { }
+  explicit InMemoryEnv(Env* base_env) : EnvWrapper(base_env) {}
 
   virtual ~InMemoryEnv() {
-    for (FileSystem::iterator i = file_map_.begin(); i != file_map_.end(); ++i){
+    for (FileSystem::iterator i = file_map_.begin(); i != file_map_.end();
+         ++i) {
       i->second->Unref();
     }
   }
@@ -311,7 +298,8 @@ class InMemoryEnv : public EnvWrapper {
     MutexLock lock(&mutex_);
     result->clear();
 
-    for (FileSystem::iterator i = file_map_.begin(); i != file_map_.end(); ++i){
+    for (FileSystem::iterator i = file_map_.begin(); i != file_map_.end();
+         ++i) {
       const std::string& filename = i->first;
 
       if (filename.size() >= dir.size() + 1 && filename[dir.size()] == '/' &&
@@ -343,13 +331,9 @@ class InMemoryEnv : public EnvWrapper {
     return Status::OK();
   }
 
-  virtual Status CreateDir(const std::string& dirname) {
-    return Status::OK();
-  }
+  virtual Status CreateDir(const std::string& dirname) { return Status::OK(); }
 
-  virtual Status DeleteDir(const std::string& dirname) {
-    return Status::OK();
-  }
+  virtual Status DeleteDir(const std::string& dirname) { return Status::OK(); }
 
   virtual Status GetFileSize(const std::string& fname, uint64_t* file_size) {
     MutexLock lock(&mutex_);
@@ -361,8 +345,7 @@ class InMemoryEnv : public EnvWrapper {
     return Status::OK();
   }
 
-  virtual Status RenameFile(const std::string& src,
-                            const std::string& target) {
+  virtual Status RenameFile(const std::string& src, const std::string& target) {
     MutexLock lock(&mutex_);
     if (file_map_.find(src) == file_map_.end()) {
       return Status::IOError(src, "File not found");
@@ -403,8 +386,6 @@ class InMemoryEnv : public EnvWrapper {
 
 }  // namespace
 
-Env* NewMemEnv(Env* base_env) {
-  return new InMemoryEnv(base_env);
-}
+Env* NewMemEnv(Env* base_env) { return new InMemoryEnv(base_env); }
 
 }  // namespace leveldb
diff --git a/helpers/memenv/memenv_test.cc b/helpers/memenv/memenv_test.cc
index 4664795..a0a9469 100644
--- a/helpers/memenv/memenv_test.cc
+++ b/helpers/memenv/memenv_test.cc
@@ -4,12 +4,13 @@
 
 #include "helpers/memenv/memenv.h"
 
+#include <string>
+#include <vector>
+
 #include "db/db_impl.h"
 #include "leveldb/db.h"
 #include "leveldb/env.h"
 #include "util/testharness.h"
-#include <string>
-#include <vector>
 
 namespace leveldb {
 
@@ -17,12 +18,8 @@ class MemEnvTest {
  public:
   Env* env_;
 
-  MemEnvTest()
-      : env_(NewMemEnv(Env::Default())) {
-  }
-  ~MemEnvTest() {
-    delete env_;
-  }
+  MemEnvTest() : env_(NewMemEnv(Env::Default())) {}
+  ~MemEnvTest() { delete env_; }
 };
 
 TEST(MemEnvTest, Basics) {
@@ -109,25 +106,25 @@ TEST(MemEnvTest, ReadWrite) {
 
   // Read sequentially.
   ASSERT_OK(env_->NewSequentialFile("/dir/f", &seq_file));
-  ASSERT_OK(seq_file->Read(5, &result, scratch)); // Read "hello".
+  ASSERT_OK(seq_file->Read(5, &result, scratch));  // Read "hello".
   ASSERT_EQ(0, result.compare("hello"));
   ASSERT_OK(seq_file->Skip(1));
-  ASSERT_OK(seq_file->Read(1000, &result, scratch)); // Read "world".
+  ASSERT_OK(seq_file->Read(1000, &result, scratch));  // Read "world".
   ASSERT_EQ(0, result.compare("world"));
-  ASSERT_OK(seq_file->Read(1000, &result, scratch)); // Try reading past EOF.
+  ASSERT_OK(seq_file->Read(1000, &result, scratch));  // Try reading past EOF.
   ASSERT_EQ(0, result.size());
-  ASSERT_OK(seq_file->Skip(100)); // Try to skip past end of file.
+  ASSERT_OK(seq_file->Skip(100));  // Try to skip past end of file.
   ASSERT_OK(seq_file->Read(1000, &result, scratch));
   ASSERT_EQ(0, result.size());
   delete seq_file;
 
   // Random reads.
   ASSERT_OK(env_->NewRandomAccessFile("/dir/f", &rand_file));
-  ASSERT_OK(rand_file->Read(6, 5, &result, scratch)); // Read "world".
+  ASSERT_OK(rand_file->Read(6, 5, &result, scratch));  // Read "world".
   ASSERT_EQ(0, result.compare("world"));
-  ASSERT_OK(rand_file->Read(0, 5, &result, scratch)); // Read "hello".
+  ASSERT_OK(rand_file->Read(0, 5, &result, scratch));  // Read "hello".
   ASSERT_EQ(0, result.compare("hello"));
-  ASSERT_OK(rand_file->Read(10, 100, &result, scratch)); // Read "d".
+  ASSERT_OK(rand_file->Read(10, 100, &result, scratch));  // Read "d".
   ASSERT_EQ(0, result.compare("d"));
 
   // Too high offset.
@@ -176,7 +173,7 @@ TEST(MemEnvTest, LargeWrite) {
   SequentialFile* seq_file;
   Slice result;
   ASSERT_OK(env_->NewSequentialFile("/dir/f", &seq_file));
-  ASSERT_OK(seq_file->Read(3, &result, scratch)); // Read "foo".
+  ASSERT_OK(seq_file->Read(3, &result, scratch));  // Read "foo".
   ASSERT_EQ(0, result.compare("foo"));
 
   size_t read = 0;
@@ -188,7 +185,7 @@ TEST(MemEnvTest, LargeWrite) {
   }
   ASSERT_TRUE(write_data == read_data);
   delete seq_file;
-  delete [] scratch;
+  delete[] scratch;
 }
 
 TEST(MemEnvTest, OverwriteOpenFile) {
@@ -259,6 +256,4 @@ TEST(MemEnvTest, DBTest) {
 
 }  // namespace leveldb
 
-int main(int argc, char** argv) {
-  return leveldb::test::RunAllTests();
-}
+int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
diff --git a/include/leveldb/c.h b/include/leveldb/c.h
index d8aab5b..8e0d592 100644
--- a/include/leveldb/c.h
+++ b/include/leveldb/c.h
@@ -47,26 +47,27 @@ extern "C" {
 #include <stdarg.h>
 #include <stddef.h>
 #include <stdint.h>
+
 #include "leveldb/export.h"
 
 /* Exported types */
 
-typedef struct leveldb_t               leveldb_t;
-typedef struct leveldb_cache_t         leveldb_cache_t;
-typedef struct leveldb_comparator_t    leveldb_comparator_t;
-typedef struct leveldb_env_t           leveldb_env_t;
-typedef struct leveldb_filelock_t      leveldb_filelock_t;
-typedef struct leveldb_filterpolicy_t  leveldb_filterpolicy_t;
-typedef struct leveldb_iterator_t      leveldb_iterator_t;
-typedef struct leveldb_logger_t        leveldb_logger_t;
-typedef struct leveldb_options_t       leveldb_options_t;
-typedef struct leveldb_randomfile_t    leveldb_randomfile_t;
-typedef struct leveldb_readoptions_t   leveldb_readoptions_t;
-typedef struct leveldb_seqfile_t       leveldb_seqfile_t;
-typedef struct leveldb_snapshot_t      leveldb_snapshot_t;
-typedef struct leveldb_writablefile_t  leveldb_writablefile_t;
-typedef struct leveldb_writebatch_t    leveldb_writebatch_t;
-typedef struct leveldb_writeoptions_t  leveldb_writeoptions_t;
+typedef struct leveldb_t leveldb_t;
+typedef struct leveldb_cache_t leveldb_cache_t;
+typedef struct leveldb_comparator_t leveldb_comparator_t;
+typedef struct leveldb_env_t leveldb_env_t;
+typedef struct leveldb_filelock_t leveldb_filelock_t;
+typedef struct leveldb_filterpolicy_t leveldb_filterpolicy_t;
+typedef struct leveldb_iterator_t leveldb_iterator_t;
+typedef struct leveldb_logger_t leveldb_logger_t;
+typedef struct leveldb_options_t leveldb_options_t;
+typedef struct leveldb_randomfile_t leveldb_randomfile_t;
+typedef struct leveldb_readoptions_t leveldb_readoptions_t;
+typedef struct leveldb_seqfile_t leveldb_seqfile_t;
+typedef struct leveldb_snapshot_t leveldb_snapshot_t;
+typedef struct leveldb_writablefile_t leveldb_writablefile_t;
+typedef struct leveldb_writebatch_t leveldb_writebatch_t;
+typedef struct leveldb_writeoptions_t leveldb_writeoptions_t;
 
 /* DB operations */
 
@@ -189,10 +190,7 @@ LEVELDB_EXPORT void leveldb_options_set_block_restart_interval(
 LEVELDB_EXPORT void leveldb_options_set_max_file_size(leveldb_options_t*,
                                                       size_t);
 
-enum {
-  leveldb_no_compression = 0,
-  leveldb_snappy_compression = 1
-};
+enum { leveldb_no_compression = 0, leveldb_snappy_compression = 1 };
 LEVELDB_EXPORT void leveldb_options_set_compression(leveldb_options_t*, int);
 
 /* Comparator */
@@ -266,7 +264,7 @@ LEVELDB_EXPORT int leveldb_major_version();
 LEVELDB_EXPORT int leveldb_minor_version();
 
 #ifdef __cplusplus
-}  /* end extern "C" */
+} /* end extern "C" */
 #endif
 
-#endif  /* STORAGE_LEVELDB_INCLUDE_C_H_ */
+#endif /* STORAGE_LEVELDB_INCLUDE_C_H_ */
diff --git a/include/leveldb/cache.h b/include/leveldb/cache.h
index e416ea5..7d1a221 100644
--- a/include/leveldb/cache.h
+++ b/include/leveldb/cache.h
@@ -19,6 +19,7 @@
 #define STORAGE_LEVELDB_INCLUDE_CACHE_H_
 
 #include <stdint.h>
+
 #include "leveldb/export.h"
 #include "leveldb/slice.h"
 
@@ -42,7 +43,7 @@ class LEVELDB_EXPORT Cache {
   virtual ~Cache();
 
   // Opaque handle to an entry stored in the cache.
-  struct Handle { };
+  struct Handle {};
 
   // Insert a mapping from key->value into the cache and assign it
   // the specified charge against the total cache capacity.
diff --git a/include/leveldb/comparator.h b/include/leveldb/comparator.h
index 9b09684..a85b51e 100644
--- a/include/leveldb/comparator.h
+++ b/include/leveldb/comparator.h
@@ -6,6 +6,7 @@
 #define STORAGE_LEVELDB_INCLUDE_COMPARATOR_H_
 
 #include <string>
+
 #include "leveldb/export.h"
 
 namespace leveldb {
@@ -44,9 +45,8 @@ class LEVELDB_EXPORT Comparator {
   // If *start < limit, changes *start to a short string in [start,limit).
   // Simple comparator implementations may return with *start unchanged,
   // i.e., an implementation of this method that does nothing is correct.
-  virtual void FindShortestSeparator(
-      std::string* start,
-      const Slice& limit) const = 0;
+  virtual void FindShortestSeparator(std::string* start,
+                                     const Slice& limit) const = 0;
 
   // Changes *key to a short string >= *key.
   // Simple comparator implementations may return with *key unchanged,
diff --git a/include/leveldb/db.h b/include/leveldb/db.h
index 0239593..0b8dc24 100644
--- a/include/leveldb/db.h
+++ b/include/leveldb/db.h
@@ -7,6 +7,7 @@
 
 #include <stdint.h>
 #include <stdio.h>
+
 #include "leveldb/export.h"
 #include "leveldb/iterator.h"
 #include "leveldb/options.h"
@@ -32,11 +33,11 @@ class LEVELDB_EXPORT Snapshot {
 
 // A range of keys
 struct LEVELDB_EXPORT Range {
-  Slice start;          // Included in the range
-  Slice limit;          // Not included in the range
+  Slice start;  // Included in the range
+  Slice limit;  // Not included in the range
 
-  Range() { }
-  Range(const Slice& s, const Slice& l) : start(s), limit(l) { }
+  Range() {}
+  Range(const Slice& s, const Slice& l) : start(s), limit(l) {}
 };
 
 // A DB is a persistent ordered map from keys to values.
@@ -49,8 +50,7 @@ class LEVELDB_EXPORT DB {
   // OK on success.
   // Stores nullptr in *dbptr and returns a non-OK status on error.
   // Caller should delete *dbptr when it is no longer needed.
-  static Status Open(const Options& options,
-                     const std::string& name,
+  static Status Open(const Options& options, const std::string& name,
                      DB** dbptr);
 
   DB() = default;
@@ -63,8 +63,7 @@ class LEVELDB_EXPORT DB {
   // Set the database entry for "key" to "value".  Returns OK on success,
   // and a non-OK status on error.
   // Note: consider setting options.sync = true.
-  virtual Status Put(const WriteOptions& options,
-                     const Slice& key,
+  virtual Status Put(const WriteOptions& options, const Slice& key,
                      const Slice& value) = 0;
 
   // Remove the database entry (if any) for "key".  Returns OK on
@@ -85,8 +84,8 @@ class LEVELDB_EXPORT DB {
   // a status for which Status::IsNotFound() returns true.
   //
   // May return some other Status on an error.
-  virtual Status Get(const ReadOptions& options,
-                     const Slice& key, std::string* value) = 0;
+  virtual Status Get(const ReadOptions& options, const Slice& key,
+                     std::string* value) = 0;
 
   // Return a heap-allocated iterator over the contents of the database.
   // The result of NewIterator() is initially invalid (caller must
diff --git a/include/leveldb/dumpfile.h b/include/leveldb/dumpfile.h
index 6597741..a58bc6b 100644
--- a/include/leveldb/dumpfile.h
+++ b/include/leveldb/dumpfile.h
@@ -6,6 +6,7 @@
 #define STORAGE_LEVELDB_INCLUDE_DUMPFILE_H_
 
 #include <string>
+
 #include "leveldb/env.h"
 #include "leveldb/export.h"
 #include "leveldb/status.h"
diff --git a/include/leveldb/env.h b/include/leveldb/env.h
index ea728c9..112fe96 100644
--- a/include/leveldb/env.h
+++ b/include/leveldb/env.h
@@ -15,8 +15,10 @@
 
 #include <stdarg.h>
 #include <stdint.h>
+
 #include <string>
 #include <vector>
+
 #include "leveldb/export.h"
 #include "leveldb/status.h"
 
@@ -164,9 +166,7 @@ class LEVELDB_EXPORT Env {
   // added to the same Env may run concurrently in different threads.
   // I.e., the caller may not assume that background work items are
   // serialized.
-  virtual void Schedule(
-      void (*function)(void* arg),
-      void* arg) = 0;
+  virtual void Schedule(void (*function)(void* arg), void* arg) = 0;
 
   // Start a new thread, invoking "function(arg)" within the new thread.
   // When "function(arg)" returns, the thread will be destroyed.
@@ -287,9 +287,9 @@ class LEVELDB_EXPORT FileLock {
 
 // Log the specified data to *info_log if info_log is non-null.
 void Log(Logger* info_log, const char* format, ...)
-#   if defined(__GNUC__) || defined(__clang__)
-    __attribute__((__format__ (__printf__, 2, 3)))
-#   endif
+#if defined(__GNUC__) || defined(__clang__)
+    __attribute__((__format__(__printf__, 2, 3)))
+#endif
     ;
 
 // A utility routine: write "data" to the named file.
@@ -306,7 +306,7 @@ LEVELDB_EXPORT Status ReadFileToString(Env* env, const std::string& fname,
 class LEVELDB_EXPORT EnvWrapper : public Env {
  public:
   // Initialize an EnvWrapper that delegates all calls to *t.
-  explicit EnvWrapper(Env* t) : target_(t) { }
+  explicit EnvWrapper(Env* t) : target_(t) {}
   virtual ~EnvWrapper();
 
   // Return the target to which this Env forwards all calls.
@@ -364,9 +364,7 @@ class LEVELDB_EXPORT EnvWrapper : public Env {
   Status NewLogger(const std::string& fname, Logger** result) override {
     return target_->NewLogger(fname, result);
   }
-  uint64_t NowMicros() override {
-    return target_->NowMicros();
-  }
+  uint64_t NowMicros() override { return target_->NowMicros(); }
   void SleepForMicroseconds(int micros) override {
     target_->SleepForMicroseconds(micros);
   }
diff --git a/include/leveldb/filter_policy.h b/include/leveldb/filter_policy.h
index ba02720..49c8eda 100644
--- a/include/leveldb/filter_policy.h
+++ b/include/leveldb/filter_policy.h
@@ -17,6 +17,7 @@
 #define STORAGE_LEVELDB_INCLUDE_FILTER_POLICY_H_
 
 #include <string>
+
 #include "leveldb/export.h"
 
 namespace leveldb {
@@ -39,8 +40,8 @@ class LEVELDB_EXPORT FilterPolicy {
   //
   // Warning: do not change the initial contents of *dst.  Instead,
   // append the newly constructed filter to *dst.
-  virtual void CreateFilter(const Slice* keys, int n, std::string* dst)
-      const = 0;
+  virtual void CreateFilter(const Slice* keys, int n,
+                            std::string* dst) const = 0;
 
   // "filter" contains the data appended by a preceding call to
   // CreateFilter() on this class.  This method must return true if
diff --git a/include/leveldb/iterator.h b/include/leveldb/iterator.h
index 6c1d91b..447e950 100644
--- a/include/leveldb/iterator.h
+++ b/include/leveldb/iterator.h
@@ -93,7 +93,10 @@ class LEVELDB_EXPORT Iterator {
     // True if the node is not used. Only head nodes might be unused.
     bool IsEmpty() const { return function == nullptr; }
     // Invokes the cleanup function.
-    void Run() { assert(function != nullptr); (*function)(arg1, arg2); }
+    void Run() {
+      assert(function != nullptr);
+      (*function)(arg1, arg2);
+    }
   };
   CleanupNode cleanup_head_;
 };
diff --git a/include/leveldb/options.h b/include/leveldb/options.h
index 90aa19e..7e26dc6 100644
--- a/include/leveldb/options.h
+++ b/include/leveldb/options.h
@@ -6,6 +6,7 @@
 #define STORAGE_LEVELDB_INCLUDE_OPTIONS_H_
 
 #include <stddef.h>
+
 #include "leveldb/export.h"
 
 namespace leveldb {
@@ -24,7 +25,7 @@ class Snapshot;
 enum CompressionType {
   // NOTE: do not change the values of existing entries, as these are
   // part of the persistent format on disk.
-  kNoCompression     = 0x0,
+  kNoCompression = 0x0,
   kSnappyCompression = 0x1
 };
 
diff --git a/include/leveldb/slice.h b/include/leveldb/slice.h
index a86e8a6..2df417d 100644
--- a/include/leveldb/slice.h
+++ b/include/leveldb/slice.h
@@ -18,7 +18,9 @@
 #include <assert.h>
 #include <stddef.h>
 #include <string.h>
+
 #include <string>
+
 #include "leveldb/export.h"
 
 namespace leveldb {
@@ -26,16 +28,16 @@ namespace leveldb {
 class LEVELDB_EXPORT Slice {
  public:
   // Create an empty slice.
-  Slice() : data_(""), size_(0) { }
+  Slice() : data_(""), size_(0) {}
 
   // Create a slice that refers to d[0,n-1].
-  Slice(const char* d, size_t n) : data_(d), size_(n) { }
+  Slice(const char* d, size_t n) : data_(d), size_(n) {}
 
   // Create a slice that refers to the contents of "s"
-  Slice(const std::string& s) : data_(s.data()), size_(s.size()) { }
+  Slice(const std::string& s) : data_(s.data()), size_(s.size()) {}
 
   // Create a slice that refers to s[0,strlen(s)-1]
-  Slice(const char* s) : data_(s), size_(strlen(s)) { }
+  Slice(const char* s) : data_(s), size_(strlen(s)) {}
 
   // Intentionally copyable.
   Slice(const Slice&) = default;
@@ -58,7 +60,10 @@ class LEVELDB_EXPORT Slice {
   }
 
   // Change this slice to refer to an empty array
-  void clear() { data_ = ""; size_ = 0; }
+  void clear() {
+    data_ = "";
+    size_ = 0;
+  }
 
   // Drop the first "n" bytes from this slice.
   void remove_prefix(size_t n) {
@@ -78,8 +83,7 @@ class LEVELDB_EXPORT Slice {
 
   // Return true iff "x" is a prefix of "*this"
   bool starts_with(const Slice& x) const {
-    return ((size_ >= x.size_) &&
-            (memcmp(data_, x.data_, x.size_) == 0));
+    return ((size_ >= x.size_) && (memcmp(data_, x.data_, x.size_) == 0));
   }
 
  private:
@@ -92,21 +96,20 @@ inline bool operator==(const Slice& x, const Slice& y) {
           (memcmp(x.data(), y.data(), x.size()) == 0));
 }
 
-inline bool operator!=(const Slice& x, const Slice& y) {
-  return !(x == y);
-}
+inline bool operator!=(const Slice& x, const Slice& y) { return !(x == y); }
 
 inline int Slice::compare(const Slice& b) const {
   const size_t min_len = (size_ < b.size_) ? size_ : b.size_;
   int r = memcmp(data_, b.data_, min_len);
   if (r == 0) {
-    if (size_ < b.size_) r = -1;
-    else if (size_ > b.size_) r = +1;
+    if (size_ < b.size_)
+      r = -1;
+    else if (size_ > b.size_)
+      r = +1;
   }
   return r;
 }
 
 }  // namespace leveldb
 
-
 #endif  // STORAGE_LEVELDB_INCLUDE_SLICE_H_
diff --git a/include/leveldb/status.h b/include/leveldb/status.h
index ee9fac2..54cf377 100644
--- a/include/leveldb/status.h
+++ b/include/leveldb/status.h
@@ -15,6 +15,7 @@
 
 #include <algorithm>
 #include <string>
+
 #include "leveldb/export.h"
 #include "leveldb/slice.h"
 
@@ -23,7 +24,7 @@ namespace leveldb {
 class LEVELDB_EXPORT Status {
  public:
   // Create a success status.
-  Status() noexcept : state_(nullptr) { }
+  Status() noexcept : state_(nullptr) {}
   ~Status() { delete[] state_; }
 
   Status(const Status& rhs);
diff --git a/include/leveldb/table.h b/include/leveldb/table.h
index e9f6641..14a6a44 100644
--- a/include/leveldb/table.h
+++ b/include/leveldb/table.h
@@ -6,6 +6,7 @@
 #define STORAGE_LEVELDB_INCLUDE_TABLE_H_
 
 #include <stdint.h>
+
 #include "leveldb/export.h"
 #include "leveldb/iterator.h"
 
@@ -36,10 +37,8 @@ class LEVELDB_EXPORT Table {
   // for the duration of the returned table's lifetime.
   //
   // *file must remain live while this Table is in use.
-  static Status Open(const Options& options,
-                     RandomAccessFile* file,
-                     uint64_t file_size,
-                     Table** table);
+  static Status Open(const Options& options, RandomAccessFile* file,
+                     uint64_t file_size, Table** table);
 
   Table(const Table&) = delete;
   void operator=(const Table&) = delete;
@@ -70,11 +69,9 @@ class LEVELDB_EXPORT Table {
   // to Seek(key).  May not make such a call if filter policy says
   // that key is not present.
   friend class TableCache;
-  Status InternalGet(
-      const ReadOptions&, const Slice& key,
-      void* arg,
-      void (*handle_result)(void* arg, const Slice& k, const Slice& v));
-
+  Status InternalGet(const ReadOptions&, const Slice& key, void* arg,
+                     void (*handle_result)(void* arg, const Slice& k,
+                                           const Slice& v));
 
   void ReadMeta(const Footer& footer);
   void ReadFilter(const Slice& filter_handle_value);
diff --git a/include/leveldb/table_builder.h b/include/leveldb/table_builder.h
index 8d05d33..f8361fd 100644
--- a/include/leveldb/table_builder.h
+++ b/include/leveldb/table_builder.h
@@ -14,6 +14,7 @@
 #define STORAGE_LEVELDB_INCLUDE_TABLE_BUILDER_H_
 
 #include <stdint.h>
+
 #include "leveldb/export.h"
 #include "leveldb/options.h"
 #include "leveldb/status.h"
diff --git a/include/leveldb/write_batch.h b/include/leveldb/write_batch.h
index 5380c53..21f7c63 100644
--- a/include/leveldb/write_batch.h
+++ b/include/leveldb/write_batch.h
@@ -22,6 +22,7 @@
 #define STORAGE_LEVELDB_INCLUDE_WRITE_BATCH_H_
 
 #include <string>
+
 #include "leveldb/export.h"
 #include "leveldb/status.h"
 
@@ -35,7 +36,7 @@ class LEVELDB_EXPORT WriteBatch {
 
   // Intentionally copyable.
   WriteBatch(const WriteBatch&) = default;
-  WriteBatch& operator =(const WriteBatch&) = default;
+  WriteBatch& operator=(const WriteBatch&) = default;
 
   ~WriteBatch();
 
diff --git a/issues/issue178_test.cc b/issues/issue178_test.cc
index 1b1cf8b..d50ffeb 100644
--- a/issues/issue178_test.cc
+++ b/issues/issue178_test.cc
@@ -3,9 +3,9 @@
 // found in the LICENSE file. See the AUTHORS file for names of contributors.
 
 // Test for issue 178: a manual compaction causes deleted data to reappear.
+#include <cstdlib>
 #include <iostream>
 #include <sstream>
-#include <cstdlib>
 
 #include "leveldb/db.h"
 #include "leveldb/write_batch.h"
@@ -21,11 +21,9 @@ std::string Key1(int i) {
   return buf;
 }
 
-std::string Key2(int i) {
-  return Key1(i) + "_xxx";
-}
+std::string Key2(int i) { return Key1(i) + "_xxx"; }
 
-class Issue178 { };
+class Issue178 {};
 
 TEST(Issue178, Test) {
   // Get rid of any state from an old run.
@@ -87,6 +85,4 @@ TEST(Issue178, Test) {
 
 }  // anonymous namespace
 
-int main(int argc, char** argv) {
-  return leveldb::test::RunAllTests();
-}
+int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
diff --git a/issues/issue200_test.cc b/issues/issue200_test.cc
index 1cec79f..877b2af 100644
--- a/issues/issue200_test.cc
+++ b/issues/issue200_test.cc
@@ -11,14 +11,14 @@
 
 namespace leveldb {
 
-class Issue200 { };
+class Issue200 {};
 
 TEST(Issue200, Test) {
   // Get rid of any state from an old run.
   std::string dbpath = test::TmpDir() + "/leveldb_issue200_test";
   DestroyDB(dbpath, Options());
 
-  DB *db;
+  DB* db;
   Options options;
   options.create_if_missing = true;
   ASSERT_OK(DB::Open(options, dbpath, &db));
@@ -31,7 +31,7 @@ TEST(Issue200, Test) {
   ASSERT_OK(db->Put(write_options, "5", "f"));
 
   ReadOptions read_options;
-  Iterator *iter = db->NewIterator(read_options);
+  Iterator* iter = db->NewIterator(read_options);
 
   // Add an element that should not be reflected in the iterator.
   ASSERT_OK(db->Put(write_options, "25", "cd"));
@@ -54,6 +54,4 @@ TEST(Issue200, Test) {
 
 }  // namespace leveldb
 
-int main(int argc, char** argv) {
-  return leveldb::test::RunAllTests();
-}
+int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
diff --git a/port/port.h b/port/port.h
index b2210a7..4b247f7 100644
--- a/port/port.h
+++ b/port/port.h
@@ -11,9 +11,9 @@
 // porting to a new platform, see "port_example.h" for documentation
 // of what the new port_<platform>.h file must provide.
 #if defined(LEVELDB_PLATFORM_POSIX) || defined(LEVELDB_PLATFORM_WINDOWS)
-#  include "port/port_stdcxx.h"
+#include "port/port_stdcxx.h"
 #elif defined(LEVELDB_PLATFORM_CHROMIUM)
-#  include "port/port_chromium.h"
+#include "port/port_chromium.h"
 #endif
 
 #endif  // STORAGE_LEVELDB_PORT_PORT_H_
diff --git a/port/port_stdcxx.h b/port/port_stdcxx.h
index 7638ded..e9cb0e5 100644
--- a/port/port_stdcxx.h
+++ b/port/port_stdcxx.h
@@ -30,10 +30,10 @@
 #endif  // HAVE_SNAPPY
 
 #include <cassert>
+#include <condition_variable>  // NOLINT
 #include <cstddef>
 #include <cstdint>
-#include <condition_variable>  // NOLINT
-#include <mutex>               // NOLINT
+#include <mutex>  // NOLINT
 #include <string>
 
 #include "port/thread_annotations.h"
@@ -56,7 +56,7 @@ class LOCKABLE Mutex {
 
   void Lock() EXCLUSIVE_LOCK_FUNCTION() { mu_.lock(); }
   void Unlock() UNLOCK_FUNCTION() { mu_.unlock(); }
-  void AssertHeld() ASSERT_EXCLUSIVE_LOCK() { }
+  void AssertHeld() ASSERT_EXCLUSIVE_LOCK() {}
 
  private:
   friend class CondVar;
@@ -79,6 +79,7 @@ class CondVar {
   }
   void Signal() { cv_.notify_one(); }
   void SignalAll() { cv_.notify_all(); }
+
  private:
   std::condition_variable cv_;
   Mutex* const mu_;
@@ -94,7 +95,9 @@ inline bool Snappy_Compress(const char* input, size_t length,
   return true;
 #else
   // Silence compiler warnings about unused arguments.
-  (void)input; (void)length; (void)output;
+  (void)input;
+  (void)length;
+  (void)output;
 #endif  // HAVE_SNAPPY
 
   return false;
@@ -106,7 +109,9 @@ inline bool Snappy_GetUncompressedLength(const char* input, size_t length,
   return snappy::GetUncompressedLength(input, length, result);
 #else
   // Silence compiler warnings about unused arguments.
-  (void)input; (void)length; (void)result;
+  (void)input;
+  (void)length;
+  (void)result;
   return false;
 #endif  // HAVE_SNAPPY
 }
@@ -116,14 +121,17 @@ inline bool Snappy_Uncompress(const char* input, size_t length, char* output) {
   return snappy::RawUncompress(input, length, output);
 #else
   // Silence compiler warnings about unused arguments.
-  (void)input; (void)length; (void)output;
+  (void)input;
+  (void)length;
+  (void)output;
   return false;
 #endif  // HAVE_SNAPPY
 }
 
 inline bool GetHeapProfile(void (*func)(void*, const char*, int), void* arg) {
   // Silence compiler warnings about unused arguments.
-  (void)func; (void)arg;
+  (void)func;
+  (void)arg;
   return false;
 }
 
@@ -132,7 +140,9 @@ inline uint32_t AcceleratedCRC32C(uint32_t crc, const char* buf, size_t size) {
   return ::crc32c::Extend(crc, reinterpret_cast<const uint8_t*>(buf), size);
 #else
   // Silence compiler warnings about unused arguments.
-  (void)crc; (void)buf; (void)size;
+  (void)crc;
+  (void)buf;
+  (void)size;
   return 0;
 #endif  // HAVE_CRC32C
 }
diff --git a/port/thread_annotations.h b/port/thread_annotations.h
index b737c69..1547df9 100644
--- a/port/thread_annotations.h
+++ b/port/thread_annotations.h
@@ -13,9 +13,9 @@
 
 #if defined(__clang__)
 
-#define THREAD_ANNOTATION_ATTRIBUTE__(x)   __attribute__((x))
+#define THREAD_ANNOTATION_ATTRIBUTE__(x) __attribute__((x))
 #else
-#define THREAD_ANNOTATION_ATTRIBUTE__(x)   // no-op
+#define THREAD_ANNOTATION_ATTRIBUTE__(x)  // no-op
 #endif
 
 #endif  // !defined(THREAD_ANNOTATION_ATTRIBUTE__)
@@ -54,18 +54,15 @@
 #endif
 
 #ifndef LOCK_RETURNED
-#define LOCK_RETURNED(x) \
-  THREAD_ANNOTATION_ATTRIBUTE__(lock_returned(x))
+#define LOCK_RETURNED(x) THREAD_ANNOTATION_ATTRIBUTE__(lock_returned(x))
 #endif
 
 #ifndef LOCKABLE
-#define LOCKABLE \
-  THREAD_ANNOTATION_ATTRIBUTE__(lockable)
+#define LOCKABLE THREAD_ANNOTATION_ATTRIBUTE__(lockable)
 #endif
 
 #ifndef SCOPED_LOCKABLE
-#define SCOPED_LOCKABLE \
-  THREAD_ANNOTATION_ATTRIBUTE__(scoped_lockable)
+#define SCOPED_LOCKABLE THREAD_ANNOTATION_ATTRIBUTE__(scoped_lockable)
 #endif
 
 #ifndef EXCLUSIVE_LOCK_FUNCTION
diff --git a/table/block.cc b/table/block.cc
index 6fdfdea..ad0ee98 100644
--- a/table/block.cc
+++ b/table/block.cc
@@ -6,8 +6,9 @@
 
 #include "table/block.h"
 
-#include <vector>
 #include <algorithm>
+#include <vector>
+
 #include "leveldb/comparator.h"
 #include "table/format.h"
 #include "util/coding.h"
@@ -27,7 +28,7 @@ Block::Block(const BlockContents& contents)
   if (size_ < sizeof(uint32_t)) {
     size_ = 0;  // Error marker
   } else {
-    size_t max_restarts_allowed = (size_-sizeof(uint32_t)) / sizeof(uint32_t);
+    size_t max_restarts_allowed = (size_ - sizeof(uint32_t)) / sizeof(uint32_t);
     if (NumRestarts() > max_restarts_allowed) {
       // The size is too small for NumRestarts()
       size_ = 0;
@@ -51,8 +52,7 @@ Block::~Block() {
 // If any errors are detected, returns nullptr.  Otherwise, returns a
 // pointer to the key delta (just past the three decoded values).
 static inline const char* DecodeEntry(const char* p, const char* limit,
-                                      uint32_t* shared,
-                                      uint32_t* non_shared,
+                                      uint32_t* shared, uint32_t* non_shared,
                                       uint32_t* value_length) {
   if (limit - p < 3) return nullptr;
   *shared = reinterpret_cast<const unsigned char*>(p)[0];
@@ -76,9 +76,9 @@ static inline const char* DecodeEntry(const char* p, const char* limit,
 class Block::Iter : public Iterator {
  private:
   const Comparator* const comparator_;
-  const char* const data_;      // underlying block contents
-  uint32_t const restarts_;     // Offset of restart array (list of fixed32)
-  uint32_t const num_restarts_; // Number of uint32_t entries in restart array
+  const char* const data_;       // underlying block contents
+  uint32_t const restarts_;      // Offset of restart array (list of fixed32)
+  uint32_t const num_restarts_;  // Number of uint32_t entries in restart array
 
   // current_ is offset in data_ of current entry.  >= restarts_ if !Valid
   uint32_t current_;
@@ -112,9 +112,7 @@ class Block::Iter : public Iterator {
   }
 
  public:
-  Iter(const Comparator* comparator,
-       const char* data,
-       uint32_t restarts,
+  Iter(const Comparator* comparator, const char* data, uint32_t restarts,
        uint32_t num_restarts)
       : comparator_(comparator),
         data_(data),
@@ -171,9 +169,9 @@ class Block::Iter : public Iterator {
       uint32_t mid = (left + right + 1) / 2;
       uint32_t region_offset = GetRestartPoint(mid);
       uint32_t shared, non_shared, value_length;
-      const char* key_ptr = DecodeEntry(data_ + region_offset,
-                                        data_ + restarts_,
-                                        &shared, &non_shared, &value_length);
+      const char* key_ptr =
+          DecodeEntry(data_ + region_offset, data_ + restarts_, &shared,
+                      &non_shared, &value_length);
       if (key_ptr == nullptr || (shared != 0)) {
         CorruptionError();
         return;
@@ -253,7 +251,7 @@ class Block::Iter : public Iterator {
   }
 };
 
-Iterator* Block::NewIterator(const Comparator* cmp) {
+Iterator* Block::NewIterator(const Comparator* comparator) {
   if (size_ < sizeof(uint32_t)) {
     return NewErrorIterator(Status::Corruption("bad block contents"));
   }
@@ -261,7 +259,7 @@ Iterator* Block::NewIterator(const Comparator* cmp) {
   if (num_restarts == 0) {
     return NewEmptyIterator();
   } else {
-    return new Iter(cmp, data_, restart_offset_, num_restarts);
+    return new Iter(comparator, data_, restart_offset_, num_restarts);
   }
 }
 
diff --git a/table/block.h b/table/block.h
index 2493eb9..3d4b03c 100644
--- a/table/block.h
+++ b/table/block.h
@@ -7,6 +7,7 @@
 
 #include <stddef.h>
 #include <stdint.h>
+
 #include "leveldb/iterator.h"
 
 namespace leveldb {
@@ -29,8 +30,8 @@ class Block {
 
   const char* data_;
   size_t size_;
-  uint32_t restart_offset_;     // Offset in data_ of restart array
-  bool owned_;                  // Block owns data_[]
+  uint32_t restart_offset_;  // Offset in data_ of restart array
+  bool owned_;               // Block owns data_[]
 
   // No copying allowed
   Block(const Block&);
diff --git a/table/block_builder.cc b/table/block_builder.cc
index db660cd..f7cb1b0 100644
--- a/table/block_builder.cc
+++ b/table/block_builder.cc
@@ -28,8 +28,10 @@
 
 #include "table/block_builder.h"
 
-#include <algorithm>
 #include <assert.h>
+
+#include <algorithm>
+
 #include "leveldb/comparator.h"
 #include "leveldb/table_builder.h"
 #include "util/coding.h"
@@ -37,27 +39,24 @@
 namespace leveldb {
 
 BlockBuilder::BlockBuilder(const Options* options)
-    : options_(options),
-      restarts_(),
-      counter_(0),
-      finished_(false) {
+    : options_(options), restarts_(), counter_(0), finished_(false) {
   assert(options->block_restart_interval >= 1);
-  restarts_.push_back(0);       // First restart point is at offset 0
+  restarts_.push_back(0);  // First restart point is at offset 0
 }
 
 void BlockBuilder::Reset() {
   buffer_.clear();
   restarts_.clear();
-  restarts_.push_back(0);       // First restart point is at offset 0
+  restarts_.push_back(0);  // First restart point is at offset 0
   counter_ = 0;
   finished_ = false;
   last_key_.clear();
 }
 
 size_t BlockBuilder::CurrentSizeEstimate() const {
-  return (buffer_.size() +                        // Raw data buffer
-          restarts_.size() * sizeof(uint32_t) +   // Restart array
-          sizeof(uint32_t));                      // Restart array length
+  return (buffer_.size() +                       // Raw data buffer
+          restarts_.size() * sizeof(uint32_t) +  // Restart array
+          sizeof(uint32_t));                     // Restart array length
 }
 
 Slice BlockBuilder::Finish() {
@@ -74,7 +73,7 @@ void BlockBuilder::Add(const Slice& key, const Slice& value) {
   Slice last_key_piece(last_key_);
   assert(!finished_);
   assert(counter_ <= options_->block_restart_interval);
-  assert(buffer_.empty() // No values yet?
+  assert(buffer_.empty()  // No values yet?
          || options_->comparator->Compare(key, last_key_piece) > 0);
   size_t shared = 0;
   if (counter_ < options_->block_restart_interval) {
diff --git a/table/block_builder.h b/table/block_builder.h
index 4fbcb33..d0d9b6e 100644
--- a/table/block_builder.h
+++ b/table/block_builder.h
@@ -5,9 +5,10 @@
 #ifndef STORAGE_LEVELDB_TABLE_BLOCK_BUILDER_H_
 #define STORAGE_LEVELDB_TABLE_BLOCK_BUILDER_H_
 
+#include <stdint.h>
+
 #include <vector>
 
-#include <stdint.h>
 #include "leveldb/slice.h"
 
 namespace leveldb {
@@ -35,17 +36,15 @@ class BlockBuilder {
   size_t CurrentSizeEstimate() const;
 
   // Return true iff no entries have been added since the last Reset()
-  bool empty() const {
-    return buffer_.empty();
-  }
+  bool empty() const { return buffer_.empty(); }
 
  private:
-  const Options*        options_;
-  std::string           buffer_;      // Destination buffer
-  std::vector<uint32_t> restarts_;    // Restart points
-  int                   counter_;     // Number of entries emitted since restart
-  bool                  finished_;    // Has Finish() been called?
-  std::string           last_key_;
+  const Options* options_;
+  std::string buffer_;              // Destination buffer
+  std::vector<uint32_t> restarts_;  // Restart points
+  int counter_;                     // Number of entries emitted since restart
+  bool finished_;                   // Has Finish() been called?
+  std::string last_key_;
 
   // No copying allowed
   BlockBuilder(const BlockBuilder&);
diff --git a/table/filter_block.cc b/table/filter_block.cc
index ce0aa04..09ec009 100644
--- a/table/filter_block.cc
+++ b/table/filter_block.cc
@@ -16,8 +16,7 @@ static const size_t kFilterBaseLg = 11;
 static const size_t kFilterBase = 1 << kFilterBaseLg;
 
 FilterBlockBuilder::FilterBlockBuilder(const FilterPolicy* policy)
-    : policy_(policy) {
-}
+    : policy_(policy) {}
 
 void FilterBlockBuilder::StartBlock(uint64_t block_offset) {
   uint64_t filter_index = (block_offset / kFilterBase);
@@ -62,7 +61,7 @@ void FilterBlockBuilder::GenerateFilter() {
   tmp_keys_.resize(num_keys);
   for (size_t i = 0; i < num_keys; i++) {
     const char* base = keys_.data() + start_[i];
-    size_t length = start_[i+1] - start_[i];
+    size_t length = start_[i + 1] - start_[i];
     tmp_keys_[i] = Slice(base, length);
   }
 
@@ -77,14 +76,10 @@ void FilterBlockBuilder::GenerateFilter() {
 
 FilterBlockReader::FilterBlockReader(const FilterPolicy* policy,
                                      const Slice& contents)
-    : policy_(policy),
-      data_(nullptr),
-      offset_(nullptr),
-      num_(0),
-      base_lg_(0) {
+    : policy_(policy), data_(nullptr), offset_(nullptr), num_(0), base_lg_(0) {
   size_t n = contents.size();
   if (n < 5) return;  // 1 byte for base_lg_ and 4 for start of offset array
-  base_lg_ = contents[n-1];
+  base_lg_ = contents[n - 1];
   uint32_t last_word = DecodeFixed32(contents.data() + n - 5);
   if (last_word > n - 5) return;
   data_ = contents.data();
@@ -95,8 +90,8 @@ FilterBlockReader::FilterBlockReader(const FilterPolicy* policy,
 bool FilterBlockReader::KeyMayMatch(uint64_t block_offset, const Slice& key) {
   uint64_t index = block_offset >> base_lg_;
   if (index < num_) {
-    uint32_t start = DecodeFixed32(offset_ + index*4);
-    uint32_t limit = DecodeFixed32(offset_ + index*4 + 4);
+    uint32_t start = DecodeFixed32(offset_ + index * 4);
+    uint32_t limit = DecodeFixed32(offset_ + index * 4 + 4);
     if (start <= limit && limit <= static_cast<size_t>(offset_ - data_)) {
       Slice filter = Slice(data_ + start, limit - start);
       return policy_->KeyMayMatch(key, filter);
@@ -108,4 +103,4 @@ bool FilterBlockReader::KeyMayMatch(uint64_t block_offset, const Slice& key) {
   return true;  // Errors are treated as potential matches
 }
 
-}
+}  // namespace leveldb
diff --git a/table/filter_block.h b/table/filter_block.h
index c67d010..1b034dc 100644
--- a/table/filter_block.h
+++ b/table/filter_block.h
@@ -11,8 +11,10 @@
 
 #include <stddef.h>
 #include <stdint.h>
+
 #include <string>
 #include <vector>
+
 #include "leveldb/slice.h"
 #include "util/hash.h"
 
@@ -38,10 +40,10 @@ class FilterBlockBuilder {
   void GenerateFilter();
 
   const FilterPolicy* policy_;
-  std::string keys_;              // Flattened key contents
-  std::vector<size_t> start_;     // Starting index in keys_ of each key
-  std::string result_;            // Filter data computed so far
-  std::vector<Slice> tmp_keys_;   // policy_->CreateFilter() argument
+  std::string keys_;             // Flattened key contents
+  std::vector<size_t> start_;    // Starting index in keys_ of each key
+  std::string result_;           // Filter data computed so far
+  std::vector<Slice> tmp_keys_;  // policy_->CreateFilter() argument
   std::vector<uint32_t> filter_offsets_;
 
   // No copying allowed
@@ -51,7 +53,7 @@ class FilterBlockBuilder {
 
 class FilterBlockReader {
  public:
- // REQUIRES: "contents" and *policy must stay live while *this is live.
+  // REQUIRES: "contents" and *policy must stay live while *this is live.
   FilterBlockReader(const FilterPolicy* policy, const Slice& contents);
   bool KeyMayMatch(uint64_t block_offset, const Slice& key);
 
@@ -63,6 +65,6 @@ class FilterBlockReader {
   size_t base_lg_;      // Encoding parameter (see kFilterBaseLg in .cc file)
 };
 
-}
+}  // namespace leveldb
 
 #endif  // STORAGE_LEVELDB_TABLE_FILTER_BLOCK_H_
diff --git a/table/filter_block_test.cc b/table/filter_block_test.cc
index 8c4a474..6cdd435 100644
--- a/table/filter_block_test.cc
+++ b/table/filter_block_test.cc
@@ -16,9 +16,7 @@ namespace leveldb {
 // For testing: emit an array with one hash value per key
 class TestHashFilter : public FilterPolicy {
  public:
-  virtual const char* Name() const {
-    return "TestHashFilter";
-  }
+  virtual const char* Name() const { return "TestHashFilter"; }
 
   virtual void CreateFilter(const Slice* keys, int n, std::string* dst) const {
     for (int i = 0; i < n; i++) {
@@ -69,8 +67,8 @@ TEST(FilterBlockTest, SingleChunk) {
   ASSERT_TRUE(reader.KeyMayMatch(100, "box"));
   ASSERT_TRUE(reader.KeyMayMatch(100, "hello"));
   ASSERT_TRUE(reader.KeyMayMatch(100, "foo"));
-  ASSERT_TRUE(! reader.KeyMayMatch(100, "missing"));
-  ASSERT_TRUE(! reader.KeyMayMatch(100, "other"));
+  ASSERT_TRUE(!reader.KeyMayMatch(100, "missing"));
+  ASSERT_TRUE(!reader.KeyMayMatch(100, "other"));
 }
 
 TEST(FilterBlockTest, MultiChunk) {
@@ -99,30 +97,28 @@ TEST(FilterBlockTest, MultiChunk) {
   // Check first filter
   ASSERT_TRUE(reader.KeyMayMatch(0, "foo"));
   ASSERT_TRUE(reader.KeyMayMatch(2000, "bar"));
-  ASSERT_TRUE(! reader.KeyMayMatch(0, "box"));
-  ASSERT_TRUE(! reader.KeyMayMatch(0, "hello"));
+  ASSERT_TRUE(!reader.KeyMayMatch(0, "box"));
+  ASSERT_TRUE(!reader.KeyMayMatch(0, "hello"));
 
   // Check second filter
   ASSERT_TRUE(reader.KeyMayMatch(3100, "box"));
-  ASSERT_TRUE(! reader.KeyMayMatch(3100, "foo"));
-  ASSERT_TRUE(! reader.KeyMayMatch(3100, "bar"));
-  ASSERT_TRUE(! reader.KeyMayMatch(3100, "hello"));
+  ASSERT_TRUE(!reader.KeyMayMatch(3100, "foo"));
+  ASSERT_TRUE(!reader.KeyMayMatch(3100, "bar"));
+  ASSERT_TRUE(!reader.KeyMayMatch(3100, "hello"));
 
   // Check third filter (empty)
-  ASSERT_TRUE(! reader.KeyMayMatch(4100, "foo"));
-  ASSERT_TRUE(! reader.KeyMayMatch(4100, "bar"));
-  ASSERT_TRUE(! reader.KeyMayMatch(4100, "box"));
-  ASSERT_TRUE(! reader.KeyMayMatch(4100, "hello"));
+  ASSERT_TRUE(!reader.KeyMayMatch(4100, "foo"));
+  ASSERT_TRUE(!reader.KeyMayMatch(4100, "bar"));
+  ASSERT_TRUE(!reader.KeyMayMatch(4100, "box"));
+  ASSERT_TRUE(!reader.KeyMayMatch(4100, "hello"));
 
   // Check last filter
   ASSERT_TRUE(reader.KeyMayMatch(9000, "box"));
   ASSERT_TRUE(reader.KeyMayMatch(9000, "hello"));
-  ASSERT_TRUE(! reader.KeyMayMatch(9000, "foo"));
-  ASSERT_TRUE(! reader.KeyMayMatch(9000, "bar"));
+  ASSERT_TRUE(!reader.KeyMayMatch(9000, "foo"));
+  ASSERT_TRUE(!reader.KeyMayMatch(9000, "bar"));
 }
 
 }  // namespace leveldb
 
-int main(int argc, char** argv) {
-  return leveldb::test::RunAllTests();
-}
+int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
diff --git a/table/format.cc b/table/format.cc
index 24e4e02..e183977 100644
--- a/table/format.cc
+++ b/table/format.cc
@@ -21,8 +21,7 @@ void BlockHandle::EncodeTo(std::string* dst) const {
 }
 
 Status BlockHandle::DecodeFrom(Slice* input) {
-  if (GetVarint64(input, &offset_) &&
-      GetVarint64(input, &size_)) {
+  if (GetVarint64(input, &offset_) && GetVarint64(input, &size_)) {
     return Status::OK();
   } else {
     return Status::Corruption("bad block handle");
@@ -62,10 +61,8 @@ Status Footer::DecodeFrom(Slice* input) {
   return result;
 }
 
-Status ReadBlock(RandomAccessFile* file,
-                 const ReadOptions& options,
-                 const BlockHandle& handle,
-                 BlockContents* result) {
+Status ReadBlock(RandomAccessFile* file, const ReadOptions& options,
+                 const BlockHandle& handle, BlockContents* result) {
   result->data = Slice();
   result->cachable = false;
   result->heap_allocated = false;
@@ -86,7 +83,7 @@ Status ReadBlock(RandomAccessFile* file,
   }
 
   // Check the crc of the type and the block contents
-  const char* data = contents.data();    // Pointer to where Read put the data
+  const char* data = contents.data();  // Pointer to where Read put the data
   if (options.verify_checksums) {
     const uint32_t crc = crc32c::Unmask(DecodeFixed32(data + n + 1));
     const uint32_t actual = crc32c::Value(data, n + 1);
diff --git a/table/format.h b/table/format.h
index 144ff55..dacaa9f 100644
--- a/table/format.h
+++ b/table/format.h
@@ -5,8 +5,10 @@
 #ifndef STORAGE_LEVELDB_TABLE_FORMAT_H_
 #define STORAGE_LEVELDB_TABLE_FORMAT_H_
 
-#include <string>
 #include <stdint.h>
+
+#include <string>
+
 #include "leveldb/slice.h"
 #include "leveldb/status.h"
 #include "leveldb/table_builder.h"
@@ -46,19 +48,15 @@ class BlockHandle {
 // end of every table file.
 class Footer {
  public:
-  Footer() { }
+  Footer() {}
 
   // The block handle for the metaindex block of the table
   const BlockHandle& metaindex_handle() const { return metaindex_handle_; }
   void set_metaindex_handle(const BlockHandle& h) { metaindex_handle_ = h; }
 
   // The block handle for the index block of the table
-  const BlockHandle& index_handle() const {
-    return index_handle_;
-  }
-  void set_index_handle(const BlockHandle& h) {
-    index_handle_ = h;
-  }
+  const BlockHandle& index_handle() const { return index_handle_; }
+  void set_index_handle(const BlockHandle& h) { index_handle_ = h; }
 
   void EncodeTo(std::string* dst) const;
   Status DecodeFrom(Slice* input);
@@ -66,9 +64,7 @@ class Footer {
   // Encoded length of a Footer.  Note that the serialization of a
   // Footer will always occupy exactly this many bytes.  It consists
   // of two block handles and a magic number.
-  enum {
-    kEncodedLength = 2*BlockHandle::kMaxEncodedLength + 8
-  };
+  enum { kEncodedLength = 2 * BlockHandle::kMaxEncodedLength + 8 };
 
  private:
   BlockHandle metaindex_handle_;
@@ -91,17 +87,13 @@ struct BlockContents {
 
 // Read the block identified by "handle" from "file".  On failure
 // return non-OK.  On success fill *result and return OK.
-Status ReadBlock(RandomAccessFile* file,
-                 const ReadOptions& options,
-                 const BlockHandle& handle,
-                 BlockContents* result);
+Status ReadBlock(RandomAccessFile* file, const ReadOptions& options,
+                 const BlockHandle& handle, BlockContents* result);
 
 // Implementation details follow.  Clients should ignore,
 
 inline BlockHandle::BlockHandle()
-    : offset_(~static_cast<uint64_t>(0)),
-      size_(~static_cast<uint64_t>(0)) {
-}
+    : offset_(~static_cast<uint64_t>(0)), size_(~static_cast<uint64_t>(0)) {}
 
 }  // namespace leveldb
 
diff --git a/table/iterator.cc b/table/iterator.cc
index 41ec1aa..dfef083 100644
--- a/table/iterator.cc
+++ b/table/iterator.cc
@@ -14,7 +14,7 @@ Iterator::Iterator() {
 Iterator::~Iterator() {
   if (!cleanup_head_.IsEmpty()) {
     cleanup_head_.Run();
-    for (CleanupNode* node = cleanup_head_.next; node != nullptr; ) {
+    for (CleanupNode* node = cleanup_head_.next; node != nullptr;) {
       node->Run();
       CleanupNode* next_node = node->next;
       delete node;
@@ -42,17 +42,23 @@ namespace {
 
 class EmptyIterator : public Iterator {
  public:
-  EmptyIterator(const Status& s) : status_(s) { }
+  EmptyIterator(const Status& s) : status_(s) {}
   ~EmptyIterator() override = default;
 
   bool Valid() const override { return false; }
-  void Seek(const Slice& target) override { }
-  void SeekToFirst() override { }
-  void SeekToLast() override { }
+  void Seek(const Slice& target) override {}
+  void SeekToFirst() override {}
+  void SeekToLast() override {}
   void Next() override { assert(false); }
   void Prev() override { assert(false); }
-  Slice key() const override { assert(false); return Slice(); }
-  Slice value() const override { assert(false); return Slice(); }
+  Slice key() const override {
+    assert(false);
+    return Slice();
+  }
+  Slice value() const override {
+    assert(false);
+    return Slice();
+  }
   Status status() const override { return status_; }
 
  private:
@@ -61,9 +67,7 @@ class EmptyIterator : public Iterator {
 
 }  // anonymous namespace
 
-Iterator* NewEmptyIterator() {
-  return new EmptyIterator(Status::OK());
-}
+Iterator* NewEmptyIterator() { return new EmptyIterator(Status::OK()); }
 
 Iterator* NewErrorIterator(const Status& status) {
   return new EmptyIterator(status);
diff --git a/table/iterator_wrapper.h b/table/iterator_wrapper.h
index f1814ca..c230572 100644
--- a/table/iterator_wrapper.h
+++ b/table/iterator_wrapper.h
@@ -16,10 +16,8 @@ namespace leveldb {
 // cache locality.
 class IteratorWrapper {
  public:
-  IteratorWrapper(): iter_(nullptr), valid_(false) { }
-  explicit IteratorWrapper(Iterator* iter): iter_(nullptr) {
-    Set(iter);
-  }
+  IteratorWrapper() : iter_(nullptr), valid_(false) {}
+  explicit IteratorWrapper(Iterator* iter) : iter_(nullptr) { Set(iter); }
   ~IteratorWrapper() { delete iter_; }
   Iterator* iter() const { return iter_; }
 
@@ -35,18 +33,46 @@ class IteratorWrapper {
     }
   }
 
-
   // Iterator interface methods
-  bool Valid() const        { return valid_; }
-  Slice key() const         { assert(Valid()); return key_; }
-  Slice value() const       { assert(Valid()); return iter_->value(); }
+  bool Valid() const { return valid_; }
+  Slice key() const {
+    assert(Valid());
+    return key_;
+  }
+  Slice value() const {
+    assert(Valid());
+    return iter_->value();
+  }
   // Methods below require iter() != nullptr
-  Status status() const     { assert(iter_); return iter_->status(); }
-  void Next()               { assert(iter_); iter_->Next();        Update(); }
-  void Prev()               { assert(iter_); iter_->Prev();        Update(); }
-  void Seek(const Slice& k) { assert(iter_); iter_->Seek(k);       Update(); }
-  void SeekToFirst()        { assert(iter_); iter_->SeekToFirst(); Update(); }
-  void SeekToLast()         { assert(iter_); iter_->SeekToLast();  Update(); }
+  Status status() const {
+    assert(iter_);
+    return iter_->status();
+  }
+  void Next() {
+    assert(iter_);
+    iter_->Next();
+    Update();
+  }
+  void Prev() {
+    assert(iter_);
+    iter_->Prev();
+    Update();
+  }
+  void Seek(const Slice& k) {
+    assert(iter_);
+    iter_->Seek(k);
+    Update();
+  }
+  void SeekToFirst() {
+    assert(iter_);
+    iter_->SeekToFirst();
+    Update();
+  }
+  void SeekToLast() {
+    assert(iter_);
+    iter_->SeekToLast();
+    Update();
+  }
 
  private:
   void Update() {
diff --git a/table/merger.cc b/table/merger.cc
index e079680..3a5c3e4 100644
--- a/table/merger.cc
+++ b/table/merger.cc
@@ -24,13 +24,9 @@ class MergingIterator : public Iterator {
     }
   }
 
-  virtual ~MergingIterator() {
-    delete[] children_;
-  }
+  virtual ~MergingIterator() { delete[] children_; }
 
-  virtual bool Valid() const {
-    return (current_ != nullptr);
-  }
+  virtual bool Valid() const { return (current_ != nullptr); }
 
   virtual void SeekToFirst() {
     for (int i = 0; i < n_; i++) {
@@ -145,10 +141,7 @@ class MergingIterator : public Iterator {
   IteratorWrapper* current_;
 
   // Which direction is the iterator moving?
-  enum Direction {
-    kForward,
-    kReverse
-  };
+  enum Direction { kForward, kReverse };
   Direction direction_;
 };
 
@@ -169,7 +162,7 @@ void MergingIterator::FindSmallest() {
 
 void MergingIterator::FindLargest() {
   IteratorWrapper* largest = nullptr;
-  for (int i = n_-1; i >= 0; i--) {
+  for (int i = n_ - 1; i >= 0; i--) {
     IteratorWrapper* child = &children_[i];
     if (child->Valid()) {
       if (largest == nullptr) {
@@ -183,14 +176,15 @@ void MergingIterator::FindLargest() {
 }
 }  // namespace
 
-Iterator* NewMergingIterator(const Comparator* cmp, Iterator** list, int n) {
+Iterator* NewMergingIterator(const Comparator* comparator, Iterator** children,
+                             int n) {
   assert(n >= 0);
   if (n == 0) {
     return NewEmptyIterator();
   } else if (n == 1) {
-    return list[0];
+    return children[0];
   } else {
-    return new MergingIterator(cmp, list, n);
+    return new MergingIterator(comparator, children, n);
   }
 }
 
diff --git a/table/merger.h b/table/merger.h
index bafdf5a..41cedc5 100644
--- a/table/merger.h
+++ b/table/merger.h
@@ -18,8 +18,8 @@ class Iterator;
 // key is present in K child iterators, it will be yielded K times.
 //
 // REQUIRES: n >= 0
-Iterator* NewMergingIterator(
-    const Comparator* comparator, Iterator** children, int n);
+Iterator* NewMergingIterator(const Comparator* comparator, Iterator** children,
+                             int n);
 
 }  // namespace leveldb
 
diff --git a/table/table.cc b/table/table.cc
index 8e737e1..b07bc88 100644
--- a/table/table.cc
+++ b/table/table.cc
@@ -20,7 +20,7 @@ namespace leveldb {
 struct Table::Rep {
   ~Rep() {
     delete filter;
-    delete [] filter_data;
+    delete[] filter_data;
     delete index_block;
   }
 
@@ -35,10 +35,8 @@ struct Table::Rep {
   Block* index_block;
 };
 
-Status Table::Open(const Options& options,
-                   RandomAccessFile* file,
-                   uint64_t size,
-                   Table** table) {
+Status Table::Open(const Options& options, RandomAccessFile* file,
+                   uint64_t size, Table** table) {
   *table = nullptr;
   if (size < Footer::kEncodedLength) {
     return Status::Corruption("file is too short to be an sstable");
@@ -130,14 +128,12 @@ void Table::ReadFilter(const Slice& filter_handle_value) {
     return;
   }
   if (block.heap_allocated) {
-    rep_->filter_data = block.data.data();     // Will need to delete later
+    rep_->filter_data = block.data.data();  // Will need to delete later
   }
   rep_->filter = new FilterBlockReader(rep_->options.filter_policy, block.data);
 }
 
-Table::~Table() {
-  delete rep_;
-}
+Table::~Table() { delete rep_; }
 
 static void DeleteBlock(void* arg, void* ignored) {
   delete reinterpret_cast<Block*>(arg);
@@ -156,8 +152,7 @@ static void ReleaseBlock(void* arg, void* h) {
 
 // Convert an index iterator value (i.e., an encoded BlockHandle)
 // into an iterator over the contents of the corresponding block.
-Iterator* Table::BlockReader(void* arg,
-                             const ReadOptions& options,
+Iterator* Table::BlockReader(void* arg, const ReadOptions& options,
                              const Slice& index_value) {
   Table* table = reinterpret_cast<Table*>(arg);
   Cache* block_cache = table->rep_->options.block_cache;
@@ -175,7 +170,7 @@ Iterator* Table::BlockReader(void* arg,
     if (block_cache != nullptr) {
       char cache_key_buffer[16];
       EncodeFixed64(cache_key_buffer, table->rep_->cache_id);
-      EncodeFixed64(cache_key_buffer+8, handle.offset());
+      EncodeFixed64(cache_key_buffer + 8, handle.offset());
       Slice key(cache_key_buffer, sizeof(cache_key_buffer));
       cache_handle = block_cache->Lookup(key);
       if (cache_handle != nullptr) {
@@ -185,8 +180,8 @@ Iterator* Table::BlockReader(void* arg,
         if (s.ok()) {
           block = new Block(contents);
           if (contents.cachable && options.fill_cache) {
-            cache_handle = block_cache->Insert(
-                key, block, block->size(), &DeleteCachedBlock);
+            cache_handle = block_cache->Insert(key, block, block->size(),
+                                               &DeleteCachedBlock);
           }
         }
       }
@@ -218,9 +213,9 @@ Iterator* Table::NewIterator(const ReadOptions& options) const {
       &Table::BlockReader, const_cast<Table*>(this), options);
 }
 
-Status Table::InternalGet(const ReadOptions& options, const Slice& k,
-                          void* arg,
-                          void (*saver)(void*, const Slice&, const Slice&)) {
+Status Table::InternalGet(const ReadOptions& options, const Slice& k, void* arg,
+                          void (*handle_result)(void*, const Slice&,
+                                                const Slice&)) {
   Status s;
   Iterator* iiter = rep_->index_block->NewIterator(rep_->options.comparator);
   iiter->Seek(k);
@@ -228,15 +223,14 @@ Status Table::InternalGet(const ReadOptions& options, const Slice& k,
     Slice handle_value = iiter->value();
     FilterBlockReader* filter = rep_->filter;
     BlockHandle handle;
-    if (filter != nullptr &&
-        handle.DecodeFrom(&handle_value).ok() &&
+    if (filter != nullptr && handle.DecodeFrom(&handle_value).ok() &&
         !filter->KeyMayMatch(handle.offset(), k)) {
       // Not found
     } else {
       Iterator* block_iter = BlockReader(this, options, iiter->value());
       block_iter->Seek(k);
       if (block_iter->Valid()) {
-        (*saver)(arg, block_iter->key(), block_iter->value());
+        (*handle_result)(arg, block_iter->key(), block_iter->value());
       }
       s = block_iter->status();
       delete block_iter;
@@ -249,7 +243,6 @@ Status Table::InternalGet(const ReadOptions& options, const Slice& k,
   return s;
 }
 
-
 uint64_t Table::ApproximateOffsetOf(const Slice& key) const {
   Iterator* index_iter =
       rep_->index_block->NewIterator(rep_->options.comparator);
diff --git a/table/table_builder.cc b/table/table_builder.cc
index 444d4f9..9afff76 100644
--- a/table/table_builder.cc
+++ b/table/table_builder.cc
@@ -5,6 +5,7 @@
 #include "leveldb/table_builder.h"
 
 #include <assert.h>
+
 #include "leveldb/comparator.h"
 #include "leveldb/env.h"
 #include "leveldb/filter_policy.h"
@@ -27,7 +28,7 @@ struct TableBuilder::Rep {
   BlockBuilder index_block;
   std::string last_key;
   int64_t num_entries;
-  bool closed;          // Either Finish() or Abandon() has been called.
+  bool closed;  // Either Finish() or Abandon() has been called.
   FilterBlockBuilder* filter_block;
 
   // We do not emit the index entry for a block until we have seen the
@@ -53,8 +54,9 @@ struct TableBuilder::Rep {
         index_block(&index_block_options),
         num_entries(0),
         closed(false),
-        filter_block(opt.filter_policy == nullptr ? nullptr
-                     : new FilterBlockBuilder(opt.filter_policy)),
+        filter_block(opt.filter_policy == nullptr
+                         ? nullptr
+                         : new FilterBlockBuilder(opt.filter_policy)),
         pending_index_entry(false) {
     index_block_options.block_restart_interval = 1;
   }
@@ -173,8 +175,7 @@ void TableBuilder::WriteBlock(BlockBuilder* block, BlockHandle* handle) {
 }
 
 void TableBuilder::WriteRawBlock(const Slice& block_contents,
-                                 CompressionType type,
-                                 BlockHandle* handle) {
+                                 CompressionType type, BlockHandle* handle) {
   Rep* r = rep_;
   handle->set_offset(r->offset);
   handle->set_size(block_contents.size());
@@ -184,7 +185,7 @@ void TableBuilder::WriteRawBlock(const Slice& block_contents,
     trailer[0] = type;
     uint32_t crc = crc32c::Value(block_contents.data(), block_contents.size());
     crc = crc32c::Extend(crc, trailer, 1);  // Extend crc to cover block type
-    EncodeFixed32(trailer+1, crc32c::Mask(crc));
+    EncodeFixed32(trailer + 1, crc32c::Mask(crc));
     r->status = r->file->Append(Slice(trailer, kBlockTrailerSize));
     if (r->status.ok()) {
       r->offset += block_contents.size() + kBlockTrailerSize;
@@ -192,9 +193,7 @@ void TableBuilder::WriteRawBlock(const Slice& block_contents,
   }
 }
 
-Status TableBuilder::status() const {
-  return rep_->status;
-}
+Status TableBuilder::status() const { return rep_->status; }
 
 Status TableBuilder::Finish() {
   Rep* r = rep_;
@@ -259,12 +258,8 @@ void TableBuilder::Abandon() {
   r->closed = true;
 }
 
-uint64_t TableBuilder::NumEntries() const {
-  return rep_->num_entries;
-}
+uint64_t TableBuilder::NumEntries() const { return rep_->num_entries; }
 
-uint64_t TableBuilder::FileSize() const {
-  return rep_->offset;
-}
+uint64_t TableBuilder::FileSize() const { return rep_->offset; }
 
 }  // namespace leveldb
diff --git a/table/table_test.cc b/table/table_test.cc
index 5573be6..0974052 100644
--- a/table/table_test.cc
+++ b/table/table_test.cc
@@ -6,6 +6,7 @@
 
 #include <map>
 #include <string>
+
 #include "db/dbformat.h"
 #include "db/memtable.h"
 #include "db/write_batch_internal.h"
@@ -27,8 +28,8 @@ namespace leveldb {
 static std::string Reverse(const Slice& key) {
   std::string str(key.ToString());
   std::string rev("");
-  for (std::string::reverse_iterator rit = str.rbegin();
-       rit != str.rend(); ++rit) {
+  for (std::string::reverse_iterator rit = str.rbegin(); rit != str.rend();
+       ++rit) {
     rev.push_back(*rit);
   }
   return rev;
@@ -45,9 +46,8 @@ class ReverseKeyComparator : public Comparator {
     return BytewiseComparator()->Compare(Reverse(a), Reverse(b));
   }
 
-  virtual void FindShortestSeparator(
-      std::string* start,
-      const Slice& limit) const {
+  virtual void FindShortestSeparator(std::string* start,
+                                     const Slice& limit) const {
     std::string s = Reverse(*start);
     std::string l = Reverse(limit);
     BytewiseComparator()->FindShortestSeparator(&s, l);
@@ -79,17 +79,17 @@ namespace {
 struct STLLessThan {
   const Comparator* cmp;
 
-  STLLessThan() : cmp(BytewiseComparator()) { }
-  STLLessThan(const Comparator* c) : cmp(c) { }
+  STLLessThan() : cmp(BytewiseComparator()) {}
+  STLLessThan(const Comparator* c) : cmp(c) {}
   bool operator()(const std::string& a, const std::string& b) const {
     return cmp->Compare(Slice(a), Slice(b)) < 0;
   }
 };
 }  // namespace
 
-class StringSink: public WritableFile {
+class StringSink : public WritableFile {
  public:
-  ~StringSink() { }
+  ~StringSink() {}
 
   const std::string& contents() const { return contents_; }
 
@@ -106,19 +106,17 @@ class StringSink: public WritableFile {
   std::string contents_;
 };
 
-
-class StringSource: public RandomAccessFile {
+class StringSource : public RandomAccessFile {
  public:
   StringSource(const Slice& contents)
-      : contents_(contents.data(), contents.size()) {
-  }
+      : contents_(contents.data(), contents.size()) {}
 
-  virtual ~StringSource() { }
+  virtual ~StringSource() {}
 
   uint64_t Size() const { return contents_.size(); }
 
   virtual Status Read(uint64_t offset, size_t n, Slice* result,
-                       char* scratch) const {
+                      char* scratch) const {
     if (offset >= contents_.size()) {
       return Status::InvalidArgument("invalid Read offset");
     }
@@ -140,8 +138,8 @@ typedef std::map<std::string, std::string, STLLessThan> KVMap;
 // BlockBuilder/TableBuilder and Block/Table.
 class Constructor {
  public:
-  explicit Constructor(const Comparator* cmp) : data_(STLLessThan(cmp)) { }
-  virtual ~Constructor() { }
+  explicit Constructor(const Comparator* cmp) : data_(STLLessThan(cmp)) {}
+  virtual ~Constructor() {}
 
   void Add(const std::string& key, const Slice& value) {
     data_[key] = value.ToString();
@@ -150,14 +148,11 @@ class Constructor {
   // Finish constructing the data structure with all the keys that have
   // been added so far.  Returns the keys in sorted order in "*keys"
   // and stores the key/value pairs in "*kvmap"
-  void Finish(const Options& options,
-              std::vector<std::string>* keys,
+  void Finish(const Options& options, std::vector<std::string>* keys,
               KVMap* kvmap) {
     *kvmap = data_;
     keys->clear();
-    for (KVMap::const_iterator it = data_.begin();
-         it != data_.end();
-         ++it) {
+    for (KVMap::const_iterator it = data_.begin(); it != data_.end(); ++it) {
       keys->push_back(it->first);
     }
     data_.clear();
@@ -178,23 +173,17 @@ class Constructor {
   KVMap data_;
 };
 
-class BlockConstructor: public Constructor {
+class BlockConstructor : public Constructor {
  public:
   explicit BlockConstructor(const Comparator* cmp)
-      : Constructor(cmp),
-        comparator_(cmp),
-        block_(nullptr) { }
-  ~BlockConstructor() {
-    delete block_;
-  }
+      : Constructor(cmp), comparator_(cmp), block_(nullptr) {}
+  ~BlockConstructor() { delete block_; }
   virtual Status FinishImpl(const Options& options, const KVMap& data) {
     delete block_;
     block_ = nullptr;
     BlockBuilder builder(&options);
 
-    for (KVMap::const_iterator it = data.begin();
-         it != data.end();
-         ++it) {
+    for (KVMap::const_iterator it = data.begin(); it != data.end(); ++it) {
       builder.Add(it->first, it->second);
     }
     // Open the block
@@ -218,23 +207,17 @@ class BlockConstructor: public Constructor {
   BlockConstructor();
 };
 
-class TableConstructor: public Constructor {
+class TableConstructor : public Constructor {
  public:
   TableConstructor(const Comparator* cmp)
-      : Constructor(cmp),
-        source_(nullptr), table_(nullptr) {
-  }
-  ~TableConstructor() {
-    Reset();
-  }
+      : Constructor(cmp), source_(nullptr), table_(nullptr) {}
+  ~TableConstructor() { Reset(); }
   virtual Status FinishImpl(const Options& options, const KVMap& data) {
     Reset();
     StringSink sink;
     TableBuilder builder(options, &sink);
 
-    for (KVMap::const_iterator it = data.begin();
-         it != data.end();
-         ++it) {
+    for (KVMap::const_iterator it = data.begin(); it != data.end(); ++it) {
       builder.Add(it->first, it->second);
       ASSERT_TRUE(builder.status().ok());
     }
@@ -273,9 +256,9 @@ class TableConstructor: public Constructor {
 };
 
 // A helper class that converts internal format keys into user keys
-class KeyConvertingIterator: public Iterator {
+class KeyConvertingIterator : public Iterator {
  public:
-  explicit KeyConvertingIterator(Iterator* iter) : iter_(iter) { }
+  explicit KeyConvertingIterator(Iterator* iter) : iter_(iter) {}
   virtual ~KeyConvertingIterator() { delete iter_; }
   virtual bool Valid() const { return iter_->Valid(); }
   virtual void Seek(const Slice& target) {
@@ -313,25 +296,20 @@ class KeyConvertingIterator: public Iterator {
   void operator=(const KeyConvertingIterator&);
 };
 
-class MemTableConstructor: public Constructor {
+class MemTableConstructor : public Constructor {
  public:
   explicit MemTableConstructor(const Comparator* cmp)
-      : Constructor(cmp),
-        internal_comparator_(cmp) {
+      : Constructor(cmp), internal_comparator_(cmp) {
     memtable_ = new MemTable(internal_comparator_);
     memtable_->Ref();
   }
-  ~MemTableConstructor() {
-    memtable_->Unref();
-  }
+  ~MemTableConstructor() { memtable_->Unref(); }
   virtual Status FinishImpl(const Options& options, const KVMap& data) {
     memtable_->Unref();
     memtable_ = new MemTable(internal_comparator_);
     memtable_->Ref();
     int seq = 1;
-    for (KVMap::const_iterator it = data.begin();
-         it != data.end();
-         ++it) {
+    for (KVMap::const_iterator it = data.begin(); it != data.end(); ++it) {
       memtable_->Add(seq, kTypeValue, it->first, it->second);
       seq++;
     }
@@ -346,24 +324,19 @@ class MemTableConstructor: public Constructor {
   MemTable* memtable_;
 };
 
-class DBConstructor: public Constructor {
+class DBConstructor : public Constructor {
  public:
   explicit DBConstructor(const Comparator* cmp)
-      : Constructor(cmp),
-        comparator_(cmp) {
+      : Constructor(cmp), comparator_(cmp) {
     db_ = nullptr;
     NewDB();
   }
-  ~DBConstructor() {
-    delete db_;
-  }
+  ~DBConstructor() { delete db_; }
   virtual Status FinishImpl(const Options& options, const KVMap& data) {
     delete db_;
     db_ = nullptr;
     NewDB();
-    for (KVMap::const_iterator it = data.begin();
-         it != data.end();
-         ++it) {
+    for (KVMap::const_iterator it = data.begin(); it != data.end(); ++it) {
       WriteBatch batch;
       batch.Put(it->first, it->second);
       ASSERT_TRUE(db_->Write(WriteOptions(), &batch).ok());
@@ -396,12 +369,7 @@ class DBConstructor: public Constructor {
   DB* db_;
 };
 
-enum TestType {
-  TABLE_TEST,
-  BLOCK_TEST,
-  MEMTABLE_TEST,
-  DB_TEST
-};
+enum TestType { TABLE_TEST, BLOCK_TEST, MEMTABLE_TEST, DB_TEST };
 
 struct TestArgs {
   TestType type;
@@ -410,33 +378,33 @@ struct TestArgs {
 };
 
 static const TestArgs kTestArgList[] = {
-  { TABLE_TEST, false, 16 },
-  { TABLE_TEST, false, 1 },
-  { TABLE_TEST, false, 1024 },
-  { TABLE_TEST, true, 16 },
-  { TABLE_TEST, true, 1 },
-  { TABLE_TEST, true, 1024 },
-
-  { BLOCK_TEST, false, 16 },
-  { BLOCK_TEST, false, 1 },
-  { BLOCK_TEST, false, 1024 },
-  { BLOCK_TEST, true, 16 },
-  { BLOCK_TEST, true, 1 },
-  { BLOCK_TEST, true, 1024 },
-
-  // Restart interval does not matter for memtables
-  { MEMTABLE_TEST, false, 16 },
-  { MEMTABLE_TEST, true, 16 },
-
-  // Do not bother with restart interval variations for DB
-  { DB_TEST, false, 16 },
-  { DB_TEST, true, 16 },
+    {TABLE_TEST, false, 16},
+    {TABLE_TEST, false, 1},
+    {TABLE_TEST, false, 1024},
+    {TABLE_TEST, true, 16},
+    {TABLE_TEST, true, 1},
+    {TABLE_TEST, true, 1024},
+
+    {BLOCK_TEST, false, 16},
+    {BLOCK_TEST, false, 1},
+    {BLOCK_TEST, false, 1024},
+    {BLOCK_TEST, true, 16},
+    {BLOCK_TEST, true, 1},
+    {BLOCK_TEST, true, 1024},
+
+    // Restart interval does not matter for memtables
+    {MEMTABLE_TEST, false, 16},
+    {MEMTABLE_TEST, true, 16},
+
+    // Do not bother with restart interval variations for DB
+    {DB_TEST, false, 16},
+    {DB_TEST, true, 16},
 };
 static const int kNumTestArgs = sizeof(kTestArgList) / sizeof(kTestArgList[0]);
 
 class Harness {
  public:
-  Harness() : constructor_(nullptr) { }
+  Harness() : constructor_(nullptr) {}
 
   void Init(const TestArgs& args) {
     delete constructor_;
@@ -466,9 +434,7 @@ class Harness {
     }
   }
 
-  ~Harness() {
-    delete constructor_;
-  }
+  ~Harness() { delete constructor_; }
 
   void Add(const std::string& key, const std::string& value) {
     constructor_->Add(key, value);
@@ -490,8 +456,7 @@ class Harness {
     ASSERT_TRUE(!iter->Valid());
     iter->SeekToFirst();
     for (KVMap::const_iterator model_iter = data.begin();
-         model_iter != data.end();
-         ++model_iter) {
+         model_iter != data.end(); ++model_iter) {
       ASSERT_EQ(ToString(data, model_iter), ToString(iter));
       iter->Next();
     }
@@ -505,8 +470,7 @@ class Harness {
     ASSERT_TRUE(!iter->Valid());
     iter->SeekToLast();
     for (KVMap::const_reverse_iterator model_iter = data.rbegin();
-         model_iter != data.rend();
-         ++model_iter) {
+         model_iter != data.rend(); ++model_iter) {
       ASSERT_EQ(ToString(data, model_iter), ToString(iter));
       iter->Prev();
     }
@@ -514,8 +478,7 @@ class Harness {
     delete iter;
   }
 
-  void TestRandomAccess(Random* rnd,
-                        const std::vector<std::string>& keys,
+  void TestRandomAccess(Random* rnd, const std::vector<std::string>& keys,
                         const KVMap& data) {
     static const bool kVerbose = false;
     Iterator* iter = constructor_->NewIterator();
@@ -546,8 +509,8 @@ class Harness {
         case 2: {
           std::string key = PickRandomKey(rnd, keys);
           model_iter = data.lower_bound(key);
-          if (kVerbose) fprintf(stderr, "Seek '%s'\n",
-                                EscapeString(key).c_str());
+          if (kVerbose)
+            fprintf(stderr, "Seek '%s'\n", EscapeString(key).c_str());
           iter->Seek(Slice(key));
           ASSERT_EQ(ToString(data, model_iter), ToString(iter));
           break;
@@ -558,7 +521,7 @@ class Harness {
             if (kVerbose) fprintf(stderr, "Prev\n");
             iter->Prev();
             if (model_iter == data.begin()) {
-              model_iter = data.end();   // Wrap around to invalid value
+              model_iter = data.end();  // Wrap around to invalid value
             } else {
               --model_iter;
             }
@@ -621,8 +584,8 @@ class Harness {
           break;
         case 1: {
           // Attempt to return something smaller than an existing key
-          if (result.size() > 0 && result[result.size()-1] > '\0') {
-            result[result.size()-1]--;
+          if (!result.empty() && result[result.size() - 1] > '\0') {
+            result[result.size() - 1]--;
           }
           break;
         }
@@ -720,8 +683,8 @@ TEST(Harness, Randomized) {
     for (int num_entries = 0; num_entries < 2000;
          num_entries += (num_entries < 50 ? 1 : 200)) {
       if ((num_entries % 10) == 0) {
-        fprintf(stderr, "case %d of %d: num_entries = %d\n",
-                (i + 1), int(kNumTestArgs), num_entries);
+        fprintf(stderr, "case %d of %d: num_entries = %d\n", (i + 1),
+                int(kNumTestArgs), num_entries);
       }
       for (int e = 0; e < num_entries; e++) {
         std::string v;
@@ -735,7 +698,7 @@ TEST(Harness, Randomized) {
 
 TEST(Harness, RandomizedLongDB) {
   Random rnd(test::RandomSeed());
-  TestArgs args = { DB_TEST, false, 16 };
+  TestArgs args = {DB_TEST, false, 16};
   Init(args);
   int num_entries = 100000;
   for (int e = 0; e < num_entries; e++) {
@@ -757,7 +720,7 @@ TEST(Harness, RandomizedLongDB) {
   ASSERT_GT(files, 0);
 }
 
-class MemTableTest { };
+class MemTableTest {};
 
 TEST(MemTableTest, Simple) {
   InternalKeyComparator cmp(BytewiseComparator());
@@ -774,8 +737,7 @@ TEST(MemTableTest, Simple) {
   Iterator* iter = memtable->NewIterator();
   iter->SeekToFirst();
   while (iter->Valid()) {
-    fprintf(stderr, "key: '%s' -> '%s'\n",
-            iter->key().ToString().c_str(),
+    fprintf(stderr, "key: '%s' -> '%s'\n", iter->key().ToString().c_str(),
             iter->value().ToString().c_str());
     iter->Next();
   }
@@ -788,14 +750,13 @@ static bool Between(uint64_t val, uint64_t low, uint64_t high) {
   bool result = (val >= low) && (val <= high);
   if (!result) {
     fprintf(stderr, "Value %llu is not in range [%llu, %llu]\n",
-            (unsigned long long)(val),
-            (unsigned long long)(low),
+            (unsigned long long)(val), (unsigned long long)(low),
             (unsigned long long)(high));
   }
   return result;
 }
 
-class TableTest { };
+class TableTest {};
 
 TEST(TableTest, ApproximateOffsetOfPlain) {
   TableConstructor c(BytewiseComparator());
@@ -813,18 +774,17 @@ TEST(TableTest, ApproximateOffsetOfPlain) {
   options.compression = kNoCompression;
   c.Finish(options, &keys, &kvmap);
 
-  ASSERT_TRUE(Between(c.ApproximateOffsetOf("abc"),       0,      0));
-  ASSERT_TRUE(Between(c.ApproximateOffsetOf("k01"),       0,      0));
-  ASSERT_TRUE(Between(c.ApproximateOffsetOf("k01a"),      0,      0));
-  ASSERT_TRUE(Between(c.ApproximateOffsetOf("k02"),       0,      0));
-  ASSERT_TRUE(Between(c.ApproximateOffsetOf("k03"),       0,      0));
-  ASSERT_TRUE(Between(c.ApproximateOffsetOf("k04"),   10000,  11000));
+  ASSERT_TRUE(Between(c.ApproximateOffsetOf("abc"), 0, 0));
+  ASSERT_TRUE(Between(c.ApproximateOffsetOf("k01"), 0, 0));
+  ASSERT_TRUE(Between(c.ApproximateOffsetOf("k01a"), 0, 0));
+  ASSERT_TRUE(Between(c.ApproximateOffsetOf("k02"), 0, 0));
+  ASSERT_TRUE(Between(c.ApproximateOffsetOf("k03"), 0, 0));
+  ASSERT_TRUE(Between(c.ApproximateOffsetOf("k04"), 10000, 11000));
   ASSERT_TRUE(Between(c.ApproximateOffsetOf("k04a"), 210000, 211000));
-  ASSERT_TRUE(Between(c.ApproximateOffsetOf("k05"),  210000, 211000));
-  ASSERT_TRUE(Between(c.ApproximateOffsetOf("k06"),  510000, 511000));
-  ASSERT_TRUE(Between(c.ApproximateOffsetOf("k07"),  510000, 511000));
-  ASSERT_TRUE(Between(c.ApproximateOffsetOf("xyz"),  610000, 612000));
-
+  ASSERT_TRUE(Between(c.ApproximateOffsetOf("k05"), 210000, 211000));
+  ASSERT_TRUE(Between(c.ApproximateOffsetOf("k06"), 510000, 511000));
+  ASSERT_TRUE(Between(c.ApproximateOffsetOf("k07"), 510000, 511000));
+  ASSERT_TRUE(Between(c.ApproximateOffsetOf("xyz"), 610000, 612000));
 }
 
 static bool SnappyCompressionSupported() {
@@ -855,7 +815,7 @@ TEST(TableTest, ApproximateOffsetOfCompressed) {
 
   // Expected upper and lower bounds of space used by compressible strings.
   static const int kSlop = 1000;  // Compressor effectiveness varies.
-  const int expected = 2500;  // 10000 * compression ratio (0.25)
+  const int expected = 2500;      // 10000 * compression ratio (0.25)
   const int min_z = expected - kSlop;
   const int max_z = expected + kSlop;
 
@@ -871,6 +831,4 @@ TEST(TableTest, ApproximateOffsetOfCompressed) {
 
 }  // namespace leveldb
 
-int main(int argc, char** argv) {
-  return leveldb::test::RunAllTests();
-}
+int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
diff --git a/table/two_level_iterator.cc b/table/two_level_iterator.cc
index 4e6f420..5340a4d 100644
--- a/table/two_level_iterator.cc
+++ b/table/two_level_iterator.cc
@@ -15,13 +15,10 @@ namespace {
 
 typedef Iterator* (*BlockFunction)(void*, const ReadOptions&, const Slice&);
 
-class TwoLevelIterator: public Iterator {
+class TwoLevelIterator : public Iterator {
  public:
-  TwoLevelIterator(
-    Iterator* index_iter,
-    BlockFunction block_function,
-    void* arg,
-    const ReadOptions& options);
+  TwoLevelIterator(Iterator* index_iter, BlockFunction block_function,
+                   void* arg, const ReadOptions& options);
 
   virtual ~TwoLevelIterator();
 
@@ -31,9 +28,7 @@ class TwoLevelIterator: public Iterator {
   virtual void Next();
   virtual void Prev();
 
-  virtual bool Valid() const {
-    return data_iter_.Valid();
-  }
+  virtual bool Valid() const { return data_iter_.Valid(); }
   virtual Slice key() const {
     assert(Valid());
     return data_iter_.key();
@@ -67,26 +62,22 @@ class TwoLevelIterator: public Iterator {
   const ReadOptions options_;
   Status status_;
   IteratorWrapper index_iter_;
-  IteratorWrapper data_iter_; // May be nullptr
+  IteratorWrapper data_iter_;  // May be nullptr
   // If data_iter_ is non-null, then "data_block_handle_" holds the
   // "index_value" passed to block_function_ to create the data_iter_.
   std::string data_block_handle_;
 };
 
-TwoLevelIterator::TwoLevelIterator(
-    Iterator* index_iter,
-    BlockFunction block_function,
-    void* arg,
-    const ReadOptions& options)
+TwoLevelIterator::TwoLevelIterator(Iterator* index_iter,
+                                   BlockFunction block_function, void* arg,
+                                   const ReadOptions& options)
     : block_function_(block_function),
       arg_(arg),
       options_(options),
       index_iter_(index_iter),
-      data_iter_(nullptr) {
-}
+      data_iter_(nullptr) {}
 
-TwoLevelIterator::~TwoLevelIterator() {
-}
+TwoLevelIterator::~TwoLevelIterator() {}
 
 void TwoLevelIterator::Seek(const Slice& target) {
   index_iter_.Seek(target);
@@ -121,7 +112,6 @@ void TwoLevelIterator::Prev() {
   SkipEmptyDataBlocksBackward();
 }
 
-
 void TwoLevelIterator::SkipEmptyDataBlocksForward() {
   while (data_iter_.iter() == nullptr || !data_iter_.Valid()) {
     // Move to next block
@@ -158,7 +148,8 @@ void TwoLevelIterator::InitDataBlock() {
     SetDataIterator(nullptr);
   } else {
     Slice handle = index_iter_.value();
-    if (data_iter_.iter() != nullptr && handle.compare(data_block_handle_) == 0) {
+    if (data_iter_.iter() != nullptr &&
+        handle.compare(data_block_handle_) == 0) {
       // data_iter_ is already constructed with this iterator, so
       // no need to change anything
     } else {
@@ -171,11 +162,9 @@ void TwoLevelIterator::InitDataBlock() {
 
 }  // namespace
 
-Iterator* NewTwoLevelIterator(
-    Iterator* index_iter,
-    BlockFunction block_function,
-    void* arg,
-    const ReadOptions& options) {
+Iterator* NewTwoLevelIterator(Iterator* index_iter,
+                              BlockFunction block_function, void* arg,
+                              const ReadOptions& options) {
   return new TwoLevelIterator(index_iter, block_function, arg, options);
 }
 
diff --git a/table/two_level_iterator.h b/table/two_level_iterator.h
index a93ba89..81ffe80 100644
--- a/table/two_level_iterator.h
+++ b/table/two_level_iterator.h
@@ -22,12 +22,9 @@ struct ReadOptions;
 // an iterator over the contents of the corresponding block.
 Iterator* NewTwoLevelIterator(
     Iterator* index_iter,
-    Iterator* (*block_function)(
-        void* arg,
-        const ReadOptions& options,
-        const Slice& index_value),
-    void* arg,
-    const ReadOptions& options);
+    Iterator* (*block_function)(void* arg, const ReadOptions& options,
+                                const Slice& index_value),
+    void* arg, const ReadOptions& options);
 
 }  // namespace leveldb
 
diff --git a/util/arena.cc b/util/arena.cc
index a496ad0..eadec8a 100644
--- a/util/arena.cc
+++ b/util/arena.cc
@@ -39,8 +39,9 @@ char* Arena::AllocateFallback(size_t bytes) {
 
 char* Arena::AllocateAligned(size_t bytes) {
   const int align = (sizeof(void*) > 8) ? sizeof(void*) : 8;
-  assert((align & (align-1)) == 0);   // Pointer size should be a power of 2
-  size_t current_mod = reinterpret_cast<uintptr_t>(alloc_ptr_) & (align-1);
+  static_assert((align & (align - 1)) == 0,
+                "Pointer size should be a power of 2");
+  size_t current_mod = reinterpret_cast<uintptr_t>(alloc_ptr_) & (align - 1);
   size_t slop = (current_mod == 0 ? 0 : align - current_mod);
   size_t needed = bytes + slop;
   char* result;
@@ -52,7 +53,7 @@ char* Arena::AllocateAligned(size_t bytes) {
     // AllocateFallback always returned aligned memory
     result = AllocateFallback(bytes);
   }
-  assert((reinterpret_cast<uintptr_t>(result) & (align-1)) == 0);
+  assert((reinterpret_cast<uintptr_t>(result) & (align - 1)) == 0);
   return result;
 }
 
diff --git a/util/arena_test.cc b/util/arena_test.cc
index 58e870e..f34095c 100644
--- a/util/arena_test.cc
+++ b/util/arena_test.cc
@@ -9,11 +9,9 @@
 
 namespace leveldb {
 
-class ArenaTest { };
+class ArenaTest {};
 
-TEST(ArenaTest, Empty) {
-  Arena arena;
-}
+TEST(ArenaTest, Empty) { Arena arena; }
 
 TEST(ArenaTest, Simple) {
   std::vector<std::pair<size_t, char*> > allocated;
@@ -26,8 +24,9 @@ TEST(ArenaTest, Simple) {
     if (i % (N / 10) == 0) {
       s = i;
     } else {
-      s = rnd.OneIn(4000) ? rnd.Uniform(6000) :
-          (rnd.OneIn(10) ? rnd.Uniform(100) : rnd.Uniform(20));
+      s = rnd.OneIn(4000)
+              ? rnd.Uniform(6000)
+              : (rnd.OneIn(10) ? rnd.Uniform(100) : rnd.Uniform(20));
     }
     if (s == 0) {
       // Our arena disallows size 0 allocations.
@@ -47,7 +46,7 @@ TEST(ArenaTest, Simple) {
     bytes += s;
     allocated.push_back(std::make_pair(s, r));
     ASSERT_GE(arena.MemoryUsage(), bytes);
-    if (i > N/10) {
+    if (i > N / 10) {
       ASSERT_LE(arena.MemoryUsage(), bytes * 1.10);
     }
   }
@@ -63,6 +62,4 @@ TEST(ArenaTest, Simple) {
 
 }  // namespace leveldb
 
-int main(int argc, char** argv) {
-  return leveldb::test::RunAllTests();
-}
+int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
diff --git a/util/bloom.cc b/util/bloom.cc
index bf3e4ca..097ce7a 100644
--- a/util/bloom.cc
+++ b/util/bloom.cc
@@ -20,17 +20,14 @@ class BloomFilterPolicy : public FilterPolicy {
   size_t k_;
 
  public:
-  explicit BloomFilterPolicy(int bits_per_key)
-      : bits_per_key_(bits_per_key) {
+  explicit BloomFilterPolicy(int bits_per_key) : bits_per_key_(bits_per_key) {
     // We intentionally round down to reduce probing cost a little bit
     k_ = static_cast<size_t>(bits_per_key * 0.69);  // 0.69 =~ ln(2)
     if (k_ < 1) k_ = 1;
     if (k_ > 30) k_ = 30;
   }
 
-  virtual const char* Name() const {
-    return "leveldb.BuiltinBloomFilter2";
-  }
+  virtual const char* Name() const { return "leveldb.BuiltinBloomFilter2"; }
 
   virtual void CreateFilter(const Slice* keys, int n, std::string* dst) const {
     // Compute bloom filter size (in both bits and bytes)
@@ -54,7 +51,7 @@ class BloomFilterPolicy : public FilterPolicy {
       const uint32_t delta = (h >> 17) | (h << 15);  // Rotate right 17 bits
       for (size_t j = 0; j < k_; j++) {
         const uint32_t bitpos = h % bits;
-        array[bitpos/8] |= (1 << (bitpos % 8));
+        array[bitpos / 8] |= (1 << (bitpos % 8));
         h += delta;
       }
     }
@@ -69,7 +66,7 @@ class BloomFilterPolicy : public FilterPolicy {
 
     // Use the encoded k so that we can read filters generated by
     // bloom filters created using different parameters.
-    const size_t k = array[len-1];
+    const size_t k = array[len - 1];
     if (k > 30) {
       // Reserved for potentially new encodings for short bloom filters.
       // Consider it a match.
@@ -80,13 +77,13 @@ class BloomFilterPolicy : public FilterPolicy {
     const uint32_t delta = (h >> 17) | (h << 15);  // Rotate right 17 bits
     for (size_t j = 0; j < k; j++) {
       const uint32_t bitpos = h % bits;
-      if ((array[bitpos/8] & (1 << (bitpos % 8))) == 0) return false;
+      if ((array[bitpos / 8] & (1 << (bitpos % 8))) == 0) return false;
       h += delta;
     }
     return true;
   }
 };
-}
+}  // namespace
 
 const FilterPolicy* NewBloomFilterPolicy(int bits_per_key) {
   return new BloomFilterPolicy(bits_per_key);
diff --git a/util/bloom_test.cc b/util/bloom_test.cc
index 1b87a2b..71c4115 100644
--- a/util/bloom_test.cc
+++ b/util/bloom_test.cc
@@ -25,20 +25,16 @@ class BloomTest {
   std::vector<std::string> keys_;
 
  public:
-  BloomTest() : policy_(NewBloomFilterPolicy(10)) { }
+  BloomTest() : policy_(NewBloomFilterPolicy(10)) {}
 
-  ~BloomTest() {
-    delete policy_;
-  }
+  ~BloomTest() { delete policy_; }
 
   void Reset() {
     keys_.clear();
     filter_.clear();
   }
 
-  void Add(const Slice& s) {
-    keys_.push_back(s.ToString());
-  }
+  void Add(const Slice& s) { keys_.push_back(s.ToString()); }
 
   void Build() {
     std::vector<Slice> key_slices;
@@ -52,16 +48,14 @@ class BloomTest {
     if (kVerbose >= 2) DumpFilter();
   }
 
-  size_t FilterSize() const {
-    return filter_.size();
-  }
+  size_t FilterSize() const { return filter_.size(); }
 
   void DumpFilter() {
     fprintf(stderr, "F(");
-    for (size_t i = 0; i+1 < filter_.size(); i++) {
+    for (size_t i = 0; i + 1 < filter_.size(); i++) {
       const unsigned int c = static_cast<unsigned int>(filter_[i]);
       for (int j = 0; j < 8; j++) {
-        fprintf(stderr, "%c", (c & (1 <<j)) ? '1' : '.');
+        fprintf(stderr, "%c", (c & (1 << j)) ? '1' : '.');
       }
     }
     fprintf(stderr, ")\n");
@@ -87,8 +81,8 @@ class BloomTest {
 };
 
 TEST(BloomTest, EmptyFilter) {
-  ASSERT_TRUE(! Matches("hello"));
-  ASSERT_TRUE(! Matches("world"));
+  ASSERT_TRUE(!Matches("hello"));
+  ASSERT_TRUE(!Matches("world"));
 }
 
 TEST(BloomTest, Small) {
@@ -96,8 +90,8 @@ TEST(BloomTest, Small) {
   Add("world");
   ASSERT_TRUE(Matches("hello"));
   ASSERT_TRUE(Matches("world"));
-  ASSERT_TRUE(! Matches("x"));
-  ASSERT_TRUE(! Matches("foo"));
+  ASSERT_TRUE(!Matches("x"));
+  ASSERT_TRUE(!Matches("foo"));
 }
 
 static int NextLength(int length) {
@@ -140,23 +134,23 @@ TEST(BloomTest, VaryingLengths) {
     double rate = FalsePositiveRate();
     if (kVerbose >= 1) {
       fprintf(stderr, "False positives: %5.2f%% @ length = %6d ; bytes = %6d\n",
-              rate*100.0, length, static_cast<int>(FilterSize()));
+              rate * 100.0, length, static_cast<int>(FilterSize()));
     }
-    ASSERT_LE(rate, 0.02);   // Must not be over 2%
-    if (rate > 0.0125) mediocre_filters++;  // Allowed, but not too often
-    else good_filters++;
+    ASSERT_LE(rate, 0.02);  // Must not be over 2%
+    if (rate > 0.0125)
+      mediocre_filters++;  // Allowed, but not too often
+    else
+      good_filters++;
   }
   if (kVerbose >= 1) {
-    fprintf(stderr, "Filters: %d good, %d mediocre\n",
-            good_filters, mediocre_filters);
+    fprintf(stderr, "Filters: %d good, %d mediocre\n", good_filters,
+            mediocre_filters);
   }
-  ASSERT_LE(mediocre_filters, good_filters/5);
+  ASSERT_LE(mediocre_filters, good_filters / 5);
 }
 
 // Different bits-per-byte
 
 }  // namespace leveldb
 
-int main(int argc, char** argv) {
-  return leveldb::test::RunAllTests();
-}
+int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
diff --git a/util/cache.cc b/util/cache.cc
index 25b51b5..0f801cc 100644
--- a/util/cache.cc
+++ b/util/cache.cc
@@ -14,8 +14,7 @@
 
 namespace leveldb {
 
-Cache::~Cache() {
-}
+Cache::~Cache() {}
 
 namespace {
 
@@ -46,12 +45,12 @@ struct LRUHandle {
   LRUHandle* next_hash;
   LRUHandle* next;
   LRUHandle* prev;
-  size_t charge;      // TODO(opt): Only allow uint32_t?
+  size_t charge;  // TODO(opt): Only allow uint32_t?
   size_t key_length;
-  bool in_cache;      // Whether entry is in the cache.
-  uint32_t refs;      // References, including cache reference, if present.
-  uint32_t hash;      // Hash of key(); used for fast sharding and comparisons
-  char key_data[1];   // Beginning of key
+  bool in_cache;     // Whether entry is in the cache.
+  uint32_t refs;     // References, including cache reference, if present.
+  uint32_t hash;     // Hash of key(); used for fast sharding and comparisons
+  char key_data[1];  // Beginning of key
 
   Slice key() const {
     // next_ is only equal to this if the LRU handle is the list head of an
@@ -114,8 +113,7 @@ class HandleTable {
   // pointer to the trailing slot in the corresponding linked list.
   LRUHandle** FindPointer(const Slice& key, uint32_t hash) {
     LRUHandle** ptr = &list_[hash & (length_ - 1)];
-    while (*ptr != nullptr &&
-           ((*ptr)->hash != hash || key != (*ptr)->key())) {
+    while (*ptr != nullptr && ((*ptr)->hash != hash || key != (*ptr)->key())) {
       ptr = &(*ptr)->next_hash;
     }
     return ptr;
@@ -158,8 +156,8 @@ class LRUCache {
   void SetCapacity(size_t capacity) { capacity_ = capacity; }
 
   // Like Cache methods, but with an extra "hash" parameter.
-  Cache::Handle* Insert(const Slice& key, uint32_t hash,
-                        void* value, size_t charge,
+  Cache::Handle* Insert(const Slice& key, uint32_t hash, void* value,
+                        size_t charge,
                         void (*deleter)(const Slice& key, void* value));
   Cache::Handle* Lookup(const Slice& key, uint32_t hash);
   void Release(Cache::Handle* handle);
@@ -172,7 +170,7 @@ class LRUCache {
 
  private:
   void LRU_Remove(LRUHandle* e);
-  void LRU_Append(LRUHandle*list, LRUHandle* e);
+  void LRU_Append(LRUHandle* list, LRUHandle* e);
   void Ref(LRUHandle* e);
   void Unref(LRUHandle* e);
   bool FinishErase(LRUHandle* e) EXCLUSIVE_LOCKS_REQUIRED(mutex_);
@@ -206,7 +204,7 @@ LRUCache::LRUCache() : capacity_(0), usage_(0) {
 
 LRUCache::~LRUCache() {
   assert(in_use_.next == &in_use_);  // Error if caller has an unreleased handle
-  for (LRUHandle* e = lru_.next; e != &lru_; ) {
+  for (LRUHandle* e = lru_.next; e != &lru_;) {
     LRUHandle* next = e->next;
     assert(e->in_cache);
     e->in_cache = false;
@@ -265,13 +263,14 @@ void LRUCache::Release(Cache::Handle* handle) {
   Unref(reinterpret_cast<LRUHandle*>(handle));
 }
 
-Cache::Handle* LRUCache::Insert(
-    const Slice& key, uint32_t hash, void* value, size_t charge,
-    void (*deleter)(const Slice& key, void* value)) {
+Cache::Handle* LRUCache::Insert(const Slice& key, uint32_t hash, void* value,
+                                size_t charge,
+                                void (*deleter)(const Slice& key,
+                                                void* value)) {
   MutexLock l(&mutex_);
 
-  LRUHandle* e = reinterpret_cast<LRUHandle*>(
-      malloc(sizeof(LRUHandle)-1 + key.size()));
+  LRUHandle* e =
+      reinterpret_cast<LRUHandle*>(malloc(sizeof(LRUHandle) - 1 + key.size()));
   e->value = value;
   e->deleter = deleter;
   e->charge = charge;
@@ -346,19 +345,16 @@ class ShardedLRUCache : public Cache {
     return Hash(s.data(), s.size(), 0);
   }
 
-  static uint32_t Shard(uint32_t hash) {
-    return hash >> (32 - kNumShardBits);
-  }
+  static uint32_t Shard(uint32_t hash) { return hash >> (32 - kNumShardBits); }
 
  public:
-  explicit ShardedLRUCache(size_t capacity)
-      : last_id_(0) {
+  explicit ShardedLRUCache(size_t capacity) : last_id_(0) {
     const size_t per_shard = (capacity + (kNumShards - 1)) / kNumShards;
     for (int s = 0; s < kNumShards; s++) {
       shard_[s].SetCapacity(per_shard);
     }
   }
-  virtual ~ShardedLRUCache() { }
+  virtual ~ShardedLRUCache() {}
   virtual Handle* Insert(const Slice& key, void* value, size_t charge,
                          void (*deleter)(const Slice& key, void* value)) {
     const uint32_t hash = HashSlice(key);
@@ -399,8 +395,6 @@ class ShardedLRUCache : public Cache {
 
 }  // end anonymous namespace
 
-Cache* NewLRUCache(size_t capacity) {
-  return new ShardedLRUCache(capacity);
-}
+Cache* NewLRUCache(size_t capacity) { return new ShardedLRUCache(capacity); }
 
 }  // namespace leveldb
diff --git a/util/cache_test.cc b/util/cache_test.cc
index 8647feb..d5c1a1d 100644
--- a/util/cache_test.cc
+++ b/util/cache_test.cc
@@ -37,13 +37,9 @@ class CacheTest {
   std::vector<int> deleted_values_;
   Cache* cache_;
 
-  CacheTest() : cache_(NewLRUCache(kCacheSize)) {
-    current_ = this;
-  }
+  CacheTest() : cache_(NewLRUCache(kCacheSize)) { current_ = this; }
 
-  ~CacheTest() {
-    delete cache_;
-  }
+  ~CacheTest() { delete cache_; }
 
   int Lookup(int key) {
     Cache::Handle* handle = cache_->Lookup(EncodeKey(key));
@@ -64,9 +60,7 @@ class CacheTest {
                           &CacheTest::Deleter);
   }
 
-  void Erase(int key) {
-    cache_->Erase(EncodeKey(key));
-  }
+  void Erase(int key) { cache_->Erase(EncodeKey(key)); }
 };
 CacheTest* CacheTest::current_;
 
@@ -75,18 +69,18 @@ TEST(CacheTest, HitAndMiss) {
 
   Insert(100, 101);
   ASSERT_EQ(101, Lookup(100));
-  ASSERT_EQ(-1,  Lookup(200));
-  ASSERT_EQ(-1,  Lookup(300));
+  ASSERT_EQ(-1, Lookup(200));
+  ASSERT_EQ(-1, Lookup(300));
 
   Insert(200, 201);
   ASSERT_EQ(101, Lookup(100));
   ASSERT_EQ(201, Lookup(200));
-  ASSERT_EQ(-1,  Lookup(300));
+  ASSERT_EQ(-1, Lookup(300));
 
   Insert(100, 102);
   ASSERT_EQ(102, Lookup(100));
   ASSERT_EQ(201, Lookup(200));
-  ASSERT_EQ(-1,  Lookup(300));
+  ASSERT_EQ(-1, Lookup(300));
 
   ASSERT_EQ(1, deleted_keys_.size());
   ASSERT_EQ(100, deleted_keys_[0]);
@@ -100,14 +94,14 @@ TEST(CacheTest, Erase) {
   Insert(100, 101);
   Insert(200, 201);
   Erase(100);
-  ASSERT_EQ(-1,  Lookup(100));
+  ASSERT_EQ(-1, Lookup(100));
   ASSERT_EQ(201, Lookup(200));
   ASSERT_EQ(1, deleted_keys_.size());
   ASSERT_EQ(100, deleted_keys_[0]);
   ASSERT_EQ(101, deleted_values_[0]);
 
   Erase(100);
-  ASSERT_EQ(-1,  Lookup(100));
+  ASSERT_EQ(-1, Lookup(100));
   ASSERT_EQ(201, Lookup(200));
   ASSERT_EQ(1, deleted_keys_.size());
 }
@@ -146,8 +140,8 @@ TEST(CacheTest, EvictionPolicy) {
   // Frequently used entry must be kept around,
   // as must things that are still in use.
   for (int i = 0; i < kCacheSize + 100; i++) {
-    Insert(1000+i, 2000+i);
-    ASSERT_EQ(2000+i, Lookup(1000+i));
+    Insert(1000 + i, 2000 + i);
+    ASSERT_EQ(2000 + i, Lookup(1000 + i));
     ASSERT_EQ(101, Lookup(100));
   }
   ASSERT_EQ(101, Lookup(100));
@@ -160,12 +154,12 @@ TEST(CacheTest, UseExceedsCacheSize) {
   // Overfill the cache, keeping handles on all inserted entries.
   std::vector<Cache::Handle*> h;
   for (int i = 0; i < kCacheSize + 100; i++) {
-    h.push_back(InsertAndReturnHandle(1000+i, 2000+i));
+    h.push_back(InsertAndReturnHandle(1000 + i, 2000 + i));
   }
 
   // Check that all the entries can be found in the cache.
   for (int i = 0; i < h.size(); i++) {
-    ASSERT_EQ(2000+i, Lookup(1000+i));
+    ASSERT_EQ(2000 + i, Lookup(1000 + i));
   }
 
   for (int i = 0; i < h.size(); i++) {
@@ -181,9 +175,9 @@ TEST(CacheTest, HeavyEntries) {
   const int kHeavy = 10;
   int added = 0;
   int index = 0;
-  while (added < 2*kCacheSize) {
+  while (added < 2 * kCacheSize) {
     const int weight = (index & 1) ? kLight : kHeavy;
-    Insert(index, 1000+index, weight);
+    Insert(index, 1000 + index, weight);
     added += weight;
     index++;
   }
@@ -194,10 +188,10 @@ TEST(CacheTest, HeavyEntries) {
     int r = Lookup(i);
     if (r >= 0) {
       cached_weight += weight;
-      ASSERT_EQ(1000+i, r);
+      ASSERT_EQ(1000 + i, r);
     }
   }
-  ASSERT_LE(cached_weight, kCacheSize + kCacheSize/10);
+  ASSERT_LE(cached_weight, kCacheSize + kCacheSize / 10);
 }
 
 TEST(CacheTest, NewId) {
@@ -229,6 +223,4 @@ TEST(CacheTest, ZeroSizeCache) {
 
 }  // namespace leveldb
 
-int main(int argc, char** argv) {
-  return leveldb::test::RunAllTests();
-}
+int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
diff --git a/util/coding.cc b/util/coding.cc
index 1a9e333..e2089df 100644
--- a/util/coding.cc
+++ b/util/coding.cc
@@ -6,29 +6,29 @@
 
 namespace leveldb {
 
-void EncodeFixed32(char* buf, uint32_t value) {
+void EncodeFixed32(char* dst, uint32_t value) {
   if (port::kLittleEndian) {
-    memcpy(buf, &value, sizeof(value));
+    memcpy(dst, &value, sizeof(value));
   } else {
-    buf[0] = value & 0xff;
-    buf[1] = (value >> 8) & 0xff;
-    buf[2] = (value >> 16) & 0xff;
-    buf[3] = (value >> 24) & 0xff;
+    dst[0] = value & 0xff;
+    dst[1] = (value >> 8) & 0xff;
+    dst[2] = (value >> 16) & 0xff;
+    dst[3] = (value >> 24) & 0xff;
   }
 }
 
-void EncodeFixed64(char* buf, uint64_t value) {
+void EncodeFixed64(char* dst, uint64_t value) {
   if (port::kLittleEndian) {
-    memcpy(buf, &value, sizeof(value));
+    memcpy(dst, &value, sizeof(value));
   } else {
-    buf[0] = value & 0xff;
-    buf[1] = (value >> 8) & 0xff;
-    buf[2] = (value >> 16) & 0xff;
-    buf[3] = (value >> 24) & 0xff;
-    buf[4] = (value >> 32) & 0xff;
-    buf[5] = (value >> 40) & 0xff;
-    buf[6] = (value >> 48) & 0xff;
-    buf[7] = (value >> 56) & 0xff;
+    dst[0] = value & 0xff;
+    dst[1] = (value >> 8) & 0xff;
+    dst[2] = (value >> 16) & 0xff;
+    dst[3] = (value >> 24) & 0xff;
+    dst[4] = (value >> 32) & 0xff;
+    dst[5] = (value >> 40) & 0xff;
+    dst[6] = (value >> 48) & 0xff;
+    dst[7] = (value >> 56) & 0xff;
   }
 }
 
@@ -48,26 +48,26 @@ char* EncodeVarint32(char* dst, uint32_t v) {
   // Operate on characters as unsigneds
   unsigned char* ptr = reinterpret_cast<unsigned char*>(dst);
   static const int B = 128;
-  if (v < (1<<7)) {
+  if (v < (1 << 7)) {
     *(ptr++) = v;
-  } else if (v < (1<<14)) {
+  } else if (v < (1 << 14)) {
     *(ptr++) = v | B;
-    *(ptr++) = v>>7;
-  } else if (v < (1<<21)) {
+    *(ptr++) = v >> 7;
+  } else if (v < (1 << 21)) {
     *(ptr++) = v | B;
-    *(ptr++) = (v>>7) | B;
-    *(ptr++) = v>>14;
-  } else if (v < (1<<28)) {
+    *(ptr++) = (v >> 7) | B;
+    *(ptr++) = v >> 14;
+  } else if (v < (1 << 28)) {
     *(ptr++) = v | B;
-    *(ptr++) = (v>>7) | B;
-    *(ptr++) = (v>>14) | B;
-    *(ptr++) = v>>21;
+    *(ptr++) = (v >> 7) | B;
+    *(ptr++) = (v >> 14) | B;
+    *(ptr++) = v >> 21;
   } else {
     *(ptr++) = v | B;
-    *(ptr++) = (v>>7) | B;
-    *(ptr++) = (v>>14) | B;
-    *(ptr++) = (v>>21) | B;
-    *(ptr++) = v>>28;
+    *(ptr++) = (v >> 7) | B;
+    *(ptr++) = (v >> 14) | B;
+    *(ptr++) = (v >> 21) | B;
+    *(ptr++) = v >> 28;
   }
   return reinterpret_cast<char*>(ptr);
 }
@@ -109,8 +109,7 @@ int VarintLength(uint64_t v) {
   return len;
 }
 
-const char* GetVarint32PtrFallback(const char* p,
-                                   const char* limit,
+const char* GetVarint32PtrFallback(const char* p, const char* limit,
                                    uint32_t* value) {
   uint32_t result = 0;
   for (uint32_t shift = 0; shift <= 28 && p < limit; shift += 7) {
@@ -181,8 +180,7 @@ const char* GetLengthPrefixedSlice(const char* p, const char* limit,
 
 bool GetLengthPrefixedSlice(Slice* input, Slice* result) {
   uint32_t len;
-  if (GetVarint32(input, &len) &&
-      input->size() >= len) {
+  if (GetVarint32(input, &len) && input->size() >= len) {
     *result = Slice(input->data(), len);
     input->remove_prefix(len);
     return true;
diff --git a/util/coding.h b/util/coding.h
index f0fa2cb..d9eeaa3 100644
--- a/util/coding.h
+++ b/util/coding.h
@@ -64,10 +64,10 @@ inline uint32_t DecodeFixed32(const char* ptr) {
     memcpy(&result, ptr, sizeof(result));  // gcc optimizes this to a plain load
     return result;
   } else {
-    return ((static_cast<uint32_t>(static_cast<unsigned char>(ptr[0])))
-        | (static_cast<uint32_t>(static_cast<unsigned char>(ptr[1])) << 8)
-        | (static_cast<uint32_t>(static_cast<unsigned char>(ptr[2])) << 16)
-        | (static_cast<uint32_t>(static_cast<unsigned char>(ptr[3])) << 24));
+    return ((static_cast<uint32_t>(static_cast<unsigned char>(ptr[0]))) |
+            (static_cast<uint32_t>(static_cast<unsigned char>(ptr[1])) << 8) |
+            (static_cast<uint32_t>(static_cast<unsigned char>(ptr[2])) << 16) |
+            (static_cast<uint32_t>(static_cast<unsigned char>(ptr[3])) << 24));
   }
 }
 
@@ -85,11 +85,9 @@ inline uint64_t DecodeFixed64(const char* ptr) {
 }
 
 // Internal routine for use by fallback path of GetVarint32Ptr
-const char* GetVarint32PtrFallback(const char* p,
-                                   const char* limit,
+const char* GetVarint32PtrFallback(const char* p, const char* limit,
                                    uint32_t* value);
-inline const char* GetVarint32Ptr(const char* p,
-                                  const char* limit,
+inline const char* GetVarint32Ptr(const char* p, const char* limit,
                                   uint32_t* value) {
   if (p < limit) {
     uint32_t result = *(reinterpret_cast<const unsigned char*>(p));
diff --git a/util/coding_test.cc b/util/coding_test.cc
index d315e19..0d2a0c5 100644
--- a/util/coding_test.cc
+++ b/util/coding_test.cc
@@ -9,7 +9,7 @@
 
 namespace leveldb {
 
-class Coding { };
+class Coding {};
 
 TEST(Coding, Fixed32) {
   std::string s;
@@ -39,15 +39,15 @@ TEST(Coding, Fixed64) {
     uint64_t v = static_cast<uint64_t>(1) << power;
     uint64_t actual;
     actual = DecodeFixed64(p);
-    ASSERT_EQ(v-1, actual);
+    ASSERT_EQ(v - 1, actual);
     p += sizeof(uint64_t);
 
     actual = DecodeFixed64(p);
-    ASSERT_EQ(v+0, actual);
+    ASSERT_EQ(v + 0, actual);
     p += sizeof(uint64_t);
 
     actual = DecodeFixed64(p);
-    ASSERT_EQ(v+1, actual);
+    ASSERT_EQ(v + 1, actual);
     p += sizeof(uint64_t);
   }
 }
@@ -108,8 +108,8 @@ TEST(Coding, Varint64) {
     // Test values near powers of two
     const uint64_t power = 1ull << k;
     values.push_back(power);
-    values.push_back(power-1);
-    values.push_back(power+1);
+    values.push_back(power - 1);
+    values.push_back(power + 1);
   }
 
   std::string s;
@@ -134,8 +134,8 @@ TEST(Coding, Varint64) {
 TEST(Coding, Varint32Overflow) {
   uint32_t result;
   std::string input("\x81\x82\x83\x84\x85\x11");
-  ASSERT_TRUE(GetVarint32Ptr(input.data(), input.data() + input.size(), &result)
-              == nullptr);
+  ASSERT_TRUE(GetVarint32Ptr(input.data(), input.data() + input.size(),
+                             &result) == nullptr);
 }
 
 TEST(Coding, Varint32Truncation) {
@@ -146,16 +146,16 @@ TEST(Coding, Varint32Truncation) {
   for (size_t len = 0; len < s.size() - 1; len++) {
     ASSERT_TRUE(GetVarint32Ptr(s.data(), s.data() + len, &result) == nullptr);
   }
-  ASSERT_TRUE(
-      GetVarint32Ptr(s.data(), s.data() + s.size(), &result) != nullptr);
+  ASSERT_TRUE(GetVarint32Ptr(s.data(), s.data() + s.size(), &result) !=
+              nullptr);
   ASSERT_EQ(large_value, result);
 }
 
 TEST(Coding, Varint64Overflow) {
   uint64_t result;
   std::string input("\x81\x82\x83\x84\x85\x81\x82\x83\x84\x85\x11");
-  ASSERT_TRUE(GetVarint64Ptr(input.data(), input.data() + input.size(), &result)
-              == nullptr);
+  ASSERT_TRUE(GetVarint64Ptr(input.data(), input.data() + input.size(),
+                             &result) == nullptr);
 }
 
 TEST(Coding, Varint64Truncation) {
@@ -166,8 +166,8 @@ TEST(Coding, Varint64Truncation) {
   for (size_t len = 0; len < s.size() - 1; len++) {
     ASSERT_TRUE(GetVarint64Ptr(s.data(), s.data() + len, &result) == nullptr);
   }
-  ASSERT_TRUE(
-      GetVarint64Ptr(s.data(), s.data() + s.size(), &result) != nullptr);
+  ASSERT_TRUE(GetVarint64Ptr(s.data(), s.data() + s.size(), &result) !=
+              nullptr);
   ASSERT_EQ(large_value, result);
 }
 
@@ -193,6 +193,4 @@ TEST(Coding, Strings) {
 
 }  // namespace leveldb
 
-int main(int argc, char** argv) {
-  return leveldb::test::RunAllTests();
-}
+int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
diff --git a/util/comparator.cc b/util/comparator.cc
index e1e2963..26d1eb3 100644
--- a/util/comparator.cc
+++ b/util/comparator.cc
@@ -13,24 +13,21 @@
 
 namespace leveldb {
 
-Comparator::~Comparator() { }
+Comparator::~Comparator() {}
 
 namespace {
 class BytewiseComparatorImpl : public Comparator {
  public:
-  BytewiseComparatorImpl() { }
+  BytewiseComparatorImpl() {}
 
-  virtual const char* Name() const {
-    return "leveldb.BytewiseComparator";
-  }
+  virtual const char* Name() const { return "leveldb.BytewiseComparator"; }
 
   virtual int Compare(const Slice& a, const Slice& b) const {
     return a.compare(b);
   }
 
-  virtual void FindShortestSeparator(
-      std::string* start,
-      const Slice& limit) const {
+  virtual void FindShortestSeparator(std::string* start,
+                                     const Slice& limit) const {
     // Find length of common prefix
     size_t min_length = std::min(start->size(), limit.size());
     size_t diff_index = 0;
@@ -59,7 +56,7 @@ class BytewiseComparatorImpl : public Comparator {
       const uint8_t byte = (*key)[i];
       if (byte != static_cast<uint8_t>(0xff)) {
         (*key)[i] = byte + 1;
-        key->resize(i+1);
+        key->resize(i + 1);
         return;
       }
     }
diff --git a/util/crc32c.cc b/util/crc32c.cc
index 4f1d80f..c2e61f7 100644
--- a/util/crc32c.cc
+++ b/util/crc32c.cc
@@ -256,8 +256,8 @@ inline uint32_t ReadUint32LE(const uint8_t* buffer) {
 template <int N>
 constexpr inline const uint8_t* RoundUp(const uint8_t* pointer) {
   return reinterpret_cast<uint8_t*>(
-      (reinterpret_cast<uintptr_t>(pointer) + (N - 1))
-      & ~static_cast<uintptr_t>(N - 1));
+      (reinterpret_cast<uintptr_t>(pointer) + (N - 1)) &
+      ~static_cast<uintptr_t>(N - 1));
 }
 
 }  // namespace
@@ -273,14 +273,14 @@ static bool CanAccelerateCRC32C() {
   return port::AcceleratedCRC32C(0, kTestCRCBuffer, kBufSize) == kTestCRCValue;
 }
 
-uint32_t Extend(uint32_t crc, const char* buf, size_t size) {
+uint32_t Extend(uint32_t crc, const char* data, size_t n) {
   static bool accelerate = CanAccelerateCRC32C();
   if (accelerate) {
-    return port::AcceleratedCRC32C(crc, buf, size);
+    return port::AcceleratedCRC32C(crc, data, n);
   }
 
-  const uint8_t* p = reinterpret_cast<const uint8_t*>(buf);
-  const uint8_t* e = p + size;
+  const uint8_t* p = reinterpret_cast<const uint8_t*>(data);
+  const uint8_t* e = p + n;
   uint32_t l = crc ^ kCRC32Xor;
 
 // Process one byte at a time.
diff --git a/util/crc32c.h b/util/crc32c.h
index 7864688..98fabb0 100644
--- a/util/crc32c.h
+++ b/util/crc32c.h
@@ -17,9 +17,7 @@ namespace crc32c {
 uint32_t Extend(uint32_t init_crc, const char* data, size_t n);
 
 // Return the crc32c of data[0,n-1]
-inline uint32_t Value(const char* data, size_t n) {
-  return Extend(0, data, n);
-}
+inline uint32_t Value(const char* data, size_t n) { return Extend(0, data, n); }
 
 static const uint32_t kMaskDelta = 0xa282ead8ul;
 
diff --git a/util/crc32c_test.cc b/util/crc32c_test.cc
index 4b957ee..dbd2ba4 100644
--- a/util/crc32c_test.cc
+++ b/util/crc32c_test.cc
@@ -8,7 +8,7 @@
 namespace leveldb {
 namespace crc32c {
 
-class CRC { };
+class CRC {};
 
 TEST(CRC, StandardResults) {
   // From rfc3720 section B.4.
@@ -31,29 +31,18 @@ TEST(CRC, StandardResults) {
   ASSERT_EQ(0x113fdb5c, Value(buf, sizeof(buf)));
 
   unsigned char data[48] = {
-    0x01, 0xc0, 0x00, 0x00,
-    0x00, 0x00, 0x00, 0x00,
-    0x00, 0x00, 0x00, 0x00,
-    0x00, 0x00, 0x00, 0x00,
-    0x14, 0x00, 0x00, 0x00,
-    0x00, 0x00, 0x04, 0x00,
-    0x00, 0x00, 0x00, 0x14,
-    0x00, 0x00, 0x00, 0x18,
-    0x28, 0x00, 0x00, 0x00,
-    0x00, 0x00, 0x00, 0x00,
-    0x02, 0x00, 0x00, 0x00,
-    0x00, 0x00, 0x00, 0x00,
+      0x01, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+      0x00, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00,
+      0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x18, 0x28, 0x00, 0x00, 0x00,
+      0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
   };
   ASSERT_EQ(0xd9963a56, Value(reinterpret_cast<char*>(data), sizeof(data)));
 }
 
-TEST(CRC, Values) {
-  ASSERT_NE(Value("a", 1), Value("foo", 3));
-}
+TEST(CRC, Values) { ASSERT_NE(Value("a", 1), Value("foo", 3)); }
 
 TEST(CRC, Extend) {
-  ASSERT_EQ(Value("hello world", 11),
-            Extend(Value("hello ", 6), "world", 5));
+  ASSERT_EQ(Value("hello world", 11), Extend(Value("hello ", 6), "world", 5));
 }
 
 TEST(CRC, Mask) {
@@ -67,6 +56,4 @@ TEST(CRC, Mask) {
 }  // namespace crc32c
 }  // namespace leveldb
 
-int main(int argc, char** argv) {
-  return leveldb::test::RunAllTests();
-}
+int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
diff --git a/util/env.cc b/util/env.cc
index 40a1363..6cd5f2e 100644
--- a/util/env.cc
+++ b/util/env.cc
@@ -6,27 +6,21 @@
 
 namespace leveldb {
 
-Env::~Env() {
-}
+Env::~Env() {}
 
 Status Env::NewAppendableFile(const std::string& fname, WritableFile** result) {
   return Status::NotSupported("NewAppendableFile", fname);
 }
 
-SequentialFile::~SequentialFile() {
-}
+SequentialFile::~SequentialFile() {}
 
-RandomAccessFile::~RandomAccessFile() {
-}
+RandomAccessFile::~RandomAccessFile() {}
 
-WritableFile::~WritableFile() {
-}
+WritableFile::~WritableFile() {}
 
-Logger::~Logger() {
-}
+Logger::~Logger() {}
 
-FileLock::~FileLock() {
-}
+FileLock::~FileLock() {}
 
 void Log(Logger* info_log, const char* format, ...) {
   if (info_log != nullptr) {
@@ -38,8 +32,7 @@ void Log(Logger* info_log, const char* format, ...) {
 }
 
 static Status DoWriteStringToFile(Env* env, const Slice& data,
-                                  const std::string& fname,
-                                  bool should_sync) {
+                                  const std::string& fname, bool should_sync) {
   WritableFile* file;
   Status s = env->NewWritableFile(fname, &file);
   if (!s.ok()) {
@@ -94,7 +87,6 @@ Status ReadFileToString(Env* env, const std::string& fname, std::string* data) {
   return s;
 }
 
-EnvWrapper::~EnvWrapper() {
-}
+EnvWrapper::~EnvWrapper() {}
 
 }  // namespace leveldb
diff --git a/util/env_posix.cc b/util/env_posix.cc
index 362adb3..8c74f5a 100644
--- a/util/env_posix.cc
+++ b/util/env_posix.cc
@@ -32,8 +32,8 @@
 #include "leveldb/status.h"
 #include "port/port.h"
 #include "port/thread_annotations.h"
-#include "util/posix_logger.h"
 #include "util/env_posix_test_helper.h"
+#include "util/posix_logger.h"
 
 namespace leveldb {
 
@@ -76,8 +76,7 @@ class Limiter {
     int old_acquires_allowed =
         acquires_allowed_.fetch_sub(1, std::memory_order_relaxed);
 
-    if (old_acquires_allowed > 0)
-      return true;
+    if (old_acquires_allowed > 0) return true;
 
     acquires_allowed_.fetch_add(1, std::memory_order_relaxed);
     return false;
@@ -85,9 +84,7 @@ class Limiter {
 
   // Release a resource acquired by a previous call to Acquire() that returned
   // true.
-  void Release() {
-    acquires_allowed_.fetch_add(1, std::memory_order_relaxed);
-  }
+  void Release() { acquires_allowed_.fetch_add(1, std::memory_order_relaxed); }
 
  private:
   // The number of available resources.
@@ -193,7 +190,7 @@ class PosixRandomAccessFile final : public RandomAccessFile {
 
  private:
   const bool has_permanent_fd_;  // If false, the file is opened on every read.
-  const int fd_;  // -1 if has_permanent_fd_ is false.
+  const int fd_;                 // -1 if has_permanent_fd_ is false.
   Limiter* const fd_limiter_;
   const std::string filename_;
 };
@@ -214,7 +211,9 @@ class PosixMmapReadableFile final : public RandomAccessFile {
   // instance is destroyed.
   PosixMmapReadableFile(std::string filename, char* mmap_base, size_t length,
                         Limiter* mmap_limiter)
-      : mmap_base_(mmap_base), length_(length), mmap_limiter_(mmap_limiter),
+      : mmap_base_(mmap_base),
+        length_(length),
+        mmap_limiter_(mmap_limiter),
         filename_(std::move(filename)) {}
 
   ~PosixMmapReadableFile() override {
@@ -243,8 +242,11 @@ class PosixMmapReadableFile final : public RandomAccessFile {
 class PosixWritableFile final : public WritableFile {
  public:
   PosixWritableFile(std::string filename, int fd)
-      : pos_(0), fd_(fd), is_manifest_(IsManifest(filename)),
-        filename_(std::move(filename)), dirname_(Dirname(filename_)) {}
+      : pos_(0),
+        fd_(fd),
+        is_manifest_(IsManifest(filename)),
+        filename_(std::move(filename)),
+        dirname_(Dirname(filename_)) {}
 
   ~PosixWritableFile() override {
     if (fd_ >= 0) {
@@ -292,9 +294,7 @@ class PosixWritableFile final : public WritableFile {
     return status;
   }
 
-  Status Flush() override {
-    return FlushBuffer();
-  }
+  Status Flush() override { return FlushBuffer(); }
 
   Status Sync() override {
     // Ensure new files referred to by the manifest are in the filesystem.
@@ -517,12 +517,12 @@ class PosixEnv : public Env {
     uint64_t file_size;
     Status status = GetFileSize(filename, &file_size);
     if (status.ok()) {
-      void* mmap_base = ::mmap(/*addr=*/nullptr, file_size, PROT_READ,
-                               MAP_SHARED, fd, 0);
+      void* mmap_base =
+          ::mmap(/*addr=*/nullptr, file_size, PROT_READ, MAP_SHARED, fd, 0);
       if (mmap_base != MAP_FAILED) {
-        *result = new PosixMmapReadableFile(
-            filename, reinterpret_cast<char*>(mmap_base), file_size,
-            &mmap_limiter_);
+        *result = new PosixMmapReadableFile(filename,
+                                            reinterpret_cast<char*>(mmap_base),
+                                            file_size, &mmap_limiter_);
       } else {
         status = PosixError(filename, errno);
       }
@@ -691,9 +691,7 @@ class PosixEnv : public Env {
     return static_cast<uint64_t>(tv.tv_sec) * kUsecondsPerSecond + tv.tv_usec;
   }
 
-  void SleepForMicroseconds(int micros) override {
-    ::usleep(micros);
-  }
+  void SleepForMicroseconds(int micros) override { ::usleep(micros); }
 
  private:
   void BackgroundThreadMain();
@@ -712,11 +710,10 @@ class PosixEnv : public Env {
     explicit BackgroundWorkItem(void (*function)(void* arg), void* arg)
         : function(function), arg(arg) {}
 
-    void (* const function)(void*);
+    void (*const function)(void*);
     void* const arg;
   };
 
-
   port::Mutex background_work_mutex_;
   port::CondVar background_work_cv_ GUARDED_BY(background_work_mutex_);
   bool started_background_thread_ GUARDED_BY(background_work_mutex_);
@@ -726,13 +723,11 @@ class PosixEnv : public Env {
 
   PosixLockTable locks_;  // Thread-safe.
   Limiter mmap_limiter_;  // Thread-safe.
-  Limiter fd_limiter_;  // Thread-safe.
+  Limiter fd_limiter_;    // Thread-safe.
 };
 
 // Return the maximum number of concurrent mmaps.
-int MaxMmaps() {
-  return g_mmap_limit;
-}
+int MaxMmaps() { return g_mmap_limit; }
 
 // Return the maximum number of read-only files to keep open.
 int MaxOpenFiles() {
@@ -758,8 +753,7 @@ PosixEnv::PosixEnv()
     : background_work_cv_(&background_work_mutex_),
       started_background_thread_(false),
       mmap_limiter_(MaxMmaps()),
-      fd_limiter_(MaxOpenFiles()) {
-}
+      fd_limiter_(MaxOpenFiles()) {}
 
 void PosixEnv::Schedule(
     void (*background_work_function)(void* background_work_arg),
@@ -792,8 +786,7 @@ void PosixEnv::BackgroundThreadMain() {
     }
 
     assert(!background_work_queue_.empty());
-    auto background_work_function =
-        background_work_queue_.front().function;
+    auto background_work_function = background_work_queue_.front().function;
     void* background_work_arg = background_work_queue_.front().arg;
     background_work_queue_.pop();
 
@@ -816,7 +809,7 @@ namespace {
 //     static PlatformSingletonEnv default_env;
 //     return default_env.env();
 //   }
-template<typename EnvType>
+template <typename EnvType>
 class SingletonEnv {
  public:
   SingletonEnv() {
@@ -851,7 +844,7 @@ class SingletonEnv {
 };
 
 #if !defined(NDEBUG)
-template<typename EnvType>
+template <typename EnvType>
 std::atomic<bool> SingletonEnv<EnvType>::env_initialized_;
 #endif  // !defined(NDEBUG)
 
diff --git a/util/env_posix_test.cc b/util/env_posix_test.cc
index e28df9a..6a2a1fc 100644
--- a/util/env_posix_test.cc
+++ b/util/env_posix_test.cc
@@ -3,21 +3,19 @@
 // found in the LICENSE file. See the AUTHORS file for names of contributors.
 
 #include "leveldb/env.h"
-
 #include "port/port.h"
-#include "util/testharness.h"
 #include "util/env_posix_test_helper.h"
+#include "util/testharness.h"
 
 namespace leveldb {
 
-static const int kDelayMicros = 100000;
 static const int kReadOnlyFileLimit = 4;
 static const int kMMapLimit = 4;
 
 class EnvPosixTest {
  public:
   Env* env_;
-  EnvPosixTest() : env_(Env::Default()) { }
+  EnvPosixTest() : env_(Env::Default()) {}
 
   static void SetFileLimits(int read_only_file_limit, int mmap_limit) {
     EnvPosixTestHelper::SetReadOnlyFDLimit(read_only_file_limit);
diff --git a/util/env_test.cc b/util/env_test.cc
index b204089..3e81261 100644
--- a/util/env_test.cc
+++ b/util/env_test.cc
@@ -16,13 +16,11 @@
 namespace leveldb {
 
 static const int kDelayMicros = 100000;
-static const int kReadOnlyFileLimit = 4;
-static const int kMMapLimit = 4;
 
 class EnvTest {
  public:
   Env* env_;
-  EnvTest() : env_(Env::Default()) { }
+  EnvTest() : env_(Env::Default()) {}
 };
 
 namespace {
@@ -97,7 +95,7 @@ TEST(EnvTest, RunMany) {
     const int id_;  // Order# for the execution of this callback.
 
     Callback(std::atomic<int>* last_id_ptr, int id)
-        : last_id_ptr_(last_id_ptr), id_(id) { }
+        : last_id_ptr_(last_id_ptr), id_(id) {}
 
     static void Run(void* arg) {
       Callback* callback = reinterpret_cast<Callback*>(arg);
@@ -125,7 +123,7 @@ struct State {
   int val GUARDED_BY(mu);
   int num_running GUARDED_BY(mu);
 
-  State(int val, int num_running) : val(val), num_running(num_running) { }
+  State(int val, int num_running) : val(val), num_running(num_running) {}
 };
 
 static void ThreadBody(void* arg) {
@@ -164,8 +162,8 @@ TEST(EnvTest, TestOpenNonExistentFile) {
   ASSERT_TRUE(!env_->FileExists(non_existent_file));
 
   RandomAccessFile* random_access_file;
-  Status status = env_->NewRandomAccessFile(
-      non_existent_file, &random_access_file);
+  Status status =
+      env_->NewRandomAccessFile(non_existent_file, &random_access_file);
   ASSERT_TRUE(status.IsNotFound());
 
   SequentialFile* sequential_file;
@@ -223,6 +221,4 @@ TEST(EnvTest, ReopenAppendableFile) {
 
 }  // namespace leveldb
 
-int main(int argc, char** argv) {
-  return leveldb::test::RunAllTests();
-}
+int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
diff --git a/util/env_windows.cc b/util/env_windows.cc
index 14e41e9..c537938 100644
--- a/util/env_windows.cc
+++ b/util/env_windows.cc
@@ -122,8 +122,7 @@ class Limiter {
     int old_acquires_allowed =
         acquires_allowed_.fetch_sub(1, std::memory_order_relaxed);
 
-    if (old_acquires_allowed > 0)
-      return true;
+    if (old_acquires_allowed > 0) return true;
 
     acquires_allowed_.fetch_add(1, std::memory_order_relaxed);
     return false;
@@ -131,9 +130,7 @@ class Limiter {
 
   // Release a resource acquired by a previous call to Acquire() that returned
   // true.
-  void Release() {
-    acquires_allowed_.fetch_add(1, std::memory_order_relaxed);
-  }
+  void Release() { acquires_allowed_.fetch_add(1, std::memory_order_relaxed); }
 
  private:
   // The number of available resources.
diff --git a/util/filter_policy.cc b/util/filter_policy.cc
index 7b045c8..90fd754 100644
--- a/util/filter_policy.cc
+++ b/util/filter_policy.cc
@@ -6,6 +6,6 @@
 
 namespace leveldb {
 
-FilterPolicy::~FilterPolicy() { }
+FilterPolicy::~FilterPolicy() {}
 
 }  // namespace leveldb
diff --git a/util/hash.cc b/util/hash.cc
index ed439ce..67dc134 100644
--- a/util/hash.cc
+++ b/util/hash.cc
@@ -2,15 +2,19 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file. See the AUTHORS file for names of contributors.
 
+#include "util/hash.h"
+
 #include <string.h>
+
 #include "util/coding.h"
-#include "util/hash.h"
 
 // The FALLTHROUGH_INTENDED macro can be used to annotate implicit fall-through
 // between switch labels. The real definition should be provided externally.
 // This one is a fallback version for unsupported compilers.
 #ifndef FALLTHROUGH_INTENDED
-#define FALLTHROUGH_INTENDED do { } while (0)
+#define FALLTHROUGH_INTENDED \
+  do {                       \
+  } while (0)
 #endif
 
 namespace leveldb {
@@ -48,5 +52,4 @@ uint32_t Hash(const char* data, size_t n, uint32_t seed) {
   return h;
 }
 
-
 }  // namespace leveldb
diff --git a/util/hash_test.cc b/util/hash_test.cc
index eaa1c92..8f579cc 100644
--- a/util/hash_test.cc
+++ b/util/hash_test.cc
@@ -7,7 +7,7 @@
 
 namespace leveldb {
 
-class HASH { };
+class HASH {};
 
 TEST(HASH, SignedUnsignedIssue) {
   const unsigned char data1[1] = {0x62};
@@ -15,18 +15,10 @@ TEST(HASH, SignedUnsignedIssue) {
   const unsigned char data3[3] = {0xe2, 0x99, 0xa5};
   const unsigned char data4[4] = {0xe1, 0x80, 0xb9, 0x32};
   const unsigned char data5[48] = {
-    0x01, 0xc0, 0x00, 0x00,
-    0x00, 0x00, 0x00, 0x00,
-    0x00, 0x00, 0x00, 0x00,
-    0x00, 0x00, 0x00, 0x00,
-    0x14, 0x00, 0x00, 0x00,
-    0x00, 0x00, 0x04, 0x00,
-    0x00, 0x00, 0x00, 0x14,
-    0x00, 0x00, 0x00, 0x18,
-    0x28, 0x00, 0x00, 0x00,
-    0x00, 0x00, 0x00, 0x00,
-    0x02, 0x00, 0x00, 0x00,
-    0x00, 0x00, 0x00, 0x00,
+      0x01, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+      0x00, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00,
+      0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x18, 0x28, 0x00, 0x00, 0x00,
+      0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
   };
 
   ASSERT_EQ(Hash(0, 0, 0xbc9f1d34), 0xbc9f1d34);
@@ -49,6 +41,4 @@ TEST(HASH, SignedUnsignedIssue) {
 
 }  // namespace leveldb
 
-int main(int argc, char** argv) {
-  return leveldb::test::RunAllTests();
-}
+int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
diff --git a/util/histogram.cc b/util/histogram.cc
index bb95f58..65092c8 100644
--- a/util/histogram.cc
+++ b/util/histogram.cc
@@ -2,36 +2,174 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file. See the AUTHORS file for names of contributors.
 
+#include "util/histogram.h"
+
 #include <math.h>
 #include <stdio.h>
+
 #include "port/port.h"
-#include "util/histogram.h"
 
 namespace leveldb {
 
 const double Histogram::kBucketLimit[kNumBuckets] = {
-  1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 12, 14, 16, 18, 20, 25, 30, 35, 40, 45,
-  50, 60, 70, 80, 90, 100, 120, 140, 160, 180, 200, 250, 300, 350, 400, 450,
-  500, 600, 700, 800, 900, 1000, 1200, 1400, 1600, 1800, 2000, 2500, 3000,
-  3500, 4000, 4500, 5000, 6000, 7000, 8000, 9000, 10000, 12000, 14000,
-  16000, 18000, 20000, 25000, 30000, 35000, 40000, 45000, 50000, 60000,
-  70000, 80000, 90000, 100000, 120000, 140000, 160000, 180000, 200000,
-  250000, 300000, 350000, 400000, 450000, 500000, 600000, 700000, 800000,
-  900000, 1000000, 1200000, 1400000, 1600000, 1800000, 2000000, 2500000,
-  3000000, 3500000, 4000000, 4500000, 5000000, 6000000, 7000000, 8000000,
-  9000000, 10000000, 12000000, 14000000, 16000000, 18000000, 20000000,
-  25000000, 30000000, 35000000, 40000000, 45000000, 50000000, 60000000,
-  70000000, 80000000, 90000000, 100000000, 120000000, 140000000, 160000000,
-  180000000, 200000000, 250000000, 300000000, 350000000, 400000000,
-  450000000, 500000000, 600000000, 700000000, 800000000, 900000000,
-  1000000000, 1200000000, 1400000000, 1600000000, 1800000000, 2000000000,
-  2500000000.0, 3000000000.0, 3500000000.0, 4000000000.0, 4500000000.0,
-  5000000000.0, 6000000000.0, 7000000000.0, 8000000000.0, 9000000000.0,
-  1e200,
+    1,
+    2,
+    3,
+    4,
+    5,
+    6,
+    7,
+    8,
+    9,
+    10,
+    12,
+    14,
+    16,
+    18,
+    20,
+    25,
+    30,
+    35,
+    40,
+    45,
+    50,
+    60,
+    70,
+    80,
+    90,
+    100,
+    120,
+    140,
+    160,
+    180,
+    200,
+    250,
+    300,
+    350,
+    400,
+    450,
+    500,
+    600,
+    700,
+    800,
+    900,
+    1000,
+    1200,
+    1400,
+    1600,
+    1800,
+    2000,
+    2500,
+    3000,
+    3500,
+    4000,
+    4500,
+    5000,
+    6000,
+    7000,
+    8000,
+    9000,
+    10000,
+    12000,
+    14000,
+    16000,
+    18000,
+    20000,
+    25000,
+    30000,
+    35000,
+    40000,
+    45000,
+    50000,
+    60000,
+    70000,
+    80000,
+    90000,
+    100000,
+    120000,
+    140000,
+    160000,
+    180000,
+    200000,
+    250000,
+    300000,
+    350000,
+    400000,
+    450000,
+    500000,
+    600000,
+    700000,
+    800000,
+    900000,
+    1000000,
+    1200000,
+    1400000,
+    1600000,
+    1800000,
+    2000000,
+    2500000,
+    3000000,
+    3500000,
+    4000000,
+    4500000,
+    5000000,
+    6000000,
+    7000000,
+    8000000,
+    9000000,
+    10000000,
+    12000000,
+    14000000,
+    16000000,
+    18000000,
+    20000000,
+    25000000,
+    30000000,
+    35000000,
+    40000000,
+    45000000,
+    50000000,
+    60000000,
+    70000000,
+    80000000,
+    90000000,
+    100000000,
+    120000000,
+    140000000,
+    160000000,
+    180000000,
+    200000000,
+    250000000,
+    300000000,
+    350000000,
+    400000000,
+    450000000,
+    500000000,
+    600000000,
+    700000000,
+    800000000,
+    900000000,
+    1000000000,
+    1200000000,
+    1400000000,
+    1600000000,
+    1800000000,
+    2000000000,
+    2500000000.0,
+    3000000000.0,
+    3500000000.0,
+    4000000000.0,
+    4500000000.0,
+    5000000000.0,
+    6000000000.0,
+    7000000000.0,
+    8000000000.0,
+    9000000000.0,
+    1e200,
 };
 
 void Histogram::Clear() {
-  min_ = kBucketLimit[kNumBuckets-1];
+  min_ = kBucketLimit[kNumBuckets - 1];
   max_ = 0;
   num_ = 0;
   sum_ = 0;
@@ -66,9 +204,7 @@ void Histogram::Merge(const Histogram& other) {
   }
 }
 
-double Histogram::Median() const {
-  return Percentile(50.0);
-}
+double Histogram::Median() const { return Percentile(50.0); }
 
 double Histogram::Percentile(double p) const {
   double threshold = num_ * (p / 100.0);
@@ -77,7 +213,7 @@ double Histogram::Percentile(double p) const {
     sum += buckets_[b];
     if (sum >= threshold) {
       // Scale linearly within this bucket
-      double left_point = (b == 0) ? 0 : kBucketLimit[b-1];
+      double left_point = (b == 0) ? 0 : kBucketLimit[b - 1];
       double right_point = kBucketLimit[b];
       double left_sum = sum - buckets_[b];
       double right_sum = sum;
@@ -105,12 +241,10 @@ double Histogram::StandardDeviation() const {
 std::string Histogram::ToString() const {
   std::string r;
   char buf[200];
-  snprintf(buf, sizeof(buf),
-           "Count: %.0f  Average: %.4f  StdDev: %.2f\n",
-           num_, Average(), StandardDeviation());
+  snprintf(buf, sizeof(buf), "Count: %.0f  Average: %.4f  StdDev: %.2f\n", num_,
+           Average(), StandardDeviation());
   r.append(buf);
-  snprintf(buf, sizeof(buf),
-           "Min: %.4f  Median: %.4f  Max: %.4f\n",
+  snprintf(buf, sizeof(buf), "Min: %.4f  Median: %.4f  Max: %.4f\n",
            (num_ == 0.0 ? 0.0 : min_), Median(), max_);
   r.append(buf);
   r.append("------------------------------------------------------\n");
@@ -119,17 +253,16 @@ std::string Histogram::ToString() const {
   for (int b = 0; b < kNumBuckets; b++) {
     if (buckets_[b] <= 0.0) continue;
     sum += buckets_[b];
-    snprintf(buf, sizeof(buf),
-             "[ %7.0f, %7.0f ) %7.0f %7.3f%% %7.3f%% ",
-             ((b == 0) ? 0.0 : kBucketLimit[b-1]),      // left
-             kBucketLimit[b],                           // right
-             buckets_[b],                               // count
-             mult * buckets_[b],                        // percentage
-             mult * sum);                               // cumulative percentage
+    snprintf(buf, sizeof(buf), "[ %7.0f, %7.0f ) %7.0f %7.3f%% %7.3f%% ",
+             ((b == 0) ? 0.0 : kBucketLimit[b - 1]),  // left
+             kBucketLimit[b],                         // right
+             buckets_[b],                             // count
+             mult * buckets_[b],                      // percentage
+             mult * sum);                             // cumulative percentage
     r.append(buf);
 
     // Add hash marks based on percentage; 20 marks for 100%.
-    int marks = static_cast<int>(20*(buckets_[b] / num_) + 0.5);
+    int marks = static_cast<int>(20 * (buckets_[b] / num_) + 0.5);
     r.append(marks, '#');
     r.push_back('\n');
   }
diff --git a/util/histogram.h b/util/histogram.h
index 1ef9f3c..fe281a9 100644
--- a/util/histogram.h
+++ b/util/histogram.h
@@ -11,8 +11,8 @@ namespace leveldb {
 
 class Histogram {
  public:
-  Histogram() { }
-  ~Histogram() { }
+  Histogram() {}
+  ~Histogram() {}
 
   void Clear();
   void Add(double value);
diff --git a/util/logging.cc b/util/logging.cc
index 411a303..1ad8f1c 100644
--- a/util/logging.cc
+++ b/util/logging.cc
@@ -8,7 +8,9 @@
 #include <stdarg.h>
 #include <stdio.h>
 #include <stdlib.h>
+
 #include <limits>
+
 #include "leveldb/env.h"
 #include "leveldb/slice.h"
 
@@ -16,7 +18,7 @@ namespace leveldb {
 
 void AppendNumberTo(std::string* str, uint64_t num) {
   char buf[30];
-  snprintf(buf, sizeof(buf), "%llu", (unsigned long long) num);
+  snprintf(buf, sizeof(buf), "%llu", (unsigned long long)num);
   str->append(buf);
 }
 
@@ -62,8 +64,7 @@ bool ConsumeDecimalNumber(Slice* in, uint64_t* val) {
   const unsigned char* current = start;
   for (; current != end; ++current) {
     const unsigned char ch = *current;
-    if (ch < '0' || ch > '9')
-      break;
+    if (ch < '0' || ch > '9') break;
 
     // Overflow check.
     // kMaxUint64 / 10 is also constant and will be optimized away.
diff --git a/util/logging.h b/util/logging.h
index 13351a2..8ff2da8 100644
--- a/util/logging.h
+++ b/util/logging.h
@@ -8,9 +8,11 @@
 #ifndef STORAGE_LEVELDB_UTIL_LOGGING_H_
 #define STORAGE_LEVELDB_UTIL_LOGGING_H_
 
-#include <stdio.h>
 #include <stdint.h>
+#include <stdio.h>
+
 #include <string>
+
 #include "port/port.h"
 
 namespace leveldb {
diff --git a/util/logging_test.cc b/util/logging_test.cc
index 11665fc..389cbeb 100644
--- a/util/logging_test.cc
+++ b/util/logging_test.cc
@@ -11,7 +11,7 @@
 
 namespace leveldb {
 
-class Logging { };
+class Logging {};
 
 TEST(Logging, NumberToString) {
   ASSERT_EQ("0", NumberToString(0));
@@ -140,6 +140,4 @@ TEST(Logging, ConsumeDecimalNumberNoDigits) {
 
 }  // namespace leveldb
 
-int main(int argc, char** argv) {
-  return leveldb::test::RunAllTests();
-}
+int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
diff --git a/util/mutexlock.h b/util/mutexlock.h
index 08d709a..0cb2e25 100644
--- a/util/mutexlock.h
+++ b/util/mutexlock.h
@@ -22,8 +22,7 @@ namespace leveldb {
 
 class SCOPED_LOCKABLE MutexLock {
  public:
-  explicit MutexLock(port::Mutex *mu) EXCLUSIVE_LOCK_FUNCTION(mu)
-      : mu_(mu)  {
+  explicit MutexLock(port::Mutex* mu) EXCLUSIVE_LOCK_FUNCTION(mu) : mu_(mu) {
     this->mu_->Lock();
   }
   ~MutexLock() UNLOCK_FUNCTION() { this->mu_->Unlock(); }
@@ -32,10 +31,9 @@ class SCOPED_LOCKABLE MutexLock {
   MutexLock& operator=(const MutexLock&) = delete;
 
  private:
-  port::Mutex *const mu_;
+  port::Mutex* const mu_;
 };
 
 }  // namespace leveldb
 
-
 #endif  // STORAGE_LEVELDB_UTIL_MUTEXLOCK_H_
diff --git a/util/no_destructor.h b/util/no_destructor.h
index 4827e45..a0d3b87 100644
--- a/util/no_destructor.h
+++ b/util/no_destructor.h
@@ -13,7 +13,7 @@ namespace leveldb {
 // Wraps an instance whose destructor is never called.
 //
 // This is intended for use with function-level static variables.
-template<typename InstanceType>
+template <typename InstanceType>
 class NoDestructor {
  public:
   template <typename... ConstructorArgTypes>
@@ -23,8 +23,8 @@ class NoDestructor {
     static_assert(
         alignof(decltype(instance_storage_)) >= alignof(InstanceType),
         "instance_storage_ does not meet the instance's alignment requirement");
-    new (&instance_storage_) InstanceType(
-        std::forward<ConstructorArgTypes>(constructor_args)...);
+    new (&instance_storage_)
+        InstanceType(std::forward<ConstructorArgTypes>(constructor_args)...);
   }
 
   ~NoDestructor() = default;
@@ -37,9 +37,8 @@ class NoDestructor {
   }
 
  private:
-  typename
-      std::aligned_storage<sizeof(InstanceType), alignof(InstanceType)>::type
-      instance_storage_;
+  typename std::aligned_storage<sizeof(InstanceType),
+                                alignof(InstanceType)>::type instance_storage_;
 };
 
 }  // namespace leveldb
diff --git a/util/no_destructor_test.cc b/util/no_destructor_test.cc
index 7ce2631..b41caca 100644
--- a/util/no_destructor_test.cc
+++ b/util/no_destructor_test.cc
@@ -28,7 +28,7 @@ constexpr const uint64_t kGoldenB = 0xaabbccddeeffaabb;
 
 }  // namespace
 
-class NoDestructorTest { };
+class NoDestructorTest {};
 
 TEST(NoDestructorTest, StackInstance) {
   NoDestructor<DoNotDestruct> instance(kGoldenA, kGoldenB);
@@ -44,6 +44,4 @@ TEST(NoDestructorTest, StaticInstance) {
 
 }  // namespace leveldb
 
-int main(int argc, char** argv) {
-  return leveldb::test::RunAllTests();
-}
+int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
diff --git a/util/options.cc b/util/options.cc
index 63284f8..62de5bf 100644
--- a/util/options.cc
+++ b/util/options.cc
@@ -9,9 +9,6 @@
 
 namespace leveldb {
 
-Options::Options()
-    : comparator(BytewiseComparator()),
-      env(Env::Default()) {
-}
+Options::Options() : comparator(BytewiseComparator()), env(Env::Default()) {}
 
 }  // namespace leveldb
diff --git a/util/posix_logger.h b/util/posix_logger.h
index 5685fa3..28e15d1 100644
--- a/util/posix_logger.h
+++ b/util/posix_logger.h
@@ -26,13 +26,9 @@ class PosixLogger final : public Logger {
   // Creates a logger that writes to the given file.
   //
   // The PosixLogger instance takes ownership of the file handle.
-  explicit PosixLogger(std::FILE* fp) : fp_(fp) {
-    assert(fp != nullptr);
-  }
+  explicit PosixLogger(std::FILE* fp) : fp_(fp) { assert(fp != nullptr); }
 
-  ~PosixLogger() override {
-    std::fclose(fp_);
-  }
+  ~PosixLogger() override { std::fclose(fp_); }
 
   void Logv(const char* format, va_list arguments) override {
     // Record the time as close to the Logv() call as possible.
@@ -67,15 +63,10 @@ class PosixLogger final : public Logger {
 
       // Print the header into the buffer.
       int buffer_offset = snprintf(
-          buffer, buffer_size,
-          "%04d/%02d/%02d-%02d:%02d:%02d.%06d %s ",
-          now_components.tm_year + 1900,
-          now_components.tm_mon + 1,
-          now_components.tm_mday,
-          now_components.tm_hour,
-          now_components.tm_min,
-          now_components.tm_sec,
-          static_cast<int>(now_timeval.tv_usec),
+          buffer, buffer_size, "%04d/%02d/%02d-%02d:%02d:%02d.%06d %s ",
+          now_components.tm_year + 1900, now_components.tm_mon + 1,
+          now_components.tm_mday, now_components.tm_hour, now_components.tm_min,
+          now_components.tm_sec, static_cast<int>(now_timeval.tv_usec),
           thread_id.c_str());
 
       // The header can be at most 28 characters (10 date + 15 time +
@@ -89,9 +80,9 @@ class PosixLogger final : public Logger {
       // Print the message into the buffer.
       std::va_list arguments_copy;
       va_copy(arguments_copy, arguments);
-      buffer_offset += std::vsnprintf(buffer + buffer_offset,
-                                      buffer_size - buffer_offset, format,
-                                      arguments_copy);
+      buffer_offset +=
+          std::vsnprintf(buffer + buffer_offset, buffer_size - buffer_offset,
+                         format, arguments_copy);
       va_end(arguments_copy);
 
       // The code below may append a newline at the end of the buffer, which
diff --git a/util/random.h b/util/random.h
index ddd51b1..76f7daf 100644
--- a/util/random.h
+++ b/util/random.h
@@ -15,6 +15,7 @@ namespace leveldb {
 class Random {
  private:
   uint32_t seed_;
+
  public:
   explicit Random(uint32_t s) : seed_(s & 0x7fffffffu) {
     // Avoid bad seeds.
@@ -23,8 +24,8 @@ class Random {
     }
   }
   uint32_t Next() {
-    static const uint32_t M = 2147483647L;   // 2^31-1
-    static const uint64_t A = 16807;  // bits 14, 8, 7, 5, 2, 1, 0
+    static const uint32_t M = 2147483647L;  // 2^31-1
+    static const uint64_t A = 16807;        // bits 14, 8, 7, 5, 2, 1, 0
     // We are computing
     //       seed_ = (seed_ * A) % M,    where M = 2^31-1
     //
@@ -54,9 +55,7 @@ class Random {
   // Skewed: pick "base" uniformly from range [0,max_log] and then
   // return "base" random bits.  The effect is to pick a number in the
   // range [0,2^max_log-1] with exponential bias towards smaller numbers.
-  uint32_t Skewed(int max_log) {
-    return Uniform(1 << Uniform(max_log + 1));
-  }
+  uint32_t Skewed(int max_log) { return Uniform(1 << Uniform(max_log + 1)); }
 };
 
 }  // namespace leveldb
diff --git a/util/status.cc b/util/status.cc
index 5591381..6ca8da6 100644
--- a/util/status.cc
+++ b/util/status.cc
@@ -2,9 +2,11 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file. See the AUTHORS file for names of contributors.
 
+#include "leveldb/status.h"
+
 #include <stdio.h>
+
 #include "port/port.h"
-#include "leveldb/status.h"
 
 namespace leveldb {
 
@@ -59,8 +61,8 @@ std::string Status::ToString() const {
         type = "IO error: ";
         break;
       default:
-        snprintf(tmp, sizeof(tmp), "Unknown code(%d): ",
-                 static_cast<int>(code()));
+        snprintf(tmp, sizeof(tmp),
+                 "Unknown code(%d): ", static_cast<int>(code()));
         type = tmp;
         break;
     }
diff --git a/util/status_test.cc b/util/status_test.cc
index 7ed3b9e..2842319 100644
--- a/util/status_test.cc
+++ b/util/status_test.cc
@@ -37,6 +37,4 @@ TEST(Status, MoveConstructor) {
 
 }  // namespace leveldb
 
-int main(int argc, char** argv) {
-  return leveldb::test::RunAllTests();
-}
+int main(int argc, char** argv) { return leveldb::test::RunAllTests(); }
diff --git a/util/testharness.cc b/util/testharness.cc
index 37ba410..318ecfa 100644
--- a/util/testharness.cc
+++ b/util/testharness.cc
@@ -23,7 +23,7 @@ struct Test {
   void (*func)();
 };
 std::vector<Test>* tests;
-}
+}  // namespace
 
 bool RegisterTest(const char* base, const char* name, void (*func)()) {
   if (tests == nullptr) {
diff --git a/util/testharness.h b/util/testharness.h
index 8ee7972..72cd162 100644
--- a/util/testharness.h
+++ b/util/testharness.h
@@ -47,9 +47,7 @@ class Tester {
   std::stringstream ss_;
 
  public:
-  Tester(const char* f, int l)
-      : ok_(true), fname_(f), line_(l) {
-  }
+  Tester(const char* f, int l) : ok_(true), fname_(f), line_(l) {}
 
   ~Tester() {
     if (!ok_) {
@@ -74,14 +72,14 @@ class Tester {
     return *this;
   }
 
-#define BINARY_OP(name, op)                             \
-  template <class X, class Y>                           \
-  Tester& name(const X& x, const Y& y) {                \
-    if (!(x op y)) {                                    \
-      ss_ << " failed: " << x << (" " #op " ") << y;    \
-      ok_ = false;                                      \
-    }                                                   \
-    return *this;                                       \
+#define BINARY_OP(name, op)                          \
+  template <class X, class Y>                        \
+  Tester& name(const X& x, const Y& y) {             \
+    if (!(x op y)) {                                 \
+      ss_ << " failed: " << x << (" " #op " ") << y; \
+      ok_ = false;                                   \
+    }                                                \
+    return *this;                                    \
   }
 
   BINARY_OP(IsEq, ==)
@@ -104,28 +102,34 @@ class Tester {
 
 #define ASSERT_TRUE(c) ::leveldb::test::Tester(__FILE__, __LINE__).Is((c), #c)
 #define ASSERT_OK(s) ::leveldb::test::Tester(__FILE__, __LINE__).IsOk((s))
-#define ASSERT_EQ(a,b) ::leveldb::test::Tester(__FILE__, __LINE__).IsEq((a),(b))
-#define ASSERT_NE(a,b) ::leveldb::test::Tester(__FILE__, __LINE__).IsNe((a),(b))
-#define ASSERT_GE(a,b) ::leveldb::test::Tester(__FILE__, __LINE__).IsGe((a),(b))
-#define ASSERT_GT(a,b) ::leveldb::test::Tester(__FILE__, __LINE__).IsGt((a),(b))
-#define ASSERT_LE(a,b) ::leveldb::test::Tester(__FILE__, __LINE__).IsLe((a),(b))
-#define ASSERT_LT(a,b) ::leveldb::test::Tester(__FILE__, __LINE__).IsLt((a),(b))
+#define ASSERT_EQ(a, b) \
+  ::leveldb::test::Tester(__FILE__, __LINE__).IsEq((a), (b))
+#define ASSERT_NE(a, b) \
+  ::leveldb::test::Tester(__FILE__, __LINE__).IsNe((a), (b))
+#define ASSERT_GE(a, b) \
+  ::leveldb::test::Tester(__FILE__, __LINE__).IsGe((a), (b))
+#define ASSERT_GT(a, b) \
+  ::leveldb::test::Tester(__FILE__, __LINE__).IsGt((a), (b))
+#define ASSERT_LE(a, b) \
+  ::leveldb::test::Tester(__FILE__, __LINE__).IsLe((a), (b))
+#define ASSERT_LT(a, b) \
+  ::leveldb::test::Tester(__FILE__, __LINE__).IsLt((a), (b))
 
 #define TCONCAT(a, b) TCONCAT1(a, b)
 #define TCONCAT1(a, b) a##b
 
-#define TEST(base, name)                                                       \
-class TCONCAT(_Test_, name) : public base {                                    \
- public:                                                                       \
-  void _Run();                                                                 \
-  static void _RunIt() {                                                       \
-    TCONCAT(_Test_, name) t;                                                   \
-    t._Run();                                                                  \
-  }                                                                            \
-};                                                                             \
-bool TCONCAT(_Test_ignored_, name) =                                           \
-  ::leveldb::test::RegisterTest(#base, #name, &TCONCAT(_Test_, name)::_RunIt); \
-void TCONCAT(_Test_, name)::_Run()
+#define TEST(base, name)                                              \
+  class TCONCAT(_Test_, name) : public base {                         \
+   public:                                                            \
+    void _Run();                                                      \
+    static void _RunIt() {                                            \
+      TCONCAT(_Test_, name) t;                                        \
+      t._Run();                                                       \
+    }                                                                 \
+  };                                                                  \
+  bool TCONCAT(_Test_ignored_, name) = ::leveldb::test::RegisterTest( \
+      #base, #name, &TCONCAT(_Test_, name)::_RunIt);                  \
+  void TCONCAT(_Test_, name)::_Run()
 
 // Register the specified test.  Typically not used directly, but
 // invoked via the macro expansion of TEST.
diff --git a/util/testutil.cc b/util/testutil.cc
index 9d8079c..6b151b9 100644
--- a/util/testutil.cc
+++ b/util/testutil.cc
@@ -12,7 +12,7 @@ namespace test {
 Slice RandomString(Random* rnd, int len, std::string* dst) {
   dst->resize(len);
   for (int i = 0; i < len; i++) {
-    (*dst)[i] = static_cast<char>(' ' + rnd->Uniform(95));   // ' ' .. '~'
+    (*dst)[i] = static_cast<char>(' ' + rnd->Uniform(95));  // ' ' .. '~'
   }
   return Slice(*dst);
 }
@@ -20,9 +20,8 @@ Slice RandomString(Random* rnd, int len, std::string* dst) {
 std::string RandomKey(Random* rnd, int len) {
   // Make sure to generate a wide variety of characters so we
   // test the boundary conditions for short-key optimizations.
-  static const char kTestChars[] = {
-    '\0', '\1', 'a', 'b', 'c', 'd', 'e', '\xfd', '\xfe', '\xff'
-  };
+  static const char kTestChars[] = {'\0', '\1', 'a',    'b',    'c',
+                                    'd',  'e',  '\xfd', '\xfe', '\xff'};
   std::string result;
   for (int i = 0; i < len; i++) {
     result += kTestChars[rnd->Uniform(sizeof(kTestChars))];
@@ -30,9 +29,8 @@ std::string RandomKey(Random* rnd, int len) {
   return result;
 }
 
-
-Slice CompressibleString(Random* rnd, double compressed_fraction,
-                         size_t len, std::string* dst) {
+Slice CompressibleString(Random* rnd, double compressed_fraction, size_t len,
+                         std::string* dst) {
   int raw = static_cast<int>(len * compressed_fraction);
   if (raw < 1) raw = 1;
   std::string raw_data;
diff --git a/util/testutil.h b/util/testutil.h
index a568824..bb4051b 100644
--- a/util/testutil.h
+++ b/util/testutil.h
@@ -24,8 +24,8 @@ std::string RandomKey(Random* rnd, int len);
 // Store in *dst a string of length "len" that will compress to
 // "N*compressed_fraction" bytes and return a Slice that references
 // the generated data.
-Slice CompressibleString(Random* rnd, double compressed_fraction,
-                         size_t len, std::string* dst);
+Slice CompressibleString(Random* rnd, double compressed_fraction, size_t len,
+                         std::string* dst);
 
 // A wrapper that allows injection of errors.
 class ErrorEnv : public EnvWrapper {
@@ -33,12 +33,11 @@ class ErrorEnv : public EnvWrapper {
   bool writable_file_error_;
   int num_writable_file_errors_;
 
-  ErrorEnv() : EnvWrapper(NewMemEnv(Env::Default())),
-               writable_file_error_(false),
-               num_writable_file_errors_(0) { }
-  ~ErrorEnv() override {
-    delete target();
-  }
+  ErrorEnv()
+      : EnvWrapper(NewMemEnv(Env::Default())),
+        writable_file_error_(false),
+        num_writable_file_errors_(0) {}
+  ~ErrorEnv() override { delete target(); }
 
   Status NewWritableFile(const std::string& fname,
                          WritableFile** result) override {
diff --git a/util/windows_logger.h b/util/windows_logger.h
index 96799bc..9296063 100644
--- a/util/windows_logger.h
+++ b/util/windows_logger.h
@@ -23,13 +23,9 @@ class WindowsLogger final : public Logger {
   // Creates a logger that writes to the given file.
   //
   // The PosixLogger instance takes ownership of the file handle.
-  explicit WindowsLogger(std::FILE* fp) : fp_(fp) {
-    assert(fp != nullptr);
-  }
+  explicit WindowsLogger(std::FILE* fp) : fp_(fp) { assert(fp != nullptr); }
 
-  ~WindowsLogger() override {
-    std::fclose(fp_);
-  }
+  ~WindowsLogger() override { std::fclose(fp_); }
 
   void Logv(const char* format, va_list arguments) override {
     // Record the time as close to the Logv() call as possible.
@@ -61,14 +57,9 @@ class WindowsLogger final : public Logger {
 
       // Print the header into the buffer.
       int buffer_offset = snprintf(
-          buffer, buffer_size,
-          "%04d/%02d/%02d-%02d:%02d:%02d.%06d %s ",
-          now_components.wYear,
-          now_components.wMonth,
-          now_components.wDay,
-          now_components.wHour,
-          now_components.wMinute,
-          now_components.wSecond,
+          buffer, buffer_size, "%04d/%02d/%02d-%02d:%02d:%02d.%06d %s ",
+          now_components.wYear, now_components.wMonth, now_components.wDay,
+          now_components.wHour, now_components.wMinute, now_components.wSecond,
           static_cast<int>(now_components.wMilliseconds * 1000),
           thread_id.c_str());
 
@@ -83,9 +74,9 @@ class WindowsLogger final : public Logger {
       // Print the message into the buffer.
       std::va_list arguments_copy;
       va_copy(arguments_copy, arguments);
-      buffer_offset += std::vsnprintf(buffer + buffer_offset,
-                                      buffer_size - buffer_offset, format,
-                                      arguments_copy);
+      buffer_offset +=
+          std::vsnprintf(buffer + buffer_offset, buffer_size - buffer_offset,
+                         format, arguments_copy);
       va_end(arguments_copy);
 
       // The code below may append a newline at the end of the buffer, which