@@ -2,6 +2,9 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file. See the AUTHORS file for names of contributors.
+#include <atomic>
+#include <string>
+
 #include "leveldb/db.h"
 #include "leveldb/filter_policy.h"
 #include "db/db_impl.h"
@@ -61,7 +64,7 @@ class AtomicCounter {
 void DelayMilliseconds(int millis) {
   Env::Default()->SleepForMicroseconds(millis * 1000);
 }
-}
+}  // namespace
 // Test Env to override default Env behavior for testing.
 class TestEnv : public EnvWrapper {
@@ -93,45 +96,45 @@ class TestEnv : public EnvWrapper {
   bool ignore_dot_files_;
 };
-// Special Env used to delay background operations
+// Special Env used to delay background operations.
 class SpecialEnv : public EnvWrapper {
  public:
   // sstable/log Sync() calls are blocked while this pointer is non-null.
-  port::AtomicPointer delay_data_sync_;
+  std::atomic<bool> delay_data_sync_;
   // sstable/log Sync() calls return an error.
-  port::AtomicPointer data_sync_error_;
+  std::atomic<bool> data_sync_error_;
   // Simulate no-space errors while this pointer is non-null.
-  port::AtomicPointer no_space_;
+  std::atomic<bool> no_space_;
   // Simulate non-writable file system while this pointer is non-null.
-  port::AtomicPointer non_writable_;
+  std::atomic<bool> non_writable_;
   // Force sync of manifest files to fail while this pointer is non-null.
-  port::AtomicPointer manifest_sync_error_;
+  std::atomic<bool> manifest_sync_error_;
   // Force write to manifest files to fail while this pointer is non-null.
-  port::AtomicPointer manifest_write_error_;
+  std::atomic<bool> manifest_write_error_;
   bool count_random_reads_;
   AtomicCounter random_read_counter_;
-  explicit SpecialEnv(Env* base) : EnvWrapper(base) {
-    delay_data_sync_.Release_Store(nullptr);
-    data_sync_error_.Release_Store(nullptr);
-    no_space_.Release_Store(nullptr);
-    non_writable_.Release_Store(nullptr);
-    count_random_reads_ = false;
-    manifest_sync_error_.Release_Store(nullptr);
-    manifest_write_error_.Release_Store(nullptr);
+  explicit SpecialEnv(Env* base) : EnvWrapper(base),
+                                   delay_data_sync_(false),
+                                   data_sync_error_(false),
+                                   no_space_(false),
+                                   non_writable_(false),
+                                   manifest_sync_error_(false),
+                                   manifest_write_error_(false),
+                                   count_random_reads_(false) {
   }
   Status NewWritableFile(const std::string& f, WritableFile** r) {
     class DataFile : public WritableFile {
      private:
-      SpecialEnv* env_;
-      WritableFile* base_;
+      SpecialEnv* const env_;
+      WritableFile* const base_;
      public:
       DataFile(SpecialEnv* env, WritableFile* base)
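For context, the conversion pattern applied throughout this hunk is mechanical: a port::AtomicPointer that only ever held nullptr or a sentinel pointer becomes a std::atomic<bool> with explicit acquire/release ordering. A minimal standalone sketch of that mapping (not part of the patch; the FlagDemo name is illustrative only):

#include <atomic>

// Illustrative only: mirrors how SpecialEnv's flags are written and read.
struct FlagDemo {
  std::atomic<bool> no_space_{false};

  // Writer side: publish the flag (was Release_Store(env_) / Release_Store(nullptr)).
  void SetNoSpace(bool on) { no_space_.store(on, std::memory_order_release); }

  // Reader side: observe the flag (was Acquire_Load() != nullptr).
  bool NoSpace() const { return no_space_.load(std::memory_order_acquire); }
};

int main() {
  FlagDemo d;
  d.SetNoSpace(true);
  return d.NoSpace() ? 0 : 1;
}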
@@ -140,7 +143,7 @@ class SpecialEnv : public EnvWrapper {
       }
       ~DataFile() { delete base_; }
       Status Append(const Slice& data) {
-        if (env_->no_space_.Acquire_Load() != nullptr) {
+        if (env_->no_space_.load(std::memory_order_acquire)) {
           // Drop writes on the floor
           return Status::OK();
         } else {
@ -150,10 +153,10 @@ class SpecialEnv : public EnvWrapper {
Status Close ( ) { return base_ - > Close ( ) ; }
Status Close ( ) { return base_ - > Close ( ) ; }
Status Flush ( ) { return base_ - > Flush ( ) ; }
Status Flush ( ) { return base_ - > Flush ( ) ; }
Status Sync ( ) {
Status Sync ( ) {
if ( env_ - > data_sync_error_ . Acquire_Load ( ) ! = nullptr ) {
if ( env_ - > data_sync_error_ . load ( std : : memory_order_acquire ) ) {
return Status : : IOError ( " simulated data sync error " ) ;
return Status : : IOError ( " simulated data sync error " ) ;
}
}
while ( env_ - > delay_data_sync_ . Acquire_Load ( ) ! = nullptr ) {
while ( env_ - > delay_data_sync_ . load ( std : : memory_order_acquire ) ) {
DelayMilliseconds ( 100 ) ;
DelayMilliseconds ( 100 ) ;
}
}
return base_ - > Sync ( ) ;
return base_ - > Sync ( ) ;
@@ -167,7 +170,7 @@ class SpecialEnv : public EnvWrapper {
       ManifestFile(SpecialEnv* env, WritableFile* b) : env_(env), base_(b) { }
       ~ManifestFile() { delete base_; }
       Status Append(const Slice& data) {
-        if (env_->manifest_write_error_.Acquire_Load() != nullptr) {
+        if (env_->manifest_write_error_.load(std::memory_order_acquire)) {
           return Status::IOError("simulated writer error");
         } else {
           return base_->Append(data);
@@ -176,7 +179,7 @@ class SpecialEnv : public EnvWrapper {
       Status Close() { return base_->Close(); }
       Status Flush() { return base_->Flush(); }
       Status Sync() {
-        if (env_->manifest_sync_error_.Acquire_Load() != nullptr) {
+        if (env_->manifest_sync_error_.load(std::memory_order_acquire)) {
           return Status::IOError("simulated sync error");
         } else {
           return base_->Sync();
@@ -184,7 +187,7 @@ class SpecialEnv : public EnvWrapper {
       }
     };
-    if (non_writable_.Acquire_Load() != nullptr) {
+    if (non_writable_.load(std::memory_order_acquire)) {
       return Status::IOError("simulated write error");
     }
@@ -424,7 +427,7 @@ class DBTest {
     ASSERT_TRUE(
         db_->GetProperty("leveldb.num-files-at-level" + NumberToString(level),
                          &property));
-    return atoi(property.c_str());
+    return std::stoi(property);
   }
   int TotalTableFiles() {
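One behavioral note on the atoi-to-std::stoi change above: atoi silently returns 0 on malformed input, while std::stoi throws. The property strings here are numeric, so either works; a small standalone sketch of the difference (not part of the patch):

#include <cstdlib>
#include <iostream>
#include <stdexcept>
#include <string>

int main() {
  const std::string good = "42";
  const std::string bad = "not-a-number";

  std::cout << std::atoi(good.c_str()) << "\n";  // 42
  std::cout << std::atoi(bad.c_str()) << "\n";   // 0, error is silent

  std::cout << std::stoi(good) << "\n";          // 42
  try {
    std::stoi(bad);                              // throws
  } catch (const std::invalid_argument&) {
    std::cout << "std::stoi rejected the input\n";
  }
  return 0;
}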
@@ -587,11 +590,13 @@ TEST(DBTest, GetFromImmutableLayer) {
     ASSERT_OK(Put("foo", "v1"));
     ASSERT_EQ("v1", Get("foo"));
-    env_->delay_data_sync_.Release_Store(env_);    // Block sync calls
-    Put("k1", std::string(100000, 'x'));           // Fill memtable
-    Put("k2", std::string(100000, 'y'));           // Trigger compaction
+    // Block sync calls.
+    env_->delay_data_sync_.store(true, std::memory_order_release);
+    Put("k1", std::string(100000, 'x'));  // Fill memtable.
+    Put("k2", std::string(100000, 'y'));  // Trigger compaction.
     ASSERT_EQ("v1", Get("foo"));
-    env_->delay_data_sync_.Release_Store(nullptr);  // Release sync calls
+    // Release sync calls.
+    env_->delay_data_sync_.store(false, std::memory_order_release);
   } while (ChangeOptions());
 }
@@ -608,7 +613,7 @@ TEST(DBTest, GetMemUsage) {
     ASSERT_OK(Put("foo", "v1"));
     std::string val;
     ASSERT_TRUE(db_->GetProperty("leveldb.approximate-memory-usage", &val));
-    int mem_usage = atoi(val.c_str());
+    int mem_usage = std::stoi(val);
     ASSERT_GT(mem_usage, 0);
     ASSERT_LT(mem_usage, 5 * 1024 * 1024);
   } while (ChangeOptions());
@@ -1106,7 +1111,7 @@ TEST(DBTest, RepeatedWritesToSameKey) {
   for (int i = 0; i < 5 * kMaxFiles; i++) {
     Put("key", value);
     ASSERT_LE(TotalTableFiles(), kMaxFiles);
-    fprintf(stderr, "after %d: %d files\n", int(i + 1), TotalTableFiles());
+    fprintf(stderr, "after %d: %d files\n", i + 1, TotalTableFiles());
   }
 }
@@ -1271,7 +1276,7 @@ TEST(DBTest, IteratorPinsRef) {
   // Write to force compactions
   Put("foo", "newvalue1");
   for (int i = 0; i < 100; i++) {
     ASSERT_OK(Put(Key(i), Key(i) + std::string(100000, 'v')));  // 100K values
   }
   Put("foo", "newvalue2");
@@ -1459,21 +1464,21 @@ TEST(DBTest, L0_CompactionBug_Issue44_a) {
 TEST(DBTest, L0_CompactionBug_Issue44_b) {
   Reopen();
   Put("", "");
   Reopen();
   Delete("e");
   Put("", "");
   Reopen();
   Put("c", "cv");
   Reopen();
   Put("", "");
   Reopen();
   Put("", "");
   DelayMilliseconds(1000);  // Wait for compaction to finish
   Reopen();
   Put("d", "dv");
   Reopen();
   Put("", "");
   Reopen();
   Delete("d");
   Delete("b");
@@ -1711,13 +1716,14 @@ TEST(DBTest, NoSpace) {
   ASSERT_EQ("v1", Get("foo"));
   Compact("a", "z");
   const int num_files = CountFiles();
-  env_->no_space_.Release_Store(env_);  // Force out-of-space errors
+  // Force out-of-space errors.
+  env_->no_space_.store(true, std::memory_order_release);
   for (int i = 0; i < 10; i++) {
     for (int level = 0; level < config::kNumLevels - 1; level++) {
       dbfull()->TEST_CompactRange(level, nullptr, nullptr);
     }
   }
-  env_->no_space_.Release_Store(nullptr);
+  env_->no_space_.store(false, std::memory_order_release);
   ASSERT_LT(CountFiles(), num_files + 3);
 }
@@ -1727,7 +1733,8 @@ TEST(DBTest, NonWritableFileSystem) {
   options.env = env_;
   Reopen(&options);
   ASSERT_OK(Put("foo", "v1"));
-  env_->non_writable_.Release_Store(env_);  // Force errors for new files
+  // Force errors for new files.
+  env_->non_writable_.store(true, std::memory_order_release);
   std::string big(100000, 'x');
   int errors = 0;
   for (int i = 0; i < 20; i++) {
@@ -1738,7 +1745,7 @@ TEST(DBTest, NonWritableFileSystem) {
     }
   }
   ASSERT_GT(errors, 0);
-  env_->non_writable_.Release_Store(nullptr);
+  env_->non_writable_.store(false, std::memory_order_release);
 }
 TEST(DBTest, WriteSyncError) {
@@ -1748,7 +1755,7 @@ TEST(DBTest, WriteSyncError) {
   Options options = CurrentOptions();
   options.env = env_;
   Reopen(&options);
-  env_->data_sync_error_.Release_Store(env_);
+  env_->data_sync_error_.store(true, std::memory_order_release);
   // (b) Normal write should succeed
   WriteOptions w;
@@ -1762,7 +1769,7 @@ TEST(DBTest, WriteSyncError) {
   ASSERT_EQ("NOT_FOUND", Get("k2"));
   // (d) make sync behave normally
-  env_->data_sync_error_.Release_Store(nullptr);
+  env_->data_sync_error_.store(false, std::memory_order_release);
   // (e) Do a non-sync write; should fail
   w.sync = false;
@@ -1782,7 +1789,7 @@ TEST(DBTest, ManifestWriteError) {
   // We iterate twice. In the second iteration, everything is the
   // same except the log record never makes it to the MANIFEST file.
   for (int iter = 0; iter < 2; iter++) {
-    port::AtomicPointer* error_type = (iter == 0)
+    std::atomic<bool>* error_type = (iter == 0)
         ? &env_->manifest_sync_error_
         : &env_->manifest_write_error_;
@@ -1802,12 +1809,12 @@ TEST(DBTest, ManifestWriteError) {
     ASSERT_EQ(NumTableFilesAtLevel(last), 1);  // foo=>bar is now in last level
     // Merging compaction (will fail)
-    error_type->Release_Store(env_);
+    error_type->store(true, std::memory_order_release);
     dbfull()->TEST_CompactRange(last, nullptr, nullptr);  // Should fail
     ASSERT_EQ("bar", Get("foo"));
     // Recovery: should not lose data
-    error_type->Release_Store(nullptr);
+    error_type->store(false, std::memory_order_release);
     Reopen(&options);
     ASSERT_EQ("bar", Get("foo"));
   }
@@ -1878,7 +1885,7 @@ TEST(DBTest, BloomFilter) {
   dbfull()->TEST_CompactMemTable();
   // Prevent auto compactions triggered by seeks
-  env_->delay_data_sync_.Release_Store(env_);
+  env_->delay_data_sync_.store(true, std::memory_order_release);
   // Lookup present keys. Should rarely read from small sstable.
   env_->random_read_counter_.Reset();
@@ -1899,7 +1906,7 @@ TEST(DBTest, BloomFilter) {
   fprintf(stderr, "%d missing => %d reads\n", N, reads);
   ASSERT_LE(reads, 3 * N / 100);
-  env_->delay_data_sync_.Release_Store(nullptr);
+  env_->delay_data_sync_.store(false, std::memory_order_release);
   Close();
   delete options.block_cache;
   delete options.filter_policy;
@@ -1914,9 +1921,9 @@ static const int kNumKeys = 1000;
 struct MTState {
   DBTest* test;
-  port::AtomicPointer stop;
-  port::AtomicPointer counter[kNumThreads];
-  port::AtomicPointer thread_done[kNumThreads];
+  std::atomic<bool> stop;
+  std::atomic<int> counter[kNumThreads];
+  std::atomic<bool> thread_done[kNumThreads];
 };
 struct MTThread {
@@ -1928,13 +1935,13 @@ static void MTThreadBody(void* arg) {
   MTThread* t = reinterpret_cast<MTThread*>(arg);
   int id = t->id;
   DB* db = t->state->test->db_;
-  uintptr_t counter = 0;
+  int counter = 0;
   fprintf(stderr, "... starting thread %d\n", id);
   Random rnd(1000 + id);
   std::string value;
   char valbuf[1500];
-  while (t->state->stop.Acquire_Load() == nullptr) {
-    t->state->counter[id].Release_Store(reinterpret_cast<void*>(counter));
+  while (!t->state->stop.load(std::memory_order_acquire)) {
+    t->state->counter[id].store(counter, std::memory_order_release);
     int key = rnd.Uniform(kNumKeys);
     char keybuf[20];
@@ -1959,14 +1966,13 @@ static void MTThreadBody(void* arg) {
         ASSERT_EQ(k, key);
         ASSERT_GE(w, 0);
         ASSERT_LT(w, kNumThreads);
-        ASSERT_LE(static_cast<uintptr_t>(c), reinterpret_cast<uintptr_t>(
-            t->state->counter[w].Acquire_Load()));
+        ASSERT_LE(c, t->state->counter[w].load(std::memory_order_acquire));
       }
     }
     counter++;
   }
-  t->state->thread_done[id].Release_Store(t);
-  fprintf(stderr, "... stopping thread %d after %d ops\n", id, int(counter));
+  t->state->thread_done[id].store(true, std::memory_order_release);
+  fprintf(stderr, "... stopping thread %d after %d ops\n", id, counter);
 }
 }  // namespace
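The multi-threaded test above relies on a simple publish/observe protocol: each writer thread release-stores its progress counter, and readers acquire-load it and check that it never goes backwards. A minimal standalone sketch of that protocol with std::atomic (not part of the patch; names are illustrative):

#include <atomic>
#include <cassert>
#include <thread>

int main() {
  std::atomic<int> counter{0};
  std::atomic<bool> done{false};

  std::thread writer([&] {
    for (int i = 0; i < 100000; i++) {
      counter.store(i + 1, std::memory_order_release);  // Publish progress.
    }
    done.store(true, std::memory_order_release);
  });

  int last_seen = 0;
  while (!done.load(std::memory_order_acquire)) {
    const int c = counter.load(std::memory_order_acquire);
    assert(c >= last_seen);  // Progress is monotonically non-decreasing.
    last_seen = c;
  }
  writer.join();
  return 0;
}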
@@ -1976,10 +1982,10 @@ TEST(DBTest, MultiThreaded) {
   // Initialize state
   MTState mt;
   mt.test = this;
-  mt.stop.Release_Store(0);
+  mt.stop.store(false, std::memory_order_release);
   for (int id = 0; id < kNumThreads; id++) {
-    mt.counter[id].Release_Store(0);
-    mt.thread_done[id].Release_Store(0);
+    mt.counter[id].store(0, std::memory_order_release);
+    mt.thread_done[id].store(false, std::memory_order_release);
   }
   // Start threads
@@ -1994,9 +2000,9 @@ TEST(DBTest, MultiThreaded) {
   DelayMilliseconds(kTestSeconds * 1000);
   // Stop the threads and wait for them to finish
-  mt.stop.Release_Store(&mt);
+  mt.stop.store(true, std::memory_order_release);
   for (int id = 0; id < kNumThreads; id++) {
-    while (mt.thread_done[id].Acquire_Load() == nullptr) {
+    while (!mt.thread_done[id].load(std::memory_order_acquire)) {
       DelayMilliseconds(100);
     }
   }
@@ -2100,6 +2106,7 @@ class ModelDB: public DB {
     virtual Slice key() const { return iter_->first; }
     virtual Slice value() const { return iter_->second; }
     virtual Status status() const { return Status::OK(); }
+
    private:
     const KVMap* const map_;
     const bool owned_;  // Do we own map_