diff --git a/README.md b/README.md index 05ee763..3bc814d 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,49 @@ # 实验报告 仓库地址 https://gitea.shuishan.net.cn/10225501448/leveldb_proj2 -新建文件时 cmakelist 120行下面记得加进去 \ No newline at end of file +# 1. 项目概述 +leveldb中的存储原本只支持简单的字节序列,在这个项目中我们对其功能进行拓展,使其可以包含多个字段,并通过这些字段实现类似数据库列查询的功能。但如果仅通过字段查找数据,需要对整个数据库的遍历,不够高效,因此还要新增二级索引,提高对特定字段的查询效率。 + +# 2. 功能实现 +## 2.1 字段 +设计目标:对value存储读取时进行序列化编码,使其支持字段。 + +实现思路:设计之初有考虑增加一些元数据(例如过滤器、字段偏移支持二分)来加速查询。但考虑到在数据库中kv的数量是十分庞大的,新加数据结构会带来巨大的空间开销。因此我们决定在这里牺牲时间换取空间,而将时间的加速放在索引中。 +在这一基础上,我们对序列化进行了简单的优化:将字段名排序后,一一调用leveldb中原本的编码方法`PutLengthPrefixedSlice`存入value。这样不会有额外的空间开销,而好处在于遍历一个value的字段时,如果得到的字段名比目标大,就可以提前结束遍历。 +``` +std::string SerializeValue(const FieldArray& fields){ + std::sort(sortFields.begin(), sortFields.end(), compareByFirst); + for (const Field& pairs : sortFields) { + PutLengthPrefixedSlice(&result, pairs.first); + PutLengthPrefixedSlice(&result, pairs.second); + } + return result; +} +``` +最终db类提供了新接口`putFields`, `getFields`,分别对传入的字段序列化后调用原来的`put`, `get`接口。 +`FindKeysByField`调用`NewIterator`遍历所有数据,field名和值符合则加入返回的key中。 +**这一部分的具体代码在util/serialize_value.cc中** + +## 2.2 二级索引 +设计目标:对某个字段(属性)建立索引,提高对该字段的查询效率。 + +### 2.2.1 总体架构 +fielddb + +### 2.2.2 如何并发创删索引与读写 +request + +### 2.2.3 如何保证两个kv与index的一致性 +metadb + +# 3. 测试 +## 3.1 正确性测试 + +## 3.2 性能测试 +测试、分析、优化 + +# 4. 问题与解决 + +# 5. 潜在优化点 + +# 6. 
分工 diff --git a/benchmarks/db_bench_FieldDB.cc b/benchmarks/db_bench_FieldDB.cc index 9b032a7..65e980a 100644 --- a/benchmarks/db_bench_FieldDB.cc +++ b/benchmarks/db_bench_FieldDB.cc @@ -63,7 +63,6 @@ static const char* FLAGS_benchmarks = "readreverse," "fill100K," "crc32c," - "readwhilewriting," "CreateIndex," "FindKeysByField," "QueryByIndex," @@ -80,11 +79,7 @@ static const char* FLAGS_benchmarks = "ReadRandomWhileCreating," "ReadRandomWhileDeleting," "WriteRandomWithIndex," - "WriteSeqWithIndex," - "snappycomp," - "snappyuncomp," - "zstdcomp," - "zstduncomp,"; + "WriteSeqWithIndex,"; // Number of key/values to place in database static int FLAGS_num = 1000000; @@ -1175,17 +1170,7 @@ class Benchmark { std::fprintf(stderr, "index status error in WriteWhileCreating\n"); std::exit(1); } - - while (true) { - if (db_->GetIndexStatus("age") == IndexStatus::Exist) { - break; - } - - db_->CreateIndexOnField("age", write_options_); - } - - // Do not count any of the preceding work/delay in stats. - thread->stats.Start(); + db_->CreateIndexOnField("age", write_options_); } } @@ -1198,17 +1183,7 @@ class Benchmark { std::fprintf(stderr, "index status error in WriteWhileDeleting\n"); std::exit(1); } - - while (true) { - if (db_->GetIndexStatus("age") == IndexStatus::NotExist) { - break; - } - - db_->DeleteIndex("age", write_options_); - } - - // Do not count any of the preceding work/delay in stats. - thread->stats.Start(); + db_->DeleteIndex("age", write_options_); } } @@ -1221,17 +1196,7 @@ class Benchmark { std::fprintf(stderr, "index status error in WriteWhileCreating\n"); std::exit(1); } - - while (true) { - if (db_->GetIndexStatus("age") == IndexStatus::Exist) { - break; - } - - db_->CreateIndexOnField("age", write_options_); - } - - // Do not count any of the preceding work/delay in stats. 
- thread->stats.Start(); + db_->CreateIndexOnField("age", write_options_); } } @@ -1244,109 +1209,129 @@ class Benchmark { std::fprintf(stderr, "index status error in WriteWhileDeleting\n"); std::exit(1); } - - while (true) { - if (db_->GetIndexStatus("age") == IndexStatus::NotExist) { - break; - } - - db_->DeleteIndex("age", write_options_); - } - - // Do not count any of the preceding work/delay in stats. - thread->stats.Start(); + db_->DeleteIndex("age", write_options_); } } void ReadSeqWhileCreating(ThreadState* thread) { if (thread->tid > 0) { - ReadSequential(thread); + Iterator* iter = db_->NewIterator(ReadOptions()); + iter->SeekToFirst(); + int64_t bytes = 0; + while (true) { + { + MutexLock l(&thread->shared->mu); + if (thread->shared->num_done == 1) { + // 创删索引完成 + delete iter; + thread->stats.AddBytes(bytes); + break; + } + } + bytes += iter->key().size() + iter->value().size(); + thread->stats.FinishedSingleOp(); + iter->Next(); + if (!iter->Valid()) iter->SeekToFirst(); + } } else { // Special thread that keeps creating index until other threads are done. if (db_->GetIndexStatus("age") != IndexStatus::NotExist) { std::fprintf(stderr, "index status error in WriteWhileCreating\n"); std::exit(1); } - - while (true) { - if (db_->GetIndexStatus("age") == IndexStatus::Exist) { - break; - } - - db_->CreateIndexOnField("age", write_options_); - } - - // Do not count any of the preceding work/delay in stats. 
- thread->stats.Start(); + db_->CreateIndexOnField("age", write_options_); } } void ReadSeqWhileDeleting(ThreadState* thread) { if (thread->tid > 0) { - ReadSequential(thread); + Iterator* iter = db_->NewIterator(ReadOptions()); + iter->SeekToFirst(); + int64_t bytes = 0; + while (true) { + { + MutexLock l(&thread->shared->mu); + if (thread->shared->num_done == 1) { + // 创删索引完成 + delete iter; + thread->stats.AddBytes(bytes); + break; + } + } + bytes += iter->key().size() + iter->value().size(); + thread->stats.FinishedSingleOp(); + iter->Next(); + if (!iter->Valid()) iter->SeekToFirst(); + } } else { // Special thread that keeps creating index until other threads are done. if (db_->GetIndexStatus("age") != IndexStatus::Exist) { std::fprintf(stderr, "index status error in WriteWhileDeleting\n"); std::exit(1); } - - while (true) { - if (db_->GetIndexStatus("age") == IndexStatus::NotExist) { - break; - } - - db_->DeleteIndex("age", write_options_); - } - - // Do not count any of the preceding work/delay in stats. - thread->stats.Start(); + db_->DeleteIndex("age", write_options_); } } void ReadRandomWhileCreating(ThreadState* thread) { if (thread->tid > 0) { - ReadRandom(thread); + ReadOptions options; + int found = 0; + KeyBuffer key; + while (true) { + { + MutexLock l(&thread->shared->mu); + if (thread->shared->num_done == 1) { + // 创删索引完成 + break; + } + } + const int k = thread->rand.Uniform(FLAGS_num); + key.Set(k); + FieldArray fields_ret; + if (db_->GetFields(options, key.slice(), &fields_ret).ok()) { + found++; + } + thread->stats.FinishedSingleOp(); + } } else { // Special thread that keeps creating index until other threads are done. 
if (db_->GetIndexStatus("age") != IndexStatus::NotExist) { std::fprintf(stderr, "index status error in WriteWhileCreating\n"); std::exit(1); } - - while (true) { - if (db_->GetIndexStatus("age") == IndexStatus::Exist) { - break; - } - - db_->CreateIndexOnField("age", write_options_); - } - - // Do not count any of the preceding work/delay in stats. - thread->stats.Start(); + db_->CreateIndexOnField("age", write_options_); } } void ReadRandomWhileDeleting(ThreadState* thread) { if (thread->tid > 0) { - ReadRandom(thread); + ReadOptions options; + int found = 0; + KeyBuffer key; + while (true) { + { + MutexLock l(&thread->shared->mu); + if (thread->shared->num_done == 1) { + // 创删索引完成 + break; + } + } + const int k = thread->rand.Uniform(FLAGS_num); + key.Set(k); + FieldArray fields_ret; + if (db_->GetFields(options, key.slice(), &fields_ret).ok()) { + found++; + } + thread->stats.FinishedSingleOp(); + } } else { // Special thread that keeps creating index until other threads are done. if (db_->GetIndexStatus("age") != IndexStatus::Exist) { std::fprintf(stderr, "index status error in WriteWhileDeleting\n"); std::exit(1); } - - while (true) { - if (db_->GetIndexStatus("age") == IndexStatus::NotExist) { - break; - } - - db_->DeleteIndex("age", write_options_); - } - - // Do not count any of the preceding work/delay in stats. 
- thread->stats.Start(); + db_->DeleteIndex("age", write_options_); } } }; diff --git a/fielddb/field_db.cpp b/fielddb/field_db.cpp index 346c8c9..1e8cafe 100644 --- a/fielddb/field_db.cpp +++ b/fielddb/field_db.cpp @@ -125,18 +125,18 @@ Request *FieldDB::GetHandleInterval() { } Status FieldDB::HandleRequest(Request &req, const WriteOptions &op) { - uint64_t start_ = env_->NowMicros(); + //uint64_t start_ = env_->NowMicros(); MutexLock L(&mutex_); taskqueue_.push_back(&req); while(true){ - uint64_t start_waiting = env_->NowMicros(); + //uint64_t start_waiting = env_->NowMicros(); while(req.isPending() || !req.done && &req != taskqueue_.front()) { req.cond_.Wait(); } - waiting_elasped += env_->NowMicros() - start_waiting; + //waiting_elasped += env_->NowMicros() - start_waiting; if(req.done) { - elapsed += env_->NowMicros() - start_; - count ++; + //elapsed += env_->NowMicros() - start_; + //count ++; // dumpStatistics(); return req.s; //在返回时自动释放锁L } @@ -149,48 +149,48 @@ Status FieldDB::HandleRequest(Request &req, const WriteOptions &op) { { //1. 构建各个Batch。构建的过程中要保证索引状态的一致性,需要上锁。 MutexLock iL(&index_mu); - uint64_t start_construct = env_->NowMicros(); + //uint64_t start_construct = env_->NowMicros(); for(auto *req_ptr : taskqueue_) { req_ptr->ConstructBatch(KVBatch, IndexBatch, MetaBatch, this, batchKeySet); if(req_ptr == tail) break; } - construct_elapsed += env_->NowMicros() - start_construct; + //construct_elapsed += env_->NowMicros() - start_construct; } //2. 
首先写入meta,再并发写入index和kv,完成之后清除meta数据 //此处可以放锁是因为写入的有序性可以通过队列来保证 mutex_.Unlock(); - uint64_t start_write = env_->NowMicros(); + //uint64_t start_write = env_->NowMicros(); if(MetaBatch.ApproximateSize() > 12) { - uint64_t start_meta = env_->NowMicros(); + //uint64_t start_meta = env_->NowMicros(); status = metaDB_->Write(op, &MetaBatch); - write_meta_elapsed += env_->NowMicros() - start_meta; - write_bytes += MetaBatch.ApproximateSize(); + //write_meta_elapsed += env_->NowMicros() - start_meta; + //write_bytes += MetaBatch.ApproximateSize(); assert(status.ok()); } //TODO:index的写入需要在另外一个线程中同时完成 if(IndexBatch.ApproximateSize() > 12) { - uint64_t start_index = env_->NowMicros(); + //uint64_t start_index = env_->NowMicros(); status = indexDB_->Write(op, &IndexBatch); - write_index_elapsed += env_->NowMicros() - start_index; - write_bytes += IndexBatch.ApproximateSize(); + //write_index_elapsed += env_->NowMicros() - start_index; + //write_bytes += IndexBatch.ApproximateSize(); assert(status.ok()); } if(KVBatch.ApproximateSize() > 12) { - uint64_t start_kv = env_->NowMicros(); + //uint64_t start_kv = env_->NowMicros(); status = kvDB_->Write(op, &KVBatch); - write_kv_elapsed += env_->NowMicros() - start_kv; - write_bytes += KVBatch.ApproximateSize(); + //write_kv_elapsed += env_->NowMicros() - start_kv; + //write_bytes += KVBatch.ApproximateSize(); assert(status.ok()); } //3. 
将meta数据清除 if(MetaBatch.ApproximateSize() > 12) { - uint64_t start_clean = env_->NowMicros(); + //uint64_t start_clean = env_->NowMicros(); MetaCleaner cleaner; cleaner.Collect(MetaBatch); cleaner.CleanMetaBatch(metaDB_); - write_clean_elapsed += env_->NowMicros() - start_clean; + //write_clean_elapsed += env_->NowMicros() - start_clean; } - write_elapsed += env_->NowMicros() - start_write; + //write_elapsed += env_->NowMicros() - start_write; mutex_.Lock(); } else { //对于创建和删除索引的请求,通过prepare完成索引状态的更新 @@ -263,9 +263,9 @@ Status FieldDB::Write(const WriteOptions &options, WriteBatch *updates) { // dumpStatistics(); // return status; // } - uint64_t start_ = env_->NowMicros(); + //uint64_t start_ = env_->NowMicros(); BatchReq req(updates,&mutex_); - construct_BatchReq_init_elapsed += env_->NowMicros() - start_; + //construct_BatchReq_init_elapsed += env_->NowMicros() - start_; Status status = HandleRequest(req, options); return status; } diff --git a/fielddb/field_db.h b/fielddb/field_db.h index 2cd458f..ed9b87f 100644 --- a/fielddb/field_db.h +++ b/fielddb/field_db.h @@ -91,60 +91,60 @@ private: Status HandleRequest(Request &req, const WriteOptions &op); //每个请求自行构造请求后交由这个函数处理 Request *GetHandleInterval(); //获得任务队列中的待处理区间,区间划分规则和原因见文档 -private: - int count = 0; - int count_Batch = 0; - int count_Batch_Sub = 0; - uint64_t elapsed = 0; - - uint64_t construct_elapsed = 0; - uint64_t construct_BatchReq_init_elapsed = 0; - uint64_t construct_BatchReq_elapsed = 0; - uint64_t construct_BatchReq_Sub_elapsed = 0; - uint64_t construct_BatchReq_perSub_elapsed = 0; - uint64_t construct_FieldsReq_Read_elapsed = 0; - - uint64_t write_elapsed = 0; - uint64_t write_meta_elapsed = 0; - uint64_t write_index_elapsed = 0; - uint64_t write_kv_elapsed = 0; - uint64_t write_clean_elapsed = 0; - - uint64_t write_bytes = 0; - uint64_t write_step = 500 * 1024 * 1024; - uint64_t write_bytes_lim = write_step; - - uint64_t temp_elapsed = 0; - - uint64_t waiting_elasped = 0; - - inline void 
dumpStatistics() { - if(count && count % 500000 == 0 || write_bytes && write_bytes > write_bytes_lim) { - std::cout << "=====================================================\n"; - std::cout << "Total Count : " << count; - std::cout << "\tTotal Write Bytes(MB) : " << write_bytes / 1048576.0 << std::endl; - std::cout << "Average Time(ms) : " << elapsed * 1.0 / count; - std::cout << "\tAverage Write rates(MB/s) : " << write_bytes / 1048576.0 / elapsed * 1000000 << std::endl; - std::cout << "Construct Time(ms) : " << construct_elapsed * 1.0 / count << std::endl; - std::cout << "\tConstruct BatchReq Init Time(ms) : " << construct_BatchReq_init_elapsed * 1.0 / count << std::endl; - std::cout << "\tConstruct BatchReq Time(ms) : " << construct_BatchReq_elapsed * 1.0 / count << std::endl; - std::cout << "\tConstruct BatchReq Sub Time(ms) : " << construct_BatchReq_Sub_elapsed * 1.0 / count << std::endl; - std::cout << "\tConstruct BatchReq perSub Time(ms) : " << construct_BatchReq_perSub_elapsed * 1.0 / count_Batch_Sub << std::endl; - std::cout << "\tConstruct FieldsReq Read Time(ms) : " << construct_FieldsReq_Read_elapsed * 1.0 / count << std::endl; - std::cout << "Write Time(ms) : " << write_elapsed * 1.0 / count << std::endl; - std::cout << "\tWrite Meta Time(ms) : " << write_meta_elapsed * 1.0 / count << std::endl; - std::cout << "\tWrite Index Time(ms) : " << write_index_elapsed * 1.0 / count << std::endl; - std::cout << "\tWrite KV Time(ms) : " << write_kv_elapsed * 1.0 / count << std::endl; - std::cout << "\tWrite Clean Time(ms) : " << write_clean_elapsed * 1.0 / count << std::endl; - std::cout << "TaskQueue Size : " << taskqueue_.size() << std::endl; - std::cout << "temp_elased : " << temp_elapsed * 1.0 / count << std::endl; - std::cout << "waiting elapsed : " << waiting_elasped * 1.0 / count << std::endl; - // std::cout << MetaBatch.ApproximateSize() << " " << IndexBatch.ApproximateSize() << " " << KVBatch.ApproximateSize() << std::endl; - std::cout << 
"=====================================================\n"; - write_bytes_lim = write_bytes + write_step; - std::fflush(stdout); - } - } +// private: +// int count = 0; +// int count_Batch = 0; +// int count_Batch_Sub = 0; +// uint64_t elapsed = 0; + +// uint64_t construct_elapsed = 0; +// uint64_t construct_BatchReq_init_elapsed = 0; +// uint64_t construct_BatchReq_elapsed = 0; +// uint64_t construct_BatchReq_Sub_elapsed = 0; +// uint64_t construct_BatchReq_perSub_elapsed = 0; +// uint64_t construct_FieldsReq_Read_elapsed = 0; + +// uint64_t write_elapsed = 0; +// uint64_t write_meta_elapsed = 0; +// uint64_t write_index_elapsed = 0; +// uint64_t write_kv_elapsed = 0; +// uint64_t write_clean_elapsed = 0; + +// uint64_t write_bytes = 0; +// uint64_t write_step = 500 * 1024 * 1024; +// uint64_t write_bytes_lim = write_step; + +// uint64_t temp_elapsed = 0; + +// uint64_t waiting_elasped = 0; + +// inline void dumpStatistics() { +// if(count && count % 500000 == 0 || write_bytes && write_bytes > write_bytes_lim) { +// std::cout << "=====================================================\n"; +// std::cout << "Total Count : " << count; +// std::cout << "\tTotal Write Bytes(MB) : " << write_bytes / 1048576.0 << std::endl; +// std::cout << "Average Time(ms) : " << elapsed * 1.0 / count; +// std::cout << "\tAverage Write rates(MB/s) : " << write_bytes / 1048576.0 / elapsed * 1000000 << std::endl; +// std::cout << "Construct Time(ms) : " << construct_elapsed * 1.0 / count << std::endl; +// std::cout << "\tConstruct BatchReq Init Time(ms) : " << construct_BatchReq_init_elapsed * 1.0 / count << std::endl; +// std::cout << "\tConstruct BatchReq Time(ms) : " << construct_BatchReq_elapsed * 1.0 / count << std::endl; +// std::cout << "\tConstruct BatchReq Sub Time(ms) : " << construct_BatchReq_Sub_elapsed * 1.0 / count << std::endl; +// std::cout << "\tConstruct BatchReq perSub Time(ms) : " << construct_BatchReq_perSub_elapsed * 1.0 / count_Batch_Sub << std::endl; +// std::cout << 
"\tConstruct FieldsReq Read Time(ms) : " << construct_FieldsReq_Read_elapsed * 1.0 / count << std::endl; +// std::cout << "Write Time(ms) : " << write_elapsed * 1.0 / count << std::endl; +// std::cout << "\tWrite Meta Time(ms) : " << write_meta_elapsed * 1.0 / count << std::endl; +// std::cout << "\tWrite Index Time(ms) : " << write_index_elapsed * 1.0 / count << std::endl; +// std::cout << "\tWrite KV Time(ms) : " << write_kv_elapsed * 1.0 / count << std::endl; +// std::cout << "\tWrite Clean Time(ms) : " << write_clean_elapsed * 1.0 / count << std::endl; +// std::cout << "TaskQueue Size : " << taskqueue_.size() << std::endl; +// std::cout << "temp_elased : " << temp_elapsed * 1.0 / count << std::endl; +// std::cout << "waiting elapsed : " << waiting_elasped * 1.0 / count << std::endl; +// // std::cout << MetaBatch.ApproximateSize() << " " << IndexBatch.ApproximateSize() << " " << KVBatch.ApproximateSize() << std::endl; +// std::cout << "=====================================================\n"; +// write_bytes_lim = write_bytes + write_step; +// std::fflush(stdout); +// } +// } }; Status DestroyDB(const std::string& name, diff --git a/fielddb/request.cpp b/fielddb/request.cpp index 9a53a6a..a614f7b 100644 --- a/fielddb/request.cpp +++ b/fielddb/request.cpp @@ -56,10 +56,9 @@ void FieldsReq::ConstructBatch(WriteBatch &KVBatch,WriteBatch &IndexBatch, batchKeySet.insert(Key); } std::string val_str; - Status s = Status::NotFound("test"); - uint64_t start_ = DB->env_->NowMicros(); + //uint64_t start_ = DB->env_->NowMicros(); s = DB->kvDB_->Get(ReadOptions(), Key, &val_str); - DB->construct_FieldsReq_Read_elapsed += DB->env_->NowMicros() - start_; + //DB->construct_FieldsReq_Read_elapsed += DB->env_->NowMicros() - start_; // FieldArray *oldFields; FieldSliceArray oldFields; if (s.IsNotFound()){ @@ -409,20 +408,20 @@ void BatchReq::ConstructBatch(WriteBatch &KVBatch,WriteBatch &IndexBatch, WriteBatch Sub_KVBatch,Sub_IndexBatch,Sub_MetaBatch; SliceHashSet Sub_batchKeySet; 
//由于batch是有顺序的,根据我们现在的一个key只处理最开始的算法,这里需要反向迭代 - uint64_t start_ = DB->env_->NowMicros(); + //uint64_t start_ = DB->env_->NowMicros(); for(auto subreq = sub_requests.rbegin(); subreq != sub_requests.rend(); subreq++ ) { - uint64_t start_sub = DB->env_->NowMicros(); + //uint64_t start_sub = DB->env_->NowMicros(); (*subreq)->ConstructBatch(Sub_KVBatch, Sub_IndexBatch, Sub_MetaBatch, DB, Sub_batchKeySet); // (*subreq)->ConstructBatch(KVBatch, IndexBatch, MetaBatch, DB, batchKeySet); - DB->construct_BatchReq_perSub_elapsed += DB->env_->NowMicros() - start_sub; - DB->count_Batch_Sub ++; + //DB->construct_BatchReq_perSub_elapsed += DB->env_->NowMicros() - start_sub; + //DB->count_Batch_Sub ++; //所有的对于pendreq的调用传入的参数被改成了this->parent,因此,对于subrequests来说, //pendreq的传参为对应的Batchreq,因此,此处判断batchreq是否pending可以得到subreq是否有冲突 if(isPending()) { return; } } - DB->construct_BatchReq_Sub_elapsed += DB->env_->NowMicros() - start_; + //DB->construct_BatchReq_Sub_elapsed += DB->env_->NowMicros() - start_; if(Sub_KVBatch.ApproximateSize() > 12) { KVBatch.Append(Sub_KVBatch); } @@ -433,7 +432,7 @@ void BatchReq::ConstructBatch(WriteBatch &KVBatch,WriteBatch &IndexBatch, MetaBatch.Append(Sub_MetaBatch); } batchKeySet.insert(Sub_batchKeySet.begin(),Sub_batchKeySet.end()); - DB->construct_BatchReq_elapsed += DB->env_->NowMicros() - start_; + //DB->construct_BatchReq_elapsed += DB->env_->NowMicros() - start_; } diff --git a/test/recover_test.cc b/test/recover_test.cc index de06aea..d2b104d 100644 --- a/test/recover_test.cc +++ b/test/recover_test.cc @@ -45,8 +45,8 @@ TEST(TestParalRecover, Recover) { // std::cerr << "open db failed" << std::endl; // abort(); // } - // db->CreateIndexOnField("address"); - // db->CreateIndexOnField("age"); + // db->CreateIndexOnField("address", op); + // db->CreateIndexOnField("age", op); // int thread_num_ = 4; // std::vector threads(thread_num_); // threads[0] = std::thread([db](){ @@ -80,6 +80,7 @@ TEST(TestParalRecover, Recover) { } GetOneField(db); 
checkDataInKVAndIndex(db); + //这里会出现两个数字,如果>1说明除了线程3插入的一条数据,其他线程也有数据在崩溃前被正确恢复了 } int main(int argc, char** argv) {