Browse source code

Merge branch 'cyq' into ld

pull/2/head
augurier 8 months ago
parent
commit
791c69262b
15 changed files with 2645 additions and 29 deletions
  1. .gitignore (+3, -1)
  2. CMakeLists.txt (+4, -0)
  3. benchmarks/db_bench.cc (+6, -2)
  4. benchmarks/db_bench_FieldDB.cc (+1144, -0)
  5. benchmarks/db_bench_testDB.cc (+1145, -0)
  6. fielddb/field_db.cpp (+70, -13)
  7. fielddb/field_db.h (+58, -4)
  8. fielddb/meta.cpp (+2, -1)
  9. fielddb/request.cpp (+22, -6)
  10. include/leveldb/env.h (+2, -0)
  11. testdb/testdb.cc (+111, -0)
  12. testdb/testdb.h (+72, -0)
  13. util/env_posix.cc (+4, -0)
  14. util/serialize_value.cc (+1, -1)
  15. util/serialize_value.h (+1, -1)

.gitignore (+3, -1)  View File

@@ -9,4 +9,6 @@ out/
# clangd
.cache/
compile_commands.json
compile_commands.json
benchmark-result/

CMakeLists.txt (+4, -0)  View File

@@ -198,6 +198,8 @@ target_sources(leveldb
"fielddb/meta.h"
"fielddb/request.cpp"
"fielddb/request.h"
"testdb/testdb.cc"
"testdb/testdb.h"
# Only CMake 3.3+ supports PUBLIC sources in targets exported by "install".
$<$<VERSION_GREATER:CMAKE_VERSION,3.2>:PUBLIC>
@@ -447,6 +449,8 @@ if(LEVELDB_BUILD_BENCHMARKS)
if(NOT BUILD_SHARED_LIBS)
leveldb_benchmark("benchmarks/db_bench.cc")
leveldb_benchmark("benchmarks/db_bench_FieldDB.cc")
leveldb_benchmark("benchmarks/db_bench_testDB.cc")
endif(NOT BUILD_SHARED_LIBS)
check_library_exists(sqlite3 sqlite3_open "" HAVE_SQLITE3)

benchmarks/db_bench.cc (+6, -2)  View File

@@ -325,8 +325,8 @@ class Stats {
// elapsed times.
double elapsed = (finish_ - start_) * 1e-6;
char rate[100];
std::snprintf(rate, sizeof(rate), "%6.1f MB/s",
(bytes_ / 1048576.0) / elapsed);
std::snprintf(rate, sizeof(rate), "%6.1f MB/s  MB:%6.1f  elapsed(s):%6.1f  seconds:%6.1f ",
(bytes_ / 1048576.0) / elapsed, (bytes_ / 1048576.0), elapsed, seconds_);
extra = rate;
}
AppendWithSpace(&extra, message_);
@@ -737,6 +737,10 @@ class Benchmark {
}
shared.mu.Unlock();
// for(int i = 0; i < n; i++) {
// arg[i].thread->stats.Report(name.ToString() + "thread:" + std::to_string(i));
// }
for (int i = 1; i < n; i++) {
arg[0].thread->stats.Merge(arg[i].thread->stats);
}
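The merge loop above folds every worker thread's counters into arg[0] before a single combined line is reported. A minimal sketch of the aggregation this relies on, using a hypothetical MergedStats type (db_bench's Stats::Merge sums bytes/seconds and widens the start/finish window, which is why the extended rate line prints wall-clock elapsed and summed per-thread seconds side by side):

// Hypothetical MergedStats type; only a sketch of the semantics the loop
// above depends on, not the real Stats class.
#include <algorithm>
#include <cstdio>

struct MergedStats {
  double start_us = 0, finish_us = 0;  // per-thread window, microseconds
  double seconds = 0;                  // measured per-thread time, seconds
  double bytes = 0;

  void Merge(const MergedStats& o) {
    bytes += o.bytes;
    seconds += o.seconds;                       // summed across threads
    start_us = std::min(start_us, o.start_us);  // wall-clock window widens
    finish_us = std::max(finish_us, o.finish_us);
  }
  void Report() const {
    double elapsed = (finish_us - start_us) * 1e-6;  // wall-clock seconds
    std::printf("%6.1f MB/s  MB:%6.1f  elapsed(s):%6.1f  seconds:%6.1f\n",
                (bytes / 1048576.0) / elapsed, bytes / 1048576.0, elapsed, seconds);
  }
};

int main() {
  MergedStats t0{0, 2e6, 1.8, 50 << 20};      // thread 0: 50 MiB, ~1.8 s of work
  MergedStats t1{1e5, 2.1e6, 1.9, 50 << 20};  // thread 1: 50 MiB, ~1.9 s of work
  t0.Merge(t1);
  t0.Report();  // ~47.6 MB/s over 2.1 s wall time, 3.7 thread-seconds
}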

benchmarks/db_bench_FieldDB.cc (+1144, -0)  View File
File diff suppressed because it is too large


benchmarks/db_bench_testDB.cc (+1145, -0)  View File
File diff suppressed because it is too large


fielddb/field_db.cpp (+70, -13)  View File

@@ -1,9 +1,14 @@
#include "fielddb/field_db.h"
#include <climits>
#include <cstdint>
#include <cstdio>
#include <iostream>
#include <string>
#include <sys/types.h>
#include <utility>
#include <vector>
#include "leveldb/c.h"
#include "leveldb/cache.h"
#include "leveldb/db.h"
#include "leveldb/env.h"
#include "leveldb/iterator.h"
@@ -22,7 +27,7 @@
namespace fielddb {
using namespace leveldb;
//TODO: open the FieldDB
Status FieldDB::OpenFieldDB(const Options& options,
Status FieldDB::OpenFieldDB(Options& options,
const std::string& name, FieldDB** dbptr) {
// options.env->CreateDir("./abc")
if(*dbptr == nullptr){
@@ -32,11 +37,18 @@ Status FieldDB::OpenFieldDB(const Options& options,
//
Status status;
DB *indexdb, *kvdb, *metadb;
// options.block_cache = NewLRUCache(ULONG_MAX);
// options.max_open_files = 1000;
// options.write_buffer_size = 512 * 1024 * 1024;
// options.env = getPosixEnv();
status = Open(options, name+"_indexDB", &indexdb);
if(!status.ok()) return status;
// options.env = getPosixEnv();
status = Open(options, name+"_kvDB", &kvdb);
if(!status.ok()) return status;
// options.env = getPosixEnv();
status = Open(options, name+"_metaDB", &metadb);
if(!status.ok()) return status;
@@ -45,7 +57,7 @@ Status FieldDB::OpenFieldDB(const Options& options,
(*dbptr)->metaDB_ = metadb;
(*dbptr)->dbname_ = name;
status = (*dbptr)->Recover();
// status = (*dbptr)->Recover();
(*dbptr)->options_ = &options;
(*dbptr)->env_ = options.env;
@@ -118,6 +130,7 @@ Request *FieldDB::GetHandleInterval() {
}
Status FieldDB::HandleRequest(Request &req) {
uint64_t start_ = env_->NowMicros();
MutexLock L(&mutex_);
taskqueue_.push_back(&req);
Again:
@@ -136,33 +149,61 @@ Again:
{
//1. Construct each batch. A lock is required while building them to keep the index state consistent.
MutexLock iL(&index_mu);
uint64_t start_construct = env_->NowMicros();
for(auto *req_ptr : taskqueue_) {
req_ptr->ConstructBatch(KVBatch, IndexBatch, MetaBatch, this, batchKeySet);
if(req_ptr == tail) break;
}
construct_elapsed += env_->NowMicros() - start_construct;
}
//2. Write meta first, then write index and kv concurrently; clear the meta data once that is done.
//The lock can be released here because write ordering is already guaranteed by the queue.
mutex_.Unlock();
uint64_t start_write = env_->NowMicros();
WriteOptions op;
status = metaDB_->Write(op, &MetaBatch);
assert(status.ok());
if(MetaBatch.ApproximateSize() > 12) {
uint64_t start_meta = env_->NowMicros();
status = metaDB_->Write(op, &MetaBatch);
write_meta_elapsed += env_->NowMicros() - start_meta;
write_bytes += MetaBatch.ApproximateSize();
assert(status.ok());
}
//TODO: the index write should happen concurrently in a separate thread
status = indexDB_->Write(op, &IndexBatch);
assert(status.ok());
status = kvDB_->Write(op, &KVBatch);
assert(status.ok());
if(IndexBatch.ApproximateSize() > 12) {
uint64_t start_index = env_->NowMicros();
status = indexDB_->Write(op, &IndexBatch);
write_index_elapsed += env_->NowMicros() - start_index;
write_bytes += IndexBatch.ApproximateSize();
assert(status.ok());
}
if(KVBatch.ApproximateSize() > 12) {
uint64_t start_kv = env_->NowMicros();
status = kvDB_->Write(op, &KVBatch);
write_kv_elapsed += env_->NowMicros() - start_kv;
write_bytes += KVBatch.ApproximateSize();
assert(status.ok());
}
//3. Clear the meta data
MetaCleaner cleaner;
cleaner.Collect(MetaBatch);
cleaner.CleanMetaBatch(metaDB_);
if(MetaBatch.ApproximateSize() > 12) {
uint64_t start_clean = env_->NowMicros();
MetaCleaner cleaner;
cleaner.Collect(MetaBatch);
cleaner.CleanMetaBatch(metaDB_);
write_clean_elapsed += env_->NowMicros() - start_clean;
}
write_elapsed += env_->NowMicros() - start_write;
mutex_.Lock();
} else {
//For index create/delete requests, the index state update is completed via Prepare
MutexLock iL(&index_mu);
req.Prepare(this);
}
// {
// static int count = 0;
// if(count++ % 100000 == 0) {
// std::cout << "TaskQueue Size : " << taskqueue_.size() << std::endl;
// }
// }
while(true) {
Request *ready = taskqueue_.front();
// int debug = tail->type_;
@@ -175,6 +216,11 @@ Again:
}
if (ready == tail) break;
}
elapsed += env_->NowMicros() - start_;
count ++;
dumpStatistics();
if(!taskqueue_.empty()) {
taskqueue_.front()->cond_.Signal();
}
@@ -218,8 +264,19 @@ Status FieldDB::Delete(const WriteOptions &options, const Slice &key) {
}
// TODO: depending on what is in updates, handle separately whether the index needs to be updated
Status FieldDB::Write(const WriteOptions &options, WriteBatch *updates) {
{
uint64_t start_ = env_->NowMicros();
Status status = kvDB_->Write(options, updates);
temp_elapsed += env_->NowMicros() - start_;
count ++;
dumpStatistics();
return status;
}
//Perhaps add another interface? Or adapt the existing one
uint64_t start_ = env_->NowMicros();
BatchReq req(updates,&mutex_);
construct_BatchReq_init_elapsed += env_->NowMicros() - start_;
Status status = HandleRequest(req);
return status;
assert(0);
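Condensed, the write phase added above follows a three-step protocol. The sketch below restates it with the queueing, locking and per-phase timing counters stripped out; WriteThroughMeta is a hypothetical helper, and 12 is simply the size of an empty WriteBatch header:

// Sketch of the three-step write protocol, not the actual FieldDB code.
#include <cstddef>
#include "leveldb/db.h"
#include "leveldb/write_batch.h"

namespace {
constexpr size_t kEmptyBatchSize = 12;  // header of an empty leveldb::WriteBatch

leveldb::Status WriteThroughMeta(leveldb::DB* metaDB, leveldb::DB* indexDB,
                                 leveldb::DB* kvDB, leveldb::WriteBatch& MetaBatch,
                                 leveldb::WriteBatch& IndexBatch,
                                 leveldb::WriteBatch& KVBatch) {
  leveldb::WriteOptions op;
  leveldb::Status status;
  // 1. Persist the meta (pending) records first so an interrupted write can be recovered.
  if (MetaBatch.ApproximateSize() > kEmptyBatchSize) {
    status = metaDB->Write(op, &MetaBatch);
    if (!status.ok()) return status;
  }
  // 2. Then apply the index and kv payloads (the TODO in the diff wants these to run concurrently).
  if (IndexBatch.ApproximateSize() > kEmptyBatchSize) {
    status = indexDB->Write(op, &IndexBatch);
    if (!status.ok()) return status;
  }
  if (KVBatch.ApproximateSize() > kEmptyBatchSize) {
    status = kvDB->Write(op, &KVBatch);
    if (!status.ok()) return status;
  }
  // 3. Finally delete the now-redundant meta records
  //    (the real code routes this through MetaCleaner::Collect/CleanMetaBatch).
  return status;
}
}  // namespace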

fielddb/field_db.h (+58, -4)  View File

@@ -1,5 +1,7 @@
#include "port/port_stdcxx.h"
#include "db/db_impl.h"
#include <cstdint>
#include <cstdio>
#include <deque>
#include <map>
#include <set>
@@ -30,6 +32,7 @@ public:
friend class iCreateReq;
friend class iDeleteReq;
friend class DeleteReq;
friend class BatchReq;
//Usage: FieldDB *db = new FieldDB(); then OpenFieldDB(options, name, &db)
FieldDB() : indexDB_(nullptr), kvDB_(nullptr), metaDB_(nullptr) {};
@@ -55,20 +58,21 @@ public:
//
IndexStatus GetIndexStatus(const std::string &fieldName);
static Status OpenFieldDB(const Options& options,const std::string& name,FieldDB** dbptr);
static Status OpenFieldDB(Options& options,const std::string& name,FieldDB** dbptr);
private:
//Recover state from the contents of metaDB
Status Recover();
private:
leveldb::DB *kvDB_;
leveldb::DB *metaDB_;
leveldb::DB *indexDB_;
std::string dbname_;
const Options *options_;
Env *env_;
leveldb::DB *metaDB_;
leveldb::DB *indexDB_;
leveldb::DB *kvDB_;
using FieldName = std::string;
// index status: creating/deleting
@@ -85,6 +89,56 @@ private:
Status HandleRequest(Request &req); //
Request *GetHandleInterval(); //
private:
int count = 0;
int count_Batch = 0;
int count_Batch_Sub = 0;
uint64_t elapsed = 0;
uint64_t construct_elapsed = 0;
uint64_t construct_BatchReq_init_elapsed = 0;
uint64_t construct_BatchReq_elapsed = 0;
uint64_t construct_BatchReq_Sub_elapsed = 0;
uint64_t construct_BatchReq_perSub_elapsed = 0;
uint64_t construct_FieldsReq_Read_elapsed = 0;
uint64_t write_elapsed = 0;
uint64_t write_meta_elapsed = 0;
uint64_t write_index_elapsed = 0;
uint64_t write_kv_elapsed = 0;
uint64_t write_clean_elapsed = 0;
uint64_t write_bytes = 0;
uint64_t write_bytes_lim = 20 * 1024 * 1024;
uint64_t temp_elapsed = 0;
inline void dumpStatistics() {
if((count && count % 500000 == 0) || (write_bytes && write_bytes > write_bytes_lim)) {
std::cout << "=====================================================\n";
std::cout << "Total Count : " << count;
std::cout << "\tTotal Write Bytes(MB) : " << write_bytes / 1048576.0 << std::endl;
std::cout << "Average Time(us) : " << elapsed * 1.0 / count;
std::cout << "\tAverage Write rates(MB/s) : " << write_bytes / 1048576.0 / elapsed * 1000000 << std::endl;
std::cout << "Construct Time(us) : " << construct_elapsed * 1.0 / count << std::endl;
std::cout << "\tConstruct BatchReq Init Time(us) : " << construct_BatchReq_init_elapsed * 1.0 / count << std::endl;
std::cout << "\tConstruct BatchReq Time(us) : " << construct_BatchReq_elapsed * 1.0 / count << std::endl;
std::cout << "\tConstruct BatchReq Sub Time(us) : " << construct_BatchReq_Sub_elapsed * 1.0 / count << std::endl;
std::cout << "\tConstruct BatchReq perSub Time(us) : " << construct_BatchReq_perSub_elapsed * 1.0 / count_Batch_Sub << std::endl;
std::cout << "\tConstruct FieldsReq Read Time(us) : " << construct_FieldsReq_Read_elapsed * 1.0 / count << std::endl;
std::cout << "Write Time(us) : " << write_elapsed * 1.0 / count << std::endl;
std::cout << "\tWrite Meta Time(us) : " << write_meta_elapsed * 1.0 / count << std::endl;
std::cout << "\tWrite Index Time(us) : " << write_index_elapsed * 1.0 / count << std::endl;
std::cout << "\tWrite KV Time(us) : " << write_kv_elapsed * 1.0 / count << std::endl;
std::cout << "\tWrite Clean Time(us) : " << write_clean_elapsed * 1.0 / count << std::endl;
std::cout << "TaskQueue Size : " << taskqueue_.size() << std::endl;
std::cout << "temp_elapsed : " << temp_elapsed * 1.0 / count << std::endl;
// std::cout << MetaBatch.ApproximateSize() << " " << IndexBatch.ApproximateSize() << " " << KVBatch.ApproximateSize() << std::endl;
std::cout << "=====================================================\n";
write_bytes_lim = write_bytes + 20 * 1024 * 1024;
std::fflush(stdout);
}
}
};
Status DestroyDB(const std::string& name,
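All of the *_elapsed counters above accumulate Env::NowMicros() deltas, so dumpStatistics() divides microsecond totals by the request count and converts write_bytes to MB/s as (bytes / 2^20) / (microseconds / 10^6). A small self-contained restatement of those conversions, with hypothetical helper names:

// Hypothetical helpers; they just restate the unit conversions used by
// dumpStatistics(): counters are microseconds, write_bytes is bytes.
#include <cstdint>
#include <cstdio>

double AvgMicrosPerOp(uint64_t elapsed_us, uint64_t count) {
  return elapsed_us * 1.0 / count;
}

double WriteRateMBps(uint64_t write_bytes, uint64_t elapsed_us) {
  return (write_bytes / 1048576.0) / (elapsed_us / 1e6);
}

int main() {
  // e.g. 500000 requests writing 40 MiB in 2 s of accumulated time
  std::printf("avg %.2f us/op, %.1f MB/s\n",
              AvgMicrosPerOp(2000000, 500000),       // 4.00 us/op
              WriteRateMBps(40ull << 20, 2000000));  // 20.0 MB/s
}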

fielddb/meta.cpp (+2, -1)  View File

@@ -56,13 +56,14 @@ public:
};
void MetaCleaner::Collect(WriteBatch &MetaBatch) {
if(MetaBatch.ApproximateSize() <= 12) return;
CleanerHandler Handler;
Handler.NeedClean = &NeedClean;
MetaBatch.Iterate(&Handler);
}
void MetaCleaner::CleanMetaBatch(DB *metaDB) {
if(NeedClean.ApproximateSize() == 0) return;
if(NeedClean.ApproximateSize() <= 12) return;
metaDB->Write(WriteOptions(), &NeedClean);
}
}
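The ApproximateSize() guards here and in field_db.cpp treat 12 bytes as "empty": assuming the stock LevelDB WriteBatch layout, an empty batch is exactly its 12-byte header (8-byte sequence number plus 4-byte record count), so only larger batches contain records. A quick standalone check:

// Quick check of the 12-byte assumption behind the ApproximateSize() guards.
#include <cassert>
#include "leveldb/write_batch.h"

int main() {
  leveldb::WriteBatch batch;
  assert(batch.ApproximateSize() == 12);  // header only, no records
  batch.Put("key", "value");
  assert(batch.ApproximateSize() > 12);   // header plus one Put record
  return 0;
}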

fielddb/request.cpp (+22, -6)  View File

@@ -1,5 +1,6 @@
#include "fielddb/request.h"
#include <cassert>
#include <cstdint>
#include <deque>
#include <string>
#include <unordered_set>
@@ -55,7 +56,10 @@ void FieldsReq::ConstructBatch(WriteBatch &KVBatch,WriteBatch &IndexBatch,
batchKeySet.insert(*Key);
}
std::string val_str;
Status s = DB->kvDB_->Get(ReadOptions(), *Key, &val_str);
Status s = Status::NotFound("test");
uint64_t start_ = DB->env_->NowMicros();
s = DB->kvDB_->Get(ReadOptions(), *Key, &val_str);
DB->construct_FieldsReq_Read_elapsed += DB->env_->NowMicros() - start_;
FieldArray *oldFields;
if (s.IsNotFound()){
oldFields = nullptr;
@@ -335,8 +339,8 @@ BatchReq::BatchReq(WriteBatch *Batch,port::Mutex *mu):
//Allocate storage for the key and value here, to avoid possible memory-access errors once the original strings are destroyed
str_buf->push_back(key.ToString());
FieldArray *field = new FieldArray;
field = ParseValue(value.ToString(), field);
if (field == nullptr){ //the value in the batch has no fields
// field = ParseValue(value.ToString(), field);
if (field->empty()){ //the value in the batch has no fields
fa_buf->push_back({{"",value.ToString()}});
} else {
fa_buf->push_back(*field);
@ -383,18 +387,30 @@ void BatchReq::ConstructBatch(WriteBatch &KVBatch,WriteBatch &IndexBatch,
WriteBatch Sub_KVBatch,Sub_IndexBatch,Sub_MetaBatch;
std::unordered_set<std::string> Sub_batchKeySet;
//Because the batch is ordered and our current algorithm only handles the first occurrence of each key, we iterate in reverse here
uint64_t start_ = DB->env_->NowMicros();
for(auto subreq = sub_requests.rbegin(); subreq != sub_requests.rend(); subreq++ ) {
uint64_t start_sub = DB->env_->NowMicros();
(*subreq)->ConstructBatch(Sub_KVBatch, Sub_IndexBatch, Sub_MetaBatch, DB, Sub_batchKeySet);
DB->construct_BatchReq_perSub_elapsed += DB->env_->NowMicros() - start_sub;
DB->count_Batch_Sub ++;
//All calls to pendreq now pass this->parent as the argument, so for sub-requests the request handed to pendreq is the enclosing BatchReq;
//checking whether the BatchReq is pending therefore tells us whether this sub-request has a conflict
if(isPending()) {
return;
}
}
KVBatch.Append(Sub_KVBatch);
IndexBatch.Append(Sub_IndexBatch);
MetaBatch.Append(Sub_MetaBatch);
DB->construct_BatchReq_Sub_elapsed += DB->env_->NowMicros() - start_;
if(Sub_KVBatch.ApproximateSize() > 12) {
KVBatch.Append(Sub_KVBatch);
}
if(Sub_IndexBatch.ApproximateSize() > 12) {
IndexBatch.Append(Sub_IndexBatch);
}
if(Sub_MetaBatch.ApproximateSize() > 12) {
MetaBatch.Append(Sub_MetaBatch);
}
batchKeySet.insert(Sub_batchKeySet.begin(),Sub_batchKeySet.end());
DB->construct_BatchReq_elapsed += DB->env_->NowMicros() - start_;
}
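The reverse loop above exists because ConstructBatch only acts on the first occurrence of each key it meets (tracked via batchKeySet), so walking the batch back to front gives the usual last-write-wins behaviour. A standalone illustration of that trick (not FieldDB code):

// Processing a batch back to front while skipping keys already seen keeps
// only the last write to each key, matching WriteBatch semantics.
#include <iostream>
#include <string>
#include <unordered_set>
#include <utility>
#include <vector>

int main() {
  std::vector<std::pair<std::string, std::string>> batch = {
      {"k1", "v1"}, {"k2", "v2"}, {"k1", "v3"}};  // k1 is written twice
  std::unordered_set<std::string> seen;           // plays the role of batchKeySet
  for (auto it = batch.rbegin(); it != batch.rend(); ++it) {
    if (!seen.insert(it->first).second) continue;  // a later write already handled this key
    std::cout << it->first << " -> " << it->second << "\n";  // k1 -> v3, k2 -> v2
  }
  return 0;
}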

include/leveldb/env.h (+2, -0)  View File

@@ -218,6 +218,8 @@ class LEVELDB_EXPORT Env {
virtual void SleepForMicroseconds(int micros) = 0;
};
Env* getPosixEnv();
// A file abstraction for reading sequentially through a file
class LEVELDB_EXPORT SequentialFile {
public:

testdb/testdb.cc (+111, -0)  View File

@@ -0,0 +1,111 @@
#include "testdb/testdb.h"
#include "db/db_impl.h"
#include <memory>
#include "leveldb/status.h"
using namespace testdb;
Status testDB::OpentestDB(Options& options,
const std::string& name, testDB** dbptr) {
// options.env->CreateDir("./abc")
if(*dbptr == nullptr){
return Status::NotSupported(name, "new a testDb first\n");
}
//
Status status;
DB *indexdb, *kvdb, *metadb;
// options.block_cache = NewLRUCache(ULONG_MAX);
// options.max_open_files = 1000;
// options.write_buffer_size = 512 * 1024 * 1024;
// options.env = getPosixEnv();
// status = Open(options, name+"_indexDB", &indexdb);
// if(!status.ok()) return status;
// (*dbptr)->indexDB_ = indexdb;
// options.env = getPosixEnv();
status = DB::Open(options, name+"_kvDB", &kvdb);
if(!status.ok()) return status;
(*dbptr)->kvDB_ = kvdb;
// options.env = getPosixEnv();
// status = Open(options, name+"_metaDB", &metadb);
// if(!status.ok()) return status;
// (*dbptr)->metaDB_ = metadb;
(*dbptr)->dbname_ = name;
// status = (*dbptr)->Recover();
(*dbptr)->options_ = &options;
(*dbptr)->env_ = options.env;
return status;
}
Status testDB::Put(const WriteOptions &options, const Slice &key, const Slice &value) {
return kvDB_->Put(options, key, value);
}
Status testDB::PutFields(const WriteOptions &, const Slice &key, const FieldArray &tests) {
return Status::OK();
}
Status testDB::Delete(const WriteOptions &options, const Slice &key) {
return kvDB_->Delete(options, key);
}
Status testDB::Write(const WriteOptions &options, WriteBatch *updates) {
return kvDB_->Write(options, updates);
}
Status testDB::Get(const ReadOptions &options, const Slice &key, std::string *value) {
return kvDB_->Get(options, key, value);
}
Status testDB::GetFields(const ReadOptions &options, const Slice &key, FieldArray *tests) {
return Status::OK();
}
std::vector<std::string> testDB::FindKeysByField(Field &test) {
return std::vector<std::string>();
}
Iterator * testDB::NewIterator(const ReadOptions &options) {
return kvDB_->NewIterator(options);
}
const Snapshot * testDB::GetSnapshot() {
return kvDB_->GetSnapshot();
}
void testDB::ReleaseSnapshot(const Snapshot *snapshot) {
kvDB_->ReleaseSnapshot(snapshot);
}
bool testDB::GetProperty(const Slice &property, std::string *value) {
return kvDB_->GetProperty(property, value);
}
void testDB::GetApproximateSizes(const Range *range, int n, uint64_t *sizes) {
kvDB_->GetApproximateSizes(range, n, sizes);
}
void testDB::CompactRange(const Slice *begin, const Slice *end) {
kvDB_->CompactRange(begin, end);
}
Status DestroyDB(const std::string& name, const Options& options) {
Status s;
s = leveldb::DestroyDB(name+"_kvDB", options);
assert(s.ok());
// s = leveldb::DestroyDB(name+"_indexDB", options);
// assert(s.ok());
// s = leveldb::DestroyDB(name+"_metaDB", options);
// assert(s.ok());
return s;
}
testDB::~testDB() {
delete kvDB_;
// delete indexDB_;
// delete metaDB_;
}
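testDB is a thin pass-through wrapper around a single kvDB_, meant as a plain-LevelDB baseline for db_bench_testDB.cc. A minimal usage sketch follows; the path and options are assumptions, but note that OpentestDB requires the caller to allocate the object first:

// Minimal usage sketch (path and options are assumptions). OpentestDB
// returns Status::NotSupported if the testDB object was not allocated first.
#include <cassert>
#include <iostream>
#include <string>
#include "testdb/testdb.h"

int main() {
  leveldb::Options options;
  options.create_if_missing = true;

  testdb::testDB* db = new testdb::testDB();  // "new a testDb first"
  leveldb::Status s = testdb::testDB::OpentestDB(options, "/tmp/testdb_demo", &db);
  assert(s.ok());

  s = db->Put(leveldb::WriteOptions(), "key", "value");  // forwarded to the inner kvDB_
  assert(s.ok());

  std::string value;
  s = db->Get(leveldb::ReadOptions(), "key", &value);
  std::cout << value << std::endl;  // prints "value"

  delete db;  // the destructor closes the inner kvDB_
  return 0;
}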

testdb/testdb.h (+72, -0)  View File

@@ -0,0 +1,72 @@
#include "port/port_stdcxx.h"
#include "db/db_impl.h"
#include <cstdint>
#include <cstdio>
#include <deque>
#include <map>
#include <set>
#include <string>
#include "leveldb/db.h"
#include "leveldb/env.h"
#include "leveldb/options.h"
#include "leveldb/slice.h"
#include "leveldb/status.h"
#include <shared_mutex>
# ifndef test_DB_H
# define test_DB_H
namespace testdb {
using namespace leveldb;
enum IndexStatus{
Creating,
Deleting,
Exist,
NotExist
};
class testDB {
private:
leveldb::DB *kvDB_;
// leveldb::DB *metaDB_;
// leveldb::DB *indexDB_;
std::string dbname_;
const Options *options_;
Env *env_;
public:
friend class Request;
friend class testsReq;
friend class iCreateReq;
friend class iDeleteReq;
friend class DeleteReq;
friend class BatchReq;
//Usage: testDB *db = new testDB(); then OpentestDB(options, name, &db)
// testDB() : indexDB_(nullptr), kvDB_(nullptr), metaDB_(nullptr) {};
testDB() : kvDB_(nullptr) { }
~testDB();
/*Required for lab 1: the virtual functions a DB-derived class must implement*/
Status Put(const WriteOptions &options, const Slice &key, const Slice &value) ;
Status PutFields(const WriteOptions &, const Slice &key, const FieldArray &tests) ;
Status Delete(const WriteOptions &options, const Slice &key) ;
Status Write(const WriteOptions &options, WriteBatch *updates) ;
Status Get(const ReadOptions &options, const Slice &key, std::string *value) ;
Status GetFields(const ReadOptions &options, const Slice &key, FieldArray *tests) ;
std::vector<std::string> FindKeysByField(Field &test) ;
Iterator * NewIterator(const ReadOptions &options) ;
const Snapshot * GetSnapshot() ;
void ReleaseSnapshot(const Snapshot *snapshot) ;
bool GetProperty(const Slice &property, std::string *value) ;
void GetApproximateSizes(const Range *range, int n, uint64_t *sizes) ;
void CompactRange(const Slice *begin, const Slice *end) ;
static Status OpentestDB(Options& options,const std::string& name,testDB** dbptr);
};
Status DestroyDB(const std::string& name,
const Options& options);
} // end of namespace
# endif

util/env_posix.cc (+4, -0)  View File

@@ -923,4 +923,8 @@ Env* Env::Default() {
return env_container.env();
}
Env* getPosixEnv() {
return new PosixEnv;
}
} // namespace leveldb
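getPosixEnv() returns a fresh PosixEnv rather than the shared Env::Default() singleton; the commented-out options.env = getPosixEnv() lines in OpenFieldDB suggest the intent is to give each inner DB its own Env and background scheduling. A hypothetical helper showing that use (the Env is deliberately never deleted, matching LevelDB's usual Env lifetime):

// Hypothetical helper: each DB gets its own Env instead of sharing Env::Default().
#include <string>
#include "leveldb/db.h"
#include "leveldb/env.h"
#include "leveldb/options.h"
#include "leveldb/status.h"

leveldb::Status OpenWithPrivateEnv(const std::string& name, leveldb::DB** db) {
  leveldb::Options options;
  options.create_if_missing = true;
  options.env = leveldb::getPosixEnv();  // fresh PosixEnv, not the Default() singleton
  return leveldb::DB::Open(options, name, db);
}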

util/serialize_value.cc (+1, -1)  View File

@@ -35,7 +35,7 @@ FieldArray *ParseValue(const std::string& value_str,FieldArray *fields){
valStr = valSlice.ToString();
res->emplace_back(nameStr, valStr);
} else {
std::cout << "name and val not match!" << std::endl;
std::cout << "name and val not match! From ParseValue" << std::endl;
}
nameSlice.clear();
valSlice.clear();

util/serialize_value.h (+1, -1)  View File

@@ -31,7 +31,7 @@ public:
if(GetLengthPrefixedSlice(&valueSlice, &valSlice)) {
map[nameSlice.ToString()] = valSlice.ToString();
} else {
std::cout << "name and val not match!" << std::endl;
std::cout << "name and val not match! From InternalFieldArray" << std::endl;
}
nameSlice.clear();
valSlice.clear();
