diff --git a/db/db_impl.cc b/db/db_impl.cc
index 3e2879e..c311208 100644
--- a/db/db_impl.cc
+++ b/db/db_impl.cc
@@ -155,7 +155,10 @@ DBImpl::DBImpl(const Options& raw_options, const std::string& dbname)
       background_garbage_collect_scheduled_(false),
       manual_compaction_(nullptr),
       versions_(new VersionSet(dbname_, &options_, table_cache_,
-                               &internal_comparator_)) {}
+                               &internal_comparator_)) {
+  InitializeExistingLogs();
+  // std::cout<<"init map"<<std::endl;
+}
@@ ... @@ Status DBImpl::DoCompactionWork(CompactionState* compact) {
       if (last_sequence_for_key <= compact->smallest_snapshot) {
         // Hidden by an newer entry for same user key
         drop = true;  // (A)
+        // Parse the value based on its first character
+        if(ikey.type != kTypeDeletion){
+          Slice value = input->value();
+          char type = value[0];
+          if (type == 0x00) {
+            // Value is less than 100 bytes, use it directly
+          } else {
+            // Value is >= 100 bytes, read from external file
+            uint64_t file_id, valuelog_offset;
+            std::string file_id_str = value.ToString().substr(1, 8);
+            Slice file_id_slice(file_id_str);
+            bool res = GetVarint64(&file_id_slice, &file_id);
+            if (!res) return Status::Corruption("can't decode file id");
+            if(valuelog_origin[file_id] == 0){
+              valuelog_origin[file_id] = valuelog_usage[file_id];
+            }
+            valuelog_usage[file_id]--;
+            // std::cout << "file_id: " << file_id << " usage: " << valuelog_usage[file_id] << std::endl;
+          }
+        }
       } else if (ikey.type == kTypeDeletion &&
                  ikey.sequence <= compact->smallest_snapshot &&
                  compact->compaction->IsBaseLevelForKey(ikey.user_key)) {
@@ -1073,7 +1096,7 @@ Status DBImpl::DoCompactionWork(CompactionState* compact) {
         if (compact->builder->NumEntries() == 0) {
           compact->current_output()->smallest.DecodeFrom(key);
         }
-        compact->current_output()->largest.DecodeFrom(key);
+        compact->current_output()->largest.DecodeFrom(key);
         compact->builder->Add(key, input->value());
 
         // Close output file if it is big enough
@@ -1085,7 +1108,6 @@ Status DBImpl::DoCompactionWork(CompactionState* compact) {
         }
       }
     }
-
     input->Next();
   }
@@ -1123,6 +1145,9 @@ Status DBImpl::DoCompactionWork(CompactionState* compact) {
   }
   VersionSet::LevelSummaryStorage tmp;
   Log(options_.info_log, "compacted to: %s", versions_->LevelSummary(&tmp));
+  // for(int i=0;i
[...]
@@ ... @@ Status DBImpl::Get(const ReadOptions& options, const Slice& key,
   Slice value_log_slice(value->c_str() + 1, value->length());
-  Slice new_key;
-  int value_offset = sizeof(uint64_t) * 2;  // 16
   uint64_t file_id, valuelog_offset;
   bool res = GetVarint64(&value_log_slice, &file_id);
   if (!res) return Status::Corruption("can't decode file id");
@@ -1252,7 +1275,7 @@ Status DBImpl::Get(const ReadOptions& options, const Slice& key,
   {
     mutex_.Unlock();
-    s = ReadValueLog(file_id, valuelog_offset, &new_key, value);
+    s = ReadValueLog(file_id, valuelog_offset, value);
     mutex_.Lock();
   }
@@ -1603,67 +1626,95 @@ void DBImpl::GetApproximateSizes(const Range* range, int n, uint64_t* sizes) {
 std::vector<std::pair<uint64_t, uint64_t>> DBImpl::WriteValueLog(
     std::vector<std::pair<Slice, Slice>> kv) {
   std::string file_name_ = ValueLogFileName(dbname_, valuelogfile_number_);
-  std::ofstream valueFile(file_name_, std::ios::app | std::ios::binary);
+  std::fstream valueFile(file_name_, std::ios::in | std::ios::out | std::ios::binary);
   if (!valueFile.is_open()) {
     assert(0);
   }
+  valueFile.seekg(0, std::ios::end);  // move to the end of the file
+  uint64_t offset = valueFile.tellg();
-  uint64_t offset = valueFile.tellp();
-
+  // if the file exceeds the fixed_size
   if(offset>=config::value_log_size){
+    int file_capacity=ReadFileSize(valuelogfile_number_);
+    // std::cout<<"file_capacity: "<<file_capacity<<std::endl;
[...]
+  valueFile.read(reinterpret_cast<char*>(&file_data_size), sizeof(uint64_t));
+  valueFile.clear();                  // clear the stream's error state
+  valueFile.seekp(0, std::ios::end);  // seek back to the end of the file for writing
+  // std::cout<<"file_data_size: "<<file_data_size<<std::endl;
   std::vector<std::pair<uint64_t, uint64_t>> res;
   for (const auto& [key_slice, value_slice] : kv) {
-    // write the length of the key
-    uint64_t key_len = key_slice.size();
-    valueFile.write(reinterpret_cast<const char*>(&key_len), sizeof(uint64_t));
+    // write the length of the value
+    uint64_t value_len = value_slice.size();
+    valueFile.write(reinterpret_cast<const char*>(&value_len),
+                    sizeof(uint64_t));
     if (!valueFile.good()) {
       valueFile.close();
       assert(0);
     }
-    // write the key itself
-    valueFile.write(key_slice.data(), key_len);
+    // write the value itself
+    valueFile.write(value_slice.data(), value_len);
     if (!valueFile.good()) {
       valueFile.close();
       assert(0);
     }
-
-    // write the length of the value
-    uint64_t value_len = value_slice.size();
-    valueFile.write(reinterpret_cast<const char*>(&value_len),
-                    sizeof(uint64_t));
+
+    // write the length of the key
+    uint64_t key_len = key_slice.size();
+    valueFile.write(reinterpret_cast<const char*>(&key_len), sizeof(uint64_t));
     if (!valueFile.good()) {
       valueFile.close();
       assert(0);
     }
-    // write the value itself
-    valueFile.write(value_slice.data(), value_len);
+    // write the key itself
+    valueFile.write(key_slice.data(), key_len);
     if (!valueFile.good()) {
       valueFile.close();
       assert(0);
     }
+    // update the file data size (record count)
+    file_data_size++;
     // record the file_id and offset
     res.push_back({valuelogfile_number_, offset});
-    // update the offset
     offset += sizeof(uint64_t) + key_len + sizeof(uint64_t) + value_len;
   }
-  // unlock resources or perform other cleanup
+  // after all data has been written, write the updated data size back to the start of the file
+  if (!res.empty()) {
+    valueFile.seekp(0, std::ios::beg);  // move to the beginning of the file
+    valueFile.write(reinterpret_cast<const char*>(&file_data_size), sizeof(uint64_t));
+    if (!valueFile.good()) {
+      valueFile.close();
+      assert(0);
+    }
+  }
+  else{
+    valueFile.close();
+    assert(0);
+  }
+  // unlock resources or perform other cleanup
+  valueFile.flush();  // make sure all buffered data is written to the file
   valueFile.close();
   return res;
 }
@@ -1671,23 +1722,39 @@ std::vector<std::pair<uint64_t, uint64_t>> DBImpl::WriteValueLog(
 void DBImpl::addNewValueLog() {
   valuelogfile_number_ = versions_->NewFileNumber();
+
+  std::string file_name_ = ValueLogFileName(dbname_, valuelogfile_number_);
+  std::fstream valueFile(file_name_, std::ios::app | std::ios::binary);
+  if (!valueFile.is_open()) {
+    assert(0);
+  }
+  uint64_t file_data_size = 0;  // new header field recording the file's data size
+  if (valueFile.tellp() != 0) {
+    assert(0);
+  }
+  else{
+    valueFile.write(reinterpret_cast<const char*>(&file_data_size), sizeof(uint64_t));
+    if (!valueFile.good()) {
+      valueFile.close();
+      assert(0);
+    }
+    else{
+      // close the file normally
+      valueFile.flush();  // make sure all buffered data is written to the file
+      valueFile.close();
+    }
+  }
 }
 
-Status DBImpl::ReadValueLog(uint64_t file_id, uint64_t offset, Slice* key,
+Status DBImpl::ReadValueLog(uint64_t file_id, uint64_t offset,
                             std::string* value) {
   mutex_.Lock();
   if(file_id==valuelogfile_number_){
     mutex_.Unlock();
     std::string file_name_ = ValueLogFileName(dbname_, file_id);
     std::ifstream inFile(file_name_, std::ios::in | std::ios::binary);
-    uint64_t key_len,value_len;
+    uint64_t value_len;
     inFile.seekg(offset);
-    inFile.read((char*)(&key_len),sizeof(uint64_t));
-
-    char* key_buf=new char[key_len];
-    inFile.read(key_buf,key_len);
-    *key=Slice(key_buf,key_len);
-
     inFile.read((char*)(&value_len),sizeof(uint64_t));
     char buf[value_len];
@@ -1723,20 +1790,10 @@ Status DBImpl::ReadValueLog(uint64_t file_id, uint64_t offset, Slice* key,
   Slice res;
   s=valuelog_file->Read(offset,sizeof(uint64_t),&res,buf);
   assert(s.ok());
-  uint64_t key_len=*(uint64_t*)(res.data());
-
-  char*key_buf=new char[key_len];
-
-  s=valuelog_file->Read(offset+sizeof(uint64_t),key_len,&res,key_buf);
-  assert(s.ok());
-  *key=Slice(key_buf,key_len);
-
-  s=valuelog_file->Read(offset+sizeof(uint64_t)+key_len,sizeof(uint64_t),&res,buf);
-  assert(s.ok());
   uint64_t value_len=*(uint64_t*)(res.data());
   char value_buf[value_len];
-  s=valuelog_file->Read(offset+sizeof(uint64_t)+key_len+sizeof(uint64_t),value_len,&res,value_buf);
+  s=valuelog_file->Read(offset+sizeof(uint64_t),value_len,&res,value_buf);
   assert(s.ok());
   *value=std::string(res.data(),res.size());
@@ -1753,7 +1810,6 @@ Status DBImpl::ReadValueLog(uint64_t file_id, uint64_t offset, Slice* key,
 // Garbage collection implementation
 void DBImpl::GarbageCollect() {
   // Walk the database directory and find all valuelog files
-
   std::vector<std::string> filenames;
   Status s = env_->GetChildren(dbname_, &filenames);
   Log(options_.info_log, "start gc ");
@@ -1762,11 +1818,25 @@
   for (const auto& filename:filenames) {
     if (IsValueLogFile(filename)){
       uint64_t cur_log_number = GetValueLogID(filename);
+      if (cur_log_number == valuelogfile_number_) {
+        continue;
+      }
       auto tmp_name = ValueLogFileName(dbname_, cur_log_number);
-
-      if(!versions_->checkOldValueLog(tmp_name))valuelog_set.emplace(filename);
+      // std::cout <
+      if (!versions_->checkOldValueLog(tmp_name) &&
+          valuelog_origin[cur_log_number]) {
+        if ((float)valuelog_usage[cur_log_number] /
+                valuelog_origin[cur_log_number] <= 0.6) {
+          valuelog_set.emplace(filename);
+        }
+      }
     }
   }
+  // std::cout << "valuelog_set size: " << valuelog_set.size() << std::endl;
+  Log(options_.info_log, "valuelog_set size: %zu", valuelog_set.size());
+
+  //bool tmp_judge=false;//only clean one file
   for (std::string valuelog_name : valuelog_set) {
     Log(options_.info_log, ("gc processing: "+valuelog_name).data());
     uint64_t cur_log_number = GetValueLogID(valuelog_name);
@@ -1775,8 +1845,9 @@ void DBImpl::GarbageCollect() {
       continue;
     }
-    uint64_t current_offset = 0;
-    uint64_t tmp_offset = 0;
+    // initialize the offset to the size of the record-count header
+    uint64_t current_offset = sizeof(uint64_t);
+    uint64_t tmp_offset = current_offset;
     int cnt = 0;
@@ -1794,86 +1865,48 @@ void DBImpl::GarbageCollect() {
       // std::cout << cnt <<" "<
[...]
+void DBImpl::InitializeExistingLogs() {
+  std::vector<std::string> filenames;
+  Status s = env_->GetChildren(dbname_, &filenames);
+  Log(options_.info_log, "start set file map ");
+  assert(s.ok());
+  std::set<std::string> valuelog_set;
+  for (const auto& filename : filenames) {
+    if (IsValueLogFile(filename)) {
+      uint64_t cur_log_number = GetValueLogID(filename);
+      uint64_t file_data_size = ReadFileSize(cur_log_number);
+      valuelog_usage.emplace(cur_log_number,file_data_size);
+      // std::cout << "cur_log_number: " << cur_log_number << " file_data_size: " << file_data_size << std::endl;
+
+    }
+  }
+}
+
+// read the file_data_size of a single file
+uint64_t DBImpl::ReadFileSize(uint64_t log_number) {
+  auto file_name = ValueLogFileName(dbname_, log_number);
+  std::ifstream valueFile(file_name, std::ios::in | std::ios::binary);
+  if (!valueFile.is_open()) {
+    std::cerr << "Failed to open file: " << file_name << std::endl;
+    return 0;
+  }
+
+  uint64_t file_data_size = 0;
+  valueFile.read(reinterpret_cast<char*>(&file_data_size), sizeof(uint64_t));
+  if (valueFile.fail() || valueFile.bad()) {
+    std::cerr << "Failed to read data size from file: " << file_name
+              << std::endl;
+    valueFile.close();
+    return 0;
+  }
+  valueFile.close();
+  return file_data_size;
+}
+
 }  // namespace leveldb
diff --git a/db/db_impl.h b/db/db_impl.h
index 5a81f3e..1bc2886 100644
--- a/db/db_impl.h
+++ b/db/db_impl.h
@@ -70,9 +70,9 @@ class DBImpl : public DB {
   std::vector<std::pair<uint64_t, uint64_t>> WriteValueLog(
       std::vector<std::pair<Slice, Slice>> value) override;
   void addNewValueLog() override EXCLUSIVE_LOCKS_REQUIRED(mutex_);
-  ;
+  std::pair getNewValuelog();  // use for compaction
-  Status ReadValueLog(uint64_t file_id, uint64_t offset, Slice* key,
+  Status ReadValueLog(uint64_t file_id, uint64_t offset,
                       std::string* value) override;
 
   // Extra methods (for testing) that are not in the public DB interface
@@ -100,6 +105,11 @@ class DBImpl : public DB {
   // bytes.
   void RecordReadSample(Slice key);
 
+  void InitializeExistingLogs();
+
+  uint64_t ReadFileSize(uint64_t log_number);
+
+
  private:
   friend class DB;
   struct CompactionState;
@@ -237,6 +242,9 @@ class DBImpl : public DB {
   std::shared_mutex mem_valuelog_mutex;
   std::unordered_map mem_valuelogs;
   GUARDED_BY(mem_valuelog_mutex);
+  std::map<uint64_t, uint64_t> valuelog_usage;
+  std::map<uint64_t, uint64_t> valuelog_origin;
+
   uint32_t seed_ GUARDED_BY(mutex_);  // For sampling.
 
   // Queue of writers.
diff --git a/db/prefetch_iter.cc b/db/prefetch_iter.cc
index 66d1337..1e82743 100644
--- a/db/prefetch_iter.cc
+++ b/db/prefetch_iter.cc
@@ -81,7 +81,6 @@ class DBPreFetchIter : public Iterator {
  private:
   std::string GetAndParseTrueValue(Slice tmp_value)const{
-    Slice key;
     if(tmp_value.size()==0){
       return "";
     }
@@ -96,8 +95,8 @@ class DBPreFetchIter : public Iterator {
     res=GetVarint64(&tmp_value,&valuelog_offset);
     if(!res)assert(0);
     std::string str;
-    Status s=db_->ReadValueLog(file_id,valuelog_offset, &key, &str);
-    return str;
+    Status s=db_->ReadValueLog(file_id,valuelog_offset, &str);
+    return std::move(str);
   }
diff --git a/include/leveldb/db.h b/include/leveldb/db.h
index 03915db..0081d22 100644
--- a/include/leveldb/db.h
+++ b/include/leveldb/db.h
@@ -119,7 +119,7 @@ class LEVELDB_EXPORT DB {
   //   assert(0);  // Not implemented
   //   return Status::Corruption("not imp");
   // }
-  virtual Status ReadValueLog(uint64_t file_id, uint64_t offset, Slice* key, std::string* value){
+  virtual Status ReadValueLog(uint64_t file_id, uint64_t offset, std::string* value){
     assert(0);  // Not implemented
     return Status::Corruption("not imp");
   }
diff --git a/test/test.cpp b/test/test.cpp
index 675fd46..0b073ac 100644
--- a/test/test.cpp
+++ b/test/test.cpp
@@ -71,7 +71,7 @@ TEST(Test, CheckGetFields) {
   DB *db;
   WriteOptions writeOptions;
   ReadOptions readOptions;
-  if(OpenDB("testdb_for_XOY", &db).ok() == false) {
+  if(OpenDB("testdb_for_XOY_large", &db).ok() == false) {
     std::cerr << "open db failed" << std::endl;
     abort();
   }
@@ -107,7 +107,7 @@ TEST(Test, CheckGetFields) {
 TEST(Test, CheckSearchKey) {
   DB *db;
   ReadOptions readOptions;
-  if(OpenDB("testdb_for_XOY", &db).ok() == false) {
+  if(OpenDB("testdb_for_XOY_large", &db).ok() == false) {
     std::cerr << "open db failed" << std::endl;
     abort();
   }
@@ -147,7 +147,7 @@ TEST(Test, LARGE_DATA_COMPACT_TEST) {
     abort();
   }
   std::vector<std::string> values;
-  for(int i=0;i<5000;i++){
+  for(int i=0;i<50000;i++){
     std::string key=std::to_string(i);
     std::string value;
     for(int j=0;j<5000;j++){
@@ -156,7 +156,7 @@ TEST(Test, LARGE_DATA_COMPACT_TEST) {
     values.push_back(value);
     db->Put(writeOptions,key,value);
   }
-  for(int i=0;i<5000;i++){
+  for(int i=0;i<50000;i++){
     std::string key=std::to_string(i);
     std::string value;
     Status s=db->Get(readOptions,key,&value);
@@ -179,7 +179,7 @@ TEST(Test, Garbage_Collect_TEST) {
     abort();
   }
   std::vector<std::string> values;
-  for(int i=0;i<5000;i++){
+  for(int i=0;i<50000;i++){
     std::string key=std::to_string(i);
     std::string value;
     for(int j=0;j<1000;j++){
@@ -192,7 +192,7 @@ TEST(Test, Garbage_Collect_TEST) {
   db->TEST_GarbageCollect();
   std::cout<<"finish gc"<<std::endl;
[...]