
version 2 roughly complete with GC

pull/2/head
alexfisher 9 months ago
commit 54b533d7cb
7 changed files with 47 additions and 10 deletions
  1. db/builder.cc (+9, -0)
  2. db/db_impl.cc (+24, -5)
  3. db/repair.cc (+1, -1)
  4. db/version_edit.cc (+4, -0)
  5. db/version_edit.h (+4, -1)
  6. db/version_set.cc (+1, -1)
  7. test/test.cpp (+4, -2)

db/builder.cc (+9, -0)

@@ -28,6 +28,15 @@ Status BuildTable(const std::string& dbname, Env* env, const Options& options,
return s;
}
{
  // Values prefixed with 0x01 carry a value-log pointer; record the value-log file id
  // in the table's metadata. Keep the parse outside assert() so it still runs in NDEBUG builds.
  auto tmp_value = iter->value();
  if (tmp_value.data()[0] == (char)(0x01)) {
    tmp_value.remove_prefix(1);
    bool ok = GetVarint64(&tmp_value, &meta->valuelog_id);
    assert(ok);
    (void)ok;
  } else {
    meta->valuelog_id = 0;
  }
}
TableBuilder* builder = new TableBuilder(options, file);
meta->smallest.DecodeFrom(iter->key());
Slice key;
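Note: for orientation, the value layout this hunk appears to assume is a 0x01 marker byte followed by varint64 fields for the value-log file id, offset and length (the offset/length parse shows up in the DoCompactionWork hunk below). A minimal sketch of that encoding using leveldb's PutVarint64/GetVarint64 from util/coding.h; the two helper names are illustrative and not part of this diff:

// Illustrative only: encode a value-log pointer as
// [0x01][varint64 file_id][varint64 offset][varint64 len].
static void EncodeValueLogPtr(std::string* dst, uint64_t file_id,
                              uint64_t offset, uint64_t len) {
  dst->push_back((char)(0x01));
  PutVarint64(dst, file_id);
  PutVarint64(dst, offset);
  PutVarint64(dst, len);
}

// Illustrative only: parse a pointer back; returns false if the slice does not
// start with the 0x01 marker or the varints are malformed.
static bool DecodeValueLogPtr(Slice input, uint64_t* file_id,
                              uint64_t* offset, uint64_t* len) {
  if (input.empty() || input.data()[0] != (char)(0x01)) return false;
  input.remove_prefix(1);
  return GetVarint64(&input, file_id) && GetVarint64(&input, offset) &&
         GetVarint64(&input, len);
}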

db/db_impl.cc (+24, -5)

@@ -57,6 +57,7 @@ struct DBImpl::CompactionState {
struct Output {
uint64_t number;
uint64_t file_size;
uint64_t valuelog_id;
InternalKey smallest, largest;
};
@@ -541,7 +542,7 @@ Status DBImpl::WriteLevel0Table(MemTable* mem, VersionEdit* edit,
level = base->PickLevelForMemTableOutput(min_user_key, max_user_key);
}
edit->AddFile(level, meta.number, meta.file_size, meta.smallest,
meta.largest);
meta.largest,meta.valuelog_id);
}
CompactionStats stats;
@@ -745,7 +746,7 @@ void DBImpl::BackgroundCompaction() {
FileMetaData* f = c->input(0, 0);
c->edit()->RemoveFile(c->level(), f->number);
c->edit()->AddFile(c->level() + 1, f->number, f->file_size, f->smallest,
f->largest);
f->largest,f->valuelog_id);
status = versions_->LogAndApply(c->edit(), &mutex_);
if (!status.ok()) {
RecordBackgroundError(status);
@@ -819,8 +820,11 @@ Status DBImpl::OpenCompactionOutputFile(CompactionState* compact) {
out.number = file_number;
out.smallest.Clear();
out.largest.Clear();
compact->outputs.push_back(out);
compact->valuelog_file_id=versions_->NewFileNumber();
out.valuelog_id=compact->valuelog_file_id;
compact->outputs.push_back(out);
mutex_.Unlock();
}
@@ -913,7 +917,7 @@ Status DBImpl::InstallCompactionResults(CompactionState* compact) {
for (size_t i = 0; i < compact->outputs.size(); i++) {
const CompactionState::Output& out = compact->outputs[i];
compact->compaction->edit()->AddFile(level + 1, out.number, out.file_size,
out.smallest, out.largest);
out.smallest, out.largest,out.valuelog_id);
}
return versions_->LogAndApply(compact->compaction->edit(), &mutex_);
}
@@ -947,6 +951,12 @@ Status DBImpl::DoCompactionWork(CompactionState* compact) {
std::string current_user_key;
bool has_current_user_key = false;
SequenceNumber last_sequence_for_key = kMaxSequenceNumber;
std::set<uint64_t> old_valuelog_ids;
for (int which = 0; which < 2; which++) {
for (int i = 0; i < compact->compaction->num_input_files(which); i++) {
FileMetaData* f = compact->compaction->input(which, i);
if (f->valuelog_id) old_valuelog_ids.emplace(f->valuelog_id);
}
}
while (input->Valid() && !shutting_down_.load(std::memory_order_acquire)) {
// Prioritize immutable compaction work
if (has_imm_.load(std::memory_order_relaxed)) {
@@ -1035,6 +1045,7 @@ Status DBImpl::DoCompactionWork(CompactionState* compact) {
new_value=old_value;
}
else{
old_value.remove_prefix(1);
uint64_t file_id,valuelog_offset,valuelog_len;
bool res=GetVarint64(&old_value,&file_id);
if(!res)assert(0);
@@ -1078,6 +1089,14 @@ Status DBImpl::DoCompactionWork(CompactionState* compact) {
if (status.ok()) {
status = input->status();
}
// Not completely correct: this cleanup should live in a new function, related to removeabsol...
if(status.ok()){
for(auto id:old_valuelog_ids){
auto valuelog_filename=ValueLogFileName(dbname_,id);
Status s=env_->RemoveFile(valuelog_filename);
assert(s.ok());
}
}
delete input;
input = nullptr;
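Note: the comment above wants this cleanup out of DoCompactionWork. One possible shape, sketched under the assumption that the ids are only handed over once the compaction's version edit has been applied; RemoveObsoleteValueLogs is a hypothetical helper and is not part of this diff:

// Hypothetical sketch, not in this diff: remove value-log files whose live data
// has been rewritten, after the new version is durable.
void DBImpl::RemoveObsoleteValueLogs(const std::set<uint64_t>& ids) {
  for (uint64_t id : ids) {
    std::string fname = ValueLogFileName(dbname_, id);
    Status s = env_->RemoveFile(fname);
    if (!s.ok()) {
      // Deleting lazily is safe; log and retry on a later pass instead of asserting.
      Log(options_.info_log, "Failed to remove value log %llu: %s",
          static_cast<unsigned long long>(id), s.ToString().c_str());
    }
  }
}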
@@ -1607,7 +1626,7 @@ Status DBImpl::ReadValueLog(uint64_t file_id, uint64_t offset,uint64_t len,Slice
//std::cout<<file_name_<<" "<<offset<<" "<<len<<std::endl;
std::ifstream inFile(file_name_, std::ios::in | std::ios::binary);
if (!inFile.is_open()) {
std::cerr << "Failed to open file for writing!" << std::endl;
std::cerr << "Failed to open file for writing!" << file_id << " " << offset << " " << len << std::endl;
return Status::Corruption("Failed to open file for writing!");
}
inFile.seekg(offset);
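Note: the message still says "for writing" although the stream is opened read-only here. As an aside, the same read could go through leveldb's Env abstraction rather than std::ifstream; a rough sketch, assuming the value bytes are stored verbatim at (offset, len) inside the value-log file:

// Sketch only: read len bytes at offset from a value-log file via the Env API.
Status ReadValueLogViaEnv(Env* env, const std::string& fname, uint64_t offset,
                          uint64_t len, std::string* value) {
  RandomAccessFile* file = nullptr;
  Status s = env->NewRandomAccessFile(fname, &file);
  if (!s.ok()) return s;
  char* scratch = new char[len];
  Slice result;
  s = file->Read(offset, static_cast<size_t>(len), &result, scratch);
  if (s.ok()) value->assign(result.data(), result.size());
  delete[] scratch;
  delete file;
  return s;
}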

db/repair.cc (+1, -1)

@@ -369,7 +369,7 @@ class Repairer {
// TODO(opt): separate out into multiple levels
const TableInfo& t = tables_[i];
edit_.AddFile(0, t.meta.number, t.meta.file_size, t.meta.smallest,
t.meta.largest);
t.meta.largest,t.meta.valuelog_id);
}
// std::fprintf(stderr,

db/version_edit.cc (+4, -0)

@@ -79,6 +79,7 @@ void VersionEdit::EncodeTo(std::string* dst) const {
PutVarint32(dst, new_files_[i].first); // level
PutVarint64(dst, f.number);
PutVarint64(dst, f.file_size);
PutVarint64(dst, f.valuelog_id);
PutLengthPrefixedSlice(dst, f.smallest.Encode());
PutLengthPrefixedSlice(dst, f.largest.Encode());
}
@@ -178,6 +179,7 @@ Status VersionEdit::DecodeFrom(const Slice& src) {
case kNewFile:
if (GetLevel(&input, &level) && GetVarint64(&input, &f.number) &&
GetVarint64(&input, &f.file_size) &&
GetVarint64(&input,&f.valuelog_id) &&
GetInternalKey(&input, &f.smallest) &&
GetInternalKey(&input, &f.largest)) {
new_files_.push_back(std::make_pair(level, f));
@@ -247,6 +249,8 @@ std::string VersionEdit::DebugString() const {
r.append(" ");
AppendNumberTo(&r, f.file_size);
r.append(" ");
AppendNumberTo(&r, f.valuelog_id);
r.append(" ");
r.append(f.smallest.DebugString());
r.append(" .. ");
r.append(f.largest.DebugString());
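Note: adding the varint for f.valuelog_id changes the on-disk layout of every kNewFile record, so a MANIFEST written before this commit will not decode with the new code (and vice versa); an existing database generally has to be recreated when switching versions. As encoded above, the record is roughly:

// kNewFile record after this change (integers are varint-encoded):
//   tag(kNewFile) | level | file number | file size | valuelog_id
//   | length-prefixed smallest internal key | length-prefixed largest internal key

A separate tag for value-log-backed files would keep old manifests readable, but that is not what this diff does.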

db/version_edit.h (+4, -1)

@@ -22,8 +22,10 @@ struct FileMetaData {
int allowed_seeks; // Seeks allowed until compaction
uint64_t number;
uint64_t file_size; // File size in bytes
uint64_t valuelog_id=0;
InternalKey smallest; // Smallest internal key served by table
InternalKey largest; // Largest internal key served by table
};
class VersionEdit {
@@ -61,12 +63,13 @@ class VersionEdit {
// REQUIRES: This version has not been saved (see VersionSet::SaveTo)
// REQUIRES: "smallest" and "largest" are smallest and largest keys in file
void AddFile(int level, uint64_t file, uint64_t file_size,
const InternalKey& smallest, const InternalKey& largest) {
const InternalKey& smallest, const InternalKey& largest,uint64_t valuelog_id=0) {
FileMetaData f;
f.number = file;
f.file_size = file_size;
f.smallest = smallest;
f.largest = largest;
f.valuelog_id=valuelog_id;
new_files_.push_back(std::make_pair(level, f));
}
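Note: because the new parameter defaults to 0, existing AddFile call sites keep compiling, and 0 is read as "no value log attached" (see the valuelog_id check added in DoCompactionWork). Illustrative usage, with placeholder keys and numbers:

VersionEdit edit;
// Table whose values are stored inline: valuelog_id stays 0.
edit.AddFile(1, /*file=*/12, /*file_size=*/4096, smallest_key, largest_key);
// Table whose large values live in value-log file 7.
edit.AddFile(1, /*file=*/13, /*file_size=*/4096, smallest_key, largest_key,
             /*valuelog_id=*/7);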

db/version_set.cc (+1, -1)

@@ -1087,7 +1087,7 @@ Status VersionSet::WriteSnapshot(log::Writer* log) {
const std::vector<FileMetaData*>& files = current_->files_[level];
for (size_t i = 0; i < files.size(); i++) {
const FileMetaData* f = files[i];
edit.AddFile(level, f->number, f->file_size, f->smallest, f->largest);
edit.AddFile(level, f->number, f->file_size, f->smallest, f->largest,f->valuelog_id);
}
}

test/test.cpp (+4, -2)

@@ -10,6 +10,8 @@ using FieldArray=std::vector>;
Status OpenDB(std::string dbName, DB **db) {
Options options;
options.max_file_size=16*1024;
options.write_buffer_size=32*1024;
options.create_if_missing = true;
return DB::Open(options, dbName, db);
}
@@ -165,7 +167,7 @@ TEST(Test, LARGE_DATA_COMPACT_TEST) {
abort();
}
std::vector<std::string> values;
for(int i=0;i<1000;i++){
for(int i=0;i<100000;i++){
std::string key=std::to_string(i);
std::string value;
for(int j=0;j<1000;j++){
@@ -174,7 +176,7 @@ TEST(Test, LARGE_DATA_COMPACT_TEST) {
values.push_back(value);
db->Put(writeOptions,key,value);
}
for(int i=0;i<1000;i++){
for(int i=0;i<100000;i++){
std::string key=std::to_string(i);
std::string value;
Status s=db->Get(readOptions,key,&value);
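Note: the bump from 1000 to 100000 keys matters for coverage: with values built from 1000 inner-loop iterations, that is on the order of 100 MB of user data (assuming roughly one byte appended per iteration), so against write_buffer_size=32*1024 and max_file_size=16*1024 the test now forces thousands of memtable flushes and compactions, which is what actually exercises the value-log GC added in this commit.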
