2 changes: 1 addition & 1 deletion conanfile.py

@@ -10,7 +10,7 @@

 class HomeObjectConan(ConanFile):
     name = "homeobject"
-    version = "3.0.16"
+    version = "3.0.17"

     homepage = "https://github.com/eBay/HomeObject"
     description = "Blob Store built on HomeStore"
10 changes: 6 additions & 4 deletions src/lib/homestore_backend/gc_manager.cpp

@@ -774,9 +774,6 @@ bool GCManager::pdev_gc_actor::copy_valid_data(
     */
 }
 #endif
-
-    // for emergent gc, we directly use the current shard header as the new header
-
     // TODO: involve ratelimiter in the following code, where read/write are scheduled. or do we need a central
     // ratelimiter shared by all components except client io?
     auto succeed_copying_shard =
@@ -786,6 +783,8 @@ bool GCManager::pdev_gc_actor::copy_valid_data(
                    &valid_blob_indexes, &data_service, task_id, &last_shard_state, &copied_blobs, pg_id,
                    header_sgs = std::move(header_sgs)](auto&& err) {
         RELEASE_ASSERT(header_sgs.iovs.size() == 1, "header_sgs.iovs.size() should be 1, but not!");
+        // shard header occupies one blk
+        COUNTER_INCREMENT(metrics_, gc_write_blk_count, 1);
         iomanager.iobuf_free(reinterpret_cast< uint8_t* >(header_sgs.iovs[0].iov_base));
         if (err) {
             GCLOGE(task_id, pg_id, shard_id,
@@ -822,6 +821,7 @@ bool GCManager::pdev_gc_actor::copy_valid_data(
         data_service.async_read(pba, data_sgs, total_size)
             .thenValue([this, k, &hints, &move_from_chunk, &move_to_chunk, &data_service, task_id,
                         pg_id, data_sgs = std::move(data_sgs), pba, &copied_blobs](auto&& err) {
+                COUNTER_INCREMENT(metrics_, gc_read_blk_count, pba.blk_count());
                 RELEASE_ASSERT(data_sgs.iovs.size() == 1,
                                "data_sgs.iovs.size() should be 1, but not!");

@@ -865,6 +865,7 @@ bool GCManager::pdev_gc_actor::copy_valid_data(
                 return data_service.async_alloc_write(data_sgs, hints, new_pba)
                     .thenValue([this, shard_id, blob_id, new_pba, &move_to_chunk, task_id, pg_id,
                                 &copied_blobs, data_sgs = std::move(data_sgs)](auto&& err) {
+                        COUNTER_INCREMENT(metrics_, gc_write_blk_count, new_pba.blk_count());
                         RELEASE_ASSERT(data_sgs.iovs.size() == 1,
                                        "data_sgs.iovs.size() should be 1, but not!");
                         iomanager.iobuf_free(
@@ -937,8 +938,9 @@ bool GCManager::pdev_gc_actor::copy_valid_data(
                 return folly::makeFuture< std::error_code >(std::error_code{});
             }

-            // write shard footer
+            // write shard footer, which occupies one blk
             homestore::MultiBlkId out_blkids;
+            COUNTER_INCREMENT(metrics_, gc_write_blk_count, 1);
             return data_service.async_alloc_write(footer_sgs, hints, out_blkids);
         })
         .thenValue([this, &move_to_chunk, &shard_id, footer_sgs, task_id, pg_id](auto&& err) {
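Taken together, the increments above count every block the copy path touches: the shard header and footer each occupy one blk, each blob read adds `pba.blk_count()`, and each rewrite adds `new_pba.blk_count()`. Below is a minimal sketch of the resulting per-shard accounting; the helper and its names are hypothetical, for illustration only, and ignore any emergent-gc special cases.

```cpp
#include <cstdint>
#include <vector>

// Hypothetical tally mirroring the COUNTER_INCREMENT calls in copy_valid_data().
struct GcBlkStats {
    uint64_t read_blks{0};  // feeds gc_read_blk_count
    uint64_t write_blks{0}; // feeds gc_write_blk_count
};

// blob_blk_counts holds blk_count() for each valid blob copied out of the shard.
GcBlkStats account_one_shard(const std::vector< uint32_t >& blob_blk_counts) {
    GcBlkStats s;
    s.write_blks += 1; // shard header occupies one blk
    for (const auto blks : blob_blk_counts) {
        s.read_blks += blks;  // async_read of the blob's pba
        s.write_blks += blks; // async_alloc_write to the move_to_chunk
    }
    s.write_blks += 1; // shard footer occupies one blk
    return s;
}
```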
5 changes: 4 additions & 1 deletion src/lib/homestore_backend/gc_manager.hpp

@@ -132,6 +132,8 @@ class GCManager {
         REGISTER_GAUGE(failed_egc_task_count, "Number of failed emergent gc tasks");
         REGISTER_GAUGE(total_reclaimed_space_by_gc, "Total reclaimed space by gc task");
         REGISTER_GAUGE(total_reclaimed_space_by_egc, "Total reclaimed space by emergent gc task");
+        REGISTER_COUNTER(gc_read_blk_count, "Total read blk count by gc in this pdev");
+        REGISTER_COUNTER(gc_write_blk_count, "Total written blk count by gc in this pdev");

         // gc task level histogram metrics
         REGISTER_HISTOGRAM(reclaim_ratio_gc, "the ratio of reclaimed blks to total blks in a gc task",
@@ -149,6 +151,7 @@ class GCManager {
             register_me_to_farm();
             attach_gather_cb(std::bind(&pdev_gc_metrics::on_gather, this));
         }
+
         ~pdev_gc_metrics() { deregister_me_from_farm(); }
         pdev_gc_metrics(const pdev_gc_metrics&) = delete;
         pdev_gc_metrics(pdev_gc_metrics&&) noexcept = delete;
@@ -313,7 +316,7 @@ class GCManager {
     void drain_pg_pending_gc_task(const pg_id_t pg_id);
     void decr_pg_pending_gc_task(const pg_id_t pg_id);
     void incr_pg_pending_gc_task(const pg_id_t pg_id);
-    auto& get_gc_actore_superblks() { return m_gc_actor_sbs; }
+    auto& get_gc_actor_superblks() { return m_gc_actor_sbs; }
     std::shared_ptr< pdev_gc_actor > get_pdev_gc_actor(uint32_t pdev_id);

 private:
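Unlike the reclaimed-space gauges above, the two new metrics are monotonic counters: registered once in the pdev_gc_metrics constructor and bumped at each I/O site with COUNTER_INCREMENT. The sketch below strips that registration pattern down to its skeleton; it assumes sisl's usual MetricsGroup API and header path, and the class name is illustrative.

```cpp
#include <cstdint>
#include <string>

#include <sisl/metrics/metrics.hpp> // assumed sisl header path

// Illustrative per-pdev metrics group following the pattern in gc_manager.hpp.
class demo_pdev_gc_metrics : public sisl::MetricsGroup {
public:
    explicit demo_pdev_gc_metrics(uint32_t pdev_id) :
            sisl::MetricsGroup{"GCMetrics", std::to_string(pdev_id)} {
        REGISTER_COUNTER(gc_read_blk_count, "Total read blk count by gc in this pdev");
        REGISTER_COUNTER(gc_write_blk_count, "Total written blk count by gc in this pdev");
        register_me_to_farm();
    }
    ~demo_pdev_gc_metrics() { deregister_me_from_farm(); }

    demo_pdev_gc_metrics(const demo_pdev_gc_metrics&) = delete;
    demo_pdev_gc_metrics& operator=(const demo_pdev_gc_metrics&) = delete;
};

// At an I/O completion site, e.g. after a successful read of `pba`:
// COUNTER_INCREMENT(metrics_, gc_read_blk_count, pba.blk_count());
```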
2 changes: 1 addition & 1 deletion src/lib/homestore_backend/hs_cp_callbacks.cpp

@@ -56,7 +56,7 @@ folly::Future< bool > HSHomeObject::MyCPCallbacks::cp_flush(CP* cp) {

     // flush gc durable_entities
     auto gc_manager = home_obj_.gc_manager();
-    auto& gc_actor_superblks = gc_manager->get_gc_actore_superblks();
+    auto& gc_actor_superblks = gc_manager->get_gc_actor_superblks();
     for (auto& gc_actor_sb : gc_actor_superblks) {
         const auto pdev_id = gc_actor_sb->pdev_id;
         const auto gc_actor = gc_manager->get_pdev_gc_actor(pdev_id);
4 changes: 0 additions & 4 deletions src/lib/homestore_backend/hs_homeobject.hpp

@@ -479,10 +479,6 @@ class HSHomeObject : public HomeObjectImpl {
         homestore::MultiBlkId pbas;
     };

-    struct BlobInfoData : public BlobInfo {
-        Blob blob;
-    };
-
     enum class BlobState : uint8_t {
         ALIVE = 0,
         TOMBSTONE = 1,
4 changes: 2 additions & 2 deletions src/lib/homestore_backend/replication_state_machine.cpp

@@ -724,14 +724,14 @@ folly::Future< std::error_code > ReplicationStateMachine::on_fetch_data(const in
     auto rc = index_table->get(get_req);
     if (sisl_unlikely(homestore::btree_status_t::success != rc)) {
         // blob never exists or has been gc
-        LOGD("on_fetch_data failed to get from index table, blob never exists or has been gc, blob_id={}, "
+        LOGD("on_fetch_data: failed to get from index table, blob never exists or has been gc, blob_id={}, "
              "shardID=0x{:x}, pg={}",
              blob_id, shard_id, pg_id);
         should_return_delete_marker = true;
     } else {
         pbas = index_value.pbas();
         if (sisl_unlikely(pbas == HSHomeObject::tombstone_pbas)) {
-            LOGD("on_fetch_data: blob has been deleted, blob_id={}, shardID=0x{:x}, pg={}", blob_id,
+            LOGD("on_fetch_data: got tombstone pba for blob_id={}, shardID=0x{:x}, pg={}", blob_id,
                  shard_id, pg_id);
             should_return_delete_marker = true;
         }
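Both branches converge on the same outcome: whether the blob is missing from the index table entirely (it never existed, or gc already removed its entry) or its entry resolves to the tombstone pba (deleted but not yet gc'ed), on_fetch_data returns a delete marker rather than an error. A condensed restatement of that decision, as a hypothetical simplification rather than the PR's actual code:

```cpp
// Condensed view of the delete-marker decision in on_fetch_data();
// types and names are simplified for illustration.
bool should_return_delete_marker(bool index_lookup_succeeded, bool pba_is_tombstone) {
    // Missing from the index table: the blob never existed, or its
    // entry was removed by gc after deletion.
    if (!index_lookup_succeeded) { return true; }
    // Present but pointing at the tombstone pba: deleted, not yet gc'ed.
    return pba_is_tombstone;
}
```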