Skip to content
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
98 changes: 61 additions & 37 deletions src/iocore/cache/CacheVC.cc
Original file line number Diff line number Diff line change
Expand Up @@ -69,6 +69,7 @@

namespace
{
DbgCtl dbg_ctl_cache_ram{"cache_ram"};
DbgCtl dbg_ctl_cache_bc{"cache_bc"};
DbgCtl dbg_ctl_cache_disk_error{"cache_disk_error"};
DbgCtl dbg_ctl_cache_read{"cache_read"};
Expand Down Expand Up @@ -331,6 +332,20 @@ unmarshal_helper(Doc *doc, Ptr<IOBufferData> &buf, int &okay)
}
}

bool
CacheVC::_ram_cache_cutoff_check(const Doc *doc) const
{
  // Decide whether this fragment is small enough to be admitted to the RAM
  // cache. The per-volume cutoff overrides the global one when configured.
  int64_t cutoff = cache_config_ram_cache_cutoff;
  if (stripe->cache_vol->ram_cache_cutoff > 0) {
    cutoff = stripe->cache_vol->ram_cache_cutoff;
  }

  // A cutoff of zero disables the size limit entirely.
  if (!cutoff) {
    return true;
  }

  // doc_len is 0 while handling the first fragment (it is set from the
  // vector), so size the decision on doc->total_len there; afterwards
  // (doc_len != 0) the decision is based on doc_len.
  int64_t size = doc_len ? static_cast<int64_t>(doc_len) : static_cast<int64_t>(doc->total_len);
  return size < cutoff;
}

// [amc] I think this is where all disk reads from cache funnel through here.
int
CacheVC::handleReadDone(int event, Event * /* e ATS_UNUSED */)
Expand Down Expand Up @@ -411,19 +426,7 @@ CacheVC::handleReadDone(int event, Event * /* e ATS_UNUSED */)
}
// Put the request in the ram cache only if its a open_read or lookup
if (vio.op == VIO::READ && okay) {
bool cutoff_check;
// Determine effective cutoff: use per-volume override if set, otherwise use global
int64_t effective_cutoff =
(stripe->cache_vol->ram_cache_cutoff > 0) ? stripe->cache_vol->ram_cache_cutoff : cache_config_ram_cache_cutoff;
// cutoff_check :
// doc_len == 0 for the first fragment (it is set from the vector)
// The decision on the first fragment is based on
// doc->total_len
// After that, the decision is based of doc_len (doc_len != 0)
// (effective_cutoff == 0) : no cutoffs
cutoff_check = ((!doc_len && static_cast<int64_t>(doc->total_len) < effective_cutoff) ||
(doc_len && static_cast<int64_t>(doc_len) < effective_cutoff) || !effective_cutoff);
if (cutoff_check && !f.doc_from_ram_cache) {
if (_ram_cache_cutoff_check(doc) && !f.doc_from_ram_cache) {
uint64_t o = dir_offset(&dir);
stripe->ram_cache->put(read_key, buf.get(), doc->len, http_copy_hdr, o);
}
Expand All @@ -449,27 +452,64 @@ CacheVC::handleReadDone(int event, Event * /* e ATS_UNUSED */)

int
CacheVC::handleRead(int /* event ATS_UNUSED */, Event * /* e ATS_UNUSED */)

{
cancel_trigger();

f.doc_from_ram_cache = false;

ink_assert(stripe->mutex->thread_holding == this_ethread());

// 1. check RAM cache
if (load_from_ram_cache()) {
goto LramHit;
} else if (load_from_last_open_read_call()) {
goto LmemHit;
} else if (load_from_aggregation_buffer()) {
Dbg(dbg_ctl_cache_ram, "RAM cache hit");
f.doc_from_ram_cache = true;
io.aio_result = io.aiocb.aio_nbytes;

Doc *doc = reinterpret_cast<Doc *>(buf->data());
if (cache_config_ram_cache_compress && doc->doc_type == CACHE_FRAG_TYPE_HTTP && doc->hlen) {
SET_HANDLER(&CacheVC::handleReadDone);
return EVENT_RETURN;
}

POP_HANDLER;
return EVENT_RETURN;
}

// 2. check last open read cache
if (load_from_last_open_read_call()) {
Dbg(dbg_ctl_cache_ram, "last open read hit");
f.doc_from_ram_cache = true;
io.aio_result = io.aiocb.aio_nbytes;

// Try to promote this object to the RAM cache
Doc *doc = reinterpret_cast<Doc *>(buf->data());
if (_ram_cache_cutoff_check(doc)) {
bool http_copy_hdr = cache_config_ram_cache_compress && doc->hlen;
uint64_t o = dir_offset(&dir);
stripe->ram_cache->put(read_key, buf.get(), doc->len, http_copy_hdr, o);
}

POP_HANDLER;
return EVENT_RETURN;
}

// 3. check aggregation buffer
if (load_from_aggregation_buffer()) {
Dbg(dbg_ctl_cache_ram, "aggregation buffer hit");
f.doc_from_ram_cache = true;
io.aio_result = io.aiocb.aio_nbytes;

SET_HANDLER(&CacheVC::handleReadDone);
return EVENT_RETURN;
}

// 4. read from Disk (AIO) due to all memory cache miss
Dbg(dbg_ctl_cache_ram, "all memory cache miss");

ts::Metrics::Counter::increment(cache_rsb.all_mem_misses);
ts::Metrics::Counter::increment(stripe->cache_vol->vol_rsb.all_mem_misses);

// enqueue AIO read
io.aiocb.aio_fildes = stripe->fd;
io.aiocb.aio_offset = stripe->vol_offset(&dir);
if (static_cast<off_t>(io.aiocb.aio_offset + io.aiocb.aio_nbytes) > static_cast<off_t>(stripe->skip + stripe->len)) {
Expand All @@ -480,30 +520,14 @@ CacheVC::handleRead(int /* event ATS_UNUSED */, Event * /* e ATS_UNUSED */)
io.action = this;
io.thread = mutex->thread_holding->tt == DEDICATED ? AIO_CALLBACK_THREAD_ANY : mutex->thread_holding;
SET_HANDLER(&CacheVC::handleReadDone);
ink_assert(ink_aio_read(&io) >= 0);

// ToDo: Why are these for debug only ??
#if DEBUG
int res = ink_aio_read(&io);
ink_assert(res >= 0);

ts::Metrics::Counter::increment(cache_rsb.pread_count);
ts::Metrics::Counter::increment(stripe->cache_vol->vol_rsb.pread_count);
#endif

return EVENT_CONT;

LramHit: {
f.doc_from_ram_cache = true;
io.aio_result = io.aiocb.aio_nbytes;
Doc *doc = reinterpret_cast<Doc *>(buf->data());
if (cache_config_ram_cache_compress && doc->doc_type == CACHE_FRAG_TYPE_HTTP && doc->hlen) {
SET_HANDLER(&CacheVC::handleReadDone);
return EVENT_RETURN;
}
}
LmemHit:
f.doc_from_ram_cache = true;
io.aio_result = io.aiocb.aio_nbytes;
POP_HANDLER;
return EVENT_RETURN; // allow the caller to release the volume lock
}

bool
Expand Down
3 changes: 3 additions & 0 deletions src/iocore/cache/CacheVC.h
Original file line number Diff line number Diff line change
Expand Up @@ -318,6 +318,9 @@ struct CacheVC : public CacheVConnection {
// this is how much we need to back up the buffer to get the start of the overlapping object.
off_t scan_fix_buffer_offset;
// end region C

private:
bool _ram_cache_cutoff_check(const Doc *doc) const;
};

LINK_DEFINITION(CacheVC, opendir_link)