31 commits
5b12525
Fix `httpparse` version to match internal
johnhurt Nov 7, 2025
376f39b
Add Custom Protocol Support to the Framework
brk0v Jun 16, 2025
9228982
Add default noop custom param to client Session
drcaramelsyrup Oct 27, 2025
0481e7e
reserve additional space if needed
duke8253 Oct 24, 2025
8c080c8
Use bstr for formatting byte strings
stepancheg Aug 16, 2025
a0b76f3
Prevent downstream error prior to header from canceling cache fill
drcaramelsyrup Oct 30, 2025
b1f0d73
Add max_h2_streams to HttpPeer hash
andrewhavck Oct 31, 2025
454c2cd
expose LRU shard information
duke8253 Oct 31, 2025
5a673ed
fix size calculation for buffer capacity
duke8253 Oct 31, 2025
097600a
Fix Rust 1.91 warnings
drcaramelsyrup Nov 3, 2025
f93fb9e
Allow Extensions in cache LockCore and user tracing
drcaramelsyrup Oct 31, 2025
f067a72
Use static str in ErrorSource or ErrorType as_str
drcaramelsyrup Oct 31, 2025
4f4fcc3
Add maybe_cache_key function
drcaramelsyrup Oct 31, 2025
56d8f7b
pingora-limits Rate: correct some comments, test/example
gdavidsson Oct 27, 2025
a2f5361
Add body-bytes tracking across H1/H2 and proxy metrics
zaidoon1 Nov 1, 2025
32bea77
Allow setting max_weight on MissFinishType::Appended
drcaramelsyrup Nov 5, 2025
4590c6f
Add peek_weight API to LRU
drcaramelsyrup Nov 6, 2025
4e343c6
bump msrv to 1.84
duke8253 Nov 7, 2025
71c9fd2
fix
duke8253 Nov 7, 2025
4d87350
fix
duke8253 Nov 7, 2025
393c01e
upgrade http crate to latest version
zaidoon1 Nov 10, 2025
ef96c36
max multipart ranges configurable in range_header_filter
drcaramelsyrup Nov 11, 2025
a88d048
Fix chunked trailer end parsing
drcaramelsyrup Jun 2, 2025
fa1d565
feat: add ConnectionFilter trait for early TCP connection filtering
Nov 11, 2025
4b8bbc6
Tweak the implementation of and documentation of `connection_filter` …
johnhurt Nov 11, 2025
c3f22fa
Lock age timeouts cause lock reacquisition
drcaramelsyrup Nov 7, 2025
a3aa6cb
Set h1.1 when proxying cacheable responses
drcaramelsyrup Nov 13, 2025
849d4f4
Add or remove accept-ranges on range header filter
drcaramelsyrup Nov 9, 2025
200cee4
update msrv in github ci, fixup .bleep
gumpt Nov 21, 2025
8c41759
Merge remote-tracking branch 'upstream/main' into bump-from-upstream
krichard1212 Nov 24, 2025
2657f8f
Fix code formatting with cargo fmt
krichard1212 Nov 24, 2025
2 changes: 1 addition & 1 deletion .bleep
@@ -1 +1 @@
ed8657309187516d2e673037821a9fbd8405d703
4842474141427ccc0d962b42e31c285b203da09d
8 changes: 4 additions & 4 deletions .github/workflows/build.yml
@@ -8,7 +8,7 @@ jobs:
fail-fast: false
matrix:
# nightly, msrv, and latest stable
toolchain: [nightly, 1.83.0, 1.87.0]
toolchain: [nightly, 1.84.0, 1.91.1]
runs-on: ubuntu-latest
# Only run on "pull_request" event for external PRs. This is to avoid
# duplicate builds for PRs created from internal branches.
@@ -48,12 +48,12 @@ jobs:

- name: Run cargo clippy
run: |
[[ ${{ matrix.toolchain }} != 1.87.0 ]] || cargo clippy --all-targets --all -- --allow=unknown-lints --deny=warnings
[[ ${{ matrix.toolchain }} != 1.91.1 ]] || cargo clippy --all-targets --all -- --allow=unknown-lints --deny=warnings

- name: Run cargo audit
run: |
[[ ${{ matrix.toolchain }} != 1.87.0 ]] || (cargo install --locked cargo-audit && cargo audit)
[[ ${{ matrix.toolchain }} != 1.91.1 ]] || (cargo install --locked cargo-audit && cargo audit)

- name: Run cargo machete
run: |
[[ ${{ matrix.toolchain }} != 1.87.0 ]] || (cargo install cargo-machete --version 0.7.0 && cargo machete)
[[ ${{ matrix.toolchain }} != 1.91.1 ]] || (cargo install cargo-machete --version 0.7.0 && cargo machete)
4 changes: 3 additions & 1 deletion Cargo.toml
@@ -29,12 +29,14 @@ members = [
]

[workspace.dependencies]
bstr = "1.12.0"
tokio = "1"
tokio-stream = { version = "0.1" }
async-trait = "0.1.42"
httparse = "1"
bytes = "1.0"
derivative = "2.2.0"
http = "1.0.0"
http = "1"
log = "0.4"
h2 = ">=0.4.11"
once_cell = "1"
4 changes: 2 additions & 2 deletions README.md
@@ -61,9 +61,9 @@ Both x86_64 and aarch64 architectures will be supported.

Pingora keeps a rolling MSRV (minimum supported Rust version) policy of 6 months. This means we will accept PRs that upgrade the MSRV as long as the new Rust version used is at least 6 months old.

Our current MSRV is effectively 1.83.
Our current MSRV is 1.84.

Previously Pingora advertised an MSRV of 1.72. Older Rust versions may still be able to compile via `cargo update` pinning dependencies such as `backtrace@0.3.74`. The advertised MSRV in config files will be officially bumped to 1.83 in an upcoming release.
Building with the optional feature `boringssl` with Boring >= 4.14 requires Rust 1.80.

## Build Requirements

2 changes: 1 addition & 1 deletion clippy.toml
@@ -1 +1 @@
msrv = "1.72"
msrv = "1.84"
4 changes: 1 addition & 3 deletions pingora-boringssl/src/boring_tokio.rs
@@ -263,9 +263,7 @@ where
return Poll::Pending;
}
Err(e) => {
return Poll::Ready(Err(e
.into_io_error()
.unwrap_or_else(|e| io::Error::new(io::ErrorKind::Other, e))));
return Poll::Ready(Err(e.into_io_error().unwrap_or_else(io::Error::other)));
}
}

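For context (not part of this change): `std::io::Error::other` has been stable since Rust 1.74 and is shorthand for `io::Error::new(io::ErrorKind::Other, e)`, which is why it can be passed directly as the function in `unwrap_or_else`. A minimal sketch:

```rust
use std::io;

fn main() {
    // Equivalent to io::Error::new(io::ErrorKind::Other, "TLS handshake failed")
    let err = io::Error::other("TLS handshake failed");
    assert_eq!(err.kind(), io::ErrorKind::Other);
}
```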
1 change: 1 addition & 0 deletions pingora-cache/Cargo.toml
@@ -23,6 +23,7 @@ pingora-header-serde = { version = "0.6.0", path = "../pingora-header-serde" }
pingora-http = { version = "0.6.0", path = "../pingora-http" }
pingora-lru = { version = "0.6.0", path = "../pingora-lru" }
pingora-timeout = { version = "0.6.0", path = "../pingora-timeout" }
bstr = { workspace = true }
http = { workspace = true }
indexmap = "1"
once_cell = { workspace = true }
6 changes: 3 additions & 3 deletions pingora-cache/src/cache_control.rs
@@ -255,13 +255,13 @@ impl CacheControl {
self.has_key_without_value("private")
}

fn get_field_names(&self, key: &str) -> Option<ListValueIter> {
fn get_field_names(&self, key: &str) -> Option<ListValueIter<'_>> {
let value = self.directives.get(key)?.as_ref()?;
Some(ListValueIter::from(value))
}

/// Get the values of `private=`
pub fn private_field_names(&self) -> Option<ListValueIter> {
pub fn private_field_names(&self) -> Option<ListValueIter<'_>> {
self.get_field_names("private")
}

@@ -271,7 +271,7 @@ impl CacheControl {
}

/// Get the values of `no-cache=`
pub fn no_cache_field_names(&self) -> Option<ListValueIter> {
pub fn no_cache_field_names(&self) -> Option<ListValueIter<'_>> {
self.get_field_names("no-cache")
}

40 changes: 37 additions & 3 deletions pingora-cache/src/eviction/lru.rs
@@ -62,6 +62,29 @@ impl<const N: usize> Manager<N> {
Manager(Lru::with_capacity_and_watermark(limit, capacity, watermark))
}

/// Get the number of shards
pub fn shards(&self) -> usize {
self.0.shards()
}

/// Get the weight (total size) of a specific shard
pub fn shard_weight(&self, shard: usize) -> usize {
self.0.shard_weight(shard)
}

/// Get the number of items in a specific shard
pub fn shard_len(&self, shard: usize) -> usize {
self.0.shard_len(shard)
}

/// Get the shard index for a given cache key
///
/// This allows callers to know which shard was affected by an operation
/// without acquiring any locks.
pub fn get_shard_for_key(&self, key: &CompactCacheKey) -> usize {
(u64key(key) % N as u64) as usize
}

/// Serialize the given shard
pub fn serialize_shard(&self, shard: usize) -> Result<Vec<u8>> {
use rmp_serde::encode::Serializer;
@@ -101,6 +124,12 @@ impl<const N: usize> Manager<N> {
.or_err(InternalError, "when deserializing LRU")?;
Ok(())
}

/// Peek the weight associated with a cache key without changing its LRU order.
pub fn peek_weight(&self, item: &CompactCacheKey) -> Option<usize> {
let key = u64key(item);
self.0.peek_weight(key)
}
}

struct InsertToManager<'a, const N: usize> {
@@ -171,9 +200,14 @@ impl<const N: usize> EvictionManager for Manager<N> {
.collect()
}

fn increment_weight(&self, item: CompactCacheKey, delta: usize) -> Vec<CompactCacheKey> {
let key = u64key(&item);
self.0.increment_weight(key, delta);
fn increment_weight(
&self,
item: &CompactCacheKey,
delta: usize,
max_weight: Option<usize>,
) -> Vec<CompactCacheKey> {
let key = u64key(item);
self.0.increment_weight(key, delta, max_weight);
self.0
.evict_to_limit()
.into_iter()
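A sketch (not part of this change) of how a caller might combine the new shard-inspection methods and `peek_weight` to export per-shard stats without promoting any entry in the LRU. The module paths and the reporting logic are assumptions for illustration:

```rust
use pingora_cache::eviction::lru::Manager;
use pingora_cache::key::CompactCacheKey;

// Dump per-shard occupancy and look up one key's weight without changing
// its recency (peek_weight does not reorder the LRU).
fn report<const N: usize>(manager: &Manager<N>, key: &CompactCacheKey) {
    for shard in 0..manager.shards() {
        println!(
            "shard {shard}: {} items, {} bytes",
            manager.shard_len(shard),
            manager.shard_weight(shard)
        );
    }
    if let Some(weight) = manager.peek_weight(key) {
        println!(
            "key lives in shard {} with weight {weight}",
            manager.get_shard_for_key(key)
        );
    }
}
```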
10 changes: 9 additions & 1 deletion pingora-cache/src/eviction/mod.rs
@@ -60,10 +60,18 @@ pub trait EvictionManager: Send + Sync {
/// Adjust an item's weight upwards by a delta. If the item is not already admitted,
/// nothing will happen.
///
/// An optional `max_weight` hint indicates the known max weight of the current key in case the
/// weight should not be incremented above this amount.
///
/// Return one or more items to evict. The sizes of these items are deducted
/// from the total size already. The caller needs to make sure that these assets are actually
/// removed from the storage.
fn increment_weight(&self, item: CompactCacheKey, delta: usize) -> Vec<CompactCacheKey>;
fn increment_weight(
&self,
item: &CompactCacheKey,
delta: usize,
max_weight: Option<usize>,
) -> Vec<CompactCacheKey>;

/// Remove an item from the eviction manager.
///
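How an `EvictionManager` implementation honors the new `max_weight` hint is up to the manager; a standalone sketch (not part of this change, names illustrative) of the clamping arithmetic the doc comment describes:

```rust
// Apply a weight delta, but never exceed the caller-provided max, if any.
fn apply_weight_delta(current: usize, delta: usize, max_weight: Option<usize>) -> usize {
    let incremented = current.saturating_add(delta);
    match max_weight {
        Some(max) => incremented.min(max),
        None => incremented,
    }
}

fn main() {
    assert_eq!(apply_weight_delta(80, 50, Some(100)), 100);
    assert_eq!(apply_weight_delta(80, 50, None), 130);
}
```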
11 changes: 8 additions & 3 deletions pingora-cache/src/eviction/simple_lru.rs
@@ -124,7 +124,7 @@ impl Manager {
if self.used.load(Ordering::Relaxed) <= self.limit
&& self
.items_watermark
.map_or(true, |w| self.items.load(Ordering::Relaxed) <= w)
.is_none_or(|w| self.items.load(Ordering::Relaxed) <= w)
{
return vec![];
}
@@ -235,8 +235,13 @@ impl EvictionManager for Manager {
self.evict()
}

fn increment_weight(&self, item: CompactCacheKey, delta: usize) -> Vec<CompactCacheKey> {
let key = u64key(&item);
fn increment_weight(
&self,
item: &CompactCacheKey,
delta: usize,
_max_weight: Option<usize>,
) -> Vec<CompactCacheKey> {
let key = u64key(item);
self.increase_weight(key, delta);
self.evict()
}
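The `map_or(true, ...)` to `is_none_or(...)` rewrites in this and the following files rely on `Option::is_none_or`, stabilized in Rust 1.82 and therefore available under the new 1.84 MSRV. A minimal sketch of the equivalence:

```rust
fn main() {
    let watermark: Option<usize> = None;
    let items = 10usize;

    // Both forms return true when the option is None or when the predicate holds.
    assert!(watermark.map_or(true, |w| items <= w));
    assert!(watermark.is_none_or(|w| items <= w));
}
```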
2 changes: 1 addition & 1 deletion pingora-cache/src/filters.rs
@@ -89,7 +89,7 @@ pub fn calculate_fresh_until(
if authorization_present {
let uncacheable = cache_control
.as_ref()
.map_or(true, |cc| !cc.allow_caching_authorized_req());
.is_none_or(|cc| !cc.allow_caching_authorized_req());
if uncacheable {
return None;
}
10 changes: 5 additions & 5 deletions pingora-cache/src/hashtable.rs
@@ -49,11 +49,11 @@
}

#[allow(dead_code)]
pub fn read(&self, key: u128) -> RwLockReadGuard<HashMap<u128, V>> {
pub fn read(&self, key: u128) -> RwLockReadGuard<'_, HashMap<u128, V>> {
self.get(key).read()
}

pub fn write(&self, key: u128) -> RwLockWriteGuard<HashMap<u128, V>> {
pub fn write(&self, key: u128) -> RwLockWriteGuard<'_, HashMap<u128, V>> {
self.get(key).write()
}

@@ -103,7 +103,7 @@
pub fn new(shard_capacity: usize) -> Self {
use std::num::NonZeroUsize;
// safe, 1 != 0
const ONE: NonZeroUsize = unsafe { NonZeroUsize::new_unchecked(1) };
const ONE: NonZeroUsize = NonZeroUsize::new(1).unwrap();
let mut cache = ConcurrentLruCache {
lrus: Default::default(),
};
@@ -119,11 +119,11 @@
}

#[allow(dead_code)]
pub fn read(&self, key: u128) -> RwLockReadGuard<LruCache<u128, V>> {
pub fn read(&self, key: u128) -> RwLockReadGuard<'_, LruCache<u128, V>> {
self.get(key).read()
}

pub fn write(&self, key: u128) -> RwLockWriteGuard<LruCache<u128, V>> {
pub fn write(&self, key: u128) -> RwLockWriteGuard<'_, LruCache<u128, V>> {
self.get(key).write()
}

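For context (not part of this change): replacing `new_unchecked` with `NonZeroUsize::new(1).unwrap()` works because `Option::unwrap` is usable in const context on the Rust versions covered by the new MSRV, so a zero argument fails const evaluation at compile time instead of relying on an `unsafe` invariant. A minimal sketch:

```rust
use std::num::NonZeroUsize;

// A zero argument here would abort compilation during const evaluation,
// so no unsafe precondition is needed.
const ONE: NonZeroUsize = NonZeroUsize::new(1).unwrap();

fn main() {
    assert_eq!(ONE.get(), 1);
}
```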
45 changes: 36 additions & 9 deletions pingora-cache/src/lib.rs
@@ -688,7 +688,7 @@ impl HttpCache {
self.inner_mut()
.max_file_size_tracker
.as_mut()
.map_or(true, |t| t.add_body_bytes(bytes_len))
.is_none_or(|t| t.add_body_bytes(bytes_len))
}

/// Check if the max file size has been exceeded according to max file size tracker.
@@ -823,6 +823,18 @@ impl HttpCache {
}
}

/// Return whether the underlying storage backend supports streaming partial write.
///
/// Returns None if cache is not enabled.
pub fn support_streaming_partial_write(&self) -> Option<bool> {
self.inner.as_ref().and_then(|inner| {
inner
.enabled_ctx
.as_ref()
.map(|c| c.storage.support_streaming_partial_write())
})
}

/// Call this when cache hit is fully read.
///
/// This call will release resource if any and log the timing in tracing if set.
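A sketch (not part of this change) of consuming the new `support_streaming_partial_write` accessor, e.g. to decide whether concurrent readers can be served while a cache fill is still in flight. The helper name and the "None means not supported" policy are assumptions:

```rust
use pingora_cache::HttpCache;

// Treat "cache not enabled" (None) the same as "backend does not support it".
fn can_serve_partial_hits(cache: &HttpCache) -> bool {
    cache.support_streaming_partial_write().unwrap_or(false)
}
```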
@@ -969,8 +981,8 @@ impl HttpCache {
MissFinishType::Created(size) => {
eviction.admit(cache_key, size, meta.0.internal.fresh_until)
}
MissFinishType::Appended(size) => {
eviction.increment_weight(cache_key, size)
MissFinishType::Appended(size, max_size) => {
eviction.increment_weight(&cache_key, size, max_size)
}
};
// actual eviction can be done async
@@ -1250,6 +1262,18 @@ impl HttpCache {
}
}

/// Return the [`CacheKey`] of this asset if any.
///
/// This is allowed to be called in any phase. If the cache key callback was not called,
/// this will return None.
pub fn maybe_cache_key(&self) -> Option<&CacheKey> {
(!matches!(
self.phase(),
CachePhase::Disabled(NoCacheReason::NeverEnabled) | CachePhase::Uninit
))
.then(|| self.cache_key())
}

/// Perform the cache lookup from the given cache storage with the given cache key
///
/// A cache hit will return [CacheMeta] which contains the header and meta info about
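A sketch (not part of this change) of using the new `maybe_cache_key` accessor in a logging path, where the cache may never have been enabled for the request. The helper name is illustrative and it assumes `CacheKey` implements `Debug`:

```rust
use pingora_cache::HttpCache;

// Safe to call in any phase: maybe_cache_key returns None when the cache key
// callback was never called, instead of assuming a key is present.
fn cache_key_for_logs(cache: &HttpCache) -> String {
    cache
        .maybe_cache_key()
        .map(|key| format!("{key:?}"))
        .unwrap_or_else(|| "<no cache key>".to_string())
}
```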
@@ -1426,7 +1450,7 @@ impl HttpCache {
let mut span = inner_enabled.traces.child("cache_lock");
// should always call is_cache_locked() before this function, which should guarantee that
// the inner cache has a read lock and lock ctx
if let Some(lock_ctx) = inner_enabled.lock_ctx.as_mut() {
let (read_lock, status) = if let Some(lock_ctx) = inner_enabled.lock_ctx.as_mut() {
let lock = lock_ctx.lock.take(); // remove the lock from self
if let Some(Locked::Read(r)) = lock {
let now = Instant::now();
Expand All @@ -1437,23 +1461,26 @@ impl HttpCache {
wait_timeout.saturating_sub(self.lock_duration().unwrap_or(Duration::ZERO));
match timeout(wait_timeout, r.wait()).await {
Ok(()) => r.lock_status(),
// TODO: need to differentiate WaitTimeout vs. Lock(Age)Timeout (expired)?
Err(_) => LockStatus::Timeout,
Err(_) => LockStatus::WaitTimeout,
}
} else {
r.wait().await;
r.lock_status()
};
self.digest.add_lock_duration(now.elapsed());
let tag_value: &'static str = status.into();
span.set_tag(|| Tag::new("status", tag_value));
status
(r, status)
} else {
panic!("cache_lock_wait on wrong type of lock")
}
} else {
panic!("cache_lock_wait without cache lock")
};
if let Some(lock_ctx) = self.inner_enabled().lock_ctx.as_ref() {
lock_ctx
.cache_lock
.trace_lock_wait(&mut span, &read_lock, status);
}
status
}

/// How long did this request wait behind the read lock