15 changes: 13 additions & 2 deletions lib/internal/quic/quic.js
@@ -2031,7 +2031,15 @@ class QuicStream {
 if (len === 0) return true;
 // Refuse the write if the chunk doesn't fit in the available
 // buffer capacity. The caller should wait for drain and retry.
-if (len > stream.#state.writeDesiredSize) return false;
+// Set up drainWakeup so that drainableProtocol() returns a
+// promise the caller can await, even when writeDesiredSize > 0
+// but is smaller than the chunk. Without this, the standard
+// while(!writeSync) { dp(); await } loop would spin
+// synchronously and starve the event loop.
+if (len > stream.#state.writeDesiredSize) {
+  drainWakeup ??= PromiseWithResolvers();
+  return false;
+}
 const result = handle.write([chunk]);
 if (result === undefined) return false;
 totalBytesWritten += len;
@@ -2070,7 +2078,10 @@ class QuicStream {
 let len = 0;
 for (const c of chunks) len += TypedArrayPrototypeGetByteLength(c);
 if (len === 0) return true;
-if (len > stream.#state.writeDesiredSize) return false;
+if (len > stream.#state.writeDesiredSize) {
+  drainWakeup ??= PromiseWithResolvers();
+  return false;
+}
 const result = handle.write(chunks);
 if (result === undefined) return false;
 totalBytesWritten += len;
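For context on the new `drainWakeup ??=` lines, the caller-side pattern the comment describes looks roughly like this (a minimal sketch with assumed stub shapes, not Node internals; `dp` stands in for the drainableProtocol symbol from the experimental stream/iter API used in the test below):

```js
// Sketch of the caller-side backpressure loop the patch comment refers to.
async function writeAll(writer, dp, chunks) {
  for (const chunk of chunks) {
    // writeSync() returns false when the chunk exceeds the currently
    // available capacity (writeDesiredSize).
    while (!writer.writeSync(chunk)) {
      // Before this patch, writer[dp]() could return null even though the
      // write was just refused (writeDesiredSize positive but smaller than
      // the chunk), so this loop spun without ever yielding. With the
      // patch, a refused write always arms drainWakeup, and writer[dp]()
      // returns a promise that resolves on the next drain.
      const drainable = writer[dp]();
      if (drainable) await drainable;
    }
  }
}
```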
8 changes: 6 additions & 2 deletions src/quic/streams.cc
@@ -1592,8 +1592,12 @@ void Stream::UpdateWriteDesiredSize() {
 uint32_t old_size = state_->write_desired_size;
 state_->write_desired_size = clamped;

-// Fire drain when transitioning from 0 to non-zero
-if (old_size == 0 && desired > 0) {
+// Fire drain when available capacity increases. This covers both the
+// classic 0-to-positive transition and the case where writeDesiredSize
+// was already positive but too small for the next chunk. The JS drain
+// handler is a no-op when no drainWakeup is pending, so the extra
+// callbacks when nobody is waiting are harmless.
+if (clamped > old_size) {
   EmitDrain();
 }
 }
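In JavaScript terms, the revised native condition behaves roughly like the following model (a sketch for illustration; only the C++ above is authoritative, and `emitDrain` stands in for the native EmitDrain() call):

```js
// Hedged JS model of the revised Stream::UpdateWriteDesiredSize() logic.
function updateWriteDesiredSize(state, clamped, emitDrain) {
  const oldSize = state.writeDesiredSize;
  state.writeDesiredSize = clamped;
  // Old condition: oldSize === 0 && clamped > 0, which missed the case
  // where capacity grew without ever having reached zero. New condition:
  // any increase fires drain; when no drainWakeup is pending the JS
  // handler is a no-op, so spurious drains cost almost nothing.
  if (clamped > oldSize) emitDrain();
}
```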
91 changes: 91 additions & 0 deletions test/parallel/test-quic-stream-bidi-varchunklen.mjs
@@ -0,0 +1,91 @@
// Flags: --experimental-quic --experimental-stream-iter --no-warnings

// Test: bidirectional data transfer with varying chunk sizes.
// This is a regression test for a stall caused by a mismatch between
// writeSync (which rejects when chunk > writeDesiredSize) and
// drainableProtocol (which returned null when writeDesiredSize > 0).
// When chunks don't evenly fill the high water mark, writeDesiredSize
// can be positive but smaller than the next chunk, causing the
// while(!writeSync) { dp(); await } loop to spin without yielding.
// See: https://github.com/nodejs/node/issues/63216

import { hasQuic, skip, mustCall } from '../common/index.mjs';
import assert from 'node:assert';

const { strictEqual } = assert;

if (!hasQuic) {
skip('QUIC is not enabled');
}

const { listen, connect } = await import('../common/quic.mjs');
const { bytes, drainableProtocol: dp } = await import('stream/iter');

// Varying chunk sizes — the pattern of alternating large and small
// chunks is effective at triggering the writeDesiredSize gap.
const chunkSizes = [60000, 12, 50000, 1600, 20000, 30000, 0, 100];
const numChunks = chunkSizes.length;
const byteLength = chunkSizes.reduce((a, b) => a + b, 0);

// Build a deterministic payload so we can verify integrity.
function buildChunk(index) {
const chunk = new Uint8Array(chunkSizes[index]);
const val = index & 0xff;
for (let i = 0; i < chunkSizes[index]; i++) {
chunk[i] = (val + i) & 0xff;
}
return chunk;
}

function checksum(data) {
let sum = 0;
for (let i = 0; i < data.byteLength; i++) {
sum = (sum + data[i]) | 0;
}
return sum;
}

// Compute expected checksum.
let expectedChecksum = 0;
for (let i = 0; i < numChunks; i++) {
const chunk = buildChunk(i);
expectedChecksum = (expectedChecksum + checksum(chunk)) | 0;
}

const done = Promise.withResolvers();

const serverEndpoint = await listen(mustCall((serverSession) => {
serverSession.onstream = mustCall(async (stream) => {
const received = await bytes(stream);
strictEqual(received.byteLength, byteLength);
strictEqual(checksum(received), expectedChecksum);

stream.writer.endSync();
await stream.closed;
serverSession.close();
done.resolve();
});
}));

const clientSession = await connect(serverEndpoint.address);
await clientSession.opened;

const stream = await clientSession.createBidirectionalStream();
const w = stream.writer;

// Write chunks, respecting backpressure via drainableProtocol.
for (let i = 0; i < numChunks; i++) {
const chunk = buildChunk(i);
while (!w.writeSync(chunk)) {
// Flow controlled — wait for drain before retrying.
const drainable = w[dp]();
if (drainable) await drainable;
}
}

const totalWritten = w.endSync();
strictEqual(totalWritten, byteLength);

await Promise.all([stream.closed, done.promise]);
await clientSession.close();
await serverEndpoint.close();
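To make the failure mode concrete outside of QUIC, here is a tiny self-contained model of the pre-fix spin (all names are illustrative stand-ins, not the real API; `capacity` plays the role of writeDesiredSize):

```js
// Purely illustrative model of the pre-fix stall.
const state = { capacity: 1600 };
const writeSync = (len) => len <= state.capacity;
// Old behavior: drainable only when capacity is exactly zero.
const dpOld = () => (state.capacity === 0 ? new Promise(() => {}) : null);

let spins = 0;
while (!writeSync(50000)) {    // chunk larger than remaining capacity
  const drainable = dpOld();   // stays null: capacity is 1600, not 0
  if (drainable) break;        // never taken in this model
  // Nothing yields to the event loop, so the drain event that would
  // raise capacity can never be delivered. Bail out so the demo ends.
  if (++spins >= 5) break;
}
console.log(`spun ${spins} times without yielding`);
```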