Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
10 changes: 8 additions & 2 deletions bun.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

35 changes: 31 additions & 4 deletions packages/opencode/script/pre-release-check.ts
Original file line number Diff line number Diff line change
Expand Up @@ -36,7 +36,7 @@ function fail(msg: string) {
// ---------------------------------------------------------------------------
// Check 1: Required externals are in package.json dependencies
// ---------------------------------------------------------------------------
console.log("\n[1/4] Checking required externals in package.json...")
console.log("\n[1/5] Checking required externals in package.json...")

const requiredExternals = ["@altimateai/altimate-core"]

Expand All @@ -51,7 +51,7 @@ for (const ext of requiredExternals) {
// ---------------------------------------------------------------------------
// Check 2: Required externals are resolvable in node_modules
// ---------------------------------------------------------------------------
console.log("\n[2/4] Checking required externals are installed...")
console.log("\n[2/5] Checking required externals are installed...")

for (const ext of requiredExternals) {
try {
Expand All @@ -62,10 +62,37 @@ for (const ext of requiredExternals) {
}
}

// ---------------------------------------------------------------------------
// Check 2b: Verify altimate-core napi binary has all expected exports
// ---------------------------------------------------------------------------
console.log("\n[2b/5] Verifying altimate-core napi exports...")

const CRITICAL_EXPORTS = [
"getStatementTypes", "formatSql", "lint", "validate", "transpile",
"extractMetadata", "columnLineage", "trackLineage", "diffSchemas",
"importDdl", "exportDdl", "optimizeContext", "pruneSchema",
"compareQueries", "classifyPii", "checkQueryPii", "parseDbtProject",
]
Comment on lines +70 to +75
Copy link

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue | 🟠 Major

Pre-release export gate is too narrow and can miss broken releases.

This list covers only a subset of the N-API contract validated elsewhere, so a binary with missing non-listed exports can still pass this pre-release check.

🔧 Proposed alignment with full export contract
 const CRITICAL_EXPORTS = [
-  "getStatementTypes", "formatSql", "lint", "validate", "transpile",
-  "extractMetadata", "columnLineage", "trackLineage", "diffSchemas",
-  "importDdl", "exportDdl", "optimizeContext", "pruneSchema",
-  "compareQueries", "classifyPii", "checkQueryPii", "parseDbtProject",
+  "transpile",
+  "formatSql",
+  "extractMetadata",
+  "extractOutputColumns",
+  "getStatementTypes",
+  "compareQueries",
+  "optimizeContext",
+  "optimizeForQuery",
+  "pruneSchema",
+  "diffSchemas",
+  "importDdl",
+  "exportDdl",
+  "schemaFingerprint",
+  "introspectionSql",
+  "lint",
+  "scanSql",
+  "isSafe",
+  "classifyPii",
+  "checkQueryPii",
+  "resolveTerm",
+  "analyzeTags",
+  "columnLineage",
+  "diffLineage",
+  "trackLineage",
+  "complete",
+  "rewrite",
+  "generateTests",
+  "analyzeMigration",
+  "parseDbtProject",
+  "correct",
+  "evaluate",
+  "explain",
+  "fix",
+  "validate",
+  "checkEquivalence",
+  "checkPolicy",
+  "checkSemantics",
+  "initSdk",
+  "resetSdk",
+  "flushSdk",
 ]
📝 Committable suggestion

‼️ IMPORTANT
Carefully review the code before committing. Ensure that it accurately replaces the highlighted code, contains no missing lines, and has no issues with indentation. Thoroughly test & benchmark the code to ensure it meets the requirements.

Suggested change
const CRITICAL_EXPORTS = [
"getStatementTypes", "formatSql", "lint", "validate", "transpile",
"extractMetadata", "columnLineage", "trackLineage", "diffSchemas",
"importDdl", "exportDdl", "optimizeContext", "pruneSchema",
"compareQueries", "classifyPii", "checkQueryPii", "parseDbtProject",
]
const CRITICAL_EXPORTS = [
"transpile",
"formatSql",
"extractMetadata",
"extractOutputColumns",
"getStatementTypes",
"compareQueries",
"optimizeContext",
"optimizeForQuery",
"pruneSchema",
"diffSchemas",
"importDdl",
"exportDdl",
"schemaFingerprint",
"introspectionSql",
"lint",
"scanSql",
"isSafe",
"classifyPii",
"checkQueryPii",
"resolveTerm",
"analyzeTags",
"columnLineage",
"diffLineage",
"trackLineage",
"complete",
"rewrite",
"generateTests",
"analyzeMigration",
"parseDbtProject",
"correct",
"evaluate",
"explain",
"fix",
"validate",
"checkEquivalence",
"checkPolicy",
"checkSemantics",
"initSdk",
"resetSdk",
"flushSdk",
]
🤖 Prompt for AI Agents
Verify each finding against the current code and only fix it if needed.

In `@packages/opencode/script/pre-release-check.ts` around lines 70 - 75, The
pre-release export gate only lists a subset of required N-API exports; update
the CRITICAL_EXPORTS array to include the full export contract used elsewhere so
missing symbols can't slip through; locate the CRITICAL_EXPORTS constant and add
every exported function name that the N-API consumer/validator expects (i.e.,
mirror the complete list of exports validated in the N-API contract) so this
file's check and the N-API validation are aligned.


// Check 2b body: load the napi binary and confirm every critical export is
// present AND callable. A binary can load successfully yet still be stale
// (missing newer symbols), so a plain require() alone is not enough.
try {
  const core = require("@altimateai/altimate-core")
  // typeof check catches both absent exports and non-function placeholders.
  const missing = CRITICAL_EXPORTS.filter((name) => typeof core[name] !== "function")
  if (missing.length > 0) {
    fail(
      `altimate-core binary is missing ${missing.length} export(s): ${missing.join(", ")}.\n` +
        ` The platform binary may be stale. Fix: rm -rf node_modules && bun install`,
    )
  } else {
    pass(`All ${CRITICAL_EXPORTS.length} critical napi exports verified`)
  }
} catch (e: any) {
  // require() itself threw — the binary failed to load at all (wrong platform,
  // missing file, ABI mismatch). Surface the loader error verbatim.
  fail(`altimate-core failed to load: ${e.message}`)
}

// ---------------------------------------------------------------------------
// Check 3: Build and smoke-test the binary
// ---------------------------------------------------------------------------
console.log("\n[3/4] Building local binary...")
console.log("\n[3/5] Building local binary...")

const buildResult = spawnSync("bun", ["run", "build:local"], {
cwd: pkgDir,
Expand Down Expand Up @@ -105,7 +132,7 @@ if (buildResult.status !== 0) {
if (!binaryPath) {
fail("No binary found in dist/ after build")
} else {
console.log("\n[4/4] Smoke-testing compiled binary...")
console.log("\n[4/5] Smoke-testing compiled binary...")

// Resolve NODE_PATH like the bin wrapper does — start from pkgDir
// to include workspace-level node_modules where NAPI modules live
Expand Down
70 changes: 53 additions & 17 deletions packages/opencode/src/altimate/tools/sql-classify.ts
Original file line number Diff line number Diff line change
Expand Up @@ -2,27 +2,57 @@
//
// Uses altimate-core's AST-based getStatementTypes() for accurate classification.
// Handles CTEs, string literals, procedural blocks, all dialects correctly.
// Falls back to regex-based heuristics if the napi binary fails to load.

// eslint-disable-next-line @typescript-eslint/no-explicit-any
const core: any = require("@altimateai/altimate-core")
// Safe import: the napi binary is platform-specific and may be absent or
// unloadable; in that case we keep null and the regex fallback takes over.
let getStatementTypes: ((sql: string, dialect?: string | null) => any) | null = null
try {
  // eslint-disable-next-line @typescript-eslint/no-require-imports
  const napi = require("@altimateai/altimate-core")
  const candidate = napi?.getStatementTypes
  // Bind only when the export is genuinely callable; anything else stays null.
  getStatementTypes = typeof candidate === "function" ? candidate : null
} catch {
  // napi binary failed to load — leave null so callers use the regex fallback
}

// Categories reported by altimate-core that indicate write operations.
// NOTE(review): not referenced in this file's visible logic (classification
// treats everything outside READ_CATEGORIES as write) — confirm it is still
// used elsewhere before removing.
const WRITE_CATEGORIES = new Set(["dml", "ddl", "dcl", "tcl"])
// Only SELECT queries are known safe. "other" (SHOW, SET, USE, etc.) is ambiguous — prompt for permission.
const READ_CATEGORIES = new Set(["query"])

// Hard-deny statement types — blocked regardless of permissions. Compared
// against the upper-cased statement_type strings from getStatementTypes().
const HARD_DENY_TYPES = new Set(["DROP DATABASE", "DROP SCHEMA", "TRUNCATE", "TRUNCATE TABLE"])

// Regex fallback: prefixes that indicate write operations (case-insensitive,
// anchored to statement start).
// NOTE(review): anchoring means a leading "--" line comment or a second
// statement after ";" will not match — verify the fallback path compensates
// (e.g. strips comments and inspects every statement) before relying on this.
const WRITE_PATTERN =
  /^\s*(INSERT|UPDATE|DELETE|MERGE|CREATE|ALTER|DROP|TRUNCATE|GRANT|REVOKE|CALL|EXEC)\b/i
Comment on lines +25 to +27
Copy link

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue | 🔴 Critical

Fallback now under-prompts on ambiguous SQL.

This branch returns "read" for anything outside the small write-prefix list, so fallback mode will silently skip the sql_execute_write prompt for statements the comment above explicitly calls ambiguous, like SHOW, SET, and USE, plus forms like WITH ... DELETE. Since this is the degraded path, false positives are safer than false negatives here — please switch to a positive read allowlist and default everything else to "write".

Minimal safe direction
-const WRITE_PATTERN =
-  /^\s*(INSERT|UPDATE|DELETE|MERGE|CREATE|ALTER|DROP|TRUNCATE|GRANT|REVOKE|CALL|EXEC)\b/i
+const SAFE_READ_PATTERN = /^\s*SELECT\b/i
...
-  const queryType = WRITE_PATTERN.test(trimmed) ? "write" : "read"
+  const queryType = SAFE_READ_PATTERN.test(trimmed) ? "read" : "write"

Also applies to: 38-38

🤖 Prompt for AI Agents
Verify each finding against the current code and only fix it if needed.

In `@packages/opencode/src/altimate/tools/sql-classify.ts` around lines 25 - 27,
The current fallback uses WRITE_PATTERN and defaults anything not matching that
small write-prefix list to "read", which under-prompts ambiguous statements;
change the fallback to use a positive read allowlist instead and treat
everything else as "write". Specifically, replace or augment the WRITE_PATTERN
usage with a READ_PATTERN that only matches clearly read-only statements (e.g.,
SELECT, WITH ... SELECT, EXPLAIN, DESCRIBE/SHOW/PRAGMA/SET variants you deem
safe) and update the fallback logic (the code that currently returns "read" when
WRITE_PATTERN fails) to return "write" by default unless READ_PATTERN matches;
keep the WRITE_PATTERN checks for explicit write-prefixes
(INSERT/UPDATE/DELETE/MERGE/CREATE/ALTER/DROP/TRUNCATE/GRANT/REVOKE/CALL/EXEC)
to still classify obvious writes.

const HARD_DENY_PATTERN =
/^\s*(DROP\s+(DATABASE|SCHEMA)\b|TRUNCATE(\s+TABLE)?\s)/i
Comment on lines +28 to +29
Copy link

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue | 🔴 Critical

Fallback hard-deny is still bypassable.

HARD_DENY_PATTERN is only checked against the start of the normalized string, and the normalization step only removes block comments. In degraded mode, inputs like -- note\nDROP DATABASE prod or SELECT 1; DROP DATABASE prod still return blocked: false. packages/opencode/src/altimate/tools/sql-execute.ts:16-30 relies on this flag to stop execution, so the fallback needs to strip leading line comments and inspect every apparent statement (or conservatively reject multi-statement input) before returning.

Also applies to: 36-37

🤖 Prompt for AI Agents
Verify each finding against the current code and only fix it if needed.

In `@packages/opencode/src/altimate/tools/sql-classify.ts` around lines 28 - 29,
The HARD_DENY_PATTERN check is only applied to the start of the normalized SQL
and normalization currently only strips block comments, so inputs with leading
line comments or multiple statements (e.g. "-- note\nDROP DATABASE..." or
"SELECT 1; DROP DATABASE ...") bypass the deny. Update the normalization in
sql-classify.ts to remove leading/trailing whitespace, strip both block (/* */)
and line (--) comments, then split the input into individual statements by
semicolon (or conservatively detect multiple statements) and apply
HARD_DENY_PATTERN to every trimmed statement; alternatively, reject any input
containing multiple statements outright. Also ensure the code path used by
sql-execute.ts that relies on the blocked flag uses this tightened
classification.


/**
 * Regex-based fallback classifier for when altimate-core is unavailable.
 * Conservative in the safe direction: only statements that clearly start with
 * SELECT count as "read"; everything else — including ambiguous forms like
 * SHOW/SET/USE and CTE-prefixed writes ("WITH ... DELETE") — is "write", so
 * the degraded path prompts for permission rather than silently executing.
 * Hard-deny patterns are checked against every apparent statement, not just
 * the first, so "SELECT 1; DROP DATABASE x" cannot slip through.
 */
function classifyFallback(sql: string): { queryType: "read" | "write"; blocked: boolean } {
  // Strip block comments AND line comments so "-- note\nDROP ..." cannot hide
  // the real statement from the anchored patterns below. (Crude: this may also
  // strip "--" inside string literals, but in degraded mode over-blocking is
  // acceptable — false positives beat false negatives here.)
  const stripped = sql.replace(/\/\*[\s\S]*?\*\//g, " ").replace(/--[^\n]*/g, " ")
  // Inspect each apparent statement individually (naive split — sufficient for
  // a heuristic fallback; a ";" inside a string literal only over-splits).
  const statements = stripped
    .split(";")
    .map((stmt) => stmt.trim())
    .filter((stmt) => stmt.length > 0)
  if (statements.length === 0) return { queryType: "read", blocked: false }
  const blocked = statements.some((stmt) => HARD_DENY_PATTERN.test(stmt))
  // Positive read allowlist: default to "write" unless every statement is a
  // plain SELECT.
  const queryType = statements.every((stmt) => /^SELECT\b/i.test(stmt)) ? "read" : "write"
  return { queryType, blocked }
}

/**
* Classify a SQL string as "read" or "write" using AST parsing.
* If ANY statement is a write, returns "write".
*/
export function classify(sql: string): "read" | "write" {
const result = core.getStatementTypes(sql)
if (!result?.categories?.length) return "read"
// Treat unknown categories (not in WRITE or READ sets) as write to fail safe
return result.categories.some((c: string) => !READ_CATEGORIES.has(c)) ? "write" : "read"
if (!sql || typeof sql !== "string") return "read"
if (!getStatementTypes) return classifyFallback(sql).queryType
try {
const result = getStatementTypes(sql)
if (!result?.categories?.length) return "read"
return result.categories.some((c: string) => !READ_CATEGORIES.has(c)) ? "write" : "read"
} catch {
return classifyFallback(sql).queryType
}
}

/**
Expand All @@ -38,15 +68,21 @@ export function classifyMulti(sql: string): "read" | "write" {
* Returns both the overall query type and whether a hard-deny pattern was found.
*/
export function classifyAndCheck(sql: string): { queryType: "read" | "write"; blocked: boolean } {
const result = core.getStatementTypes(sql)
if (!result?.statements?.length) return { queryType: "read", blocked: false }
if (!sql || typeof sql !== "string") return { queryType: "read", blocked: false }
if (!getStatementTypes) return classifyFallback(sql)
try {
const result = getStatementTypes(sql)
if (!result?.statements?.length) return { queryType: "read", blocked: false }

const blocked = result.statements.some((s: { statement_type: string }) =>
s.statement_type && HARD_DENY_TYPES.has(s.statement_type.toUpperCase()),
)
const blocked = result.statements.some(
(s: { statement_type: string }) =>
s.statement_type && HARD_DENY_TYPES.has(s.statement_type.toUpperCase()),
)

const categories = result.categories ?? []
// Unknown categories (not in WRITE or READ sets) are treated as write to fail safe
const queryType = categories.some((c: string) => !READ_CATEGORIES.has(c)) ? "write" : "read"
return { queryType: queryType as "read" | "write", blocked }
const categories = result.categories ?? []
const queryType = categories.some((c: string) => !READ_CATEGORIES.has(c)) ? "write" : "read"
return { queryType: queryType as "read" | "write", blocked }
} catch {
return classifyFallback(sql)
}
}
65 changes: 62 additions & 3 deletions packages/opencode/src/tool/edit.ts
Original file line number Diff line number Diff line change
Expand Up @@ -629,6 +629,67 @@ export function trimDiff(diff: string): string {
return trimmedLines.join("\n")
}

/**
* Build a helpful error message when oldString isn't found.
* Includes a snippet of the closest-matching region so the model can self-correct.
*/
export function buildNotFoundMessage(content: string, oldString: string): string {
const base = "Could not find oldString in the file."

// Find the first line of oldString and search for it in the file
const firstLine = oldString.split("\n")[0].trim()
if (!firstLine) return base + " The oldString appears to be empty or whitespace-only."

Comment on lines +639 to +642
Copy link

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue | 🟠 Major

Use the first non-empty line from oldString, not the literal first line.

This currently misclassifies inputs like "\n\nactual text" as whitespace-only and skips nearest-match hints. Please scan for the first non-empty trimmed line before fallback.

Proposed fix
-  const firstLine = oldString.split("\n")[0].trim()
+  const firstLine = oldString
+    .split("\n")
+    .map((line) => line.trim())
+    .find((line) => line.length > 0) ?? ""
🤖 Prompt for AI Agents
Verify each finding against the current code and only fix it if needed.

In `@packages/opencode/src/tool/edit.ts` around lines 639 - 642, The code
currently uses the literal first line of oldString which treats inputs like
"\n\nactual text" as empty; update the logic that sets firstLine in edit.ts to
scan oldString's lines and pick the first non-empty trimmed line (e.g., split by
"\n", trim each, find first with length > 0) and only fall back to the
empty/whitespace warning if no non-empty line exists; adjust the subsequent
check that returns base + " The oldString appears to be empty or
whitespace-only." to use this new first non-empty trimmed line so nearest-match
hints work for strings with leading blank lines.

const contentLines = content.split("\n")
let bestLine = -1
let bestScore = 0

// Search for the line with highest similarity to the first line of oldString
for (let i = 0; i < contentLines.length; i++) {
const trimmed = contentLines[i].trim()
if (!trimmed) continue

// Skip very short lines — they produce false similarity matches
const minLen = Math.min(trimmed.length, firstLine.length)
if (minLen < 4) continue

// Exact substring match is best
if (trimmed.includes(firstLine) || firstLine.includes(trimmed)) {
bestLine = i
bestScore = 1
break
}

// Skip if lengths are too different (>3x ratio) — not a meaningful comparison
const maxLen = Math.max(trimmed.length, firstLine.length)
if (minLen * 3 < maxLen) continue

// Levenshtein similarity for close matches
const score = 1 - levenshtein(trimmed, firstLine) / maxLen
if (score > bestScore && score > 0.6) {
bestScore = score
bestLine = i
}
}

if (bestLine === -1) {
return base + ` The first line of your oldString ("${firstLine.slice(0, 80)}") was not found anywhere in the file. Re-read the file before editing.`
}

// Show a small window around the best match
const start = Math.max(0, bestLine - 1)
const end = Math.min(contentLines.length, bestLine + 4)
const snippet = contentLines
.slice(start, end)
.map((l, i) => ` ${start + i + 1} | ${l}`)
.join("\n")

return (
base +
` A similar line was found at line ${bestLine + 1}. The file may have changed since you last read it.\n\nNearest match:\n${snippet}\n\nRe-read the file and use the exact current content for oldString.`
)
}

export function replace(content: string, oldString: string, newString: string, replaceAll = false): string {
if (oldString === newString) {
throw new Error("No changes to apply: oldString and newString are identical.")
Expand Down Expand Up @@ -661,9 +722,7 @@ export function replace(content: string, oldString: string, newString: string, r
}

if (notFound) {
throw new Error(
"Could not find oldString in the file. It must match exactly, including whitespace, indentation, and line endings.",
)
throw new Error(buildNotFoundMessage(content, oldString))
}
throw new Error("Found multiple matches for oldString. Provide more surrounding context to make the match unique.")
}
65 changes: 63 additions & 2 deletions packages/opencode/src/tool/webfetch.ts
Original file line number Diff line number Diff line change
Expand Up @@ -15,6 +15,51 @@ const BROWSER_UA =
// Status codes that warrant a retry with a different User-Agent
const RETRYABLE_STATUSES = new Set([403, 406])

// altimate_change start — process-level URL failure cache (#471)
// Prevents repeated fetches to URLs that already returned a permanent failure.
// Module-scoped Map, so it is shared across ALL sessions in this process and
// is cleared only when the process restarts (or when an entry passes its TTL).
const failedUrls = new Map<string, { status: number; timestamp: number }>()
const FAILURE_CACHE_TTL = 5 * 60 * 1000 // 5 minutes
Comment on lines +18 to +22
Copy link

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue | 🟡 Minor

Misleading comment: cache is process-level, not session-level.

Based on the context snippet from tool.ts, Tool.define() is called once at module load time and the same tool instance is reused across all sessions. The failedUrls Map is module-scoped and persists across all sessions within the same process, not just a single session.

This could cause unintended behavior where a 404 cached from one user's session affects another user's session in the same process.

-// altimate_change start — session-level URL failure cache (`#471`)
-// Prevents repeated fetches to URLs that already returned 404/410 in this session.
-// Keyed by URL string. Cleared when the process restarts (new session).
+// altimate_change start — process-level URL failure cache (`#471`)
+// Prevents repeated fetches to URLs that already returned 404/410 in this process.
+// Keyed by URL string. Shared across all sessions; cleared when the process restarts.
🤖 Prompt for AI Agents
Verify each finding against the current code and only fix it if needed.

In `@packages/opencode/src/tool/webfetch.ts` around lines 18 - 22, The comment for
the failedUrls cache incorrectly states it is session-level, but it is actually
process-level because it is a module-scoped Map reused across all sessions. To
fix this, update the comment above the failedUrls declaration to accurately
describe it as a process-level URL failure cache that persists for the process
lifetime and is cleared only when the process restarts. Remove or revise any
language implying it is scoped per user session.


/**
 * Look up a previously recorded failure for this URL.
 * Returns the cached HTTP status while the entry is fresh; a stale entry is
 * evicted (so the map does not grow unbounded) and null is returned, meaning
 * the caller should go ahead and fetch.
 */
function isUrlCachedFailure(url: string): { status: number } | null {
  const cached = failedUrls.get(url)
  if (cached === undefined) return null
  const age = Date.now() - cached.timestamp
  if (age > FAILURE_CACHE_TTL) {
    failedUrls.delete(url)
    return null
  }
  return { status: cached.status }
}

/**
 * Record a failed fetch so later calls can short-circuit.
 * Only statuses that indicate the URL will keep failing are remembered:
 * 404 Not Found, 410 Gone, 451 Unavailable For Legal Reasons. Transient
 * errors (429, 5xx, ...) are never cached.
 */
function cacheUrlFailure(url: string, status: number): void {
  const isPermanent = status === 404 || status === 410 || status === 451
  if (!isPermanent) return
  failedUrls.set(url, { status, timestamp: Date.now() })
}

/** Build an actionable error message so the model knows whether to retry. */
function buildFetchError(url: string, status: number, headers?: Headers): string {
switch (status) {
case 404:
return `HTTP 404: ${url} does not exist. Do NOT retry this URL — it will fail again. Try a different URL or search for the correct page.`
case 410:
return `HTTP 410: ${url} has been permanently removed. Do NOT retry. Find an alternative resource.`
case 403:
return `HTTP 403: Access to ${url} is forbidden. The server rejected both bot and browser User-Agents. Try a different source.`
case 429: {
const retryAfter = headers?.get("retry-after")
const wait = retryAfter ? ` (retry after ${retryAfter}s)` : ""
return `HTTP 429: Rate limited by ${new URL(url).hostname}${wait}. Wait before fetching from this domain again, or use a different source.`
}
Comment on lines +50 to +54
Copy link

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue | 🟡 Minor

retry-after header can be a date string, not just seconds.

Per HTTP spec, the Retry-After header can be either a number of seconds or an HTTP-date (e.g., "Fri, 31 Dec 2026 23:59:59 GMT"). Appending "s" unconditionally produces malformed messages for date values.

Suggested fix
     case 429: {
       const retryAfter = headers?.get("retry-after")
-      const wait = retryAfter ? ` (retry after ${retryAfter}s)` : ""
+      let wait = ""
+      if (retryAfter) {
+        // retry-after can be seconds or HTTP-date; only append "s" for numeric values
+        wait = /^\d+$/.test(retryAfter)
+          ? ` (retry after ${retryAfter}s)`
+          : ` (retry after ${retryAfter})`
+      }
       return `HTTP 429: Rate limited by ${new URL(url).hostname}${wait}. Wait before fetching from this domain again, or use a different source.`
     }
📝 Committable suggestion

‼️ IMPORTANT
Carefully review the code before committing. Ensure that it accurately replaces the highlighted code, contains no missing lines, and has no issues with indentation. Thoroughly test & benchmark the code to ensure it meets the requirements.

Suggested change
case 429: {
const retryAfter = headers?.get("retry-after")
const wait = retryAfter ? ` (retry after ${retryAfter}s)` : ""
return `HTTP 429: Rate limited by ${new URL(url).hostname}${wait}. Wait before fetching from this domain again, or use a different source.`
}
case 429: {
const retryAfter = headers?.get("retry-after")
let wait = ""
if (retryAfter) {
// retry-after can be seconds or HTTP-date; only append "s" for numeric values
wait = /^\d+$/.test(retryAfter)
? ` (retry after ${retryAfter}s)`
: ` (retry after ${retryAfter})`
}
return `HTTP 429: Rate limited by ${new URL(url).hostname}${wait}. Wait before fetching from this domain again, or use a different source.`
}
🤖 Prompt for AI Agents
Verify each finding against the current code and only fix it if needed.

In `@packages/opencode/src/tool/webfetch.ts` around lines 50 - 54, The 429
handling currently appends "s" to the Retry-After header unconditionally; change
the logic in the case 429 block (variables retryAfter and wait in
packages/opencode/src/tool/webfetch.ts) to detect whether
headers?.get("retry-after") is a numeric-second value or a HTTP-date string: if
it's a positive integer use " (retry after Xs)", if it's a non-numeric/date
string include it verbatim like " (retry after <value>)" or omit the unit, and
ensure the message remains valid by not appending "s" to date values; keep the
rest of the message about the hostname and suggested actions intact.

case 451:
return `HTTP 451: ${url} is unavailable for legal reasons. Do NOT retry.`
default:
return `HTTP ${status}: Request to ${url} failed. This may be transient — retry once if needed.`
}
}
// altimate_change end

export const WebFetchTool = Tool.define("webfetch", {
description: DESCRIPTION,
parameters: z.object({
Expand All @@ -26,10 +71,23 @@ export const WebFetchTool = Tool.define("webfetch", {
timeout: z.number().describe("Optional timeout in seconds (max 120)").optional(),
}),
async execute(params, ctx) {
// Validate URL
// altimate_change start — URL validation and failure cache (#471)
// Validate URL format
if (!params.url.startsWith("http://") && !params.url.startsWith("https://")) {
throw new Error("URL must start with http:// or https://")
}
try {
new URL(params.url)
} catch {
throw new Error(`Invalid URL: "${params.url.slice(0, 200)}" is not a valid URL. Check the format and try again.`)
}

// Check failure cache — avoid re-fetching URLs that already returned 404/410
const cached = isUrlCachedFailure(params.url)
if (cached) {
throw new Error(buildFetchError(params.url, cached.status))
}
// altimate_change end

await ctx.ask({
permission: "webfetch",
Expand Down Expand Up @@ -83,9 +141,12 @@ export const WebFetchTool = Tool.define("webfetch", {
response = await fetch(params.url, { signal, headers: browserHeaders })
}

// altimate_change start — actionable error messages and failure caching (#471)
if (!response.ok) {
throw new Error(`Request failed with status code: ${response.status}`)
cacheUrlFailure(params.url, response.status)
throw new Error(buildFetchError(params.url, response.status, response.headers))
}
// altimate_change end

// Check content length
const contentLength = response.headers.get("content-length")
Expand Down
Loading
Loading