Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
28 changes: 28 additions & 0 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -92,3 +92,31 @@ state as `__config`.
When `NULLBOILER_HOME` is set, `nullboiler` reads `config.json` from that directory and
resolves relative paths like `db`, `strategies_dir`, `tracker.workflows_dir`, and
`tracker.workspace.root` relative to that config file.

## Workflow Preflight

File-based tracker/pull-mode workflows are loaded from JSON files using the
`WorkflowDef` shape in `src/workflow_loader.zig`. Before starting the server, you
can check those files locally:

```bash
zig build run -- validate-workflows
zig build run -- validate-workflows workflows
```

The command defaults to `workflows` and scans direct `*.json` files in the
directory, matching `loadWorkflows`; it does not recurse into nested directories.
This is only for file-based tracker/pull-mode `WorkflowDef` files, not graph
workflow definitions managed through the HTTP API.

It reports:

- errors for missing or unreadable directories, unreadable files, malformed JSON,
JSON that cannot be parsed as `WorkflowDef`, missing or empty `pipeline_id`,
and duplicate `pipeline_id` values
- warnings for suspicious but currently allowed shapes, including empty `id`,
empty `claim_roles`, dispatch workflows without `dispatch.worker_tags`, and
directories with no JSON workflow files

Validation errors exit with status `1`. Warnings are shown but do not fail the
command, matching the existing runtime loader's permissive behavior.
87 changes: 84 additions & 3 deletions src/main.zig
Original file line number Diff line number Diff line change
Expand Up @@ -47,6 +47,23 @@ pub fn main(init: std.process.Init) !void {
try @import("from_json.zig").run(allocator, all_args[1..]);
return;
}
if (std.mem.eql(u8, all_args[0], "help") or
std.mem.eql(u8, all_args[0], "--help") or
std.mem.eql(u8, all_args[0], "-h"))
{
printUsage();
return;
}
if (std.mem.eql(u8, all_args[0], "validate-workflows")) {
if (all_args.len > 2) {
std.debug.print("error: validate-workflows accepts at most one PATH argument\n\n", .{});
printUsage();
std.process.exit(2);
}
const workflow_dir = if (all_args.len == 2) all_args[1] else "workflows";
try runValidateWorkflows(allocator, workflow_dir);
return;
}
}

var host_override: ?[]const u8 = null;
Expand Down Expand Up @@ -427,6 +444,64 @@ pub fn main(init: std.process.Init) !void {
}
}

/// Print the CLI usage text (including the build version) to stderr.
/// Used by `help`/`--help`/`-h` and when bad arguments are rejected.
fn printUsage() void {
    // Keep the whole usage text in one multiline literal so new commands
    // are added in a single place; `{s}` is filled with the module-level
    // `version` constant.
    const usage_fmt =
        \\nullboiler v{s}
        \\
        \\Usage:
        \\ nullboiler [--host HOST] [--port N] [--db PATH] [--config PATH] [--token TOKEN]
        \\ nullboiler validate-workflows [PATH]
        \\ nullboiler --export-manifest
        \\ nullboiler --from-json '<wizard answers json>'
        \\ nullboiler --version
        \\
        \\Commands:
        \\ validate-workflows [PATH] Preflight file-based tracker/pull-mode WorkflowDef JSON files.
        \\
    ;
    std.debug.print(usage_fmt, .{version});
}

/// Run the `validate-workflows` preflight over `workflow_dir`:
/// print every diagnostic, an `OK` line per clean file, and a summary,
/// then exit with status 1 if any validation errors were reported.
/// Warnings are advisory and do not fail the command.
fn runValidateWorkflows(allocator: std.mem.Allocator, workflow_dir: []const u8) !void {
    // All validation results live for the duration of this command only,
    // so a per-run arena keeps cleanup trivial.
    var scratch = std.heap.ArenaAllocator.init(allocator);
    defer scratch.deinit();

    const report = try workflow_loader.validateWorkflowFiles(scratch.allocator(), workflow_dir);

    // Diagnostics first, one per line; the optional field name is appended
    // in parentheses when present.
    for (report.diagnostics) |d| {
        const level: []const u8 = switch (d.severity) {
            .@"error" => "ERROR",
            .warning => "WARNING",
        };
        if (d.field) |field_name| {
            std.debug.print("{s} {s}: {s} ({s})\n", .{ level, d.file_path, d.message, field_name });
        } else {
            std.debug.print("{s} {s}: {s}\n", .{ level, d.file_path, d.message });
        }
    }

    // Then an OK line for each file that parsed cleanly and produced a
    // non-empty pipeline id.
    for (report.files) |file| {
        if (file.has_error or file.pipeline_id.len == 0) continue;
        std.debug.print("OK {s} -> {s}\n", .{ file.file_path, file.pipeline_id });
    }

    const warning_suffix: []const u8 = if (report.warning_count == 1) "" else "s";
    const error_suffix: []const u8 = if (report.error_count == 1) "" else "s";
    std.debug.print(
        "Checked {d} workflow files: {d} valid, {d} warning{s}, {d} error{s}\n",
        .{
            report.checked_files,
            report.valid_files,
            report.warning_count,
            warning_suffix,
            report.error_count,
            error_suffix,
        },
    );

    // Hard errors fail the command; warnings do not (matches the runtime
    // loader's permissive behavior). Note exit() skips the arena's defer,
    // which is fine — the OS reclaims the process memory.
    if (report.error_count > 0) {
        std.process.exit(1);
    }
}

fn ensureParentDirForFile(path: []const u8) !void {
if (path.len == 0 or std.mem.eql(u8, path, ":memory:") or std.mem.startsWith(u8, path, "file:")) return;

Expand Down Expand Up @@ -464,12 +539,10 @@ fn readHttpRequest(allocator: std.mem.Allocator, stream: *std.Io.net.Stream, max

var header_end: ?usize = null;
var content_len: usize = 0;
var read_buffer: [request_read_chunk]u8 = undefined;
var reader = stream.reader(std_compat.io(), &read_buffer);
var chunk: [request_read_chunk]u8 = undefined;

while (true) {
const n = try reader.interface.readSliceShort(&chunk);
const n = try readStreamAvailable(stream, &chunk);
if (n == 0) return null;

try buffer.appendSlice(allocator, chunk[0..n]);
Expand Down Expand Up @@ -520,6 +593,14 @@ fn readHttpRequest(allocator: std.mem.Allocator, stream: *std.Io.net.Stream, max
};
}

/// Read whatever bytes are currently available on `stream` into `buffer`,
/// returning the number of bytes read (0 appears to signal EOF/peer close,
/// per the caller's `if (n == 0) return null` handling).
/// Exists to avoid `Reader.readSliceShort`'s fill-the-whole-buffer behavior.
fn readStreamAvailable(stream: *std.Io.net.Stream, buffer: []u8) std.Io.net.Stream.Reader.Error!usize {
    // Reader.readSliceShort fills the whole buffer in Zig 0.16, which stalls
    // small HTTP requests until the client closes the socket.
    // NOTE(review): bypasses the Reader abstraction and calls the io vtable's
    // netRead directly with a single-slice vector; assumes `std_compat.io()`
    // returns an Io whose vtable/userdata pair is valid for this stream's
    // socket handle — confirm against std_compat if the Io setup changes.
    var slices = [_][]u8{buffer};
    const io = std_compat.io();
    return io.vtable.netRead(io.userdata, stream.socket.handle, &slices);
}

fn parseContentLength(headers_raw: []const u8) ?usize {
var lines = std.mem.splitSequence(u8, headers_raw, "\r\n");
_ = lines.next(); // request line
Expand Down
Loading
Loading