Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,7 @@ _None_

### Bug Fixes

_None_
- `openai_ask`: improve tool-use handling by requiring named function tools, using `max_completion_tokens`, opting out of OpenAI request storage, omitting sensitive tool diagnostics from logs, and refusing to execute additional tool calls after `max_tool_iterations`. [#719]

### Internal Changes

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,7 @@ module Fastlane
module Actions
class OpenaiAskAction < Action
OPENAI_API_ENDPOINT = URI('https://api.openai.com/v1/chat/completions').freeze
DEFAULT_MAX_COMPLETION_TOKENS = 2048
DEFAULT_MAX_TOOL_ITERATIONS = 5
DEFAULT_MODEL = 'gpt-4o'

Expand Down Expand Up @@ -39,12 +40,15 @@ def self.run(params)
}

# Backwards-compatible single-shot path when no tools are provided.
if tools.nil? || tools.empty?
if tools.nil?
body = request_body(prompt: prompt, question: question, model: model)
response = Net::HTTP.post(OPENAI_API_ENDPOINT, body, headers)
return parse_text_response(response)
end

Comment thread
iangmaia marked this conversation as resolved.
validate_tools_array!(tools)
validate_max_tool_iterations!(max_tool_iterations)
validate_tools!(tools)
run_with_tools(
prompt: prompt,
question: question,
Expand All @@ -62,7 +66,9 @@ def self.run_with_tools(prompt:, question:, model:, tools:, tool_handlers:, max_
format_message(role: 'user', text: question),
].compact

max_tool_iterations.times do
tool_iterations = 0

loop do
body = request_body_with_messages(messages: messages, tools: tools, model: model)
response = Net::HTTP.post(OPENAI_API_ENDPOINT, body, headers)
assistant_message = parse_assistant_message(response)
Expand All @@ -71,26 +77,31 @@ def self.run_with_tools(prompt:, question:, model:, tools:, tool_handlers:, max_
# No tool calls — model produced a final answer.
return assistant_message['content'] if tool_calls.nil? || tool_calls.empty?

if tool_iterations >= max_tool_iterations
UI.user_error!(
"OpenAI tool-use loop did not produce a final answer after #{max_tool_iterations} tool iterations. " \
'Refusing to execute additional tool calls. Increase `max_tool_iterations` or check that your prompt instructs the model to stop calling tools.'
)
end

# Append the assistant's tool-call message verbatim, then run each handler
# and append the corresponding `role: tool` results.
messages << assistant_message
tool_calls.each do |tool_call|
messages << execute_tool_call(tool_call, tool_handlers)
end
end

UI.user_error!(
"OpenAI tool-use loop did not terminate after #{max_tool_iterations} iterations. " \
'Increase `max_tool_iterations` or check that your prompt instructs the model to stop calling tools.'
)
tool_iterations += 1
end
end

def self.request_body(prompt:, question:, model: DEFAULT_MODEL)
{
model: model,
store: false,
response_format: { type: 'text' },
temperature: 1,
max_tokens: 2048,
max_completion_tokens: DEFAULT_MAX_COMPLETION_TOKENS,
top_p: 1,
messages: [
format_message(role: 'system', text: prompt),
Expand All @@ -102,9 +113,10 @@ def self.request_body(prompt:, question:, model: DEFAULT_MODEL)
def self.request_body_with_messages(messages:, tools:, model: DEFAULT_MODEL)
{
model: model,
store: false,
response_format: { type: 'text' },
temperature: 1,
max_tokens: 2048,
max_completion_tokens: DEFAULT_MAX_COMPLETION_TOKENS,
top_p: 1,
messages: messages,
tools: tools
Expand Down Expand Up @@ -140,8 +152,49 @@ def self.parse_assistant_message(response)
end
end

# Ensures `max_tool_iterations` is a positive Integer.
# Aborts the lane via `UI.user_error!` when the value has the wrong type or is below 1.
def self.validate_max_tool_iterations!(max_tool_iterations)
  unless max_tool_iterations.is_a?(Integer)
    UI.user_error!("Parameter `max_tool_iterations` must be an Integer (got #{max_tool_iterations.class})")
  end
  return unless max_tool_iterations < 1

  UI.user_error!("Parameter `max_tool_iterations` must be >= 1 (got #{max_tool_iterations})")
end

# Ensures `tools` is a non-empty Array; aborts the lane via `UI.user_error!` otherwise.
def self.validate_tools_array!(tools)
  return if tools.is_a?(Array) && !tools.empty?

  UI.user_error!('Parameter `tools` must be a non-empty Array when provided')
end

# Validates that every entry in `tools` is an OpenAI function tool definition
# with a non-empty `function.name` (symbol or string keys accepted).
# Aborts the lane via `UI.user_error!`, listing every offending entry by index.
def self.validate_tools!(tools)
  problems = []
  tools.each_with_index do |tool, position|
    declared_type = tool_type(tool)
    unless declared_type == 'function'
      problems << "tools[#{position}] type #{declared_type.empty? ? '<missing>' : declared_type.inspect}"
      next
    end

    function_definition = tool[:function] || tool['function']
    function_name = function_definition.is_a?(Hash) ? (function_definition[:name] || function_definition['name']) : nil
    problems << "tools[#{position}] missing function.name" unless valid_tool_name?(function_name)
  end

  return if problems.empty?

  UI.user_error!(
    'Parameter `tools` only supports OpenAI function tools with a non-empty `function.name`. ' \
    "Invalid tool definitions: #{problems.join(', ')}"
  )
end

# Extracts a tool definition's declared `type` as a String, accepting either a
# symbol or string key. Anything that is not a Hash yields the empty string.
def self.tool_type(tool)
  return '' unless tool.is_a?(Hash)

  declared_type = tool[:type] || tool['type']
  declared_type.to_s
end

# True when `name` is a String or Symbol whose string form is non-empty.
# Any other type (nil, Integer, Hash, ...) is rejected.
def self.valid_tool_name?(name)
  return false unless name.is_a?(String) || name.is_a?(Symbol)

  !name.to_s.empty?
end

def self.execute_tool_call(tool_call, tool_handlers)
name = tool_call.dig('function', 'name')
return unsupported_tool_call_result(tool_call) unless function_tool_call?(tool_call)

name = tool_call.dig('function', 'name').to_s
raw_args = tool_call.dig('function', 'arguments') || '{}'

result =
Expand All @@ -151,8 +204,8 @@ def self.execute_tool_call(tool_call, tool_handlers)
rescue JSON::ParserError
# Short-circuit: the handler never sees malformed args. Tell the model the
# tool-call payload was invalid so it can retry with valid JSON, and log the
# raw arguments locally for debugging without forwarding them to the API.
UI.error("Invalid JSON arguments for tool '#{name}'. Raw payload: #{raw_args}")
# local failure without recording raw arguments that might contain secrets.
UI.error("Invalid JSON arguments for tool '#{name}' in tool call '#{tool_call['id']}'. Raw payload omitted because it may contain secrets.")
{ error: "Invalid JSON arguments for tool '#{name}' — payload could not be parsed. Retry with valid JSON." }
end

Expand All @@ -163,6 +216,40 @@ def self.execute_tool_call(tool_call, tool_handlers)
}
end

# True when `tool_call` (a string-keyed Hash from the API response) is an
# executable function call: type 'function' with a Hash `function` payload
# carrying a non-empty name.
def self.function_tool_call?(tool_call)
  function_payload = tool_call['function']

  tool_call['type'] == 'function' &&
    function_payload.is_a?(Hash) &&
    valid_tool_name?(function_payload['name'])
end

# Builds the `role: tool` message sent back to the model when a tool call is
# not executable — either its type is not 'function', or it is a function call
# with no usable `function.name`. Logs the problem locally via `UI.error` and
# returns a structured `{ error: ... }` payload so the model can recover.
def self.unsupported_tool_call_result(tool_call)
  call_type = tool_call['type'] || '<missing>'

  if call_type == 'function'
    model_error = 'Function tool call is missing a non-empty function.name.'
    UI.error("Invalid OpenAI function tool call '#{tool_call['id']}': missing a non-empty function.name.")
  else
    model_error = "Unsupported tool call type '#{call_type}'. Only function tool calls are supported."
    UI.error("Unsupported OpenAI tool call type '#{call_type}' in tool call '#{tool_call['id']}'. Only function tool calls are supported.")
  end

  {
    role: 'tool',
    tool_call_id: tool_call['id'],
    content: serialize_tool_result(
      name: call_type,
      result: { error: model_error }
    )
  }
end

# Serializes a tool result to a JSON string. Handlers are contracted to return
# JSON-serializable values, but a buggy handler might return something like a
# `Pathname`, `Proc`, or a custom object whose `to_json` raises. Failing the
Expand All @@ -175,7 +262,7 @@ def self.execute_tool_call(tool_call, tool_handlers)
# Serializes a handler's `result` to a JSON string for the `role: tool` message.
# When serialization raises (e.g. a handler returned a Proc or other object
# whose `to_json` fails), logs only the exception class — the message is
# withheld because it may contain secrets — and returns a JSON error payload
# naming the offending class instead of aborting the conversation.
def self.serialize_tool_result(name:, result:)
  begin
    JSON.generate(result)
  rescue StandardError => serialization_error
    UI.error("Could not serialize tool result for '#{name}': #{serialization_error.class}. Result class: #{result.class}. Error message omitted because it may contain secrets.")
    JSON.generate({ error: "Tool result for '#{name}' could not be serialized to JSON. Returned class: #{result.class}." })
  end
end

Expand All @@ -185,9 +272,9 @@ def self.serialize_tool_result(name:, result:)
#
# - Missing or non-callable handler: structured `{ error: ... }` so the model can recover.
# - Handler raised: structured `{ error:, exception: }` carrying only the exception class
# so the model can see the failure category and adjust. The full message and backtrace
# are logged locally via `UI.error` but NOT forwarded to the model, because tool
# results are sent to OpenAI and handler exception messages can contain secrets
# so the model can see the failure category and adjust. The exception message and
# backtrace are intentionally omitted from local logs and from the model response
# because tool results and CI logs can expose release secrets
# (tokens, file contents, internal API responses). The loop keeps going rather than
# aborting the lane mid-conversation — the model is the better judge of whether the
# failure is recoverable than a global `rescue` here.
Expand All @@ -198,7 +285,7 @@ def self.invoke_tool_handler(name:, handler:, args:)
begin
handler.call(args)
rescue StandardError => e
UI.error("Handler for tool '#{name}' raised #{e.class}: #{e.message}\n#{e.backtrace&.first(5)&.join("\n")}")
UI.error("Handler for tool '#{name}' raised #{e.class}. Error message and backtrace omitted because they may contain secrets.")
{ error: "Handler for tool '#{name}' raised an exception", exception: e.class.name }
end
end
Expand Down Expand Up @@ -228,7 +315,8 @@ def self.details
When `tools` and `tool_handlers` are provided, the action runs a tool-use (function-calling) loop:
on each turn, if the model calls one or more tools, the corresponding handler is invoked locally
and its return value is sent back to the model as a `role: tool` message. The loop ends when the
model returns a plain text response, or when `max_tool_iterations` is reached.
model returns a plain text response, or before executing tool calls beyond `max_tool_iterations`.
The model gets one final API turn to answer after the last permitted local tool execution round.
DETAILS
end

Expand Down Expand Up @@ -312,13 +400,14 @@ def self.available_options
default_value: DEFAULT_MODEL,
type: String),
FastlaneCore::ConfigItem.new(key: :tools,
description: 'Optional array of tool (function-calling) definitions in OpenAI format. ' \
description: 'Optional array of OpenAI function tool definitions. Each definition must have a non-empty `function.name`. ' \
'When provided, the action runs a tool-use loop',
optional: true,
default_value: nil,
type: Array,
verify_block: proc do |value|
UI.user_error!('Parameter `tools` must be a non-empty Array when provided') if value.empty?
validate_tools_array!(value)
validate_tools!(value)
end),
FastlaneCore::ConfigItem.new(key: :tool_handlers,
description: 'Hash of tool name to a callable (e.g. a Proc) invoked when the model calls that tool. ' \
Expand All @@ -332,13 +421,14 @@ def self.available_options
UI.user_error!("Parameter `tool_handlers` values must respond to :call. Non-callable handlers: #{non_callable.keys}") if non_callable.any?
end),
FastlaneCore::ConfigItem.new(key: :max_tool_iterations,
description: 'Maximum number of tool-use loop iterations before the action fails. ' \
description: 'Maximum number of local tool execution rounds before the action fails. ' \
'The model can receive one final API turn to answer after the last permitted tool result. ' \
'Only used when `tools` are provided',
optional: true,
default_value: DEFAULT_MAX_TOOL_ITERATIONS,
type: Integer,
verify_block: proc do |value|
UI.user_error!("Parameter `max_tool_iterations` must be >= 1 (got #{value})") if value < 1
validate_max_tool_iterations!(value)
end),
]
end
Expand Down
Loading