Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,7 @@ _None_

### New Features

_None_
- `openai_ask`: support OpenAI tool-use (function calling) and model overrides. New optional parameters: `tools`, `tool_handlers`, and `max_tool_iterations` enable a multi-turn loop where the model invokes locally-defined tools and receives `role: tool` results until it produces a plain text response or hits the iteration cap; `model` overrides the previously hardcoded `gpt-4o`. Tool-handler keys may be strings or symbols (normalized internally), exceptions raised inside a handler are surfaced back to the model as structured `{ error:, exception: }` tool results (carrying only the exception class — the message and backtrace are logged locally but not forwarded, since handler errors can contain secrets) rather than crashing the lane, and the new parameters validate their inputs (non-empty Array, callable handlers, iteration cap >= 1). Backwards-compatible — when `tools` is omitted, behavior is unchanged. Also fixes a long-standing `NameError` when generating action documentation (documented examples are now non-interpolated heredocs) and a `EXEMPLE` typo in the example HEREDOC tags. [#716]

### Bug Fixes

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,8 @@ module Fastlane
module Actions
class OpenaiAskAction < Action
OPENAI_API_ENDPOINT = URI('https://api.openai.com/v1/chat/completions').freeze
DEFAULT_MAX_TOOL_ITERATIONS = 5
DEFAULT_MODEL = 'gpt-4o'

PREDEFINED_PROMPTS = {
release_notes: <<~PROMPT
Expand All @@ -24,27 +26,68 @@ def self.run(params)
prompt = params[:prompt]
prompt = PREDEFINED_PROMPTS[prompt] if PREDEFINED_PROMPTS.key?(prompt)
question = params[:question]
model = params[:model] || DEFAULT_MODEL
tools = params[:tools]
# Tool names from the OpenAI API are always JSON strings. Normalize handler keys so
# callers can register handlers with either string or symbol keys without surprises.
tool_handlers = (params[:tool_handlers] || {}).transform_keys(&:to_s)
max_tool_iterations = params[:max_tool_iterations] || DEFAULT_MAX_TOOL_ITERATIONS

headers = {
'Content-Type': 'application/json',
Authorization: "Bearer #{api_token}"
}
body = request_body(prompt: prompt, question: question)

response = Net::HTTP.post(OPENAI_API_ENDPOINT, body, headers)
# Backwards-compatible single-shot path when no tools are provided.
if tools.nil? || tools.empty?
body = request_body(prompt: prompt, question: question, model: model)
response = Net::HTTP.post(OPENAI_API_ENDPOINT, body, headers)
return parse_text_response(response)
end

case response
when Net::HTTPOK
json = JSON.parse(response.body)
json['choices']&.first&.dig('message', 'content')
else
UI.user_error!("Error in OpenAI API response: #{response}. #{response.body}")
run_with_tools(
prompt: prompt,
question: question,
model: model,
tools: tools,
tool_handlers: tool_handlers,
max_tool_iterations: max_tool_iterations,
headers: headers
)
end

# Runs the multi-turn tool-use (function-calling) conversation loop.
#
# Each turn posts the accumulated conversation to OpenAI. If the model answers
# with tool calls, every call is dispatched to its local handler and the results
# are appended as `role: tool` messages; otherwise the assistant's text content
# is the final answer. Aborts the lane if the model is still calling tools after
# `max_tool_iterations` turns.
def self.run_with_tools(prompt:, question:, model:, tools:, tool_handlers:, max_tool_iterations:, headers:)
  # Seed the conversation; `format_message` returns nil for blank text, so
  # either the system prompt or the user question may be absent.
  conversation = [
    format_message(role: 'system', text: prompt),
    format_message(role: 'user', text: question),
  ].compact

  max_tool_iterations.times do
    payload = request_body_with_messages(messages: conversation, tools: tools, model: model)
    api_response = Net::HTTP.post(OPENAI_API_ENDPOINT, payload, headers)
    assistant = parse_assistant_message(api_response)

    calls = assistant['tool_calls']
    # A turn without tool calls is the model's final, plain-text answer.
    return assistant['content'] if calls.nil? || calls.empty?

    # Echo the assistant's tool-call turn back verbatim, then answer each
    # call with the `role: tool` result produced by the matching handler.
    conversation << assistant
    calls.each { |call| conversation << execute_tool_call(call, tool_handlers) }
  end

  UI.user_error!(
    "OpenAI tool-use loop did not terminate after #{max_tool_iterations} iterations. " \
    'Increase `max_tool_iterations` or check that your prompt instructs the model to stop calling tools.'
  )
end

def self.request_body(prompt:, question:)
def self.request_body(prompt:, question:, model: DEFAULT_MODEL)
{
model: 'gpt-4o',
model: model,
response_format: { type: 'text' },
temperature: 1,
max_tokens: 2048,
Expand All @@ -56,6 +99,18 @@ def self.request_body(prompt:, question:)
}.to_json
end

# Builds the JSON request body for a tool-use turn: full conversation history
# plus the tool definitions. Sampling settings mirror `request_body`.
def self.request_body_with_messages(messages:, tools:, model: DEFAULT_MODEL)
  payload = {
    model: model,
    response_format: { type: 'text' },
    temperature: 1,
    max_tokens: 2048,
    top_p: 1
  }
  payload[:messages] = messages
  payload[:tools] = tools
  JSON.generate(payload)
end

def self.format_message(role:, text:)
return nil if text.nil? || text.empty?

Expand All @@ -65,6 +120,89 @@ def self.format_message(role:, text:)
}
end

# Extracts the assistant's text content from a chat-completions response.
# Returns nil when the 200 payload has no choices/content; aborts the lane
# (via `UI.user_error!`) on any non-200 response.
def self.parse_text_response(response)
  if response.is_a?(Net::HTTPOK)
    parsed = JSON.parse(response.body)
    first_choice = parsed['choices']&.first
    first_choice&.dig('message', 'content')
  else
    UI.user_error!("Error in OpenAI API response: #{response}. #{response.body}")
  end
end

# Returns the full assistant `message` object (content and/or tool_calls) from
# a chat-completions response, or an empty Hash when the 200 payload has no
# choices. Aborts the lane (via `UI.user_error!`) on any non-200 response.
def self.parse_assistant_message(response)
  if response.is_a?(Net::HTTPOK)
    parsed = JSON.parse(response.body)
    message = parsed['choices']&.first&.dig('message')
    message || {}
  else
    UI.user_error!("Error in OpenAI API response: #{response}. #{response.body}")
  end
end

# Executes one tool call from the assistant and wraps the outcome in a
# `role: tool` message keyed by the call's id, ready to append to the
# conversation.
def self.execute_tool_call(tool_call, tool_handlers)
  tool_name = tool_call.dig('function', 'name')
  arguments_json = tool_call.dig('function', 'arguments') || '{}'

  begin
    parsed_args = JSON.parse(arguments_json)
    outcome = invoke_tool_handler(name: tool_name, handler: tool_handlers[tool_name], args: parsed_args)
  rescue JSON::ParserError
    # Short-circuit: the handler never sees malformed args. Tell the model the
    # payload was invalid so it can retry with valid JSON; log the raw
    # arguments locally only (they are not forwarded to the API).
    UI.error("Invalid JSON arguments for tool '#{tool_name}'. Raw payload: #{arguments_json}")
    outcome = { error: "Invalid JSON arguments for tool '#{tool_name}' — payload could not be parsed. Retry with valid JSON." }
  end

  {
    role: 'tool',
    tool_call_id: tool_call['id'],
    content: serialize_tool_result(name: tool_name, result: outcome)
  }
end

# Serializes a tool result to a JSON string. Handlers are contracted to return
# JSON-serializable values, but a buggy one might return e.g. a `Pathname`,
# `Proc`, or an object whose `to_json` raises. Rather than failing the whole
# conversation, log locally and send a structured `{ error: ... }` back so the
# model can recover. The returned object's class name is exposed (handler
# authorship is local, not secret), but the exception message is NOT forwarded
# — handler-returned objects can carry secrets (same reasoning as
# `invoke_tool_handler`).
def self.serialize_tool_result(name:, result:)
  begin
    JSON.generate(result)
  rescue StandardError => serialization_error
    UI.error("Could not serialize tool result for '#{name}': #{serialization_error.class}: #{serialization_error.message}. Result class: #{result.class}")
    JSON.generate({ error: "Tool result for '#{name}' could not be serialized to JSON. Returned class: #{result.class}." })
  end
end

# Invokes a tool handler safely. Returns a JSON-serializable value that will be
# sent back to the model as the `content` of a `role: tool` message (the value
# may be a Hash, Array, scalar, etc. — whatever the handler returns).
#
# - Missing or non-callable handler: structured `{ error: ... }` so the model can recover.
# - Handler raised: structured `{ error:, exception: }` carrying only the exception class
#   so the model can see the failure category and adjust. The full message and backtrace
#   are logged locally via `UI.error` but NOT forwarded to the model, because tool
#   results are sent to OpenAI and handler exception messages can contain secrets
#   (tokens, file contents, internal API responses). The loop keeps going rather than
#   aborting the lane mid-conversation — the model is the better judge of whether the
#   failure is recoverable than a global `rescue` here.
def self.invoke_tool_handler(name:, handler:, args:)
  return { error: "No handler defined for tool '#{name}'" } if handler.nil?
  return { error: "Handler for tool '#{name}' is not callable (got #{handler.class})" } unless handler.respond_to?(:call)

  begin
    handler.call(args)
  rescue StandardError => e
    UI.error("Handler for tool '#{name}' raised #{e.class}: #{e.message}\n#{e.backtrace&.first(5)&.join("\n")}")
    { error: "Handler for tool '#{name}' raised an exception", exception: e.class.name }
  end
end

#####################################################
# @!group Documentation
#####################################################
Expand All @@ -78,27 +216,62 @@ def self.authors
end

# Describes the action's return value for generated docs. The scraped diff
# left both the old one-line string and its replacement in place; only the
# updated (tool-use aware) description belongs here.
def self.return_value
  'The response text from the prompt as returned by OpenAI API. ' \
    'When `tools` are provided, returns the assistant content from the first turn that produces a non-tool-call response.'
end

# Long-form action description rendered in the generated fastlane docs
# (e.g. `fastlane action openai_ask`). Returned as a single squiggly-heredoc
# string; the tool-use paragraph documents the loop implemented in
# `run_with_tools`.
def self.details
  <<~DETAILS
    Uses the OpenAI API to generate response to a prompt.
    Can be used to e.g. ask it to generate Release Notes based on a bullet point technical changelog or similar.

    When `tools` and `tool_handlers` are provided, the action runs a tool-use (function-calling) loop:
    on each turn, if the model calls one or more tools, the corresponding handler is invoked locally
    and its return value is sent back to the model as a `role: tool` message. The loop ends when the
    model returns a plain text response, or when `max_tool_iterations` is reached.
  DETAILS
end

# Usage examples for the generated docs. The scraped diff interleaved the old
# misspelled heredoc tags (`EXEMPLE`) with the fixed ones; only the corrected,
# non-interpolated (`<<~'EXAMPLE'`) heredocs belong here — non-interpolation is
# what keeps `#{items}` literal and avoids a NameError during doc generation.
def self.examples
  [
    <<~'EXAMPLE',
      items = extract_release_notes_for_version(version: app_version, release_notes_file_path: 'RELEASE-NOTES.txt')
      nice_changelog = openai_ask(
        prompt: :release_notes, # Uses the pre-crafted prompt for App Store / Play Store release notes
        question: "Help me write release notes for the following items:\n#{items}",
        api_token: get_required_env('OPENAI_API_TOKEN')
      )
      File.write(File.join('fastlane', 'metadata', 'android', 'en-US', 'changelogs', 'default.txt'), nice_changelog)
    EXAMPLE
    <<~'EXAMPLE',
      # Tool-use loop: the model proposes release notes via a tool call; the handler validates
      # length locally and rejects until the model produces text under the limit.
      notes = openai_ask(
        prompt: :release_notes,
        question: "Write release notes for: #{items}. Call the validate_length tool with your draft and iterate until it accepts.",
        api_token: get_required_env('OPENAI_API_TOKEN'),
        tools: [{
          type: 'function',
          function: {
            name: 'validate_length',
            description: 'Validates the length of the proposed release notes against a 350-character budget. ' \
              'Returns `{ ok: true, length: }` if the text fits, or `{ ok: false, length:, max: }` otherwise. ' \
              'Call repeatedly with shorter drafts until it returns ok: true.',
            parameters: {
              type: 'object',
              properties: { text: { type: 'string' } },
              required: ['text']
            }
          }
        }],
        tool_handlers: {
          'validate_length' => ->(args) {
            len = args['text'].length
            len <= 350 ? { ok: true, length: len } : { ok: false, length: len, max: 350 }
          }
        }
      )
    EXAMPLE
  ]
end

Expand Down Expand Up @@ -132,6 +305,41 @@ def self.available_options
optional: false,
sensitive: true,
type: String),
FastlaneCore::ConfigItem.new(key: :model,
description: 'The OpenAI model to send the request to (e.g. `gpt-4o`, `gpt-4o-mini`, `gpt-4.1`). ' \
"Defaults to `#{DEFAULT_MODEL}`",
optional: true,
default_value: DEFAULT_MODEL,
type: String),
FastlaneCore::ConfigItem.new(key: :tools,
description: 'Optional array of tool (function-calling) definitions in OpenAI format. ' \
'When provided, the action runs a tool-use loop',
optional: true,
default_value: nil,
type: Array,
verify_block: proc do |value|
UI.user_error!('Parameter `tools` must be a non-empty Array when provided') if value.empty?
end),
FastlaneCore::ConfigItem.new(key: :tool_handlers,
description: 'Hash of tool name to a callable (e.g. a Proc) invoked when the model calls that tool. ' \
'The callable receives the parsed arguments Hash and must return a JSON-serializable value, ' \
'which is sent back to the model as the tool result',
optional: true,
default_value: nil,
type: Hash,
verify_block: proc do |value|
non_callable = value.reject { |_k, v| v.respond_to?(:call) }
UI.user_error!("Parameter `tool_handlers` values must respond to :call. Non-callable handlers: #{non_callable.keys}") if non_callable.any?
end),
FastlaneCore::ConfigItem.new(key: :max_tool_iterations,
description: 'Maximum number of tool-use loop iterations before the action fails. ' \
'Only used when `tools` are provided',
optional: true,
default_value: DEFAULT_MAX_TOOL_ITERATIONS,
type: Integer,
verify_block: proc do |value|
UI.user_error!("Parameter `max_tool_iterations` must be >= 1 (got #{value})") if value < 1
end),
]
end

Expand Down
Loading