2 changes: 1 addition & 1 deletion .github/actions/deploy-integrations/action.yml
@@ -84,7 +84,7 @@ runs:
integration=$(basename $integration_path)
exists=$(./.github/scripts/integration-exists.sh $integration)

base_command="bp deploy -v -y --noBuild --public --allowDeprecated $dryrun"
base_command="bp deploy -v -y --noBuild --visibility public --allowDeprecated $dryrun"

upload_sandbox_scripts=false
if [ $exists -eq 0 ]; then
2 changes: 1 addition & 1 deletion .github/actions/deploy-interfaces/action.yml
@@ -54,7 +54,7 @@ runs:
interface=$(basename $interface_path)
exists=$(./.github/scripts/interface-exists.sh $interface)

base_command="bp deploy -v -y --public"
base_command="bp deploy -v -y --visibility public"
if [ $exists -eq 0 ]; then
echo -e "\nDeploying interface: ### $interface ###\n"
pnpm retry -n 2 -- pnpm -F "{interfaces/$interface}" -c exec -- "$base_command"
2 changes: 1 addition & 1 deletion .github/actions/deploy-plugins/action.yml
@@ -54,7 +54,7 @@ runs:
plugin=$(basename $plugin_path)
exists=$(./.github/scripts/plugin-exists.sh $plugin)

base_command="bp deploy -v -y --public"
base_command="bp deploy -v -y --visibility public"

if [ $exists -eq 0 ]; then
echo -e "\nDeploying plugin: ### $plugin ###\n"
3 changes: 2 additions & 1 deletion integrations/chat/Dockerfile
@@ -27,7 +27,8 @@ COPY patches/source-map-js@1.2.1.patch patches/source-map-js@1.2.1.patch
# install
RUN pnpm install --frozen-lockfile
RUN pnpm install --frozen-lockfile --dir integrations/chat
- RUN pnpm --filter @botpress/sdk add @bpinternal/zui --save-prod
+ RUN pnpm --dir packages/sdk add @bpinternal/zui --save-prod
+ RUN pnpm --dir packages/cli add @botpress/sdk@workspace:* --workspace
RUN pnpm install --frozen-lockfile --dir packages/sdk
RUN pnpm install --frozen-lockfile --dir packages/cli

2 changes: 1 addition & 1 deletion integrations/google-ai/integration.definition.ts
@@ -6,7 +6,7 @@ export default new IntegrationDefinition({
name: 'google-ai',
title: 'Google AI',
description: 'Gain access to Gemini models for content generation, chat responses, and advanced language tasks.',
- version: '6.0.4',
+ version: '7.0.0',
readme: 'hub.md',
icon: 'icon.svg',
entities: {
8 changes: 7 additions & 1 deletion integrations/google-ai/src/actions/generate-content.ts
@@ -126,7 +126,13 @@ async function buildGenerateContentRequest(
}
}

- const thinkingBudget = ThinkingModeBudgetTokens[input.reasoningEffort ?? 'none'] // Default to not use reasoning as Gemini 2.5+ models use optional reasoning
+ let defaultReasoningEffort: ReasoningEffort = 'none'
+ if (modelId === 'gemini-3-pro-preview') {
+   // Gemini 3 Pro doesn't support disabling reasoning, so we use the lowest reasoning effort by default.
+   defaultReasoningEffort = 'low'
+ }
+
+ const thinkingBudget = ThinkingModeBudgetTokens[input.reasoningEffort ?? defaultReasoningEffort]
const modelSupportsThinking = modelId !== 'models/gemini-2.0-flash' // Gemini 2.0 doesn't support thinking mode

return {
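For context, here is a minimal sketch of what the `ReasoningEffort` union and `ThinkingModeBudgetTokens` map used above might look like. Neither definition appears in this diff, so the member names and token budgets below are assumptions for illustration only:

```ts
// Hypothetical definitions -- the real ones live elsewhere in the integration,
// and the budget values here are purely illustrative.
type ReasoningEffort = 'none' | 'low' | 'medium' | 'high'

const ThinkingModeBudgetTokens: Record<ReasoningEffort, number> = {
  none: 0, // disables thinking on models that allow turning it off
  low: 1024,
  medium: 8192,
  high: 24_576,
}

// With these assumed values, the new default resolves as follows:
//   gemini-3-pro-preview, no reasoningEffort provided -> budget 1024 (reasoning cannot be disabled)
//   any other model, no reasoningEffort provided      -> budget 0 (reasoning off)
```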
31 changes: 31 additions & 0 deletions integrations/google-ai/src/index.ts
@@ -9,6 +9,37 @@ const googleAIClient = new GoogleGenAI({ apiKey: bp.secrets.GOOGLE_AI_API_KEY })
const DEFAULT_LANGUAGE_MODEL_ID: ModelId = 'models/gemini-2.0-flash'

const languageModels: Record<ModelId, llm.ModelDetails> = {
+ 'gemini-3-pro-preview': {
+   name: 'Gemini 3 Pro (Preview)',
+   description:
+     "One of the best models for multimodal understanding, and Google's most powerful agentic and vibe-coding model yet, delivering richer visuals and deeper interactivity, built on a foundation of state-of-the-art reasoning.",
+   tags: ['preview', 'reasoning', 'agents', 'general-purpose', 'vision'],
+   input: {
+     costPer1MTokens: 2,
+     // Note: Gemini 3 input token limits are actually much higher than the limit enforced below. We're capping it for now because Google uses tiered pricing that increases for prompts longer than a certain number of tokens, while our model pricing is a flat price per 1M tokens regardless of prompt size (the standard across all major LLM providers except Google AI).
+     // Reference: https://ai.google.dev/gemini-api/docs/pricing
+     maxTokens: 200_000,
+   },
+   output: {
+     costPer1MTokens: 12,
+     maxTokens: 65_536,
+   },
+ },
+ 'gemini-3-flash-preview': {
+   name: 'Gemini 3 Flash (Preview)',
+   description: "Google's most balanced model built for speed, scale, and frontier intelligence.",
+   tags: ['preview', 'reasoning', 'agents', 'general-purpose', 'vision'],
+   input: {
+     costPer1MTokens: 0.5,
+     // Note: Gemini 3 input token limits are actually much higher than the limit enforced below. We're capping it for now because Google uses tiered pricing that increases for prompts longer than a certain number of tokens, while our model pricing is a flat price per 1M tokens regardless of prompt size (the standard across all major LLM providers except Google AI).
+     // Reference: https://ai.google.dev/gemini-api/docs/pricing
+     maxTokens: 200_000,
+   },
+   output: {
+     costPer1MTokens: 3,
+     maxTokens: 65_536,
+   },
+ },
'gemini-2.5-flash': {
name: 'Gemini 2.5 Flash',
description:
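The note in the new model entries explains the 200k input cap: the integration prices usage at a flat rate per 1M tokens, while Google applies tiered pricing above a prompt-size threshold. A small sketch of how that flat rate maps to a per-call cost estimate (the `estimateCostUsd` helper is hypothetical, not part of this PR):

```ts
type FlatPricing = {
  input: { costPer1MTokens: number }
  output: { costPer1MTokens: number }
}

// Pricing from the 'gemini-3-pro-preview' entry above: $2 / 1M input tokens, $12 / 1M output tokens.
const gemini3ProPricing: FlatPricing = {
  input: { costPer1MTokens: 2 },
  output: { costPer1MTokens: 12 },
}

function estimateCostUsd(pricing: FlatPricing, inputTokens: number, outputTokens: number): number {
  return (
    (inputTokens / 1_000_000) * pricing.input.costPer1MTokens +
    (outputTokens / 1_000_000) * pricing.output.costPer1MTokens
  )
}

// A 150k-token prompt with a 2k-token completion:
// (150_000 / 1e6) * 2 + (2_000 / 1e6) * 12 = 0.30 + 0.024 = 0.324 USD
console.log(estimateCostUsd(gemini3ProPricing, 150_000, 2_000)) // ~0.324
```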
8 changes: 7 additions & 1 deletion integrations/google-ai/src/schemas.ts
@@ -3,7 +3,13 @@ import { z } from '@botpress/sdk'
export const DefaultModelId: ModelId = 'gemini-2.5-flash'

export const ModelId = z
- .enum(['gemini-2.5-flash', 'gemini-2.5-pro', 'models/gemini-2.0-flash'])
+ .enum([
+   'gemini-3-pro-preview',
+   'gemini-3-flash-preview',
+   'gemini-2.5-flash',
+   'gemini-2.5-pro',
+   'models/gemini-2.0-flash',
+ ])
.describe('Model to use for content generation')
.placeholder(DefaultModelId)
export type ModelId = z.infer<typeof ModelId>
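A brief usage sketch for the updated schema, assuming the `@botpress/sdk` zod wrapper exposes the standard `safeParse` API (the surrounding logic is illustrative, not part of this PR):

```ts
const candidate: unknown = 'gemini-3-flash-preview'

const parsed = ModelId.safeParse(candidate)
if (parsed.success) {
  // parsed.data is narrowed to the ModelId union, e.g. 'gemini-3-flash-preview'
  console.log(`Using model: ${parsed.data}`)
} else {
  // Unknown model IDs fall back to the default declared above
  console.log(`Unknown model, falling back to: ${DefaultModelId}`)
}
```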
2 changes: 1 addition & 1 deletion readme.md
@@ -80,7 +80,7 @@ This will deploy your integration's current version to your workspace and make i
By default, all integrations are private to the workspace they have been deployed in. When you are ready to share your version with the community, you can make it public by running:

```sh
- bp deploy --public
+ bp deploy --visibility public
```

This will make your integration available to all Botpress users on the [Botpress Hub](https://app.botpress.cloud/hub). Once a version of your integration is public, it cannot be updated again.