Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 2 additions & 0 deletions core/control-plane/schema.ts
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,7 @@ const modelDescriptionSchema = z.object({
"ovhcloud",
"nebius",
"siliconflow",
"tensorix",
"scaleway",
"watsonx",
]),
Expand Down Expand Up @@ -92,6 +93,7 @@ const embeddingsProviderSchema = z.object({
"ovhcloud",
"nebius",
"siliconflow",
"tensorix",
"scaleway",
"watsonx",
]),
Expand Down
3 changes: 3 additions & 0 deletions core/llm/autodetect.ts
Original file line number Diff line number Diff line change
Expand Up @@ -72,6 +72,7 @@ const PROVIDER_HANDLES_TEMPLATING: string[] = [
"docker",
"nous",
"zAI",
"tensorix",
// TODO add these, change to inverted logic so only the ones that need templating are hardcoded
// Asksage.ts
// Azure.ts
Expand Down Expand Up @@ -130,6 +131,7 @@ const PROVIDER_SUPPORTS_IMAGES: string[] = [
"ovhcloud",
"watsonx",
"zAI",
"tensorix",
];

const MODEL_SUPPORTS_IMAGES: RegExp[] = [
Expand Down Expand Up @@ -248,6 +250,7 @@ const PARALLEL_PROVIDERS: string[] = [
"vertexai",
"function-network",
"scaleway",
"tensorix",
];

function llmCanGenerateInParallel(provider: string, model: string): boolean {
Expand Down
14 changes: 14 additions & 0 deletions core/llm/llms/Tensorix.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,14 @@
import OpenAI from "./OpenAI.js";

import type { LLMOptions } from "../../index.js";

/**
 * Provider for the Tensorix OpenAI-compatible API gateway
 * (https://tensorix.ai), which exposes DeepSeek, Llama, Qwen, GLM, and
 * other hosted models behind a single /v1 endpoint.
 *
 * Because the gateway speaks the OpenAI wire protocol, all request and
 * response handling is inherited from the OpenAI base class; only the
 * provider identity and default connection options differ.
 */
class Tensorix extends OpenAI {
  // Identifier used in user config ("provider: tensorix") and in the
  // provider registries elsewhere in the codebase.
  static providerName = "tensorix";

  // Defaults applied when the user's config omits these fields.
  static defaultOptions: Partial<LLMOptions> = {
    // Use the chat-completions endpoint, not the legacy /completions one.
    useLegacyCompletionsEndpoint: false,
    apiBase: "https://api.tensorix.ai/v1/",
    model: "deepseek/deepseek-chat-v3.1",
  };
}

export default Tensorix;
2 changes: 2 additions & 0 deletions core/llm/llms/index.ts
Original file line number Diff line number Diff line change
Expand Up @@ -56,6 +56,7 @@ import SageMaker from "./SageMaker";
import SambaNova from "./SambaNova";
import Scaleway from "./Scaleway";
import SiliconFlow from "./SiliconFlow";
import Tensorix from "./Tensorix";
import ContinueProxy from "./stubs/ContinueProxy";
import TARS from "./TARS";
import TestLLM from "./Test";
Expand Down Expand Up @@ -121,6 +122,7 @@ export const LLMClasses = [
VertexAI,
xAI,
SiliconFlow,
Tensorix,
Scaleway,
Relace,
Inception,
Expand Down
93 changes: 93 additions & 0 deletions docs/customize/model-providers/more/tensorix.mdx
Original file line number Diff line number Diff line change
@@ -0,0 +1,93 @@
---
title: "Tensorix"
description: "Configure Tensorix with Continue to access DeepSeek, Llama, Qwen, GLM, and other models through a single OpenAI-compatible API gateway"
---

[Tensorix](https://tensorix.ai) is an OpenAI-compatible API gateway that provides access to DeepSeek, Llama, Qwen, GLM, MiniMax, and other models. Pricing is pay-as-you-go, and no subscription is required.

<Info>
You can get an API key from
[app.tensorix.ai](https://app.tensorix.ai).
</Info>

## Chat Model

We recommend configuring **deepseek/deepseek-chat-v3.1** as your chat model.

<Tabs>
<Tab title="YAML">
```yaml title="config.yaml"
name: My Config
version: 0.0.1
schema: v1

models:
- name: DeepSeek Chat
provider: tensorix
model: deepseek/deepseek-chat-v3.1
apiKey: <YOUR_TENSORIX_API_KEY>
roles:
- chat
```
</Tab>
<Tab title="JSON">
```json title="config.json"
{
"models": [
{
"title": "DeepSeek Chat",
"provider": "tensorix",
"model": "deepseek/deepseek-chat-v3.1",
"apiKey": "<YOUR_TENSORIX_API_KEY>"
}
]
}
```
</Tab>
</Tabs>

## Autocomplete Model

We recommend configuring **deepseek/deepseek-chat-v3.1** as your autocomplete model.

<Tabs>
<Tab title="YAML">
```yaml title="config.yaml"
name: My Config
version: 0.0.1
schema: v1

models:
- name: DeepSeek Chat
provider: tensorix
model: deepseek/deepseek-chat-v3.1
apiKey: <YOUR_TENSORIX_API_KEY>
roles:
- autocomplete
```
</Tab>
<Tab title="JSON">
```json title="config.json"
{
"models": [
{
"title": "DeepSeek Chat",
"provider": "tensorix",
"model": "deepseek/deepseek-chat-v3.1",
"apiKey": "<YOUR_TENSORIX_API_KEY>"
}
],
"tabAutocompleteModel": {
"title": "DeepSeek Chat",
"provider": "tensorix",
"model": "deepseek/deepseek-chat-v3.1",
"apiKey": "<YOUR_TENSORIX_API_KEY>"
}
}
```
</Tab>
</Tabs>

## Embeddings Model

Tensorix provides access to various embedding models. [Click here](https://tensorix.ai/models) to see a list of available models.

[View the source](https://github.com/continuedev/continue/blob/main/core/llm/llms/Tensorix.ts)
1 change: 1 addition & 0 deletions docs/docs.json
Original file line number Diff line number Diff line change
Expand Up @@ -179,6 +179,7 @@
"customize/model-providers/more/moonshot",
"customize/model-providers/more/nous",
"customize/model-providers/more/nvidia",
"customize/model-providers/more/tensorix",
"customize/model-providers/more/together",
"customize/model-providers/more/xAI",
"customize/model-providers/more/zai"
Expand Down
9 changes: 7 additions & 2 deletions extensions/vscode/config_schema.json
Original file line number Diff line number Diff line change
Expand Up @@ -227,6 +227,7 @@
"kindo",
"moonshot",
"siliconflow",
"tensorix",
"function-network",
"scaleway",
"relace",
Expand Down Expand Up @@ -278,6 +279,7 @@
"### Secure AI management software that helps enterprises adopt and manage AI across their workforce. To get started, obtain an API key from [the Kindo console](https://app.kindo.ai/settings/api), and see the [website](https://app.kindo.ai//)",
"### Moonshot\nTo get started with Moonshot AI, obtain your API key from [Moonshot AI](https://platform.moonshot.cn/). Moonshot AI provides high-quality large language models with competitive pricing.\n> [Reference](https://platform.moonshot.cn/docs/api)",
"### SiliconFlow\nTo get started with SiliconFlow, obtain your API key from [SiliconCloud](https://cloud.siliconflow.cn/account/ak). SiliconCloud provides cost-effective GenAI services based on excellent open source basic models.\n> [Models](https://siliconflow.cn/zh-cn/models)",
"### Tensorix\nTensorix is an OpenAI-compatible API gateway with access to DeepSeek, Llama, Qwen, GLM, and other models. Pay-as-you-go with no subscription required.\nTo get started, create an account and get an API key at [app.tensorix.ai](https://app.tensorix.ai).\n> [Models](https://tensorix.ai/models)",
"### Function Network offers private, affordable user-owned AI\nTo get started with Function Network, obtain your API key from [Function Network](https://www.function.network/join-waitlist). Function Network provides a variety of models for chat, completion, and embeddings.",
"### Scaleway\n Generative APIs are serverless endpoints for the most popular AI models.\nHosted in European data centers and priced competitively per million tokens used, models served by Scaleway are ideal for users requiring low latency, full data privacy, and 100% compliance with EU AI Act. To get access to the Scaleway Generative APIs, read the [Quickstart guide](https://www.scaleway.com/en/docs/ai-data/generative-apis/quickstart/) and get a [valid API key](https://www.scaleway.com/en/docs/identity-and-access-management/iam/how-to/create-api-keys/).",
"### Relace\n Relace provides a fast apply model. To get started, obtain an API key from [here](https://app.relace.ai/settings/api-keys).",
Expand Down Expand Up @@ -2867,6 +2869,7 @@
"watsonx",
"lmstudio",
"siliconflow",
"tensorix",
"function-network",
"scaleway",
"ovhcloud"
Expand Down Expand Up @@ -2932,7 +2935,8 @@
"voyage",
"nvidia",
"gemini",
"siliconflow"
"siliconflow",
"tensorix"
]
}
},
Expand Down Expand Up @@ -3000,7 +3004,8 @@
"watsonx",
"llm",
"huggingface-tei",
"siliconflow"
"siliconflow",
"tensorix"
]
},
"params": {
Expand Down
20 changes: 20 additions & 0 deletions gui/src/pages/AddNewModel/configs/providers.ts
Original file line number Diff line number Diff line change
Expand Up @@ -1218,6 +1218,26 @@ To get started, [register](https://dataplatform.cloud.ibm.com/registration/stepo
],
apiKeyUrl: "https://cloud.siliconflow.cn/account/ak",
},
tensorix: {
title: "Tensorix",
provider: "tensorix",
description:
"Tensorix is an OpenAI-compatible API gateway with access to DeepSeek, Llama, Qwen, GLM, and more.",
longDescription:
"To get started with Tensorix, create an account and get an API key at [app.tensorix.ai](https://app.tensorix.ai).",
tags: [ModelProviderTags.RequiresApiKey, ModelProviderTags.OpenSource],
collectInputFor: [
{
inputType: "text",
key: "apiKey",
label: "API Key",
placeholder: "Enter your Tensorix API key",
required: true,
},
],
packages: [{ ...models.AUTODETECT }],
apiKeyUrl: "https://app.tensorix.ai",
},
venice: {
title: "Venice",
provider: "venice",
Expand Down
2 changes: 2 additions & 0 deletions packages/openai-adapters/src/index.ts
Original file line number Diff line number Diff line change
Expand Up @@ -172,6 +172,8 @@ export function constructLlmApi(config: LLMConfig): BaseLlmApi | undefined {
return openAICompatible("https://api.studio.nebius.ai/v1/", config);
case "function-network":
return openAICompatible("https://api.function.network/v1/", config);
case "tensorix":
return openAICompatible("https://api.tensorix.ai/v1/", config);
case "openrouter":
return new OpenRouterApi(config);
case "llama.cpp":
Expand Down
1 change: 1 addition & 0 deletions packages/openai-adapters/src/types.ts
Original file line number Diff line number Diff line change
Expand Up @@ -57,6 +57,7 @@ export const OpenAIConfigSchema = BasePlusConfig.extend({
z.literal("xAI"),
z.literal("zAI"),
z.literal("scaleway"),
z.literal("tensorix"),
z.literal("ncompass"),
z.literal("relace"),
z.literal("huggingface-inference-api"),
Expand Down
Loading