Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 5 additions & 1 deletion README.md
Original file line number Diff line number Diff line change
Expand Up @@ -206,6 +206,7 @@ DeepWiki now implements a flexible provider-based model selection system support
- **OpenRouter**: Access to multiple models via a unified API, including Claude, Llama, Mistral, etc.
- **Azure OpenAI**: Default `gpt-4o`, also supports `o4-mini`, etc.
- **Ollama**: Support for locally running open-source models like `llama3`
- **MiniMax**: Default `MiniMax-M2.7`, also supports `MiniMax-M2.5`, `MiniMax-M2.5-highspeed` (204K context window)

### Environment Variables

Expand All @@ -216,6 +217,7 @@ Each provider requires its corresponding API key environment variables:
GOOGLE_API_KEY=your_google_api_key # Required for Google Gemini models
OPENAI_API_KEY=your_openai_api_key # Required for OpenAI models
OPENROUTER_API_KEY=your_openrouter_api_key # Required for OpenRouter models
MINIMAX_API_KEY=your_minimax_api_key # Required for MiniMax models
AZURE_OPENAI_API_KEY=your_azure_openai_api_key # Required for Azure OpenAI models
AZURE_OPENAI_ENDPOINT=your_azure_openai_endpoint # Required for Azure OpenAI models
AZURE_OPENAI_VERSION=your_azure_openai_version # Required for Azure OpenAI models
Expand All @@ -235,7 +237,7 @@ DEEPWIKI_CONFIG_DIR=/path/to/custom/config/dir # Optional, for custom config fi
DeepWiki uses JSON configuration files to manage various aspects of the system:

1. **`generator.json`**: Configuration for text generation models
- Defines available model providers (Google, OpenAI, OpenRouter, Azure, Ollama)
- Defines available model providers (Google, OpenAI, OpenRouter, Azure, Ollama, MiniMax)
- Specifies default and available models for each provider
- Contains model-specific parameters like temperature and top_p

Expand Down Expand Up @@ -412,6 +414,7 @@ docker-compose up
| `GOOGLE_API_KEY` | Google Gemini API key for AI generation and embeddings | No | Required for Google Gemini models and Google AI embeddings
| `OPENAI_API_KEY` | OpenAI API key for embeddings and models | Conditional | Required if using OpenAI embeddings or models |
| `OPENROUTER_API_KEY` | OpenRouter API key for alternative models | No | Required only if you want to use OpenRouter models |
| `MINIMAX_API_KEY` | MiniMax API key for MiniMax models | No | Required only if you want to use MiniMax models |
| `AWS_ACCESS_KEY_ID` | AWS access key ID for Bedrock | No | Required for Bedrock if not using instance/role-based credentials |
| `AWS_SECRET_ACCESS_KEY` | AWS secret access key for Bedrock | No | Required for Bedrock if not using instance/role-based credentials |
| `AWS_SESSION_TOKEN` | AWS session token for Bedrock (STS) | No | Required when using temporary credentials |
Expand Down Expand Up @@ -462,6 +465,7 @@ docker run -p 8001:8001 -p 3000:3000 \
-e GOOGLE_API_KEY=your_google_api_key \
-e OPENAI_API_KEY=your_openai_api_key \
-e OPENROUTER_API_KEY=your_openrouter_api_key \
-e MINIMAX_API_KEY=your_minimax_api_key \
-e OLLAMA_HOST=your_ollama_host \
-e AZURE_OPENAI_API_KEY=your_azure_openai_api_key \
-e AZURE_OPENAI_ENDPOINT=your_azure_openai_endpoint \
Expand Down
12 changes: 9 additions & 3 deletions api/config.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,12 +13,14 @@
from api.google_embedder_client import GoogleEmbedderClient
from api.azureai_client import AzureAIClient
from api.dashscope_client import DashscopeClient
from api.minimax_client import MiniMaxClient
from adalflow import GoogleGenAIClient, OllamaClient

# Get API keys from environment variables
OPENAI_API_KEY = os.environ.get('OPENAI_API_KEY')
GOOGLE_API_KEY = os.environ.get('GOOGLE_API_KEY')
OPENROUTER_API_KEY = os.environ.get('OPENROUTER_API_KEY')
MINIMAX_API_KEY = os.environ.get('MINIMAX_API_KEY')
AWS_ACCESS_KEY_ID = os.environ.get('AWS_ACCESS_KEY_ID')
AWS_SECRET_ACCESS_KEY = os.environ.get('AWS_SECRET_ACCESS_KEY')
AWS_SESSION_TOKEN = os.environ.get('AWS_SESSION_TOKEN')
Expand All @@ -32,6 +34,8 @@
os.environ["GOOGLE_API_KEY"] = GOOGLE_API_KEY
if OPENROUTER_API_KEY:
os.environ["OPENROUTER_API_KEY"] = OPENROUTER_API_KEY
if MINIMAX_API_KEY:
os.environ["MINIMAX_API_KEY"] = MINIMAX_API_KEY
if AWS_ACCESS_KEY_ID:
os.environ["AWS_ACCESS_KEY_ID"] = AWS_ACCESS_KEY_ID
if AWS_SECRET_ACCESS_KEY:
Expand Down Expand Up @@ -63,7 +67,8 @@
"OllamaClient": OllamaClient,
"BedrockClient": BedrockClient,
"AzureAIClient": AzureAIClient,
"DashscopeClient": DashscopeClient
"DashscopeClient": DashscopeClient,
"MiniMaxClient": MiniMaxClient
}

def replace_env_placeholders(config: Union[Dict[str, Any], List[Any], str, Any]) -> Union[Dict[str, Any], List[Any], str, Any]:
Expand Down Expand Up @@ -131,15 +136,16 @@ def load_generator_config():
if provider_config.get("client_class") in CLIENT_CLASSES:
provider_config["model_client"] = CLIENT_CLASSES[provider_config["client_class"]]
# Fall back to default mapping based on provider_id
elif provider_id in ["google", "openai", "openrouter", "ollama", "bedrock", "azure", "dashscope"]:
elif provider_id in ["google", "openai", "openrouter", "ollama", "bedrock", "azure", "dashscope", "minimax"]:
default_map = {
"google": GoogleGenAIClient,
"openai": OpenAIClient,
"openrouter": OpenRouterClient,
"ollama": OllamaClient,
"bedrock": BedrockClient,
"azure": AzureAIClient,
"dashscope": DashscopeClient
"dashscope": DashscopeClient,
"minimax": MiniMaxClient
}
provider_config["model_client"] = default_map[provider_id]
else:
Expand Down
16 changes: 16 additions & 0 deletions api/config/generator.json
Original file line number Diff line number Diff line change
Expand Up @@ -193,6 +193,22 @@
"top_p": 0.8
}
}
},
"minimax": {
"client_class": "MiniMaxClient",
"default_model": "MiniMax-M2.7",
"supportsCustomModel": true,
"models": {
"MiniMax-M2.7": {
"temperature": 1.0
},
"MiniMax-M2.5": {
"temperature": 1.0
},
"MiniMax-M2.5-highspeed": {
"temperature": 1.0
}
}
}
}
}
Expand Down
95 changes: 95 additions & 0 deletions api/minimax_client.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,95 @@
"""MiniMax ModelClient integration.

MiniMax provides OpenAI-compatible API endpoints for chat completions.
This client extends OpenAIClient with MiniMax-specific defaults.

API Documentation:
- OpenAI Compatible: https://platform.minimax.io/docs/api-reference/text-openai-api

Supported models:
- MiniMax-M2.7: Latest flagship model with enhanced reasoning (204K context)
- MiniMax-M2.5: Peak Performance. Ultimate Value. Master the Complex (204K context)
- MiniMax-M2.5-highspeed: Same performance, faster and more agile (204K context)
"""

import os
import logging
from typing import Dict, Optional, Any, Callable, Literal

from openai.types import Completion
from adalflow.core.types import ModelType

from api.openai_client import OpenAIClient

log = logging.getLogger(__name__)


class MiniMaxClient(OpenAIClient):
    """A component wrapper for the MiniMax API client.

    MiniMax provides an OpenAI-compatible API, so this client extends OpenAIClient
    with MiniMax-specific defaults for base URL and API key environment variable.

    Key constraints (per the MiniMax API):
    - temperature: must be in (0.0, 1.0], cannot be 0. Default is 1.0.
    - response_format: not supported, use prompt engineering instead.

    Args:
        api_key (Optional[str]): MiniMax API key. Defaults to None (reads from env).
        chat_completion_parser (Callable): Optional parser for raw completions;
            passed through to OpenAIClient unchanged.
        input_type (Literal["text", "messages"]): Input format accepted by the
            underlying OpenAIClient. Defaults to "text".
        base_url (Optional[str]): API base URL. Defaults to
            ``DEFAULT_BASE_URL`` (overridable via the ``MINIMAX_BASE_URL`` env var).
        env_base_url_name (str): Env var name for base URL. Defaults to "MINIMAX_BASE_URL".
        env_api_key_name (str): Env var name for API key. Defaults to "MINIMAX_API_KEY".

    References:
        - MiniMax Platform: https://platform.minimax.io
        - API Reference: https://platform.minimax.io/docs/api-reference/text-openai-api
    """

    # Temperature bounds enforced by the MiniMax API: valid range is (0.0, 1.0].
    # Named constants instead of inline magic numbers so the clamping policy is
    # documented and adjustable in one place.
    MIN_TEMPERATURE: float = 0.01
    MAX_TEMPERATURE: float = 1.0

    # Default OpenAI-compatible endpoint; can be overridden via MINIMAX_BASE_URL.
    DEFAULT_BASE_URL: str = "https://api.minimax.io/v1"

    def __init__(
        self,
        api_key: Optional[str] = None,
        chat_completion_parser: Callable[[Completion], Any] = None,
        input_type: Literal["text", "messages"] = "text",
        base_url: Optional[str] = None,
        env_base_url_name: str = "MINIMAX_BASE_URL",
        env_api_key_name: str = "MINIMAX_API_KEY",
    ):
        super().__init__(
            api_key=api_key,
            chat_completion_parser=chat_completion_parser,
            input_type=input_type,
            # Explicit argument wins, then the env var, then the MiniMax default.
            base_url=base_url or os.getenv(env_base_url_name, self.DEFAULT_BASE_URL),
            env_base_url_name=env_base_url_name,
            env_api_key_name=env_api_key_name,
        )

    def convert_inputs_to_api_kwargs(
        self,
        input: Optional[Any] = None,
        model_kwargs: Optional[Dict] = None,
        model_type: ModelType = ModelType.UNDEFINED,
    ) -> Dict:
        """Convert inputs to API kwargs with MiniMax-specific adjustments.

        Clamps ``temperature`` into the (0.0, 1.0] range required by MiniMax and
        removes the unsupported ``response_format`` parameter.

        Args:
            input: Prompt text or message list, forwarded to OpenAIClient.
            model_kwargs: Model parameters (model name, temperature, ...).
                ``None`` is treated as an empty dict (avoids a shared mutable default).
            model_type: The adalflow model type; adjustments apply to LLM calls only.

        Returns:
            Dict: The final API kwargs ready for the MiniMax endpoint.
        """
        final_kwargs = super().convert_inputs_to_api_kwargs(
            input, model_kwargs if model_kwargs is not None else {}, model_type
        )

        if model_type == ModelType.LLM:
            # Clamp temperature: MiniMax requires (0.0, 1.0], cannot be 0.
            temp = final_kwargs.get("temperature")
            if temp is not None:
                if temp <= 0:
                    final_kwargs["temperature"] = self.MIN_TEMPERATURE
                    log.debug(
                        "Clamped temperature from %s to %s (MiniMax minimum)",
                        temp, self.MIN_TEMPERATURE,
                    )
                elif temp > self.MAX_TEMPERATURE:
                    final_kwargs["temperature"] = self.MAX_TEMPERATURE
                    log.debug(
                        "Clamped temperature from %s to %s (MiniMax maximum)",
                        temp, self.MAX_TEMPERATURE,
                    )

            # Remove response_format if present (not supported by MiniMax).
            if "response_format" in final_kwargs:
                final_kwargs.pop("response_format")
                log.debug("Removed unsupported response_format parameter for MiniMax")

        return final_kwargs
104 changes: 104 additions & 0 deletions tests/integration/test_minimax_integration.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,104 @@
"""Integration tests for MiniMax client with real API calls.

These tests require the MINIMAX_API_KEY environment variable to be set.
They are skipped if the key is not available.
"""
import os
import pytest

# Optionally load environment variables from a .env.local file.  Only
# machine-independent locations are consulted (project working directory and
# the user's home directory) — never a hardcoded absolute path, so the tests
# behave the same on any developer machine and in CI.
try:
    from dotenv import load_dotenv

    for _env_candidate in (
        os.path.join(os.getcwd(), ".env.local"),
        os.path.expanduser("~/.env.local"),
    ):
        if os.path.exists(_env_candidate):
            load_dotenv(_env_candidate)
except ImportError:
    # python-dotenv is optional; rely on the ambient environment instead.
    pass

MINIMAX_API_KEY = os.environ.get("MINIMAX_API_KEY")
# Skip the whole module when no API key is configured — these are live
# integration tests against the MiniMax service.
pytestmark = pytest.mark.skipif(not MINIMAX_API_KEY, reason="MINIMAX_API_KEY not set")


class TestMiniMaxChatIntegration:
    """Integration tests for MiniMax chat completions.

    Each test drives the real MiniMax API through MiniMaxClient.  The shared
    request/response boilerplate lives in two private helpers so each test
    states only what is specific to it (model, prompt, expectation).
    """

    @staticmethod
    def _build(prompt, model_kwargs):
        """Create a client and convert *prompt* into LLM API kwargs."""
        from api.minimax_client import MiniMaxClient
        from adalflow.core.types import ModelType

        client = MiniMaxClient(api_key=MINIMAX_API_KEY)
        api_kwargs = client.convert_inputs_to_api_kwargs(
            input=prompt,
            model_kwargs=model_kwargs,
            model_type=ModelType.LLM,
        )
        return client, api_kwargs

    @staticmethod
    def _call_and_parse(client, api_kwargs):
        """Execute the call, parse the completion, and return the parsed output."""
        from adalflow.core.types import ModelType

        response = client.call(api_kwargs=api_kwargs, model_type=ModelType.LLM)
        assert response is not None

        output = client.parse_chat_completion(response)
        assert output is not None
        assert output.raw_response is not None
        return output

    def test_m27_chat_completion(self):
        """Should complete a basic chat request with MiniMax-M2.7 (default model)."""
        client, api_kwargs = self._build(
            "Say 'hello' and nothing else.",
            {"model": "MiniMax-M2.7", "max_tokens": 20, "temperature": 1.0},
        )
        output = self._call_and_parse(client, api_kwargs)
        assert "hello" in str(output.raw_response).lower()

    def test_m25_chat_completion(self):
        """Should complete a basic chat request with MiniMax-M2.5."""
        client, api_kwargs = self._build(
            "Say 'hello' and nothing else.",
            {"model": "MiniMax-M2.5", "max_tokens": 20, "temperature": 1.0},
        )
        output = self._call_and_parse(client, api_kwargs)
        assert "hello" in str(output.raw_response).lower()

    def test_highspeed_model(self):
        """Should work with MiniMax-M2.5-highspeed model."""
        client, api_kwargs = self._build(
            "What is 2+2? Reply with just the number.",
            {"model": "MiniMax-M2.5-highspeed", "max_tokens": 100, "temperature": 1.0},
        )
        output = self._call_and_parse(client, api_kwargs)
        # Model may include thinking tags, just verify we got a response.
        assert len(str(output.raw_response)) > 0

    def test_temperature_clamping_works(self):
        """Should handle temperature=0 by clamping to the MiniMax minimum (0.01)."""
        from adalflow.core.types import ModelType

        client, api_kwargs = self._build(
            "Say 'test passed'",
            {"model": "MiniMax-M2.7", "max_tokens": 20, "temperature": 0},
        )
        # The client must clamp the invalid temperature before any API call.
        assert api_kwargs["temperature"] == 0.01

        response = client.call(api_kwargs=api_kwargs, model_type=ModelType.LLM)
        assert response is not None
Loading