-
Notifications
You must be signed in to change notification settings - Fork 1.7k
feat: add MiniMax as first-class LLM provider (M2.7 default) #488
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
base: main
Are you sure you want to change the base?
Changes from all commits
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,95 @@ | ||
| """MiniMax ModelClient integration. | ||
|
|
||
| MiniMax provides OpenAI-compatible API endpoints for chat completions. | ||
| This client extends OpenAIClient with MiniMax-specific defaults. | ||
|
|
||
| API Documentation: | ||
| - OpenAI Compatible: https://platform.minimax.io/docs/api-reference/text-openai-api | ||
|
|
||
| Supported models: | ||
| - MiniMax-M2.7: Latest flagship model with enhanced reasoning (204K context) | ||
| - MiniMax-M2.5: Peak Performance. Ultimate Value. Master the Complex (204K context) | ||
| - MiniMax-M2.5-highspeed: Same performance, faster and more agile (204K context) | ||
| """ | ||
|
|
||
| import os | ||
| import logging | ||
| from typing import Dict, Optional, Any, Callable, Literal | ||
|
|
||
| from openai.types import Completion | ||
| from adalflow.core.types import ModelType | ||
|
|
||
| from api.openai_client import OpenAIClient | ||
|
|
||
| log = logging.getLogger(__name__) | ||
|
|
||
|
|
||
class MiniMaxClient(OpenAIClient):
    """A component wrapper for the MiniMax API client.

    MiniMax provides an OpenAI-compatible API, so this client extends OpenAIClient
    with MiniMax-specific defaults for base URL and API key environment variable.

    Key constraints:
        - temperature: must be in (0.0, 1.0], cannot be 0. Default is 1.0.
        - response_format: not supported, use prompt engineering instead.

    Args:
        api_key (Optional[str]): MiniMax API key. Defaults to None (reads from env).
        chat_completion_parser (Callable[[Completion], Any]): Optional parser applied
            to raw completions; forwarded unchanged to OpenAIClient.
        input_type (Literal["text", "messages"]): Input format expected by the base
            client. Defaults to "text".
        base_url (Optional[str]): API base URL. Defaults to "https://api.minimax.io/v1".
        env_base_url_name (str): Env var name for base URL. Defaults to "MINIMAX_BASE_URL".
        env_api_key_name (str): Env var name for API key. Defaults to "MINIMAX_API_KEY".

    References:
        - MiniMax Platform: https://platform.minimax.io
        - API Reference: https://platform.minimax.io/docs/api-reference/text-openai-api
    """

    # MiniMax accepts temperature only in the half-open interval (0.0, 1.0].
    # Named constants replace the previous magic numbers in the clamping logic.
    MIN_TEMPERATURE = 0.01
    MAX_TEMPERATURE = 1.0

    # Public MiniMax endpoint used when neither the argument nor the env var is set.
    DEFAULT_BASE_URL = "https://api.minimax.io/v1"

    def __init__(
        self,
        api_key: Optional[str] = None,
        chat_completion_parser: Callable[[Completion], Any] = None,
        input_type: Literal["text", "messages"] = "text",
        base_url: Optional[str] = None,
        env_base_url_name: str = "MINIMAX_BASE_URL",
        env_api_key_name: str = "MINIMAX_API_KEY",
    ):
        super().__init__(
            api_key=api_key,
            chat_completion_parser=chat_completion_parser,
            input_type=input_type,
            # Precedence: explicit argument > env var > public MiniMax endpoint.
            base_url=base_url or os.getenv(env_base_url_name, self.DEFAULT_BASE_URL),
            env_base_url_name=env_base_url_name,
            env_api_key_name=env_api_key_name,
        )

    def convert_inputs_to_api_kwargs(
        self,
        input: Optional[Any] = None,
        model_kwargs: Dict = {},
        model_type: ModelType = ModelType.UNDEFINED,
    ) -> Dict:
        """Convert inputs to API kwargs with MiniMax-specific adjustments.

        Clamps ``temperature`` into the (0.0, 1.0] range accepted by MiniMax and
        removes the unsupported ``response_format`` parameter. Adjustments are
        applied only for ``ModelType.LLM`` calls.

        Args:
            input: Prompt text or message payload; forwarded to the base client.
            model_kwargs: Per-call model parameters (model name, temperature, ...).
                NOTE: the mutable ``{}`` default mirrors the adalflow base-class
                signature and is never mutated here.
            model_type: Which API family the call targets.

        Returns:
            Dict: The finalized kwargs to pass to the MiniMax API.
        """
        final_kwargs = super().convert_inputs_to_api_kwargs(input, model_kwargs, model_type)

        if model_type == ModelType.LLM:
            # Clamp temperature: MiniMax rejects 0 and anything above 1.0.
            temp = final_kwargs.get("temperature")
            if temp is not None:
                if temp <= 0:
                    final_kwargs["temperature"] = self.MIN_TEMPERATURE
                    log.debug(
                        "Clamped temperature from %s to %s (MiniMax minimum)",
                        temp,
                        self.MIN_TEMPERATURE,
                    )
                elif temp > self.MAX_TEMPERATURE:
                    final_kwargs["temperature"] = self.MAX_TEMPERATURE
                    log.debug(
                        "Clamped temperature from %s to %s (MiniMax maximum)",
                        temp,
                        self.MAX_TEMPERATURE,
                    )

            # MiniMax does not support structured output; drop response_format.
            if "response_format" in final_kwargs:
                final_kwargs.pop("response_format")
                log.debug("Removed unsupported response_format parameter for MiniMax")

        return final_kwargs
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,104 @@ | ||
"""Integration tests for MiniMax client with real API calls.

These tests require the MINIMAX_API_KEY environment variable to be set.
They are skipped if the key is not available.
"""
import os

import pytest

# Best-effort credential loading: check the user's home directory and the
# repository root (parent of this test file's directory) for a .env.local.
# Paths are derived from __file__ rather than hard-coded absolute paths so the
# suite works on any machine / CI runner.
try:
    from dotenv import load_dotenv

    _ENV_CANDIDATES = (
        os.path.expanduser("~/.env.local"),
        os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", ".env.local"),
    )
    for _env_path in _ENV_CANDIDATES:
        if os.path.exists(_env_path):
            load_dotenv(_env_path)
except ImportError:
    # python-dotenv is optional; fall back to the process environment.
    pass

MINIMAX_API_KEY = os.environ.get("MINIMAX_API_KEY")
# Skip every test in this module when no API key is available.
pytestmark = pytest.mark.skipif(not MINIMAX_API_KEY, reason="MINIMAX_API_KEY not set")
|
|
||
|
|
||
class TestMiniMaxChatIntegration:
    """Integration tests for MiniMax chat completions (real network calls)."""

    def test_m27_chat_completion(self):
        """Should complete a basic chat request with MiniMax-M2.7 (default model)."""
        from api.minimax_client import MiniMaxClient
        from adalflow.core.types import ModelType

        mm_client = MiniMaxClient(api_key=MINIMAX_API_KEY)
        request_kwargs = mm_client.convert_inputs_to_api_kwargs(
            input="Say 'hello' and nothing else.",
            model_kwargs={"model": "MiniMax-M2.7", "max_tokens": 20, "temperature": 1.0},
            model_type=ModelType.LLM,
        )
        raw = mm_client.call(api_kwargs=request_kwargs, model_type=ModelType.LLM)
        assert raw is not None

        # Parse and check the model actually echoed the requested word.
        parsed = mm_client.parse_chat_completion(raw)
        assert parsed is not None
        assert parsed.raw_response is not None
        reply_text = str(parsed.raw_response).lower()
        assert "hello" in reply_text

    def test_m25_chat_completion(self):
        """Should complete a basic chat request with MiniMax-M2.5."""
        from api.minimax_client import MiniMaxClient
        from adalflow.core.types import ModelType

        mm_client = MiniMaxClient(api_key=MINIMAX_API_KEY)
        request_kwargs = mm_client.convert_inputs_to_api_kwargs(
            input="Say 'hello' and nothing else.",
            model_kwargs={"model": "MiniMax-M2.5", "max_tokens": 20, "temperature": 1.0},
            model_type=ModelType.LLM,
        )
        raw = mm_client.call(api_kwargs=request_kwargs, model_type=ModelType.LLM)
        assert raw is not None

        parsed = mm_client.parse_chat_completion(raw)
        assert parsed is not None
        assert parsed.raw_response is not None
        reply_text = str(parsed.raw_response).lower()
        assert "hello" in reply_text

    def test_highspeed_model(self):
        """Should work with MiniMax-M2.5-highspeed model."""
        from api.minimax_client import MiniMaxClient
        from adalflow.core.types import ModelType

        mm_client = MiniMaxClient(api_key=MINIMAX_API_KEY)
        request_kwargs = mm_client.convert_inputs_to_api_kwargs(
            input="What is 2+2? Reply with just the number.",
            model_kwargs={"model": "MiniMax-M2.5-highspeed", "max_tokens": 100, "temperature": 1.0},
            model_type=ModelType.LLM,
        )
        raw = mm_client.call(api_kwargs=request_kwargs, model_type=ModelType.LLM)
        assert raw is not None

        parsed = mm_client.parse_chat_completion(raw)
        assert parsed is not None
        assert parsed.raw_response is not None
        # Model may include thinking tags, just verify we got a response
        assert len(str(parsed.raw_response)) > 0

    def test_temperature_clamping_works(self):
        """Should handle temperature=0 by clamping to 0.01."""
        from api.minimax_client import MiniMaxClient
        from adalflow.core.types import ModelType

        mm_client = MiniMaxClient(api_key=MINIMAX_API_KEY)
        request_kwargs = mm_client.convert_inputs_to_api_kwargs(
            input="Say 'test passed'",
            model_kwargs={"model": "MiniMax-M2.7", "max_tokens": 20, "temperature": 0},
            model_type=ModelType.LLM,
        )
        # The client must rewrite the illegal temperature before the call is made.
        assert request_kwargs["temperature"] == 0.01

        raw = mm_client.call(api_kwargs=request_kwargs, model_type=ModelType.LLM)
        assert raw is not None
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
The value
0.01is a magic number. It would be better to define it as a module-level constant, for exampleMINIMAX_MIN_TEMPERATURE = 0.01, to improve readability and maintainability. The same applies to the maximum temperature1.0on line 86.