Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
10 changes: 9 additions & 1 deletion README.md
Original file line number Diff line number Diff line change
Expand Up @@ -72,7 +72,7 @@ Strix are autonomous AI agents that act just like real hackers - they run your c

**Prerequisites:**
- Docker (running)
- An LLM API key from any [supported provider](https://docs.strix.ai/llm-providers/overview) (OpenAI, Anthropic, Google, etc.)
- An LLM API key from any [supported provider](https://docs.strix.ai/llm-providers/overview) (OpenAI, Anthropic, Google, etc.), or a ChatGPT login via the Codex CLI

### Installation & First Scan

Expand Down Expand Up @@ -234,6 +234,14 @@ export PERPLEXITY_API_KEY="your-api-key" # for search capabilities
export STRIX_REASONING_EFFORT="high" # control thinking effort (default: high, quick scan: medium)
```

To use Codex OAuth instead of an API key, log in with the Codex CLI and set
`STRIX_LLM` to a model name with the `codex/` prefix:

```bash
codex login
export STRIX_LLM="codex/gpt-5.5"
```

> [!NOTE]
> Strix automatically saves your configuration to `~/.strix/cli-config.json`, so you don't have to re-enter it on every run.

Expand Down
21 changes: 19 additions & 2 deletions strix/interface/main.py
Original file line number Diff line number Diff line change
Expand Up @@ -20,6 +20,7 @@

from strix.config import Config, apply_saved_config, save_current_config
from strix.config.config import resolve_llm_config
from strix.llm.codex_oauth import codex_model_name, complete_codex_oauth
from strix.llm.utils import resolve_strix_model


Expand Down Expand Up @@ -57,11 +58,12 @@ def validate_environment() -> None: # noqa: PLR0912, PLR0915

strix_llm = Config.get("strix_llm")
uses_strix_models = strix_llm and strix_llm.startswith("strix/")
uses_codex_oauth = strix_llm and strix_llm.startswith("codex/")

if not strix_llm:
missing_required_vars.append("STRIX_LLM")

has_base_url = uses_strix_models or any(
has_base_url = uses_strix_models or uses_codex_oauth or any(
[
Config.get("llm_api_base"),
Config.get("openai_api_base"),
Expand All @@ -70,7 +72,7 @@ def validate_environment() -> None: # noqa: PLR0912, PLR0915
]
)

if not Config.get("llm_api_key"):
if not Config.get("llm_api_key") and not uses_codex_oauth:
missing_optional_vars.append("LLM_API_KEY")

if not has_base_url:
Expand Down Expand Up @@ -209,6 +211,21 @@ async def warm_up_llm() -> None:

try:
model_name, api_key, api_base = resolve_llm_config()

if model_name and model_name.startswith("codex/"):
test_messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": "Reply with just 'OK'."},
]
llm_timeout = int(Config.get("llm_timeout") or "300")
complete_codex_oauth(
codex_model_name(model_name),
test_messages,
Config.get("strix_reasoning_effort"),
llm_timeout,
)
return

litellm_model, _ = resolve_strix_model(model_name)
litellm_model = litellm_model or model_name

Expand Down
Loading