Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
49 commits
Select commit Hold shift + click to select a range
21f9822
✨ Abstract Storage SDK
Phinease Nov 12, 2025
8009cdc
Revert "✨[Request] Abstract Storage SDK"
Phinease Nov 13, 2025
bbd7afa
🚚 Revert "✨ Abstract Storage SDK"
Phinease Nov 13, 2025
d053fe1
add codeql.yml
WMC001 Nov 14, 2025
ac56e1d
🐛 Bugfix: generate button in Agent config page returns error: Empty A…
Jasonxia007 Nov 14, 2025
0b10478
♻️ Imporvement: nexent-data-process now takes less memory (down to ~1…
Jasonxia007 Nov 14, 2025
b2419ac
Merge remote-tracking branch 'origin/release/v1.7.5.3' into release/v…
WMC001 Nov 14, 2025
9c165e6
✨ Release/v1.7.5.3
Phinease Nov 14, 2025
c831419
🐛 Bugfix: ray initialize failed
Jasonxia007 Nov 17, 2025
93fcd68
🐛 fix ray initialize failed
Phinease Nov 17, 2025
d666c6c
⭐️ Release/v1.7.5.3-hotfix
Phinease Nov 17, 2025
c5473a6
Merge pull request #1775 from ModelEngine-Group/release/v1.7.6
Phinease Nov 21, 2025
3b2cdeb
🐛 Bugfix: agent tools save or load parameters failed #1780 from Model…
WMC001 Nov 21, 2025
e27d265
Merge pull request #1781 from ModelEngine-Group/release/v1.7.6
Phinease Nov 21, 2025
a6dabbf
📝 Update multimodal tool description from ModelEngine-Group/release/v…
WMC001 Nov 29, 2025
4c1dcff
[release] 🐛 Front-end development and optimization of market interfac…
WMC001 Nov 29, 2025
c09476d
🐛 Bugfix: single added LLM model won't be saved automatically
WMC001 Nov 29, 2025
09da4ba
⭐️ Release/v1.7.7
Phinease Nov 29, 2025
f5bef9a
Merge branch 'main' into release/v1.7.7.1
WMC001 Dec 5, 2025
ffd4b5d
Bugfix: Build frontend arm image failed from ModelEngine-Group/wmc/bu…
WMC001 Dec 5, 2025
0f16d38
Merge pull request #2031 from ModelEngine-Group/wmc/bugfix_1117
WMC001 Dec 5, 2025
7691fbf
Merge pull request #2029 from ModelEngine-Group/wz/develop_fix_datama…
WMC001 Dec 5, 2025
f1f02b6
Release/v1.7.7.1 from ModelEngine-Group/release/v1.7.7.1
WMC001 Dec 5, 2025
60fb82a
✨ Release/v1.7.8
Phinease Dec 12, 2025
1313e88
♻️ Revert package declare from ModelEngine-Group/xyq/unify_dialog&modal
WMC001 Dec 19, 2025
7bcc859
📝 Updates app version from ModelEngine-Group/wmc/bugfix_1117
WMC001 Dec 19, 2025
9da494a
⭐️ Release/v1.7.8.1
Phinease Dec 19, 2025
24c8cbb
feat: Add PathologyAI - 智能病理诊断助手
hud0567 Dec 21, 2025
7ef7750
⭐️ Add PathologyAI - 智能病理诊断助手
Phinease Dec 25, 2025
d29aa16
Merge branch 'main' into release/v1.7.9
WMC001 Dec 27, 2025
d4afa9c
⭐️ Release/v1.7.9
Phinease Dec 27, 2025
fc23afe
✨ Add SDK publish workflow for PyPI with OIDC authentication
Phinease Jan 5, 2026
f1b4f58
🐛 Adjust main page layout
WMC001 Jan 8, 2026
a0efbce
bugfix sync modle engine model icon
xuyaqist Jan 8, 2026
137291b
Revert "bugfix sync modle engine model icon"
xuyaqist Jan 8, 2026
b91149a
[release] 🐛 Remove cache of tenant config
WMC001 Jan 9, 2026
e175c33
🐛 Bugfix in createmode cannot edit tool & cannot change agent model id
WMC001 Jan 9, 2026
eb18e35
Release/v1.7.9.1
WMC001 Jan 9, 2026
38e304b
Release/v1.7.9.2
WMC001 Jan 16, 2026
3d8c13d
Release/v1.7.9.3
WMC001 Jan 23, 2026
c7f4a15
Add CodeQL analysis workflow configuration
WMC001 Jan 26, 2026
1675714
Merge branch 'main' into release/v1.7.10
WMC001 Jan 31, 2026
d7aa26b
Release/v1.7.10
WMC001 Jan 31, 2026
7d63bbf
🐛 Fixed the data processing image that lacked tiktoken word segmentat…
Zhi-a Feb 6, 2026
add50a7
🐛 Fixed the data processing image that lacked tiktoken word segmentat…
WMC001 Feb 6, 2026
c6bb626
Release/v1.7.10 hotfix
WMC001 Feb 6, 2026
00c83a5
New Requirement: Support for provider Zhipu AI Models (LLM and Embedd…
wadecrack Feb 8, 2026
e6e6360
New Requirement: Support for provider Zhipu AI Models (LLM and Embedd…
wadecrack Feb 9, 2026
bc42e64
Merge branch 'ModelEngine-Group:main' into develop-xq-260206
wadecrack Feb 10, 2026
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
103 changes: 103 additions & 0 deletions .github/workflows/codeql_main.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,103 @@
# For most projects, this workflow file will not need changing; you simply need
# to commit it to your repository.
#
# You may wish to alter this file to override the set of languages analyzed,
# or to provide custom queries or build logic.
#
# ******** NOTE ********
# We have attempted to detect the languages in your repository. Please check
# the `language` matrix defined below to confirm you have the correct set of
# supported CodeQL languages.
#
name: "CodeQL Advanced"

on:
  push:
    branches: [ "main" ]
  pull_request:
    branches: [ "main" ]
  schedule:
    - cron: '40 13 * * 6'

jobs:
  analyze:
    name: Analyze (${{ matrix.language }})
    # Runner size impacts CodeQL analysis time. To learn more, please see:
    #   - https://gh.io/recommended-hardware-resources-for-running-codeql
    #   - https://gh.io/supported-runners-and-hardware-resources
    #   - https://gh.io/using-larger-runners (GitHub.com only)
    # Consider using larger runners or machines with greater resources for possible analysis time improvements.
    runs-on: ${{ (matrix.language == 'swift' && 'macos-latest') || 'ubuntu-latest' }}
    permissions:
      # required for all workflows
      security-events: write

      # required to fetch internal or private CodeQL packs
      packages: read

      # only required for workflows in private repositories
      actions: read
      contents: read

    strategy:
      fail-fast: false
      matrix:
        include:
          - language: actions
            build-mode: none
          - language: javascript-typescript
            build-mode: none
          - language: python
            build-mode: none
        # CodeQL supports the following values keywords for 'language': 'actions', 'c-cpp', 'csharp', 'go', 'java-kotlin', 'javascript-typescript', 'python', 'ruby', 'rust', 'swift'
        # Use `c-cpp` to analyze code written in C, C++ or both
        # Use 'java-kotlin' to analyze code written in Java, Kotlin or both
        # Use 'javascript-typescript' to analyze code written in JavaScript, TypeScript or both
        # To learn more about changing the languages that are analyzed or customizing the build mode for your analysis,
        # see https://docs.github.com/en/code-security/code-scanning/creating-an-advanced-setup-for-code-scanning/customizing-your-advanced-setup-for-code-scanning.
        # If you are analyzing a compiled language, you can modify the 'build-mode' for that language to customize how
        # your codebase is analyzed, see https://docs.github.com/en/code-security/code-scanning/creating-an-advanced-setup-for-code-scanning/codeql-code-scanning-for-compiled-languages
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      # Add any setup steps before running the `github/codeql-action/init` action.
      # This includes steps like installing compilers or runtimes (`actions/setup-node`
      # or others). This is typically only required for manual builds.
      # - name: Setup runtime (example)
      #   uses: actions/setup-example@v1

      # Initializes the CodeQL tools for scanning.
      - name: Initialize CodeQL
        uses: github/codeql-action/init@v4
        with:
          languages: ${{ matrix.language }}
          build-mode: ${{ matrix.build-mode }}
          # If you wish to specify custom queries, you can do so here or in a config file.
          # By default, queries listed here will override any specified in a config file.
          # Prefix the list here with "+" to use these queries and those in the config file.

          # For more details on CodeQL's query packs, refer to: https://docs.github.com/en/code-security/code-scanning/automatically-scanning-your-code-for-vulnerabilities-and-errors/configuring-code-scanning#using-queries-in-ql-packs
          # queries: security-extended,security-and-quality

      # If the analyze step fails for one of the languages you are analyzing with
      # "We were unable to automatically build your code", modify the matrix above
      # to set the build mode to "manual" for that language. Then modify this step
      # to build your code.
      # ℹ️ Command-line programs to run using the OS shell.
      # 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun
      - name: Run manual build steps
        if: matrix.build-mode == 'manual'
        shell: bash
        run: |
          echo 'If you are using a "manual" build mode for one or more of the' \
            'languages you are analyzing, replace this with the commands to build' \
            'your code, for example:'
          echo '  make bootstrap'
          echo '  make release'
          exit 1

      - name: Perform CodeQL Analysis
        uses: github/codeql-action/analyze@v4
        with:
          category: "/language:${{matrix.language}}"
31 changes: 31 additions & 0 deletions backend/consts/provider.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,11 +6,42 @@ class ProviderEnum(str, Enum):
SILICON = "silicon"
OPENAI = "openai"
MODELENGINE = "modelengine"
ZHIPU = "zhipu"


# Silicon Flow
SILICON_BASE_URL = "https://api.siliconflow.cn/v1/"
SILICON_GET_URL = "https://api.siliconflow.cn/v1/models"

# Zhipu AI
ZHIPU_BASE_URL = "https://open.bigmodel.cn/api/paas/v4/"
ZHIPU_GET_URL = "https://open.bigmodel.cn/api/paas/v4/models"

# Zhipu models injected by hand: the provider's /models API response is
# incomplete, so these entries supplement (and are merged with) what the
# API returns, keyed by model type.
HARDCODED_ZHIPU_MODELS = {
    "llm": [
        {"id": "glm-4-flash", "object": "model", "owned_by": "zhipu"},
        {"id": "glm-4-air", "object": "model", "owned_by": "zhipu"},
        {"id": "glm-4-long", "object": "model", "owned_by": "zhipu"},
    ],
    "vlm": [
        {"id": "glm-4.6v", "object": "model", "owned_by": "zhipu"},
        {"id": "glm-4.6v-flash", "object": "model", "owned_by": "zhipu"},
        {"id": "glm-4v-plus", "object": "model", "owned_by": "zhipu"},
    ],
    "embedding": [
        {"id": "embedding-3", "object": "model", "owned_by": "zhipu"},
        {"id": "embedding-2", "object": "model", "owned_by": "zhipu"},
    ],
    "reranker": [
        {"id": "reranker-3", "object": "model", "owned_by": "zhipu"},
        {"id": "reranker-2", "object": "model", "owned_by": "zhipu"},
    ],
    "tts": [
        {"id": "cogspeech-1", "object": "model", "owned_by": "zhipu"},
    ],
    "stt": [
        {"id": "sensevoice-v1", "object": "model", "owned_by": "zhipu"},
    ],
}

# ModelEngine
# Base URL and API key are loaded from environment variables at runtime
4 changes: 3 additions & 1 deletion backend/services/model_management_service.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,7 @@

from consts.const import LOCALHOST_IP, LOCALHOST_NAME, DOCKER_INTERNAL_HOST
from consts.model import ModelConnectStatusEnum
from consts.provider import ProviderEnum, SILICON_BASE_URL
from consts.provider import ProviderEnum, SILICON_BASE_URL, ZHIPU_BASE_URL

from database.model_management_db import (
create_model_record,
Expand Down Expand Up @@ -142,6 +142,8 @@ async def batch_create_models_for_tenant(user_id: str, tenant_id: str, batch_pay
elif provider == ProviderEnum.MODELENGINE.value:
# ModelEngine models carry their own base_url in each model dict
model_url = ""
elif provider == ProviderEnum.ZHIPU.value:
model_url = ZHIPU_BASE_URL
else:
model_url = ""

Expand Down
84 changes: 83 additions & 1 deletion backend/services/model_provider_service.py
Original file line number Diff line number Diff line change
Expand Up @@ -11,7 +11,7 @@
DEFAULT_MAXIMUM_CHUNK_SIZE,
)
from consts.model import ModelConnectStatusEnum, ModelRequest
from consts.provider import SILICON_GET_URL, ProviderEnum
from consts.provider import SILICON_GET_URL, ProviderEnum, ZHIPU_GET_URL,ZHIPU_BASE_URL,HARDCODED_ZHIPU_MODELS
from database.model_management_db import get_models_by_tenant_factory_type
from services.model_health_service import embedding_dimension_check
from utils.model_name_utils import split_repo_name, add_repo_to_name
Expand Down Expand Up @@ -68,6 +68,85 @@ async def get_models(self, provider_config: Dict) -> List[Dict]:
logger.error(f"Error getting models from silicon: {e}")
return []

class ZhipuModelProvider(AbstractModelProvider):
    """Concrete implementation of AbstractModelProvider for the Zhipu AI platform.

    Merges models reported by the Zhipu ``/models`` endpoint with the
    hardcoded catalogue in ``HARDCODED_ZHIPU_MODELS``, because the API only
    lists chat models and omits the other model types.
    """

    async def get_models(self, provider_config: Dict) -> List[Dict]:
        """Return the available Zhipu models for the requested model type.

        Args:
            provider_config: Must contain ``"model_type"`` (e.g. ``"llm"``,
                ``"vlm"``, ``"embedding"``) and ``"api_key"`` (Zhipu bearer
                token).

        Returns:
            Model dicts in API order followed by hardcoded additions,
            de-duplicated by ``id``. Returns ``[]`` on any error (best-effort).
        """
        try:
            model_type: str = provider_config["model_type"]
            api_key: str = provider_config["api_key"]

            # The endpoint only supports a sub_type filter for chat/embedding.
            if model_type in ("llm", "vlm"):
                zhipu_url = f"{ZHIPU_GET_URL}?sub_type=chat"
            elif model_type in ("embedding", "multi_embedding"):
                zhipu_url = f"{ZHIPU_GET_URL}?sub_type=embedding"
            else:
                zhipu_url = ZHIPU_GET_URL

            headers = {"Authorization": f"Bearer {api_key}"}
            # SECURITY NOTE(review): TLS certificate verification is disabled
            # (verify=False). This exposes the API key to MITM attacks on a
            # public endpoint — consider enabling verification.
            async with httpx.AsyncClient(verify=False) as client:
                response = await client.get(zhipu_url, headers=headers)
                response.raise_for_status()
                all_models: List[Dict] = response.json().get("data", [])

            logger.debug(f"Zhipu API returned {len(all_models)} models for type '{model_type}'")

            seen_ids = set()
            final_list: List[Dict] = []

            # The Zhipu /models API only returns chat models, so its results
            # are used solely for the "llm" type; everything else relies on
            # the hardcoded catalogue below.
            if model_type == "llm":
                for item in all_models:
                    model_id = item.get("id")
                    if not model_id:
                        continue
                    # Normalize API entries to the same shape as the
                    # hardcoded ones without clobbering existing fields.
                    item.setdefault("object", "model")
                    item.setdefault("owned_by", "zhipu")
                    item.setdefault("model_tag", "chat")
                    item.setdefault("model_type", model_type)
                    item.setdefault("max_tokens", DEFAULT_LLM_MAX_TOKENS)
                    final_list.append(item)
                    seen_ids.add(model_id)

            # Append hardcoded models of the requested type, skipping ids the
            # API already provided.
            for model in HARDCODED_ZHIPU_MODELS.get(model_type, []):
                model_id = model.get("id")
                if not model_id or model_id in seen_ids:
                    continue
                entry = model.copy()  # shallow copy: never mutate the module-level constant
                entry.setdefault("object", "model")
                entry.setdefault("owned_by", "zhipu")
                if model_type in ("llm", "vlm"):
                    entry.setdefault("model_tag", "chat")
                    entry.setdefault("model_type", model_type)
                    entry.setdefault("max_tokens", DEFAULT_LLM_MAX_TOKENS)
                elif model_type in ("embedding", "multi_embedding"):
                    entry.setdefault("model_tag", "embedding")
                    entry.setdefault("model_type", model_type)
                final_list.append(entry)
                seen_ids.add(model_id)

            # Surface a likely response-format mismatch instead of failing silently.
            if not final_list and all_models:
                logger.warning(
                    f"Zhipu API returned {len(all_models)} models but none matched "
                    f"filter for type '{model_type}'. Check API response format."
                )
            return final_list

        except Exception as e:
            # Best-effort contract: callers treat [] as "no models available".
            logger.error(f"Error getting models from Zhipu: {e}")
            return []



class ModelEngineProvider(AbstractModelProvider):
"""Concrete implementation for ModelEngine provider."""
Expand Down Expand Up @@ -288,6 +367,9 @@ async def get_provider_models(model_data: dict) -> List[dict]:
elif model_data["provider"] == ProviderEnum.MODELENGINE.value:
provider = ModelEngineProvider()
model_list = await provider.get_models(model_data)
elif model_data["provider"] == ProviderEnum.ZHIPU.value:
provider = ZhipuModelProvider()
model_list = await provider.get_models(model_data)

return model_list

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,7 @@ import { modelService } from "@/services/modelService";
import { ModelType, SingleModelConfig } from "@/types/modelConfig";
import { MODEL_TYPES, PROVIDER_LINKS } from "@/const/modelConfig";
import { useSiliconModelList } from "@/hooks/model/useSiliconModelList";
import { useZhipuModelList } from "@/hooks/model/UseZhipuModelList";
import log from "@/lib/logger";
import {
ModelChunkSizeSlider,
Expand Down Expand Up @@ -217,13 +218,25 @@ export const ModelAddDialog = ({
const [modelMaxTokens, setModelMaxTokens] = useState("4096");

// Use the silicon model list hook
const { getModelList, getProviderSelectedModalList } = useSiliconModelList({
const siliconHook = useSiliconModelList({
form,
setModelList,
setSelectedModelIds,
setShowModelList,
setLoadingModelList,
});
const zhipuHook = useZhipuModelList({
form,
setModelList,
setSelectedModelIds,
setShowModelList,
setLoadingModelList,
});

// Choose the appropriate hook API based on selected provider
const getModelList = form.provider === "zhipu" ? zhipuHook.getModelList : siliconHook.getModelList;
const getProviderSelectedModalList = form.provider === "zhipu" ? zhipuHook.getProviderSelectedModalList : siliconHook.getProviderSelectedModalList;


// When dialog opens, apply default provider and optional default batch mode
useEffect(() => {
Expand Down Expand Up @@ -688,6 +701,7 @@ export const ModelAddDialog = ({
>
<Option value="modelengine">{t("model.provider.modelengine")}</Option>
<Option value="silicon">{t("model.provider.silicon")}</Option>
<Option value="zhipu">{t("model.provider.zhipu")}</Option>
</Select>
{/* ModelEngine URL input (only when provider is ModelEngine) */}
{form.provider === "modelengine" && (
Expand Down
Loading