-
-
Notifications
You must be signed in to change notification settings - Fork 2k
fix: defer faiss imports during startup #7400
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
base: master
Are you sure you want to change the base?
Changes from all commits
7fbc192
c5aa029
358a27a
4f02b0b
55c7d9a
96c4a51
7da82ce
17a9c1f
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -1,6 +1,7 @@ | ||
| import asyncio | ||
| import os | ||
| import uuid | ||
| from functools import partial | ||
| from typing import cast | ||
|
|
||
| import whisper | ||
|
|
@@ -28,17 +29,34 @@ def __init__( | |
| ) -> None: | ||
| super().__init__(provider_config, provider_settings) | ||
| self.set_model(provider_config["model"]) | ||
| self.device = str(provider_config.get("whisper_device", "cpu")).strip().lower() | ||
| self.model = None | ||
|
Comment on lines
29
to
33
|
||
|
|
||
def _resolve_device(self) -> str:
    """Return the device string Whisper should load its model on.

    Only "mps" and "cpu" are recognized. An "mps" request is honored only
    when the torch MPS backend reports availability; any unavailable or
    unrecognized value degrades to "cpu" with a warning.
    """
    requested = self.device
    if requested == "mps":
        # Imported lazily so torch is only touched when MPS is requested.
        import torch  # torch is a dependency of openai-whisper

        backend = getattr(torch.backends, "mps", None)
        if backend and backend.is_available():
            return "mps"
        logger.warning("Whisper 已配置为使用 MPS,但当前环境不可用,将回退到 CPU。")
    elif requested != "cpu":
        logger.warning(
            "Whisper 配置了未知 device=%s,将回退到 CPU。",
            requested,
        )
    return "cpu"
|
|
||
async def initialize(self) -> None:
    """Download/load the configured Whisper model without blocking the loop.

    Resolves the target device first (which may fall back to CPU), then
    runs the potentially slow ``whisper.load_model`` call in the default
    executor so the asyncio event loop stays responsive.
    """
    device = self._resolve_device()
    logger.info("下载或者加载 Whisper 模型中,这可能需要一些时间 ...")
    # Bind model name and device now; run_in_executor only accepts *args.
    load = partial(whisper.load_model, self.model_name, device=device)
    loop = asyncio.get_running_loop()
    self.model = await loop.run_in_executor(None, load)
    logger.info("Whisper 模型加载完成。device=%s", device)
|
|
||
| async def _is_silk_file(self, file_path) -> bool: | ||
| silk_header = b"SILK" | ||
|
|
||
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
issue (bug_risk): FaissVecDB is only imported under TYPE_CHECKING, so this runtime type annotation will fail with NameError.
Because FaissVecDB only exists under TYPE_CHECKING, this annotation must not be evaluated at runtime. Please either quote the type (e.g. `"FaissVecDB" | None`), enable
`from __future__ import annotations` in this module, or change the import so FaissVecDB is available at runtime.