56 changes: 53 additions & 3 deletions fastdeploy/__init__.py
@@ -14,6 +14,27 @@
# limitations under the License.
"""

# Configure root logger first to unify log formats
# This must be done before importing any modules that may use the logger
import logging

# Create standard format (without color)
_root_formatter = logging.Formatter(
"%(levelname)-8s %(asctime)s %(process)-5s %(filename)s[line:%(lineno)d] %(message)s", datefmt="%Y-%m-%d %H:%M:%S"
)

# Configure root logger
_root_logger = logging.getLogger()
_root_logger.setLevel(logging.INFO)

# Clear existing handlers and add new ones
for handler in _root_logger.handlers[:]:
_root_logger.removeHandler(handler)

_root_handler = logging.StreamHandler()
_root_handler.setFormatter(_root_formatter)
_root_logger.addHandler(_root_handler)
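The net effect is that any logger which propagates to the root now emits one uniform line format. A self-contained sketch of the equivalent setup using `basicConfig` (module name and output line are illustrative):

```python
import logging

logging.basicConfig(
    level=logging.INFO,
    format="%(levelname)-8s %(asctime)s %(process)-5s %(filename)s[line:%(lineno)d] %(message)s",
    datefmt="%Y-%m-%d %H:%M:%S",
)

# Any propagating child logger now shares the same single-line format
logging.getLogger("fastdeploy.demo").info("engine ready")
# e.g. INFO     2025-01-01 12:00:00 12345 demo.py[line:10] engine ready
```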

import os
import uuid

@@ -45,6 +66,36 @@

from paddleformers.utils.log import logger as pf_logger

# Additional configuration for paddleformers logger (it has a special logger structure)
# Get paddleformers logger
_paddleformers_logger = logging.getLogger("paddleformers")
_paddleformers_logger.setLevel(logging.INFO)

# Clear all existing handlers
for handler in _paddleformers_logger.handlers[:]:
_paddleformers_logger.removeHandler(handler)

# Create standard format (without color)
_formatter = logging.Formatter(
"%(levelname)-8s %(asctime)s %(process)-5s %(filename)s[line:%(lineno)d] %(message)s", datefmt="%Y-%m-%d %H:%M:%S"
)

# Add new StreamHandler
_stream_handler = logging.StreamHandler()
_stream_handler.setFormatter(_formatter)
_paddleformers_logger.addHandler(_stream_handler)
_paddleformers_logger.propagate = False

# Also configure pf_logger.logger (if it is a Logger object)
if hasattr(pf_logger, "logger") and isinstance(pf_logger.logger, logging.Logger):
pf_logger.logger.setLevel(logging.INFO)
for handler in pf_logger.logger.handlers[:]:
pf_logger.logger.removeHandler(handler)
_stream_handler2 = logging.StreamHandler()
_stream_handler2.setFormatter(_formatter)
pf_logger.logger.addHandler(_stream_handler2)
pf_logger.logger.propagate = False
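The `propagate = False` lines are what prevent duplicate output: without them each paddleformers record would be handled twice, once by the logger's own StreamHandler and once by the root handler installed above. A minimal illustration of that behavior (logger name is illustrative):

```python
import logging

logging.basicConfig(level=logging.INFO)    # installs a root handler
child = logging.getLogger("example.child")
child.addHandler(logging.StreamHandler())  # the child's own handler

child.info("printed twice")                # handled by child and root handlers

child.propagate = False
child.info("printed once")                 # handled by the child handler only
```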

from fastdeploy.engine.sampling_params import SamplingParams
from fastdeploy.entrypoints.llm import LLM
from fastdeploy.utils import (
@@ -66,9 +117,8 @@


if envs.FD_DEBUG != 1:
import logging

pf_logger.logger.setLevel(logging.INFO)
# Log level has been configured above
pass

try:
import use_triton_in_paddle
4 changes: 4 additions & 0 deletions fastdeploy/engine/common_engine.py
@@ -1140,6 +1140,10 @@ def _insert_zmq_task_to_scheduler(self):
request = Request.from_dict(data)
request.metrics.scheduler_recv_req_time = time.time()
main_process_metrics.requests_number.inc()
trace_carrier = data.get("trace_carrier")
if trace_carrier:
request_id = data["request_id"].split("_")[0]
tracing.trace_set_proc_propagate_context(request_id, trace_carrier)
trace_print(LoggingEventName.PREPROCESSING_END, data["request_id"], data.get("user", ""))
trace_print(LoggingEventName.REQUEST_SCHEDULE_START, data["request_id"], data.get("user", ""))
trace_print(LoggingEventName.REQUEST_QUEUE_START, data["request_id"], data.get("user", ""))
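For context, this is the consumer half of cross-process trace propagation: the sender serializes the active span context into a dict carrier that rides along with the request payload, and the scheduler restores it so its spans join the same trace. A sketch of the general pattern, written against OpenTelemetry's propagation API rather than FastDeploy's `tracing` wrapper (names are illustrative):

```python
from opentelemetry import trace
from opentelemetry.propagate import extract, inject

# Producer side: capture the current trace context into a plain dict
carrier: dict = {}
inject(carrier)  # typically adds a "traceparent" entry
payload = {"request_id": "req-1_0", "trace_carrier": carrier}

# Consumer side: restore the context so new spans join the same trace
ctx = extract(payload["trace_carrier"])
tracer = trace.get_tracer(__name__)
with tracer.start_as_current_span("schedule", context=ctx):
    pass  # scheduling work runs inside the propagated trace
```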
6 changes: 4 additions & 2 deletions fastdeploy/entrypoints/openai/api_server.py
@@ -175,8 +175,10 @@ async def lifespan(app: FastAPI):
uvicorn_access = logging.getLogger("uvicorn.access")
uvicorn_access.handlers.clear()

# Use gunicorn's format
formatter = logging.Formatter("[%(asctime)s] [%(process)d] [INFO] %(message)s", datefmt="%Y-%m-%d %H:%M:%S")
formatter = logging.Formatter(
"%(levelname)-8s %(asctime)s %(process)-5s %(filename)s[line:%(lineno)d] %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
)

handler = logging.StreamHandler()
handler.setFormatter(formatter)
4 changes: 2 additions & 2 deletions fastdeploy/entrypoints/openai/serving_chat.py
@@ -524,10 +524,10 @@ async def chat_completion_stream_generator(
)
yield f"data: {error_data}\n\n"
finally:
trace_print(LoggingEventName.POSTPROCESSING_END, request_id, getattr(request, "user", ""))
tracing.trace_req_finish(request_id)
await self.engine_client.connection_manager.cleanup_request(request_id)
self.engine_client.semaphore.release()
trace_print(LoggingEventName.POSTPROCESSING_END, request_id, getattr(request, "user", ""))
api_server_logger.info(f"release {request_id} {self.engine_client.semaphore.status()}")
yield "data: [DONE]\n\n"

@@ -691,6 +691,7 @@ async def chat_completion_full_generator(
)
choices.append(choice)
finally:
trace_print(LoggingEventName.POSTPROCESSING_END, request_id, getattr(request, "user", ""))
tracing.trace_req_finish(request_id)
await self.engine_client.connection_manager.cleanup_request(request_id)
self.engine_client.semaphore.release()
@@ -721,7 +722,6 @@ async def chat_completion_full_generator(
choices=choices,
usage=usage,
)
trace_print(LoggingEventName.POSTPROCESSING_END, request_id, getattr(request, "user", ""))
api_server_logger.info(f"Chat response: {res.model_dump_json()}")
return res

5 changes: 2 additions & 3 deletions fastdeploy/entrypoints/openai/serving_completion.py
@@ -375,8 +375,8 @@ async def completion_full_generator(
except Exception as e:
api_server_logger.error(f"Error in completion_full_generator: {e}", exc_info=True)
finally:
tracing.trace_req_finish(request_id)
trace_print(LoggingEventName.POSTPROCESSING_END, request_id, getattr(request, "user", ""))
tracing.trace_req_finish(request_id)
self.engine_client.semaphore.release()
if dealer is not None:
await self.engine_client.connection_manager.cleanup_request(request_id)
@@ -652,9 +652,8 @@ async def completion_stream_generator(
api_server_logger.error(f"Error in completion_stream_generator: {e}, {str(traceback.format_exc())}")
yield f"data: {ErrorResponse(error=ErrorInfo(message=str(e), code='400', type=ErrorType.INTERNAL_ERROR)).model_dump_json(exclude_unset=True)}\n\n"
finally:

tracing.trace_req_finish(request_id)
trace_print(LoggingEventName.POSTPROCESSING_END, request_id, getattr(request, "user", ""))
tracing.trace_req_finish(request_id)
del request
if dealer is not None:
await self.engine_client.connection_manager.cleanup_request(request_id)
4 changes: 2 additions & 2 deletions fastdeploy/entrypoints/openai/utils.py
@@ -36,8 +36,8 @@
"formatters": {
"custom": {
"()": "colorlog.ColoredFormatter",
"format": "[%(log_color)s%(asctime)s] [%(levelname)+8s] %(reset)s - %(message)s%(reset)s",
"datefmt": "%Y-%m-%d %H:%M:%S", # 时间戳格式
"format": "%(log_color)s%(levelname)-8s %(asctime)s %(process)-5s %(filename)s[line:%(lineno)d] %(message)s%(reset)s",
"datefmt": "%Y-%m-%d %H:%M:%S", # Timestamp format
"log_colors": {
"DEBUG": "cyan",
"INFO": "green",
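For reference, a formatter entry of this shape is activated through `logging.config.dictConfig`. A self-contained sketch (assumes the `colorlog` package is installed; handler and level values are illustrative):

```python
import logging
import logging.config

LOGGING = {
    "version": 1,
    "disable_existing_loggers": False,
    "formatters": {
        "custom": {
            "()": "colorlog.ColoredFormatter",
            "format": "%(log_color)s%(levelname)-8s %(asctime)s %(process)-5s "
                      "%(filename)s[line:%(lineno)d] %(message)s%(reset)s",
            "datefmt": "%Y-%m-%d %H:%M:%S",
            "log_colors": {"DEBUG": "cyan", "INFO": "green", "WARNING": "yellow"},
        }
    },
    "handlers": {"console": {"class": "logging.StreamHandler", "formatter": "custom"}},
    "root": {"handlers": ["console"], "level": "INFO"},
}

logging.config.dictConfig(LOGGING)
logging.getLogger("uvicorn.access").info("colored, unified access line")
```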
2 changes: 0 additions & 2 deletions fastdeploy/logger/formatters.py
@@ -100,8 +100,6 @@ def format(self, record):

try:
log_fields = {
"thread": record.thread,
"thread_name": record.threadName,
"timestamp": int(time.time() * 1000),
}

66 changes: 33 additions & 33 deletions fastdeploy/logger/logger.py
@@ -14,8 +14,8 @@
"""

"""
Logging module: used to initialize and obtain FastDeploy loggers.
This module provides the get_logger method to uniformly manage logging behavior across submodules.
"""

import logging
@@ -24,7 +24,7 @@
from pathlib import Path

from fastdeploy import envs
from fastdeploy.logger.formatters import ColoredFormatter, CustomFormatter
from fastdeploy.logger.formatters import CustomFormatter
from fastdeploy.logger.handlers import DailyRotatingFileHandler, LazyFileHandler
from fastdeploy.logger.setup_logging import setup_logging

@@ -35,64 +35,64 @@ class FastDeployLogger:
_lock = threading.RLock()

def __new__(cls):
"""单例模式实现"""
"""Singleton pattern implementation"""
if cls._instance is None:
with cls._lock:
if cls._instance is None:
cls._instance = super().__new__(cls)
return cls._instance
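The double-checked lock makes first construction thread-safe while keeping later calls lock-free on the fast path; every call site receives the same instance:

```python
a = FastDeployLogger()
b = FastDeployLogger()
assert a is b  # __new__ always returns the single shared instance
```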

def _initialize(self):
"""显式初始化日志系统"""
"""Explicitly initialize the logging system"""
with self._lock:
if not self._initialized:
setup_logging()
self._initialized = True

def get_logger(self, name, file_name=None, without_formater=False, print_to_console=False):
"""
Get a logger (compatible with the original interface)

Args:
name: Logger name
file_name: Log file name (kept for compatibility)
without_formater: Whether to skip attaching a formatter
print_to_console: Whether to also print to the console
"""
# If only one parameter is provided, use the new unified naming convention
if file_name is None and not without_formater and not print_to_console:
# Lazy initialization
if not self._initialized:
self._initialize()
return self._get_unified_logger(name)

# Compatible with the original interface
return self._get_legacy_logger(name, file_name, without_formater, print_to_console)

def _get_unified_logger(self, name):
"""
New unified way to get a logger
"""
if name is None:
return logging.getLogger("fastdeploy")

# Handle the __main__ special case
if name == "__main__":
import __main__

# Get the __file__ attribute of the main module
if hasattr(__main__, "__file__"):
# Get the main module's file name
base_name = Path(__main__.__file__).stem
# Create a logger with the prefix
return logging.getLogger(f"fastdeploy.main.{base_name}")
return logging.getLogger("fastdeploy.main")

# If already in the fastdeploy namespace, use it directly
if name.startswith("fastdeploy.") or name == "fastdeploy":
return logging.getLogger(name)
else:
# Otherwise, add the fastdeploy prefix
return logging.getLogger(f"fastdeploy.{name}")

def get_trace_logger(self, name, file_name, without_formater=False, print_to_console=False):
@@ -106,7 +106,7 @@ def get_trace_logger(self, name, file_name, without_formater=False, print_to_console=False):

is_debug = int(envs.FD_DEBUG)
# logger = logging.getLogger(name)
# Use a namespace for isolation to avoid logger overwrite and confusion, for compatibility with the original interface
legacy_name = f"legacy.{name}"
logger = logging.getLogger(legacy_name)

Expand All @@ -118,7 +118,7 @@ def get_trace_logger(self, name, file_name, without_formater=False, print_to_con

# Set formatter
formatter = CustomFormatter(
"[%(asctime)s] [%(levelname)-8s] (%(filename)s:%(funcName)s:%(lineno)d) %(message)s"
"%(levelname)-8s %(asctime)s %(process)-5s %(filename)s[line:%(lineno)d] %(message)s"
)

# Clear existing handlers (maintain original logic)
@@ -162,7 +162,7 @@ def get_trace_logger(self, name, file_name, without_formater=False, print_to_console=False):

def _get_legacy_logger(self, name, file_name, without_formater=False, print_to_console=False):
"""
Legacy-compatible way to get a logger
"""

log_dir = envs.FD_LOG_DIR
@@ -171,32 +171,32 @@ def _get_legacy_logger(self, name, file_name, without_formater=False, print_to_console=False):

is_debug = envs.FD_DEBUG
# logger = logging.getLogger(name)
# Use a namespace for isolation to avoid logger overwrite and confusion, for compatibility with the original interface
legacy_name = f"legacy.{name}"
logger = logging.getLogger(legacy_name)

# Set log level
if is_debug:
logger.setLevel(level=logging.DEBUG)
else:
logger.setLevel(level=logging.INFO)

# Set formatter
formatter = ColoredFormatter(
# Set formatter - use standard format for both file and console (no color)
formatter = logging.Formatter(
"%(levelname)-8s %(asctime)s %(process)-5s %(filename)s[line:%(lineno)d] %(message)s"
)

# Clear existing handlers (maintain original logic)
for handler in logger.handlers[:]:
logger.removeHandler(handler)

# Create the main log file handler
LOG_FILE = f"{log_dir}/{file_name}"
backup_count = int(envs.FD_LOG_BACKUP_COUNT)
# handler = LazyFileHandler(filename=LOG_FILE, backupCount=backup_count, level=hanlder_level)
handler = DailyRotatingFileHandler(LOG_FILE, backupCount=backup_count)

# Create an ERROR log file handler (new feature)
if not file_name.endswith(".log"):
file_name = f"{file_name}.log" if "." not in file_name else file_name.split(".")[0] + ".log"
ERROR_LOG_FILE = os.path.join(log_dir, file_name.replace(".log", "_error.log"))
@@ -208,19 +208,19 @@ def _get_legacy_logger(self, name, file_name, without_formater=False, print_to_console=False):
handler.setFormatter(formatter)
error_handler.setFormatter(formatter)

# Add file handlers
logger.addHandler(handler)
logger.addHandler(error_handler)

# Console handler
if print_to_console:
console_handler = logging.StreamHandler()
if not without_formater:
console_handler.setFormatter(formatter)
logger.addHandler(console_handler)
console_handler.propagate = False

# Set propagate (maintain original logic)
# logger.propagate = False

return logger
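`DailyRotatingFileHandler` and `LazyFileHandler` are FastDeploy's own classes from `fastdeploy.logger.handlers`. For intuition, the closest stdlib analogue to daily rotation with a bounded backlog is `TimedRotatingFileHandler`; a sketch under that assumption, not the actual implementation:

```python
import logging
from logging.handlers import TimedRotatingFileHandler

# Rotate at midnight and keep 7 days of backups (parameters illustrative)
handler = TimedRotatingFileHandler("fastdeploy.log", when="midnight", backupCount=7)
handler.setFormatter(logging.Formatter(
    "%(levelname)-8s %(asctime)s %(process)-5s %(filename)s[line:%(lineno)d] %(message)s"
))

logger = logging.getLogger("fastdeploy.example")
logger.addHandler(handler)
logger.warning("written to fastdeploy.log, rotated daily")
```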