10 changes: 4 additions & 6 deletions packages/sdk/server-ai/src/ldai/__init__.py
@@ -1,18 +1,15 @@
__version__ = "0.11.0" # x-release-please-version

# Export main client
# Export chat
from ldclient import log

from ldai.chat import Chat
from ldai.client import LDAIClient
# Export judge
from ldai.judge import Judge
# Export models for convenience
from ldai.models import ( # Deprecated aliases for backward compatibility
AIAgentConfig, AIAgentConfigDefault, AIAgentConfigRequest, AIAgents,
AICompletionConfig, AICompletionConfigDefault, AIConfig, AIJudgeConfig,
AIJudgeConfigDefault, JudgeConfiguration, LDAIAgent, LDAIAgentConfig,
LDAIAgentDefaults, LDMessage, ModelConfig, ProviderConfig)
# Export judge types
from ldai.providers.types import EvalScore, JudgeResponse

__all__ = [
@@ -25,14 +22,15 @@
'AICompletionConfigDefault',
'AIJudgeConfig',
'AIJudgeConfigDefault',
'Judge',
'Chat',
'EvalScore',
'Judge',
'JudgeConfiguration',
'JudgeResponse',
'LDMessage',
'ModelConfig',
'ProviderConfig',
'log',
# Deprecated exports
'AIConfig',
'LDAIAgent',
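The net effect of this file's change: the package re-exports the shared ldclient logger as `log` (hence the new `'log'` entry in `__all__`), so submodules use one module-level logger instead of threading a `logger` argument through constructors, as the rest of this diff shows. A minimal standalone sketch of the pattern using only the standard library (the names here are illustrative, not the SDK's internals):

import logging

# One shared module-level logger, analogous to `from ldclient import log`.
log = logging.getLogger('ldclient')

def warn_missing_judge(key: str) -> None:
    # Callers no longer receive an injected logger; they use the shared one.
    log.warning('Judge configuration is not enabled: %s', key)
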
11 changes: 4 additions & 7 deletions packages/sdk/server-ai/src/ldai/chat/__init__.py
@@ -3,6 +3,7 @@
import asyncio
from typing import Any, Dict, List, Optional

from ldai import log
from ldai.judge import Judge
from ldai.models import AICompletionConfig, LDMessage
from ldai.providers.ai_provider import AIProvider
@@ -25,7 +26,6 @@ def __init__(
tracker: LDAIConfigTracker,
provider: AIProvider,
judges: Optional[Dict[str, Judge]] = None,
logger: Optional[Any] = None,
):
"""
Initialize the Chat.
@@ -34,13 +34,11 @@
:param tracker: The tracker for the completion configuration
:param provider: The AI provider to use for chat
:param judges: Optional dictionary of judge instances keyed by their configuration keys
:param logger: Optional logger for logging
"""
self._ai_config = ai_config
self._tracker = tracker
self._provider = provider
self._judges = judges or {}
self._logger = logger
self._messages: List[LDMessage] = []

async def invoke(self, prompt: str) -> ChatResponse:
@@ -101,10 +99,9 @@ def _start_judge_evaluations(
async def evaluate_judge(judge_config):
judge = self._judges.get(judge_config.key)
if not judge:
if self._logger:
self._logger.warn(
f"Judge configuration is not enabled: {judge_config.key}",
)
log.warn(
f"Judge configuration is not enabled: {judge_config.key}",
)
return None

eval_result = await judge.evaluate_messages(
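The pattern in `_start_judge_evaluations` above: each configured judge is looked up by key, a missing judge produces a warning through the shared `log`, and the remaining evaluations run concurrently. A simplified, self-contained sketch of that flow (the judge object and its `evaluate_messages` signature are stand-ins, since the full call is truncated in this diff):

import asyncio
import logging
from dataclasses import dataclass
from typing import Any, Dict, List, Optional

log = logging.getLogger('ldclient')

@dataclass
class JudgeConfig:
    key: str

async def start_judge_evaluations(
    judges: Dict[str, Any],
    configs: List[JudgeConfig],
) -> List[Optional[Any]]:
    async def evaluate_one(cfg: JudgeConfig) -> Optional[Any]:
        judge = judges.get(cfg.key)
        if judge is None:
            # Mirrors the module-level logging introduced in this PR.
            log.warning('Judge configuration is not enabled: %s', cfg.key)
            return None
        return await judge.evaluate_messages()  # stand-in signature

    # All judge evaluations run concurrently rather than one at a time.
    return await asyncio.gather(*(evaluate_one(c) for c in configs))
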
23 changes: 10 additions & 13 deletions packages/sdk/server-ai/src/ldai/client.py
@@ -1,19 +1,18 @@
import logging
from typing import Any, Dict, List, Optional, Tuple

import chevron
from ldclient import Context
from ldclient.client import LDClient

from ldai import log
from ldai.chat import Chat
from ldai.judge import Judge
from ldai.models import (AIAgentConfig, AIAgentConfigDefault,
AIAgentConfigRequest, AIAgents, AICompletionConfig,
AICompletionConfigDefault, AIJudgeConfig,
AIJudgeConfigDefault, JudgeConfiguration, LDMessage,
ModelConfig, ProviderConfig)
from ldai.providers.ai_provider_factory import (AIProviderFactory,
SupportedAIProvider)
from ldai.providers.ai_provider_factory import AIProviderFactory
from ldai.tracker import LDAIConfigTracker


@@ -22,7 +21,6 @@ class LDAIClient:

def __init__(self, client: LDClient):
self._client = client
self._logger = logging.getLogger('ldclient.ai')

def completion_config(
self,
@@ -122,7 +120,7 @@ async def create_judge(
context: Context,
default_value: AIJudgeConfigDefault,
variables: Optional[Dict[str, Any]] = None,
default_ai_provider: Optional[SupportedAIProvider] = None,
default_ai_provider: Optional[str] = None,
) -> Optional[Judge]:
"""
Creates and returns a new Judge instance for AI evaluation.
@@ -180,11 +178,11 @@ async def create_judge(
return None

# Create AI provider for the judge
provider = await AIProviderFactory.create(judge_config, self._logger, default_ai_provider)
provider = await AIProviderFactory.create(judge_config, default_ai_provider)
if not provider:
return None

return Judge(judge_config, judge_config.tracker, provider, self._logger)
return Judge(judge_config, judge_config.tracker, provider)
except Exception as error:
# Would log error if logger available
return None
@@ -194,7 +192,7 @@ async def _initialize_judges(
judge_configs: List[JudgeConfiguration.Judge],
context: Context,
variables: Optional[Dict[str, Any]] = None,
default_ai_provider: Optional[SupportedAIProvider] = None,
default_ai_provider: Optional[str] = None,
) -> Dict[str, Judge]:
"""
Initialize judges from judge configurations.
@@ -240,7 +238,7 @@ async def create_chat(
context: Context,
default_value: AICompletionConfigDefault,
variables: Optional[Dict[str, Any]] = None,
default_ai_provider: Optional[SupportedAIProvider] = None,
default_ai_provider: Optional[str] = None,
) -> Optional[Chat]:
"""
Creates and returns a new Chat instance for AI conversations.
Expand Down Expand Up @@ -275,15 +273,14 @@ async def create_chat(
print(f"Conversation has {len(messages)} messages")
"""
self._client.track('$ld:ai:config:function:createChat', context, key, 1)
if self._logger:
self._logger.debug(f"Creating chat for key: {key}")
log.debug(f"Creating chat for key: {key}")
config = self.completion_config(key, context, default_value, variables)

if not config.enabled or not config.tracker:
# Would log info if logger available
return None

provider = await AIProviderFactory.create(config, self._logger, default_ai_provider)
provider = await AIProviderFactory.create(config, default_ai_provider)
if not provider:
return None

Expand All @@ -296,7 +293,7 @@ async def create_chat(
default_ai_provider,
)

return Chat(config, config.tracker, provider, judges, self._logger)
return Chat(config, config.tracker, provider, judges)

def agent_config(
self,
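For callers, the visible API change in this file is that `default_ai_provider` is now a plain string rather than a `SupportedAIProvider`. A hedged usage sketch — the config key, provider name, and the `AICompletionConfigDefault` constructor shape are illustrative assumptions, not confirmed by this diff:

from ldclient import Context
from ldai.client import LDAIClient
from ldai.models import AICompletionConfigDefault

async def run(ld_client) -> None:
    ai = LDAIClient(ld_client)
    context = Context.builder('user-123').build()
    chat = await ai.create_chat(
        'my-chat-config',                          # AI Config key (assumed)
        context,
        AICompletionConfigDefault(enabled=False),  # assumed constructor shape
        variables={'topic': 'billing'},
        default_ai_provider='langchain',           # now a str, not SupportedAIProvider
    )
    if chat is not None:  # None when the config is disabled or no provider matched
        response = await chat.invoke('Hello!')
        print(response)
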
47 changes: 18 additions & 29 deletions packages/sdk/server-ai/src/ldai/judge/__init__.py
@@ -5,6 +5,7 @@

import chevron

from ldai import log
from ldai.judge.evaluation_schema_builder import EvaluationSchemaBuilder
from ldai.models import AIJudgeConfig, LDMessage
from ldai.providers.ai_provider import AIProvider
@@ -26,20 +27,17 @@ def __init__(
ai_config: AIJudgeConfig,
ai_config_tracker: LDAIConfigTracker,
ai_provider: AIProvider,
logger: Optional[Any] = None,
):
"""
Initialize the Judge.

:param ai_config: The judge AI configuration
:param ai_config_tracker: The tracker for the judge configuration
:param ai_provider: The AI provider to use for evaluation
:param logger: Optional logger for logging
"""
self._ai_config = ai_config
self._ai_config_tracker = ai_config_tracker
self._ai_provider = ai_provider
self._logger = logger
self._evaluation_response_structure = EvaluationSchemaBuilder.build(
ai_config.evaluation_metric_keys
)
@@ -60,20 +58,17 @@ async def evaluate(
"""
try:
if not self._ai_config.evaluation_metric_keys or len(self._ai_config.evaluation_metric_keys) == 0:
if self._logger:
self._logger.warn(
'Judge configuration is missing required evaluationMetricKeys'
)
log.warn(
'Judge configuration is missing required evaluationMetricKeys'
)
return None

if not self._ai_config.messages:
if self._logger:
self._logger.warn('Judge configuration must include messages')
log.warn('Judge configuration must include messages')
return None

if random.random() > sampling_rate:
if self._logger:
self._logger.debug(f'Judge evaluation skipped due to sampling rate: {sampling_rate}')
log.debug(f'Judge evaluation skipped due to sampling rate: {sampling_rate}')
return None

messages = self._construct_evaluation_messages(input_text, output_text)
@@ -89,8 +84,7 @@
evals = self._parse_evaluation_response(response.data)

if len(evals) != len(self._ai_config.evaluation_metric_keys):
if self._logger:
self._logger.warn('Judge evaluation did not return all evaluations')
log.warn('Judge evaluation did not return all evaluations')
success = False

return JudgeResponse(
Expand All @@ -99,8 +93,7 @@ async def evaluate(
success=success,
)
except Exception as error:
if self._logger:
self._logger.error(f'Judge evaluation failed: {error}')
log.error(f'Judge evaluation failed: {error}')
return JudgeResponse(
evals={},
success=False,
@@ -193,8 +186,7 @@ def _parse_evaluation_response(self, data: Dict[str, Any]) -> Dict[str, EvalScor
results: Dict[str, EvalScore] = {}

if not data.get('evaluations') or not isinstance(data['evaluations'], dict):
if self._logger:
self._logger.warn('Invalid response: missing or invalid evaluations object')
log.warn('Invalid response: missing or invalid evaluations object')
return results

evaluations = data['evaluations']
@@ -203,27 +195,24 @@
evaluation = evaluations.get(metric_key)

if not evaluation or not isinstance(evaluation, dict):
if self._logger:
self._logger.warn(f'Missing evaluation for metric key: {metric_key}')
log.warn(f'Missing evaluation for metric key: {metric_key}')
continue

score = evaluation.get('score')
reasoning = evaluation.get('reasoning')

if not isinstance(score, (int, float)) or score < 0 or score > 1:
if self._logger:
self._logger.warn(
f'Invalid score evaluated for {metric_key}: {score}. '
'Score must be a number between 0 and 1 inclusive'
)
log.warn(
f'Invalid score evaluated for {metric_key}: {score}. '
'Score must be a number between 0 and 1 inclusive'
)
continue

if not isinstance(reasoning, str):
if self._logger:
self._logger.warn(
f'Invalid reasoning evaluated for {metric_key}: {reasoning}. '
'Reasoning must be a string'
)
log.warn(
f'Invalid reasoning evaluated for {metric_key}: {reasoning}. '
'Reasoning must be a string'
)
continue

results[metric_key] = EvalScore(score=float(score), reasoning=reasoning)
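The validation rules in `_parse_evaluation_response` are the core of this file: every metric key must map to an object carrying a numeric `score` in [0, 1] and a string `reasoning`, and anything else is skipped with a warning. A standalone sketch of the same rules, with a simplified result type in place of `EvalScore`:

from typing import Any, Dict, List

def parse_evaluations(data: Dict[str, Any], metric_keys: List[str]) -> Dict[str, dict]:
    results: Dict[str, dict] = {}
    evaluations = data.get('evaluations')
    if not isinstance(evaluations, dict):
        return results  # the SDK warns via the shared `log` here
    for key in metric_keys:
        evaluation = evaluations.get(key)
        if not isinstance(evaluation, dict):
            continue  # missing evaluation for this metric key
        score = evaluation.get('score')
        reasoning = evaluation.get('reasoning')
        if not isinstance(score, (int, float)) or not 0 <= score <= 1:
            continue  # score must be a number between 0 and 1 inclusive
        if not isinstance(reasoning, str):
            continue  # reasoning must be a string
        results[key] = {'score': float(score), 'reasoning': reasoning}
    return results

# Example: only 'relevance' survives validation; 'safety' is out of range.
parse_evaluations(
    {'evaluations': {'relevance': {'score': 0.9, 'reasoning': 'on topic'},
                     'safety': {'score': 2, 'reasoning': 'too high'}}},
    ['relevance', 'safety'],
)
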
21 changes: 1 addition & 20 deletions packages/sdk/server-ai/src/ldai/providers/__init__.py
@@ -1,28 +1,9 @@
"""AI Provider interfaces and factory for LaunchDarkly AI SDK."""

from ldai.providers.ai_provider import AIProvider
from ldai.providers.ai_provider_factory import (AIProviderFactory,
SupportedAIProvider)

# Export LangChain provider if available
# TODO: Uncomment when langchain provider package is introduced
# try:
# from ldai.providers.langchain import LangChainProvider
# __all__ = [
# 'AIProvider',
# 'AIProviderFactory',
# 'LangChainProvider',
# 'SupportedAIProvider',
# ]
# except ImportError:
# __all__ = [
# 'AIProvider',
# 'AIProviderFactory',
# 'SupportedAIProvider',
# ]
from ldai.providers.ai_provider_factory import AIProviderFactory

__all__ = [
'AIProvider',
'AIProviderFactory',
'SupportedAIProvider',
]
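After this cleanup the provider package's public surface is just two names; `SupportedAIProvider` is no longer exported, matching the switch to plain provider-name strings elsewhere in this PR:

from ldai.providers import AIProvider, AIProviderFactory
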
18 changes: 4 additions & 14 deletions packages/sdk/server-ai/src/ldai/providers/ai_provider.py
@@ -3,6 +3,7 @@
from abc import ABC, abstractmethod
from typing import Any, Dict, List, Optional, Union

from ldai import log
from ldai.models import AIConfigKind, LDMessage
from ldai.providers.types import ChatResponse, StructuredResponse

@@ -18,14 +19,6 @@ class AIProvider(ABC):
for better extensibility and backwards compatibility.
"""

def __init__(self, logger: Optional[Any] = None):
"""
Initialize the AI provider.

:param logger: Optional logger for logging provider operations.
"""
self.logger = logger

async def invoke_model(self, messages: List[LDMessage]) -> ChatResponse:
"""
Invoke the chat model with an array of messages.
@@ -39,8 +32,7 @@ async def invoke_model(self, messages: List[LDMessage]) -> ChatResponse:
:param messages: Array of LDMessage objects representing the conversation
:return: ChatResponse containing the model's response
"""
if self.logger:
self.logger.warn('invokeModel not implemented by this provider')
log.warn('invokeModel not implemented by this provider')

from ldai.models import LDMessage
from ldai.providers.types import LDAIMetrics
@@ -68,8 +60,7 @@ async def invoke_structured_model(
:param response_structure: Dictionary of output configurations keyed by output name
:return: StructuredResponse containing the structured data
"""
if self.logger:
self.logger.warn('invokeStructuredModel not implemented by this provider')
log.warn('invokeStructuredModel not implemented by this provider')

from ldai.providers.types import LDAIMetrics

@@ -81,15 +72,14 @@

@staticmethod
@abstractmethod
async def create(ai_config: AIConfigKind, logger: Optional[Any] = None) -> 'AIProvider':
async def create(ai_config: AIConfigKind) -> 'AIProvider':
"""
Static method that constructs an instance of the provider.

Each provider implementation must provide their own static create method
that accepts an AIConfigKind and returns a configured instance.

:param ai_config: The LaunchDarkly AI configuration
:param logger: Optional logger for the provider
:return: Configured provider instance
"""
raise NotImplementedError('Provider implementations must override the static create method')
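With the logger parameter removed, a provider implementation now only satisfies the simpler `create(ai_config)` contract and relies on the shared `log` for diagnostics. A self-contained sketch of the shape of such an implementation, using stand-in types in place of the SDK's `AIProvider`, `LDMessage`, `ChatResponse`, and `AIConfigKind`:

from abc import ABC, abstractmethod
from dataclasses import dataclass
from typing import List

@dataclass
class Message:  # stand-in for LDMessage
    role: str
    content: str

class Provider(ABC):  # stand-in for AIProvider
    async def invoke_model(self, messages: List[Message]) -> Message:
        raise NotImplementedError

    @staticmethod
    @abstractmethod
    async def create(ai_config: dict) -> 'Provider':
        """Construct a configured provider; note: no logger argument anymore."""

class EchoProvider(Provider):
    async def invoke_model(self, messages: List[Message]) -> Message:
        last = messages[-1].content if messages else ''
        return Message(role='assistant', content=f'echo: {last}')

    @staticmethod
    async def create(ai_config: dict) -> 'Provider':
        return EchoProvider()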