Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 3 additions & 1 deletion config.py
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,7 @@ class Config:

TIMESCALE_SERVICE_URL = os.getenv('TIMESCALE_SERVICE_URL')
OPENAI_API_KEY = os.getenv('OPENAI_API_KEY')
OPENAI_BASE_URL = os.getenv('OPENAI_BASE_URL')
SecretKey = os.getenv('SecretKey')
Encreaption_key = os.getenv('Encreaption_key')
Secret_IV = os.getenv('Secret_IV')
Expand Down Expand Up @@ -55,4 +56,5 @@ class Config:
DOCSTAR_ORG_ID = os.getenv('DOCSTAR_ORG_ID')
DOCSTAR_COLLECTION_ID = os.getenv('DOCSTAR_COLLECTION_ID')
AI_ML_APIKEY = os.getenv('AI_ML_APIKEY')
AI_MIDDLEWARE_PAUTH_KEY = os.getenv('AI_MIDDLEWARE_PAUTH_KEY')
AI_MIDDLEWARE_PAUTH_KEY = os.getenv('AI_MIDDLEWARE_PAUTH_KEY')
FIRECRAWL_API_KEY = os.getenv('FIRECRAWL_API_KEY')
4 changes: 2 additions & 2 deletions src/services/commonServices/openAI/image_model.py
Original file line number Diff line number Diff line change
@@ -1,14 +1,14 @@
import traceback
from openai import AsyncOpenAI
import uuid
import asyncio
from src.services.utils.gcp_upload_service import uploadDoc
from src.services.utils.openai_client import get_async_openai_client



async def OpenAIImageModel(configuration, apiKey, execution_time_logs, timer):
try:
openai_config = AsyncOpenAI(api_key=apiKey)
openai_config = get_async_openai_client(api_key=apiKey)
timer.start()
chat_completion = await openai_config.images.generate(**configuration)
execution_time_logs.append({"step": "OpenAI image Processing time", "time_taken": timer.stop("OpenAI image Processing time")})
Expand Down
4 changes: 2 additions & 2 deletions src/services/commonServices/openAI/openai_embedding_model.py
Original file line number Diff line number Diff line change
@@ -1,9 +1,9 @@
from openai import AsyncOpenAI
from src.services.utils.openai_client import get_async_openai_client


async def embedding_model(configuration, apiKey):
try:
openAI = AsyncOpenAI(api_key=apiKey)
openAI = get_async_openai_client(api_key=apiKey)
embedding = await openAI.embeddings.create(**configuration)
return {'success': True, 'response': embedding.to_dict()}

Expand Down
7 changes: 4 additions & 3 deletions src/services/commonServices/openAI/openai_run_batch.py
Original file line number Diff line number Diff line change
@@ -1,13 +1,14 @@
import json
import os
from openai import AsyncOpenAI
import io

from src.services.utils.openai_client import get_async_openai_client

async def create_batch_file(data, apiKey):
try:
file_content = "\n".join(data)
filelike_obj = io.BytesIO(file_content.encode("utf-8"))
openAI = AsyncOpenAI(api_key=apiKey)
openAI = get_async_openai_client(api_key=apiKey)
batch_input_file = await openAI.files.create(
file=filelike_obj,
purpose="batch"
Expand All @@ -19,7 +20,7 @@ async def create_batch_file(data, apiKey):

async def process_batch_file(batch_input_file, apiKey):
try:
openAI = AsyncOpenAI(api_key=apiKey)
openAI = get_async_openai_client(api_key=apiKey)
batch_input_file_id = batch_input_file.id

result = await openAI.batches.create(
Expand Down
8 changes: 4 additions & 4 deletions src/services/commonServices/openAI/runModel.py
Original file line number Diff line number Diff line change
@@ -1,9 +1,9 @@
from openai import AsyncOpenAI
import traceback
import copy
from ..api_executor import execute_api_call
# from src.services.utils.unified_token_validator import validate_openai_token_limit
from globals import *
from src.services.utils.openai_client import get_async_openai_client


def remove_duplicate_ids_from_input(configuration):
Expand Down Expand Up @@ -42,7 +42,7 @@ def remove_duplicate_ids_from_input(configuration):


async def openai_test_model(configuration, api_key):
openAI = AsyncOpenAI(api_key=api_key)
openAI = get_async_openai_client(api_key=api_key)
try:
chat_completion = await openAI.chat.completions.create(**configuration)
return {'success': True, 'response': chat_completion.to_dict()}
Expand All @@ -59,7 +59,7 @@ async def openai_response_model(configuration, apiKey, execution_time_logs, brid
# model_name = configuration.get('model')
# validate_openai_token_limit(configuration, model_name, 'openai_response')

client = AsyncOpenAI(api_key=apiKey)
client = get_async_openai_client(api_key=apiKey)

# Define the API call function with retry mechanism for duplicate ID errors
async def api_call_with_retry(config, max_retries=2):
Expand Down Expand Up @@ -137,7 +137,7 @@ async def api_call(config):
async def openai_completion(configuration, apiKey, execution_time_logs, bridge_id, timer, message_id=None, org_id=None, name = "", org_name= "", service = "", count=0, token_calculator=None):
try:

openAI = AsyncOpenAI(api_key=apiKey)
openAI = get_async_openai_client(api_key=apiKey)

# Define the API call function
async def api_call(config):
Expand Down
4 changes: 2 additions & 2 deletions src/services/utils/batch_script.py
Original file line number Diff line number Diff line change
@@ -1,11 +1,11 @@
from ..cache_service import find_in_cache_with_prefix, delete_in_cache
from openai import AsyncOpenAI
from ..utils.send_error_webhook import create_response_format
from ..commonServices.baseService.baseService import sendResponse
import asyncio
import json
from .ai_middleware_format import Response_formatter
from globals import *
from .openai_client import get_async_openai_client

async def repeat_function():
while True:
Expand All @@ -26,7 +26,7 @@ async def check_batch_status():
batch_id = id.get('id')
if webhook.get('url') is not None:
response_format = create_response_format(webhook.get('url'), webhook.get('headers'))
openAI = AsyncOpenAI(api_key=apikey)
openAI = get_async_openai_client(api_key=apikey)
batch = await openAI.batches.retrieve(batch_id)
if batch.status == "completed":
file = batch.output_file_id or batch.error_file_id
Expand Down
4 changes: 2 additions & 2 deletions src/services/utils/guardrails_validator.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,10 +3,10 @@
"""
import json
import traceback
from openai import AsyncOpenAI
from globals import logger
from exceptions.bad_request import BadRequestException
from config import Config
from src.services.utils.openai_client import get_async_openai_client


# Guardrails template definitions
Expand Down Expand Up @@ -201,7 +201,7 @@ async def validate_with_guardrails(user_message: str, api_key: str, guardrails_c
]

# Initialize OpenAI client
client = AsyncOpenAI(api_key=api_key)
client = get_async_openai_client(api_key=api_key)

# Call OpenAI gpt-5-nano model
response = await client.chat.completions.create(
Expand Down
18 changes: 18 additions & 0 deletions src/services/utils/openai_client.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,18 @@
from openai import AsyncOpenAI

from config import Config


def get_async_openai_client(api_key, **kwargs):
    """
    Build an AsyncOpenAI client that honors the configured base URL override.

    Falls back to the default OpenAI endpoint when ``OPENAI_BASE_URL`` is
    unset. An explicit ``base_url`` passed via ``**kwargs`` takes precedence
    over the Config-level override, so individual call sites can still target
    a specific endpoint.

    Args:
        api_key: API key forwarded to the ``AsyncOpenAI`` client.
        **kwargs: Extra keyword arguments forwarded verbatim to
            ``AsyncOpenAI`` (e.g. ``timeout``, ``max_retries``, or an
            explicit ``base_url``).

    Returns:
        AsyncOpenAI: a configured asynchronous OpenAI client.
    """
    client_kwargs = dict(kwargs)
    client_kwargs["api_key"] = api_key

    # Apply the environment-configured override only when the caller did not
    # supply a base_url of their own. getattr() keeps this safe even if the
    # Config attribute is absent in some deployments. Trailing slashes are
    # stripped so SDK path joins don't produce double slashes.
    base_url = getattr(Config, "OPENAI_BASE_URL", None)
    if base_url and "base_url" not in client_kwargs:
        client_kwargs["base_url"] = base_url.rstrip("/")

    return AsyncOpenAI(**client_kwargs)