@@ -2,6 +2,7 @@
 
 from fastapi import Header, HTTPException, Request
 
+from codegate.clients.clients import ClientType
 from codegate.clients.detector import DetectClient
 from codegate.pipeline.factory import PipelineFactory
 from codegate.providers.openai import OpenAIProvider
@@ -15,6 +16,21 @@ def __init__(self, pipeline_factory: PipelineFactory):
     def provider_route_name(self) -> str:
         return "openrouter"
 
+    async def process_request(
+        self,
+        data: dict,
+        api_key: str,
+        is_fim_request: bool,
+        client_type: ClientType,
+    ):
+        # litellm workaround - add openrouter/ prefix to model name to make it openai-compatible
+        # once we get rid of litellm, this can simply be removed
+        original_model = data.get("model", "")
+        if not original_model.startswith("openrouter/"):
+            data["model"] = f"openrouter/{original_model}"
+
+        return await super().process_request(data, api_key, is_fim_request, client_type)
+
     def _setup_routes(self):
         @self.router.post(f"/{self.provider_route_name}/api/v1/chat/completions")
         @self.router.post(f"/{self.provider_route_name}/chat/completions")
@@ -33,12 +49,6 @@ async def create_completion(
             base_url = self._get_base_url()
             data["base_url"] = base_url
 
-            # litellm workaround - add openrouter/ prefix to model name to make it openai-compatible
-            # once we get rid of litellm, this can simply be removed
-            original_model = data.get("model", "")
-            if not original_model.startswith("openrouter/"):
-                data["model"] = f"openrouter/{original_model}"
-
             return await self.process_request(
                 data,
                 api_key,
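
For context, a minimal standalone sketch of the behavior the relocated workaround provides: the "openrouter/" prefix is applied exactly once per request, so model ids that already carry the prefix pass through unchanged. The helper name and the model ids below are hypothetical illustrations, not part of the diff.

def normalize_openrouter_model(data: dict) -> dict:
    # litellm expects model ids of the form "openrouter/<model>";
    # prefix only when missing, so the operation is idempotent
    original_model = data.get("model", "")
    if not original_model.startswith("openrouter/"):
        data["model"] = f"openrouter/{original_model}"
    return data

# Usage: both calls yield the same prefixed model id
assert normalize_openrouter_model({"model": "qwen/qwen-2.5-coder"})["model"] == "openrouter/qwen/qwen-2.5-coder"
assert normalize_openrouter_model({"model": "openrouter/qwen/qwen-2.5-coder"})["model"] == "openrouter/qwen/qwen-2.5-coder"

Moving this normalization into process_request means every route that funnels through it gets the prefix, rather than only the chat-completions handler where the code previously lived.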