llm.py
"""Helpers for querying OpenAI-compatible chat-completion endpoints
(OpenAI, Ollama, GPTGOD, DeepSeek), with optional JSON-mode output."""

import os

from dotenv import load_dotenv, find_dotenv
from openai import OpenAI
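
# The functions below read per-provider credentials from a local .env file.
# A minimal sketch of the expected entries (values and base URLs are
# illustrative assumptions, not taken from this repository):
#
#   OPENAI_API_KEY=sk-...
#   OPENAI_API_BASE=https://api.openai.com/v1
#   OLLAMA_API_KEY=ollama              # any non-empty string for a local server
#   OLLAMA_API_BASE=http://localhost:11434/v1
#   GPTGOD_API_KEY=...
#   GPTGOD_API_BASE=...
#   DEEPSEEK_API_KEY=...
#   DEEPSEEK_API_BASE=https://api.deepseek.com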


def get_openai_answer(ques, model_name="gpt-4o-mini", require_json=False, temperature=0.0):
    """Query the OpenAI endpoint and return the completion text."""
    _ = load_dotenv(find_dotenv())  # read local .env file
    api_key = os.environ["OPENAI_API_KEY"]
    api_base = os.environ["OPENAI_API_BASE"]
    client = OpenAI(api_key=api_key, base_url=api_base)
    # Accept either a ready-made message list or a bare prompt string.
    messages = ques if isinstance(ques, list) else [{"role": "user", "content": ques}]
    kwargs = dict(model=model_name, messages=messages, stream=False, temperature=temperature)
    if require_json:
        # JSON mode: OpenAI's API also expects the word "json" to appear in the messages.
        kwargs["response_format"] = {"type": "json_object"}
    response = client.chat.completions.create(**kwargs)
    return response.choices[0].message.content


def get_ollama_answers(ques, model_name, require_json=False, temperature=0.0):
    """Query an Ollama server via its OpenAI-compatible API, streaming the reply."""
    _ = load_dotenv(find_dotenv())
    api_key = os.environ["OLLAMA_API_KEY"]
    api_base = os.environ["OLLAMA_API_BASE"]
    client = OpenAI(api_key=api_key, base_url=api_base)
    messages = ques if isinstance(ques, list) else [{"role": "user", "content": ques}]
    kwargs = dict(
        model=model_name,
        messages=messages,
        stream=True,
        max_tokens=8192,
        temperature=temperature,
    )
    if require_json:
        kwargs["response_format"] = {"type": "json_object"}
    response = client.chat.completions.create(**kwargs)
    # Concatenate the streamed deltas into the full completion text.
    full_response = ""
    for chunk in response:
        if chunk.choices[0].delta.content:
            full_response += chunk.choices[0].delta.content
    return full_response


def get_gptgod_answers(ques, model_name, require_json=False, temperature=0.0):
    """Query the GPTGOD endpoint and return the completion text."""
    _ = load_dotenv(find_dotenv())
    api_key = os.environ["GPTGOD_API_KEY"]
    api_base = os.environ["GPTGOD_API_BASE"]
    client = OpenAI(api_key=api_key, base_url=api_base)
    messages = ques if isinstance(ques, list) else [{"role": "user", "content": ques}]
    kwargs = dict(model=model_name, messages=messages, temperature=temperature)
    if require_json:
        kwargs["response_format"] = {"type": "json_object"}
    response = client.chat.completions.create(**kwargs)
    return response.choices[0].message.content


def get_deepseek_answers(ques, model_name, require_json=False, temperature=0.0):
    """Query the DeepSeek endpoint and return the completion text."""
    _ = load_dotenv(find_dotenv())
    api_key = os.environ["DEEPSEEK_API_KEY"]
    api_base = os.environ["DEEPSEEK_API_BASE"]
    client = OpenAI(api_key=api_key, base_url=api_base)
    messages = ques if isinstance(ques, list) else [{"role": "user", "content": ques}]
    kwargs = dict(model=model_name, messages=messages, stream=False, temperature=temperature)
    if require_json:
        kwargs["response_format"] = {"type": "json_object"}
    response = client.chat.completions.create(**kwargs)
    return response.choices[0].message.content


def get_llm_answers(ques, model_name, require_json=False, temperature=0.0):
    """Route a request to the right backend based on the model name."""
    if "gpt" in model_name:
        return get_openai_answer(ques, model_name, require_json, temperature)
    elif model_name == "llama2" or "deepseek-r1:70b" in model_name:
        return get_ollama_answers(ques, model_name, require_json, temperature)
    elif "deepseek-chat" in model_name:
        return get_deepseek_answers(ques, model_name, require_json, temperature)
    else:
        # Fall back to the GPTGOD endpoint for anything unrecognized.
        return get_gptgod_answers(ques, model_name, require_json, temperature)
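

if __name__ == "__main__":
    # Hedged usage sketch: the prompts and the "gpt-4o-mini" model name below
    # are illustrative assumptions, not part of the original module. Running
    # this requires matching *_API_KEY / *_API_BASE entries in a local .env file.
    import json

    # Plain-text completion, routed to the OpenAI branch ("gpt" in the name).
    print(get_llm_answers("Say hello in one short sentence.", "gpt-4o-mini"))

    # JSON-mode completion: the model is constrained to emit a JSON object,
    # which the caller still has to parse.
    raw = get_llm_answers(
        'Return a JSON object of the form {"colors": [...]} with three colors.',
        "gpt-4o-mini",
        require_json=True,
    )
    print(json.loads(raw)["colors"])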