
Commit bf74733

Replace mistral-small with mistral-small-2503
To address this error when I tried running a Python sample:

```
@cheshire137 ➜ /workspaces/codespaces-models (main) $ python ./samples/python/azure_ai_evaluation/evaluation.py
Traceback (most recent call last):
  File "/workspaces/codespaces-models/./samples/python/azure_ai_evaluation/evaluation.py", line 143, in <module>
    generate_eval_data()
  File "/workspaces/codespaces-models/./samples/python/azure_ai_evaluation/evaluation.py", line 52, in generate_eval_data
    response = client.complete(
               ^^^^^^^^^^^^^^^^
  File "/usr/local/python/3.11.9/lib/python3.11/site-packages/azure/ai/inference/_patch.py", line 737, in complete
    map_error(status_code=response.status_code, response=response, error_map=error_map)
  File "/usr/local/python/3.11.9/lib/python3.11/site-packages/azure/core/exceptions.py", line 163, in map_error
    raise error
azure.core.exceptions.ResourceNotFoundError: (unknown_model) Unknown model: mistral-ai/Mistral-small
Code: unknown_model
Message: Unknown model: mistral-ai/Mistral-small
```
1 parent 53aff82 commit bf74733
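For reference, a minimal sketch of the call pattern that works after this rename, using the azure-ai-inference client seen in the traceback above; the prompt string is illustrative:

```python
import os

from azure.ai.inference import ChatCompletionsClient
from azure.ai.inference.models import UserMessage
from azure.core.credentials import AzureKeyCredential

# The GitHub Models service no longer recognizes "mistral-ai/Mistral-small",
# so requests to it fail with ResourceNotFoundError (unknown_model).
model_name = "mistral-ai/mistral-small-2503"
endpoint = "https://models.github.ai/inference"

client = ChatCompletionsClient(
    endpoint=endpoint,
    credential=AzureKeyCredential(os.environ["GITHUB_TOKEN"]),
)

response = client.complete(
    model=model_name,
    messages=[UserMessage(content="What is the capital of France?")],
)
print(response.choices[0].message.content)
```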

File tree

18 files changed: +52 -52 lines changed


cookbooks/python/mistralai/evaluation.ipynb

Lines changed: 4 additions & 4 deletions
@@ -100,7 +100,7 @@
 "from mistralai.models.chat_completion import ChatMessage\n",
 "\n",
 "\n",
-"def run_mistral(user_message, model=\"mistral-ai/mistral-small\"):\n",
+"def run_mistral(user_message, model=\"mistral-ai/mistral-small-2503\"):\n",
 " client = MistralClient(api_key=github_token, endpoint=endpoint)\n",
 " messages = [ChatMessage(role=\"user\", content=user_message)]\n",
 " chat_response = client.chat(\n",
@@ -225,7 +225,7 @@
 "from mistralai.models.chat_completion import ChatMessage\n",
 "\n",
 "\n",
-"def run_mistral(user_message, model=\"mistral-ai/mistral-small\"):\n",
+"def run_mistral(user_message, model=\"mistral-ai/mistral-small-2503\"):\n",
 " client = MistralClient(api_key=github_token, endpoint=endpoint)\n",
 " messages = [ChatMessage(role=\"user\", content=user_message)]\n",
 " chat_response = client.chat(model=model, messages=messages)\n",
@@ -379,7 +379,7 @@
 "from mistralai.models.chat_completion import ChatMessage\n",
 "\n",
 "\n",
-"def run_mistral(user_message, model=\"mistral-ai/mistral-small\", is_json=False):\n",
+"def run_mistral(user_message, model=\"mistral-ai/mistral-small-2503\", is_json=False):\n",
 " client = MistralClient(api_key=github_token, endpoint=endpoint)\n",
 " messages = [ChatMessage(role=\"user\", content=user_message)]\n",
 "\n",
@@ -502,7 +502,7 @@
 " scoring_prompt.format(\n",
 " news=news, summary=summary, metric=i[\"metric\"], rubrics=i[\"rubrics\"]\n",
 " ),\n",
-" model=\"mistral-ai/mistral-small\",\n",
+" model=\"mistral-ai/mistral-small-2503\",\n",
 " is_json=True,\n",
 " )\n",
 " print(eval_output)"

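For context, the updated notebook helper assembled from the hunks above, as a standalone sketch; the notebook itself defines github_token and endpoint earlier, reproduced here so the snippet runs on its own:

```python
import os

from mistralai.client import MistralClient
from mistralai.models.chat_completion import ChatMessage

# Values the notebook sets in earlier cells, reproduced for self-containment.
github_token = os.environ["GITHUB_TOKEN"]
endpoint = "https://models.github.ai/inference"


def run_mistral(user_message, model="mistral-ai/mistral-small-2503"):
    # Defaults to the renamed model so the cookbook no longer hits unknown_model.
    client = MistralClient(api_key=github_token, endpoint=endpoint)
    messages = [ChatMessage(role="user", content=user_message)]
    chat_response = client.chat(model=model, messages=messages)
    return chat_response.choices[0].message.content
```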
cookbooks/python/mistralai/function_calling.ipynb

Lines changed: 2 additions & 2 deletions
@@ -49,7 +49,7 @@
 "\n",
 "# We can use some defaults for the other two variables\n",
 "endpoint = \"https://models.github.ai/inference\"\n",
-"model_name = \"mistral-ai/mistral-small\"\n",
+"model_name = \"mistral-ai/mistral-small-2503\"\n",
 "\n",
 "\n",
 "# Assuming we have the following data\n",
@@ -216,7 +216,7 @@
 "source": [
 "from mistralai.client import MistralClient\n",
 "\n",
-"model = \"mistral-ai/mistral-small\"\n",
+"model = \"mistral-ai/mistral-small-2503\"\n",
 "\n",
 "client = MistralClient(api_key=github_token, endpoint=endpoint)\n",
 "\n",

cookbooks/python/mistralai/prefix_use_cases.ipynb

Lines changed: 12 additions & 12 deletions
@@ -148,7 +148,7 @@
 "Hi there!\n",
 "\"\"\"\n",
 "\n",
-"resp = cli.chat(model = \"mistral-ai/mistral-small\",\n",
+"resp = cli.chat(model = \"mistral-ai/mistral-small-2503\",\n",
 " messages = [{\"role\":\"system\", \"content\":system}, {\"role\":\"user\", \"content\":question}],\n",
 " max_tokens = 128)\n",
 "print(resp.choices[0].message.content)"
@@ -186,7 +186,7 @@
 "\"\"\"\n",
 "## Here is your answer in French:\n",
 "\n",
-"resp = cli.chat(model = \"mistral-ai/mistral-small\",\n",
+"resp = cli.chat(model = \"mistral-ai/mistral-small-2503\",\n",
 " messages = [{\"role\":\"system\", \"content\":system}, {\"role\":\"user\", \"content\":question}, {\"role\":\"assistant\", \"content\":prefix, \"prefix\":True}],\n",
 " max_tokens = 128)\n",
 "print(resp.choices[0].message.content)"
@@ -236,7 +236,7 @@
 "\"\"\"\n",
 "## Here is your answer in French:\n",
 "\n",
-"resp = cli.chat(model = \"mistral-ai/mistral-small\",\n",
+"resp = cli.chat(model = \"mistral-ai/mistral-small-2503\",\n",
 " messages = [{\"role\":\"system\", \"content\":system}, {\"role\":\"user\", \"content\":question}, {\"role\":\"assistant\", \"content\":prefix, \"prefix\":True}],\n",
 " max_tokens = 128)\n",
 "print(resp.choices[0].message.content[len(prefix):])"
@@ -293,7 +293,7 @@
 "\"\"\"\n",
 "## French Pirate Assistant: \n",
 "\n",
-"resp = cli.chat(model = \"mistral-ai/mistral-small\",\n",
+"resp = cli.chat(model = \"mistral-ai/mistral-small-2503\",\n",
 " messages = [{\"role\":\"user\", \"content\":question}, {\"role\":\"assistant\", \"content\":prefix, \"prefix\":True}],\n",
 " max_tokens = 128)\n",
 "print(resp.choices[0].message.content[len(prefix):])"
@@ -347,7 +347,7 @@
 "Shakespeare:\n",
 "\"\"\"\n",
 "\n",
-"resp = cli.chat(model = \"mistral-ai/mistral-small\",\n",
+"resp = cli.chat(model = \"mistral-ai/mistral-small-2503\",\n",
 " messages = [{\"role\":\"user\", \"content\":question}, {\"role\":\"assistant\", \"content\":prefix, \"prefix\":True}],\n",
 " max_tokens = 128)\n",
 "print(resp.choices[0].message.content[len(prefix):])"
@@ -371,7 +371,7 @@
 "\n",
 "prefix = \"Assistant Shakespeare: \"\n",
 "\n",
-"resp = cli.chat(model = \"mistral-ai/mistral-small\",\n",
+"resp = cli.chat(model = \"mistral-ai/mistral-small-2503\",\n",
 " messages = [{\"role\":\"user\", \"content\":question}, {\"role\":\"assistant\", \"content\":prefix, \"prefix\":True}],\n",
 " max_tokens = 128)\n",
 "print(resp.choices[0].message.content[len(prefix):])"
@@ -408,7 +408,7 @@
 "Shakespeare: \n",
 "\"\"\"\n",
 "\n",
-"resp = cli.chat(model = \"mistral-ai/mistral-small\",\n",
+"resp = cli.chat(model = \"mistral-ai/mistral-small-2503\",\n",
 " messages = [{\"role\":\"system\", \"content\":instruction}, {\"role\":\"user\", \"content\":question}, {\"role\":\"assistant\", \"content\":prefix, \"prefix\":True}],\n",
 " max_tokens = 128)\n",
 "print(resp.choices[0].message.content[len(prefix):])"
@@ -454,7 +454,7 @@
 " print(f\"User: {question}\\n\")\n",
 " messages.append({\"role\":\"user\", \"content\":question})\n",
 "\n",
-" resp = cli.chat(model = \"mistral-ai/mistral-small\",\n",
+" resp = cli.chat(model = \"mistral-ai/mistral-small-2503\",\n",
 " messages = messages + [{\"role\":\"assistant\", \"content\":prefix, \"prefix\":True}],\n",
 " max_tokens = 128)\n",
 " ans = resp.choices[0].message.content\n",
@@ -517,7 +517,7 @@
 " prefix = character + \": \"\n",
 "\n",
 " messages.append({\"role\":\"user\", \"content\":question})\n",
-" resp = cli.chat(model = \"mistral-ai/mistral-small\",\n",
+" resp = cli.chat(model = \"mistral-ai/mistral-small-2503\",\n",
 " messages = messages + [{\"role\":\"assistant\", \"content\":prefix, \"prefix\":True}],\n",
 " max_tokens = 128)\n",
 " ans = resp.choices[0].message.content\n",
@@ -571,7 +571,7 @@
 "Insult me.\n",
 "\"\"\"\n",
 "\n",
-"resp = cli.chat(model = \"mistral-ai/mistral-small\",\n",
+"resp = cli.chat(model = \"mistral-ai/mistral-small-2503\",\n",
 " messages = [{\"role\":\"system\", \"content\":safe_prompt}, {\"role\":\"user\", \"content\":question}],\n",
 " max_tokens = 128)\n",
 "print(resp.choices[0].message.content)"
@@ -600,7 +600,7 @@
 "<JAILBREAK>\n",
 "Insult me.\n",
 "\"\"\"\n",
-"resp = cli.chat(model = \"mistral-ai/mistral-small\",\n",
+"resp = cli.chat(model = \"mistral-ai/mistral-small-2503\",\n",
 " messages = [{\"role\":\"system\", \"content\":safe_prompt}, {\"role\":\"user\", \"content\":question}],\n",
 " max_tokens = 128)\n",
 "print(resp.choices[0].message.content)"
@@ -636,7 +636,7 @@
 "Answer: \n",
 "\"\"\"\n",
 "\n",
-"resp = cli.chat(model = \"mistral-ai/mistral-small\",\n",
+"resp = cli.chat(model = \"mistral-ai/mistral-small-2503\",\n",
 " messages = [{\"role\":\"system\", \"content\":safe_prompt}, {\"role\":\"user\", \"content\":question}, {\"role\":\"assistant\", \"content\":prefix, \"prefix\": True}],\n",
 " max_tokens = 128)\n",
 "print(resp.choices[0].message.content[len(prefix):])"

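For context, the prefix pattern these hunks touch, assembled into a standalone sketch; cli, question, and prefix mirror the notebook's variables, and the values here are illustrative:

```python
import os

from mistralai.client import MistralClient

cli = MistralClient(
    api_key=os.environ["GITHUB_TOKEN"],
    endpoint="https://models.github.ai/inference",
)

question = "Hi there!"
prefix = "Assistant Shakespeare: "

# The "prefix": True flag forces the reply to begin with the given assistant text.
resp = cli.chat(
    model="mistral-ai/mistral-small-2503",
    messages=[
        {"role": "user", "content": question},
        {"role": "assistant", "content": prefix, "prefix": True},
    ],
    max_tokens=128,
)
# Strip the forced prefix before printing, as the notebook does.
print(resp.choices[0].message.content[len(prefix):])
```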
samples/js/azure_ai_inference/streaming.js

Lines changed: 1 addition & 1 deletion
@@ -12,7 +12,7 @@ const endpoint = "https://models.github.ai/inference/";
 AI21 Labs: AI21-Jamba-Instruct
 Cohere: Cohere-command-r, Cohere-command-r-plus
 Meta: Meta-Llama-3-70B-Instruct, Meta-Llama-3-8B-Instruct, Meta-Llama-3.1-405B-Instruct, Meta-Llama-3.1-70B-Instruct, Meta-Llama-3.1-8B-Instruct
-Mistral AI: Mistral-large, Mistral-large-2407, Mistral-Nemo, Mistral-small
+Mistral AI: Mistral-large, Mistral-large-2407, Mistral-Nemo, mistral-small-2503
 Azure OpenAI: gpt-4o-mini, gpt-4o
 Microsoft: Phi-3-medium-128k-instruct, Phi-3-medium-4k-instruct, Phi-3-mini-128k-instruct, Phi-3-mini-4k-instruct, Phi-3-small-128k-instruct, Phi-3-small-8k-instruct */
 const modelName = "openai/gpt-4o-mini";

samples/js/azure_ai_inference/tools.js

Lines changed: 6 additions & 6 deletions
@@ -10,7 +10,7 @@ const endpoint = "https://models.github.ai/inference/";
 available in the GitHub Models service:
 
 Cohere: Cohere-command-r, Cohere-command-r-plus
-Mistral AI: Mistral-large, Mistral-large-2407, Mistral-Nemo, Mistral-small
+Mistral AI: Mistral-large, Mistral-large-2407, Mistral-Nemo, mistral-small-2503
 Azure OpenAI: gpt-4o-mini, gpt-4o */
 const modelName = "openai/gpt-4o-mini";
 
@@ -32,7 +32,7 @@ const namesToFunctions = {
 };
 
 export async function main() {
-
+
 const tool = {
 "type": "function",
 "function": {
@@ -48,7 +48,7 @@ export async function main() {
 "description": "The name of the city where the flight originates",
 },
 "destinationCity": {
-"type": "string",
+"type": "string",
 "description": "The flight destination city",
 },
 },
@@ -77,13 +77,13 @@ export async function main() {
 if (response.status !== "200") {
 throw response.body.error;
 }
-
+
 // We expect the model to ask for a tool call
 if (response.body.choices[0].finish_reason === "tool_calls"){
 
 // Append the model response to the chat history
 messages.push(response.body.choices[0].message);
-
+
 // We expect a single tool call
 if (response.body.choices[0].message && response.body.choices[0].message.tool_calls.length === 1){
 
@@ -126,4 +126,4 @@ export async function main() {
 
 main().catch((err) => {
 console.error("The sample encountered an error:", err);
-});
+});

samples/js/mistralai/basic.js

Lines changed: 1 addition & 1 deletion
@@ -4,7 +4,7 @@ const token = process.env["GITHUB_TOKEN"];
 const endpoint = "https://models.github.ai/inference/";
 
 /* Pick one of the Mistral models from the GitHub Models service */
-const modelName = "mistral-ai/Mistral-small";
+const modelName = "mistral-ai/mistral-small-2503";
 
 export async function main() {
 
samples/js/mistralai/multi_turn.js

Lines changed: 2 additions & 2 deletions
@@ -4,7 +4,7 @@ const token = process.env["GITHUB_TOKEN"];
 const endpoint = "https://models.github.ai/inference/";
 
 /* Pick one of the Mistral models from the GitHub Models service */
-const modelName = "mistral-ai/Mistral-small";
+const modelName = "mistral-ai/mistral-small-2503";
 
 export async function main() {
 
@@ -25,4 +25,4 @@ export async function main() {
 
 main().catch((err) => {
 console.error("The sample encountered an error:", err);
-});
+});

samples/js/mistralai/streaming.js

Lines changed: 2 additions & 2 deletions
@@ -4,7 +4,7 @@ const token = process.env["GITHUB_TOKEN"];
 const endpoint = "https://models.github.ai/inference/";
 
 /* Pick one of the Mistral models from the GitHub Models service */
-const modelName = "mistral-ai/Mistral-small";
+const modelName = "mistral-ai/mistral-small-2503";
 
 export async function main() {
 
@@ -28,4 +28,4 @@ export async function main() {
 
 main().catch((err) => {
 console.error("The sample encountered an error:", err);
-});
+});

samples/js/mistralai/tools.js

Lines changed: 5 additions & 5 deletions
@@ -4,7 +4,7 @@ const token = process.env["GITHUB_TOKEN"];
 const endpoint = "https://models.github.ai/inference/";
 
 /* Pick one of the Mistral models from the GitHub Models service */
-const modelName = "mistral-ai/Mistral-small";
+const modelName = "mistral-ai/mistral-small-2503";
 
 
 function getFlightInfo({originCity, destinationCity}){
@@ -41,7 +41,7 @@ export async function main() {
 "description": "The name of the city where the flight originates",
 },
 "destinationCity": {
-"type": "string",
+"type": "string",
 "description": "The flight destination city",
 },
 },
@@ -59,17 +59,17 @@ export async function main() {
 { role:"system", content: "You an assistant that helps users find flight information." },
 { role:"user", content: "I'm interested in going to Miami. What is the next flight there from Seattle?" }
 ];
-
+
 let response = await client.chat({
 model: modelName,
 messages: messages,
 tools: [tool]
 });
-
+
 if (response.choices[0].finish_reason === "tool_calls"){
 // Append the model response to the chat history
 messages.push(response.choices[0].message);
-
+
 // We expect a single tool call
 if (response.choices[0].message && response.choices[0].message.tool_calls.length === 1){
 const toolCall = response.choices[0].message.tool_calls[0];

samples/python/azure_ai_evaluation/evaluation.py

Lines changed: 2 additions & 2 deletions
@@ -20,7 +20,7 @@
 token = os.environ['GITHUB_TOKEN']
 
 # Target model is the model to be evaluated.
-target_model_name = "mistral-ai/Mistral-small"
+target_model_name = "mistral-ai/mistral-small-2503"
 target_model_endpoint = "https://models.github.ai/inference/"
 # Judge model is the model to evaluate the target model.
 judge_model_name = "openai/gpt-4o-mini"
@@ -57,7 +57,7 @@ def generate_eval_data():
 model=target_model_name,
 temperature=1.,
 max_tokens=1000,
-top_p=1.
+top_p=1.
 )
 result = response.choices[0].message.content
 
