
Commit ef785be

Merge branch 'main' into ddb-maps-grounding
2 parents 7f0ef21 + 5511b4f commit ef785be

9 files changed: +96 additions, -21 deletions

.changeset/four-vans-mix.md
Lines changed: 5 additions & 0 deletions

@@ -0,0 +1,5 @@
+---
+'@firebase/auth': minor
+---
+
+Upgraded react-native-async-storage peerDependency to v2+.
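A minimal sketch of the consumer-side wiring this peer dependency supports (config values are placeholders, and in some SDK versions `getReactNativePersistence` is exported from `firebase/auth/react-native` rather than the package root):

```ts
import AsyncStorage from '@react-native-async-storage/async-storage'; // now v2+
import { initializeApp } from 'firebase/app';
import { getReactNativePersistence, initializeAuth } from 'firebase/auth';

// Placeholder config; substitute your project's values.
const app = initializeApp({ apiKey: '...', projectId: '...', appId: '...' });

// Persist auth state in AsyncStorage so sessions survive app restarts.
const auth = initializeAuth(app, {
  persistence: getReactNativePersistence(AsyncStorage)
});
```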

.changeset/gorgeous-rice-carry.md
Lines changed: 5 additions & 0 deletions

@@ -0,0 +1,5 @@
+---
+'@firebase/ai': patch
+---
+
+Fix `generateContentStream` returning wrong `inferenceSource`.
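For context, `inferenceSource` tells callers whether a response was produced on-device or in the cloud when hybrid inference is enabled. A sketch of how the fix is observable from the public API (model name and prompt are placeholders, and it assumes `InferenceSource` is re-exported from the package root as it is from the internal `../types` barrel):

```ts
import { FirebaseApp } from '@firebase/app';
import { getAI, getGenerativeModel, InferenceSource } from '@firebase/ai';

async function demo(app: FirebaseApp): Promise<void> {
  // Model name and prompt are placeholders.
  const model = getGenerativeModel(getAI(app), { model: 'gemini-2.5-flash' });
  const result = await model.generateContentStream('Tell me a story.');

  for await (const chunk of result.stream) {
    console.log(chunk.text());
  }

  // Before this patch, the aggregated streaming response could report the
  // wrong source; now it matches where inference actually ran.
  const response = await result.response;
  console.log(response.inferenceSource === InferenceSource.ON_DEVICE);
}
```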

packages/ai/integration/chat.test.ts
Lines changed: 2 additions & 1 deletion

@@ -26,7 +26,8 @@ import {
 } from '../src';
 import { testConfigs } from './constants';

-describe('Chat Session', () => {
+describe('Chat Session', function () {
+  this.timeout(20_000);
   testConfigs.forEach(testConfig => {
     describe(`${testConfig.toString()}`, () => {
       const commonGenerationConfig: GenerationConfig = {
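Note the switch from an arrow function to a `function` expression: Mocha exposes the suite context as `this`, so `this.timeout(20_000)` is only reachable from a regular function, because an arrow function would capture the enclosing `this` instead of receiving Mocha's.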

packages/ai/integration/constants.ts
Lines changed: 8 additions & 1 deletion

@@ -52,7 +52,14 @@ const backendNames: Map<BackendType, string> = new Map([
   [BackendType.VERTEX_AI, 'Vertex AI']
 ]);

-const modelNames: readonly string[] = ['gemini-2.0-flash', 'gemini-2.5-flash'];
+const modelNames: readonly string[] = [
+  'gemini-2.0-flash-001',
+  'gemini-2.0-flash-lite-001',
+  'gemini-2.5-flash',
+  'gemini-2.5-flash-lite',
+  'gemini-2.5-pro',
+  'gemini-3-pro-preview'
+];

 // The Live API requires a different set of models, and they're different for each backend.
 const liveModelNames: Map<BackendType, string[]> = new Map([
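Pinning the 2.0 models to explicit `-001` revisions rather than the floating `gemini-2.0-flash` alias presumably keeps per-model expectations such as token counts stable, while the new entries extend the test matrix across the 2.5 family and `gemini-3-pro-preview`.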

packages/ai/integration/count-tokens.test.ts
Lines changed: 32 additions & 9 deletions

@@ -118,9 +118,18 @@ describe('Count Tokens', () => {
       };
       const response = await model.countTokens([imagePart]);

+      let expectedImageTokens: number;
+      if (testConfig.model === 'gemini-3-pro-preview') {
+        expectedImageTokens =
+          testConfig.ai.backend.backendType === BackendType.GOOGLE_AI
+            ? 1089
+            : 1120;
+      } else {
+        expectedImageTokens = 258;
+      }
+
       if (testConfig.ai.backend.backendType === BackendType.GOOGLE_AI) {
-        const expectedImageTokens = 259;
-        expect(response.totalTokens).to.equal(expectedImageTokens);
+        expect(response.totalTokens).to.equal(expectedImageTokens + 1); // There will be 1 unexpected text token
         expect(response.totalBillableCharacters).to.be.undefined; // Incorrect behavior
         expect(response.promptTokensDetails!.length).to.equal(2);
         expect(response.promptTokensDetails![0]).to.deep.equal({
@@ -129,19 +138,18 @@ describe('Count Tokens', () => {
         });
         expect(response.promptTokensDetails![1]).to.deep.equal({
           modality: Modality.IMAGE,
-          tokenCount: 258
+          tokenCount: expectedImageTokens
         });
       } else if (
         testConfig.ai.backend.backendType === BackendType.VERTEX_AI
       ) {
-        const expectedImageTokens = 258;
         expect(response.totalTokens).to.equal(expectedImageTokens);
         expect(response.totalBillableCharacters).to.be.undefined; // Incorrect behavior
         expect(response.promptTokensDetails!.length).to.equal(1);
         // Note: No text tokens are present for Vertex AI with image-only input.
         expect(response.promptTokensDetails![0]).to.deep.equal({
           modality: Modality.IMAGE,
-          tokenCount: 258
+          tokenCount: expectedImageTokens
         });
         expect(response.promptTokensDetails![0].tokenCount).to.equal(
           expectedImageTokens
@@ -220,13 +228,23 @@ describe('Count Tokens', () => {
       expect(response.promptTokensDetails).to.exist;
       expect(response.promptTokensDetails!.length).to.equal(3);

+      let expectedImageTokenCount;
+      if (testConfig.model === 'gemini-3-pro-preview') {
+        expectedImageTokenCount =
+          testConfig.ai.backend.backendType === BackendType.GOOGLE_AI
+            ? 1089
+            : 1120;
+      } else {
+        expectedImageTokenCount = 258;
+      }
+
       expect(imageDetails).to.deep.equal({
         modality: Modality.IMAGE,
-        tokenCount: 258
+        tokenCount: expectedImageTokenCount
       });

       if (testConfig.ai.backend.backendType === BackendType.GOOGLE_AI) {
-        expect(response.totalTokens).to.equal(267);
+        expect(response.totalTokens).to.equal(expectedImageTokenCount + 9);
         expect(response.totalBillableCharacters).to.be.undefined;
         expect(textDetails).to.deep.equal({
           modality: Modality.TEXT,
@@ -239,7 +257,7 @@ describe('Count Tokens', () => {
       } else if (
         testConfig.ai.backend.backendType === BackendType.VERTEX_AI
       ) {
-        expect(response.totalTokens).to.equal(261);
+        expect(response.totalTokens).to.equal(expectedImageTokenCount + 3);
         expect(textDetails).to.deep.equal({
           modality: Modality.TEXT,
           tokenCount: 3
@@ -269,7 +287,12 @@ describe('Count Tokens', () => {

       const response = await model.countTokens([filePart]);

-      const expectedFileTokens = 258;
+      let expectedFileTokens: number;
+      if (testConfig.model === 'gemini-3-pro-preview') {
+        expectedFileTokens = 1120;
+      } else {
+        expectedFileTokens = 258;
+      }
       expect(response.totalTokens).to.equal(expectedFileTokens);
       expect(response.totalBillableCharacters).to.be.undefined;
       expect(response.promptTokensDetails).to.exist;
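The totals here decompose as image tokens plus a fixed text-token count: the old Google AI assertion of 267 was 258 + 9, and the Vertex AI 261 was 258 + 3 (matching the `tokenCount: 3` text detail), so rewriting them as `expectedImageTokenCount + 9` and `+ 3` keeps the text expectation while letting the image term vary per model (258, or 1089/1120 for `gemini-3-pro-preview`). The same arithmetic explains the single-image case, where Google AI reports one stray text token (`expectedImageTokens + 1`).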

packages/ai/integration/generate-content.test.ts
Lines changed: 9 additions & 6 deletions

@@ -32,7 +32,7 @@ import {
 import { testConfigs } from './constants';

 describe('Generate Content', function () {
-  this.timeout(20_000);
+  this.timeout(90_000); // gemini 3 requests take a long time, especially when using google search and url context.
   testConfigs.forEach(testConfig => {
     describe(`${testConfig.toString()}`, () => {
       const commonGenerationConfig: GenerationConfig = {
@@ -381,8 +381,9 @@ describe('Generate Content', function () {
       describe('URL Context', async () => {
         // URL Context is not supported in Google AI for gemini-2.0-flash
         if (
-          testConfig.ai.backend.backendType === BackendType.GOOGLE_AI &&
-          testConfig.model === 'gemini-2.0-flash'
+          ['gemini-2.0-flash-001', 'gemini-2.0-flash-lite-001'].includes(
+            testConfig.model
+          ) // Models that don't support URL Context
         ) {
           return;
         }
@@ -438,9 +439,7 @@ describe('Generate Content', function () {
         const urlContextMetadata =
           response.candidates?.[0].urlContextMetadata;
         const groundingMetadata = response.candidates?.[0].groundingMetadata;
-        expect(trimmedText).to.contain(
-          'hypermedia information retrieval initiative'
-        );
+        expect(trimmedText.length).to.be.greaterThan(0);
         expect(urlContextMetadata?.urlMetadata).to.exist;
         expect(
           urlContextMetadata?.urlMetadata.length
@@ -508,6 +507,10 @@ describe('Generate Content', function () {
       });

       it('generateContent: code execution', async () => {
+        if (testConfig.model === 'gemini-2.0-flash-lite-001') {
+          // This model does not support code execution
+          return;
+        }
         const model = getGenerativeModel(testConfig.ai, {
           model: testConfig.model,
           generationConfig: commonGenerationConfig,
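The early `return` pattern works because Mocha runs `describe` callbacks eagerly to register tests: bailing out before any `it(...)` call registers nothing for unsupported model/backend combinations, and returning at the top of an `it` body resolves it immediately, so the shared `testConfigs` matrix can stay exhaustive.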

packages/ai/src/methods/generate-content.test.ts
Lines changed: 29 additions & 2 deletions

@@ -26,6 +26,7 @@ import {
 import * as request from '../requests/request';
 import {
   generateContent,
+  generateContentStream,
   templateGenerateContent,
   templateGenerateContentStream
 } from './generate-content';
@@ -35,6 +36,7 @@ import {
   HarmBlockMethod,
   HarmBlockThreshold,
   HarmCategory,
+  InferenceSource,
   Language,
   Outcome
 } from '../types';
@@ -548,8 +550,7 @@ describe('generateContent()', () => {
      );
    });
  });
-  // TODO: define a similar test for generateContentStream
-  it('on-device', async () => {
+  it('generateContent on-device', async () => {
    const chromeAdapter = fakeChromeAdapter;
    const isAvailableStub = stub(chromeAdapter, 'isAvailable').resolves(true);
    const mockResponse = getMockResponse(
@@ -566,9 +567,35 @@ describe('generateContent()', () => {
      chromeAdapter
    );
    expect(result.response.text()).to.include('Mountain View, California');
+    expect(result.response.inferenceSource).to.equal(InferenceSource.ON_DEVICE);
    expect(isAvailableStub).to.be.called;
    expect(generateContentStub).to.be.calledWith(fakeRequestParams);
  });
+  it('generateContentStream on-device', async () => {
+    const chromeAdapter = fakeChromeAdapter;
+    const isAvailableStub = stub(chromeAdapter, 'isAvailable').resolves(true);
+    const mockResponse = getMockResponseStreaming(
+      'vertexAI',
+      'streaming-success-basic-reply-short.txt'
+    );
+    const generateContentStreamStub = stub(
+      chromeAdapter,
+      'generateContentStream'
+    ).resolves(mockResponse as Response);
+    const result = await generateContentStream(
+      fakeApiSettings,
+      'model',
+      fakeRequestParams,
+      chromeAdapter
+    );
+    const aggregatedResponse = await result.response;
+    expect(aggregatedResponse.text()).to.include('Cheyenne');
+    expect(aggregatedResponse.inferenceSource).to.equal(
+      InferenceSource.ON_DEVICE
+    );
+    expect(isAvailableStub).to.be.called;
+    expect(generateContentStreamStub).to.be.calledWith(fakeRequestParams);
+  });
 });

 describe('templateGenerateContent', () => {

packages/ai/src/methods/generate-content.ts
Lines changed: 5 additions & 1 deletion

@@ -70,7 +70,11 @@ export async function generateContentStream(
     () =>
       generateContentStreamOnCloud(apiSettings, model, params, requestOptions)
   );
-  return processStream(callResult.response, apiSettings); // TODO: Map streaming responses
+  return processStream(
+    callResult.response,
+    apiSettings,
+    callResult.inferenceSource
+  );
 }

 async function generateContentOnCloud(
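This is the substantive fix behind the `@firebase/ai` changeset above: the stream processor previously had no way to know where inference ran, so aggregated streaming responses fell back to a default source. A minimal, self-contained sketch of the pattern (types and names are illustrative, not the SDK's internals):

```ts
// Illustrative only: thread the inference source chosen at the call site
// through the stream aggregator, mirroring the fix above.
type InferenceSource = 'on_device' | 'in_cloud';

interface AggregatedResponse {
  text: string;
  inferenceSource: InferenceSource;
}

async function processChunks(
  chunks: AsyncIterable<string>,
  inferenceSource: InferenceSource // previously omitted, so callers got a default
): Promise<AggregatedResponse> {
  let text = '';
  for await (const chunk of chunks) {
    text += chunk;
  }
  // Tag the aggregated response with where inference actually ran.
  return { text, inferenceSource };
}
```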

packages/auth/package.json
Lines changed: 1 addition & 1 deletion

@@ -116,7 +116,7 @@
   },
   "peerDependencies": {
     "@firebase/app": "0.x",
-    "@react-native-async-storage/async-storage": "^1.18.1"
+    "@react-native-async-storage/async-storage": "^2.2.0"
   },
   "peerDependenciesMeta": {
     "@react-native-async-storage/async-storage": {
