Skip to content

Commit 8f8e784

Browse files
committed
Update with tool calls
1 parent 3e617c7 commit 8f8e784

File tree

4 files changed

+405
-21
lines changed

4 files changed

+405
-21
lines changed
Lines changed: 298 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,298 @@
1+
import { instrumentGoogleGenAIClient } from '@sentry/core';
2+
import * as Sentry from '@sentry/node';
3+
4+
/**
 * Minimal stand-in for the Google GenAI SDK client.
 *
 * Mirrors the pieces of the real client surface that the instrumentation
 * touches: `models.generateContent`, `models.generateContentStream`, and the
 * non-enumerable `functionCalls` accessor the SDK attaches to responses.
 * When the request config contains at least one tool, responses include a
 * `controlLight` function call; otherwise a plain text answer is returned.
 */
class MockGoogleGenAI {
  constructor(config) {
    this.apiKey = config.apiKey;

    // Arrow functions keep `this` bound to the instance so the stream
    // helper methods below are reachable from the `models` namespace.
    this.models = {
      generateContent: async params => {
        // Simulate processing latency.
        await new Promise(resolve => setTimeout(resolve, 10));

        const hasTools = params.config?.tools && params.config.tools.length > 0;
        if (!hasTools) {
          // Tool-free request: plain text answer only.
          return {
            candidates: [
              {
                content: {
                  parts: [{ text: 'Mock response from Google GenAI without tools!' }],
                  role: 'model',
                },
                finishReason: 'stop',
                index: 0,
              },
            ],
            usageMetadata: {
              promptTokenCount: 8,
              candidatesTokenCount: 12,
              totalTokenCount: 20,
            },
          };
        }

        // Tools were supplied: answer with text plus a function call.
        const toolCall = {
          id: 'call_light_control_1',
          name: 'controlLight',
          args: { brightness: 0.3, colorTemperature: 'warm' },
        };
        const response = {
          candidates: [
            {
              content: {
                parts: [
                  { text: 'I need to check the light status first.' },
                  { functionCall: { ...toolCall, args: { ...toolCall.args } } },
                ],
                role: 'model',
              },
              finishReason: 'stop',
              index: 0,
            },
          ],
          usageMetadata: {
            promptTokenCount: 15,
            candidatesTokenCount: 8,
            totalTokenCount: 23,
          },
        };
        // The real response object exposes `functionCalls` as an accessor.
        MockGoogleGenAI._attachFunctionCalls(response, toolCall);
        return response;
      },

      generateContentStream: async params => {
        // Simulate processing latency.
        await new Promise(resolve => setTimeout(resolve, 10));

        const hasTools = params.config?.tools && params.config.tools.length > 0;
        return hasTools ? this._createMockStreamWithTools() : this._createMockStream();
      },
    };
  }

  /**
   * Attaches a non-enumerable `functionCalls` getter to `target`, mirroring
   * the accessor the real SDK defines on response objects. The getter hands
   * out fresh copies on every read, like the inline literal it replaces.
   */
  static _attachFunctionCalls(target, call) {
    Object.defineProperty(target, 'functionCalls', {
      get: () => [{ ...call, args: { ...call.args } }],
      enumerable: false,
    });
  }

  // Streaming variant used when tools are present: text, then a function
  // call chunk, then a closing chunk carrying finish reason + usage.
  async *_createMockStreamWithTools() {
    // Opening chunk: plain text.
    yield {
      candidates: [
        {
          content: {
            parts: [{ text: 'Let me control the lights for you.' }],
            role: 'model',
          },
          index: 0,
        },
      ],
      responseId: 'mock-response-tools-id',
      modelVersion: 'gemini-2.0-flash-001',
    };

    // Middle chunk: the function call, with the accessor attached.
    const toolCall = {
      id: 'call_light_stream_1',
      name: 'controlLight',
      args: { brightness: 0.5, colorTemperature: 'cool' },
    };
    const toolChunk = {
      candidates: [
        {
          content: {
            parts: [{ functionCall: { ...toolCall, args: { ...toolCall.args } } }],
            role: 'model',
          },
          index: 0,
        },
      ],
    };
    MockGoogleGenAI._attachFunctionCalls(toolChunk, toolCall);
    yield toolChunk;

    // Closing chunk: trailing text, finish reason, and usage metadata.
    yield {
      candidates: [
        {
          content: {
            parts: [{ text: ' Done!' }], // Additional text in final chunk
            role: 'model',
          },
          finishReason: 'STOP',
          index: 0,
        },
      ],
      usageMetadata: {
        promptTokenCount: 12,
        candidatesTokenCount: 10,
        totalTokenCount: 22,
      },
    };
  }

  // Streaming variant for tool-free requests: two text chunks, the second
  // carrying the finish reason and usage metadata.
  async *_createMockStream() {
    yield {
      candidates: [
        {
          content: {
            parts: [{ text: 'Mock streaming response' }],
            role: 'model',
          },
          index: 0,
        },
      ],
      responseId: 'mock-response-id',
      modelVersion: 'gemini-1.5-flash',
    };

    yield {
      candidates: [
        {
          content: {
            parts: [{ text: ' from Google GenAI!' }],
            role: 'model',
          },
          finishReason: 'STOP',
          index: 0,
        },
      ],
      usageMetadata: {
        promptTokenCount: 10,
        candidatesTokenCount: 12,
        totalTokenCount: 22,
      },
    };
  }
}
221+
222+
/**
 * Drives the instrumented mock client through three scenarios inside a
 * `main` span: a non-streaming request with tools, a streaming request
 * with tools (stream fully consumed), and a tool-free request as baseline.
 */
async function run() {
  const genAI = new MockGoogleGenAI({ apiKey: 'test-api-key' });
  const instrumentedClient = instrumentGoogleGenAIClient(genAI);

  // Shared tool declaration used by both tool-call scenarios below.
  const lightControlTools = [
    {
      functionDeclarations: [
        {
          name: 'controlLight',
          parametersJsonSchema: {
            type: 'object',
            properties: {
              brightness: { type: 'number' },
              colorTemperature: { type: 'string' },
            },
            required: ['brightness', 'colorTemperature'],
          },
        },
      ],
    },
  ];

  await Sentry.startSpan({ name: 'main', op: 'function' }, async () => {
    // Test 1: Non-streaming with tools
    await instrumentedClient.models.generateContent({
      model: 'gemini-2.0-flash-001',
      contents: 'Dim the lights so the room feels cozy and warm.',
      config: { tools: lightControlTools },
    });

    // Test 2: Streaming with tools
    const stream = await instrumentedClient.models.generateContentStream({
      model: 'gemini-2.0-flash-001',
      contents: 'Turn on the lights with medium brightness.',
      config: { tools: lightControlTools },
    });

    // Drain the stream so the streaming instrumentation finalizes its span.
    for await (const chunk of stream) {
      void chunk;
    }

    // Test 3: Without tools for comparison
    await instrumentedClient.models.generateContent({
      model: 'gemini-1.5-flash',
      contents: 'Tell me about the weather.',
    });
  });
}

run();

dev-packages/node-integration-tests/suites/tracing/google-genai/test.ts

Lines changed: 76 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -313,4 +313,80 @@ describe('Google GenAI integration', () => {
313313
.completed();
314314
});
315315
});
316+
317+
  // Expected span shape for the tool-call scenario (scenario-tools.mjs):
  // one non-streaming request with tools, one streaming request with tools,
  // and one tool-free request for baseline comparison. Token counts match
  // the fixed usageMetadata values emitted by the mock client.
  const EXPECTED_TRANSACTION_TOOLS = {
    transaction: 'main',
    spans: expect.arrayContaining([
      // Non-streaming with tools
      expect.objectContaining({
        data: expect.objectContaining({
          'gen_ai.operation.name': 'models',
          'sentry.op': 'gen_ai.models',
          'sentry.origin': 'auto.ai.google_genai',
          'gen_ai.system': 'google_genai',
          'gen_ai.request.model': 'gemini-2.0-flash-001',
          'gen_ai.request.available_tools': expect.any(String), // Should include tools
          'gen_ai.request.messages': expect.any(String), // Should include contents
          'gen_ai.response.text': expect.any(String), // Should include response text
          'gen_ai.response.tool_calls': expect.any(String), // Should include tool calls
          'gen_ai.usage.input_tokens': 15,
          'gen_ai.usage.output_tokens': 8,
          'gen_ai.usage.total_tokens': 23,
        }),
        description: 'models gemini-2.0-flash-001',
        op: 'gen_ai.models',
        origin: 'auto.ai.google_genai',
        status: 'ok',
      }),
      // Streaming with tools
      expect.objectContaining({
        data: expect.objectContaining({
          'gen_ai.operation.name': 'models',
          'sentry.op': 'gen_ai.models',
          'sentry.origin': 'auto.ai.google_genai',
          'gen_ai.system': 'google_genai',
          'gen_ai.request.model': 'gemini-2.0-flash-001',
          'gen_ai.request.available_tools': expect.any(String), // Should include tools
          'gen_ai.request.messages': expect.any(String), // Should include contents
          'gen_ai.response.streaming': true,
          'gen_ai.response.text': expect.any(String), // Should include response text
          'gen_ai.response.tool_calls': expect.any(String), // Should include tool calls
          'gen_ai.response.id': 'mock-response-tools-id',
          'gen_ai.response.model': 'gemini-2.0-flash-001',
          'gen_ai.usage.input_tokens': 12,
          'gen_ai.usage.output_tokens': 10,
          'gen_ai.usage.total_tokens': 22,
        }),
        description: 'models gemini-2.0-flash-001 stream-response',
        op: 'gen_ai.models',
        origin: 'auto.ai.google_genai',
        status: 'ok',
      }),
      // Without tools for comparison
      expect.objectContaining({
        data: expect.objectContaining({
          'gen_ai.operation.name': 'models',
          'sentry.op': 'gen_ai.models',
          'sentry.origin': 'auto.ai.google_genai',
          'gen_ai.system': 'google_genai',
          'gen_ai.request.model': 'gemini-1.5-flash',
          'gen_ai.request.messages': expect.any(String), // Should include contents
          'gen_ai.response.text': expect.any(String), // Should include response text
          'gen_ai.usage.input_tokens': 8,
          'gen_ai.usage.output_tokens': 12,
          'gen_ai.usage.total_tokens': 20,
        }),
        description: 'models gemini-1.5-flash',
        op: 'gen_ai.models',
        origin: 'auto.ai.google_genai',
        status: 'ok',
      }),
    ]),
  };
386+
387+
createEsmAndCjsTests(__dirname, 'scenario-tools.mjs', 'instrument-with-options.mjs', (createRunner, test) => {
388+
test('creates google genai related spans with tool calls', async () => {
389+
await createRunner().ignore('event').expect({ transaction: EXPECTED_TRANSACTION_TOOLS }).start().completed();
390+
});
391+
});
316392
});

0 commit comments

Comments
 (0)