
Commit b3bf56d

feat(node): Add OpenAI SDK v6 support and integration tests (#18244)
Upgrades OpenAI instrumentation to support OpenAI SDK v6.0.0 and adds node integration tests to verify compatibility.

### Changes

**Instrumentation:**

- Bumped OpenAI SDK support to v6.0.0 (<v7)
- OpenAI v6 introduces no breaking changes that affect our instrumentation
- All existing instrumentation logic remains compatible with the new SDK version

ref: https://github.com/openai/openai-node/releases/tag/v6.0.0

**Testing:**

- Created v6 test suite in `dev-packages/node-integration-tests/suites/tracing/openai/v6/`
- Tests verify OpenAI SDK v6.0.0 instrumentation across:
  - Chat completions API with and without PII tracking
  - Responses API with streaming support
  - Custom integration options (recordInputs, recordOutputs)
  - Error handling in chat completions and streaming contexts
  - Root span creation without wrapping spans
  - Embeddings API
1 parent 584d4bc commit b3bf56d
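
For context, the heart of the instrumentation is wrapping each client API call in a span. Below is a minimal illustrative sketch of that shape, not the actual `instrumentOpenAiClient` implementation from `@sentry/core`; the `wrapInSpan` helper and the `gen_ai.chat` op value are assumptions for illustration only.

```js
import * as Sentry from '@sentry/node';

// Illustrative sketch only: replace an async method with a version that runs
// inside a Sentry span. The real instrumentation also handles streaming
// responses, span attributes, and error capture.
function wrapInSpan(target, method, spanName) {
  const original = target[method].bind(target);
  target[method] = (...args) =>
    Sentry.startSpan({ op: 'gen_ai.chat', name: spanName }, () => original(...args));
}

// Hypothetical usage against an OpenAI-shaped client:
// wrapInSpan(client.chat.completions, 'create', 'chat gpt-3.5-turbo');
```
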

File tree

8 files changed: +1052 −1 lines changed

Lines changed: 16 additions & 0 deletions
@@ -0,0 +1,16 @@
```js
import * as Sentry from '@sentry/node';
import { loggingTransport } from '@sentry-internal/node-integration-tests';

Sentry.init({
  dsn: 'https://[email protected]/1337',
  release: '1.0',
  tracesSampleRate: 1.0,
  sendDefaultPii: false,
  transport: loggingTransport,
  integrations: [
    Sentry.openAIIntegration({
      recordInputs: true,
      recordOutputs: true,
    }),
  ],
});
```
Lines changed: 11 additions & 0 deletions
@@ -0,0 +1,11 @@
```js
import * as Sentry from '@sentry/node';
import { loggingTransport } from '@sentry-internal/node-integration-tests';

Sentry.init({
  dsn: 'https://[email protected]/1337',
  release: '1.0',
  tracesSampleRate: 1.0,
  sendDefaultPii: true,
  transport: loggingTransport,
  integrations: [Sentry.openAIIntegration()],
});
```
Lines changed: 11 additions & 0 deletions
@@ -0,0 +1,11 @@
```js
import * as Sentry from '@sentry/node';
import { loggingTransport } from '@sentry-internal/node-integration-tests';

Sentry.init({
  dsn: 'https://[email protected]/1337',
  release: '1.0',
  tracesSampleRate: 1.0,
  sendDefaultPii: false,
  transport: loggingTransport,
  integrations: [Sentry.openAIIntegration()],
});
```
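
Together, the three init scenarios cover the option matrix: explicit record options with PII off, defaults with PII on, and defaults with PII off. The sketch below shows the flag resolution these scenarios presumably exercise; the fallback to `sendDefaultPii` is an assumption about the integration's defaults, not something shown in this diff.

```js
// Assumption: when recordInputs/recordOutputs are omitted they fall back to
// sendDefaultPii, and explicit options win. Mirrors the three scenarios above.
function resolveRecordingFlags(options, sendDefaultPii) {
  return {
    recordInputs: options.recordInputs ?? sendDefaultPii,
    recordOutputs: options.recordOutputs ?? sendDefaultPii,
  };
}

// resolveRecordingFlags({}, true)
//   -> { recordInputs: true, recordOutputs: true }
// resolveRecordingFlags({ recordInputs: true, recordOutputs: true }, false)
//   -> { recordInputs: true, recordOutputs: true }
```
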
Lines changed: 318 additions & 0 deletions
@@ -0,0 +1,318 @@
```js
import { instrumentOpenAiClient } from '@sentry/core';
import * as Sentry from '@sentry/node';

class MockOpenAI {
  constructor(config) {
    this.apiKey = config.apiKey;

    this.chat = {
      completions: {
        create: async params => {
          // Simulate processing time
          await new Promise(resolve => setTimeout(resolve, 10));

          if (params.model === 'error-model') {
            const error = new Error('Model not found');
            error.status = 404;
            error.headers = { 'x-request-id': 'mock-request-123' };
            throw error;
          }

          // If stream is requested, return an async generator
          if (params.stream) {
            return this._createChatCompletionStream(params);
          }

          return {
            id: 'chatcmpl-mock123',
            object: 'chat.completion',
            created: 1677652288,
            model: params.model,
            system_fingerprint: 'fp_44709d6fcb',
            choices: [
              {
                index: 0,
                message: {
                  role: 'assistant',
                  content: 'Hello from OpenAI mock!',
                },
                finish_reason: 'stop',
              },
            ],
            usage: {
              prompt_tokens: 10,
              completion_tokens: 15,
              total_tokens: 25,
            },
          };
        },
      },
    };

    this.responses = {
      create: async params => {
        await new Promise(resolve => setTimeout(resolve, 10));

        // If stream is requested, return an async generator
        if (params.stream) {
          return this._createResponsesApiStream(params);
        }

        return {
          id: 'resp_mock456',
          object: 'response',
          created_at: 1677652290,
          model: params.model,
          input_text: params.input,
          output_text: `Response to: ${params.input}`,
          status: 'completed',
          usage: {
            input_tokens: 5,
            output_tokens: 8,
            total_tokens: 13,
          },
        };
      },
    };
  }

  // Create a mock streaming response for chat completions
  async *_createChatCompletionStream(params) {
    // First chunk with basic info
    yield {
      id: 'chatcmpl-stream-123',
      object: 'chat.completion.chunk',
      created: 1677652300,
      model: params.model,
      system_fingerprint: 'fp_stream_123',
      choices: [
        {
          index: 0,
          delta: {
            role: 'assistant',
            content: 'Hello',
          },
          finish_reason: null,
        },
      ],
    };

    // Second chunk with more content
    yield {
      id: 'chatcmpl-stream-123',
      object: 'chat.completion.chunk',
      created: 1677652300,
      model: params.model,
      system_fingerprint: 'fp_stream_123',
      choices: [
        {
          index: 0,
          delta: {
            content: ' from OpenAI streaming!',
          },
          finish_reason: 'stop',
        },
      ],
      usage: {
        prompt_tokens: 12,
        completion_tokens: 18,
        total_tokens: 30,
        completion_tokens_details: {
          accepted_prediction_tokens: 0,
          audio_tokens: 0,
          reasoning_tokens: 0,
          rejected_prediction_tokens: 0,
        },
        prompt_tokens_details: {
          audio_tokens: 0,
          cached_tokens: 0,
        },
      },
    };
  }

  // Create a mock streaming response for responses API
  async *_createResponsesApiStream(params) {
    // Response created event
    yield {
      type: 'response.created',
      response: {
        id: 'resp_stream_456',
        object: 'response',
        created_at: 1677652310,
        model: params.model,
        status: 'in_progress',
        error: null,
        incomplete_details: null,
        instructions: params.instructions,
        max_output_tokens: 1000,
        parallel_tool_calls: false,
        previous_response_id: null,
        reasoning: {
          effort: null,
          summary: null,
        },
        store: false,
        temperature: 0.7,
        text: {
          format: {
            type: 'text',
          },
        },
        tool_choice: 'auto',
        top_p: 1.0,
        truncation: 'disabled',
        user: null,
        metadata: {},
        output: [],
        output_text: '',
        usage: {
          input_tokens: 0,
          output_tokens: 0,
          total_tokens: 0,
        },
      },
      sequence_number: 1,
    };

    // Response in progress with output text delta
    yield {
      type: 'response.output_text.delta',
      delta: 'Streaming response to: ',
      sequence_number: 2,
    };

    yield {
      type: 'response.output_text.delta',
      delta: params.input,
      sequence_number: 3,
    };

    // Response completed event
    yield {
      type: 'response.completed',
      response: {
        id: 'resp_stream_456',
        object: 'response',
        created_at: 1677652310,
        model: params.model,
        status: 'completed',
        error: null,
        incomplete_details: null,
        instructions: params.instructions,
        max_output_tokens: 1000,
        parallel_tool_calls: false,
        previous_response_id: null,
        reasoning: {
          effort: null,
          summary: null,
        },
        store: false,
        temperature: 0.7,
        text: {
          format: {
            type: 'text',
          },
        },
        tool_choice: 'auto',
        top_p: 1.0,
        truncation: 'disabled',
        user: null,
        metadata: {},
        output: [],
        output_text: params.input,
        usage: {
          input_tokens: 6,
          output_tokens: 10,
          total_tokens: 16,
        },
      },
      sequence_number: 4,
    };
  }
}

async function run() {
  await Sentry.startSpan({ op: 'function', name: 'main' }, async () => {
    const mockClient = new MockOpenAI({
      apiKey: 'mock-api-key',
    });

    const client = instrumentOpenAiClient(mockClient);

    // First test: basic chat completion
    await client.chat.completions.create({
      model: 'gpt-3.5-turbo',
      messages: [
        { role: 'system', content: 'You are a helpful assistant.' },
        { role: 'user', content: 'What is the capital of France?' },
      ],
      temperature: 0.7,
      max_tokens: 100,
    });

    // Second test: responses API
    await client.responses.create({
      model: 'gpt-3.5-turbo',
      input: 'Translate this to French: Hello',
      instructions: 'You are a translator',
    });

    // Third test: error handling in chat completions
    try {
      await client.chat.completions.create({
        model: 'error-model',
        messages: [{ role: 'user', content: 'This will fail' }],
      });
    } catch {
      // Error is expected and handled
    }

    // Fourth test: chat completions streaming
    const stream1 = await client.chat.completions.create({
      model: 'gpt-4',
      messages: [
        { role: 'system', content: 'You are a helpful assistant.' },
        { role: 'user', content: 'Tell me about streaming' },
      ],
      stream: true,
      temperature: 0.8,
    });

    // Consume the stream to trigger span instrumentation
    for await (const chunk of stream1) {
      // Stream chunks are processed automatically by instrumentation
      void chunk; // Prevent unused variable warning
    }

    // Fifth test: responses API streaming
    const stream2 = await client.responses.create({
      model: 'gpt-4',
      input: 'Test streaming responses API',
      instructions: 'You are a streaming assistant',
      stream: true,
    });

    for await (const chunk of stream2) {
      void chunk;
    }

    // Sixth test: error handling in streaming context
    try {
      const errorStream = await client.chat.completions.create({
        model: 'error-model',
        messages: [{ role: 'user', content: 'This will fail' }],
        stream: true,
      });

      // Try to consume the stream (this should not execute)
      for await (const chunk of errorStream) {
        void chunk;
      }
    } catch {
      // Error is expected and handled
    }
  });
}

run();
```
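
This scenario only emits spans; the assertions live in the accompanying test file, which is not shown in this diff. Below is a hypothetical sketch of the kind of check such a test might run against a captured transaction. The `gen_ai.chat` op and the `gen_ai.*` attribute names are assumptions based on OpenTelemetry's `gen_ai` semantic conventions, not taken from this commit.

```js
// Hypothetical matcher for a transaction event captured via loggingTransport;
// span op and attribute names are assumed, not copied from the real tests.
function assertChatCompletionSpan(transaction) {
  const span = (transaction.spans ?? []).find(s => s.op === 'gen_ai.chat');
  if (!span) {
    throw new Error('expected a gen_ai.chat span on the transaction');
  }
  const data = span.data ?? {};
  if (data['gen_ai.request.model'] !== 'gpt-3.5-turbo') {
    throw new Error(`unexpected model: ${data['gen_ai.request.model']}`);
  }
  if (data['gen_ai.usage.total_tokens'] !== 25) {
    throw new Error('expected total_tokens from the mock usage payload (25)');
  }
}
```
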
