Commit 7227a1b: fix ai suggestions
Parent: eb30d57

4 files changed: +95 -68 lines

packages/opentelemetry-instrumentation-langchain/opentelemetry/instrumentation/langchain/callback_handler.py (1 addition, 1 deletion)

@@ -190,7 +190,7 @@ def _create_shared_attributes(
             association_properties.get("base_url") or
             association_properties.get("server_address")
         )
-    except Exception:
+    except (AttributeError, KeyError, TypeError):
         pass

     if not server_address:
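The narrowed `except` clause swallows only the errors a context or mapping lookup can plausibly raise, so unrelated bugs surface instead of being silenced by a bare `except Exception`. A minimal sketch of the pattern, assuming the helper name `resolve_server_address` and the dict-like shape of `association_properties` (both invented for illustration):

```python
from typing import Optional


def resolve_server_address(association_properties) -> Optional[str]:
    """Illustrative reduction of the narrowed error handling above."""
    try:
        # .get() can raise AttributeError if association_properties is not a
        # mapping, TypeError for unhashable keys, or KeyError from exotic
        # mapping subclasses; anything else now propagates to the caller.
        return (
            association_properties.get("base_url")
            or association_properties.get("server_address")
        )
    except (AttributeError, KeyError, TypeError):
        return None  # caller falls back to other sources of the address
```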

packages/opentelemetry-instrumentation-langchain/opentelemetry/instrumentation/langchain/span_utils.py (10 additions, 8 deletions)

@@ -55,18 +55,20 @@ def _set_span_attribute(span: Span, name: str, value: AttributeValue):
         span.set_attribute(name, value)


-def _get_unified_unknown_model(class_name: str = None, existing_model: str = None) -> str:
+def _get_unified_unknown_model(
+    class_name: Optional[str] = None, existing_model: Optional[str] = None
+) -> str:
     """Get unified unknown model name to ensure consistency across all fallbacks."""

     if existing_model:
         existing_lower = existing_model.lower()
-        if existing_model.startswith("deepseek"):
+        if existing_lower.startswith("deepseek"):
             return "deepseek-unknown"
-        elif existing_model.startswith("gpt"):
+        elif existing_lower.startswith("gpt"):
             return "gpt-unknown"
-        elif existing_model.startswith("claude"):
+        elif existing_lower.startswith("claude"):
             return "claude-unknown"
-        elif existing_model.startswith("command"):
+        elif existing_lower.startswith("command"):
             return "command-unknown"
         elif ("ollama" in existing_lower or "llama" in existing_lower):
             return "ollama-unknown"
@@ -130,7 +132,7 @@ def _extract_model_name_from_request(
         association_properties = context_api.get_value("association_properties") or {}
         if (model := association_properties.get("ls_model_name")) is not None:
             return model
-    except Exception:
+    except (AttributeError, KeyError, TypeError):
         pass

     # Extract from serialized information for third-party integrations
@@ -253,7 +255,7 @@ def set_chat_request(
     span_holder: SpanHolder,
 ) -> None:
     metadata = kwargs.get("metadata")
-    set_request_params(span, serialized.get("kwargs", {}), span_holder, serialized, metadata)
+    set_request_params(span, kwargs, span_holder, serialized, metadata)

     if should_send_prompts():
         for i, function in enumerate(
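The second argument changes from the serialization-time constructor kwargs to the callback's runtime `kwargs`, the same dict that `metadata` is read from two lines up. An illustration of the two shapes; the exact keys are assumptions about LangChain's callback payloads, not a guaranteed API:

```python
# How the model object was built, captured by LangChain at serialization time:
serialized = {
    "id": ["langchain", "chat_models", "ChatOpenAI"],
    "kwargs": {"model_name": "gpt-4o", "temperature": 0.2},
}

# What the callback receives for one particular call:
kwargs = {
    "invocation_params": {"model": "gpt-4o", "stream": True},
    "metadata": {"ls_model_name": "gpt-4o"},
}

# Passing `kwargs` lets set_request_params see per-call values such as
# invocation_params, rather than only constructor-time arguments.
```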
@@ -471,7 +473,7 @@ def set_chat_response_usage(
     )


-def extract_model_name_from_response_metadata(response: LLMResult) -> str:
+def extract_model_name_from_response_metadata(response: LLMResult) -> Optional[str]:
     """Enhanced model name extraction from response metadata with third-party support."""

     # Standard extraction from response metadata
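With the return type now `Optional[str]`, callers are expected to handle the `None` case explicitly; the new test in test_model_extraction.py below exercises exactly this path. A sketch of caller-side handling, where `requested_model` is a hypothetical stand-in for whatever name the caller already knows:

```python
model = extract_model_name_from_response_metadata(response)
if model is None:
    # Fall back to the unified unknown-model naming defined above.
    model = _get_unified_unknown_model(existing_model=requested_model)
```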

packages/opentelemetry-instrumentation-langchain/tests/test_model_extraction.py (11 additions, 0 deletions)

@@ -190,3 +190,14 @@ def test_metadata_has_priority_over_class_inference(self):

         result = _extract_model_name_from_request(kwargs, self.span_holder, serialized, metadata)
         assert result == "deepseek-v3"  # Should use metadata, not fallback to "deepseek-chat"
+
+    def test_response_metadata_extraction_returns_none_when_no_model_found(self):
+        """Test that extract_model_name_from_response_metadata returns None when no model info is found."""
+        # Response with no model information anywhere
+        response = LLMResult(
+            generations=[[Generation(text="Response without model info")]],
+            llm_output={"other_field": "value"}  # No model-related fields
+        )
+
+        result = extract_model_name_from_response_metadata(response)
+        assert result is None, "Should return None when no model information is found"

packages/opentelemetry-instrumentation-langchain/tests/test_streaming_metrics.py (73 additions, 59 deletions)

@@ -1,5 +1,5 @@
 import time
-from unittest.mock import Mock
+from unittest.mock import Mock, patch
 from uuid import uuid4
 from langchain_core.outputs import LLMResult, Generation
 from opentelemetry.instrumentation.langchain.callback_handler import TraceloopCallbackHandler
@@ -37,33 +37,37 @@ def test_ttft_metric_recorded_on_first_token(self):
         mock_span = Mock(spec=Span)
         mock_span.attributes = {SpanAttributes.LLM_SYSTEM: "Langchain"}

-        # Create span holder with specific start time
-        start_time = time.time()
-        span_holder = SpanHolder(
-            span=mock_span,
-            token=None,
-            context=None,
-            children=[],
-            workflow_name="test",
-            entity_name="test",
-            entity_path="test",
-            start_time=start_time
-        )
-        self.handler.spans[run_id] = span_holder
-
-        # Simulate first token arrival after a small delay
-        time.sleep(0.1)
-        self.handler.on_llm_new_token("Hello", run_id=run_id)
-
-        # Verify TTFT metric was recorded
-        self.ttft_histogram.record.assert_called_once()
-        args = self.ttft_histogram.record.call_args
-        ttft_value = args[0][0]
-        assert ttft_value > 0.05, "TTFT should be greater than 0.05 seconds"
-
-        # Verify attributes
-        attributes = args[1]["attributes"]
-        assert attributes[SpanAttributes.LLM_SYSTEM] == "Langchain"
+        # Use mock time for stable testing
+        with patch('opentelemetry.instrumentation.langchain.callback_handler.time.time') as mock_time, \
+                patch('opentelemetry.instrumentation.langchain.span_utils.time.time') as mock_span_time:
+
+            start_time = 1000.0
+            mock_time.return_value = start_time
+            mock_span_time.return_value = start_time
+
+            span_holder = SpanHolder(
+                span=mock_span,
+                token=None,
+                context=None,
+                children=[],
+                workflow_name="test",
+                entity_name="test",
+                entity_path="test",
+                start_time=start_time
+            )
+            self.handler.spans[run_id] = span_holder
+
+            mock_time.return_value = start_time + 0.1
+            mock_span_time.return_value = start_time + 0.1
+            self.handler.on_llm_new_token("Hello", run_id=run_id)
+
+            self.ttft_histogram.record.assert_called_once()
+            args = self.ttft_histogram.record.call_args
+            ttft_value = args[0][0]
+            assert abs(ttft_value - 0.1) < 0.001, f"TTFT should be approximately 0.1 seconds, got {ttft_value}"
+
+            attributes = args[1]["attributes"]
+            assert attributes[SpanAttributes.LLM_SYSTEM] == "Langchain"

     def test_ttft_metric_not_recorded_on_subsequent_tokens(self):
         """Test that TTFT metric is only recorded once."""
@@ -134,37 +138,47 @@ def test_streaming_time_to_generate_metric(self):
         mock_span = Mock(spec=Span)
         mock_span.attributes = {SpanAttributes.LLM_SYSTEM: "Langchain"}

-        start_time = time.time()
-        span_holder = SpanHolder(
-            span=mock_span,
-            token=None,
-            context=None,
-            children=[],
-            workflow_name="test",
-            entity_name="test",
-            entity_path="test",
-            start_time=start_time
-        )
-        self.handler.spans[run_id] = span_holder
-
-        # Simulate token arrival
-        time.sleep(0.05)
-        self.handler.on_llm_new_token("Hello", run_id=run_id)
-
-        # Simulate completion after more time
-        time.sleep(0.05)
-        llm_result = LLMResult(
-            generations=[[Generation(text="Hello world")]],
-            llm_output={"model_name": "test-model"}
-        )
-
-        self.handler.on_llm_end(llm_result, run_id=run_id)
-
-        # Verify streaming time metric was recorded
-        self.streaming_time_histogram.record.assert_called_once()
-        args = self.streaming_time_histogram.record.call_args
-        streaming_time = args[0][0]
-        assert streaming_time > 0.04, "Streaming time should be greater than 0.04 seconds"
+        with patch('opentelemetry.instrumentation.langchain.callback_handler.time.time') as mock_time, \
+                patch('opentelemetry.instrumentation.langchain.span_utils.time.time') as mock_span_time:
+
+            start_time = 1000.0
+            mock_time.return_value = start_time
+            mock_span_time.return_value = start_time
+
+            span_holder = SpanHolder(
+                span=mock_span,
+                token=None,
+                context=None,
+                children=[],
+                workflow_name="test",
+                entity_name="test",
+                entity_path="test",
+                start_time=start_time
+            )
+            self.handler.spans[run_id] = span_holder
+
+            first_token_time = start_time + 0.05
+            mock_time.return_value = first_token_time
+            mock_span_time.return_value = first_token_time
+            self.handler.on_llm_new_token("Hello", run_id=run_id)
+
+            completion_time = first_token_time + 0.05
+            mock_time.return_value = completion_time
+            mock_span_time.return_value = completion_time
+            llm_result = LLMResult(
+                generations=[[Generation(text="Hello world")]],
+                llm_output={"model_name": "test-model"}
+            )
+
+            self.handler.on_llm_end(llm_result, run_id=run_id)
+
+            self.streaming_time_histogram.record.assert_called_once()
+            args = self.streaming_time_histogram.record.call_args
+            streaming_time = args[0][0]
+            assert abs(streaming_time - 0.05) < 0.001, (
+                f"Streaming time should be approximately 0.05 seconds, "
+                f"got {streaming_time}"
+            )

     def test_exception_metric_recorded_on_error(self):
         """Test that exception metric is recorded on LLM errors."""
