Skip to content

Commit 584f4a4

Browse files
committed
fix: API Call Bugs
1 parent 3e4e553 commit 584f4a4

File tree

4 files changed

+110
-36
lines changed

workflows/agents/memory_agent_concise.py

Lines changed: 18 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -436,12 +436,24 @@ async def _call_llm_for_summary(
436436
]
437437
openai_messages.extend(summary_messages)
438438

439-
response = await client.chat.completions.create(
440-
model=self.default_models["openai"],
441-
messages=openai_messages,
442-
max_tokens=5000,
443-
temperature=0.2,
444-
)
439+
# Try max_tokens and temperature first, fallback to max_completion_tokens without temperature if unsupported
440+
try:
441+
response = await client.chat.completions.create(
442+
model=self.default_models["openai"],
443+
messages=openai_messages,
444+
max_tokens=5000,
445+
temperature=0.2,
446+
)
447+
except Exception as e:
448+
if "max_tokens" in str(e) and "max_completion_tokens" in str(e):
449+
# Retry with max_completion_tokens and no temperature for models that require it
450+
response = await client.chat.completions.create(
451+
model=self.default_models["openai"],
452+
messages=openai_messages,
453+
max_completion_tokens=5000,
454+
)
455+
else:
456+
raise
445457

446458
return {"content": response.choices[0].message.content or ""}
447459

workflows/agents/memory_agent_concise_index.py

Lines changed: 18 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -436,12 +436,24 @@ async def _call_llm_for_summary(
436436
]
437437
openai_messages.extend(summary_messages)
438438

439-
response = await client.chat.completions.create(
440-
model=self.default_models["openai"],
441-
messages=openai_messages,
442-
max_tokens=5000,
443-
temperature=0.2,
444-
)
439+
# Try max_tokens and temperature first, fallback to max_completion_tokens without temperature if unsupported
440+
try:
441+
response = await client.chat.completions.create(
442+
model=self.default_models["openai"],
443+
messages=openai_messages,
444+
max_tokens=5000,
445+
temperature=0.2,
446+
)
447+
except Exception as e:
448+
if "max_tokens" in str(e) and "max_completion_tokens" in str(e):
449+
# Retry with max_completion_tokens and no temperature for models that require it
450+
response = await client.chat.completions.create(
451+
model=self.default_models["openai"],
452+
messages=openai_messages,
453+
max_completion_tokens=5000,
454+
)
455+
else:
456+
raise
445457

446458
return {"content": response.choices[0].message.content or ""}
447459

workflows/code_implementation_workflow.py

Lines changed: 37 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -598,11 +598,23 @@ async def _initialize_llm_client(self):
598598
client = AsyncOpenAI(api_key=openai_key)
599599

600600
# Test connection with default model from config
601-
await client.chat.completions.create(
602-
model=self.default_models["openai"],
603-
max_tokens=10,
604-
messages=[{"role": "user", "content": "test"}],
605-
)
601+
# Try max_tokens first, fallback to max_completion_tokens if unsupported
602+
try:
603+
await client.chat.completions.create(
604+
model=self.default_models["openai"],
605+
max_tokens=10,
606+
messages=[{"role": "user", "content": "test"}],
607+
)
608+
except Exception as e:
609+
if "max_tokens" in str(e) and "max_completion_tokens" in str(e):
610+
# Retry with max_completion_tokens for models that require it
611+
await client.chat.completions.create(
612+
model=self.default_models["openai"],
613+
max_completion_tokens=10,
614+
messages=[{"role": "user", "content": "test"}],
615+
)
616+
else:
617+
raise
606618
self.logger.info(
607619
f"Using OpenAI API with model: {self.default_models['openai']}"
608620
)
@@ -691,13 +703,26 @@ async def _call_openai_with_tools(
691703
openai_messages = [{"role": "system", "content": system_message}]
692704
openai_messages.extend(messages)
693705

694-
response = await client.chat.completions.create(
695-
model=self.default_models["openai"],
696-
messages=openai_messages,
697-
tools=openai_tools if openai_tools else None,
698-
max_tokens=max_tokens,
699-
temperature=0.2,
700-
)
706+
# Try max_tokens first, fallback to max_completion_tokens if unsupported
707+
try:
708+
response = await client.chat.completions.create(
709+
model=self.default_models["openai"],
710+
messages=openai_messages,
711+
tools=openai_tools if openai_tools else None,
712+
max_tokens=max_tokens,
713+
temperature=0.2,
714+
)
715+
except Exception as e:
716+
if "max_tokens" in str(e) and "max_completion_tokens" in str(e):
717+
# Retry with max_completion_tokens for models that require it
718+
response = await client.chat.completions.create(
719+
model=self.default_models["openai"],
720+
messages=openai_messages,
721+
tools=openai_tools if openai_tools else None,
722+
max_completion_tokens=max_tokens,
723+
)
724+
else:
725+
raise
701726

702727
message = response.choices[0].message
703728
content = message.content or ""

workflows/code_implementation_workflow_index.py

Lines changed: 37 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -600,11 +600,23 @@ async def _initialize_llm_client(self):
600600
client = AsyncOpenAI(api_key=openai_key)
601601

602602
# Test connection with default model from config
603-
await client.chat.completions.create(
604-
model=self.default_models["openai"],
605-
max_tokens=10,
606-
messages=[{"role": "user", "content": "test"}],
607-
)
603+
# Try max_tokens first, fallback to max_completion_tokens if unsupported
604+
try:
605+
await client.chat.completions.create(
606+
model=self.default_models["openai"],
607+
max_tokens=10,
608+
messages=[{"role": "user", "content": "test"}],
609+
)
610+
except Exception as e:
611+
if "max_tokens" in str(e) and "max_completion_tokens" in str(e):
612+
# Retry with max_completion_tokens for models that require it
613+
await client.chat.completions.create(
614+
model=self.default_models["openai"],
615+
max_completion_tokens=10,
616+
messages=[{"role": "user", "content": "test"}],
617+
)
618+
else:
619+
raise
608620
self.logger.info(
609621
f"Using OpenAI API with model: {self.default_models['openai']}"
610622
)
@@ -693,13 +705,26 @@ async def _call_openai_with_tools(
693705
openai_messages = [{"role": "system", "content": system_message}]
694706
openai_messages.extend(messages)
695707

696-
response = await client.chat.completions.create(
697-
model=self.default_models["openai"],
698-
messages=openai_messages,
699-
tools=openai_tools if openai_tools else None,
700-
max_tokens=max_tokens,
701-
temperature=0.2,
702-
)
708+
# Try max_tokens and temperature first, fallback to max_completion_tokens without temperature if unsupported
709+
try:
710+
response = await client.chat.completions.create(
711+
model=self.default_models["openai"],
712+
messages=openai_messages,
713+
tools=openai_tools if openai_tools else None,
714+
max_tokens=max_tokens,
715+
temperature=0.2,
716+
)
717+
except Exception as e:
718+
if "max_tokens" in str(e) and "max_completion_tokens" in str(e):
719+
# Retry with max_completion_tokens and no temperature for models that require it
720+
response = await client.chat.completions.create(
721+
model=self.default_models["openai"],
722+
messages=openai_messages,
723+
tools=openai_tools if openai_tools else None,
724+
max_completion_tokens=max_tokens,
725+
)
726+
else:
727+
raise
703728

704729
message = response.choices[0].message
705730
content = message.content or ""

0 commit comments

Comments (0)