Skip to content

Commit 6af2c21

Browse files
SDK regeneration (#687)
Co-authored-by: fern-api <115122769+fern-api[bot]@users.noreply.github.com>
1 parent 9a52a66 commit 6af2c21

File tree

12 files changed

+261
-168
lines changed

12 files changed

+261
-168
lines changed

poetry.lock

Lines changed: 12 additions & 12 deletions
Some generated files are not rendered by default. Learn more about customizing how changed files appear on GitHub.

pyproject.toml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -3,7 +3,7 @@ name = "cohere"
33

44
[tool.poetry]
55
name = "cohere"
6-
version = "5.16.2"
6+
version = "5.16.3"
77
description = ""
88
readme = "README.md"
99
authors = []

src/cohere/__init__.py

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -167,7 +167,6 @@
167167
NonStreamedChatResponse,
168168
OAuthAuthorizeResponse,
169169
ParseInfo,
170-
ReasoningEffort,
171170
RerankDocument,
172171
RerankRequestDocumentsItem,
173172
RerankResponse,
@@ -206,7 +205,9 @@
206205
TextResponseFormatV2,
207206
TextSystemMessageV2ContentItem,
208207
TextToolContent,
208+
Thinking,
209209
ThinkingAssistantMessageResponseContentItem,
210+
ThinkingType,
210211
TokenizeResponse,
211212
Tool,
212213
ToolCall,
@@ -496,7 +497,6 @@
496497
"NotImplementedError",
497498
"OAuthAuthorizeResponse",
498499
"ParseInfo",
499-
"ReasoningEffort",
500500
"RerankDocument",
501501
"RerankRequestDocumentsItem",
502502
"RerankResponse",
@@ -539,7 +539,9 @@
539539
"TextResponseFormatV2",
540540
"TextSystemMessageV2ContentItem",
541541
"TextToolContent",
542+
"Thinking",
542543
"ThinkingAssistantMessageResponseContentItem",
544+
"ThinkingType",
543545
"TokenizeResponse",
544546
"TooManyRequestsError",
545547
"Tool",

src/cohere/base_client.py

Lines changed: 32 additions & 32 deletions
Original file line numberDiff line numberDiff line change
@@ -146,7 +146,6 @@ def chat_stream(
146146
*,
147147
message: str,
148148
accepts: typing.Optional[typing.Literal["text/event-stream"]] = None,
149-
raw_prompting: typing.Optional[bool] = OMIT,
150149
model: typing.Optional[str] = OMIT,
151150
preamble: typing.Optional[str] = OMIT,
152151
chat_history: typing.Optional[typing.Sequence[Message]] = OMIT,
@@ -165,6 +164,7 @@ def chat_stream(
165164
stop_sequences: typing.Optional[typing.Sequence[str]] = OMIT,
166165
frequency_penalty: typing.Optional[float] = OMIT,
167166
presence_penalty: typing.Optional[float] = OMIT,
167+
raw_prompting: typing.Optional[bool] = OMIT,
168168
tools: typing.Optional[typing.Sequence[Tool]] = OMIT,
169169
tool_results: typing.Optional[typing.Sequence[ToolResult]] = OMIT,
170170
force_single_step: typing.Optional[bool] = OMIT,
@@ -187,12 +187,6 @@ def chat_stream(
187187
accepts : typing.Optional[typing.Literal["text/event-stream"]]
188188
Pass text/event-stream to receive the streamed response as server-sent events. The default is `\n` delimited events.
189189
190-
raw_prompting : typing.Optional[bool]
191-
When enabled, the user's prompt will be sent to the model without
192-
any pre-processing.
193-
194-
Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments
195-
196190
model : typing.Optional[str]
197191
The name of a compatible [Cohere model](https://docs.cohere.com/docs/models) or the ID of a [fine-tuned](https://docs.cohere.com/docs/chat-fine-tuning) model.
198192
@@ -340,6 +334,12 @@ def chat_stream(
340334
341335
Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments
342336
337+
raw_prompting : typing.Optional[bool]
338+
When enabled, the user's prompt will be sent to the model without
339+
any pre-processing.
340+
341+
Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments
342+
343343
tools : typing.Optional[typing.Sequence[Tool]]
344344
A list of available tools (functions) that the model may suggest invoking before producing a text response.
345345
@@ -414,7 +414,6 @@ def chat_stream(
414414
with self._raw_client.chat_stream(
415415
message=message,
416416
accepts=accepts,
417-
raw_prompting=raw_prompting,
418417
model=model,
419418
preamble=preamble,
420419
chat_history=chat_history,
@@ -433,6 +432,7 @@ def chat_stream(
433432
stop_sequences=stop_sequences,
434433
frequency_penalty=frequency_penalty,
435434
presence_penalty=presence_penalty,
435+
raw_prompting=raw_prompting,
436436
tools=tools,
437437
tool_results=tool_results,
438438
force_single_step=force_single_step,
@@ -447,7 +447,6 @@ def chat(
447447
*,
448448
message: str,
449449
accepts: typing.Optional[typing.Literal["text/event-stream"]] = None,
450-
raw_prompting: typing.Optional[bool] = OMIT,
451450
model: typing.Optional[str] = OMIT,
452451
preamble: typing.Optional[str] = OMIT,
453452
chat_history: typing.Optional[typing.Sequence[Message]] = OMIT,
@@ -466,6 +465,7 @@ def chat(
466465
stop_sequences: typing.Optional[typing.Sequence[str]] = OMIT,
467466
frequency_penalty: typing.Optional[float] = OMIT,
468467
presence_penalty: typing.Optional[float] = OMIT,
468+
raw_prompting: typing.Optional[bool] = OMIT,
469469
tools: typing.Optional[typing.Sequence[Tool]] = OMIT,
470470
tool_results: typing.Optional[typing.Sequence[ToolResult]] = OMIT,
471471
force_single_step: typing.Optional[bool] = OMIT,
@@ -487,12 +487,6 @@ def chat(
487487
accepts : typing.Optional[typing.Literal["text/event-stream"]]
488488
Pass text/event-stream to receive the streamed response as server-sent events. The default is `\n` delimited events.
489489
490-
raw_prompting : typing.Optional[bool]
491-
When enabled, the user's prompt will be sent to the model without
492-
any pre-processing.
493-
494-
Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments
495-
496490
model : typing.Optional[str]
497491
The name of a compatible [Cohere model](https://docs.cohere.com/docs/models) or the ID of a [fine-tuned](https://docs.cohere.com/docs/chat-fine-tuning) model.
498492
@@ -640,6 +634,12 @@ def chat(
640634
641635
Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments
642636
637+
raw_prompting : typing.Optional[bool]
638+
When enabled, the user's prompt will be sent to the model without
639+
any pre-processing.
640+
641+
Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments
642+
643643
tools : typing.Optional[typing.Sequence[Tool]]
644644
A list of available tools (functions) that the model may suggest invoking before producing a text response.
645645
@@ -725,7 +725,6 @@ def chat(
725725
_response = self._raw_client.chat(
726726
message=message,
727727
accepts=accepts,
728-
raw_prompting=raw_prompting,
729728
model=model,
730729
preamble=preamble,
731730
chat_history=chat_history,
@@ -744,6 +743,7 @@ def chat(
744743
stop_sequences=stop_sequences,
745744
frequency_penalty=frequency_penalty,
746745
presence_penalty=presence_penalty,
746+
raw_prompting=raw_prompting,
747747
tools=tools,
748748
tool_results=tool_results,
749749
force_single_step=force_single_step,
@@ -1602,7 +1602,6 @@ async def chat_stream(
16021602
*,
16031603
message: str,
16041604
accepts: typing.Optional[typing.Literal["text/event-stream"]] = None,
1605-
raw_prompting: typing.Optional[bool] = OMIT,
16061605
model: typing.Optional[str] = OMIT,
16071606
preamble: typing.Optional[str] = OMIT,
16081607
chat_history: typing.Optional[typing.Sequence[Message]] = OMIT,
@@ -1621,6 +1620,7 @@ async def chat_stream(
16211620
stop_sequences: typing.Optional[typing.Sequence[str]] = OMIT,
16221621
frequency_penalty: typing.Optional[float] = OMIT,
16231622
presence_penalty: typing.Optional[float] = OMIT,
1623+
raw_prompting: typing.Optional[bool] = OMIT,
16241624
tools: typing.Optional[typing.Sequence[Tool]] = OMIT,
16251625
tool_results: typing.Optional[typing.Sequence[ToolResult]] = OMIT,
16261626
force_single_step: typing.Optional[bool] = OMIT,
@@ -1643,12 +1643,6 @@ async def chat_stream(
16431643
accepts : typing.Optional[typing.Literal["text/event-stream"]]
16441644
Pass text/event-stream to receive the streamed response as server-sent events. The default is `\n` delimited events.
16451645
1646-
raw_prompting : typing.Optional[bool]
1647-
When enabled, the user's prompt will be sent to the model without
1648-
any pre-processing.
1649-
1650-
Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments
1651-
16521646
model : typing.Optional[str]
16531647
The name of a compatible [Cohere model](https://docs.cohere.com/docs/models) or the ID of a [fine-tuned](https://docs.cohere.com/docs/chat-fine-tuning) model.
16541648
@@ -1796,6 +1790,12 @@ async def chat_stream(
17961790
17971791
Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments
17981792
1793+
raw_prompting : typing.Optional[bool]
1794+
When enabled, the user's prompt will be sent to the model without
1795+
any pre-processing.
1796+
1797+
Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments
1798+
17991799
tools : typing.Optional[typing.Sequence[Tool]]
18001800
A list of available tools (functions) that the model may suggest invoking before producing a text response.
18011801
@@ -1878,7 +1878,6 @@ async def main() -> None:
18781878
async with self._raw_client.chat_stream(
18791879
message=message,
18801880
accepts=accepts,
1881-
raw_prompting=raw_prompting,
18821881
model=model,
18831882
preamble=preamble,
18841883
chat_history=chat_history,
@@ -1897,6 +1896,7 @@ async def main() -> None:
18971896
stop_sequences=stop_sequences,
18981897
frequency_penalty=frequency_penalty,
18991898
presence_penalty=presence_penalty,
1899+
raw_prompting=raw_prompting,
19001900
tools=tools,
19011901
tool_results=tool_results,
19021902
force_single_step=force_single_step,
@@ -1912,7 +1912,6 @@ async def chat(
19121912
*,
19131913
message: str,
19141914
accepts: typing.Optional[typing.Literal["text/event-stream"]] = None,
1915-
raw_prompting: typing.Optional[bool] = OMIT,
19161915
model: typing.Optional[str] = OMIT,
19171916
preamble: typing.Optional[str] = OMIT,
19181917
chat_history: typing.Optional[typing.Sequence[Message]] = OMIT,
@@ -1931,6 +1930,7 @@ async def chat(
19311930
stop_sequences: typing.Optional[typing.Sequence[str]] = OMIT,
19321931
frequency_penalty: typing.Optional[float] = OMIT,
19331932
presence_penalty: typing.Optional[float] = OMIT,
1933+
raw_prompting: typing.Optional[bool] = OMIT,
19341934
tools: typing.Optional[typing.Sequence[Tool]] = OMIT,
19351935
tool_results: typing.Optional[typing.Sequence[ToolResult]] = OMIT,
19361936
force_single_step: typing.Optional[bool] = OMIT,
@@ -1952,12 +1952,6 @@ async def chat(
19521952
accepts : typing.Optional[typing.Literal["text/event-stream"]]
19531953
Pass text/event-stream to receive the streamed response as server-sent events. The default is `\n` delimited events.
19541954
1955-
raw_prompting : typing.Optional[bool]
1956-
When enabled, the user's prompt will be sent to the model without
1957-
any pre-processing.
1958-
1959-
Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments
1960-
19611955
model : typing.Optional[str]
19621956
The name of a compatible [Cohere model](https://docs.cohere.com/docs/models) or the ID of a [fine-tuned](https://docs.cohere.com/docs/chat-fine-tuning) model.
19631957
@@ -2105,6 +2099,12 @@ async def chat(
21052099
21062100
Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments
21072101
2102+
raw_prompting : typing.Optional[bool]
2103+
When enabled, the user's prompt will be sent to the model without
2104+
any pre-processing.
2105+
2106+
Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments
2107+
21082108
tools : typing.Optional[typing.Sequence[Tool]]
21092109
A list of available tools (functions) that the model may suggest invoking before producing a text response.
21102110
@@ -2198,7 +2198,6 @@ async def main() -> None:
21982198
_response = await self._raw_client.chat(
21992199
message=message,
22002200
accepts=accepts,
2201-
raw_prompting=raw_prompting,
22022201
model=model,
22032202
preamble=preamble,
22042203
chat_history=chat_history,
@@ -2217,6 +2216,7 @@ async def main() -> None:
22172216
stop_sequences=stop_sequences,
22182217
frequency_penalty=frequency_penalty,
22192218
presence_penalty=presence_penalty,
2219+
raw_prompting=raw_prompting,
22202220
tools=tools,
22212221
tool_results=tool_results,
22222222
force_single_step=force_single_step,

src/cohere/core/client_wrapper.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -22,10 +22,10 @@ def __init__(
2222

2323
def get_headers(self) -> typing.Dict[str, str]:
2424
headers: typing.Dict[str, str] = {
25-
"User-Agent": "cohere/5.16.2",
25+
"User-Agent": "cohere/5.16.3",
2626
"X-Fern-Language": "Python",
2727
"X-Fern-SDK-Name": "cohere",
28-
"X-Fern-SDK-Version": "5.16.2",
28+
"X-Fern-SDK-Version": "5.16.3",
2929
}
3030
if self._client_name is not None:
3131
headers["X-Client-Name"] = self._client_name

0 commit comments

Comments (0)