
Commit db36582

SDK regeneration (#695)
Co-authored-by: fern-api <115122769+fern-api[bot]@users.noreply.github.com>
Parent: e393e7d

File tree (6 files changed: +12, -76 lines)

  poetry.lock
  pyproject.toml
  src/cohere/core/client_wrapper.py
  src/cohere/finetuning/client.py
  src/cohere/v2/client.py
  src/cohere/v2/raw_client.py

poetry.lock

Lines changed: 3 additions & 3 deletions
(Generated file; diff not rendered by default.)

pyproject.toml

Lines changed: 1 addition & 1 deletion
@@ -3,7 +3,7 @@ name = "cohere"
 
 [tool.poetry]
 name = "cohere"
-version = "5.17.1"
+version = "5.18.0"
 description = ""
 readme = "README.md"
 authors = []
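
The only substantive change here is the package version bump from 5.17.1 to 5.18.0. A minimal sketch for confirming which SDK version a downstream environment actually has installed, using only the standard library (the one assumption is the PyPI distribution name "cohere"):

from importlib.metadata import PackageNotFoundError, version

try:
    # Expect "5.18.0" once this release is installed.
    print(version("cohere"))
except PackageNotFoundError:
    print("cohere is not installed")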

src/cohere/core/client_wrapper.py

Lines changed: 2 additions & 2 deletions
@@ -22,10 +22,10 @@ def __init__(
 
     def get_headers(self) -> typing.Dict[str, str]:
         headers: typing.Dict[str, str] = {
-            "User-Agent": "cohere/5.17.1",
+            "User-Agent": "cohere/5.18.0",
             "X-Fern-Language": "Python",
             "X-Fern-SDK-Name": "cohere",
-            "X-Fern-SDK-Version": "5.17.1",
+            "X-Fern-SDK-Version": "5.18.0",
         }
         if self._client_name is not None:
             headers["X-Client-Name"] = self._client_name
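
Both headers are stamped onto every outgoing request by the client wrapper, so they move in lockstep with the pyproject version. A quick illustrative check, assuming the generated client still exposes its private _client_wrapper attribute as fern-generated SDKs typically do (internal, not a supported API):

import cohere

client = cohere.Client(api_key="dummy")  # constructing a client makes no request
headers = client._client_wrapper.get_headers()  # private internal, illustration only
assert headers["User-Agent"] == "cohere/5.18.0"
assert headers["X-Fern-SDK-Version"] == "5.18.0"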

src/cohere/finetuning/client.py

Lines changed: 6 additions & 6 deletions
@@ -115,12 +115,12 @@ def create_finetuned_model(
 )
 client.finetuning.create_finetuned_model(
     request=FinetunedModel(
-        name="api-test",
+        name="name",
         settings=Settings(
             base_model=BaseModel(
-                base_type="BASE_TYPE_CHAT",
+                base_type="BASE_TYPE_UNSPECIFIED",
             ),
-            dataset_id="my-dataset-id",
+            dataset_id="dataset_id",
         ),
     ),
 )

@@ -478,12 +478,12 @@ async def create_finetuned_model(
 async def main() -> None:
     await client.finetuning.create_finetuned_model(
         request=FinetunedModel(
-            name="api-test",
+            name="name",
             settings=Settings(
                 base_model=BaseModel(
-                    base_type="BASE_TYPE_CHAT",
+                    base_type="BASE_TYPE_UNSPECIFIED",
                 ),
-                dataset_id="my-dataset-id",
+                dataset_id="dataset_id",
             ),
         ),
     )
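
These hunks only touch the docstring examples, swapping the old concrete test values for generic placeholders. For context, a self-contained version of the updated example; the cohere.finetuning import path follows Cohere's published fine-tuning docs, and the API key is a placeholder:

from cohere import Client
from cohere.finetuning import BaseModel, FinetunedModel, Settings

client = Client(api_key="YOUR_API_KEY")  # placeholder

# Same call as the regenerated docstring example: "name", "dataset_id", and
# BASE_TYPE_UNSPECIFIED are placeholders to be replaced with real values.
client.finetuning.create_finetuned_model(
    request=FinetunedModel(
        name="name",
        settings=Settings(
            base_model=BaseModel(base_type="BASE_TYPE_UNSPECIFIED"),
            dataset_id="dataset_id",
        ),
    ),
)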

src/cohere/v2/client.py

Lines changed: 0 additions & 32 deletions
@@ -66,7 +66,6 @@ def chat_stream(
     logprobs: typing.Optional[bool] = OMIT,
     tool_choice: typing.Optional[V2ChatStreamRequestToolChoice] = OMIT,
     thinking: typing.Optional[Thinking] = OMIT,
-    raw_prompting: typing.Optional[bool] = OMIT,
     request_options: typing.Optional[RequestOptions] = None,
 ) -> typing.Iterator[V2ChatStreamResponse]:
     """

@@ -159,12 +158,6 @@ def chat_stream(
 
     thinking : typing.Optional[Thinking]
 
-    raw_prompting : typing.Optional[bool]
-        When enabled, the user's prompt will be sent to the model without
-        any pre-processing.
-
-        Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments
-
     request_options : typing.Optional[RequestOptions]
         Request-specific configuration.
 

@@ -212,7 +205,6 @@ def chat_stream(
     logprobs=logprobs,
     tool_choice=tool_choice,
     thinking=thinking,
-    raw_prompting=raw_prompting,
     request_options=request_options,
 ) as r:
     yield from r.data

@@ -239,7 +231,6 @@ def chat(
     logprobs: typing.Optional[bool] = OMIT,
     tool_choice: typing.Optional[V2ChatRequestToolChoice] = OMIT,
     thinking: typing.Optional[Thinking] = OMIT,
-    raw_prompting: typing.Optional[bool] = OMIT,
     request_options: typing.Optional[RequestOptions] = None,
 ) -> V2ChatResponse:
     """

@@ -332,12 +323,6 @@ def chat(
 
     thinking : typing.Optional[Thinking]
 
-    raw_prompting : typing.Optional[bool]
-        When enabled, the user's prompt will be sent to the model without
-        any pre-processing.
-
-        Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments
-
     request_options : typing.Optional[RequestOptions]
         Request-specific configuration.
 

@@ -383,7 +368,6 @@ def chat(
     logprobs=logprobs,
     tool_choice=tool_choice,
     thinking=thinking,
-    raw_prompting=raw_prompting,
     request_options=request_options,
 )
 return _response.data

@@ -602,7 +586,6 @@ async def chat_stream(
     logprobs: typing.Optional[bool] = OMIT,
     tool_choice: typing.Optional[V2ChatStreamRequestToolChoice] = OMIT,
     thinking: typing.Optional[Thinking] = OMIT,
-    raw_prompting: typing.Optional[bool] = OMIT,
     request_options: typing.Optional[RequestOptions] = None,
 ) -> typing.AsyncIterator[V2ChatStreamResponse]:
     """

@@ -695,12 +678,6 @@ async def chat_stream(
 
     thinking : typing.Optional[Thinking]
 
-    raw_prompting : typing.Optional[bool]
-        When enabled, the user's prompt will be sent to the model without
-        any pre-processing.
-
-        Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments
-
     request_options : typing.Optional[RequestOptions]
         Request-specific configuration.
 

@@ -756,7 +733,6 @@ async def main() -> None:
     logprobs=logprobs,
     tool_choice=tool_choice,
     thinking=thinking,
-    raw_prompting=raw_prompting,
     request_options=request_options,
 ) as r:
     async for _chunk in r.data:

@@ -784,7 +760,6 @@ async def chat(
     logprobs: typing.Optional[bool] = OMIT,
     tool_choice: typing.Optional[V2ChatRequestToolChoice] = OMIT,
     thinking: typing.Optional[Thinking] = OMIT,
-    raw_prompting: typing.Optional[bool] = OMIT,
     request_options: typing.Optional[RequestOptions] = None,
 ) -> V2ChatResponse:
     """

@@ -877,12 +852,6 @@ async def chat(
 
     thinking : typing.Optional[Thinking]
 
-    raw_prompting : typing.Optional[bool]
-        When enabled, the user's prompt will be sent to the model without
-        any pre-processing.
-
-        Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments
-
     request_options : typing.Optional[RequestOptions]
         Request-specific configuration.
 

@@ -936,7 +905,6 @@ async def main() -> None:
     logprobs=logprobs,
     tool_choice=tool_choice,
     thinking=thinking,
-    raw_prompting=raw_prompting,
     request_options=request_options,
 )
 return _response.data
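
The net effect of these hunks: raw_prompting is removed from the v2 chat and chat_stream signatures, so code still passing it will now fail at call time with a TypeError rather than being silently ignored. A hedged migration sketch (model name and message content are illustrative):

import cohere

client = cohere.ClientV2(api_key="YOUR_API_KEY")  # placeholder

# Before 5.18.0 the keyword was accepted:
#     client.chat(model=..., messages=..., raw_prompting=False)
# After this regeneration it raises:
#     TypeError: chat() got an unexpected keyword argument 'raw_prompting'
response = client.chat(
    model="command-a-03-2025",  # illustrative model name
    messages=[{"role": "user", "content": "hello"}],
)
print(response.message)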

src/cohere/v2/raw_client.py

Lines changed: 0 additions & 32 deletions
@@ -75,7 +75,6 @@ def chat_stream(
     logprobs: typing.Optional[bool] = OMIT,
     tool_choice: typing.Optional[V2ChatStreamRequestToolChoice] = OMIT,
     thinking: typing.Optional[Thinking] = OMIT,
-    raw_prompting: typing.Optional[bool] = OMIT,
     request_options: typing.Optional[RequestOptions] = None,
 ) -> typing.Iterator[HttpResponse[typing.Iterator[V2ChatStreamResponse]]]:
     """

@@ -168,12 +167,6 @@ def chat_stream(
 
     thinking : typing.Optional[Thinking]
 
-    raw_prompting : typing.Optional[bool]
-        When enabled, the user's prompt will be sent to the model without
-        any pre-processing.
-
-        Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments
-
     request_options : typing.Optional[RequestOptions]
         Request-specific configuration.
 

@@ -217,7 +210,6 @@ def chat_stream(
     "thinking": convert_and_respect_annotation_metadata(
         object_=thinking, annotation=Thinking, direction="write"
     ),
-    "raw_prompting": raw_prompting,
     "stream": True,
 },
 headers={

@@ -413,7 +405,6 @@ def chat(
     logprobs: typing.Optional[bool] = OMIT,
     tool_choice: typing.Optional[V2ChatRequestToolChoice] = OMIT,
     thinking: typing.Optional[Thinking] = OMIT,
-    raw_prompting: typing.Optional[bool] = OMIT,
     request_options: typing.Optional[RequestOptions] = None,
 ) -> HttpResponse[V2ChatResponse]:
     """

@@ -506,12 +497,6 @@ def chat(
 
     thinking : typing.Optional[Thinking]
 
-    raw_prompting : typing.Optional[bool]
-        When enabled, the user's prompt will be sent to the model without
-        any pre-processing.
-
-        Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments
-
     request_options : typing.Optional[RequestOptions]
         Request-specific configuration.
 

@@ -555,7 +540,6 @@ def chat(
     "thinking": convert_and_respect_annotation_metadata(
         object_=thinking, annotation=Thinking, direction="write"
     ),
-    "raw_prompting": raw_prompting,
     "stream": False,
 },
 headers={

@@ -1187,7 +1171,6 @@ async def chat_stream(
     logprobs: typing.Optional[bool] = OMIT,
     tool_choice: typing.Optional[V2ChatStreamRequestToolChoice] = OMIT,
     thinking: typing.Optional[Thinking] = OMIT,
-    raw_prompting: typing.Optional[bool] = OMIT,
     request_options: typing.Optional[RequestOptions] = None,
 ) -> typing.AsyncIterator[AsyncHttpResponse[typing.AsyncIterator[V2ChatStreamResponse]]]:
     """

@@ -1280,12 +1263,6 @@ async def chat_stream(
 
     thinking : typing.Optional[Thinking]
 
-    raw_prompting : typing.Optional[bool]
-        When enabled, the user's prompt will be sent to the model without
-        any pre-processing.
-
-        Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments
-
     request_options : typing.Optional[RequestOptions]
         Request-specific configuration.
 

@@ -1329,7 +1306,6 @@ async def chat_stream(
     "thinking": convert_and_respect_annotation_metadata(
         object_=thinking, annotation=Thinking, direction="write"
     ),
-    "raw_prompting": raw_prompting,
     "stream": True,
 },
 headers={

@@ -1525,7 +1501,6 @@ async def chat(
     logprobs: typing.Optional[bool] = OMIT,
     tool_choice: typing.Optional[V2ChatRequestToolChoice] = OMIT,
     thinking: typing.Optional[Thinking] = OMIT,
-    raw_prompting: typing.Optional[bool] = OMIT,
     request_options: typing.Optional[RequestOptions] = None,
 ) -> AsyncHttpResponse[V2ChatResponse]:
     """

@@ -1618,12 +1593,6 @@ async def chat(
 
     thinking : typing.Optional[Thinking]
 
-    raw_prompting : typing.Optional[bool]
-        When enabled, the user's prompt will be sent to the model without
-        any pre-processing.
-
-        Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments
-
     request_options : typing.Optional[RequestOptions]
         Request-specific configuration.
 

@@ -1667,7 +1636,6 @@ async def chat(
     "thinking": convert_and_respect_annotation_metadata(
         object_=thinking, annotation=Thinking, direction="write"
     ),
-    "raw_prompting": raw_prompting,
     "stream": False,
 },
 headers={
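
In the raw client the same removal reaches the wire format: the "raw_prompting" key is dropped from the JSON request body in all four code paths (sync and async, chat and chat_stream), so it is never serialized, matching the signature change in v2/client.py.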
