@@ -146,7 +146,6 @@ def chat_stream(
         *,
         message: str,
         accepts: typing.Optional[typing.Literal["text/event-stream"]] = None,
-        raw_prompting: typing.Optional[bool] = OMIT,
         model: typing.Optional[str] = OMIT,
         preamble: typing.Optional[str] = OMIT,
         chat_history: typing.Optional[typing.Sequence[Message]] = OMIT,
@@ -165,6 +164,7 @@ def chat_stream(
         stop_sequences: typing.Optional[typing.Sequence[str]] = OMIT,
         frequency_penalty: typing.Optional[float] = OMIT,
         presence_penalty: typing.Optional[float] = OMIT,
+        raw_prompting: typing.Optional[bool] = OMIT,
         tools: typing.Optional[typing.Sequence[Tool]] = OMIT,
         tool_results: typing.Optional[typing.Sequence[ToolResult]] = OMIT,
         force_single_step: typing.Optional[bool] = OMIT,
@@ -187,12 +187,6 @@ def chat_stream(
         accepts : typing.Optional[typing.Literal["text/event-stream"]]
             Pass text/event-stream to receive the streamed response as server-sent events. The default is `\n` delimited events.

-        raw_prompting : typing.Optional[bool]
-            When enabled, the user's prompt will be sent to the model without
-            any pre-processing.
-
-            Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments
-
         model : typing.Optional[str]
             The name of a compatible [Cohere model](https://docs.cohere.com/docs/models) or the ID of a [fine-tuned](https://docs.cohere.com/docs/chat-fine-tuning) model.

@@ -340,6 +334,12 @@ def chat_stream(

             Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments

+        raw_prompting : typing.Optional[bool]
+            When enabled, the user's prompt will be sent to the model without
+            any pre-processing.
+
+            Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments
+
         tools : typing.Optional[typing.Sequence[Tool]]
             A list of available tools (functions) that the model may suggest invoking before producing a text response.

@@ -414,7 +414,6 @@ def chat_stream(
         with self._raw_client.chat_stream(
             message=message,
             accepts=accepts,
-            raw_prompting=raw_prompting,
             model=model,
             preamble=preamble,
             chat_history=chat_history,
@@ -433,6 +432,7 @@ def chat_stream(
             stop_sequences=stop_sequences,
             frequency_penalty=frequency_penalty,
             presence_penalty=presence_penalty,
+            raw_prompting=raw_prompting,
             tools=tools,
             tool_results=tool_results,
             force_single_step=force_single_step,
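The hunks above only move `raw_prompting` within a keyword-only parameter list (note the leading `*,`), so existing call sites keep working unchanged. A minimal usage sketch, not part of this diff, assuming a `cohere.Client` and a `CO_API_KEY` environment variable:

```python
import cohere

co = cohere.Client()  # assumes CO_API_KEY is set in the environment

# raw_prompting is keyword-only, so its new position in the signature is
# irrelevant to callers; enabling it sends the prompt without pre-processing.
stream = co.chat_stream(
    message="Write one sentence about streaming APIs.",
    raw_prompting=True,
)
for event in stream:
    if event.event_type == "text-generation":
        print(event.text, end="", flush=True)
```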
@@ -447,7 +447,6 @@ def chat(
         *,
         message: str,
         accepts: typing.Optional[typing.Literal["text/event-stream"]] = None,
-        raw_prompting: typing.Optional[bool] = OMIT,
         model: typing.Optional[str] = OMIT,
         preamble: typing.Optional[str] = OMIT,
         chat_history: typing.Optional[typing.Sequence[Message]] = OMIT,
@@ -466,6 +465,7 @@ def chat(
         stop_sequences: typing.Optional[typing.Sequence[str]] = OMIT,
         frequency_penalty: typing.Optional[float] = OMIT,
         presence_penalty: typing.Optional[float] = OMIT,
+        raw_prompting: typing.Optional[bool] = OMIT,
         tools: typing.Optional[typing.Sequence[Tool]] = OMIT,
         tool_results: typing.Optional[typing.Sequence[ToolResult]] = OMIT,
         force_single_step: typing.Optional[bool] = OMIT,
@@ -487,12 +487,6 @@ def chat(
         accepts : typing.Optional[typing.Literal["text/event-stream"]]
             Pass text/event-stream to receive the streamed response as server-sent events. The default is `\n` delimited events.

-        raw_prompting : typing.Optional[bool]
-            When enabled, the user's prompt will be sent to the model without
-            any pre-processing.
-
-            Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments
-
         model : typing.Optional[str]
             The name of a compatible [Cohere model](https://docs.cohere.com/docs/models) or the ID of a [fine-tuned](https://docs.cohere.com/docs/chat-fine-tuning) model.

@@ -640,6 +634,12 @@ def chat(

             Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments

+        raw_prompting : typing.Optional[bool]
+            When enabled, the user's prompt will be sent to the model without
+            any pre-processing.
+
+            Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments
+
         tools : typing.Optional[typing.Sequence[Tool]]
             A list of available tools (functions) that the model may suggest invoking before producing a text response.

@@ -725,7 +725,6 @@ def chat(
         _response = self._raw_client.chat(
             message=message,
             accepts=accepts,
-            raw_prompting=raw_prompting,
             model=model,
             preamble=preamble,
             chat_history=chat_history,
@@ -744,6 +743,7 @@ def chat(
             stop_sequences=stop_sequences,
             frequency_penalty=frequency_penalty,
             presence_penalty=presence_penalty,
+            raw_prompting=raw_prompting,
             tools=tools,
             tool_results=tool_results,
             force_single_step=force_single_step,
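The same reordering applies to the non-streaming `chat` method. A sketch of the equivalent blocking call, under the same assumptions as above:

```python
import cohere

co = cohere.Client()  # assumes CO_API_KEY is set in the environment

# Same keyword-only signature as chat_stream, but returns one full response.
response = co.chat(
    message="Summarise raw prompting in one sentence.",
    raw_prompting=False,  # default: the prompt is pre-processed as usual
)
print(response.text)
```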
@@ -1602,7 +1602,6 @@ async def chat_stream(
         *,
         message: str,
         accepts: typing.Optional[typing.Literal["text/event-stream"]] = None,
-        raw_prompting: typing.Optional[bool] = OMIT,
         model: typing.Optional[str] = OMIT,
         preamble: typing.Optional[str] = OMIT,
         chat_history: typing.Optional[typing.Sequence[Message]] = OMIT,
@@ -1621,6 +1620,7 @@ async def chat_stream(
         stop_sequences: typing.Optional[typing.Sequence[str]] = OMIT,
         frequency_penalty: typing.Optional[float] = OMIT,
         presence_penalty: typing.Optional[float] = OMIT,
+        raw_prompting: typing.Optional[bool] = OMIT,
         tools: typing.Optional[typing.Sequence[Tool]] = OMIT,
         tool_results: typing.Optional[typing.Sequence[ToolResult]] = OMIT,
         force_single_step: typing.Optional[bool] = OMIT,
@@ -1643,12 +1643,6 @@ async def chat_stream(
         accepts : typing.Optional[typing.Literal["text/event-stream"]]
             Pass text/event-stream to receive the streamed response as server-sent events. The default is `\n` delimited events.

-        raw_prompting : typing.Optional[bool]
-            When enabled, the user's prompt will be sent to the model without
-            any pre-processing.
-
-            Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments
-
         model : typing.Optional[str]
             The name of a compatible [Cohere model](https://docs.cohere.com/docs/models) or the ID of a [fine-tuned](https://docs.cohere.com/docs/chat-fine-tuning) model.

@@ -1796,6 +1790,12 @@ async def chat_stream(

             Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments

+        raw_prompting : typing.Optional[bool]
+            When enabled, the user's prompt will be sent to the model without
+            any pre-processing.
+
+            Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments
+
         tools : typing.Optional[typing.Sequence[Tool]]
             A list of available tools (functions) that the model may suggest invoking before producing a text response.

@@ -1878,7 +1878,6 @@ async def main() -> None:
         async with self._raw_client.chat_stream(
             message=message,
             accepts=accepts,
-            raw_prompting=raw_prompting,
             model=model,
             preamble=preamble,
             chat_history=chat_history,
@@ -1897,6 +1896,7 @@ async def main() -> None:
             stop_sequences=stop_sequences,
             frequency_penalty=frequency_penalty,
             presence_penalty=presence_penalty,
+            raw_prompting=raw_prompting,
             tools=tools,
             tool_results=tool_results,
             force_single_step=force_single_step,
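For the async client the change is identical. A hedged sketch, assuming `cohere.AsyncClient` exposes the same keyword-only `chat_stream` and yields events that can be consumed with `async for`:

```python
import asyncio

import cohere


async def main() -> None:
    co = cohere.AsyncClient()  # assumes CO_API_KEY is set in the environment
    stream = co.chat_stream(
        message="Stream a short haiku.",
        raw_prompting=True,  # send the prompt without pre-processing
    )
    async for event in stream:
        if event.event_type == "text-generation":
            print(event.text, end="", flush=True)


asyncio.run(main())
```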
@@ -1912,7 +1912,6 @@ async def chat(
         *,
         message: str,
         accepts: typing.Optional[typing.Literal["text/event-stream"]] = None,
-        raw_prompting: typing.Optional[bool] = OMIT,
         model: typing.Optional[str] = OMIT,
         preamble: typing.Optional[str] = OMIT,
         chat_history: typing.Optional[typing.Sequence[Message]] = OMIT,
@@ -1931,6 +1930,7 @@ async def chat(
         stop_sequences: typing.Optional[typing.Sequence[str]] = OMIT,
         frequency_penalty: typing.Optional[float] = OMIT,
         presence_penalty: typing.Optional[float] = OMIT,
+        raw_prompting: typing.Optional[bool] = OMIT,
         tools: typing.Optional[typing.Sequence[Tool]] = OMIT,
         tool_results: typing.Optional[typing.Sequence[ToolResult]] = OMIT,
         force_single_step: typing.Optional[bool] = OMIT,
@@ -1952,12 +1952,6 @@ async def chat(
         accepts : typing.Optional[typing.Literal["text/event-stream"]]
             Pass text/event-stream to receive the streamed response as server-sent events. The default is `\n` delimited events.

-        raw_prompting : typing.Optional[bool]
-            When enabled, the user's prompt will be sent to the model without
-            any pre-processing.
-
-            Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments
-
         model : typing.Optional[str]
             The name of a compatible [Cohere model](https://docs.cohere.com/docs/models) or the ID of a [fine-tuned](https://docs.cohere.com/docs/chat-fine-tuning) model.

@@ -2105,6 +2099,12 @@ async def chat(

             Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments

+        raw_prompting : typing.Optional[bool]
+            When enabled, the user's prompt will be sent to the model without
+            any pre-processing.
+
+            Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments
+
         tools : typing.Optional[typing.Sequence[Tool]]
             A list of available tools (functions) that the model may suggest invoking before producing a text response.

@@ -2198,7 +2198,6 @@ async def main() -> None:
         _response = await self._raw_client.chat(
             message=message,
             accepts=accepts,
-            raw_prompting=raw_prompting,
             model=model,
             preamble=preamble,
             chat_history=chat_history,
@@ -2217,6 +2216,7 @@ async def main() -> None:
             stop_sequences=stop_sequences,
             frequency_penalty=frequency_penalty,
             presence_penalty=presence_penalty,
+            raw_prompting=raw_prompting,
             tools=tools,
             tool_results=tool_results,
             force_single_step=force_single_step,
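And the non-streaming async variant, again illustrative only and not part of the diff:

```python
import asyncio

import cohere


async def main() -> None:
    co = cohere.AsyncClient()  # assumes CO_API_KEY is set in the environment
    response = await co.chat(
        message="What does raw prompting change?",
        raw_prompting=True,  # bypass prompt pre-processing
    )
    print(response.text)


asyncio.run(main())
```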