|
65 | 65 |
|
66 | 66 | class BaseCohere: |
67 | 67 | """ |
68 | | - Use this class to access the different functions within the SDK. You can instantiate any number of clients with different configuration that will propogate to these functions. |
| 68 | + Use this class to access the different functions within the SDK. You can instantiate any number of clients with different configuration that will propagate to these functions. |
69 | 69 |
|
70 | 70 | Parameters |
71 | 71 | ---------- |
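Note on the docstring fix above: besides correcting the "propogate" spelling, the sentence's claim is worth illustrating: each client instance carries its own configuration, and every call made through that instance inherits it. A minimal sketch; the `timeout` constructor argument is an assumption about the public SDK surface, not something shown in this diff:

```python
import cohere

# Two independently configured clients. Each client's settings apply to
# every call made through it; the timeout argument here is assumed from
# the public SDK surface, not taken from this diff.
fast_client = cohere.Client(api_key="YOUR_API_KEY", timeout=10)
patient_client = cohere.Client(api_key="YOUR_API_KEY", timeout=120)
```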
@@ -164,7 +164,7 @@ def chat_stream( |
164 | 164 | ) -> typing.Iterator[StreamedChatResponse]: |
165 | 165 | """ |
166 | 166 | Generates a text response to a user message. |
167 | | - To learn how to use Chat with Streaming and RAG follow [this guide](https://docs.cohere.com/docs/cochat-beta#various-ways-of-using-the-chat-endpoint). |
| 167 | + To learn how to use the Chat API with Streaming and RAG follow our [Text Generation guides](https://docs.cohere.com/docs/chat-api). |
168 | 168 |
|
169 | 169 | Parameters |
170 | 170 | ---------- |
@@ -460,7 +460,7 @@ def chat_stream( |
460 | 460 | for chunk in response: |
461 | 461 | yield chunk |
462 | 462 | """ |
463 | | - _request: typing.Dict[str, typing.Any] = {"message": message} |
| 463 | + _request: typing.Dict[str, typing.Any] = {"message": message, "stream": True} |
464 | 464 | if model is not OMIT: |
465 | 465 | _request["model"] = model |
466 | 466 | if preamble is not OMIT: |
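The change at old line 463 pins `"stream": True` into the request body unconditionally, while optional parameters continue to flow through the `OMIT` sentinel. A standalone sketch of that builder pattern, with a stand-in sentinel:

```python
import typing

# Stand-in for the SDK's OMIT sentinel: a unique object that lets the
# builder tell "caller never passed this" apart from an explicit None.
OMIT = typing.cast(typing.Any, object())

def build_chat_request(
    message: str, stream: bool, model: typing.Any = OMIT
) -> typing.Dict[str, typing.Any]:
    # stream is always serialized, mirroring this diff; model is only
    # included when the caller actually supplied it.
    _request: typing.Dict[str, typing.Any] = {"message": message, "stream": stream}
    if model is not OMIT:
        _request["model"] = model
    return _request

assert build_chat_request("hi", stream=True) == {"message": "hi", "stream": True}
```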
@@ -583,7 +583,7 @@ def chat( |
583 | 583 | ) -> NonStreamedChatResponse: |
584 | 584 | """ |
585 | 585 | Generates a text response to a user message. |
586 | | - To learn how to use Chat with Streaming and RAG follow [this guide](https://docs.cohere.com/docs/cochat-beta#various-ways-of-using-the-chat-endpoint). |
| 586 | + To learn how to use the Chat API with Streaming and RAG follow our [Text Generation guides](https://docs.cohere.com/docs/chat-api). |
587 | 587 |
|
588 | 588 | Parameters |
589 | 589 | ---------- |
@@ -803,7 +803,7 @@ def chat( |
803 | 803 | temperature=0.3, |
804 | 804 | ) |
805 | 805 | """ |
806 | | - _request: typing.Dict[str, typing.Any] = {"message": message} |
| 806 | + _request: typing.Dict[str, typing.Any] = {"message": message, "stream": False} |
807 | 807 | if model is not OMIT: |
808 | 808 | _request["model"] = model |
809 | 809 | if preamble is not OMIT: |
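With `stream` now fixed per method (`True` in `chat_stream`, `False` in `chat`), callers pick the response shape by picking the method. A usage sketch against the public client; the event attribute names are assumed from the SDK's streaming types rather than shown in this diff:

```python
import cohere

co = cohere.Client(api_key="YOUR_API_KEY")

# Non-streaming: a single NonStreamedChatResponse ("stream": false on the wire).
reply = co.chat(message="Please explain to me how LLMs work", temperature=0.3)
print(reply.text)

# Streaming: an iterator of StreamedChatResponse events ("stream": true on the wire).
for event in co.chat_stream(message="Please explain to me how LLMs work"):
    if event.event_type == "text-generation":
        print(event.text, end="", flush=True)
```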
@@ -1038,7 +1038,7 @@ def generate_stream( |
1038 | 1038 | for chunk in response: |
1039 | 1039 | yield chunk |
1040 | 1040 | """ |
1041 | | - _request: typing.Dict[str, typing.Any] = {"prompt": prompt} |
| 1041 | + _request: typing.Dict[str, typing.Any] = {"prompt": prompt, "stream": True} |
1042 | 1042 | if model is not OMIT: |
1043 | 1043 | _request["model"] = model |
1044 | 1044 | if num_generations is not OMIT: |
@@ -1253,7 +1253,7 @@ def generate( |
1253 | 1253 | prompt="Please explain to me how LLMs work", |
1254 | 1254 | ) |
1255 | 1255 | """ |
1256 | | - _request: typing.Dict[str, typing.Any] = {"prompt": prompt} |
| 1256 | + _request: typing.Dict[str, typing.Any] = {"prompt": prompt, "stream": False} |
1257 | 1257 | if model is not OMIT: |
1258 | 1258 | _request["model"] = model |
1259 | 1259 | if num_generations is not OMIT: |
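The Generate endpoint gets the same treatment in the two hunks above: `generate_stream` pins `"stream": True` and `generate` pins `"stream": False`. The equivalent usage sketch, with response attribute names assumed from the SDK's generate types:

```python
import cohere

co = cohere.Client(api_key="YOUR_API_KEY")

# Non-streaming: a Generation object holding one or more completions.
gen = co.generate(prompt="Please explain to me how LLMs work")
print(gen.generations[0].text)

# Streaming: chunks arrive as they are produced.
for chunk in co.generate_stream(prompt="Please explain to me how LLMs work"):
    if chunk.event_type == "text-generation":
        print(chunk.text, end="", flush=True)
```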
@@ -2084,7 +2084,7 @@ def check_api_key(self, *, request_options: typing.Optional[RequestOptions] = No |
2084 | 2084 |
|
2085 | 2085 | class AsyncBaseCohere: |
2086 | 2086 | """ |
2087 | | - Use this class to access the different functions within the SDK. You can instantiate any number of clients with different configuration that will propogate to these functions. |
| 2087 | + Use this class to access the different functions within the SDK. You can instantiate any number of clients with different configuration that will propagate to these functions. |
2088 | 2088 |
|
2089 | 2089 | Parameters |
2090 | 2090 | ---------- |
@@ -2183,7 +2183,7 @@ async def chat_stream( |
2183 | 2183 | ) -> typing.AsyncIterator[StreamedChatResponse]: |
2184 | 2184 | """ |
2185 | 2185 | Generates a text response to a user message. |
2186 | | - To learn how to use Chat with Streaming and RAG follow [this guide](https://docs.cohere.com/docs/cochat-beta#various-ways-of-using-the-chat-endpoint). |
| 2186 | + To learn how to use the Chat API with Streaming and RAG follow our [Text Generation guides](https://docs.cohere.com/docs/chat-api). |
2187 | 2187 |
|
2188 | 2188 | Parameters |
2189 | 2189 | ---------- |
@@ -2479,7 +2479,7 @@ async def chat_stream( |
2479 | 2479 | async for chunk in response: |
2480 | 2480 | yield chunk |
2481 | 2481 | """ |
2482 | | - _request: typing.Dict[str, typing.Any] = {"message": message} |
| 2482 | + _request: typing.Dict[str, typing.Any] = {"message": message, "stream": True} |
2483 | 2483 | if model is not OMIT: |
2484 | 2484 | _request["model"] = model |
2485 | 2485 | if preamble is not OMIT: |
@@ -2602,7 +2602,7 @@ async def chat( |
2602 | 2602 | ) -> NonStreamedChatResponse: |
2603 | 2603 | """ |
2604 | 2604 | Generates a text response to a user message. |
2605 | | - To learn how to use Chat with Streaming and RAG follow [this guide](https://docs.cohere.com/docs/cochat-beta#various-ways-of-using-the-chat-endpoint). |
| 2605 | + To learn how to use the Chat API with Streaming and RAG follow our [Text Generation guides](https://docs.cohere.com/docs/chat-api). |
2606 | 2606 |
|
2607 | 2607 | Parameters |
2608 | 2608 | ---------- |
@@ -2822,7 +2822,7 @@ async def chat( |
2822 | 2822 | temperature=0.3, |
2823 | 2823 | ) |
2824 | 2824 | """ |
2825 | | - _request: typing.Dict[str, typing.Any] = {"message": message} |
| 2825 | + _request: typing.Dict[str, typing.Any] = {"message": message, "stream": False} |
2826 | 2826 | if model is not OMIT: |
2827 | 2827 | _request["model"] = model |
2828 | 2828 | if preamble is not OMIT: |
@@ -3057,7 +3057,7 @@ async def generate_stream( |
3057 | 3057 | async for chunk in response: |
3058 | 3058 | yield chunk |
3059 | 3059 | """ |
3060 | | - _request: typing.Dict[str, typing.Any] = {"prompt": prompt} |
| 3060 | + _request: typing.Dict[str, typing.Any] = {"prompt": prompt, "stream": True} |
3061 | 3061 | if model is not OMIT: |
3062 | 3062 | _request["model"] = model |
3063 | 3063 | if num_generations is not OMIT: |
@@ -3272,7 +3272,7 @@ async def generate( |
3272 | 3272 | prompt="Please explain to me how LLMs work", |
3273 | 3273 | ) |
3274 | 3274 | """ |
3275 | | - _request: typing.Dict[str, typing.Any] = {"prompt": prompt} |
| 3275 | + _request: typing.Dict[str, typing.Any] = {"prompt": prompt, "stream": False} |
3276 | 3276 | if model is not OMIT: |
3277 | 3277 | _request["model"] = model |
3278 | 3278 | if num_generations is not OMIT: |
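The async client mirrors all four changes; the only caller-visible difference is asynchronous iteration. A sketch, with the same assumptions about event attributes as above:

```python
import asyncio

import cohere

async def main() -> None:
    co = cohere.AsyncClient(api_key="YOUR_API_KEY")
    # chat_stream sends "stream": true and yields events asynchronously.
    async for event in co.chat_stream(message="Please explain to me how LLMs work"):
        if event.event_type == "text-generation":
            print(event.text, end="", flush=True)

asyncio.run(main())
```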
|