diff --git a/src/backend/chat/custom/tool_calls.py b/src/backend/chat/custom/tool_calls.py
index 5a67a42d93..a7592dfd7c 100644
--- a/src/backend/chat/custom/tool_calls.py
+++ b/src/backend/chat/custom/tool_calls.py
@@ -9,7 +9,10 @@
 from backend.model_deployments.base import BaseDeployment
 from backend.schemas.context import Context
 from backend.services.logger.utils import LoggerFactory
-from backend.tools.base import ToolAuthException, ToolError, ToolErrorCode
+from backend.tools.base import (
+    ToolAuthException,
+    ToolErrorCode,
+)
 
 TIMEOUT_SECONDS = 60
 
@@ -110,11 +113,9 @@ async def _call_tool_async(
             {
                 "call": tool_call,
                 "outputs": tool.get_tool_error(
-                    ToolError(
-                        text="Tool authentication failed",
-                        details=str(e),
-                        type=ToolErrorCode.AUTH,
-                    )
+                    details=str(e),
+                    text="Tool authentication failed",
+                    error_type=ToolErrorCode.AUTH,
                 ),
             }
         ]
@@ -122,7 +123,7 @@
         return [
             {
                 "call": tool_call,
-                "outputs": tool.get_tool_error(ToolError(text=str(e))),
+                "outputs": tool.get_tool_error(details=str(e)),
             }
         ]
 
diff --git a/src/backend/tests/unit/chat/test_tool_calls.py b/src/backend/tests/unit/chat/test_tool_calls.py
index 16e5b15b59..0d7b3ba364 100644
--- a/src/backend/tests/unit/chat/test_tool_calls.py
+++ b/src/backend/tests/unit/chat/test_tool_calls.py
@@ -116,11 +116,10 @@ async def call(
                 "name": "toolkit_calculator",
                 "parameters": {"code": "6*7"},
             },
-            "outputs": [{'type': 'other', 'success': False, 'text': 'Calculator failed', 'details': ''}],
+            "outputs": [{'type': 'other', 'success': False, 'text': 'Error calling tool toolkit_calculator.', 'details': 'Calculator failed'}],
         },
     ]
 
-
 @patch("backend.chat.custom.tool_calls.TIMEOUT_SECONDS", 1)
 def test_async_call_tools_timeout(mock_get_available_tools) -> None:
     class MockCalculator(BaseTool):
@@ -249,8 +248,8 @@ async def call(
     )
     assert {'call': {'name': 'web_scrape', 'parameters': {'code': '6*7'}}, 'outputs': [
-        {'details': '', 'success': False, 'text': "Model didn't pass required parameter: url", 'type'
-        : 'other'}]} in results
+        {'type': 'other', 'success': False, 'text': 'Error calling tool web_scrape.',
+         'details': "Model didn't pass required parameter: url"}]} in results
     assert {
         "call": {"name": "toolkit_calculator", "parameters": {"code": "6*7"}},
         "outputs": [{"result": 42}],
@@ -299,7 +298,7 @@ async def call(
         async_call_tools(chat_history, MockCohereDeployment(), ctx)
     )
     assert {'call': {'name': 'toolkit_calculator', 'parameters': {'invalid_param': '6*7'}}, 'outputs': [
-        {'details': '', 'success': False, 'text': "Model didn't pass required parameter: code",
+        {'details': "Model didn't pass required parameter: code", 'success': False, 'text': 'Error calling tool toolkit_calculator.',
         'type': 'other'}]} in results
 
 def test_tools_params_checker_invalid_param_type(mock_get_available_tools) -> None:
@@ -343,9 +342,8 @@ async def call(
         async_call_tools(chat_history, MockCohereDeployment(), ctx)
     )
     assert {'call': {'name': 'toolkit_calculator', 'parameters': {'code': 6}}, 'outputs': [
-        {'details': '', 'success': False,
-        'text': "Model passed invalid parameter. Parameter 'code' must be of type str, but got int",
-        'type': 'other'}]} in results
+        {'type': 'other', 'success': False, 'text': 'Error calling tool toolkit_calculator.',
+         'details': "Model passed invalid parameter. Parameter 'code' must be of type str, but got int"}]} in results
 
 def test_tools_params_checker_required_param_empty(mock_get_available_tools) -> None:
     class MockCalculator(BaseTool):
@@ -388,5 +386,5 @@ async def call(
         async_call_tools(chat_history, MockCohereDeployment(), ctx)
     )
     assert {'call': {'name': 'toolkit_calculator', 'parameters': {'code': ''}}, 'outputs': [
-        {'details': '', 'success': False, 'text': 'Model passed empty value for required parameter: code',
-        'type': 'other'}]} in results
+        {'details': 'Model passed empty value for required parameter: code', 'success': False,
+         'text': 'Error calling tool toolkit_calculator.', 'type': 'other'}]} in results
diff --git a/src/backend/tests/unit/tools/test_calculator.py b/src/backend/tests/unit/tools/test_calculator.py
index 5ff68ab063..3a821a4d45 100644
--- a/src/backend/tests/unit/tools/test_calculator.py
+++ b/src/backend/tests/unit/tools/test_calculator.py
@@ -17,4 +17,5 @@ async def test_calculator_invalid_syntax() -> None:
     ctx = Context()
     calculator = Calculator()
     result = await calculator.call({"code": "2+"}, ctx)
-    assert result == {"text": "Parsing error - syntax not allowed."}
+
+    assert result == [{'details': 'parse error [column 2]: parity, expression: 2+', 'success': False, 'text': 'Error calling tool toolkit_calculator.', 'type': 'other'}]
diff --git a/src/backend/tests/unit/tools/test_lang_chain.py b/src/backend/tests/unit/tools/test_lang_chain.py
index 1ca920502b..50a593dc92 100644
--- a/src/backend/tests/unit/tools/test_lang_chain.py
+++ b/src/backend/tests/unit/tools/test_lang_chain.py
@@ -78,7 +78,8 @@ async def test_wiki_retriever_no_docs() -> None:
     ):
         result = await retriever.call({"query": query}, ctx)
 
-    assert result == []
+    assert result == [{'details': '', 'success': False, 'text': 'No results found.', 'type': 'other'}]
+
 
 @pytest.mark.skipif(not is_cohere_env_set, reason="Cohere API key not set")
@@ -163,4 +164,4 @@ async def test_vector_db_retriever_no_docs() -> None:
         mock_db.as_retriever().get_relevant_documents.return_value = mock_docs
 
         result = await retriever.call({"query": query}, ctx)
-    assert result == []
+    assert result == [{'details': '', 'success': False, 'text': 'No results found.', 'type': 'other'}]
diff --git a/src/backend/tools/base.py b/src/backend/tools/base.py
index b491ae3907..06c2b0aede 100644
--- a/src/backend/tools/base.py
+++ b/src/backend/tools/base.py
@@ -28,7 +28,6 @@ def __init__(self, message, tool_id: str):
         self.message = message
         self.tool_id = tool_id
 
-
 class ToolError(BaseModel, extra="allow"):
     type: ToolErrorCode = ToolErrorCode.OTHER
     success: bool = False
@@ -38,6 +37,7 @@ class ToolArgument(StrEnum):
     DOMAIN_FILTER = "domain_filter"
     SITE_FILTER = "site_filter"
 
+
 class ParametersValidationMeta(type):
     """
     Metaclass to decorate all tools `call` methods with the parameter checker.
@@ -90,14 +90,14 @@ def _handle_tool_specific_errors(cls, error: Exception, **kwargs: Any) -> None:
         ...
 
     @classmethod
-    def get_tool_error(cls, err: ToolError):
-        tool_error = err.model_dump()
+    def get_tool_error(cls, details: str, text: str = "Error calling tool", error_type: ToolErrorCode = ToolErrorCode.OTHER):
+        tool_error = ToolError(text=f"{text} {cls.ID}.", details=details, type=error_type).model_dump()
         logger.error(event=f"Error calling tool {cls.ID}", error=tool_error)
         return [tool_error]
 
     @classmethod
     def get_no_results_error(cls):
-        return cls.get_tool_error(ToolError(text="No results found."))
+        return ToolError(text="No results found.", details="No results found for the given params.")
 
     @abstractmethod
     async def call(
diff --git a/src/backend/tools/brave_search/tool.py b/src/backend/tools/brave_search/tool.py
index 5331500531..4e424b5f40 100644
--- a/src/backend/tools/brave_search/tool.py
+++ b/src/backend/tools/brave_search/tool.py
@@ -50,15 +50,19 @@ async def call(
         # Get domain filtering from kwargs
         filtered_domains = kwargs.get(ToolArgument.DOMAIN_FILTER, [])
 
-        response = await self.client.search_async(
-            q=query, count=self.num_results, include_domains=filtered_domains
-        )
+        try:
+            response = await self.client.search_async(
+                q=query, count=self.num_results, include_domains=filtered_domains
+            )
+        except Exception as e:
+            return self.get_tool_error(details=str(e))
+
         response = dict(response)
 
         results = response.get("web", {}).get("results", [])
         if not results:
-            self.get_no_results_error()
+            return self.get_no_results_error()
 
         tool_results = []
         for result in results:
diff --git a/src/backend/tools/calculator.py b/src/backend/tools/calculator.py
index 3b96859663..5de2c1dcda 100644
--- a/src/backend/tools/calculator.py
+++ b/src/backend/tools/calculator.py
@@ -52,11 +52,11 @@ async def call(
 
         to_evaluate = expression.replace("pi", "PI").replace("e", "E")
 
-        result = []
         try:
             result = {"text": math_parser.parse(to_evaluate).evaluate({})}
         except Exception as e:
             logger.error(event=f"[Calculator] Error parsing expression: {e}")
-            result = {"text": "Parsing error - syntax not allowed."}
+            return self.get_tool_error(details=str(e))
 
-        return result
+
+        return result  # type: ignore
diff --git a/src/backend/tools/files.py b/src/backend/tools/files.py
index 146a741c0e..707d4cb1ec 100644
--- a/src/backend/tools/files.py
+++ b/src/backend/tools/files.py
@@ -58,12 +58,12 @@ async def call(self, parameters: dict, **kwargs: Any) -> List[Dict[str, Any]]:
         session = kwargs.get("session")
         user_id = kwargs.get("user_id")
         if not file:
-            return []
+            return self.get_tool_error(details="Files are not passed in model generated params")
 
         _, file_id = file
         retrieved_file = file_crud.get_file(session, file_id, user_id)
         if not retrieved_file:
-            return []
+            return self.get_tool_error(details="The wrong files were passed in the tool parameters, or files were not found")
 
         return [
             {
@@ -125,13 +125,15 @@ async def call(
         user_id = kwargs.get("user_id")
 
         if not query or not files:
-            return []
+            return self.get_tool_error(
+                details="Missing query or files. The wrong files might have been passed in the tool parameters")
 
         file_ids = [file_id for _, file_id in files]
         retrieved_files = file_crud.get_files_by_ids(session, file_ids, user_id)
         if not retrieved_files:
-            return []
+            return self.get_tool_error(
+                details="Missing files. The wrong files might have been passed in the tool parameters")
 
         results = []
         for file in retrieved_files:
@@ -142,4 +144,7 @@
                 "url": file.file_name,
             }
         )
+        if not results:
+            return self.get_no_results_error()
+
         return results
diff --git a/src/backend/tools/google_drive/tool.py b/src/backend/tools/google_drive/tool.py
index 6131e095d6..d5b0a1f8f5 100644
--- a/src/backend/tools/google_drive/tool.py
+++ b/src/backend/tools/google_drive/tool.py
@@ -77,9 +77,17 @@ async def call(self, parameters: dict, **kwargs: Any) -> List[Dict[str, Any]]:
         # Search Google Drive
         logger.info(event="[Google Drive] Defaulting to raw Google Drive search.")
         agent_tool_metadata = kwargs["agent_tool_metadata"]
-        documents = await _default_gdrive_list_files(
-            user_id=user_id, query=query, agent_tool_metadata=agent_tool_metadata
-        )
+        try:
+            documents = await _default_gdrive_list_files(
+                user_id=user_id, query=query, agent_tool_metadata=agent_tool_metadata
+            )
+        except Exception as e:
+            return self.get_tool_error(details=str(e))
+
+        if not documents:
+            logger.info(event="[Google Drive] No documents found.")
+            return self.get_no_results_error()
+
         return documents
 
@@ -141,20 +149,17 @@ async def _default_gdrive_list_files(
     fields = f"nextPageToken, files({DOC_FIELDS})"
 
     search_results = []
-    try:
-        search_results = (
-            service.files()
-            .list(
-                pageSize=SEARCH_LIMIT,
-                q=q,
-                includeItemsFromAllDrives=True,
-                supportsAllDrives=True,
-                fields=fields,
-            )
-            .execute()
+    search_results = (
+        service.files()
+        .list(
+            pageSize=SEARCH_LIMIT,
+            q=q,
+            includeItemsFromAllDrives=True,
+            supportsAllDrives=True,
+            fields=fields,
         )
-    except Exception as error:
-        logger.error(event="[Google Drive] Error searching files", error=error)
+        .execute()
+    )
 
     files = search_results.get("files", [])
     if not files:
diff --git a/src/backend/tools/google_search.py b/src/backend/tools/google_search.py
index f05af3f8b1..14a7e21dd1 100644
--- a/src/backend/tools/google_search.py
+++ b/src/backend/tools/google_search.py
@@ -49,9 +49,11 @@ async def call(
         # Get domain filtering from kwargs
         filtered_domains = kwargs.get(ToolArgument.DOMAIN_FILTER, [])
         domain_filters = [f"site:{domain}" for domain in filtered_domains]
-
-        response = cse.list(q=query, cx=self.CSE_ID, orTerms=domain_filters).execute()
-        search_results = response.get("items", [])
+        try:
+            response = cse.list(q=query, cx=self.CSE_ID, orTerms=domain_filters).execute()
+            search_results = response.get("items", [])
+        except Exception as e:
+            return self.get_tool_error(details=str(e))
 
         if not search_results:
             return self.get_no_results_error()
diff --git a/src/backend/tools/hybrid_search.py b/src/backend/tools/hybrid_search.py
index d8b815de04..849691c3cb 100644
--- a/src/backend/tools/hybrid_search.py
+++ b/src/backend/tools/hybrid_search.py
@@ -115,6 +115,9 @@ async def call(
             **kwargs,
         )
 
+        if not reranked_results:
+            return self.get_no_results_error()
+
         return reranked_results
 
     async def rerank_results(
diff --git a/src/backend/tools/lang_chain.py b/src/backend/tools/lang_chain.py
index 4510a27ea2..71f12c5d1c 100644
--- a/src/backend/tools/lang_chain.py
+++ b/src/backend/tools/lang_chain.py
@@ -59,11 +59,17 @@ async def call(
     ) -> List[Dict[str, Any]]:
         wiki_retriever = WikipediaRetriever()
         query = parameters.get("query", "")
-        docs = wiki_retriever.get_relevant_documents(query)
-        text_splitter = CharacterTextSplitter(
-            chunk_size=self.chunk_size, chunk_overlap=self.chunk_overlap
-        )
-        documents = text_splitter.split_documents(docs)
+        try:
+            docs = wiki_retriever.get_relevant_documents(query)
+            text_splitter = CharacterTextSplitter(
+                chunk_size=self.chunk_size, chunk_overlap=self.chunk_overlap
+            )
+            documents = text_splitter.split_documents(docs)
+        except Exception as e:
+            return self.get_tool_error(details=str(e))
+
+        if not documents:
+            return self.get_no_results_error()
 
         return [
             {
@@ -115,13 +121,18 @@ async def call(
         cohere_embeddings = CohereEmbeddings(cohere_api_key=self.COHERE_API_KEY)
 
         # Load text files and split into chunks
-        loader = PyPDFLoader(self.filepath)
-        text_splitter = CharacterTextSplitter(chunk_size=300, chunk_overlap=0)
-        pages = loader.load_and_split(text_splitter)
-
-        # Create a vector store from the documents
-        db = Chroma.from_documents(documents=pages, embedding=cohere_embeddings)
-        query = parameters.get("query", "")
-        input_docs = db.as_retriever().get_relevant_documents(query)
+        try:
+            loader = PyPDFLoader(self.filepath)
+            text_splitter = CharacterTextSplitter(chunk_size=300, chunk_overlap=0)
+            pages = loader.load_and_split(text_splitter)
+
+            # Create a vector store from the documents
+            db = Chroma.from_documents(documents=pages, embedding=cohere_embeddings)
+            query = parameters.get("query", "")
+            input_docs = db.as_retriever().get_relevant_documents(query)
+        except Exception as e:
+            return self.get_tool_error(details=str(e))
+        if not input_docs:
+            return self.get_no_results_error()
 
         return [{"text": doc.page_content} for doc in input_docs]
diff --git a/src/backend/tools/python_interpreter.py b/src/backend/tools/python_interpreter.py
index 426844ab48..e7015703f9 100644
--- a/src/backend/tools/python_interpreter.py
+++ b/src/backend/tools/python_interpreter.py
@@ -57,9 +57,15 @@ async def call(self, parameters: dict, ctx: Any, **kwargs: Any):
             raise Exception("Python Interpreter tool called while URL not set")
 
         code = parameters.get("code", "")
-        res = requests.post(self.INTERPRETER_URL, json={"code": code})
+        try:
+            res = requests.post(self.INTERPRETER_URL, json={"code": code})
+            clean_res = self._clean_response(res.json())
+        except Exception as e:
+            return self.get_tool_error(details=str(e))
+
+        if not clean_res:
+            return self.get_no_results_error()
 
-        clean_res = self._clean_response(res.json())
         return clean_res
 
     def _clean_response(self, result: Any) -> Dict[str, str]:
@@ -82,7 +88,8 @@ def _clean_response(self, result: Any) -> Dict[str, str]:
                 r.setdefault("text", r.get("std_out"))
             elif r.get("success") is False:
                 error_message = r.get("error", {}).get("message", "")
-                r.setdefault("text", error_message)
+                # r.setdefault("text", error_message)
+                return self.get_tool_error(details=error_message)
             elif r.get("output_file") and r.get("output_file").get("filename"):
                 if r["output_file"]["filename"] != "":
                     r.setdefault(
diff --git a/src/backend/tools/slack/tool.py b/src/backend/tools/slack/tool.py
index 35e78f0aea..20c9616374 100644
--- a/src/backend/tools/slack/tool.py
+++ b/src/backend/tools/slack/tool.py
@@ -68,6 +68,14 @@ async def call(self, parameters: dict, ctx: Any, **kwargs: Any) -> List[Dict[str
         # Search Slack
         slack_service = get_slack_service(user_id=user_id, search_limit=SEARCH_LIMIT)
 
-        all_results = slack_service.search_all(query=query)
-        return slack_service.serialize_results(all_results)
+        try:
+            all_results = slack_service.search_all(query=query)
+            results = slack_service.serialize_results(all_results)
+        except Exception as e:
+            return self.get_tool_error(details=str(e))
+
+        if not results:
+            return self.get_no_results_error()
+
+        return results
diff --git a/src/backend/tools/tavily_search.py b/src/backend/tools/tavily_search.py
index b8746c74c0..24ef7f3f94 100644
--- a/src/backend/tools/tavily_search.py
+++ b/src/backend/tools/tavily_search.py
@@ -58,7 +58,7 @@ async def call(
             )
         except Exception as e:
             logger.error(f"Failed to perform Tavily web search: {str(e)}")
-            raise Exception(f"Failed to perform Tavily web search: {str(e)}")
+            return self.get_tool_error(details=str(e))
 
         results = result.get("results", [])
 
diff --git a/src/community/tools/arxiv.py b/src/community/tools/arxiv.py
index ce5cfac71c..8293d8b681 100644
--- a/src/community/tools/arxiv.py
+++ b/src/community/tools/arxiv.py
@@ -38,5 +38,12 @@ def get_tool_definition(cls) -> ToolDefinition:
 
     async def call(self, parameters: dict, **kwargs: Any) -> List[Dict[str, Any]]:
         query = parameters.get("query", "")
-        result = self.client.run(query)
+        try:
+            result = self.client.run(query)
+        except Exception as e:
+            return self.get_tool_error(details=str(e))
+
+        if not result:
+            return self.get_no_results_error()
+
         return [{"text": result}]
diff --git a/src/community/tools/clinicaltrials.py b/src/community/tools/clinicaltrials.py
index 3db15271ac..e9c6c4fd16 100644
--- a/src/community/tools/clinicaltrials.py
+++ b/src/community/tools/clinicaltrials.py
@@ -75,9 +75,13 @@ async def call(
             response = requests.get(self._url, params=query_params)
             response.raise_for_status()
         except requests.exceptions.RequestException as e:
-            return [{"text": f"Could not retrieve studies: {str(e)}"}]
+            return self.get_tool_error(details=str(e))
 
-        return self._parse_response(response, location, intervention)
+        results = self._parse_response(response, location, intervention)
+        if not results:
+            return self.get_no_results_error()
+
+        return results
 
     def _parse_response(
         self, response: requests.Response, location: str, intervention: str
diff --git a/src/community/tools/connector.py b/src/community/tools/connector.py
index b19445ddad..a8edfa549b 100644
--- a/src/community/tools/connector.py
+++ b/src/community/tools/connector.py
@@ -47,7 +47,13 @@ async def call(self, parameters: dict, **kwargs: Any) -> List[Dict[str, Any]]:
             "Content-Type": "application/json",
             "Authorization": f"Bearer {self.api_key}",
         }
+        try:
+            response = requests.get(self.url, json=body, headers=headers)
+            results = response.json()["results"]
+        except Exception as e:
+            return self.get_tool_error(details=str(e))
 
-        response = requests.get(self.url, json=body, headers=headers)
+        if not results:
+            return self.get_no_results_error()
 
-        return response.json()["results"]
+        return results
diff --git a/src/community/tools/llama_index.py b/src/community/tools/llama_index.py
index aafdc1b491..d2dfa2f601 100644
--- a/src/community/tools/llama_index.py
+++ b/src/community/tools/llama_index.py
@@ -87,35 +87,34 @@ async def call(
         file_ids = [file_id for _, file_id in files]
         retrieved_files = file_crud.get_files_by_ids(session, file_ids, user_id)
         if not retrieved_files:
-            return []
+            return self.get_no_results_error()
 
-        all_results = []
         file_str_list = []
         for file in retrieved_files:
             file_str_list.append(file.file_content)
-            all_results.append(
-                {
-                    "text": file.file_content,
-                    "title": file.file_name,
-                    "url": file.file_name,
-                }
-            )
 
         # LLamaIndex get documents from parsed PDFs, split it into sentences, embed, index and retrieve
-        docs = StringIterableReader().load_data(file_str_list)
-        node_parser = SentenceSplitter(chunk_size=LlamaIndexUploadPDFRetriever.CHUNK_SIZE)
-        nodes = node_parser.get_nodes_from_documents(docs)
-        embed_model = self._get_embedding("search_document")
-        vector_index = VectorStoreIndex(
-            nodes,
-            embed_model=embed_model,
-        )
-        embed_model_query = self._get_embedding("search_query")
-        retriever = vector_index.as_retriever(
-            similarity_top_k=10,
-            embed_model=embed_model_query,
-        )
-        results = retriever.retrieve(query)
-        llama_results = [{"text": doc.text} for doc in results]
+        try:
+            docs = StringIterableReader().load_data(file_str_list)
+            node_parser = SentenceSplitter(chunk_size=LlamaIndexUploadPDFRetriever.CHUNK_SIZE)
+            nodes = node_parser.get_nodes_from_documents(docs)
+            embed_model = self._get_embedding("search_document")
+            vector_index = VectorStoreIndex(
+                nodes,
+                embed_model=embed_model,
+            )
+            embed_model_query = self._get_embedding("search_query")
+            retriever = vector_index.as_retriever(
+                similarity_top_k=10,
+                embed_model=embed_model_query,
+            )
+            results = retriever.retrieve(query)
+            llama_results = [{"text": doc.text} for doc in results]
+        except Exception as e:
+            return self.get_tool_error(details=str(e))
+
+        if not llama_results and not docs:
+            return self.get_no_results_error()
+
         # If llama results are found, return them
         if llama_results:
             return llama_results
diff --git a/src/community/tools/pub_med.py b/src/community/tools/pub_med.py
index 6968e57ea3..7680194fb0 100644
--- a/src/community/tools/pub_med.py
+++ b/src/community/tools/pub_med.py
@@ -38,5 +38,11 @@ def get_tool_definition(cls) -> ToolDefinition:
 
     async def call(self, parameters: dict, **kwargs: Any) -> List[Dict[str, Any]]:
         query = parameters.get("query", "")
-        result = self.client.invoke(query)
+        try:
+            result = self.client.invoke(query)
+        except Exception as e:
+            return self.get_tool_error(details=str(e))
+        if not result:
+            return self.get_no_results_error()
+
         return [{"text": result}]
diff --git a/src/community/tools/wolfram.py b/src/community/tools/wolfram.py
index dc098e77ed..dc4c27e22a 100644
--- a/src/community/tools/wolfram.py
+++ b/src/community/tools/wolfram.py
@@ -41,5 +41,12 @@ def get_tool_definition(cls) -> ToolDefinition:
 
     async def call(self, parameters: dict, **kwargs: Any) -> List[Dict[str, Any]]:
         to_evaluate = parameters.get("expression", "")
-        result = self.tool.run(to_evaluate)
+        try:
+            result = self.tool.run(to_evaluate)
+        except Exception as e:
+            return self.get_tool_error(details=str(e))
+
+        if not result:
+            return self.get_no_results_error()
+
         return {"result": result, "text": result}
diff --git a/src/interfaces/assistants_web/src/components/MessageRow/ToolEvents.tsx b/src/interfaces/assistants_web/src/components/MessageRow/ToolEvents.tsx
index 2a719184c8..3d65dcd722 100644
--- a/src/interfaces/assistants_web/src/components/MessageRow/ToolEvents.tsx
+++ b/src/interfaces/assistants_web/src/components/MessageRow/ToolEvents.tsx
@@ -5,7 +5,7 @@ import { Fragment, PropsWithChildren } from 'react';
 
 import { StreamSearchResults, StreamToolCallsGeneration, ToolCall } from '@/cohere-client';
 import { Markdown } from '@/components/Markdown';
-import { Icon, IconName, Text } from '@/components/UI';
+import { Icon, IconButton, IconName, Text, Tooltip } from '@/components/UI';
 import {
   TOOL_CALCULATOR_ID,
   TOOL_GOOGLE_DRIVE_ID,
@@ -21,6 +21,13 @@ type Props = {
   events: StreamToolCallsGeneration[] | undefined;
 };
 
+const hasToolErrorsDocuments = (search_results: StreamSearchResults | null) => {
+  return search_results?.documents?.some((document) => document.fields?.success === 'false');
+};
+
+const getErrorDocumentsFromEvent = (search_results: StreamSearchResults | null) =>
+  search_results?.documents?.filter((document) => document.fields?.success === 'false') || [];
+
 /**
  * @description Renders a list of events depending on the model's plan and tool inputs.
  */
@@ -74,6 +81,7 @@ const ToolEvent: React.FC = ({ plan, event, stream_search_result
   if (plan) {
     return {plan};
   }
+  const toolName = event?.name || '';
 
   if (stream_search_results) {
     const artifacts =
@@ -85,7 +93,16 @@ const ToolEvent: React.FC = ({ plan, event, stream_search_result
         .filter((value, index, self) => index === self.findIndex((t) => t.title === value.title)) ||
       [];
 
-    return (
+    const hasErrorsDocuments = hasToolErrorsDocuments(stream_search_results);
+    const errorDocuments = getErrorDocumentsFromEvent(stream_search_results);
+
+    return hasErrorsDocuments ? (
+
+        {errorDocuments[errorDocuments.length - 1].text}
+
+    ) : toolName && toolName != TOOL_PYTHON_INTERPRETER_ID ? (
 
         {artifacts.length > 0 ? (
           <>
@@ -108,10 +125,9 @@ const ToolEvent: React.FC = ({ plan, event, stream_search_result
             <>No resources found.
         )}
 
-    );
+    ) : null;
   }
 
-  const toolName = event?.name || '';
   const icon = getToolIcon(toolName);
 
   switch (toolName) {
@@ -193,6 +209,25 @@ const ToolEventWrapper: React.FC> = ({
   );
 };
 
+const ToolErrorWrapper: React.FC> = ({
+  tooltip = 'Some error occurred',
+  children,
+}) => {
+  return (
+
+
+
+        {children}
+
+
+  );
+};
+
 const truncateString = (str: string, max_length: number = 50) => {
   return str.length < max_length ? str : str.substring(0, max_length) + '...';
 };
diff --git a/src/interfaces/assistants_web/src/hooks/use-chat.ts b/src/interfaces/assistants_web/src/hooks/use-chat.ts
index 3fdf47afee..e1ed63964d 100644
--- a/src/interfaces/assistants_web/src/hooks/use-chat.ts
+++ b/src/interfaces/assistants_web/src/hooks/use-chat.ts
@@ -260,19 +260,13 @@ export const useChat = (config?: { onSend?: (msg: string) => void }) => {
           outputFiles = { ...outputFiles, ...newOutputFilesMap };
           saveOutputFiles({ ...savedOutputFiles, ...outputFiles });
 
-          // we are only interested in web_search results
-          // ignore search results of pyhton interpreter tool
-          if (
-            toolEvents[currentToolEventIndex - 1]?.tool_calls?.[0]?.name !==
-            TOOL_PYTHON_INTERPRETER_ID
-          ) {
-            toolEvents.push({
-              text: '',
-              stream_search_results: data,
-              tool_calls: [],
-            } as StreamToolCallsGeneration);
-            currentToolEventIndex += 1;
-          }
+          toolEvents.push({
+            text: '',
+            stream_search_results: data,
+            tool_calls: [],
+          } as StreamToolCallsGeneration);
+          currentToolEventIndex += 1;
+          break;
         }
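
Usage note (not part of the patch): the backend hunks above converge on one error-reporting pattern for tools — wrap the external call in try/except, return self.get_tool_error(details=...) on failure, and return self.get_no_results_error() when the call succeeds but yields nothing. The sketch below illustrates that pattern for a hypothetical new tool; ExampleSearch, its ID, and _search_backend are illustrative names only, other BaseTool requirements (such as get_tool_definition) are omitted, and the import path is the one used elsewhere in this patch.

    from typing import Any, Dict, List

    from backend.tools.base import BaseTool


    class ExampleSearch(BaseTool):
        """Hypothetical tool showing the error-handling pattern introduced by this patch."""

        ID = "example_search"

        async def call(self, parameters: dict, ctx: Any, **kwargs: Any) -> List[Dict[str, Any]]:
            query = parameters.get("query", "")

            try:
                # _search_backend stands in for whatever client or SDK the tool wraps.
                hits = await _search_backend(query)
            except Exception as e:
                # Failures surface as a structured ToolError payload instead of a raised exception.
                return self.get_tool_error(details=str(e))

            if not hits:
                # Empty results get their own structured "No results found." payload.
                return self.get_no_results_error()

            return [{"text": hit} for hit in hits]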