
Commit af9ad3c

feat: show document artifact after generating report (#658)
* feat: show document artifact after generating report
* keep chat message content as it is
* use artifactEvent from server
* add deep research example
* bump chat-ui for new editor
* import editor css
* hide warning for workflowEvent<{}>() in eject mode
* fix format
* use CL for better testing
* generate artifact after streaming report in Python
* bump chat-ui to support citations
* use isinstance to check stream
* fix document editor spacing
* Create tame-wolves-obey.md
* add sources to document artifact
* add sources to document artifact in python
* type cast
* no need score
* fix lint
* move handle stream logic to server
* refactor: use chunk.text and chunk.raw
* bump chat-ui 0.5.6 to fix citations
* update changeset
* fix lock
1 parent 1ff6eaf commit af9ad3c
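For orientation: once the report finishes streaming, the workflow now emits a document artifact event that the chat UI opens in its document editor (Canvas). A sketch of the payload shape, assembled from the diffs below — field names come from the diffs, but the outer wire envelope is an assumption, not the verified serialization:

```python
# Sketch of the document artifact event payload (field names from the diffs
# below; the outer envelope is an assumption).
artifact_event = {
    "type": "artifact",
    "data": {
        "type": "document",
        "created_at": 1700000000,  # int(time.time()) in Python, Date.now() in TS
        "data": {
            "title": "DeepResearch Report",
            "type": "markdown",
            "content": "# Report\n...",
            "sources": [{"id": "<context-node-id>"}],  # drives citation numbers
        },
    },
}
```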

File tree: 9 files changed (+122, −5 lines)


.changeset/tame-wolves-obey.md

Lines changed: 7 additions & 0 deletions
```diff
@@ -0,0 +1,7 @@
+---
+"create-llama": patch
+"@llamaindex/server": patch
+"@create-llama/llama-index-server": patch
+---
+
+feat: show document artifact after generating report
```

packages/create-llama/templates/components/use-cases/python/deep_research/workflow.py

Lines changed: 37 additions & 3 deletions
```diff
@@ -1,7 +1,7 @@
 import logging
 import os
 import uuid
-from typing import List, Literal, Optional
+from typing import List, Literal, Optional, AsyncGenerator
 
 from app.index import get_index
 from llama_index.core.base.llms.types import (
@@ -23,7 +23,18 @@
     Workflow,
     step,
 )
-from llama_index.server.api.models import ChatRequest, SourceNodesEvent, UIEvent
+from llama_index.server.api.models import (
+    ArtifactEvent,
+    ArtifactType,
+    ChatRequest,
+    SourceNodesEvent,
+    UIEvent,
+    Artifact,
+    DocumentArtifactData,
+    DocumentArtifactSource,
+)
+import time
+from llama_index.server.utils.stream import write_response_to_stream
 from pydantic import BaseModel, Field
 
 logger = logging.getLogger("uvicorn")
@@ -365,8 +376,31 @@ async def report(self, ctx: Context, ev: ReportEvent) -> StopEvent:
             user_request=self.user_request,
             stream=self.stream,
         )
+
+        final_response = await write_response_to_stream(res, ctx)
+
+        ctx.write_event_to_stream(
+            ArtifactEvent(
+                data=Artifact(
+                    type=ArtifactType.DOCUMENT,
+                    created_at=int(time.time()),
+                    data=DocumentArtifactData(
+                        title="DeepResearch Report",
+                        content=final_response,
+                        type="markdown",
+                        sources=[
+                            DocumentArtifactSource(
+                                id=node.id_,
+                            )
+                            for node in self.context_nodes
+                        ],
+                    ),
+                ),
+            )
+        )
+
         return StopEvent(
-            result=res,
+            result="",
         )
```
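The step now returns `StopEvent(result="")` because the report text has already been delivered twice over: streamed as `AgentStream` deltas and attached to the artifact. The same artifact can be built and inspected standalone — a minimal sketch using the models imported above (`model_dump()` is the standard pydantic v2 serializer; the sample node id is hypothetical):

```python
import time

from llama_index.server.api.models import (
    Artifact,
    ArtifactType,
    DocumentArtifactData,
    DocumentArtifactSource,
)

# "node-abc123" is a placeholder for a real context node id.
artifact = Artifact(
    type=ArtifactType.DOCUMENT,
    created_at=int(time.time()),
    data=DocumentArtifactData(
        title="DeepResearch Report",
        content="# Findings\n...",
        type="markdown",
        sources=[DocumentArtifactSource(id="node-abc123")],
    ),
)
print(artifact.model_dump())  # the JSON-ready dict sent to the UI
```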

packages/create-llama/templates/components/use-cases/typescript/deep_research/src/app/workflow.ts

Lines changed: 21 additions & 1 deletion
```diff
@@ -1,4 +1,4 @@
-import { toSourceEvent } from "@llamaindex/server";
+import { artifactEvent, toSourceEvent } from "@llamaindex/server";
 import {
   agentStreamEvent,
   createStatefulMiddleware,
@@ -339,6 +339,26 @@ export function getWorkflow(index: VectorStoreIndex | LlamaCloudIndex) {
       }),
     );
   }
+
+  // Open the generated report in Canvas
+  sendEvent(
+    artifactEvent.with({
+      type: "artifact",
+      data: {
+        type: "document",
+        created_at: Date.now(),
+        data: {
+          title: "DeepResearch Report",
+          content: response,
+          type: "markdown",
+          sources: state.contextNodes.map((node) => ({
+            id: node.node.id_,
+          })),
+        },
+      },
+    }),
+  );
+
   return stopAgentEvent.with({
     result: response,
   });
```

packages/server/next/app/layout.tsx

Lines changed: 1 addition & 0 deletions
```diff
@@ -1,6 +1,7 @@
 import type { Metadata } from "next";
 import { Inter } from "next/font/google";
 
+import "@llamaindex/chat-ui/styles/editor.css";
 import "@llamaindex/chat-ui/styles/markdown.css";
 import "@llamaindex/chat-ui/styles/pdf.css";
 import "./globals.css";
```

packages/server/project-config/eslint.config.mjs

Lines changed: 1 addition & 0 deletions
```diff
@@ -18,6 +18,7 @@ const eslintConfig = [
       "react-hooks/exhaustive-deps": "off",
       "@next/next/no-img-element": "off",
       "@next/next/no-assign-module-variable": "off",
+      "@typescript-eslint/no-empty-object-type": "off",
     },
   },
   {
```

packages/server/src/utils/events.ts

Lines changed: 1 addition & 0 deletions
```diff
@@ -110,6 +110,7 @@ export type DocumentArtifactData = {
   title: string;
   content: string;
   type: string; // markdown, html,...
+  sources?: { id: string }[]; // sources that are used to render citation numbers in the document
 };
 
 export type CodeArtifact = Artifact<CodeArtifactData> & {
```

python/llama-index-server/llama_index/server/models/__init__.py

Lines changed: 2 additions & 0 deletions
```diff
@@ -4,6 +4,7 @@
     ArtifactType,
     CodeArtifactData,
     DocumentArtifactData,
+    DocumentArtifactSource,
 )
 from llama_index.server.models.chat import ChatAPIMessage, ChatRequest
 from llama_index.server.models.hitl import HumanInputEvent, HumanResponseEvent
@@ -20,6 +21,7 @@
     "ArtifactEvent",
     "ArtifactType",
     "DocumentArtifactData",
+    "DocumentArtifactSource",
     "CodeArtifactData",
     "ChatAPIMessage",
     "ChatRequest",
```

python/llama-index-server/llama_index/server/models/artifacts.py

Lines changed: 7 additions & 1 deletion
```diff
@@ -1,6 +1,6 @@
 import logging
 from enum import Enum
-from typing import Literal, Optional, Union
+from typing import List, Literal, Optional, Union
 
 from llama_index.core.workflow.events import Event
 from llama_index.server.models.chat import ChatAPIMessage
@@ -21,10 +21,16 @@ class CodeArtifactData(BaseModel):
     language: str
 
 
+class DocumentArtifactSource(BaseModel):
+    id: str
+    # we can add more fields here
+
+
 class DocumentArtifactData(BaseModel):
     title: str
     content: str
     type: Literal["markdown", "html"]
+    sources: Optional[List[DocumentArtifactSource]] = None
 
 
 class Artifact(BaseModel):
```
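Since `sources` is optional with a `None` default, document artifacts created before this change keep validating — a quick sketch:

```python
from llama_index.server.models.artifacts import (
    DocumentArtifactData,
    DocumentArtifactSource,
)

# Backwards compatible: omitting sources leaves it as None.
doc = DocumentArtifactData(title="Notes", content="...", type="markdown")
assert doc.sources is None

# With citations: one DocumentArtifactSource per context node.
doc = DocumentArtifactData(
    title="DeepResearch Report",
    content="...",
    type="markdown",
    sources=[
        DocumentArtifactSource(id="node-1"),
        DocumentArtifactSource(id="node-2"),
    ],
)
```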
python/llama-index-server/llama_index/server/utils/stream.py

Lines changed: 45 additions & 0 deletions

```diff
@@ -0,0 +1,45 @@
+from typing import AsyncGenerator, Union
+from llama_index.core.base.llms.types import (
+    CompletionResponse,
+    CompletionResponseAsyncGen,
+)
+from llama_index.core.workflow import Context
+from llama_index.core.agent.workflow.workflow_events import AgentStream
+
+
+async def write_response_to_stream(
+    res: Union[CompletionResponse, CompletionResponseAsyncGen],
+    ctx: Context,
+    current_agent_name: str = "assistant",
+) -> str:
+    """
+    Handle both streaming and non-streaming LLM responses.
+
+    Args:
+        res: The LLM response (either streaming or non-streaming)
+        ctx: The workflow context for writing events to stream
+        current_agent_name: The name of the current agent (default: "assistant")
+
+    Returns:
+        The final response text as a string
+    """
+    final_response = ""
+
+    if isinstance(res, AsyncGenerator):
+        # Handle streaming response (CompletionResponseAsyncGen)
+        async for chunk in res:
+            ctx.write_event_to_stream(
+                AgentStream(
+                    delta=chunk.delta or "",
+                    response=final_response,
+                    current_agent_name=current_agent_name,
+                    tool_calls=[],
+                    raw=chunk.raw or "",
+                )
+            )
+            final_response = chunk.text
+    else:
+        # Handle non-streaming response (CompletionResponse)
+        final_response = res.text
+
+    return final_response
```
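A hedged usage sketch of the helper inside a workflow step — assumes a configured `Settings.llm`. `astream_complete` returns a `CompletionResponseAsyncGen`, which the `isinstance(res, AsyncGenerator)` branch handles; a plain `acomplete` result would take the non-streaming branch:

```python
from llama_index.core import Settings
from llama_index.core.workflow import (
    Context,
    StartEvent,
    StopEvent,
    Workflow,
    step,
)
from llama_index.server.utils.stream import write_response_to_stream


class ReportWorkflow(Workflow):
    @step
    async def report(self, ctx: Context, ev: StartEvent) -> StopEvent:
        # Streaming path: deltas are forwarded to the UI as AgentStream events.
        res = await Settings.llm.astream_complete("Summarize the findings.")
        final_text = await write_response_to_stream(res, ctx)
        return StopEvent(result=final_text)
```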
