This repository was archived by the owner on Aug 5, 2025. It is now read-only.

Commit fa9fc2e

Merge pull request #63 from Chainlit/damien/eng-1724-integrations-change-how-we-handle-metadatatags
feat(wrappers): add decoration wrapper
2 parents 56e554f + 05bb5e8 commit fa9fc2e

26 files changed: +869 −194 lines

.gitignore

Lines changed: 0 additions & 1 deletion
@@ -15,7 +15,6 @@ dist-ssr
 *.local

 .env
-examples

 # Editor directories and files
 .vscode/*

examples/openai.ts

Lines changed: 141 additions & 0 deletions
@@ -0,0 +1,141 @@
import 'dotenv/config';
import OpenAI from 'openai';

import { LiteralClient } from '../src';

const literalClient = new LiteralClient();

const openai = new OpenAI();

// Instrument the OpenAI client
literalClient.instrumentation.openai();

async function main() {
  // This will be automagically logged by Literal AI
  await openai.chat.completions.create({
    model: 'gpt-4',
    messages: [{ role: 'user', content: 'Say this is a test' }]
  });

  await openai.chat.completions.create({
    model: 'gpt-4',
    messages: [
      {
        role: 'user',
        content:
          'Write a README document in markdown explaining the basic usage of the `with-csv` npm library.'
      }
    ]
  });

  await literalClient
    .run({
      name: 'Test run',
      input: {
        content:
          '{"question": ["how to code chat with pdf and use pdfplumber and ollama local",[]],"images_content": {}}'
      },
      output: {
        content: `To create a chat application that interacts with PDF files using \`pdfplumber\` and the Ollama local model in Chainlit, you can follow the example below. This example demonstrates how to upload a PDF, extract text using \`pdfplumber\`, and allow users to ask questions about its content using the Ollama model.

### Step 1: Install Required Packages
Make sure you have the necessary packages installed:
\`\`\`bash
pip install chainlit pdfplumber langchain
\`\`\`

### Step 2: Create the Application Script
Create a Python file named \`app.py\` and add the following code:

\`\`\`python
import os
import pdfplumber
import chainlit as cl
from langchain.llms.ollama import Ollama
from langchain.prompts import ChatPromptTemplate
from langchain.schema import StrOutputParser
from langchain.schema.runnable import Runnable
from langchain.schema.runnable.config import RunnableConfig
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.memory import ChatMessageHistory, ConversationBufferMemory
from langchain.schema import Document

# Initialize the Ollama model
model = Ollama(model="llama2")

@cl.on_chat_start
async def on_chat_start():
    files = await cl.AskFileMessage(
        content="Please upload a PDF file to start asking questions.",
        accept=["application/pdf"],
        max_size_mb=20,
        timeout=180,
    ).send()

    file = files[0]
    docs = process_pdf(file)

    message_history = ChatMessageHistory()
    memory = ConversationBufferMemory(memory_key="chat_history", output_key="answer", chat_memory=message_history, return_messages=True)

    prompt = ChatPromptTemplate.from_messages(
        [
            ("system", "You're a knowledgeable assistant who provides accurate answers based on the PDF content."),
            ("human", "{question}"),
        ]
    )

    runnable = prompt | model | StrOutputParser()
    cl.user_session.set("runnable", runnable)

    await cl.Message(content="You can now ask questions about the PDF!").send()

def process_pdf(file):
    text = ""
    with pdfplumber.open(file.path) as pdf:
        for page in pdf.pages:
            text += page.extract_text() + "\n"

    # Split the text into chunks
    text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=100)
    docs = text_splitter.split_text(text)
    documents = [Document(page_content=chunk) for chunk in docs]
    return documents

@cl.on_message
async def on_message(message: cl.Message):
    runnable = cl.user_session.get("runnable")  # type: Runnable
    msg = cl.Message(content="")

    for chunk in await cl.make_async(runnable.stream)(
        {"question": message.content},
        config=RunnableConfig(callbacks=[cl.LangchainCallbackHandler()]),
    ):
        await msg.stream_token(chunk)

    await msg.send()
\`\`\`

### Step 3: Run the Application
To start the Chainlit application, run the following command in your terminal:
\`\`\`bash
chainlit run app.py
\`\`\`

### Step 4: Interact with the Application
Open your browser and navigate to \`http://localhost:8000\`. You can upload a PDF file and start asking questions about its content.

### Explanation
- **PDF Processing**: The \`process_pdf\` function uses \`pdfplumber\` to extract text from the uploaded PDF file.
- **Text Splitting**: The extracted text is split into manageable chunks using \`RecursiveCharacterTextSplitter\`.
- **Ollama Model**: The Ollama model is used to generate responses based on the extracted text.
- **Conversational Interface**: Users can ask questions, and the model will respond based on the content of the PDF.

This setup allows you to create a conversational interface that can answer questions based on the content of a PDF file using \`pdfplumber\` and the Ollama local model.`
      }
    })
    .send();
}

main();

examples/prompt.ts

Lines changed: 13 additions & 0 deletions
@@ -0,0 +1,13 @@
import 'dotenv/config';

import { LiteralClient } from '../src';

const literalClient = new LiteralClient();

async function main() {
  const prompt = await literalClient.api.getPrompt('');

  console.log(prompt);
}

main();

jest.config.ts

Lines changed: 2 additions & 1 deletion
@@ -4,6 +4,7 @@ const config: Config.InitialOptions = {
   verbose: true,
   transform: {
     '^.+\\.ts?$': 'ts-jest'
-  }
+  },
+  testTimeout: 30_000
 };
 export default config;

package-lock.json

Lines changed: 24 additions & 11 deletions
Some generated files are not rendered by default.

package.json

Lines changed: 1 addition & 0 deletions
@@ -59,6 +59,7 @@
   },
   "peerDependencies": {
     "@ai-sdk/openai": "0.0.x",
+    "@langchain/openai": "^0.2.7",
     "ai": "3.x",
     "langchain": "0.1.x",
     "llamaindex": "0.3.x",

src/api.ts

Lines changed: 21 additions & 21 deletions
@@ -410,7 +410,6 @@ export class API {
         variables: variables
       }
     });
-
     if (response.data.errors) {
       throw new Error(JSON.stringify(response.data.errors));
     }
@@ -851,21 +850,22 @@
    * @returns A Promise resolving to the newly created `Generation` object.
    */
   async createGeneration(generation: Generation) {
-    const mutation = `
-      mutation CreateGeneration($generation: GenerationPayloadInput!) {
-        createGeneration(generation: $generation) {
-          id,
-          type
-        }
-      }
-    `;
-
-    const variables = {
-      generation
-    };
+    const stepId = generation.id;
+    const stepMetadata = generation.metadata;
+    const stepTags = generation.tags;
+
+    delete generation.id;
+
+    const generationAsStep = this.client.step({
+      id: stepId,
+      metadata: stepMetadata,
+      tags: stepTags,
+      generation,
+      name: generation.type ?? '',
+      type: 'llm'
+    });

-    const response = await this.makeGqlCall(mutation, variables);
-    return response.data.createGeneration as PersistedGeneration;
+    return generationAsStep.send();
   }

   /**
@@ -929,13 +929,13 @@
       $metadata: Json,
       $participantId: String,
       $tags: [String!],
-      ) {
+    ) {
     upsertThread(
-        id: $threadId
-        name: $name
-        metadata: $metadata
-        participantId: $participantId
-        tags: $tags
+      id: $threadId
+      name: $name
+      metadata: $metadata
+      participantId: $participantId
+      tags: $tags
     ) {
       ${threadFields}
     }
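
After this rewrite, createGeneration no longer issues a CreateGeneration GraphQL mutation; it wraps the generation in an 'llm' step that carries the generation's id, metadata, and tags, and sends that step. The following is a minimal sketch of a call site under that assumption; the import path, the ChatGeneration constructor fields, and the values shown are illustrative and not part of this commit:

import { ChatGeneration, LiteralClient } from '../src'; // assumes ChatGeneration is exported from the package entry point

const literalClient = new LiteralClient();

async function main() {
  // Illustrative generation: after this commit it is persisted as an 'llm'
  // step (keeping its metadata and tags) rather than via a GraphQL mutation.
  const generation = new ChatGeneration({
    provider: 'openai',
    model: 'gpt-4',
    messages: [{ role: 'user', content: 'Say this is a test' }],
    messageCompletion: { role: 'assistant', content: 'This is a test.' },
    metadata: { source: 'sketch' },
    tags: ['example']
  });

  await literalClient.api.createGeneration(generation);
}

main();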

src/evaluation/experiment-item-run.ts

Lines changed: 4 additions & 1 deletion
@@ -44,7 +44,10 @@ export class ExperimentItemRun extends Step {
           ? currentStore?.rootRun
           : this.type === 'run'
             ? this
-            : null
+            : null,
+        metadata: currentStore?.metadata ?? null,
+        tags: currentStore?.tags ?? null,
+        stepId: currentStore?.stepId ?? null
       },
       async () => {
         try {

src/index.ts

Lines changed: 29 additions & 0 deletions
@@ -18,6 +18,9 @@ type StoredContext = {
   currentStep: Step | null;
   currentExperimentItemRunId?: string | null;
   rootRun: Step | null;
+  metadata: Record<string, any> | null;
+  tags: string[] | null;
+  stepId: string | null;
 };

 /**
@@ -217,4 +220,30 @@ export class LiteralClient {

     return store.rootRun;
   }
+
+  decorate(options: {
+    metadata?: Record<string, any>;
+    tags?: string[];
+    stepId?: string;
+  }) {
+    return {
+      wrap: async <T>(cb: () => T) => {
+        const currentStore = this.store.getStore();
+
+        return this.store.run(
+          {
+            currentThread: currentStore?.currentThread ?? null,
+            currentExperimentItemRunId:
+              currentStore?.currentExperimentItemRunId ?? null,
+            currentStep: currentStore?.currentStep ?? null,
+            rootRun: currentStore?.rootRun ?? null,
+            metadata: options?.metadata ?? null,
+            tags: options?.tags ?? null,
+            stepId: options?.stepId ?? null
+          },
+          () => cb()
+        );
+      }
+    };
+  }
 }
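
With the decorate wrapper added above, anything executed inside wrap() runs in a store that also carries the supplied metadata, tags, and stepId. A hypothetical usage sketch follows; the metadata values are illustrative, and the assumption that logs produced inside wrap() (for example from the instrumented OpenAI client) pick up these fields is based on the PR's intent, not confirmed by this diff:

import 'dotenv/config';
import OpenAI from 'openai';

import { LiteralClient } from '../src';

const literalClient = new LiteralClient();
const openai = new OpenAI();

literalClient.instrumentation.openai();

async function main() {
  // Everything inside wrap() runs in a store carrying this metadata and these
  // tags; the values here are placeholders.
  await literalClient
    .decorate({ metadata: { env: 'staging' }, tags: ['demo'] })
    .wrap(() =>
      openai.chat.completions.create({
        model: 'gpt-4',
        messages: [{ role: 'user', content: 'Say this is a test' }]
      })
    );
}

main();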
