📓 LangChain Quickstart
In this quickstart you will create a simple LCEL chain, learn how to log it, and get feedback on an LLM response.
For evaluation, we will leverage the RAG triad of groundedness, context relevance, and answer relevance.
You'll also learn how to use feedback functions as guardrails by filtering retrieved context.
# !pip install trulens trulens-apps-langchain trulens-providers-openai openai langchain langchainhub langchain-openai langchain_community faiss-cpu bs4 tiktoken
import os
os.environ["OPENAI_API_KEY"] = "sk-..."
Import from LangChain and TruLens
# Imports main tools:
from trulens.apps.langchain import TruChain
from trulens.core import TruSession
session = TruSession()
session.reset_database()
# Imports from LangChain to build app
import bs4
from langchain import hub
from langchain_community.document_loaders import WebBaseLoader
from langchain_core.output_parsers import StrOutputParser
from langchain_openai import ChatOpenAI
from langchain_core.runnables import RunnablePassthrough
Load documents
loader = WebBaseLoader(
web_paths=("https://lilianweng.github.io/posts/2023-06-23-agent/",),
bs_kwargs=dict(
parse_only=bs4.SoupStrainer(
class_=("post-content", "post-title", "post-header")
)
),
)
docs = loader.load()
Create Vector Store
from langchain_community.vectorstores import FAISS
from langchain_openai import OpenAIEmbeddings
from langchain_text_splitters import RecursiveCharacterTextSplitter
embeddings = OpenAIEmbeddings()
text_splitter = RecursiveCharacterTextSplitter()
documents = text_splitter.split_documents(docs)
vectorstore = FAISS.from_documents(documents, embeddings)
Create RAG
retriever = vectorstore.as_retriever()
prompt = hub.pull("rlm/rag-prompt")
llm = ChatOpenAI(model="gpt-3.5-turbo", temperature=0)
def format_docs(docs):
return "\n\n".join(doc.page_content for doc in docs)
rag_chain = (
{"context": retriever | format_docs, "question": RunnablePassthrough()}
| prompt
| llm
| StrOutputParser()
)
Send your first request
rag_chain.invoke("What is Task Decomposition?")
Initialize Feedback Function(s)
import numpy as np
from trulens.core import Feedback
from trulens.providers.openai import OpenAI
# Initialize provider class
provider = OpenAI()
# Select the context to be used in feedback. The location of context is app-specific.
context = TruChain.select_context(rag_chain)
# Define a groundedness feedback function
f_groundedness = (
Feedback(
provider.groundedness_measure_with_cot_reasons, name="Groundedness"
)
.on(context.collect()) # collect context chunks into a list
.on_output()
)
# Question/answer relevance between overall question and answer.
f_answer_relevance = Feedback(
provider.relevance_with_cot_reasons, name="Answer Relevance"
).on_input_output()
# Context relevance between question and each context chunk.
f_context_relevance = (
Feedback(
provider.context_relevance_with_cot_reasons, name="Context Relevance"
)
.on_input()
.on(context)
.aggregate(np.mean)
)
Instrument chain for logging with TruLens
tru_recorder = TruChain(
rag_chain,
app_name="ChatApplication",
app_version="Chain1",
feedbacks=[f_answer_relevance, f_context_relevance, f_groundedness],
)
with tru_recorder as recording:
llm_response = rag_chain.invoke("What is Task Decomposition?")
display(llm_response)
Check results
session.get_leaderboard()
By looking closer at context relevance, we see that our retriever is returning irrelevant context.
from trulens.dashboard.display import get_feedback_result
last_record = recording.records[-1]
get_feedback_result(last_record, "Context Relevance")
Use guardrails
In addition to informing iteration, we can also use feedback results directly as guardrails at inference time. In particular, here we show how to use the context relevance score as a guardrail to filter out irrelevant context before it gets passed to the LLM. This both reduces hallucination and improves efficiency.
In the feedback display, you can see the context relevance score for each chunk retrieved by our RAG.
Wouldn't it be great if we could automatically filter out context chunks whose relevance score falls below a chosen threshold?
We can do so with the TruLens guardrail WithFeedbackFilterDocuments. All we have to do is call its of_retriever method to create a new, filtered retriever, passing in the original retriever along with the feedback function and threshold we want to use.
from trulens.apps.langchain import WithFeedbackFilterDocuments
# Note: the feedback function used for a guardrail must return only a score, not reasons.
f_context_relevance_score = Feedback(provider.context_relevance)
filtered_retriever = WithFeedbackFilterDocuments.of_retriever(
retriever=retriever, feedback=f_context_relevance_score, threshold=0.75
)
rag_chain = (
{
"context": filtered_retriever | format_docs,
"question": RunnablePassthrough(),
}
| prompt
| llm
| StrOutputParser()
)
Then we can operate as normal:
tru_recorder = TruChain(
rag_chain,
app_name="ChatApplication_Filtered",
app_version="Chain1",
feedbacks=[f_answer_relevance, f_context_relevance, f_groundedness],
)
with tru_recorder as recording:
llm_response = rag_chain.invoke("What is Task Decomposition?")
display(llm_response)
See the power of context filters!
If we inspect the context relevance of our retrieval now, we see only relevant context chunks!
from trulens.dashboard.display import get_feedback_result
last_record = recording.records[-1]
get_feedback_result(last_record, "Context Relevance")
from trulens.dashboard import run_dashboard
run_dashboard(session)
Retrieve records and feedback
# The record of the app invocation can be retrieved from the `recording`:
rec = recording.get() # use .get if only one record
# recs = recording.records # use .records if multiple
display(rec)
# The results of the feedback functions can be retrieved from
# `Record.feedback_results` or using the `wait_for_feedback_results` method. The
# results if retrieved directly are `Future` instances (see
# `concurrent.futures`). You can use `as_completed` to wait until they have
# finished evaluating or use the utility method:
for feedback, feedback_result in rec.wait_for_feedback_results().items():
print(feedback.name, feedback_result.result)
# See more about wait_for_feedback_results:
# help(rec.wait_for_feedback_results)
records, feedback = session.get_records_and_feedback()
records.head()
session.get_leaderboard()
Explore in a Dashboard
run_dashboard(session) # open a local streamlit app to explore
# stop_dashboard(session) # stop if needed
Learn more about the call stack
json_like = last_record.layout_calls_as_app()
json_like
from ipytree import Node
from ipytree import Tree
def display_call_stack(data):
tree = Tree()
tree.add_node(Node("Record ID: {}".format(data["record_id"])))
tree.add_node(Node("App ID: {}".format(data["app_id"])))
tree.add_node(Node("Cost: {}".format(data["cost"])))
tree.add_node(Node("Performance: {}".format(data["perf"])))
tree.add_node(Node("Timestamp: {}".format(data["ts"])))
tree.add_node(Node("Tags: {}".format(data["tags"])))
tree.add_node(Node("Main Input: {}".format(data["main_input"])))
tree.add_node(Node("Main Output: {}".format(data["main_output"])))
tree.add_node(Node("Main Error: {}".format(data["main_error"])))
calls_node = Node("Calls")
tree.add_node(calls_node)
for call in data["calls"]:
call_node = Node("Call")
calls_node.add_node(call_node)
for step in call["stack"]:
step_node = Node("Step: {}".format(step["path"]))
call_node.add_node(step_node)
if "expanded" in step:
expanded_node = Node("Expanded")
step_node.add_node(expanded_node)
for expanded_step in step["expanded"]:
expanded_step_node = Node(
"Step: {}".format(expanded_step["path"])
)
expanded_node.add_node(expanded_step_node)
return tree
# Usage
tree = display_call_stack(json_like)
tree