LangChain Async¶
This notebook demonstrates how to monitor a LangChain async app. Note that this notebook does not demonstrate streaming; see langchain_stream.ipynb for that.
Import from LangChain and TruLens¶
In [ ]:
Copied!
# !pip install trulens trulens.apps.langchain trulens-providers-huggingface 'langchain>=0.2.16' 'langchain-openai>=0.0.1rc0'
# !pip install trulens trulens.apps.langchain trulens-providers-huggingface 'langchain>=0.2.16' 'langchain-openai>=0.0.1rc0'
In [ ]:
Copied!
from langchain.prompts import PromptTemplate
from langchain_core.runnables.history import RunnableWithMessageHistory
from langchain_openai import ChatOpenAI, OpenAI
from trulens.core import Feedback, TruSession
from trulens.providers.huggingface import Huggingface
from langchain_community.chat_message_histories import ChatMessageHistory
from langchain.prompts import PromptTemplate
from langchain_core.runnables.history import RunnableWithMessageHistory
from langchain_openai import ChatOpenAI, OpenAI
from trulens.core import Feedback, TruSession
from trulens.providers.huggingface import Huggingface
from langchain_community.chat_message_histories import ChatMessageHistory
In [ ]:
Copied!
import os

# Provider credentials for Hugging Face and OpenAI.
# Replace the placeholder values with your real API keys before running.
for var, placeholder in [
    ("HUGGINGFACE_API_KEY", "hf_..."),
    ("OPENAI_API_KEY", "sk-..."),
]:
    os.environ[var] = placeholder
Create Async Application¶
In [ ]:
Copied!
# LLMs used by the app (temperature 0 for reproducible small talk).
chatllm = ChatOpenAI(
    temperature=0.0,
)
llm = OpenAI(
    temperature=0.0,
)

# In-memory message store backing the conversation history.
memory = ChatMessageHistory()

# Simple question/answer prompt. {chat_history} is filled in by
# RunnableWithMessageHistory; {human_input} comes from the caller.
prompt = PromptTemplate(
    input_variables=["human_input", "chat_history"],
    template="""
You are having a conversation with a person. Make small talk.
{chat_history}
Human: {human_input}
AI:""",
)

chain = RunnableWithMessageHistory(
    prompt | chatllm,
    # The history factory must accept a session_id argument: the original
    # zero-argument `lambda: memory` raises TypeError at invocation time.
    lambda session_id: memory,
    # Must name the prompt's input variable ("human_input"); the original
    # value "input" does not exist in the prompt, so the user's message
    # would never reach the model.
    input_messages_key="human_input",
    history_messages_key="chat_history",
)
Set up a language match feedback function.¶
In [ ]:
Copied!
# Start a TruSession on a freshly reset database so results are clean.
session = TruSession()
session.reset_database()

# Language-match feedback: scores whether the app's output is in the
# same language as its input, using a Hugging Face model as the judge.
provider = Huggingface()
f_lang_match = Feedback(provider.language_match).on_input_output()
Set up evaluation and tracking with TruLens¶
In [ ]:
Copied!
from trulens.apps.langchain import TruChain
from trulens.core.instruments import instrument

# Instrument PromptTemplate.format so the filled-in prompt templates
# also show up in the record timeline.
instrument.method(PromptTemplate, "format")

# Wrap the chain for recording, attaching the language-match feedback.
tc = TruChain(chain, feedbacks=[f_lang_match], app_name="chat_with_memory")
In [ ]:
Copied!
# Print every component and method TruLens instrumented on the wrapped
# chain — useful to confirm the PromptTemplate.format instrumentation
# registered above actually took effect.
tc.print_instrumented()
tc.print_instrumented()
Start the TruLens dashboard¶
In [ ]:
Copied!
# Launch the TruLens dashboard against this session's database so
# records and feedback can be inspected as they arrive.
from trulens.dashboard import run_dashboard
run_dashboard(session)
from trulens.dashboard import run_dashboard
run_dashboard(session)
Use the application¶
In [ ]:
Copied!
message = "Hi. How are you?"
async with tc as recording:
response = await chain.ainvoke(
input=dict(human_input=message, chat_history=[]),
)
record = recording.get()
message = "Hi. How are you?"
async with tc as recording:
response = await chain.ainvoke(
input=dict(human_input=message, chat_history=[]),
)
record = recording.get()
In [ ]:
Copied!
# Check the main output:
# `main_output` is the response text TruLens extracted from the record.
record.main_output
# Check the main output:
record.main_output
In [ ]:
Copied!
# Check costs:
# Token/cost accounting TruLens tracked while recording this call.
record.cost
# Check costs:
record.cost
In [ ]:
Copied!
# Check feedback:
# NOTE(review): `.result()` appears to wait, Future-style, for the
# language-match evaluation to finish — confirm against TruLens docs.
record.feedback_results[0].result()
# Check feedback:
record.feedback_results[0].result()