Iterating on LLM Apps with TruLens¶
Our simple RAG often struggles to retrieve enough information from the insurance manual to properly answer the question. The information needed may fall just outside the chunk that is identified and retrieved by our app. Reducing the chunk size and adding "sentence windows" to our retrieval is an advanced RAG technique that can help retrieve more targeted, complete context. Here we try this technique and test its success with TruLens.
# !pip install trulens trulens-apps-llamaindex trulens-providers-openai langchain llama_index llama_hub llmsherpa sentence-transformers sentencepiece
# Set your API keys. If you already have them set as environment variables, you can skip this step.
import os
os.environ["OPENAI_API_KEY"] = "sk-..."
from trulens.core import TruSession
Load data and test set¶
from llama_hub.smart_pdf_loader import SmartPDFLoader
llmsherpa_api_url = "https://readers.llmsherpa.com/api/document/developer/parseDocument?renderFormat=all"
pdf_loader = SmartPDFLoader(llmsherpa_api_url=llmsherpa_api_url)
documents = pdf_loader.load_data(
    "https://www.iii.org/sites/default/files/docs/pdf/Insurance_Handbook_20103.pdf"
)
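SmartPDFLoader uses the llmsherpa service to parse the PDF along its layout (sections, paragraphs, tables). As an optional sanity check, you can peek at what came back; a minimal sketch:
print(len(documents))  # number of layout chunks returned
print(documents[0].text[:200])  # preview of the first chunk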
# Load some questions for evaluation
honest_evals = [
    "What are the typical coverage options for homeowners insurance?",
    "What are the requirements for long term care insurance to start?",
    "Can annuity benefits be passed to beneficiaries?",
    "Are credit scores used to set insurance premiums? If so, how?",
    "Who provides flood insurance?",
    "Can you get flood insurance outside high-risk areas?",
    "How much in losses does fraud account for in property & casualty insurance?",
    "Do pay-as-you-drive insurance policies have an impact on greenhouse gas emissions? How much?",
    "What was the most costly earthquake in US history for insurers?",
    "Does it matter who is at fault to be compensated when injured on the job?",
]
Set up Evaluation¶
import numpy as np
from trulens.core import Feedback
from trulens.apps.llamaindex import TruLlama
from trulens.providers.openai import OpenAI as fOpenAI
session = TruSession()
# start fresh
session.reset_database()
provider = fOpenAI()
context = TruLlama.select_context()
answer_relevance = Feedback(
    provider.relevance_with_cot_reasons, name="Answer Relevance"
).on_input_output()

context_relevance = (
    Feedback(
        provider.context_relevance_with_cot_reasons, name="Context Relevance"
    )
    .on_input()
    .on(context)
    .aggregate(np.mean)
)
# embedding distance between the query and retrieved context (lower is better)
from langchain.embeddings.openai import OpenAIEmbeddings
from trulens.feedback.embeddings import Embeddings

model_name = "text-embedding-ada-002"
embed_model = OpenAIEmbeddings(
    model=model_name, openai_api_key=os.environ["OPENAI_API_KEY"]
)
embed = Embeddings(embed_model=embed_model)
f_embed_dist = Feedback(embed.cosine_distance).on_input().on(context)
f_groundedness = (
    Feedback(
        provider.groundedness_measure_with_cot_reasons, name="Groundedness"
    )
    .on(context.collect())
    .on_output()
)
honest_feedbacks = [
    answer_relevance,
    context_relevance,
    f_embed_dist,
    f_groundedness,
]
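Before wiring these into the app, you can sanity-check a provider feedback function by calling it directly; it returns a 0-1 score plus chain-of-thought reasons. A minimal sketch with a hypothetical question/answer pair:
# Hypothetical sanity check of a feedback function
score, reasons = provider.relevance_with_cot_reasons(
    "Who provides flood insurance?",
    "Flood insurance is mainly provided through the federally backed NFIP.",
)
print(score)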
As noted above, our simple RAG often fails to retrieve complete context: the information needed may sit just outside the chunk that is identified and retrieved. Let's try sentence window retrieval, which embeds small sentence-level chunks but hands the LLM a wider window of surrounding sentences.
import os
from llama_index.core import PromptTemplate
from llama_index.core import Document
from llama_index.core import ServiceContext
from llama_index.core import StorageContext
from llama_index.core import VectorStoreIndex
from llama_index.core import load_index_from_storage
from llama_index.core.indices.postprocessor import (
MetadataReplacementPostProcessor,
)
from llama_index.core.indices.postprocessor import SentenceTransformerRerank
from llama_index.core.node_parser import SentenceWindowNodeParser
from llama_index.llms.openai import OpenAI
# initialize llm
llm = OpenAI(model="gpt-3.5-turbo", temperature=0.5)
# knowledge store
document = Document(text="\n\n".join([doc.text for doc in documents]))
# set system prompt
system_prompt = PromptTemplate(
    "We have provided context information below that you may use. \n"
    "---------------------\n"
    "{context_str}"
    "\n---------------------\n"
    "Please answer the question: {query_str}\n"
)
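To see how the template renders at query time, you can format it directly with placeholder values; a minimal sketch:
# Hypothetical check of the rendered prompt
print(
    system_prompt.format(
        context_str="<retrieved context goes here>",
        query_str="Who provides flood insurance?",
    )
)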
def build_sentence_window_index(
    document,
    llm,
    embed_model="local:BAAI/bge-small-en-v1.5",
    save_dir="sentence_index",
):
    # create the sentence window node parser with default settings
    node_parser = SentenceWindowNodeParser.from_defaults(
        window_size=3,
        window_metadata_key="window",
        original_text_metadata_key="original_text",
    )
    sentence_context = ServiceContext.from_defaults(
        llm=llm,
        embed_model=embed_model,
        node_parser=node_parser,
    )
    if not os.path.exists(save_dir):
        # build the index and persist it for reuse
        sentence_index = VectorStoreIndex.from_documents(
            [document], service_context=sentence_context
        )
        sentence_index.storage_context.persist(persist_dir=save_dir)
    else:
        # load the previously persisted index
        sentence_index = load_index_from_storage(
            StorageContext.from_defaults(persist_dir=save_dir),
            service_context=sentence_context,
        )
    return sentence_index
sentence_index = build_sentence_window_index(
    document,
    llm,
    embed_model="local:BAAI/bge-small-en-v1.5",
    save_dir="sentence_index",
)
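To build intuition for what the parser stores, here is a minimal, illustrative sketch on a toy document (the toy text and window_size=1 are hypothetical, not part of the app). Each node embeds a single sentence, while the surrounding window is kept in metadata for later substitution:
# Illustrative only: inspect what SentenceWindowNodeParser produces
toy_parser = SentenceWindowNodeParser.from_defaults(
    window_size=1,
    window_metadata_key="window",
    original_text_metadata_key="original_text",
)
toy_nodes = toy_parser.get_nodes_from_documents(
    [Document(text="Premiums vary by risk. Deductibles reduce premiums. Claims can raise premiums.")]
)
print(toy_nodes[1].metadata["original_text"])  # the single embedded sentence
print(toy_nodes[1].metadata["window"])  # that sentence plus its neighbors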
def get_sentence_window_query_engine(
    sentence_index,
    system_prompt,
    similarity_top_k=6,
    rerank_top_n=2,
):
    # define postprocessors: swap each retrieved sentence for its
    # surrounding window, then rerank and keep the top n results
    postproc = MetadataReplacementPostProcessor(target_metadata_key="window")
    rerank = SentenceTransformerRerank(
        top_n=rerank_top_n, model="BAAI/bge-reranker-base"
    )
    sentence_window_engine = sentence_index.as_query_engine(
        similarity_top_k=similarity_top_k,
        node_postprocessors=[postproc, rerank],
        text_qa_template=system_prompt,
    )
    return sentence_window_engine
sentence_window_engine = get_sentence_window_query_engine(
    sentence_index, system_prompt=system_prompt
)
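Before recording the evaluation run, you can spot-check a single query to confirm the postprocessors behaved as expected: each source node's text should now be a full window rather than a single sentence, and the reranker should leave only rerank_top_n nodes. A minimal sketch:
# Optional spot check: source nodes should now carry full windows
response = sentence_window_engine.query(honest_evals[0])
for node in response.source_nodes:
    print(node.score, node.node.get_content()[:120])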
tru_recorder_rag_sentencewindow = TruLlama(
    sentence_window_engine,
    app_name="RAG",
    app_version="2_sentence_window",
    feedbacks=honest_feedbacks,
)
# Run evaluation on the 10 sample questions
with tru_recorder_rag_sentencewindow as recording:
    for question in honest_evals:
        response = sentence_window_engine.query(question)
# Compare against the basic RAG recorder from the previous section
session.get_leaderboard(
    app_ids=[
        tru_recorder_rag_basic.app_id,
        tru_recorder_rag_sentencewindow.app_id,
    ]
)
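Beyond the aggregate leaderboard, individual records, traces, and feedback explanations can be browsed in the TruLens dashboard. A minimal sketch, assuming you can serve it locally:
from trulens.dashboard import run_dashboard

run_dashboard(session)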
How does the sentence window RAG compare to our prototype? You decide!