Google Vertex¶
In this quickstart you will learn how to run evaluation functions using models from Google Vertex AI, such as PaLM 2.
In [ ]:
# !pip install trulens trulens-apps-langchain trulens-providers-litellm google-cloud-aiplatform==1.36.3 litellm==1.11.1 langchain==0.0.347
Authentication¶
In [ ]:
from google.cloud import aiplatform
In [ ]:
aiplatform.init(project="...", location="us-central1")
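The call above assumes Application Default Credentials are available (for example, via `gcloud auth application-default login`). If you use a service-account key instead, you can pass credentials explicitly. A minimal sketch, assuming a hypothetical key file `service-account.json`:

In [ ]:
# Optional: authenticate with an explicit service-account key instead of
# Application Default Credentials. The key file path is a placeholder.
from google.oauth2 import service_account

credentials = service_account.Credentials.from_service_account_file(
    "service-account.json"  # hypothetical path to your key file
)
aiplatform.init(project="...", location="us-central1", credentials=credentials)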
Import from LangChain and TruLens¶
In [ ]:
# Imports main tools:
# Imports from langchain to build app. You may need to install langchain first
# with the following:
# !pip install langchain>=0.0.170
from langchain.chains import LLMChain
from langchain.llms import VertexAI
from langchain.prompts import PromptTemplate
from langchain.prompts.chat import ChatPromptTemplate
from langchain.prompts.chat import HumanMessagePromptTemplate
from trulens.core import Feedback
from trulens.core import TruSession
from trulens.apps.langchain import TruChain
from trulens.providers.litellm import LiteLLM

session = TruSession()
session.reset_database()
Create Simple LLM Application¶
This example uses the LangChain framework with a Google Vertex AI LLM.
In [ ]:
full_prompt = HumanMessagePromptTemplate(
    prompt=PromptTemplate(
        template="Provide a helpful response with relevant background information for the following: {prompt}",
        input_variables=["prompt"],
    )
)

chat_prompt_template = ChatPromptTemplate.from_messages([full_prompt])

llm = VertexAI()

chain = LLMChain(llm=llm, prompt=chat_prompt_template, verbose=True)
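`VertexAI()` uses LangChain's default Vertex text model. If you want a specific model or different generation settings, you can pass them to the constructor; the values below are illustrative, not a recommendation:

In [ ]:
# Illustrative: pick a specific Vertex text model and tune generation
# parameters rather than relying on the defaults.
llm = VertexAI(model_name="text-bison", temperature=0.2, max_output_tokens=256)
chain = LLMChain(llm=llm, prompt=chat_prompt_template, verbose=True)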
Send your first request¶
In [ ]:
prompt_input = "What is a good name for a store that sells colorful socks?"
In [ ]:
llm_response = chain(prompt_input)

display(llm_response)
Initialize Feedback Function(s)¶
In [ ]:
# Initialize LiteLLM-based feedback function collection class:
litellm = LiteLLM(model_engine="chat-bison")

# Define a relevance function using LiteLLM
relevance = Feedback(litellm.relevance_with_cot_reasons).on_input_output()
# By default this will check relevance on the main app input and main app
# output.
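Before wiring the feedback function into a recorder, you can sanity-check the provider method directly. `relevance_with_cot_reasons` returns a score between 0 and 1 along with the model's reasoning; the prompt/response pair below is made up for illustration:

In [ ]:
# Quick sanity check of the provider method on a hand-written example.
# Expected to return a (score, reasons) tuple.
score, reasons = litellm.relevance_with_cot_reasons(
    "What is a good name for a store that sells colorful socks?",
    "Bright Steps would be a fun, memorable name for a colorful sock shop.",
)
print(score, reasons)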
Instrument chain for logging with TruLens¶
In [ ]:
tru_recorder = TruChain(
    chain, app_name="Chain1_ChatApplication", feedbacks=[relevance]
)
In [ ]:
with tru_recorder as recording:
    llm_response = chain(prompt_input)

display(llm_response)
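The recording context also captures the full record of the call, which you can inspect directly. A short sketch using the `recording` handle from the cell above:

In [ ]:
# Inspect the record captured by the recording context above.
record = recording.get()  # the Record produced by this recording
print(record.main_input, "->", record.main_output)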
In [ ]:
session.get_records_and_feedback()[0]
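`get_records_and_feedback` returns a tuple of a pandas DataFrame of records and the list of feedback column names; indexing `[0]` above keeps only the DataFrame. You can unpack both for analysis (the "input" and "output" column names below are assumed from the default records schema):

In [ ]:
# Unpack the records DataFrame and the feedback column names.
records_df, feedback_names = session.get_records_and_feedback()
records_df[["input", "output", *feedback_names]].head()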
Explore in a Dashboard¶
In [ ]:
from trulens.dashboard import run_dashboard

run_dashboard(session)  # open a local streamlit app to explore
# stop_dashboard(session) # stop if needed
Or view results directly in your notebook¶
In [ ]:
session.get_records_and_feedback()[0]
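For a quick aggregate view without launching the dashboard, `get_leaderboard` summarizes average feedback scores per app:

In [ ]:
# Aggregate average feedback scores for each app in the database.
session.get_leaderboard()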