Language Verification¶
In this example you will learn how to implement language verification with TruLens.
Setup¶
In [ ]:
# !pip install trulens trulens-providers-huggingface
Add API keys¶
For this quickstart you will need OpenAI and Hugging Face API keys.
In [ ]:
import os
os.environ["OPENAI_API_KEY"] = "..."
os.environ["HUGGINGFACE_API_KEY"] = "..."
In [ ]:
import openai
openai.api_key = os.environ["OPENAI_API_KEY"]
Import from TruLens¶
In [ ]:
# Imports main tools:
from trulens.core import Feedback
from trulens.core import TruSession
from trulens.providers.huggingface import Huggingface
session = TruSession()
session.reset_database()
Create Simple Text to Text Application¶
This example wraps a bare-bones OpenAI LLM in a simple text-to-text function; a call to the (non-LLM) OpenAI moderation endpoint is also shown, purely for demonstration purposes.
In [ ]:
def gpt35_turbo(prompt):
    return openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=[
            {
                "role": "system",
                "content": "You are a question and answer bot. Answer upbeat.",
            },
            {"role": "user", "content": prompt},
        ],
    )["choices"][0]["message"]["content"]
In [ ]:
response = openai.Moderation.create(input="I hate black people")
output = response["results"][0]
In [ ]:
output["category_scores"]["hate"]
Initialize Feedback Function(s)¶
In [ ]:
# HuggingFace based feedback function collection class
hugs = Huggingface()
f_langmatch = Feedback(hugs.language_match).on_input_output()
feedbacks = [f_langmatch]
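If you want to see what the feedback function produces before attaching it to an app, you can call the provider method directly. A minimal sketch with made-up example strings; the exact return shape (a score, possibly with metadata) may vary by TruLens version:

# Score how well the languages of an input/output pair match.
# Expect a value near 1.0 when both texts are in the same language.
result = hugs.language_match(
    "Comment ça va?",  # French input
    "Je vais bien, merci!",  # French output
)
print(result)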
Instrument the callable for logging with TruLens¶
In [ ]:
from trulens.apps.basic import TruBasicApp
gpt35_turbo_recorder = TruBasicApp(
    gpt35_turbo, app_name="gpt-3.5-turbo", feedbacks=feedbacks
)
In [ ]:
prompts = [
    "Comment ça va?",
    "¿Cómo te llamas?",
    "你好吗?",
    "Wie geht es dir?",
    "Как се казваш?",
    "Come ti chiami?",
    "Como vai?",
    "Hoe gaat het?",
    "¿Cómo estás?",
    "ما اسمك؟",
    "Qu'est-ce que tu fais?",
    "Какво правиш?",
    "你在做什么?",
    "Was machst du?",
    "Cosa stai facendo?",
]
In [ ]:
with gpt35_turbo_recorder as recording:
    for prompt in prompts:
        print(prompt)
        gpt35_turbo_recorder.app(prompt)
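Each call made inside the with block is captured on the recording context. As a quick check before opening the dashboard, you can print the captured input/output pairs; a minimal sketch, assuming the records attribute and the Record fields used in other TruLens quickstarts:

# Inspect the records captured by the recording context.
for record in recording.records:
    print(record.main_input, "->", record.main_output)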
Explore in a Dashboard¶
In [ ]:
from trulens.dashboard import run_dashboard
run_dashboard(session)  # open a local streamlit app to explore
# stop_dashboard(session)  # stop if needed
Or view results directly in your notebook¶
In [ ]:
session.get_records_and_feedback()[0]
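For an aggregate view of feedback scores across app versions, you can also pull the leaderboard; a minimal sketch using TruSession.get_leaderboard, as in other TruLens quickstarts:

# Average feedback scores (including language_match), latency, and cost per app.
session.get_leaderboard()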