Anthropic Quickstart
Anthropic is an AI safety and research company working to build reliable, interpretable, and steerable AI systems. Through our LiteLLM integration, you can easily run feedback functions with Anthropic's Claude and Claude Instant.
In [ ]:
# !pip install trulens anthropic trulens-providers-litellm langchain==0.0.347
In [ ]:
import os
os.environ["ANTHROPIC_API_KEY"] = "..."
Chat with Claude
In [ ]:
from anthropic import AI_PROMPT
from anthropic import HUMAN_PROMPT
from anthropic import Anthropic

anthropic = Anthropic()


def claude_2_app(prompt):
    completion = anthropic.completions.create(
        model="claude-2",
        max_tokens_to_sample=300,
        prompt=f"{HUMAN_PROMPT} {prompt} {AI_PROMPT}",
    ).completion
    return completion


claude_2_app("How does a case reach the supreme court?")
Initialize Feedback Function(s)
In [ ]:
from trulens.core import TruSession
session = TruSession()
session.reset_database()
In [ ]:
from trulens.core import Feedback
from trulens.providers.litellm import LiteLLM

# Initialize a LiteLLM-based feedback provider backed by Claude 2:
claude_2 = LiteLLM(model_engine="claude-2")

# Define a relevance feedback function using Claude 2 as the judge.
f_relevance = Feedback(claude_2.relevance).on_input_output()
# By default this will evaluate relevance between the main app input and the
# main app output.
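The provider's feedback functions can also be called directly, which is a quick way to sanity-check the judge before wiring it into an app. A minimal sketch (the example strings are made up):

# Score relevance of a response to a prompt directly; returns a float in [0, 1].
score = claude_2.relevance(
    "How does a case reach the supreme court?",
    "Most cases reach the Supreme Court on appeal via a petition for certiorari.",
)
print(score)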
Instrument app for logging with TruLens
In [ ]:
from trulens.apps.basic import TruBasicApp
tru_recorder = TruBasicApp(claude_2_app, app_name="Anthropic Claude 2", feedbacks=[f_relevance])
In [ ]:
with tru_recorder as recording:
    llm_response = tru_recorder.app(
        "How does a case make it to the supreme court?"
    )
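The recording context also captures a Record object for each call to the wrapped app. A sketch of pulling it out programmatically (assuming recording.get() is available in your TruLens version):

# Retrieve the record captured inside the `with` block above.
record = recording.get()
print(record.main_input, record.main_output)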
Explore in a Dashboard
In [ ]:
from trulens.dashboard import run_dashboard
run_dashboard(session) # open a local streamlit app to explore
# stop_dashboard(session) # stop if needed
Or view results directly in your notebook
In [ ]:
session.get_records_and_feedback()[0]
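For an aggregate view of feedback scores per app, a sketch assuming your TruSession version exposes get_leaderboard:

# Mean feedback scores per app, returned as a DataFrame.
session.get_leaderboard()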