In this guide you'll learn how to implement and use Toolhouse with just a few lines of code. For this example, we'll build an assistant that can run the code it generates, using the Code Interpreter tool from Toolhouse.
Sign up
You can sign up for free on the Toolhouse sign-up page. Skip this step if you already have a Toolhouse account.
Create an API Key
You will need to generate an API Key in order to use Toolhouse with your code.
You'll find a field labeled API Key Name. Give your API Key a name, then click Generate.
Copy the API Key and save it where you save your secrets.
In this guide, we'll assume you have a .env file. Save the API Key as TOOLHOUSE_API_KEY in your environment file. This allows Toolhouse to pick up its value directly in your code.
TOOLHOUSE_API_KEY=<Your API Key value>
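If you load secrets from a .env file, make sure it's read when your program starts. Here's a minimal sketch assuming the python-dotenv package (pip install python-dotenv); any other way of exporting the variable works just as well:

import os

# Assumption: python-dotenv is installed. load_dotenv() reads the .env
# file in the current directory and exports its entries as environment
# variables, so the Toolhouse SDK can pick up TOOLHOUSE_API_KEY.
from dotenv import load_dotenv

load_dotenv()
assert os.environ.get("TOOLHOUSE_API_KEY") is not None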
Add a tool
In Toolhouse, tools are function definitions. Tools can be local (meaning they run on your infrastructure, such as your cloud environment or your laptop) or cloud (meaning they are executed in the Toolhouse Cloud).
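As an illustration of a local tool, here's a minimal sketch assuming the Python SDK's register_local_tool decorator; the tool name and body are hypothetical, and cloud tools like the Code Interpreter need no registration at all:

from toolhouse import Toolhouse

th = Toolhouse()  # picks up TOOLHOUSE_API_KEY from your environment

# Assumption: register_local_tool is the SDK's decorator for local tools.
# "hello_world" is a hypothetical tool name; the function runs on your
# own infrastructure, not in the Toolhouse Cloud.
@th.register_local_tool("hello_world")
def hello_world(name: str) -> str:
    return f"Hello, {name}!"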
All the tools on Toolhouse are pre-installed on your account!
You can either call all tools by default, since they come pre-installed in your account, or create Bundles to group just the tools you need. Once you've added tools to a Bundle in the Toolhouse dashboard, you can request that Bundle from the SDK, as sketched below.
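A minimal sketch, assuming get_tools accepts a bundle name ("code-tools" is a hypothetical Bundle; replace it with yours):

from toolhouse import Toolhouse

th = Toolhouse()  # picks up TOOLHOUSE_API_KEY from your environment

# Assumption: get_tools takes an optional bundle name. With no argument,
# all the tools installed on your account are returned.
tools = th.get_tools(bundle="code-tools")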
Then, instantiate the SDK and pass the tool definitions in your LLM call. Toolhouse encapsulates the logic you'd normally need to pass the tool output back to the LLM, so you won't have to write that boilerplate yourself. Here's a complete example using OpenAI:
import os
# 👋 Make sure you've also installed the OpenAI SDK through: pip install openai
from openai import OpenAI
from toolhouse import Toolhouse
# Let's set our API Keys.
# Please remember to use a safer system to store your API KEYS
# after finishing the quick start.
client = OpenAI(api_key='YOUR_OPENAI_API_KEY')
th = Toolhouse(api_key=os.environ.get('TOOLHOUSE_API_KEY'),
               provider="openai")
# Define the OpenAI model we want to use
MODEL = 'gpt-4o-mini'
messages = [{
    "role": "user",
    "content": "Generate FizzBuzz code. "
               "Execute it to show me the results up to 10.",
}]
response = client.chat.completions.create(
    model=MODEL,
    messages=messages,
    # Passes Code Execution as a tool
    tools=th.get_tools(),
)
# Runs the Code Execution tool, gets the result,
# and appends it to the context
messages += th.run_tools(response)
response = client.chat.completions.create(
    model=MODEL,
    messages=messages,
    tools=th.get_tools(),
)
# Prints the response with the answer
print(response.choices[0].message.content)
The same example, using Azure OpenAI:
import os
from toolhouse import Toolhouse
# 👋 Make sure you've also installed the OpenAI SDK through: pip install openai
from openai import AzureOpenAI
client = AzureOpenAI(
    api_key=os.environ.get("AZURE_OPENAI_API_KEY"),
    api_version="2024-02-01",
    azure_endpoint=os.environ.get("AZURE_OPENAI_ENDPOINT"),
)
DEPLOYMENT_ID = 'REPLACE_WITH_YOUR_DEPLOYMENT_NAME'
# If you don't specify an API key, Toolhouse will expect you to
# specify one in the TOOLHOUSE_API_KEY env variable.
th = Toolhouse()
messages = [{
    "role": "user",
    "content": "Generate FizzBuzz code. "
               "Execute it to show me the results up to 10.",
}]
response = client.chat.completions.create(
    model=DEPLOYMENT_ID,
    messages=messages,
    # Passes Code Execution as a tool
    tools=th.get_tools(),
)
# Runs the Code Execution tool, gets the result,
# and appends it to the context
messages += th.run_tools(response)
response = client.chat.completions.create(
    model=DEPLOYMENT_ID,
    messages=messages,
    tools=th.get_tools(),
)
# Prints the response with the answer
print(response.choices[0].message.content)
With Anthropic:
import os
from toolhouse import Toolhouse
# 👋 Make sure you've also installed the Anthropic SDK through: pip install anthropic
from anthropic import Anthropic
# ANTHROPIC_KEY must be set in your environment
client = Anthropic(api_key=os.getenv("ANTHROPIC_KEY"))
MODEL = "claude-3-5-sonnet-latest"
# If you don't specify an API key, Toolhouse will expect you to
# specify one in the TOOLHOUSE_API_KEY env variable.
th = Toolhouse(provider='anthropic')
messages = [{
    "role": "user",
    "content": "Generate FizzBuzz code. "
               "Execute it to show me the results up to 10.",
}]
response = client.messages.create(
    model=MODEL,
    messages=messages,
    max_tokens=1000,
    # Passes Code Execution as a tool
    tools=th.get_tools(),
)
# Runs the Code Execution tool, gets the result,
# and appends it to the context
messages += th.run_tools(response)
response = client.messages.create(
    model=MODEL,
    messages=messages,
    max_tokens=1000,
    tools=th.get_tools(),
)
print(response.content[0].text)
With Anthropic on AWS Bedrock:
import os
from toolhouse import Toolhouse
# 👋 Make sure you've also installed the Anthropic SDK through: pip install anthropic
from anthropic import AnthropicBedrock
# If you don't specify an API key, Toolhouse will expect you to
# specify one in the TOOLHOUSE_API_KEY env variable.
th = Toolhouse(provider='anthropic')
client = AnthropicBedrock(
    aws_access_key=os.environ.get('AWS_ACCESS_KEY'),
    aws_secret_key=os.environ.get('AWS_SECRET_KEY'),
    aws_session_token=os.environ.get('AWS_SESSION_TOKEN'),
    aws_region=os.environ.get('AWS_REGION'),
)
MODEL = "anthropic.claude-3-5-sonnet-20241022-v2:0"
messages = [{
    "role": "user",
    "content": "Generate FizzBuzz code. "
               "Execute it to show me the results up to 10.",
}]
response = client.messages.create(
    model=MODEL,
    messages=messages,
    max_tokens=1000,
    # Passes Code Execution as a tool
    tools=th.get_tools(),
)
# Runs the Code Execution tool, gets the result,
# and appends it to the context
messages += th.run_tools(response)
response = client.messages.create(
    model=MODEL,
    messages=messages,
    max_tokens=1000,
    tools=th.get_tools(),
)
print(response.content[0].text)
With Groq:
import os
from toolhouse import Toolhouse
# 👋 Make sure you've also installed the Groq SDK through: pip install groq
from groq import Groq
client = Groq(api_key=os.environ.get('GROQ_API_KEY'))
MODEL = "llama-3.3-70b-versatile"
# If you don't specify an API key, Toolhouse will expect you to
# specify one in the TOOLHOUSE_API_KEY env variable.
th = Toolhouse()
messages = [{
    "role": "user",
    "content": "Generate FizzBuzz code. "
               "Execute it to show me the results up to 10.",
}]
response = client.chat.completions.create(
    model=MODEL,
    messages=messages,
    # Passes Code Execution as a tool
    tools=th.get_tools(),
)
# Runs the Code Execution tool, gets the result,
# and appends it to the context
tool_run = th.run_tools(response)
messages.extend(tool_run)
response = client.chat.completions.create(
    model=MODEL,
    messages=messages,
    tools=th.get_tools(),
)
print(response.choices[0].message.content)
With Together AI (via the OpenAI SDK):
import os
from toolhouse import Toolhouse
# 👋 Make sure you've also installed the OpenAI SDK through: pip install openai
from openai import OpenAI
client = OpenAI(
    api_key=os.environ.get('TOGETHER_API_KEY'),
    base_url="https://api.together.xyz/v1",
)
MODEL = 'meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo'
# If you don't specify an API key, Toolhouse will expect you to
# specify one in the TOOLHOUSE_API_KEY env variable.
th = Toolhouse()
messages = [{
    "role": "user",
    "content": "Generate FizzBuzz code. "
               "Execute it to show me the results up to 10.",
}]
response = client.chat.completions.create(
    model=MODEL,
    messages=messages,
    # Passes Code Execution as a tool
    tools=th.get_tools(),
)
# Runs the Code Execution tool, gets the result,
# and appends it to the context
messages += th.run_tools(response)
response = client.chat.completions.create(
    model=MODEL,
    messages=messages,
    tools=th.get_tools(),
)
# Prints the response with the answer
print(response.choices[0].message.content)