LangGraph
Build stateful, multi-step AI agents using LangGraph with Tensoras.ai as the LLM provider.
Installation
pip install langgraph langchain-tensoras

Authentication
export TENSORAS_API_KEY="tns_your_key_here"

Basic Agent
Create a LangGraph agent that uses Tensoras for inference and can call tools:
from langchain_tensoras import ChatTensoras
from langchain_core.tools import tool
from langgraph.prebuilt import create_react_agent
# Define tools
@tool
def get_weather(city: str) -> str:
    """Get the current weather for a city."""
    # Stub implementation for the example; a real tool would call a weather API.
    return f"The weather in {city} is sunny, 72F."
@tool
def search_docs(query: str) -> str:
    """Search internal documentation."""
    # Stub implementation; a real tool would hit a search index.
    return f"Found 3 results for '{query}': ..."
# Create agent: a prebuilt ReAct loop around the Tensoras chat model and tools.
llm = ChatTensoras(model="llama-3.3-70b")
agent = create_react_agent(llm, tools=[get_weather, search_docs])

# Run. The returned dict's "messages" holds the full conversation,
# including any intermediate tool calls and tool results.
result = agent.invoke({
    "messages": [{"role": "user", "content": "What is the weather in Tokyo?"}],
})
for message in result["messages"]:
    print(f"{message.type}: {message.content}")

Custom Graph with Tool Calling
For more control, build a custom LangGraph workflow with explicit nodes and edges:
from langchain_tensoras import ChatTensoras
from langchain_core.messages import HumanMessage, AIMessage, ToolMessage
from langchain_core.tools import tool
from langgraph.graph import StateGraph, MessagesState, START, END
from langgraph.prebuilt import ToolNode
# Define tools
@tool
def calculate(expression: str) -> str:
    """Evaluate a math expression."""
    # WARNING: eval() executes arbitrary Python. Fine for a demo, but the
    # expression here is LLM-supplied (untrusted) input -- in production use
    # ast.literal_eval or a dedicated expression parser instead.
    return str(eval(expression))
@tool
def lookup_product(product_id: str) -> str:
    """Look up product details by ID."""
    # Stub implementation; a real tool would query a product catalog.
    return f"Product {product_id}: Widget Pro, $49.99, in stock."
tools = [calculate, lookup_product]

# LLM with tools bound, so the model can emit structured tool calls.
llm = ChatTensoras(model="llama-3.3-70b")
llm_with_tools = llm.bind_tools(tools)
# Define nodes
def call_model(state: MessagesState):
    """Invoke the tool-bound LLM on the conversation and append its reply."""
    # llm_with_tools is the module-level model created above via bind_tools().
    response = llm_with_tools.invoke(state["messages"])
    return {"messages": [response]}
def should_continue(state: MessagesState):
    """Route to the "tools" node when the last message requested tool calls, else end."""
    newest = state["messages"][-1]
    return "tools" if newest.tool_calls else END
# Build graph
graph = StateGraph(MessagesState)
graph.add_node("agent", call_model)
graph.add_node("tools", ToolNode(tools))
graph.add_edge(START, "agent")
# After the model speaks, either execute the requested tools or finish.
graph.add_conditional_edges("agent", should_continue, {"tools": "tools", END: END})
# Tool results always flow back to the model for the next reasoning step.
graph.add_edge("tools", "agent")
app = graph.compile()

# Run
result = app.invoke({
    "messages": [HumanMessage(content="What is 42 * 17? Also look up product P-1234.")],
})
for message in result["messages"]:
    print(f"{message.type}: {message.content}")

Streaming Agent Responses
Stream both intermediate steps and final tokens:
from langchain_tensoras import ChatTensoras
from langchain_core.tools import tool
from langgraph.prebuilt import create_react_agent
@tool
def get_weather(city: str) -> str:
    """Get the current weather for a city."""
    # Stub result so the example runs without a real weather API.
    return f"Sunny, 72F in {city}."
# ReAct agent: the model decides when to call get_weather.
llm = ChatTensoras(model="llama-3.3-70b")
agent = create_react_agent(llm, tools=[get_weather])
# Stream events
async for event in agent.astream_events(
{"messages": [{"role": "user", "content": "Weather in Paris?"}]},
version="v2",
):
if event["event"] == "on_chat_model_stream":
chunk = event["data"]["chunk"]
if chunk.content:
print(chunk.content, end="", flush=True)
elif event["event"] == "on_tool_end":
print(f"\n[Tool result: {event['data'].output}]")RAG Agent
Combine LangGraph agents with Tensoras Knowledge Base retrieval:
from langchain_tensoras import ChatTensoras, TensorasRetriever
from langchain_core.tools import tool
from langgraph.prebuilt import create_react_agent
# Retriever backed by a Tensoras Knowledge Base; returns the top 5 chunks.
retriever = TensorasRetriever(knowledge_base_id="kb_a1b2c3d4", top_k=5)

@tool
def search_knowledge_base(query: str) -> str:
    """Search the company knowledge base for relevant information."""
    docs = retriever.invoke(query)
    # Join the retrieved chunks into one string for the model to read.
    return "\n\n".join(doc.page_content for doc in docs)

llm = ChatTensoras(model="llama-3.3-70b")
agent = create_react_agent(
    llm,
    tools=[search_knowledge_base],
    prompt="You are a support agent. Use the knowledge base tool to answer questions accurately.",
)
# The last message in the result is the agent's final, retrieval-grounded answer.
result = agent.invoke({
    "messages": [{"role": "user", "content": "How do I configure SSO?"}],
})
print(result["messages"][-1].content)

Checkpointing and Memory
Add persistent memory to your agent using LangGraph checkpointers:
from langchain_tensoras import ChatTensoras
from langchain_core.tools import tool
from langgraph.prebuilt import create_react_agent
from langgraph.checkpoint.memory import MemorySaver
@tool
def get_weather(city: str) -> str:
    """Get the current weather for a city."""
    # Stub result so the example runs without a real weather API.
    return f"Sunny, 72F in {city}."
llm = ChatTensoras(model="llama-3.3-70b")
# In-process checkpointer; presumably swapped for a persistent backend in
# production -- verify against the LangGraph checkpointer docs.
memory = MemorySaver()
agent = create_react_agent(llm, tools=[get_weather], checkpointer=memory)

# All invocations that share this thread_id share conversation state.
config = {"configurable": {"thread_id": "user-123"}}

# First turn
result = agent.invoke(
    {"messages": [{"role": "user", "content": "Weather in London?"}]},
    config=config,
)
print(result["messages"][-1].content)

# Follow-up (agent remembers context)
result = agent.invoke(
    {"messages": [{"role": "user", "content": "How about Paris?"}]},
    config=config,
)
print(result["messages"][-1].content)

Next Steps
- LangChain Integration — LangChain components for Tensoras
- CrewAI Integration — multi-agent orchestration
- Tool Calling — how tool calling works in Tensoras
- Python SDK — full SDK reference