Vercel AI SDK
Use Tensoras.ai with the Vercel AI SDK to build AI-powered applications in Next.js, Nuxt, SvelteKit, and other frameworks.
Installation
npm install ai @tensoras/ai-provider
Authentication
Set your API key as an environment variable:
export TENSORAS_API_KEY="tns_your_key_here"
Or in your .env.local file for Next.js:
TENSORAS_API_KEY=tns_your_key_here
Provider Setup
import { tensoras } from "@tensoras/ai-provider";
const model = tensoras("llama-3.3-70b");
The provider reads TENSORAS_API_KEY from the environment. To pass it explicitly:
import { createTensoras } from "@tensoras/ai-provider";
const tensoras = createTensoras({
apiKey: "tns_your_key_here",
});
const model = tensoras("llama-3.3-70b");
Generate Text
import { generateText } from "ai";
import { tensoras } from "@tensoras/ai-provider";
const { text } = await generateText({
model: tensoras("llama-3.3-70b"),
prompt: "Explain retrieval-augmented generation in one sentence.",
});
console.log(text);
Stream Text
import { streamText } from "ai";
import { tensoras } from "@tensoras/ai-provider";
const result = streamText({
model: tensoras("llama-3.3-70b"),
prompt: "Write a haiku about open-source AI.",
});
for await (const chunk of result.textStream) {
process.stdout.write(chunk);
}
Generate Object (Structured Output)
import { generateObject } from "ai";
import { tensoras } from "@tensoras/ai-provider";
import { z } from "zod";
const { object } = await generateObject({
model: tensoras("llama-3.3-70b"),
schema: z.object({
recipe: z.object({
name: z.string(),
ingredients: z.array(z.string()),
steps: z.array(z.string()),
}),
}),
prompt: "Generate a recipe for chocolate chip cookies.",
});
console.log(object.recipe.name);
console.log(object.recipe.ingredients);
Tool Calling
import { generateText, tool } from "ai";
import { tensoras } from "@tensoras/ai-provider";
import { z } from "zod";
const result = await generateText({
model: tensoras("llama-3.3-70b"),
tools: {
weather: tool({
description: "Get the current weather for a city",
parameters: z.object({
city: z.string().describe("The city name"),
}),
execute: async ({ city }) => {
return { city, temperature: 72, condition: "sunny" };
},
}),
},
prompt: "What is the weather in San Francisco?",
});
console.log(result.text);
Next.js: Chat Route
Create an API route that streams responses:
// app/api/chat/route.ts
import { streamText } from "ai";
import { tensoras } from "@tensoras/ai-provider";
export async function POST(req: Request) {
const { messages } = await req.json();
const result = streamText({
model: tensoras("llama-3.3-70b"),
system: "You are a helpful assistant.",
messages,
});
return result.toDataStreamResponse();
}
Next.js: Chat UI with useChat
Build a chat interface using the useChat hook:
// app/page.tsx
"use client";
import { useChat } from "ai/react";
export default function Chat() {
const { messages, input, handleInputChange, handleSubmit, isLoading } =
useChat();
return (
<div>
<div>
{messages.map((m) => (
<div key={m.id}>
<strong>{m.role === "user" ? "You" : "AI"}:</strong>
<p>{m.content}</p>
</div>
))}
</div>
<form onSubmit={handleSubmit}>
<input
value={input}
onChange={handleInputChange}
placeholder="Type a message..."
disabled={isLoading}
/>
<button type="submit" disabled={isLoading}>
Send
</button>
</form>
</div>
);
}
Next.js: RAG with Knowledge Bases
Use Tensoras Knowledge Bases in your API route for RAG:
// app/api/chat/route.ts
import { streamText } from "ai";
import { tensoras } from "@tensoras/ai-provider";
import Tensoras from "tensoras";
const tns = new Tensoras();
export async function POST(req: Request) {
const { messages } = await req.json();
const lastMessage = messages[messages.length - 1].content;
// Retrieve relevant context from Knowledge Base
const retrieval = await tns.chat.completions.create({
model: "llama-3.3-70b",
messages,
knowledge_bases: ["kb_a1b2c3d4"],
stream: false,
});
const context = retrieval.citations
?.map((c) => c.text)
.join("\n\n");
const result = streamText({
model: tensoras("llama-3.3-70b"),
system: `You are a helpful assistant. Use the following context to answer questions:\n\n${context}`,
messages,
});
return result.toDataStreamResponse();
}
Embedding Model
Use Tensoras embeddings with the Vercel AI SDK:
import { embed } from "ai";
import { tensoras } from "@tensoras/ai-provider";
const { embedding } = await embed({
model: tensoras.textEmbeddingModel("gte-large-en-v1.5"),
value: "What is deep learning?",
});
console.log(`Dimensions: ${embedding.length}`);
Next Steps
- Node.js SDK — full Tensoras Node.js client
- Streaming — SSE details and cancellation
- Structured Outputs — JSON schema enforcement
- Tool Calling — how tool calling works in Tensoras