A comprehensive guide to implementing AI features in your Next.js application using the Vercel AI SDK, including streaming responses, React hooks, and provider integration.
This guide demonstrates how to integrate the Vercel AI SDK into your Next.js application, including streaming AI responses, using React hooks, and integrating with AI providers like OpenRouter.
The Vercel AI SDK is a TypeScript toolkit that provides a unified interface for building AI-powered applications. It supports multiple AI providers, streaming responses, and React hooks for seamless integration with Next.js applications.
Key features include:
Install the core AI SDK and React integration:
npm install ai @ai-sdk/react
# or
yarn add ai @ai-sdk/react
# or
pnpm add ai @ai-sdk/react
For this project, we're using OpenRouter which provides access to hundreds of models:
npm install @openrouter/ai-sdk-provider
# or
yarn add @openrouter/ai-sdk-provider
# or
pnpm add @openrouter/ai-sdk-provider
Add the following to your `.env.local` file:
# OpenRouter Configuration
OPENROUTER_API_KEY="sk-or-v1-your-openrouter-api-key"
OPENROUTER_MODEL="x-ai/grok-4-fast:free"
OPENROUTER_SITE_URL="https://yourdomain.com"
OPENROUTER_APP_NAME="Your App Name"
# Alternative providers (optional)
OPENAI_API_KEY="sk-your-openai-api-key"
ANTHROPIC_API_KEY="sk-ant-your-anthropic-api-key"
Update your `.env.example` to document the new variables:
# AI Configuration
OPENROUTER_API_KEY="sk-or-v1-..."
OPENROUTER_MODEL="x-ai/grok-4-fast:free"
OPENROUTER_SITE_URL="http://localhost:3000"
OPENROUTER_APP_NAME="Next.js AI App"
Create `lib/ai.ts`:
// lib/ai.ts
import { createOpenRouter } from "@openrouter/ai-sdk-provider";
import { streamText } from "ai";

// Fail fast at module load if the key is missing (this module is server-only).
if (!process.env.OPENROUTER_API_KEY) {
  throw new Error("OPENROUTER_API_KEY is not set");
}

/**
 * Shared OpenRouter provider instance.
 * NOTE: the package exposes a `createOpenRouter` factory (there is no
 * `OpenRouter` class) — this matches the usage in app/api/chat/route.ts.
 */
export const openrouter = createOpenRouter({
  apiKey: process.env.OPENROUTER_API_KEY,
  // Optional: route metadata shown on the OpenRouter dashboard
  headers: {
    "HTTP-Referer": process.env.OPENROUTER_SITE_URL ?? "",
    "X-Title": process.env.OPENROUTER_APP_NAME ?? "Next.js AI App",
  },
});

/** Model used when callers don't specify one explicitly. */
export const defaultModel =
  process.env.OPENROUTER_MODEL || "x-ai/grok-4-fast:free";

/**
 * Stream a chat completion through OpenRouter via the Vercel AI SDK.
 *
 * @param messages ordered chat history (system/user/assistant turns)
 * @returns the `streamText` result — consume via `textStream` or one of
 *          its response helpers
 */
export const streamCompletion = async (
  messages: { role: "user" | "system" | "assistant"; content: string }[]
) => {
  return streamText({
    model: openrouter.chat(defaultModel),
    messages,
  });
};
Create `app/api/chat/route.ts`:
// app/api/chat/route.ts
import { NextRequest } from 'next/server';
import { streamText, UIMessage, convertToModelMessages } from 'ai';
import { createOpenRouter } from '@openrouter/ai-sdk-provider';

// Allow streaming responses up to 30 seconds
export const maxDuration = 30;

/**
 * Lazily build the OpenRouter provider so a missing key fails with a clear
 * message at request time rather than crashing the module at import time.
 */
function getOpenRouter() {
  const apiKey = process.env.OPENROUTER_API_KEY;
  if (!apiKey) {
    throw new Error('Missing OPENROUTER_API_KEY environment variable.');
  }
  return createOpenRouter({
    apiKey,
    headers: {
      'HTTP-Referer': process.env.OPENROUTER_SITE_URL ?? '',
      'X-Title': process.env.OPENROUTER_APP_NAME ?? 'Next.js AI App',
    },
  });
}

/**
 * POST /api/chat — streams a chat completion back to the useChat client.
 * Clients may pass `?model=...` in the URL to override the default model.
 */
export async function POST(req: NextRequest) {
  const openrouter = getOpenRouter();

  // Model resolution order: URL override → env default → hard-coded fallback.
  const { searchParams } = new URL(req.url);
  const modelName =
    searchParams.get('model') ||
    process.env.OPENROUTER_MODEL ||
    'x-ai/grok-4-fast:free';

  // Parse the UI-message payload sent by useChat.
  const { messages }: { messages: UIMessage[] } = await req.json();

  const result = streamText({
    model: openrouter.chat(modelName),
    // System prompt keeps responses focused and safe; `system` is the
    // idiomatic option — no need to prepend a synthetic system message.
    system:
      'You are a helpful, concise assistant for a Next.js app. Respond with clear, short answers.',
    // Convert UI messages (with `parts`) into model messages.
    messages: convertToModelMessages(messages),
    // Optional: tune parameters
    temperature: 0.7,
    // AI SDK v5 renamed `maxTokens` to `maxOutputTokens`.
    maxOutputTokens: 1000,
  });

  return result.toUIMessageStreamResponse();
}
Create a chat component using the `useChat` hook:
// components/chat.tsx
'use client';
import { useChat } from '@ai-sdk/react';
import { Button } from '@/components/ui/button';
import { Input } from '@/components/ui/input';
import { Card } from '@/components/ui/card';
export default function Chat() {
const {
messages,
input,
handleInputChange,
handleSubmit,
isLoading,
error,
} = useChat({
api: '/api/chat',
});
return (
<div className="max-w-4xl mx-auto p-4">
<Card className="p-6">
<div className="space-y-4 h-96 overflow-y-auto">
{messages.map((message) => (
<div
key={message.id}
className={`p-3 rounded-lg ${
message.role === 'user'
? 'bg-blue-100 ml-12'
: 'bg-gray-100 mr-12'
}`}
>
<div className="font-semibold">
{message.role === 'user' ? 'You' : 'AI'}
</div>
<div className="mt-1">{message.content}</div>
</div>
))}
</div>
{error && (
<div className="text-red-500 p-3 bg-red-50 rounded-lg mt-4">
Error: {error.message}
</div>
)}
<form onSubmit={handleSubmit} className="flex gap-2 mt-4">
<Input
value={input}
onChange={handleInputChange}
placeholder="Type your message..."
disabled={isLoading}
className="flex-1"
/>
<Button type="submit" disabled={isLoading || !input.trim()}>
{isLoading ? 'Sending...' : 'Send'}
</Button>
</form>
</Card>
</div>
);
}
Use the chat component in your pages:
// app/chat/page.tsx
import Chat from '@/components/chat';
export default function ChatPage() {
return (
<div className="container mx-auto py-8">
<h1 className="text-3xl font-bold mb-6">AI Chat</h1>
<Chat />
</div>
);
}
You can switch between different models by passing the model parameter:
// API call with specific model
const response = await fetch('/api/chat?model=anthropic/claude-3.5-sonnet', {
method: 'POST',
body: JSON.stringify({ messages }),
});
For server-side AI generation:
// app/api/generate/route.ts
import { generateText } from 'ai';
import { openrouter, defaultModel } from '@/lib/ai';

/**
 * POST /api/generate — one-shot (non-streaming) server-side generation.
 * Body: { prompt: string }. Returns { text }, or 400 if the prompt is
 * missing or blank.
 */
export async function POST(req: Request) {
  const { prompt } = await req.json();

  // Guard against empty/missing prompts instead of burning a model call.
  if (typeof prompt !== 'string' || !prompt.trim()) {
    return Response.json(
      { error: 'Missing "prompt" in request body' },
      { status: 400 }
    );
  }

  const { text } = await generateText({
    // Reuse the configured default model rather than hard-coding one here.
    model: openrouter.chat(defaultModel),
    prompt,
  });

  return Response.json({ text });
}
Enable tool calling for more advanced AI interactions:
// app/api/chat-with-tools/route.ts
import { streamText, tool } from 'ai';
import { openrouter, defaultModel } from '@/lib/ai';
import { z } from 'zod';

/**
 * POST /api/chat-with-tools — chat completion with tool calling enabled.
 * The model may invoke `getWeather`; tool results are streamed back inline.
 */
export async function POST(req: Request) {
  const { messages } = await req.json();

  const result = streamText({
    // Reuse the configured default model rather than hard-coding one here.
    model: openrouter.chat(defaultModel),
    messages,
    tools: {
      // AI SDK v5: define tools with the `tool()` helper and `inputSchema`
      // (the option was named `parameters` in v4).
      getWeather: tool({
        description: 'Get the current weather for a city',
        inputSchema: z.object({
          city: z.string().describe('The city to get weather for'),
        }),
        execute: async ({ city }) => {
          // Your weather API integration goes here; static data for the demo.
          return { temperature: 72, condition: 'sunny' };
        },
      }),
    },
  });

  return result.toUIMessageStreamResponse();
}
The `useChat` hook returns:
const {
messages, // Array of messages
input, // Current input value
handleInputChange, // Handle input changes
handleSubmit, // Handle form submission
isLoading, // Loading state
error, // Error state
reload, // Reload last message
stop, // Stop streaming
} = useChat({
api: '/api/chat',
onResponse, // Response callback
onError, // Error callback
});
For simple text completion:
import { useCompletion } from '@ai-sdk/react';
const { completion, complete, isLoading } = useCompletion({
api: '/api/completion',
});
'use client';

import { useChat } from '@ai-sdk/react';

/**
 * Demonstrates error handling with useChat: the `onError` callback fires on
 * each failure, and `error` holds the most recent failure for rendering.
 * (AI SDK v5: the hook returns `sendMessage` instead of the v4 input helpers.)
 */
export default function ChatWithErrorHandling() {
  const { messages, sendMessage, error } = useChat({
    onError: (error) => {
      console.error('Chat error:', error);
      // Handle error (show toast, retry, etc.)
    },
  });

  if (error) {
    return (
      <div className="error-container">
        <p>Something went wrong: {error.message}</p>
        <button onClick={() => window.location.reload()}>
          Retry
        </button>
      </div>
    );
  }

  // Your chat UI goes here (render `messages`, submit with `sendMessage`).
  return null;
}
// app/api/chat/route.ts
// Wrap the handler body so provider/setup failures (e.g. a missing API key
// thrown by getOpenRouter) surface as a 500 JSON response instead of an
// unhandled exception.
export async function POST(req: NextRequest) {
try {
const openrouter = getOpenRouter();
// ... rest of the implementation
} catch (error) {
// Log server-side with context; return a generic message to the client
// so internal details don't leak.
console.error('AI API Error:', error);
return Response.json(
{ error: 'Failed to process request' },
{ status: 500 }
);
}
}
Make sure to set your environment variables in your deployment platform:
Vercel: add `OPENROUTER_API_KEY` (and the other `OPENROUTER_*` variables) under Project Settings → Environment Variables.
Netlify: set the same variables under Site Settings → Environment Variables.
Consider implementing rate limiting for your AI endpoints:
// middleware.ts or in your API route
import { NextRequest } from 'next/server';

// Simple in-memory rate limiter. NOTE: this Map is per server instance and
// grows until the process restarts — use a shared store (Redis/Upstash)
// for production serverless deployments.
const rateLimit = new Map<string, { count: number; resetTime: number }>();

export async function POST(req: NextRequest) {
  // `NextRequest` has no `ip` field in route handlers on current Next.js —
  // derive the client IP from the forwarding header set by the platform.
  const ip =
    req.headers.get('x-forwarded-for')?.split(',')[0]?.trim() ?? 'unknown';
  const now = Date.now();
  const windowMs = 60_000; // 1 minute
  const maxRequests = 10;

  const entry = rateLimit.get(ip);
  if (!entry || now > entry.resetTime) {
    // First request, or the previous window expired: (re)start the counter.
    rateLimit.set(ip, { count: 1, resetTime: now + windowMs });
  } else if (entry.count >= maxRequests) {
    return Response.json(
      { error: 'Rate limit exceeded' },
      { status: 429 }
    );
  } else {
    entry.count++;
  }

  // Continue with your AI logic...
}
// lib/ai.test.ts
import { streamCompletion } from './ai';

describe('AI Functions', () => {
  it('should stream completion successfully', async () => {
    // `as const` keeps role as the literal 'user' — a bare string literal
    // widens to `string` and fails streamCompletion's
    // { role: 'user' | 'system' | 'assistant' } parameter type.
    const messages = [{ role: 'user' as const, content: 'Hello' }];
    const result = await streamCompletion(messages);
    expect(result).toBeDefined();
  });
});
Test your API routes:
// app/api/chat/route.test.ts
import { NextRequest } from 'next/server';
describe('/api/chat', () => {
it('should handle chat requests', async () => {
const request = new NextRequest('http://localhost:3000/api/chat', {
method: 'POST',
body: JSON.stringify({
messages: [{ role: 'user', content: 'Test' }]
}),
});
const response = await POST(request);
expect(response.status).toBe(200);
});
});
Missing API Key: verify `OPENROUTER_API_KEY` is set in your environment (locally and on your deployment platform).
Streaming Issues: make sure the route returns the result via `toUIMessageStreamResponse()`.
Model Not Found: confirm the model ID exists on OpenRouter and is spelled exactly as listed.
Rate Limiting: check your OpenRouter usage limits and add server-side rate limiting to your endpoints.
The Vercel AI SDK provides a powerful and flexible way to integrate AI capabilities into your Next.js application. With streaming support, React hooks, and provider abstraction, you can quickly build sophisticated AI-powered features.
This setup with OpenRouter gives you access to hundreds of AI models, making it easy to experiment and find the right model for your use case.
For more advanced features, explore the AI SDK's tool calling, structured outputs, and multi-modal capabilities.