Skip to main content
UI

AI SDK UI Integration

VoltAgent works with Vercel's AI SDK UI hooks for building chat interfaces. This guide shows how to integrate VoltAgent with useChat.

Prerequisites

npm install ai @ai-sdk/react
# or
pnpm add ai @ai-sdk/react

Endpoints

VoltAgent provides two streaming endpoints:

  • /agents/:id/chat - UI message stream (useChat compatible)
  • /agents/:id/stream - Raw fullStream events

Use /chat for UI integration with useChat. Use /stream when you need low-level events such as reasoning-start, reasoning-delta, and reasoning-end.

Basic Implementation

import { useChat } from '@ai-sdk/react';
import { DefaultChatTransport } from 'ai';
import { useCallback } from 'react';

function ChatComponent() {
  const agentId = 'your-agent-id';
  const apiUrl = 'http://localhost:3141';

  const createTransport = useCallback(() => {
    return new DefaultChatTransport({
      api: `${apiUrl}/agents/${agentId}/chat`,
      prepareSendMessagesRequest({ messages }) {
        // VoltAgent expects only the last message; conversation history
        // is restored server-side from memory.
        const lastMessage = messages[messages.length - 1];

        return {
          body: {
            input: [lastMessage], // Array of UIMessage
            options: {}
          }
        };
      }
    });
  }, [apiUrl, agentId]);

  // AI SDK v5: useChat exposes `status` — `isLoading` no longer exists.
  const {
    messages,
    sendMessage,
    status,
    stop
  } = useChat({
    transport: createTransport()
  });

  return (
    <div>
      {messages.map(msg => (
        <div key={msg.id}>
          <strong>{msg.role}:</strong>{' '}
          {/* v5 UIMessage has no `content` field — text lives in `parts` */}
          {msg.parts
            .filter(part => part.type === 'text')
            .map((part, i) => (
              <span key={i}>{part.text}</span>
            ))}
        </div>
      ))}
    </div>
  );
}

With Memory and Context

function ChatWithMemory() {
  // Defined here so the snippet is self-contained — in a real app these
  // would typically come from props or configuration.
  const agentId = 'your-agent-id';
  const apiUrl = 'http://localhost:3141';

  const [userId] = useState("user-123");
  const [conversationId] = useState(() => crypto.randomUUID());

  const createTransport = useCallback(() => {
    return new DefaultChatTransport({
      api: `${apiUrl}/agents/${agentId}/chat`,
      prepareSendMessagesRequest({ messages }) {
        const lastMessage = messages[messages.length - 1];

        return {
          body: {
            input: [lastMessage],
            options: {
              // Memory: scopes persistence to this user + conversation
              memory: {
                userId,
                conversationId,
              },

              // Model parameters
              temperature: 0.7,
              maxOutputTokens: 4000,
              maxSteps: 10,

              // Context: arbitrary key/value data exposed to the agent
              context: {
                role: "admin",
                timezone: Intl.DateTimeFormat().resolvedOptions().timeZone,
              },
            },
          },
        };
      },
    });
    // apiUrl and agentId are closed over, so they belong in the deps list
  }, [apiUrl, agentId, userId, conversationId]);

  const { messages, sendMessage } = useChat({
    transport: createTransport(),
  });

  // Your UI...
}

File Attachments

function ChatWithFiles() {
  // Defined here so the snippet is self-contained.
  const agentId = 'your-agent-id';
  const apiUrl = 'http://localhost:3141';

  const [selectedFiles, setSelectedFiles] = useState<FileList | null>(null);
  const fileInputRef = useRef<HTMLInputElement>(null);

  const createTransport = useCallback(() => {
    return new DefaultChatTransport({
      api: `${apiUrl}/agents/${agentId}/chat`,
      prepareSendMessagesRequest({ messages }) {
        const lastMessage = messages[messages.length - 1];

        return {
          body: {
            input: [lastMessage],
            options: {
              memory: {
                userId: 'user-123'
              }
            }
          }
        };
      }
    });
  }, [apiUrl, agentId]);

  const { messages, sendMessage: sendAIMessage } = useChat({
    transport: createTransport()
  });

  const handleSubmit = async (text: string) => {
    // Send with files using AI SDK's native format — `files` accepts a
    // FileList directly.
    await sendAIMessage({
      text,
      ...(selectedFiles && { files: selectedFiles })
    });

    // Clear files after sending so they are not re-sent next turn
    setSelectedFiles(null);
    if (fileInputRef.current) {
      fileInputRef.current.value = '';
    }
  };

  return (
    <div>
      <input
        ref={fileInputRef}
        type="file"
        multiple
        onChange={(e) => setSelectedFiles(e.target.files)}
      />

      {/* Show selected files */}
      {selectedFiles && Array.from(selectedFiles).map((file, i) => (
        <div key={i}>{file.name}</div>
      ))}

      {/* Your chat UI... */}
    </div>
  );
}

Handling Stream States

function StreamingChat() {
  const { messages, status, stop } = useChat({
    // createTransport is the helper shown in the earlier examples
    transport: createTransport()
  });

  // AI SDK v5 status values: 'ready' | 'submitted' | 'streaming' | 'error'
  // (there is no 'idle' — the resting state is 'ready')

  return (
    <div>
      {status === 'submitted' && <div>Sending...</div>}
      {status === 'streaming' && (
        <div>
          Agent is typing...
          <button onClick={stop}>Stop</button>
        </div>
      )}

      {messages.map(msg => (
        <div key={msg.id}>
          {/* v5 UIMessage has no `content` — render its text parts */}
          {msg.parts
            .filter(part => part.type === 'text')
            .map((part, i) => (
              <span key={i}>{part.text}</span>
            ))}
        </div>
      ))}
    </div>
  );
}

Tool Calls Display

function MessageWithTools({ message }) {
  // AI SDK v5 removed `message.toolInvocations`; tool calls arrive as
  // message parts typed `tool-<toolName>` carrying `input`/`output`/`state`.
  return (
    <div>
      {message.parts.map((part, i) => {
        if (part.type === 'text') {
          return <p key={i}>{part.text}</p>;
        }

        if (part.type.startsWith('tool-')) {
          return (
            <div key={i}>
              <strong>Tool: {part.type.slice('tool-'.length)}</strong>
              <pre>{JSON.stringify(part.input, null, 2)}</pre>
              {/* Output only exists once the tool finished */}
              {part.state === 'output-available' && (
                <div>Result: {JSON.stringify(part.output)}</div>
              )}
            </div>
          );
        }

        return null;
      })}
    </div>
  );
}

Complete Example

import { useChat } from '@ai-sdk/react';
import { DefaultChatTransport } from 'ai';
import { useCallback, useState, useRef } from 'react';

export function ChatInterface() {
  const [input, setInput] = useState('');
  const [userId] = useState('user-123');
  const [conversationId, setConversationId] = useState(() => crypto.randomUUID());

  const createTransport = useCallback(() => {
    return new DefaultChatTransport({
      api: `${process.env.NEXT_PUBLIC_API_URL}/agents/assistant/chat`,
      prepareSendMessagesRequest({ messages }) {
        // VoltAgent only needs the newest message; the server restores
        // the rest of the conversation from memory.
        const lastMessage = messages[messages.length - 1];

        return {
          body: {
            input: [lastMessage],
            options: {
              memory: {
                userId,
                conversationId,
              },
              temperature: 0.7,
              maxSteps: 10
            }
          }
        };
      }
    });
  }, [userId, conversationId]);

  const {
    messages,
    sendMessage,
    stop,
    status,
    setMessages
  } = useChat({
    transport: createTransport(),
    onFinish: () => {
      console.log('Message completed');
    },
    onError: (error) => {
      console.error('Chat error:', error);
    }
  });

  const handleSubmit = async (e: React.FormEvent) => {
    e.preventDefault();
    if (!input.trim()) return;

    // AI SDK v5: sendMessage takes an object, not a bare string
    await sendMessage({ text: input });
    setInput('');
  };

  // Abort any in-flight stream, clear the UI, and start a fresh thread
  const resetConversation = () => {
    stop();
    setMessages([]);
    setConversationId(crypto.randomUUID());
  };

  return (
    <div className="chat-container">
      <div className="messages">
        {messages.map(message => (
          <div key={message.id} className={`message ${message.role}`}>
            <strong>{message.role}:</strong>
            <div>
              {/* v5 UIMessage stores text in `parts`, not `content` */}
              {message.parts
                .filter(part => part.type === 'text')
                .map((part, i) => (
                  <span key={i}>{part.text}</span>
                ))}
            </div>
          </div>
        ))}

        {status === 'streaming' && (
          <div className="typing-indicator">Agent is typing...</div>
        )}
      </div>

      <form onSubmit={handleSubmit}>
        <input
          value={input}
          onChange={(e) => setInput(e.target.value)}
          placeholder="Type a message..."
          disabled={status === 'streaming'}
        />

        <button type="submit" disabled={!input.trim() || status === 'streaming'}>
          Send
        </button>

        {status === 'streaming' && (
          <button type="button" onClick={stop}>
            Stop
          </button>
        )}

        <button type="button" onClick={resetConversation}>
          Clear
        </button>
      </form>
    </div>
  );
}

Request Options

VoltAgent Specific

| Option | Type | Description |
| --- | --- | --- |
| memory | object | Runtime memory envelope (preferred) |
| memory.userId | string | User identifier for memory persistence |
| memory.conversationId | string | Conversation thread ID |
| context | object | Dynamic context (converted to Map internally) |
| memory.options.contextLimit | number | Number of previous messages to include from memory |
| memory.options.readOnly | boolean | Read memory context but skip all memory writes for this call |
| memory.options.conversationPersistence.mode | string | "step" (default) or "finish" |
| memory.options.conversationPersistence.debounceMs | number | Debounce window in milliseconds (default: 200) |
| memory.options.conversationPersistence.flushOnToolResult | boolean | Flush immediately on tool-result/tool-error in step mode (default: true) |
| userId | string | Deprecated: use memory.userId |
| conversationId | string | Deprecated: use memory.conversationId |
| contextLimit | number | Deprecated: use memory.options.contextLimit |
| semanticMemory | object | Deprecated: use memory.options.semanticMemory |
| conversationPersistence.mode | string | Deprecated: use memory.options.conversationPersistence.mode |
| conversationPersistence.debounceMs | number | Deprecated: use memory.options.conversationPersistence.debounceMs |
| conversationPersistence.flushOnToolResult | boolean | Deprecated: use memory.options.conversationPersistence.flushOnToolResult |

Example:

options: {
memory: {
userId,
conversationId,
options: {
readOnly: false,
conversationPersistence: {
mode: "step",
debounceMs: 200,
flushOnToolResult: true,
},
},
},
}

When both top-level legacy memory fields and memory envelope fields are provided, memory values are used.

Set memory.options.readOnly: true to load memory context without persisting new messages for that request.

AI SDK Core Options

| Option | Type | Default | Description |
| --- | --- | --- | --- |
| temperature | number | 0.7 | Controls randomness (0-1) |
| maxOutputTokens | number | 4000 | Maximum tokens to generate |
| maxTokens | number | - | Alias for maxOutputTokens |
| maxSteps | number | 5 | Maximum tool-use iterations |
| topP | number | - | Nucleus sampling (0-1) |
| topK | number | - | Sample from top K options |
| frequencyPenalty | number | 0 | Repeat penalty for words/phrases (0-2) |
| presencePenalty | number | 0 | Repeat penalty for information (0-2) |
| seed | number | - | Random seed for deterministic results |
| stopSequences | string[] | - | Sequences that halt generation |
| maxRetries | number | 2 | Number of retry attempts |

Provider-Specific Options

| Option | Type | Description |
| --- | --- | --- |
| providerOptions | object | Provider-specific settings |
| providerOptions.openai.reasoningEffort | string | OpenAI reasoning effort (e.g. "low", "medium") |
| providerOptions.openai.textVerbosity | string | OpenAI verbosity ("low", "medium", "high") |
| providerOptions.anthropic.sendReasoning | boolean | Include Anthropic reasoning metadata |
| providerOptions.google.thinkingConfig | object | Gemini thinking budget/configuration |
| providerOptions.xai.reasoningEffort | string | xAI reasoning effort |
| providerOptions.extraOptions | object | Additional provider-specific options |
| providerOptions.onStepFinish | function | Callback when a step completes |

Semantic Memory Options

| Option | Type | Description |
| --- | --- | --- |
| memory.options.semanticMemory | object | Configuration for semantic search |
| memory.options.semanticMemory.enabled | boolean | Enable semantic retrieval for this call |
| memory.options.semanticMemory.semanticLimit | number | Maximum similar messages to retrieve |
| memory.options.semanticMemory.semanticThreshold | number | Minimum similarity score |
| memory.options.semanticMemory.mergeStrategy | string | "prepend", "append", or "interleave" |

useChat Hook Options

| Option | Type | Description |
| --- | --- | --- |
| transport | DefaultChatTransport | Required for VoltAgent |
| onFinish | (message) => void | Stream complete callback |
| onError | (error) => void | Error handler |
| initialMessages | UIMessage[] | Pre-load messages |

Troubleshooting

Agent not found

  • Check agent ID matches registered agent
  • Verify API URL
  • Ensure agent is running

Stream not working

  • Use /chat endpoint, not /stream
  • Check transport configuration
  • Verify request body format

Messages not persisting

  • Include options.memory.userId
  • Use consistent options.memory.conversationId
  • Check agent memory configuration

CORS errors

  • Configure CORS on VoltAgent server
  • Use proxy in development

Table of Contents