We are building a functional AI agent that can reason, use tools, and iterate — all in about 100 lines of JavaScript. The agent follows the ReAct pattern: Reason about what to do, Act by calling a tool, Observe the result, and repeat until done.
Prerequisites
- Node.js 18+
- Ollama running with a model that supports tool use
Step 1: Define the Tools
Create agent.js. First, define the tools the agent can use:
// Registry of tools the agent may invoke. Each tool carries a human-readable
// description (injected into the system prompt) and an execute function that
// takes a single string input and returns a string result.
//
// SECURITY NOTE: tool inputs come from model output, which is untrusted.
// Each execute function therefore validates its input before acting.
const TOOLS = {
  calculate: {
    description: 'Evaluate a math expression',
    execute: (expr) => {
      // Guardrail: only plain arithmetic characters may reach eval().
      // Unrestricted eval() on model output would allow arbitrary code
      // execution (e.g. `require('child_process')...`).
      if (!/^[\d+\-*/%().\s]+$/.test(expr)) return 'Error: invalid expression';
      try { return String(eval(expr)); }
      catch { return 'Error: invalid expression'; }
    }
  },
  search_files: {
    description: 'Search for files matching a pattern',
    execute: (pattern) => {
      const { execSync } = require('child_process');
      // Guardrail: reject shell metacharacters so the pattern cannot break
      // out of the quoted argument (command injection via `"; rm -rf ."`).
      if (!/^[\w.* /-]+$/.test(pattern)) return 'Error: invalid pattern';
      // -maxdepth is a global option and must precede -name, otherwise
      // find emits a warning on stderr.
      try { return execSync(`find . -maxdepth 3 -name "${pattern}" 2>/dev/null`).toString().trim() || 'No files found'; }
      catch { return 'Search failed'; }
    }
  },
  read_file: {
    description: 'Read contents of a file',
    execute: (path) => {
      const fs = require('fs');
      // Cap at 2000 chars so large files don't blow up the model context.
      try { return fs.readFileSync(path, 'utf-8').slice(0, 2000); }
      catch { return 'Error: cannot read file'; }
    }
  },
  current_time: {
    description: 'Get the current date and time',
    execute: () => new Date().toISOString()
  }
};
Step 2: Build the Agent Loop
// Local Ollama chat endpoint (default port, non-streaming chat API).
const OLLAMA_URL = 'http://localhost:11434/api/chat';
// Any locally pulled model works; models fine-tuned for tool use follow the
// TOOL/ANSWER protocol most reliably.
const MODEL = 'llama3.2';
// Hard cap on reason/act iterations so a confused model cannot loop forever.
const MAX_STEPS = 10;
// System prompt: lists every tool (generated from TOOLS, so new tools appear
// automatically) and pins the exact plain-text reply protocol the agent loop
// parses (either "TOOL:/INPUT:" or "ANSWER:").
const SYSTEM_PROMPT = `You are an AI agent that can use tools to accomplish tasks.
Available tools:
${Object.entries(TOOLS).map(([name, t]) => `- ${name}: ${t.description}`).join('\n')}
To use a tool, respond with EXACTLY this format:
TOOL: tool_name
INPUT: the input for the tool
To give a final answer, respond with:
ANSWER: your final response
Always reason step by step before acting.`;
/**
 * Send the conversation to the local Ollama server and return the
 * assistant's reply text.
 *
 * @param {Array<{role: string, content: string}>} messages - Full chat
 *   history, including the system prompt.
 * @returns {Promise<string>} The model's reply, or '' when the response
 *   carries no message content.
 * @throws {Error} When the server answers with a non-2xx status (e.g. the
 *   model is not pulled), instead of silently returning ''.
 */
async function callOllama(messages) {
  const res = await fetch(OLLAMA_URL, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ model: MODEL, messages, stream: false }),
  });
  if (!res.ok) {
    throw new Error(`Ollama request failed: ${res.status} ${res.statusText}`);
  }
  const data = await res.json();
  return data.message?.content || '';
}
/**
 * Run the ReAct loop for one task: ask the model what to do, execute any
 * requested tool, feed the observation back, and repeat until the model
 * emits a final answer or the step budget runs out.
 *
 * @param {string} task - Natural-language task for the agent.
 * @returns {Promise<string>} The final answer, or a fallback message when
 *   MAX_STEPS is exhausted.
 */
async function runAgent(task) {
  console.log(`\n🤖 Agent Task: ${task}\n`);
  const messages = [
    { role: 'system', content: SYSTEM_PROMPT },
    { role: 'user', content: task },
  ];
  for (let step = 0; step < MAX_STEPS; step++) {
    const response = await callOllama(messages);
    console.log(`\n--- Step ${step + 1} ---\n${response}`);
    messages.push({ role: 'assistant', content: response });
    // A final answer wins even if the reply also contains a tool call.
    const answerMatch = response.match(/ANSWER:\s*(.+)/s);
    if (answerMatch) {
      console.log(`\n✅ Final Answer: ${answerMatch[1].trim()}`);
      return answerMatch[1].trim();
    }
    // Tool call protocol: "TOOL: name\nINPUT: ..." (/s lets INPUT span lines).
    const toolMatch = response.match(/TOOL:\s*(\w+)\s*\nINPUT:\s*(.+)/s);
    if (toolMatch) {
      const [, toolName, toolInput] = toolMatch;
      const tool = TOOLS[toolName.trim()];
      if (tool) {
        const result = tool.execute(toolInput.trim());
        console.log(`\n🔧 ${toolName}(${toolInput.trim()}) → ${result.slice(0, 200)}`);
        // Observations go back as user messages so any chat model can
        // consume them without special tool-role support.
        messages.push({ role: 'user', content: `Tool result for ${toolName}: ${result}` });
      } else {
        messages.push({ role: 'user', content: `Error: unknown tool "${toolName}"` });
      }
    } else {
      // Model neither answered nor called a tool — nudge it back on track.
      messages.push({ role: 'user', content: 'Please use a TOOL or provide an ANSWER.' });
    }
  }
  return 'Agent reached maximum steps without an answer.';
}
// Entry point: read the task from CLI arguments, with a demo fallback.
const task = process.argv.slice(2).join(' ') || 'What time is it and what files are in the current directory?';
// Attach a rejection handler so a failure (e.g. Ollama not running) prints a
// clear error instead of an unhandled promise rejection.
runAgent(task).catch((err) => {
  console.error(`Agent failed: ${err.message}`);
  process.exit(1);
});
Step 3: Test It
node agent.js "What is 42 * 17 and what is the current time?"
node agent.js "Find all JavaScript files in this directory and tell me how many there are"
node agent.js "Read the package.json file and summarize what this project does"
Testing It
The agent should reason about which tools to use, call them in sequence, observe results, and produce a final answer. Watch the step-by-step output to verify the reasoning chain makes sense.
What's Next
Add more tools: web search, database queries, API calls. Add memory so the agent remembers past conversations. Implement guardrails to prevent dangerous tool calls. Or upgrade to a planning architecture for complex multi-step tasks.
Frequently Asked Questions
Why does the agent sometimes ignore tool results?
Some models struggle with the ReAct format. Try a model specifically fine-tuned for tool use, or add explicit instructions to read and use the tool result in the system prompt.
How do I add new tools?
Add an entry to the TOOLS object with a description and execute function. The system prompt automatically updates to list all available tools.