COOKIES! This blog uses cookies!
I am completely out of control of cookies here, otherwise I would have disabled them (it is controlled by the platform).
If you don't like cookies and being tracked please leave this blog immediately.

Monday, 22 September 2025

Simple LangGraph agent workflow with checkbox as a tool

First of all, we need the dependencies.

In my case I installed `@langchain/core`, `@langchain/langgraph` and an LLM provider package — in this case `@langchain/google-vertexai` — but any other provider may be used.

package.json

{
  "name": "langchain-experiments",
  "version": "1.0.0",
  "description": "",
  "license": "ISC",
  "author": "",
  "type": "module",
  "main": "index.js",
  "scripts": {
    "test": "echo \"Error: no test specified\" && exit 1"
  },
  "dependencies": {
    "@langchain/core": "^0.3.77",
    "@langchain/google-vertexai": "^0.2.18",
    "@langchain/langgraph": "^0.4.9",
    "zod": "^3.25.76"
  }
}

Now, let's create an agent. It resembles a ReAct agent; however, this one has some custom edges and is generally simpler.

agent.js

import {MessagesAnnotation, StateGraph} from "@langchain/langgraph";
import {ToolNode} from "@langchain/langgraph/prebuilt";
import {HumanMessage} from "@langchain/core/messages";

/**
 * Builds a simple LangGraph agent that resembles a ReAct agent but with
 * custom edges: after every tool call the current checklist state is
 * injected back into the conversation, and the graph terminates only
 * once every checklist item is marked complete.
 *
 * @param {object} params
 * @param {object} params.llm - A chat model supporting `bindTools`.
 * @param {Array} params.tools - Tools the model is allowed to call.
 * @param {Object<string, boolean>} params.checklist - Mutable map of
 *   checklist item name -> completion flag; tools flip entries to true.
 * @returns {object} The compiled LangGraph workflow.
 */
export function myAgent(params) {
  const {llm, tools, checklist} = params;
  const llmWithTools = llm.bindTools(tools);

  const toolNode = new ToolNode(tools);

  // Node: report the current checklist state back to the model as a message.
  function printChecklist() {
    console.log(checklist);
    const checklistMessage = "checklist: " + JSON.stringify(checklist);
    return {messages: [new HumanMessage(checklistMessage)]};
  }

  // Conditional edge: route to the "tools" node when the LLM requested a
  // tool call; otherwise go print the checklist again.
  function shouldContinue({messages}) {
    const lastMessage = messages[messages.length - 1];
    if (lastMessage.tool_calls?.length) {
      console.log("Continuing to tools");
      return "tools";
    }
    console.log("Continuing to print_checklist");
    return "print_checklist";
  }

  // Conditional edge: finish at the special __end__ node when every
  // checklist item is complete; otherwise hand control back to the agent.
  function isChecklistComplete() {
    if (Object.values(checklist).every((v) => v === true)) {
      console.log("Checklist complete!");
      return "__end__";
    }
    console.log("Checklist not complete!");
    return "agent";
  }

  // Node: invoke the model with the accumulated conversation.
  // This is likely to fail here if no tools are provided.
  async function callModel(state) {
    console.log("Calling model...");
    const response = await llmWithTools.invoke(state.messages);
    console.log(`Agent: ${response.content}`);
    // Return a list; LangGraph appends it to the existing message list.
    return {messages: [response]};
  }

  const workflow = new StateGraph(MessagesAnnotation);
  workflow.addNode("print_checklist", printChecklist);
  workflow.addNode("agent", callModel);
  workflow.addNode("tools", toolNode);
  workflow.addEdge("__start__", "print_checklist");
  workflow.addEdge("tools", "print_checklist");
  workflow.addConditionalEdges("print_checklist", isChecklistComplete);
  workflow.addConditionalEdges("agent", shouldContinue);

  return workflow.compile();
}

Entry point to launch the agent:

index.js

import {ChatVertexAI} from "@langchain/google-vertexai";
import {HumanMessage, SystemMessage} from "@langchain/core/messages";
import {myAgent} from "./agent.js";
import {z} from "zod";
import {tool} from "@langchain/core/tools";

// Mutable checklist shared with the agent; the tool flips items to true.
const checklist = {
  "say_hello": false,
  "say_how_are_you": false,
};

// Tool metadata: the model invokes it with a single string (the item name).
const completeChecklistItemToolDefinition = {
  name: 'complete_checklist_item',
  description: `Tool to Complete one Checklist Item. Use example: complete_checklist_item "task_1"`,
  schema: z.string(),
};

/**
 * Marks a checklist item as complete.
 *
 * @param {string} s - Checklist item name supplied by the model.
 * @returns {string} Human-readable result message fed back to the model.
 */
const completeChecklistItemToolImpl = (s) => {
  let message;
  // Object.hasOwn guards against inherited keys: `s in checklist` would
  // wrongly accept model input like "toString" and pollute the checklist.
  if (Object.hasOwn(checklist, s)) {
    checklist[s] = true;
    message = `Completed Checklist Item '${s}'`;
  } else {
    message = `Checklist item '${s}' not found.`;
  }
  console.log(message);
  return message;
};

const completeChecklistItemTool = tool(completeChecklistItemToolImpl, completeChecklistItemToolDefinition);

// Vertex AI chat model; any LangChain chat model with tool support works.
const llm = new ChatVertexAI({
  model: 'gemini-2.5-pro',
});

const tools = [completeChecklistItemTool];

export const agent = myAgent({
  llm,
  tools,
  checklist,
});

// Top-level await is available because package.json declares "type": "module".
// Awaiting surfaces invocation errors instead of leaving a floating promise
// whose rejection would go unhandled.
await agent.invoke({
  messages: [
    new SystemMessage("You must use complete_checklist_item to complete all checklist items one by one."),
    new HumanMessage("Hello"),
  ],
});


Expected output:

{ say_hello: false, say_how_are_you: false }

Checklist not complete!

Calling model...

Agent: 

Continuing to tools

Completed Checklist Item 'say_hello'

{ say_hello: true, say_how_are_you: false }

Checklist not complete!

Calling model...

Agent: 

Continuing to tools

Completed Checklist Item 'say_how_are_you'

{ say_hello: true, say_how_are_you: true }

Checklist complete!