Skip to content

Next JS (LangGraph)

If your AI chatbot is running on a Next.js backend with LangGraph.js, use this guide to send user conversation data to Growl.

If you’re using the streamEvents or stream function to generate LLM responses, you can send conversation data to Growl by calling the after function.

import { NextRequest, after } from "next/server";
import { Message as VercelChatMessage, StreamingTextResponse } from "ai";
import { createReactAgent } from "@langchain/langgraph/prebuilt";
import { ChatOpenAI } from "@langchain/openai";
import { AIMessage, ChatMessage, HumanMessage } from "@langchain/core/messages";
import recordGrowlEvent from "./growl";
/**
 * Maps a Vercel AI SDK chat message onto the matching LangChain message class.
 * Any role other than "user"/"assistant" falls back to a generic ChatMessage
 * that preserves the original role string.
 */
const convertVercelMessageToLangChainMessage = (message: VercelChatMessage) => {
  switch (message.role) {
    case "user":
      return new HumanMessage(message.content);
    case "assistant":
      return new AIMessage(message.content);
    default:
      return new ChatMessage(message.content, message.role);
  }
};
/**
 * POST handler: streams a LangGraph agent response back to the client and,
 * after the response has been sent, records the conversation to Growl via
 * Next.js's `after()` callback.
 *
 * Fixes over the naive version:
 * - `visitor_id` is documented as required, so it is actually validated.
 * - An empty message list previously caused `messages[messages.length - 1]`
 *   to throw *inside* the `after()` callback, where the failure is silent;
 *   we now reject such requests up front with a 400.
 */
export async function POST(req: NextRequest) {
  const body = await req.json();

  // visitor_id is required - must be obtained from window.GrowlAds.getVisitorId() on client-side
  const visitor_id: string = body.visitor_id;
  const messages = (body.messages ?? [])
    .filter(
      (message: VercelChatMessage) =>
        message.role === "user" || message.role === "assistant",
    )
    .map(convertVercelMessageToLangChainMessage);

  // Fail fast: without these, the Growl call in after() would crash silently.
  if (!visitor_id || messages.length === 0) {
    return new Response("visitor_id and at least one message are required", {
      status: 400,
    });
  }

  const chat = new ChatOpenAI({
    model: "gpt-4o-mini",
    temperature: 0,
  });
  const agent = createReactAgent({
    llm: chat,
    tools: [],
  });
  const eventStream = await agent.streamEvents({ messages }, { version: "v2" });

  // Accumulates every streamed text chunk so the full AI reply can be
  // reported to Growl once streaming finishes.
  const summary = {
    textParts: [] as string[],
    fullText: "",
  };

  // Capture headers now; the request object may not be usable by the time
  // after() runs.
  const headersObject = Object.fromEntries(req.headers.entries());

  after(async () => {
    summary.fullText = summary.textParts.join("");
    await recordGrowlEvent({
      publisher_id: "<publisher_id>",
      user_id: "<user-id>",
      user_email: "<user-email>",
      visitor_id: visitor_id,
      chat_id: "<chat-id>",
      headers: headersObject,
      // Safe: the guard above ensures messages is non-empty.
      user_message: { text: messages[messages.length - 1].content },
      ai_message: { text: summary.fullText },
    });
  });

  const textEncoder = new TextEncoder();
  const transformStream = new ReadableStream({
    async start(controller) {
      for await (const { event, data } of eventStream) {
        // Intermediate chat model generations contain tool calls and no
        // content, so only forward chunks that carry text.
        if (event === "on_chat_model_stream" && data.chunk.content) {
          controller.enqueue(textEncoder.encode(data.chunk.content));
          summary.textParts.push(data.chunk.content); // store the entire text
        }
      }
      controller.close();
    },
  });
  return new StreamingTextResponse(transformStream);
}

If you’re using the invoke function, you can schedule the HTTP call to Growl using the after function provided by the Next.js SDK.

import { NextRequest, NextResponse, after } from "next/server";
import { Message as VercelChatMessage, StreamingTextResponse } from "ai";
import { createReactAgent } from "@langchain/langgraph/prebuilt";
import { ChatOpenAI } from "@langchain/openai";
import { AIMessage, ChatMessage, HumanMessage } from "@langchain/core/messages";
import recordGrowlEvent from "./growl";
/**
 * Converts a Vercel AI SDK chat message into the equivalent LangChain
 * message type. Unknown roles are wrapped in a ChatMessage so the role
 * string is preserved.
 */
const convertVercelMessageToLangChainMessage = (message: VercelChatMessage) => {
  switch (message.role) {
    case "user":
      return new HumanMessage(message.content);
    case "assistant":
      return new AIMessage(message.content);
    default:
      return new ChatMessage(message.content, message.role);
  }
};
/**
 * POST handler: runs the LangGraph agent to completion with `invoke()`,
 * returns the final AI message, and schedules the Growl recording with
 * Next.js's `after()` so it does not delay the response.
 *
 * Fixes over the naive version:
 * - `visitor_id` is documented as required, so it is actually validated.
 * - An empty message list previously caused `messages[messages.length - 1]`
 *   to throw; we now reject such requests up front with a 400.
 */
export async function POST(req: NextRequest) {
  const body = await req.json();

  // visitor_id is required - must be obtained from window.GrowlAds.getVisitorId() on client-side
  const visitor_id: string = body.visitor_id;
  const messages = (body.messages ?? [])
    .filter(
      (message: VercelChatMessage) =>
        message.role === "user" || message.role === "assistant"
    )
    .map(convertVercelMessageToLangChainMessage);

  // Fail fast: without these, the Growl call in after() would crash silently.
  if (!visitor_id || messages.length === 0) {
    return NextResponse.json(
      { error: "visitor_id and at least one message are required" },
      { status: 400 }
    );
  }

  const chat = new ChatOpenAI({
    model: "gpt-4o-mini",
    temperature: 0,
  });
  const agent = createReactAgent({
    llm: chat,
    tools: [],
  });
  const result = await agent.invoke({ messages });

  // Capture headers now; the request object may not be usable by the time
  // after() runs.
  const headersObject = Object.fromEntries(req.headers.entries());

  after(async () => {
    await recordGrowlEvent({
      publisher_id: "<publisher_id>",
      user_id: "<user-id>",
      user_email: "<user-email>",
      visitor_id: visitor_id,
      chat_id: "<chat-id>",
      headers: headersObject,
      // Safe: the guard above ensures messages is non-empty.
      user_message: { text: messages[messages.length - 1].content },
      ai_message: {
        text: result.messages[result.messages.length - 1].content,
      },
    });
  });

  return NextResponse.json(
    result.messages[result.messages.length - 1].content,
    { status: 200 }
  );
}