Building Memory-Enhanced AI Agents for Business Problems
1. Introduction
This document provides a structured approach to building AI agents with long-term memory using LangGraph and LangMem. The approach integrates three key types of memory:
Semantic Memory – Stores factual knowledge.
Episodic Memory – Captures past interactions and examples.
Procedural Memory – Learns from feedback and improves responses.
This strategy can be applied to various business problems, including:
Customer Support Automation
Financial Risk Assessment
Legal Document Analysis
Supply Chain Optimization
HR and Employee Engagement
The framework ensures scalability, adaptability, and personalization of AI agents.
2. System Workflow
Step 1: Setting Up the Environment
import os
from dotenv import load_dotenv
from typing import TypedDict, Literal, Annotated
from langgraph.graph import StateGraph, START, END, add_messages
from langchain.chat_models import init_chat_model
from langchain_core.tools import tool
from langchain.prompts import PromptTemplate
from langchain.schema import HumanMessage
from pydantic import BaseModel, Field
from langgraph.store.memory import InMemoryStore  # Memory storage backend
from langmem import create_manage_memory_tool, create_search_memory_tool  # LangMem tools

# Load environment variables (expects OPENAI_API_KEY in a local .env file).
load_dotenv()

# Initialize the language model used both for routing and for responses.
llm = init_chat_model("openai:gpt-4o-mini")

# Initialize the memory store (in-memory for simplicity; not persistent
# across restarts). The embedding model powers semantic search over memories.
store = InMemoryStore(index={"embed": "openai:text-embedding-3-small"})
Step 2: Defining Agent State
class State(TypedDict):
    """Shared graph state passed between LangGraph nodes."""

    input_data: dict  # Incoming business problem data (expects a "description" key — see usage below)
    messages: Annotated[list, add_messages]  # Conversation history; add_messages appends rather than overwrites
    classification: str  # Decision made by the router ("ignore" | "process" | "escalate")
This state ensures structured storage of inputs, past interactions, and classification results.
Step 3: Implementing Decision Logic (Episodic Memory)
Defining a Structured Router
class Router(BaseModel):
    """Structured output schema: the LLM must justify, then classify."""

    reasoning: str = Field(description="Step-by-step reasoning behind classification.")
    classification: Literal["ignore", "process", "escalate"] = Field(
        description="Classification of the business problem."
    )

# LLM wrapper that returns a validated Router instance instead of free text.
llm_router = llm.with_structured_output(Router)
This router returns a structured classification with its reasoning; the classification node below supplies previous examples from episodic memory as few-shot context.
Enhancing Decision-Making with Memory
def format_few_shot_examples(examples):
    """Render stored episodic-memory items as few-shot text.

    Each item is expected to expose ``.value`` holding a dict with
    ``input_data.description`` and ``classification``. Returns the rendered
    examples separated by blank lines ("" for an empty list).
    """
    rendered = []
    for example in examples:
        record = example.value
        rendered.append(
            f"Problem: {record['input_data']['description']}\n"
            f"Category: {record['classification']}"
        )
    return "\n\n".join(rendered)
def classify_business_problem(state: "State", config: dict, store: "InMemoryStore") -> dict:
    """Classify the incoming business problem using episodic memory.

    Retrieves semantically similar past examples from the per-user
    ``("business_agent", user_id, "examples")`` namespace, formats them as
    few-shot context, and asks the structured router LLM for a decision.

    Args:
        state: Graph state; reads ``state["input_data"]`` (assumes a
            "description" key — TODO confirm schema with callers).
        config: LangGraph config; reads ``config["configurable"]["user_id"]``.
        store: Memory store searched for prior labeled examples.

    Returns:
        ``{"classification": <"ignore" | "process" | "escalate">}``
    """
    input_data = state["input_data"]
    user_id = config["configurable"]["user_id"]
    namespace = ("business_agent", user_id, "examples")

    # Episodic memory: retrieve examples similar to the current problem.
    examples = store.search(namespace, query=str(input_data))
    formatted_examples = format_few_shot_examples(examples)

    # BUG FIX: the original template contained a "Category: {category}"
    # placeholder, but input_data only carries "description" (see the sample
    # invocation), so .format(**input_data) raised KeyError. The category is
    # the model's output, not an input — it does not belong in the template.
    prompt_template = PromptTemplate.from_template("""Classify the business problem:
Problem: {description}
Here are some previous classifications:
{examples}
""")
    prompt = prompt_template.format(examples=formatted_examples, **input_data)

    result = llm_router.invoke([HumanMessage(content=prompt)])
    return {"classification": result.classification}
Step 4: Action Execution (Semantic Memory)
Defining AI Tools
@tool
def send_notification(to: str, subject: str, content: str) -> str:
    """Send a notification email or message."""
    # Simulated delivery: log the outgoing message to stdout and report
    # success to the calling agent.
    confirmation = f"Notification sent to {to}"
    print(f"{confirmation}: {subject}\n{content}")
    return confirmation
@tool
def query_database(query: str) -> str:
    """Retrieve information from a structured database."""
    # Placeholder implementation: echoes the query instead of hitting a
    # real database.
    response = f"Querying database for: {query}"
    return response
Step 5: Enhancing Responses (Procedural Memory)
def create_response_prompt(state, config, store):
    """Build the chat prompt for the response agent (procedural memory).

    Looks up the per-user system prompt stored under the
    ``("business_agent", user_id, "prompts")`` namespace and prepends it to
    the conversation history.

    Robustness fix: the original called ``.value`` directly on the result of
    ``store.get``, which raised AttributeError whenever no prompt had been
    stored yet (nothing in this file seeds one). A generic default is used
    in that case instead.
    """
    user_id = config["configurable"]["user_id"]
    item = store.get(("business_agent", user_id, "prompts"), "response_prompt")
    # Fall back to a neutral instruction until feedback has stored a prompt.
    if item is not None:
        system_prompt = item.value
    else:
        system_prompt = "You are a helpful business assistant."
    return [{"role": "system", "content": system_prompt}] + state["messages"]
from langgraph.prebuilt import create_react_agent  # NOTE(review): mid-file import — consider moving to the top-of-file import block

# ReAct-style agent that answers using the tools above. Its system prompt is
# assembled per request by create_response_prompt, so feedback written to the
# store (procedural memory) changes future behavior without redeployment.
response_agent = create_react_agent(
    tools=[send_notification, query_database],
    prompt=create_response_prompt,
    store=store,
    model=llm
)
Step 6: Building and Compiling the Graph
# Assemble the graph: a classification node that may hand off to the
# response agent. The lambda injects the module-level store into the node.
workflow = StateGraph(State)
workflow.add_node("classify", lambda state, config: classify_business_problem(state, config, store))
workflow.add_node("response_agent", response_agent)
def route_based_on_classification(state):
    """Pick the next node: only "process" problems reach the response agent."""
    if state["classification"] == "process":
        return "response_agent"
    return END
workflow.add_edge(START, "classify")
# Only "process" classifications continue to the response agent; "ignore"
# and "escalate" terminate the run at END.
workflow.add_conditional_edges("classify", route_based_on_classification,
    {"response_agent": "response_agent", END: END})

# Compile the agent; the store is attached so nodes can read/write memories.
agent = workflow.compile(store=store)
Step 7: Running the Agent
# Example invocation: stream the compiled agent over a sample complaint.
# NOTE(review): input_data carries only "description" — keep the prompt
# template's placeholders in sync with this schema.
input_data = {"description": "Customer raised a fraud complaint regarding an unauthorized transaction."}
config = {"configurable": {"user_id": "test_user"}}
inputs = {"input_data": input_data, "messages": []}
for output in agent.stream(inputs, config=config):
    # Each streamed item maps a node name to that node's state update.
    for key, value in output.items():
        print(f"-----\n{key}:")
        print(value)
        print("-----")
Step 8: Improving the Agent with Feedback
def optimize_prompts(feedback: str, config: dict, store: "InMemoryStore") -> str:
    """Append user feedback to the stored response prompt (procedural memory).

    Robustness fix: the original called ``.value`` on the result of
    ``store.get`` unconditionally, crashing with AttributeError the first
    time feedback arrived (before any prompt existed). A missing prompt is
    now treated as empty.

    Args:
        feedback: Free-text instruction to append to the system prompt.
        config: LangGraph config; reads ``config["configurable"]["user_id"]``.
        store: Memory store holding the per-user "response_prompt" entry.

    Returns:
        A confirmation message.
    """
    user_id = config["configurable"]["user_id"]
    namespace = ("business_agent", user_id, "prompts")
    item = store.get(namespace, "response_prompt")
    current_prompt = item.value if item is not None else ""
    store.put(namespace, "response_prompt", current_prompt + f"\n\n{feedback}")
    return "Prompts optimized based on feedback!"
3. Conclusion
This AI agent template provides a scalable and adaptable solution for business automation, improving efficiency through continuous learning and memory integration.