Building a Chatbot with Long-Term Memory
Learn how to create an intelligent chatbot that remembers user preferences and conversation history, and learns from its interactions.
Overview
This example demonstrates how to build a chatbot that:
- Remembers user preferences and context
- Learns from conversation patterns
- Provides personalized responses
- Maintains conversation continuity across sessions

Complete Implementation
Python Implementation
```python
import asyncio
import json
import os
from datetime import datetime
from typing import List, Dict, Optional
from recall import RecallClient
from openai import OpenAI


class MemoryBot:
    """An intelligent chatbot with long-term memory."""

    def __init__(self, recall_client: RecallClient, openai_client: OpenAI):
        self.recall = recall_client
        self.openai = openai_client
        self.conversation_buffer = []

    async def process_message(
        self,
        user_id: str,
        message: str,
        session_id: Optional[str] = None
    ) -> str:
        """Process a user message and generate a response."""

        # 1. Retrieve relevant memories
        memories = await self._get_relevant_memories(user_id, message)

        # 2. Build context from memories
        context = self._build_context(memories)

        # 3. Generate response
        response = await self._generate_response(
            message=message,
            context=context,
            user_id=user_id
        )

        # 4. Store interaction as memory
        await self._store_interaction(
            user_id=user_id,
            user_message=message,
            bot_response=response,
            session_id=session_id
        )

        # 5. Extract and store learnings
        await self._extract_learnings(
            user_id=user_id,
            message=message,
            response=response
        )

        return response

    async def _get_relevant_memories(
        self,
        user_id: str,
        message: str
    ) -> Dict[str, List[Dict]]:
        """Retrieve memories relevant to the current message."""

        # Search for directly relevant memories
        direct_memories = await self.recall.search(
            query=message,
            user_id=user_id,
            limit=5,
            threshold=0.7
        )

        # Get recent conversation context
        recent_context = await self.recall.search(
            query="recent conversation",
            user_id=user_id,
            filters={"type": "conversation"},
            limit=3
        )

        # Get user preferences
        preferences = await self.recall.search(
            query="user preferences settings",
            user_id=user_id,
            filters={"type": "preference"},
            limit=3
        )

        return {
            "relevant": direct_memories,
            "recent": recent_context,
            "preferences": preferences
        }

    def _build_context(self, memories: Dict) -> str:
        """Build context string from memories."""
        context_parts = []

        # Add user preferences
        if memories["preferences"]:
            prefs = [m["content"] for m in memories["preferences"]]
            context_parts.append("User Preferences:\n- " + "\n- ".join(prefs))

        # Add recent conversation context
        if memories["recent"]:
            recent = [m["content"] for m in memories["recent"]]
            context_parts.append("Recent Context:\n- " + "\n- ".join(recent))

        # Add relevant memories
        if memories["relevant"]:
            relevant = [m["content"] for m in memories["relevant"]]
            context_parts.append("Relevant Information:\n- " + "\n- ".join(relevant))

        return "\n\n".join(context_parts)

    async def _generate_response(
        self,
        message: str,
        context: str,
        user_id: str
    ) -> str:
        """Generate response using LLM with context."""

        system_prompt = f"""You are a helpful assistant with memory.
Use the following context about the user to provide personalized responses:

{context}

Remember to:
- Be consistent with user preferences
- Reference previous conversations when relevant
- Maintain continuity in the conversation
"""

        response = self.openai.chat.completions.create(
            model="gpt-4",
            messages=[
                {"role": "system", "content": system_prompt},
                {"role": "user", "content": message}
            ],
            temperature=0.7
        )

        return response.choices[0].message.content

    async def _store_interaction(
        self,
        user_id: str,
        user_message: str,
        bot_response: str,
        session_id: Optional[str]
    ):
        """Store the interaction as a memory."""

        # Store user message
        await self.recall.add(
            content=f"User said: {user_message}",
            user_id=user_id,
            priority="medium",
            metadata={
                "type": "conversation",
                "role": "user",
                "session_id": session_id,
                "timestamp": datetime.now().isoformat()
            }
        )

        # Store bot response
        await self.recall.add(
            content=f"Assistant responded: {bot_response}",
            user_id=user_id,
            priority="medium",
            metadata={
                "type": "conversation",
                "role": "assistant",
                "session_id": session_id,
                "timestamp": datetime.now().isoformat()
            }
        )

    async def _extract_learnings(
        self,
        user_id: str,
        message: str,
        response: str
    ):
        """Extract and store learnings from the conversation."""

        # Use LLM to extract key information
        extraction_prompt = f"""
From this conversation, extract any important information about the user:

User: {message}
Assistant: {response}

Extract:
1. User preferences (if any)
2. Personal information (if any)
3. Behavioral patterns (if any)

Return a JSON object with a "findings" array; each finding has "type",
"content", and optional "confidence". Return {{"findings": []}} if nothing notable.
"""

        extraction = self.openai.chat.completions.create(
            model="gpt-3.5-turbo",
            messages=[{"role": "system", "content": extraction_prompt}],
            response_format={"type": "json_object"}
        )

        learnings = json.loads(extraction.choices[0].message.content)

        # Store each learning as a high-priority memory
        for learning in learnings.get("findings", []):
            await self.recall.add(
                content=learning["content"],
                user_id=user_id,
                priority="high",
                metadata={
                    "type": learning["type"],
                    "confidence": learning.get("confidence", 0.8),
                    "learned_at": datetime.now().isoformat()
                }
            )


# Usage Example
async def main():
    # Initialize clients
    recall_client = RecallClient(
        redis_url="redis://localhost:6379",
        mem0_api_key=os.getenv("MEM0_API_KEY")
    )

    openai_client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))

    # Create bot
    bot = MemoryBot(recall_client, openai_client)

    # Simulate conversation
    user_id = "user_123"
    session_id = "session_456"

    # First interaction
    response1 = await bot.process_message(
        user_id=user_id,
        message="Hi! I prefer concise responses and I'm interested in Python.",
        session_id=session_id
    )
    print(f"Bot: {response1}")

    # Second interaction (bot remembers preferences)
    response2 = await bot.process_message(
        user_id=user_id,
        message="What's a good way to handle errors?",
        session_id=session_id
    )
    print(f"Bot: {response2}")
    # Bot provides concise, Python-specific error-handling advice

    # Later session (different session_id)
    response3 = await bot.process_message(
        user_id=user_id,
        message="Can you help me with coding?",
        session_id="session_789"
    )
    print(f"Bot: {response3}")
    # Bot remembers the user prefers Python and concise responses


if __name__ == "__main__":
    asyncio.run(main())
```
TypeScript Implementation
```typescript
import { RecallClient, Memory } from "@recall/client";
import OpenAI from "openai";

interface MemoryContext {
  relevant: Memory[];
  recent: Memory[];
  preferences: Memory[];
}

class MemoryBot {
  constructor(
    private recall: RecallClient,
    private openai: OpenAI,
  ) {}

  async processMessage(
    userId: string,
    message: string,
    sessionId?: string,
  ): Promise<string> {
    // 1. Retrieve relevant memories
    const memories = await this.getRelevantMemories(userId, message);

    // 2. Build context
    const context = this.buildContext(memories);

    // 3. Generate response
    const response = await this.generateResponse(message, context);

    // 4. Store interaction
    await this.storeInteraction(userId, message, response, sessionId);

    // 5. Extract learnings
    await this.extractLearnings(userId, message, response);

    return response;
  }

  private async getRelevantMemories(
    userId: string,
    message: string,
  ): Promise<MemoryContext> {
    // Parallel memory retrieval
    const [relevant, recent, preferences] = await Promise.all([
      // Direct relevance search
      this.recall.search({
        query: message,
        userId,
        limit: 5,
        threshold: 0.7,
      }),

      // Recent conversation
      this.recall.search({
        query: "recent conversation",
        userId,
        filters: { type: "conversation" },
        limit: 3,
      }),

      // User preferences
      this.recall.search({
        query: "preferences settings",
        userId,
        filters: { type: "preference" },
        limit: 3,
      }),
    ]);

    return { relevant, recent, preferences };
  }

  private buildContext(memories: MemoryContext): string {
    const parts: string[] = [];

    if (memories.preferences.length > 0) {
      const prefs = memories.preferences.map((m) => m.content);
      parts.push(`User Preferences:\n- ${prefs.join("\n- ")}`);
    }

    if (memories.recent.length > 0) {
      const recent = memories.recent.map((m) => m.content);
      parts.push(`Recent Context:\n- ${recent.join("\n- ")}`);
    }

    if (memories.relevant.length > 0) {
      const relevant = memories.relevant.map((m) => m.content);
      parts.push(`Relevant Information:\n- ${relevant.join("\n- ")}`);
    }

    return parts.join("\n\n");
  }

  private async generateResponse(
    message: string,
    context: string,
  ): Promise<string> {
    const completion = await this.openai.chat.completions.create({
      model: "gpt-4",
      messages: [
        {
          role: "system",
          content: `You are a helpful assistant with memory.
Use this context about the user: ${context}
Be consistent with preferences and reference past conversations.`,
        },
        { role: "user", content: message },
      ],
      temperature: 0.7,
    });

    return completion.choices[0].message.content || "";
  }

  private async storeInteraction(
    userId: string,
    userMessage: string,
    botResponse: string,
    sessionId?: string,
  ): Promise<void> {
    const timestamp = new Date().toISOString();

    // Store both messages in parallel
    await Promise.all([
      this.recall.add({
        content: `User said: ${userMessage}`,
        userId,
        priority: "medium",
        metadata: {
          type: "conversation",
          role: "user",
          sessionId,
          timestamp,
        },
      }),

      this.recall.add({
        content: `Assistant responded: ${botResponse}`,
        userId,
        priority: "medium",
        metadata: {
          type: "conversation",
          role: "assistant",
          sessionId,
          timestamp,
        },
      }),
    ]);
  }

  private async extractLearnings(
    userId: string,
    message: string,
    response: string,
  ): Promise<void> {
    const extraction = await this.openai.chat.completions.create({
      model: "gpt-3.5-turbo",
      messages: [
        {
          role: "system",
          content: `Extract learnings from:
User: ${message}
Assistant: ${response}
Return a JSON object with a "findings" array (each finding has "type",
"content", and optional "confidence"), or {"findings": []} if nothing notable.`,
        },
      ],
      response_format: { type: "json_object" },
    });

    const learnings = JSON.parse(extraction.choices[0].message.content || "{}");

    // Store learnings
    for (const learning of learnings.findings || []) {
      await this.recall.add({
        content: learning.content,
        userId,
        priority: "high",
        metadata: {
          type: learning.type,
          confidence: learning.confidence || 0.8,
          learnedAt: new Date().toISOString(),
        },
      });
    }
  }
}

// Usage
async function main() {
  const recall = new RecallClient({
    redisUrl: "redis://localhost:6379",
    mem0ApiKey: process.env.MEM0_API_KEY,
  });

  const openai = new OpenAI({
    apiKey: process.env.OPENAI_API_KEY,
  });

  const bot = new MemoryBot(recall, openai);

  // Process messages
  const response = await bot.processMessage(
    "user_123",
    "I prefer TypeScript and brief responses",
    "session_456",
  );

  console.log("Bot:", response);
}

main().catch(console.error);
```
Advanced Features
Conversation Summarization
Periodically summarize long conversations to maintain context without overwhelming the memory system:
```python
async def summarize_conversation(self, user_id: str, session_id: str):
    """Summarize a conversation session for long-term storage."""

    # Get all messages from the session
    messages = await self.recall.search(
        query=f"session {session_id}",
        user_id=user_id,
        filters={"session_id": session_id},
        limit=100
    )

    if len(messages) < 5:
        return  # Too short to summarize

    # Create a conversation transcript
    transcript = "\n".join([m["content"] for m in messages])

    # Generate summary
    summary = self.openai.chat.completions.create(
        model="gpt-3.5-turbo",
        messages=[{
            "role": "system",
            "content": f"Summarize this conversation, highlighting key points and user preferences:\n{transcript}"
        }]
    )

    # Store the summary as a high-priority memory
    await self.recall.add(
        content=f"Conversation Summary: {summary.choices[0].message.content}",
        user_id=user_id,
        priority="high",
        metadata={
            "type": "summary",
            "session_id": session_id,
            "message_count": len(messages),
            "summarized_at": datetime.now().isoformat()
        }
    )

    # Optionally reduce the priority of the individual messages
    for message in messages:
        await self.recall.update(
            memory_id=message["id"],
            priority="low"
        )
```
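The snippet above defines the summarizer but leaves open when to run it. Below is a minimal scheduling sketch, assuming `summarize_conversation` has been added as a method on `MemoryBot`; the `SummarizingBot` subclass, its message counter, and the `SUMMARIZE_EVERY` threshold are illustrative, not part of Recall:

```python
SUMMARIZE_EVERY = 20  # illustrative threshold; tune for your workload

class SummarizingBot(MemoryBot):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._message_counts: Dict[str, int] = {}  # session_id -> messages stored

    async def process_message(self, user_id, message, session_id=None):
        response = await super().process_message(user_id, message, session_id)
        if session_id is not None:
            # Each exchange stores two messages (user + assistant)
            count = self._message_counts.get(session_id, 0) + 2
            self._message_counts[session_id] = count
            if count >= SUMMARIZE_EVERY:
                await self.summarize_conversation(user_id, session_id)
                self._message_counts[session_id] = 0
        return response
```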
Sentiment Tracking
Track user sentiment over time to provide more empathetic responses:
```python
async def track_sentiment(self, user_id: str, message: str):
    """Track and store user sentiment."""

    # Analyze sentiment; request structured JSON so the response can be parsed
    sentiment_analysis = self.openai.chat.completions.create(
        model="gpt-3.5-turbo",
        messages=[{
            "role": "system",
            "content": (
                'Analyze the sentiment of this message. Return a JSON object with '
                '"sentiment" (positive/negative/neutral), "emotion", and '
                f'"confidence" (0-1): {message}'
            )
        }],
        response_format={"type": "json_object"}
    )

    sentiment_data = json.loads(sentiment_analysis.choices[0].message.content)

    # Store sentiment as memory
    await self.recall.add(
        content=f"User sentiment: {sentiment_data['sentiment']}, emotion: {sentiment_data['emotion']}",
        user_id=user_id,
        priority="medium",
        metadata={
            "type": "sentiment",
            "sentiment": sentiment_data["sentiment"],
            "emotion": sentiment_data["emotion"],
            "confidence": sentiment_data["confidence"],
            "timestamp": datetime.now().isoformat()
        }
    )

    # Check for sentiment patterns
    recent_sentiments = await self.recall.search(
        query="user sentiment",
        user_id=user_id,
        filters={"type": "sentiment"},
        limit=10
    )

    # Detect concerning patterns (more than 7 of the last 10 are negative)
    negative_count = sum(
        1 for s in recent_sentiments if s["metadata"]["sentiment"] == "negative"
    )
    if negative_count > 7:
        # Adjust the tone for subsequent responses (see the sketch below)
        self.response_tone = "empathetic"
```
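The `response_tone` flag above is set but never read. One hedged way to consume it is to override `_generate_response` so the tone feeds the system prompt; the `EmpatheticBot` subclass and the `getattr` default are illustrative:

```python
class EmpatheticBot(MemoryBot):
    async def _generate_response(self, message: str, context: str, user_id: str) -> str:
        # Fall back to a neutral tone until track_sentiment has set one
        tone = getattr(self, "response_tone", "neutral")
        response = self.openai.chat.completions.create(
            model="gpt-4",
            messages=[
                {
                    "role": "system",
                    "content": f"You are a helpful assistant with memory. "
                               f"Respond in a {tone} tone.\n\nUser context:\n{context}"
                },
                {"role": "user", "content": message}
            ],
            temperature=0.7
        )
        return response.choices[0].message.content
```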
Performance Optimization
Memory Prioritization
Implement intelligent memory prioritization based on usage:
```python
class SmartMemoryManager:
    def __init__(self, recall_client: RecallClient):
        self.recall = recall_client

    async def auto_prioritize(self, user_id: str):
        """Automatically adjust memory priorities based on access patterns."""

        memories = await self.recall.get_all(user_id=user_id)

        for memory in memories:
            access_count = memory.get("access_count", 0)
            # created_at is stored as an ISO-8601 string, so parse before subtracting
            created_at = datetime.fromisoformat(memory["created_at"])
            age_days = (datetime.now() - created_at).days

            # Calculate the new priority
            if access_count > 10 and age_days < 7:
                new_priority = "high"
            elif access_count > 5 or age_days < 30:
                new_priority = "medium"
            else:
                new_priority = "low"

            # Update only if it changed
            if new_priority != memory["priority"]:
                await self.recall.update(
                    memory_id=memory["id"],
                    priority=new_priority
                )
```
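`auto_prioritize` is a batch job rather than a per-message step. A minimal sketch of running it on a schedule with plain asyncio (the interval and the list of user IDs are assumptions about your deployment, not part of Recall):

```python
import asyncio

async def prioritization_loop(
    manager: SmartMemoryManager,
    user_ids: List[str],
    interval_hours: float = 24.0
):
    """Re-score every user's memories on a fixed interval."""
    while True:
        for user_id in user_ids:
            await manager.auto_prioritize(user_id)
        await asyncio.sleep(interval_hours * 3600)
```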
Response Caching
Cache common responses for faster interaction:
```python
import hashlib

class CachedBot(MemoryBot):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.response_cache = {}

    def _get_cache_key(self, message: str, context: str) -> str:
        """Generate a cache key from the message and its memory context."""
        # md5 is fine here: the key is a cache identifier, not a security token
        content = f"{message}:{context}"
        return hashlib.md5(content.encode()).hexdigest()

    async def process_message(
        self,
        user_id: str,
        message: str,
        session_id: Optional[str] = None
    ) -> str:
        # Build the context first: the key must reflect the memories in play,
        # so the same question gets a fresh answer once the context changes
        memories = await self._get_relevant_memories(user_id, message)
        context = self._build_context(memories)
        cache_key = self._get_cache_key(message, context)

        if cache_key in self.response_cache:
            # Cache hit - skip generation but still store the interaction
            response = self.response_cache[cache_key]
            await self._store_interaction(user_id, message, response, session_id)
            return response

        # Cache miss - generate a response and remember it
        response = await super().process_message(user_id, message, session_id)
        self.response_cache[cache_key] = response
        return response
```
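One caveat: the plain dict above grows without bound. A standard-library sketch of an LRU replacement for it (the `BoundedCache` class is illustrative, not part of Recall):

```python
from collections import OrderedDict
from typing import Optional

class BoundedCache:
    """LRU-evicting cache mapping cache keys to responses."""

    def __init__(self, max_size: int = 1024):
        self.max_size = max_size
        self._data: "OrderedDict[str, str]" = OrderedDict()

    def get(self, key: str) -> Optional[str]:
        if key not in self._data:
            return None
        self._data.move_to_end(key)  # mark as most recently used
        return self._data[key]

    def put(self, key: str, value: str) -> None:
        self._data[key] = value
        self._data.move_to_end(key)
        if len(self._data) > self.max_size:
            self._data.popitem(last=False)  # evict the least recently used entry
```

Swapping `self.response_cache = {}` for `BoundedCache()` (and using `get`/`put` in `process_message`) keeps memory use flat in long-running deployments.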
Deployment Considerations
Scaling for Multiple Users
```python
# Use connection pooling
from redis import ConnectionPool

pool = ConnectionPool(
    host='localhost',
    port=6379,
    max_connections=100
)

# Create a bot pool for handling multiple users
class BotPool:
    def __init__(self, pool_size: int = 10):
        self.bots = []
        self._next = 0  # round-robin cursor
        for _ in range(pool_size):
            recall_client = RecallClient(redis_connection_pool=pool)
            openai_client = OpenAI()
            self.bots.append(MemoryBot(recall_client, openai_client))

    async def get_bot(self) -> MemoryBot:
        """Get the next bot from the pool (simple round-robin)."""
        bot = self.bots[self._next % len(self.bots)]
        self._next += 1
        return bot

# Usage
bot_pool = BotPool(pool_size=20)
bot = await bot_pool.get_bot()
response = await bot.process_message(user_id, message)
```
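Under concurrency, two in-flight messages from the same user can interleave their memory writes. A sketch of per-user serialization with asyncio locks; the `user_locks` registry and `handle_message` helper are illustrative, not part of Recall:

```python
import asyncio
from collections import defaultdict

user_locks: "defaultdict[str, asyncio.Lock]" = defaultdict(asyncio.Lock)

async def handle_message(bot_pool: BotPool, user_id: str, message: str) -> str:
    # Serialize per user so interactions are stored in conversation order,
    # while different users still run fully in parallel
    async with user_locks[user_id]:
        bot = await bot_pool.get_bot()
        return await bot.process_message(user_id, message)
```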
Next Steps
- Explore API Integration for REST API implementation
- Learn about Real-time Features for WebSocket support
- Check the Production Guide for deployment best practices