mirror of
https://gitee.com/wanwujie/deer-flow
synced 2026-04-03 06:12:14 +08:00
205 lines
6.7 KiB
Python
205 lines
6.7 KiB
Python
|
|
"""Prompt templates for memory update and injection."""
|
||
|
|
|
||
|
|
from typing import Any
|
||
|
|
|
||
|
|
# Prompt template for updating the user's memory profile from a whole
# conversation.  Filled via str.format with two placeholders:
#   {current_memory} - serialized current memory state
#   {conversation}   - formatted transcript (see format_conversation_for_update)
# Doubled braces ({{ }}) are literal braces in the JSON example and are
# required so .format() does not treat them as placeholders.
MEMORY_UPDATE_PROMPT = """You are a memory management system. Your task is to analyze a conversation and update the user's memory profile.

Current Memory State:
<current_memory>
{current_memory}
</current_memory>

New Conversation to Process:
<conversation>
{conversation}
</conversation>

Instructions:
1. Analyze the conversation for important information about the user
2. Extract relevant facts, preferences, and context
3. Update the memory sections as needed:
- workContext: User's work-related information (job, projects, tools, technologies)
- personalContext: Personal preferences, communication style, background
- topOfMind: Current focus areas, ongoing tasks, immediate priorities

4. For facts extraction:
- Extract specific, verifiable facts about the user
- Assign appropriate categories: preference, knowledge, context, behavior, goal
- Estimate confidence (0.0-1.0) based on how explicit the information is
- Avoid duplicating existing facts

5. Update history sections:
- recentMonths: Summary of recent activities and discussions
- earlierContext: Important historical context
- longTermBackground: Persistent background information

Output Format (JSON):
{{
"user": {{
"workContext": {{ "summary": "...", "shouldUpdate": true/false }},
"personalContext": {{ "summary": "...", "shouldUpdate": true/false }},
"topOfMind": {{ "summary": "...", "shouldUpdate": true/false }}
}},
"history": {{
"recentMonths": {{ "summary": "...", "shouldUpdate": true/false }},
"earlierContext": {{ "summary": "...", "shouldUpdate": true/false }},
"longTermBackground": {{ "summary": "...", "shouldUpdate": true/false }}
}},
"newFacts": [
{{ "content": "...", "category": "preference|knowledge|context|behavior|goal", "confidence": 0.0-1.0 }}
],
"factsToRemove": ["fact_id_1", "fact_id_2"]
}}

Important Rules:
- Only set shouldUpdate=true if there's meaningful new information
- Keep summaries concise (1-3 sentences each)
- Only add facts that are clearly stated or strongly implied
- Remove facts that are contradicted by new information
- Preserve existing information that isn't contradicted
- Focus on information useful for future interactions

Return ONLY valid JSON, no explanation or markdown."""
|
||
|
|
|
||
|
|
|
||
|
|
# Prompt template for extracting user facts from a single message.
# Filled via str.format with one placeholder: {message}.  Doubled braces
# ({{ }}) are literal braces in the JSON example and survive .format().
# The category vocabulary here must stay in sync with MEMORY_UPDATE_PROMPT.
FACT_EXTRACTION_PROMPT = """Extract factual information about the user from this message.

Message:
{message}

Extract facts in this JSON format:
{{
"facts": [
{{ "content": "...", "category": "preference|knowledge|context|behavior|goal", "confidence": 0.0-1.0 }}
]
}}

Categories:
- preference: User preferences (likes/dislikes, styles, tools)
- knowledge: User's expertise or knowledge areas
- context: Background context (location, job, projects)
- behavior: Behavioral patterns
- goal: User's goals or objectives

Rules:
- Only extract clear, specific facts
- Confidence should reflect certainty (explicit statement = 0.9+, implied = 0.6-0.8)
- Skip vague or temporary information

Return ONLY valid JSON."""
|
||
|
|
|
||
|
|
|
||
|
|
def format_memory_for_injection(memory_data: dict[str, Any], max_tokens: int = 2000) -> str:
    """Format memory data for injection into a system prompt.

    Builds up to three sections -- "User Context", "History", and
    "Known Facts" -- from the corresponding keys of *memory_data*,
    skipping any section with no usable content.

    Args:
        memory_data: The memory data dictionary (keys ``user``,
            ``history``, ``facts``; all optional).
        max_tokens: Approximate token budget, enforced via a rough
            4-characters-per-token heuristic.

    Returns:
        Formatted memory string, or ``""`` when there is nothing to inject.
    """
    if not memory_data:
        return ""

    sections: list[str] = []

    # --- User context: summaries keyed by sub-section, with display labels.
    user_data = memory_data.get("user", {})
    if user_data:
        user_labels = (
            ("workContext", "Work"),
            ("personalContext", "Personal"),
            ("topOfMind", "Current Focus"),
        )
        user_entries = []
        for key, label in user_labels:
            summary = user_data.get(key, {}).get("summary")
            if summary:
                user_entries.append(f"{label}: {summary}")
        if user_entries:
            sections.append("User Context:\n" + "\n".join(f"- {e}" for e in user_entries))

    # --- History: same pattern over the history sub-sections.
    # NOTE(review): longTermBackground is defined in MEMORY_UPDATE_PROMPT
    # but is deliberately(?) not injected here -- confirm this is intended.
    history_data = memory_data.get("history", {})
    if history_data:
        history_labels = (
            ("recentMonths", "Recent"),
            ("earlierContext", "Earlier"),
        )
        history_entries = []
        for key, label in history_labels:
            summary = history_data.get(key, {}).get("summary")
            if summary:
                history_entries.append(f"{label}: {summary}")
        if history_entries:
            sections.append("History:\n" + "\n".join(f"- {e}" for e in history_entries))

    # --- Facts: highest-confidence first, capped at 15 to bound size.
    facts = memory_data.get("facts", [])
    if facts:
        by_confidence = sorted(facts, key=lambda f: f.get("confidence", 0), reverse=True)
        fact_lines = [
            f"- [{fact.get('category', '')}] {fact.get('content', '')}"
            for fact in by_confidence[:15]
            if fact.get("content", "")
        ]
        if fact_lines:
            sections.append("Known Facts:\n" + "\n".join(fact_lines))

    if not sections:
        return ""

    result = "\n\n".join(sections)

    # Rough token limit: approximate 4 characters per token.
    limit = max_tokens * 4
    if len(result) > limit:
        result = result[:limit] + "\n..."

    return result
|
||
|
|
|
||
|
|
|
||
|
|
def format_conversation_for_update(messages: list[Any]) -> str:
    """Format conversation messages into a transcript for the memory-update prompt.

    Each message contributes one ``User: ...`` or ``Assistant: ...`` line
    depending on its ``type`` attribute (``"human"`` / ``"ai"``); any other
    role is omitted.  Multimodal list content is flattened to its text
    parts, and each message is truncated to 1000 characters.

    Args:
        messages: List of conversation messages (duck-typed: ``type`` and
            ``content`` attributes are read via getattr).

    Returns:
        Formatted conversation string, entries separated by blank lines.
    """
    transcript: list[str] = []

    for msg in messages:
        role = getattr(msg, "type", "unknown")
        content = getattr(msg, "content", str(msg))

        # Multimodal content arrives as a list of part dicts; keep only
        # the text parts, falling back to str() when none are present.
        if isinstance(content, list):
            texts = [part["text"] for part in content if isinstance(part, dict) and "text" in part]
            content = " ".join(texts) if texts else str(content)

        # Normalize to text and cap very long messages at 1000 characters.
        text = str(content)
        if len(text) > 1000:
            text = text[:1000] + "..."

        if role == "human":
            transcript.append(f"User: {text}")
        elif role == "ai":
            transcript.append(f"Assistant: {text}")

    return "\n\n".join(transcript)
|