mirror of
https://gitee.com/wanwujie/deer-flow
synced 2026-04-03 06:12:14 +08:00
Add comprehensive MCP integration using langchain-mcp-adapters to enable pluggable external tools from MCP servers. Features: - MCP server configuration via mcp_config.json - Automatic lazy initialization for seamless use in both FastAPI and LangGraph Studio - Support for multiple MCP servers (filesystem, postgres, github, brave-search, etc.) - Environment variable resolution in configuration - Tool caching mechanism for optimal performance - Complete documentation and setup guide Implementation: - Add src/mcp module with client, tools, and cache components - Integrate MCP config loading in AppConfig - Update tool system to include MCP tools automatically - Add eager initialization in FastAPI lifespan handler - Add lazy initialization fallback for LangGraph Studio Dependencies: - Add langchain-mcp-adapters>=0.1.0 Documentation: - Add MCP_SETUP.md with comprehensive setup guide - Update CLAUDE.md with MCP system architecture - Update config.example.yaml with MCP configuration notes - Update README.md with MCP setup instructions Co-Authored-By: Claude Sonnet 4.5 <noreply@anthropic.com>
84 lines
2.1 KiB
Python
84 lines
2.1 KiB
Python
#!/usr/bin/env python
"""
Debug script for lead_agent.

Run this file directly in VS Code with breakpoints.

Usage:
1. Set breakpoints in agent.py or other files
2. Press F5 or use "Run and Debug" panel
3. Input messages in the terminal to interact with the agent
"""

import asyncio
import os
import sys

# Make the project root importable so `src.*` packages resolve when this
# script is launched directly (not as a module).
sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))

# Third-party and project imports (the sys.path tweak above must run first).
from dotenv import load_dotenv
from langchain_core.messages import HumanMessage

from src.agents import make_lead_agent

# Pull API keys and other settings from a local .env file, if present.
load_dotenv()
|
|
|
|
|
|
async def main():
    """Interactive REPL for debugging the lead agent.

    Initializes MCP tools (best-effort), builds the agent with a fixed
    debug-thread configuration, then loops: read a line from stdin, invoke
    the agent, print its last message. Exits on 'quit'/'exit', EOF, or
    Ctrl+C.
    """
    # Initialize MCP tools at startup. Failure is tolerated on purpose so
    # the agent can still be debugged without any MCP servers configured.
    try:
        from src.mcp import initialize_mcp_tools

        await initialize_mcp_tools()
    except Exception as e:
        print(f"Warning: Failed to initialize MCP tools: {e}")

    # Agent/runtime configuration: one fixed thread id for the whole
    # debug session so conversation state persists across turns.
    config = {
        "configurable": {
            "thread_id": "debug-thread-001",
            "thinking_enabled": True,
            # Uncomment to use a specific model
            "model_name": "deepseek-v3.2",
        }
    }

    agent = make_lead_agent(config)

    print("=" * 50)
    print("Lead Agent Debug Mode")
    print("Type 'quit' or 'exit' to stop")
    print("=" * 50)

    while True:
        # Read user input separately from agent invocation so that stream
        # termination is handled distinctly from agent errors.
        try:
            user_input = input("\nYou: ").strip()
        except EOFError:
            # BUG FIX: when stdin is closed (Ctrl+D, or piped input runs
            # out), input() raises EOFError forever. Previously this fell
            # into the generic Exception handler and produced an infinite
            # error loop; exit cleanly instead.
            print("\nEOF. Goodbye!")
            break
        except KeyboardInterrupt:
            print("\nInterrupted. Goodbye!")
            break

        if not user_input:
            continue
        if user_input.lower() in ("quit", "exit"):
            print("Goodbye!")
            break

        try:
            # Invoke the agent on a fresh single-message state; memory is
            # carried by the thread_id in config, not by this dict.
            state = {"messages": [HumanMessage(content=user_input)]}
            result = await agent.ainvoke(
                state, config=config, context={"thread_id": "debug-thread-001"}
            )

            # Print the last message of the result, if any.
            if result.get("messages"):
                last_message = result["messages"][-1]
                print(f"\nAgent: {last_message.content}")

        except KeyboardInterrupt:
            print("\nInterrupted. Goodbye!")
            break
        except Exception as e:
            # Surface the full traceback so breakpoints/debugging stay easy.
            print(f"\nError: {e}")
            import traceback

            traceback.print_exc()
|
|
|
# Script entry point: run the async debug REPL on a fresh event loop.
if __name__ == "__main__":
    asyncio.run(main())