feat: enable skills support for subagents

Extract get_skills_prompt_section() from apply_prompt_template() so
subagents can also receive the available skills list in their system
prompt. This allows subagents to discover and load skills via read_file,
just like the lead agent.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
hetao
2026-02-11 11:04:50 +08:00
parent ebf4ec2786
commit 4a85c5de7b
2 changed files with 62 additions and 43 deletions

View File

@@ -224,21 +224,7 @@ User: "staging"
You: "Deploying to staging..." [proceed] You: "Deploying to staging..." [proceed]
</clarification_system> </clarification_system>
<skill_system> {skills_section}
You have access to skills that provide optimized workflows for specific tasks. Each skill contains best practices, frameworks, and references to additional resources.
**Progressive Loading Pattern:**
1. When a user query matches a skill's use case, immediately call `read_file` on the skill's main file using the path attribute provided in the skill tag below
2. Read and understand the skill's workflow and instructions
3. The skill file contains references to external resources under the same folder
4. Load referenced resources only when needed during execution
5. Follow the skill's instructions precisely
**Skills are located at:** {skills_base_path}
{skills_list}
</skill_system>
{subagent_section} {subagent_section}
@@ -266,7 +252,10 @@ You have access to skills that provide optimized workflows for specific tasks. E
- Format: Use Markdown link format `[citation:TITLE](URL)` - Format: Use Markdown link format `[citation:TITLE](URL)`
- Example: - Example:
```markdown ```markdown
The key AI trends for 2026 include enhanced reasoning capabilities and multimodal integration [citation:AI Trends 2026](https://techcrunch.com/ai-trends). Recent breakthroughs in language models have also accelerated progress [citation:OpenAI Research](https://openai.com/research). The key AI trends for 2026 include enhanced reasoning capabilities and multimodal integration
[citation:AI Trends 2026](https://techcrunch.com/ai-trends).
Recent breakthroughs in language models have also accelerated progress
[citation:OpenAI Research](https://openai.com/research).
``` ```
</citations> </citations>
@@ -313,29 +302,48 @@ def _get_memory_context() -> str:
return "" return ""
def apply_prompt_template(subagent_enabled: bool = False) -> str: def get_skills_prompt_section() -> str:
# Load only enabled skills """Generate the skills prompt section with available skills list.
Returns the <skill_system>...</skill_system> block listing all enabled skills,
suitable for injection into any agent's system prompt.
"""
skills = load_skills(enabled_only=True) skills = load_skills(enabled_only=True)
# Get config
try: try:
from src.config import get_app_config from src.config import get_app_config
config = get_app_config() config = get_app_config()
container_base_path = config.skills.container_path container_base_path = config.skills.container_path
except Exception: except Exception:
# Fallback to defaults if config fails
container_base_path = "/mnt/skills" container_base_path = "/mnt/skills"
# Generate skills list XML with paths (path points to SKILL.md file) if not skills:
if skills: return ""
skill_items = "\n".join( skill_items = "\n".join(
f" <skill>\n <name>{skill.name}</name>\n <description>{skill.description}</description>\n <location>{skill.get_container_file_path(container_base_path)}</location>\n </skill>" for skill in skills f" <skill>\n <name>{skill.name}</name>\n <description>{skill.description}</description>\n <location>{skill.get_container_file_path(container_base_path)}</location>\n </skill>" for skill in skills
) )
skills_list = f"<available_skills>\n{skill_items}\n</available_skills>" skills_list = f"<available_skills>\n{skill_items}\n</available_skills>"
else:
skills_list = "<!-- No skills available -->"
return f"""<skill_system>
You have access to skills that provide optimized workflows for specific tasks. Each skill contains best practices, frameworks, and references to additional resources.
**Progressive Loading Pattern:**
1. When a user query matches a skill's use case, immediately call `read_file` on the skill's main file using the path attribute provided in the skill tag below
2. Read and understand the skill's workflow and instructions
3. The skill file contains references to external resources under the same folder
4. Load referenced resources only when needed during execution
5. Follow the skill's instructions precisely
**Skills are located at:** {container_base_path}
{skills_list}
</skill_system>"""
def apply_prompt_template(subagent_enabled: bool = False) -> str:
# Get memory context # Get memory context
memory_context = _get_memory_context() memory_context = _get_memory_context()
@@ -360,10 +368,12 @@ def apply_prompt_template(subagent_enabled: bool = False) -> str:
else "" else ""
) )
# Get skills section
skills_section = get_skills_prompt_section()
# Format the prompt with dynamic skills and memory # Format the prompt with dynamic skills and memory
prompt = SYSTEM_PROMPT_TEMPLATE.format( prompt = SYSTEM_PROMPT_TEMPLATE.format(
skills_list=skills_list, skills_section=skills_section,
skills_base_path=container_base_path,
memory_context=memory_context, memory_context=memory_context,
subagent_section=subagent_section, subagent_section=subagent_section,
subagent_reminder=subagent_reminder, subagent_reminder=subagent_reminder,

View File

@@ -3,12 +3,14 @@
import logging import logging
import time import time
import uuid import uuid
from dataclasses import replace
from typing import Annotated, Literal from typing import Annotated, Literal
from langchain.tools import InjectedToolCallId, ToolRuntime, tool from langchain.tools import InjectedToolCallId, ToolRuntime, tool
from langgraph.config import get_stream_writer from langgraph.config import get_stream_writer
from langgraph.typing import ContextT from langgraph.typing import ContextT
from src.agents.lead_agent.prompt import get_skills_prompt_section
from src.agents.thread_state import ThreadState from src.agents.thread_state import ThreadState
from src.subagents import SubagentExecutor, get_subagent_config from src.subagents import SubagentExecutor, get_subagent_config
from src.subagents.executor import SubagentStatus, get_background_task_result from src.subagents.executor import SubagentStatus, get_background_task_result
@@ -60,12 +62,18 @@ def task_tool(
if config is None: if config is None:
return f"Error: Unknown subagent type '{subagent_type}'. Available: general-purpose, bash" return f"Error: Unknown subagent type '{subagent_type}'. Available: general-purpose, bash"
# Override max_turns if specified # Build config overrides
if max_turns is not None: overrides: dict = {}
# Create a copy with updated max_turns
from dataclasses import replace
config = replace(config, max_turns=max_turns) skills_section = get_skills_prompt_section()
if skills_section:
overrides["system_prompt"] = config.system_prompt + "\n\n" + skills_section
if max_turns is not None:
overrides["max_turns"] = max_turns
if overrides:
config = replace(config, **overrides)
# Extract parent context from runtime # Extract parent context from runtime
sandbox_state = None sandbox_state = None
@@ -118,7 +126,6 @@ def task_tool(
# Send Task Started message # Send Task Started message
writer({"type": "task_started", "task_id": task_id, "description": description}) writer({"type": "task_started", "task_id": task_id, "description": description})
while True: while True:
result = get_background_task_result(task_id) result = get_background_task_result(task_id)
@@ -138,13 +145,15 @@ def task_tool(
# Send task_running event for each new message # Send task_running event for each new message
for i in range(last_message_count, current_message_count): for i in range(last_message_count, current_message_count):
message = result.ai_messages[i] message = result.ai_messages[i]
writer({ writer(
{
"type": "task_running", "type": "task_running",
"task_id": task_id, "task_id": task_id,
"message": message, "message": message,
"message_index": i + 1, # 1-based index for display "message_index": i + 1, # 1-based index for display
"total_messages": current_message_count "total_messages": current_message_count,
}) }
)
logger.info(f"[trace={trace_id}] Task {task_id} sent message #{i + 1}/{current_message_count}") logger.info(f"[trace={trace_id}] Task {task_id} sent message #{i + 1}/{current_message_count}")
last_message_count = current_message_count last_message_count = current_message_count