diff --git a/backend/src/agents/lead_agent/prompt.py b/backend/src/agents/lead_agent/prompt.py
index fe03d14..a22b61b 100644
--- a/backend/src/agents/lead_agent/prompt.py
+++ b/backend/src/agents/lead_agent/prompt.py
@@ -224,21 +224,7 @@ User: "staging"
You: "Deploying to staging..." [proceed]
-
-You have access to skills that provide optimized workflows for specific tasks. Each skill contains best practices, frameworks, and references to additional resources.
-
-**Progressive Loading Pattern:**
-1. When a user query matches a skill's use case, immediately call `read_file` on the skill's main file using the path attribute provided in the skill tag below
-2. Read and understand the skill's workflow and instructions
-3. The skill file contains references to external resources under the same folder
-4. Load referenced resources only when needed during execution
-5. Follow the skill's instructions precisely
-
-**Skills are located at:** {skills_base_path}
-
-{skills_list}
-
-
+{skills_section}
{subagent_section}
@@ -266,7 +252,10 @@ You have access to skills that provide optimized workflows for specific tasks. E
- Format: Use Markdown link format `[citation:TITLE](URL)`
- Example:
```markdown
-The key AI trends for 2026 include enhanced reasoning capabilities and multimodal integration [citation:AI Trends 2026](https://techcrunch.com/ai-trends). Recent breakthroughs in language models have also accelerated progress [citation:OpenAI Research](https://openai.com/research).
+The key AI trends for 2026 include enhanced reasoning capabilities and multimodal integration
+[citation:AI Trends 2026](https://techcrunch.com/ai-trends).
+Recent breakthroughs in language models have also accelerated progress
+[citation:OpenAI Research](https://openai.com/research).
```
@@ -313,29 +302,48 @@ def _get_memory_context() -> str:
return ""
-def apply_prompt_template(subagent_enabled: bool = False) -> str:
- # Load only enabled skills
+def get_skills_prompt_section() -> str:
+ """Generate the skills prompt section with available skills list.
+
+    Returns the available-skills prompt block listing all enabled skills,
+ suitable for injection into any agent's system prompt.
+ """
skills = load_skills(enabled_only=True)
- # Get config
try:
from src.config import get_app_config
config = get_app_config()
container_base_path = config.skills.container_path
except Exception:
- # Fallback to defaults if config fails
container_base_path = "/mnt/skills"
- # Generate skills list XML with paths (path points to SKILL.md file)
- if skills:
- skill_items = "\n".join(
- f" \n {skill.name}\n {skill.description}\n {skill.get_container_file_path(container_base_path)}\n " for skill in skills
- )
- skills_list = f"\n{skill_items}\n"
- else:
- skills_list = ""
+ if not skills:
+ return ""
+ skill_items = "\n".join(
+ f" \n {skill.name}\n {skill.description}\n {skill.get_container_file_path(container_base_path)}\n " for skill in skills
+ )
+ skills_list = f"\n{skill_items}\n"
+
+ return f"""
+You have access to skills that provide optimized workflows for specific tasks. Each skill contains best practices, frameworks, and references to additional resources.
+
+**Progressive Loading Pattern:**
+1. When a user query matches a skill's use case, immediately call `read_file` on the skill's main file using the path attribute provided in the skill tag below
+2. Read and understand the skill's workflow and instructions
+3. The skill file contains references to external resources under the same folder
+4. Load referenced resources only when needed during execution
+5. Follow the skill's instructions precisely
+
+**Skills are located at:** {container_base_path}
+
+{skills_list}
+
+"""
+
+
+def apply_prompt_template(subagent_enabled: bool = False) -> str:
# Get memory context
memory_context = _get_memory_context()
@@ -360,10 +368,12 @@ def apply_prompt_template(subagent_enabled: bool = False) -> str:
else ""
)
+ # Get skills section
+ skills_section = get_skills_prompt_section()
+
# Format the prompt with dynamic skills and memory
prompt = SYSTEM_PROMPT_TEMPLATE.format(
- skills_list=skills_list,
- skills_base_path=container_base_path,
+ skills_section=skills_section,
memory_context=memory_context,
subagent_section=subagent_section,
subagent_reminder=subagent_reminder,
diff --git a/backend/src/tools/builtins/task_tool.py b/backend/src/tools/builtins/task_tool.py
index 32560ea..1400c87 100644
--- a/backend/src/tools/builtins/task_tool.py
+++ b/backend/src/tools/builtins/task_tool.py
@@ -3,12 +3,14 @@
import logging
import time
import uuid
+from dataclasses import replace
from typing import Annotated, Literal
from langchain.tools import InjectedToolCallId, ToolRuntime, tool
from langgraph.config import get_stream_writer
from langgraph.typing import ContextT
+from src.agents.lead_agent.prompt import get_skills_prompt_section
from src.agents.thread_state import ThreadState
from src.subagents import SubagentExecutor, get_subagent_config
from src.subagents.executor import SubagentStatus, get_background_task_result
@@ -60,12 +62,18 @@ def task_tool(
if config is None:
return f"Error: Unknown subagent type '{subagent_type}'. Available: general-purpose, bash"
- # Override max_turns if specified
- if max_turns is not None:
- # Create a copy with updated max_turns
- from dataclasses import replace
+ # Build config overrides
+ overrides: dict = {}
- config = replace(config, max_turns=max_turns)
+ skills_section = get_skills_prompt_section()
+ if skills_section:
+ overrides["system_prompt"] = config.system_prompt + "\n\n" + skills_section
+
+ if max_turns is not None:
+ overrides["max_turns"] = max_turns
+
+ if overrides:
+ config = replace(config, **overrides)
# Extract parent context from runtime
sandbox_state = None
@@ -118,7 +126,6 @@ def task_tool(
# Send Task Started message'
writer({"type": "task_started", "task_id": task_id, "description": description})
-
while True:
result = get_background_task_result(task_id)
@@ -138,13 +145,15 @@ def task_tool(
# Send task_running event for each new message
for i in range(last_message_count, current_message_count):
message = result.ai_messages[i]
- writer({
- "type": "task_running",
- "task_id": task_id,
- "message": message,
- "message_index": i + 1, # 1-based index for display
- "total_messages": current_message_count
- })
+ writer(
+ {
+ "type": "task_running",
+ "task_id": task_id,
+ "message": message,
+ "message_index": i + 1, # 1-based index for display
+ "total_messages": current_message_count,
+ }
+ )
logger.info(f"[trace={trace_id}] Task {task_id} sent message #{i + 1}/{current_message_count}")
last_message_count = current_message_count