mirror of
https://gitee.com/wanwujie/deer-flow
synced 2026-04-22 05:34:45 +08:00
refactor: refactor the prompt management mechanism (#17)
This commit is contained in:
@@ -7,24 +7,18 @@ from langchain.schema import HumanMessage, SystemMessage
|
||||
|
||||
from src.config.agents import AGENT_LLM_MAP
|
||||
from src.llms.llm import get_llm_by_type
|
||||
from src.prompts.template import get_prompt_template
|
||||
from src.prose.graph.state import ProseState
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# System prompt for the prose-continue node. It is sent alongside the
# "prose/prose_continue" template when the model is invoked.
prompt = """
You are an AI writing assistant that continues existing text based on context from prior text.
- Give more weight/priority to the later characters than the beginning ones.
- Limit your response to no more than 200 characters, but make sure to construct complete sentences.
- Use Markdown formatting when appropriate
"""
|
||||
|
||||
|
||||
def prose_continue_node(state: ProseState):
|
||||
logger.info("Generating prose continue content...")
|
||||
model = get_llm_by_type(AGENT_LLM_MAP["prose_writer"])
|
||||
prose_content = model.invoke(
|
||||
[
|
||||
SystemMessage(content=prompt),
|
||||
SystemMessage(content=get_prompt_template("prose/prose_continue")),
|
||||
HumanMessage(content=state["content"]),
|
||||
],
|
||||
)
|
||||
|
||||
@@ -7,24 +7,18 @@ from langchain.schema import HumanMessage, SystemMessage
|
||||
|
||||
from src.config.agents import AGENT_LLM_MAP
|
||||
from src.llms.llm import get_llm_by_type
|
||||
from src.prompts.template import get_prompt_template
|
||||
from src.prose.graph.state import ProseState
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# System prompt for the prose-fix node. It is sent alongside the
# "prose/prose_fix" template when the model is invoked.
prompt = """
You are an AI writing assistant that fixes grammar and spelling errors in existing text.
- Limit your response to no more than 200 characters, but make sure to construct complete sentences.
- Use Markdown formatting when appropriate.
- If the text is already correct, just return the original text.
"""
|
||||
|
||||
|
||||
def prose_fix_node(state: ProseState):
|
||||
logger.info("Generating prose fix content...")
|
||||
model = get_llm_by_type(AGENT_LLM_MAP["prose_writer"])
|
||||
prose_content = model.invoke(
|
||||
[
|
||||
SystemMessage(content=prompt),
|
||||
SystemMessage(content=get_prompt_template("prose/prose_fix")),
|
||||
HumanMessage(content=f"The existing text is: {state['content']}"),
|
||||
],
|
||||
)
|
||||
|
||||
@@ -8,22 +8,17 @@ from langchain.schema import HumanMessage, SystemMessage
|
||||
from src.config.agents import AGENT_LLM_MAP
|
||||
from src.llms.llm import get_llm_by_type
|
||||
from src.prose.graph.state import ProseState
|
||||
from src.prompts.template import get_prompt_template
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# System prompt for the prose-improve node. It is sent alongside the
# "prose/prose_improver" template when the model is invoked.
prompt = """
You are an AI writing assistant that improves existing text.
- Limit your response to no more than 200 characters, but make sure to construct complete sentences.
- Use Markdown formatting when appropriate.
"""
|
||||
|
||||
|
||||
def prose_improve_node(state: ProseState):
|
||||
logger.info("Generating prose improve content...")
|
||||
model = get_llm_by_type(AGENT_LLM_MAP["prose_writer"])
|
||||
prose_content = model.invoke(
|
||||
[
|
||||
SystemMessage(content=prompt),
|
||||
SystemMessage(content=get_prompt_template("prose/prose_improver")),
|
||||
HumanMessage(content=f"The existing text is: {state['content']}"),
|
||||
],
|
||||
)
|
||||
|
||||
@@ -7,22 +7,18 @@ from langchain.schema import HumanMessage, SystemMessage
|
||||
|
||||
from src.config.agents import AGENT_LLM_MAP
|
||||
from src.llms.llm import get_llm_by_type
|
||||
from src.prompts.template import get_prompt_template
|
||||
from src.prose.graph.state import ProseState
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# System prompt for the prose-longer node. It is sent alongside the
# "prose/prose_longer" template when the model is invoked.
prompt = """
You are an AI writing assistant that lengthens existing text.
- Use Markdown formatting when appropriate.
"""
|
||||
|
||||
|
||||
def prose_longer_node(state: ProseState):
|
||||
logger.info("Generating prose longer content...")
|
||||
model = get_llm_by_type(AGENT_LLM_MAP["prose_writer"])
|
||||
prose_content = model.invoke(
|
||||
[
|
||||
SystemMessage(content=prompt),
|
||||
SystemMessage(content=get_prompt_template("prose/prose_longer")),
|
||||
HumanMessage(content=f"The existing text is: {state['content']}"),
|
||||
],
|
||||
)
|
||||
|
||||
@@ -7,13 +7,10 @@ from langchain.schema import HumanMessage, SystemMessage
|
||||
|
||||
from src.config.agents import AGENT_LLM_MAP
|
||||
from src.llms.llm import get_llm_by_type
|
||||
from src.prompts.template import get_prompt_template
|
||||
from src.prose.graph.state import ProseState
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
# System prompt for the prose-shorter node. It is sent alongside the
# "prose/prose_shorter" template when the model is invoked.
prompt = """
You are an AI writing assistant that shortens existing text.
- Use Markdown formatting when appropriate.
"""
|
||||
|
||||
|
||||
def prose_shorter_node(state: ProseState):
|
||||
@@ -21,7 +18,7 @@ def prose_shorter_node(state: ProseState):
|
||||
model = get_llm_by_type(AGENT_LLM_MAP["prose_writer"])
|
||||
prose_content = model.invoke(
|
||||
[
|
||||
SystemMessage(content=prompt),
|
||||
SystemMessage(content=get_prompt_template("prose/prose_shorter")),
|
||||
HumanMessage(content=f"The existing text is: {state['content']}"),
|
||||
],
|
||||
)
|
||||
|
||||
@@ -7,14 +7,10 @@ from langchain.schema import HumanMessage, SystemMessage
|
||||
|
||||
from src.config.agents import AGENT_LLM_MAP
|
||||
from src.llms.llm import get_llm_by_type
|
||||
from src.prompts.template import get_prompt_template
|
||||
from src.prose.graph.state import ProseState
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
# System prompt for the prose-zap node. It is sent alongside the
# "prose/prose_zap" template when the model is invoked.
# Fixes two defects in the original prompt text: the typo
# "You area an AI" -> "You are an AI", and a stray trailing double quote
# after "manipulating the text." that was never opened.
prompt = """
You are an AI writing assistant that generates text based on a prompt.
- You take an input from the user and a command for manipulating the text.
- Use Markdown formatting when appropriate.
"""
|
||||
|
||||
|
||||
def prose_zap_node(state: ProseState):
|
||||
@@ -22,7 +18,7 @@ def prose_zap_node(state: ProseState):
|
||||
model = get_llm_by_type(AGENT_LLM_MAP["prose_writer"])
|
||||
prose_content = model.invoke(
|
||||
[
|
||||
SystemMessage(content=prompt),
|
||||
SystemMessage(content=get_prompt_template("prose/prose_zap")),
|
||||
HumanMessage(
|
||||
content=f"For this text: {state['content']}.\nYou have to respect the command: {state['command']}"
|
||||
),
|
||||
|
||||
Reference in New Issue
Block a user