refactor: refactor the prompt management mechanism (#17)

This commit is contained in:
DanielWalnut
2025-05-09 15:50:46 +08:00
committed by GitHub
parent 091f437bc5
commit 97a15dce36
16 changed files with 32 additions and 42 deletions

View File

@@ -7,24 +7,18 @@ from langchain.schema import HumanMessage, SystemMessage
from src.config.agents import AGENT_LLM_MAP
from src.llms.llm import get_llm_by_type
from src.prompts.template import get_prompt_template
from src.prose.graph.state import ProseState
logger = logging.getLogger(__name__)
# Inline system prompt for the prose-continuation task: instructs the model to
# continue existing text, favoring the most recent context and keeping the
# reply short (<= 200 chars) with complete sentences.
# NOTE(review): this diff also adds get_prompt_template("prose/prose_continue"),
# which appears to supersede this inline constant — confirm which one the
# final file state actually uses.
prompt = """
You are an AI writing assistant that continues existing text based on context from prior text.
- Give more weight/priority to the later characters than the beginning ones.
- Limit your response to no more than 200 characters, but make sure to construct complete sentences.
- Use Markdown formatting when appropriate
"""
def prose_continue_node(state: ProseState):
logger.info("Generating prose continue content...")
model = get_llm_by_type(AGENT_LLM_MAP["prose_writer"])
prose_content = model.invoke(
[
SystemMessage(content=prompt),
SystemMessage(content=get_prompt_template("prose/prose_continue")),
HumanMessage(content=state["content"]),
],
)