feat: prose completion api

This commit is contained in:
Jiang Feng
2025-04-26 23:12:13 +08:00
parent ba8c5fbcd3
commit 66794a4b73
16 changed files with 395 additions and 32 deletions

View File

@@ -0,0 +1,67 @@
# Copyright (c) 2025 Bytedance Ltd. and/or its affiliates
# SPDX-License-Identifier: MIT
import asyncio
import logging
from langgraph.graph import END, START, StateGraph
from src.prose.graph.prose_continue_node import prose_continue_node
from src.prose.graph.prose_fix_node import prose_fix_node
from src.prose.graph.prose_improve_node import prose_improve_node
from src.prose.graph.prose_longer_node import prose_longer_node
from src.prose.graph.prose_shorter_node import prose_shorter_node
from src.prose.graph.prose_zap_node import prose_zap_node
from src.prose.graph.state import ProseState
def optional_node(state: ProseState):
    """Return the routing key (the requested prose option) for the graph.

    The value is one of: continue, improve, shorter, longer, fix, zap.
    """
    selected_option = state["option"]
    return selected_option
def build_graph():
    """Build and return the compiled prose-writing workflow graph.

    The graph has one node per prose option; a run enters at START,
    is routed to exactly one node by ``optional_node``, then ends.
    """
    # build state graph
    builder = StateGraph(ProseState)
    builder.add_node("prose_continue", prose_continue_node)
    builder.add_node("prose_improve", prose_improve_node)
    builder.add_node("prose_shorter", prose_shorter_node)
    builder.add_node("prose_longer", prose_longer_node)
    builder.add_node("prose_fix", prose_fix_node)
    builder.add_node("prose_zap", prose_zap_node)
    # Route from START to the node named by state["option"]; the trailing
    # END argument ("then") sends every branch to END after it runs.
    builder.add_conditional_edges(
        START,
        optional_node,
        {
            "continue": "prose_continue",
            "improve": "prose_improve",
            "shorter": "prose_shorter",
            "longer": "prose_longer",
            "fix": "prose_fix",
            "zap": "prose_zap",
        },
        END,
    )
    return builder.compile()
async def _test_workflow():
    """Smoke-test: stream a 'continue' run of the prose graph to stdout."""
    graph = build_graph()
    stream = graph.astream(
        {
            "content": "The weather in Beijing is sunny",
            "option": "continue",
        },
        stream_mode="messages",
        subgraphs=True,
    )
    async for _namespace, payload in stream:
        message = payload[0]
        # Print each chunk in an OpenAI-style chat.completion.chunk shape.
        print(
            {
                "id": message.id,
                "object": "chat.completion.chunk",
                "content": message.content,
            }
        )
if __name__ == "__main__":
    from dotenv import load_dotenv

    # Load .env first so LLM provider credentials are in the environment
    # before the workflow constructs any model clients.
    load_dotenv()
    logging.basicConfig(level=logging.INFO)
    asyncio.run(_test_workflow())

View File

@@ -0,0 +1,31 @@
# Copyright (c) 2025 Bytedance Ltd. and/or its affiliates
# SPDX-License-Identifier: MIT
import logging
from langchain.schema import HumanMessage, SystemMessage
from src.config.agents import AGENT_LLM_MAP
from src.llms.llm import get_llm_by_type
from src.prose.graph.state import ProseState
logger = logging.getLogger(__name__)
def prose_continue_node(state: ProseState):
    """Continue the text in ``state["content"]`` with the prose-writer LLM.

    Returns ``{"output": <generated continuation>}``.
    """
    logger.info("Generating prose continue content...")
    model = get_llm_by_type(AGENT_LLM_MAP["prose_writer"])
    prose_content = model.invoke(
        [
            SystemMessage(
                content="""
You are an AI writing assistant that continues existing text based on context from prior text.
- Give more weight/priority to the later characters than the beginning ones.
- Limit your response to no more than 200 characters, but make sure to construct complete sentences.
- Use Markdown formatting when appropriate
"""
            ),
            HumanMessage(content=state["content"]),
        ],
    )
    # Log the result like the sibling prose_* nodes do; lazy %-args so the
    # message is only rendered when INFO logging is enabled.
    logger.info("prose_content: %s", prose_content)
    return {"output": prose_content.content}

View File

@@ -0,0 +1,32 @@
# Copyright (c) 2025 Bytedance Ltd. and/or its affiliates
# SPDX-License-Identifier: MIT
import logging
from langchain.schema import HumanMessage, SystemMessage
from src.config.agents import AGENT_LLM_MAP
from src.llms.llm import get_llm_by_type
from src.prose.graph.state import ProseState
logger = logging.getLogger(__name__)
def prose_fix_node(state: ProseState):
    """Fix grammar and spelling in ``state["content"]`` via the prose-writer LLM.

    Returns ``{"output": <corrected text>}``.
    """
    logger.info("Generating prose fix content...")
    model = get_llm_by_type(AGENT_LLM_MAP["prose_writer"])
    prose_content = model.invoke(
        [
            SystemMessage(
                content="""
You are an AI writing assistant that fixes grammar and spelling errors in existing text.
- Limit your response to no more than 200 characters, but make sure to construct complete sentences.
- Use Markdown formatting when appropriate.
- If the text is already correct, just return the original text.
"""
            ),
            HumanMessage(content=f"The existing text is: {state['content']}"),
        ],
    )
    # Lazy %-args: only rendered when INFO logging is enabled.
    logger.info("prose_content: %s", prose_content)
    return {"output": prose_content.content}

View File

@@ -0,0 +1,31 @@
# Copyright (c) 2025 Bytedance Ltd. and/or its affiliates
# SPDX-License-Identifier: MIT
import logging
from langchain.schema import HumanMessage, SystemMessage
from src.config.agents import AGENT_LLM_MAP
from src.llms.llm import get_llm_by_type
from src.prose.graph.state import ProseState
logger = logging.getLogger(__name__)
def prose_improve_node(state: ProseState):
    """Improve the text in ``state["content"]`` via the prose-writer LLM.

    Returns ``{"output": <improved text>}``.
    """
    logger.info("Generating prose improve content...")
    model = get_llm_by_type(AGENT_LLM_MAP["prose_writer"])
    prose_content = model.invoke(
        [
            SystemMessage(
                content="""
You are an AI writing assistant that improves existing text.
- Limit your response to no more than 200 characters, but make sure to construct complete sentences.
- Use Markdown formatting when appropriate.
"""
            ),
            HumanMessage(content=f"The existing text is: {state['content']}"),
        ],
    )
    # Lazy %-args: only rendered when INFO logging is enabled.
    logger.info("prose_content: %s", prose_content)
    return {"output": prose_content.content}

View File

@@ -0,0 +1,30 @@
# Copyright (c) 2025 Bytedance Ltd. and/or its affiliates
# SPDX-License-Identifier: MIT
import logging
from langchain.schema import HumanMessage, SystemMessage
from src.config.agents import AGENT_LLM_MAP
from src.llms.llm import get_llm_by_type
from src.prose.graph.state import ProseState
logger = logging.getLogger(__name__)
def prose_longer_node(state: ProseState):
    """Lengthen the text in ``state["content"]`` via the prose-writer LLM.

    Returns ``{"output": <lengthened text>}``.
    """
    logger.info("Generating prose longer content...")
    model = get_llm_by_type(AGENT_LLM_MAP["prose_writer"])
    prose_content = model.invoke(
        [
            SystemMessage(
                content="""
You are an AI writing assistant that lengthens existing text.
- Use Markdown formatting when appropriate.
"""
            ),
            HumanMessage(content=f"The existing text is: {state['content']}"),
        ],
    )
    # Lazy %-args: only rendered when INFO logging is enabled.
    logger.info("prose_content: %s", prose_content)
    return {"output": prose_content.content}

View File

@@ -0,0 +1,30 @@
# Copyright (c) 2025 Bytedance Ltd. and/or its affiliates
# SPDX-License-Identifier: MIT
import logging
from langchain.schema import HumanMessage, SystemMessage
from src.config.agents import AGENT_LLM_MAP
from src.llms.llm import get_llm_by_type
from src.prose.graph.state import ProseState
logger = logging.getLogger(__name__)
def prose_shorter_node(state: ProseState):
    """Shorten the text in ``state["content"]`` via the prose-writer LLM.

    Returns ``{"output": <shortened text>}``.
    """
    logger.info("Generating prose shorter content...")
    model = get_llm_by_type(AGENT_LLM_MAP["prose_writer"])
    prose_content = model.invoke(
        [
            SystemMessage(
                content="""
You are an AI writing assistant that shortens existing text.
- Use Markdown formatting when appropriate.
"""
            ),
            HumanMessage(content=f"The existing text is: {state['content']}"),
        ],
    )
    # Lazy %-args: only rendered when INFO logging is enabled.
    logger.info("prose_content: %s", prose_content)
    return {"output": prose_content.content}

View File

@@ -0,0 +1,33 @@
# Copyright (c) 2025 Bytedance Ltd. and/or its affiliates
# SPDX-License-Identifier: MIT
import logging
from langchain.schema import HumanMessage, SystemMessage
from src.config.agents import AGENT_LLM_MAP
from src.llms.llm import get_llm_by_type
from src.prose.graph.state import ProseState
logger = logging.getLogger(__name__)
def prose_zap_node(state: ProseState):
    """Rewrite ``state["content"]`` per the user command in ``state["command"]``.

    Returns ``{"output": <generated text>}``.
    """
    logger.info("Generating prose zap content...")
    model = get_llm_by_type(AGENT_LLM_MAP["prose_writer"])
    prose_content = model.invoke(
        [
            SystemMessage(
                # Fixed prompt typos: "You area an" -> "You are an", and
                # removed a stray trailing double quote after "the text."
                content="""
You are an AI writing assistant that generates text based on a prompt.
- You take an input from the user and a command for manipulating the text.
- Use Markdown formatting when appropriate.
"""
            ),
            HumanMessage(
                content=f"For this text: {state['content']}.\nYou have to respect the command: {state['command']}"
            ),
        ],
    )
    # Lazy %-args: only rendered when INFO logging is enabled.
    logger.info("prose_content: %s", prose_content)
    return {"output": prose_content.content}

20
src/prose/graph/state.py Normal file
View File

@@ -0,0 +1,20 @@
# Copyright (c) 2025 Bytedance Ltd. and/or its affiliates
# SPDX-License-Identifier: MIT
from langgraph.graph import MessagesState
class ProseState(MessagesState):
    """State for the prose generation workflow.

    ``MessagesState`` is a ``TypedDict``, so class-level default values are
    ignored at runtime (and flagged by type checkers); fields are therefore
    declared as plain annotations.
    """

    # The text the selected prose node operates on.
    content: str
    # Prose writer option: continue, improve, shorter, longer, fix, zap.
    option: str
    # The user's custom command (read by the "zap" node).
    command: str
    # Text produced by the selected prose node.
    output: str