diff --git a/README.md b/README.md index 0932b68..2266c3b 100644 --- a/README.md +++ b/README.md @@ -332,6 +332,8 @@ Skills are loaded progressively — only when the task needs them, not all at on Tools follow the same philosophy. DeerFlow comes with a core toolset — web search, web fetch, file operations, bash execution — and supports custom tools via MCP servers and Python functions. Swap anything. Add anything. +Gateway-generated follow-up suggestions now normalize both plain-string model output and block/list-style rich content before parsing the JSON array response, so provider-specific content wrappers do not silently drop suggestions. + ``` # Paths inside the sandbox container /mnt/skills/public diff --git a/backend/CLAUDE.md b/backend/CLAUDE.md index 224eada..a2cbcea 100644 --- a/backend/CLAUDE.md +++ b/backend/CLAUDE.md @@ -168,6 +168,7 @@ FastAPI application on port 8001 with health check at `GET /health`. | **Memory** (`/api/memory`) | `GET /` - memory data; `POST /reload` - force reload; `GET /config` - config; `GET /status` - config + data | | **Uploads** (`/api/threads/{id}/uploads`) | `POST /` - upload files (auto-converts PDF/PPT/Excel/Word); `GET /list` - list; `DELETE /{filename}` - delete | | **Artifacts** (`/api/threads/{id}/artifacts`) | `GET /{path}` - serve artifacts; `?download=true` for file download | +| **Suggestions** (`/api/threads/{id}/suggestions`) | `POST /` - generate follow-up questions; rich list/block model content is normalized before JSON parsing | Proxied through nginx: `/api/langgraph/*` → LangGraph, all other `/api/*` → Gateway. 
diff --git a/backend/src/gateway/routers/suggestions.py b/backend/src/gateway/routers/suggestions.py index 9a4e3a6..b5bb8df 100644 --- a/backend/src/gateway/routers/suggestions.py +++ b/backend/src/gateway/routers/suggestions.py @@ -60,6 +60,24 @@ def _parse_json_string_list(text: str) -> list[str] | None: return out +def _extract_response_text(content: object) -> str: + if isinstance(content, str): + return content + if isinstance(content, list): + parts: list[str] = [] + for block in content: + if isinstance(block, str): + parts.append(block) + elif isinstance(block, dict) and block.get("type") == "text": + text = block.get("text") + if isinstance(text, str): + parts.append(text) + return "\n".join(parts) if parts else "" + if content is None: + return "" + return str(content) + + def _format_conversation(messages: list[SuggestionMessage]) -> str: parts: list[str] = [] for m in messages: @@ -104,7 +122,7 @@ async def generate_suggestions(thread_id: str, request: SuggestionsRequest) -> S try: model = create_chat_model(name=request.model_name, thinking_enabled=False) response = model.invoke(prompt) - raw = str(response.content or "") + raw = _extract_response_text(response.content) suggestions = _parse_json_string_list(raw) or [] cleaned = [s.replace("\n", " ").strip() for s in suggestions if s.strip()] cleaned = cleaned[:n] diff --git a/backend/tests/test_suggestions_router.py b/backend/tests/test_suggestions_router.py index e97bf9e..3ef8f15 100644 --- a/backend/tests/test_suggestions_router.py +++ b/backend/tests/test_suggestions_router.py @@ -51,6 +51,24 @@ def test_generate_suggestions_parses_and_limits(monkeypatch): assert result.suggestions == ["Q1", "Q2", "Q3"] +def test_generate_suggestions_parses_list_block_content(monkeypatch): + req = suggestions.SuggestionsRequest( + messages=[ + suggestions.SuggestionMessage(role="user", content="Hi"), + suggestions.SuggestionMessage(role="assistant", content="Hello"), + ], + n=2, + model_name=None, + ) + fake_model = 
MagicMock() + fake_model.invoke.return_value = MagicMock(content=[{"type": "text", "text": '```json\n["Q1", "Q2"]\n```'}]) + monkeypatch.setattr(suggestions, "create_chat_model", lambda **kwargs: fake_model) + + result = asyncio.run(suggestions.generate_suggestions("t1", req)) + + assert result.suggestions == ["Q1", "Q2"] + + def test_generate_suggestions_returns_empty_on_model_error(monkeypatch): req = suggestions.SuggestionsRequest( messages=[suggestions.SuggestionMessage(role="user", content="Hi")],