mirror of
https://gitee.com/wanwujie/deer-flow
synced 2026-04-08 08:20:20 +08:00
* fix: resolve issue #651 - crawl error with None content handling. Fixed issue #651 by adding comprehensive null-safety checks and error handling to the crawl system. The fix prevents the "TypeError: Incoming markup is of an invalid type: None" crash by: 1. Validating HTTP responses from the Jina API 2. Handling None/empty content at the extraction stage 3. Adding fallback handling in Article markdown/message conversion 4. Improving error diagnostics with detailed logging 5. Adding 16 new tests with 100% coverage for critical paths * Update src/crawler/readability_extractor.py Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> * Update src/crawler/article.py Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --------- Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
This commit is contained in:
@@ -18,20 +18,36 @@ class Article:
|
||||
markdown = ""
|
||||
if including_title:
|
||||
markdown += f"# {self.title}\n\n"
|
||||
markdown += md(self.html_content)
|
||||
|
||||
if self.html_content is None or not str(self.html_content).strip():
|
||||
markdown += "*No content available*\n"
|
||||
else:
|
||||
markdown += md(self.html_content)
|
||||
|
||||
return markdown
|
||||
|
||||
def to_message(self) -> list[dict]:
    """Convert the article's markdown into chat-message content parts.

    The markdown is split on image links: text segments become
    ``{"type": "text"}`` parts and each captured image URL becomes an
    ``{"type": "image_url"}`` part, resolved against ``self.url``.

    Returns:
        A non-empty list of content-part dicts. Empty or None markdown
        yields a single "No content available" text part (issue #651
        guard, so ``re.split`` never receives None and callers never
        get an empty message).
    """
    # One capture group, so re.split interleaves text and image URLs:
    # odd-indexed parts are the captured URLs.
    image_pattern = r"!\[.*?\]\((.*?)\)"

    content: list[dict[str, str]] = []
    markdown = self.to_markdown()

    # Guard: blank/None markdown gets an explicit fallback part.
    if not markdown or not markdown.strip():
        return [{"type": "text", "text": "No content available"}]

    parts = re.split(image_pattern, markdown)

    for i, part in enumerate(parts):
        if i % 2 == 1:
            # Captured group: a (possibly relative) image URL —
            # resolve it against the article's own URL.
            image_url = urljoin(self.url, part.strip())
            content.append({"type": "image_url", "image_url": {"url": image_url}})
        else:
            # Skip empty text fragments (e.g. between adjacent images).
            text_part = part.strip()
            if text_part:
                content.append({"type": "text", "text": text_part})

    # If after processing all parts, content is still empty, provide a fallback message.
    if not content:
        content = [{"type": "text", "text": "No content available"}]

    return content
|
||||
@@ -1,11 +1,14 @@
|
||||
# Copyright (c) 2025 Bytedance Ltd. and/or its affiliates
|
||||
# SPDX-License-Identifier: MIT
|
||||
|
||||
import logging
|
||||
|
||||
from .article import Article
|
||||
from .jina_client import JinaClient
|
||||
from .readability_extractor import ReadabilityExtractor
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class Crawler:
    """Fetches a URL's HTML via the Jina API and extracts a readable Article."""

    def crawl(self, url: str) -> Article:
        """Fetch *url* and return an extracted :class:`Article`.

        Instead of using Jina's own markdown converter, we fetch raw HTML
        and run our own readability extraction, which gives better results.

        Raises:
            Exception: re-raised from the Jina fetch or the readability
                extraction after logging diagnostics (issue #651 — failures
                are logged with context instead of surfacing as an opaque
                TypeError downstream).
        """
        # Fetch stage: isolate network errors so the log pinpoints the URL.
        try:
            jina_client = JinaClient()
            html = jina_client.crawl(url, return_format="html")
        except Exception as e:
            logger.error(f"Failed to fetch URL {url} from Jina: {repr(e)}")
            raise

        # Extraction stage: isolated separately so fetch vs. parse
        # failures are distinguishable in the logs.
        try:
            extractor = ReadabilityExtractor()
            article = extractor.extract_article(html)
        except Exception as e:
            logger.error(f"Failed to extract article from {url}: {repr(e)}")
            raise

        article.url = url
        return article
|
||||
@@ -23,4 +23,11 @@ class JinaClient:
|
||||
)
|
||||
data = {"url": url}
|
||||
response = requests.post("https://r.jina.ai/", headers=headers, json=data)
|
||||
|
||||
if response.status_code != 200:
|
||||
raise ValueError(f"Jina API returned status {response.status_code}: {response.text}")
|
||||
|
||||
if not response.text or not response.text.strip():
|
||||
raise ValueError("Jina API returned empty response")
|
||||
|
||||
return response.text
|
||||
|
||||
@@ -1,15 +1,28 @@
|
||||
# Copyright (c) 2025 Bytedance Ltd. and/or its affiliates
|
||||
# SPDX-License-Identifier: MIT
|
||||
|
||||
import logging
|
||||
from readabilipy import simple_json_from_html_string
|
||||
|
||||
from .article import Article
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class ReadabilityExtractor:
    """Extracts a clean, readable Article from raw HTML via readabilipy."""

    def extract_article(self, html: str) -> Article:
        """Parse *html* and return an Article with non-None title/content.

        readabilipy may return None or whitespace-only values for pages it
        cannot parse; those are replaced with explicit placeholders so the
        downstream markdown/message conversion never receives None
        (fix for issue #651).

        Args:
            html: Raw HTML of the fetched page.

        Returns:
            An Article whose ``title`` and ``html_content`` are guaranteed
            to be non-empty strings.
        """
        article = simple_json_from_html_string(html, use_readability=True)

        # Null-safety: substitute a visible placeholder rather than
        # propagating None/blank content into markdown conversion.
        content = article.get("content")
        if not content or not str(content).strip():
            logger.warning("Readability extraction returned empty content")
            content = "<p>No content could be extracted from this page</p>"

        title = article.get("title")
        if not title or not str(title).strip():
            title = "Untitled"

        return Article(
            title=title,
            html_content=content,
        )
|
||||
Reference in New Issue
Block a user