Upgrade langchain version to 1.x (#720)

* fix: revert the part of patch of issue-710 to extract the content from the plan

* Upgrade the ddgs for the new compatible version

* Upgraded langchain to 1.1.0
updated langchain related package to the new compatible version

* Update pyproject.toml

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>

---------

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
This commit is contained in:
Willem Jiang
2025-11-28 22:09:13 +08:00
committed by GitHub
parent b24f4d3f38
commit 170c4eb33c
17 changed files with 522 additions and 195 deletions

View File

@@ -19,7 +19,7 @@ class TestCrawlTool:
url = "https://example.com"
# Act
result = crawl_tool(url)
result = crawl_tool.invoke({"url": url})
# Assert
assert isinstance(result, str)
@@ -44,7 +44,7 @@ class TestCrawlTool:
url = "https://example.com"
# Act
result = crawl_tool(url)
result = crawl_tool.invoke({"url": url})
# Assert
result_dict = json.loads(result)
@@ -61,7 +61,7 @@ class TestCrawlTool:
url = "https://example.com"
# Act
result = crawl_tool(url)
result = crawl_tool.invoke({"url": url})
# Assert
assert isinstance(result, str)
@@ -80,7 +80,7 @@ class TestCrawlTool:
url = "https://example.com"
# Act
result = crawl_tool(url)
result = crawl_tool.invoke({"url": url})
# Assert
assert isinstance(result, str)
@@ -103,7 +103,7 @@ class TestCrawlTool:
url = "https://example.com"
# Act
result = crawl_tool(url)
result = crawl_tool.invoke({"url": url})
# Assert
assert isinstance(result, str)
@@ -123,7 +123,7 @@ class TestCrawlTool:
url = "https://example.com"
# Act
result = crawl_tool(url)
result = crawl_tool.invoke({"url": url})
# Assert
assert isinstance(result, str)
@@ -171,7 +171,7 @@ class TestPDFHandling:
pdf_url = "https://example.com/document.pdf"
# Act
result = crawl_tool(pdf_url)
result = crawl_tool.invoke({"url": pdf_url})
# Assert
assert isinstance(result, str)
@@ -189,7 +189,7 @@ class TestPDFHandling:
issue_pdf_url = "https://pdf.dfcfw.com/pdf/H3_AP202503071644153386_1.pdf"
# Act
result = crawl_tool(issue_pdf_url)
result = crawl_tool.invoke({"url": issue_pdf_url})
# Assert
result_dict = json.loads(result)
@@ -204,7 +204,7 @@ class TestPDFHandling:
pdf_url = "https://example.com/document.pdf"
# Act
result = crawl_tool(pdf_url)
result = crawl_tool.invoke({"url": pdf_url})
# Assert
# Crawler should not be instantiated for PDF URLs