diff --git a/conf.yaml.example b/conf.yaml.example
index df19fce..eb9319c 100644
--- a/conf.yaml.example
+++ b/conf.yaml.example
@@ -3,13 +3,15 @@
 # configurations to match your specific settings and requirements.
 # - Replace `api_key` with your own credentials.
 # - Replace `base_url` and `model` name if you want to use a custom model.
+# - Set `verify_ssl` to `false` if your LLM server uses self-signed certificates.
 # - A restart is required every time you change the `config.yaml` file.
 
 BASIC_MODEL:
   base_url: https://ark.cn-beijing.volces.com/api/v3
   model: "doubao-1-5-pro-32k-250115"
   api_key: xxxx
-
+  # verify_ssl: false # Uncomment this line to disable SSL certificate verification for self-signed certificates
+
 # Reasoning model is optional.
 # Uncomment the following settings if you want to use reasoning model
 # for planning.
diff --git a/docs/configuration_guide.md b/docs/configuration_guide.md
index ee9f73d..1b16ed6 100644
--- a/docs/configuration_guide.md
+++ b/docs/configuration_guide.md
@@ -58,6 +58,21 @@ BASIC_MODEL:
   api_key: YOUR_API_KEY
 ```
 
+### How to use models with self-signed SSL certificates?
+
+If your LLM server uses self-signed SSL certificates, you can disable SSL certificate verification by adding the `verify_ssl: false` parameter to your model configuration:
+
+```yaml
+BASIC_MODEL:
+  base_url: "https://your-llm-server.com/api/v1"
+  model: "your-model-name"
+  api_key: YOUR_API_KEY
+  verify_ssl: false # Disable SSL certificate verification for self-signed certificates
+```
+
+> [!WARNING]
+> Disabling SSL certificate verification reduces security and should only be used in development environments or when you trust the LLM server. In production environments, it's recommended to use properly signed SSL certificates.
+
 ### How to use Ollama models?
 
 DeerFlow supports the integration of Ollama models. You can refer to [litellm Ollama](https://docs.litellm.ai/docs/providers/ollama).
diff --git a/src/llms/llm.py b/src/llms/llm.py
index 2379b68..85de2b6 100644
--- a/src/llms/llm.py
+++ b/src/llms/llm.py
@@ -4,6 +4,7 @@
 from pathlib import Path
 from typing import Any, Dict
 import os
+import httpx
 
 from langchain_openai import ChatOpenAI
 from langchain_deepseek import ChatDeepSeek
@@ -71,6 +72,16 @@ def _create_llm_use_conf(
     if llm_type == "reasoning":
         merged_conf["api_base"] = merged_conf.pop("base_url", None)
 
+    # Handle SSL verification settings
+    verify_ssl = merged_conf.pop("verify_ssl", True)
+
+    # Create custom HTTP client if SSL verification is disabled
+    if not verify_ssl:
+        http_client = httpx.Client(verify=False)
+        http_async_client = httpx.AsyncClient(verify=False)
+        merged_conf["http_client"] = http_client
+        merged_conf["http_async_client"] = http_async_client
+
     return (
         ChatOpenAI(**merged_conf)
         if llm_type != "reasoning"