feat: lite deep researcher implementation

Author: He Tao
Date:   2025-04-07 16:25:55 +08:00
Commit: 03798ded08

58 changed files with 4242 additions and 0 deletions

src/llms/llm.py (new file, 49 lines)

@@ -0,0 +1,49 @@
from langchain_openai import ChatOpenAI
from src.config import load_yaml_config
from pathlib import Path
from typing import Dict, Any
from src.config.agents import LLMType

# Cache for LLM instances
_llm_cache: dict[LLMType, ChatOpenAI] = {}


def _create_llm_use_conf(llm_type: LLMType, conf: Dict[str, Any]) -> ChatOpenAI:
    llm_type_map = {
        "reasoning": conf.get("REASONING_MODEL"),
        "basic": conf.get("BASIC_MODEL"),
        "vision": conf.get("VISION_MODEL"),
    }
    llm_conf = llm_type_map.get(llm_type)
    if not llm_conf:
        raise ValueError(f"Unknown LLM type: {llm_type}")
    if not isinstance(llm_conf, dict):
        raise ValueError(f"Invalid LLM Conf: {llm_type}")
    return ChatOpenAI(**llm_conf)


def get_llm_by_type(
    llm_type: LLMType,
) -> ChatOpenAI:
    """
    Get LLM instance by type. Returns cached instance if available.
    """
    if llm_type in _llm_cache:
        return _llm_cache[llm_type]
    conf = load_yaml_config(
        str((Path(__file__).parent.parent.parent / "conf.yaml").resolve())
    )
    llm = _create_llm_use_conf(llm_type, conf)
    _llm_cache[llm_type] = llm
    return llm


# Initialize LLMs for different purposes - now these will be cached
reasoning_llm = get_llm_by_type("reasoning")
basic_llm = get_llm_by_type("basic")
vl_llm = get_llm_by_type("vision")

if __name__ == "__main__":
    print(basic_llm.invoke("Hello"))
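
For reference, get_llm_by_type loads conf.yaml from the repository root and passes each top-level entry verbatim as keyword arguments to ChatOpenAI. A minimal sketch of what that file could look like: the top-level keys REASONING_MODEL, BASIC_MODEL, and VISION_MODEL come from the code above, while the inner fields and values are assumptions based on standard langchain_openai.ChatOpenAI parameters (model, api_key, base_url), not part of this commit:

# conf.yaml (hypothetical example; inner fields are any ChatOpenAI kwargs)
REASONING_MODEL:
  model: "o1-preview"                      # placeholder model name
  api_key: "sk-..."
  base_url: "https://api.openai.com/v1"
BASIC_MODEL:
  model: "gpt-4o-mini"
  api_key: "sk-..."
VISION_MODEL:
  model: "gpt-4o"
  api_key: "sk-..."

One design consequence worth noting: reasoning_llm, basic_llm, and vl_llm are constructed at import time, so importing this module requires conf.yaml to exist with all three entries present and valid, even for consumers that only need one model type.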