Add Kubernetes-based sandbox provider for multi-instance support (#19)

* feat: adds docker-based dev environment

* docs: updates Docker command help

* fix local dev

* feat(sandbox): add Kubernetes-based sandbox provider for multi-instance support

* fix: skills path in k8s

* feat: add example config for k8s sandbox

* fix: docker config

* fix: load skills on docker dev

* feat: support sandbox execution in the Kubernetes Deployment model

* chore: rename web service name
This commit is contained in:
JeffJiang
2026-02-09 21:59:13 +08:00
committed by GitHub
parent 554ec7a91e
commit b6da3a219e
20 changed files with 981 additions and 94 deletions

View File

@@ -18,10 +18,10 @@ models:
display_name: GPT-4
use: langchain_openai:ChatOpenAI
model: gpt-4
api_key: $OPENAI_API_KEY # Use environment variable
api_key: $OPENAI_API_KEY # Use environment variable
max_tokens: 4096
temperature: 0.7
supports_vision: true # Enable vision support for view_image tool
supports_vision: true # Enable vision support for view_image tool
# Example: Anthropic Claude model
# - name: claude-3-5-sonnet
@@ -210,7 +210,7 @@ title:
enabled: true
max_words: 6
max_chars: 60
model_name: null # Use default model (first model in models list)
model_name: null # Use default model (first model in models list)
# ============================================================================
# Summarization Configuration
@@ -289,10 +289,10 @@ summarization:
# Stores user context and conversation history for personalized responses
memory:
enabled: true
storage_path: .deer-flow/memory.json # Path relative to backend directory
debounce_seconds: 30 # Wait time before processing queued updates
model_name: null # Use default model
max_facts: 100 # Maximum number of facts to store
fact_confidence_threshold: 0.7 # Minimum confidence for storing facts
injection_enabled: true # Whether to inject memory into system prompt
max_injection_tokens: 2000 # Maximum tokens for memory injection
storage_path: .deer-flow/memory.json # Path relative to backend directory
debounce_seconds: 30 # Wait time before processing queued updates
model_name: null # Use default model
max_facts: 100 # Maximum number of facts to store
fact_confidence_threshold: 0.7 # Minimum confidence for storing facts
injection_enabled: true # Whether to inject memory into system prompt
max_injection_tokens: 2000 # Maximum tokens for memory injection