# ===================================================
# SmartPerfetto Configuration
# ===================================================
# Copy this file to the env file used by your run mode:
#   Local source: cp backend/.env.example backend/.env
#   Docker Hub:   cp backend/.env.example .env
#
# For local source runs, a working Claude Code setup can be enough because
# the Claude Agent SDK can use Claude Code's local auth/config. That includes
# Claude Code subscription login and Claude Code-configured third-party
# provider base URLs/API keys.
# For Docker, direct Anthropic API, or third-party proxy use, set the env vars
# below explicitly.

# ---------------------------------------------------
# AI Service Configuration
# ---------------------------------------------------
# SmartPerfetto uses the Claude Agent SDK as its AI runtime.
# You can connect it to Anthropic directly, or to any LLM
# provider via an API proxy that translates requests to
# Anthropic Messages API format.
#
# Three ways to connect:
#
#   1. Local Claude Code setup — no env needed when `claude` already works
#   2. Direct Anthropic API — set ANTHROPIC_API_KEY
#   3.
#      Third-party LLM via proxy — set ANTHROPIC_BASE_URL to
#      a proxy (one-api / new-api / LiteLLM) that converts
#      Anthropic Messages API -> provider's OpenAI-compatible API
#
#   ┌──────────────────────────────────────────────────┐
#   │  SmartPerfetto                                   │
#   │        │ (Claude Agent SDK)                      │
#   │        ▼                                         │
#   │  ANTHROPIC_BASE_URL ──► API Proxy (one-api等)    │
#   │                             │                    │
#   │                   ┌─────────┼─────────┐          │
#   │                   ▼         ▼         ▼          │
#   │                GLM-5.1  DeepSeek    Qwen         │
#   └──────────────────────────────────────────────────┘
#
# Popular open-source API proxies:
#   - one-api: https://github.com/songquanpeng/one-api
#   - new-api: https://github.com/Calcium-Ion/new-api
#   - LiteLLM: https://github.com/BerriAI/litellm

# ===================================================
# Option 1: Anthropic Direct (default, recommended)
# ===================================================
# Get your API key at https://console.anthropic.com/
ANTHROPIC_API_KEY=your_anthropic_api_key_here

# ===================================================
# Option 2: AWS Bedrock (Bearer Token)
# ===================================================
# Use Claude via AWS Bedrock with bearer token authentication.
# CLAUDE_CODE_USE_BEDROCK=1
# ANTHROPIC_BEDROCK_BASE_URL=https://bedrock-runtime.us-east-1.amazonaws.com
# AWS_BEARER_TOKEN_BEDROCK=your_bearer_token_here

# ===================================================
# Option 3: Third-party LLM via API Proxy
# ===================================================
# Uncomment ONE provider block below. All require a proxy
# (one-api / new-api / LiteLLM) that accepts Anthropic API
# format and forwards to the provider's OpenAI-compatible API.
#
# In your proxy, create a channel for the provider and map
# the model names below. Then point ANTHROPIC_BASE_URL at
# the proxy's Anthropic-compatible endpoint.
# --- Proxy Connection (required for all providers below) ---
# ANTHROPIC_BASE_URL=http://localhost:3000   # Your proxy address
# ANTHROPIC_API_KEY=sk-proxy-xxx             # Proxy auth token

# ---------------------------------------------------
# GLM / Z.ai (智谱AI / ZhipuAI)
# ---------------------------------------------------
# Console: https://open.bigmodel.cn/
# Proxy backend URL: https://open.bigmodel.cn/api/paas/v4
# CLAUDE_MODEL=glm-5.1
# CLAUDE_LIGHT_MODEL=glm-4.7-flash

# ---------------------------------------------------
# DeepSeek
# ---------------------------------------------------
# Console: https://platform.deepseek.com/
# Proxy backend URL: https://api.deepseek.com
# Note: deepseek-chat / deepseek-reasoner are legacy compatibility aliases.
# CLAUDE_MODEL=deepseek-v4-pro
# CLAUDE_LIGHT_MODEL=deepseek-v4-flash

# ---------------------------------------------------
# Qwen (通义千问 / Alibaba Cloud)
# ---------------------------------------------------
# Console: https://dashscope.console.aliyun.com/
# Proxy backend URL: https://dashscope.aliyuncs.com/compatible-mode/v1
# CLAUDE_MODEL=qwen3-max
# CLAUDE_LIGHT_MODEL=qwen3.5-flash

# ---------------------------------------------------
# Kimi (月之暗面 / Moonshot AI)
# ---------------------------------------------------
# Console: https://platform.moonshot.cn/
# Proxy backend URL: https://api.moonshot.cn/v1
# CLAUDE_MODEL=kimi-k2.6
# CLAUDE_LIGHT_MODEL=kimi-k2.5

# ---------------------------------------------------
# Doubao (豆包 / ByteDance Volcano Engine)
# ---------------------------------------------------
# Console: https://console.volcengine.com/ark
# Proxy backend URL: https://ark.cn-beijing.volces.com/api/v3
# Note: Production Ark deployments often use endpoint IDs (ep-xxx).
# Coding Plan setups may also expose model names such as
# doubao-seed-2.0-code or doubao-seed-code.
# CLAUDE_MODEL=ep-your-doubao-pro-endpoint-id
# CLAUDE_LIGHT_MODEL=ep-your-doubao-lite-endpoint-id

# ---------------------------------------------------
# MiniMax
# ---------------------------------------------------
# Console: https://platform.minimaxi.com/
# Proxy backend URL: https://api.minimaxi.com/v1
# CLAUDE_MODEL=MiniMax-M2.7
# CLAUDE_LIGHT_MODEL=MiniMax-M2.5

# ---------------------------------------------------
# Xiaomi MiMo
# ---------------------------------------------------
# If your MiMo account exposes an OpenAI-compatible endpoint,
# add it to your proxy first, then map the MiMo model ID below.
# If your MiMo gateway already exposes Anthropic Messages
# compatibility directly, you can point ANTHROPIC_BASE_URL there.
# Proxy backend URL: (account-specific — check your MiMo console)
# CLAUDE_MODEL=mimo-your-main-model
# CLAUDE_LIGHT_MODEL=mimo-your-light-model

# ---------------------------------------------------
# Legacy / account-specific proxy examples
# ---------------------------------------------------
# The providers below are kept for users whose proxy accounts still expose
# these model IDs. Verify the current model catalog and OpenAI-compatible
# backend URL in the provider console before use.
# ---------------------------------------------------
# Baichuan (百川智能)
# ---------------------------------------------------
# Console: https://platform.baichuan-ai.com/
# Proxy backend URL: https://api.baichuan-ai.com/v1
# CLAUDE_MODEL=Baichuan4
# CLAUDE_LIGHT_MODEL=Baichuan3-Turbo

# ---------------------------------------------------
# Yi (零一万物 / 01.AI)
# ---------------------------------------------------
# Console: https://platform.lingyiwanwu.com/
# Proxy backend URL: https://api.lingyiwanwu.com/v1
# CLAUDE_MODEL=yi-large
# CLAUDE_LIGHT_MODEL=yi-lightning

# ---------------------------------------------------
# Spark (讯飞星火 / iFlytek)
# ---------------------------------------------------
# Console: https://console.xfyun.cn/
# Proxy backend URL: https://spark-api-open.xf-yun.com/v1
# CLAUDE_MODEL=4.0Ultra
# CLAUDE_LIGHT_MODEL=generalv3.5

# ---------------------------------------------------
# Hunyuan (腾讯混元 / Tencent)
# ---------------------------------------------------
# Console: https://cloud.tencent.com/product/hunyuan
# Proxy backend URL: https://api.hunyuan.cloud.tencent.com/v1
# CLAUDE_MODEL=hunyuan-turbo-latest
# CLAUDE_LIGHT_MODEL=hunyuan-lite

# ---------------------------------------------------
# OpenAI
# ---------------------------------------------------
# Console: https://platform.openai.com/
# Proxy backend URL: https://api.openai.com/v1
# CLAUDE_MODEL=gpt-5.5
# CLAUDE_LIGHT_MODEL=gpt-5.4-mini

# ---------------------------------------------------
# Google Gemini
# ---------------------------------------------------
# Console: https://aistudio.google.com/
# Proxy backend URL: https://generativelanguage.googleapis.com/v1beta/openai
# Preview path:
# CLAUDE_MODEL=gemini-3-pro-preview
# CLAUDE_LIGHT_MODEL=gemini-3-flash-preview
# Stable path:
# CLAUDE_MODEL=gemini-2.5-pro
# CLAUDE_LIGHT_MODEL=gemini-2.5-flash

# ---------------------------------------------------
# Ollama (local, free)
# ---------------------------------------------------
# Install:
# https://ollama.com/
# No proxy needed if proxy supports Ollama backend,
# or point proxy backend URL at: http://localhost:11434/v1
# Verify the chosen local tag supports tool calling before full analysis.
# CLAUDE_MODEL=qwen3:30b
# CLAUDE_LIGHT_MODEL=qwen3:30b

# ---------------------------------------------------
# Model & Agent Tuning (optional, for all providers)
# ---------------------------------------------------
# CLAUDE_MAX_TURNS=60              # Max reasoning turns per full analysis (default 60)
# CLAUDE_QUICK_MAX_TURNS=10        # Quick/fast mode max reasoning turns (default 10)
# CLAUDE_MAX_BUDGET_USD=5          # Per-analysis budget cap (Anthropic only)
# CLAUDE_EFFORT=high               # SDK extended thinking: low|medium|high|max (Anthropic only)
# CLAUDE_ENABLE_SUB_AGENTS=false   # Sub-agent delegation (research preview, off by default for all users)
# CLAUDE_SUB_AGENT_MODEL=sonnet    # Sub-agent model: haiku|sonnet|opus|inherit
# SMARTPERFETTO_OUTPUT_LANGUAGE=zh-CN  # User-facing output language: zh-CN (default) or en

# Per-turn timeouts — raise for slower LLMs (DeepSeek / Ollama / GLM / Qwen etc).
# Defaults are tuned for Claude Sonnet / Haiku; most non-Anthropic providers need larger values.
# CLAUDE_FULL_PER_TURN_MS=60000        # Full analysis per-turn budget (default 60s)
# CLAUDE_QUICK_PER_TURN_MS=40000       # Quick (fast mode) per-turn budget (default 40s)
# CLAUDE_VERIFIER_TIMEOUT_MS=60000     # Verifier single-turn LLM timeout (default 60s)
# CLAUDE_CLASSIFIER_TIMEOUT_MS=30000   # Query complexity classifier timeout (default 30s)

# ---------------------------------------------------
# Server
# ---------------------------------------------------
PORT=3000
NODE_ENV=development
FRONTEND_URL=http://localhost:10000

# ---------------------------------------------------
# API Authentication (optional, recommended for shared/public deployments)
# ---------------------------------------------------
# If set, all protected APIs require:
#   Authorization: Bearer <SMARTPERFETTO_API_KEY>
# SMARTPERFETTO_API_KEY=replace_with_a_strong_random_secret

# ---------------------------------------------------
# Request Throttling (optional, in-memory, per API key/IP)
# ---------------------------------------------------
# SMARTPERFETTO_USAGE_MAX_REQUESTS=200
# SMARTPERFETTO_USAGE_MAX_TRACE_REQUESTS=100
# SMARTPERFETTO_USAGE_WINDOW_MS=86400000

# ---------------------------------------------------
# Agent Safety Limits (optional)
# ---------------------------------------------------
# AGENT_SQL_MAX_ROWS=1000
# AGENT_SQL_TABLE_CACHE_TTL_MS=300000
# AGENT_TASK_TIMEOUT_MS=180000

# ---------------------------------------------------
# Trace Processor (optional -- auto-detected if not set)
# ---------------------------------------------------
# TRACE_PROCESSOR_PATH=/path/to/trace_processor_shell
# TRACE_PROCESSOR_DOWNLOAD_BASE=https://your-mirror/perfetto-luci-artifacts
# TRACE_PROCESSOR_DOWNLOAD_URL=https://your-mirror/trace_processor_shell
# PERFETTO_PATH=/path/to/perfetto

# ---------------------------------------------------
# File Upload
# ---------------------------------------------------
# Max upload size in bytes (2147483648 = 2 GiB)
MAX_FILE_SIZE=2147483648
UPLOAD_DIR=./uploads