---
# AutoDev CLI Configuration File
#
# This file stores your LLM configurations. You can have multiple configurations
# and switch between them using the `active` field.
#
# Location: ~/.autodev/config.yaml
# Active configuration name (must match one of the configs below)
active: work-gpt4

# List of available configurations
configs:
  # Configuration 1: Work GPT-4
  - name: work-gpt4
    provider: openai
    apiKey: sk-your-openai-api-key-here
    model: gpt-4-turbo
    temperature: 0.7
    maxTokens: 8192
    # baseUrl: https://api.openai.com/v1  # Optional: custom API endpoint

  # Configuration 2: Personal Claude
  - name: personal-claude
    provider: anthropic
    apiKey: sk-ant-your-anthropic-api-key-here
    model: claude-3-5-sonnet-20241022
    temperature: 0.7
    maxTokens: 8192

  # Configuration 3: DeepSeek
  - name: deepseek-chat
    provider: deepseek
    apiKey: sk-your-deepseek-api-key-here
    model: deepseek-chat
    baseUrl: https://api.deepseek.com/v1

  # Configuration 4: Local Ollama
  - name: local-llama
    provider: ollama
    apiKey: ""  # Ollama doesn't need an API key
    model: llama3.2
    baseUrl: http://localhost:11434

  # Configuration 5: Google Gemini
  - name: gemini
    provider: google
    apiKey: your-google-api-key-here
    model: gemini-pro

  # Configuration 6: OpenRouter
  - name: router
    provider: openrouter
    apiKey: sk-or-your-openrouter-api-key-here
    model: anthropic/claude-3.5-sonnet
    baseUrl: https://openrouter.ai/api/v1

  # Configuration 7: ChatGLM (智谱AI)
  - name: glm-chat
    provider: glm
    apiKey: your-glm-api-key-here.xxxxx
    model: glm-4-plus
    baseUrl: https://open.bigmodel.cn/api/paas/v4/
    temperature: 0.7
    maxTokens: 128000

  # Configuration 8: Qwen (阿里通义千问)
  - name: qwen-max
    provider: qwen
    apiKey: sk-your-qwen-api-key-here
    model: qwen-max
    baseUrl: https://dashscope.aliyuncs.com/api/v1/
    temperature: 0.7
    maxTokens: 8000

  # Configuration 9: Kimi (月之暗面)
  - name: kimi-32k
    provider: kimi
    apiKey: sk-your-kimi-api-key-here
    model: moonshot-v1-32k
    baseUrl: https://api.moonshot.cn/v1/
    temperature: 0.7
    maxTokens: 8192

  # Configuration 10: Custom OpenAI-compatible
  - name: custom-api
    provider: custom-openai-base
    apiKey: your-api-key-here
    model: your-model-name
    baseUrl: https://api.example.com/v1/
    temperature: 0.7
    maxTokens: 8192
# ============================================================================
# Provider-specific notes:
# ============================================================================
#
# OpenAI:
# - Get API key from: https://platform.openai.com/api-keys
# - Models: gpt-4, gpt-4-turbo, gpt-3.5-turbo
#
# Anthropic:
# - Get API key from: https://console.anthropic.com/
# - Models: claude-3-5-sonnet-20241022, claude-3-opus, claude-3-sonnet
#
# Google:
# - Get API key from: https://makersuite.google.com/app/apikey
# - Models: gemini-pro, gemini-2.0-flash-exp
#
# DeepSeek:
# - Get API key from: https://platform.deepseek.com/
# - Models: deepseek-chat, deepseek-coder
#
# Ollama:
# - Install from: https://ollama.com/
# - No API key needed (local)
# - Models: llama3.2, mistral, codellama, etc.
#
# OpenRouter:
# - Get API key from: https://openrouter.ai/keys
# - Access to multiple providers through one API
# - Models: anthropic/claude-3.5-sonnet, openai/gpt-4, etc.
#
# GLM (智谱AI):
# - Get API key from: https://open.bigmodel.cn/
# - Base URL: https://open.bigmodel.cn/api/paas/v4 (auto-filled)
# - Models: glm-4-plus, glm-4-air, glm-4-flash, glm-4, glm-3-turbo
#
# Qwen (阿里通义千问):
# - Get API key from: https://dashscope.console.aliyun.com/
# - Base URL: https://dashscope.aliyuncs.com/api/v1 (auto-filled)
# - Models: qwen-max, qwen-plus, qwen-turbo, qwen-long
#
# Kimi (月之暗面 Moonshot):
# - Get API key from: https://platform.moonshot.cn/
# - Base URL: https://api.moonshot.cn/v1 (auto-filled)
# - Models: moonshot-v1-8k, moonshot-v1-32k, moonshot-v1-128k
#
# Custom OpenAI-compatible (custom-openai-base):
# - For any other OpenAI-compatible API
# - Requires: baseUrl, apiKey, and model name
# - Set baseUrl to your custom endpoint
#
# ============================================================================
# Managing configurations:
# ============================================================================
#
# Add a new configuration:
# 1. Add a new item to the `configs` list
# 2. Give it a unique `name`
# 3. Set the `provider`, `apiKey`, and `model`
#
# Switch active configuration:
# - Change the `active` field to match a config name
# - Or use: `autodev config set <name>` (coming soon)
#
# Delete a configuration:
# - Remove the item from the `configs` list
#
# ============================================================================