local sse = require("kide.http.sse")
local max_output_tokens = 4096 * 2
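
-- Per-task request presets for the OpenAI Responses API. Each table is a
-- template that payload_message() later deep-copies and fills with the
-- conversation:
--   code:      deterministic output (temperature 0.0), stops at a closing code fence
--   chat:      free-form chat at temperature 1.3
--   reasoner:  minimal payload, leaving sampling to the model defaults
--   commit:    plain-text output at temperature 1.0
--   translate: plain-text output at temperature 1.3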
local code_json = {
  input = {},
  model = "gpt-4o",
  max_output_tokens = max_output_tokens,
  stop = "```",
  stream = true,
  temperature = 0.0,
}

local chat_json = {
  input = {},
  model = "gpt-4o",
  max_output_tokens = max_output_tokens,
  text = {
    format = {
      type = "text",
    },
  },
  stream = true,
  temperature = 1.3,
  top_p = 1,
}

local reasoner_json = {
  input = {},
  model = "gpt-4o",
  max_output_tokens = max_output_tokens,
  stream = true,
}

local commit_json = {
  input = {},
  model = "gpt-4o",
  max_output_tokens = max_output_tokens,
  text = {
    format = {
      type = "text",
    },
  },
  stream = true,
  temperature = 1.0,
  top_p = 1,
}

local translate_json = {
  input = {},
  model = "gpt-4o",
  max_output_tokens = max_output_tokens,
  text = {
    format = {
      type = "text",
    },
  },
  stream = true,
  temperature = 1.3,
  top_p = 1,
}

---@class gpt.OpenAIClient : gpt.Client
---@field base_url string
---@field api_key string
---@field type string
---@field payload table
---@field sse http.SseClient?
local OpenAI = {
  models = {
    "gpt-4o",
    "gpt-4o-mini",
  },
}
OpenAI.__index = OpenAI

---@param kind string? one of "chat" | "reasoner" | "code" | "commit" | "translate"
function OpenAI.new(kind)
  local self = setmetatable({}, OpenAI)
  self.base_url = "https://api.openai.com/v1"
  self.api_key = vim.env["OPENAI_API_KEY"]
  self.type = kind or "chat"
  if self.type == "chat" then
    self.payload = chat_json
  elseif self.type == "reasoner" then
    self.payload = reasoner_json
  elseif self.type == "code" then
    self.payload = code_json
  elseif self.type == "commit" then
    self.payload = commit_json
  elseif self.type == "translate" then
    self.payload = translate_json
  else
    -- Unknown type: fall back to the chat preset so payload is never nil
    self.payload = chat_json
  end
  return self
end

-- Module-level model override shared by every client instance.
function OpenAI.set_model(model)
  OpenAI._c_model = model
end

-- Builds a Responses API payload from the chat history: string contents are
-- wrapped as input_text parts, table contents are passed through as-is.
function OpenAI:payload_message(messages)
  local json = vim.deepcopy(self.payload)
  if OpenAI._c_model then
    json.model = OpenAI._c_model
  end
  self.model = json.model
  local input = {}
  for _, message in ipairs(messages) do
    if type(message.content) == "table" then
      input[#input + 1] = {
        role = message.role,
        content = message.content,
      }
    else
      input[#input + 1] = {
        role = message.role,
        content = {
          {
            type = "input_text",
            text = message.content or "",
          },
        },
      }
    end
  end
  json.input = input
  return json
end
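
-- For illustration, the payload built for a single string message has the shape:
--   {
--     model = "gpt-4o",
--     stream = true,
--     input = {
--       { role = "user", content = { { type = "input_text", text = "Hello" } } },
--     },
--     -- ...plus the preset's sampling fields (temperature, top_p, ...)
--   }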

function OpenAI:url()
  return self.base_url .. "/responses"
end

---@param messages table<gpt.Message>
function OpenAI:request(messages, callback)
  local payload = self:payload_message(messages)
  local function normalize_usage(usage)
    if not usage then
      return nil
    end
    local cached_tokens = nil
    if usage.input_tokens_details and usage.input_tokens_details.cached_tokens then
      cached_tokens = usage.input_tokens_details.cached_tokens
    end
    return {
      prompt_cache_hit_tokens = cached_tokens,
      prompt_tokens = usage.input_tokens,
      completion_tokens = usage.output_tokens,
      total_tokens = usage.total_tokens,
    }
  end
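  -- For example, a usage block like
  --   { input_tokens = 120, output_tokens = 40, total_tokens = 160,
  --     input_tokens_details = { cached_tokens = 64 } }
  -- normalizes to
  --   { prompt_tokens = 120, completion_tokens = 40, total_tokens = 160,
  --     prompt_cache_hit_tokens = 64 }
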
  -- Dispatch one decoded SSE JSON event to the caller's callback.
  local function callback_data(resp_json, event_type)
    if resp_json.error then
      vim.notify("OpenAI error: " .. vim.inspect(resp_json), vim.log.levels.ERROR)
      return
    end
    local etype = resp_json.type or event_type
    if etype == "response.output_text.delta" then
      local text = resp_json.delta or resp_json.text or resp_json.content or ""
      if text ~= "" then
        callback({
          data = text,
        })
      end
    elseif etype == "response.reasoning.delta" then
      local text = resp_json.delta or resp_json.text or resp_json.content or ""
      if text ~= "" then
        callback({
          reasoning = text,
        })
      end
    elseif etype == "response.completed" then
      local usage = nil
      if resp_json.response and resp_json.response.usage then
        usage = normalize_usage(resp_json.response.usage)
      end
      callback({
        usage = usage,
        done = true,
        data = "",
      })
    elseif etype == "response.failed" or etype == "response.cancelled" then
      callback({
        done = true,
        data = "",
      })
    end
  end
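
  -- A streamed reply arrives as SSE lines like the following abridged example
  -- (field layout is indicative, not exhaustive):
  --   event: response.output_text.delta
  --   data: {"type":"response.output_text.delta","delta":"Hel"}
  --
  --   event: response.completed
  --   data: {"type":"response.completed","response":{"usage":{...}}}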
  local job
  local tmp = ""
  local current_event = nil
  -- Heuristic: the accumulated buffer looks like one complete JSON value.
  local is_json = function(text)
    return (vim.startswith(text, "{") and vim.endswith(text, "}"))
      or (vim.startswith(text, "[") and vim.endswith(text, "]"))
  end
  ---@param event http.SseEvent
  local callback_handle = function(_, event)
    if not event.data then
      return
    end
    for _, value in ipairs(event.data) do
      -- Skip the blank separator lines between SSE events
      if value ~= "" then
        if vim.startswith(value, "event: ") then
          current_event = string.sub(value, 8, -1)
        elseif vim.startswith(value, "data: ") then
          local text = string.sub(value, 7, -1)
          if text == "[DONE]" then
            tmp = ""
            callback({
              data = text,
              done = true,
            })
          else
            tmp = tmp .. text
            if is_json(tmp) then
              local resp_json = vim.fn.json_decode(tmp)
              callback_data(resp_json, current_event)
              current_event = nil
              tmp = ""
            end
          end
        elseif vim.startswith(value, ": keep-alive") then
          -- Likely a keep-alive heartbeat comment; surface it as a notification
          vim.notify("[SSE] " .. value, vim.log.levels.INFO, { id = "gpt:" .. job, title = "OpenAI" })
        else
          -- Continuation of a JSON payload split across SSE lines
          tmp = tmp .. value
          if is_json(tmp) then
            local resp_json = vim.fn.json_decode(tmp)
            callback_data(resp_json, current_event)
            current_event = nil
            tmp = ""
          end
        end
      end
    end
  end

  self.sse = sse.new(self:url())
    :POST()
    :auth(self.api_key)
    :body(payload)
    :handle(callback_handle)
    :send()
  job = self.sse.job
end

function OpenAI:close()
  if self.sse then
    self.sse:stop()
  end
end

return OpenAI
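
-- A minimal usage sketch (illustrative only; assumes OPENAI_API_KEY is set and
-- that this module is required via a path such as "kide.gpt.openai", which is
-- an assumption about the plugin layout):
--
--   local OpenAI = require("kide.gpt.openai")
--   local client = OpenAI.new("chat")
--   client:request({ { role = "user", content = "Hello" } }, function(chunk)
--     if chunk.reasoning then print(chunk.reasoning) end
--     if chunk.data and chunk.data ~= "" then print(chunk.data) end
--     if chunk.done then client:close() end
--   end)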