diff --git a/MIGRATION.md b/MIGRATION.md
index 0b328e70..6bf947a9 100644
--- a/MIGRATION.md
+++ b/MIGRATION.md
@@ -55,6 +55,10 @@ local select = require('CopilotChat.select')
 chat.setup {
   -- Restore the behaviour for CopilotChat to use unnamed register by default
   selection = select.unnamed,
+  -- Restore the format with ## headers as prefixes,
+  question_header = '## User ',
+  answer_header = '## Copilot ',
+  error_header = '## Error ',
 }
 
 -- Restore CopilotChatVisual
diff --git a/README.md b/README.md
index 44205c44..a9df6148 100644
--- a/README.md
+++ b/README.md
@@ -198,8 +198,11 @@ Also see [here](/lua/CopilotChat/config.lua):
   model = 'gpt-4', -- GPT model to use, 'gpt-3.5-turbo' or 'gpt-4'
   temperature = 0.1, -- GPT temperature
 
-  name = 'CopilotChat', -- Name to use in chat
+  question_header = '', -- Header to use for user questions
+  answer_header = '**Copilot** ', -- Header to use for AI answers
+  error_header = '**Error** ', -- Header to use for errors
   separator = '---', -- Separator to use in chat
+
   show_folds = true, -- Shows folds for sections in chat
   show_help = true, -- Shows help message as virtual lines when waiting for user input
   auto_follow_cursor = true, -- Auto-follow cursor in chat
diff --git a/lua/CopilotChat/chat.lua b/lua/CopilotChat/chat.lua
index 12e93e9c..331ec174 100644
--- a/lua/CopilotChat/chat.lua
+++ b/lua/CopilotChat/chat.lua
@@ -215,9 +215,10 @@ function Chat:finish(msg)
   else
     msg = self.help
   end
+  msg = vim.trim(msg)
 
   if msg and msg ~= '' then
-    local line = vim.api.nvim_buf_line_count(self.bufnr) - 1
+    local line = vim.api.nvim_buf_line_count(self.bufnr) - 2
     show_virt_line(msg, math.max(0, line - 1), self.bufnr, self.mark_ns)
   end
 end
diff --git a/lua/CopilotChat/config.lua b/lua/CopilotChat/config.lua
index e7812748..fe3cb17d 100644
--- a/lua/CopilotChat/config.lua
+++ b/lua/CopilotChat/config.lua
@@ -59,7 +59,9 @@ local select = require('CopilotChat.select')
 ---@field system_prompt string?
 ---@field model string?
 ---@field temperature number?
----@field name string?
+---@field question_header string?
+---@field answer_header string?
+---@field error_header string?
 ---@field separator string?
 ---@field show_folds boolean?
 ---@field show_help boolean?
@@ -82,8 +84,11 @@ return {
   model = 'gpt-4', -- GPT model to use, 'gpt-3.5-turbo' or 'gpt-4'
   temperature = 0.1, -- GPT temperature
 
-  name = 'CopilotChat', -- Name to use in chat
+  question_header = '', -- Header to use for user questions
+  answer_header = '**Copilot** ', -- Header to use for AI answers
+  error_header = '**Error** ', -- Header to use for errors
   separator = '---', -- Separator to use in chat
+
   show_folds = true, -- Shows folds for sections in chat
   show_help = true, -- Shows help message as virtual lines when waiting for user input
   auto_follow_cursor = true, -- Auto-follow cursor in chat
diff --git a/lua/CopilotChat/init.lua b/lua/CopilotChat/init.lua
index 36b9715a..6db36630 100644
--- a/lua/CopilotChat/init.lua
+++ b/lua/CopilotChat/init.lua
@@ -356,11 +356,11 @@ function M.ask(prompt, config, source)
     end
 
     if state.copilot:stop() then
-      append('\n\n' .. config.separator .. '\n\n')
+      append('\n\n' .. config.question_header .. config.separator .. '\n\n')
     end
 
     append(updated_prompt)
-    append('\n\n**' .. config.name .. '** ' .. config.separator .. '\n\n')
+    append('\n\n' .. config.answer_header .. config.separator .. '\n\n')
     state.chat:follow()
 
     local selected_context = config.context
@@ -373,9 +373,9 @@ function M.ask(prompt, config, source)
 
     local function on_error(err)
      vim.schedule(function()
-        append('\n\n**Error** ' .. config.separator .. '\n\n')
+        append('\n\n' .. config.error_header .. config.separator .. '\n\n')
         append('```\n' .. err .. '\n```')
-        append('\n\n' .. config.separator .. '\n\n')
+        append('\n\n' .. config.question_header .. config.separator .. '\n\n')
         state.chat:finish()
         if M.config.auto_follow_cursor and M.config.auto_insert_mode and state.chat:active() then
           vim.cmd('startinsert')
@@ -405,7 +405,7 @@ function M.ask(prompt, config, source)
       on_error = on_error,
       on_done = function(response, token_count)
         vim.schedule(function()
-          append('\n\n' .. config.separator .. '\n\n')
+          append('\n\n' .. config.question_header .. config.separator .. '\n\n')
           state.response = response
           if tiktoken.available() and token_count and token_count > 0 then
             state.chat:finish(token_count .. ' tokens used')
@@ -443,7 +443,7 @@ function M.reset(no_insert)
 
   wrap(function()
     state.chat:clear()
-    append('\n')
+    append(M.config.question_header .. M.config.separator .. '\n\n')
     state.chat:finish()
     state.chat:follow()
 
@@ -491,22 +491,20 @@ function M.load(name, history_path)
     for i, message in ipairs(history) do
       if message.role == 'user' then
         if i > 1 then
-          append('\n\n' .. M.config.separator .. '\n\n')
-        else
-          append('\n')
+          append('\n\n')
         end
+        append(M.config.question_header .. M.config.separator .. '\n\n')
         append(message.content)
       elseif message.role == 'assistant' then
-        append('\n\n**' .. M.config.name .. '** ' .. M.config.separator .. '\n\n')
+        append('\n\n' .. M.config.answer_header .. M.config.separator .. '\n\n')
         append(message.content)
       end
     end
 
-    if #history == 0 then
-      append('\n')
-    else
-      append('\n\n' .. M.config.separator .. '\n')
+    if #history > 0 then
+      append('\n\n')
     end
+    append(M.config.question_header .. M.config.separator .. '\n\n')
 
     state.chat:finish()
     M.open()
@@ -733,7 +731,7 @@ function M.setup(config)
       end
     end)
 
-    append('\n')
+    append(M.config.question_header .. M.config.separator .. '\n\n')
     state.chat:finish()
   end)
 end