Update how we handle mingw and building

This commit is contained in:
doylet 2024-04-09 16:08:17 +10:00
parent 5b7d78eaff
commit 90e20b2168
1 changed file with 53 additions and 12 deletions

View File

@ -37,6 +37,8 @@ call plug#begin(stdpath('config') . '/plugged')
" Completion sources for nvim-cmp
Plug 'hrsh7th/cmp-buffer'
Plug 'hrsh7th/cmp-path'
" LSP preset configuration (pinned to the v3 branch)
Plug 'VonHeikemen/lsp-zero.nvim', {'branch': 'v3.x'}
" Hugging Face LLM-assisted completion
Plug 'huggingface/llm.nvim'
" lsp-zero end
call plug#end()
@ -54,7 +56,22 @@ lua <<EOF
}
}
-- LSP Setup =====================================================================================
-- Vim Dispatch //////////////////////////////////////////////////////////////////////////////////
-- Select the build command for :Make based on platform and the active shell.
-- BUG FIX: vim.fn.has() returns the NUMBERS 1 or 0, and 0 is truthy in Lua,
-- so the original `if vim.fn.has('win64') or ...` was always true and the
-- non-Windows branch was unreachable. Compare against 1 explicitly.
if vim.fn.has('win64') == 1 or vim.fn.has('win32') == 1 or vim.fn.has('win16') == 1 then
  if os.getenv('SHELL') ~= nil then
    -- A POSIX-like shell is present on Windows (e.g. git-bash/MSYS/MinGW):
    -- use `-c` command flag, forward slashes, and the shell build script.
    vim.o.shellcmdflag = '-c'
    vim.o.shellslash = true
    vim.o.makeprg = "./build.sh"
  else
    -- Plain Windows shell (cmd.exe): use the batch build script.
    vim.o.makeprg = "./build.bat"
  end
else
  -- vim.api.nvim_set_keymap('t', '<Esc>', '<C-\\><C-n>', {noremap = true})
  vim.o.makeprg = "./build.sh"
end
-- <C-b> kicks off an async build via vim-dispatch's :Make.
vim.api.nvim_set_keymap('n', '<C-b>', ':Make<CR>', {noremap = true})
-- LSP Setup /////////////////////////////////////////////////////////////////////////////////////
local lsp_zero = require('lsp-zero')
lsp_zero.on_attach(function(client, bufnr)
-- see :help lsp-zero-keybindings
@ -105,6 +122,41 @@ lua <<EOF
formatting = cmp_format,
})
-- LLM ===========================================================================================
-- local llm = require('llm')
-- llm.setup({
-- api_token = nil, -- cf Install paragraph
-- model = "codellama/CodeLlama-13b-hf", -- the model ID, behavior depends on backend
-- backend = "openai", -- backend ID, "huggingface" | "ollama" | "openai" | "tgi"
-- url = "http://localhost:8080/v1/chat/completions", -- the http url of the backend
-- tokens_to_clear = { "<EOT>" }, -- tokens to remove from the model's output
-- -- parameters that are added to the request body, values are arbitrary, you can set any field:value pair here it will be passed as is to the backend
-- request_body = { },
-- -- set this if the model supports fill in the middle
-- fim = {
-- enabled = true,
-- prefix = "<PRE> ",
-- middle = " <MID>",
-- suffix = " <SUF>",
-- },
-- debounce_ms = 150,
-- accept_keymap = "<S-CR>",
-- dismiss_keymap = "<CR>",
-- tls_skip_verify_insecure = false,
-- -- llm-ls configuration, cf llm-ls section
-- lsp = {
-- bin_path = "C:/Home/Downloads/llm-ls.exe",
-- host = nil,
-- port = nil,
-- },
-- tokenizer = {
-- path = "C:/Home/Models/codellama-7b_tokenizer.json",
-- }, -- cf Tokenizer paragraph
-- context_window = 4096, -- max number of tokens for the context window
-- enable_suggestions_on_startup = true,
-- enable_suggestions_on_files = "*", -- pattern matching syntax to enable suggestions on specific files, either a string or a list of strings
-- })
-- Harpoon =======================================================================================
-- Quick file bookmarking/navigation; colon-call setup() is the harpoon v2 API.
local harpoon = require('harpoon')
harpoon:setup()
@ -312,14 +364,3 @@ nnoremap <leader>s :vs<CR>
" Quickfix navigation (NOTE: the original comment was inverted — :cn is NEXT)
" Go to next error
nnoremap <A-j> :cn<CR>
" Go to previous error
nnoremap <A-k> :cp<CR>
" Vim Dispatch =====================================================================================
" Legacy vimscript build setup (this commit replaces it with a Lua version).
let s:running_windows = has("win16") || has("win32") || has("win64")
if s:running_windows
set makeprg=build.bat
else
" Set vim terminal to enter normal mode using escape like normal vim behaviour
tnoremap <Esc> <C-\><C-n>
set makeprg=./build.sh
endif
nnoremap <C-b> :Make<cr>