Add LLM to dev setup

This commit is contained in:
doylet 2025-08-13 20:28:21 +10:00
parent 0b27a36349
commit 579ef3c118
2 changed files with 120 additions and 58 deletions

View File

@ -22,8 +22,9 @@ call plug#begin(stdpath('config') . '/plugged')
Plug 'https://github.com/sainnhe/gruvbox-material' Plug 'https://github.com/sainnhe/gruvbox-material'
Plug 'https://github.com/Tetralux/odin.vim' "Odin Syntax highlighting Plug 'https://github.com/Tetralux/odin.vim' "Odin Syntax highlighting
" Harpoon ////////////////////////////////////////////////////////////////////////////////////// " NOTE: LLM
Plug 'nvim-lua/plenary.nvim' Plug 'nvim-lua/plenary.nvim'
Plug 'olimorris/codecompanion.nvim'
" lsp-zero begin " lsp-zero begin
" LSP Support " LSP Support
@ -35,7 +36,6 @@ call plug#begin(stdpath('config') . '/plugged')
Plug 'L3MON4D3/LuaSnip' Plug 'L3MON4D3/LuaSnip'
Plug 'hrsh7th/cmp-buffer' Plug 'hrsh7th/cmp-buffer'
Plug 'hrsh7th/cmp-path' Plug 'hrsh7th/cmp-path'
Plug 'huggingface/llm.nvim'
" lsp-zero end " lsp-zero end
call plug#end() call plug#end()
@ -102,40 +102,97 @@ lua <<EOF
formatting = cmp_format, formatting = cmp_format,
}) })
-- LLM /////////////////////////////////////////////////////////////////////////////////////////// require("codecompanion").setup({
-- local llm = require('llm') display = {
-- llm.setup({ chat = {
-- api_token = nil, -- cf Install paragraph auto_scroll = false,
-- model = "codellama/CodeLlama-13b-hf", -- the model ID, behavior depends on backend fold_context = true,
-- backend = "openai", -- backend ID, "huggingface" | "ollama" | "openai" | "tgi" show_settings = true,
-- url = "http://localhost:8080/v1/chat/completions", -- the http url of the backend show_header_separator = true,
-- tokens_to_clear = { "<EOT>" }, -- tokens to remove from the model's output }
-- -- parameters that are added to the request body, values are arbitrary, you can set any field:value pair here it will be passed as is to the backend },
-- request_body = { }, adapters = {
-- -- set this if the model supports fill in the middle opts = {
-- fim = { show_defaults = false,
-- enabled = true, },
-- prefix = "<PRE> ", llamafile = function()
-- middle = " <MID>", return require("codecompanion.adapters").extend("openai_compatible", {
-- suffix = " <SUF>", env = {
url = "http://127.0.0.1:8080", -- optional: default value is ollama url http://127.0.0.1:11434
chat_url = "/v1/chat/completions", -- optional: default value, override if different
-- api_key = "OpenAI_API_KEY", -- optional: if your endpoint is authenticated
-- models_endpoint = "/v1/models", -- optional: attaches to the end of the URL to form the endpoint to retrieve models
},
-- schema = {
-- model = {
-- default = "deepseek-r1-671b", -- define llm model to be used
-- }, -- },
-- debounce_ms = 150, -- temperature = {
-- accept_keymap = "<S-CR>", -- order = 2,
-- dismiss_keymap = "<CR>", -- mapping = "parameters",
-- tls_skip_verify_insecure = false, -- type = "number",
-- -- llm-ls configuration, cf llm-ls section -- optional = true,
-- lsp = { -- default = 0.8,
-- bin_path = "C:/Home/Downloads/llm-ls.exe", -- desc = "What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or top_p but not both.",
-- host = nil, -- validate = function(n)
-- port = nil, -- return n >= 0 and n <= 2, "Must be between 0 and 2"
-- end,
-- }, -- },
-- tokenizer = { -- max_completion_tokens = {
-- path = "C:/Home/Models/codellama-7b_tokenizer.json", -- order = 3,
-- }, -- cf Tokenizer paragraph -- mapping = "parameters",
-- context_window = 4096, -- max number of tokens for the context window -- type = "integer",
-- enable_suggestions_on_startup = true, -- optional = true,
-- enable_suggestions_on_files = "*", -- pattern matching syntax to enable suggestions on specific files, either a string or a list of strings -- default = nil,
-- }) -- desc = "An upper bound for the number of tokens that can be generated for a completion.",
-- validate = function(n)
-- return n > 0, "Must be greater than 0"
-- end,
-- },
-- stop = {
-- order = 4,
-- mapping = "parameters",
-- type = "string",
-- optional = true,
-- default = nil,
-- desc = "Sets the stop sequences to use. When this pattern is encountered the LLM will stop generating text and return. Multiple stop patterns may be set by specifying multiple separate stop parameters in a modelfile.",
-- validate = function(s)
-- return s:len() > 0, "Cannot be an empty string"
-- end,
-- },
-- logit_bias = {
-- order = 5,
-- mapping = "parameters",
-- type = "map",
-- optional = true,
-- default = nil,
-- desc = "Modify the likelihood of specified tokens appearing in the completion. Maps tokens (specified by their token ID) to an associated bias value from -100 to 100. Use https://platform.openai.com/tokenizer to find token IDs.",
-- subtype_key = {
-- type = "integer",
-- },
-- subtype = {
-- type = "integer",
-- validate = function(n)
-- return n >= -100 and n <= 100, "Must be between -100 and 100"
-- end,
-- },
-- },
-- },
})
end,
},
strategies = {
chat = {
adapter = "llamafile"
},
inline = {
adapter = "llamafile"
},
cmd = {
adapter = "llamafile"
},
}
})
-- Treesitter //////////////////////////////////////////////////////////////////////////////////// -- Treesitter ////////////////////////////////////////////////////////////////////////////////////
-- TODO: 2022-06-19 Treesitter is too slow on large C++ files -- TODO: 2022-06-19 Treesitter is too slow on large C++ files
@ -161,7 +218,6 @@ lua <<EOF
-- }, -- },
-- } -- }
-- Per-Project Bindings ///////////////////////////////////////////////////////////////////////////// -- Per-Project Bindings /////////////////////////////////////////////////////////////////////////////
-- Automatically load project file on buffer enter -- Automatically load project file on buffer enter
vim.api.nvim_create_autocmd({"BufEnter"}, { vim.api.nvim_create_autocmd({"BufEnter"}, {
@ -321,6 +377,7 @@ nnoremap <leader>R <cmd>FzfLua grep_cword<cr>
nnoremap <leader>t <cmd>FzfLua lsp_live_workspace_symbols<cr> nnoremap <leader>t <cmd>FzfLua lsp_live_workspace_symbols<cr>
nnoremap <leader>T <cmd>FzfLua lsp_finder<cr> nnoremap <leader>T <cmd>FzfLua lsp_finder<cr>
nnoremap <leader>b <cmd>FzfLua buffers<cr> nnoremap <leader>b <cmd>FzfLua buffers<cr>
nnoremap <leader>c <cmd>CodeCompanionChat toggle<cr>
nnoremap <leader><leader> <cmd>FzfLua<cr> nnoremap <leader><leader> <cmd>FzfLua<cr>
" Map Ctrl+HJKL to navigate buffer window " Map Ctrl+HJKL to navigate buffer window
@ -381,4 +438,4 @@ let g:easy_align_delimiters = {
\ } \ }
" Enter live-interactive easy align mode when a visual selection is active " Enter live-interactive easy align mode when a visual selection is active
xmap <leader>a <Plug>(LiveEasyAlign) xmap <leader>a <Plug>(LiveEasyAlign)

View File

@ -16,12 +16,18 @@ virustotal_url="https://www.virustotal.com/gui/file"
option="$1" option="$1"
shift shift
if [ "$option" = "clang" ]; then exe_dir="$clang_dir/$1/bin" && PATH="$exe_dir:$PATH" && cmd_line="$exe_dir/$2" && shift && shift; fi if [ "$option" = "clang" ]; then exe_dir="$clang_dir/$1/bin" && PATH="$exe_dir:$PATH" && cmd_line="$2" && shift && shift; fi
if [ "$option" = "cmake" ]; then exe_dir="$cmake_dir/$1" && PATH="$exe_dir:$PATH" && cmd_line="$exe_dir/$2" && shift && shift; fi if [ "$option" = "cmake" ]; then exe_dir="$cmake_dir/$1" && PATH="$exe_dir:$PATH" && cmd_line="$2" && shift && shift; fi
if [ "$option" = "node" ]; then exe_dir="$node_dir/$1/bin" && PATH="$exe_dir:$PATH" && cmd_line="$exe_dir/$2" && shift && shift; fi if [ "$option" = "node" ]; then exe_dir="$node_dir/$1/bin" && PATH="$exe_dir:$PATH" && cmd_line="$2" && shift && shift; fi
if [ "$option" = "node_env" ]; then PATH="$node_dir/$1/bin:$PATH" && shift && cmd_line="$1" && shift; fi if [ "$option" = "python" ]; then
if [ "$option" = "python" ]; then cmd_prefix="LD_LIBRARY_PATH=$python_dir/$1/install/lib:$LD_LIBRARY_PATH" && exe_dir="$python_dir/$1/install/bin" && PATH="$exe_dir:$PATH" && cmd_line="$exe_dir/$2" && shift && shift; fi python_root=$python_dir/$1/install
if [ "$option" = "python_env" ]; then cmd_prefix="LD_LIBRARY_PATH=$python_dir/$1/install/lib:$LD_LIBRARY_PATH" && PATH="$python_dir/$1/install/bin:$PATH" && shift && cmd_line="$1" && shift; fi # Shit like building UWSGI in Python via PIP with wheel doesn't seem to use
# LD_LIBRARY_PATH but LDFLAGS works. Dependency hell
cmd_prefix="LDFLAGS=${python_root}/lib LD_LIBRARY_PATH=$python_root/lib:$LD_LIBRARY_PATH PYTHONHOME=$python_root"
exe_dir="$python_root/bin"
PATH="$exe_dir:$PATH"
cmd_line="$2"&& shift && shift;
fi
if [ "$option" = "virustotal" ]; then virustotal_hash=$(sha256sum "$1" | awk '{print $1}') && cmd_line="xdg-open $virustotal_url/$virustotal_hash &" && shift; fi if [ "$option" = "virustotal" ]; then virustotal_hash=$(sha256sum "$1" | awk '{print $1}') && cmd_line="xdg-open $virustotal_url/$virustotal_hash &" && shift; fi
if [ -z "$cmd_line" ]; then option="help"; fi if [ -z "$cmd_line" ]; then option="help"; fi
@ -35,23 +41,22 @@ USAGE: dev [option] [args...]
NOTES: NOTES:
Commands suffixed with '_env' augment the system PATH with the tool's path for the current shell session. Commands suffixed with '_env' augment the system PATH with the tool's path for the current shell session.
You can chain the '_env' commands to augment the PATH, e.g: You can chain the commands to augment the PATH, e.g:
dev.sh python_env 3.12.9+20250317 dev.sh node 20.18.2 yarn build-everything dev.sh python 3.12.9+20250317 dev.sh node 20.18.2 yarn build-everything
OPTIONS: OPTIONS:
cmake [version] [exe] CMake build system: '$cmake_dir/[version]/[exe]' cmake [version] [cmd...] CMake build system: 'PATH=$cmake_dir/[version]:\$PATH [cmd...]'
Versions: $cmake_versions Versions: $cmake_versions
clang [version] [exe] CLANG compiler: '$clang_dir/[version]/[exe]' clang [version] [cmd...] CLANG compiler: 'PATH=$clang_dir/[version]:\$PATH [cmd...]'
Example: 'dev clang 18.1.4 clang++.exe --help' Example: 'dev clang 18.1.4 clang++.exe --help'
Versions: $clang_versions Versions: $clang_versions
node [version] [exe] Node JS: '$node_dir/[version]/[exe]' node [version] [cmd...] Node JS: 'PATH=$node_dir/[version]:\$PATH [cmd...]'
Versions: $node_versions Versions: $node_versions
python [version] [exe] Python: '$python_dir/[version]/install/bin/[exe]' python [version] [cmd...] Python: 'PATH=$python_dir/[version]/install/bin:\$PATH [cmd...]'
python_env [cmd...] '$python_dir/[version]/install/bin:[PATH]'
Versions: $python_versions Versions: $python_versions
virustotal [file] Lookup file SHA256 hash on VirusTotal: '$virustotal_url/[file]' virustotal [file] Lookup file SHA256 hash on VirusTotal: '$virustotal_url/[file]'