Add LLM to dev setup
commit 579ef3c118
parent 0b27a36349
@@ -22,8 +22,9 @@ call plug#begin(stdpath('config') . '/plugged')
Plug 'https://github.com/sainnhe/gruvbox-material'
Plug 'https://github.com/Tetralux/odin.vim' " Odin syntax highlighting

" Harpoon //////////////////////////////////////////////////////////////////////////////////////
" NOTE: LLM
Plug 'nvim-lua/plenary.nvim'
Plug 'olimorris/codecompanion.nvim'
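" NOTE: plenary.nvim is pulled in alongside codecompanion.nvim here because
" upstream lists it as a required dependency, so it must load first.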

" lsp-zero begin
" LSP Support
@@ -35,7 +36,6 @@ call plug#begin(stdpath('config') . '/plugged')
Plug 'L3MON4D3/LuaSnip'
Plug 'hrsh7th/cmp-buffer'
Plug 'hrsh7th/cmp-path'
Plug 'huggingface/llm.nvim'
" lsp-zero end
call plug#end()

@@ -102,40 +102,97 @@ lua <<EOF
  formatting = cmp_format,
})

-- LLM ///////////////////////////////////////////////////////////////////////////////////////////
-- local llm = require('llm')
-- llm.setup({
--   api_token = nil, -- cf Install paragraph
--   model = "codellama/CodeLlama-13b-hf", -- the model ID, behavior depends on backend
--   backend = "openai", -- backend ID, "huggingface" | "ollama" | "openai" | "tgi"
--   url = "http://localhost:8080/v1/chat/completions", -- the http url of the backend
--   tokens_to_clear = { "<EOT>" }, -- tokens to remove from the model's output
--   -- parameters added to the request body; any field:value pair set here is passed to the backend as-is
--   request_body = { },
--   -- set this if the model supports fill-in-the-middle
--   fim = {
--     enabled = true,
--     prefix = "<PRE> ",
--     middle = " <MID>",
--     suffix = " <SUF>",
--   },
--   debounce_ms = 150,
--   accept_keymap = "<S-CR>",
--   dismiss_keymap = "<CR>",
--   tls_skip_verify_insecure = false,
--   -- llm-ls configuration, cf llm-ls section
--   lsp = {
--     bin_path = "C:/Home/Downloads/llm-ls.exe",
--     host = nil,
--     port = nil,
--   },
--   tokenizer = {
--     path = "C:/Home/Models/codellama-7b_tokenizer.json",
--   }, -- cf Tokenizer paragraph
--   context_window = 4096, -- max number of tokens for the context window
--   enable_suggestions_on_startup = true,
--   enable_suggestions_on_files = "*", -- pattern matching syntax to enable suggestions on specific files, either a string or a list of strings
-- })
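-- NOTE: the llm.nvim ghost-text setup above is kept commented out for
-- reference; codecompanion.nvim below is the active client, pointed at the
-- same local OpenAI-compatible endpoint.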
require("codecompanion").setup({
  display = {
    chat = {
      auto_scroll = false,
      fold_context = true,
      show_settings = true,
      show_header_separator = true,
    }
  },
  adapters = {
    opts = {
      show_defaults = false,
    },
    llamafile = function()
      return require("codecompanion.adapters").extend("openai_compatible", {
        env = {
          url = "http://127.0.0.1:8080", -- optional: default value is the ollama url http://127.0.0.1:11434
          chat_url = "/v1/chat/completions", -- optional: default value, override if different
          -- api_key = "OpenAI_API_KEY", -- optional: if your endpoint is authenticated
          -- models_endpoint = "/v1/models", -- optional: attaches to the end of the URL to form the endpoint to retrieve models
        },
        -- schema = {
        --   model = {
        --     default = "deepseek-r1-671b", -- define llm model to be used
        --   },
        --   temperature = {
        --     order = 2,
        --     mapping = "parameters",
        --     type = "number",
        --     optional = true,
        --     default = 0.8,
        --     desc = "What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or top_p but not both.",
        --     validate = function(n)
        --       return n >= 0 and n <= 2, "Must be between 0 and 2"
        --     end,
        --   },
        --   max_completion_tokens = {
        --     order = 3,
        --     mapping = "parameters",
        --     type = "integer",
        --     optional = true,
        --     default = nil,
        --     desc = "An upper bound for the number of tokens that can be generated for a completion.",
        --     validate = function(n)
        --       return n > 0, "Must be greater than 0"
        --     end,
        --   },
        --   stop = {
        --     order = 4,
        --     mapping = "parameters",
        --     type = "string",
        --     optional = true,
        --     default = nil,
        --     desc = "Sets the stop sequences to use. When this pattern is encountered the LLM will stop generating text and return. Multiple stop patterns may be set by specifying multiple separate stop parameters in a modelfile.",
        --     validate = function(s)
        --       return s:len() > 0, "Cannot be an empty string"
        --     end,
        --   },
        --   logit_bias = {
        --     order = 5,
        --     mapping = "parameters",
        --     type = "map",
        --     optional = true,
        --     default = nil,
        --     desc = "Modify the likelihood of specified tokens appearing in the completion. Maps tokens (specified by their token ID) to an associated bias value from -100 to 100. Use https://platform.openai.com/tokenizer to find token IDs.",
        --     subtype_key = {
        --       type = "integer",
        --     },
        --     subtype = {
        --       type = "integer",
        --       validate = function(n)
        --         return n >= -100 and n <= 100, "Must be between -100 and 100"
        --       end,
        --     },
        --   },
        -- },
      })
    end,
  },
  strategies = {
    chat = {
      adapter = "llamafile"
    },
    inline = {
      adapter = "llamafile"
    },
    cmd = {
      adapter = "llamafile"
    },
  }
})
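-- Usage sketch (assumes a llamafile/llama.cpp-style server is already running
-- and exposing the OpenAI-compatible API on http://127.0.0.1:8080):
--   :CodeCompanionChat toggle   " open/close the chat buffer (mapped to <leader>c below)
--   :CodeCompanion <prompt>     " run the inline assistant on the current buffer/selection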

-- Treesitter ////////////////////////////////////////////////////////////////////////////////////
-- TODO: 2022-06-19 Treesitter is too slow on large C++ files
@@ -161,7 +218,6 @@ lua <<EOF
-- },
-- }


-- Per-Project Bindings /////////////////////////////////////////////////////////////////////////////
-- Automatically load project file on buffer enter
vim.api.nvim_create_autocmd({"BufEnter"}, {
@@ -321,6 +377,7 @@ nnoremap <leader>R <cmd>FzfLua grep_cword<cr>
nnoremap <leader>t <cmd>FzfLua lsp_live_workspace_symbols<cr>
nnoremap <leader>T <cmd>FzfLua lsp_finder<cr>
nnoremap <leader>b <cmd>FzfLua buffers<cr>
nnoremap <leader>c <cmd>CodeCompanionChat toggle<cr>
nnoremap <leader><leader> <cmd>FzfLua<cr>

" Map Ctrl+HJKL to navigate buffer window
@@ -381,4 +438,4 @@ let g:easy_align_delimiters = {
\ }

" Enter live-interactive easy align mode when a visual selection is active
xmap <leader>a <Plug>(LiveEasyAlign)

@@ -16,13 +16,19 @@ virustotal_url="https://www.virustotal.com/gui/file"
option="$1"
shift

if [ "$option" = "clang" ]; then exe_dir="$clang_dir/$1/bin" && PATH="$exe_dir:$PATH" && cmd_line="$exe_dir/$2" && shift && shift; fi
if [ "$option" = "cmake" ]; then exe_dir="$cmake_dir/$1" && PATH="$exe_dir:$PATH" && cmd_line="$exe_dir/$2" && shift && shift; fi
if [ "$option" = "node" ]; then exe_dir="$node_dir/$1/bin" && PATH="$exe_dir:$PATH" && cmd_line="$exe_dir/$2" && shift && shift; fi
if [ "$option" = "node_env" ]; then PATH="$node_dir/$1/bin:$PATH" && shift && cmd_line="$1" && shift; fi
if [ "$option" = "python" ]; then cmd_prefix="LD_LIBRARY_PATH=$python_dir/$1/install/lib:$LD_LIBRARY_PATH" && exe_dir="$python_dir/$1/install/bin" && PATH="$exe_dir:$PATH" && cmd_line="$exe_dir/$2" && shift && shift; fi
if [ "$option" = "python_env" ]; then cmd_prefix="LD_LIBRARY_PATH=$python_dir/$1/install/lib:$LD_LIBRARY_PATH" && PATH="$python_dir/$1/install/bin:$PATH" && shift && cmd_line="$1" && shift; fi
if [ "$option" = "virustotal" ]; then virustotal_hash=$(sha256sum "$1" | awk '{print $1}') && cmd_line="xdg-open $virustotal_url/$virustotal_hash &" && shift; fi
if [ "$option" = "clang" ]; then exe_dir="$clang_dir/$1/bin" && PATH="$exe_dir:$PATH" && cmd_line="$2" && shift && shift; fi
if [ "$option" = "cmake" ]; then exe_dir="$cmake_dir/$1" && PATH="$exe_dir:$PATH" && cmd_line="$2" && shift && shift; fi
if [ "$option" = "node" ]; then exe_dir="$node_dir/$1/bin" && PATH="$exe_dir:$PATH" && cmd_line="$2" && shift && shift; fi
if [ "$option" = "python" ]; then
  python_root=$python_dir/$1/install
  # Building uWSGI via pip with wheel doesn't seem to honor LD_LIBRARY_PATH,
  # but LDFLAGS works. Dependency hell.
  cmd_prefix="LDFLAGS=-L${python_root}/lib LD_LIBRARY_PATH=$python_root/lib:$LD_LIBRARY_PATH PYTHONHOME=$python_root"
  exe_dir="$python_root/bin"
  PATH="$exe_dir:$PATH"
  cmd_line="$2" && shift && shift
fi
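# Example invocation of the python option above (the version string is just an
# illustration; use whatever is installed under $python_dir):
#   ./dev.sh python 3.12.9+20250317 python3 --version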
if [ "$option" = "virustotal" ]; then virustotal_hash=$(sha256sum "$1" | awk '{print $1}') && cmd_line="xdg-open $virustotal_url/$virustotal_hash &" && shift; fi

if [ -z "$cmd_line" ]; then option="help"; fi
if [ "$option" = "help" ]; then
@@ -35,26 +41,25 @@ USAGE: dev [option] [args...]

NOTES:
  Commands suffixed with '_env' augment the system PATH with the tool's path for the current shell session.
  You can chain the '_env' commands to augment the PATH, e.g:
  You can chain the commands to augment the PATH, e.g:

    dev.sh python_env 3.12.9+20250317 dev.sh node 20.18.2 yarn build-everything
    dev.sh python 3.12.9+20250317 dev.sh node 20.18.2 yarn build-everything

OPTIONS:
  cmake [version] [exe]      CMake build system: '$cmake_dir/[version]/[exe]'
                             Versions: $cmake_versions
  cmake [version] [cmd...]   CMake build system: 'PATH=$cmake_dir/[version]:\$PATH [cmd...]'
                             Versions: $cmake_versions

  clang [version] [exe]      CLANG compiler: '$clang_dir/[version]/[exe]'
                             Example: 'dev clang 18.1.4 clang++.exe --help'
                             Versions: $clang_versions
  clang [version] [cmd...]   CLANG compiler: 'PATH=$clang_dir/[version]:\$PATH [cmd...]'
                             Example: 'dev clang 18.1.4 clang++.exe --help'
                             Versions: $clang_versions

  node [version] [exe]       Node JS: '$node_dir/[version]/[exe]'
                             Versions: $node_versions
  node [version] [cmd...]    Node JS: 'PATH=$node_dir/[version]:\$PATH [cmd...]'
                             Versions: $node_versions

  python [version] [exe]     Python: '$python_dir/[version]/install/bin/[exe]'
  python_env [cmd...]        '$python_dir/[version]/install/bin:[PATH]'
                             Versions: $python_versions
  python [version] [cmd...]  Python: 'PATH=$python_dir/[version]/install/bin:\$PATH [cmd...]'
                             Versions: $python_versions

  virustotal [file]          Look up file SHA256 hash on VirusTotal: '$virustotal_url/[file]'
EOF
exit 0
fi