Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Extended config #11

Open
wants to merge 3 commits into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 6 additions & 0 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -51,10 +51,16 @@ You can provide the following optional configuration table to the `setup` functi
local defaults = {
-- See plugin debugging logs
debug = false,
-- New settings that allow a bit more customization, and a check for whether Ollama runs via docker or is installed on the local system
docker = true,
ollama_host = "localhost",
ollama_port = "11434",

-- The model for ollama to use. This model will be automatically downloaded.
model = "llama2",
}
-- can also set a keymap to open ollama
vim.keymap.set("n", "<leader>co","<cmd>Llama<CR>")
```

### Model library
Expand Down
47 changes: 33 additions & 14 deletions lua/nvim-llama/init.lua
Original file line number Diff line number Diff line change
Expand Up @@ -20,22 +20,33 @@ local function set_commands()
M.interactive_llama()
end, {})
end

-- Report whether the `docker` CLI is available on this machine.
-- Returns the matched marker string ("Docker version") when the binary
-- responds, or nil when it is missing or errors out.
local function is_docker_installed()
    local proc = io.popen("docker --version 2>&1")
    local output = proc:read("*a")
    proc:close()

    return output:match("Docker version")
end
-- In local (non-docker) mode, check that the `ollama` binary is on PATH.
-- Returns the matched marker string ("ollama version") when found, nil
-- otherwise. In docker mode the check is skipped entirely.
local function is_ollama_installed()
    if settings.docker ~= false then
        return
    end
    local proc = io.popen("ollama --version 2>&1")
    local output = proc:read("*a")
    proc:close()
    return output:match("ollama version")
end

-- Report whether the docker daemon is reachable.
-- In non-docker mode there is no daemon to probe, so the check passes.
-- Returns a truthy value when docker is usable, nil otherwise.
--
-- Fix: the diff left the `settings.docker == false` branch OUTSIDE the
-- function with an unmatched `end` (a syntax error), and the function
-- returned nothing at all in non-docker mode. The branch belongs inside.
local function is_docker_running()
    if settings.docker == false then
        return true
    end
    -- `docker info` exits 0 only when the daemon answers; echo the exit
    -- status so it can be read back through the pipe.
    local handle = io.popen("docker info > /dev/null 2>&1; echo $?")
    local result = handle:read("*a")
    handle:close()

    return result:match("0\n")
end

local function check_docker()
if not is_docker_installed() then
Expand All @@ -51,16 +62,6 @@ local function check_docker()
return true
end

-- Spawn `command` with `args` asynchronously via libuv and invoke
-- `callback` with true on a zero exit code, false otherwise.
local function async(command, args, callback)
    vim.loop.spawn(command, { args = args }, function(code)
        callback(code == 0)
    end)
end

local function is_container_running()
local command = string.format("docker ps --filter 'name=^/nvim-llama$' --format '{{.Names}}'")
local handle = io.popen(command)
Expand All @@ -69,6 +70,13 @@ local function is_container_running()

return result == "nvim-llama"
end
-- Probe the configured Ollama HTTP endpoint; the server answers its root
-- path with the literal text "Ollama is running".
-- Returns the matched string, or nil when the endpoint is unreachable.
--
-- Fix: the original passed the curl output through a global `trim` that is
-- not defined anywhere in visible scope (would raise "attempt to call a nil
-- value" at runtime), and wrapped a constant in a no-placeholder
-- string.format. `match` tolerates surrounding whitespace, so no trim is
-- needed.
local function is_ollama_running()
    local url = string.format("http://%s:%s", settings.ollama_host, settings.ollama_port)
    -- -s keeps curl's progress noise out of the captured output.
    local handle = io.popen("curl -s " .. url .. " 2>&1")
    local result = handle:read("*a")
    handle:close()
    return result:match("Ollama is running")
end

local function check_ollama_container()
local container_name = "nvim-llama"
Expand Down Expand Up @@ -98,6 +106,7 @@ function M.setup(config)
settings.set(config)
end

if settings.docker == true then
local status, err = pcall(check_docker)
if not status then
print("Error checking docker status: " .. err)
Expand All @@ -107,7 +116,17 @@ function M.setup(config)
if not status then
print("Error checking docker status: " .. err)
end

end
if settings.docker == false then
local status, err = pcall(is_ollama_installed)
if not status then
print("Ollama doesnt seem to be installed" .. err)
end
status, err = pcall(is_ollama_running)
if not status then
print("Ollama doesnt seem to be running: " .. err)
end
end
set_commands()
end

Expand Down
38 changes: 31 additions & 7 deletions lua/nvim-llama/ollama.lua
Original file line number Diff line number Diff line change
Expand Up @@ -38,26 +38,50 @@ function M.restart()
if err then
error("Failed to restart Ollama container: " .. err)
end
return result
end

-- Start Ollama.
-- In docker mode, run the ollama/ollama container, publishing the
-- configured host port onto the container's fixed 11434.
-- In local mode the server is managed externally; just probe the endpoint.
-- @param docker boolean: true for the container, false for a local server
-- @param ollama_host string: host of the local server (non-docker mode)
-- @param ollama_port string: host port to publish / probe
-- @return string|nil: container start output in docker mode
--
-- Fixes: `ollama_host ":"` was missing a `..` (parses as calling a string —
-- runtime error); `result` was declared inside the `if` but returned outside
-- it (always nil); `-p` had no space before the port, producing an invalid
-- docker flag; `is_started` leaked as a global.
function M.start(docker, ollama_host, ollama_port)
    if docker == true then
        M.prepare()
        local start_command = "docker run -d -p " .. ollama_port .. ":11434 -v "
            .. home .. "/.ollama:/root/.ollama --name nvim-llama ollama/ollama"
        local handle, err = io.popen(start_command)
        local result = handle:read("*a")
        handle:close()

        if err then
            error("Failed to start Ollama container: " .. err)
        end
        return result
    end

    if docker == false then
        local probe = "curl " .. "http://" .. ollama_host .. ":" .. ollama_port
        -- NOTE(review): io.popen rarely returns an error for a failing
        -- command; this mirrors the original best-effort probe.
        local _, err = io.popen(probe)
        if err ~= nil then
            error("Failed to get to the ollama_endpoint")
        end
    end
end

-- Build the shell command that opens an interactive `ollama run` session.
-- @param model string: name of the model to run
-- @param docker boolean: false to use a local ollama binary; anything else
--        (true or unset) uses the nvim-llama container, matching the
--        plugin default of docker = true
-- @return string: the command to execute
--
-- Fixes: `cmd` leaked as a global, and the trailing `return cmd` returned
-- a stale/undefined global when `docker` was neither true nor false.
function M.run(model, docker)
    if docker == false then
        return "ollama run " .. model
    end
    return "docker exec -it nvim-llama ollama run " .. model
end

-- Build the shell command that lists locally available models.
-- @param docker boolean: false to use a local ollama binary; anything else
--        (true or unset) uses the nvim-llama container, matching the
--        plugin default of docker = true
-- @return string: the command to execute
--
-- Fix: the original returned nothing at all when `docker` was neither
-- true nor false.
function M.list(docker)
    if docker == false then
        return "ollama list"
    end
    return "docker exec -it nvim-llama ollama list"
end

return M
5 changes: 4 additions & 1 deletion lua/nvim-llama/settings.lua
Original file line number Diff line number Diff line change
Expand Up @@ -5,9 +5,12 @@ M.namespace = vim.api.nvim_create_namespace("nvim-llama")
-- Default configuration for nvim-llama; overridden per-user via setup().
local defaults = {
-- See plugin debugging logs
debug = false,
-- Run Ollama inside a docker container (true) or use a locally
-- installed ollama binary (false).
docker = true,
-- Host and port where the Ollama HTTP endpoint is reachable.
ollama_host = "localhost",
ollama_port = "11434",

-- the model to use with Ollama.
-- NOTE(review): both diff lines are kept here; in a Lua table
-- constructor the later duplicate key wins, so the default is 'llama3'.
model = 'llama2',
model = 'llama3',
}

M.current = defaults
Expand Down