Commit c58cc6f

chore(pylint): Patch last needed
lperdereau committed Dec 9, 2024
1 parent 715c3e3 commit c58cc6f
Showing 6 changed files with 40 additions and 49 deletions.
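Most of the changes below address pylint's string-formatting checks: user-facing print() calls switch to f-strings, logging calls switch to lazy %-style argument passing, and the remaining warnings are silenced with targeted disable comments. A minimal sketch of the logging pattern being applied (the variable name and value are illustrative, not taken from the diff):

import logging

vmid = 100  # illustrative value

# Flagged by pylint: the message is built with % even when DEBUG is disabled
logging.debug("Selecting node for VM: %i" % vmid)

# Preferred: arguments are passed separately and only formatted if the record is emitted
logging.debug("Selecting node for VM: %i", vmid)

# print() has no lazy formatting, so those messages become f-strings instead
print(f"Migrating VM {vmid}")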
50 changes: 22 additions & 28 deletions src/pvecontrol/actions/node.py
@@ -1,72 +1,67 @@
 import logging
-import time

 from pvecontrol.node import NodeStatus
 from pvecontrol.vm import VmStatus
-from pvecontrol.utils import (
-    filter_keys,
-    print_tableoutput,
-    print_task,
-    print_taskstatus,
-)
+from pvecontrol.utils import print_tableoutput, print_task


 def action_nodelist(proxmox, args):
     """List proxmox nodes in the cluster using proxmoxer api"""
     print_tableoutput(proxmox.nodes, columns=args.columns, sortby=args.sort_by, filters=args.filter)


+#pylint: disable=too-many-branches,too-many-statements
 def action_nodeevacuate(proxmox, args):
     """Evacuate a node by migrating all it's VM out"""
     # check node exists
     srcnode = proxmox.find_node(args.node)
     logging.debug(srcnode)
     if not srcnode:
-        print("Node %s does not exist" % args.node)
+        print(f"Node {args.node} does not exist")
         return
     # check node is online
     if srcnode.status != NodeStatus.ONLINE:
-        print("Node %s is not online" % args.node)
+        print(f"Node {args.node} is not online")
         return

     targets = []
     # compute targets migration possible
     if args.target:
         for t in list(set(args.target)):
             if t == srcnode.node:
-                print("Target node %s is the same as source node, skipping" % t)
+                print(f"Target node {t} is the same as source node, skipping")
                 continue
             tg = proxmox.find_node(t)
             if not tg:
-                print("Target node %s does not exist, skipping" % t)
+                print(f"Target node {t} does not exist, skipping")
                 continue
             if tg.status != NodeStatus.ONLINE:
-                print("Target node %s is not online, skipping" % t)
+                print(f"Target node {t} is not online, skipping")
                 continue
             targets.append(tg)
     else:
         targets = [n for n in proxmox.nodes if n.status == NodeStatus.ONLINE and n.node != srcnode.node]
     if len(targets) == 0:
         print("No target node available")
         return
-    logging.debug("Migration targets: %s" % ([t.node for t in targets]))
+    logging.debug("Migration targets: %s", ([t.node for t in targets]))

     plan = []
     for vm in srcnode.vms:
-        logging.debug("Selecting node for VM: %i, maxmem: %i, cpus: %i" % (vm.vmid, vm.maxmem, vm.cpus))
+        logging.debug("Selecting node for VM: %i, maxmem: %i, cpus: %i", vm.vmid, vm.maxmem, vm.cpus)
         if vm.status != VmStatus.RUNNING and not args.no_skip_stopped:
-            logging.debug("VM %i is not running, skipping" % (vm.vmid))
+            logging.debug("VM %i is not running, skipping", vm.vmid)
             continue
         # check ressources
         for target in targets:
             logging.debug(
-                "Test target: %s, allocatedmem: %i, allocatedcpu: %i"
-                % (target.node, target.allocatedmem, target.allocatedcpu)
+                "Test target: %s, allocatedmem: %i, allocatedcpu: %i",
+                target.node, target.allocatedmem, target.allocatedcpu
             )
             if (vm.maxmem + target.allocatedmem) > (target.maxmem - proxmox.config["node"]["memoryminimum"]):
-                logging.debug("Discard target: %s, will overcommit ram" % (target.node))
+                logging.debug("Discard target: %s, will overcommit ram", target.node)
             elif (vm.cpus + target.allocatedcpu) > (target.maxcpu * proxmox.config["node"]["cpufactor"]):
-                logging.debug("Discard target: %s, will overcommit cpu" % (target.node))
+                logging.debug("Discard target: %s, will overcommit cpu", target.node)
             else:
                 plan.append(
                     {
@@ -79,35 +74,34 @@ def action_nodeevacuate(proxmox, args):
                 target.allocatedmem += vm.maxmem
                 target.allocatedcpu += vm.cpus
                 logging.debug(
-                    "Selected target %s: new allocatedmem %i, new allocatedcpu %i"
-                    % (target.node, target.allocatedmem, target.allocatedcpu)
+                    "Selected target %s: new allocatedmem %i, new allocatedcpu %i",
+                    target.node, target.allocatedmem, target.allocatedcpu
                 )
                 break
         else:
-            print("No target found for VM %s" % vm.vmid)
+            print("No target found for VM %s", vm.vmid)

     logging.debug(plan)
     # validate input
     if len(plan) == 0:
         print("No VM to migrate")
         return
     for p in plan:
-        print("Migrating VM %s (%s) from %s to %s" % (p["vmid"], p["vm"].name, p["node"], p["target"].node))
+        print(f"Migrating VM {p['vmid']} ({p['vm'].name}) from {p['node']} to {p['target'].node}")
     confirmation = input("Confirm (yes):")
-    logging.debug("Confirmation input: %s" % confirmation)
+    logging.debug("Confirmation input: %s", confirmation)
     if confirmation.lower() != "yes":
         print("Aborting")
         return
     # run migrations

     for p in plan:
-        logging.debug("Migrating VM %s from %s to %s" % (p["vmid"], p["node"], p["target"].node))
-        print("Migrate VM: %i / %s from %s to %s" % (p["vmid"], p["vm"].name, p["node"], p["target"].node))
+        print(f"Migrate VM: {p['vmid']} / {p['vm'].name} from {p['node']} to {p['target'].node}")
         if not args.dry_run:
             upid = p["vm"].migrate(p["target"].node, args.online)
-            logging.debug("Migration UPID: %s" % upid)
+            logging.debug("Migration UPID: %s", upid)
             proxmox.refresh()
-            task = proxmox.find_task(upid)
+            _task = proxmox.find_task(upid)
             print_task(proxmox, upid, args.follow, args.wait)
         else:
             print("Dry run, skipping migration")
1 change: 1 addition & 0 deletions src/pvecontrol/actions/vm.py
@@ -54,6 +54,7 @@ def action_vmmigrate(proxmox, args):
     # Lancer tache de migration
     upid = proxmox.api.nodes(node.node).qemu(vmid).migrate.post(**options)
     # Suivre la task cree
+    # pylint: disable=duplicate-code
     proxmox.refresh()
     _task = proxmox.find_task(upid)
     print_task(proxmox, upid, args.follow, args.wait)
18 changes: 7 additions & 11 deletions src/pvecontrol/cluster.py
@@ -11,35 +11,31 @@ class PVECluster:
     """Proxmox VE Cluster"""

     def __init__(self, name, host, user, password, config, verify_ssl=False):
-        self._api = ProxmoxAPI(host, user=user, password=password, verify_ssl=verify_ssl)
+        self.api = ProxmoxAPI(host, user=user, password=password, verify_ssl=verify_ssl)
         self.name = name
         self.config = config
         self._initstatus()

     def _initstatus(self):
-        self.status = self._api.cluster.status.get()
-        self.resources = self._api.cluster.resources.get()
+        self.status = self.api.cluster.status.get()
+        self.resources = self.api.cluster.resources.get()

         self.nodes = []
-        for node in self._api.nodes.get():
-            self.nodes.append(PVENode(self._api, node["node"], node["status"], node))
+        for node in self.api.nodes.get():
+            self.nodes.append(PVENode(self.api, node["node"], node["status"], node))

         self.storages = []
         for storage in self.get_resources_storages():
             self.storages.append(PVEStorage(storage.pop("node"), storage.pop("id"), storage.pop("shared"), **storage))

         self.tasks = []
-        for task in self._api.cluster.tasks.get():
+        for task in self.api.cluster.tasks.get():
             logging.debug("Get task informations: %s", (str(task)))
-            self.tasks.append(PVETask(self._api, task["upid"]))
+            self.tasks.append(PVETask(self.api, task["upid"]))

     def refresh(self):
         self._initstatus()

-    @property
-    def api(self):
-        self._api
-
     def __str__(self):
         output = f"Proxmox VE Cluster {self.name}\n"
         output += f"  Status: {self.status}\n"
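The api property removed above had no return statement, so reading PVECluster.api evaluated self._api and then returned None; the commit therefore exposes the ProxmoxAPI client as a plain public attribute instead. For comparison, a property-based variant would only need the missing return (a minimal sketch with an illustrative class name, not what the commit does):

class Example:
    def __init__(self, client):
        self._api = client

    @property
    def api(self):
        return self._api  # the removed property lacked this return, so it always yielded None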
6 changes: 3 additions & 3 deletions src/pvecontrol/node.py
Expand Up @@ -24,7 +24,7 @@ def __init__(self, api, node, status, kwargs=None):

self.node = node
self.status = NodeStatus[status.upper()]
self._api = api
self.api = api
self.cpu = kwargs.get("cpu", 0)
self.allocatedcpu = 0
self.maxcpu = kwargs.get("maxcpu", 0)
Expand Down Expand Up @@ -52,8 +52,8 @@ def _init_vms(self):
self.vms = []
if self.status == NodeStatus.ONLINE:
self.vms = [
PVEVm(self._api, self.node, vm["vmid"], vm["status"], vm)
for vm in self._api.nodes(self.node).qemu.get()
PVEVm(self.api, self.node, vm["vmid"], vm["status"], vm)
for vm in self.api.nodes(self.node).qemu.get()
]

def _init_allocatedmem(self):
6 changes: 3 additions & 3 deletions src/pvecontrol/task.py
@@ -28,7 +28,7 @@ class PVETask:
     def __init__(self, api, upid):
         task = Tasks.decode_upid(upid)

-        self._api = api
+        self.api = api
         self.upid = upid
         self.node = task["node"]
         self.starttime = task["starttime"]
@@ -41,7 +41,7 @@ def __init__(self, api, upid):
         self.refresh()

     def log(self, limit=0, start=0):
-        return self._api.nodes(self.node).tasks(self.upid).log.get(limit=limit, start=start)
+        return self.api.nodes(self.node).tasks(self.upid).log.get(limit=limit, start=start)

     def running(self):
         return self.runningstatus == TaskRunningStatus.RUNNING
@@ -54,7 +54,7 @@ def refresh(self):
         # if self.node != NodeStatus.online:
         # return
         try:
-            status = self._api.nodes(self.node).tasks(self.upid).status.get()
+            status = self.api.nodes(self.node).tasks(self.upid).status.get()
         # Some task information can be vanished over time (tasks status files removed from the node filesystem)
         # In this case API return an error and we consider this tasks vanished and don't get more informations
         except proxmoxer.core.ResourceException:
8 changes: 4 additions & 4 deletions src/pvecontrol/vm.py
@@ -21,7 +21,7 @@ def __init__(self, api, node, vmid, status, kwargs=None):
         self.vmid = vmid
         self.status = VmStatus[status.upper()]
         self.node = node
-        self._api = api
+        self.api = api

         self.name = kwargs.get("name", "")
         self.lock = kwargs.get("lock", "")
@@ -32,7 +32,7 @@ def __init__(self, api, node, vmid, status, kwargs=None):
         self.tags = kwargs.get("tags", "")
         self.template = kwargs.get("template", 0)

-        self.config = self._api.nodes(self.node).qemu(vmid).config.get()
+        self.config = self.api.nodes(self.node).qemu(vmid).config.get()

     def __str__(self):
         str_keys = [
@@ -56,11 +56,11 @@ def migrate(self, target, online=False):
         options = {}
         options["node"] = self.node
         options["target"] = target
-        check = self._api.nodes(self.node).qemu(self.vmid).migrate.get(**options)
+        check = self.api.nodes(self.node).qemu(self.vmid).migrate.get(**options)
         # logging.debug("Migration check: %s"%check)
         options["online"] = int(online)
         if len(check["local_disks"]) > 0:
             options["with-local-disks"] = int(True)

-        upid = self._api.nodes(self.node).qemu(self.vmid).migrate.post(**options)
+        upid = self.api.nodes(self.node).qemu(self.vmid).migrate.post(**options)
         return upid
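As the hunk above shows, migrate() first issues the qemu/<vmid>/migrate GET precheck, adds with-local-disks when the check reports local disks, then posts the migration and returns the task UPID. A minimal usage sketch, assuming a connected PVECluster instance (node and VM names are illustrative):

# `cluster` is assumed to be an initialized PVECluster
node = cluster.find_node("pve-node1")
vm = node.vms[0]
upid = vm.migrate("pve-node2", online=True)  # returns the Proxmox task UPID
task = cluster.find_task(upid)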
