From c58cc6f71d2acad89462f8260e80d7c739a6d523 Mon Sep 17 00:00:00 2001
From: Louis PERDEREAU
Date: Mon, 9 Dec 2024 13:40:45 +0100
Subject: [PATCH] chore(pylint): fix remaining warnings

---
 src/pvecontrol/actions/node.py | 50 +++++++++++++++-------------------
 src/pvecontrol/actions/vm.py   |  1 +
 src/pvecontrol/cluster.py      | 18 +++++-------
 src/pvecontrol/node.py         |  6 ++--
 src/pvecontrol/task.py         |  6 ++--
 src/pvecontrol/vm.py           |  8 +++---
 6 files changed, 40 insertions(+), 49 deletions(-)

diff --git a/src/pvecontrol/actions/node.py b/src/pvecontrol/actions/node.py
index 9958026..da98a66 100644
--- a/src/pvecontrol/actions/node.py
+++ b/src/pvecontrol/actions/node.py
@@ -1,14 +1,8 @@
 import logging
-import time
 
 from pvecontrol.node import NodeStatus
 from pvecontrol.vm import VmStatus
-from pvecontrol.utils import (
-    filter_keys,
-    print_tableoutput,
-    print_task,
-    print_taskstatus,
-)
+from pvecontrol.utils import print_tableoutput, print_task
 
 
 def action_nodelist(proxmox, args):
@@ -16,17 +10,18 @@ def action_nodelist(proxmox, args):
     print_tableoutput(proxmox.nodes, columns=args.columns, sortby=args.sort_by, filters=args.filter)
 
 
+# pylint: disable=too-many-branches,too-many-statements
 def action_nodeevacuate(proxmox, args):
     """Evacuate a node by migrating all it's VM out"""
     # check node exists
     srcnode = proxmox.find_node(args.node)
     logging.debug(srcnode)
     if not srcnode:
-        print("Node %s does not exist" % args.node)
+        print(f"Node {args.node} does not exist")
         return
     # check node is online
     if srcnode.status != NodeStatus.ONLINE:
-        print("Node %s is not online" % args.node)
+        print(f"Node {args.node} is not online")
         return
 
     targets = []
@@ -34,14 +29,14 @@ def action_nodeevacuate(proxmox, args):
     if args.target:
         for t in list(set(args.target)):
             if t == srcnode.node:
-                print("Target node %s is the same as source node, skipping" % t)
+                print(f"Target node {t} is the same as source node, skipping")
                 continue
             tg = proxmox.find_node(t)
             if not tg:
-                print("Target node %s does not exist, skipping" % t)
+                print(f"Target node {t} does not exist, skipping")
                 continue
             if tg.status != NodeStatus.ONLINE:
-                print("Target node %s is not online, skipping" % t)
+                print(f"Target node {t} is not online, skipping")
                 continue
             targets.append(tg)
     else:
@@ -49,24 +44,24 @@ def action_nodeevacuate(proxmox, args):
     if len(targets) == 0:
         print("No target node available")
         return
-    logging.debug("Migration targets: %s" % ([t.node for t in targets]))
+    logging.debug("Migration targets: %s", ([t.node for t in targets]))
 
     plan = []
     for vm in srcnode.vms:
-        logging.debug("Selecting node for VM: %i, maxmem: %i, cpus: %i" % (vm.vmid, vm.maxmem, vm.cpus))
+        logging.debug("Selecting node for VM: %i, maxmem: %i, cpus: %i", vm.vmid, vm.maxmem, vm.cpus)
         if vm.status != VmStatus.RUNNING and not args.no_skip_stopped:
-            logging.debug("VM %i is not running, skipping" % (vm.vmid))
+            logging.debug("VM %i is not running, skipping", vm.vmid)
             continue
         # check ressources
         for target in targets:
             logging.debug(
-                "Test target: %s, allocatedmem: %i, allocatedcpu: %i"
-                % (target.node, target.allocatedmem, target.allocatedcpu)
+                "Test target: %s, allocatedmem: %i, allocatedcpu: %i",
+                target.node, target.allocatedmem, target.allocatedcpu
             )
             if (vm.maxmem + target.allocatedmem) > (target.maxmem - proxmox.config["node"]["memoryminimum"]):
-                logging.debug("Discard target: %s, will overcommit ram" % (target.node))
+                logging.debug("Discard target: %s, will overcommit ram", target.node)
             elif (vm.cpus + target.allocatedcpu) > (target.maxcpu * proxmox.config["node"]["cpufactor"]):
-                logging.debug("Discard target: %s, will overcommit cpu" % (target.node))
+                logging.debug("Discard target: %s, will overcommit cpu", target.node)
             else:
                 plan.append(
                     {
@@ -79,12 +74,12 @@ def action_nodeevacuate(proxmox, args):
                 target.allocatedmem += vm.maxmem
                 target.allocatedcpu += vm.cpus
                 logging.debug(
-                    "Selected target %s: new allocatedmem %i, new allocatedcpu %i"
-                    % (target.node, target.allocatedmem, target.allocatedcpu)
+                    "Selected target %s: new allocatedmem %i, new allocatedcpu %i",
+                    target.node, target.allocatedmem, target.allocatedcpu
                 )
                 break
         else:
-            print("No target found for VM %s" % vm.vmid)
+            print(f"No target found for VM {vm.vmid}")
     logging.debug(plan)
 
     # validate input
@@ -92,22 +87,21 @@ def action_nodeevacuate(proxmox, args):
         print("No VM to migrate")
         return
     for p in plan:
-        print("Migrating VM %s (%s) from %s to %s" % (p["vmid"], p["vm"].name, p["node"], p["target"].node))
+        print(f"Migrating VM {p['vmid']} ({p['vm'].name}) from {p['node']} to {p['target'].node}")
     confirmation = input("Confirm (yes):")
-    logging.debug("Confirmation input: %s" % confirmation)
+    logging.debug("Confirmation input: %s", confirmation)
     if confirmation.lower() != "yes":
         print("Aborting")
         return
 
     # run migrations
     for p in plan:
-        logging.debug("Migrating VM %s from %s to %s" % (p["vmid"], p["node"], p["target"].node))
-        print("Migrate VM: %i / %s from %s to %s" % (p["vmid"], p["vm"].name, p["node"], p["target"].node))
+        print(f"Migrate VM: {p['vmid']} / {p['vm'].name} from {p['node']} to {p['target'].node}")
         if not args.dry_run:
             upid = p["vm"].migrate(p["target"].node, args.online)
-            logging.debug("Migration UPID: %s" % upid)
+            logging.debug("Migration UPID: %s", upid)
             proxmox.refresh()
-            task = proxmox.find_task(upid)
+            _task = proxmox.find_task(upid)
             print_task(proxmox, upid, args.follow, args.wait)
         else:
             print("Dry run, skipping migration")
diff --git a/src/pvecontrol/actions/vm.py b/src/pvecontrol/actions/vm.py
index fa76820..38b5b70 100644
--- a/src/pvecontrol/actions/vm.py
+++ b/src/pvecontrol/actions/vm.py
@@ -54,6 +54,7 @@ def action_vmmigrate(proxmox, args):
     # Lancer tache de migration
     upid = proxmox.api.nodes(node.node).qemu(vmid).migrate.post(**options)
     # Suivre la task cree
+    # pylint: disable=duplicate-code
     proxmox.refresh()
     _task = proxmox.find_task(upid)
     print_task(proxmox, upid, args.follow, args.wait)
diff --git a/src/pvecontrol/cluster.py b/src/pvecontrol/cluster.py
index a55a772..ce25c38 100644
--- a/src/pvecontrol/cluster.py
+++ b/src/pvecontrol/cluster.py
@@ -11,35 +11,31 @@ class PVECluster:
     """Proxmox VE Cluster"""
 
     def __init__(self, name, host, user, password, config, verify_ssl=False):
-        self._api = ProxmoxAPI(host, user=user, password=password, verify_ssl=verify_ssl)
+        self.api = ProxmoxAPI(host, user=user, password=password, verify_ssl=verify_ssl)
         self.name = name
         self.config = config
         self._initstatus()
 
     def _initstatus(self):
-        self.status = self._api.cluster.status.get()
-        self.resources = self._api.cluster.resources.get()
+        self.status = self.api.cluster.status.get()
+        self.resources = self.api.cluster.resources.get()
 
         self.nodes = []
-        for node in self._api.nodes.get():
-            self.nodes.append(PVENode(self._api, node["node"], node["status"], node))
+        for node in self.api.nodes.get():
+            self.nodes.append(PVENode(self.api, node["node"], node["status"], node))
 
         self.storages = []
         for storage in self.get_resources_storages():
             self.storages.append(PVEStorage(storage.pop("node"), storage.pop("id"), storage.pop("shared"), **storage))
 
         self.tasks = []
-        for task in self._api.cluster.tasks.get():
+        for task in self.api.cluster.tasks.get():
             logging.debug("Get task informations: %s", (str(task)))
-            self.tasks.append(PVETask(self._api, task["upid"]))
+            self.tasks.append(PVETask(self.api, task["upid"]))
 
     def refresh(self):
         self._initstatus()
 
-    @property
-    def api(self):
-        self._api
-
     def __str__(self):
         output = f"Proxmox VE Cluster {self.name}\n"
         output += f"  Status: {self.status}\n"
diff --git a/src/pvecontrol/node.py b/src/pvecontrol/node.py
index 63ed9af..c0f97df 100644
--- a/src/pvecontrol/node.py
+++ b/src/pvecontrol/node.py
@@ -24,7 +24,7 @@ def __init__(self, api, node, status, kwargs=None):
 
         self.node = node
         self.status = NodeStatus[status.upper()]
-        self._api = api
+        self.api = api
         self.cpu = kwargs.get("cpu", 0)
         self.allocatedcpu = 0
         self.maxcpu = kwargs.get("maxcpu", 0)
@@ -52,8 +52,8 @@ def _init_vms(self):
         self.vms = []
         if self.status == NodeStatus.ONLINE:
             self.vms = [
-                PVEVm(self._api, self.node, vm["vmid"], vm["status"], vm)
-                for vm in self._api.nodes(self.node).qemu.get()
+                PVEVm(self.api, self.node, vm["vmid"], vm["status"], vm)
+                for vm in self.api.nodes(self.node).qemu.get()
             ]
 
     def _init_allocatedmem(self):
diff --git a/src/pvecontrol/task.py b/src/pvecontrol/task.py
index 51299bc..02d3458 100644
--- a/src/pvecontrol/task.py
+++ b/src/pvecontrol/task.py
@@ -28,7 +28,7 @@ class PVETask:
 
     def __init__(self, api, upid):
         task = Tasks.decode_upid(upid)
-        self._api = api
+        self.api = api
         self.upid = upid
         self.node = task["node"]
         self.starttime = task["starttime"]
@@ -41,7 +41,7 @@ def __init__(self, api, upid):
         self.refresh()
 
     def log(self, limit=0, start=0):
-        return self._api.nodes(self.node).tasks(self.upid).log.get(limit=limit, start=start)
+        return self.api.nodes(self.node).tasks(self.upid).log.get(limit=limit, start=start)
 
     def running(self):
         return self.runningstatus == TaskRunningStatus.RUNNING
@@ -54,7 +54,7 @@ def refresh(self):
         # if self.node != NodeStatus.online:
         #     return
         try:
-            status = self._api.nodes(self.node).tasks(self.upid).status.get()
+            status = self.api.nodes(self.node).tasks(self.upid).status.get()
             # Some task information can be vanished over time (tasks status files removed from the node filesystem)
             # In this case API return an error and we consider this tasks vanished and don't get more informations
         except proxmoxer.core.ResourceException:
diff --git a/src/pvecontrol/vm.py b/src/pvecontrol/vm.py
index 30e9021..ec85d17 100644
--- a/src/pvecontrol/vm.py
+++ b/src/pvecontrol/vm.py
@@ -21,7 +21,7 @@ def __init__(self, api, node, vmid, status, kwargs=None):
         self.vmid = vmid
         self.status = VmStatus[status.upper()]
         self.node = node
-        self._api = api
+        self.api = api
 
         self.name = kwargs.get("name", "")
         self.lock = kwargs.get("lock", "")
@@ -32,7 +32,7 @@ def __init__(self, api, node, vmid, status, kwargs=None):
         self.tags = kwargs.get("tags", "")
         self.template = kwargs.get("template", 0)
 
-        self.config = self._api.nodes(self.node).qemu(vmid).config.get()
+        self.config = self.api.nodes(self.node).qemu(vmid).config.get()
 
     def __str__(self):
         str_keys = [
@@ -56,11 +56,11 @@ def migrate(self, target, online=False):
         options = {}
         options["node"] = self.node
         options["target"] = target
-        check = self._api.nodes(self.node).qemu(self.vmid).migrate.get(**options)
+        check = self.api.nodes(self.node).qemu(self.vmid).migrate.get(**options)
         # logging.debug("Migration check: %s"%check)
         options["online"] = int(online)
         if len(check["local_disks"]) > 0:
             options["with-local-disks"] = int(True)
-        upid = self._api.nodes(self.node).qemu(self.vmid).migrate.post(**options)
+        upid = self.api.nodes(self.node).qemu(self.vmid).migrate.post(**options)
         return upid
 