
Commit

This provides a way to pass in hours to be ignored via the command line.
naved001 committed Oct 21, 2024
1 parent 79532e1 commit 6bcc398
Showing 3 changed files with 35 additions and 10 deletions.
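
As a usage sketch of the new option (the metrics file name and the ignore windows are invented for illustration; the --ignore-hours flag, its 'start,end' ISO-8601 format, and its acceptance of multiple ranges come from the diff below; the positional metrics-file argument is assumed from files = args.files, and the command assumes merge.py is run from the openshift_metrics directory):

python merge.py metrics-2024-10.json --ignore-hours 2024-10-05T02:00:00,2024-10-05T06:00:00 2024-10-12T00:00:00,2024-10-12T04:00:00
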
9 changes: 5 additions & 4 deletions openshift_metrics/invoice.py
@@ -1,7 +1,7 @@
import math
from dataclasses import dataclass, field
from collections import namedtuple
- from typing import List, Tuple
+ from typing import List, Tuple, Optional
from decimal import Decimal, ROUND_HALF_UP
import datetime

@@ -138,7 +138,7 @@ def get_runtime(
def end_time(self) -> int:
return self.start_time + self.duration

- def generate_pod_row(self):
+ def generate_pod_row(self, ignore_times):
"""
This returns a row to represent pod data.
It converts the epoch_time stamps to datetime timestamps so it's more readable.
@@ -154,7 +154,7 @@ def generate_pod_row(self):
memory_request = self.memory_request.quantize(
Decimal(".0001"), rounding=ROUND_HALF_UP
)
- runtime = self.get_runtime().quantize(Decimal(".0001"), rounding=ROUND_HALF_UP)
+ runtime = self.get_runtime(ignore_times).quantize(Decimal(".0001"), rounding=ROUND_HALF_UP)
return [
self.namespace,
start_time,
@@ -195,6 +195,7 @@ class ProjectInvoce:
intitution: str
institution_specific_code: str
rates: Rates
+ ignore_hours: Optional[List[Tuple[datetime.datetime, datetime.datetime]]] = None
su_hours: dict = field(
default_factory=lambda: {
SU_CPU: 0,
@@ -210,7 +211,7 @@ class ProjectInvoce:
def add_pod(self, pod: Pod) -> None:
"""Aggregate a pods data"""
su_type, su_count, _ = pod.get_service_unit()
- duration_in_hours = pod.get_runtime()
+ duration_in_hours = pod.get_runtime(self.ignore_hours)
self.su_hours[su_type] += su_count * duration_in_hours

def get_rate(self, su_type) -> Decimal:
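
The hunks above change the signature and call sites of get_runtime but not its body, so the interval arithmetic itself is not part of this commit. A minimal, hypothetical sketch of what subtracting the ignored windows could look like, assuming epoch-second start_time/duration as in Pod and the naive datetimes produced by the new parser (runtime_hours and every detail of its body are assumptions, not the project's implementation):

import datetime
from decimal import Decimal
from typing import List, Optional, Tuple

def runtime_hours(
    start_time: int,
    duration: int,
    ignore_times: Optional[List[Tuple[datetime.datetime, datetime.datetime]]] = None,
) -> Decimal:
    """Duration in hours, minus any overlap with the ignored windows."""
    end_time = start_time + duration
    ignored_seconds = 0
    for ignore_start, ignore_end in ignore_times or []:
        # Clamp each ignore window to the pod's lifetime; .timestamp() interprets
        # naive datetimes in local time.
        overlap_start = max(start_time, int(ignore_start.timestamp()))
        overlap_end = min(end_time, int(ignore_end.timestamp()))
        ignored_seconds += max(0, overlap_end - overlap_start)
    return Decimal(duration - ignored_seconds) / 3600
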
27 changes: 25 additions & 2 deletions openshift_metrics/merge.py
@@ -5,6 +5,7 @@
import argparse
from datetime import datetime
import json
+ from typing import Tuple

import utils

@@ -17,6 +18,19 @@ def compare_dates(date_str1, date_str2):
return date1 < date2


+ def parse_timestamp_range(timestamp_range: str) -> Tuple[datetime, datetime]:
+ try:
+ start_str, end_str = timestamp_range.split(",")
+ start_dt = datetime.fromisoformat(start_str)
+ end_dt = datetime.fromisoformat(end_str)
+ if start_dt > end_dt:
+ raise argparse.ArgumentTypeError("Ignore start time is after ignore end time")
+ return start_dt, end_dt
+ except ValueError:
+ raise argparse.ArgumentTypeError(
+ "Timestamp range must be in the format 'YYYY-MM-DDTHH:MM:SS,YYYY-MM-DDTHH:MM:SS'"
+ )

def main():
"""Reads the metrics from files and generates the reports"""
parser = argparse.ArgumentParser()
@@ -26,13 +40,21 @@ def main():
"--upload-to-s3",
action="store_true"
)
+ parser.add_argument(
+ "--ignore-hours",
+ type=parse_timestamp_range,
+ nargs="*",
+ help="List of timestamp ranges to ignore in the format 'YYYY-MM-DDTHH:MM:SS,YYYY-MM-DDTHH:MM:SS'"
+ )

args = parser.parse_args()
files = args.files

if args.output_file:
output_file = args.output_file
else:
output_file = f"{datetime.today().strftime('%Y-%m-%d')}.csv"
+ ignore_hours = args.ignore_hours

report_start_date = None
report_end_date = None
@@ -77,9 +99,10 @@ def main():
utils.write_metrics_by_namespace(
condensed_metrics_dict,
output_file,
- report_month
+ report_month,
+ ignore_hours,
)
- utils.write_metrics_by_pod(condensed_metrics_dict, "pod-" + output_file)
+ utils.write_metrics_by_pod(condensed_metrics_dict, "pod-" + output_file, ignore_hours)

if args.upload_to_s3:
primary_location = (
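
A quick check of the new parser in isolation (the timestamps are invented, and the import assumes merge.py is reachable on the import path, e.g. when run from the openshift_metrics directory):

import argparse
from datetime import datetime

from merge import parse_timestamp_range  # assumption: merge.py is importable as a module

start, end = parse_timestamp_range("2024-10-11T02:00:00,2024-10-11T04:00:00")
assert start == datetime(2024, 10, 11, 2, 0)
assert end == datetime(2024, 10, 11, 4, 0)

try:
    parse_timestamp_range("2024-10-11T04:00:00,2024-10-11T02:00:00")  # reversed range
except argparse.ArgumentTypeError as err:
    print(err)  # Ignore start time is after ignore end time
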
9 changes: 5 additions & 4 deletions openshift_metrics/utils.py
@@ -110,7 +110,7 @@ def csv_writer(rows, file_name):
csvwriter.writerows(rows)


- def write_metrics_by_namespace(condensed_metrics_dict, file_name, report_month):
+ def write_metrics_by_namespace(condensed_metrics_dict, file_name, report_month, ignore_hours=None):
"""
Process metrics dictionary to aggregate usage by namespace and then write that to a file
"""
@@ -157,7 +157,8 @@ def write_metrics_by_namespace(condensed_metrics_dict, file_name, report_month):
invoice_address="",
intitution="",
institution_specific_code=cf_institution_code,
- rates=rates
+ rates=rates,
+ ignore_hours=ignore_hours,
)
invoices[namespace] = project_invoice

@@ -186,7 +187,7 @@ def write_metrics_by_namespace(condensed_metrics_dict, file_name, report_month):
csv_writer(rows, file_name)


- def write_metrics_by_pod(condensed_metrics_dict, file_name):
+ def write_metrics_by_pod(condensed_metrics_dict, file_name, ignore_hours=None):
"""
Generates metrics report by pod.
"""
@@ -227,6 +228,6 @@ def write_metrics_by_pod(condensed_metrics_dict, file_name):
node_hostname=pod_metric_dict.get("node", "Unknown Node"),
node_model=pod_metric_dict.get("node_model", "Unknown Model"),
)
- rows.append(pod_obj.generate_pod_row())
+ rows.append(pod_obj.generate_pod_row(ignore_hours))

csv_writer(rows, file_name)
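
Because ignore_hours defaults to None in both writers, existing callers keep their current behavior; a caller opting in would thread the parsed ranges through roughly as below. Here condensed_metrics_dict stands in for the structure merge.py builds, and the file names, report month, and window are invented, so this fragment illustrates the new parameter rather than being runnable on its own:

from datetime import datetime

import utils

condensed_metrics_dict = ...  # produced by merge.py from the metrics files
ignore_hours = [(datetime(2024, 10, 5, 2, 0), datetime(2024, 10, 5, 6, 0))]

utils.write_metrics_by_namespace(condensed_metrics_dict, "2024-10-05.csv", "2024-10", ignore_hours)
utils.write_metrics_by_pod(condensed_metrics_dict, "pod-2024-10-05.csv", ignore_hours)
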
