fix: Changes to users approving scripts #1452

Open · wants to merge 6 commits into base: main
74 changes: 72 additions & 2 deletions tests/load-tests/ci-scripts/utility_scripts/show-pipelineruns.py
@@ -14,6 +14,7 @@
 import yaml
 import time
 import re
+import csv
 
 import matplotlib.pyplot
 import matplotlib.colors
@@ -65,7 +66,6 @@ def __init__(self, data_dir):
         self.data_pipelineruns = {}
         self.data_taskruns = []
         self.data_pods = []
-        self.data_taskruns = []
         self.data_dir = data_dir
         self.pr_lanes = []
@@ -79,12 +79,15 @@ def __init__(self, data_dir):
         self.pod_skips = 0  # how many Pods we skipped
         self.pr_duration = datetime.timedelta(0)  # total time of all PipelineRuns
         self.tr_duration = datetime.timedelta(0)  # total time of all TaskRuns
+        self.pod_duration = datetime.timedelta(0)  # total time of all Pods running
+        self.pod_pending_duration = datetime.timedelta(0)  # total time of all Pods pending
         self.pr_idle_duration = datetime.timedelta(
             0
         )  # total time in PipelineRuns when no TaskRun was running
         self.pr_conditions = collections.defaultdict(lambda: 0)
         self.tr_conditions = collections.defaultdict(lambda: 0)
         self.tr_statuses = collections.defaultdict(lambda: 0)
+        self.pod_conditions = collections.defaultdict(lambda: 0)
 
         self._populate(self.data_dir)
         self._merge_taskruns()
@@ -147,6 +150,10 @@ def _merge_pods(self):
                 "node_name"
             ]
 
+            self.data_pipelineruns[pod["pipelinerun"]]["taskRuns"][pod["task"]]["pod_start_time"] = pod["start_time"]
+            self.data_pipelineruns[pod["pipelinerun"]]["taskRuns"][pod["task"]]["pod_creation_timestamp"] = pod["creation_timestamp"]
+            self.data_pipelineruns[pod["pipelinerun"]]["taskRuns"][pod["task"]]["pod_finished_time"] = pod["finished_time"]
+
         self.data_pods = []
 
     def _populate(self, data_dir):
@@ -347,19 +354,66 @@ def _populate_pod(self, pod):
             self.pod_skips += 1
             return
 
+        try:
+            pod_creation_timestamp = pod["metadata"]["creationTimestamp"]
+        except KeyError as e:
+            logging.info(f"Pod {pod_name} missing creationTimestamp, skipping: {e}")
+            self.pod_skips += 1
+            return
+
+        try:
+            pod_start_time = pod["status"]["startTime"]
+        except KeyError as e:
+            logging.info(f"Pod {pod_name} missing startTime, skipping: {e}")
+            self.pod_skips += 1
+            return
+
+        try:
+            pod_finished_time = None
+            for container in pod["status"]["containerStatuses"]:
+                if pod_finished_time is None:
+                    pod_finished_time = container["state"]["terminated"]["finishedAt"]
+                elif pod_finished_time < container["state"]["terminated"]["finishedAt"]:
+                    pod_finished_time = container["state"]["terminated"]["finishedAt"]
+        except KeyError as e:
+            logging.info(f"Pod {pod_name} missing finishedAt timestamp for container, skipping: {e}")
+            self.pod_skips += 1
+            return
+
+        try:
+            pod_conditions = pod["status"]["conditions"]
+        except KeyError as e:
+            logging.info(f"Pod {pod_name} missing conditions, skipping: {e}")
+            self.pod_skips += 1
+            return
+
         self.data_pods.append(
             {
                 "name": pod_name,
                 "pipelinerun": pod_pipelinerun,
                 "task": pod_task,
                 "node_name": pod_node_name,
+                "creation_timestamp": pod_creation_timestamp,
+                "start_time": pod_start_time,
+                "finished_time": pod_finished_time,
             }
         )
 
+        for condition in pod_conditions:
+            c_type = condition["type"]
+            c_status = condition["status"]
+            c_reason = condition["reason"] if "reason" in condition else None
+            self.pod_conditions[f"{c_type} / {c_status} / {c_reason}"] += 1
+
     def _dump_json(self, data, path):
         with open(path, "w") as fp:
             json.dump(data, fp, cls=DateTimeEncoder, sort_keys=True, indent=4)
 
+    def _dump_csv(self, data, path):
+        with open(path, "w") as fp:
+            writer = csv.writer(fp)
+            writer.writerows(data)
+
     def _load_json(self, path):
         with open(path, "r") as fp:
             return json.load(fp, cls=DateTimeDecoder)
@@ -474,6 +528,7 @@ def add_time_interval(existing, new):
                 for i in self.data_pipelineruns.values()
             ]
         )
+        tr_without_pod_times = 0
 
         for pr_name, pr_times in self.data_pipelineruns.items():
             pr_duration = pr_times[end] - pr_times[start]
@@ -484,6 +539,11 @@ def add_time_interval(existing, new):
             for tr_name, tr_times in pr_times["taskRuns"].items():
                 self.tr_duration += tr_times[end] - tr_times[start]
                 add_time_interval(trs, tr_times)
+                if "pod_finished_time" in tr_times and "pod_start_time" in tr_times and "pod_creation_timestamp" in tr_times:
+                    self.pod_duration += tr_times["pod_finished_time"] - tr_times["pod_start_time"]
+                    self.pod_pending_duration += tr_times["pod_start_time"] - tr_times["pod_creation_timestamp"]
+                else:
+                    tr_without_pod_times += 1
 
             # Combine new intervals so they do not overlap
             trs_no_overlap = []
@@ -504,7 +564,7 @@ def add_time_interval(existing, new):
             f"There was {self.pr_count} PipelineRuns and {self.tr_count} TaskRuns and {self.pod_count} Pods."
         )
         print(
-            f"In total PipelineRuns took {self.pr_duration} and TaskRuns took {self.tr_duration}, PipelineRuns were idle for {self.pr_idle_duration}"
+            f"In total PipelineRuns took {self.pr_duration} and TaskRuns took {self.tr_duration}, Pods were pending for {self.pod_pending_duration} and running for {self.pod_duration} (having {tr_without_pod_times} TRs without pod times), PipelineRuns were idle for {self.pr_idle_duration}"
         )
         pr_duration_avg = (
             (self.pr_duration / self.pr_count).total_seconds()
@@ -613,20 +673,30 @@ def _show_pr_tr_conditions(self):
                 headers=["Condition message", "Count"],
             )
         )
+        self._dump_csv([["Condition message", "Count"]] + list(self.pr_conditions.items()), os.path.join(self.data_dir, "show-pipelineruns-pipelinerun-conditions.csv"))
         print("\nTaskRuns conditions frequency")
         print(
             tabulate.tabulate(
                 self.tr_conditions.items(),
                 headers=["Condition message", "Count"],
             )
         )
+        self._dump_csv([["Condition message", "Count"]] + list(self.tr_conditions.items()), os.path.join(self.data_dir, "show-pipelineruns-taskrun-conditions.csv"))
         print("\nTaskRuns status messages frequency")
         print(
             tabulate.tabulate(
                 self.tr_statuses.items(),
                 headers=["Status message", "Count"],
             )
         )
+        print("\nPods conditions frequency")
+        print(
+            tabulate.tabulate(
+                self.pod_conditions.items(),
+                headers=["Condition description", "Count"],
+            )
+        )
+        self._dump_csv([["Condition description", "Count"]] + list(self.pod_conditions.items()), os.path.join(self.data_dir, "show-pipelineruns-pod-conditions.csv"))
 
     def _plot_graph(self):
         """
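For reference, a minimal sketch (not part of this PR) of how the three Pod timestamps used above split a Pod's lifetime into a pending phase and a running phase. It assumes the timestamps have already been parsed into `datetime` objects; the literal values below are made up for illustration:

```python
import datetime

# Hypothetical values; in the script these come from the Pod's
# metadata.creationTimestamp, status.startTime, and the latest
# status.containerStatuses[*].state.terminated.finishedAt.
creation = datetime.datetime(2024, 1, 1, 12, 0, 0)   # Pod object created
start = datetime.datetime(2024, 1, 1, 12, 0, 40)     # kubelet started the Pod
finished = datetime.datetime(2024, 1, 1, 12, 5, 10)  # last container terminated

pending = start - creation   # scheduling + image pulls -> pod_pending_duration
running = finished - start   # workload actually running -> pod_duration

print(f"pending={pending}, running={running}")
# pending=0:00:40, running=0:04:30
```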
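The three new `_dump_csv` calls write `show-pipelineruns-pipelinerun-conditions.csv`, `show-pipelineruns-taskrun-conditions.csv`, and `show-pipelineruns-pod-conditions.csv` into the data directory. A minimal sketch of reading one of them back (file name taken from this diff; the directory is whatever data dir the tool was pointed at):

```python
import csv

# Example path; the script writes this file into its data directory.
with open("show-pipelineruns-pod-conditions.csv", newline="") as fp:
    reader = csv.reader(fp)
    header = next(reader)  # ["Condition description", "Count"]
    for condition, count in reader:
        print(f"{condition}: {count}")
```

One note on the writer side: the `csv` module documentation recommends opening files passed to `csv.writer` with `newline=""`, otherwise extra blank rows can appear on platforms that use `\r\n` line endings; `_dump_csv` in this diff opens with plain `"w"`.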