|
13 | 13 | import json |
14 | 14 | import numpy as np |
15 | 15 | import pandas as pd |
16 | | -import seaborn as sns |
17 | 16 |
|
18 | 17 | ############################################################################ |
19 | 18 | # # |
@@ -152,6 +151,7 @@ def __init__(self, pipeline_path=None): |
152 | 151 |
|
153 | 152 | if pipeline_path: |
154 | 153 | self.extract_from_pipeline(pipeline_path) |
| 154 | + self.pipeline_file = pipeline_path |
155 | 155 |
|
156 | 156 | def __add__(self, other): |
157 | 157 | """ |
@@ -745,6 +745,46 @@ def extract_resources(pipelines): |
745 | 745 | return [Resources(p) for p in pipelines] |
746 | 746 |
|
747 | 747 |
|
def print_statistics(resource_object, seconds_per_iter=5):
    """
    prints resource statistics for one dataframe of pipeline resources

    :param resource_object: object carrying the per-process samples as a
                            pandas DataFrame in .df (columns 'iter', 'pss',
                            'cpu'), a metadata dict in .meta (key
                            'cpu_limit') and, when built from a file, the
                            originating path in .pipeline_file.
    :param seconds_per_iter: duration of one monitoring iteration in
                            seconds; defaults to the 5 s used by the
                            pipeline runner --> should be made dynamic and adaptive
    """
    # .pipeline_file is only set when the object was constructed from a
    # pipeline file; fall back gracefully instead of raising AttributeError
    source_file = getattr(resource_object, "pipeline_file", "<unknown>")
    print ("<--- Extracted resource summary from file ", source_file)
    dframe = resource_object.df
    meta = resource_object.meta

    # estimate runtime from iteration count
    max_iter = dframe['iter'].max()
    print ("Iterations: ", max_iter)
    print ("Estimated runtime (s): ", max_iter * seconds_per_iter)

    # (a) PSS memory: sum over all processes per iteration, then aggregate
    summed_pss_per_iter = dframe.groupby("iter")['pss'].sum()
    print ("Mean-PSS (MB): ", summed_pss_per_iter.mean())
    print ("Max-PSS (MB): ", summed_pss_per_iter.max())

    # (b) CPU consumption: sum over all processes per iteration, then aggregate
    summed_cpu_per_iter = dframe.groupby("iter")['cpu'].sum()
    mean_cpu = summed_cpu_per_iter.mean()
    print ("Mean-CPU (cores): ", mean_cpu)
    print ("Max-CPU (cores): ", summed_cpu_per_iter.max())
    # fraction of the configured CPU budget actually used on average
    print ("CPU-efficiency: ", mean_cpu / meta["cpu_limit"])
    print ("---> ")
| 777 | + |
def stat(args):
    """
    providing simple global statistics of resources
    """
    # one Resources object per requested pipeline file, each summarized on its own
    for resource in extract_resources(args.pipelines):
        print_statistics(resource)
| 786 | + |
| 787 | + |
748 | 788 | def history(args): |
749 | 789 | """ |
750 | 790 | Entrypoint for history |
@@ -988,6 +1028,10 @@ def main(): |
988 | 1028 | parser = argparse.ArgumentParser(description="Metrics evaluation of O2 simulation workflow") |
989 | 1029 | sub_parsers = parser.add_subparsers(dest="command") |
990 | 1030 |
|
| 1031 | + stat_parser = sub_parsers.add_parser("stat", help="Print simple summary of resource usage") |
| 1032 | + stat_parser.set_defaults(func=stat) |
| 1033 | + stat_parser.add_argument("-p", "--pipelines", nargs="*", help="pipeline_metric files from o2_dpg_workflow_runner", required=True) |
| 1034 | + |
991 | 1035 | plot_parser = sub_parsers.add_parser("history", help="Plot (multiple) metrcis from extracted metrics JSON file(s)") |
992 | 1036 | plot_parser.set_defaults(func=history) |
993 | 1037 | plot_parser.add_argument("-p", "--pipelines", nargs="*", help="pipeline_metric files from o2_dpg_workflow_runner", required=True) |
|
0 commit comments