From afd70f6df5e44b0ffd0557ee5791b7e5daa94307 Mon Sep 17 00:00:00 2001 From: elronbandel Date: Sun, 20 Oct 2024 10:32:05 +0300 Subject: [PATCH 1/6] Fix performance tracking action Signed-off-by: elronbandel --- .github/workflows/performance.yml | 63 ++++++------------- .../card_profiler.py | 16 +++-- .../compare_performance_results.py | 17 ++++- .../profile.sh | 0 .../run_profile.py | 0 performance_profile/__init__.py | 1 - performance_profile/compare_branches.sh | 63 ------------------- performance_profile/logs/cards_benchmark.json | 1 - pyproject.toml | 2 +- 9 files changed, 40 insertions(+), 123 deletions(-) rename {performance_profile => performance}/card_profiler.py (91%) rename {performance_profile => performance}/compare_performance_results.py (63%) rename {performance_profile => performance}/profile.sh (100%) rename {performance_profile => performance}/run_profile.py (100%) delete mode 100644 performance_profile/__init__.py delete mode 100644 performance_profile/compare_branches.sh delete mode 100644 performance_profile/logs/cards_benchmark.json diff --git a/.github/workflows/performance.yml b/.github/workflows/performance.yml index bc4b28790f..0c3f6bb034 100644 --- a/.github/workflows/performance.yml +++ b/.github/workflows/performance.yml @@ -29,66 +29,39 @@ jobs: - run: uv pip install --system -e ".[tests]" - run: pip install coverage[toml] - - name: Save card_profiler python script - uses: actions/upload-artifact@v4 - with: - name: card_profiler - path: performance_profile/card_profiler.py - compression-level: 0 - overwrite: true + - name: Prepare the dirs for performance evaluation in main + run: | + mkdir -p performance_action + mkdir -p performance_action/logs + echo "" > performance_action/__init__.py + echo " " > performance_action/logs/cards_benchmark.prof + echo " " > performance_action/logs/cards_benchmark.json + cp performance/card_profile.py performance_action/card_profile.py + cp performance/compare_performance_results.py performance_action/compare_performance_results.py - name: Checkout main branch uses: actions/checkout@v4 with: ref: main - - - name: Prepare the dirs for performance evaluation in main - run: | - mkdir -p performance_profile - mkdir -p performance_profile/logs - echo "" > performance_profile/__init__.py - echo " " > performance_profile/logs/cards_benchmark.prof - echo " " > performance_profile/logs/cards_benchmark.json - - - name: Download card_profiler python script - uses: actions/download-artifact@v4 - with: - name: card_profiler - path: performance_profile/ + clean: false - name: Run performance on main branch run: | - python -m performance_profile.card_profiler - cp performance_profile/logs/cards_benchmark.json performance_profile/logs/main_cards_benchmark.json - - - name: Save main performance json - uses: actions/upload-artifact@v4 - with: - name: main_performance_json - path: performance_profile/logs/main_cards_benchmark.json - compression-level: 0 - overwrite: true + python performance_action/card_profiler.py + cp performance/logs/cards_benchmark.json performance_action/logs/main_cards_benchmark.json + rm performance/logs/cards_benchmark.json - name: Checkout PR branch uses: actions/checkout@v4 with: ref: ${{ github.head_ref }} - - - name: Create performance_profile/logs dir - run: | - mkdir -p performance_profile/logs - echo " " > performance_profile/logs/cards_benchmark.prof + clean: false - name: Run performance on PR branch run: | - python -m performance_profile.card_profiler - cp performance_profile/logs/cards_benchmark.json 
performance_profile/logs/pr_cards_benchmark.json - - - name: Download main performance result - uses: actions/download-artifact@v4 - with: - name: main_performance_json - path: performance_profile/logs/ + python performance_action/card_profiler.py + cp performance/logs/cards_benchmark.json performance_action/logs/pr_cards_benchmark.json + rm performance/logs/cards_benchmark.json - name: Compare main and PR performance results - run: python -m performance_profile.compare_performance_results + run: python performance_action/compare_performance_results.py performance_action/logs/main_cards_benchmark.json performance_action/logs/pr_cards_benchmark.json diff --git a/performance_profile/card_profiler.py b/performance/card_profiler.py similarity index 91% rename from performance_profile/card_profiler.py rename to performance/card_profiler.py index e1b2855cd6..5135bfc98b 100644 --- a/performance_profile/card_profiler.py +++ b/performance/card_profiler.py @@ -21,17 +21,17 @@ from unitxt root dir, run the following linux commands: -python performance_profile/card_profiler.py +python performance/card_profiler.py The script computes the total runtime of the benchmark, and the time spent in loading the dataset, accumulated across the cards in the benchmark, and wraps both results into a json file: -performance_profile/logs/cards_benchmark.json +performance/logs/cards_benchmark.json -In addition, the script generates a binary file named performance_profile/logs/cards_benchmark.prof, +In addition, the script generates a binary file named performance/logs/cards_benchmark.prof, which can be nicely and interactively visualized via snakeviz: (pip install snakeviz) -snakeviz performance_profile/logs/cards_benchmark.prof +snakeviz performance/logs/cards_benchmark.prof snakeviz opens an interactive internet browser window allowing to explore all time-details. 
See exporing options here: https://jiffyclub.github.io/snakeviz/ @@ -114,11 +114,9 @@ def profile_from_cards(): def main(): logger.info(f"benchmark cards are: {cards}") - cProfile.run( - "profile_from_cards()", "performance_profile/logs/cards_benchmark.prof" - ) + cProfile.run("profile_from_cards()", "performance/logs/cards_benchmark.prof") f = StringIO() - pst = pstats.Stats("performance_profile/logs/cards_benchmark.prof", stream=f) + pst = pstats.Stats("performance/logs/cards_benchmark.prof", stream=f) pst.strip_dirs() pst.sort_stats("name") # sort by function name pst.print_stats("profiler_do_the_profiling|profiler_load_by_recipe") @@ -136,7 +134,7 @@ def main(): "load_time": load_time, "net_time": diff, } - with open("performance_profile/logs/cards_benchmark.json", "w") as outfile: + with open("performance/logs/cards_benchmark.json", "w") as outfile: json.dump(dictionary, outfile) diff --git a/performance_profile/compare_performance_results.py b/performance/compare_performance_results.py similarity index 63% rename from performance_profile/compare_performance_results.py rename to performance/compare_performance_results.py index d3cabd029e..d7bbf71ca9 100644 --- a/performance_profile/compare_performance_results.py +++ b/performance/compare_performance_results.py @@ -1,3 +1,4 @@ +import argparse import json import sys @@ -5,11 +6,21 @@ logger = get_logger() +# Argument parser to get file paths from the command line +parser = argparse.ArgumentParser(description="Compare performance profiles.") +parser.add_argument( + "main_perf_file", type=str, help="Path to main performance profile JSON file" +) +parser.add_argument( + "pr_perf_file", type=str, help="Path to PR performance profile JSON file" +) +args = parser.parse_args() + # Reading both performance json files: -with open("performance_profile/logs/main_cards_benchmark.json") as openfile: +with open(args.main_perf_file) as openfile: main_perf = json.load(openfile) -with open("performance_profile/logs/pr_cards_benchmark.json") as openfile: +with open(args.pr_perf_file) as openfile: pr_perf = json.load(openfile) logger.critical( @@ -33,5 +44,5 @@ sys.exit(1) logger.critical( - "Compared to main branch, performance or the PR branch is within acceptable limits." + "Compared to main branch, performance of the PR branch is within acceptable limits." ) diff --git a/performance_profile/profile.sh b/performance/profile.sh similarity index 100% rename from performance_profile/profile.sh rename to performance/profile.sh diff --git a/performance_profile/run_profile.py b/performance/run_profile.py similarity index 100% rename from performance_profile/run_profile.py rename to performance/run_profile.py diff --git a/performance_profile/__init__.py b/performance_profile/__init__.py deleted file mode 100644 index 8b13789179..0000000000 --- a/performance_profile/__init__.py +++ /dev/null @@ -1 +0,0 @@ - diff --git a/performance_profile/compare_branches.sh b/performance_profile/compare_branches.sh deleted file mode 100644 index 0dfde252bd..0000000000 --- a/performance_profile/compare_branches.sh +++ /dev/null @@ -1,63 +0,0 @@ -# generate a safe -safe=`date +"%H_%M_%S_%N"` -mkdir $safe -# copy the updated card_prifller, potentially updated by this current branch to the safe, -# to be accessible to another branch -cp performance_profile/card_profiler.py $safe -# record the name of this current branch -current_branch=`git branch --show-current` -# check out any other branch against which you want to compare the current branch -# e.g. 
here: main -git checkout main -# prepare the dirs for performance evaluation on main -# before that - determine the way to get things back in place, so can checkout -# from main -if [ -e performance_profile ]; then - end_performance_profile="echo directory performance_profile was here before and it stays" - if [ -e performance_profile/logs ]; then - end_performance_profile_logs="echo directory performance_profile/logs was here before and it stays" - if [ -e performance_profile/logs/cards_benchmark.prof ]; then - end_cards_benchmark_prof="git restore performance_profile/logs/cards_benchmark.prof" - else - end_cards_benchmark_prof="rm performance_profile/logs/cards_benchmark.prof" - fi - if [ -e performance_profile/logs/cards_benchmark.json ]; then - end_cards_benchmark_json="git restore performance_profile/logs/cards_benchmark.json" - else - end_cards_benchmark_json="rm performance_profile/logs/cards_benchmark.json" - fi - else - end_performance_profile_logs="rm -rf performance_profile/logs" - fi - if [ -e performance_profile/card_profiler.py ]; then - end_performance_profile_card_profiler="git restore performance_profile/card_profiler.py" - else - end_performance_profile_card_profiler="rm performance_profile/card_profiler.py" - fi -else - end_performance_profile="rm -rf performance_profile" -fi - -mkdir -p performance_profile -mkdir -p performance_profile/logs -# copy out card_profiler from the safe -cp $safe/card_profiler.py performance_profile/ -# run performance on main branch and save result in safe -python -m performance_profile.card_profiler -cp performance_profile/logs/cards_benchmark.json $safe/main_cards_benchmark.json -# delete all new files that may have counterparts in current branch, and prevent the git checkout back -eval "$end_performance_profile_card_profiler" -eval "$end_cards_benchmark_prof" -eval "$end_cards_benchmark_json" -eval "$end_performance_profile_logs" -eval "$end_performance_profile" -# checkout back to current branch -git checkout $current_branch -mkdir -p performance_profile/logs -# Run performance on PR branch -python -m performance_profile.card_profiler -cp performance_profile/logs/cards_benchmark.json performance_profile/logs/pr_cards_benchmark.json -# Download main performance result from the safe -cp $safe/main_cards_benchmark.json performance_profile/logs/ -# compare main and PR performance results -python -m performance_profile.compare_performance_results diff --git a/performance_profile/logs/cards_benchmark.json b/performance_profile/logs/cards_benchmark.json deleted file mode 100644 index 1fb05218d8..0000000000 --- a/performance_profile/logs/cards_benchmark.json +++ /dev/null @@ -1 +0,0 @@ -{"total_time": 53.369, "load_time": 11.454, "net_time": 41.915} \ No newline at end of file diff --git a/pyproject.toml b/pyproject.toml index 080ba8762a..9c2b028b6e 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -55,7 +55,7 @@ target-version = "py38" "utils/hf/prepare_dataset.py" = ["T201"] "utils/hf/prepare_metric.py" = ["T201"] "utils/compare_unitxt_datasets_between_versions.py" = ["C901"] -"performance_profile/run_profile.py" = ["T201"] +"performance/run_profile.py" = ["T201"] [tool.ruff.lint] # Enable Pyflakes (`F`) and a subset of the pycodestyle (`E`) codes by default. 
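
Taken together, this first patch replaces the upload/download-artifact round-trips (and the deleted compare_branches.sh above) with a simpler scheme: the profiler scripts are copied into performance_action/, a directory neither branch tracks, so the clean: false checkouts leave it in place while the job switches between main and the PR branch. A rough local sketch of the same flow, mirroring the workflow steps as written at this point in the series:

# stage the scripts outside the tracked tree so branch switches keep them
mkdir -p performance_action/logs
cp performance/card_profiler.py performance_action/
cp performance/compare_performance_results.py performance_action/
# profile main; the untracked performance_action/ directory survives the checkout
git checkout main
python performance_action/card_profiler.py
cp performance/logs/cards_benchmark.json performance_action/logs/main_cards_benchmark.json
# profile the PR branch the same way
git checkout -
python performance_action/card_profiler.py
cp performance/logs/cards_benchmark.json performance_action/logs/pr_cards_benchmark.json
# compare the two JSON files; the script exits non-zero on a regression
python performance_action/compare_performance_results.py performance_action/logs/main_cards_benchmark.json performance_action/logs/pr_cards_benchmark.json

Note that card_profiler.py still writes to the fixed path performance/logs/cards_benchmark.json here, a directory that does not yet exist on main (main still has performance_profile/); the --output_file flag introduced in PATCH 4 sidesteps exactly this.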
From ad6ae5a8256a813e126bc5f60a5c7e0980413293 Mon Sep 17 00:00:00 2001 From: elronbandel Date: Sun, 20 Oct 2024 10:36:10 +0300 Subject: [PATCH 2/6] Fix Signed-off-by: elronbandel --- .github/workflows/performance.yml | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/.github/workflows/performance.yml b/.github/workflows/performance.yml index 0c3f6bb034..d7daa5bef8 100644 --- a/.github/workflows/performance.yml +++ b/.github/workflows/performance.yml @@ -25,9 +25,10 @@ jobs: with: python-version: '3.9' - - run: curl -LsSf https://astral.sh/uv/install.sh | sh - - run: uv pip install --system -e ".[tests]" - - run: pip install coverage[toml] + - name: Install Requirements + run: | + curl -LsSf https://astral.sh/uv/install.sh | sh + uv pip install --system -e ".[tests]" - name: Prepare the dirs for performance evaluation in main run: | @@ -36,7 +37,7 @@ jobs: echo "" > performance_action/__init__.py echo " " > performance_action/logs/cards_benchmark.prof echo " " > performance_action/logs/cards_benchmark.json - cp performance/card_profile.py performance_action/card_profile.py + cp performance/card_profiler.py performance_action/card_profiler.py cp performance/compare_performance_results.py performance_action/compare_performance_results.py - name: Checkout main branch From bd9275efade25feb0c793f523601c62d49224801 Mon Sep 17 00:00:00 2001 From: elronbandel Date: Sun, 20 Oct 2024 10:43:41 +0300 Subject: [PATCH 3/6] Fix Signed-off-by: elronbandel --- performance/card_profiler.py | 90 ++++++++++++++++++++---------------- 1 file changed, 49 insertions(+), 41 deletions(-) diff --git a/performance/card_profiler.py b/performance/card_profiler.py index 5135bfc98b..a1f58a5c01 100644 --- a/performance/card_profiler.py +++ b/performance/card_profiler.py @@ -1,6 +1,7 @@ import cProfile import json import pstats +import tempfile from io import StringIO from unitxt.api import load_recipe @@ -15,35 +16,35 @@ settings = get_settings() settings.allow_unverified_code = True -"""Profiles the execution-time of api.load_dataset(), over a benchmark of cards. -Usage: set values for variables cards (the benchmark) +class CardProfiler: + """Profiles the execution-time of api.load_dataset(), over a benchmark of cards. -from unitxt root dir, run the following linux commands: + Usage: set values for variables cards (the benchmark) -python performance/card_profiler.py + from unitxt root dir, run the following linux commands: -The script computes the total runtime of the benchmark, and the time spent in loading the dataset, -accumulated across the cards in the benchmark, and wraps both results into a json file: -performance/logs/cards_benchmark.json + python performance/card_profiler.py -In addition, the script generates a binary file named performance/logs/cards_benchmark.prof, -which can be nicely and interactively visualized via snakeviz: + The script computes the total runtime of the benchmark, and the time spent in loading the dataset, + accumulated across the cards in the benchmark, and wraps both results into a json file: + performance/logs/cards_benchmark.json -(pip install snakeviz) -snakeviz performance/logs/cards_benchmark.prof + In addition, the script generates a binary file named performance/logs/cards_benchmark.prof, + which can be nicely and interactively visualized via snakeviz: -snakeviz opens an interactive internet browser window allowing to explore all time-details. 
-See exporing options here: https://jiffyclub.github.io/snakeviz/
-(can also use the -s flag for snakeviz which will only set up a server and print out the url
-to use from another computer in order to view results shown by that server)
+    snakeviz opens an interactive internet browser window allowing to explore all time-details.
+    See exploring options here: https://jiffyclub.github.io/snakeviz/
+    (can also use the -s flag for snakeviz which will only set up a server and print out the url
+    to use from another computer in order to view results shown by that server)
 
-In the browser window, look (ctrl-F) for methods named profiler_... to read profiling data for the major steps in the process.
-You will find the total time of each step, accumulated along all cards in the benchmark.
-"""
+    In the browser window, look (ctrl-F) for methods named profiler_... to read profiling data for the major steps in the process.
+    You will find the total time of each step, accumulated along all cards in the benchmark.
 
+    """
 
-class CardProfiler:
     def profiler_instantiate_recipe(self, **kwargs) -> StandardRecipe:
         return load_recipe(**kwargs)
 
@@ -114,28 +115,35 @@ def profile_from_cards():
 
 
 def main():
     logger.info(f"benchmark cards are: {cards}")
-    cProfile.run("profile_from_cards()", "performance/logs/cards_benchmark.prof")
-    f = StringIO()
-    pst = pstats.Stats("performance/logs/cards_benchmark.prof", stream=f)
-    pst.strip_dirs()
-    pst.sort_stats("name")  # sort by function name
-    pst.print_stats("profiler_do_the_profiling|profiler_load_by_recipe")
-    s = f.getvalue()
-    assert s.split("\n")[7].split()[3] == "cumtime"
-    assert "profiler_do_the_profiling" in s.split("\n")[8]
-    tot_time = round(float(s.split("\n")[8].split()[3]), 3)
-    assert "profiler_load_by_recipe" in s.split("\n")[9]
-    load_time = round(float(s.split("\n")[9].split()[3]), 3)
-    diff = round(tot_time - load_time, 3)
-
-    # Data to be written
-    dictionary = {
-        "total_time": tot_time,
-        "load_time": load_time,
-        "net_time": diff,
-    }
-    with open("performance/logs/cards_benchmark.json", "w") as outfile:
-        json.dump(dictionary, outfile)
+    with tempfile.NamedTemporaryFile(suffix=".prof", delete=False) as temp_prof_file:
+        temp_prof_file_path = temp_prof_file.name
+    with open(temp_prof_file_path, "w+") as outfile:
+        pass
+    cProfile.run("profile_from_cards()", temp_prof_file_path)
+
+    f = StringIO()
+    pst = pstats.Stats(temp_prof_file_path, stream=f)
+    pst.strip_dirs()
+    pst.sort_stats("name")  # sort by function name
+    pst.print_stats("profiler_do_the_profiling|profiler_load_by_recipe")
+    s = f.getvalue()
+    assert s.split("\n")[7].split()[3] == "cumtime"
+    assert "profiler_do_the_profiling" in s.split("\n")[8]
+    tot_time = round(float(s.split("\n")[8].split()[3]), 3)
+    assert "profiler_load_by_recipe" in s.split("\n")[9]
+    load_time = round(float(s.split("\n")[9].split()[3]), 3)
+    diff = round(tot_time - load_time, 3)
+
+    # Data to be written
+    dictionary = {
+        "total_time": tot_time,
+        "load_time": load_time,
+        "net_time": diff,
+    }
+    with open("performance/logs/cards_benchmark.json", "w+") as outfile:
+        json.dump(dictionary, outfile)
+
+    logger.info(f"Profiling data saved to temporary file: {temp_prof_file_path}")
 
 
 if __name__ == "__main__":

From b5c1dd3bbd25f93ba34b5a2f89aeb78db3e9b09b Mon Sep 17 00:00:00 2001
From: elronbandel
Date: Sun, 20 Oct 2024 10:55:41 +0300
Subject: [PATCH 4/6] Update

Signed-off-by: elronbandel
---
 .github/workflows/performance.yml | 10 +++-------
performance/card_profiler.py | 26 ++++++++++++++++++++++---- 2 files changed, 25 insertions(+), 11 deletions(-) diff --git a/.github/workflows/performance.yml b/.github/workflows/performance.yml index d7daa5bef8..1f306bb2cd 100644 --- a/.github/workflows/performance.yml +++ b/.github/workflows/performance.yml @@ -48,9 +48,7 @@ jobs: - name: Run performance on main branch run: | - python performance_action/card_profiler.py - cp performance/logs/cards_benchmark.json performance_action/logs/main_cards_benchmark.json - rm performance/logs/cards_benchmark.json + python performance_action/card_profiler.py --output_file performance_action/main_results.json - name: Checkout PR branch uses: actions/checkout@v4 @@ -60,9 +58,7 @@ jobs: - name: Run performance on PR branch run: | - python performance_action/card_profiler.py - cp performance/logs/cards_benchmark.json performance_action/logs/pr_cards_benchmark.json - rm performance/logs/cards_benchmark.json + python performance_action/card_profiler.py --output_file performance_action/pr_results.json - name: Compare main and PR performance results - run: python performance_action/compare_performance_results.py performance_action/logs/main_cards_benchmark.json performance_action/logs/pr_cards_benchmark.json + run: python performance_action/compare_performance_results.py performance_action/main_results.json performance_action/pr_results.json diff --git a/performance/card_profiler.py b/performance/card_profiler.py index a1f58a5c01..f00b82f00f 100644 --- a/performance/card_profiler.py +++ b/performance/card_profiler.py @@ -1,5 +1,7 @@ +import argparse import cProfile import json +import os import pstats import tempfile from io import StringIO @@ -113,12 +115,26 @@ def profile_from_cards(): def main(): + # Parse command-line arguments + parser = argparse.ArgumentParser(description="Card Profiler") + parser.add_argument( + "--output_file", + type=str, + required=True, + help="Path to save output files (without extension)", + ) + args = parser.parse_args() + + # Ensure the directory for the output file exists + output_dir = os.path.dirname(args.output_file) + if output_dir: + os.makedirs(output_dir, exist_ok=True) + logger.info(f"benchmark cards are: {cards}") + # Create a temporary .prof file with tempfile.NamedTemporaryFile(suffix=".prof", delete=False) as temp_prof_file: temp_prof_file_path = temp_prof_file.name - with open(temp_prof_file_path, "w+") as outfile: - pass cProfile.run("profile_from_cards()", temp_prof_file_path) f = StringIO() @@ -140,10 +156,12 @@ def main(): "load_time": load_time, "net_time": diff, } - with open("performance/logs/cards_benchmark.json", "w+") as outfile: + + # Write the profiling results to the JSON file (user-specified) + with open(args.output_file, "w+") as outfile: json.dump(dictionary, outfile) - logger.info(f"Profiling data saved to temporary file: {temp_prof_file_path}") + logger.info(f"JSON output saved to: {args.output_file}") if __name__ == "__main__": From d6e54da573084ccaae9b6aff7450f34b30c2b496 Mon Sep 17 00:00:00 2001 From: elronbandel Date: Sun, 20 Oct 2024 11:11:34 +0300 Subject: [PATCH 5/6] try Signed-off-by: elronbandel --- .github/workflows/performance.yml | 4 ++- performance/compare_performance_results.py | 41 +++++++++++----------- pyproject.toml | 2 +- 3 files changed, 25 insertions(+), 22 deletions(-) diff --git a/.github/workflows/performance.yml b/.github/workflows/performance.yml index 1f306bb2cd..f61512fd8f 100644 --- a/.github/workflows/performance.yml +++ b/.github/workflows/performance.yml @@ 
-61,4 +61,6 @@ jobs: python performance_action/card_profiler.py --output_file performance_action/pr_results.json - name: Compare main and PR performance results - run: python performance_action/compare_performance_results.py performance_action/main_results.json performance_action/pr_results.json + run: | + echo "### Performance Comparison Results" >> $GITHUB_STEP_SUMMARY + python performance_action/compare_performance_results.py performance_action/main_results.json performance_action/pr_results.json >> $GITHUB_STEP_SUMMARY diff --git a/performance/compare_performance_results.py b/performance/compare_performance_results.py index d7bbf71ca9..354c955fe4 100644 --- a/performance/compare_performance_results.py +++ b/performance/compare_performance_results.py @@ -2,10 +2,6 @@ import json import sys -from unitxt.logging_utils import get_logger - -logger = get_logger() - # Argument parser to get file paths from the command line parser = argparse.ArgumentParser(description="Compare performance profiles.") parser.add_argument( @@ -16,33 +12,38 @@ ) args = parser.parse_args() -# Reading both performance json files: +# Reading both performance JSON files: with open(args.main_perf_file) as openfile: main_perf = json.load(openfile) with open(args.pr_perf_file) as openfile: pr_perf = json.load(openfile) -logger.critical( - f"Net time (total minus load) of running benchmark on main is {main_perf['net_time']}" -) -logger.critical( - f"Net time (total minus load) of running benchmark on PR branch is {pr_perf['net_time']}" -) - +# Check for valid net_time in the main performance profile if main_perf["net_time"] == 0: - logger.critical("Net run time on main is 0, can't calculate ratio of times.") + print("Net run time on main is 0, can't calculate ratio of times.") sys.exit(1) +# Calculate the ratio between PR and main branch net times ratio = pr_perf["net_time"] / main_perf["net_time"] -if ratio > 1.1: - logger.critical("Performance degradation exceeds 10% !") - logger.critical( - "Explore branch performance via 'python performance_profile/card_profiler.py', followed by 'snakeviz performance_profile/logs/cards_benchmark.prof'" +# Markdown table formatting +table_header = "| Branch | Net Time (seconds) | Performance Ratio |\n" +table_divider = "|--------------|--------------------|-------------------|\n" +table_main = f"| Main Branch | {main_perf['net_time']:<18} | - |\n" +table_pr = f"| PR Branch | {pr_perf['net_time']:<18} | {ratio:.2f} |\n" + +# Print markdown table +print("### Performance Comparison Results\n") +print(table_header + table_divider + table_main + table_pr) + +# Performance degradation check (5% threshold) +if ratio > 1.05: + print("\n**Warning**: Performance degradation exceeds 5%!") + print( + "Explore branch performance via 'python performance_profile/card_profiler.py'," + " followed by 'snakeviz performance_profile/logs/cards_benchmark.prof'." ) sys.exit(1) -logger.critical( - "Compared to main branch, performance of the PR branch is within acceptable limits." 
-) +print("\nPerformance of the PR branch is within acceptable limits.") diff --git a/pyproject.toml b/pyproject.toml index 9c2b028b6e..3bbb971a3f 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -55,7 +55,7 @@ target-version = "py38" "utils/hf/prepare_dataset.py" = ["T201"] "utils/hf/prepare_metric.py" = ["T201"] "utils/compare_unitxt_datasets_between_versions.py" = ["C901"] -"performance/run_profile.py" = ["T201"] +"performance/*.py" = ["T201"] [tool.ruff.lint] # Enable Pyflakes (`F`) and a subset of the pycodestyle (`E`) codes by default. From e5f6835c8bc263b84b9f179a1626f3b7fc4172c7 Mon Sep 17 00:00:00 2001 From: elronbandel Date: Sun, 20 Oct 2024 11:15:01 +0300 Subject: [PATCH 6/6] try Signed-off-by: elronbandel --- .github/workflows/performance.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/.github/workflows/performance.yml b/.github/workflows/performance.yml index f61512fd8f..3e2c18a0e1 100644 --- a/.github/workflows/performance.yml +++ b/.github/workflows/performance.yml @@ -62,5 +62,4 @@ jobs: - name: Compare main and PR performance results run: | - echo "### Performance Comparison Results" >> $GITHUB_STEP_SUMMARY python performance_action/compare_performance_results.py performance_action/main_results.json performance_action/pr_results.json >> $GITHUB_STEP_SUMMARY
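
With the series applied end to end, the CI check can be reproduced outside the workflow; a minimal sketch (the /tmp paths and the <pr-branch> placeholder are illustrative):

# profile each branch into its own JSON file
git checkout main
python performance/card_profiler.py --output_file /tmp/main_results.json
git checkout <pr-branch>
python performance/card_profiler.py --output_file /tmp/pr_results.json
# prints a Markdown comparison table; exits 1 if the PR's net time exceeds main's by more than 5%
python performance/compare_performance_results.py /tmp/main_results.json /tmp/pr_results.json

In CI the same output is appended to $GITHUB_STEP_SUMMARY, so the comparison table lands on the workflow run's summary page.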