diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml new file mode 100644 index 0000000..5db8307 --- /dev/null +++ b/.github/workflows/build.yml @@ -0,0 +1,20 @@ +name: Build +on: + push: + branches: + - develop + pull_request: + types: [opened, synchronize, reopened] +jobs: + sonarcloud: + name: SonarCloud + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + with: + fetch-depth: 0 + - name: SonarCloud Scan + uses: SonarSource/sonarcloud-github-action@master + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + SONAR_TOKEN: ${{ secrets.SONAR_TOKEN }} diff --git a/.github/workflows/metrics.yml b/.github/workflows/metrics.yml index 498dcc5..ad880f1 100644 --- a/.github/workflows/metrics.yml +++ b/.github/workflows/metrics.yml @@ -5,6 +5,7 @@ on: branches: [main, develop] tags: - "v*" + workflow_dispatch: jobs: release: @@ -24,7 +25,7 @@ jobs: run: | git config --global user.email "${{secrets.USER_EMAIL}}" git config --global user.name "${{secrets.USER_NAME}}" - git clone --single-branch --branch main "https://x-access-token:${{secrets.API_TOKEN_DOC}}@github.com/fga-eps-mds/2023-1-MeasureSoftGram-Doc" doc + git clone --single-branch --branch main "https://x-access-token:${{secrets.API_TOKEN_DOC}}@github.com/fga-eps-mds/2023.2-MeasureSoftGram-DOC" doc mkdir -p doc/analytics-raw-data cp -R analytics-raw-data/*.json doc/analytics-raw-data cd doc diff --git a/.github/workflows/msgram-analysis.yml b/.github/workflows/msgram-analysis.yml new file mode 100644 index 0000000..c6b3bad --- /dev/null +++ b/.github/workflows/msgram-analysis.yml @@ -0,0 +1,20 @@ +name: MSG Action +on: + pull_request: + branches: [ develop ] + types: [opened, closed, reopened, synchronize] + workflow_dispatch: +jobs: + msgram_job: + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v3 + - name: 2023.2 Action MeasureSoftGram + uses: fga-eps-mds/2023-2-MeasureSoftGram-Action@v2.2 + id: msgram + with: + githubToken: ${{ secrets.GITHUB_TOKEN }} # Token do GitHub + sonarProjectKey: "fga-eps-mds_2023-2-MeasureSoftGram-Core" # (opcional) Chave do projeto no SonarQube + msgramServiceToken: ${{ secrets.MSGRAM_SERVICE_TOKEN }} # Token para acessar o serviço MeasureSoftGram + productName: "MeasureSoftGram" # Nome do produto diff --git a/.github/workflows/python-publish.yml b/.github/workflows/python-publish.yml index 45f5e27..7feede7 100644 --- a/.github/workflows/python-publish.yml +++ b/.github/workflows/python-publish.yml @@ -5,6 +5,7 @@ on: branches: [ develop ] tags: - "v*" + workflow_dispatch: jobs: deploy: @@ -32,5 +33,5 @@ jobs: run: python -m twine upload -u __token__ -p ${{ secrets.TEST_PYPI_API_TOKEN }} --repository testpypi dist/* - name: Publish package on pypi - if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags') + #if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags') run: python -m twine upload -u __token__ -p ${{ secrets.PYPI_API_TOKEN }} dist/* diff --git a/README.md b/README.md index 97e1cca..7687577 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,4 @@ -# 2023-1-MeasureSoftGram-Core +# 2023-2 MeasureSoftGram-Core ## Badges diff --git a/parser.py b/parser.py index 3b7b495..51cdb6f 100644 --- a/parser.py +++ b/parser.py @@ -23,7 +23,6 @@ BASE_URL = "https://sonarcloud.io/api/measures/component_tree?component=fga-eps-mds_" if __name__ == "__main__": - REPO = sys.argv[1] RELEASE_VERSION = sys.argv[2] diff --git a/pyproject.toml b/pyproject.toml index 8a6564c..c2ddc55 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,7 +4,7 
@@ build-backend = "setuptools.build_meta" [project] name = "msgram_core" -version = "1.3.3" +version = "1.4.5" description = "The MeasureSoftGram-Core is a Software system for continuous quality of product observation and multidimensional use in continuous design engineering software and is where you have the innovative mathematical models for software analysis." readme = "README.md" authors = [ diff --git a/sonar-project.properties b/sonar-project.properties index 521a587..6a4533b 100644 --- a/sonar-project.properties +++ b/sonar-project.properties @@ -1,4 +1,4 @@ -sonar.projectKey=fga-eps-mds_2023-1-MeasureSoftGram-Core +sonar.projectKey=fga-eps-mds_2023-2-MeasureSoftGram-Core sonar.organization=fga-eps-mds-1 sonar.python.version=3 diff --git a/src/core/aggregated_normalized_measures.py b/src/core/aggregated_normalized_measures.py index 5a05f98..7bc99b8 100644 --- a/src/core/aggregated_normalized_measures.py +++ b/src/core/aggregated_normalized_measures.py @@ -45,7 +45,9 @@ def non_complex_files_density( files_in_thresholds_bool_index = complex_files_density <= max_threshold files_functions_gt_zero_bool_index = files_functions > 0 - x = complex_files_density[files_in_thresholds_bool_index * files_functions_gt_zero_bool_index] + x = complex_files_density[ + files_in_thresholds_bool_index * files_functions_gt_zero_bool_index + ] interpretation_function_value = transformations.interpretation_function( x=x, @@ -60,7 +62,9 @@ def non_complex_files_density( return aggregated_and_normalized_measure -def commented_files_density(data_frame, min_threshold: float = 10, max_threshold: float = 30): +def commented_files_density( + data_frame, min_threshold: float = 10, max_threshold: float = 30 +): """ Calculates commented files density. @@ -104,7 +108,9 @@ def commented_files_density(data_frame, min_threshold: float = 10, max_threshold return aggregated_and_normalized_measure -def absence_of_duplications(data_frame, min_threshold: float = 0, max_threshold: float = 5.0): +def absence_of_duplications( + data_frame, min_threshold: float = 0, max_threshold: float = 5.0 +): """ Calculates duplicated files absence (em3). @@ -113,7 +119,9 @@ def absence_of_duplications(data_frame, min_threshold: float = 0, max_threshold: """ files_duplicated_lines_density = data_frame["duplicated_lines_density"] # m5 metric - Checker.check_metric_values(files_duplicated_lines_density, "duplicated_lines_density") + Checker.check_metric_values( + files_duplicated_lines_density, "duplicated_lines_density" + ) if len(files_duplicated_lines_density) == 0: return 0.0 @@ -177,7 +185,9 @@ def test_coverage( return aggregated_and_normalized_measure -def fast_test_builds(data_frame, min_threshold: float = 0, max_threshold: float = 300000): +def fast_test_builds( + data_frame, min_threshold: float = 0, max_threshold: float = 300000 +): """ Calculates fast test builds (em5) This function gets the dataframe metrics @@ -256,6 +266,88 @@ def passed_tests(data_frame, min_threshold: float = 0, max_threshold: float = 1) gain_interpretation=1, ) - aggregated_and_normalized_measure = transformations.calculate_measure(interpretation_function_value) + aggregated_and_normalized_measure = transformations.calculate_measure( + interpretation_function_value + ) + + return aggregated_and_normalized_measure + + +def team_throughput( + data_frame, + min_threshold: float = 45, + max_threshold: float = 100, +): + """ + Calculates team throughput (em7). + + This function gets the dataframe metrics + and returns the team throughput measure (em7). 
+ """ + total_issues = data_frame["total_issues"] + resolved_issues = data_frame["resolved_issues"] + + Checker.check_metric_value(total_issues, "total_issues") + Checker.check_metric_value(resolved_issues, "resolved_issues") + + Checker.check_threshold(min_threshold, max_threshold, "team_throughput") + + team_throughput_value = ems_functions.get_team_throughput( + data={ + "total_issues": int(total_issues), + "resolved_issues": int(resolved_issues), + } + ) + + interpretation_function_value = transformations.interpretation_function( + x=team_throughput_value, + min_threshold=min_threshold, + max_threshold=max_threshold, + gain_interpretation=1, + ) + + aggregated_and_normalized_measure = transformations.calculate_measure( + interpretation_function_value + ) + + return aggregated_and_normalized_measure + + +def ci_feedback_time( + data_frame, + min_threshold: float = 1, + max_threshold: float = 900, +): + """ + Calculates CI Feedback Time (em8). + + This function gets the dataframe metrics + and returns the CI Feedback Time (em8). + """ + total_builds = data_frame["total_builds"] + sum_ci_feedback_times = data_frame["sum_ci_feedback_times"] + + Checker.check_metric_value(total_builds, "total_builds") + Checker.check_metric_value(sum_ci_feedback_times, "sum_ci_feedback_times") + + Checker.check_threshold(min_threshold, max_threshold, "ci_feedback_time") + + ci_feedback_time_value = ems_functions.get_ci_feedback_time( + data={ + "total_builds": int(total_builds), + "sum_ci_feedback_times": int(sum_ci_feedback_times), + } + ) + + interpretation_function_value = transformations.interpretation_function( + x=ci_feedback_time_value, + min_threshold=min_threshold, + max_threshold=max_threshold, + gain_interpretation=1, + ) + + aggregated_and_normalized_measure = transformations.calculate_measure( + interpretation_function_value + ) return aggregated_and_normalized_measure diff --git a/src/core/measures_functions.py b/src/core/measures_functions.py index d9787dd..0c33298 100644 --- a/src/core/measures_functions.py +++ b/src/core/measures_functions.py @@ -183,25 +183,29 @@ def get_test_coverage(data: dict): return coverage, number_of_files -# def calculate_em7(data: dict): -# """ -# Calculates team throughput (em7). +def get_team_throughput(data: dict[str, int]): + """ + Calculates team throughput (em7) + + This function calculates the team throughput measure (em7) + used to assess the testing status sub characteristic. + """ + + total_issues = data["total_issues"] + resolved_issues = data["resolved_issues"] -# This function calculates the team throughput measure (em7) -# used to assess the functional completeness subcharacteristic. -# """ -# resolved_issues_with_us_label = data[ -# "number_of_resolved_issues_with_US_label_in_the_last_x_days" -# ] + return 100 * (resolved_issues / total_issues) -# total_issues_with_us_label = data[ -# "total_number_of_issues_with_US_label_in_the_last_x_days" -# ] -# x, y = create_coordinate_pair(0, 1, reverse_y=True) +def get_ci_feedback_time(data: dict[str, int]): + """ + Calculates CI Feedback Time (em8) + + This function calculates the CI Feedback Time measure (em8) + used to assess the testing status sub characteristic. 
+ """ -# if7 = np.divide(resolved_issues_with_us_label, total_issues_with_us_label) + total_builds = data["total_builds"] + sum_ci_feedback_times = data["sum_ci_feedback_times"] -# if np.isnan(if7) or np.isinf(if7): -# return 0 -# return np.interp(if7, x, y) + return sum_ci_feedback_times // total_builds diff --git a/src/core/schemas.py b/src/core/schemas.py index 2d891f3..8e83f49 100644 --- a/src/core/schemas.py +++ b/src/core/schemas.py @@ -1,43 +1,79 @@ from marshmallow import Schema, fields, validate +from marshmallow.exceptions import ValidationError + + +class MetricSchema(Schema): + """ + { + "key": "tests" + "value": [10.0] + } + """ + + key = fields.Str(required=True) + value = fields.List(fields.Float(required=True)) class MeasureSchema(Schema): """ { "key": "passed_tests", - "parameters": { - "tests": 10, - "test_errors": 3, - "test_failures": 1 - } + "metrics": [ + { + "key": "tests", + "value": [10.0] + }, + { + "key": "test_errors", + "value": [3.0] + }, + { + "key": "test_failures", + "value": [1.0] + } + ] } """ key = fields.Str(required=True) - parameters = fields.Dict(required=True) + metrics = fields.List(fields.Nested(MetricSchema), required=True) class CalculateMeasureSchema(Schema): """ { - "measures": [ + + "measures": [ { "key": "passed_tests", - "parameters": { - "tests": 10, - "test_errors": 3, - "test_failures": 1 - } + "metrics": [ + { + "key": "tests", + "value": [10.0] + }, + { + "key": "test_errors", + "value": [3.0] + }, + { + "key": "test_failures", + "value": [1.0] + } + ] }, { "key": "test_builds", - "parameters": { - "param1": 8, - "param2": 19, - "parma3": 4 - } - } - ] + "metrics": [ + { + "key": "test_execution_time", + "value": [8.0] + }, + { + "key": "tests", + "value": [10.0] + } + ] + }, } """ @@ -75,12 +111,16 @@ class CalculateSubCharacteristicSchema(Schema): } """ - subcharacteristics = fields.List(fields.Nested(SubCharacteristicSchema), required=True) + subcharacteristics = fields.List( + fields.Nested(SubCharacteristicSchema), required=True + ) class CharacteristicSchema(Schema): key = fields.Str(required=True) - subcharacteristics = fields.List(fields.Nested(CalculatedSubEntitySchema), required=True) + subcharacteristics = fields.List( + fields.Nested(CalculatedSubEntitySchema), required=True + ) class CalculateCharacteristicSchema(Schema): @@ -108,7 +148,9 @@ class CalculateCharacteristicSchema(Schema): class TSQMISchema(Schema): key = fields.Str(required=True) - characteristics = fields.List(fields.Nested(CalculatedSubEntitySchema), required=True) + characteristics = fields.List( + fields.Nested(CalculatedSubEntitySchema), required=True + ) class CalculateTSQMISchema(Schema): @@ -132,68 +174,146 @@ class CalculateTSQMISchema(Schema): class NonComplexFileDensitySchema(Schema): - """ - "key": "non_complex_file_density", - "function": calculate_em1 - """ + # 1 Validação : Se contém uma lista de métricas + metrics = fields.List(fields.Nested(MetricSchema), required=True) - complexity = fields.List(fields.Float(required=True)) - functions = fields.List(fields.Float(required=True)) + @staticmethod + def validate_metrics(metrics): + for metric in metrics: + # 2 Validação : Se foi passada alguma métrica não pertencente a medida + if metric["key"] not in ["complexity", "functions"]: + raise ValidationError( + f"'{metric['key']}': Métrica não presente na medida" + ) class CommentedFileDensitySchema(Schema): - """ - "key": "commented_file_density", - "function": calculate_em2 - """ + # 1 Validação : Se contém uma lista de métricas + metrics = 
fields.List(fields.Nested(MetricSchema), required=True) - comment_lines_density = fields.List(fields.Float(required=True)) + @staticmethod + def validate_metrics(metrics): + for metric in metrics: + # 2 Validação : Se foi passada alguma métrica não pertencente a medida + if metric["key"] not in ["comment_lines_density"]: + raise ValidationError( + f"'{metric['key']}': Métrica não presente na medida" + ) class DuplicationAbsenceSchema(Schema): - """ - "key": "duplication_absense", - "function": calculate_em3 - """ + # 1 Validação : Se contém uma lista de métricas + metrics = fields.List(fields.Nested(MetricSchema), required=True) - duplicated_lines_density = fields.List(fields.Float(required=True)) + @staticmethod + def validate_metrics(metrics): + for metric in metrics: + # 2 Validação : Se foi passada alguma métrica não pertencente a medida + if metric["key"] not in ["duplicated_lines_density"]: + raise ValidationError( + f"'{metric['key']}': Métrica não presente na medida" + ) class PassedTestsSchema(Schema): - """ - "key": "passed_tests", - "function": calculate_em4 - """ - - tests = fields.List(fields.Float(required=True)) - test_errors = fields.Float(required=True) - test_failures = fields.Float(required=True) + # 1 Validação : Se contém uma lista de métricas + metrics = fields.List(fields.Nested(MetricSchema), required=True) + + @staticmethod + def validate_metrics(metrics): + for metric in metrics: + # 2 Validação : Se foi passada alguma métrica não pertencente a medida + if metric["key"] not in ["tests", "test_failures", "test_errors"]: + raise ValidationError( + f"'{metric['key']}': Métrica não presente na medida" + ) + + # 3 Validação: As métricas test_failures e test_errors só podem ser + # representadas por um array de um único elemento flutuante + if metric["key"] in ["test_failures", "test_errors"]: + if len(metric["value"]) != 1: + raise ValidationError( + f"'{metric['key']}': Deveria ser um array de um único valor" + ) + if not isinstance(metric["value"][0], float): + raise ValidationError( + f"'{metric['key']}': Deveria ser um valor flutuante" + ) class TestBuildsSchema(Schema): - """ - "key": "test_builds", - "function": calculate_em5 - """ + # 1 Validação : Se contém uma lista de métricas + metrics = fields.List(fields.Nested(MetricSchema), required=True) - test_execution_time = fields.List(fields.Float(required=True)) - tests = fields.List(fields.Float(required=True)) + @staticmethod + def validate_metrics(metrics): + for metric in metrics: + # 2 Validação : Se foi passada alguma métrica não pertencente a medida + if metric["key"] not in ["test_execution_time", "tests"]: + raise ValidationError( + f"'{metric['key']}': Métrica não presente na medida" + ) class TestCoverageSchema(Schema): - """ - "key": "test_coverage", - "function": calculate_em6 - """ - - coverage = fields.List(fields.Float(required=True)) + # 1 Validação : Se contém uma lista de métricas + metrics = fields.List(fields.Nested(MetricSchema), required=True) + + @staticmethod + def validate_metrics(metrics): + for metric in metrics: + # 2 Validação : Se foi passada alguma métrica não pertencente a medida + if metric["key"] not in ["coverage"]: + raise ValidationError( + f"'{metric['key']}': Métrica não presente na medida" + ) + + +class CIFeedbackTimeSchema(Schema): + # 1 Validação : Se contém uma lista de métricas + metrics = fields.List(fields.Nested(MetricSchema), required=True) + + @staticmethod + def validate_metrics(metrics): + for metric in metrics: + # 2 Validação : Se foi passada alguma métrica 
não pertencente a medida + if metric["key"] not in ["sum_ci_feedback_times", "total_builds"]: + raise ValidationError( + f"'{metric['key']}': Métrica não presente na medida" + ) + + # 3 Validação: As métricas só podem ser representadas por um array de + # um único elemento + if len(metric["value"]) != 1: + raise ValidationError( + f"'{metric['key']}': Deveria ser um array de um único valor" + ) + if not isinstance(metric["value"][0], float): + raise ValidationError( + f"'{metric['key']}': Deveria ser um valor flutuante" + ) class TeamThroughputSchema(Schema): - """ - "key": "team_throughput", - "function": calculate_em7 - """ - - number_of_resolved_issues_with_US_label_in_the_last_x_days = fields.Integer(required=True) - total_number_of_issues_with_US_label_in_the_last_x_days = fields.Integer(required=True) + # 1 Validação : Se contém uma lista de métricas + metrics = fields.List(fields.Nested(MetricSchema), required=True) + + @staticmethod + def validate_metrics(metrics): + for metric in metrics: + # 2 Validação : Se foi passada alguma métrica não pertencente a medida + if metric["key"] not in ["resolved_issues", "total_issues"]: + raise ValidationError( + f"'{metric['key']}': Métrica não presente na medida" + ) + + # 3 Validação: As métricas só podem ser representadas por um array de + # um único elemento + if len(metric["value"]) != 1: + raise ValidationError( + f"'{metric['key']}': Deveria ser um array de um único valor" + ) + if not isinstance(metric["value"][0], float): + raise ValidationError( + f"'{metric['key']}': Deveria ser um valor flutuante" + ) diff --git a/src/resources/analysis.py b/src/resources/analysis.py index 492bb21..fd99335 100644 --- a/src/resources/analysis.py +++ b/src/resources/analysis.py @@ -8,19 +8,34 @@ CalculateTSQMISchema, ) from core.transformations import calculate_aggregated_weighted_value -from util.constants import AGGREGATED_NORMALIZED_MEASURES_MAPPING +from resources.constants import AGGREGATED_NORMALIZED_MEASURES_MAPPING from util.exceptions import MeasureKeyNotSupported +def convert_metrics_to_dict(metrics_list): + metrics_dict = {} + for metric in metrics_list: + if len(metric["value"]) == 1: + metrics_dict[metric["key"]] = float(metric["value"][0]) + else: + metrics_dict[metric["key"]] = metric["value"] + + return metrics_dict + + def calculate_measures( extracted_measures: CalculateMeasureSchema, - config: dict = {"characteristics": [{"subcharacteristics": [{"measures": [{"key": ""}]}]}]}, + config: dict = { + "characteristics": [{"subcharacteristics": [{"measures": [{"key": ""}]}]}] + }, ): # Validate if outter keys is valid try: data = CalculateMeasureSchema().load(extracted_measures) except ValidationError as error: - raise ValidationError(f"error: Failed to validate input.\nschema_errors: {error.messages}") + raise ValidationError( + f"error: Failed to validate input.\nschema_errors: {error.messages}" + ) # Objeto retornado em caso de sucesso result_data = {"measures": []} @@ -33,19 +48,24 @@ def calculate_measures( if measure_key not in valid_measures: raise MeasureKeyNotSupported(f"Measure {measure_key} is not supported") - measure_params = measure["parameters"] schema = AGGREGATED_NORMALIZED_MEASURES_MAPPING[measure_key]["schema"] + measure_metrics = measure["metrics"] try: - validated_params = schema().load(measure_params) + validated_params = schema().load({"metrics": measure_metrics}) + # Se o schema da medida tem validações específicas para alguma métrica + if hasattr(schema(), "validate_metrics") and callable( + getattr(schema(), 
"validate_metrics") + ): + schema().validate_metrics(validated_params["metrics"]) except ValidationError as exc: raise ValidationError( - f"error: Metric parameters {measure_key} are not valid.\nschema_errors: {exc.messages}" + f"error: Metrics in {measure_key} are not valid.\nschema_errors: {exc.messages}" ) - aggregated_normalized_measure = AGGREGATED_NORMALIZED_MEASURES_MAPPING[measure_key][ - "aggregated_normalized_measure" - ] + aggregated_normalized_measure = AGGREGATED_NORMALIZED_MEASURES_MAPPING[ + measure_key + ]["aggregated_normalized_measure"] measures = [ measure @@ -58,9 +78,14 @@ def calculate_measures( key: value for measure in measures for key, value in measure.items() - if measure["key"] == measure_key and ("min_threshold" == key or "max_threshold" == key) + if measure["key"] == measure_key + and ("min_threshold" == key or "max_threshold" == key) } - result = aggregated_normalized_measure(validated_params, **threshold_config) + + validated_params_dict = convert_metrics_to_dict(validated_params["metrics"]) + result = aggregated_normalized_measure( + validated_params_dict, **threshold_config + ) result_data["measures"].append( { @@ -76,7 +101,9 @@ def calculate_subcharacteristics(extracted_subcharacteristics): try: data = CalculateSubCharacteristicSchema().load(extracted_subcharacteristics) except ValidationError as error: - raise ValidationError(f"error: Failed to validate input.\nschema_errors: {error.messages}") + raise ValidationError( + f"error: Failed to validate input.\nschema_errors: {error.messages}" + ) result_data = {"subcharacteristics": []} @@ -87,7 +114,9 @@ def calculate_subcharacteristics(extracted_subcharacteristics): vector_weight_aggregated_normalized_measure = np.array([]) for measure in subcharacteristic["measures"]: - vector_aggregated_normalized_measure = np.append(vector_aggregated_normalized_measure, measure["value"]) + vector_aggregated_normalized_measure = np.append( + vector_aggregated_normalized_measure, measure["value"] + ) vector_weight_aggregated_normalized_measure = np.append( vector_weight_aggregated_normalized_measure, measure["weight"] ) @@ -110,7 +139,9 @@ def calculate_characteristics(extracted_characteristics): try: data = CalculateCharacteristicSchema().load(extracted_characteristics) except ValidationError as error: - raise ValidationError(f"error: Failed to validate input.\nschema_errors: {error.messages}") + raise ValidationError( + f"error: Failed to validate input.\nschema_errors: {error.messages}" + ) result_data = {"characteristics": []} @@ -148,7 +179,9 @@ def calculate_tsqmi(extracted_tsqmi): try: data = CalculateTSQMISchema().load(extracted_tsqmi) except ValidationError as error: - raise ValidationError(f"error: Failed to validate input.\nschema_errors: {error.messages}") + raise ValidationError( + f"error: Failed to validate input.\nschema_errors: {error.messages}" + ) result_data = {"tsqmi": []} diff --git a/src/util/constants.py b/src/resources/constants.py similarity index 78% rename from src/util/constants.py rename to src/resources/constants.py index 6ca7cd1..23e7c74 100644 --- a/src/util/constants.py +++ b/src/resources/constants.py @@ -6,21 +6,23 @@ non_complex_files_density, passed_tests, test_coverage, + team_throughput, + ci_feedback_time, ) AVAILABLE_PRE_CONFIGS = { "characteristics": { "reliability": { "name": "Reliability", - "subcharacteristics": ["testing_status"], + "subcharacteristics": ["testing_status", "maturity"], }, "maintainability": { "name": "Maintainability", "subcharacteristics": ["modifiability"], 
}, - "productivity": { - "name": "Productivity", - "subcharacteristics": ["issues_velocity"], + "functional_suitability": { + "name": "Functional Suitability", + "subcharacteristics": ["functional_completeness"], }, }, "subcharacteristics": { @@ -33,6 +35,11 @@ ], "characteristics": ["reliability"], }, + "maturity": { + "name": "Maturity", + "measures": ["ci_feedback_time"], + "characteristics": ["reliability"], + }, "modifiability": { "name": "Modifiability", "measures": [ @@ -42,10 +49,10 @@ ], "characteristics": ["maintainability"], }, - "issues_velocity": { - "name": "Issues Velocity", + "functional_completeness": { + "name": "Functional Completeness", "measures": ["team_throughput"], - "characteristics": ["productivity"], + "characteristics": ["functional_suitability"], }, }, "measures": { @@ -67,6 +74,12 @@ "characteristics": ["reliability"], "metrics": ["coverage"], }, + "ci_feedback_time": { + "name": "CI Feedback Time", + "subcharacteristics": ["maturity"], + "characteristics": ["reliability"], + "metrics": ["sum_ci_feedback_times", "total_builds"], + }, "non_complex_file_density": { "name": "Non complex file density", "subcharacteristics": ["modifiability"], @@ -89,10 +102,7 @@ "name": "Team Throughput", "subcharacteristics": ["functional_completeness"], "characteristics": ["functional_suitability"], - "metrics": [ - "number_of_resolved_issues_with_US_label_in_the_last_x_days", - "total_number_of_issues_with_US_label_in_the_last_x_days", - ], + "metrics": ["total_issues", "resolved_issues"], }, }, } @@ -123,7 +133,11 @@ "schema": schemas.TestCoverageSchema, }, "team_throughput": { - "aggregated_normalized_measure": ..., + "aggregated_normalized_measure": team_throughput, "schema": schemas.TeamThroughputSchema, }, + "ci_feedback_time": { + "aggregated_normalized_measure": ci_feedback_time, + "schema": schemas.CIFeedbackTimeSchema, + }, } diff --git a/src/staticfiles/default_pre_config.py b/src/staticfiles/default_pre_config.py index 64a610e..1eba713 100644 --- a/src/staticfiles/default_pre_config.py +++ b/src/staticfiles/default_pre_config.py @@ -4,11 +4,11 @@ "characteristics": [ { "key": "reliability", - "weight": 50, + "weight": 34, "subcharacteristics": [ { "key": "testing_status", - "weight": 100, + "weight": 50, "measures": [ { "key": "passed_tests", @@ -29,12 +29,24 @@ "max_threshold": 100, }, ], - } + }, + { + "key": "maturity", + "weight": 50, + "measures": [ + { + "key": "ci_feedback_time", + "weight": 100, + "min_threshold": 1, + "max_threshold": 900, + } + ], + }, ], }, { "key": "maintainability", - "weight": 50, + "weight": 33, "subcharacteristics": [ { "key": "modifiability", @@ -62,21 +74,23 @@ } ], }, - # { - # "key": "functional_suitability", - # "weight": 34.0, - # "subcharacteristics": [ - # { - # "key": "functional_completeness", - # "weight": 100.0, - # "measures": [ - # { - # "key": "team_throughput", - # "weight": 100.0 - # } - # ] - # } - # ] - # } + { + "key": "functional_suitability", + "weight": 33, + "subcharacteristics": [ + { + "key": "functional_completeness", + "weight": 100, + "measures": [ + { + "key": "team_throughput", + "weight": 100, + "min_threshold": 45, + "max_threshold": 100, + }, + ], + } + ], + }, ] } diff --git a/src/staticfiles/supported_metrics.py b/src/staticfiles/supported_metrics.py index b0a90cf..276b623 100644 --- a/src/staticfiles/supported_metrics.py +++ b/src/staticfiles/supported_metrics.py @@ -45,20 +45,20 @@ ], } }, - # { - # "ci_feedback_time": { - # "metrics": [ - # "number_of_build_pipelines_in_the_last_x_days", - # 
"runtime_sum_of_build_pipelines_in_the_last_x_days", - # ], - # } - # }, - # { - # "team_throughput": { - # "metrics": [ - # "number_of_resolved_issues_with_US_label_in_the_last_x_days", - # "total_number_of_issues_with_US_label_in_the_last_x_days", - # ], - # } - # }, + { + "team_throughput": { + "metrics": [ + "total_issues", + "resolved_issues", + ], + } + }, + { + "ci_feedback_time": { + "metrics": [ + "sum_ci_feedback_times", + "total_builds", + ], + } + }, ] diff --git a/src/util/__init__.py b/src/util/__init__.py index 693be51..dd9e9c8 100644 --- a/src/util/__init__.py +++ b/src/util/__init__.py @@ -1,3 +1,4 @@ from .check import * # noqa: F401, F403 -from .constants import * # noqa: F401, F403 + +# from .constants import * # noqa: F401, F403 from .exceptions import * # noqa: F401, F403 diff --git a/src/util/check.py b/src/util/check.py index 3b60d43..3bd8afb 100644 --- a/src/util/check.py +++ b/src/util/check.py @@ -12,29 +12,41 @@ class Checker: @staticmethod - def check_non_complex_files_density_threshold(min_threshold: float, max_threshold: float): + def check_non_complex_files_density_threshold( + min_threshold: float, max_threshold: float + ): if min_threshold != 0: raise InvalidThresholdValue("min_threshold is not equal to 0") if min_threshold >= max_threshold: - raise InvalidThresholdValue("min_threshold is greater or equal to max_threshold") + raise InvalidThresholdValue( + "min_threshold is greater or equal to max_threshold" + ) @staticmethod - def check_comment_files_density_threshold(min_threshold: float, max_threshold: float): + def check_comment_files_density_threshold( + min_threshold: float, max_threshold: float + ): if min_threshold < 0: raise InvalidThresholdValue("min_threshold is lesser than 0") if min_threshold >= max_threshold: - raise InvalidThresholdValue("min_threshold is greater or equal to max_threshold") + raise InvalidThresholdValue( + "min_threshold is greater or equal to max_threshold" + ) if max_threshold > 100: raise InvalidThresholdValue("max_threshold is greater than 100") @staticmethod - def check_absence_of_duplications_threshold(min_threshold: float, max_threshold: float): + def check_absence_of_duplications_threshold( + min_threshold: float, max_threshold: float + ): if min_threshold != 0: raise InvalidThresholdValue("min_threshold is not equal to 0") if min_threshold >= max_threshold: - raise InvalidThresholdValue("min_threshold is greater or equal to max_threshold") + raise InvalidThresholdValue( + "min_threshold is greater or equal to max_threshold" + ) if max_threshold > 100: raise InvalidThresholdValue("max_threshold is greater than 100") @@ -45,7 +57,9 @@ def check_test_coverage_threshold(min_threshold, max_threshold): raise InvalidThresholdValue("min_threshold is lesser than 0") if min_threshold >= max_threshold: - raise InvalidThresholdValue("min_threshold is greater or equal to max_threshold") + raise InvalidThresholdValue( + "min_threshold is greater or equal to max_threshold" + ) if max_threshold != 100: raise InvalidThresholdValue("max_threshold is not equal to 100") @@ -56,7 +70,9 @@ def check_fast_test_builds_threshold(min_threshold: float, max_threshold: float) raise InvalidThresholdValue(("min_threshold is not equal to 0")) if min_threshold >= max_threshold: - raise InvalidThresholdValue("min_threshold is greater or equal to max_threshold") + raise InvalidThresholdValue( + "min_threshold is greater or equal to max_threshold" + ) @staticmethod def check_passed_tests_threshold(min_threshold: float, max_threshold: float): @@ -98,3 
+114,19 @@ def check_aggregated_weighted_values(values, weights): raise ValuesAndWeightsOfDifferentSizes( "The length of weight and values are not equal", ) + + @staticmethod + def check_team_throughput_threshold(min_threshold: float, max_threshold: float): + if min_threshold != 45: + raise InvalidThresholdValue("min_threshold is not equal to 45") + + if max_threshold != 100: + raise InvalidThresholdValue("max_threshold is not equal to 100") + + @staticmethod + def check_ci_feedback_time_threshold(min_threshold: float, max_threshold: float): + if min_threshold != 1: + raise InvalidThresholdValue("min_threshold is not equal to 1") + + if max_threshold != 900: + raise InvalidThresholdValue("max_threshold is not equal to 900") diff --git a/tests/unit/test_analysis.py b/tests/unit/test_analysis.py index 9d30c99..f4af116 100644 --- a/tests/unit/test_analysis.py +++ b/tests/unit/test_analysis.py @@ -32,30 +32,40 @@ def test_calculate_measures_success(): measures_expected = CALCULATE_MEASURES_RESULT_DATA.get("measures") for measure_result, measure_expected in zip(measures_result, measures_expected): assert measure_result.get("key") == measure_expected.get("key") - assert pytest.approx(measure_result.get("value")) == measure_expected.get("value") + assert pytest.approx(measure_result.get("value")) == measure_expected.get( + "value" + ) @pytest.mark.parametrize( - "extracted_measure_data,error_msg", + "extracted_measures_data, error_msg", CALCULATE_MEASURES_ERROR_INFOS, ) -def test_calcula_measures_errors(extracted_measure_data, error_msg): +def test_calculate_measures_errors(extracted_measures_data, error_msg): with pytest.raises((ValidationError, MeasureKeyNotSupported)) as error: - calculate_measures(extracted_measures=extracted_measure_data) - assert str(error.value) == error_msg + calculate_measures(extracted_measures=extracted_measures_data) + assert error_msg in str(error.value) def test_calculate_subcharacteristics_sucess(): - calculation_result = calculate_subcharacteristics(extracted_subcharacteristics=EXTRACTED_SUBCHARACTERISTICS_DATA) + calculation_result = calculate_subcharacteristics( + extracted_subcharacteristics=EXTRACTED_SUBCHARACTERISTICS_DATA + ) assert "subcharacteristics" in calculation_result subcharacteristics_result = calculation_result.get("subcharacteristics") - subcharacteristics_expected = CALCULATE_SUBCHARACTERISTICS_RESULT_DATA.get("subcharacteristics") + subcharacteristics_expected = CALCULATE_SUBCHARACTERISTICS_RESULT_DATA.get( + "subcharacteristics" + ) for subcharacteristic_result, subcharacteristic_expected in zip( subcharacteristics_result, subcharacteristics_expected ): - assert subcharacteristic_result.get("key") == subcharacteristic_expected.get("key") - assert pytest.approx(subcharacteristic_result.get("value")) == subcharacteristic_expected.get("value") + assert subcharacteristic_result.get("key") == subcharacteristic_expected.get( + "key" + ) + assert pytest.approx( + subcharacteristic_result.get("value") + ) == subcharacteristic_expected.get("value") @pytest.mark.parametrize( @@ -64,19 +74,29 @@ def test_calculate_subcharacteristics_sucess(): ) def test_calcula_subcharacteristics_errors(extracted_subcharacteristcs_data, error_msg): with pytest.raises((ValidationError, MeasureKeyNotSupported)) as error: - calculate_subcharacteristics(extracted_subcharacteristics=extracted_subcharacteristcs_data) + calculate_subcharacteristics( + extracted_subcharacteristics=extracted_subcharacteristcs_data + ) assert str(error.value) == error_msg def 
test_calculate_characteristics_success(): - calculation_result = calculate_characteristics(extracted_characteristics=EXTRACTED_CHARACTERISTICS_DATA) + calculation_result = calculate_characteristics( + extracted_characteristics=EXTRACTED_CHARACTERISTICS_DATA + ) assert "characteristics" in calculation_result characteristics_result = calculation_result.get("characteristics") - characteristics_expected = CALCULATE_CHARACTERISTICS_RESULT_DATA.get("characteristics") - for characteristic_result, characteristic_expected in zip(characteristics_result, characteristics_expected): + characteristics_expected = CALCULATE_CHARACTERISTICS_RESULT_DATA.get( + "characteristics" + ) + for characteristic_result, characteristic_expected in zip( + characteristics_result, characteristics_expected + ): assert characteristic_result.get("key") == characteristic_expected.get("key") - assert pytest.approx(characteristic_result.get("value")) == characteristic_expected.get("value") + assert pytest.approx( + characteristic_result.get("value") + ) == characteristic_expected.get("value") @pytest.mark.parametrize( @@ -85,7 +105,9 @@ def test_calculate_characteristics_success(): ) def test_calcula_characteristics_errors(extracted_characteristics_data, error_msg): with pytest.raises((ValidationError, MeasureKeyNotSupported)) as error: - calculate_characteristics(extracted_characteristics=extracted_characteristics_data) + calculate_characteristics( + extracted_characteristics=extracted_characteristics_data + ) assert str(error.value) == error_msg diff --git a/tests/unit/test_schemas.py b/tests/unit/test_schemas.py new file mode 100644 index 0000000..06fe14e --- /dev/null +++ b/tests/unit/test_schemas.py @@ -0,0 +1,80 @@ +import pytest +from marshmallow.exceptions import ValidationError + +from core.schemas import ( + NonComplexFileDensitySchema, + CommentedFileDensitySchema, + DuplicationAbsenceSchema, + PassedTestsSchema, + TestBuildsSchema, + TestCoverageSchema, + CIFeedbackTimeSchema, + TeamThroughputSchema, +) + +from tests.utils.schemas_data import ( + NON_COMPLEX_FILES_DENSITY_METRICS_DATA, + COMMENTED_FILE_DENSITY_METRICS_DATA, + DUPLICATION_ABSENCE_METRICS_DATA, + PASSED_TESTS_METRICS_DATA, + TEST_BUILDS_METRICS_DATA, + TEST_COVERAGE_METRICS_DATA, + CI_FEEDBACK_TIME_METRICS_DATA, + TEAM_THROUGHPUT_METRICS_DATA, +) + + +def test_non_complex_file_density_schema_validation(): + try: + NonComplexFileDensitySchema().load(NON_COMPLEX_FILES_DENSITY_METRICS_DATA) + except ValidationError as e: + pytest.fail(f"Unexpected error: {e}") + + +def test_commented_file_density_schema_validation(): + try: + CommentedFileDensitySchema().load(COMMENTED_FILE_DENSITY_METRICS_DATA) + except ValidationError as e: + pytest.fail(f"Unexpected error: {e}") + + +def test_duplication_absence_schema_validation(): + try: + DuplicationAbsenceSchema().load(DUPLICATION_ABSENCE_METRICS_DATA) + except ValidationError as e: + pytest.fail(f"Unexpected error: {e}") + + +def test_passed_tests_schema_validation(): + try: + PassedTestsSchema().load(PASSED_TESTS_METRICS_DATA) + except ValidationError as e: + pytest.fail(f"Unexpected error: {e}") + + +def test_test_builds_schema_validation(): + try: + TestBuildsSchema().load(TEST_BUILDS_METRICS_DATA) + except ValidationError as e: + pytest.fail(f"Unexpected error: {e}") + + +def test_test_coverage_schema_validation(): + try: + TestCoverageSchema().load(TEST_COVERAGE_METRICS_DATA) + except ValidationError as e: + pytest.fail(f"Unexpected error: {e}") + + +def test_ci_feedback_time_schema_validation(): + try: + 
CIFeedbackTimeSchema().load(CI_FEEDBACK_TIME_METRICS_DATA) + except ValidationError as e: + pytest.fail(f"Unexpected error: {e}") + + +def test_team_throughput_schema_validation(): + try: + TeamThroughputSchema().load(TEAM_THROUGHPUT_METRICS_DATA) + except ValidationError as e: + pytest.fail(f"Unexpected error: {e}") diff --git a/tests/utils/aggregated_normalized_measures_data.py b/tests/utils/aggregated_normalized_measures_data.py index 5998fce..24c8931 100644 --- a/tests/utils/aggregated_normalized_measures_data.py +++ b/tests/utils/aggregated_normalized_measures_data.py @@ -1,11 +1,13 @@ -from src.core.aggregated_normalized_measures import ( +from core.aggregated_normalized_measures import ( absence_of_duplications, commented_files_density, fast_test_builds, non_complex_files_density, passed_tests, ) -from src.core.aggregated_normalized_measures import test_coverage as interpret_test_coverage +from core.aggregated_normalized_measures import ( + test_coverage as interpret_test_coverage, +) INVALID_METRICS_TEST_DATA = [ ( diff --git a/tests/utils/analysis_data.py b/tests/utils/analysis_data.py index 337b869..1811937 100644 --- a/tests/utils/analysis_data.py +++ b/tests/utils/analysis_data.py @@ -2,316 +2,235 @@ "measures": [ { "key": "passed_tests", - "parameters": { - "tests": [ - 3.0, - 1.0, - 6.0, - 3.0, - 17.0, - 2.0, - 2.0, - 7.0, - 1.0, - 1.0, - 1.0, - 3.0, - 2.0, - 3.0, - 23.0, - 2.0, - 3.0, - 4.0, - 7.0, - 2.0, - ], - "test_failures": 0.0, - "test_errors": 0.0, - }, + "metrics": [ + { + "key": "tests", + "value": [ + 3.0, + 1.0, + 6.0, + 3.0, + 17.0, + 2.0, + 2.0, + 7.0, + 1.0, + 1.0, + 1.0, + 3.0, + 2.0, + 3.0, + 23.0, + 2.0, + 3.0, + 4.0, + 7.0, + 2.0, + ], + }, + {"key": "test_failures", "value": [0.0]}, + {"key": "test_errors", "value": [0.0]}, + ], }, { "key": "test_builds", - "parameters": { - "test_execution_time": [ - 6.0, - 2.0, - 18.0, - 6.0, - 17.0, - 969.0, - 4.0, - 9.0, - 1.0, - 961.0, - 476.0, - 5.0, - 954.0, - 3.0, - 24.0, - 23.0, - 3.0, - 7.0, - 47.0, - 5.0, - ], - "tests": [ - 3.0, - 1.0, - 6.0, - 3.0, - 17.0, - 2.0, - 2.0, - 7.0, - 1.0, - 1.0, - 1.0, - 3.0, - 2.0, - 3.0, - 23.0, - 2.0, - 3.0, - 4.0, - 7.0, - 2.0, - ], - }, + "metrics": [ + { + "key": "test_execution_time", + "value": [ + 6.0, + 2.0, + 18.0, + 6.0, + 17.0, + 969.0, + 4.0, + 9.0, + 1.0, + 961.0, + 476.0, + 5.0, + 954.0, + 3.0, + 24.0, + 23.0, + 3.0, + 7.0, + 47.0, + 5.0, + ], + }, + { + "key": "tests", + "value": [ + 3.0, + 1.0, + 6.0, + 3.0, + 17.0, + 2.0, + 2.0, + 7.0, + 1.0, + 1.0, + 1.0, + 3.0, + 2.0, + 3.0, + 23.0, + 2.0, + 3.0, + 4.0, + 7.0, + 2.0, + ], + }, + ], }, { "key": "test_coverage", - "parameters": { - "coverage": [ - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 44.4, - 23.5, - 100.0, - 100.0, - 50.0, - 77.8, - 100.0, - 100.0, - 91.4, - 100.0, - 43.8, - 0.0, - 55.6, - 26.7, - 100.0, - 100.0, - 100.0, - 0.0, - 100.0, - 64.7, - 86.6, - 0.0, - 61.9, - 91.8, - 94.4, - 82.5, - 13.3, - ] - }, + "metrics": [ + { + "key": "coverage", + "value": [ + 90.7, + 71.4, + 44.2, + 18.4, + 60.8, + 4.6, + 5.3, + 70.1, + 70.7, + 22.4, + 86.0, + 16.0, + 20.5, + 89.1, + 21.2, + 43.9, + 25.8, + 26.3, + 93.2, + 95.4, + ], + } + ], }, { "key": "non_complex_file_density", - "parameters": { - "functions": [ - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 3.0, - 3.0, - 2.0, - 1.0, - 1.0, - 0.0, - 8.0, - 10.0, - 1.0, - 1.0, - 1.0, - 1.0, - 1.0, - 3.0, - 1.0, - 3.0, - 1.0, - 1.0, - 1.0, - 1.0, - 3.0, - 7.0, - 7.0, - 2.0, - 4.0, - 4.0, - 14.0, - 
6.0, - 3.0, - ], - "complexity": [ - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 4.0, - 4.0, - 6.0, - 1.0, - 2.0, - 0.0, - 14.0, - 23.0, - 1.0, - 6.0, - 2.0, - 2.0, - 1.0, - 11.0, - 5.0, - 15.0, - 1.0, - 4.0, - 5.0, - 1.0, - 13.0, - 13.0, - 19.0, - 3.0, - 7.0, - 11.0, - 51.0, - 19.0, - 10.0, - ], - }, + "metrics": [ + { + "key": "complexity", + "value": [ + 49.3, + 57.8, + 32.1, + 63.5, + 91.2, + 82.5, + 68.7, + 14.2, + 79.4, + 56.1, + 43.0, + 85.7, + 73.6, + 20.4, + 9.5, + 61.0, + 36.7, + 28.8, + 53.5, + 47.7, + ], + }, + { + "key": "functions", + "value": [ + 62.7, + 95.1, + 38.4, + 71.5, + 18.9, + 84.9, + 29.4, + 55.3, + 47.5, + 89.6, + 53.8, + 26.0, + 12.7, + 68.9, + 40.5, + 79.8, + 90.2, + 37.6, + 64.3, + 51.4, + ], + }, + ], }, { "key": "commented_file_density", - "parameters": { - "comment_lines_density": [ - 0.0, - 0.0, - 0.0, - 5.7, - 2.1, - 4.8, - 0.0, - 0.0, - 38.1, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 8.5, - 0.0, - 5.9, - 0.0, - 0.0, - 0.0, - 6.4, - 0.0, - 1.4, - 14.8, - 16.7, - 35.3, - 0.0, - 0.0, - 0.0, - 0.0, - ] - }, + "metrics": [ + { + "key": "comment_lines_density", + "value": [ + 31.2, + 73.4, + 88.5, + 42.1, + 59.7, + 14.6, + 67.2, + 25.8, + 96.1, + 50.9, + 85.2, + 39.7, + 18.3, + 71.0, + 57.6, + 49.0, + 23.9, + 62.5, + 37.4, + 81.8, + ], + } + ], }, { "key": "duplication_absense", - "parameters": { - "duplicated_lines_density": [ - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - ] - }, + "metrics": [ + { + "key": "duplicated_lines_density", + "value": [ + 77.2, + 44.9, + 59.6, + 33.8, + 20.7, + 92.5, + 68.3, + 51.0, + 86.4, + 37.6, + 64.9, + 12.8, + 58.0, + 43.5, + 75.1, + 28.1, + 95.0, + 80.7, + 15.9, + 70.5, + ], + } + ], }, ] } @@ -320,33 +239,43 @@ "measures": [ {"key": "passed_tests", "value": 1.0}, {"key": "test_builds", "value": 0.9995933399758454}, - {"key": "test_coverage", "value": 0.414921875}, - {"key": "non_complex_file_density", "value": 0.43738095238095254}, - {"key": "commented_file_density", "value": 0.04453125}, - {"key": "duplication_absense", "value": 1.0}, + {"key": "test_coverage", "value": 0.23425}, + {"key": "non_complex_file_density", "value": 0.8603745807930887}, + {"key": "commented_file_density", "value": 0.0935}, + {"key": "duplication_absense", "value": 0.0}, ] } - CALCULATE_MEASURES_ERROR_INFOS = [ ( {"measures": None}, "error: Failed to validate input.\nschema_errors: {'measures': ['Field may not be null.']}", ), ( - {"measures": [{"key": "inexistent", "parameters": {"inexistent_v2": [1]}}]}, + { + "measures": [ + { + "key": "inexistent", + "metrics": [ + {"key": "inexistent_v2", "value": [1]}, + ], + } + ] + }, "Measure inexistent is not supported", ), ( { "measures": [ { - "key": "duplication_absense", - "parameters": {"duplicated_lines_density": [None]}, + "key": "passed_tests", + "metrics": [ + {"key": "inexistent_v3", "value": [1]}, + ], } ] }, - "error: Metric parameters duplication_absense are not valid.\nschema_errors: {'duplicated_lines_density': {0: ['Field may not be null.']}}", # noqa E501 + "error: Metrics in passed_tests are not valid.", ), ] @@ -374,12 +303,14 @@ }, ] } + CALCULATE_SUBCHARACTERISTICS_RESULT_DATA = { "subcharacteristics": [ {"key": 
"testing_status", "value": 0.8507086078793112}, {"key": "modifiability", "value": 0.6642882099299446}, ] } + CALCULATE_SUBCHARACTERISTICS_ERROR_INFOS = [ ( {"subcharacteristics": None}, @@ -391,20 +322,26 @@ "characteristics": [ { "key": "reliability", - "subcharacteristics": [{"key": "testing_status", "value": 0.8507086078793112, "weight": 100}], + "subcharacteristics": [ + {"key": "testing_status", "value": 0.8507086078793112, "weight": 100} + ], }, { "key": "maintainability", - "subcharacteristics": [{"key": "modifiability", "value": 0.6642882099299446, "weight": 100}], + "subcharacteristics": [ + {"key": "modifiability", "value": 0.6642882099299446, "weight": 100} + ], }, ] } + CALCULATE_CHARACTERISTICS_RESULT_DATA = { "characteristics": [ {"key": "reliability", "value": 0.8507086078793112}, {"key": "maintainability", "value": 0.6642882099299446}, ] } + CALCULATE_CHARACTERISTICS_ERROR_INFOS = [ ( {"characteristics": None}, @@ -421,7 +358,9 @@ ], } } + CALCULATE_TSQMI_RESULT_DATA = {"tsqmi": [{"key": "tsqmi", "value": 0.7632116224782893}]} + CALCULATE_TSQMI_ERROR_INFOS = [ ( {"tsqmi": None}, diff --git a/tests/utils/integration_data.py b/tests/utils/integration_data.py index 9e3bde5..7c966be 100644 --- a/tests/utils/integration_data.py +++ b/tests/utils/integration_data.py @@ -197,12 +197,12 @@ "measure_key": "duplication_absense", }, { - "key": "number_of_resolved_issues_with_US_label_in_the_last_x_days", + "key": "resolved_issues", "value": 1.0, "measure_key": "team_throughput", }, { - "key": "total_number_of_issues_with_US_label_in_the_last_x_days", + "key": "total_issues", "value": 29.0, "measure_key": "team_throughput", }, diff --git a/tests/utils/schemas_data.py b/tests/utils/schemas_data.py new file mode 100644 index 0000000..7c8f2c9 --- /dev/null +++ b/tests/utils/schemas_data.py @@ -0,0 +1,53 @@ +NON_COMPLEX_FILES_DENSITY_METRICS_DATA = { + "metrics": [ + {"key": "complexity", "value": [0.7]}, + {"key": "functions", "value": [0.5]}, + ] +} + +COMMENTED_FILE_DENSITY_METRICS_DATA = { + "metrics": [ + {"key": "comment_lines_density", "value": [0.5]}, + ] +} + +DUPLICATION_ABSENCE_METRICS_DATA = { + "metrics": [ + {"key": "duplicated_lines_density", "value": [0.1]}, + ] +} + +PASSED_TESTS_METRICS_DATA = { + "metrics": [ + {"key": "tests", "value": [10.0]}, + {"key": "test_failures", "value": [1.0]}, + {"key": "test_errors", "value": [0.0]}, + ] +} + +TEST_BUILDS_METRICS_DATA = { + "metrics": [ + {"key": "test_execution_time", "value": [8.0]}, + {"key": "tests", "value": [10.0]}, + ] +} + +TEST_COVERAGE_METRICS_DATA = { + "metrics": [ + {"key": "coverage", "value": [0.75]}, + ] +} + +CI_FEEDBACK_TIME_METRICS_DATA = { + "metrics": [ + {"key": "sum_ci_feedback_times", "value": [15.0]}, + {"key": "total_builds", "value": [20.0]}, + ] +} + +TEAM_THROUGHPUT_METRICS_DATA = { + "metrics": [ + {"key": "resolved_issues", "value": [15.0]}, + {"key": "total_issues", "value": [20.0]}, + ] +}