From aa7cbe8f0ee6c301001d746801b5182df2b2d8fa Mon Sep 17 00:00:00 2001 From: yuxuan-ms Date: Mon, 2 Dec 2024 16:12:47 +0800 Subject: [PATCH 1/5] Change the default name function of parameters testcase. --- doc/newsfragments/3160_changed.case_name.rst | 1 + testplan/common/utils/reporting.py | 2 +- testplan/testing/multitest/parametrization.py | 41 ++++++++++++++++--- testplan/testing/multitest/suite.py | 8 ++-- .../testing/multitest/test_parametrization.py | 19 ++++----- 5 files changed, 51 insertions(+), 20 deletions(-) create mode 100755 doc/newsfragments/3160_changed.case_name.rst diff --git a/doc/newsfragments/3160_changed.case_name.rst b/doc/newsfragments/3160_changed.case_name.rst new file mode 100755 index 000000000..4d3d5e121 --- /dev/null +++ b/doc/newsfragments/3160_changed.case_name.rst @@ -0,0 +1 @@ +Limit the length of parameterization testcase name to 255 characters. If the name length exceeds 255 characters, index-suffixed names (e.g., ``{func_name} 1``, ``{func_name} 2``) will be used. \ No newline at end of file diff --git a/testplan/common/utils/reporting.py b/testplan/common/utils/reporting.py index 12dd069e3..6ae133564 100644 --- a/testplan/common/utils/reporting.py +++ b/testplan/common/utils/reporting.py @@ -26,7 +26,7 @@ def __new__(cls): cls.__instance = object.__new__(cls) return cls.__instance - def __str__(self): + def __repr__(self): return self.descr diff --git a/testplan/testing/multitest/parametrization.py b/testplan/testing/multitest/parametrization.py index 528e8de93..3337338f3 100644 --- a/testplan/testing/multitest/parametrization.py +++ b/testplan/testing/multitest/parametrization.py @@ -9,6 +9,7 @@ from testplan.common.utils import callable as callable_utils from testplan.common.utils import convert, interface from testplan.testing import tagging +from typing import Callable, Optional # Although any string will be processed as normal, it's a good # approach to warn the user if the generated method name is not a @@ -206,8 +207,11 @@ def _generated(self, env, result): ) # Users request the feature that when `name_func` set to `None`, # then simply append integer suffixes to the names of testcases - _generated.name = ( - name_func(name, kwargs) if name_func is not None else f"{name} {idx}" + _generated.name = _parametrization_report_name_func_wrapper( + name_func=name_func, + name=name, + kwargs=kwargs, + index=idx, ) if hasattr(function, "__xfail__"): @@ -267,10 +271,11 @@ def _check_tag_func(tag_func): ) -def _parametrization_name_func_wrapper(func_name, kwargs): +def _parametrization_name_func_wrapper(func_name: str, kwargs: dict): """ Make sure that name generation doesn't end up with invalid / unreadable - attribute names/types etc. + attribute names/types etc. The return value can be used as a + method __name__. If somehow a 'bad' function name is generated, will just return the original ``func_name`` instead (which will later on be suffixed with an @@ -291,6 +296,32 @@ def _parametrization_name_func_wrapper(func_name, kwargs): return generated_name +def _parametrization_report_name_func_wrapper( + name_func: Optional[Callable], name: str, kwargs: dict, index: int +): + """ + Make sure that generated name is not too long, + if it is, then use index suffixed names e.g. "{func_name} 1", "{func_name} 2", will be used. + + The return value is used for reporting purposes, it is not used as a method __name__. 
+ """ + if name_func: + generated_name = name_func(name, kwargs) + if not isinstance(generated_name, str): + raise ValueError( + "The return value of name_func must be a string, " + f"it is of type: {type(generated_name)}, value: {generated_name}" + ) + if len(generated_name) <= MAX_METHOD_NAME_LENGTH: + return generated_name + else: + warnings.warn( + f"The name name_func returned ({generated_name}) is too long, using index suffixed names." + ) + + return f"{name} {index}" + + def parametrization_name_func(func_name, kwargs): """ Method name generator for parametrized testcases. @@ -321,7 +352,7 @@ def default_name_func(func_name, kwargs): >>> import collections >>> default_name_func('Test Method', collections.OrderedDict(('foo', 5), ('bar', 10))) - 'Test Method {foo:5, bar:10}' + 'Test Method ' :param func_name: Name of the parametrization target function. :type func_name: ``str`` diff --git a/testplan/testing/multitest/suite.py b/testplan/testing/multitest/suite.py index 9d03ee7c5..7f276cca1 100644 --- a/testplan/testing/multitest/suite.py +++ b/testplan/testing/multitest/suite.py @@ -580,10 +580,10 @@ def _validate_testcase(func): raise exc if len(func.name) > defaults.MAX_TEST_NAME_LENGTH: - warnings.warn( - 'Name defined for testcase "{}" is too long,' - ' consider customizing testcase name with argument "name_func"' - " in @testcase decorator.".format(func.__name__) + raise ValueError( + f'Name defined for testcase "{func.name}" is longer than {defaults.MAX_TEST_NAME_LENGTH},' + ' consider customizing testcase name with argument "name"' + " in @testcase decorator." ) diff --git a/tests/functional/testplan/testing/multitest/test_parametrization.py b/tests/functional/testplan/testing/multitest/test_parametrization.py index 4ce973faf..34c2b5e0c 100644 --- a/tests/functional/testplan/testing/multitest/test_parametrization.py +++ b/tests/functional/testplan/testing/multitest/test_parametrization.py @@ -1,11 +1,9 @@ import sys import logging -from contextlib import contextmanager from unittest import mock -from imp import reload +from importlib import reload import pytest - from testplan.defaults import MAX_TEST_NAME_LENGTH from testplan.testing.multitest import MultiTest, testsuite, testcase from testplan.testing.multitest.parametrization import ( @@ -24,8 +22,8 @@ LOGGER = logging.getLogger() -@contextmanager -def module_reloaded(mod): +@pytest.fixture() +def suite_reloaded(): """ If uncaught exception raised, Testplan process should abort. However, if the process is managed by PyTest for testing purpose, then the @@ -33,6 +31,7 @@ def module_reloaded(mod): modules still exist in memory, some global variables need to be reset. 
""" yield + mod = "testplan.testing.multitest.suite" if mod in sys.modules: reload(sys.modules[mod]) @@ -214,7 +213,7 @@ def test_sample(self, env, result, a, b): ), ), ) -def test_invalid_parametrization(val, msg): +def test_invalid_parametrization(suite_reloaded, val, msg): """Correct arguments should be passed to parametrized testcases.""" with pytest.raises(ParametrizationError): @@ -227,7 +226,7 @@ def sample_test(self, env, result, a, b, c=3): pytest.fail(msg) -def test_duplicate_parametrization_template_definition(): +def test_duplicate_parametrization_template_definition(suite_reloaded): """No duplicate name of testcase or parametrization template allowed.""" with pytest.raises(ValueError): @@ -305,8 +304,8 @@ def sample(self, env, result, test__val): ( ("a" * MAX_METHOD_NAME_LENGTH, "b" * MAX_METHOD_NAME_LENGTH), [ - "sample_test ".format("a" * MAX_METHOD_NAME_LENGTH), - "sample_test ".format("b" * MAX_METHOD_NAME_LENGTH), + "sample_test 0", + "sample_test 1", ], ["sample_test__0", "sample_test__1"], "Should use original method name + index fallback if" @@ -451,7 +450,7 @@ def sample_test(self, env, result, a, b): ), ), ) -def test_invalid_name_func(name_func, msg, err): +def test_invalid_name_func(suite_reloaded, name_func, msg, err): """Custom naming function should be correctly defined.""" with pytest.raises(err): From f626618d964f8126f80c69caf1fe9ddd84b12c35 Mon Sep 17 00:00:00 2001 From: Zhenyu Yao <111329301+zhenyu-ms@users.noreply.github.com> Date: Thu, 5 Dec 2024 18:26:09 +0800 Subject: [PATCH 2/5] Revert "Refactor/pick a faster json lib (#1152)" This reverts commit c3e5747cb4bcf633a35d6591a1b0503b64a89d1e. --- .../3147_changed.another_json_lib.rst | 1 - examples/ExecutionPools/Discover/test_plan.py | 3 +- pyproject.toml | 4 +- testplan/common/report/base.py | 98 ++++---- testplan/common/report/schemas.py | 24 +- testplan/common/utils/json.py | 43 ---- testplan/exporters/testing/http/__init__.py | 11 +- testplan/exporters/testing/json/base.py | 14 +- testplan/importers/testplan.py | 8 +- testplan/monitor/resource.py | 10 +- testplan/parser.py | 20 +- testplan/report/testing/base.py | 52 ++++- testplan/report/testing/schemas.py | 31 +-- testplan/runnable/interactive/http.py | 6 - testplan/testing/cpp/hobbestest.py | 4 +- testplan/testing/listing.py | 3 +- .../AssertionTypes/basicAssertionUtils.js | 66 ++++-- .../testplan/exporters/testing/test_json.py | 19 +- .../unit/testplan/common/report/test_base.py | 161 +------------ tests/unit/testplan/report/test_testing.py | 212 +++++++++++++++--- 20 files changed, 385 insertions(+), 405 deletions(-) delete mode 100644 doc/newsfragments/3147_changed.another_json_lib.rst delete mode 100644 testplan/common/utils/json.py diff --git a/doc/newsfragments/3147_changed.another_json_lib.rst b/doc/newsfragments/3147_changed.another_json_lib.rst deleted file mode 100644 index 5104d1c73..000000000 --- a/doc/newsfragments/3147_changed.another_json_lib.rst +++ /dev/null @@ -1 +0,0 @@ -Use a new JSON library ``orjson`` to improve performance when using Python 3.8 or later versions. 
\ No newline at end of file diff --git a/examples/ExecutionPools/Discover/test_plan.py b/examples/ExecutionPools/Discover/test_plan.py index 37ffaa3ac..ce9107883 100755 --- a/examples/ExecutionPools/Discover/test_plan.py +++ b/examples/ExecutionPools/Discover/test_plan.py @@ -45,7 +45,6 @@ def main(plan): if __name__ == "__main__": res = main() - if res.report.entries: - assert len(res.report.entries) == 5 + assert len(res.report.entries) == 5 print("Exiting code: {}".format(res.exit_code)) sys.exit(res.exit_code) diff --git a/pyproject.toml b/pyproject.toml index 5e179d852..f0c3295c0 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -66,9 +66,7 @@ "typing_extensions", "dill", "gherkin-official==4.1.3", - "parse", - "orjson; python_version>='3.8'", - "flask-orjson; python_version>='3.8'" + "parse" ] requires-python = ">=3.7" diff --git a/testplan/common/report/base.py b/testplan/common/report/base.py index 132cd12aa..a50c3f5f5 100644 --- a/testplan/common/report/base.py +++ b/testplan/common/report/base.py @@ -263,8 +263,8 @@ def __init__( uid: Optional[str] = None, entries: Optional[list] = None, parent_uids: Optional[List[str]] = None, - status_override: Optional[Status] = None, - status_reason: Optional[str] = None, + status_override=None, + status_reason=None, ): self.name = name self.description = description @@ -433,56 +433,6 @@ def is_empty(self) -> bool: """ return len(self.entries) == len(self.logs) == 0 - @property - def passed(self) -> bool: - """Shortcut for getting if report status should be considered passed.""" - return self.status.normalised() == Status.PASSED - - @property - def failed(self) -> bool: - """ - Shortcut for checking if report status should be considered failed. - """ - return self.status <= Status.FAILED - - @property - def unstable(self) -> bool: - """ - Shortcut for checking if report status should be considered unstable. - """ - return self.status.normalised() == Status.UNSTABLE - - @property - def unknown(self) -> bool: - """ - Shortcut for checking if report status is unknown. - """ - return self.status.normalised() == Status.UNKNOWN - - @property - def status(self) -> Status: - """Return the report status.""" - if self.status_override: - return self.status_override - return self._status - - @status.setter - def status(self, new_status: Status): - self._status = new_status - - @property - def runtime_status(self) -> RuntimeStatus: - """ - Used for interactive mode, the runtime status of a testcase will be one - of ``RuntimeStatus``. - """ - return self._runtime_status - - @runtime_status.setter - def runtime_status(self, new_status: RuntimeStatus): - """Set the runtime status.""" - self._runtime_status = new_status - @property def hash(self): """Return a hash of all entries in this report.""" @@ -518,8 +468,34 @@ def __init__(self, name, **kwargs): for child in self.entries: self.set_parent_uids(child) - @Report.status.getter - def status(self) -> Status: + @property + def passed(self): + """Shortcut for getting if report status should be considered passed.""" + return self.status.normalised() == Status.PASSED + + @property + def failed(self): + """ + Shortcut for checking if report status should be considered failed. + """ + return self.status <= Status.FAILED + + @property + def unstable(self): + """ + Shortcut for checking if report status should be considered unstable. + """ + return self.status.normalised() == Status.UNSTABLE + + @property + def unknown(self): + """ + Shortcut for checking if report status is unknown. 
+ """ + return self.status.normalised() == Status.UNKNOWN + + @property + def status(self): """ Status of the report, will be used to decide if a Testplan run has completed successfully or not. @@ -537,8 +513,12 @@ def status(self) -> Status: return self._status + @status.setter + def status(self, new_status): + self._status = new_status + @property - def runtime_status(self) -> RuntimeStatus: + def runtime_status(self): """ The runtime status is used for interactive running, and reports whether a particular entry is READY, WAITING, RUNNING, RESETTING, @@ -554,7 +534,7 @@ def runtime_status(self) -> RuntimeStatus: return self._runtime_status @runtime_status.setter - def runtime_status(self, new_status: RuntimeStatus): + def runtime_status(self, new_status): """Set the runtime_status of all child entries.""" for entry in self: if entry.category != ReportCategories.SYNTHESIZED: @@ -655,11 +635,11 @@ def remove_by_uid(self, uid): __delitem__ = remove_by_uid - def pre_order_iterate(self): + def pre_order_reports(self): yield self for e in self: if isinstance(e, BaseReportGroup): - yield from e.pre_order_iterate() + yield from e.pre_order_reports() elif isinstance(e, Report): yield e @@ -981,7 +961,7 @@ def set_runtime_status_filtered( ) -> None: """ Alternative setter for the runtime status of an entry. Propagates only - to the specified entries. + to the specified entries. :param new_status: new runtime status to be set :param entries: tree-like structure of entries names diff --git a/testplan/common/report/schemas.py b/testplan/common/report/schemas.py index 2e10db426..8f96657b0 100644 --- a/testplan/common/report/schemas.py +++ b/testplan/common/report/schemas.py @@ -4,16 +4,12 @@ from marshmallow import Schema, fields, post_load from marshmallow.utils import EXCLUDE -from testplan.common.report.base import ( - BaseReportGroup, - Report, - RuntimeStatus, - Status, -) from testplan.common.serialization import fields as custom_fields from testplan.common.serialization import schemas from testplan.common.utils import timing +from .base import Report, BaseReportGroup, Status, RuntimeStatus + __all__ = ["ReportLogSchema", "ReportSchema", "BaseReportGroupSchema"] # pylint: disable=unused-argument @@ -95,14 +91,6 @@ class Meta: allow_none=True, ) status_reason = fields.String(allow_none=True) - status = fields.Function( - lambda x: x.status.to_json_compatible(), - Status.from_json_compatible, - ) - runtime_status = fields.Function( - lambda x: x.runtime_status.to_json_compatible(), - RuntimeStatus.from_json_compatible, - ) logs = fields.Nested(ReportLogSchema, many=True) hash = fields.Integer(dump_only=True) parent_uids = fields.List(fields.String()) @@ -139,6 +127,14 @@ class BaseReportGroupSchema(ReportSchema): }, many=True, ) + status = fields.Function( + lambda x: x.status.to_json_compatible(), + Status.from_json_compatible, + ) + runtime_status = fields.Function( + lambda x: x.runtime_status.to_json_compatible(), + RuntimeStatus.from_json_compatible, + ) counter = fields.Dict(dump_only=True) children = fields.List(fields.Nested(ReportLinkSchema)) diff --git a/testplan/common/utils/json.py b/testplan/common/utils/json.py deleted file mode 100644 index f18df7c96..000000000 --- a/testplan/common/utils/json.py +++ /dev/null @@ -1,43 +0,0 @@ -import json -from pathlib import Path -from typing import Union - -_USE_ORJSON = False - -try: - import orjson -except ImportError: - pass -else: - _USE_ORJSON = True - - -def json_loads(data: str): - if _USE_ORJSON: - return orjson.loads(data) - else: - 
return json.loads(data) - - -def json_dumps(data, indent_2=False, default=None) -> str: - if _USE_ORJSON: - return orjson.dumps( - data, - default=default, - option=orjson.OPT_INDENT_2 if indent_2 else 0, - ).decode() - else: - if default: - - class _E(json.JSONEncoder): - def default(self, o): - return default(o) - - else: - _E = None - return json.dumps(data, cls=_E, indent=2 if indent_2 else None) - - -def json_load_from_path(path: Union[str, Path]) -> dict: - with open(path) as fp: - return json_loads(fp.read()) diff --git a/testplan/exporters/testing/http/__init__.py b/testplan/exporters/testing/http/__init__.py index 299d688fd..7a3daa7e2 100644 --- a/testplan/exporters/testing/http/__init__.py +++ b/testplan/exporters/testing/http/__init__.py @@ -3,6 +3,7 @@ must be able to handle POST request and receive data in JSON format. """ +import json from typing import Any, Tuple, Union, Optional, Dict import requests @@ -14,13 +15,19 @@ ExportContext, verify_export_context, ) -from testplan.common.utils.json import json_dumps from testplan.common.utils.validation import is_valid_url from testplan.report import TestReport from testplan.report.testing.schemas import TestReportSchema from ..base import Exporter +class CustomJsonEncoder(json.JSONEncoder): + """To jsonify data that cannot be serialized by default JSONEncoder.""" + + def default(self, obj: Any) -> str: # pylint: disable = method-hidden + return str(obj) + + class HTTPExporterConfig(ExporterConfig): """ Configuration object for @@ -76,7 +83,7 @@ def _upload_report( response = requests.post( url=url, headers=headers, - data=json_dumps(data, default=str), + data=json.dumps(data, cls=CustomJsonEncoder), timeout=self.cfg.timeout, ) response.raise_for_status() diff --git a/testplan/exporters/testing/json/base.py b/testplan/exporters/testing/json/base.py index 308243038..54a2ce6c9 100644 --- a/testplan/exporters/testing/json/base.py +++ b/testplan/exporters/testing/json/base.py @@ -4,6 +4,7 @@ """ import hashlib +import json import os import pathlib @@ -16,7 +17,6 @@ ExportContext, verify_export_context, ) -from testplan.common.utils.json import json_dumps, json_loads from testplan.common.utils.path import makedirs from testplan.defaults import ATTACHMENTS, RESOURCE_DATA from testplan.report.testing.base import TestReport, TestCaseReport @@ -60,7 +60,7 @@ def save_resource_data( ) -> pathlib.Path: directory.mkdir(parents=True, exist_ok=True) with open(report.resource_meta_path) as meta_file: - meta_info = json_loads(meta_file.read()) + meta_info = json.load(meta_file) for host_meta in meta_info["entries"]: if "resource_file" in host_meta: dist_path = ( @@ -70,7 +70,7 @@ def save_resource_data( host_meta["resource_file"] = dist_path.name meta_path = directory / pathlib.Path(report.resource_meta_path).name with open(meta_path, "w") as meta_file: - meta_file.write(json_dumps(meta_info)) + json.dump(meta_info, meta_file) return meta_path @@ -172,9 +172,9 @@ def export( attachments_dir.mkdir(parents=True, exist_ok=True) with open(structure_filepath, "w") as json_file: - json_file.write(json_dumps(structure)) + json.dump(structure, json_file) with open(assertions_filepath, "w") as json_file: - json_file.write(json_dumps(assertions)) + json.dump(assertions, json_file) meta["attachments"] = save_attachments( report=source, directory=attachments_dir @@ -190,7 +190,7 @@ def export( meta["assertions_file"] = assertions_filename with open(json_path, "w") as json_file: - json_file.write(json_dumps(meta)) + json.dump(meta, json_file) else: 
data["attachments"] = save_attachments( report=source, directory=attachments_dir @@ -198,7 +198,7 @@ def export( data["version"] = 1 with open(json_path, "w") as json_file: - json_file.write(json_dumps(data)) + json.dump(data, json_file) self.logger.user_info("JSON generated at %s", json_path) result = {"json": self.cfg.json_path} diff --git a/testplan/importers/testplan.py b/testplan/importers/testplan.py index 1acaa9743..644fe93f7 100644 --- a/testplan/importers/testplan.py +++ b/testplan/importers/testplan.py @@ -1,11 +1,11 @@ """ Implements one-phase importer for Testplan JSON format. """ +import json from typing import List -from testplan.common.utils.json import json_loads -from testplan.importers import ImportedResult, ResultImporter -from testplan.report import ReportCategories, TestGroupReport, TestReport +from testplan.importers import ResultImporter, ImportedResult +from testplan.report import TestGroupReport, TestReport, ReportCategories from testplan.report.testing.schemas import TestReportSchema @@ -41,7 +41,7 @@ def __init__(self, path: str): def import_result(self) -> ImportedResult: """ """ with open(self.path) as fp: - result_json = json_loads(fp.read()) + result_json = json.load(fp) result = self.schema.load(result_json) return TestplanImportedResult(result) diff --git a/testplan/monitor/resource.py b/testplan/monitor/resource.py index 27715e659..32e9866f9 100755 --- a/testplan/monitor/resource.py +++ b/testplan/monitor/resource.py @@ -1,6 +1,7 @@ import os import csv import time +import json import socket import pathlib import asyncio @@ -15,7 +16,6 @@ from typing import Dict, Optional, Union, TextIO, NamedTuple from testplan.defaults import RESOURCE_META_FILE_NAME from testplan.common.utils.path import pwd -from testplan.common.utils.json import json_dumps, json_loads from testplan.common.utils.strings import slugify from testplan.common.utils.logger import LOGFILE_FORMAT from testplan.common.utils.timing import wait @@ -345,7 +345,7 @@ async def handle_request(self, msg: bytes): with open( self.file_directory / f"{slugify(client_id)}.meta", "w" ) as f: - f.write(json_dumps(message.data)) + json.dump(message.data, f) elif message.cmd == communication.Message.Message: self.logger.info("Received resource data from %s.", client_id) if client_id not in self._file_handler: @@ -463,7 +463,7 @@ def normalize_data(self, client_id: str) -> Optional[dict]: ) json_file_path = self.file_directory / f"{slugify(client_id)}.json" with open(json_file_path, "w") as json_file: - json_file.write(json_dumps(resource_data)) + json.dump(resource_data, json_file) return { "resource_file": str(json_file_path.resolve()), "max_cpu": max(resource_data["cpu"]), @@ -478,7 +478,7 @@ def dump(self) -> str: resource_info = [] for host_meta_path in self.file_directory.glob("*.meta"): with open(host_meta_path) as meta_file: - meta = json_loads(meta_file.read()) + meta = json.load(meta_file) summary_data = self.normalize_data(meta["uid"]) if summary_data: meta.update(summary_data) @@ -488,7 +488,7 @@ def dump(self) -> str: resource_info.append(meta) meta_file_path = self.file_directory / RESOURCE_META_FILE_NAME with open(meta_file_path, "w") as meta_file: - meta_file.write(json_dumps({"entries": resource_info})) + json.dump({"entries": resource_info}, meta_file) return str(meta_file_path.resolve()) def start(self, timeout=5): diff --git a/testplan/parser.py b/testplan/parser.py index 922a85494..a8e927245 100644 --- a/testplan/parser.py +++ b/testplan/parser.py @@ -4,6 +4,7 @@ """ import argparse import 
copy +import json import sys import warnings from typing import Dict, List @@ -12,7 +13,6 @@ from testplan import defaults from testplan.common.utils import logger -from testplan.common.utils.json import json_load_from_path from testplan.report.testing import ( ReportFilterAction, ReportTagsAction, @@ -137,7 +137,7 @@ def generate_parser(self) -> HelpParser: general_group.add_argument( "--trace-tests", metavar="PATH", - type=json_load_from_path, + type=_read_json_file, dest="tracing_tests", help="Enable the tracing tests feature. A JSON file containing " "file names and line numbers to be watched by the tracer must be " @@ -158,7 +158,7 @@ def generate_parser(self) -> HelpParser: general_group.add_argument( "--xfail-tests", metavar="PATH", - type=json_load_from_path, + type=_read_json_file, help=""" Read a list of testcase name patterns from a JSON files, and mark matching testcases as xfail. This feature works for MultiTest, GTest and CPPUnit. @@ -580,6 +580,11 @@ def __call__(self, parser, namespace, values, option_string=None): setattr(namespace, self.dest, self.LEVELS[values]) +def _read_json_file(file: str) -> dict: + with open(file, "r") as fp: + return json.load(fp) + + def _read_text_file(file: str) -> List[str]: with open(file, "r") as fp: return fp.read().splitlines() @@ -597,7 +602,8 @@ def _read_text_file(file: str) -> List[str]: def _runtime_json_file(file: str) -> dict: - runtime_info = json_load_from_path(file) - if runtime_schema.is_valid(runtime_info): - return runtime_info - raise RuntimeError("Unexpected runtime file format!") + with open(file) as fp: + runtime_info = json.load(fp) + if runtime_schema.is_valid(runtime_info): + return runtime_info + raise RuntimeError("Unexpected runtime file format!") diff --git a/testplan/report/testing/base.py b/testplan/report/testing/base.py index 6319261d5..3ac4521a1 100644 --- a/testplan/report/testing/base.py +++ b/testplan/report/testing/base.py @@ -50,12 +50,12 @@ from typing_extensions import Self from testplan.common.report import ( - BaseReportGroup, + Status, + RuntimeStatus, + ReportCategories, ExceptionLogger, Report, - ReportCategories, - RuntimeStatus, - Status, + BaseReportGroup, ) from testplan.testing import tagging from testplan.testing.common import TEST_PART_PATTERN_FORMAT_STRING @@ -461,7 +461,7 @@ def annotate_part_num(self): _wrap = lambda s: TEST_PART_PATTERN_FORMAT_STRING.format( s, self.part[0], self.part[1] ) - for e in self.pre_order_iterate(): + for e in self.pre_order_reports(): if ( isinstance(e, TestCaseReport) and e.category == ReportCategories.SYNTHESIZED @@ -503,7 +503,33 @@ def _get_comparison_attrs(self): "tags_index", ] - @Report.status.getter + @property + def passed(self) -> bool: + """Shortcut for getting if report status should be considered passed.""" + return self.status.normalised() == Status.PASSED + + @property + def failed(self) -> bool: + """ + Shortcut for checking if report status should be considered failed. + """ + return self.status <= Status.FAILED + + @property + def unstable(self) -> bool: + """ + Shortcut for checking if report status should be considered unstable. + """ + return self.status.normalised() == Status.UNSTABLE + + @property + def unknown(self) -> bool: + """ + Shortcut for checking if report status is unknown. 
+ """ + return self.status.normalised() == Status.UNKNOWN + + @property def status(self) -> Status: """ Entries in this context correspond to serialized (raw) @@ -519,7 +545,19 @@ def status(self) -> Status: return self._status - @Report.runtime_status.setter + @status.setter + def status(self, new_status): + self._status = new_status + + @property + def runtime_status(self): + """ + Used for interactive mode, the runtime status of a testcase may be one + of ``RuntimeStatus``. + """ + return self._runtime_status + + @runtime_status.setter def runtime_status(self, new_status): """ Set the runtime status. As a special case, when a testcase is re-run diff --git a/testplan/report/testing/schemas.py b/testplan/report/testing/schemas.py index 134456ebc..b37c74dbd 100644 --- a/testplan/report/testing/schemas.py +++ b/testplan/report/testing/schemas.py @@ -1,22 +1,23 @@ """Schema classes for test Reports.""" import functools -import math +import json from boltons.iterutils import is_scalar, remap from marshmallow import Schema, fields, post_load from marshmallow.utils import EXCLUDE from testplan.common.report.schemas import ( + ReportSchema, BaseReportGroupSchema, - ReportLinkSchema, ReportLogSchema, - ReportSchema, TimerField, + ReportLinkSchema, ) + +from testplan.common.report import Status, RuntimeStatus from testplan.common.serialization import fields as custom_fields from testplan.common.serialization.schemas import load_tree_data -from testplan.common.utils.json import json_dumps from testplan.report.testing.base import ( TestCaseReport, TestGroupReport, @@ -51,7 +52,7 @@ class EntriesField(fields.Field): @staticmethod def _json_serializable(v): try: - json_dumps(v) + json.dumps(v, ensure_ascii=True) except (UnicodeDecodeError, TypeError): return False else: @@ -67,16 +68,8 @@ def visit(parent, key, _value): False - remove the node tuple - update the node data. 
""" - if is_scalar(_value): - if isinstance(_value, float): - if math.isnan(_value): - return key, "NaN" - elif math.isinf(_value): - if _value > 0: - return key, "Infinity" - return key, "-Infinity" - elif not self._json_serializable(_value): - return key, str(_value) + if is_scalar(_value) and not self._json_serializable(_value): + return key, str(_value) return True return remap(value, visit=visit) @@ -89,6 +82,14 @@ class TestCaseReportSchema(ReportSchema): entries = fields.List(EntriesField()) category = fields.String(dump_only=True) + status = fields.Function( + lambda x: x.status.to_json_compatible(), + Status.from_json_compatible, + ) + runtime_status = fields.Function( + lambda x: x.runtime_status.to_json_compatible(), + RuntimeStatus.from_json_compatible, + ) counter = fields.Dict(dump_only=True) tags = TagField() diff --git a/testplan/runnable/interactive/http.py b/testplan/runnable/interactive/http.py index 0cacec057..5a8d62ca4 100644 --- a/testplan/runnable/interactive/http.py +++ b/testplan/runnable/interactive/http.py @@ -57,12 +57,6 @@ def generate_interactive_api(ihandler): api_blueprint = flask.Blueprint("api", "testplan") api = flask_restx.Api(api_blueprint) app = flask.Flask("testplan", static_folder=static_dir) - try: - from flask_orjson import OrjsonProvider - except ImportError: - pass - else: - app.json = OrjsonProvider(app) app.register_blueprint(api_blueprint, url_prefix=api_prefix) post_export_model = api.model( diff --git a/testplan/testing/cpp/hobbestest.py b/testplan/testing/cpp/hobbestest.py index cadd5edba..e3d307a36 100644 --- a/testplan/testing/cpp/hobbestest.py +++ b/testplan/testing/cpp/hobbestest.py @@ -1,9 +1,9 @@ +import json import os from schema import Or from testplan.common.config import ConfigOption -from testplan.common.utils.json import json_loads from testplan.report import ( ReportCategories, RuntimeStatus, @@ -94,7 +94,7 @@ def read_test_data(self): :rtype: ``dict`` ot ``list`` """ with open(self.report_path) as report_file: - return json_loads(report_file.read()) + return json.load(report_file) def process_test_data(self, test_data): """ diff --git a/testplan/testing/listing.py b/testplan/testing/listing.py index 2a9774f37..1ddf93b8a 100644 --- a/testplan/testing/listing.py +++ b/testplan/testing/listing.py @@ -10,7 +10,6 @@ from typing import TYPE_CHECKING, List, Tuple, Union from urllib.parse import urlparse -from testplan.common.utils.json import json_dumps from testplan.common.utils.logger import TESTPLAN_LOGGER from testplan.common.utils.parser import ArgMixin from testplan.testing import tagging @@ -308,7 +307,7 @@ class SimpleJsonLister(MetadataBasedLister): ) def get_output(self, metadata: TestPlanMetadata): - return json_dumps(dataclasses.asdict(metadata), indent_2=True) + return json.dumps(dataclasses.asdict(metadata), indent=2) class ListingRegistry: diff --git a/testplan/web_ui/testing/src/AssertionPane/AssertionTypes/basicAssertionUtils.js b/testplan/web_ui/testing/src/AssertionPane/AssertionTypes/basicAssertionUtils.js index f713ddb2c..860d53ee1 100644 --- a/testplan/web_ui/testing/src/AssertionPane/AssertionTypes/basicAssertionUtils.js +++ b/testplan/web_ui/testing/src/AssertionPane/AssertionTypes/basicAssertionUtils.js @@ -35,18 +35,27 @@ function prepareLogContent(assertion, defaultContent) { let decodedMsg = null; if (assertion.message !== undefined) { - decodedMsg = ( - /^https?:\/\//.test(value), - }, - }} - > - {assertion.message} - - ); + let bytearray; + if ( + typeof assertion.message === "object" && + typeof 
(bytearray = assertion.message["_BYTES_KEY"]) !== "undefined" && + Array.isArray(bytearray) + ) { + decodedMsg = bytearray.length ? String.fromCodePoint(...bytearray) : ""; + } else { + decodedMsg = ( + /^https?:\/\//.test(value), + }, + }} + > + {assertion.message} + + ); + } } const preContent =
 {decodedMsg}
; @@ -237,18 +246,27 @@ function prepareFailContent(assertion, defaultContent) { let decodedMsg = null; if (assertion.message !== undefined) { - decodedMsg = ( - /^https?:\/\//.test(value), - }, - }} - > - {assertion.message} - - ); + let bytearray; + if ( + typeof assertion.message === "object" && + typeof (bytearray = assertion.message["_BYTES_KEY"]) !== "undefined" && + Array.isArray(bytearray) + ) { + decodedMsg = bytearray.length ? String.fromCodePoint(...bytearray) : ""; + } else { + decodedMsg = ( + /^https?:\/\//.test(value), + }, + }} + > + {assertion.message} + + ); + } } const preContent =
 {decodedMsg}
; diff --git a/tests/functional/testplan/exporters/testing/test_json.py b/tests/functional/testplan/exporters/testing/test_json.py index f722fe9e2..444e59e2c 100644 --- a/tests/functional/testplan/exporters/testing/test_json.py +++ b/tests/functional/testplan/exporters/testing/test_json.py @@ -1,7 +1,6 @@ """Test the JSON exporter.""" import copy import json -import math import os import pathlib import tempfile @@ -34,11 +33,6 @@ def test_attach(self, env, result): result.attach(tmpfile.name) - @multitest.testcase - def test_special_values(self, env, result): - result.ne(float("nan"), float("nan")) - result.lt(float("-inf"), float("inf")) - @multitest.testsuite class Beta: @@ -184,8 +178,8 @@ def test_json_exporter_generating_split_report(runpath): assert len(structure[0]["entries"]) == 1 # one suite in 1st multitest assert structure[0]["entries"][0]["name"] == "Alpha" # 1st suite name assert ( - len(structure[0]["entries"][0]["entries"]) == 5 - ) # 4 testcases, 1 synthesized + len(structure[0]["entries"][0]["entries"]) == 4 + ) # 3 testcases, 1 synthesized assert ( len(structure[0]["entries"][0]["entries"][2]["entries"]) == 3 ) # 3 parametrized testcases @@ -200,7 +194,7 @@ def test_json_exporter_generating_split_report(runpath): assert structure[1]["entries"][1]["name"] == "Beta" # 1st suite name assert len(structure[1]["entries"][1]["entries"]) == 2 # 2 testcases - assert len(assertions) == 10 # 10 cases in total + assert len(assertions) == 9 # 9 assertions in total # only one assertion in each testcase in suite `Alpha` assert assertions["test_comparison"][0]["type"] == "Equal" assert assertions["test_membership__arg_1"][0]["type"] == "Contain" @@ -216,13 +210,6 @@ def test_json_exporter_generating_split_report(runpath): assert assertions["setup"][0]["type"] == "Log" assert assertions["After Start"][0]["type"] == "Log" - # special values representation preserved - # NOTE: these values are of type float in old impl, - # NOTE: converted to js repr in cope with json lib change - assert assertions["test_special_values"][0]["first"] == "NaN" - assert assertions["test_special_values"][1]["first"] == "-Infinity" - assert assertions["test_special_values"][1]["second"] == "Infinity" - def test_implicit_exporter_initialization(runpath): """ diff --git a/tests/unit/testplan/common/report/test_base.py b/tests/unit/testplan/common/report/test_base.py index 79040c37e..6b0213ad9 100644 --- a/tests/unit/testplan/common/report/test_base.py +++ b/tests/unit/testplan/common/report/test_base.py @@ -10,8 +10,6 @@ MergeError, Report, ReportCategories, - RuntimeStatus, - Status, ) from testplan.common.report.log import LOGGER from testplan.common.utils.testing import disable_log_propagation @@ -20,48 +18,6 @@ DummyReportGroup = functools.partial(BaseReportGroup, name="dummy") -def test_report_status_basic_op(): - assert Status.ERROR <= Status.ERROR - assert Status.FAILED > Status.ERROR - assert Status.INCOMPLETE < Status.FAILED - with pytest.raises(TypeError): - Status.INCOMPLETE < Status.XPASS_STRICT - with pytest.raises(TypeError): - Status.XFAIL >= Status.SKIPPED - assert Status.XFAIL != Status.XPASS - assert Status.XFAIL is not Status.XPASS - assert Status.UNKNOWN < Status.NONE - assert not Status.NONE - - assert Status.XPASS_STRICT.normalised() is Status.FAILED - assert Status.PASSED.normalised() is Status.PASSED - - assert not Status.INCOMPLETE.precede(Status.XPASS_STRICT) - assert Status.INCOMPLETE.precede(Status.FAILED) - - -def test_report_status_precedent(): - """ - `precedent` should return the 
value with the - highest precedence (the lowest index). - """ - - assert Status.FAILED == Status.precedent([Status.FAILED, Status.UNKNOWN]) - assert Status.ERROR == Status.precedent([Status.ERROR, Status.UNKNOWN]) - assert Status.INCOMPLETE == Status.precedent( - [Status.INCOMPLETE, Status.UNKNOWN] - ) - assert Status.XPASS_STRICT == Status.precedent( - [Status.XPASS_STRICT, Status.UNKNOWN] - ) - assert Status.UNKNOWN == Status.precedent([Status.UNKNOWN, Status.PASSED]) - assert Status.PASSED == Status.precedent([Status.PASSED, Status.SKIPPED]) - assert Status.PASSED == Status.precedent([Status.PASSED, Status.XFAIL]) - assert Status.PASSED == Status.precedent([Status.PASSED, Status.XPASS]) - assert Status.PASSED == Status.precedent([Status.PASSED, Status.UNSTABLE]) - assert Status.UNSTABLE == Status.precedent([Status.UNSTABLE, Status.NONE]) - - @disable_log_propagation(LOGGER) def test_exception_logger_suppression(): """ExceptionLoggerBase should suppress and log given exceptions.""" @@ -188,13 +144,7 @@ def test_filter(self): assert rep_copy_2.entries == [["bar", "baz"], {"hello": "world"}] -class DummyStatusReport: - def __init__(self, status, uid=None): - self.uid = uid or 0 - self.status = status - - -class TestBaseReportGroup: +class TestReportGroup: def test_build_index(self): """ Should set `_index` attribute with child @@ -428,7 +378,7 @@ def test_graft_round_trip(self): grand_parent.append(parent) parent.append(child) - refs = list(grand_parent.pre_order_iterate()) + refs = list(grand_parent.pre_order_reports()) parts = list(grand_parent.pre_order_disassemble()) # disassembled in place @@ -446,114 +396,7 @@ def test_graft_round_trip(self): assert "dummy" in grand_parent assert "dummy" in parent - @pytest.mark.parametrize( - "statuses,expected", - ( - ([Status.ERROR, Status.FAILED, Status.PASSED], Status.ERROR), - ([Status.FAILED, Status.PASSED], Status.FAILED), - ( - [Status.INCOMPLETE, Status.PASSED, Status.SKIPPED], - Status.INCOMPLETE, - ), - ([Status.SKIPPED, Status.PASSED], Status.PASSED), - ([Status.INCOMPLETE, Status.FAILED], Status.INCOMPLETE), - ), - ) - def test_status(self, statuses, expected): - """Should return the precedent status from children.""" - - reports = [ - DummyStatusReport(uid=idx, status=status) - for idx, status in enumerate(statuses) - ] - group = DummyReportGroup(entries=reports) - assert group.status == expected - - def test_status_no_entries(self): - """ - Should return Status.UNKNOWN when `status_override` - is None and report has no entries. - """ - group = DummyReportGroup() - - assert group.status_override is Status.NONE - assert group.status == Status.UNKNOWN - - def test_status_override(self): - """ - `status_override` of a group should take - precedence over child statuses. - """ - group = DummyReportGroup( - entries=[DummyStatusReport(status=Status.FAILED)] - ) - - assert group.status == Status.FAILED - - group.status_override = Status.PASSED - - assert group.status == Status.PASSED - - def test_merge(self): - """ - Should merge children and set `status_override` - using `report.status_override` precedence. 
- """ - report_orig = DummyReportGroup(uid=1) - report_clone = DummyReportGroup(uid=1) - - assert report_orig.status_override is Status.NONE - - report_clone.status_override = Status.PASSED - - with mock.patch.object(report_orig, "merge_entries"): - report_orig.merge(report_clone) - report_orig.merge_entries.assert_called_once_with( - report_clone, strict=True - ) - assert report_orig.status_override == report_clone.status_override - - def test_hash(self): - """ - Test that a hash is generated for report groups, which depends on the - entries they contain. - """ - grand_parent = DummyReportGroup() - parent = DummyReportGroup() - child = Report(name="testcase") - - orig_root_hash = grand_parent.hash - - grand_parent.append(parent) - updated_root_hash = grand_parent.hash - assert updated_root_hash != orig_root_hash - - parent.append(child) - - orig_root_hash = updated_root_hash - updated_root_hash = grand_parent.hash - assert updated_root_hash != orig_root_hash - - child.append({"name": "entry", "passed": True}) - - orig_root_hash = updated_root_hash - updated_root_hash = grand_parent.hash - assert updated_root_hash != orig_root_hash - def test_report_categories_type(): assert ReportCategories.MULTITEST == "multitest" assert type(ReportCategories.MULTITEST) is str - - -def test_runtime_status_basic_op(): - assert RuntimeStatus.WAITING < RuntimeStatus.READY - assert RuntimeStatus.RESETTING >= RuntimeStatus.RUNNING - assert RuntimeStatus.RUNNING.precede(RuntimeStatus.FINISHED) - assert RuntimeStatus.NOT_RUN < RuntimeStatus.NONE - assert not RuntimeStatus.NONE - - assert RuntimeStatus.NOT_RUN.to_json_compatible() == "not_run" - assert ( - RuntimeStatus.from_json_compatible("not_run") == RuntimeStatus.NOT_RUN - ) diff --git a/tests/unit/testplan/report/test_testing.py b/tests/unit/testplan/report/test_testing.py index f82cd543f..2680d32c5 100644 --- a/tests/unit/testplan/report/test_testing.py +++ b/tests/unit/testplan/report/test_testing.py @@ -1,19 +1,22 @@ +# TODO: move certain tests to tests/unit/testplan/common/report/test_base.py + import functools import json from collections import OrderedDict +from unittest import mock import pytest from boltons.iterutils import get_path from testplan.common import entity +from testplan.common.report.log import LOGGER as report_logger +from testplan.common.utils.testing import check_report, disable_log_propagation from testplan.common.report import ( - ReportCategories, - RuntimeStatus, Status, + RuntimeStatus, + ReportCategories, + BaseReportGroup, ) -from testplan.common.report.log import LOGGER as report_logger -from testplan.common.utils.json import json_dumps, json_loads -from testplan.common.utils.testing import check_report, disable_log_propagation from testplan.report.testing.base import ( TestCaseReport, TestGroupReport, @@ -22,8 +25,50 @@ from testplan.report.testing.schemas import TestReportSchema from testplan.testing.result import Result -DummyCaseReport = functools.partial(TestCaseReport, name="dummy") -DummyGroupReport = functools.partial(TestGroupReport, name="dummy") +DummyReport = functools.partial(TestCaseReport, name="dummy") +DummyReportGroup = functools.partial(BaseReportGroup, name="dummy") + + +def test_report_status_basic_op(): + assert Status.ERROR <= Status.ERROR + assert Status.FAILED > Status.ERROR + assert Status.INCOMPLETE < Status.FAILED + with pytest.raises(TypeError): + Status.INCOMPLETE < Status.XPASS_STRICT + with pytest.raises(TypeError): + Status.XFAIL >= Status.SKIPPED + assert Status.XFAIL != Status.XPASS + assert 
Status.XFAIL is not Status.XPASS + assert Status.UNKNOWN < Status.NONE + assert not Status.NONE + + assert Status.XPASS_STRICT.normalised() is Status.FAILED + assert Status.PASSED.normalised() is Status.PASSED + + assert not Status.INCOMPLETE.precede(Status.XPASS_STRICT) + assert Status.INCOMPLETE.precede(Status.FAILED) + + +def test_report_status_precedent(): + """ + `precedent` should return the value with the + highest precedence (the lowest index). + """ + + assert Status.FAILED == Status.precedent([Status.FAILED, Status.UNKNOWN]) + assert Status.ERROR == Status.precedent([Status.ERROR, Status.UNKNOWN]) + assert Status.INCOMPLETE == Status.precedent( + [Status.INCOMPLETE, Status.UNKNOWN] + ) + assert Status.XPASS_STRICT == Status.precedent( + [Status.XPASS_STRICT, Status.UNKNOWN] + ) + assert Status.UNKNOWN == Status.precedent([Status.UNKNOWN, Status.PASSED]) + assert Status.PASSED == Status.precedent([Status.PASSED, Status.SKIPPED]) + assert Status.PASSED == Status.precedent([Status.PASSED, Status.XFAIL]) + assert Status.PASSED == Status.precedent([Status.PASSED, Status.XPASS]) + assert Status.PASSED == Status.precedent([Status.PASSED, Status.UNSTABLE]) + assert Status.UNSTABLE == Status.precedent([Status.UNSTABLE, Status.NONE]) @disable_log_propagation(report_logger) @@ -48,37 +93,93 @@ def test_report_exception_logger(): assert rep.status_override is Status.ERROR -class TestTestGroupReport: - def test_hash_merge(self): +class DummyStatusReport: + def __init__(self, status, uid=None): + self.uid = uid or 0 + self.status = status + + +class TestBaseReportGroup: + @pytest.mark.parametrize( + "statuses,expected", + ( + ([Status.ERROR, Status.FAILED, Status.PASSED], Status.ERROR), + ([Status.FAILED, Status.PASSED], Status.FAILED), + ( + [Status.INCOMPLETE, Status.PASSED, Status.SKIPPED], + Status.INCOMPLETE, + ), + ([Status.SKIPPED, Status.PASSED], Status.PASSED), + ([Status.INCOMPLETE, Status.FAILED], Status.INCOMPLETE), + ), + ) + def test_status(self, statuses, expected): + """Should return the precedent status from children.""" + + reports = [ + DummyStatusReport(uid=idx, status=status) + for idx, status in enumerate(statuses) + ] + group = DummyReportGroup(entries=reports) + assert group.status == expected + + def test_status_no_entries(self): """ - Test that the hash is updated after new report entries are merged in. + Should return Status.UNKNOWN when `status_override` + is None and report has no entries. """ - parent = DummyGroupReport() - child = DummyCaseReport(name="testcase") - parent.append(child) - orig_parent_hash = parent.hash + group = DummyReportGroup() - parent2 = DummyGroupReport(uid=parent.uid) - child2 = DummyCaseReport(name="testcase", uid=child.uid) - child2.append({"name": "entry", "passed": True}) - parent2.append(child2) + assert group.status_override is Status.NONE + assert group.status == Status.UNKNOWN - parent.merge(parent2) - assert parent.hash != orig_parent_hash + def test_status_override(self): + """ + `status_override` of a group should take + precedence over child statuses. + """ + group = DummyReportGroup( + entries=[DummyStatusReport(status=Status.FAILED)] + ) + + assert group.status == Status.FAILED + + group.status_override = Status.PASSED + + assert group.status == Status.PASSED + + def test_merge(self): + """ + Should merge children and set `status_override` + using `report.status_override` precedence. 
+ """ + report_orig = DummyReportGroup(uid=1) + report_clone = DummyReportGroup(uid=1) + + assert report_orig.status_override is Status.NONE + + report_clone.status_override = Status.PASSED + + with mock.patch.object(report_orig, "merge_entries"): + report_orig.merge(report_clone) + report_orig.merge_entries.assert_called_once_with( + report_clone, strict=True + ) + assert report_orig.status_override == report_clone.status_override def test_merge_children_not_strict(self): """ Not strict merge should append child entries and update the index if they do not exist in the parent. """ - child_clone_1 = DummyCaseReport(uid=10) - child_clone_2 = DummyCaseReport(uid=20) - parent_clone = DummyGroupReport( + child_clone_1 = DummyReport(uid=10) + child_clone_2 = DummyReport(uid=20) + parent_clone = DummyReportGroup( uid=1, entries=[child_clone_1, child_clone_2] ) - child_orig_1 = DummyCaseReport(uid=10) - parent_orig = DummyGroupReport(uid=1, entries=[child_orig_1]) + child_orig_1 = DummyReport(uid=10) + parent_orig = DummyReportGroup(uid=1, entries=[child_orig_1]) parent_orig.merge(parent_clone, strict=False) assert parent_orig.entries == [child_orig_1, child_clone_2] @@ -87,6 +188,50 @@ def test_merge_children_not_strict(self): parent_orig.merge(parent_clone, strict=False) assert parent_orig.entries == [child_orig_1, child_clone_2] + def test_hash(self): + """ + Test that a hash is generated for report groups, which depends on the + entries they contain. + """ + grand_parent = DummyReportGroup() + parent = DummyReportGroup() + child = TestCaseReport(name="testcase") + + orig_root_hash = grand_parent.hash + + grand_parent.append(parent) + updated_root_hash = grand_parent.hash + assert updated_root_hash != orig_root_hash + + parent.append(child) + + orig_root_hash = updated_root_hash + updated_root_hash = grand_parent.hash + assert updated_root_hash != orig_root_hash + + child.append({"name": "entry", "passed": True}) + + orig_root_hash = updated_root_hash + updated_root_hash = grand_parent.hash + assert updated_root_hash != orig_root_hash + + def test_hash_merge(self): + """ + Test that the hash is updated after new report entries are merged in. 
+ """ + parent = DummyReportGroup() + child = TestCaseReport(name="testcase") + parent.append(child) + orig_parent_hash = parent.hash + + parent2 = DummyReportGroup(uid=parent.uid) + child2 = TestCaseReport(name="testcase", uid=child.uid) + child2.append({"name": "entry", "passed": True}) + parent2.append(child2) + + parent.merge(parent2) + assert parent.hash != orig_parent_hash + class TestTestCaseReport: @pytest.mark.parametrize( @@ -275,8 +420,8 @@ def test_report_json_serialization(dummy_test_plan_report): """JSON Serialized & deserialized reports should be equal.""" test_plan_schema = TestReportSchema() - data = json_dumps(test_plan_schema.dump(dummy_test_plan_report)) - deserialized_report = test_plan_schema.load(json_loads(data)) + data = test_plan_schema.dumps(dummy_test_plan_report) + deserialized_report = test_plan_schema.loads(data) check_report(actual=deserialized_report, expected=dummy_test_plan_report) @@ -451,6 +596,19 @@ def iter_report_entries(report): yield from iter_report_entries(entry) +def test_runtime_status_basic_op(): + assert RuntimeStatus.WAITING < RuntimeStatus.READY + assert RuntimeStatus.RESETTING >= RuntimeStatus.RUNNING + assert RuntimeStatus.RUNNING.precede(RuntimeStatus.FINISHED) + assert RuntimeStatus.NOT_RUN < RuntimeStatus.NONE + assert not RuntimeStatus.NONE + + assert RuntimeStatus.NOT_RUN.to_json_compatible() == "not_run" + assert ( + RuntimeStatus.from_json_compatible("not_run") == RuntimeStatus.NOT_RUN + ) + + def test_runtime_status_setting(dummy_test_plan_report): for status in list(RuntimeStatus)[:-1]: dummy_test_plan_report.runtime_status = status From 4a92d9b5185079990b31f25ef8a9eced5003a8f0 Mon Sep 17 00:00:00 2001 From: yuxuan-ms Date: Mon, 9 Dec 2024 13:42:25 +0800 Subject: [PATCH 3/5] JSONExporter will log a "file not found" warning in the log instead of raising an exception. --- .../3167_changed.json_exporter.rst | 1 + testplan/exporters/testing/json/base.py | 76 ++++++++++--------- testplan/runners/pools/base.py | 2 +- 3 files changed, 43 insertions(+), 36 deletions(-) create mode 100755 doc/newsfragments/3167_changed.json_exporter.rst diff --git a/doc/newsfragments/3167_changed.json_exporter.rst b/doc/newsfragments/3167_changed.json_exporter.rst new file mode 100755 index 000000000..2bc36a8bc --- /dev/null +++ b/doc/newsfragments/3167_changed.json_exporter.rst @@ -0,0 +1 @@ +``JSONExporter`` will log a "file not found" warning in the log instead of raising an exception. \ No newline at end of file diff --git a/testplan/exporters/testing/json/base.py b/testplan/exporters/testing/json/base.py index 54a2ce6c9..efa072365 100644 --- a/testplan/exporters/testing/json/base.py +++ b/testplan/exporters/testing/json/base.py @@ -24,37 +24,6 @@ from ..base import Exporter -def save_attachments(report: TestReport, directory: str) -> Dict[str, str]: - """ - Saves the report attachments to the given directory. - - :param report: Testplan report. 
- :param directory: directory to save attachments in - :return: dictionary of destination paths - """ - moved_attachments = {} - attachments = getattr(report, "attachments", None) - if attachments: - for dst, src in attachments.items(): - src = pathlib.Path(src) - dst_path = pathlib.Path(directory) / dst - makedirs(dst_path.parent) - if not src.is_file(): - dirname = src.parent - # Try retrieving the file from "_attachments" directory that is - # near to the test report, the downloaded report might be moved - src = pathlib.Path.cwd() / ATTACHMENTS / dst - if not src.is_file(): - raise FileNotFoundError( - f'Attachment "{dst}" not found in either {dirname} or' - f' the nearest "{ATTACHMENTS}" directory of test report' - ) - copyfile(src=src, dst=dst_path) - moved_attachments[dst] = str(dst_path) - - return moved_attachments - - def save_resource_data( report: TestReport, directory: pathlib.Path ) -> pathlib.Path: @@ -176,8 +145,9 @@ def export( with open(assertions_filepath, "w") as json_file: json.dump(assertions, json_file) - meta["attachments"] = save_attachments( - report=source, directory=attachments_dir + meta["attachments"] = self.save_attachments( + report=source, + directory=attachments_dir, ) meta["version"] = 2 meta["attachments"][structure_filename] = str( @@ -192,8 +162,9 @@ def export( with open(json_path, "w") as json_file: json.dump(meta, json_file) else: - data["attachments"] = save_attachments( - report=source, directory=attachments_dir + data["attachments"] = self.save_attachments( + report=source, + directory=attachments_dir, ) data["version"] = 1 @@ -208,6 +179,41 @@ def export( ) return result + def save_attachments( + self, report: TestReport, directory: str + ) -> Dict[str, str]: + """ + Saves the report attachments to the given directory. + + :param report: Testplan report. 
+ :param directory: directory to save attachments in + :return: dictionary of destination paths + """ + moved_attachments = {} + attachments = getattr(report, "attachments", None) + if attachments: + for dst, src in attachments.items(): + src = pathlib.Path(src) + dst_path = pathlib.Path(directory) / dst + makedirs(dst_path.parent) + if not src.is_file(): + dirname = src.parent + # Try retrieving the file from "_attachments" directory that is + # near to the test report, the downloaded report might be moved + src = pathlib.Path.cwd() / ATTACHMENTS / dst + if not src.is_file(): + self.logger.warning( + 'Attachment "%s" not found in either %s or the nearest "%s" directory of test report', + dst, + dirname, + ATTACHMENTS, + ) + continue + copyfile(src=src, dst=dst_path) + moved_attachments[dst] = str(dst_path) + + return moved_attachments + @staticmethod def split_json_report(data): """Split a single Json into several parts.""" diff --git a/testplan/runners/pools/base.py b/testplan/runners/pools/base.py index ff71f76ea..df9e65969 100644 --- a/testplan/runners/pools/base.py +++ b/testplan/runners/pools/base.py @@ -162,7 +162,7 @@ def rebase_task_path(self, task: Task) -> None: def discard_running_tasks(self): self._discard_running.set() - def __str__(self): + def __repr__(self): return f"{self.__class__.__name__}[{self.cfg.index}]" From 34498398aa8770539902ab0aed01d9b642a7caf3 Mon Sep 17 00:00:00 2001 From: Zhenyu Yao <111329301+zhenyu-ms@users.noreply.github.com> Date: Wed, 11 Dec 2024 10:12:19 +0800 Subject: [PATCH 4/5] Fix/make driver's installed files writable (#1159) --- testplan/common/utils/path.py | 2 +- .../testing/multitest/driver/myapp/test_app.py | 14 ++++++++------ 2 files changed, 9 insertions(+), 7 deletions(-) diff --git a/testplan/common/utils/path.py b/testplan/common/utils/path.py index f43fda3ac..d60a42e9d 100644 --- a/testplan/common/utils/path.py +++ b/testplan/common/utils/path.py @@ -204,7 +204,7 @@ def instantiate(template, values, destination): try: target.write(render(source.read(), values)) except UnicodeDecodeError: - shutil.copy(template, destination) + shutil.copyfile(template, destination) except Exception as exc: raise Exception( "On reading/writing template: {} - of file {}".format( diff --git a/tests/unit/testplan/testing/multitest/driver/myapp/test_app.py b/tests/unit/testplan/testing/multitest/driver/myapp/test_app.py index 4ea6a7af3..9a41091f4 100644 --- a/tests/unit/testplan/testing/multitest/driver/myapp/test_app.py +++ b/tests/unit/testplan/testing/multitest/driver/myapp/test_app.py @@ -256,6 +256,7 @@ def test_install_files(runpath): bfile = os.path.join( os.path.abspath(os.path.dirname(__file__)), "binary_file" ) + os.chmod(bfile, 0o444) # only 644 or 755 supported by git stdout_regexps = [ re.compile(r".*binary=(?P.*)"), re.compile(r".*command=(?P.*)"), @@ -280,12 +281,13 @@ def test_install_files(runpath): assert os.path.exists(app.extracts["binary"]) assert bool(json.loads(app.extracts["command"])) assert os.path.exists(app.extracts["app_path"]) - assert os.path.exists(os.path.join(app.app_path, "etc", "binary_file")) - assert os.path.exists(os.path.join(app.app_path, "etc", "config.yaml")) - assert os.path.exists(os.path.join(dst, "config.yaml")) - assert os.path.exists( - os.path.join(app.app_path, "etc", "rel_path", "config.yaml") - ) + for p in [ + os.path.join(app.app_path, "etc", "binary_file"), + os.path.join(app.app_path, "etc", "config.yaml"), + os.path.join(dst, "config.yaml"), + os.path.join(app.app_path, "etc", "rel_path", 
"config.yaml"), + ]: + assert os.access(p, os.F_OK | os.R_OK | os.W_OK) def test_echo_hello(runpath): From 7e19949c8a4943a95e873111f094e640d92b83ab Mon Sep 17 00:00:00 2001 From: Zhenyu Yao <111329301+zhenyu-ms@users.noreply.github.com> Date: Wed, 11 Dec 2024 14:35:59 +0800 Subject: [PATCH 5/5] Refactor/pick a faster json lib (#1156) * use rapidjson instead * use orjson instead * fix tests; relocate some report properties * fix pyproject.toml * add newsfrag; some tweaks * misc - fix pytest warnings - remove unused requirements* files - adjust ci --- .github/actions/pip-cache/action.yml | 4 +- doc/en/getting_started.rst | 8 +- .../3147_changed.another_json_lib.rst | 1 + examples/ExecutionPools/Discover/test_plan.py | 3 +- pyproject.toml | 7 +- pytest.ini | 12 +- requirements-basic.txt | 44 ---- requirements-rtd.txt | 5 - testplan/common/report/base.py | 98 ++++---- testplan/common/report/schemas.py | 24 +- testplan/common/utils/json.py | 43 ++++ testplan/exporters/testing/http/__init__.py | 11 +- testplan/exporters/testing/json/base.py | 14 +- testplan/importers/testplan.py | 8 +- testplan/monitor/resource.py | 10 +- testplan/parser.py | 20 +- testplan/report/testing/base.py | 52 +---- testplan/report/testing/schemas.py | 31 ++- testplan/runnable/interactive/http.py | 6 + testplan/testing/cpp/hobbestest.py | 4 +- testplan/testing/listing.py | 3 +- .../testing/multitest/driver/http/client.py | 2 +- .../testing/multitest/driver/http/server.py | 2 +- .../AssertionTypes/basicAssertionUtils.js | 66 ++---- .../testplan/exporters/testing/test_json.py | 19 +- .../unit/testplan/common/report/test_base.py | 161 ++++++++++++- tests/unit/testplan/report/test_testing.py | 212 +++--------------- tests/unit/testplan/test_parser.py | 3 - 28 files changed, 420 insertions(+), 453 deletions(-) create mode 100644 doc/newsfragments/3147_changed.another_json_lib.rst delete mode 100644 requirements-basic.txt delete mode 100644 requirements-rtd.txt create mode 100644 testplan/common/utils/json.py diff --git a/.github/actions/pip-cache/action.yml b/.github/actions/pip-cache/action.yml index b85cd5b28..82c99dad2 100644 --- a/.github/actions/pip-cache/action.yml +++ b/.github/actions/pip-cache/action.yml @@ -6,5 +6,5 @@ runs: - name: Restore Pip Cache uses: actions/cache@v4 with: - path: ~/.cache/pip - key: ${{ hashFiles('setup.py') }}-${{ hashFiles('requirements.txt') }} + path: ${{ runner.os == 'Linux' && '~/.cache/pip' || '~\AppData\Local\pip\Cache' }} + key: ${{ runner.os }}-${{ hashFiles('pyproject.toml') }} diff --git a/doc/en/getting_started.rst b/doc/en/getting_started.rst index 5dba3c891..3a9bcc957 100644 --- a/doc/en/getting_started.rst +++ b/doc/en/getting_started.rst @@ -76,7 +76,7 @@ Also find all our downloadable examples :ref:`here `. Working with the source ----------------------- - + You will need a working python 3.7+ interrpreter preferably a venv, and for the interactive ui you need node installed. We are using `doit `_ as the taskrunner ``doit list`` can show all the commands. @@ -85,10 +85,10 @@ We are using `doit `_ as the taskrunner ``doit git clone https://github.com/morganstanley/testplan.git cd testplan - # install all dev requirements - pip install -r requirements-txt # this install testplan in editable mode + # install testplan in editable mode & all dev requirements + pip install -e . 
- #build the interactive UI (if you do not like it is opening a browserwindow remove the `-o`) + # build the interactive UI (if you do not like it is opening a browserwindow remove the `-o`) doit build_ui -o Internal tests diff --git a/doc/newsfragments/3147_changed.another_json_lib.rst b/doc/newsfragments/3147_changed.another_json_lib.rst new file mode 100644 index 000000000..5104d1c73 --- /dev/null +++ b/doc/newsfragments/3147_changed.another_json_lib.rst @@ -0,0 +1 @@ +Use a new JSON library ``orjson`` to improve performance when using Python 3.8 or later versions. \ No newline at end of file diff --git a/examples/ExecutionPools/Discover/test_plan.py b/examples/ExecutionPools/Discover/test_plan.py index ce9107883..37ffaa3ac 100755 --- a/examples/ExecutionPools/Discover/test_plan.py +++ b/examples/ExecutionPools/Discover/test_plan.py @@ -45,6 +45,7 @@ def main(plan): if __name__ == "__main__": res = main() - assert len(res.report.entries) == 5 + if res.report.entries: + assert len(res.report.entries) == 5 print("Exiting code: {}".format(res.exit_code)) sys.exit(res.exit_code) diff --git a/pyproject.toml b/pyproject.toml index f0c3295c0..90309e991 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -34,12 +34,9 @@ "setuptools", "pytest", "pytest-mock", - "py", "psutil", "schema", - "pytz", "lxml", - "python-dateutil", "reportlab", "marshmallow", "termcolor", @@ -66,7 +63,9 @@ "typing_extensions", "dill", "gherkin-official==4.1.3", - "parse" + "parse", + "orjson; python_version>='3.8'", + "flask-orjson; python_version>='3.8'" ] requires-python = ">=3.7" diff --git a/pytest.ini b/pytest.ini index 328b7dd2e..8c10f8750 100644 --- a/pytest.ini +++ b/pytest.ini @@ -1,10 +1,12 @@ [pytest] filterwarnings = ignore::pytest.PytestWarning - ignore::DeprecationWarning:flask_restx.*: - ignore::DeprecationWarning:jinja2.*: - ignore::DeprecationWarning:jsonschema.*: - ignore::DeprecationWarning:marshmallow.*: - ignore::DeprecationWarning:werkzeug.*: + ignore:.*flask_restx.*:DeprecationWarning + ; jsonschema warning from flask_restx + ignore:.*jsonschema.*:DeprecationWarning + ignore:.*load_module.*:DeprecationWarning + ignore:.*LogMatcher.*:UserWarning + ; under most cases, included files are not hit + ignore:No data was collected:coverage.exceptions.CoverageWarning norecursedirs=tests/helpers diff --git a/requirements-basic.txt b/requirements-basic.txt deleted file mode 100644 index 9e0760a14..000000000 --- a/requirements-basic.txt +++ /dev/null @@ -1,44 +0,0 @@ -pytest -py -psutil>=5.6.7 -setuptools -schema -pytz -lxml -python-dateutil -reportlab -marshmallow -termcolor -colorama -pyzmq -terminaltables -requests>=2.4.3 -flask>2.0.0 -werkzeug>2.0.0 -flask_restx -cheroot -boltons -validators<=0.20.0 -Pillow -plotly -rpyc -coverage -memoization -typing_extensions -click>=8.1.0 - -# Documentation -# --------------- -# sphinx -# sphinx_rtd_theme -# sphinx-click - -# matplot assertion -# ----------------- -matplotlib -numpy - -# Data science -# ------------ -# scikit-learn -# scipy diff --git a/requirements-rtd.txt b/requirements-rtd.txt deleted file mode 100644 index 941be7a12..000000000 --- a/requirements-rtd.txt +++ /dev/null @@ -1,5 +0,0 @@ -docutils<0.18 -Jinja2<3.1.0 -sphinx<2 --r ./requirements.txt -./releaseherald \ No newline at end of file diff --git a/testplan/common/report/base.py b/testplan/common/report/base.py index a50c3f5f5..132cd12aa 100644 --- a/testplan/common/report/base.py +++ b/testplan/common/report/base.py @@ -263,8 +263,8 @@ def __init__( uid: Optional[str] = None, entries: 
Optional[list] = None, parent_uids: Optional[List[str]] = None, - status_override=None, - status_reason=None, + status_override: Optional[Status] = None, + status_reason: Optional[str] = None, ): self.name = name self.description = description @@ -433,6 +433,56 @@ def is_empty(self) -> bool: """ return len(self.entries) == len(self.logs) == 0 + @property + def passed(self) -> bool: + """Shortcut for getting if report status should be considered passed.""" + return self.status.normalised() == Status.PASSED + + @property + def failed(self) -> bool: + """ + Shortcut for checking if report status should be considered failed. + """ + return self.status <= Status.FAILED + + @property + def unstable(self) -> bool: + """ + Shortcut for checking if report status should be considered unstable. + """ + return self.status.normalised() == Status.UNSTABLE + + @property + def unknown(self) -> bool: + """ + Shortcut for checking if report status is unknown. + """ + return self.status.normalised() == Status.UNKNOWN + + @property + def status(self) -> Status: + """Return the report status.""" + if self.status_override: + return self.status_override + return self._status + + @status.setter + def status(self, new_status: Status): + self._status = new_status + + @property + def runtime_status(self) -> RuntimeStatus: + """ + Used for interactive mode, the runtime status of a testcase will be one + of ``RuntimeStatus``. + """ + return self._runtime_status + + @runtime_status.setter + def runtime_status(self, new_status: RuntimeStatus): + """Set the runtime status.""" + self._runtime_status = new_status + @property def hash(self): """Return a hash of all entries in this report.""" @@ -468,34 +518,8 @@ def __init__(self, name, **kwargs): for child in self.entries: self.set_parent_uids(child) - @property - def passed(self): - """Shortcut for getting if report status should be considered passed.""" - return self.status.normalised() == Status.PASSED - - @property - def failed(self): - """ - Shortcut for checking if report status should be considered failed. - """ - return self.status <= Status.FAILED - - @property - def unstable(self): - """ - Shortcut for checking if report status should be considered unstable. - """ - return self.status.normalised() == Status.UNSTABLE - - @property - def unknown(self): - """ - Shortcut for checking if report status is unknown. - """ - return self.status.normalised() == Status.UNKNOWN - - @property - def status(self): + @Report.status.getter + def status(self) -> Status: """ Status of the report, will be used to decide if a Testplan run has completed successfully or not. 
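A note on the refactor in the hunk above: the ``passed``/``failed``/``unstable``/``unknown`` helpers and the ``status``/``runtime_status`` properties now live on the base ``Report`` class, and ``BaseReportGroup`` re-declares only the getter via ``@Report.status.getter`` while inheriting the setter. A minimal, self-contained sketch of that Python property pattern (the ``Base``/``Group`` names and the string statuses are illustrative only, not Testplan's):

.. code-block:: python

    class Base:
        def __init__(self):
            self._status = "unknown"

        @property
        def status(self):
            return self._status

        @status.setter
        def status(self, value):
            self._status = value


    class Group(Base):
        # ``Base.status.getter`` copies the inherited property object and
        # swaps in a new fget, so Base's setter remains attached to it.
        @Base.status.getter
        def status(self):
            return self._status.upper()


    g = Group()
    g.status = "passed"           # uses the setter defined on Base
    assert g.status == "PASSED"   # uses the getter overridden on Group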
@@ -513,12 +537,8 @@ def status(self): return self._status - @status.setter - def status(self, new_status): - self._status = new_status - @property - def runtime_status(self): + def runtime_status(self) -> RuntimeStatus: """ The runtime status is used for interactive running, and reports whether a particular entry is READY, WAITING, RUNNING, RESETTING, @@ -534,7 +554,7 @@ def runtime_status(self): return self._runtime_status @runtime_status.setter - def runtime_status(self, new_status): + def runtime_status(self, new_status: RuntimeStatus): """Set the runtime_status of all child entries.""" for entry in self: if entry.category != ReportCategories.SYNTHESIZED: @@ -635,11 +655,11 @@ def remove_by_uid(self, uid): __delitem__ = remove_by_uid - def pre_order_reports(self): + def pre_order_iterate(self): yield self for e in self: if isinstance(e, BaseReportGroup): - yield from e.pre_order_reports() + yield from e.pre_order_iterate() elif isinstance(e, Report): yield e @@ -961,7 +981,7 @@ def set_runtime_status_filtered( ) -> None: """ Alternative setter for the runtime status of an entry. Propagates only - to the specified entries. + to the specified entries. :param new_status: new runtime status to be set :param entries: tree-like structure of entries names diff --git a/testplan/common/report/schemas.py b/testplan/common/report/schemas.py index 8f96657b0..2e10db426 100644 --- a/testplan/common/report/schemas.py +++ b/testplan/common/report/schemas.py @@ -4,12 +4,16 @@ from marshmallow import Schema, fields, post_load from marshmallow.utils import EXCLUDE +from testplan.common.report.base import ( + BaseReportGroup, + Report, + RuntimeStatus, + Status, +) from testplan.common.serialization import fields as custom_fields from testplan.common.serialization import schemas from testplan.common.utils import timing -from .base import Report, BaseReportGroup, Status, RuntimeStatus - __all__ = ["ReportLogSchema", "ReportSchema", "BaseReportGroupSchema"] # pylint: disable=unused-argument @@ -91,6 +95,14 @@ class Meta: allow_none=True, ) status_reason = fields.String(allow_none=True) + status = fields.Function( + lambda x: x.status.to_json_compatible(), + Status.from_json_compatible, + ) + runtime_status = fields.Function( + lambda x: x.runtime_status.to_json_compatible(), + RuntimeStatus.from_json_compatible, + ) logs = fields.Nested(ReportLogSchema, many=True) hash = fields.Integer(dump_only=True) parent_uids = fields.List(fields.String()) @@ -127,14 +139,6 @@ class BaseReportGroupSchema(ReportSchema): }, many=True, ) - status = fields.Function( - lambda x: x.status.to_json_compatible(), - Status.from_json_compatible, - ) - runtime_status = fields.Function( - lambda x: x.runtime_status.to_json_compatible(), - RuntimeStatus.from_json_compatible, - ) counter = fields.Dict(dump_only=True) children = fields.List(fields.Nested(ReportLinkSchema)) diff --git a/testplan/common/utils/json.py b/testplan/common/utils/json.py new file mode 100644 index 000000000..f18df7c96 --- /dev/null +++ b/testplan/common/utils/json.py @@ -0,0 +1,43 @@ +import json +from pathlib import Path +from typing import Union + +_USE_ORJSON = False + +try: + import orjson +except ImportError: + pass +else: + _USE_ORJSON = True + + +def json_loads(data: str): + if _USE_ORJSON: + return orjson.loads(data) + else: + return json.loads(data) + + +def json_dumps(data, indent_2=False, default=None) -> str: + if _USE_ORJSON: + return orjson.dumps( + data, + default=default, + option=orjson.OPT_INDENT_2 if indent_2 else 0, + ).decode() + else: + 
if default: + + class _E(json.JSONEncoder): + def default(self, o): + return default(o) + + else: + _E = None + return json.dumps(data, cls=_E, indent=2 if indent_2 else None) + + +def json_load_from_path(path: Union[str, Path]) -> dict: + with open(path) as fp: + return json_loads(fp.read()) diff --git a/testplan/exporters/testing/http/__init__.py b/testplan/exporters/testing/http/__init__.py index 7a3daa7e2..299d688fd 100644 --- a/testplan/exporters/testing/http/__init__.py +++ b/testplan/exporters/testing/http/__init__.py @@ -3,7 +3,6 @@ must be able to handle POST request and receive data in JSON format. """ -import json from typing import Any, Tuple, Union, Optional, Dict import requests @@ -15,19 +14,13 @@ ExportContext, verify_export_context, ) +from testplan.common.utils.json import json_dumps from testplan.common.utils.validation import is_valid_url from testplan.report import TestReport from testplan.report.testing.schemas import TestReportSchema from ..base import Exporter -class CustomJsonEncoder(json.JSONEncoder): - """To jsonify data that cannot be serialized by default JSONEncoder.""" - - def default(self, obj: Any) -> str: # pylint: disable = method-hidden - return str(obj) - - class HTTPExporterConfig(ExporterConfig): """ Configuration object for @@ -83,7 +76,7 @@ def _upload_report( response = requests.post( url=url, headers=headers, - data=json.dumps(data, cls=CustomJsonEncoder), + data=json_dumps(data, default=str), timeout=self.cfg.timeout, ) response.raise_for_status() diff --git a/testplan/exporters/testing/json/base.py b/testplan/exporters/testing/json/base.py index efa072365..8a389a9c2 100644 --- a/testplan/exporters/testing/json/base.py +++ b/testplan/exporters/testing/json/base.py @@ -4,7 +4,6 @@ """ import hashlib -import json import os import pathlib @@ -17,6 +16,7 @@ ExportContext, verify_export_context, ) +from testplan.common.utils.json import json_dumps, json_loads from testplan.common.utils.path import makedirs from testplan.defaults import ATTACHMENTS, RESOURCE_DATA from testplan.report.testing.base import TestReport, TestCaseReport @@ -29,7 +29,7 @@ def save_resource_data( ) -> pathlib.Path: directory.mkdir(parents=True, exist_ok=True) with open(report.resource_meta_path) as meta_file: - meta_info = json.load(meta_file) + meta_info = json_loads(meta_file.read()) for host_meta in meta_info["entries"]: if "resource_file" in host_meta: dist_path = ( @@ -39,7 +39,7 @@ def save_resource_data( host_meta["resource_file"] = dist_path.name meta_path = directory / pathlib.Path(report.resource_meta_path).name with open(meta_path, "w") as meta_file: - json.dump(meta_info, meta_file) + meta_file.write(json_dumps(meta_info)) return meta_path @@ -141,9 +141,9 @@ def export( attachments_dir.mkdir(parents=True, exist_ok=True) with open(structure_filepath, "w") as json_file: - json.dump(structure, json_file) + json_file.write(json_dumps(structure)) with open(assertions_filepath, "w") as json_file: - json.dump(assertions, json_file) + json_file.write(json_dumps(assertions)) meta["attachments"] = self.save_attachments( report=source, @@ -160,7 +160,7 @@ def export( meta["assertions_file"] = assertions_filename with open(json_path, "w") as json_file: - json.dump(meta, json_file) + json_file.write(json_dumps(meta)) else: data["attachments"] = self.save_attachments( report=source, @@ -169,7 +169,7 @@ def export( data["version"] = 1 with open(json_path, "w") as json_file: - json.dump(data, json_file) + json_file.write(json_dumps(data)) self.logger.user_info("JSON generated at 
%s", json_path) result = {"json": self.cfg.json_path} diff --git a/testplan/importers/testplan.py b/testplan/importers/testplan.py index 644fe93f7..1acaa9743 100644 --- a/testplan/importers/testplan.py +++ b/testplan/importers/testplan.py @@ -1,11 +1,11 @@ """ Implements one-phase importer for Testplan JSON format. """ -import json from typing import List -from testplan.importers import ResultImporter, ImportedResult -from testplan.report import TestGroupReport, TestReport, ReportCategories +from testplan.common.utils.json import json_loads +from testplan.importers import ImportedResult, ResultImporter +from testplan.report import ReportCategories, TestGroupReport, TestReport from testplan.report.testing.schemas import TestReportSchema @@ -41,7 +41,7 @@ def __init__(self, path: str): def import_result(self) -> ImportedResult: """ """ with open(self.path) as fp: - result_json = json.load(fp) + result_json = json_loads(fp.read()) result = self.schema.load(result_json) return TestplanImportedResult(result) diff --git a/testplan/monitor/resource.py b/testplan/monitor/resource.py index 32e9866f9..27715e659 100755 --- a/testplan/monitor/resource.py +++ b/testplan/monitor/resource.py @@ -1,7 +1,6 @@ import os import csv import time -import json import socket import pathlib import asyncio @@ -16,6 +15,7 @@ from typing import Dict, Optional, Union, TextIO, NamedTuple from testplan.defaults import RESOURCE_META_FILE_NAME from testplan.common.utils.path import pwd +from testplan.common.utils.json import json_dumps, json_loads from testplan.common.utils.strings import slugify from testplan.common.utils.logger import LOGFILE_FORMAT from testplan.common.utils.timing import wait @@ -345,7 +345,7 @@ async def handle_request(self, msg: bytes): with open( self.file_directory / f"{slugify(client_id)}.meta", "w" ) as f: - json.dump(message.data, f) + f.write(json_dumps(message.data)) elif message.cmd == communication.Message.Message: self.logger.info("Received resource data from %s.", client_id) if client_id not in self._file_handler: @@ -463,7 +463,7 @@ def normalize_data(self, client_id: str) -> Optional[dict]: ) json_file_path = self.file_directory / f"{slugify(client_id)}.json" with open(json_file_path, "w") as json_file: - json.dump(resource_data, json_file) + json_file.write(json_dumps(resource_data)) return { "resource_file": str(json_file_path.resolve()), "max_cpu": max(resource_data["cpu"]), @@ -478,7 +478,7 @@ def dump(self) -> str: resource_info = [] for host_meta_path in self.file_directory.glob("*.meta"): with open(host_meta_path) as meta_file: - meta = json.load(meta_file) + meta = json_loads(meta_file.read()) summary_data = self.normalize_data(meta["uid"]) if summary_data: meta.update(summary_data) @@ -488,7 +488,7 @@ def dump(self) -> str: resource_info.append(meta) meta_file_path = self.file_directory / RESOURCE_META_FILE_NAME with open(meta_file_path, "w") as meta_file: - json.dump({"entries": resource_info}, meta_file) + meta_file.write(json_dumps({"entries": resource_info})) return str(meta_file_path.resolve()) def start(self, timeout=5): diff --git a/testplan/parser.py b/testplan/parser.py index a8e927245..922a85494 100644 --- a/testplan/parser.py +++ b/testplan/parser.py @@ -4,7 +4,6 @@ """ import argparse import copy -import json import sys import warnings from typing import Dict, List @@ -13,6 +12,7 @@ from testplan import defaults from testplan.common.utils import logger +from testplan.common.utils.json import json_load_from_path from testplan.report.testing import ( 
ReportFilterAction, ReportTagsAction, @@ -137,7 +137,7 @@ def generate_parser(self) -> HelpParser: general_group.add_argument( "--trace-tests", metavar="PATH", - type=_read_json_file, + type=json_load_from_path, dest="tracing_tests", help="Enable the tracing tests feature. A JSON file containing " "file names and line numbers to be watched by the tracer must be " @@ -158,7 +158,7 @@ def generate_parser(self) -> HelpParser: general_group.add_argument( "--xfail-tests", metavar="PATH", - type=_read_json_file, + type=json_load_from_path, help=""" Read a list of testcase name patterns from a JSON files, and mark matching testcases as xfail. This feature works for MultiTest, GTest and CPPUnit. @@ -580,11 +580,6 @@ def __call__(self, parser, namespace, values, option_string=None): setattr(namespace, self.dest, self.LEVELS[values]) -def _read_json_file(file: str) -> dict: - with open(file, "r") as fp: - return json.load(fp) - - def _read_text_file(file: str) -> List[str]: with open(file, "r") as fp: return fp.read().splitlines() @@ -602,8 +597,7 @@ def _read_text_file(file: str) -> List[str]: def _runtime_json_file(file: str) -> dict: - with open(file) as fp: - runtime_info = json.load(fp) - if runtime_schema.is_valid(runtime_info): - return runtime_info - raise RuntimeError("Unexpected runtime file format!") + runtime_info = json_load_from_path(file) + if runtime_schema.is_valid(runtime_info): + return runtime_info + raise RuntimeError("Unexpected runtime file format!") diff --git a/testplan/report/testing/base.py b/testplan/report/testing/base.py index 3ac4521a1..6319261d5 100644 --- a/testplan/report/testing/base.py +++ b/testplan/report/testing/base.py @@ -50,12 +50,12 @@ from typing_extensions import Self from testplan.common.report import ( - Status, - RuntimeStatus, - ReportCategories, + BaseReportGroup, ExceptionLogger, Report, - BaseReportGroup, + ReportCategories, + RuntimeStatus, + Status, ) from testplan.testing import tagging from testplan.testing.common import TEST_PART_PATTERN_FORMAT_STRING @@ -461,7 +461,7 @@ def annotate_part_num(self): _wrap = lambda s: TEST_PART_PATTERN_FORMAT_STRING.format( s, self.part[0], self.part[1] ) - for e in self.pre_order_reports(): + for e in self.pre_order_iterate(): if ( isinstance(e, TestCaseReport) and e.category == ReportCategories.SYNTHESIZED @@ -503,33 +503,7 @@ def _get_comparison_attrs(self): "tags_index", ] - @property - def passed(self) -> bool: - """Shortcut for getting if report status should be considered passed.""" - return self.status.normalised() == Status.PASSED - - @property - def failed(self) -> bool: - """ - Shortcut for checking if report status should be considered failed. - """ - return self.status <= Status.FAILED - - @property - def unstable(self) -> bool: - """ - Shortcut for checking if report status should be considered unstable. - """ - return self.status.normalised() == Status.UNSTABLE - - @property - def unknown(self) -> bool: - """ - Shortcut for checking if report status is unknown. - """ - return self.status.normalised() == Status.UNKNOWN - - @property + @Report.status.getter def status(self) -> Status: """ Entries in this context correspond to serialized (raw) @@ -545,19 +519,7 @@ def status(self) -> Status: return self._status - @status.setter - def status(self, new_status): - self._status = new_status - - @property - def runtime_status(self): - """ - Used for interactive mode, the runtime status of a testcase may be one - of ``RuntimeStatus``. 
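As an aside on the parser changes above: ``json_load_from_path`` is now passed directly as the ``argparse`` ``type`` converter for ``--trace-tests`` and ``--xfail-tests``, so the parsed namespace holds the decoded dictionary rather than a path string. A small, hypothetical sketch of that pattern (the ``xfail.json`` file and its contents are made up for illustration and do not show the real ``--xfail-tests`` schema):

.. code-block:: python

    import argparse
    import pathlib

    from testplan.common.utils.json import json_load_from_path

    # Placeholder file so the snippet runs standalone; not the real schema.
    pathlib.Path("xfail.json").write_text('{"example": true}')

    parser = argparse.ArgumentParser()
    # argparse calls the ``type`` callable with the raw string value, so the
    # parsed namespace already holds the decoded dict instead of a path.
    parser.add_argument(
        "--xfail-tests", metavar="PATH", type=json_load_from_path
    )

    args = parser.parse_args(["--xfail-tests", "xfail.json"])
    assert args.xfail_tests == {"example": True}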
- """ - return self._runtime_status - - @runtime_status.setter + @Report.runtime_status.setter def runtime_status(self, new_status): """ Set the runtime status. As a special case, when a testcase is re-run diff --git a/testplan/report/testing/schemas.py b/testplan/report/testing/schemas.py index b37c74dbd..134456ebc 100644 --- a/testplan/report/testing/schemas.py +++ b/testplan/report/testing/schemas.py @@ -1,23 +1,22 @@ """Schema classes for test Reports.""" import functools -import json +import math from boltons.iterutils import is_scalar, remap from marshmallow import Schema, fields, post_load from marshmallow.utils import EXCLUDE from testplan.common.report.schemas import ( - ReportSchema, BaseReportGroupSchema, + ReportLinkSchema, ReportLogSchema, + ReportSchema, TimerField, - ReportLinkSchema, ) - -from testplan.common.report import Status, RuntimeStatus from testplan.common.serialization import fields as custom_fields from testplan.common.serialization.schemas import load_tree_data +from testplan.common.utils.json import json_dumps from testplan.report.testing.base import ( TestCaseReport, TestGroupReport, @@ -52,7 +51,7 @@ class EntriesField(fields.Field): @staticmethod def _json_serializable(v): try: - json.dumps(v, ensure_ascii=True) + json_dumps(v) except (UnicodeDecodeError, TypeError): return False else: @@ -68,8 +67,16 @@ def visit(parent, key, _value): False - remove the node tuple - update the node data. """ - if is_scalar(_value) and not self._json_serializable(_value): - return key, str(_value) + if is_scalar(_value): + if isinstance(_value, float): + if math.isnan(_value): + return key, "NaN" + elif math.isinf(_value): + if _value > 0: + return key, "Infinity" + return key, "-Infinity" + elif not self._json_serializable(_value): + return key, str(_value) return True return remap(value, visit=visit) @@ -82,14 +89,6 @@ class TestCaseReportSchema(ReportSchema): entries = fields.List(EntriesField()) category = fields.String(dump_only=True) - status = fields.Function( - lambda x: x.status.to_json_compatible(), - Status.from_json_compatible, - ) - runtime_status = fields.Function( - lambda x: x.runtime_status.to_json_compatible(), - RuntimeStatus.from_json_compatible, - ) counter = fields.Dict(dump_only=True) tags = TagField() diff --git a/testplan/runnable/interactive/http.py b/testplan/runnable/interactive/http.py index 5a8d62ca4..0cacec057 100644 --- a/testplan/runnable/interactive/http.py +++ b/testplan/runnable/interactive/http.py @@ -57,6 +57,12 @@ def generate_interactive_api(ihandler): api_blueprint = flask.Blueprint("api", "testplan") api = flask_restx.Api(api_blueprint) app = flask.Flask("testplan", static_folder=static_dir) + try: + from flask_orjson import OrjsonProvider + except ImportError: + pass + else: + app.json = OrjsonProvider(app) app.register_blueprint(api_blueprint, url_prefix=api_prefix) post_export_model = api.model( diff --git a/testplan/testing/cpp/hobbestest.py b/testplan/testing/cpp/hobbestest.py index e3d307a36..cadd5edba 100644 --- a/testplan/testing/cpp/hobbestest.py +++ b/testplan/testing/cpp/hobbestest.py @@ -1,9 +1,9 @@ -import json import os from schema import Or from testplan.common.config import ConfigOption +from testplan.common.utils.json import json_loads from testplan.report import ( ReportCategories, RuntimeStatus, @@ -94,7 +94,7 @@ def read_test_data(self): :rtype: ``dict`` ot ``list`` """ with open(self.report_path) as report_file: - return json.load(report_file) + return json_loads(report_file.read()) def process_test_data(self, 
test_data): """ diff --git a/testplan/testing/listing.py b/testplan/testing/listing.py index 1ddf93b8a..2a9774f37 100644 --- a/testplan/testing/listing.py +++ b/testplan/testing/listing.py @@ -10,6 +10,7 @@ from typing import TYPE_CHECKING, List, Tuple, Union from urllib.parse import urlparse +from testplan.common.utils.json import json_dumps from testplan.common.utils.logger import TESTPLAN_LOGGER from testplan.common.utils.parser import ArgMixin from testplan.testing import tagging @@ -307,7 +308,7 @@ class SimpleJsonLister(MetadataBasedLister): ) def get_output(self, metadata: TestPlanMetadata): - return json.dumps(dataclasses.asdict(metadata), indent=2) + return json_dumps(dataclasses.asdict(metadata), indent_2=True) class ListingRegistry: diff --git a/testplan/testing/multitest/driver/http/client.py b/testplan/testing/multitest/driver/http/client.py index be6c9b243..31053b2c5 100644 --- a/testplan/testing/multitest/driver/http/client.py +++ b/testplan/testing/multitest/driver/http/client.py @@ -195,7 +195,7 @@ def send(self, method, api, **kwargs): args=(method, api, drop_response, self.timeout), kwargs=kwargs, ) - request_thread.setDaemon(True) + request_thread.daemon = True request_thread.start() self.request_threads.append((request_thread, drop_response)) diff --git a/testplan/testing/multitest/driver/http/server.py b/testplan/testing/multitest/driver/http/server.py index e8eb2096b..0757f0c25 100644 --- a/testplan/testing/multitest/driver/http/server.py +++ b/testplan/testing/multitest/driver/http/server.py @@ -454,7 +454,7 @@ def starting(self): timeout=self.timeout, logger=self.logger, ) - self._server_thread.setName(self.name) + self._server_thread.name = self.name self._server_thread.start() while not hasattr(self._server_thread.server, "server_port"): diff --git a/testplan/web_ui/testing/src/AssertionPane/AssertionTypes/basicAssertionUtils.js b/testplan/web_ui/testing/src/AssertionPane/AssertionTypes/basicAssertionUtils.js index 860d53ee1..f713ddb2c 100644 --- a/testplan/web_ui/testing/src/AssertionPane/AssertionTypes/basicAssertionUtils.js +++ b/testplan/web_ui/testing/src/AssertionPane/AssertionTypes/basicAssertionUtils.js @@ -35,27 +35,18 @@ function prepareLogContent(assertion, defaultContent) { let decodedMsg = null; if (assertion.message !== undefined) { - let bytearray; - if ( - typeof assertion.message === "object" && - typeof (bytearray = assertion.message["_BYTES_KEY"]) !== "undefined" && - Array.isArray(bytearray) - ) { - decodedMsg = bytearray.length ? String.fromCodePoint(...bytearray) : ""; - } else { - decodedMsg = ( - /^https?:\/\//.test(value), - }, - }} - > - {assertion.message} - - ); - } + decodedMsg = ( + /^https?:\/\//.test(value), + }, + }} + > + {assertion.message} + + ); } const preContent =
{decodedMsg}
; @@ -246,27 +237,18 @@ function prepareFailContent(assertion, defaultContent) { let decodedMsg = null; if (assertion.message !== undefined) { - let bytearray; - if ( - typeof assertion.message === "object" && - typeof (bytearray = assertion.message["_BYTES_KEY"]) !== "undefined" && - Array.isArray(bytearray) - ) { - decodedMsg = bytearray.length ? String.fromCodePoint(...bytearray) : ""; - } else { - decodedMsg = ( - /^https?:\/\//.test(value), - }, - }} - > - {assertion.message} - - ); - } + decodedMsg = ( + /^https?:\/\//.test(value), + }, + }} + > + {assertion.message} + + ); } const preContent =
{decodedMsg}
; diff --git a/tests/functional/testplan/exporters/testing/test_json.py b/tests/functional/testplan/exporters/testing/test_json.py index 444e59e2c..f722fe9e2 100644 --- a/tests/functional/testplan/exporters/testing/test_json.py +++ b/tests/functional/testplan/exporters/testing/test_json.py @@ -1,6 +1,7 @@ """Test the JSON exporter.""" import copy import json +import math import os import pathlib import tempfile @@ -33,6 +34,11 @@ def test_attach(self, env, result): result.attach(tmpfile.name) + @multitest.testcase + def test_special_values(self, env, result): + result.ne(float("nan"), float("nan")) + result.lt(float("-inf"), float("inf")) + @multitest.testsuite class Beta: @@ -178,8 +184,8 @@ def test_json_exporter_generating_split_report(runpath): assert len(structure[0]["entries"]) == 1 # one suite in 1st multitest assert structure[0]["entries"][0]["name"] == "Alpha" # 1st suite name assert ( - len(structure[0]["entries"][0]["entries"]) == 4 - ) # 3 testcases, 1 synthesized + len(structure[0]["entries"][0]["entries"]) == 5 + ) # 4 testcases, 1 synthesized assert ( len(structure[0]["entries"][0]["entries"][2]["entries"]) == 3 ) # 3 parametrized testcases @@ -194,7 +200,7 @@ def test_json_exporter_generating_split_report(runpath): assert structure[1]["entries"][1]["name"] == "Beta" # 1st suite name assert len(structure[1]["entries"][1]["entries"]) == 2 # 2 testcases - assert len(assertions) == 9 # 9 assertions in total + assert len(assertions) == 10 # 10 cases in total # only one assertion in each testcase in suite `Alpha` assert assertions["test_comparison"][0]["type"] == "Equal" assert assertions["test_membership__arg_1"][0]["type"] == "Contain" @@ -210,6 +216,13 @@ def test_json_exporter_generating_split_report(runpath): assert assertions["setup"][0]["type"] == "Log" assert assertions["After Start"][0]["type"] == "Log" + # special values representation preserved + # NOTE: these values are of type float in old impl, + # NOTE: converted to js repr in cope with json lib change + assert assertions["test_special_values"][0]["first"] == "NaN" + assert assertions["test_special_values"][1]["first"] == "-Infinity" + assert assertions["test_special_values"][1]["second"] == "Infinity" + def test_implicit_exporter_initialization(runpath): """ diff --git a/tests/unit/testplan/common/report/test_base.py b/tests/unit/testplan/common/report/test_base.py index 6b0213ad9..79040c37e 100644 --- a/tests/unit/testplan/common/report/test_base.py +++ b/tests/unit/testplan/common/report/test_base.py @@ -10,6 +10,8 @@ MergeError, Report, ReportCategories, + RuntimeStatus, + Status, ) from testplan.common.report.log import LOGGER from testplan.common.utils.testing import disable_log_propagation @@ -18,6 +20,48 @@ DummyReportGroup = functools.partial(BaseReportGroup, name="dummy") +def test_report_status_basic_op(): + assert Status.ERROR <= Status.ERROR + assert Status.FAILED > Status.ERROR + assert Status.INCOMPLETE < Status.FAILED + with pytest.raises(TypeError): + Status.INCOMPLETE < Status.XPASS_STRICT + with pytest.raises(TypeError): + Status.XFAIL >= Status.SKIPPED + assert Status.XFAIL != Status.XPASS + assert Status.XFAIL is not Status.XPASS + assert Status.UNKNOWN < Status.NONE + assert not Status.NONE + + assert Status.XPASS_STRICT.normalised() is Status.FAILED + assert Status.PASSED.normalised() is Status.PASSED + + assert not Status.INCOMPLETE.precede(Status.XPASS_STRICT) + assert Status.INCOMPLETE.precede(Status.FAILED) + + +def test_report_status_precedent(): + """ + `precedent` should return the 
value with the + highest precedence (the lowest index). + """ + + assert Status.FAILED == Status.precedent([Status.FAILED, Status.UNKNOWN]) + assert Status.ERROR == Status.precedent([Status.ERROR, Status.UNKNOWN]) + assert Status.INCOMPLETE == Status.precedent( + [Status.INCOMPLETE, Status.UNKNOWN] + ) + assert Status.XPASS_STRICT == Status.precedent( + [Status.XPASS_STRICT, Status.UNKNOWN] + ) + assert Status.UNKNOWN == Status.precedent([Status.UNKNOWN, Status.PASSED]) + assert Status.PASSED == Status.precedent([Status.PASSED, Status.SKIPPED]) + assert Status.PASSED == Status.precedent([Status.PASSED, Status.XFAIL]) + assert Status.PASSED == Status.precedent([Status.PASSED, Status.XPASS]) + assert Status.PASSED == Status.precedent([Status.PASSED, Status.UNSTABLE]) + assert Status.UNSTABLE == Status.precedent([Status.UNSTABLE, Status.NONE]) + + @disable_log_propagation(LOGGER) def test_exception_logger_suppression(): """ExceptionLoggerBase should suppress and log given exceptions.""" @@ -144,7 +188,13 @@ def test_filter(self): assert rep_copy_2.entries == [["bar", "baz"], {"hello": "world"}] -class TestReportGroup: +class DummyStatusReport: + def __init__(self, status, uid=None): + self.uid = uid or 0 + self.status = status + + +class TestBaseReportGroup: def test_build_index(self): """ Should set `_index` attribute with child @@ -378,7 +428,7 @@ def test_graft_round_trip(self): grand_parent.append(parent) parent.append(child) - refs = list(grand_parent.pre_order_reports()) + refs = list(grand_parent.pre_order_iterate()) parts = list(grand_parent.pre_order_disassemble()) # disassembled in place @@ -396,7 +446,114 @@ def test_graft_round_trip(self): assert "dummy" in grand_parent assert "dummy" in parent + @pytest.mark.parametrize( + "statuses,expected", + ( + ([Status.ERROR, Status.FAILED, Status.PASSED], Status.ERROR), + ([Status.FAILED, Status.PASSED], Status.FAILED), + ( + [Status.INCOMPLETE, Status.PASSED, Status.SKIPPED], + Status.INCOMPLETE, + ), + ([Status.SKIPPED, Status.PASSED], Status.PASSED), + ([Status.INCOMPLETE, Status.FAILED], Status.INCOMPLETE), + ), + ) + def test_status(self, statuses, expected): + """Should return the precedent status from children.""" + + reports = [ + DummyStatusReport(uid=idx, status=status) + for idx, status in enumerate(statuses) + ] + group = DummyReportGroup(entries=reports) + assert group.status == expected + + def test_status_no_entries(self): + """ + Should return Status.UNKNOWN when `status_override` + is None and report has no entries. + """ + group = DummyReportGroup() + + assert group.status_override is Status.NONE + assert group.status == Status.UNKNOWN + + def test_status_override(self): + """ + `status_override` of a group should take + precedence over child statuses. + """ + group = DummyReportGroup( + entries=[DummyStatusReport(status=Status.FAILED)] + ) + + assert group.status == Status.FAILED + + group.status_override = Status.PASSED + + assert group.status == Status.PASSED + + def test_merge(self): + """ + Should merge children and set `status_override` + using `report.status_override` precedence. 
+ """ + report_orig = DummyReportGroup(uid=1) + report_clone = DummyReportGroup(uid=1) + + assert report_orig.status_override is Status.NONE + + report_clone.status_override = Status.PASSED + + with mock.patch.object(report_orig, "merge_entries"): + report_orig.merge(report_clone) + report_orig.merge_entries.assert_called_once_with( + report_clone, strict=True + ) + assert report_orig.status_override == report_clone.status_override + + def test_hash(self): + """ + Test that a hash is generated for report groups, which depends on the + entries they contain. + """ + grand_parent = DummyReportGroup() + parent = DummyReportGroup() + child = Report(name="testcase") + + orig_root_hash = grand_parent.hash + + grand_parent.append(parent) + updated_root_hash = grand_parent.hash + assert updated_root_hash != orig_root_hash + + parent.append(child) + + orig_root_hash = updated_root_hash + updated_root_hash = grand_parent.hash + assert updated_root_hash != orig_root_hash + + child.append({"name": "entry", "passed": True}) + + orig_root_hash = updated_root_hash + updated_root_hash = grand_parent.hash + assert updated_root_hash != orig_root_hash + def test_report_categories_type(): assert ReportCategories.MULTITEST == "multitest" assert type(ReportCategories.MULTITEST) is str + + +def test_runtime_status_basic_op(): + assert RuntimeStatus.WAITING < RuntimeStatus.READY + assert RuntimeStatus.RESETTING >= RuntimeStatus.RUNNING + assert RuntimeStatus.RUNNING.precede(RuntimeStatus.FINISHED) + assert RuntimeStatus.NOT_RUN < RuntimeStatus.NONE + assert not RuntimeStatus.NONE + + assert RuntimeStatus.NOT_RUN.to_json_compatible() == "not_run" + assert ( + RuntimeStatus.from_json_compatible("not_run") == RuntimeStatus.NOT_RUN + ) diff --git a/tests/unit/testplan/report/test_testing.py b/tests/unit/testplan/report/test_testing.py index 2680d32c5..f82cd543f 100644 --- a/tests/unit/testplan/report/test_testing.py +++ b/tests/unit/testplan/report/test_testing.py @@ -1,22 +1,19 @@ -# TODO: move certain tests to tests/unit/testplan/common/report/test_base.py - import functools import json from collections import OrderedDict -from unittest import mock import pytest from boltons.iterutils import get_path from testplan.common import entity -from testplan.common.report.log import LOGGER as report_logger -from testplan.common.utils.testing import check_report, disable_log_propagation from testplan.common.report import ( - Status, - RuntimeStatus, ReportCategories, - BaseReportGroup, + RuntimeStatus, + Status, ) +from testplan.common.report.log import LOGGER as report_logger +from testplan.common.utils.json import json_dumps, json_loads +from testplan.common.utils.testing import check_report, disable_log_propagation from testplan.report.testing.base import ( TestCaseReport, TestGroupReport, @@ -25,50 +22,8 @@ from testplan.report.testing.schemas import TestReportSchema from testplan.testing.result import Result -DummyReport = functools.partial(TestCaseReport, name="dummy") -DummyReportGroup = functools.partial(BaseReportGroup, name="dummy") - - -def test_report_status_basic_op(): - assert Status.ERROR <= Status.ERROR - assert Status.FAILED > Status.ERROR - assert Status.INCOMPLETE < Status.FAILED - with pytest.raises(TypeError): - Status.INCOMPLETE < Status.XPASS_STRICT - with pytest.raises(TypeError): - Status.XFAIL >= Status.SKIPPED - assert Status.XFAIL != Status.XPASS - assert Status.XFAIL is not Status.XPASS - assert Status.UNKNOWN < Status.NONE - assert not Status.NONE - - assert Status.XPASS_STRICT.normalised() is 
Status.FAILED - assert Status.PASSED.normalised() is Status.PASSED - - assert not Status.INCOMPLETE.precede(Status.XPASS_STRICT) - assert Status.INCOMPLETE.precede(Status.FAILED) - - -def test_report_status_precedent(): - """ - `precedent` should return the value with the - highest precedence (the lowest index). - """ - - assert Status.FAILED == Status.precedent([Status.FAILED, Status.UNKNOWN]) - assert Status.ERROR == Status.precedent([Status.ERROR, Status.UNKNOWN]) - assert Status.INCOMPLETE == Status.precedent( - [Status.INCOMPLETE, Status.UNKNOWN] - ) - assert Status.XPASS_STRICT == Status.precedent( - [Status.XPASS_STRICT, Status.UNKNOWN] - ) - assert Status.UNKNOWN == Status.precedent([Status.UNKNOWN, Status.PASSED]) - assert Status.PASSED == Status.precedent([Status.PASSED, Status.SKIPPED]) - assert Status.PASSED == Status.precedent([Status.PASSED, Status.XFAIL]) - assert Status.PASSED == Status.precedent([Status.PASSED, Status.XPASS]) - assert Status.PASSED == Status.precedent([Status.PASSED, Status.UNSTABLE]) - assert Status.UNSTABLE == Status.precedent([Status.UNSTABLE, Status.NONE]) +DummyCaseReport = functools.partial(TestCaseReport, name="dummy") +DummyGroupReport = functools.partial(TestGroupReport, name="dummy") @disable_log_propagation(report_logger) @@ -93,93 +48,37 @@ def test_report_exception_logger(): assert rep.status_override is Status.ERROR -class DummyStatusReport: - def __init__(self, status, uid=None): - self.uid = uid or 0 - self.status = status - - -class TestBaseReportGroup: - @pytest.mark.parametrize( - "statuses,expected", - ( - ([Status.ERROR, Status.FAILED, Status.PASSED], Status.ERROR), - ([Status.FAILED, Status.PASSED], Status.FAILED), - ( - [Status.INCOMPLETE, Status.PASSED, Status.SKIPPED], - Status.INCOMPLETE, - ), - ([Status.SKIPPED, Status.PASSED], Status.PASSED), - ([Status.INCOMPLETE, Status.FAILED], Status.INCOMPLETE), - ), - ) - def test_status(self, statuses, expected): - """Should return the precedent status from children.""" - - reports = [ - DummyStatusReport(uid=idx, status=status) - for idx, status in enumerate(statuses) - ] - group = DummyReportGroup(entries=reports) - assert group.status == expected - - def test_status_no_entries(self): - """ - Should return Status.UNKNOWN when `status_override` - is None and report has no entries. - """ - group = DummyReportGroup() - - assert group.status_override is Status.NONE - assert group.status == Status.UNKNOWN - - def test_status_override(self): - """ - `status_override` of a group should take - precedence over child statuses. - """ - group = DummyReportGroup( - entries=[DummyStatusReport(status=Status.FAILED)] - ) - - assert group.status == Status.FAILED - - group.status_override = Status.PASSED - - assert group.status == Status.PASSED - - def test_merge(self): +class TestTestGroupReport: + def test_hash_merge(self): """ - Should merge children and set `status_override` - using `report.status_override` precedence. + Test that the hash is updated after new report entries are merged in. 
""" - report_orig = DummyReportGroup(uid=1) - report_clone = DummyReportGroup(uid=1) - - assert report_orig.status_override is Status.NONE + parent = DummyGroupReport() + child = DummyCaseReport(name="testcase") + parent.append(child) + orig_parent_hash = parent.hash - report_clone.status_override = Status.PASSED + parent2 = DummyGroupReport(uid=parent.uid) + child2 = DummyCaseReport(name="testcase", uid=child.uid) + child2.append({"name": "entry", "passed": True}) + parent2.append(child2) - with mock.patch.object(report_orig, "merge_entries"): - report_orig.merge(report_clone) - report_orig.merge_entries.assert_called_once_with( - report_clone, strict=True - ) - assert report_orig.status_override == report_clone.status_override + parent.merge(parent2) + assert parent.hash != orig_parent_hash def test_merge_children_not_strict(self): """ Not strict merge should append child entries and update the index if they do not exist in the parent. """ - child_clone_1 = DummyReport(uid=10) - child_clone_2 = DummyReport(uid=20) - parent_clone = DummyReportGroup( + child_clone_1 = DummyCaseReport(uid=10) + child_clone_2 = DummyCaseReport(uid=20) + parent_clone = DummyGroupReport( uid=1, entries=[child_clone_1, child_clone_2] ) - child_orig_1 = DummyReport(uid=10) - parent_orig = DummyReportGroup(uid=1, entries=[child_orig_1]) + child_orig_1 = DummyCaseReport(uid=10) + parent_orig = DummyGroupReport(uid=1, entries=[child_orig_1]) parent_orig.merge(parent_clone, strict=False) assert parent_orig.entries == [child_orig_1, child_clone_2] @@ -188,50 +87,6 @@ def test_merge_children_not_strict(self): parent_orig.merge(parent_clone, strict=False) assert parent_orig.entries == [child_orig_1, child_clone_2] - def test_hash(self): - """ - Test that a hash is generated for report groups, which depends on the - entries they contain. - """ - grand_parent = DummyReportGroup() - parent = DummyReportGroup() - child = TestCaseReport(name="testcase") - - orig_root_hash = grand_parent.hash - - grand_parent.append(parent) - updated_root_hash = grand_parent.hash - assert updated_root_hash != orig_root_hash - - parent.append(child) - - orig_root_hash = updated_root_hash - updated_root_hash = grand_parent.hash - assert updated_root_hash != orig_root_hash - - child.append({"name": "entry", "passed": True}) - - orig_root_hash = updated_root_hash - updated_root_hash = grand_parent.hash - assert updated_root_hash != orig_root_hash - - def test_hash_merge(self): - """ - Test that the hash is updated after new report entries are merged in. 
- """ - parent = DummyReportGroup() - child = TestCaseReport(name="testcase") - parent.append(child) - orig_parent_hash = parent.hash - - parent2 = DummyReportGroup(uid=parent.uid) - child2 = TestCaseReport(name="testcase", uid=child.uid) - child2.append({"name": "entry", "passed": True}) - parent2.append(child2) - - parent.merge(parent2) - assert parent.hash != orig_parent_hash - class TestTestCaseReport: @pytest.mark.parametrize( @@ -420,8 +275,8 @@ def test_report_json_serialization(dummy_test_plan_report): """JSON Serialized & deserialized reports should be equal.""" test_plan_schema = TestReportSchema() - data = test_plan_schema.dumps(dummy_test_plan_report) - deserialized_report = test_plan_schema.loads(data) + data = json_dumps(test_plan_schema.dump(dummy_test_plan_report)) + deserialized_report = test_plan_schema.load(json_loads(data)) check_report(actual=deserialized_report, expected=dummy_test_plan_report) @@ -596,19 +451,6 @@ def iter_report_entries(report): yield from iter_report_entries(entry) -def test_runtime_status_basic_op(): - assert RuntimeStatus.WAITING < RuntimeStatus.READY - assert RuntimeStatus.RESETTING >= RuntimeStatus.RUNNING - assert RuntimeStatus.RUNNING.precede(RuntimeStatus.FINISHED) - assert RuntimeStatus.NOT_RUN < RuntimeStatus.NONE - assert not RuntimeStatus.NONE - - assert RuntimeStatus.NOT_RUN.to_json_compatible() == "not_run" - assert ( - RuntimeStatus.from_json_compatible("not_run") == RuntimeStatus.NOT_RUN - ) - - def test_runtime_status_setting(dummy_test_plan_report): for status in list(RuntimeStatus)[:-1]: dummy_test_plan_report.runtime_status = status diff --git a/tests/unit/testplan/test_parser.py b/tests/unit/testplan/test_parser.py index 1222287c8..b387ba1b7 100644 --- a/tests/unit/testplan/test_parser.py +++ b/tests/unit/testplan/test_parser.py @@ -1,7 +1,4 @@ -from jsonschema.cli import parse_args - from testplan import TestplanMock -from testplan.parser import TestplanParser from testplan.testing.listing import SimpleJsonLister, NameLister
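For reference, a minimal usage sketch of the ``testplan.common.utils.json`` wrapper introduced in this series (the payload below is illustrative only). The wrapper prefers ``orjson`` when it can be imported and falls back to the standard-library ``json`` otherwise; this is also why ``EntriesField`` now maps non-finite floats to the strings ``"NaN"``/``"Infinity"``/``"-Infinity"`` before dumping, since ``orjson``, unlike the stdlib encoder, does not emit those non-standard tokens:

.. code-block:: python

    from testplan.common.utils.json import json_dumps, json_loads

    # Values that are not natively JSON-serialisable are stringified via the
    # ``default`` hook, mirroring how the HTTP exporter calls json_dumps.
    payload = {"name": "plan", "timer": object()}

    text = json_dumps(payload, indent_2=True, default=str)
    assert json_loads(text)["name"] == "plan"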