diff --git a/.ci/build_wheel.py b/.ci/build_wheel.py
index 6acac20032..260c796c1e 100644
--- a/.ci/build_wheel.py
+++ b/.ci/build_wheel.py
@@ -15,6 +15,9 @@
     "win": "win_amd64",
     "manylinux1": "manylinux1_x86_64",
     "manylinux_2_17": "manylinux_2_17_x86_64",
+    "linux": "manylinux_2_17_x86_64",  # Accommodate tox.ini platform substitutions
+    "win32": "win_amd64",
+    "darwin": "any",
 }
 
 argParser = argparse.ArgumentParser()
diff --git a/requirements/requirements_docs.txt b/requirements/requirements_docs.txt
index 46e8fbb8d2..e1d01cf017 100644
--- a/requirements/requirements_docs.txt
+++ b/requirements/requirements_docs.txt
@@ -1,4 +1,4 @@
-ansys-sphinx-theme[autoapi]==1.2.3
+ansys-sphinx-theme[autoapi]==1.2.4
 enum-tools[sphinx]==0.12.0
 graphviz==0.20.1
 imageio==2.36.0
diff --git a/requirements/requirements_install.txt b/requirements/requirements_install.txt
index 831f4a7e72..94d5b5c6eb 100644
--- a/requirements/requirements_install.txt
+++ b/requirements/requirements_install.txt
@@ -1,5 +1,5 @@
 importlib-metadata==8.5.0
 numpy==2.1.3
 packaging==24.2
-psutil==6.1.0
+psutil==6.1.1
 tqdm==4.67.1
diff --git a/src/ansys/dpf/core/dpf_operator.py b/src/ansys/dpf/core/dpf_operator.py
index 62578d3d0d..ed5f69182a 100644
--- a/src/ansys/dpf/core/dpf_operator.py
+++ b/src/ansys/dpf/core/dpf_operator.py
@@ -829,8 +829,10 @@ def _find_outputs_corresponding_pins(self, type_names, inpt, pin, corresponding_
         if python_name == "B":
             python_name = "bool"
 
+        # Direct type match
         if type(inpt).__name__ == python_name:
             corresponding_pins.append(pin)
+        # If the input has multiple potential outputs, find which ones can match
         elif isinstance(inpt, (_Outputs, Operator, Result)):
             if isinstance(inpt, Operator):
                 output_pin_available = inpt.outputs._get_given_output([python_name])
@@ -840,12 +842,14 @@
                 output_pin_available = inpt._get_given_output([python_name])
             for outputpin in output_pin_available:
                 corresponding_pins.append((pin, outputpin))
+        # Match when the pin accepts "Any"; otherwise check the Output's expected types
         elif isinstance(inpt, Output):
-            for inpttype in inpt._python_expected_types:
-                if inpttype == python_name:
-                    corresponding_pins.append(pin)
             if python_name == "Any":
                 corresponding_pins.append(pin)
+            else:
+                for inpttype in inpt._python_expected_types:
+                    if inpttype == python_name:
+                        corresponding_pins.append(pin)
         elif python_name == "Any":
             corresponding_pins.append(pin)
diff --git a/tests/test_operator.py b/tests/test_operator.py
index 380c5edbd7..124e988b50 100644
--- a/tests/test_operator.py
+++ b/tests/test_operator.py
@@ -1496,3 +1496,10 @@ def test_operator_id(server_type):
         assert op.id not in ids
         ids.add(op.id)
+
+
+def test_operator_find_outputs_corresponding_pins_any(server_type):
+    # Should not raise: an "Any" output must resolve to a single matching "Any" input pin
+    f1 = ops.utility.forward()
+    f2 = ops.utility.forward()
+    f2.inputs.any.connect(f1.outputs.any)
diff --git a/tox.ini b/tox.ini
new file mode 100644
index 0000000000..697a6eab17
--- /dev/null
+++ b/tox.ini
@@ -0,0 +1,118 @@
+# This is a work in progress: the local/CI testing workflow is gradually being transferred to tox
+
+# Usage instructions:
+# `tox` runs all tests sequentially; `tox --parallel` runs all tests in parallel (much faster).
+# Run a specific selection of tests with `tox -e pretest,<envs>,posttest`, e.g. `tox -e pretest,test-api,test-launcher,posttest`
+# The `--parallel` flag can also be passed when running specific selections.
+
+[tox]
+description = Default tox environment list and core configuration
+
+# List all tests to run in parallel or sequential mode here,
+# so that plain `tox` runs them all sequentially and `tox --parallel` runs them all in parallel
+envlist = pretest,test-{api,launcher,server,local_server,multi_server,remote_workflow,remote_operator,workflow,service,operators},posttest
+
+isolated_build_env = build
+
+[testenv]
+description = Default configuration for test environments, unless overridden
+
+pass_env =
+    PACKAGE_NAME
+    MODULE
+    ANSYS_DPF_ACCEPT_LA
+    ANSYSLMD_LICENSE_FILE
+    AWP_ROOT242
+
+package = external  # To allow custom wheel builds
+
+[testenv:build_external]
+description = Environment for building custom package wheels, as PyDPF requires custom-built wheels
+
+package_glob = {toxinidir}{/}dist{/}ansys_dpf_core*
+
+# The {on_platform} substitution automatically detects the OS type.
+commands =
+    python .ci/build_wheel.py -p {on_platform} -w
+
+[testenv:pretest]
+description = Environment to kill servers and organize test files prior to testing
+
+deps =
+    psutil
+
+skip_install = True
+
+commands =
+    # Clear any running servers that may be locking resources
+    python -c "import psutil; proc_name = 'Ans.Dpf.Grpc'; nb_procs = len([proc.kill() for proc in psutil.process_iter() if proc_name in proc.name()]); \
+    print(f'Killed \{nb_procs} \{proc_name} processes.')"
+
+    # Organize test files
+    python -c "\
+    import os, shutil; \
+    test_data=['test_launcher','test_server','test_local_server','test_multi_server','test_workflow','test_remote_workflow','test_remote_operator','test_service','test_custom_type_field']; \
+    [(os.makedirs(d, exist_ok=True), shutil.copy('tests/conftest.py', d), shutil.copy(f'tests/\{d}.py', d) if os.path.exists(f'tests/\{d}.py') else None) for d in test_data]; \
+    [os.remove(f'tests/\{d}.py') for d in test_data if os.path.exists(f'tests/\{d}.py')]"
+
+[testenv:posttest]
+description = Environment to kill servers and revert test files to their original state after testing
+
+depends = pretest, test-{api,launcher,server,local_server,multi_server,remote_workflow,remote_operator,workflow,service,operators}
+
+deps =
+    psutil
+
+skip_install = True
+
+commands =
+    # Revert project layout to its previous state
+    python -c "\
+    import os, shutil; \
+    test_data=['test_launcher','test_server','test_local_server','test_multi_server','test_workflow','test_remote_workflow','test_remote_operator','test_service','test_custom_type_field']; \
+    [shutil.move(f'\{d}/\{d}.py', f'tests/\{d}.py') for d in test_data if os.path.exists(f'\{d}/\{d}.py')]; \
+    [shutil.rmtree(d) for d in test_data if os.path.exists(d)]"
+
+    # Clear any running servers that may be locking resources
+    python -c "import psutil; proc_name = 'Ans.Dpf.Grpc'; nb_procs = len([proc.kill() for proc in psutil.process_iter() if proc_name in proc.name()]); \
+    print(f'Killed \{nb_procs} \{proc_name} processes.')"
+
+[testenv:test-{api,launcher,server,local_server,multi_server,remote_workflow,remote_operator,workflow,service,operators}]
+description = Environment where the project testing configuration is defined
+
+depends = pretest
+
+setenv =
+    # Pytest extra arguments
+    COVERAGE = --cov=ansys.dpf.core --cov-report=xml --cov-report=html --log-level=ERROR --cov-append
+    RERUNS = --reruns=2 --reruns-delay=1
+    DEBUG = -v -s --durations=10 --durations-min=1.0
+
+    api: JUNITXML = --junitxml=tests/junit/test-results.xml
+    launcher: JUNITXML = --junitxml=tests/junit/test-results2.xml
+    server: JUNITXML = --junitxml=tests/junit/test-results3.xml
+    local_server: JUNITXML = --junitxml=tests/junit/test-results4.xml
+    multi_server: JUNITXML = --junitxml=tests/junit/test-results5.xml
+    remote_workflow: JUNITXML = --junitxml=tests/junit/test-results6.xml
+    remote_operator: JUNITXML = --junitxml=tests/junit/test-results7.xml
+    workflow: JUNITXML = --junitxml=tests/junit/test-results8.xml
+    service: JUNITXML = --junitxml=tests/junit/test-results9.xml
+    operators: JUNITXML = --junitxml=../tests/junit/test-results12.xml
+
+    # Test sets
+    api: PYTEST_PYTHON_FILES = tests
+    launcher: PYTEST_PYTHON_FILES = test_launcher
+    server: PYTEST_PYTHON_FILES = test_server
+    local_server: PYTEST_PYTHON_FILES = test_local_server
+    multi_server: PYTEST_PYTHON_FILES = test_multi_server
+    remote_workflow: PYTEST_PYTHON_FILES = test_remote_workflow
+    remote_operator: PYTEST_PYTHON_FILES = test_remote_operator
+    workflow: PYTEST_PYTHON_FILES = test_workflow
+    service: PYTEST_PYTHON_FILES = test_service
+    operators: PYTEST_PYTHON_FILES = tests/operators
+
+deps =
+    -r requirements/requirements_test.txt
+
+commands =
+    pytest {env:PYTEST_PYTHON_FILES} {env:DEBUG} {env:COVERAGE} {env:RERUNS} {env:JUNITXML}
\ No newline at end of file
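
A minimal standalone sketch of the reordered Output matching in the dpf_operator.py hunk above; FakeOutput and pins_matching_output are illustrative stand-ins, not PyDPF API:

class FakeOutput:
    """Illustrative stand-in for an operator Output and its expected types."""

    def __init__(self, expected_types):
        self._python_expected_types = expected_types


def pins_matching_output(python_name, output, pin):
    corresponding_pins = []
    if python_name == "Any":
        # "Any" pins always match; the else branch keeps the expected-type
        # loop from appending the same pin a second time when the output's
        # expected types also contain "Any".
        corresponding_pins.append(pin)
    else:
        for expected in output._python_expected_types:
            if expected == python_name:
                corresponding_pins.append(pin)
    return corresponding_pins


# Before the reordering, an Any-to-Any connection (as exercised by the new
# test_operator_find_outputs_corresponding_pins_any test) could collect the
# same pin twice, making the match ambiguous; now it is collected once.
assert pins_matching_output("Any", FakeOutput(["Any"]), pin=0) == [0]
assert pins_matching_output("int", FakeOutput(["int", "float"]), pin=1) == [1]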
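
For readability, the server-cleanup one-liner in the pretest/posttest commands expands to the following plain Python (an equivalent sketch, not part of the patch):

import psutil

# Kill lingering DPF gRPC servers ("Ans.Dpf.Grpc" processes) that could
# lock test resources, and report how many were terminated.
proc_name = "Ans.Dpf.Grpc"
nb_procs = 0
for proc in psutil.process_iter():
    if proc_name in proc.name():
        proc.kill()
        nb_procs += 1
print(f"Killed {nb_procs} {proc_name} processes.")

Like the one-liner, this assumes processes do not exit mid-iteration; wrapping proc.name() in a try/except for psutil.NoSuchProcess would make it more robust.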