From ae6ae130d9d53c397ddd7992f9846375749b5051 Mon Sep 17 00:00:00 2001
From: Katia Patkin <87335417+kptkin@users.noreply.github.com>
Date: Thu, 19 Jan 2023 14:50:44 -0800
Subject: [PATCH] chore(sdk): split unit tests into system tests and proper
 unit tests (#4811)

---
 .circleci/config.yml | 164 +++--
 .codecov.yml | 4 +-
 .flake8-base | 7 +-
 .../check-mocks-synced-with-yea-wandb.yml | 6 +-
 pytest.ini | 4 +-
 .../{unit_tests => pytest_tests}/__init__.py | 0
 .../assets/2x2.png | Bin
 .../assets/Box.gltf | 0
 .../assets/cube.obj | 0
 .../assets/events.out.tfevents.1585769947.cvp | Bin
 .../events.out.tfevents.1611911647.big-histos | Bin
 .../assets/launch_k8s_config.yaml | 17 +
 .../assets/point_cloud.pts.json | 0
 .../assets/ssl_certs/README.md | 0
 .../assets/ssl_certs/cc8b642c.0 | 0
 .../assets/ssl_certs/localhost.crt | 0
 .../assets/ssl_certs/localhost.key | 0
 .../assets/test.png | Bin
 .../assets/test2.png | Bin
 .../assets/test_mod.py | 0
 .../files/code/standalone_tests/code-toad.py | 0
 .../files/config.yaml | 0
 .../files/diff.patch | 0
 .../files/output.log | 0
 .../files/requirements.txt | 0
 ...3d0cc7146adb88cef2fcca4ec244a2d6a162.patch | 0
 .../files/wandb-metadata.json | 0
 .../files/wandb-summary.json | 0
 .../run-g9dvvkua.wandb | Bin
 .../assets/wandb_tensorflow_summary.pb | Bin
 tests/pytest_tests/conftest.py | 246 +++++++
 .../system_tests}/conftest.py | 371 ----------
 .../system_tests/test_artifacts}/conftest.py | 0
 .../test_artifacts}/test_wandb_artifacts.py | 0
 .../test_wandb_artifacts_cache.py | 0
 .../test_wandb_artifacts_full.py | 0
 .../system_tests/test_cli_full.py | 213 ++++++
 .../system_tests}/test_data_types_full.py | 0
 .../test_file_stream_internal.py | 0
 .../system_tests}/test_file_upload.py | 0
 .../system_tests}/test_footer.py | 0
 .../system_tests}/test_keras_full.py | 0
 .../system_tests}/test_kfp.py | 0
 .../system_tests}/test_label_full.py | 0
 .../system_tests}/test_metric_full.py | 0
 .../system_tests}/test_metric_internal.py | 0
 .../system_tests/test_mode_disabled_full.py} | 11 -
 .../system_tests/test_model_workflow.py | 8 +
 .../system_tests}/test_mp_full.py | 9 +-
 .../system_tests}/test_offline_sync.py | 0
 .../system_tests/test_public_api.py | 128 ++++
 .../system_tests}/test_redir_full.py | 0
 .../system_tests}/test_report_api.py | 0
 .../system_tests}/test_runtime.py | 0
 .../system_tests}/test_save_policies.py | 0
 .../system_tests}/test_sender.py | 0
 .../system_tests}/test_start_method.py | 0
 .../system_tests/test_sweep}/__init__.py | 0
 .../test_sweep/test_public_api.py | 51 ++
 .../test_sweep}/test_sweep_scheduler.py | 0
 .../test_sweep}/test_wandb_agent.py | 0
 .../test_sweep}/test_wandb_agent_full.py | 0
 .../test_sweep}/test_wandb_sweep.py | 0
 .../system_tests}/test_system_info.py | 25 +
 .../system_tests}/test_tb_watcher.py | 0
 .../system_tests}/test_telemetry_full.py | 0
 .../system_tests}/test_time_resolution.py | 0
 .../system_tests}/test_torch_full.py | 0
 .../test_validation_data_logger.py | 5 +
 .../system_tests}/test_wandb.py | 185 ++---
 .../system_tests}/test_wandb_integration.py | 0
 .../system_tests}/test_wandb_run.py | 329 ++-------
 .../system_tests/test_wandb_settings.py | 290 ++++++++
 .../system_tests}/test_wandb_tensorflow.py | 0
 .../system_tests/test_wandb_verify.py | 14 +
 .../tests_launch/test_github_reference.py | 0
 .../system_tests}/tests_launch/test_job.py | 0
 .../system_tests}/tests_launch/test_launch.py | 0
 .../tests_launch/test_launch_add.py | 0
 .../tests_launch/test_launch_cli.py | 0
 .../tests_launch/test_launch_kubernetes.py | 0
 .../tests_launch/test_launch_run.py | 0
 .../tests_launch/test_launch_sweep.py | 0
 .../tests_launch/test_wandb_reference.py | 0
 .../unit_tests}/__init__.py | 0
 tests/pytest_tests/unit_tests/conftest.py | 70 ++
 .../unit_tests/saved_model_constructors.py | 0
 .../unit_tests/test_cli.py} | 333 +++++----
 .../unit_tests/test_data_types.py | 8 +-
 .../unit_tests/test_datastore.py | 0
 .../unit_tests/test_dir_watcher.py | 0
 .../unit_tests/test_docker.py | 0
 .../unit_tests/test_dtypes.py | 0
 .../unit_tests/test_file_stream.py} | 0
 .../unit_tests/test_flow_control.py | 0
 .../unit_tests/test_import_wandb.py | 0
 .../unit_tests/test_internal_api.py | 6 +
 .../unit_tests/test_job_builder.py | 0
 .../unit_tests/test_keras.py | 0
 .../unit_tests/test_lib/test_apikey.py} | 0
 .../unit_tests/test_lib}/test_filesystem.py | 0
 .../unit_tests/test_lib}/test_fsm.py | 0
 .../unit_tests/test_lib/test_git.py} | 0
 .../unit_tests/test_lib/test_hashutil.py} | 5 +
 .../unit_tests/test_lib}/test_mailbox.py | 0
 .../unit_tests/test_lib}/test_printer.py | 0
 .../unit_tests/test_lib/test_redir.py} | 0
 .../unit_tests/test_lib}/test_run_status.py | 0
 .../unit_tests/test_lib}/test_runid.py | 0
 .../unit_tests/test_lib}/test_sock_client.py | 0
 .../unit_tests/test_lib}/test_sparkline.py | 0
 .../unit_tests/test_lib}/test_telemetry.py | 0
 .../unit_tests/test_library_public.py | 0
 .../unit_tests/test_metaflow.py | 0
 .../unit_tests/test_mode_disabled.py | 14 +
 .../unit_tests/test_model_workflows.py | 7 -
 .../unit_tests/test_monkeypatch_keras.py | 0
 .../unit_tests/test_plot.py | 0
 .../unit_tests/test_plots.py | 0
 .../unit_tests/test_profiler.py | 0
 .../unit_tests/test_public_api.py | 152 +++++
 .../unit_tests/test_require_helpers.py | 0
 .../unit_tests/test_require_user.py | 0
 .../unit_tests/test_retry.py | 0
 .../unit_tests/test_sample.py | 0
 .../unit_tests/test_saved_model.py | 0
 .../{ => pytest_tests}/unit_tests/test_ssl.py | 2 +
 .../unit_tests/test_step_upload.py | 0
 .../test_system_metrics_gpu.py | 0
 .../test_system_metrics_gpu_apple.py | 0
 .../test_system_metrics_ipu.py | 0
 .../test_system_metrics_tpu.py | 0
 .../test_system_metrics_trainium.py | 0
 .../test_system_monitor.py | 0
 .../unit_tests/test_tables.py | 0
 .../unit_tests/test_torch.py | 0
 .../unit_tests/test_util.py | 0
 .../unit_tests/test_wandb_config.py | 0
 .../unit_tests/test_wandb_define_metric.py} | 0
 .../pytest_tests/unit_tests/test_wandb_log.py | 48 ++
 .../unit_tests/test_wandb_login.py} | 0
 .../pytest_tests/unit_tests/test_wandb_run.py | 215 ++++++
 .../unit_tests/test_wandb_save.py | 79 +++
 .../unit_tests/test_wandb_settings.py | 639 +++++------------
 .../unit_tests/test_wandb_summary.py | 0
 .../unit_tests/test_wandb_verify.py | 10 -
 tests/pytest_tests/unit_tests_old/__init__.py | 0
 .../unit_tests_old/assets/2x2.png | Bin
 .../assets/fixtures/environment.yml | 0
 .../assets/fixtures/launch/launch_kube.json | 0
 .../launch/launch_sagemaker_config.json | 0
 .../fixtures/launch/launch_vertex_config.json | 0
 .../assets/fixtures/report_spec_v5.json | 0
 .../assets/fixtures/requirements.txt | 0
 .../unit_tests_old/assets/fixtures/train.py | 0
 .../assets/notebooks/code_saving.ipynb | 0
 .../assets/notebooks/ipython_exit.py | 0
 .../assets/notebooks/login_timeout.ipynb | 0
 .../assets/notebooks/magic.ipynb | 0
 .../assets/notebooks/one_cell.ipynb | 0
 .../assets/notebooks/setup.ipynb | 0
 .../unit_tests_old/conftest.py | 0
 .../unit_tests_old/internal_update_test.py | 0
 .../unit_tests_old/logs/cleanup.sh | 0
 .../unit_tests_old/test_cli.py | 0
 .../unit_tests_old/test_data_types.py | 0
 .../unit_tests_old/test_file_stream.py | 0
 .../unit_tests_old/test_file_upload.py | 0
 .../unit_tests_old/test_footer.py | 0
 .../unit_tests_old/test_internal_api.py | 0
 .../unit_tests_old/test_keras.py | 0
 .../unit_tests_old/test_logging.py | 0
 .../unit_tests_old/test_metric_internal.py | 0
 .../unit_tests_old/test_offline_sync.py | 2 +-
 .../unit_tests_old/test_public_api.py | 2 +-
 .../unit_tests_old/test_runtime.py | 0
 .../unit_tests_old/test_sender.py | 2 +-
 .../unit_tests_old/test_summary.py | 0
 .../unit_tests_old/test_tb_watcher.py | 0
 .../unit_tests_old/test_time_resolution.py | 0
 .../unit_tests_old/test_wandb.py | 0
 .../unit_tests_old/test_wandb_agent.py | 0
 .../unit_tests_old/test_wandb_artifacts.py | 0
 .../unit_tests_old/test_wandb_controller.py | 0
 .../unit_tests_old/test_wandb_integration.py | 4 +-
 .../unit_tests_old/test_wandb_run.py | 0
 .../unit_tests_old/tests_launch/__init__.py | 0
 .../tests_launch/test_kaniko_build.py | 2 +-
 .../tests_launch/test_launch.py | 2 +-
 .../tests_launch/test_launch_aws.py | 2 +-
 .../tests_launch/test_launch_cli.py | 0
 .../tests_launch/test_launch_docker.py | 0
 .../tests_launch/test_launch_gcp.py | 0
 .../tests_launch/test_launch_jobs.py | 2 +-
 .../tests_launch/test_launch_kubernetes.py | 0
 .../tests_launch/test_launch_local_process.py | 0
 .../tests_launch/test_launch_utils.py | 0
 .../tests_s_nb/test_notebooks.py | 2 +-
 .../unit_tests_old/utils/__init__.py | 0
 .../unit_tests_old/utils/artifact_emu.py | 0
 .../unit_tests_old/utils/dummy_data.py | 0
 .../unit_tests_old/utils/mock_requests.py | 0
 .../unit_tests_old/utils/mock_server.py | 0
 .../unit_tests_old/utils/notebook_client.py | 0
 .../unit_tests_old/utils/parse_metrics.py | 0
 .../unit_tests_old/utils/records.py | 0
 .../unit_tests_old/utils/utils.py | 0
 .../unit_tests/assets/launch_k8s_config.yaml | 17 -
 tests/unit_tests/test_cli.py | 200 ------
 tests/unit_tests/test_public_api.py | 323 ---------
 tox.ini | 17 +-
 211 files changed, 2175 insertions(+), 2077 deletions(-)
 rename tests/{unit_tests => pytest_tests}/__init__.py (100%)
 rename tests/{unit_tests => pytest_tests}/assets/2x2.png (100%)
 rename tests/{unit_tests => pytest_tests}/assets/Box.gltf (100%)
 rename tests/{unit_tests => pytest_tests}/assets/cube.obj (100%)
 rename tests/{unit_tests => pytest_tests}/assets/events.out.tfevents.1585769947.cvp (100%)
 rename tests/{unit_tests => pytest_tests}/assets/events.out.tfevents.1611911647.big-histos (100%)
 create mode 100644 tests/pytest_tests/assets/launch_k8s_config.yaml
 rename tests/{unit_tests => pytest_tests}/assets/point_cloud.pts.json (100%)
 rename tests/{unit_tests => pytest_tests}/assets/ssl_certs/README.md (100%)
 rename tests/{unit_tests => pytest_tests}/assets/ssl_certs/cc8b642c.0 (100%)
 rename tests/{unit_tests => pytest_tests}/assets/ssl_certs/localhost.crt (100%)
 rename tests/{unit_tests => pytest_tests}/assets/ssl_certs/localhost.key (100%)
 rename tests/{unit_tests => pytest_tests}/assets/test.png (100%)
 rename tests/{unit_tests => pytest_tests}/assets/test2.png (100%)
 rename tests/{unit_tests => pytest_tests}/assets/test_mod.py (100%)
 rename tests/{unit_tests => pytest_tests}/assets/wandb/offline-run-20210216_154407-g9dvvkua/files/code/standalone_tests/code-toad.py (100%)
 rename tests/{unit_tests => pytest_tests}/assets/wandb/offline-run-20210216_154407-g9dvvkua/files/config.yaml (100%)
 rename tests/{unit_tests => pytest_tests}/assets/wandb/offline-run-20210216_154407-g9dvvkua/files/diff.patch (100%)
 rename tests/{unit_tests => pytest_tests}/assets/wandb/offline-run-20210216_154407-g9dvvkua/files/output.log (100%)
 rename tests/{unit_tests => pytest_tests}/assets/wandb/offline-run-20210216_154407-g9dvvkua/files/requirements.txt (100%)
 rename tests/{unit_tests => pytest_tests}/assets/wandb/offline-run-20210216_154407-g9dvvkua/files/upstream_diff_78c43d0cc7146adb88cef2fcca4ec244a2d6a162.patch (100%)
 rename tests/{unit_tests => pytest_tests}/assets/wandb/offline-run-20210216_154407-g9dvvkua/files/wandb-metadata.json (100%)
 rename tests/{unit_tests => pytest_tests}/assets/wandb/offline-run-20210216_154407-g9dvvkua/files/wandb-summary.json (100%)
 rename tests/{unit_tests => pytest_tests}/assets/wandb/offline-run-20210216_154407-g9dvvkua/run-g9dvvkua.wandb (100%)
 rename tests/{unit_tests => pytest_tests}/assets/wandb_tensorflow_summary.pb (100%)
 create mode 100644 tests/pytest_tests/conftest.py
 rename tests/{unit_tests => pytest_tests/system_tests}/conftest.py (81%)
 rename tests/{unit_tests/artifacts => pytest_tests/system_tests/test_artifacts}/conftest.py (100%)
 rename tests/{unit_tests/artifacts => pytest_tests/system_tests/test_artifacts}/test_wandb_artifacts.py (100%)
 rename tests/{unit_tests/artifacts => pytest_tests/system_tests/test_artifacts}/test_wandb_artifacts_cache.py (100%)
 rename tests/{unit_tests/artifacts => pytest_tests/system_tests/test_artifacts}/test_wandb_artifacts_full.py (100%)
 create mode 100644 tests/pytest_tests/system_tests/test_cli_full.py
 rename tests/{unit_tests => pytest_tests/system_tests}/test_data_types_full.py (100%)
 rename tests/{unit_tests => pytest_tests/system_tests}/test_file_stream_internal.py (100%)
 rename tests/{unit_tests => pytest_tests/system_tests}/test_file_upload.py (100%)
 rename tests/{unit_tests => pytest_tests/system_tests}/test_footer.py (100%)
 rename tests/{unit_tests => pytest_tests/system_tests}/test_keras_full.py (100%)
 rename tests/{unit_tests => pytest_tests/system_tests}/test_kfp.py (100%)
 rename tests/{unit_tests => pytest_tests/system_tests}/test_label_full.py (100%)
 rename tests/{unit_tests => pytest_tests/system_tests}/test_metric_full.py (100%)
 rename tests/{unit_tests => pytest_tests/system_tests}/test_metric_internal.py (100%)
 rename tests/{unit_tests/test_mode_disabled.py => pytest_tests/system_tests/test_mode_disabled_full.py} (89%)
 create mode 100644 tests/pytest_tests/system_tests/test_model_workflow.py
 rename tests/{unit_tests => pytest_tests/system_tests}/test_mp_full.py (94%)
 rename tests/{unit_tests => pytest_tests/system_tests}/test_offline_sync.py (100%)
 create mode 100644 tests/pytest_tests/system_tests/test_public_api.py
 rename tests/{unit_tests => pytest_tests/system_tests}/test_redir_full.py (100%)
 rename tests/{unit_tests => pytest_tests/system_tests}/test_report_api.py (100%)
 rename tests/{unit_tests => pytest_tests/system_tests}/test_runtime.py (100%)
 rename tests/{unit_tests => pytest_tests/system_tests}/test_save_policies.py (100%)
 rename tests/{unit_tests => pytest_tests/system_tests}/test_sender.py (100%)
 rename tests/{unit_tests => pytest_tests/system_tests}/test_start_method.py (100%)
 rename tests/{unit_tests_old => pytest_tests/system_tests/test_sweep}/__init__.py (100%)
 create mode 100644 tests/pytest_tests/system_tests/test_sweep/test_public_api.py
 rename tests/{unit_tests => pytest_tests/system_tests/test_sweep}/test_sweep_scheduler.py (100%)
 rename tests/{unit_tests => pytest_tests/system_tests/test_sweep}/test_wandb_agent.py (100%)
 rename tests/{unit_tests => pytest_tests/system_tests/test_sweep}/test_wandb_agent_full.py (100%)
 rename tests/{unit_tests => pytest_tests/system_tests/test_sweep}/test_wandb_sweep.py (100%)
 rename tests/{unit_tests => pytest_tests/system_tests}/test_system_info.py (81%)
 rename tests/{unit_tests => pytest_tests/system_tests}/test_tb_watcher.py (100%)
 rename tests/{unit_tests => pytest_tests/system_tests}/test_telemetry_full.py (100%)
 rename tests/{unit_tests => pytest_tests/system_tests}/test_time_resolution.py (100%)
 rename tests/{unit_tests => pytest_tests/system_tests}/test_torch_full.py (100%)
 rename tests/{unit_tests => pytest_tests/system_tests}/test_validation_data_logger.py (99%)
 rename tests/{unit_tests => pytest_tests/system_tests}/test_wandb.py (74%)
 rename tests/{unit_tests => pytest_tests/system_tests}/test_wandb_integration.py (100%)
 rename tests/{unit_tests => pytest_tests/system_tests}/test_wandb_run.py (59%)
 create mode 100644 tests/pytest_tests/system_tests/test_wandb_settings.py
 rename tests/{unit_tests => pytest_tests/system_tests}/test_wandb_tensorflow.py (100%)
 create mode 100644 tests/pytest_tests/system_tests/test_wandb_verify.py
 rename tests/{unit_tests => pytest_tests/system_tests}/tests_launch/test_github_reference.py (100%)
 rename tests/{unit_tests => pytest_tests/system_tests}/tests_launch/test_job.py (100%)
 rename tests/{unit_tests => pytest_tests/system_tests}/tests_launch/test_launch.py (100%)
 rename tests/{unit_tests => pytest_tests/system_tests}/tests_launch/test_launch_add.py (100%)
 rename tests/{unit_tests => pytest_tests/system_tests}/tests_launch/test_launch_cli.py (100%)
 rename tests/{unit_tests => pytest_tests/system_tests}/tests_launch/test_launch_kubernetes.py (100%)
 rename tests/{unit_tests => pytest_tests/system_tests}/tests_launch/test_launch_run.py (100%)
 rename tests/{unit_tests => pytest_tests/system_tests}/tests_launch/test_launch_sweep.py (100%)
 rename tests/{unit_tests => pytest_tests/system_tests}/tests_launch/test_wandb_reference.py (100%)
 rename tests/{unit_tests_old/tests_launch => pytest_tests/unit_tests}/__init__.py (100%)
 create mode 100644 tests/pytest_tests/unit_tests/conftest.py
 rename tests/{ => pytest_tests}/unit_tests/saved_model_constructors.py (100%)
 rename tests/{unit_tests/test_cli_full.py => pytest_tests/unit_tests/test_cli.py} (69%)
 rename tests/{ => pytest_tests}/unit_tests/test_data_types.py (99%)
 rename tests/{ => pytest_tests}/unit_tests/test_datastore.py (100%)
 rename tests/{ => pytest_tests}/unit_tests/test_dir_watcher.py (100%)
 rename tests/{ => pytest_tests}/unit_tests/test_docker.py (100%)
 rename tests/{ => pytest_tests}/unit_tests/test_dtypes.py (100%)
 rename tests/{unit_tests/test_file_stream_user.py => pytest_tests/unit_tests/test_file_stream.py} (100%)
 rename tests/{ => pytest_tests}/unit_tests/test_flow_control.py (100%)
 rename tests/{ => pytest_tests}/unit_tests/test_import_wandb.py (100%)
 rename tests/{ => pytest_tests}/unit_tests/test_internal_api.py (99%)
 rename tests/{ => pytest_tests}/unit_tests/test_job_builder.py (100%)
 rename tests/{ => pytest_tests}/unit_tests/test_keras.py (100%)
 rename tests/{unit_tests/test_lib.py => pytest_tests/unit_tests/test_lib/test_apikey.py} (100%)
 rename tests/{unit_tests/lib => pytest_tests/unit_tests/test_lib}/test_filesystem.py (100%)
 rename tests/{unit_tests => pytest_tests/unit_tests/test_lib}/test_fsm.py (100%)
 rename tests/{unit_tests/test_git_repo.py => pytest_tests/unit_tests/test_lib/test_git.py} (100%)
 rename tests/{unit_tests/test_lib_hashutil.py => pytest_tests/unit_tests/test_lib/test_hashutil.py} (89%)
 rename tests/{unit_tests => pytest_tests/unit_tests/test_lib}/test_mailbox.py (100%)
 rename tests/{unit_tests => pytest_tests/unit_tests/test_lib}/test_printer.py (100%)
 rename tests/{unit_tests/test_redir_user.py => pytest_tests/unit_tests/test_lib/test_redir.py} (100%)
 rename tests/{unit_tests => pytest_tests/unit_tests/test_lib}/test_run_status.py (100%)
 rename tests/{unit_tests/lib => pytest_tests/unit_tests/test_lib}/test_runid.py (100%)
 rename tests/{unit_tests => pytest_tests/unit_tests/test_lib}/test_sock_client.py (100%)
 rename tests/{unit_tests => pytest_tests/unit_tests/test_lib}/test_sparkline.py (100%)
 rename tests/{unit_tests => pytest_tests/unit_tests/test_lib}/test_telemetry.py (100%)
 rename tests/{ => pytest_tests}/unit_tests/test_library_public.py (100%)
 rename tests/{ => pytest_tests}/unit_tests/test_metaflow.py (100%)
 create mode 100644 tests/pytest_tests/unit_tests/test_mode_disabled.py
 rename tests/{ => pytest_tests}/unit_tests/test_model_workflows.py (80%)
 rename tests/{ => pytest_tests}/unit_tests/test_monkeypatch_keras.py (100%)
 rename tests/{ => pytest_tests}/unit_tests/test_plot.py (100%)
 rename tests/{ => pytest_tests}/unit_tests/test_plots.py (100%)
 rename tests/{ => pytest_tests}/unit_tests/test_profiler.py (100%)
 create mode 100644 tests/pytest_tests/unit_tests/test_public_api.py
 rename tests/{ => pytest_tests}/unit_tests/test_require_helpers.py (100%)
 rename tests/{ => pytest_tests}/unit_tests/test_require_user.py (100%)
 rename tests/{ => pytest_tests}/unit_tests/test_retry.py (100%)
 rename tests/{ => pytest_tests}/unit_tests/test_sample.py (100%)
 rename tests/{ => pytest_tests}/unit_tests/test_saved_model.py (100%)
 rename tests/{ => pytest_tests}/unit_tests/test_ssl.py (96%)
 rename tests/{ => pytest_tests}/unit_tests/test_step_upload.py (100%)
 rename tests/{unit_tests => pytest_tests/unit_tests/test_system_metric}/test_system_metrics_gpu.py (100%)
 rename tests/{unit_tests => pytest_tests/unit_tests/test_system_metric}/test_system_metrics_gpu_apple.py (100%)
 rename tests/{unit_tests => pytest_tests/unit_tests/test_system_metric}/test_system_metrics_ipu.py (100%)
 rename tests/{unit_tests => pytest_tests/unit_tests/test_system_metric}/test_system_metrics_tpu.py (100%)
 rename tests/{unit_tests => pytest_tests/unit_tests/test_system_metric}/test_system_metrics_trainium.py (100%)
 rename tests/{unit_tests => pytest_tests/unit_tests/test_system_metric}/test_system_monitor.py (100%)
 rename tests/{ => pytest_tests}/unit_tests/test_tables.py (100%)
 rename tests/{ => pytest_tests}/unit_tests/test_torch.py (100%)
 rename tests/{ => pytest_tests}/unit_tests/test_util.py (100%)
 rename tests/{ => pytest_tests}/unit_tests/test_wandb_config.py (100%)
 rename tests/{unit_tests/test_metric_user.py => pytest_tests/unit_tests/test_wandb_define_metric.py} (100%)
 create mode 100644 tests/pytest_tests/unit_tests/test_wandb_log.py
 rename tests/{unit_tests/test_login.py => pytest_tests/unit_tests/test_wandb_login.py} (100%)
 create mode 100644 tests/pytest_tests/unit_tests/test_wandb_run.py
 create mode 100644 tests/pytest_tests/unit_tests/test_wandb_save.py
 rename tests/{ => pytest_tests}/unit_tests/test_wandb_settings.py (71%)
 rename tests/{ => pytest_tests}/unit_tests/test_wandb_summary.py (100%)
 rename tests/{ => pytest_tests}/unit_tests/test_wandb_verify.py (87%)
 create mode 100644 tests/pytest_tests/unit_tests_old/__init__.py
 rename tests/{ => pytest_tests}/unit_tests_old/assets/2x2.png (100%)
 rename tests/{ => pytest_tests}/unit_tests_old/assets/fixtures/environment.yml (100%)
 rename tests/{ => pytest_tests}/unit_tests_old/assets/fixtures/launch/launch_kube.json (100%)
 rename tests/{ => pytest_tests}/unit_tests_old/assets/fixtures/launch/launch_sagemaker_config.json (100%)
 rename tests/{ => pytest_tests}/unit_tests_old/assets/fixtures/launch/launch_vertex_config.json (100%)
 rename tests/{ => pytest_tests}/unit_tests_old/assets/fixtures/report_spec_v5.json (100%)
 rename tests/{ => pytest_tests}/unit_tests_old/assets/fixtures/requirements.txt (100%)
 rename tests/{ => pytest_tests}/unit_tests_old/assets/fixtures/train.py (100%)
 rename tests/{ => pytest_tests}/unit_tests_old/assets/notebooks/code_saving.ipynb (100%)
 rename tests/{ => pytest_tests}/unit_tests_old/assets/notebooks/ipython_exit.py (100%)
 rename tests/{ => pytest_tests}/unit_tests_old/assets/notebooks/login_timeout.ipynb (100%)
 rename tests/{ => pytest_tests}/unit_tests_old/assets/notebooks/magic.ipynb (100%)
 rename tests/{ => pytest_tests}/unit_tests_old/assets/notebooks/one_cell.ipynb (100%)
 rename tests/{ => pytest_tests}/unit_tests_old/assets/notebooks/setup.ipynb (100%)
 rename tests/{ => pytest_tests}/unit_tests_old/conftest.py (100%)
 rename tests/{ => pytest_tests}/unit_tests_old/internal_update_test.py (100%)
 rename tests/{ => pytest_tests}/unit_tests_old/logs/cleanup.sh (100%)
 rename tests/{ => pytest_tests}/unit_tests_old/test_cli.py (100%)
 rename tests/{ => pytest_tests}/unit_tests_old/test_data_types.py (100%)
 rename tests/{ => pytest_tests}/unit_tests_old/test_file_stream.py (100%)
 rename tests/{ => pytest_tests}/unit_tests_old/test_file_upload.py (100%)
 rename tests/{ => pytest_tests}/unit_tests_old/test_footer.py (100%)
 rename tests/{ => pytest_tests}/unit_tests_old/test_internal_api.py (100%)
 rename tests/{ => pytest_tests}/unit_tests_old/test_keras.py (100%)
 rename tests/{ => pytest_tests}/unit_tests_old/test_logging.py (100%)
 rename tests/{ => pytest_tests}/unit_tests_old/test_metric_internal.py (100%)
 rename tests/{ => pytest_tests}/unit_tests_old/test_offline_sync.py (98%)
 rename tests/{ => pytest_tests}/unit_tests_old/test_public_api.py (99%)
 rename tests/{ => pytest_tests}/unit_tests_old/test_runtime.py (100%)
 rename tests/{ => pytest_tests}/unit_tests_old/test_sender.py (99%)
 rename tests/{ => pytest_tests}/unit_tests_old/test_summary.py (100%)
 rename tests/{ => pytest_tests}/unit_tests_old/test_tb_watcher.py (100%)
 rename tests/{ => pytest_tests}/unit_tests_old/test_time_resolution.py (100%)
 rename tests/{ => pytest_tests}/unit_tests_old/test_wandb.py (100%)
 rename tests/{ => pytest_tests}/unit_tests_old/test_wandb_agent.py (100%)
 rename tests/{ => pytest_tests}/unit_tests_old/test_wandb_artifacts.py (100%)
 rename tests/{ => pytest_tests}/unit_tests_old/test_wandb_controller.py (100%)
 rename tests/{ => pytest_tests}/unit_tests_old/test_wandb_integration.py (98%)
 rename tests/{ => pytest_tests}/unit_tests_old/test_wandb_run.py (100%)
 create mode 100644 tests/pytest_tests/unit_tests_old/tests_launch/__init__.py
 rename tests/{ => pytest_tests}/unit_tests_old/tests_launch/test_kaniko_build.py (99%)
 rename tests/{ => pytest_tests}/unit_tests_old/tests_launch/test_launch.py (99%)
 rename tests/{ => pytest_tests}/unit_tests_old/tests_launch/test_launch_aws.py (99%)
 rename tests/{ => pytest_tests}/unit_tests_old/tests_launch/test_launch_cli.py (100%)
 rename tests/{ => pytest_tests}/unit_tests_old/tests_launch/test_launch_docker.py (100%)
 rename tests/{ => pytest_tests}/unit_tests_old/tests_launch/test_launch_gcp.py (100%)
 rename tests/{ => pytest_tests}/unit_tests_old/tests_launch/test_launch_jobs.py (99%)
 rename tests/{ => pytest_tests}/unit_tests_old/tests_launch/test_launch_kubernetes.py (100%)
rename tests/{ => pytest_tests}/unit_tests_old/tests_launch/test_launch_local_process.py (100%) rename tests/{ => pytest_tests}/unit_tests_old/tests_launch/test_launch_utils.py (100%) rename tests/{ => pytest_tests}/unit_tests_old/tests_s_nb/test_notebooks.py (99%) rename tests/{ => pytest_tests}/unit_tests_old/utils/__init__.py (100%) rename tests/{ => pytest_tests}/unit_tests_old/utils/artifact_emu.py (100%) rename tests/{ => pytest_tests}/unit_tests_old/utils/dummy_data.py (100%) rename tests/{ => pytest_tests}/unit_tests_old/utils/mock_requests.py (100%) rename tests/{ => pytest_tests}/unit_tests_old/utils/mock_server.py (100%) rename tests/{ => pytest_tests}/unit_tests_old/utils/notebook_client.py (100%) rename tests/{ => pytest_tests}/unit_tests_old/utils/parse_metrics.py (100%) rename tests/{ => pytest_tests}/unit_tests_old/utils/records.py (100%) rename tests/{ => pytest_tests}/unit_tests_old/utils/utils.py (100%) delete mode 100644 tests/unit_tests/assets/launch_k8s_config.yaml delete mode 100644 tests/unit_tests/test_cli.py delete mode 100644 tests/unit_tests/test_public_api.py diff --git a/.circleci/config.yml b/.circleci/config.yml index 23f55a6248b..59fb9663665 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -238,6 +238,9 @@ jobs: execute: type: boolean default: true + tox_args: + type: string + default: "" docker: - image: "python:<>.<>" parallelism: << parameters.parallelism >> @@ -262,7 +265,7 @@ jobs: name: Run tests no_output_timeout: 10m command: | - CI_PYTEST_SPLIT_ARGS="--splits $CIRCLE_NODE_TOTAL --group $(( $CIRCLE_NODE_INDEX + 1 ))" tox -v -e << parameters.toxenv >> + CI_PYTEST_SPLIT_ARGS="--splits $CIRCLE_NODE_TOTAL --group $(( $CIRCLE_NODE_INDEX + 1 ))" tox -v -e << parameters.toxenv >> -- << parameters.tox_args >> - save-test-results # conditionally post a notification to slack if the job failed - when: @@ -282,7 +285,7 @@ jobs: default: 3 python_version_minor: type: integer - default: 7 + default: 8 toxenv: type: string notify_on_failure: @@ -291,9 +294,9 @@ jobs: xdist: type: integer default: 6 - test_path: + tox_args: type: string - default: tests/unit_tests + default: "" docker: - image: "python:<>.<>" - image: gcr.io/wandb-production/local-testcontainer:<< pipeline.parameters.wandb_server_tag >> @@ -324,7 +327,7 @@ jobs: command: | CI_PYTEST_PARALLEL=<< parameters.xdist >> \ tox -v -e <> \ - -- --wandb-server-tag << pipeline.parameters.wandb_server_tag >> << parameters.test_path >> + -- --wandb-server-tag << pipeline.parameters.wandb_server_tag >> << parameters.tox_args >> - save-test-results # conditionally post a notification to slack if the job failed - when: @@ -414,13 +417,16 @@ jobs: default: 3 machine_executor: type: string - default: "default" # "default" or "server-2019-cuda" + default: "default" # "default" or "server-2019-cuda" executor_size: type: string - default: "large" # could only be "medium" for "server-2019-cuda" + default: "large" # could only be "medium" for "server-2019-cuda" execute: type: boolean default: true + tox_args: + type: string + default: "" executor: name: win/<< parameters.machine_executor >> size: << parameters.executor_size >> @@ -442,7 +448,7 @@ jobs: cp /c/tools/miniconda3/python* /c/tools/miniconda3/lib/venv/scripts/nt/ - when: condition: - equal: [ "server-2019-cuda", << parameters.machine_executor >> ] + equal: ["server-2019-cuda", << parameters.machine_executor >>] steps: - run: name: Update tox.ini on a GPU machine to install the proper pytorch version @@ -459,7 +465,7 @@ jobs: echo 
$GCLOUD_SERVICE_KEY > key.json gcloud auth activate-service-account --key-file=key.json yes | gcloud auth configure-docker - DATE=$(date -u +%Y%m%d) CI_PYTEST_PARALLEL=<< parameters.xdist >> CI_PYTEST_SPLIT_ARGS="--splits $CIRCLE_NODE_TOTAL --group $(( $CIRCLE_NODE_INDEX + 1 ))" tox -v -e << parameters.toxenv >> + DATE=$(date -u +%Y%m%d) CI_PYTEST_PARALLEL=<< parameters.xdist >> CI_PYTEST_SPLIT_ARGS="--splits $CIRCLE_NODE_TOTAL --group $(( $CIRCLE_NODE_INDEX + 1 ))" tox -v -e << parameters.toxenv >> -- << parameters.tox_args >> no_output_timeout: 10m - save-test-results @@ -479,6 +485,9 @@ jobs: xdist: type: integer default: 3 + tox_args: + type: string + default: "" macos: xcode: 13.4.1 resource_class: large @@ -494,7 +503,7 @@ jobs: # Tests failed with Too many open files, so added ulimit command: | ulimit -n 4096 - CI_PYTEST_PARALLEL=<< parameters.xdist >> CI_PYTEST_SPLIT_ARGS="--splits $CIRCLE_NODE_TOTAL --group $(( $CIRCLE_NODE_INDEX + 1 ))" python3 -m tox -v -e << parameters.toxenv >> + CI_PYTEST_PARALLEL=<< parameters.xdist >> CI_PYTEST_SPLIT_ARGS="--splits $CIRCLE_NODE_TOTAL --group $(( $CIRCLE_NODE_INDEX + 1 ))" python3 -m tox -v -e << parameters.toxenv >> -- << parameters.tox_args >> no_output_timeout: 10m - save-test-results @@ -680,8 +689,8 @@ jobs: condition: << parameters.execute >> steps: - gcloud/install -# version: "413.0.0" -# components: "docker-credential-gcr" + # version: "413.0.0" + # components: "docker-credential-gcr" - setup_gcloud - setup_docker_buildx: docker_layer_caching: false @@ -1065,8 +1074,8 @@ workflows: - tox-base: matrix: parameters: - python_version_major: [ 3 ] - python_version_minor: [ 7, 8 ] + python_version_major: [3] + python_version_minor: [7, 8] shard: - "imports1" - "imports2" @@ -1082,6 +1091,7 @@ workflows: - "imports12" name: "func-s_<>-lin-py<><>" toxenv: "func-s_<>-py<><>" + tox_args: "tests/functional_tests" context: slack-secrets notify_on_failure: true notify_on_failure_channel: $SLACK_SDK_NIGHTLY_CI_GROWTH_CHANNEL @@ -1091,8 +1101,8 @@ workflows: - protobuf-compatability: matrix: parameters: - python_version_major: [ 3 ] - python_version_minor: [ 7, 8, 9, 10 ] + python_version_major: [3] + python_version_minor: [7, 8, 9, 10] name: "protobuf-compatability-py<><>" # # standalone GPU tests on Windows @@ -1102,10 +1112,11 @@ workflows: executor_size: "medium" matrix: parameters: - python_version_major: [ 3 ] - python_version_minor: [ 9 ] + python_version_major: [3] + python_version_minor: [9] name: "func-s_standalone_gpu-win-py<><>" toxenv: "standalone-gpu-py<><>" + tox_args: "tests/functional_tests" parallelism: 2 xdist: 1 # @@ -1201,16 +1212,18 @@ workflows: name: "code-check" python_version_minor: 8 toxenv: "protocheck3,protocheck4,generatecheck,codecovcheck,mypy,mypy-report,pyupgrade,black,isort-check,flake8,docstrings" + # - # Unit tests with pytest on Linux, using real wandb server + # System tests with pytest on Linux, using real wandb server # - pytest: matrix: parameters: python_version_major: [3] python_version_minor: [6, 7, 8, 9, 10] - name: "unit-s_base-lin-py<><>" + name: "system-linux-py<><>" toxenv: "py<><>,covercircle" + tox_args: "tests/pytest_tests/system_tests --ignore=tests/pytest_tests/system_tests/tests_launch" # # Unit tests with pytest on Linux, using the old mock server # @@ -1219,19 +1232,59 @@ workflows: parameters: python_version_major: [3] python_version_minor: [6, 7, 8, 9, 10] - name: "unit-s_base_mock_server-lin-py<><>" + name: "unit-linux-mock_server-py<><>" toxenv: "py<><>,covercircle" + tox_args: 
"tests/pytest_tests/unit_tests_old --ignore=tests/pytest_tests/unit_tests_old/tests_launch --ignore=tests/pytest_tests/unit_tests_old/tests_s_nb" # - # Unit tests with pytest on Linux, using real wandb server -- for launch (unit_tests/tests_launch) + # Unit tests with pytest on Linux # - - pytest: + - tox-base: matrix: parameters: python_version_major: [3] - python_version_minor: [8] - name: "unit-s_base-lin-pylaunch<><>" - toxenv: "pylaunch<><>,covercircle" - test_path: "tests/unit_tests/tests_launch" + python_version_minor: [6, 7, 8, 9, 10] + name: "unit-linux-py<><>" + toxenv: "py<><>,covercircle" + tox_args: "tests/pytest_tests/unit_tests" + + # + # Unit tests with pytest on Windows and MacOS + # + - win: + matrix: + parameters: + python_version_major: [3] + python_version_minor: [9] + name: "unit-win-mock_server-py<><>" + toxenv: "py<><>,wincovercircle" + tox_args: "tests/pytest_tests/unit_tests_old --timeout 300 --ignore=tests/pytest_tests/unit_tests_old/tests_launch --ignore=tests/pytest_tests/unit_tests_old/tests_s_nb" + + - win: + matrix: + parameters: + python_version_major: [3] + python_version_minor: [9] + name: "unit-win-py<><>" + toxenv: "py<><>,wincovercircle" + tox_args: "tests/pytest_tests/unit_tests --timeout 300" + + - mac: + matrix: + parameters: + python_version_major: [3] + python_version_minor: [9] + name: "unit-macos-mock_server-py<><>" + toxenv: "py<><>,covercircle" + tox_args: "tests/pytest_tests/unit_tests_old --ignore=tests/pytest_tests/unit_tests_old/tests_launch --ignore=tests/pytest_tests/unit_tests_old/tests_s_nb" + + - mac: + matrix: + parameters: + python_version_major: [3] + python_version_minor: [9] + name: "unit-macos-py<><>" + toxenv: "py<><>,covercircle" + tox_args: "tests/pytest_tests/unit_tests" # # Functional tests with yea on Linux (base only) @@ -1243,8 +1296,10 @@ workflows: python_version_minor: [7] shard: - "base" - name: "func-s_<>-lin-py<><>" + name: "func-linux-s_<>-py<><>" toxenv: "func-s_<>-py<><>,func-covercircle" + tox_args: "tests/functional_tests" + parallelism: 4 # # Functional tests with yea on Linux @@ -1268,8 +1323,10 @@ workflows: - "noml" - "grpc" - "docs" - name: "func-s_<>-lin-py<><>" + name: "func-linux-s_<>-py<><>" toxenv: "func-s_<>-py<><>,func-covercircle" + tox_args: "tests/functional_tests" + # # Functional tests with yea on Windows # @@ -1278,33 +1335,24 @@ workflows: parameters: python_version_major: [3] python_version_minor: [9] - name: "func-s_base-win-py<><>" + name: "func-win-s_base-py<><>" toxenv: "func-s_base-py<><>" + tox_args: "tests/functional_tests" parallelism: 6 xdist: 1 - # - # Unit tests with pytest on Windows and MacOS, using real wandb server - # - - win: - matrix: - parameters: - python_version_major: [3] - python_version_minor: [9] - name: "unit-s_base-win-py<><>" - toxenv: "py<><>,wincovercircle -- --timeout 300 tests/unit_tests" - - mac: - matrix: - parameters: - python_version_major: [3] - python_version_minor: [9] - name: "unit-s_base_mock_server-mac-py<><>" - toxenv: "py<><>,covercircle" + # # wandb launch tests # - launch: - name: "mach-launch" + name: "launch-mock-server" toxenv: "pylaunch,covercircle" + + - pytest: + name: "launch-relay-server" + toxenv: "pylaunch,covercircle" + tox_args: "tests/pytest_tests/system_tests/tests_launch" + # # sharded unit tests # @@ -1314,14 +1362,14 @@ workflows: parameters: python_version_major: [3] python_version_minor: [6] - name: "unit-s_nb-lin-py<><>" + name: "unit-linux-notebooks-py<><>" toxenv: "unit-s_nb-py<><>,unit-covercircle" - win: matrix: 
parameters: python_version_major: [3] python_version_minor: [9] - name: "unit-s_nb-win-py<><>" + name: "unit-win-notebooks-py<><>" toxenv: "unit-s_nb-py<><>" parallelism: 1 xdist: 1 @@ -1330,7 +1378,7 @@ workflows: parameters: python_version_major: [3] python_version_minor: [9] - name: "unit-s_nb-mac-py<><>" + name: "unit-macos-notebooks-py<><>" toxenv: "unit-s_nb-py<><>" parallelism: 1 xdist: 1 @@ -1340,7 +1388,6 @@ workflows: - pex: name: "pex" - # todo: needs love # manual_test: # when: << pipeline.parameters.manual_test >> @@ -1389,8 +1436,8 @@ workflows: execute: << pipeline.parameters.manual_nightly_execute_shard_imports >> matrix: parameters: - python_version_major: [ 3 ] - python_version_minor: [ 7, 8 ] + python_version_major: [3] + python_version_minor: [7, 8] shard: - "imports1" - "imports2" @@ -1415,8 +1462,8 @@ workflows: - protobuf-compatability: matrix: parameters: - python_version_major: [ 3 ] - python_version_minor: [ 7, 8, 9, 10 ] + python_version_major: [3] + python_version_minor: [7, 8, 9, 10] name: "protobuf-compatability-py<><>" # # standalone GPU tests on Windows @@ -1427,10 +1474,11 @@ workflows: execute: << pipeline.parameters.manual_nightly_execute_shard_standalone_gpu_win >> matrix: parameters: - python_version_major: [ 3 ] - python_version_minor: [ 9 ] + python_version_major: [3] + python_version_minor: [9] name: "func-s_standalone_gpu-win-py<><>" toxenv: "standalone-gpu-py<><>" + tox_args: "tests/functional_tests" parallelism: 2 xdist: 1 # diff --git a/.codecov.yml b/.codecov.yml index f5075ab2931..89be49b5f3c 100644 --- a/.codecov.yml +++ b/.codecov.yml @@ -4,14 +4,14 @@ codecov: # To calculate after_n_builds use # ./tools/coverage-tool.py jobs | wc -l # also change comment block after_n_builds just below - after_n_builds: 38 + after_n_builds: 51 wait_for_ci: no comment: layout: "reach, diff, flags, files" behavior: default require_changes: no - after_n_builds: 38 + after_n_builds: 51 ignore: - "wandb/vendor" diff --git a/.flake8-base b/.flake8-base index 53aa4f1a030..36ac98d4dfd 100644 --- a/.flake8-base +++ b/.flake8-base @@ -13,11 +13,8 @@ exclude = *.egg-info .cache .eggs - ./tests/conftest.py - ./tests/assets/fixtures/train.py - ./tests/utils/ - ./tests/logs/ - ./tests/unit_tests_old/ + ./tests/pytest_tests/assets/fixtures/train.py + ./tests/pytest_tests/unit_tests_old/ ./tests/functional_tests/t0_main/metaflow/ ./tests/functional_tests/t0_main/fastai/t1_v1.py ./wandb/__init__.py diff --git a/.github/workflows/check-mocks-synced-with-yea-wandb.yml b/.github/workflows/check-mocks-synced-with-yea-wandb.yml index e23325c5ab3..08c5ca67ad2 100644 --- a/.github/workflows/check-mocks-synced-with-yea-wandb.yml +++ b/.github/workflows/check-mocks-synced-with-yea-wandb.yml @@ -16,19 +16,19 @@ jobs: id: mock_requests run: | wget https://raw.githubusercontent.com/wandb/yea-wandb/main/src/yea_wandb/mock_requests.py - diff mock_requests.py tests/unit_tests_old/utils/mock_requests.py + diff mock_requests.py tests/pytest_tests/unit_tests_old/utils/mock_requests.py continue-on-error: true - name: Check if mock_server is up to date with wandb/wandb:master id: mock_server run: | wget https://raw.githubusercontent.com/wandb/yea-wandb/main/src/yea_wandb/mock_server.py - diff mock_server.py tests/unit_tests_old/utils/mock_server.py + diff mock_server.py tests/pytest_tests/unit_tests_old/utils/mock_server.py continue-on-error: true - name: Check if artifact_emu is up to date with wandb/wandb:master id: artifact_emu run: | wget 
https://raw.githubusercontent.com/wandb/yea-wandb/main/src/yea_wandb/artifact_emu.py - diff artifact_emu.py tests/unit_tests_old/utils/artifact_emu.py + diff artifact_emu.py tests/pytest_tests/unit_tests_old/utils/artifact_emu.py continue-on-error: true - name: Check on failures if: | diff --git a/pytest.ini b/pytest.ini index ccf24e09bfd..a9f40418961 100644 --- a/pytest.ini +++ b/pytest.ini @@ -17,6 +17,4 @@ norecursedirs = build/ tests/functional_tests tests/standalone_tests - tests/unit_tests - tests/unit_tests/tests_* - tests/unit_tests_old/tests_* + diff --git a/tests/unit_tests/__init__.py b/tests/pytest_tests/__init__.py similarity index 100% rename from tests/unit_tests/__init__.py rename to tests/pytest_tests/__init__.py diff --git a/tests/unit_tests/assets/2x2.png b/tests/pytest_tests/assets/2x2.png similarity index 100% rename from tests/unit_tests/assets/2x2.png rename to tests/pytest_tests/assets/2x2.png diff --git a/tests/unit_tests/assets/Box.gltf b/tests/pytest_tests/assets/Box.gltf similarity index 100% rename from tests/unit_tests/assets/Box.gltf rename to tests/pytest_tests/assets/Box.gltf diff --git a/tests/unit_tests/assets/cube.obj b/tests/pytest_tests/assets/cube.obj similarity index 100% rename from tests/unit_tests/assets/cube.obj rename to tests/pytest_tests/assets/cube.obj diff --git a/tests/unit_tests/assets/events.out.tfevents.1585769947.cvp b/tests/pytest_tests/assets/events.out.tfevents.1585769947.cvp similarity index 100% rename from tests/unit_tests/assets/events.out.tfevents.1585769947.cvp rename to tests/pytest_tests/assets/events.out.tfevents.1585769947.cvp diff --git a/tests/unit_tests/assets/events.out.tfevents.1611911647.big-histos b/tests/pytest_tests/assets/events.out.tfevents.1611911647.big-histos similarity index 100% rename from tests/unit_tests/assets/events.out.tfevents.1611911647.big-histos rename to tests/pytest_tests/assets/events.out.tfevents.1611911647.big-histos diff --git a/tests/pytest_tests/assets/launch_k8s_config.yaml b/tests/pytest_tests/assets/launch_k8s_config.yaml new file mode 100644 index 00000000000..0db30d99b5a --- /dev/null +++ b/tests/pytest_tests/assets/launch_k8s_config.yaml @@ -0,0 +1,17 @@ +apiVersion: v1 + +clusters: + - name: test-cluster + cluster: {} + +contexts: + - context: + cluster: test-cluster + user: test-user + name: active-context + +current-context: active-context + +users: + - name: test-user + user: {} diff --git a/tests/unit_tests/assets/point_cloud.pts.json b/tests/pytest_tests/assets/point_cloud.pts.json similarity index 100% rename from tests/unit_tests/assets/point_cloud.pts.json rename to tests/pytest_tests/assets/point_cloud.pts.json diff --git a/tests/unit_tests/assets/ssl_certs/README.md b/tests/pytest_tests/assets/ssl_certs/README.md similarity index 100% rename from tests/unit_tests/assets/ssl_certs/README.md rename to tests/pytest_tests/assets/ssl_certs/README.md diff --git a/tests/unit_tests/assets/ssl_certs/cc8b642c.0 b/tests/pytest_tests/assets/ssl_certs/cc8b642c.0 similarity index 100% rename from tests/unit_tests/assets/ssl_certs/cc8b642c.0 rename to tests/pytest_tests/assets/ssl_certs/cc8b642c.0 diff --git a/tests/unit_tests/assets/ssl_certs/localhost.crt b/tests/pytest_tests/assets/ssl_certs/localhost.crt similarity index 100% rename from tests/unit_tests/assets/ssl_certs/localhost.crt rename to tests/pytest_tests/assets/ssl_certs/localhost.crt diff --git a/tests/unit_tests/assets/ssl_certs/localhost.key b/tests/pytest_tests/assets/ssl_certs/localhost.key similarity index 100% rename 
from tests/unit_tests/assets/ssl_certs/localhost.key rename to tests/pytest_tests/assets/ssl_certs/localhost.key diff --git a/tests/unit_tests/assets/test.png b/tests/pytest_tests/assets/test.png similarity index 100% rename from tests/unit_tests/assets/test.png rename to tests/pytest_tests/assets/test.png diff --git a/tests/unit_tests/assets/test2.png b/tests/pytest_tests/assets/test2.png similarity index 100% rename from tests/unit_tests/assets/test2.png rename to tests/pytest_tests/assets/test2.png diff --git a/tests/unit_tests/assets/test_mod.py b/tests/pytest_tests/assets/test_mod.py similarity index 100% rename from tests/unit_tests/assets/test_mod.py rename to tests/pytest_tests/assets/test_mod.py diff --git a/tests/unit_tests/assets/wandb/offline-run-20210216_154407-g9dvvkua/files/code/standalone_tests/code-toad.py b/tests/pytest_tests/assets/wandb/offline-run-20210216_154407-g9dvvkua/files/code/standalone_tests/code-toad.py similarity index 100% rename from tests/unit_tests/assets/wandb/offline-run-20210216_154407-g9dvvkua/files/code/standalone_tests/code-toad.py rename to tests/pytest_tests/assets/wandb/offline-run-20210216_154407-g9dvvkua/files/code/standalone_tests/code-toad.py diff --git a/tests/unit_tests/assets/wandb/offline-run-20210216_154407-g9dvvkua/files/config.yaml b/tests/pytest_tests/assets/wandb/offline-run-20210216_154407-g9dvvkua/files/config.yaml similarity index 100% rename from tests/unit_tests/assets/wandb/offline-run-20210216_154407-g9dvvkua/files/config.yaml rename to tests/pytest_tests/assets/wandb/offline-run-20210216_154407-g9dvvkua/files/config.yaml diff --git a/tests/unit_tests/assets/wandb/offline-run-20210216_154407-g9dvvkua/files/diff.patch b/tests/pytest_tests/assets/wandb/offline-run-20210216_154407-g9dvvkua/files/diff.patch similarity index 100% rename from tests/unit_tests/assets/wandb/offline-run-20210216_154407-g9dvvkua/files/diff.patch rename to tests/pytest_tests/assets/wandb/offline-run-20210216_154407-g9dvvkua/files/diff.patch diff --git a/tests/unit_tests/assets/wandb/offline-run-20210216_154407-g9dvvkua/files/output.log b/tests/pytest_tests/assets/wandb/offline-run-20210216_154407-g9dvvkua/files/output.log similarity index 100% rename from tests/unit_tests/assets/wandb/offline-run-20210216_154407-g9dvvkua/files/output.log rename to tests/pytest_tests/assets/wandb/offline-run-20210216_154407-g9dvvkua/files/output.log diff --git a/tests/unit_tests/assets/wandb/offline-run-20210216_154407-g9dvvkua/files/requirements.txt b/tests/pytest_tests/assets/wandb/offline-run-20210216_154407-g9dvvkua/files/requirements.txt similarity index 100% rename from tests/unit_tests/assets/wandb/offline-run-20210216_154407-g9dvvkua/files/requirements.txt rename to tests/pytest_tests/assets/wandb/offline-run-20210216_154407-g9dvvkua/files/requirements.txt diff --git a/tests/unit_tests/assets/wandb/offline-run-20210216_154407-g9dvvkua/files/upstream_diff_78c43d0cc7146adb88cef2fcca4ec244a2d6a162.patch b/tests/pytest_tests/assets/wandb/offline-run-20210216_154407-g9dvvkua/files/upstream_diff_78c43d0cc7146adb88cef2fcca4ec244a2d6a162.patch similarity index 100% rename from tests/unit_tests/assets/wandb/offline-run-20210216_154407-g9dvvkua/files/upstream_diff_78c43d0cc7146adb88cef2fcca4ec244a2d6a162.patch rename to tests/pytest_tests/assets/wandb/offline-run-20210216_154407-g9dvvkua/files/upstream_diff_78c43d0cc7146adb88cef2fcca4ec244a2d6a162.patch diff --git a/tests/unit_tests/assets/wandb/offline-run-20210216_154407-g9dvvkua/files/wandb-metadata.json 
b/tests/pytest_tests/assets/wandb/offline-run-20210216_154407-g9dvvkua/files/wandb-metadata.json similarity index 100% rename from tests/unit_tests/assets/wandb/offline-run-20210216_154407-g9dvvkua/files/wandb-metadata.json rename to tests/pytest_tests/assets/wandb/offline-run-20210216_154407-g9dvvkua/files/wandb-metadata.json diff --git a/tests/unit_tests/assets/wandb/offline-run-20210216_154407-g9dvvkua/files/wandb-summary.json b/tests/pytest_tests/assets/wandb/offline-run-20210216_154407-g9dvvkua/files/wandb-summary.json similarity index 100% rename from tests/unit_tests/assets/wandb/offline-run-20210216_154407-g9dvvkua/files/wandb-summary.json rename to tests/pytest_tests/assets/wandb/offline-run-20210216_154407-g9dvvkua/files/wandb-summary.json diff --git a/tests/unit_tests/assets/wandb/offline-run-20210216_154407-g9dvvkua/run-g9dvvkua.wandb b/tests/pytest_tests/assets/wandb/offline-run-20210216_154407-g9dvvkua/run-g9dvvkua.wandb similarity index 100% rename from tests/unit_tests/assets/wandb/offline-run-20210216_154407-g9dvvkua/run-g9dvvkua.wandb rename to tests/pytest_tests/assets/wandb/offline-run-20210216_154407-g9dvvkua/run-g9dvvkua.wandb diff --git a/tests/unit_tests/assets/wandb_tensorflow_summary.pb b/tests/pytest_tests/assets/wandb_tensorflow_summary.pb similarity index 100% rename from tests/unit_tests/assets/wandb_tensorflow_summary.pb rename to tests/pytest_tests/assets/wandb_tensorflow_summary.pb diff --git a/tests/pytest_tests/conftest.py b/tests/pytest_tests/conftest.py new file mode 100644 index 00000000000..473c7230ed5 --- /dev/null +++ b/tests/pytest_tests/conftest.py @@ -0,0 +1,246 @@ +import os +import shutil +import sys +import unittest.mock +from pathlib import Path +from queue import Queue +from typing import Any, Callable, Generator, Union + +import git +import pytest +import wandb +import wandb.old.settings +import wandb.util +from click.testing import CliRunner +from wandb import Api +from wandb.sdk.interface.interface_queue import InterfaceQueue +from wandb.sdk.lib import filesystem, runid +from wandb.sdk.lib.git import GitRepo + +# -------------------------------- +# Misc Fixtures utilities +# -------------------------------- + + +@pytest.fixture(scope="session") +def assets_path() -> Generator[Callable, None, None]: + def assets_path_fn(path: Path) -> Path: + return Path(__file__).resolve().parent / "assets" / path + + yield assets_path_fn + + +@pytest.fixture +def copy_asset(assets_path) -> Generator[Callable, None, None]: + def copy_asset_fn( + path: Union[str, Path], dst: Union[str, Path, None] = None + ) -> Path: + src = assets_path(path) + if src.is_file(): + return shutil.copy(src, dst or path) + return shutil.copytree(src, dst or path) + + yield copy_asset_fn + + +# -------------------------------- +# Misc Fixtures +# -------------------------------- + + +@pytest.fixture(scope="function", autouse=True) +def filesystem_isolate(tmp_path): + # Click>=8 implements temp_dir argument which depends on python>=3.7 + kwargs = dict(temp_dir=tmp_path) if sys.version_info >= (3, 7) else {} + with CliRunner().isolated_filesystem(**kwargs): + yield + + +# todo: this fixture should probably be autouse=True +@pytest.fixture(scope="function", autouse=False) +def local_settings(filesystem_isolate): + """Place global settings in an isolated dir""" + config_path = os.path.join(os.getcwd(), ".config", "wandb", "settings") + filesystem.mkdir_exists_ok(os.path.join(".config", "wandb")) + + # todo: this breaks things in unexpected places + # todo: get rid of wandb.old + 
with unittest.mock.patch.object( + wandb.old.settings.Settings, + "_global_path", + return_value=config_path, + ): + yield + + +@pytest.fixture(scope="function", autouse=True) +def local_netrc(filesystem_isolate): + """Never use our real credentials, put them in their own isolated dir""" + + original_expanduser = os.path.expanduser # TODO: this seems overkill... + + open(".netrc", "wb").close() # Touch that netrc file + + def expand(path): + if "netrc" in path: + try: + full_path = os.path.realpath("netrc") + except OSError: + full_path = original_expanduser(path) + else: + full_path = original_expanduser(path) + return full_path + + # monkeypatch.setattr(os.path, "expanduser", expand) + with unittest.mock.patch.object(os.path, "expanduser", expand): + yield + + +@pytest.fixture +def dummy_api_key(): + return "1824812581259009ca9981580f8f8a9012409eee" + + +@pytest.fixture +def patch_apikey(dummy_api_key, mocker): + mocker.patch("wandb.wandb_lib.apikey.isatty", lambda stream: True) + mocker.patch("wandb.wandb_lib.apikey.input", lambda x: 1) + mocker.patch("wandb.wandb_lib.apikey.getpass", lambda x: dummy_api_key) + yield + + +@pytest.fixture +def patch_prompt(monkeypatch): + monkeypatch.setattr( + wandb.util, "prompt_choices", lambda x, input_timeout=None, jupyter=False: x[0] + ) + monkeypatch.setattr( + wandb.wandb_lib.apikey, + "prompt_choices", + lambda x, input_timeout=None, jupyter=False: x[0], + ) + + +@pytest.fixture +def runner(patch_apikey, patch_prompt): + return CliRunner() + + +@pytest.fixture +def git_repo(runner): + with runner.isolated_filesystem(), git.Repo.init(".") as repo: + filesystem.mkdir_exists_ok("wandb") + # Because the forked process doesn't use my monkey patch above + with open(os.path.join("wandb", "settings"), "w") as f: + f.write("[default]\nproject: test") + open("README", "wb").close() + repo.index.add(["README"]) + repo.index.commit("Initial commit") + yield GitRepo(lazy=False) + + +@pytest.fixture(scope="function", autouse=True) +def unset_global_objects(): + from wandb.sdk.lib.module import unset_globals + + yield + unset_globals() + + +@pytest.fixture(scope="session", autouse=True) +def env_teardown(): + wandb.teardown() + yield + wandb.teardown() + if not os.environ.get("CI") == "true": + # TODO: uncomment this for prod? 
better make controllable with an env var + # subprocess.run(["wandb", "server", "stop"]) + pass + + +@pytest.fixture(scope="function", autouse=True) +def clean_up(): + yield + wandb.teardown() + + +@pytest.fixture +def api(): + return Api() + + +# -------------------------------- +# Fixtures for user test point +# -------------------------------- + + +@pytest.fixture() +def record_q() -> "Queue": + return Queue() + + +@pytest.fixture() +def mocked_interface(record_q: "Queue") -> InterfaceQueue: + return InterfaceQueue(record_q=record_q) + + +@pytest.fixture +def mocked_backend(mocked_interface: InterfaceQueue) -> Generator[object, None, None]: + class MockedBackend: + def __init__(self) -> None: + self.interface = mocked_interface + + yield MockedBackend() + + +def dict_factory(): + def helper(): + return dict() + + return helper + + +@pytest.fixture(scope="function") +def test_settings(): + def update_test_settings( + extra_settings: Union[ + dict, wandb.sdk.wandb_settings.Settings + ] = dict_factory() # noqa: B008 + ): + settings = wandb.Settings( + console="off", + save_code=False, + ) + if isinstance(extra_settings, dict): + settings.update(extra_settings, source=wandb.sdk.wandb_settings.Source.BASE) + elif isinstance(extra_settings, wandb.sdk.wandb_settings.Settings): + settings.update(extra_settings) + settings._set_run_start_time() + return settings + + yield update_test_settings + + +@pytest.fixture(scope="function") +def mock_run(test_settings, mocked_backend) -> Generator[Callable, None, None]: + from wandb.sdk.lib.module import unset_globals + + def mock_run_fn(use_magic_mock=False, **kwargs: Any) -> "wandb.sdk.wandb_run.Run": + kwargs_settings = kwargs.pop("settings", dict()) + kwargs_settings = { + **{ + "run_id": runid.generate_id(), + }, + **kwargs_settings, + } + run = wandb.wandb_sdk.wandb_run.Run( + settings=test_settings(kwargs_settings), **kwargs + ) + run._set_backend( + unittest.mock.MagicMock() if use_magic_mock else mocked_backend + ) + run._set_globals() + return run + + yield mock_run_fn + unset_globals() diff --git a/tests/unit_tests/conftest.py b/tests/pytest_tests/system_tests/conftest.py similarity index 81% rename from tests/unit_tests/conftest.py rename to tests/pytest_tests/system_tests/conftest.py index fb22745cb94..c54f2a43dc5 100644 --- a/tests/unit_tests/conftest.py +++ b/tests/pytest_tests/system_tests/conftest.py @@ -1,15 +1,12 @@ import dataclasses -import io import json import logging import os import platform import secrets -import shutil import socket import string import subprocess -import sys import threading import time import unittest.mock @@ -25,7 +22,6 @@ Any, Callable, Dict, - Generator, Iterable, List, Mapping, @@ -34,7 +30,6 @@ ) import flask -import git import pandas as pd import pytest import requests @@ -42,16 +37,12 @@ import wandb import wandb.old.settings import wandb.util -from click.testing import CliRunner -from wandb import Api from wandb.sdk.interface.interface_queue import InterfaceQueue from wandb.sdk.internal import context from wandb.sdk.internal.handler import HandleManager from wandb.sdk.internal.sender import SendManager from wandb.sdk.internal.settings_static import SettingsStatic from wandb.sdk.internal.writer import WriteManager -from wandb.sdk.lib import filesystem, runid -from wandb.sdk.lib.git import GitRepo from wandb.sdk.lib.mailbox import Mailbox try: @@ -99,340 +90,6 @@ class ConsoleFormatter: END = "\033[0m" -# -------------------------------- -# Misc Fixtures utilities -# -------------------------------- - - 
-@pytest.fixture(scope="session") -def assets_path() -> Callable: - def assets_path_fn(path: Path) -> Path: - return Path(__file__).resolve().parent / "assets" / path - - yield assets_path_fn - - -@pytest.fixture -def copy_asset(assets_path) -> Callable: - def copy_asset_fn( - path: Union[str, Path], dst: Union[str, Path, None] = None - ) -> Path: - src = assets_path(path) - if src.is_file(): - return shutil.copy(src, dst or path) - return shutil.copytree(src, dst or path) - - yield copy_asset_fn - - -# -------------------------------- -# Misc Fixtures -# -------------------------------- - - -@pytest.fixture -def mock_responses(): - with responses.RequestsMock() as rsps: - yield rsps - - -@pytest.fixture(scope="function", autouse=True) -def unset_global_objects(): - from wandb.sdk.lib.module import unset_globals - - yield - unset_globals() - - -@pytest.fixture(scope="session", autouse=True) -def env_teardown(): - wandb.teardown() - yield - wandb.teardown() - if not os.environ.get("CI") == "true": - # TODO: uncomment this for prod? better make controllable with an env var - # subprocess.run(["wandb", "server", "stop"]) - pass - - -@pytest.fixture(scope="function", autouse=True) -def clean_up(): - yield - wandb.teardown() - - -@pytest.fixture(scope="function", autouse=True) -def filesystem_isolate(tmp_path): - # Click>=8 implements temp_dir argument which depends on python>=3.7 - kwargs = dict(temp_dir=tmp_path) if sys.version_info >= (3, 7) else {} - with CliRunner().isolated_filesystem(**kwargs): - yield - - -# todo: this fixture should probably be autouse=True -@pytest.fixture(scope="function", autouse=False) -def local_settings(filesystem_isolate): - """Place global settings in an isolated dir""" - config_path = os.path.join(os.getcwd(), ".config", "wandb", "settings") - filesystem.mkdir_exists_ok(os.path.join(".config", "wandb")) - - # todo: this breaks things in unexpected places - # todo: get rid of wandb.old - with unittest.mock.patch.object( - wandb.old.settings.Settings, - "_global_path", - return_value=config_path, - ): - yield - - -@pytest.fixture(scope="function", autouse=True) -def local_netrc(filesystem_isolate): - """Never use our real credentials, put them in their own isolated dir""" - - original_expanduser = os.path.expanduser # TODO: this seems overkill... 
- - open(".netrc", "wb").close() # Touch that netrc file - - def expand(path): - if "netrc" in path: - try: - full_path = os.path.realpath("netrc") - except OSError: - full_path = original_expanduser(path) - else: - full_path = original_expanduser(path) - return full_path - - # monkeypatch.setattr(os.path, "expanduser", expand) - with unittest.mock.patch.object(os.path, "expanduser", expand): - yield - - -@pytest.fixture -def mocked_ipython(mocker): - mocker.patch("wandb.sdk.lib.ipython._get_python_type", lambda: "jupyter") - mocker.patch("wandb.sdk.wandb_settings._get_python_type", lambda: "jupyter") - html_mock = mocker.MagicMock() - mocker.patch("wandb.sdk.lib.ipython.display_html", html_mock) - ipython = unittest.mock.MagicMock() - ipython.html = html_mock - - def run_cell(cell): - print("Running cell: ", cell) - exec(cell) - - ipython.run_cell = run_cell - # TODO: this is really unfortunate, for reasons not clear to me, monkeypatch doesn't work - orig_get_ipython = wandb.jupyter.get_ipython - orig_display = wandb.jupyter.display - wandb.jupyter.get_ipython = lambda: ipython - wandb.jupyter.display = lambda obj: html_mock(obj._repr_html_()) - yield ipython - wandb.jupyter.get_ipython = orig_get_ipython - wandb.jupyter.display = orig_display - - -@pytest.fixture -def git_repo(runner): - with runner.isolated_filesystem(), git.Repo.init(".") as repo: - filesystem.mkdir_exists_ok("wandb") - # Because the forked process doesn't use my monkey patch above - with open(os.path.join("wandb", "settings"), "w") as f: - f.write("[default]\nproject: test") - open("README", "wb").close() - repo.index.add(["README"]) - repo.index.commit("Initial commit") - yield GitRepo(lazy=False) - - -@pytest.fixture -def dummy_api_key(): - return "1824812581259009ca9981580f8f8a9012409eee" - - -@pytest.fixture -def patch_apikey(dummy_api_key, mocker): - mocker.patch("wandb.wandb_lib.apikey.isatty", lambda stream: True) - mocker.patch("wandb.wandb_lib.apikey.input", lambda x: 1) - mocker.patch("wandb.wandb_lib.apikey.getpass", lambda x: dummy_api_key) - yield - - -@pytest.fixture -def patch_prompt(monkeypatch): - monkeypatch.setattr( - wandb.util, "prompt_choices", lambda x, input_timeout=None, jupyter=False: x[0] - ) - monkeypatch.setattr( - wandb.wandb_lib.apikey, - "prompt_choices", - lambda x, input_timeout=None, jupyter=False: x[0], - ) - - -@pytest.fixture -def runner(patch_apikey, patch_prompt): - return CliRunner() - - -@pytest.fixture -def api(): - return Api() - - -@pytest.fixture -def mock_sagemaker(): - config_path = "/opt/ml/input/config/hyperparameters.json" - resource_path = "/opt/ml/input/config/resourceconfig.json" - secrets_path = "secrets.env" - - orig_exist = os.path.exists - - def exists(path): - if path in (config_path, secrets_path, resource_path): - return True - else: - return orig_exist(path) - - def magic_factory(original): - def magic(path, *args, **kwargs): - if path == config_path: - return io.StringIO('{"foo": "bar"}') - elif path == resource_path: - return io.StringIO('{"hosts":["a", "b"]}') - elif path == secrets_path: - return io.StringIO("WANDB_TEST_SECRET=TRUE") - else: - return original(path, *args, **kwargs) - - return magic - - with unittest.mock.patch.dict( - os.environ, - { - "TRAINING_JOB_NAME": "sage", - "CURRENT_HOST": "maker", - }, - ), unittest.mock.patch("wandb.util.os.path.exists", exists,), unittest.mock.patch( - "builtins.open", - magic_factory(open), - create=True, - ): - yield - - -# -------------------------------- -# Fixtures for user test point -# 
-------------------------------- - - -class RecordsUtil: - def __init__(self, queue: "Queue") -> None: - self.records = [] - while not queue.empty(): - self.records.append(queue.get()) - - def __len__(self) -> int: - return len(self.records) - - def __getitem__(self, name: str) -> Generator: - for record in self.records: - yield from self.resolve_item(record, name) - - @staticmethod - def resolve_item(obj, attr: str, sep: str = ".") -> List: - for name in attr.split(sep): - if not obj.HasField(name): - return [] - obj = getattr(obj, name) - return [obj] - - @staticmethod - def dictify(obj, key: str = "key", value: str = "value_json") -> Dict: - return {getattr(item, key): getattr(item, value) for item in obj} - - @property - def config(self) -> List: - return [self.dictify(_c.update) for _c in self["config"]] - - @property - def history(self) -> List: - return [self.dictify(_h.item) for _h in self["history"]] - - @property - def partial_history(self) -> List: - return [self.dictify(_h.item) for _h in self["request.partial_history"]] - - @property - def preempting(self) -> List: - return list(self["preempting"]) - - @property - def summary(self) -> List: - return list(self["summary"]) - - @property - def files(self) -> List: - return list(self["files"]) - - @property - def metric(self): - return list(self["metric"]) - - -@pytest.fixture -def parse_records() -> Generator[Callable, None, None]: - def records_parser_fn(q: "Queue") -> RecordsUtil: - return RecordsUtil(q) - - yield records_parser_fn - - -@pytest.fixture() -def record_q() -> "Queue": - return Queue() - - -@pytest.fixture() -def mocked_interface(record_q: "Queue") -> InterfaceQueue: - return InterfaceQueue(record_q=record_q) - - -@pytest.fixture -def mocked_backend(mocked_interface: InterfaceQueue) -> Generator[object, None, None]: - class MockedBackend: - def __init__(self) -> None: - self.interface = mocked_interface - - yield MockedBackend() - - -@pytest.fixture(scope="function") -def mock_run(test_settings, mocked_backend) -> Generator[Callable, None, None]: - from wandb.sdk.lib.module import unset_globals - - def mock_run_fn(use_magic_mock=False, **kwargs: Any) -> "wandb.sdk.wandb_run.Run": - kwargs_settings = kwargs.pop("settings", dict()) - kwargs_settings = { - **{ - "run_id": runid.generate_id(), - }, - **kwargs_settings, - } - run = wandb.wandb_sdk.wandb_run.Run( - settings=test_settings(kwargs_settings), **kwargs - ) - run._set_backend( - unittest.mock.MagicMock() if use_magic_mock else mocked_backend - ) - run._set_globals() - return run - - yield mock_run_fn - unset_globals() - - # -------------------------------- # Fixtures for internal test point # -------------------------------- @@ -1748,34 +1405,6 @@ def relay_server_context(inject: Optional[List[InjectedResponse]] = None): return relay_server_context -def dict_factory(): - def helper(): - return dict() - - return helper - - -@pytest.fixture(scope="function") -def test_settings(): - def update_test_settings( - extra_settings: Union[ - dict, wandb.sdk.wandb_settings.Settings - ] = dict_factory() # noqa: B008 - ): - settings = wandb.Settings( - console="off", - save_code=False, - ) - if isinstance(extra_settings, dict): - settings.update(extra_settings, source=wandb.sdk.wandb_settings.Source.BASE) - elif isinstance(extra_settings, wandb.sdk.wandb_settings.Settings): - settings.update(extra_settings) - settings._set_run_start_time() - return settings - - yield update_test_settings - - @pytest.fixture(scope="function") def wandb_init(user, test_settings, request): # 
mirror wandb.sdk.wandb_init.init args, overriding name and entity defaults diff --git a/tests/unit_tests/artifacts/conftest.py b/tests/pytest_tests/system_tests/test_artifacts/conftest.py similarity index 100% rename from tests/unit_tests/artifacts/conftest.py rename to tests/pytest_tests/system_tests/test_artifacts/conftest.py diff --git a/tests/unit_tests/artifacts/test_wandb_artifacts.py b/tests/pytest_tests/system_tests/test_artifacts/test_wandb_artifacts.py similarity index 100% rename from tests/unit_tests/artifacts/test_wandb_artifacts.py rename to tests/pytest_tests/system_tests/test_artifacts/test_wandb_artifacts.py diff --git a/tests/unit_tests/artifacts/test_wandb_artifacts_cache.py b/tests/pytest_tests/system_tests/test_artifacts/test_wandb_artifacts_cache.py similarity index 100% rename from tests/unit_tests/artifacts/test_wandb_artifacts_cache.py rename to tests/pytest_tests/system_tests/test_artifacts/test_wandb_artifacts_cache.py diff --git a/tests/unit_tests/artifacts/test_wandb_artifacts_full.py b/tests/pytest_tests/system_tests/test_artifacts/test_wandb_artifacts_full.py similarity index 100% rename from tests/unit_tests/artifacts/test_wandb_artifacts_full.py rename to tests/pytest_tests/system_tests/test_artifacts/test_wandb_artifacts_full.py diff --git a/tests/pytest_tests/system_tests/test_cli_full.py b/tests/pytest_tests/system_tests/test_cli_full.py new file mode 100644 index 00000000000..a4d0959dd53 --- /dev/null +++ b/tests/pytest_tests/system_tests/test_cli_full.py @@ -0,0 +1,213 @@ +import netrc +import os +import traceback +from unittest import mock + +import pytest +import wandb +from wandb.cli import cli + + +@pytest.fixture +def empty_netrc(monkeypatch): + class FakeNet: + @property + def hosts(self): + return {"api.wandb.ai": None} + + monkeypatch.setattr(netrc, "netrc", lambda *args: FakeNet()) + + +# @contextlib.contextmanager +# def config_dir(): +# try: +# os.environ["WANDB_CONFIG"] = os.getcwd() +# yield +# finally: +# del os.environ["WANDB_CONFIG"] + + +def debug_result(result, prefix=None): + prefix = prefix or "" + print("DEBUG({}) {} = {}".format(prefix, "out", result.output)) + print("DEBUG({}) {} = {}".format(prefix, "exc", result.exception)) + print( + "DEBUG({}) {} = {}".format(prefix, "tb", traceback.print_tb(result.exc_info[2])) + ) + + +@pytest.mark.xfail(reason="This test is flakey on CI") +def test_init_reinit(runner, empty_netrc, user): + with runner.isolated_filesystem(), mock.patch( + "wandb.sdk.lib.apikey.len", return_value=40 + ): + result = runner.invoke(cli.login, [user]) + debug_result(result, "login") + result = runner.invoke(cli.init, input="y\n\n\n") + debug_result(result, "init") + assert result.exit_code == 0 + with open("netrc") as f: + generated_netrc = f.read() + with open("wandb/settings") as f: + generated_wandb = f.read() + assert user in generated_netrc + assert user in generated_wandb + + +@pytest.mark.xfail(reason="This test is flakey on CI") +def test_init_add_login(runner, empty_netrc, user): + with runner.isolated_filesystem(), mock.patch( + "wandb.sdk.lib.apikey.len", return_value=40 + ): + with open("netrc", "w") as f: + f.write("previous config") + result = runner.invoke(cli.login, [user]) + debug_result(result, "login") + result = runner.invoke(cli.init, input=f"y\n{user}\nvanpelt\n") + debug_result(result, "init") + assert result.exit_code == 0 + with open("netrc") as f: + generated_netrc = f.read() + with open("wandb/settings") as f: + generated_wandb = f.read() + assert user in generated_netrc + assert user 
in generated_wandb
+
+
+@pytest.mark.xfail(reason="This test is flakey on CI")
+def test_init_existing_login(runner, user):
+    with runner.isolated_filesystem():
+        with open("netrc", "w") as f:
+            f.write(f"machine localhost\n\tlogin {user}\tpassword {user}")
+        result = runner.invoke(cli.init, input="y\nvanpelt\nfoo\n")
+        print(result.output)
+        print(result.exception)
+        print(traceback.print_tb(result.exc_info[2]))
+        assert result.exit_code == 0
+        with open("wandb/settings") as f:
+            generated_wandb = f.read()
+        assert user in generated_wandb
+        assert "This directory is configured" in result.output
+
+
+@pytest.mark.xfail(reason="This test is flakey on CI")
+def test_pull(runner, wandb_init):
+    with runner.isolated_filesystem():
+        project_name = "test_pull"
+        file_name = "weights.h5"
+        run = wandb_init(project=project_name)
+        with open(file_name, "w") as f:
+            f.write("WEIGHTS")
+        run.save(file_name)
+        run.finish()
+
+        # delete the file so that we can pull it and check that it is there
+        os.remove(file_name)
+
+        result = runner.invoke(cli.pull, [run.id, "--project", project_name])
+        print(result.output)
+        print(result.exception)
+        print(traceback.print_tb(result.exc_info[2]))
+        assert result.exit_code == 0
+        assert f"Downloading: {project_name}/{run.id}" in result.output
+        assert os.path.isfile(file_name)
+        assert f"File {file_name}" in result.output
+
+
+@pytest.mark.parametrize(
+    "tb_file_name,history_length",
+    [
+        ("events.out.tfevents.1585769947.cvp", 17),
+        pytest.param(
+            "events.out.tfevents.1611911647.big-histos",
+            27,
+            marks=[
+                pytest.mark.flaky,
+                pytest.mark.xfail(reason="test seems flaky, reenable with WB-5015"),
+            ],
+        ),
+    ],
+)
+def test_sync_tensorboard(
+    runner,
+    relay_server,
+    wandb_init,
+    copy_asset,
+    tb_file_name,
+    history_length,
+):
+    with relay_server() as relay, runner.isolated_filesystem():
+        project_name = "test_sync_tensorboard"
+        run = wandb.init(project=project_name)
+        run.finish()
+
+        copy_asset(tb_file_name)
+
+        result = runner.invoke(
+            cli.sync, [".", f"--id={run.id}", f"--project={project_name}"]
+        )
+
+        assert result.exit_code == 0
+        assert "Found 1 tfevent files" in result.output
+        history = relay.context.get_run_history(run.id)
+        assert len(history) == history_length
+
+        # Check the no sync tensorboard flag
+        result = runner.invoke(cli.sync, [".", "--no-sync-tensorboard"])
+        assert "Skipping directory: {}\n".format(os.path.abspath(".")) in result.output
+        assert tb_file_name in os.listdir(".")
+
+
+def test_sync_wandb_run(runner, relay_server, user, copy_asset):
+    # note: we have to mock out ArtifactSaver.save
+    # because the artifact does not actually exist
+    # among assets listed in the .wandb file.
+    # this is a problem for the real backend that we use now
+    # (as we used to use a mock backend)
+    # todo: create a new test asset that will contain an artifact
+    with relay_server() as relay, runner.isolated_filesystem(), mock.patch(
+        "wandb.sdk.internal.artifacts.ArtifactSaver.save", return_value=None
+    ):
+        copy_asset("wandb")
+
+        result = runner.invoke(cli.sync, ["--sync-all"])
+        print(result.output)
+        print(traceback.print_tb(result.exc_info[2]))
+        assert result.exit_code == 0
+
+        assert f"{user}/code-toad/runs/g9dvvkua ... done." in result.output
+        assert len(relay.context.events) == 1
+
+        # Check we marked the run as synced
+        result = runner.invoke(cli.sync, ["--sync-all"])
+        assert result.exit_code == 0
+        assert "wandb: ERROR Nothing to sync."
in result.output + + +def test_sync_wandb_run_and_tensorboard(runner, relay_server, user, copy_asset): + with relay_server() as relay, runner.isolated_filesystem(), mock.patch( + "wandb.sdk.internal.artifacts.ArtifactSaver.save", return_value=None + ): + run_dir = os.path.join("wandb", "offline-run-20210216_154407-g9dvvkua") + copy_asset("wandb") + tb_file_name = "events.out.tfevents.1585769947.cvp" + copy_asset(tb_file_name, os.path.join(run_dir, tb_file_name)) + + result = runner.invoke(cli.sync, ["--sync-all"]) + print(result.output) + print(traceback.print_tb(result.exc_info[2])) + assert result.exit_code == 0 + + assert f"{user}/code-toad/runs/g9dvvkua ... done." in result.output + assert len(relay.context.events) == 1 + + uploaded_files = relay.context.get_run_uploaded_files("g9dvvkua") + assert "code/standalone_tests/code-toad.py" in uploaded_files + + # Check we marked the run as synced + result = runner.invoke(cli.sync, [run_dir]) + assert result.exit_code == 0 + assert ( + "WARNING Found .wandb file, not streaming tensorboard metrics" + in result.output + ) diff --git a/tests/unit_tests/test_data_types_full.py b/tests/pytest_tests/system_tests/test_data_types_full.py similarity index 100% rename from tests/unit_tests/test_data_types_full.py rename to tests/pytest_tests/system_tests/test_data_types_full.py diff --git a/tests/unit_tests/test_file_stream_internal.py b/tests/pytest_tests/system_tests/test_file_stream_internal.py similarity index 100% rename from tests/unit_tests/test_file_stream_internal.py rename to tests/pytest_tests/system_tests/test_file_stream_internal.py diff --git a/tests/unit_tests/test_file_upload.py b/tests/pytest_tests/system_tests/test_file_upload.py similarity index 100% rename from tests/unit_tests/test_file_upload.py rename to tests/pytest_tests/system_tests/test_file_upload.py diff --git a/tests/unit_tests/test_footer.py b/tests/pytest_tests/system_tests/test_footer.py similarity index 100% rename from tests/unit_tests/test_footer.py rename to tests/pytest_tests/system_tests/test_footer.py diff --git a/tests/unit_tests/test_keras_full.py b/tests/pytest_tests/system_tests/test_keras_full.py similarity index 100% rename from tests/unit_tests/test_keras_full.py rename to tests/pytest_tests/system_tests/test_keras_full.py diff --git a/tests/unit_tests/test_kfp.py b/tests/pytest_tests/system_tests/test_kfp.py similarity index 100% rename from tests/unit_tests/test_kfp.py rename to tests/pytest_tests/system_tests/test_kfp.py diff --git a/tests/unit_tests/test_label_full.py b/tests/pytest_tests/system_tests/test_label_full.py similarity index 100% rename from tests/unit_tests/test_label_full.py rename to tests/pytest_tests/system_tests/test_label_full.py diff --git a/tests/unit_tests/test_metric_full.py b/tests/pytest_tests/system_tests/test_metric_full.py similarity index 100% rename from tests/unit_tests/test_metric_full.py rename to tests/pytest_tests/system_tests/test_metric_full.py diff --git a/tests/unit_tests/test_metric_internal.py b/tests/pytest_tests/system_tests/test_metric_internal.py similarity index 100% rename from tests/unit_tests/test_metric_internal.py rename to tests/pytest_tests/system_tests/test_metric_internal.py diff --git a/tests/unit_tests/test_mode_disabled.py b/tests/pytest_tests/system_tests/test_mode_disabled_full.py similarity index 89% rename from tests/unit_tests/test_mode_disabled.py rename to tests/pytest_tests/system_tests/test_mode_disabled_full.py index 5b04fcd2dae..a7a14c61562 100644 --- 
a/tests/unit_tests/test_mode_disabled.py +++ b/tests/pytest_tests/system_tests/test_mode_disabled_full.py @@ -3,7 +3,6 @@ """ import os -import pickle from unittest import mock import pytest # type: ignore @@ -89,16 +88,6 @@ def test_disabled_summary(wandb_init): assert run.summary["nested"]["level"] == 3 -def test_disabled_can_pickle(): - """Will it pickle?""" - # This case comes up when using wandb in disabled mode, with keras - # https://wandb.atlassian.net/browse/WB-3981 - obj = wandb.wandb_sdk.lib.RunDisabled() - with open("test.pkl", "wb") as file: - pickle.dump(obj, file) - os.remove("test.pkl") - - def test_disabled_globals(wandb_init): # Test wandb.* attributes run = wandb_init(config={"foo": {"bar": {"x": "y"}}}, mode="disabled") diff --git a/tests/pytest_tests/system_tests/test_model_workflow.py b/tests/pytest_tests/system_tests/test_model_workflow.py new file mode 100644 index 00000000000..ca56810179f --- /dev/null +++ b/tests/pytest_tests/system_tests/test_model_workflow.py @@ -0,0 +1,8 @@ +import pytest + + +def test_offline_link_artifact(wandb_init): + run = wandb_init(mode="offline") + with pytest.raises(NotImplementedError): + run.link_artifact(None, "entity/project/portfolio", "latest") + run.finish() diff --git a/tests/unit_tests/test_mp_full.py b/tests/pytest_tests/system_tests/test_mp_full.py similarity index 94% rename from tests/unit_tests/test_mp_full.py rename to tests/pytest_tests/system_tests/test_mp_full.py index eaab6854b09..81be0231370 100644 --- a/tests/unit_tests/test_mp_full.py +++ b/tests/pytest_tests/system_tests/test_mp_full.py @@ -115,21 +115,16 @@ def test_multiproc_strict(relay_server, wandb_init): assert summary["mystep"] == 3 -def test_multiproc_strict_bad(test_settings): - with pytest.raises(ValueError): - test_settings(dict(strict="bad")) - - @pytest.mark.timeout(300) def test_multiproc_spawn(runner, user): # WB5640. 
Before the WB5640 fix this code fragment would raise an # exception, this test checks that it runs without error with runner.isolated_filesystem(): - from tests.unit_tests.assets import test_mod + from tests.pytest_tests.assets import test_mod test_mod.main() sys.modules["__main__"].__spec__ = importlib.machinery.ModuleSpec( - name="tests.unit_tests.assets.test_mod", + name="tests.pytest_tests.assets.test_mod", loader=importlib.machinery.BuiltinImporter, ) test_mod.main() diff --git a/tests/unit_tests/test_offline_sync.py b/tests/pytest_tests/system_tests/test_offline_sync.py similarity index 100% rename from tests/unit_tests/test_offline_sync.py rename to tests/pytest_tests/system_tests/test_offline_sync.py diff --git a/tests/pytest_tests/system_tests/test_public_api.py b/tests/pytest_tests/system_tests/test_public_api.py new file mode 100644 index 00000000000..ad222c27f41 --- /dev/null +++ b/tests/pytest_tests/system_tests/test_public_api.py @@ -0,0 +1,128 @@ +"""Tests for the `wandb.apis.PublicApi` module.""" + + +from unittest import mock + +import pytest +import wandb +import wandb.apis.public +import wandb.util +from wandb import Api +from wandb.sdk.lib import runid + + +@pytest.mark.parametrize( + "path", + [ + "test/test/test/test", + "test/test/test/test/test", + ], +) +def test_from_path_bad_path(user, path): + with pytest.raises(wandb.Error, match="Invalid path"): + Api().from_path(path) + + +def test_from_path_bad_report_path(user): + with pytest.raises(wandb.Error, match="Invalid report path"): + Api().from_path("test/test/reports/test-foo") + + +@pytest.mark.parametrize( + "path", + [ + "test/test/reports/XYZ", + "test/test/reports/Name-foo--XYZ", + ], +) +def test_from_path_report_type(user, path): + report = Api().from_path(path) + assert isinstance(report, wandb.apis.public.BetaReport) + + +def test_project_to_html(user): + with mock.patch.dict("os.environ", {"WANDB_ENTITY": "mock_entity"}): + project = Api().from_path("test") + assert "mock_entity/test/workspace?jupyter=true" in project.to_html() + + +@pytest.mark.xfail(reason="TODO: fix this test") +def test_run_from_tensorboard(runner, relay_server, user, api, copy_asset): + with relay_server() as relay, runner.isolated_filesystem(): + tb_file_name = "events.out.tfevents.1585769947.cvp" + copy_asset(tb_file_name) + run_id = runid.generate_id() + api.sync_tensorboard(".", project="test", run_id=run_id) + uploaded_files = relay.context.get_run_uploaded_files(run_id) + assert uploaded_files[0].endswith(tb_file_name) + assert len(uploaded_files) == 17 + + +def test_update_aliases_on_artifact(user, relay_server, wandb_init): + project = "test" + run = wandb_init(entity=user, project=project) + artifact = wandb.Artifact("test-artifact", "test-type") + with open("boom.txt", "w") as f: + f.write("testing") + artifact.add_file("boom.txt", "test-name") + art = run.log_artifact(artifact, aliases=["sequence"]) + run.link_artifact(art, f"{user}/{project}/my-sample-portfolio") + artifact.wait() + run.finish() + + # fetch artifact under original parent sequence + artifact = Api().artifact( + name=f"{user}/{project}/test-artifact:v0", type="test-type" + ) + aliases = artifact.aliases + assert "sequence" in aliases + + # fetch artifact under portfolio + # and change aliases under portfolio only + artifact = Api().artifact( + name=f"{user}/{project}/my-sample-portfolio:v0", type="test-type" + ) + aliases = artifact.aliases + assert "sequence" not in aliases + artifact.aliases = ["portfolio"] + artifact.aliases.append("boom") + 
artifact.save() + + artifact = Api().artifact( + name=f"{user}/{project}/my-sample-portfolio:v0", type="test-type" + ) + aliases = artifact.aliases + assert "portfolio" in aliases + assert "boom" in aliases + assert "sequence" not in aliases + + +def test_artifact_version(wandb_init): + def create_test_artifact(content: str): + art = wandb.Artifact("test-artifact", "test-type") + with open("boom.txt", "w") as f: + f.write(content) + art.add_file("boom.txt", "test-name") + return art + + # Create an artifact sequence + portfolio (auto-created if it doesn't exist) + project = "test" + run = wandb_init(project=project) + + art = create_test_artifact("aaaaa") + run.log_artifact(art, aliases=["a"]) + art.wait() + + art = create_test_artifact("bbbb") + run.log_artifact(art, aliases=["b"]) + run.link_artifact(art, f"{project}/my-sample-portfolio") + art.wait() + run.finish() + + # Pull down from portfolio, verify version is indexed from portfolio not sequence + artifact = Api().artifact( + name=f"{project}/my-sample-portfolio:latest", type="test-type" + ) + + assert artifact.version == "v0" + assert artifact.source_version == "v1" diff --git a/tests/unit_tests/test_redir_full.py b/tests/pytest_tests/system_tests/test_redir_full.py similarity index 100% rename from tests/unit_tests/test_redir_full.py rename to tests/pytest_tests/system_tests/test_redir_full.py diff --git a/tests/unit_tests/test_report_api.py b/tests/pytest_tests/system_tests/test_report_api.py similarity index 100% rename from tests/unit_tests/test_report_api.py rename to tests/pytest_tests/system_tests/test_report_api.py diff --git a/tests/unit_tests/test_runtime.py b/tests/pytest_tests/system_tests/test_runtime.py similarity index 100% rename from tests/unit_tests/test_runtime.py rename to tests/pytest_tests/system_tests/test_runtime.py diff --git a/tests/unit_tests/test_save_policies.py b/tests/pytest_tests/system_tests/test_save_policies.py similarity index 100% rename from tests/unit_tests/test_save_policies.py rename to tests/pytest_tests/system_tests/test_save_policies.py diff --git a/tests/unit_tests/test_sender.py b/tests/pytest_tests/system_tests/test_sender.py similarity index 100% rename from tests/unit_tests/test_sender.py rename to tests/pytest_tests/system_tests/test_sender.py diff --git a/tests/unit_tests/test_start_method.py b/tests/pytest_tests/system_tests/test_start_method.py similarity index 100% rename from tests/unit_tests/test_start_method.py rename to tests/pytest_tests/system_tests/test_start_method.py diff --git a/tests/unit_tests_old/__init__.py b/tests/pytest_tests/system_tests/test_sweep/__init__.py similarity index 100% rename from tests/unit_tests_old/__init__.py rename to tests/pytest_tests/system_tests/test_sweep/__init__.py diff --git a/tests/pytest_tests/system_tests/test_sweep/test_public_api.py b/tests/pytest_tests/system_tests/test_sweep/test_public_api.py new file mode 100644 index 00000000000..f461f712afd --- /dev/null +++ b/tests/pytest_tests/system_tests/test_sweep/test_public_api.py @@ -0,0 +1,51 @@ +import pytest +import wandb +from wandb import Api + +from .test_wandb_sweep import ( + SWEEP_CONFIG_BAYES, + SWEEP_CONFIG_GRID, + SWEEP_CONFIG_GRID_NESTED, + SWEEP_CONFIG_RANDOM, + VALID_SWEEP_CONFIGS_MINIMAL, +) + + +@pytest.mark.parametrize( + "sweep_config,expected_run_count", + [ + (SWEEP_CONFIG_GRID, 3), + (SWEEP_CONFIG_GRID_NESTED, 9), + (SWEEP_CONFIG_BAYES, None), + (SWEEP_CONFIG_RANDOM, None), + ], + ids=["test grid", "test grid nested", "test bayes", "test random"], +) +def 
test_sweep_api_expected_run_count(
+    user, relay_server, sweep_config, expected_run_count
+):
+    _project = "test"
+    with relay_server() as relay:
+        sweep_id = wandb.sweep(sweep_config, entity=user, project=_project)
+
+    for comm in relay.context.raw_data:
+        q = comm["request"].get("query")
+        print(q)
+
+    print(f"sweep_id: {sweep_id}")
+    sweep = Api().sweep(f"{user}/{_project}/sweeps/{sweep_id}")
+
+    assert sweep.expected_run_count == expected_run_count
+
+
+@pytest.mark.parametrize("sweep_config", VALID_SWEEP_CONFIGS_MINIMAL)
+def test_sweep_api(user, relay_server, sweep_config):
+    _project = "test"
+    with relay_server():
+        sweep_id = wandb.sweep(sweep_config, entity=user, project=_project)
+    print(f"sweep_id: {sweep_id}")
+    sweep = Api().sweep(f"{user}/{_project}/sweeps/{sweep_id}")
+    assert sweep.entity == user
+    assert f"{user}/{_project}/sweeps/{sweep_id}" in sweep.url
+    assert sweep.state == "PENDING"
+    assert str(sweep) == f"<Sweep {user}/{_project}/{sweep_id} (PENDING)>"
diff --git a/tests/unit_tests/test_sweep_scheduler.py b/tests/pytest_tests/system_tests/test_sweep/test_sweep_scheduler.py
similarity index 100%
rename from tests/unit_tests/test_sweep_scheduler.py
rename to tests/pytest_tests/system_tests/test_sweep/test_sweep_scheduler.py
diff --git a/tests/unit_tests/test_wandb_agent.py b/tests/pytest_tests/system_tests/test_sweep/test_wandb_agent.py
similarity index 100%
rename from tests/unit_tests/test_wandb_agent.py
rename to tests/pytest_tests/system_tests/test_sweep/test_wandb_agent.py
diff --git a/tests/unit_tests/test_wandb_agent_full.py b/tests/pytest_tests/system_tests/test_sweep/test_wandb_agent_full.py
similarity index 100%
rename from tests/unit_tests/test_wandb_agent_full.py
rename to tests/pytest_tests/system_tests/test_sweep/test_wandb_agent_full.py
diff --git a/tests/unit_tests/test_wandb_sweep.py b/tests/pytest_tests/system_tests/test_sweep/test_wandb_sweep.py
similarity index 100%
rename from tests/unit_tests/test_wandb_sweep.py
rename to tests/pytest_tests/system_tests/test_sweep/test_wandb_sweep.py
diff --git a/tests/unit_tests/test_system_info.py b/tests/pytest_tests/system_tests/test_system_info.py
similarity index 81%
rename from tests/unit_tests/test_system_info.py
rename to tests/pytest_tests/system_tests/test_system_info.py
index 20947712f89..d8d75248182 100644
--- a/tests/unit_tests/test_system_info.py
+++ b/tests/pytest_tests/system_tests/test_system_info.py
@@ -5,6 +5,7 @@
 import unittest.mock
 
 import pytest
+import wandb
 from wandb.sdk.interface.interface_queue import InterfaceQueue
 from wandb.sdk.internal import context
 from wandb.sdk.internal.sender import SendManager
@@ -106,6 +107,30 @@ def test_executable_outside_cwd(meta, test_settings):
     assert data["program"] == "asdf.py"
 
 
+@pytest.fixture
+def mocked_ipython(mocker):
+    mocker.patch("wandb.sdk.lib.ipython._get_python_type", lambda: "jupyter")
+    mocker.patch("wandb.sdk.wandb_settings._get_python_type", lambda: "jupyter")
+    html_mock = mocker.MagicMock()
+    mocker.patch("wandb.sdk.lib.ipython.display_html", html_mock)
+    ipython = unittest.mock.MagicMock()
+    ipython.html = html_mock
+
+    def run_cell(cell):
+        print("Running cell: ", cell)
+        exec(cell)
+
+    ipython.run_cell = run_cell
+    # TODO: this is really unfortunate, for reasons not clear to me, monkeypatch doesn't work
+    orig_get_ipython = wandb.jupyter.get_ipython
+    orig_display = wandb.jupyter.display
+    wandb.jupyter.get_ipython = lambda: ipython
+    wandb.jupyter.display = lambda obj: html_mock(obj._repr_html_())
+    yield ipython
+    wandb.jupyter.get_ipython = orig_get_ipython
+
wandb.jupyter.display = orig_display + + def test_jupyter_name(meta, test_settings, mocked_ipython): meta = meta(test_settings(dict(notebook_name="test_nb"))) data = meta.probe() diff --git a/tests/unit_tests/test_tb_watcher.py b/tests/pytest_tests/system_tests/test_tb_watcher.py similarity index 100% rename from tests/unit_tests/test_tb_watcher.py rename to tests/pytest_tests/system_tests/test_tb_watcher.py diff --git a/tests/unit_tests/test_telemetry_full.py b/tests/pytest_tests/system_tests/test_telemetry_full.py similarity index 100% rename from tests/unit_tests/test_telemetry_full.py rename to tests/pytest_tests/system_tests/test_telemetry_full.py diff --git a/tests/unit_tests/test_time_resolution.py b/tests/pytest_tests/system_tests/test_time_resolution.py similarity index 100% rename from tests/unit_tests/test_time_resolution.py rename to tests/pytest_tests/system_tests/test_time_resolution.py diff --git a/tests/unit_tests/test_torch_full.py b/tests/pytest_tests/system_tests/test_torch_full.py similarity index 100% rename from tests/unit_tests/test_torch_full.py rename to tests/pytest_tests/system_tests/test_torch_full.py diff --git a/tests/unit_tests/test_validation_data_logger.py b/tests/pytest_tests/system_tests/test_validation_data_logger.py similarity index 99% rename from tests/unit_tests/test_validation_data_logger.py rename to tests/pytest_tests/system_tests/test_validation_data_logger.py index d16fbdfcf8a..ddebdf98488 100644 --- a/tests/unit_tests/test_validation_data_logger.py +++ b/tests/pytest_tests/system_tests/test_validation_data_logger.py @@ -1,6 +1,7 @@ import numpy as np import pytest import wandb +import wandb.data_types from wandb.sdk.integration_utils.data_logging import ( CAN_INFER_IMAGE_AND_VIDEO, ValidationDataLogger, @@ -150,6 +151,7 @@ def test_data_logger_val_user_proc(wandb_init): run.finish() +@pytest.mark.skip def test_data_logger_val_inferred_proc(wandb_init): run = wandb_init() np.random.seed(42) @@ -230,6 +232,7 @@ def test_data_logger_val_inferred_proc(wandb_init): run.finish() +@pytest.mark.skip def test_data_logger_val_inferred_proc_no_class(wandb_init): run = wandb_init() vd = ValidationDataLogger( @@ -352,6 +355,7 @@ def test_data_logger_pred_user_proc(wandb_init): run.finish() +@pytest.mark.skip def test_data_logger_pred_inferred_proc(wandb_init): run = wandb_init() vd = ValidationDataLogger( @@ -429,6 +433,7 @@ def test_data_logger_pred_inferred_proc(wandb_init): run.finish() +@pytest.mark.skip def test_data_logger_pred_inferred_proc_no_classes(wandb_init): run = wandb_init() vd = ValidationDataLogger( diff --git a/tests/unit_tests/test_wandb.py b/tests/pytest_tests/system_tests/test_wandb.py similarity index 74% rename from tests/unit_tests/test_wandb.py rename to tests/pytest_tests/system_tests/test_wandb.py index c3a096fabdf..5171b5fe3d3 100644 --- a/tests/unit_tests/test_wandb.py +++ b/tests/pytest_tests/system_tests/test_wandb.py @@ -4,8 +4,10 @@ """ import glob import inspect +import io import os import tempfile +import unittest.mock from contextlib import contextmanager from pathlib import Path from unittest import mock @@ -17,6 +19,47 @@ from wandb.viz import custom_chart +@pytest.fixture +def mock_sagemaker(): + config_path = "/opt/ml/input/config/hyperparameters.json" + resource_path = "/opt/ml/input/config/resourceconfig.json" + secrets_path = "secrets.env" + + orig_exist = os.path.exists + + def exists(path): + if path in (config_path, secrets_path, resource_path): + return True + else: + return orig_exist(path) + + def 
magic_factory(original): + def magic(path, *args, **kwargs): + if path == config_path: + return io.StringIO('{"foo": "bar"}') + elif path == resource_path: + return io.StringIO('{"hosts":["a", "b"]}') + elif path == secrets_path: + return io.StringIO("WANDB_TEST_SECRET=TRUE") + else: + return original(path, *args, **kwargs) + + return magic + + with unittest.mock.patch.dict( + os.environ, + { + "TRAINING_JOB_NAME": "sage", + "CURRENT_HOST": "maker", + }, + ), unittest.mock.patch("wandb.util.os.path.exists", exists,), unittest.mock.patch( + "builtins.open", + magic_factory(open), + create=True, + ): + yield + + def test_wandb_init_fixture_args(wandb_init): """Test that the fixture args are in sync with the real wandb.init().""" # comparing lists of args as order also matters @@ -291,48 +334,6 @@ def test_run_url(wandb_init): # ---------------------------------- -def test_nice_log_error(): - with pytest.raises(wandb.Error): - wandb.log({"no": "init"}) - - -def test_nice_log_error_config(): - with pytest.raises( - wandb.Error, match=r"You must call wandb.init\(\) before wandb.config.update" - ): - wandb.config.update({"foo": 1}) - with pytest.raises( - wandb.Error, match=r"You must call wandb.init\(\) before wandb.config.foo" - ): - wandb.config.foo = 1 - - -def test_nice_log_error_summary(): - with pytest.raises( - wandb.Error, - match=r"You must call wandb.init\(\) before wandb.summary\['great'\]", - ): - wandb.summary["great"] = 1 - with pytest.raises( - wandb.Error, match=r"You must call wandb.init\(\) before wandb.summary.bam" - ): - wandb.summary.bam = 1 - - -def test_log_only_strings_as_keys(mock_run): - run = mock_run() - with pytest.raises(ValueError): - run.log({1: 1000}) - with pytest.raises(ValueError): - run.log({("tup", "idx"): 1000}) - - -def test_log_not_dict(mock_run): - run = mock_run() - with pytest.raises(ValueError): - run.log(10) - - def test_log_step(relay_server, wandb_init): with relay_server() as relay: run = wandb_init() @@ -447,76 +448,6 @@ def test_save_invalid_path(wandb_init): run.finish() -@pytest.mark.xfail(reason="This test is flaky") -def test_save_policy_symlink(mock_run, parse_records, record_q): - run = mock_run() - - with open("test.rad", "w") as f: - f.write("something") - run.save("test.rad") - assert os.path.exists(os.path.join(run.dir, "test.rad")) - parsed = parse_records(record_q) - file_record = parsed.files[0].files[0] - assert file_record.path == "test.rad" - assert file_record.policy == 2 - - -@pytest.mark.xfail(reason="This test is flaky") -def test_save_policy_glob_symlink(mock_run, parse_records, record_q, capsys): - run = mock_run() - - with open("test.rad", "w") as f: - f.write("something") - with open("foo.rad", "w") as f: - f.write("something") - run.save("*.rad") - _, err = capsys.readouterr() - assert "Symlinked 2 files" in err - assert os.path.exists(os.path.join(run.dir, "test.rad")) - assert os.path.exists(os.path.join(run.dir, "foo.rad")) - - # test_save_policy_glob_symlink - parsed = parse_records(record_q) - file_record = parsed.files[0].files[0] - assert file_record.path == "*.rad" - assert file_record.policy == 2 - - -@pytest.mark.xfail(reason="This test is flaky") -def test_save_absolute_path(mock_run, parse_records, record_q, capsys): - run = mock_run() - root = tempfile.gettempdir() - test_path = os.path.join(root, "test.txt") - with open(test_path, "w") as f: - f.write("something") - - run.save(test_path) - _, err = capsys.readouterr() - assert "Saving files without folders" in err - assert 
os.path.exists(os.path.join(run.dir, "test.txt")) - parsed = parse_records(record_q) - file_record = parsed.files[0].files[0] - assert file_record.path == "test.txt" - assert file_record.policy == 2 - - -@pytest.mark.xfail(reason="This test is flaky") -def test_save_relative_path(mock_run, parse_records, record_q): - run = mock_run() - root = tempfile.gettempdir() - test_path = os.path.join(root, "tmp", "test.txt") - print("DAMN", os.path.dirname(test_path)) - filesystem.mkdir_exists_ok(os.path.dirname(test_path)) - with open(test_path, "w") as f: - f.write("something") - run.save(test_path, base_path=root, policy="now") - assert os.path.exists(os.path.join(run.dir, test_path)) - parsed = parse_records(record_q) - file_record = parsed.files[0].files[0] - assert file_record.path == os.path.relpath(test_path, root) - assert file_record.policy == 0 - - # ---------------------------------- # wandb.restore # ---------------------------------- @@ -594,36 +525,6 @@ def test_attach_usage_errors(wandb_init): run.finish() -# ---------------------------------- -# wandb.teardown -# ---------------------------------- - -# In a notebook environment we might get into a situation where the service process will be removed -# but the singleton setup instance still exists, hence it will try to do the teardown. -# Howeverwandb.teardown will encounter an error because the service process is already gone. -# but since we have an error handle logic in the teardown, we don't see the error -# only informational message about the error. -def test_teardown_error_path(capsys): - with mock.patch.dict( - os.environ, {wandb.env.SERVICE: "2-96604-tcp-localhost-57337"} - ): - with mock.patch.object( - wandb.sdk.wandb_manager._Manager, - "_get_service_interface", - return_value=mock.MagicMock(), - ): - wandb.setup() - assert wandb.wandb_sdk.wandb_setup._WandbSetup._instance - wandb.teardown() - assert wandb.env.SERVICE not in os.environ - assert not wandb.wandb_sdk.wandb_setup._WandbSetup._instance - _, err = capsys.readouterr() - assert ( - "While tearing down the service manager. 
The following error has occurred:" - in err - ) - - # TODO: test these or make sure they are tested somewhere # run.use_artifact() # run.log_artifact() diff --git a/tests/unit_tests/test_wandb_integration.py b/tests/pytest_tests/system_tests/test_wandb_integration.py similarity index 100% rename from tests/unit_tests/test_wandb_integration.py rename to tests/pytest_tests/system_tests/test_wandb_integration.py diff --git a/tests/unit_tests/test_wandb_run.py b/tests/pytest_tests/system_tests/test_wandb_run.py similarity index 59% rename from tests/unit_tests/test_wandb_run.py rename to tests/pytest_tests/system_tests/test_wandb_run.py index 3f88e7cb3cb..91afa585f37 100644 --- a/tests/unit_tests/test_wandb_run.py +++ b/tests/pytest_tests/system_tests/test_wandb_run.py @@ -1,6 +1,5 @@ import os import pickle -import platform import sys from unittest import mock @@ -71,274 +70,6 @@ def test_invalid_project_name(user, project_name): assert 'Invalid project name "{project_name}"' in str(e.value) -def test_run_step_property(mock_run): - run = mock_run() - run.log(dict(this=1)) - run.log(dict(this=2)) - assert run.step == 2 - - -def test_log_avoids_mutation(mock_run): - run = mock_run() - d = dict(this=1) - run.log(d) - assert d == dict(this=1) - - -def test_display(mock_run): - run = mock_run(settings=wandb.Settings(mode="offline")) - assert run.display() is False - - -@pytest.mark.parametrize( - "config, sweep_config, expected_config", - [ - ( - dict(param1=2, param2=4), - dict(), - dict(param1=2, param2=4), - ), - ( - dict(param1=2, param2=4), - dict(param3=9), - dict(param1=2, param2=4, param3=9), - ), - ( - dict(param1=2, param2=4), - dict(param2=8, param3=9), - dict(param1=2, param2=8, param3=9), - ), - ], -) -def test_run_config(mock_run, config, sweep_config, expected_config): - run = mock_run(config=config, sweep_config=sweep_config) - assert dict(run.config) == expected_config - - -def test_run_urls(mock_run): - base_url = "https://my.cool.site.com" - entity = "me" - project = "lol" - run_id = "my-run" - run = mock_run( - settings=wandb.Settings( - base_url=base_url, - entity=entity, - project=project, - run_id=run_id, - ) - ) - assert run.get_project_url() == f"{base_url}/{entity}/{project}" - assert run.get_url() == f"{base_url}/{entity}/{project}/runs/{run.id}" - - -def test_run_publish_config(mock_run, parse_records, record_q): - run = mock_run() - run.config.t = 1 - run.config.t2 = 2 - - parsed = parse_records(record_q) - - assert len(parsed.records) == 2 - assert len(parsed.summary) == 0 - - config = parsed.config - assert len(config) == 2 - assert config[0]["t"] == "1" - assert config[1]["t2"] == "2" - - -def test_run_publish_history(mock_run, parse_records, record_q): - run = mock_run() - run.log(dict(this=1)) - run.log(dict(that=2)) - - parsed = parse_records(record_q) - - assert len(parsed.records) == 2 - assert len(parsed.summary) == 0 - - history = parsed.history or parsed.partial_history - assert len(history) == 2 - assert history[0]["this"] == "1" - assert history[1]["that"] == "2" - - -@pytest.mark.skipif( - platform.system() == "Windows", - reason="numpy.float128 does not exist on windows", -) -@pytest.mark.skipif( - platform.system() == "Darwin" and platform.machine() == "arm64", - reason="numpy.float128 does not exist on Macs with the Apple M1 chip", -) -# @pytest.mark.GH2255 #TODO think of a marker format for tests that fix reported issues -def test_numpy_high_precision_float_downcasting(mock_run, parse_records, record_q): - run = mock_run() - 
run.log(dict(this=np.float128(0.0))) - - parsed = parse_records(record_q) - - assert len(parsed.records) == 1 - assert len(parsed.summary) == 0 - - history = parsed.history or parsed.partial_history - assert len(history) == 1 - assert history[0]["this"] == "0.0" - - -def test_mark_preempting(mock_run, parse_records, record_q): - run = mock_run() - run.log(dict(this=1)) - run.log(dict(that=2)) - run.mark_preempting() - - parsed = parse_records(record_q) - - assert len(parsed.records) == 3 - - assert len(parsed.preempting) == 1 - assert parsed.records[-1].HasField("preempting") - - -def test_run_pub_config(mock_run, record_q, parse_records): - run = mock_run() - run.config.t = 1 - run.config.t2 = 2 - - parsed = parse_records(record_q) - assert len(parsed.records) == 2 - assert len(parsed.summary) == 0 - assert len(parsed.config) == 2 - assert parsed.config[0]["t"] == "1" - assert parsed.config[1]["t2"] == "2" - - -def test_run_pub_history(mock_run, record_q, parse_records): - run = mock_run() - run.log(dict(this=1)) - run.log(dict(that=2)) - - parsed = parse_records(record_q) - assert len(parsed.records) == 2 - assert len(parsed.summary) == 0 - history = parsed.history or parsed.partial_history - assert len(history) == 2 - assert history[0]["this"] == "1" - assert history[1]["that"] == "2" - - -def test_deprecated_run_log_sync(mock_run, capsys): - run = mock_run() - run.log(dict(this=1), sync=True) - _, stderr = capsys.readouterr() - assert ( - "`sync` argument is deprecated and does not affect the behaviour of `wandb.log`" - in stderr - ) - - -def test_run_log_mp_warn(mock_run, capsys): - run = mock_run() - run._init_pid += 1 - run.log(dict(this=1)) - _, stderr = capsys.readouterr() - assert ( - f"`log` ignored (called from pid={os.getpid()}, " - f"`init` called from pid={run._init_pid})" in stderr - ) - - -def test_use_artifact_offline(mock_run): - run = mock_run(settings=wandb.Settings(mode="offline")) - with pytest.raises(Exception) as e_info: - run.use_artifact("boom-data") - assert str(e_info.value) == "Cannot use artifact when in offline mode." 
- - -def test_run_basic(): - s = wandb.Settings() - c = dict(param1=2, param2=4) - run = wandb_sdk.wandb_run.Run(settings=s, config=c) - assert dict(run.config) == dict(param1=2, param2=4) - - -def test_run_sweep(): - s = wandb.Settings() - c = dict(param1=2, param2=4) - sw = dict(param3=9) - run = wandb_sdk.wandb_run.Run(settings=s, config=c, sweep_config=sw) - assert dict(run.config) == dict(param1=2, param2=4, param3=9) - - -def test_run_sweep_overlap(): - s = wandb.Settings() - c = dict(param1=2, param2=4) - sw = dict(param2=8, param3=9) - run = wandb_sdk.wandb_run.Run(settings=s, config=c, sweep_config=sw) - assert dict(run.config) == dict(param1=2, param2=8, param3=9) - - -def test_except_hook(test_settings): - # Test to make sure we respect excepthooks by 3rd parties like pdb - errs = [] - - def hook(etype, val, tb): - return errs.append(str(val)) - - sys.excepthook = hook - - # We cant use raise statement in pytest context - def raise_(exc): - return sys.excepthook(type(exc), exc, None) - - raise_(Exception("Before wandb.init()")) - - run = wandb.init(mode="offline", settings=test_settings()) - - old_stderr_write = sys.stderr.write - stderr = [] - sys.stderr.write = stderr.append - - raise_(Exception("After wandb.init()")) - - assert errs == ["Before wandb.init()", "After wandb.init()"] - - # make sure wandb prints the traceback - assert "".join(stderr) == "Exception: After wandb.init()\n" - - sys.stderr.write = old_stderr_write - run.finish() - - -def assertion(run_id, found, stderr): - msg = ( - "`resume` will be ignored since W&B syncing is set to `offline`. " - f"Starting a new run with run id {run_id}" - ) - return msg in stderr if found else msg not in stderr - - -@pytest.mark.parametrize( - "resume, found", - [ - ("auto", True), - ("allow", True), - ("never", True), - ("must", True), - ("", False), - (0, False), - (True, True), - (None, False), - ], -) -def test_offline_resume(test_settings, capsys, resume, found): - run = wandb.init(mode="offline", resume=resume, settings=test_settings()) - captured = capsys.readouterr() - assert assertion(run.id, found, captured.err) - run.finish() - - def test_unlogged_artifact_in_config(user, test_settings): run = wandb.init(settings=test_settings()) artifact = wandb.Artifact("my-arti", type="dataset") @@ -454,3 +185,63 @@ def test_settings_unexpected_args_telemetry(runner, relay_server, capsys, user): # whose field 2 corresponds to unexpected arguments in Settings assert 2 in telemetry.get("11", []) run.finish() + + +def test_except_hook(test_settings): + # Test to make sure we respect excepthooks by 3rd parties like pdb + errs = [] + + def hook(etype, val, tb): + return errs.append(str(val)) + + sys.excepthook = hook + + # We cant use raise statement in pytest context + def raise_(exc): + return sys.excepthook(type(exc), exc, None) + + raise_(Exception("Before wandb.init()")) + + run = wandb.init(mode="offline", settings=test_settings()) + + old_stderr_write = sys.stderr.write + stderr = [] + sys.stderr.write = stderr.append + + raise_(Exception("After wandb.init()")) + + assert errs == ["Before wandb.init()", "After wandb.init()"] + + # make sure wandb prints the traceback + assert "".join(stderr) == "Exception: After wandb.init()\n" + + sys.stderr.write = old_stderr_write + run.finish() + + +def assertion(run_id, found, stderr): + msg = ( + "`resume` will be ignored since W&B syncing is set to `offline`. 
" + f"Starting a new run with run id {run_id}" + ) + return msg in stderr if found else msg not in stderr + + +@pytest.mark.parametrize( + "resume, found", + [ + ("auto", True), + ("allow", True), + ("never", True), + ("must", True), + ("", False), + (0, False), + (True, True), + (None, False), + ], +) +def test_offline_resume(test_settings, capsys, resume, found): + run = wandb.init(mode="offline", resume=resume, settings=test_settings()) + captured = capsys.readouterr() + assert assertion(run.id, found, captured.err) + run.finish() diff --git a/tests/pytest_tests/system_tests/test_wandb_settings.py b/tests/pytest_tests/system_tests/test_wandb_settings.py new file mode 100644 index 00000000000..4b66b746b1b --- /dev/null +++ b/tests/pytest_tests/system_tests/test_wandb_settings.py @@ -0,0 +1,290 @@ +""" +settings test. +""" + +import datetime +import os +import platform +from unittest import mock + +import pytest # type: ignore +import wandb +from wandb.sdk import wandb_login, wandb_settings + +Source = wandb_settings.Source + +# TODO: replace wandb_init with mock_run or move tests to integration tests + +# ------------------------------------ +# test Settings class +# ------------------------------------ + + +@pytest.mark.skipif( + platform.system() == "Windows", + reason="backend crashes on Windows in CI", +) +@mock.patch.dict( + os.environ, {"WANDB_START_METHOD": "thread", "USERNAME": "test"}, clear=True +) +def test_console_run(wandb_init): + run = wandb_init(mode="offline", settings={"console": "auto"}) + assert run._settings.console == "auto" + assert run._settings._console == wandb_settings.SettingsConsole.WRAP + run.finish() + + +# note: patching os.environ because other tests may have created env variables +# that are not in the default environment, which would cause these test to fail. +# setting {"USERNAME": "test"} because on Windows getpass.getuser() would otherwise fail. 
+@pytest.mark.skipif( + platform.system() == "Windows", reason="backend crashes on Windows in CI" +) +@mock.patch.dict(os.environ, {"USERNAME": "test"}, clear=True) +def test_sync_dir(wandb_init): + run = wandb_init(mode="offline") + print(run._settings.sync_dir) + assert run._settings.sync_dir == os.path.realpath( + os.path.join(".", "wandb", "latest-run") + ) + run.finish() + + +@pytest.mark.skipif( + platform.system() == "Windows", reason="backend crashes on Windows in CI" +) +@mock.patch.dict(os.environ, {"USERNAME": "test"}, clear=True) +def test_sync_file(wandb_init): + run = wandb_init(mode="offline") + assert run._settings.sync_file == os.path.realpath( + os.path.join(".", "wandb", "latest-run", f"run-{run.id}.wandb") + ) + run.finish() + + +@pytest.mark.skipif( + platform.system() == "Windows", reason="backend crashes on Windows in CI" +) +@mock.patch.dict(os.environ, {"USERNAME": "test"}, clear=True) +def test_files_dir(wandb_init): + run = wandb_init(mode="offline") + assert run._settings.files_dir == os.path.realpath( + os.path.join(".", "wandb", "latest-run", "files") + ) + run.finish() + + +@pytest.mark.skipif( + platform.system() == "Windows", reason="backend crashes on Windows in CI" +) +@mock.patch.dict(os.environ, {"USERNAME": "test"}, clear=True) +def test_tmp_dir(wandb_init): + run = wandb_init(mode="offline") + assert run._settings.tmp_dir == os.path.realpath( + os.path.join(".", "wandb", "latest-run", "tmp") + ) + run.finish() + + +@pytest.mark.skipif( + platform.system() == "Windows", reason="backend crashes on Windows in CI" +) +@mock.patch.dict(os.environ, {"USERNAME": "test"}, clear=True) +def test_tmp_code_dir(wandb_init): + run = wandb_init(mode="offline") + assert run._settings._tmp_code_dir == os.path.realpath( + os.path.join(".", "wandb", "latest-run", "tmp", "code") + ) + run.finish() + + +@pytest.mark.skipif( + platform.system() == "Windows", reason="backend crashes on Windows in CI" +) +@mock.patch.dict(os.environ, {"USERNAME": "test"}, clear=True) +def test_log_symlink_user(wandb_init): + run = wandb_init(mode="offline") + assert os.path.realpath(run._settings.log_symlink_user) == os.path.abspath( + run._settings.log_user + ) + run.finish() + + +@pytest.mark.skipif( + platform.system() == "Windows", reason="backend crashes on Windows in CI" +) +@mock.patch.dict(os.environ, {"USERNAME": "test"}, clear=True) +def test_log_symlink_internal(wandb_init): + run = wandb_init(mode="offline") + assert os.path.realpath(run._settings.log_symlink_internal) == os.path.abspath( + run._settings.log_internal + ) + run.finish() + + +@pytest.mark.skipif( + platform.system() == "Windows", reason="backend crashes on Windows in CI" +) +@mock.patch.dict(os.environ, {"USERNAME": "test"}, clear=True) +def test_sync_symlink_latest(wandb_init): + run = wandb_init(mode="offline") + time_tag = datetime.datetime.strftime( + run._settings._start_datetime, "%Y%m%d_%H%M%S" + ) + assert os.path.realpath(run._settings.sync_symlink_latest) == os.path.abspath( + os.path.join(".", "wandb", f"offline-run-{time_tag}-{run.id}") + ) + run.finish() + + +@pytest.mark.skipif( + platform.system() == "Windows", + reason="backend crashes on Windows in CI, likely bc of the overloaded env", +) +@mock.patch.dict(os.environ, {"USERNAME": "test"}, clear=True) +def test_console(runner, test_settings): + with runner.isolated_filesystem(): + test_settings = test_settings() + run = wandb.init(mode="offline") + assert run._settings.console == "auto" + assert run._settings._console == 
wandb_settings.SettingsConsole.REDIRECT + test_settings.update({"console": "off"}, source=Source.BASE) + assert test_settings._console == wandb_settings.SettingsConsole.OFF + test_settings.update({"console": "wrap"}, source=Source.BASE) + assert test_settings._console == wandb_settings.SettingsConsole.WRAP + run.finish() + + +def test_code_saving_save_code_env_false(wandb_init, test_settings): + settings = test_settings() + settings.update({"save_code": None}, source=Source.BASE) + with mock.patch.dict("os.environ", WANDB_SAVE_CODE="false"): + run = wandb_init(settings=settings) + assert run.settings.save_code is False + run.finish() + + +def test_code_saving_disable_code(wandb_init, test_settings): + settings = test_settings() + settings.update({"save_code": None}, source=Source.BASE) + with mock.patch.dict("os.environ", WANDB_DISABLE_CODE="true"): + run = wandb_init(settings=settings) + assert run.settings.save_code is False + run.finish() + + +def test_silent_run(wandb_init, test_settings): + test_settings = test_settings() + test_settings.update({"silent": "true"}, source=Source.SETTINGS) + assert test_settings.silent is True + run = wandb_init(settings=test_settings) + assert run._settings.silent is True + run.finish() + + +@pytest.mark.skip(reason="causes other tests that depend on capsys to fail") +def test_silent_env_run(wandb_init): + with mock.patch.dict("os.environ", WANDB_SILENT="true"): + run = wandb_init() + assert run._settings.silent is True + run.finish() + + +def test_strict_run(wandb_init, test_settings): + test_settings = test_settings() + test_settings.update({"strict": "true"}, source=Source.SETTINGS) + assert test_settings.strict is True + run = wandb_init(settings=test_settings) + assert run._settings.strict is True + run.finish() + + +def test_show_info_run(wandb_init): + run = wandb_init() + assert run._settings.show_info is True + run.finish() + + +def test_show_info_false_run(wandb_init, test_settings): + test_settings = test_settings() + test_settings.update({"show_info": "false"}, source=Source.SETTINGS) + run = wandb_init(settings=test_settings) + assert run._settings.show_info is False + run.finish() + + +def test_show_warnings_run(wandb_init, test_settings): + test_settings = test_settings() + test_settings.update({"show_warnings": "true"}, source=Source.SETTINGS) + run = wandb_init(settings=test_settings) + assert run._settings.show_warnings is True + run.finish() + + +def test_show_warnings_false_run(wandb_init, test_settings): + test_settings = test_settings() + test_settings.update({"show_warnings": "false"}, source=Source.SETTINGS) + run = wandb_init(settings=test_settings) + assert run._settings.show_warnings is False + run.finish() + + +def test_show_errors_run(wandb_init, test_settings): + test_settings = test_settings() + test_settings.update({"show_errors": True}, source=Source.SETTINGS) + run = wandb_init(settings=test_settings) + assert run._settings.show_errors is True + run.finish() + + +def test_show_errors_false_run(wandb_init, test_settings): + test_settings = test_settings() + test_settings.update({"show_errors": False}, source=Source.SETTINGS) + run = wandb_init(settings=test_settings) + assert run._settings.show_errors is False + run.finish() + + +def test_not_jupyter(wandb_init): + run = wandb_init() + assert run._settings._jupyter is False + run.finish() + + +def test_resume_fname_run(wandb_init): + run = wandb_init() + assert run._settings.resume_fname == os.path.join( + run._settings.root_dir, "wandb", "wandb-resume.json" + ) + 
run.finish() + + +def test_wandb_dir_run(wandb_init): + run = wandb_init() + assert os.path.abspath(run._settings.wandb_dir) == os.path.abspath( + os.path.join(run._settings.root_dir, "wandb") + ) + run.finish() + + +def test_override_login_settings(user, test_settings): + wlogin = wandb_login._WandbLogin() + login_settings = test_settings().copy() + login_settings.update(show_emoji=True) + wlogin.setup({"_settings": login_settings}) + assert wlogin._settings.show_emoji is True + + +def test_override_login_settings_with_dict(user): + wlogin = wandb_login._WandbLogin() + login_settings = dict(show_emoji=True) + wlogin.setup({"_settings": login_settings}) + assert wlogin._settings.show_emoji is True + + +def test_setup_offline(user, test_settings): + # this is to increase coverage + login_settings = test_settings().copy() + login_settings.update(mode="offline") + assert wandb.setup(settings=login_settings)._instance._get_entity() is None + assert wandb.setup(settings=login_settings)._instance._load_viewer() is None diff --git a/tests/unit_tests/test_wandb_tensorflow.py b/tests/pytest_tests/system_tests/test_wandb_tensorflow.py similarity index 100% rename from tests/unit_tests/test_wandb_tensorflow.py rename to tests/pytest_tests/system_tests/test_wandb_tensorflow.py diff --git a/tests/pytest_tests/system_tests/test_wandb_verify.py b/tests/pytest_tests/system_tests/test_wandb_verify.py new file mode 100644 index 00000000000..ced1bb5cbdd --- /dev/null +++ b/tests/pytest_tests/system_tests/test_wandb_verify.py @@ -0,0 +1,14 @@ +import unittest.mock + +import wandb.sdk.verify.verify as wandb_verify +from wandb.apis import InternalApi + + +def test_check_logged_in(wandb_init): + internal_api = unittest.mock.MagicMock(spec=InternalApi) + internal_api.api_key = None + assert not wandb_verify.check_logged_in(internal_api, "localhost:8000") + + run = wandb_init() + assert wandb_verify.check_logged_in(InternalApi(), run.settings.base_url) + run.finish() diff --git a/tests/unit_tests/tests_launch/test_github_reference.py b/tests/pytest_tests/system_tests/tests_launch/test_github_reference.py similarity index 100% rename from tests/unit_tests/tests_launch/test_github_reference.py rename to tests/pytest_tests/system_tests/tests_launch/test_github_reference.py diff --git a/tests/unit_tests/tests_launch/test_job.py b/tests/pytest_tests/system_tests/tests_launch/test_job.py similarity index 100% rename from tests/unit_tests/tests_launch/test_job.py rename to tests/pytest_tests/system_tests/tests_launch/test_job.py diff --git a/tests/unit_tests/tests_launch/test_launch.py b/tests/pytest_tests/system_tests/tests_launch/test_launch.py similarity index 100% rename from tests/unit_tests/tests_launch/test_launch.py rename to tests/pytest_tests/system_tests/tests_launch/test_launch.py diff --git a/tests/unit_tests/tests_launch/test_launch_add.py b/tests/pytest_tests/system_tests/tests_launch/test_launch_add.py similarity index 100% rename from tests/unit_tests/tests_launch/test_launch_add.py rename to tests/pytest_tests/system_tests/tests_launch/test_launch_add.py diff --git a/tests/unit_tests/tests_launch/test_launch_cli.py b/tests/pytest_tests/system_tests/tests_launch/test_launch_cli.py similarity index 100% rename from tests/unit_tests/tests_launch/test_launch_cli.py rename to tests/pytest_tests/system_tests/tests_launch/test_launch_cli.py diff --git a/tests/unit_tests/tests_launch/test_launch_kubernetes.py b/tests/pytest_tests/system_tests/tests_launch/test_launch_kubernetes.py similarity index 100% rename 
from tests/unit_tests/tests_launch/test_launch_kubernetes.py rename to tests/pytest_tests/system_tests/tests_launch/test_launch_kubernetes.py diff --git a/tests/unit_tests/tests_launch/test_launch_run.py b/tests/pytest_tests/system_tests/tests_launch/test_launch_run.py similarity index 100% rename from tests/unit_tests/tests_launch/test_launch_run.py rename to tests/pytest_tests/system_tests/tests_launch/test_launch_run.py diff --git a/tests/unit_tests/tests_launch/test_launch_sweep.py b/tests/pytest_tests/system_tests/tests_launch/test_launch_sweep.py similarity index 100% rename from tests/unit_tests/tests_launch/test_launch_sweep.py rename to tests/pytest_tests/system_tests/tests_launch/test_launch_sweep.py diff --git a/tests/unit_tests/tests_launch/test_wandb_reference.py b/tests/pytest_tests/system_tests/tests_launch/test_wandb_reference.py similarity index 100% rename from tests/unit_tests/tests_launch/test_wandb_reference.py rename to tests/pytest_tests/system_tests/tests_launch/test_wandb_reference.py diff --git a/tests/unit_tests_old/tests_launch/__init__.py b/tests/pytest_tests/unit_tests/__init__.py similarity index 100% rename from tests/unit_tests_old/tests_launch/__init__.py rename to tests/pytest_tests/unit_tests/__init__.py diff --git a/tests/pytest_tests/unit_tests/conftest.py b/tests/pytest_tests/unit_tests/conftest.py new file mode 100644 index 00000000000..6302a936630 --- /dev/null +++ b/tests/pytest_tests/unit_tests/conftest.py @@ -0,0 +1,70 @@ +from queue import Queue +from typing import Callable, Dict, Generator, List + +import pytest + +# -------------------------------- +# Fixtures for user test point +# -------------------------------- + + +class RecordsUtil: + def __init__(self, queue: "Queue") -> None: + self.records = [] + while not queue.empty(): + self.records.append(queue.get()) + + def __len__(self) -> int: + return len(self.records) + + def __getitem__(self, name: str) -> Generator: + for record in self.records: + yield from self.resolve_item(record, name) + + @staticmethod + def resolve_item(obj, attr: str, sep: str = ".") -> List: + for name in attr.split(sep): + if not obj.HasField(name): + return [] + obj = getattr(obj, name) + return [obj] + + @staticmethod + def dictify(obj, key: str = "key", value: str = "value_json") -> Dict: + return {getattr(item, key): getattr(item, value) for item in obj} + + @property + def config(self) -> List: + return [self.dictify(_c.update) for _c in self["config"]] + + @property + def history(self) -> List: + return [self.dictify(_h.item) for _h in self["history"]] + + @property + def partial_history(self) -> List: + return [self.dictify(_h.item) for _h in self["request.partial_history"]] + + @property + def preempting(self) -> List: + return list(self["preempting"]) + + @property + def summary(self) -> List: + return list(self["summary"]) + + @property + def files(self) -> List: + return list(self["files"]) + + @property + def metric(self): + return list(self["metric"]) + + +@pytest.fixture +def parse_records() -> Generator[Callable, None, None]: + def records_parser_fn(q: "Queue") -> RecordsUtil: + return RecordsUtil(q) + + yield records_parser_fn diff --git a/tests/unit_tests/saved_model_constructors.py b/tests/pytest_tests/unit_tests/saved_model_constructors.py similarity index 100% rename from tests/unit_tests/saved_model_constructors.py rename to tests/pytest_tests/unit_tests/saved_model_constructors.py diff --git a/tests/unit_tests/test_cli_full.py b/tests/pytest_tests/unit_tests/test_cli.py similarity index 69% 
rename from tests/unit_tests/test_cli_full.py
rename to tests/pytest_tests/unit_tests/test_cli.py
index fb0df165976..51f1163dae8 100644
--- a/tests/unit_tests/test_cli_full.py
+++ b/tests/pytest_tests/unit_tests/test_cli.py
@@ -1,4 +1,4 @@
-import contextlib
+import datetime
 import getpass
 import importlib
 import netrc
@@ -9,6 +9,7 @@
 import pytest
 import wandb
+from wandb.apis.internal import InternalApi
 from wandb.cli import cli
 
 DOCKER_SHA = (
@@ -55,100 +56,185 @@ def hosts(self):
     monkeypatch.setattr(netrc, "netrc", lambda *args: FakeNet())
 
 
-@contextlib.contextmanager
-def config_dir():
-    try:
-        os.environ["WANDB_CONFIG"] = os.getcwd()
-        yield
-    finally:
-        del os.environ["WANDB_CONFIG"]
+@pytest.mark.skip(reason="Currently don't have 'on' in the CLI")
+def test_enable_on(runner, git_repo):
+    with runner.isolated_filesystem():
+        with open("wandb/settings", "w") as f:
+            f.write("[default]\nproject=rad")
+        result = runner.invoke(cli.on)
+        print(result.output)
+        print(result.exception)
+        print(traceback.print_tb(result.exc_info[2]))
+        assert "W&B enabled" in str(result.output)
+        assert result.exit_code == 0
 
 
-def debug_result(result, prefix=None):
-    prefix = prefix or ""
-    print("DEBUG({}) {} = {}".format(prefix, "out", result.output))
-    print("DEBUG({}) {} = {}".format(prefix, "exc", result.exception))
-    print(
-        "DEBUG({}) {} = {}".format(prefix, "tb", traceback.print_tb(result.exc_info[2]))
-    )
+@pytest.mark.skip(reason="Currently don't have 'off' in the CLI")
+def test_enable_off(runner, git_repo):
+    with runner.isolated_filesystem():
+        with open("wandb/settings", "w") as f:
+            f.write("[default]\nproject=rad")
+        result = runner.invoke(cli.off)
+        print(result.output)
+        print(result.exception)
+        print(traceback.print_tb(result.exc_info[2]))
+        assert "W&B disabled" in str(result.output)
+        assert "disabled" in open("wandb/settings").read()
+        assert result.exit_code == 0
 
 
-@pytest.mark.xfail(reason="This test is flaky on CI")
-def test_init_reinit(runner, empty_netrc, user):
-    with runner.isolated_filesystem(), mock.patch(
-        "wandb.sdk.lib.apikey.len", return_value=40
-    ):
-        result = runner.invoke(cli.login, [user])
-        debug_result(result, "login")
-        result = runner.invoke(cli.init, input="y\n\n\n")
-        debug_result(result, "init")
+def test_no_project_bad_command(runner):
+    with runner.isolated_filesystem():
+        result = runner.invoke(cli.cli, ["fsd"])
+        print(result.output)
+        print(result.exception)
+        print(traceback.print_tb(result.exc_info[2]))
+        assert "No such command" in result.output
+        assert result.exit_code == 2
+
+
+def test_login_key_arg(runner, dummy_api_key):
+    with runner.isolated_filesystem():
+        # If the test was run from a directory containing .wandb, then __stage_dir__
+        # was '.wandb' when imported by api.py; reload to fix. UGH!
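+        # (likely moot here: runner.isolated_filesystem() starts the test from a clean temp cwd)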
+ # reload(wandb) + result = runner.invoke(cli.login, [dummy_api_key]) + print("Output: ", result.output) + print("Exception: ", result.exception) + print("Traceback: ", traceback.print_tb(result.exc_info[2])) assert result.exit_code == 0 with open("netrc") as f: generated_netrc = f.read() - with open("wandb/settings") as f: - generated_wandb = f.read() - assert user in generated_netrc - assert user in generated_wandb + assert dummy_api_key in generated_netrc -@pytest.mark.xfail(reason="This test is flakey on CI") -def test_init_add_login(runner, empty_netrc, user): - with runner.isolated_filesystem(), mock.patch( - "wandb.sdk.lib.apikey.len", return_value=40 - ): +def test_login_host_trailing_slash_fix_invalid(runner, dummy_api_key, local_settings): + with runner.isolated_filesystem(): with open("netrc", "w") as f: - f.write("previous config") - result = runner.invoke(cli.login, [user]) - debug_result(result, "login") - result = runner.invoke(cli.init, input=f"y\n{user}\nvanpelt\n") - debug_result(result, "init") + f.write(f"machine \n login user\npassword {dummy_api_key}") + result = runner.invoke( + cli.login, ["--host", "https://google.com/", dummy_api_key] + ) assert result.exit_code == 0 with open("netrc") as f: generated_netrc = f.read() - with open("wandb/settings") as f: - generated_wandb = f.read() - assert user in generated_netrc - assert user in generated_wandb + assert generated_netrc == ( + "machine google.com\n" + " login user\n" + " password {}\n".format(dummy_api_key) + ) -@pytest.mark.xfail(reason="This test is flakey on CI") -def test_init_existing_login(runner, user): +@pytest.mark.parametrize( + "host, error", + [ + ("https://app.wandb.ai", "did you mean https://api.wandb.ai"), + ("ftp://google.com", "URL must start with `http(s)://`"), + ], +) +def test_login_bad_host(runner, host, error, local_settings): with runner.isolated_filesystem(): - with open("netrc", "w") as f: - f.write(f"machine localhost\n\tlogin {user}\tpassword {user}") - result = runner.invoke(cli.init, input="y\nvanpelt\nfoo\n") - print(result.output) - print(result.exception) - print(traceback.print_tb(result.exc_info[2])) + result = runner.invoke(cli.login, ["--host", host]) + assert error in result.output + assert result.exit_code != 0 + + +def test_login_onprem_key_arg(runner, dummy_api_key): + with runner.isolated_filesystem(): + onprem_key = "test-" + dummy_api_key + # with runner.isolated_filesystem(): + result = runner.invoke(cli.login, [onprem_key]) + print("Output: ", result.output) + print("Exception: ", result.exception) + print("Traceback: ", traceback.print_tb(result.exc_info[2])) assert result.exit_code == 0 - with open("wandb/settings") as f: - generated_wandb = f.read() - assert user in generated_wandb - assert "This directory is configured" in result.output + with open("netrc") as f: + generated_netrc = f.read() + assert onprem_key in generated_netrc -@pytest.mark.xfail(reason="This test is flakey on CI") -def test_pull(runner, wandb_init): +def test_login_invalid_key_arg(runner, dummy_api_key): with runner.isolated_filesystem(): - project_name = "test_pull" - file_name = "weights.h5" - run = wandb_init(project=project_name) - with open(file_name, "w") as f: - f.write("WEIGHTS") - run.save(file_name) - run.finish() - - # delete the file so that we can pull it and check that it is there - os.remove(file_name) - - result = runner.invoke(cli.pull, [run.id, "--project", project_name]) - print(result.output) - print(result.exception) - print(traceback.print_tb(result.exc_info[2])) + 
invalid_key = "test--" + dummy_api_key + result = runner.invoke(cli.login, [invalid_key]) + assert "API key must be 40 characters long, yours was" in str(result) + assert result.exit_code == 1 + + +@pytest.mark.skip(reason="Just need to make the mocking work correctly") +def test_login_anonymously(runner, dummy_api_key, monkeypatch, empty_netrc): + with runner.isolated_filesystem(): + api = InternalApi() + monkeypatch.setattr(cli, "api", api) + monkeypatch.setattr( + wandb.sdk.internal.internal_api.Api, + "create_anonymous_api_key", + lambda *args, **kwargs: dummy_api_key, + ) + result = runner.invoke(cli.login, ["--anonymously"]) + print("Output: ", result.output) + print("Exception: ", result.exception) + print("Traceback: ", traceback.print_tb(result.exc_info[2])) assert result.exit_code == 0 - assert f"Downloading: {project_name}/{run.id}" in result.output - assert os.path.isfile(file_name) - assert f"File {file_name}" in result.output + with open("netrc") as f: + generated_netrc = f.read() + assert dummy_api_key in generated_netrc + + +def test_sync_gc(runner): + with runner.isolated_filesystem(): + if not os.path.isdir("wandb"): + os.mkdir("wandb") + d1 = datetime.datetime.now() + d2 = d1 - datetime.timedelta(hours=3) + run1 = d1.strftime("run-%Y%m%d_%H%M%S-abcd") + run2 = d2.strftime("run-%Y%m%d_%H%M%S-efgh") + run1_dir = os.path.join("wandb", run1) + run2_dir = os.path.join("wandb", run2) + os.mkdir(run1_dir) + with open(os.path.join(run1_dir, "run-abcd.wandb"), "w") as f: + f.write("") + with open(os.path.join(run1_dir, "run-abcd.wandb.synced"), "w") as f: + f.write("") + os.mkdir(run2_dir) + with open(os.path.join(run2_dir, "run-efgh.wandb"), "w") as f: + f.write("") + with open(os.path.join(run2_dir, "run-efgh.wandb.synced"), "w") as f: + f.write("") + assert ( + runner.invoke( + cli.sync, ["--clean", "--clean-old-hours", "2"], input="y\n" + ).exit_code + ) == 0 + + assert os.path.exists(run1_dir) + assert not os.path.exists(run2_dir) + assert ( + runner.invoke( + cli.sync, ["--clean", "--clean-old-hours", "0"], input="y\n" + ).exit_code + == 0 + ) + assert not os.path.exists(run1_dir) + + +def test_cli_login_reprompts_when_no_key_specified(runner, mocker, dummy_api_key): + with runner.isolated_filesystem(): + mocker.patch("wandb.wandb_lib.apikey.getpass", input) + # this first gives login an empty API key, which should cause + # it to re-prompt. this is what we are testing. we then give + # it a valid API key (the dummy API key with a different final + # letter to check that our monkeypatch input is working as + # expected) to terminate the prompt finally we grep for the + # Error: No API key specified to assert that the re-prompt + # happened + result = runner.invoke(cli.login, input=f"\n{dummy_api_key[:-1]}q\n") + print(f"DEBUG(login) out = {result.output}") + print(f"DEBUG(login) exc = {result.exception}") + print(f"DEBUG(login) tb = {traceback.print_tb(result.exc_info[2])}") + with open("netrc") as f: + print(f.read()) + assert "ERROR No API key specified." 
in result.output def test_docker_run_digest(runner, docker, monkeypatch): @@ -561,105 +647,6 @@ def test_local_already_running(runner, docker, local_settings): assert "A container named wandb-local is already running" in result.output -@pytest.mark.parametrize( - "tb_file_name,history_length", - [ - ("events.out.tfevents.1585769947.cvp", 17), - pytest.param( - "events.out.tfevents.1611911647.big-histos", - 27, - marks=[ - pytest.mark.flaky, - pytest.mark.xfail(reason="test seems flaky, reenable with WB-5015"), - ], - ), - ], -) -def test_sync_tensorboard( - runner, - relay_server, - wandb_init, - copy_asset, - tb_file_name, - history_length, -): - with relay_server() as relay, runner.isolated_filesystem(): - project_name = "test_sync_tensorboard" - run = wandb.init(project=project_name) - run.finish() - - copy_asset(tb_file_name) - - result = runner.invoke( - cli.sync, [".", f"--id={run.id}", f"--project={project_name}"] - ) - - assert result.exit_code == 0 - assert "Found 1 tfevent files" in result.output - history = relay.context.get_run_history(run.id) - assert len(history) == history_length - - # Check the no sync tensorboard flag - result = runner.invoke(cli.sync, [".", "--no-sync-tensorboard"]) - assert "Skipping directory: {}\n".format(os.path.abspath(".")) in result.output - assert tb_file_name in os.listdir(".") - - -def test_sync_wandb_run(runner, relay_server, user, copy_asset): - # note: we have to mock out ArtifactSaver.save - # because the artifact does not actually exist - # among assets listed in the .wandb file. - # this a problem for a real backend that we use now - # (as we used to use a mock backend) - # todo: create a new test asset that will contain an artifact - with relay_server() as relay, runner.isolated_filesystem(), mock.patch( - "wandb.sdk.internal.artifacts.ArtifactSaver.save", return_value=None - ): - copy_asset("wandb") - - result = runner.invoke(cli.sync, ["--sync-all"]) - print(result.output) - print(traceback.print_tb(result.exc_info[2])) - assert result.exit_code == 0 - - assert f"{user}/code-toad/runs/g9dvvkua ... done." in result.output - assert len(relay.context.events) == 1 - - # Check we marked the run as synced - result = runner.invoke(cli.sync, ["--sync-all"]) - assert result.exit_code == 0 - assert "wandb: ERROR Nothing to sync." in result.output - - -def test_sync_wandb_run_and_tensorboard(runner, relay_server, user, copy_asset): - with relay_server() as relay, runner.isolated_filesystem(), mock.patch( - "wandb.sdk.internal.artifacts.ArtifactSaver.save", return_value=None - ): - run_dir = os.path.join("wandb", "offline-run-20210216_154407-g9dvvkua") - copy_asset("wandb") - tb_file_name = "events.out.tfevents.1585769947.cvp" - copy_asset(tb_file_name, os.path.join(run_dir, tb_file_name)) - - result = runner.invoke(cli.sync, ["--sync-all"]) - print(result.output) - print(traceback.print_tb(result.exc_info[2])) - assert result.exit_code == 0 - - assert f"{user}/code-toad/runs/g9dvvkua ... done." 
in result.output - assert len(relay.context.events) == 1 - - uploaded_files = relay.context.get_run_uploaded_files("g9dvvkua") - assert "code/standalone_tests/code-toad.py" in uploaded_files - - # Check we marked the run as synced - result = runner.invoke(cli.sync, [run_dir]) - assert result.exit_code == 0 - assert ( - "WARNING Found .wandb file, not streaming tensorboard metrics" - in result.output - ) - - def test_cli_debug_log_scoping(runner, test_settings): with runner.isolated_filesystem(): os.chdir(os.getcwd()) diff --git a/tests/unit_tests/test_data_types.py b/tests/pytest_tests/unit_tests/test_data_types.py similarity index 99% rename from tests/unit_tests/test_data_types.py rename to tests/pytest_tests/unit_tests/test_data_types.py index 1e03f0dd385..37155d42aa2 100644 --- a/tests/unit_tests/test_data_types.py +++ b/tests/pytest_tests/unit_tests/test_data_types.py @@ -2,6 +2,7 @@ import io import os import platform +from pathlib import Path import matplotlib.pyplot as plt # noqa: E402 import numpy as np @@ -298,6 +299,9 @@ def mock_reference_get_responses(): yield rsps +@pytest.mark.skipif( + platform.system() == "Windows", reason="Windows doesn't support symlinks" +) def test_image_refs(mock_reference_get_responses): mock_reference_get_responses.add( method="GET", @@ -309,7 +313,7 @@ def test_image_refs(mock_reference_get_responses): art = wandb.Artifact("image_ref_test", "images") art.add(image_obj, "image_ref") image_expected = { - "path": "media/images/75c13e5a637fb8052da9/puppy.jpg", + "path": str(Path("media/images/75c13e5a637fb8052da9/puppy.jpg")), "sha256": "75c13e5a637fb8052da99792fca8323c06b138966cd30482e84d62c83adc01ee", "_type": "image-file", "format": "jpg", @@ -319,7 +323,7 @@ def test_image_refs(mock_reference_get_responses): "digest": "SZvdv5ouAEq2DEOgVBwOog==", "size": 173, }, - "media/images/75c13e5a637fb8052da9/puppy.jpg": { + str(Path("media/images/75c13e5a637fb8052da9/puppy.jpg")): { "digest": "testEtag", "ref": "http://nonexistent/puppy.jpg", "extra": {"etag": "testEtag"}, diff --git a/tests/unit_tests/test_datastore.py b/tests/pytest_tests/unit_tests/test_datastore.py similarity index 100% rename from tests/unit_tests/test_datastore.py rename to tests/pytest_tests/unit_tests/test_datastore.py diff --git a/tests/unit_tests/test_dir_watcher.py b/tests/pytest_tests/unit_tests/test_dir_watcher.py similarity index 100% rename from tests/unit_tests/test_dir_watcher.py rename to tests/pytest_tests/unit_tests/test_dir_watcher.py diff --git a/tests/unit_tests/test_docker.py b/tests/pytest_tests/unit_tests/test_docker.py similarity index 100% rename from tests/unit_tests/test_docker.py rename to tests/pytest_tests/unit_tests/test_docker.py diff --git a/tests/unit_tests/test_dtypes.py b/tests/pytest_tests/unit_tests/test_dtypes.py similarity index 100% rename from tests/unit_tests/test_dtypes.py rename to tests/pytest_tests/unit_tests/test_dtypes.py diff --git a/tests/unit_tests/test_file_stream_user.py b/tests/pytest_tests/unit_tests/test_file_stream.py similarity index 100% rename from tests/unit_tests/test_file_stream_user.py rename to tests/pytest_tests/unit_tests/test_file_stream.py diff --git a/tests/unit_tests/test_flow_control.py b/tests/pytest_tests/unit_tests/test_flow_control.py similarity index 100% rename from tests/unit_tests/test_flow_control.py rename to tests/pytest_tests/unit_tests/test_flow_control.py diff --git a/tests/unit_tests/test_import_wandb.py b/tests/pytest_tests/unit_tests/test_import_wandb.py similarity index 100% rename from 
tests/unit_tests/test_import_wandb.py rename to tests/pytest_tests/unit_tests/test_import_wandb.py diff --git a/tests/unit_tests/test_internal_api.py b/tests/pytest_tests/unit_tests/test_internal_api.py similarity index 99% rename from tests/unit_tests/test_internal_api.py rename to tests/pytest_tests/unit_tests/test_internal_api.py index 90f2a6895a1..81f127f3c08 100644 --- a/tests/unit_tests/test_internal_api.py +++ b/tests/pytest_tests/unit_tests/test_internal_api.py @@ -17,6 +17,12 @@ from .test_retry import MockTime, mock_time # noqa: F401 +@pytest.fixture +def mock_responses(): + with responses.RequestsMock() as rsps: + yield rsps + + def test_agent_heartbeat_with_no_agent_id_fails(): a = internal.Api() with pytest.raises(ValueError): diff --git a/tests/unit_tests/test_job_builder.py b/tests/pytest_tests/unit_tests/test_job_builder.py similarity index 100% rename from tests/unit_tests/test_job_builder.py rename to tests/pytest_tests/unit_tests/test_job_builder.py diff --git a/tests/unit_tests/test_keras.py b/tests/pytest_tests/unit_tests/test_keras.py similarity index 100% rename from tests/unit_tests/test_keras.py rename to tests/pytest_tests/unit_tests/test_keras.py diff --git a/tests/unit_tests/test_lib.py b/tests/pytest_tests/unit_tests/test_lib/test_apikey.py similarity index 100% rename from tests/unit_tests/test_lib.py rename to tests/pytest_tests/unit_tests/test_lib/test_apikey.py diff --git a/tests/unit_tests/lib/test_filesystem.py b/tests/pytest_tests/unit_tests/test_lib/test_filesystem.py similarity index 100% rename from tests/unit_tests/lib/test_filesystem.py rename to tests/pytest_tests/unit_tests/test_lib/test_filesystem.py diff --git a/tests/unit_tests/test_fsm.py b/tests/pytest_tests/unit_tests/test_lib/test_fsm.py similarity index 100% rename from tests/unit_tests/test_fsm.py rename to tests/pytest_tests/unit_tests/test_lib/test_fsm.py diff --git a/tests/unit_tests/test_git_repo.py b/tests/pytest_tests/unit_tests/test_lib/test_git.py similarity index 100% rename from tests/unit_tests/test_git_repo.py rename to tests/pytest_tests/unit_tests/test_lib/test_git.py diff --git a/tests/unit_tests/test_lib_hashutil.py b/tests/pytest_tests/unit_tests/test_lib/test_hashutil.py similarity index 89% rename from tests/unit_tests/test_lib_hashutil.py rename to tests/pytest_tests/unit_tests/test_lib/test_hashutil.py index a2e8800e245..a4cd2d7c4c2 100644 --- a/tests/unit_tests/test_lib_hashutil.py +++ b/tests/pytest_tests/unit_tests/test_lib/test_hashutil.py @@ -1,7 +1,9 @@ import base64 import hashlib +import platform import tempfile +import pytest from hypothesis import given from hypothesis import strategies as st from wandb.sdk.lib import hashutil @@ -42,6 +44,7 @@ def test_md5_file_b64_no_files(): @given(st.binary()) +@pytest.mark.skipif(platform.system() == "Windows", reason="Fails on Windows") def test_md5_file_hex_single_file(data): with tempfile.NamedTemporaryFile() as f: f.write(data) @@ -50,6 +53,7 @@ def test_md5_file_hex_single_file(data): @given(st.binary(), st.text(), st.binary()) +@pytest.mark.skipif(platform.system() == "Windows", reason="Fails on Windows") def test_md5_file_b64_three_files(data1, text, data2): open("a.bin", "wb").write(data1) open("b.txt", "w").write(text) @@ -62,6 +66,7 @@ def test_md5_file_b64_three_files(data1, text, data2): @given(st.binary(), st.text(), st.binary()) +@pytest.mark.skipif(platform.system() == "Windows", reason="Fails on Windows") def test_md5_file_hex_three_files(data1, text, data2): open("a.bin", "wb").write(data1) 
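+    # note: each hypothesis example rewrites these scratch files in the current working directory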
open("b.txt", "w").write(text) diff --git a/tests/unit_tests/test_mailbox.py b/tests/pytest_tests/unit_tests/test_lib/test_mailbox.py similarity index 100% rename from tests/unit_tests/test_mailbox.py rename to tests/pytest_tests/unit_tests/test_lib/test_mailbox.py diff --git a/tests/unit_tests/test_printer.py b/tests/pytest_tests/unit_tests/test_lib/test_printer.py similarity index 100% rename from tests/unit_tests/test_printer.py rename to tests/pytest_tests/unit_tests/test_lib/test_printer.py diff --git a/tests/unit_tests/test_redir_user.py b/tests/pytest_tests/unit_tests/test_lib/test_redir.py similarity index 100% rename from tests/unit_tests/test_redir_user.py rename to tests/pytest_tests/unit_tests/test_lib/test_redir.py diff --git a/tests/unit_tests/test_run_status.py b/tests/pytest_tests/unit_tests/test_lib/test_run_status.py similarity index 100% rename from tests/unit_tests/test_run_status.py rename to tests/pytest_tests/unit_tests/test_lib/test_run_status.py diff --git a/tests/unit_tests/lib/test_runid.py b/tests/pytest_tests/unit_tests/test_lib/test_runid.py similarity index 100% rename from tests/unit_tests/lib/test_runid.py rename to tests/pytest_tests/unit_tests/test_lib/test_runid.py diff --git a/tests/unit_tests/test_sock_client.py b/tests/pytest_tests/unit_tests/test_lib/test_sock_client.py similarity index 100% rename from tests/unit_tests/test_sock_client.py rename to tests/pytest_tests/unit_tests/test_lib/test_sock_client.py diff --git a/tests/unit_tests/test_sparkline.py b/tests/pytest_tests/unit_tests/test_lib/test_sparkline.py similarity index 100% rename from tests/unit_tests/test_sparkline.py rename to tests/pytest_tests/unit_tests/test_lib/test_sparkline.py diff --git a/tests/unit_tests/test_telemetry.py b/tests/pytest_tests/unit_tests/test_lib/test_telemetry.py similarity index 100% rename from tests/unit_tests/test_telemetry.py rename to tests/pytest_tests/unit_tests/test_lib/test_telemetry.py diff --git a/tests/unit_tests/test_library_public.py b/tests/pytest_tests/unit_tests/test_library_public.py similarity index 100% rename from tests/unit_tests/test_library_public.py rename to tests/pytest_tests/unit_tests/test_library_public.py diff --git a/tests/unit_tests/test_metaflow.py b/tests/pytest_tests/unit_tests/test_metaflow.py similarity index 100% rename from tests/unit_tests/test_metaflow.py rename to tests/pytest_tests/unit_tests/test_metaflow.py diff --git a/tests/pytest_tests/unit_tests/test_mode_disabled.py b/tests/pytest_tests/unit_tests/test_mode_disabled.py new file mode 100644 index 00000000000..a807b5ae012 --- /dev/null +++ b/tests/pytest_tests/unit_tests/test_mode_disabled.py @@ -0,0 +1,14 @@ +import os +import pickle + +import wandb + + +def test_disabled_can_pickle(): + """Will it pickle?""" + # This case comes up when using wandb in disabled mode, with keras + # https://wandb.atlassian.net/browse/WB-3981 + obj = wandb.wandb_sdk.lib.RunDisabled() + with open("test.pkl", "wb") as file: + pickle.dump(obj, file) + os.remove("test.pkl") diff --git a/tests/unit_tests/test_model_workflows.py b/tests/pytest_tests/unit_tests/test_model_workflows.py similarity index 80% rename from tests/unit_tests/test_model_workflows.py rename to tests/pytest_tests/unit_tests/test_model_workflows.py index b05738a270c..21a40fbe295 100644 --- a/tests/unit_tests/test_model_workflows.py +++ b/tests/pytest_tests/unit_tests/test_model_workflows.py @@ -29,10 +29,3 @@ def test_add_any(): _add_any(artifact, ["invalid input type"], "invalid") assert True - - -def 
test_offline_link_artifact(wandb_init):
-    run = wandb_init(mode="offline")
-    with pytest.raises(NotImplementedError):
-        run.link_artifact(None, "entity/project/portfolio", "latest")
-    run.finish()
diff --git a/tests/unit_tests/test_monkeypatch_keras.py b/tests/pytest_tests/unit_tests/test_monkeypatch_keras.py
similarity index 100%
rename from tests/unit_tests/test_monkeypatch_keras.py
rename to tests/pytest_tests/unit_tests/test_monkeypatch_keras.py
diff --git a/tests/unit_tests/test_plot.py b/tests/pytest_tests/unit_tests/test_plot.py
similarity index 100%
rename from tests/unit_tests/test_plot.py
rename to tests/pytest_tests/unit_tests/test_plot.py
diff --git a/tests/unit_tests/test_plots.py b/tests/pytest_tests/unit_tests/test_plots.py
similarity index 100%
rename from tests/unit_tests/test_plots.py
rename to tests/pytest_tests/unit_tests/test_plots.py
diff --git a/tests/unit_tests/test_profiler.py b/tests/pytest_tests/unit_tests/test_profiler.py
similarity index 100%
rename from tests/unit_tests/test_profiler.py
rename to tests/pytest_tests/unit_tests/test_profiler.py
diff --git a/tests/pytest_tests/unit_tests/test_public_api.py b/tests/pytest_tests/unit_tests/test_public_api.py
new file mode 100644
index 00000000000..b7fa653375d
--- /dev/null
+++ b/tests/pytest_tests/unit_tests/test_public_api.py
@@ -0,0 +1,152 @@
+from unittest import mock
+
+import pytest
+import wandb
+from wandb import Api
+
+
+def test_api_auto_login_no_tty():
+    with pytest.raises(wandb.UsageError):
+        Api()
+
+
+@pytest.mark.usefixtures("patch_apikey", "patch_prompt")
+def test_base_url_sanitization():
+    with mock.patch.object(wandb, "login", mock.MagicMock()):
+        api = Api({"base_url": "https://wandb.corp.net///"})
+        assert api.settings["base_url"] == "https://wandb.corp.net"
+
+
+@pytest.mark.parametrize(
+    "path",
+    [
+        "user/proj/run",  # simple
+        "/user/proj/run",  # leading slash
+        "user/proj:run",  # docker
+        "user/proj/runs/run",  # path_url
+    ],
+)
+@pytest.mark.usefixtures("patch_apikey", "patch_prompt")
+def test_parse_path(path):
+    with mock.patch.object(wandb, "login", mock.MagicMock()):
+        user, project, run = Api()._parse_path(path)
+        assert user == "user"
+        assert project == "proj"
+        assert run == "run"
+
+
+@pytest.mark.usefixtures("patch_apikey", "patch_prompt")
+def test_parse_project_path():
+    with mock.patch.object(wandb, "login", mock.MagicMock()):
+        entity, project = Api()._parse_project_path("user/proj")
+        assert entity == "user"
+        assert project == "proj"
+
+
+@pytest.mark.usefixtures("patch_apikey", "patch_prompt")
+def test_parse_project_path_proj():
+    with mock.patch.dict("os.environ", {"WANDB_ENTITY": "mock_entity"}):
+        entity, project = Api()._parse_project_path("proj")
+        assert entity == "mock_entity"
+        assert project == "proj"
+
+
+@pytest.mark.usefixtures("patch_apikey", "patch_prompt")
+def test_parse_path_docker_proj():
+    with mock.patch.dict("os.environ", {"WANDB_ENTITY": "mock_entity"}):
+        user, project, run = Api()._parse_path("proj:run")
+        assert user == "mock_entity"
+        assert project == "proj"
+        assert run == "run"
+
+
+@pytest.mark.usefixtures("patch_apikey", "patch_prompt")
+def test_parse_path_user_proj():
+    with mock.patch.dict("os.environ", {"WANDB_ENTITY": "mock_entity"}):
+        user, project, run = Api()._parse_path("proj/run")
+        assert user == "mock_entity"
+        assert project == "proj"
+        assert run == "run"
+
+
+@pytest.mark.usefixtures("patch_apikey", "patch_prompt")
+def test_parse_path_proj():
+    with mock.patch.dict("os.environ", {"WANDB_ENTITY": "mock_entity"}):
+        user, project, run = Api()._parse_path("proj")
+        assert user == "mock_entity"
+        assert project == "proj"
+        assert run == "proj"
+
+
+@pytest.mark.usefixtures("patch_apikey", "patch_prompt")
+def test_direct_specification_of_api_key():
+    # test_settings has a different API key
+    api = Api(api_key="abcd" * 10)
+    assert api.api_key == "abcd" * 10
+
+
+@pytest.mark.parametrize(
+    "path",
+    [
+        "test",
+        "test/test",
+    ],
+)
+@pytest.mark.usefixtures("patch_apikey", "patch_prompt")
+def test_from_path_project_type(path):
+    with mock.patch.object(wandb, "login", mock.MagicMock()):
+        project = Api().from_path(path)
+        assert isinstance(project, wandb.apis.public.Project)
+
+
+@pytest.mark.usefixtures("patch_apikey", "patch_prompt")
+def test_report_to_html():
+    path = "test/test/reports/My-Report--XYZ"
+    report = Api().from_path(path)
+    report_html = report.to_html(hidden=True)
+    assert "test/test/reports/My-Report--XYZ" in report_html
+    assert "<iframe" in report_html
diff --git a/tests/unit_tests/test_wandb_settings.py b/tests/pytest_tests/unit_tests/test_wandb_settings.py
rename from tests/unit_tests/test_wandb_settings.py
rename to tests/pytest_tests/unit_tests/test_wandb_settings.py
--- a/tests/unit_tests/test_wandb_settings.py
+++ b/tests/pytest_tests/unit_tests/test_wandb_settings.py
@@ ... @@
 if sys.version_info >= (3, 8):
     from typing import get_type_hints
@@ -29,11 +22,16 @@ def get_type_hints(obj):
         return obj.__annotations__
 
 
+from wandb.sdk import wandb_settings
+
 Property = wandb_settings.Property
 Settings = wandb_settings.Settings
 Source = wandb_settings.Source
 
-# TODO: replace wandb_init with mock_run or move tests to integration tests
+
+def test_multiproc_strict_bad(test_settings):
+    with pytest.raises(ValueError):
+        test_settings(dict(strict="bad"))
 
 
 def test_str_as_bool():
@@ -201,6 +199,171 @@ def test_property_repr():
 # ------------------------------------
 
 
+def test_start_run():
+    s = Settings()
+    s._set_run_start_time()
+    assert s._Settings__start_time is not None
+    assert s._Settings__start_datetime is not None
+
+
+# fixme:
+@pytest.mark.skip(reason="For now, we don't raise an error and simply ignore it")
+def test_unexpected_arguments():
+    with pytest.raises(TypeError):
+        Settings(lol=False)
+
+
+def test_mapping_interface():
+    s = Settings()
+    for setting in s:
+        assert setting in s
+
+
+def test_is_local():
+    s = Settings(base_url=None)
+    assert s.is_local is False
+
+
+def test_default_props_match_class_attributes():
+    # make sure that the default properties match the class attributes
+    s = Settings()
+    class_attributes = list(get_type_hints(Settings).keys())
+    default_props = list(s._default_props().keys())
+    assert set(default_props) - set(class_attributes) == set()
+
+
+# fixme: remove this once full validation is restored
+def test_settings_strict_validation(capsys):
+    s = Settings(api_key=271828, lol=True)
+    assert s.api_key == 271828
+    with pytest.raises(AttributeError):
+        s.lol
+    captured = capsys.readouterr().err
+    msgs = (
+        "Ignoring unexpected arguments: ['lol']",
+        "Invalid value for property api_key: 271828",
+    )
+    for msg in msgs:
+        assert msg in captured
+
+
+def test_static_settings_json_dump():
+    s = Settings()
+    static_settings = s.make_static()
+    assert json.dumps(static_settings)
+
+
+# fixme: remove this once full validation is restored
+def test_no_repeat_warnings(capsys):
+    s = Settings(api_key=234)
+    assert s.api_key == 234
+    s.update(api_key=234)
+    captured = capsys.readouterr().err
+    msg = "Invalid value for property api_key: 234"
+    assert captured.count(msg) == 1
+
+
+def test_program_python_m():
+    with tempfile.TemporaryDirectory() as tmpdir:
+        path_module = os.path.join(tmpdir, "module")
+        os.mkdir(path_module)
+        with open(os.path.join(path_module, "lib.py"), "w") as f:
+            f.write(
+                "import wandb\n\n\n"
+                "if __name__ == '__main__':\n"
+                "    run = wandb.init(mode='offline')\n"
+                "    print(run.settings.program)\n"
+            )
+        output = 
subprocess.check_output( + [sys.executable, "-m", "module.lib"], cwd=tmpdir + ) + assert "-m module.lib" in output.decode("utf-8") + + +@pytest.mark.skip(reason="Unskip once api_key validation is restored") +def test_local_api_key_validation(): + with pytest.raises(UsageError): + wandb.Settings( + api_key="local-87eLxjoRhY6u2ofg63NAJo7rVYHZo4NGACOvpSsF", + ) + s = wandb.Settings( + api_key="local-87eLxjoRhY6u2ofg63NAJo7rVYHZo4NGACOvpSsF", + base_url="https://api.wandb.test", + ) + + # ensure that base_url is copied first without causing an error in api_key validation + s.copy() + + # ensure that base_url is applied first without causing an error in api_key validation + wandb.Settings()._apply_settings(s) + + +def test_run_urls(): + base_url = "https://my.cool.site.com" + entity = "me" + project = "lol" + run_id = "123" + s = Settings( + base_url=base_url, + entity=entity, + project=project, + run_id=run_id, + ) + assert s.project_url == f"{base_url}/{entity}/{project}" + assert s.run_url == f"{base_url}/{entity}/{project}/runs/{run_id}" + + +def test_offline(test_settings): + test_settings = test_settings() + assert test_settings._offline is False + test_settings.update({"disabled": True}, source=Source.BASE) + assert test_settings._offline is True + test_settings.update({"disabled": None}, source=Source.BASE) + test_settings.update({"mode": "dryrun"}, source=Source.BASE) + assert test_settings._offline is True + test_settings.update({"mode": "offline"}, source=Source.BASE) + assert test_settings._offline is True + + +def test_silent(test_settings): + test_settings = test_settings() + test_settings.update({"silent": "true"}, source=Source.BASE) + assert test_settings.silent is True + + +def test_show_info(test_settings): + test_settings = test_settings() + test_settings.update({"show_info": True}, source=Source.BASE) + assert test_settings.show_info is True + + test_settings.update({"show_info": False}, source=Source.BASE) + assert test_settings.show_info is False + + +def test_show_warnings(test_settings): + test_settings = test_settings() + test_settings.update({"show_warnings": "true"}, source=Source.SETTINGS) + assert test_settings.show_warnings is True + + test_settings.update({"show_warnings": "false"}, source=Source.SETTINGS) + assert test_settings.show_warnings is False + + +def test_show_errors(test_settings): + test_settings = test_settings() + test_settings.update({"show_errors": True}, source=Source.SETTINGS) + assert test_settings.show_errors is True + + test_settings.update({"show_errors": False}, source=Source.SETTINGS) + assert test_settings.show_errors is False + + +def test_noop(test_settings): + test_settings = test_settings() + test_settings.update({"mode": "disabled"}, source=Source.BASE) + assert test_settings._noop is True + + def test_attrib_get(): s = Settings() assert s.base_url == "https://api.wandb.ai" @@ -568,20 +731,6 @@ def test_strict(): assert not settings.strict -@pytest.mark.skipif( - platform.system() == "Windows", - reason="backend crashes on Windows in CI", -) -@mock.patch.dict( - os.environ, {"WANDB_START_METHOD": "thread", "USERNAME": "test"}, clear=True -) -def test_console_run(wandb_init): - run = wandb_init(mode="offline", settings={"console": "auto"}) - assert run._settings.console == "auto" - assert run._settings._console == wandb_settings.SettingsConsole.WRAP - run.finish() - - def test_validate_console_problem_anonymous(): s = Settings() with pytest.raises(UsageError): @@ -592,6 +741,18 @@ def test_validate_console_problem_anonymous(): 
s.update(anonymous="lol") +def test_wandb_dir(test_settings): + test_settings = test_settings() + assert os.path.abspath(test_settings.wandb_dir) == os.path.abspath("wandb") + + +def test_resume_fname(test_settings): + test_settings = test_settings() + assert test_settings.resume_fname == os.path.abspath( + os.path.join(".", "wandb", "wandb-resume.json") + ) + + @pytest.mark.skip(reason="CircleCI still lets you write to root_dir") def test_non_writable_root_dir(capsys): with CliRunner().isolated_filesystem(): @@ -606,415 +767,6 @@ def test_non_writable_root_dir(capsys): assert "wasn't writable, using system temp directory" in err -# note: patching os.environ because other tests may have created env variables -# that are not in the default environment, which would cause these test to fail. -# setting {"USERNAME": "test"} because on Windows getpass.getuser() would otherwise fail. -@pytest.mark.skipif( - platform.system() == "Windows", reason="backend crashes on Windows in CI" -) -@mock.patch.dict(os.environ, {"USERNAME": "test"}, clear=True) -def test_sync_dir(wandb_init): - run = wandb_init(mode="offline") - print(run._settings.sync_dir) - assert run._settings.sync_dir == os.path.realpath( - os.path.join(".", "wandb", "latest-run") - ) - run.finish() - - -@pytest.mark.skipif( - platform.system() == "Windows", reason="backend crashes on Windows in CI" -) -@mock.patch.dict(os.environ, {"USERNAME": "test"}, clear=True) -def test_sync_file(wandb_init): - run = wandb_init(mode="offline") - assert run._settings.sync_file == os.path.realpath( - os.path.join(".", "wandb", "latest-run", f"run-{run.id}.wandb") - ) - run.finish() - - -@pytest.mark.skipif( - platform.system() == "Windows", reason="backend crashes on Windows in CI" -) -@mock.patch.dict(os.environ, {"USERNAME": "test"}, clear=True) -def test_files_dir(wandb_init): - run = wandb_init(mode="offline") - assert run._settings.files_dir == os.path.realpath( - os.path.join(".", "wandb", "latest-run", "files") - ) - run.finish() - - -@pytest.mark.skipif( - platform.system() == "Windows", reason="backend crashes on Windows in CI" -) -@mock.patch.dict(os.environ, {"USERNAME": "test"}, clear=True) -def test_tmp_dir(wandb_init): - run = wandb_init(mode="offline") - assert run._settings.tmp_dir == os.path.realpath( - os.path.join(".", "wandb", "latest-run", "tmp") - ) - run.finish() - - -@pytest.mark.skipif( - platform.system() == "Windows", reason="backend crashes on Windows in CI" -) -@mock.patch.dict(os.environ, {"USERNAME": "test"}, clear=True) -def test_tmp_code_dir(wandb_init): - run = wandb_init(mode="offline") - assert run._settings._tmp_code_dir == os.path.realpath( - os.path.join(".", "wandb", "latest-run", "tmp", "code") - ) - run.finish() - - -@pytest.mark.skipif( - platform.system() == "Windows", reason="backend crashes on Windows in CI" -) -@mock.patch.dict(os.environ, {"USERNAME": "test"}, clear=True) -def test_log_symlink_user(wandb_init): - run = wandb_init(mode="offline") - assert os.path.realpath(run._settings.log_symlink_user) == os.path.abspath( - run._settings.log_user - ) - run.finish() - - -@pytest.mark.skipif( - platform.system() == "Windows", reason="backend crashes on Windows in CI" -) -@mock.patch.dict(os.environ, {"USERNAME": "test"}, clear=True) -def test_log_symlink_internal(wandb_init): - run = wandb_init(mode="offline") - assert os.path.realpath(run._settings.log_symlink_internal) == os.path.abspath( - run._settings.log_internal - ) - run.finish() - - -@pytest.mark.skipif( - platform.system() == "Windows", 
reason="backend crashes on Windows in CI" -) -@mock.patch.dict(os.environ, {"USERNAME": "test"}, clear=True) -def test_sync_symlink_latest(wandb_init): - run = wandb_init(mode="offline") - time_tag = datetime.datetime.strftime( - run._settings._start_datetime, "%Y%m%d_%H%M%S" - ) - assert os.path.realpath(run._settings.sync_symlink_latest) == os.path.abspath( - os.path.join(".", "wandb", f"offline-run-{time_tag}-{run.id}") - ) - run.finish() - - -def test_start_run(): - s = Settings() - s._set_run_start_time() - assert s._Settings_start_time is not None - assert s._Settings_start_datetime is not None - - -# fixme: -@pytest.mark.skip(reason="For now, we don't raise an error and simply ignore it") -def test_unexpected_arguments(): - with pytest.raises(TypeError): - Settings(lol=False) - - -def test_mapping_interface(): - s = Settings() - for setting in s: - assert setting in s - - -def test_is_local(): - s = Settings(base_url=None) - assert s.is_local is False - - -def test_default_props_match_class_attributes(): - # make sure that the default properties match the class attributes - s = Settings() - class_attributes = list(get_type_hints(Settings).keys()) - default_props = list(s._default_props().keys()) - assert set(default_props) - set(class_attributes) == set() - - -# fixme: remove this once full validation is restored -def test_settings_strict_validation(capsys): - s = Settings(api_key=271828, lol=True) - assert s.api_key == 271828 - with pytest.raises(AttributeError): - s.lol - captured = capsys.readouterr().err - msgs = ( - "Ignoring unexpected arguments: ['lol']", - "Invalid value for property api_key: 271828", - ) - for msg in msgs: - assert msg in captured - - -def test_static_settings_json_dump(): - s = Settings() - static_settings = s.make_static() - assert json.dumps(static_settings) - - -# fixme: remove this once full validation is restored -def test_no_repeat_warnings(capsys): - s = Settings(api_key=234) - assert s.api_key == 234 - s.update(api_key=234) - captured = capsys.readouterr().err - msg = "Invalid value for property api_key: 234" - assert captured.count(msg) == 1 - - -def test_program_python_m(): - with tempfile.TemporaryDirectory() as tmpdir: - path_module = os.path.join(tmpdir, "module") - os.mkdir(path_module) - with open(os.path.join(path_module, "lib.py"), "w") as f: - f.write( - "import wandb\n\n\n" - "if __name__ == '__main__':\n" - " run = wandb.init(mode='offline')\n" - " print(run.settings.program)\n" - ) - output = subprocess.check_output( - [sys.executable, "-m", "module.lib"], cwd=tmpdir - ) - assert "-m module.lib" in output.decode("utf-8") - - -@pytest.mark.skip(reason="Unskip once api_key validation is restored") -def test_local_api_key_validation(): - with pytest.raises(UsageError): - wandb.Settings( - api_key="local-87eLxjoRhY6u2ofg63NAJo7rVYHZo4NGACOvpSsF", - ) - s = wandb.Settings( - api_key="local-87eLxjoRhY6u2ofg63NAJo7rVYHZo4NGACOvpSsF", - base_url="https://api.wandb.test", - ) - - # ensure that base_url is copied first without causing an error in api_key validation - s.copy() - - # ensure that base_url is applied first without causing an error in api_key validation - wandb.Settings()._apply_settings(s) - - -def test_run_urls(): - base_url = "https://my.cool.site.com" - entity = "me" - project = "lol" - run_id = "123" - s = Settings( - base_url=base_url, - entity=entity, - project=project, - run_id=run_id, - ) - assert s.project_url == f"{base_url}/{entity}/{project}" - assert s.run_url == f"{base_url}/{entity}/{project}/runs/{run_id}" - - -def 
test_code_saving_save_code_env_false(wandb_init, test_settings): - settings = test_settings() - settings.update({"save_code": None}, source=Source.BASE) - with mock.patch.dict("os.environ", WANDB_SAVE_CODE="false"): - run = wandb_init(settings=settings) - assert run.settings.save_code is False - run.finish() - - -def test_code_saving_disable_code(wandb_init, test_settings): - settings = test_settings() - settings.update({"save_code": None}, source=Source.BASE) - with mock.patch.dict("os.environ", WANDB_DISABLE_CODE="true"): - run = wandb_init(settings=settings) - assert run.settings.save_code is False - run.finish() - - -def test_offline(test_settings): - test_settings = test_settings() - assert test_settings._offline is False - test_settings.update({"disabled": True}, source=Source.BASE) - assert test_settings._offline is True - test_settings.update({"disabled": None}, source=Source.BASE) - test_settings.update({"mode": "dryrun"}, source=Source.BASE) - assert test_settings._offline is True - test_settings.update({"mode": "offline"}, source=Source.BASE) - assert test_settings._offline is True - - -def test_silent(test_settings): - test_settings = test_settings() - test_settings.update({"silent": "true"}, source=Source.BASE) - assert test_settings.silent is True - - -def test_silent_run(wandb_init, test_settings): - test_settings = test_settings() - test_settings.update({"silent": "true"}, source=Source.SETTINGS) - assert test_settings.silent is True - run = wandb_init(settings=test_settings) - assert run._settings.silent is True - run.finish() - - -def test_silent_env_run(wandb_init): - with mock.patch.dict("os.environ", WANDB_SILENT="true"): - run = wandb_init() - assert run._settings.silent is True - run.finish() - - -def test_strict_run(wandb_init, test_settings): - test_settings = test_settings() - test_settings.update({"strict": "true"}, source=Source.SETTINGS) - assert test_settings.strict is True - run = wandb_init(settings=test_settings) - assert run._settings.strict is True - run.finish() - - -def test_show_info(test_settings): - test_settings = test_settings() - test_settings.update({"show_info": True}, source=Source.BASE) - assert test_settings.show_info is True - - test_settings.update({"show_info": False}, source=Source.BASE) - assert test_settings.show_info is False - - -def test_show_info_run(wandb_init): - run = wandb_init() - assert run._settings.show_info is True - run.finish() - - -def test_show_info_false_run(wandb_init, test_settings): - test_settings = test_settings() - test_settings.update({"show_info": "false"}, source=Source.SETTINGS) - run = wandb_init(settings=test_settings) - assert run._settings.show_info is False - run.finish() - - -def test_show_warnings(test_settings): - test_settings = test_settings() - test_settings.update({"show_warnings": "true"}, source=Source.SETTINGS) - assert test_settings.show_warnings is True - - test_settings.update({"show_warnings": "false"}, source=Source.SETTINGS) - assert test_settings.show_warnings is False - - -def test_show_warnings_run(wandb_init, test_settings): - test_settings = test_settings() - test_settings.update({"show_warnings": "true"}, source=Source.SETTINGS) - run = wandb_init(settings=test_settings) - assert run._settings.show_warnings is True - run.finish() - - -def test_show_warnings_false_run(wandb_init, test_settings): - test_settings = test_settings() - test_settings.update({"show_warnings": "false"}, source=Source.SETTINGS) - run = wandb_init(settings=test_settings) - assert run._settings.show_warnings is 
False - run.finish() - - -def test_show_errors(test_settings): - test_settings = test_settings() - test_settings.update({"show_errors": True}, source=Source.SETTINGS) - assert test_settings.show_errors is True - - test_settings.update({"show_errors": False}, source=Source.SETTINGS) - assert test_settings.show_errors is False - - -def test_show_errors_run(wandb_init, test_settings): - test_settings = test_settings() - test_settings.update({"show_errors": True}, source=Source.SETTINGS) - run = wandb_init(settings=test_settings) - assert run._settings.show_errors is True - run.finish() - - -def test_show_errors_false_run(wandb_init, test_settings): - test_settings = test_settings() - test_settings.update({"show_errors": False}, source=Source.SETTINGS) - run = wandb_init(settings=test_settings) - assert run._settings.show_errors is False - run.finish() - - -def test_noop(test_settings): - test_settings = test_settings() - test_settings.update({"mode": "disabled"}, source=Source.BASE) - assert test_settings._noop is True - - -def test_not_jupyter(wandb_init): - run = wandb_init() - assert run._settings._jupyter is False - run.finish() - - -@pytest.mark.skipif( - platform.system() == "Windows", - reason="backend crashes on Windows in CI, likely bc of the overloaded env", -) -@mock.patch.dict(os.environ, {"USERNAME": "test"}, clear=True) -def test_console(runner, test_settings): - with runner.isolated_filesystem(): - test_settings = test_settings() - run = wandb.init(mode="offline") - assert run._settings.console == "auto" - assert run._settings._console == wandb_settings.SettingsConsole.REDIRECT - test_settings.update({"console": "off"}, source=Source.BASE) - assert test_settings._console == wandb_settings.SettingsConsole.OFF - test_settings.update({"console": "wrap"}, source=Source.BASE) - assert test_settings._console == wandb_settings.SettingsConsole.WRAP - run.finish() - - -def test_resume_fname(test_settings): - test_settings = test_settings() - assert test_settings.resume_fname == os.path.abspath( - os.path.join(".", "wandb", "wandb-resume.json") - ) - - -def test_resume_fname_run(wandb_init): - run = wandb_init() - assert run._settings.resume_fname == os.path.join( - run._settings.root_dir, "wandb", "wandb-resume.json" - ) - run.finish() - - -def test_wandb_dir(test_settings): - test_settings = test_settings() - assert os.path.abspath(test_settings.wandb_dir) == os.path.abspath("wandb") - - -def test_wandb_dir_run(wandb_init): - run = wandb_init() - assert os.path.abspath(run._settings.wandb_dir) == os.path.abspath( - os.path.join(run._settings.root_dir, "wandb") - ) - run.finish() - - def test_log_user(test_settings): test_settings = test_settings({"run_id": "test"}) _, run_dir, log_dir, fname = os.path.abspath( @@ -1037,29 +789,6 @@ def test_log_internal(test_settings): assert fname == "debug-internal.log" -def test_override_login_settings(user, test_settings): - wlogin = wandb_login._WandbLogin() - login_settings = test_settings().copy() - login_settings.update(show_emoji=True) - wlogin.setup({"_settings": login_settings}) - assert wlogin._settings.show_emoji is True - - -def test_override_login_settings_with_dict(user): - wlogin = wandb_login._WandbLogin() - login_settings = dict(show_emoji=True) - wlogin.setup({"_settings": login_settings}) - assert wlogin._settings.show_emoji is True - - -def test_setup_offline(user, test_settings): - # this is to increase coverage - login_settings = test_settings().copy() - login_settings.update(mode="offline") - assert 
wandb.setup(settings=login_settings)._instance._get_entity() is None - assert wandb.setup(settings=login_settings)._instance._load_viewer() is None - - # -------------------------- # test static settings # -------------------------- diff --git a/tests/unit_tests/test_wandb_summary.py b/tests/pytest_tests/unit_tests/test_wandb_summary.py similarity index 100% rename from tests/unit_tests/test_wandb_summary.py rename to tests/pytest_tests/unit_tests/test_wandb_summary.py diff --git a/tests/unit_tests/test_wandb_verify.py b/tests/pytest_tests/unit_tests/test_wandb_verify.py similarity index 87% rename from tests/unit_tests/test_wandb_verify.py rename to tests/pytest_tests/unit_tests/test_wandb_verify.py index 9ba1cb4ed84..69e103ac2ba 100644 --- a/tests/unit_tests/test_wandb_verify.py +++ b/tests/pytest_tests/unit_tests/test_wandb_verify.py @@ -6,16 +6,6 @@ from wandb.apis import InternalApi -def test_check_logged_in(wandb_init): - internal_api = unittest.mock.MagicMock(spec=InternalApi) - internal_api.api_key = None - assert not wandb_verify.check_logged_in(internal_api, "localhost:8000") - - run = wandb_init() - assert wandb_verify.check_logged_in(InternalApi(), run.settings.base_url) - run.finish() - - def test_print_results(capsys): failed_test_or_tests = ["test1", "test2"] wandb_verify.print_results(None, warning=True) diff --git a/tests/pytest_tests/unit_tests_old/__init__.py b/tests/pytest_tests/unit_tests_old/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/unit_tests_old/assets/2x2.png b/tests/pytest_tests/unit_tests_old/assets/2x2.png similarity index 100% rename from tests/unit_tests_old/assets/2x2.png rename to tests/pytest_tests/unit_tests_old/assets/2x2.png diff --git a/tests/unit_tests_old/assets/fixtures/environment.yml b/tests/pytest_tests/unit_tests_old/assets/fixtures/environment.yml similarity index 100% rename from tests/unit_tests_old/assets/fixtures/environment.yml rename to tests/pytest_tests/unit_tests_old/assets/fixtures/environment.yml diff --git a/tests/unit_tests_old/assets/fixtures/launch/launch_kube.json b/tests/pytest_tests/unit_tests_old/assets/fixtures/launch/launch_kube.json similarity index 100% rename from tests/unit_tests_old/assets/fixtures/launch/launch_kube.json rename to tests/pytest_tests/unit_tests_old/assets/fixtures/launch/launch_kube.json diff --git a/tests/unit_tests_old/assets/fixtures/launch/launch_sagemaker_config.json b/tests/pytest_tests/unit_tests_old/assets/fixtures/launch/launch_sagemaker_config.json similarity index 100% rename from tests/unit_tests_old/assets/fixtures/launch/launch_sagemaker_config.json rename to tests/pytest_tests/unit_tests_old/assets/fixtures/launch/launch_sagemaker_config.json diff --git a/tests/unit_tests_old/assets/fixtures/launch/launch_vertex_config.json b/tests/pytest_tests/unit_tests_old/assets/fixtures/launch/launch_vertex_config.json similarity index 100% rename from tests/unit_tests_old/assets/fixtures/launch/launch_vertex_config.json rename to tests/pytest_tests/unit_tests_old/assets/fixtures/launch/launch_vertex_config.json diff --git a/tests/unit_tests_old/assets/fixtures/report_spec_v5.json b/tests/pytest_tests/unit_tests_old/assets/fixtures/report_spec_v5.json similarity index 100% rename from tests/unit_tests_old/assets/fixtures/report_spec_v5.json rename to tests/pytest_tests/unit_tests_old/assets/fixtures/report_spec_v5.json diff --git a/tests/unit_tests_old/assets/fixtures/requirements.txt b/tests/pytest_tests/unit_tests_old/assets/fixtures/requirements.txt 
similarity index 100% rename from tests/unit_tests_old/assets/fixtures/requirements.txt rename to tests/pytest_tests/unit_tests_old/assets/fixtures/requirements.txt diff --git a/tests/unit_tests_old/assets/fixtures/train.py b/tests/pytest_tests/unit_tests_old/assets/fixtures/train.py similarity index 100% rename from tests/unit_tests_old/assets/fixtures/train.py rename to tests/pytest_tests/unit_tests_old/assets/fixtures/train.py diff --git a/tests/unit_tests_old/assets/notebooks/code_saving.ipynb b/tests/pytest_tests/unit_tests_old/assets/notebooks/code_saving.ipynb similarity index 100% rename from tests/unit_tests_old/assets/notebooks/code_saving.ipynb rename to tests/pytest_tests/unit_tests_old/assets/notebooks/code_saving.ipynb diff --git a/tests/unit_tests_old/assets/notebooks/ipython_exit.py b/tests/pytest_tests/unit_tests_old/assets/notebooks/ipython_exit.py similarity index 100% rename from tests/unit_tests_old/assets/notebooks/ipython_exit.py rename to tests/pytest_tests/unit_tests_old/assets/notebooks/ipython_exit.py diff --git a/tests/unit_tests_old/assets/notebooks/login_timeout.ipynb b/tests/pytest_tests/unit_tests_old/assets/notebooks/login_timeout.ipynb similarity index 100% rename from tests/unit_tests_old/assets/notebooks/login_timeout.ipynb rename to tests/pytest_tests/unit_tests_old/assets/notebooks/login_timeout.ipynb diff --git a/tests/unit_tests_old/assets/notebooks/magic.ipynb b/tests/pytest_tests/unit_tests_old/assets/notebooks/magic.ipynb similarity index 100% rename from tests/unit_tests_old/assets/notebooks/magic.ipynb rename to tests/pytest_tests/unit_tests_old/assets/notebooks/magic.ipynb diff --git a/tests/unit_tests_old/assets/notebooks/one_cell.ipynb b/tests/pytest_tests/unit_tests_old/assets/notebooks/one_cell.ipynb similarity index 100% rename from tests/unit_tests_old/assets/notebooks/one_cell.ipynb rename to tests/pytest_tests/unit_tests_old/assets/notebooks/one_cell.ipynb diff --git a/tests/unit_tests_old/assets/notebooks/setup.ipynb b/tests/pytest_tests/unit_tests_old/assets/notebooks/setup.ipynb similarity index 100% rename from tests/unit_tests_old/assets/notebooks/setup.ipynb rename to tests/pytest_tests/unit_tests_old/assets/notebooks/setup.ipynb diff --git a/tests/unit_tests_old/conftest.py b/tests/pytest_tests/unit_tests_old/conftest.py similarity index 100% rename from tests/unit_tests_old/conftest.py rename to tests/pytest_tests/unit_tests_old/conftest.py diff --git a/tests/unit_tests_old/internal_update_test.py b/tests/pytest_tests/unit_tests_old/internal_update_test.py similarity index 100% rename from tests/unit_tests_old/internal_update_test.py rename to tests/pytest_tests/unit_tests_old/internal_update_test.py diff --git a/tests/unit_tests_old/logs/cleanup.sh b/tests/pytest_tests/unit_tests_old/logs/cleanup.sh similarity index 100% rename from tests/unit_tests_old/logs/cleanup.sh rename to tests/pytest_tests/unit_tests_old/logs/cleanup.sh diff --git a/tests/unit_tests_old/test_cli.py b/tests/pytest_tests/unit_tests_old/test_cli.py similarity index 100% rename from tests/unit_tests_old/test_cli.py rename to tests/pytest_tests/unit_tests_old/test_cli.py diff --git a/tests/unit_tests_old/test_data_types.py b/tests/pytest_tests/unit_tests_old/test_data_types.py similarity index 100% rename from tests/unit_tests_old/test_data_types.py rename to tests/pytest_tests/unit_tests_old/test_data_types.py diff --git a/tests/unit_tests_old/test_file_stream.py b/tests/pytest_tests/unit_tests_old/test_file_stream.py similarity index 100% rename from 
tests/unit_tests_old/test_file_stream.py rename to tests/pytest_tests/unit_tests_old/test_file_stream.py diff --git a/tests/unit_tests_old/test_file_upload.py b/tests/pytest_tests/unit_tests_old/test_file_upload.py similarity index 100% rename from tests/unit_tests_old/test_file_upload.py rename to tests/pytest_tests/unit_tests_old/test_file_upload.py diff --git a/tests/unit_tests_old/test_footer.py b/tests/pytest_tests/unit_tests_old/test_footer.py similarity index 100% rename from tests/unit_tests_old/test_footer.py rename to tests/pytest_tests/unit_tests_old/test_footer.py diff --git a/tests/unit_tests_old/test_internal_api.py b/tests/pytest_tests/unit_tests_old/test_internal_api.py similarity index 100% rename from tests/unit_tests_old/test_internal_api.py rename to tests/pytest_tests/unit_tests_old/test_internal_api.py diff --git a/tests/unit_tests_old/test_keras.py b/tests/pytest_tests/unit_tests_old/test_keras.py similarity index 100% rename from tests/unit_tests_old/test_keras.py rename to tests/pytest_tests/unit_tests_old/test_keras.py diff --git a/tests/unit_tests_old/test_logging.py b/tests/pytest_tests/unit_tests_old/test_logging.py similarity index 100% rename from tests/unit_tests_old/test_logging.py rename to tests/pytest_tests/unit_tests_old/test_logging.py diff --git a/tests/unit_tests_old/test_metric_internal.py b/tests/pytest_tests/unit_tests_old/test_metric_internal.py similarity index 100% rename from tests/unit_tests_old/test_metric_internal.py rename to tests/pytest_tests/unit_tests_old/test_metric_internal.py diff --git a/tests/unit_tests_old/test_offline_sync.py b/tests/pytest_tests/unit_tests_old/test_offline_sync.py similarity index 98% rename from tests/unit_tests_old/test_offline_sync.py rename to tests/pytest_tests/unit_tests_old/test_offline_sync.py index aabfbfe8808..ed7124dee5b 100644 --- a/tests/unit_tests_old/test_offline_sync.py +++ b/tests/pytest_tests/unit_tests_old/test_offline_sync.py @@ -6,7 +6,7 @@ import pytest -from tests.unit_tests_old import utils +from tests.pytest_tests.unit_tests_old import utils @pytest.mark.flaky diff --git a/tests/unit_tests_old/test_public_api.py b/tests/pytest_tests/unit_tests_old/test_public_api.py similarity index 99% rename from tests/unit_tests_old/test_public_api.py rename to tests/pytest_tests/unit_tests_old/test_public_api.py index 4b6382ad1c7..27fd19eecf7 100644 --- a/tests/unit_tests_old/test_public_api.py +++ b/tests/pytest_tests/unit_tests_old/test_public_api.py @@ -14,7 +14,7 @@ import wandb from wandb.sdk.lib import filesystem -from tests.unit_tests_old import utils +from tests.pytest_tests.unit_tests_old import utils def test_from_path(mock_server, api): diff --git a/tests/unit_tests_old/test_runtime.py b/tests/pytest_tests/unit_tests_old/test_runtime.py similarity index 100% rename from tests/unit_tests_old/test_runtime.py rename to tests/pytest_tests/unit_tests_old/test_runtime.py diff --git a/tests/unit_tests_old/test_sender.py b/tests/pytest_tests/unit_tests_old/test_sender.py similarity index 99% rename from tests/unit_tests_old/test_sender.py rename to tests/pytest_tests/unit_tests_old/test_sender.py index e8eea9176a9..b72d209e0c7 100644 --- a/tests/unit_tests_old/test_sender.py +++ b/tests/pytest_tests/unit_tests_old/test_sender.py @@ -6,7 +6,7 @@ import wandb from wandb.sdk.lib.printer import INFO -from tests.unit_tests_old import utils +from tests.pytest_tests.unit_tests_old import utils def test_send_status_request_stopped(mock_server, backend_interface): diff --git 
a/tests/unit_tests_old/test_summary.py b/tests/pytest_tests/unit_tests_old/test_summary.py similarity index 100% rename from tests/unit_tests_old/test_summary.py rename to tests/pytest_tests/unit_tests_old/test_summary.py diff --git a/tests/unit_tests_old/test_tb_watcher.py b/tests/pytest_tests/unit_tests_old/test_tb_watcher.py similarity index 100% rename from tests/unit_tests_old/test_tb_watcher.py rename to tests/pytest_tests/unit_tests_old/test_tb_watcher.py diff --git a/tests/unit_tests_old/test_time_resolution.py b/tests/pytest_tests/unit_tests_old/test_time_resolution.py similarity index 100% rename from tests/unit_tests_old/test_time_resolution.py rename to tests/pytest_tests/unit_tests_old/test_time_resolution.py diff --git a/tests/unit_tests_old/test_wandb.py b/tests/pytest_tests/unit_tests_old/test_wandb.py similarity index 100% rename from tests/unit_tests_old/test_wandb.py rename to tests/pytest_tests/unit_tests_old/test_wandb.py diff --git a/tests/unit_tests_old/test_wandb_agent.py b/tests/pytest_tests/unit_tests_old/test_wandb_agent.py similarity index 100% rename from tests/unit_tests_old/test_wandb_agent.py rename to tests/pytest_tests/unit_tests_old/test_wandb_agent.py diff --git a/tests/unit_tests_old/test_wandb_artifacts.py b/tests/pytest_tests/unit_tests_old/test_wandb_artifacts.py similarity index 100% rename from tests/unit_tests_old/test_wandb_artifacts.py rename to tests/pytest_tests/unit_tests_old/test_wandb_artifacts.py diff --git a/tests/unit_tests_old/test_wandb_controller.py b/tests/pytest_tests/unit_tests_old/test_wandb_controller.py similarity index 100% rename from tests/unit_tests_old/test_wandb_controller.py rename to tests/pytest_tests/unit_tests_old/test_wandb_controller.py diff --git a/tests/unit_tests_old/test_wandb_integration.py b/tests/pytest_tests/unit_tests_old/test_wandb_integration.py similarity index 98% rename from tests/unit_tests_old/test_wandb_integration.py rename to tests/pytest_tests/unit_tests_old/test_wandb_integration.py index a50a8249593..f363d55f5ce 100644 --- a/tests/unit_tests_old/test_wandb_integration.py +++ b/tests/pytest_tests/unit_tests_old/test_wandb_integration.py @@ -15,7 +15,7 @@ import pytest import wandb -from tests.unit_tests_old import utils +from tests.pytest_tests.unit_tests_old import utils reloadFn = importlib.reload @@ -67,7 +67,7 @@ def test_parallel_runs(runner, live_mock_server, test_settings, test_name): run_files_sorted = sorted( [ "config.yaml", - f"code/tests/unit_tests_old/logs/{test_name}/train.py", + f"code/tests/pytest_tests/unit_tests_old/logs/{test_name}/train.py", "requirements.txt", "wandb-metadata.json", "wandb-summary.json", diff --git a/tests/unit_tests_old/test_wandb_run.py b/tests/pytest_tests/unit_tests_old/test_wandb_run.py similarity index 100% rename from tests/unit_tests_old/test_wandb_run.py rename to tests/pytest_tests/unit_tests_old/test_wandb_run.py diff --git a/tests/pytest_tests/unit_tests_old/tests_launch/__init__.py b/tests/pytest_tests/unit_tests_old/tests_launch/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/unit_tests_old/tests_launch/test_kaniko_build.py b/tests/pytest_tests/unit_tests_old/tests_launch/test_kaniko_build.py similarity index 99% rename from tests/unit_tests_old/tests_launch/test_kaniko_build.py rename to tests/pytest_tests/unit_tests_old/tests_launch/test_kaniko_build.py index cea4638a78c..5fe81f48575 100644 --- a/tests/unit_tests_old/tests_launch/test_kaniko_build.py +++ 
b/tests/pytest_tests/unit_tests_old/tests_launch/test_kaniko_build.py @@ -18,7 +18,7 @@ _wait_for_completion, ) -from tests.unit_tests_old.utils import fixture_open +from tests.pytest_tests.unit_tests_old.utils import fixture_open from .test_launch import mocked_fetchable_git_repo # noqa: F401 diff --git a/tests/unit_tests_old/tests_launch/test_launch.py b/tests/pytest_tests/unit_tests_old/tests_launch/test_launch.py similarity index 99% rename from tests/unit_tests_old/tests_launch/test_launch.py rename to tests/pytest_tests/unit_tests_old/tests_launch/test_launch.py index 497c9c8a67c..9bbe8372fa3 100644 --- a/tests/unit_tests_old/tests_launch/test_launch.py +++ b/tests/pytest_tests/unit_tests_old/tests_launch/test_launch.py @@ -22,7 +22,7 @@ ) from wandb.sdk.lib import runid -from tests.unit_tests_old.utils import fixture_open, notebook_path +from tests.pytest_tests.unit_tests_old.utils import fixture_open, notebook_path EMPTY_BACKEND_CONFIG = { PROJECT_DOCKER_ARGS: {}, diff --git a/tests/unit_tests_old/tests_launch/test_launch_aws.py b/tests/pytest_tests/unit_tests_old/tests_launch/test_launch_aws.py similarity index 99% rename from tests/unit_tests_old/tests_launch/test_launch_aws.py rename to tests/pytest_tests/unit_tests_old/tests_launch/test_launch_aws.py index e97866fda25..b8e4c52ba22 100644 --- a/tests/unit_tests_old/tests_launch/test_launch_aws.py +++ b/tests/pytest_tests/unit_tests_old/tests_launch/test_launch_aws.py @@ -18,7 +18,7 @@ validate_sagemaker_requirements, ) -from tests.unit_tests_old.utils import fixture_open +from tests.pytest_tests.unit_tests_old.utils import fixture_open from .test_launch import mocked_fetchable_git_repo # noqa: F401 diff --git a/tests/unit_tests_old/tests_launch/test_launch_cli.py b/tests/pytest_tests/unit_tests_old/tests_launch/test_launch_cli.py similarity index 100% rename from tests/unit_tests_old/tests_launch/test_launch_cli.py rename to tests/pytest_tests/unit_tests_old/tests_launch/test_launch_cli.py diff --git a/tests/unit_tests_old/tests_launch/test_launch_docker.py b/tests/pytest_tests/unit_tests_old/tests_launch/test_launch_docker.py similarity index 100% rename from tests/unit_tests_old/tests_launch/test_launch_docker.py rename to tests/pytest_tests/unit_tests_old/tests_launch/test_launch_docker.py diff --git a/tests/unit_tests_old/tests_launch/test_launch_gcp.py b/tests/pytest_tests/unit_tests_old/tests_launch/test_launch_gcp.py similarity index 100% rename from tests/unit_tests_old/tests_launch/test_launch_gcp.py rename to tests/pytest_tests/unit_tests_old/tests_launch/test_launch_gcp.py diff --git a/tests/unit_tests_old/tests_launch/test_launch_jobs.py b/tests/pytest_tests/unit_tests_old/tests_launch/test_launch_jobs.py similarity index 99% rename from tests/unit_tests_old/tests_launch/test_launch_jobs.py rename to tests/pytest_tests/unit_tests_old/tests_launch/test_launch_jobs.py index d403d770f6d..b346d9ae593 100644 --- a/tests/unit_tests_old/tests_launch/test_launch_jobs.py +++ b/tests/pytest_tests/unit_tests_old/tests_launch/test_launch_jobs.py @@ -11,7 +11,7 @@ from wandb.sdk.data_types._dtypes import TypeRegistry from wandb.sdk.launch.launch_add import launch_add -from tests.unit_tests_old import utils +from tests.pytest_tests.unit_tests_old import utils from .test_launch import ( EMPTY_BACKEND_CONFIG, diff --git a/tests/unit_tests_old/tests_launch/test_launch_kubernetes.py b/tests/pytest_tests/unit_tests_old/tests_launch/test_launch_kubernetes.py similarity index 100% rename from 
tests/unit_tests_old/tests_launch/test_launch_kubernetes.py rename to tests/pytest_tests/unit_tests_old/tests_launch/test_launch_kubernetes.py diff --git a/tests/unit_tests_old/tests_launch/test_launch_local_process.py b/tests/pytest_tests/unit_tests_old/tests_launch/test_launch_local_process.py similarity index 100% rename from tests/unit_tests_old/tests_launch/test_launch_local_process.py rename to tests/pytest_tests/unit_tests_old/tests_launch/test_launch_local_process.py diff --git a/tests/unit_tests_old/tests_launch/test_launch_utils.py b/tests/pytest_tests/unit_tests_old/tests_launch/test_launch_utils.py similarity index 100% rename from tests/unit_tests_old/tests_launch/test_launch_utils.py rename to tests/pytest_tests/unit_tests_old/tests_launch/test_launch_utils.py diff --git a/tests/unit_tests_old/tests_s_nb/test_notebooks.py b/tests/pytest_tests/unit_tests_old/tests_s_nb/test_notebooks.py similarity index 99% rename from tests/unit_tests_old/tests_s_nb/test_notebooks.py rename to tests/pytest_tests/unit_tests_old/tests_s_nb/test_notebooks.py index 9f016676116..f1e9181f3cc 100644 --- a/tests/unit_tests_old/tests_s_nb/test_notebooks.py +++ b/tests/pytest_tests/unit_tests_old/tests_s_nb/test_notebooks.py @@ -8,7 +8,7 @@ import wandb from wandb.errors import UsageError -from tests.unit_tests_old import utils +from tests.pytest_tests.unit_tests_old import utils def test_login_timeout(notebook, monkeypatch): diff --git a/tests/unit_tests_old/utils/__init__.py b/tests/pytest_tests/unit_tests_old/utils/__init__.py similarity index 100% rename from tests/unit_tests_old/utils/__init__.py rename to tests/pytest_tests/unit_tests_old/utils/__init__.py diff --git a/tests/unit_tests_old/utils/artifact_emu.py b/tests/pytest_tests/unit_tests_old/utils/artifact_emu.py similarity index 100% rename from tests/unit_tests_old/utils/artifact_emu.py rename to tests/pytest_tests/unit_tests_old/utils/artifact_emu.py diff --git a/tests/unit_tests_old/utils/dummy_data.py b/tests/pytest_tests/unit_tests_old/utils/dummy_data.py similarity index 100% rename from tests/unit_tests_old/utils/dummy_data.py rename to tests/pytest_tests/unit_tests_old/utils/dummy_data.py diff --git a/tests/unit_tests_old/utils/mock_requests.py b/tests/pytest_tests/unit_tests_old/utils/mock_requests.py similarity index 100% rename from tests/unit_tests_old/utils/mock_requests.py rename to tests/pytest_tests/unit_tests_old/utils/mock_requests.py diff --git a/tests/unit_tests_old/utils/mock_server.py b/tests/pytest_tests/unit_tests_old/utils/mock_server.py similarity index 100% rename from tests/unit_tests_old/utils/mock_server.py rename to tests/pytest_tests/unit_tests_old/utils/mock_server.py diff --git a/tests/unit_tests_old/utils/notebook_client.py b/tests/pytest_tests/unit_tests_old/utils/notebook_client.py similarity index 100% rename from tests/unit_tests_old/utils/notebook_client.py rename to tests/pytest_tests/unit_tests_old/utils/notebook_client.py diff --git a/tests/unit_tests_old/utils/parse_metrics.py b/tests/pytest_tests/unit_tests_old/utils/parse_metrics.py similarity index 100% rename from tests/unit_tests_old/utils/parse_metrics.py rename to tests/pytest_tests/unit_tests_old/utils/parse_metrics.py diff --git a/tests/unit_tests_old/utils/records.py b/tests/pytest_tests/unit_tests_old/utils/records.py similarity index 100% rename from tests/unit_tests_old/utils/records.py rename to tests/pytest_tests/unit_tests_old/utils/records.py diff --git a/tests/unit_tests_old/utils/utils.py 
b/tests/pytest_tests/unit_tests_old/utils/utils.py similarity index 100% rename from tests/unit_tests_old/utils/utils.py rename to tests/pytest_tests/unit_tests_old/utils/utils.py diff --git a/tests/unit_tests/assets/launch_k8s_config.yaml b/tests/unit_tests/assets/launch_k8s_config.yaml deleted file mode 100644 index dffc92cd9c7..00000000000 --- a/tests/unit_tests/assets/launch_k8s_config.yaml +++ /dev/null @@ -1,17 +0,0 @@ -apiVersion: v1 - -clusters: -- name: test-cluster - cluster: {} - -contexts: -- context: - cluster: test-cluster - user: test-user - name: active-context - -current-context: active-context - -users: -- name: test-user - user: {} diff --git a/tests/unit_tests/test_cli.py b/tests/unit_tests/test_cli.py deleted file mode 100644 index 880b8ebca08..00000000000 --- a/tests/unit_tests/test_cli.py +++ /dev/null @@ -1,200 +0,0 @@ -import datetime -import netrc -import os -import traceback - -import pytest -import wandb -from wandb.apis.internal import InternalApi -from wandb.cli import cli - - -@pytest.fixture -def empty_netrc(monkeypatch): - class FakeNet: - @property - def hosts(self): - return {"api.wandb.ai": None} - - monkeypatch.setattr(netrc, "netrc", lambda *args: FakeNet()) - - -@pytest.mark.skip(reason="Currently don't have on in cling") -def test_enable_on(runner, git_repo): - with runner.isolated_filesystem(): - with open("wandb/settings", "w") as f: - f.write("[default]\nproject=rad") - result = runner.invoke(cli.on) - print(result.output) - print(result.exception) - print(traceback.print_tb(result.exc_info[2])) - assert "W&B enabled" in str(result.output) - assert result.exit_code == 0 - - -@pytest.mark.skip(reason="Currently don't have off in cling") -def test_enable_off(runner, git_repo): - with runner.isolated_filesystem(): - with open("wandb/settings", "w") as f: - f.write("[default]\nproject=rad") - result = runner.invoke(cli.off) - print(result.output) - print(result.exception) - print(traceback.print_tb(result.exc_info[2])) - assert "W&B disabled" in str(result.output) - assert "disabled" in open("wandb/settings").read() - assert result.exit_code == 0 - - -def test_no_project_bad_command(runner): - with runner.isolated_filesystem(): - result = runner.invoke(cli.cli, ["fsd"]) - print(result.output) - print(result.exception) - print(traceback.print_tb(result.exc_info[2])) - assert "No such command" in result.output - assert result.exit_code == 2 - - -def test_login_key_arg(runner, dummy_api_key): - with runner.isolated_filesystem(): - # If the test was run from a directory containing .wandb, then __stage_dir__ - # was '.wandb' when imported by api.py, reload to fix. UGH!
- # reload(wandb) - result = runner.invoke(cli.login, [dummy_api_key]) - print("Output: ", result.output) - print("Exception: ", result.exception) - print("Traceback: ", traceback.print_tb(result.exc_info[2])) - assert result.exit_code == 0 - with open("netrc") as f: - generated_netrc = f.read() - assert dummy_api_key in generated_netrc - - -def test_login_host_trailing_slash_fix_invalid(runner, dummy_api_key, local_settings): - with runner.isolated_filesystem(): - with open("netrc", "w") as f: - f.write(f"machine \n login user\npassword {dummy_api_key}") - result = runner.invoke( - cli.login, ["--host", "https://google.com/", dummy_api_key] - ) - assert result.exit_code == 0 - with open("netrc") as f: - generated_netrc = f.read() - assert generated_netrc == ( - "machine google.com\n" - " login user\n" - " password {}\n".format(dummy_api_key) - ) - - -@pytest.mark.parametrize( - "host, error", - [ - ("https://app.wandb.ai", "did you mean https://api.wandb.ai"), - ("ftp://google.com", "URL must start with `http(s)://`"), - ], -) -def test_login_bad_host(runner, host, error, local_settings): - with runner.isolated_filesystem(): - result = runner.invoke(cli.login, ["--host", host]) - assert error in result.output - assert result.exit_code != 0 - - -def test_login_onprem_key_arg(runner, dummy_api_key): - with runner.isolated_filesystem(): - onprem_key = "test-" + dummy_api_key - # with runner.isolated_filesystem(): - result = runner.invoke(cli.login, [onprem_key]) - print("Output: ", result.output) - print("Exception: ", result.exception) - print("Traceback: ", traceback.print_tb(result.exc_info[2])) - assert result.exit_code == 0 - with open("netrc") as f: - generated_netrc = f.read() - assert onprem_key in generated_netrc - - -def test_login_invalid_key_arg(runner, dummy_api_key): - with runner.isolated_filesystem(): - invalid_key = "test--" + dummy_api_key - result = runner.invoke(cli.login, [invalid_key]) - assert "API key must be 40 characters long, yours was" in str(result) - assert result.exit_code == 1 - - -@pytest.mark.skip(reason="Just need to make the mocking work correctly") -def test_login_anonymously(runner, dummy_api_key, monkeypatch, empty_netrc): - with runner.isolated_filesystem(): - api = InternalApi() - monkeypatch.setattr(cli, "api", api) - monkeypatch.setattr( - wandb.sdk.internal.internal_api.Api, - "create_anonymous_api_key", - lambda *args, **kwargs: dummy_api_key, - ) - result = runner.invoke(cli.login, ["--anonymously"]) - print("Output: ", result.output) - print("Exception: ", result.exception) - print("Traceback: ", traceback.print_tb(result.exc_info[2])) - assert result.exit_code == 0 - with open("netrc") as f: - generated_netrc = f.read() - assert dummy_api_key in generated_netrc - - -def test_sync_gc(runner): - with runner.isolated_filesystem(): - if not os.path.isdir("wandb"): - os.mkdir("wandb") - d1 = datetime.datetime.now() - d2 = d1 - datetime.timedelta(hours=3) - run1 = d1.strftime("run-%Y%m%d_%H%M%S-abcd") - run2 = d2.strftime("run-%Y%m%d_%H%M%S-efgh") - run1_dir = os.path.join("wandb", run1) - run2_dir = os.path.join("wandb", run2) - os.mkdir(run1_dir) - with open(os.path.join(run1_dir, "run-abcd.wandb"), "w") as f: - f.write("") - with open(os.path.join(run1_dir, "run-abcd.wandb.synced"), "w") as f: - f.write("") - os.mkdir(run2_dir) - with open(os.path.join(run2_dir, "run-efgh.wandb"), "w") as f: - f.write("") - with open(os.path.join(run2_dir, "run-efgh.wandb.synced"), "w") as f: - f.write("") - assert ( - runner.invoke( - cli.sync, ["--clean",
"--clean-old-hours", "2"], input="y\n" - ).exit_code - ) == 0 - - assert os.path.exists(run1_dir) - assert not os.path.exists(run2_dir) - assert ( - runner.invoke( - cli.sync, ["--clean", "--clean-old-hours", "0"], input="y\n" - ).exit_code - == 0 - ) - assert not os.path.exists(run1_dir) - - -def test_cli_login_reprompts_when_no_key_specified(runner, mocker, dummy_api_key): - with runner.isolated_filesystem(): - mocker.patch("wandb.wandb_lib.apikey.getpass", input) - # this first gives login an empty API key, which should cause - # it to re-prompt. this is what we are testing. we then give - # it a valid API key (the dummy API key with a different final - # letter to check that our monkeypatch input is working as - # expected) to terminate the prompt. finally we grep for the - # Error: No API key specified to assert that the re-prompt - # happened - result = runner.invoke(cli.login, input=f"\n{dummy_api_key[:-1]}q\n") - print(f"DEBUG(login) out = {result.output}") - print(f"DEBUG(login) exc = {result.exception}") - print(f"DEBUG(login) tb = {traceback.print_tb(result.exc_info[2])}") - with open("netrc") as f: - print(f.read()) - assert "ERROR No API key specified." in result.output diff --git a/tests/unit_tests/test_public_api.py b/tests/unit_tests/test_public_api.py deleted file mode 100644 index 10c7e841cc2..00000000000 --- a/tests/unit_tests/test_public_api.py +++ /dev/null @@ -1,323 +0,0 @@ -"""Tests for the `wandb.apis.PublicApi` module.""" - - -from unittest import mock - -import pytest -import wandb -import wandb.apis.public -import wandb.util -from wandb import Api -from wandb.sdk.lib import runid - -from .test_wandb_sweep import ( - SWEEP_CONFIG_BAYES, - SWEEP_CONFIG_GRID, - SWEEP_CONFIG_GRID_NESTED, - SWEEP_CONFIG_RANDOM, - VALID_SWEEP_CONFIGS_MINIMAL, -) - - -def test_api_auto_login_no_tty(): - with pytest.raises(wandb.UsageError): - Api() - - -@pytest.mark.usefixtures("patch_apikey", "patch_prompt") -def test_base_url_sanitization(): - with mock.patch.object(wandb, "login", mock.MagicMock()): - api = Api({"base_url": "https://wandb.corp.net///"}) - assert api.settings["base_url"] == "https://wandb.corp.net" - - -@pytest.mark.parametrize( - "path", - [ - "user/proj/run", # simple - "/user/proj/run", # leading slash - "user/proj:run", # docker - "user/proj/runs/run", # path_url - ], -) -@pytest.mark.usefixtures("patch_apikey", "patch_prompt") -def test_parse_path(path): - with mock.patch.object(wandb, "login", mock.MagicMock()): - user, project, run = Api()._parse_path(path) - assert user == "user" - assert project == "proj" - assert run == "run" - - -@pytest.mark.usefixtures("patch_apikey", "patch_prompt") -def test_parse_project_path(): - with mock.patch.object(wandb, "login", mock.MagicMock()): - entity, project = Api()._parse_project_path("user/proj") - assert entity == "user" - assert project == "proj" - - -@pytest.mark.usefixtures("patch_apikey", "patch_prompt") -def test_parse_project_path_proj(): - with mock.patch.dict("os.environ", {"WANDB_ENTITY": "mock_entity"}): - entity, project = Api()._parse_project_path("proj") - assert entity == "mock_entity" - assert project == "proj" - - -@pytest.mark.usefixtures("patch_apikey", "patch_prompt") -def test_parse_path_docker_proj(): - with mock.patch.dict("os.environ", {"WANDB_ENTITY": "mock_entity"}): - user, project, run = Api()._parse_path("proj:run") - assert user == "mock_entity" - assert project == "proj" - assert run == "run" - - -@pytest.mark.usefixtures("patch_apikey", "patch_prompt") -def test_parse_path_user_proj(): -
with mock.patch.dict("os.environ", {"WANDB_ENTITY": "mock_entity"}): - user, project, run = Api()._parse_path("proj/run") - assert user == "mock_entity" - assert project == "proj" - assert run == "run" - - -@pytest.mark.usefixtures("patch_apikey", "patch_prompt") -def test_parse_path_proj(): - with mock.patch.dict("os.environ", {"WANDB_ENTITY": "mock_entity"}): - user, project, run = Api()._parse_path("proj") - assert user == "mock_entity" - assert project == "proj" - assert run == "proj" - - -@pytest.mark.usefixtures("patch_apikey", "patch_prompt") -def test_direct_specification_of_api_key(): - # test_settings has a different API key - api = Api(api_key="abcd" * 10) - assert api.api_key == "abcd" * 10 - - -@pytest.mark.parametrize( - "path", - [ - "test", - "test/test", - ], -) -@pytest.mark.usefixtures("patch_apikey", "patch_prompt") -def test_from_path_project_type(path): - with mock.patch.object(wandb, "login", mock.MagicMock()): - project = Api().from_path(path) - assert isinstance(project, wandb.apis.public.Project) - - -@pytest.mark.parametrize( - "path", - [ - "test/test/test/test", - "test/test/test/test/test", - ], -) -def test_from_path_bad_path(user, path): - with pytest.raises(wandb.Error, match="Invalid path"): - Api().from_path(path) - - -def test_from_path_bad_report_path(user): - with pytest.raises(wandb.Error, match="Invalid report path"): - Api().from_path("test/test/reports/test-foo") - - -@pytest.mark.parametrize( - "path", - [ - "test/test/reports/XYZ", - "test/test/reports/Name-foo--XYZ", - ], -) -def test_from_path_report_type(user, path): - report = Api().from_path(path) - assert isinstance(report, wandb.apis.public.BetaReport) - - -def test_project_to_html(user): - with mock.patch.dict("os.environ", {"WANDB_ENTITY": "mock_entity"}): - project = Api().from_path("test") - assert "mock_entity/test/workspace?jupyter=true" in project.to_html() - - -@pytest.mark.usefixtures("patch_apikey", "patch_prompt") -def test_report_to_html(): - path = "test/test/reports/My-Report--XYZ" - report = Api().from_path(path) - report_html = report.to_html(hidden=True) - assert "test/test/reports/My-Report--XYZ" in report_html - assert "" - - -@pytest.mark.parametrize( - "sweep_config,expected_run_count", - [ - (SWEEP_CONFIG_GRID, 3), - (SWEEP_CONFIG_GRID_NESTED, 9), - (SWEEP_CONFIG_BAYES, None), - (SWEEP_CONFIG_RANDOM, None), - ], - ids=["test grid", "test grid nested", "test bayes", "test random"], -) -def test_sweep_api_expected_run_count( - user, relay_server, sweep_config, expected_run_count -): - _project = "test" - with relay_server() as relay: - sweep_id = wandb.sweep(sweep_config, entity=user, project=_project) - - for comm in relay.context.raw_data: - q = comm["request"].get("query") - print(q) - - print(f"sweep_id{sweep_id}") - sweep = Api().sweep(f"{user}/{_project}/sweeps/{sweep_id}") - - assert sweep.expected_run_count == expected_run_count - - -def test_update_aliases_on_artifact(user, relay_server, wandb_init): - project = "test" - run = wandb_init(entity=user, project=project) - artifact = wandb.Artifact("test-artifact", "test-type") - with open("boom.txt", "w") as f: - f.write("testing") - artifact.add_file("boom.txt", "test-name") - art = run.log_artifact(artifact, aliases=["sequence"]) - run.link_artifact(art, f"{user}/{project}/my-sample-portfolio") - artifact.wait() - run.finish() - - # fetch artifact under original parent sequence - artifact = Api().artifact( - name=f"{user}/{project}/test-artifact:v0", type="test-type" - ) - aliases = artifact.aliases - assert 
"sequence" in aliases - - # fetch artifact under portfolio - # and change aliases under portfolio only - artifact = Api().artifact( - name=f"{user}/{project}/my-sample-portfolio:v0", type="test-type" - ) - aliases = artifact.aliases - assert "sequence" not in aliases - artifact.aliases = ["portfolio"] - artifact.aliases.append("boom") - artifact.save() - - artifact = Api().artifact( - name=f"{user}/{project}/my-sample-portfolio:v0", type="test-type" - ) - aliases = artifact.aliases - assert "portfolio" in aliases - assert "boom" in aliases - assert "sequence" not in aliases - - -def test_artifact_version(wandb_init): - def create_test_artifact(content: str): - art = wandb.Artifact("test-artifact", "test-type") - with open("boom.txt", "w") as f: - f.write(content) - art.add_file("boom.txt", "test-name") - return art - - # Create an artifact sequence + portfolio (auto-created if it doesn't exist) - project = "test" - run = wandb_init(project=project) - - art = create_test_artifact("aaaaa") - run.log_artifact(art, aliases=["a"]) - art.wait() - - art = create_test_artifact("bbbb") - run.log_artifact(art, aliases=["b"]) - run.link_artifact(art, f"{project}/my-sample-portfolio") - art.wait() - run.finish() - - # Pull down from portfolio, verify version is indexed from portfolio not sequence - artifact = Api().artifact( - name=f"{project}/my-sample-portfolio:latest", type="test-type" - ) - - assert artifact.version == "v0" - assert artifact.source_version == "v1" diff --git a/tox.ini b/tox.ini index 3b3d8a6eff7..f31080883a0 100644 --- a/tox.ini +++ b/tox.ini @@ -6,7 +6,7 @@ envlist= mypy, flake8, docstrings, - py{36,37,38,39,launch,launch38}, + py{36,37,38,39,launch}, func-s_{base,sklearn,metaflow,tf115,tf21,tf24,tf25,tf26,ray112,ray2,service,docs, imports{1,2,3,4,5,6,7,8,9,10,11,12},noml,grpc}-py37, standalone-{cpu,gpu,tpu,local}-py38, @@ -41,19 +41,19 @@ deps = bokeh # TODO: remove after conftest.py refactor nbclient # TODO: remove after conftest.py refactor -[testenv:py{36,37,38,39,310,launch,launch38}] +[testenv:py{36,37,38,39,310,launch}] deps = {[unitbase]deps} -r{toxinidir}/requirements_dev.txt install_command = - py{36,37,38,39,launch,launch38}: pip install --timeout 600 --extra-index-url https://download.pytorch.org/whl/cpu {opts} {packages} + py{36,37,38,39,launch}: pip install --timeout 600 --extra-index-url https://download.pytorch.org/whl/cpu {opts} {packages} passenv = USERNAME CI_PYTEST_SPLIT_ARGS CI_PYTEST_PARALLEL CI setenv = - py{36,37,38,39,310,launch,launch38}: COVERAGE_FILE={envdir}/.coverage + py{36,37,38,39,310,launch}: COVERAGE_FILE={envdir}/.coverage py{37,39}: WINDIR=C:\\Windows # Pytorch installations on non-darwin need the `-f` whitelist_externals = @@ -61,9 +61,8 @@ whitelist_externals = bash commands = mkdir -p test-results - py{36,37,38,39,310}: python -m pytest {env:CI_PYTEST_SPLIT_ARGS:} -n={env:CI_PYTEST_PARALLEL:10} --durations=20 --reruns 3 --reruns-delay 1 --junitxml=test-results/junit.xml --cov-config=.coveragerc --cov --cov-report= --no-cov-on-fail {posargs:tests/} - pylaunch: python -m pytest {env:CI_PYTEST_SPLIT_ARGS:} -n={env:CI_PYTEST_PARALLEL:4} --durations=20 --reruns 3 --reruns-delay 1 --junitxml=test-results/junit.xml --cov-config=.coveragerc --cov --cov-report= --no-cov-on-fail {posargs:tests/unit_tests_old/tests_launch/} - pylaunch38: python -m pytest {env:CI_PYTEST_SPLIT_ARGS:} -n={env:CI_PYTEST_PARALLEL:4} --durations=20 --reruns 3 --reruns-delay 1 --junitxml=test-results/junit.xml --cov-config=.coveragerc --cov --cov-report= --no-cov-on-fail 
{posargs:tests/unit_tests/tests_launch/} + py{36,37,38,39,310}: python -m pytest {env:CI_PYTEST_SPLIT_ARGS:} -n={env:CI_PYTEST_PARALLEL:10} --durations=20 --reruns 3 --reruns-delay 1 --junitxml=test-results/junit.xml --cov-config=.coveragerc --cov --cov-report= --no-cov-on-fail {posargs:tests/pytest_tests} + pylaunch: python -m pytest {env:CI_PYTEST_SPLIT_ARGS:} -n={env:CI_PYTEST_PARALLEL:4} --durations=20 --reruns 3 --reruns-delay 1 --junitxml=test-results/junit.xml --cov-config=.coveragerc --cov --cov-report= --no-cov-on-fail {posargs:tests/pytest_tests/unit_tests_old/tests_launch/} [testenv:unit-s_nb-py{36,37,38,39,310}] deps = @@ -74,7 +73,7 @@ deps = whitelist_externals = mkdir setenv = - py{36,37,38,39,310,launch,launch38}: COVERAGE_FILE={envdir}/.coverage + py{36,37,38,39,310,launch}: COVERAGE_FILE={envdir}/.coverage py{37}: WINDIR=C:\\Windows s_nb: WB_UNIT_SHARD=s_nb passenv = @@ -84,7 +83,7 @@ passenv = commands = s_nb: ipython kernel install --user --name=wandb_python mkdir -p test-results - python -m pytest {env:CI_PYTEST_SPLIT_ARGS:} -n={env:CI_PYTEST_PARALLEL:{env:WB_UNIT_PARALLEL:4}} --durations=20 --reruns 3 --reruns-delay 1 --junitxml=test-results/junit.xml --cov-config=.coveragerc --cov --cov-report= --no-cov-on-fail --timeout 300 {posargs:tests/unit_tests_old/tests_{env:WB_UNIT_SHARD}/} + python -m pytest {env:CI_PYTEST_SPLIT_ARGS:} -n={env:CI_PYTEST_PARALLEL:{env:WB_UNIT_PARALLEL:4}} --durations=20 --reruns 3 --reruns-delay 1 --junitxml=test-results/junit.xml --cov-config=.coveragerc --cov --cov-report= --no-cov-on-fail --timeout 300 {posargs:tests/pytest_tests/unit_tests_old/tests_{env:WB_UNIT_SHARD}/} [testenv:dev] usedevelop = true