From d44041f8414a248fd6418ba207eea7fc84e9cf98 Mon Sep 17 00:00:00 2001 From: Yihuang Yu Date: Thu, 22 Aug 2024 15:00:20 +0800 Subject: [PATCH] rfe: Introduce ruff-pre-commit Ruff is a fast, extensible Python linter designed to catch common issues in Python code, such as syntax errors, stylistic inconsistencies, and potential bugs. Signed-off-by: Yihuang Yu --- .ci/cfg-lint-check.py | 32 +- .pre-commit-config.yaml | 6 + .ruff.toml | 38 + deps/input_event/input_event_linux.py | 1318 +++++---- deps/input_event/input_event_win.py | 568 ++-- generic/tests/arm_kvm_unit_tests.py | 30 +- generic/tests/autotest_control.py | 29 +- generic/tests/autotest_distro_detect.py | 5 +- generic/tests/autotest_regression.py | 192 +- generic/tests/avocado_guest.py | 31 +- generic/tests/boot.py | 17 +- generic/tests/boot_savevm.py | 43 +- generic/tests/build.py | 11 +- generic/tests/clock_getres.py | 16 +- generic/tests/dd_test.py | 136 +- generic/tests/downgrade_qcow2_version.py | 10 +- generic/tests/ethtool.py | 125 +- generic/tests/fillup_disk.py | 5 +- generic/tests/guest_suspend.py | 22 +- generic/tests/guest_test.py | 35 +- generic/tests/hwclock.py | 13 +- generic/tests/invalid_para_mq.py | 24 +- generic/tests/iofuzz.py | 40 +- generic/tests/iometer_windows.py | 53 +- generic/tests/ioquit.py | 12 +- generic/tests/iozone_windows.py | 78 +- generic/tests/jumbo.py | 155 +- generic/tests/kdump.py | 160 +- generic/tests/ksm_services.py | 32 +- generic/tests/linux_stress.py | 21 +- generic/tests/lvm.py | 86 +- generic/tests/mac_change.py | 82 +- generic/tests/module_probe.py | 19 +- generic/tests/monotonic_time.py | 41 +- generic/tests/multi_queues_test.py | 71 +- generic/tests/multicast.py | 32 +- generic/tests/netperf.py | 587 ++-- generic/tests/netstress_kill_guest.py | 85 +- generic/tests/nfs_corrupt.py | 115 +- generic/tests/nic_promisc.py | 17 +- generic/tests/nicdriver_unload.py | 90 +- generic/tests/ntpd.py | 125 +- generic/tests/ntttcp.py | 94 +- generic/tests/os_update.py | 2 +- generic/tests/ping.py | 93 +- generic/tests/pktgen_perf.py | 62 +- generic/tests/pxe_boot.py | 16 +- generic/tests/rtc.py | 17 +- generic/tests/save_restore.py | 44 +- generic/tests/shutdown.py | 26 +- generic/tests/stress_boot.py | 25 +- generic/tests/syzkaller.py | 75 +- generic/tests/trans_hugepage.py | 70 +- generic/tests/trans_hugepage_defrag.py | 17 +- generic/tests/trans_hugepage_memory_stress.py | 30 +- generic/tests/trans_hugepage_relocated.py | 10 +- generic/tests/trans_hugepage_swapping.py | 51 +- generic/tests/tsc.py | 17 +- generic/tests/unattended_install.py | 5 +- generic/tests/vlan.py | 169 +- generic/tests/whql_client_install.py | 68 +- generic/tests/whql_env_setup.py | 78 +- generic/tests/whql_hck_client_install.py | 41 +- generic/tests/whql_submission.py | 159 +- .../tests/migration_multi_host.py | 66 +- .../migration_multi_host_auto_converge.py | 217 +- .../tests/migration_multi_host_cancel.py | 32 +- ...migration_multi_host_downtime_and_speed.py | 136 +- .../migration_multi_host_firewall_block.py | 172 +- .../migration_multi_host_helper_tests.py | 55 +- .../tests/migration_multi_host_ping_pong.py | 162 +- .../tests/migration_multi_host_timedrift.py | 86 +- ...migration_multi_host_with_file_transfer.py | 144 +- .../tests/migration_multi_host_with_kdump.py | 124 +- .../tests/migration_multi_host_with_reboot.py | 31 +- ...ation_multi_host_with_speed_measurement.py | 89 +- .../tests/migration_multi_host_with_stress.py | 53 +- .../tests/migration_multi_host_with_xbzrle.py | 164 +- 
openvswitch/tests/load_module.py | 14 +- openvswitch/tests/ovs_basic.py | 157 +- provider/ansible.py | 104 +- provider/backup_utils.py | 252 +- provider/block_devices_plug.py | 272 +- provider/block_dirty_bitmap.py | 117 +- provider/blockdev_backup_base.py | 76 +- provider/blockdev_backup_parallel.py | 6 +- provider/blockdev_base.py | 58 +- provider/blockdev_commit_base.py | 45 +- provider/blockdev_full_backup_base.py | 5 +- provider/blockdev_full_backup_parallel.py | 3 +- provider/blockdev_live_backup_base.py | 40 +- provider/blockdev_mirror_base.py | 46 +- provider/blockdev_mirror_nowait.py | 18 +- provider/blockdev_mirror_parallel.py | 17 +- provider/blockdev_mirror_wait.py | 12 +- provider/blockdev_snapshot_base.py | 35 +- provider/blockdev_stream_base.py | 26 +- provider/blockdev_stream_nowait.py | 12 +- provider/blockdev_stream_parallel.py | 15 +- provider/cdrom.py | 71 +- provider/chardev_utils.py | 54 +- provider/cpu_utils.py | 84 +- provider/cpuflags.py | 7 +- provider/dpdk_utils.py | 78 +- provider/hostdev/__init__.py | 11 +- provider/hostdev/dev_setup.py | 2 +- provider/hostdev/utils.py | 4 +- provider/in_place_upgrade_base.py | 39 +- provider/input_event_proxy.py | 680 +++-- provider/input_tests.py | 156 +- provider/job_utils.py | 76 +- provider/message_queuing.py | 94 +- provider/nbd_image_export.py | 205 +- provider/netperf.py | 49 +- provider/netperf_base.py | 57 +- provider/netperf_test.py | 50 +- provider/pktgen_utils.py | 217 +- provider/qemu_img_utils.py | 23 +- provider/qsd.py | 224 +- provider/sgx.py | 97 +- provider/slof.py | 90 +- provider/storage_benchmark.py | 274 +- provider/thp_fragment_tool.py | 11 +- provider/throttle_utils.py | 136 +- provider/vdpa_sim_utils.py | 65 +- provider/vioinput_basic.py | 38 +- provider/virt_storage/backend/base.py | 19 +- provider/virt_storage/backend/directory.py | 9 +- provider/virt_storage/backend/rbd.py | 20 +- provider/virt_storage/exception.py | 6 +- provider/virt_storage/helper/fscli.py | 13 +- provider/virt_storage/helper/rbdcli.py | 11 +- provider/virt_storage/storage_admin.py | 28 +- provider/virt_storage/storage_volume.py | 66 +- provider/virt_storage/utils/state.py | 33 +- provider/virt_storage/utils/storage_util.py | 12 +- provider/virt_storage/virt_auth.py | 9 +- provider/virt_storage/virt_device.py | 9 +- provider/virt_storage/virt_encryption.py | 9 +- provider/virt_storage/virt_secret.py | 5 +- provider/virt_storage/virt_source.py | 9 +- provider/virt_storage/virt_target.py | 5 +- provider/virtio_fs_utils.py | 311 +- provider/virtio_mem_utils.py | 4 +- provider/win_dev.py | 9 +- provider/win_driver_installer_test.py | 216 +- provider/win_driver_utils.py | 272 +- provider/win_dump_utils.py | 69 +- provider/win_hlk_suite.py | 263 +- provider/win_wora.py | 26 +- qemu/deps/cdrom/tray_open.py | 9 +- qemu/deps/performance/start_testpmd.py | 76 +- qemu/deps/softlockup/heartbeat_slu.py | 81 +- qemu/deps/spice/build_install.py | 161 +- qemu/deps/spice/key_event_form.py | 5 +- .../win_driver_install/win_driver_install.py | 143 +- .../win_serial/VirtIoChannel_guest_recieve.py | 33 +- qemu/deps/win_serial/serial-host-send.py | 16 +- qemu/deps/win_serial/windows_support.py | 61 +- qemu/deps/windows_ga_install/get_package.py | 87 +- qemu/tests/9p.py | 16 +- qemu/tests/aio_test.py | 38 +- qemu/tests/ansible_test.py | 35 +- .../ansible_with_responsive_migration.py | 68 +- qemu/tests/apicv_test.py | 47 +- qemu/tests/arm_cpu_test_clusters.py | 26 +- qemu/tests/arm_gic.py | 24 +- qemu/tests/audio.py | 7 +- qemu/tests/avic_test.py | 1 + 
qemu/tests/balloon_boot_in_pause.py | 94 +- qemu/tests/balloon_check.py | 246 +- qemu/tests/balloon_disable.py | 9 +- qemu/tests/balloon_hotplug.py | 79 +- qemu/tests/balloon_memhp.py | 30 +- qemu/tests/balloon_minimum.py | 11 +- qemu/tests/balloon_sc_interrogate.py | 21 +- qemu/tests/balloon_service.py | 52 +- qemu/tests/balloon_stop_continue.py | 4 +- qemu/tests/balloon_stress.py | 32 +- qemu/tests/balloon_thp.py | 26 +- qemu/tests/balloon_uniqueness.py | 37 +- qemu/tests/bitmap_boundary_test.py | 22 +- qemu/tests/bitmaps_merge_with_nospace.py | 17 +- qemu/tests/blk_commit.py | 18 +- qemu/tests/blk_stream.py | 25 +- qemu/tests/block_aio_io_uring.py | 20 +- .../tests/block_auto_detect_size_increased.py | 58 +- qemu/tests/block_boot_multi_disks.py | 53 +- qemu/tests/block_boot_under_low_speed.py | 32 +- qemu/tests/block_check_event.py | 10 +- qemu/tests/block_check_fds.py | 23 +- qemu/tests/block_check_max_tranfer_length.py | 30 +- qemu/tests/block_check_memory_leak.py | 27 +- qemu/tests/block_check_serial.py | 17 +- qemu/tests/block_commit_reboot.py | 15 +- qemu/tests/block_commit_stress.py | 34 +- qemu/tests/block_copy.py | 112 +- qemu/tests/block_detect_zeroes.py | 38 +- qemu/tests/block_discard.py | 61 +- qemu/tests/block_discard_hotplug.py | 18 +- qemu/tests/block_discard_write_same.py | 25 +- qemu/tests/block_during_io.py | 129 +- qemu/tests/block_hotplug.py | 156 +- qemu/tests/block_hotplug_in_pause.py | 142 +- qemu/tests/block_hotplug_negative.py | 14 +- qemu/tests/block_hotplug_passthrough.py | 41 +- qemu/tests/block_hotplug_scsi_hba.py | 47 +- qemu/tests/block_hotplug_with_cpu_hotplug.py | 30 +- qemu/tests/block_io_with_unaligned_offset.py | 3 +- qemu/tests/block_iothread_test.py | 58 +- qemu/tests/block_iscsi_4kdisk.py | 18 +- qemu/tests/block_iscsi_fault_disk.py | 79 +- .../block_iscsi_format_large_size_disk.py | 51 +- qemu/tests/block_iscsi_lvm.py | 34 +- .../block_iscsi_with_specical_max_sectors.py | 49 +- ...lock_kill_reconnect_with_remote_storage.py | 70 +- qemu/tests/block_libblkio_release.py | 4 +- qemu/tests/block_lvm_read_only.py | 11 +- qemu/tests/block_multifunction.py | 125 +- qemu/tests/block_multifunction_scale.py | 40 +- qemu/tests/block_performance_test.py | 198 +- qemu/tests/block_repeat_blockdev_add.py | 6 +- qemu/tests/block_resize.py | 194 +- qemu/tests/block_resize_unplug.py | 90 +- qemu/tests/block_scsi_device.py | 51 +- qemu/tests/block_scsi_generic_inquiry.py | 43 +- qemu/tests/block_stream.py | 32 +- qemu/tests/block_stream_check_backingfile.py | 30 +- qemu/tests/block_stream_drop_backingfile.py | 61 +- qemu/tests/block_stream_installation.py | 10 +- qemu/tests/block_stream_negative.py | 22 +- qemu/tests/block_stream_reboot.py | 7 +- qemu/tests/block_stream_simple.py | 5 +- qemu/tests/block_stream_stress.py | 22 +- qemu/tests/block_transfer_parameters_check.py | 44 +- qemu/tests/block_vhost_vdpa_test.py | 39 +- qemu/tests/block_with_iommu.py | 35 +- qemu/tests/block_with_share_rw.py | 29 +- qemu/tests/block_with_write_threshold.py | 66 +- qemu/tests/blockdev_commit.py | 15 +- qemu/tests/blockdev_commit_auto_readonly.py | 4 +- qemu/tests/blockdev_commit_backing_file.py | 15 +- qemu/tests/blockdev_commit_cor.py | 4 +- .../tests/blockdev_commit_filter_node_name.py | 17 +- qemu/tests/blockdev_commit_fio.py | 13 +- qemu/tests/blockdev_commit_firewall.py | 39 +- .../blockdev_commit_forbidden_actions.py | 29 +- .../blockdev_commit_general_operation.py | 29 +- qemu/tests/blockdev_commit_hotunplug.py | 17 +- qemu/tests/blockdev_commit_install.py | 10 +- 
.../tests/blockdev_commit_non_existed_node.py | 11 +- qemu/tests/blockdev_commit_powerdown.py | 4 +- ...blockdev_commit_query_named_block_nodes.py | 5 +- qemu/tests/blockdev_commit_reboot.py | 4 +- qemu/tests/blockdev_commit_server_down.py | 50 +- qemu/tests/blockdev_commit_specify_node.py | 14 +- qemu/tests/blockdev_commit_speed_limit.py | 22 +- qemu/tests/blockdev_commit_standby.py | 12 +- qemu/tests/blockdev_commit_stop_cont.py | 5 +- qemu/tests/blockdev_commit_stress.py | 3 +- qemu/tests/blockdev_commit_throttle.py | 2 - qemu/tests/blockdev_commit_to_nospace.py | 7 +- qemu/tests/blockdev_commit_top.py | 2 - qemu/tests/blockdev_commit_with_ignore.py | 22 +- qemu/tests/blockdev_commit_with_ioerror.py | 27 +- qemu/tests/blockdev_commit_with_stop.py | 13 +- ...lockdev_full_backup_invalid_max_workers.py | 6 +- .../blockdev_full_backup_invalid_sync_mode.py | 4 +- .../tests/blockdev_full_backup_multi_disks.py | 10 +- .../blockdev_full_backup_nonexist_target.py | 11 +- qemu/tests/blockdev_full_backup_reboot.py | 1 - qemu/tests/blockdev_full_backup_stress.py | 8 +- .../tests/blockdev_full_backup_with_bitmap.py | 7 +- qemu/tests/blockdev_full_backup_x_perf.py | 12 +- qemu/tests/blockdev_full_mirror.py | 29 +- .../blockdev_inc_backup_add_bitmap_to_raw.py | 15 +- ...blockdev_inc_backup_add_disabled_bitmap.py | 22 +- ...ackup_add_persistent_bitmap_when_paused.py | 28 +- .../tests/blockdev_inc_backup_after_commit.py | 54 +- .../tests/blockdev_inc_backup_bitmap_inuse.py | 36 +- ...blockdev_inc_backup_bitmap_max_len_name.py | 62 +- .../blockdev_inc_backup_bitmap_mode_test.py | 53 +- .../blockdev_inc_backup_bitmap_not_exist.py | 18 +- qemu/tests/blockdev_inc_backup_bitmap_size.py | 32 +- ...ckdev_inc_backup_bitmap_vm_crash_reboot.py | 38 +- ...kdev_inc_backup_bitmap_with_granularity.py | 35 +- ...blockdev_inc_backup_bitmap_with_hotplug.py | 38 +- .../tests/blockdev_inc_backup_clear_bitmap.py | 46 +- ...blockdev_inc_backup_convert_with_bitmap.py | 43 +- .../blockdev_inc_backup_disable_bitmap.py | 50 +- ...v_inc_backup_disabled_persistent_bitmap.py | 55 +- .../blockdev_inc_backup_enable_bitmap.py | 37 +- ...lockdev_inc_backup_expose_active_bitmap.py | 54 +- qemu/tests/blockdev_inc_backup_filternode.py | 44 +- qemu/tests/blockdev_inc_backup_inc_success.py | 47 +- ...blockdev_inc_backup_inconsistent_bitmap.py | 80 +- ...kup_merge_bitmaps_with_diff_granularity.py | 52 +- ...ckdev_inc_backup_merge_external_bitmaps.py | 48 +- ...dev_inc_backup_merge_to_nonexist_bitmap.py | 31 +- ...v_inc_backup_merge_with_nonexist_bitmap.py | 38 +- ...ckdev_inc_backup_migrate_without_bitmap.py | 63 +- ...blockdev_inc_backup_mod_readonly_bitmap.py | 38 +- ...ckdev_inc_backup_modify_backing_bitmaps.py | 32 +- .../tests/blockdev_inc_backup_never_always.py | 38 +- qemu/tests/blockdev_inc_backup_no_bitmap.py | 23 +- qemu/tests/blockdev_inc_backup_no_space.py | 27 +- ...ockdev_inc_backup_non_persistent_bitmap.py | 41 +- ...dev_inc_backup_nospace_with_bitmap_mode.py | 47 +- .../blockdev_inc_backup_pull_mode_diff.py | 206 +- .../blockdev_inc_backup_pull_mode_test.py | 136 +- .../blockdev_inc_backup_pull_mode_vm_down.py | 63 +- ...blockdev_inc_backup_pull_mode_vm_reboot.py | 64 +- .../blockdev_inc_backup_remove_bitmap.py | 28 +- qemu/tests/blockdev_inc_backup_resize.py | 67 +- ...lockdev_inc_backup_rm_persistent_bitmap.py | 70 +- ...lockdev_inc_backup_sync_bitmap_nobitmap.py | 26 +- .../blockdev_inc_backup_target_not_exist.py | 27 +- qemu/tests/blockdev_inc_backup_test.py | 72 +- .../blockdev_inc_backup_with_complete_mode.py | 60 +- 
.../blockdev_inc_backup_with_guest_agent.py | 55 +- qemu/tests/blockdev_inc_backup_with_ignore.py | 60 +- .../blockdev_inc_backup_with_migration.py | 72 +- .../blockdev_inc_backup_without_bitmap.py | 20 +- .../blockdev_inc_backup_without_bitmapmode.py | 31 +- ...lockdev_inc_backup_xpt_allocation_depth.py | 42 +- qemu/tests/blockdev_inc_backup_xpt_bitmap.py | 55 +- .../blockdev_inc_backup_xpt_incon_bitmap.py | 51 +- ...lockdev_inc_backup_xpt_multiple_bitmaps.py | 67 +- ...blockdev_inc_backup_xpt_nonexist_bitmap.py | 64 +- .../blockdev_mirror_after_block_error.py | 8 +- ...lockdev_mirror_cancel_ready_job_with_io.py | 31 +- ...ev_mirror_cancel_ready_job_with_ioerror.py | 70 +- .../blockdev_mirror_cancel_running_job.py | 14 +- .../blockdev_mirror_complete_running_job.py | 14 +- qemu/tests/blockdev_mirror_error.py | 3 +- qemu/tests/blockdev_mirror_filternode.py | 20 +- qemu/tests/blockdev_mirror_firewall.py | 49 +- .../blockdev_mirror_forbidden_actions.py | 31 +- qemu/tests/blockdev_mirror_hotunplug.py | 17 +- qemu/tests/blockdev_mirror_install.py | 37 +- qemu/tests/blockdev_mirror_multiple_blocks.py | 1 + qemu/tests/blockdev_mirror_no_space.py | 20 +- .../tests/blockdev_mirror_qemuio_ready_job.py | 10 +- qemu/tests/blockdev_mirror_qemuio_target.py | 6 +- qemu/tests/blockdev_mirror_readonly.py | 4 +- qemu/tests/blockdev_mirror_ready_vm_down.py | 51 +- .../blockdev_mirror_remote_server_down.py | 42 +- qemu/tests/blockdev_mirror_same_src_tgt.py | 12 +- qemu/tests/blockdev_mirror_simple.py | 19 +- qemu/tests/blockdev_mirror_speed.py | 46 +- qemu/tests/blockdev_mirror_src_no_space.py | 12 +- qemu/tests/blockdev_mirror_stress.py | 21 +- qemu/tests/blockdev_mirror_sync_none.py | 18 +- qemu/tests/blockdev_mirror_sync_top.py | 30 +- qemu/tests/blockdev_mirror_to_rbd.py | 1 + qemu/tests/blockdev_mirror_vm_stop_cont.py | 4 +- qemu/tests/blockdev_mirror_with_ignore.py | 17 +- qemu/tests/blockdev_snapshot_chains.py | 35 +- qemu/tests/blockdev_snapshot_data_file.py | 16 +- qemu/tests/blockdev_snapshot_guest_agent.py | 36 +- qemu/tests/blockdev_snapshot_install.py | 13 +- qemu/tests/blockdev_snapshot_merge.py | 20 +- qemu/tests/blockdev_snapshot_multi_disks.py | 72 +- qemu/tests/blockdev_snapshot_readonly.py | 14 +- qemu/tests/blockdev_snapshot_reboot.py | 17 +- qemu/tests/blockdev_snapshot_stop_cont.py | 14 +- qemu/tests/blockdev_snapshot_stress.py | 17 +- qemu/tests/blockdev_stream_backing_file.py | 26 +- qemu/tests/blockdev_stream_base_itself.py | 24 +- qemu/tests/blockdev_stream_cor_base.py | 47 +- qemu/tests/blockdev_stream_dirty_bitmap.py | 17 +- qemu/tests/blockdev_stream_filter_nodename.py | 35 +- .../blockdev_stream_forbidden_actions.py | 36 +- qemu/tests/blockdev_stream_general.py | 39 +- qemu/tests/blockdev_stream_hotunplug.py | 29 +- qemu/tests/blockdev_stream_install.py | 34 +- qemu/tests/blockdev_stream_multiple_blocks.py | 16 +- qemu/tests/blockdev_stream_no_backing.py | 15 +- qemu/tests/blockdev_stream_no_space.py | 18 +- .../blockdev_stream_none_existed_overlay.py | 7 +- qemu/tests/blockdev_stream_on_error_ignore.py | 47 +- qemu/tests/blockdev_stream_powerdown.py | 15 +- .../blockdev_stream_remote_server_down.py | 43 +- qemu/tests/blockdev_stream_speed.py | 58 +- qemu/tests/blockdev_stream_stress.py | 2 +- qemu/tests/blockdev_stream_subchain.py | 81 +- qemu/tests/blockdev_stream_to_invalid_node.py | 13 +- qemu/tests/blockdev_stream_vm_reboot.py | 2 +- qemu/tests/blockdev_stream_vm_stop_cont.py | 6 +- qemu/tests/blockdev_stream_with_ioerror.py | 24 +- qemu/tests/boot_N_M_virtserialports.py | 
32 +- qemu/tests/boot_cpu_model.py | 24 +- qemu/tests/boot_e1000e_with_cpu_flag.py | 10 +- qemu/tests/boot_from_device.py | 21 +- qemu/tests/boot_from_nbd_image.py | 26 +- qemu/tests/boot_from_remote.py | 84 +- qemu/tests/boot_from_virtiofs.py | 29 +- qemu/tests/boot_nbdimage_with_qsd.py | 24 +- qemu/tests/boot_nic_with_iommu.py | 13 +- qemu/tests/boot_order_check.py | 61 +- qemu/tests/boot_time.py | 24 +- qemu/tests/boot_with_different_vectors.py | 60 +- qemu/tests/boot_with_disable_ept.py | 7 +- qemu/tests/boot_with_machine_types.py | 34 +- qemu/tests/boot_with_multiqueue.py | 53 +- qemu/tests/boot_with_remote_readonly_image.py | 23 +- qemu/tests/boot_without_vectors.py | 25 +- qemu/tests/bridge_mirror.py | 57 +- qemu/tests/bridge_qinq.py | 226 +- qemu/tests/bridge_vlan.py | 157 +- qemu/tests/cache_sizes_test.py | 12 +- qemu/tests/cdrom.py | 558 ++-- qemu/tests/cdrom_block_size_check.py | 106 +- qemu/tests/ceph_image_mem_leak.py | 29 +- qemu/tests/cgroup.py | 1108 ++++---- qemu/tests/change_media.py | 84 +- qemu/tests/chardev_acpi.py | 18 +- qemu/tests/chardev_free_port.py | 41 +- qemu/tests/chardev_hotplug.py | 48 +- qemu/tests/chardev_legacy_unplug.py | 16 +- qemu/tests/chardev_remove_pending_watches.py | 22 +- qemu/tests/chardev_serial_login.py | 68 +- qemu/tests/chardev_tls_encryption.py | 58 +- qemu/tests/check_basepage_size.py | 29 +- qemu/tests/check_block_size.py | 29 +- qemu/tests/check_coredump.py | 59 +- qemu/tests/check_cvq_event.py | 11 +- qemu/tests/check_link_speed_duplex.py | 74 +- qemu/tests/check_nic_link_status.py | 46 +- qemu/tests/check_reports_end_offset.py | 10 +- qemu/tests/check_roms.py | 10 +- qemu/tests/check_unhalt_vcpu.py | 6 +- qemu/tests/client_guest_shutdown.py | 16 +- qemu/tests/cluster_size_check.py | 33 +- .../tests/commit_snapshot_to_backing_image.py | 35 +- qemu/tests/commit_snapshot_to_raw_backing.py | 45 +- qemu/tests/commit_with_backing.py | 59 +- qemu/tests/commit_with_bitmaps_nospace.py | 6 +- qemu/tests/convert_after_resize_snapshot.py | 48 +- qemu/tests/convert_image_from_raw.py | 44 +- qemu/tests/convert_to_virtual_disk.py | 16 +- qemu/tests/cpu_add.py | 120 +- qemu/tests/cpu_device_hotplug.py | 66 +- qemu/tests/cpu_device_hotplug_during_boot.py | 47 +- qemu/tests/cpu_device_hotplug_maximum.py | 69 +- qemu/tests/cpu_device_hotplug_time_jump.py | 16 +- qemu/tests/cpu_device_hotpluggable.py | 125 +- .../cpu_device_hotpluggable_with_numa.py | 78 +- .../cpu_device_hotpluggable_with_stress.py | 108 +- qemu/tests/cpu_hotplug.py | 63 +- qemu/tests/cpu_info_check.py | 128 +- qemu/tests/cpu_model_inter_generation.py | 35 +- qemu/tests/cpu_model_negative.py | 69 +- qemu/tests/cpu_offline_online.py | 18 +- qemu/tests/cpu_rdrand.py | 32 +- qemu/tests/cpu_topology_details_test.py | 60 +- qemu/tests/cpu_topology_test.py | 71 +- qemu/tests/cpuflags.py | 630 ++--- qemu/tests/cpuid.py | 308 +- qemu/tests/cpuinfo_query.py | 10 +- qemu/tests/create_large_img.py | 5 +- qemu/tests/create_macvtap_device.py | 76 +- qemu/tests/create_snapshot_on_running_base.py | 37 +- qemu/tests/ctrl_vlan.py | 76 +- qemu/tests/curl_cookie_with_secret.py | 50 +- qemu/tests/cyginstall.py | 6 +- qemu/tests/device_bit_check.py | 53 +- qemu/tests/device_option_check.py | 66 +- qemu/tests/differential_backup.py | 91 +- qemu/tests/disable_win_update.py | 18 +- qemu/tests/discard_for_raw_block_target.py | 45 +- qemu/tests/disk_extension.py | 75 +- qemu/tests/disk_extension_lvm.py | 31 +- qemu/tests/drive_mirror.py | 77 +- qemu/tests/drive_mirror_cancel.py | 7 +- 
qemu/tests/drive_mirror_complete.py | 5 +- qemu/tests/drive_mirror_installation.py | 10 +- qemu/tests/drive_mirror_powerdown.py | 10 +- qemu/tests/drive_mirror_simple.py | 15 +- qemu/tests/drive_mirror_stress.py | 26 +- qemu/tests/driver_in_use.py | 80 +- qemu/tests/driver_load.py | 30 +- qemu/tests/dump_guest_core.py | 44 +- qemu/tests/dump_guest_memory.py | 32 +- qemu/tests/edk2_basic.py | 8 +- qemu/tests/edk2_stability_test.py | 8 +- qemu/tests/eject_media.py | 49 +- qemu/tests/emulate_vf_reboot.py | 7 +- qemu/tests/emulate_vf_shutdown.py | 10 +- qemu/tests/enable_scatter_windows.py | 88 +- qemu/tests/enforce_quit.py | 12 +- qemu/tests/enospc.py | 96 +- qemu/tests/ept_test.py | 4 +- qemu/tests/expose_host_mtu.py | 55 +- qemu/tests/file_copy_stress.py | 13 +- qemu/tests/fio_linux.py | 30 +- qemu/tests/fio_perf.py | 221 +- qemu/tests/fio_windows.py | 24 +- qemu/tests/flag_check.py | 64 +- qemu/tests/floppy.py | 281 +- qemu/tests/flow_caches_stress_test.py | 93 +- qemu/tests/format_disk.py | 112 +- qemu/tests/fullscreen_setup.py | 24 +- qemu/tests/fwcfg.py | 48 +- qemu/tests/getfd.py | 17 +- qemu/tests/gluster_boot_snap_boot.py | 21 +- qemu/tests/gluster_create_images.py | 3 +- qemu/tests/guest_iommu_group.py | 19 +- qemu/tests/guest_iommu_test.py | 76 +- qemu/tests/hdparm.py | 67 +- qemu/tests/hello_world.py | 21 +- qemu/tests/hotplug_block_resize.py | 108 +- qemu/tests/hotplug_mem.py | 48 +- qemu/tests/hotplug_mem_migration.py | 29 +- qemu/tests/hotplug_mem_negative.py | 47 +- qemu/tests/hotplug_mem_repeat.py | 34 +- qemu/tests/hotplug_mem_reserve.py | 39 +- qemu/tests/hotplug_mem_share_discard_data.py | 45 +- qemu/tests/hotplug_mem_simple.py | 7 +- qemu/tests/hotplug_mem_stress_ng.py | 7 +- .../hotplug_port_chardev_pci_with_console.py | 18 +- qemu/tests/hotplug_unplug_during_io_repeat.py | 61 +- qemu/tests/hotplug_virtio_mem.py | 14 +- qemu/tests/hpt_huge_page_negative.py | 13 +- qemu/tests/hpt_max_page_size.py | 21 +- qemu/tests/hpt_miscellaneous.py | 44 +- qemu/tests/hugepage_mem_stress.py | 52 +- qemu/tests/hugepage_reset.py | 90 +- qemu/tests/hugepage_specify_node.py | 77 +- qemu/tests/hv_avic.py | 21 +- qemu/tests/hv_check_cpu_utilization.py | 53 +- qemu/tests/hv_crash.py | 4 +- qemu/tests/hv_enforce_cpuid_msr_check.py | 32 +- qemu/tests/hv_enforce_cpuid_smoke.py | 20 +- qemu/tests/hv_flag_cpuid_check.py | 28 +- qemu/tests/hv_info_check.py | 25 +- qemu/tests/hv_kvm_unit_test.py | 15 +- qemu/tests/hv_time.py | 34 +- qemu/tests/hv_tlbflush.py | 57 +- qemu/tests/hv_type.py | 6 +- qemu/tests/hv_vapic_test.py | 24 +- qemu/tests/image_commit_bypass_host_cache.py | 24 +- qemu/tests/image_compare_bypass_host_cache.py | 15 +- qemu/tests/image_convert_bypass_host_cache.py | 53 +- qemu/tests/image_create_with_large_size.py | 5 +- qemu/tests/image_create_with_preallocation.py | 26 +- qemu/tests/image_creation_lock_release.py | 13 +- ...mage_creation_luks_with_non_utf8_secret.py | 25 +- qemu/tests/image_locking_read_test.py | 45 +- qemu/tests/image_rebase_bypass_host_cache.py | 35 +- qemu/tests/in_place_upgrade.py | 28 +- qemu/tests/in_place_upgrade_legacy.py | 32 +- qemu/tests/insert_media.py | 29 +- qemu/tests/interrupt_check.py | 58 +- qemu/tests/invalid_cpu_device_hotplug.py | 70 +- qemu/tests/invalid_parameter.py | 5 +- qemu/tests/ioeventfd.py | 148 +- qemu/tests/iozone_linux.py | 51 +- qemu/tests/iperf_test.py | 105 +- qemu/tests/ipi_x2apic.py | 28 +- qemu/tests/kdump_with_stress.py | 37 +- qemu/tests/kernbench.py | 46 +- qemu/tests/kernel_install.py | 87 +- qemu/tests/kexec.py | 11 +- 
qemu/tests/kill_app.py | 13 +- qemu/tests/ksm_base.py | 93 +- qemu/tests/ksm_ksmtuned.py | 106 +- qemu/tests/ksm_overcommit.py | 269 +- qemu/tests/kvm_stat.py | 66 +- qemu/tests/kvm_unit_test.py | 115 +- qemu/tests/kvm_unit_test_nested.py | 3 +- .../larger_buffer_with_none_cache_mode.py | 35 +- qemu/tests/libvirt_host_model_test.py | 46 +- qemu/tests/live_backup.py | 1 + qemu/tests/live_backup_add_bitmap.py | 14 +- qemu/tests/live_backup_base.py | 92 +- qemu/tests/live_snapshot.py | 21 +- qemu/tests/live_snapshot_base.py | 33 +- qemu/tests/live_snapshot_basic.py | 36 +- qemu/tests/live_snapshot_chain.py | 71 +- qemu/tests/live_snapshot_integrity.py | 4 +- qemu/tests/live_snapshot_negative.py | 44 +- qemu/tests/live_snapshot_runtime.py | 10 +- qemu/tests/live_snapshot_simple.py | 1 + qemu/tests/live_snapshot_stress.py | 19 +- qemu/tests/live_snapshot_transaction.py | 26 +- qemu/tests/luks_convert.py | 25 +- qemu/tests/luks_image_over_qsd.py | 94 +- qemu/tests/macvtap_event_notification.py | 45 +- qemu/tests/macvtap_guest_communicate.py | 70 +- qemu/tests/max_channel_lun.py | 59 +- qemu/tests/memhp_threads.py | 30 +- qemu/tests/memory_leak_after_nichotplug.py | 55 +- qemu/tests/microcode_test.py | 1 + qemu/tests/migration.py | 135 +- qemu/tests/migration_after_nichotplug.py | 69 +- qemu/tests/migration_after_vm_paused.py | 102 +- .../migration_virtio_mem_ignore_shared.py | 75 +- qemu/tests/migration_with_block.py | 288 +- qemu/tests/migration_with_dst_problem.py | 337 +-- qemu/tests/migration_with_file_transfer.py | 60 +- qemu/tests/migration_with_json_backing.py | 69 +- qemu/tests/migration_with_netperf.py | 51 +- qemu/tests/migration_with_numa.py | 82 +- qemu/tests/migration_with_reboot.py | 29 +- qemu/tests/migration_with_sgx.py | 6 +- .../tests/migration_with_speed_measurement.py | 65 +- qemu/tests/migration_with_vsock.py | 46 +- qemu/tests/mlock_basic.py | 69 +- qemu/tests/mmu_basic.py | 19 +- qemu/tests/monitor_cmds_check.py | 11 +- qemu/tests/mq_change_qnum.py | 97 +- qemu/tests/mq_enabled_chk.py | 30 +- qemu/tests/msi_change_flag.py | 91 +- qemu/tests/multi_disk.py | 204 +- qemu/tests/multi_disk_random_hotplug.py | 278 +- qemu/tests/multi_disk_wild_hotplug.py | 41 +- qemu/tests/multi_macvtap_devices.py | 47 +- qemu/tests/multi_nics_stress.py | 212 +- qemu/tests/multi_nics_verify.py | 46 +- qemu/tests/multi_vms_file_transfer.py | 113 +- qemu/tests/multi_vms_nics.py | 201 +- qemu/tests/multi_vms_with_stress.py | 74 +- qemu/tests/nbd_long_export_name.py | 48 +- qemu/tests/nbd_map_snapshots.py | 25 +- qemu/tests/nbd_unix_connection.py | 8 +- qemu/tests/negative_create.py | 8 +- qemu/tests/nested_block_resize.py | 50 +- qemu/tests/nested_block_resize_l1.py | 22 +- qemu/tests/nested_hyperv_on_kvm.py | 38 +- qemu/tests/nested_interactive_agent.py | 26 +- qemu/tests/nested_libguestfs_unittest.py | 63 +- qemu/tests/nested_system_reset.py | 3 +- qemu/tests/nested_test.py | 60 +- qemu/tests/nested_vsock_con_sockets.py | 67 +- qemu/tests/nested_vsock_con_sockets_l1.py | 38 +- qemu/tests/netkvm_change_param_value_test.py | 29 +- qemu/tests/netkvm_cpu_mapping.py | 11 +- qemu/tests/netkvm_in_use.py | 40 +- qemu/tests/netkvm_protocol_binding.py | 50 +- qemu/tests/netkvm_rss_test.py | 12 +- qemu/tests/netperf_stress.py | 226 +- qemu/tests/netperf_udp.py | 94 +- qemu/tests/netperf_udp_perf.py | 182 +- qemu/tests/netuser_buffer_test.py | 30 +- qemu/tests/nfs_perf.py | 111 +- qemu/tests/nic_acpi_index.py | 43 +- qemu/tests/nic_acpi_index_boot.py | 10 +- qemu/tests/nic_bonding.py | 34 +- 
qemu/tests/nic_bonding_host.py | 66 +- qemu/tests/nic_hotplug.py | 109 +- qemu/tests/nic_opt.py | 106 +- qemu/tests/nic_teaming.py | 95 +- qemu/tests/nmi_bsod_catch.py | 39 +- qemu/tests/nmi_watchdog.py | 29 +- qemu/tests/nonexist_vcpu_hotplug.py | 37 +- qemu/tests/numa_basic.py | 38 +- qemu/tests/numa_consistency.py | 47 +- qemu/tests/numa_cpu.py | 141 +- qemu/tests/numa_dist.py | 14 +- qemu/tests/numa_hmat.py | 34 +- qemu/tests/numa_maxnodes.py | 24 +- qemu/tests/numa_memdev_mlock.py | 15 +- qemu/tests/numa_memdev_options.py | 144 +- qemu/tests/numa_negative.py | 22 +- qemu/tests/numa_node_affinity.py | 12 +- qemu/tests/numa_opts.py | 76 +- qemu/tests/numa_prealloc_handling.py | 20 +- qemu/tests/numa_prealloc_threads.py | 68 +- qemu/tests/numa_stress.py | 74 +- qemu/tests/nvdimm.py | 53 +- qemu/tests/nvdimm_mapsync.py | 18 +- qemu/tests/nvdimm_mode.py | 5 +- qemu/tests/nvdimm_negative.py | 15 +- qemu/tests/nvdimm_redis.py | 9 +- qemu/tests/nvme_plug.py | 33 +- qemu/tests/nx.py | 32 +- qemu/tests/offload_checksum_windows.py | 21 +- qemu/tests/openflow_acl_test.py | 184 +- qemu/tests/openflow_test.py | 208 +- qemu/tests/ovmf_check_efi.py | 9 +- qemu/tests/ovs_enslave_port.py | 20 +- qemu/tests/ovs_host_vlan.py | 188 +- qemu/tests/ovs_mirror.py | 85 +- qemu/tests/ovs_qos.py | 122 +- qemu/tests/ovs_quit.py | 18 +- qemu/tests/passthrough_fc_with_lun_device.py | 57 +- .../passthrough_with_multipath_device.py | 34 +- qemu/tests/pci_bridge.py | 132 +- qemu/tests/pci_devices.py | 246 +- qemu/tests/pci_hotplug.py | 230 +- qemu/tests/pci_hotplug_check.py | 200 +- qemu/tests/pci_hotunplug.py | 36 +- qemu/tests/pcie_hotplug_opt.py | 78 +- qemu/tests/perf_kvm.py | 15 +- qemu/tests/performance.py | 226 +- qemu/tests/physical_resources_check.py | 170 +- qemu/tests/ping_kill_test.py | 35 +- qemu/tests/pktgen.py | 60 +- qemu/tests/ple_test.py | 21 +- qemu/tests/plug_cdrom.py | 74 +- qemu/tests/power_htm.py | 27 +- qemu/tests/power_nvram.py | 16 +- qemu/tests/ppc_change_smt.py | 26 +- qemu/tests/ppc_check_cpu_and_mmu.py | 24 +- qemu/tests/ppc_ic_mode_check.py | 14 +- qemu/tests/ppc_nested_compat.py | 19 +- qemu/tests/pvpanic.py | 53 +- qemu/tests/pvpanic_event_check.py | 29 +- qemu/tests/pvpanic_memory_leak.py | 12 +- qemu/tests/pxe_query_cpus.py | 51 +- qemu/tests/qcow2perf.py | 49 +- qemu/tests/qemu_disk_img.py | 93 +- qemu/tests/qemu_disk_img_commit.py | 21 +- qemu/tests/qemu_disk_img_convert.py | 26 +- qemu/tests/qemu_disk_img_info.py | 142 +- qemu/tests/qemu_disk_img_rebase.py | 26 +- qemu/tests/qemu_disk_img_snapshot.py | 15 +- qemu/tests/qemu_guest_agent.py | 2505 +++++++++-------- qemu/tests/qemu_guest_agent_hotplug.py | 4 +- qemu/tests/qemu_guest_agent_snapshot.py | 70 +- qemu/tests/qemu_guest_agent_suspend.py | 7 +- qemu/tests/qemu_guest_agent_update.py | 68 +- qemu/tests/qemu_img.py | 354 +-- qemu/tests/qemu_img_bitmap.py | 113 +- qemu/tests/qemu_img_check_data_integrity.py | 24 +- ...heck_data_integrity_with_lazy_refcounts.py | 21 +- qemu/tests/qemu_img_check_fragmentation.py | 28 +- qemu/tests/qemu_img_convert_from_vdi.py | 3 +- ...u_img_convert_image_with_unaligned_size.py | 11 +- .../qemu_img_convert_with_backing_file.py | 48 +- .../qemu_img_convert_with_copy_offloading.py | 13 +- ...mu_img_convert_with_inconsistent_bitmap.py | 22 +- .../tests/qemu_img_convert_with_rate_limit.py | 10 +- .../qemu_img_convert_with_target_is_zero.py | 14 +- ...napshot_on_qcow2_target_from_raw_source.py | 16 +- qemu/tests/qemu_img_extent_size_hint.py | 9 +- qemu/tests/qemu_img_info_while_vm_running.py | 14 +- 
qemu/tests/qemu_img_lock_reject_boot.py | 18 +- qemu/tests/qemu_img_luks_key_management.py | 32 +- qemu/tests/qemu_img_map_unaligned_image.py | 106 +- qemu/tests/qemu_img_measure_convert_image.py | 34 +- qemu/tests/qemu_img_measure_new_image.py | 32 +- ...mg_supports_convert_coroutines_complete.py | 16 +- qemu/tests/qemu_io.py | 72 +- qemu/tests/qemu_io_blkdebug.py | 66 +- qemu/tests/qemu_iotests.py | 28 +- qemu/tests/qemu_killer_report.py | 13 +- qemu/tests/qemu_no_shutdown.py | 20 +- qemu/tests/qemu_nobody.py | 53 +- qemu/tests/qemu_option_check.py | 40 +- qemu/tests/qemu_output.py | 12 +- qemu/tests/qmp_basic.py | 153 +- qemu/tests/qmp_basic_rhel6.py | 117 +- qemu/tests/qmp_command.py | 98 +- qemu/tests/qmp_event_notification.py | 45 +- qemu/tests/qsd_backup_pull.py | 136 +- qemu/tests/qsd_backup_push.py | 39 +- qemu/tests/qsd_block_commit.py | 22 +- qemu/tests/qsd_block_mirror.py | 28 +- qemu/tests/qsd_block_stream.py | 24 +- qemu/tests/qsd_blockdev_check.py | 14 +- qemu/tests/qsd_export_vhub_check.py | 3 +- qemu/tests/qsd_hotplug_vubp.py | 30 +- qemu/tests/qsd_install.py | 3 +- qemu/tests/qsd_live_snapshot.py | 20 +- qemu/tests/qsd_object_check.py | 11 +- qemu/tests/qsd_pidfile_check.py | 6 +- qemu/tests/qsd_qmp_cmd_check.py | 19 +- qemu/tests/qsd_vubp_options.py | 23 +- qemu/tests/queues_number_test.py | 59 +- qemu/tests/raw_image_create_and_check.py | 23 +- qemu/tests/rdtsc_sync_test.py | 40 +- qemu/tests/readonly_disk.py | 59 +- qemu/tests/readonly_floppy.py | 51 +- qemu/tests/rebase_negative_test.py | 33 +- qemu/tests/rebase_onto_no_backing_file.py | 42 +- qemu/tests/rebase_onto_qcow2.py | 48 +- qemu/tests/rebase_second_snapshot_to_base.py | 54 +- qemu/tests/reboot_time.py | 31 +- qemu/tests/reject_qemu_img_info.py | 27 +- qemu/tests/remote_block_resize.py | 51 +- qemu/tests/remote_convert.py | 24 +- qemu/tests/remote_image_compress.py | 57 +- qemu/tests/remote_image_guestfish_access.py | 41 +- qemu/tests/remote_image_multiwrite.py | 14 +- qemu/tests/remote_image_ncat_access.py | 65 +- qemu/tests/remote_image_nmap_access.py | 25 +- qemu/tests/remote_image_qemu_info_access.py | 22 +- qemu/tests/remote_image_qemu_io_access.py | 90 +- qemu/tests/remote_image_unix_socket_access.py | 34 +- qemu/tests/remote_server_disconnected.py | 46 +- qemu/tests/remove_interface_from_host.py | 65 +- qemu/tests/resize_qemu_img.py | 53 +- qemu/tests/resize_short_overlay.py | 4 +- qemu/tests/rh_kernel_update.py | 133 +- qemu/tests/rh_qemu_iotests.py | 44 +- qemu/tests/rh_qemu_update.py | 129 +- qemu/tests/rng_bat.py | 50 +- qemu/tests/rng_driver_negative.py | 16 +- qemu/tests/rng_host_guest_read.py | 10 +- qemu/tests/rng_hotplug.py | 25 +- qemu/tests/rng_maxbytes_period.py | 43 +- qemu/tests/rng_stress.py | 35 +- qemu/tests/rv_audio.py | 66 +- qemu/tests/rv_build_install.py | 100 +- qemu/tests/rv_connect.py | 208 +- qemu/tests/rv_copyandpaste.py | 1145 +++++--- qemu/tests/rv_fullscreen.py | 18 +- qemu/tests/rv_input.py | 226 +- qemu/tests/rv_logging.py | 69 +- qemu/tests/rv_smartcard.py | 79 +- qemu/tests/rv_vdagent.py | 22 +- qemu/tests/rv_video.py | 50 +- qemu/tests/rv_vmshutdown.py | 38 +- qemu/tests/s390x_cpu_model_baseline.py | 55 +- qemu/tests/s390x_cpu_model_boot.py | 19 +- qemu/tests/s390x_cpu_model_expansion.py | 46 +- qemu/tests/same_mac_address.py | 30 +- qemu/tests/save_restore_vm.py | 14 +- qemu/tests/savevm_loadvm.py | 17 +- qemu/tests/seabios.py | 22 +- qemu/tests/seabios_bin.py | 40 +- qemu/tests/seabios_bootmenu_prompt.py | 24 +- qemu/tests/seabios_hotplug_unplug.py | 42 +- 
qemu/tests/seabios_order_once.py | 19 +- qemu/tests/seabios_reboot_timeout.py | 9 +- qemu/tests/seabios_scsi_lun.py | 11 +- qemu/tests/seabios_strict.py | 25 +- qemu/tests/secure_execution.py | 13 +- qemu/tests/secure_img.py | 24 +- qemu/tests/serial_no_listening.py | 24 +- qemu/tests/set_link.py | 125 +- qemu/tests/sev_basic_config.py | 7 +- qemu/tests/sev_dhcert_boot.py | 34 +- qemu/tests/sev_hotplug_mem.py | 19 +- qemu/tests/sgx_basic.py | 6 +- qemu/tests/sgx_cpu.py | 25 +- qemu/tests/sgx_multi_vms.py | 13 +- qemu/tests/single_driver_install.py | 101 +- qemu/tests/slof_balloon.py | 54 +- qemu/tests/slof_boot.py | 50 +- qemu/tests/slof_device_tree.py | 16 +- qemu/tests/slof_greater_lun_id.py | 13 +- qemu/tests/slof_hugepage.py | 85 +- qemu/tests/slof_memory.py | 14 +- qemu/tests/slof_multi_devices.py | 53 +- qemu/tests/slof_next_entry.py | 42 +- qemu/tests/slof_open_bios.py | 28 +- qemu/tests/slof_order.py | 58 +- qemu/tests/slof_user_interface.py | 127 +- qemu/tests/smartcard_setup.py | 18 +- qemu/tests/smbios_default_check.py | 19 +- qemu/tests/smbios_table.py | 70 +- qemu/tests/smt_test.py | 46 +- qemu/tests/snapshot_negative_test.py | 10 +- qemu/tests/softlockup.py | 38 +- qemu/tests/spapr_vty_multi_backends.py | 50 +- qemu/tests/sr_iov_boot_negative.py | 12 +- qemu/tests/sr_iov_hotplug.py | 187 +- qemu/tests/sr_iov_hotplug_negative.py | 41 +- qemu/tests/sr_iov_irqbalance.py | 116 +- qemu/tests/sr_iov_sanity.py | 84 +- qemu/tests/steal_time.py | 17 +- qemu/tests/stepmaker.py | 92 +- qemu/tests/steps.py | 108 +- qemu/tests/stop_continue.py | 41 +- qemu/tests/stress_kernel_compile.py | 39 +- qemu/tests/suspend_under_stress.py | 34 +- qemu/tests/sve_basic.py | 43 +- qemu/tests/sve_guest_suite.py | 84 +- qemu/tests/sve_host_suite.py | 46 +- qemu/tests/sve_invalid.py | 52 +- qemu/tests/sysprep.py | 36 +- qemu/tests/system_reset_bootable.py | 11 +- qemu/tests/systemtap_tracing.py | 26 +- qemu/tests/tcpreplay.py | 34 +- qemu/tests/test_SMM_enabled.py | 6 +- qemu/tests/test_vdpa_control_virtqueue.py | 11 +- qemu/tests/thin_provision_check_mode.py | 30 +- qemu/tests/thin_provision_guest_fstrim.py | 27 +- qemu/tests/thin_write_in_qemu_img_commit.py | 38 +- qemu/tests/throttle_block_set_io_throttle.py | 18 +- qemu/tests/throttle_cdrom_test.py | 18 +- .../throttle_multi_guests_parameter_test.py | 63 +- qemu/tests/throttle_operation_test.py | 53 +- qemu/tests/throttle_parameter_test.py | 28 +- qemu/tests/time_conv.py | 26 +- qemu/tests/time_manage.py | 44 +- qemu/tests/timedrift.py | 92 +- qemu/tests/timedrift_adjust_time.py | 121 +- qemu/tests/timedrift_check_after_load_vm.py | 57 +- qemu/tests/timedrift_check_non_event.py | 30 +- qemu/tests/timedrift_check_when_crash.py | 45 +- .../timedrift_check_when_hotplug_vcpu.py | 18 +- qemu/tests/timedrift_check_with_syscall.py | 14 +- qemu/tests/timedrift_monotonicity.py | 41 +- qemu/tests/timedrift_no_net.py | 163 +- qemu/tests/timedrift_no_net_win.py | 94 +- qemu/tests/timedrift_with_cpu_offline.py | 37 +- qemu/tests/timedrift_with_migration.py | 78 +- qemu/tests/timedrift_with_multi_vms.py | 33 +- qemu/tests/timedrift_with_reboot.py | 72 +- qemu/tests/timedrift_with_stop.py | 77 +- qemu/tests/timer_rtc_sync.py | 55 +- qemu/tests/timerdevice_boot.py | 100 +- .../timerdevice_change_guest_clksource.py | 29 +- qemu/tests/timerdevice_check_ntp_offset.py | 23 +- .../tests/timerdevice_clock_drift_with_ntp.py | 29 +- .../timerdevice_clock_drift_with_sleep.py | 76 +- qemu/tests/timerdevice_host_time_back.py | 33 +- 
...timerdevice_kvmclock_newer_msrs_support.py | 10 +- qemu/tests/timerdevice_time_jump_check.py | 13 +- qemu/tests/timerdevice_tsc_enable.py | 12 +- ...merdevice_tscsync_change_host_clksource.py | 48 +- qemu/tests/timerdevice_tscsync_longtime.py | 26 +- qemu/tests/timerdevice_tscwrite.py | 19 +- qemu/tests/tpm_bind_luks.py | 70 +- qemu/tests/tpm_check_buffer_size.py | 33 +- qemu/tests/tpm_unattended_install.py | 47 +- qemu/tests/tpm_verify_device.py | 54 +- qemu/tests/tpm_with_bitlocker.py | 43 +- qemu/tests/tpm_with_check_aavmf.py | 15 +- qemu/tests/tpm_with_hlk.py | 35 +- qemu/tests/tpm_with_tss.py | 16 +- qemu/tests/trace_cmd_boot.py | 36 +- qemu/tests/tracing_exception_injection.py | 15 +- qemu/tests/transfer_file_over_ipv6.py | 207 +- qemu/tests/trim_support_test.py | 48 +- qemu/tests/tsc_drift.py | 22 +- qemu/tests/uefi_boot_from_device.py | 19 +- qemu/tests/uefi_check_debugcon.py | 52 +- qemu/tests/uefi_check_log_info.py | 23 +- qemu/tests/uefi_check_resolution.py | 53 +- qemu/tests/uefi_check_secure_mor.py | 46 +- qemu/tests/uefi_pkg.py | 38 +- qemu/tests/uefi_secureboot.py | 76 +- qemu/tests/uefishell.py | 142 +- .../unattended_install_reboot_driftfix.py | 1 - qemu/tests/unittest_kvmctl.py | 6 +- qemu/tests/unplug_block_during_io_reboot.py | 55 +- ...fe_rebase_to_none_existing_backing_file.py | 42 +- qemu/tests/usb_basic_check.py | 16 +- qemu/tests/usb_common.py | 50 +- qemu/tests/usb_device_check.py | 27 +- qemu/tests/usb_device_check_negative.py | 18 +- qemu/tests/usb_host.py | 77 +- qemu/tests/usb_hotplug.py | 18 +- qemu/tests/usb_redir.py | 162 +- qemu/tests/usb_smartcard_sharing.py | 93 +- qemu/tests/usb_storage.py | 89 +- qemu/tests/valgrind_memalign.py | 18 +- qemu/tests/vdi_image_convert.py | 7 +- qemu/tests/vdi_image_create.py | 4 +- qemu/tests/vdpa_dpdk.py | 116 +- qemu/tests/vdpa_pxe_boot.py | 2 +- qemu/tests/vdpa_sim_blk_test.py | 32 +- .../tests/verify_panic_status_with_pvpanic.py | 21 +- qemu/tests/vfio_net_lifecycle.py | 3 +- qemu/tests/vhost_with_cgroup.py | 28 +- qemu/tests/vioinput_hotplug.py | 12 +- qemu/tests/vioinput_keyboard.py | 9 +- qemu/tests/vioinput_mice.py | 131 +- qemu/tests/vioser_in_use.py | 59 +- qemu/tests/virt_firmware_basic_test.py | 57 +- qemu/tests/virt_firmware_check_phys_bits.py | 75 +- qemu/tests/virt_subtest_combine.py | 20 +- qemu/tests/virtio_aer_opt.py | 61 +- .../virtio_blk_with_discard_write_zeroes.py | 63 +- qemu/tests/virtio_chardev_trace.py | 50 +- qemu/tests/virtio_console.py | 1128 ++++---- qemu/tests/virtio_driver_sign_check.py | 34 +- .../virtio_fs_group_permission_access.py | 113 +- qemu/tests/virtio_fs_host_owner_win.py | 154 +- qemu/tests/virtio_fs_hotplug.py | 176 +- qemu/tests/virtio_fs_map_uid_gid.py | 174 +- qemu/tests/virtio_fs_memory_leak_check.py | 53 +- qemu/tests/virtio_fs_multi_users_access.py | 148 +- qemu/tests/virtio_fs_multi_vms.py | 184 +- qemu/tests/virtio_fs_readonly.py | 36 +- qemu/tests/virtio_fs_sandbox.py | 110 +- qemu/tests/virtio_fs_set_capability.py | 233 +- qemu/tests/virtio_fs_share_data.py | 794 +++--- qemu/tests/virtio_fs_subtest_during_io.py | 55 +- qemu/tests/virtio_fs_supp_group_transfer.py | 138 +- qemu/tests/virtio_fs_support_win_fs.py | 13 +- .../virtio_fs_with_unknown_group_name.py | 44 +- qemu/tests/virtio_fs_write_same_space.py | 49 +- qemu/tests/virtio_fsd_check_info.py | 8 +- qemu/tests/virtio_mem_dynamic_memslots.py | 2 +- ...tio_mem_dynamic_memslots_with_migration.py | 4 +- qemu/tests/virtio_mem_numa_basic.py | 35 +- qemu/tests/virtio_mode.py | 40 +- qemu/tests/virtio_net_dpdk.py | 264 
+- qemu/tests/virtio_port_hotplug.py | 47 +- qemu/tests/virtio_port_login.py | 51 +- qemu/tests/virtio_scsi_mq.py | 147 +- qemu/tests/virtio_serial_empty_line.py | 39 +- qemu/tests/virtio_serial_file_transfer.py | 136 +- .../virtio_serial_file_transfer_max_ports.py | 26 +- ...io_serial_file_transfer_offline_migrate.py | 26 +- .../virtio_serial_hotplug_existed_port_pci.py | 26 +- .../virtio_serial_hotplug_max_chardevs.py | 76 +- qemu/tests/virtio_serial_hotplug_port_pci.py | 71 +- .../virtio_serial_hotplug_port_pci_chardev.py | 20 +- .../virtio_serial_large_file_transfer.py | 38 +- qemu/tests/virtio_serial_throttling.py | 20 +- .../virtio_serial_unplug_port_chardev_pci.py | 23 +- .../virtio_serial_various_chardev_hotplug.py | 52 +- qemu/tests/virtio_trace_pipenb.py | 16 +- .../virtio_win_installer_version_check.py | 46 +- qemu/tests/virtual_nic_private.py | 78 +- qemu/tests/virtual_nic_send_buffer.py | 59 +- qemu/tests/virtual_nic_stress.py | 29 +- qemu/tests/vmstop.py | 18 +- qemu/tests/vnc.py | 68 +- qemu/tests/vpmu_check_instructions.py | 20 +- qemu/tests/vsock_hotplug.py | 68 +- qemu/tests/vsock_negative_test.py | 44 +- qemu/tests/vsock_perf.py | 31 +- qemu/tests/vsock_test.py | 103 +- qemu/tests/vsock_test_suite.py | 45 +- qemu/tests/watchdog.py | 243 +- qemu/tests/win_heavyload.py | 45 +- qemu/tests/win_irq_check.py | 50 +- qemu/tests/win_msft_sign_check.py | 14 +- qemu/tests/win_nics_teaming.py | 45 +- qemu/tests/win_serial_tool_test.py | 25 +- qemu/tests/win_sigverif.py | 32 +- qemu/tests/win_video_play.py | 9 +- .../win_virtio_driver_install_by_installer.py | 37 +- .../win_virtio_driver_install_from_update.py | 37 +- .../win_virtio_driver_installer_repair.py | 44 +- .../win_virtio_driver_installer_uninstall.py | 8 +- .../win_virtio_driver_update_by_installer.py | 121 +- qemu/tests/win_virtio_driver_update_test.py | 19 +- .../win_virtio_serial_data_transfer_reboot.py | 58 +- qemu/tests/win_virtio_update.py | 127 +- qemu/tests/windows_info.py | 9 +- qemu/tests/x2avic_test.py | 3 +- qemu/tests/x86_cpu_L3_cache.py | 51 +- qemu/tests/x86_cpu_asyncpf.py | 38 +- qemu/tests/x86_cpu_flag_disable.py | 13 +- qemu/tests/x86_cpu_flag_intel_pt.py | 11 +- qemu/tests/x86_cpu_flag_nonstop_tsc.py | 9 +- qemu/tests/x86_cpu_flags.py | 13 +- qemu/tests/x86_cpu_model.py | 56 +- qemu/tests/x86_cpu_protection_key.py | 24 +- qemu/tests/x86_cpu_test_0x40000001.py | 12 +- qemu/tests/x86_cpu_test_dies.py | 32 +- qemu/tests/x86_cpu_v_spec_ctrl.py | 15 +- qemu/tests/yonit_bitmap.py | 25 +- qemu/tests/zero_copy.py | 24 +-
 1053 files changed, 34282 insertions(+), 30095 deletions(-)
 create mode 100644 .ruff.toml

diff --git a/.ci/cfg-lint-check.py b/.ci/cfg-lint-check.py
index 639a5a17c7..0585ad56cc 100755
--- a/.ci/cfg-lint-check.py
+++ b/.ci/cfg-lint-check.py
@@ -6,38 +6,38 @@


 def cfg_lint_check():
-    print('Running cfg lint check...')
+    print("Running cfg lint check...")
     exit_code = 0
     for file in sys.argv[1:]:
         status_code = 0
         blank_line = 0
-        cfg_path = os.path.abspath(os.path.join(os.path.dirname(__file__),
-                                                os.pardir, file))
-        with open(cfg_path, 'r') as f:
+        cfg_path = os.path.abspath(
+            os.path.join(os.path.dirname(__file__), os.pardir, file)
+        )
+        with open(cfg_path, "r") as f:
             contents = f.read()
         for num, line in enumerate(contents.splitlines(), 1):
             # Only strip whitespaces, handle other blank characters below
-            stripped_line = line.lstrip(' ')
-            blank_line = (blank_line + 1
-                          if re.search(r'^\s*$', stripped_line) else 0)
+            stripped_line = line.lstrip(" ")
+            blank_line = blank_line + 1 if re.search(r"^\s*$", stripped_line) else 0
             if blank_line >= 2:
-                print(f'{file}:{num}: Too many blank lines')
+                print(f"{file}:{num}: Too many blank lines")
                 status_code = 1
-            if re.search(r'\s$', line):
-                print(f'{file}:{num}: Trailing whitespaces')
+            if re.search(r"\s$", line):
+                print(f"{file}:{num}: Trailing whitespaces")
                 status_code = 1
-            if re.search(r'^\s', stripped_line):
-                print(f'{file}:{num}: Wrong indent(Unexpected blank characters')
+            if re.search(r"^\s", stripped_line):
+                print(f"{file}:{num}: Wrong indent(Unexpected blank characters")
                 status_code = 1
             if (len(line) - len(stripped_line)) % 4:
-                print(f'{file}:{num}: Wrong indent(4x spaces mismatch)')
+                print(f"{file}:{num}: Wrong indent(4x spaces mismatch)")
                 status_code = 1
-        if not contents.endswith('\n'):
-            print(f'{file} Missing final newline')
+        if not contents.endswith("\n"):
+            print(f"{file} Missing final newline")
             status_code = 1
         exit_code = exit_code or status_code
     sys.exit(exit_code)


-if __name__ == '__main__':
+if __name__ == "__main__":
     cfg_lint_check()
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 8ccc30c265..dfb04c3ffd 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -20,3 +20,9 @@ repos:
     args: ["--fix=lf"]
  - id: no-commit-to-branch
  - id: trailing-whitespace
+- repo: https://github.com/astral-sh/ruff-pre-commit
+  rev: v0.6.1
+  hooks:
+  - id: ruff
+    args: ["--fix"]
+  - id: ruff-format
diff --git a/.ruff.toml b/.ruff.toml
new file mode 100644
index 0000000000..1e4072f452
--- /dev/null
+++ b/.ruff.toml
@@ -0,0 +1,38 @@
+# Same as Black.
+line-length = 88
+indent-width = 4
+
+[lint]
+select = [
+    # pycodestyle
+    "E",
+    "W",
+    # Pyflakes
+    "F",
+    # flake8-logging-format
+    "G",
+    # pylint Error
+    "PLE",
+    # pyupgrade
+    "UP",
+    # isort
+    "I",
+]
+ignore = ["E402", "E501", "E722", "E741", "UP015"]
+
+# Allow fix for all enabled rules (when `--fix`) is provided.
+fixable = ["ALL"]
+unfixable = []
+
+[format]
+# Like Black, use double quotes for strings.
+quote-style = "double"
+
+# Like Black, indent with spaces, rather than tabs.
+indent-style = "space"
+
+# Like Black, respect magic trailing commas.
+skip-magic-trailing-comma = false
+
+# Like Black, automatically detect the appropriate line ending.
+line-ending = "auto" diff --git a/deps/input_event/input_event_linux.py b/deps/input_event/input_event_linux.py index ca23c5b233..abb2a9945b 100755 --- a/deps/input_event/input_event_linux.py +++ b/deps/input_event/input_event_linux.py @@ -1,21 +1,20 @@ import json import os -import sys import select import struct +import sys import threading - -TYPE_SYNC = 'SYNC' -TYPE_INFO = 'INFO' -TYPE_READY = 'READY' -TYPE_EVENT = 'EVENT' -TYPE_ERROR = 'ERROR' +TYPE_SYNC = "SYNC" +TYPE_INFO = "INFO" +TYPE_READY = "READY" +TYPE_EVENT = "EVENT" +TYPE_ERROR = "ERROR" EMPTY_CONTENT = {} def send_message(mtype, content): - message = {'type': mtype, 'content': content} + message = {"type": mtype, "content": content} sys.stdout.write(json.dumps(message)) sys.stdout.write(os.linesep) sys.stdout.flush() @@ -26,7 +25,7 @@ def sync_notify(): def info_notify(dev, info): - send_message(TYPE_INFO, {'device': dev, 'info': info}) + send_message(TYPE_INFO, {"device": dev, "info": info}) def ready_notify(): @@ -34,631 +33,623 @@ def ready_notify(): def event_notify(dev, event): - send_message(TYPE_EVENT, {'device': dev, 'event': event}) + send_message(TYPE_EVENT, {"device": dev, "event": event}) def error_notify(error, dev=None): - send_message(TYPE_ERROR, {'device': dev, 'message': error}) + send_message(TYPE_ERROR, {"device": dev, "message": error}) -EV_PACK_FMT = 'llHHI' +EV_PACK_FMT = "llHHI" EV_PACK_SIZE = struct.calcsize(EV_PACK_FMT) EV_TYPES = { - 0x00: 'EV_SYN', - 0x01: 'EV_KEY', - 0x02: 'EV_REL', - 0x03: 'EV_ABS', - 0x04: 'EV_MSC', - 0x05: 'EV_SW', - 0x11: 'EV_LED', - 0x12: 'EV_SND', - 0x14: 'EV_REP', - 0x15: 'EV_FF', - 0x16: 'EV_PWR', - 0x17: 'EV_FF_STATUS' + 0x00: "EV_SYN", + 0x01: "EV_KEY", + 0x02: "EV_REL", + 0x03: "EV_ABS", + 0x04: "EV_MSC", + 0x05: "EV_SW", + 0x11: "EV_LED", + 0x12: "EV_SND", + 0x14: "EV_REP", + 0x15: "EV_FF", + 0x16: "EV_PWR", + 0x17: "EV_FF_STATUS", } -EV_SYN_CODES = { - 0: 'SYN_REPORT', - 1: 'SYN_CONFIG', - 2: 'SYN_MT_REPORT', - 3: 'SYN_DROPPED' -} +EV_SYN_CODES = {0: "SYN_REPORT", 1: "SYN_CONFIG", 2: "SYN_MT_REPORT", 3: "SYN_DROPPED"} EV_KEY_CODES = { - 0: 'KEY_RESERVED', - 1: 'KEY_ESC', - 2: 'KEY_1', - 3: 'KEY_2', - 4: 'KEY_3', - 5: 'KEY_4', - 6: 'KEY_5', - 7: 'KEY_6', - 8: 'KEY_7', - 9: 'KEY_8', - 10: 'KEY_9', - 11: 'KEY_0', - 12: 'KEY_MINUS', - 13: 'KEY_EQUAL', - 14: 'KEY_BACKSPACE', - 15: 'KEY_TAB', - 16: 'KEY_Q', - 17: 'KEY_W', - 18: 'KEY_E', - 19: 'KEY_R', - 20: 'KEY_T', - 21: 'KEY_Y', - 22: 'KEY_U', - 23: 'KEY_I', - 24: 'KEY_O', - 25: 'KEY_P', - 26: 'KEY_LEFTBRACE', - 27: 'KEY_RIGHTBRACE', - 28: 'KEY_ENTER', - 29: 'KEY_LEFTCTRL', - 30: 'KEY_A', - 31: 'KEY_S', - 32: 'KEY_D', - 33: 'KEY_F', - 34: 'KEY_G', - 35: 'KEY_H', - 36: 'KEY_J', - 37: 'KEY_K', - 38: 'KEY_L', - 39: 'KEY_SEMICOLON', - 40: 'KEY_APOSTROPHE', - 41: 'KEY_GRAVE', - 42: 'KEY_LEFTSHIFT', - 43: 'KEY_BACKSLASH', - 44: 'KEY_Z', - 45: 'KEY_X', - 46: 'KEY_C', - 47: 'KEY_V', - 48: 'KEY_B', - 49: 'KEY_N', - 50: 'KEY_M', - 51: 'KEY_COMMA', - 52: 'KEY_DOT', - 53: 'KEY_SLASH', - 54: 'KEY_RIGHTSHIFT', - 55: 'KEY_KPASTERISK', - 56: 'KEY_LEFTALT', - 57: 'KEY_SPACE', - 58: 'KEY_CAPSLOCK', - 59: 'KEY_F1', - 60: 'KEY_F2', - 61: 'KEY_F3', - 62: 'KEY_F4', - 63: 'KEY_F5', - 64: 'KEY_F6', - 65: 'KEY_F7', - 66: 'KEY_F8', - 67: 'KEY_F9', - 68: 'KEY_F10', - 69: 'KEY_NUMLOCK', - 70: 'KEY_SCROLLLOCK', - 71: 'KEY_KP7', - 72: 'KEY_KP8', - 73: 'KEY_KP9', - 74: 'KEY_KPMINUS', - 75: 'KEY_KP4', - 76: 'KEY_KP5', - 77: 'KEY_KP6', - 78: 'KEY_KPPLUS', - 79: 'KEY_KP1', - 80: 'KEY_KP2', - 81: 'KEY_KP3', - 82: 'KEY_KP0', - 83: 'KEY_KPDOT', - 85: 
'KEY_ZENKAKUHANKAKU', - 86: 'KEY_102ND', - 87: 'KEY_F11', - 88: 'KEY_F12', - 89: 'KEY_RO', - 90: 'KEY_KATAKANA', - 91: 'KEY_HIRAGANA', - 92: 'KEY_HENKAN', - 93: 'KEY_KATAKANAHIRAGANA', - 94: 'KEY_MUHENKAN', - 95: 'KEY_KPJPCOMMA', - 96: 'KEY_KPENTER', - 97: 'KEY_RIGHTCTRL', - 98: 'KEY_KPSLASH', - 99: 'KEY_SYSRQ', - 100: 'KEY_RIGHTALT', - 101: 'KEY_LINEFEED', - 102: 'KEY_HOME', - 103: 'KEY_UP', - 104: 'KEY_PAGEUP', - 105: 'KEY_LEFT', - 106: 'KEY_RIGHT', - 107: 'KEY_END', - 108: 'KEY_DOWN', - 109: 'KEY_PAGEDOWN', - 110: 'KEY_INSERT', - 111: 'KEY_DELETE', - 112: 'KEY_MACRO', - 113: 'KEY_MUTE', - 114: 'KEY_VOLUMEDOWN', - 115: 'KEY_VOLUMEUP', - 116: 'KEY_POWER', - 117: 'KEY_KPEQUAL', - 118: 'KEY_KPPLUSMINUS', - 119: 'KEY_PAUSE', - 120: 'KEY_SCALE', - 121: 'KEY_KPCOMMA', - 122: 'KEY_HANGEUL', - 123: 'KEY_HANJA', - 124: 'KEY_YEN', - 125: 'KEY_LEFTMETA', - 126: 'KEY_RIGHTMETA', - 127: 'KEY_COMPOSE', - 128: 'KEY_STOP', - 129: 'KEY_AGAIN', - 130: 'KEY_PROPS', - 131: 'KEY_UNDO', - 132: 'KEY_FRONT', - 133: 'KEY_COPY', - 134: 'KEY_OPEN', - 135: 'KEY_PASTE', - 136: 'KEY_FIND', - 137: 'KEY_CUT', - 138: 'KEY_HELP', - 139: 'KEY_MENU', - 140: 'KEY_CALC', - 141: 'KEY_SETUP', - 142: 'KEY_SLEEP', - 143: 'KEY_WAKEUP', - 144: 'KEY_FILE', - 145: 'KEY_SENDFILE', - 146: 'KEY_DELETEFILE', - 147: 'KEY_XFER', - 148: 'KEY_PROG1', - 149: 'KEY_PROG2', - 150: 'KEY_WWW', - 151: 'KEY_MSDOS', - 152: 'KEY_SCREENLOCK', # alias: KEY_COFFEE - 153: 'KEY_DIRECTION', - 154: 'KEY_CYCLEWINDOWS', - 155: 'KEY_MAIL', - 156: 'KEY_BOOKMARKS', - 157: 'KEY_COMPUTER', - 158: 'KEY_BACK', - 159: 'KEY_FORWARD', - 160: 'KEY_CLOSECD', - 161: 'KEY_EJECTCD', - 162: 'KEY_EJECTCLOSECD', - 163: 'KEY_NEXTSONG', - 164: 'KEY_PLAYPAUSE', - 165: 'KEY_PREVIOUSSONG', - 166: 'KEY_STOPCD', - 167: 'KEY_RECORD', - 168: 'KEY_REWIND', - 169: 'KEY_PHONE', - 170: 'KEY_ISO', - 171: 'KEY_CONFIG', - 172: 'KEY_HOMEPAGE', - 173: 'KEY_REFRESH', - 174: 'KEY_EXIT', - 175: 'KEY_MOVE', - 176: 'KEY_EDIT', - 177: 'KEY_SCROLLUP', - 178: 'KEY_SCROLLDOWN', - 179: 'KEY_KPLEFTPAREN', - 180: 'KEY_KPRIGHTPAREN', - 181: 'KEY_NEW', - 182: 'KEY_REDO', - 183: 'KEY_F13', - 184: 'KEY_F14', - 185: 'KEY_F15', - 186: 'KEY_F16', - 187: 'KEY_F17', - 188: 'KEY_F18', - 189: 'KEY_F19', - 190: 'KEY_F20', - 191: 'KEY_F21', - 192: 'KEY_F22', - 193: 'KEY_F23', - 194: 'KEY_F24', - 200: 'KEY_PLAYCD', - 201: 'KEY_PAUSECD', - 202: 'KEY_PROG3', - 203: 'KEY_PROG4', - 204: 'KEY_DASHBOARD', - 205: 'KEY_SUSPEND', - 206: 'KEY_CLOSE', - 207: 'KEY_PLAY', - 208: 'KEY_FASTFORWARD', - 209: 'KEY_BASSBOOST', - 210: 'KEY_PRINT', - 211: 'KEY_HP', - 212: 'KEY_CAMERA', - 213: 'KEY_SOUND', - 214: 'KEY_QUESTION', - 215: 'KEY_EMAIL', - 216: 'KEY_CHAT', - 217: 'KEY_SEARCH', - 218: 'KEY_CONNECT', - 219: 'KEY_FINANCE', - 220: 'KEY_SPORT', - 221: 'KEY_SHOP', - 222: 'KEY_ALTERASE', - 223: 'KEY_CANCEL', - 224: 'KEY_BRIGHTNESSDOWN', - 225: 'KEY_BRIGHTNESSUP', - 226: 'KEY_MEDIA', - 227: 'KEY_SWITCHVIDEOMODE', - 228: 'KEY_KBDILLUMTOGGLE', - 229: 'KEY_KBDILLUMDOWN', - 230: 'KEY_KBDILLUMUP', - 231: 'KEY_SEND', - 232: 'KEY_REPLY', - 233: 'KEY_FORWARDMAIL', - 234: 'KEY_SAVE', - 235: 'KEY_DOCUMENTS', - 236: 'KEY_BATTERY', - 237: 'KEY_BLUETOOTH', - 238: 'KEY_WLAN', - 239: 'KEY_UWB', - 240: 'KEY_UNKNOWN', - 241: 'KEY_VIDEO_NEXT', - 242: 'KEY_VIDEO_PREV', - 243: 'KEY_BRIGHTNESS_CYCLE', - 244: 'KEY_BRIGHTNESS_ZERO', - 245: 'KEY_DISPLAY_OFF', - 246: 'KEY_WIMAX', - 247: 'KEY_RFKILL', - 248: 'KEY_MICMUTE', - 0x100: 'BTN_0', # alias: BTN_MISC - 0x101: 'BTN_1', - 0x102: 'BTN_2', - 0x103: 'BTN_3', - 0x104: 'BTN_4', - 0x105: 'BTN_5', - 0x106: 'BTN_6', 
- 0x107: 'BTN_7', - 0x108: 'BTN_8', - 0x109: 'BTN_9', - 0x110: 'BTN_LEFT', # alias: BTN_MOUSE - 0x111: 'BTN_RIGHT', - 0x112: 'BTN_MIDDLE', - 0x113: 'BTN_SIDE', - 0x114: 'BTN_EXTRA', - 0x115: 'BTN_FORWARD', - 0x116: 'BTN_BACK', - 0x117: 'BTN_TASK', - 0x120: 'BTN_TRIGGER', # alias: BTN_JOYSTICK - 0x121: 'BTN_THUMB', - 0x122: 'BTN_THUMB2', - 0x123: 'BTN_TOP', - 0x124: 'BTN_TOP2', - 0x125: 'BTN_PINKIE', - 0x126: 'BTN_BASE', - 0x127: 'BTN_BASE2', - 0x128: 'BTN_BASE3', - 0x129: 'BTN_BASE4', - 0x12a: 'BTN_BASE5', - 0x12b: 'BTN_BASE6', - 0x12f: 'BTN_DEAD', - 0x130: 'BTN_A', # alias: BTN_GAMEPAD - 0x131: 'BTN_B', - 0x132: 'BTN_C', - 0x133: 'BTN_X', - 0x134: 'BTN_Y', - 0x135: 'BTN_Z', - 0x136: 'BTN_TL', - 0x137: 'BTN_TR', - 0x138: 'BTN_TL2', - 0x139: 'BTN_TR2', - 0x13a: 'BTN_SELECT', - 0x13b: 'BTN_START', - 0x13c: 'BTN_MODE', - 0x13d: 'BTN_THUMBL', - 0x13e: 'BTN_THUMBR', - 0x140: 'BTN_TOOL_PEN', # alias: BTN_DIGI - 0x141: 'BTN_TOOL_RUBBER', - 0x142: 'BTN_TOOL_BRUSH', - 0x143: 'BTN_TOOL_PENCIL', - 0x144: 'BTN_TOOL_AIRBRUSH', - 0x145: 'BTN_TOOL_FINGER', - 0x146: 'BTN_TOOL_MOUSE', - 0x147: 'BTN_TOOL_LENS', - 0x148: 'BTN_TOOL_QUINTTAP', - 0x149: 'BTN_STYLUS3', - 0x14a: 'BTN_TOUCH', - 0x14b: 'BTN_STYLUS', - 0x14c: 'BTN_STYLUS2', - 0x14d: 'BTN_TOOL_DOUBLETAP', - 0x14e: 'BTN_TOOL_TRIPLETAP', - 0x14f: 'BTN_TOOL_QUADTAP', - 0x150: 'BTN_GEAR_DOWN', # alias: BTN_WHEEL - 0x151: 'BTN_GEAR_UP', - 0x160: 'KEY_OK', - 0x161: 'KEY_SELECT', - 0x162: 'KEY_GOTO', - 0x163: 'KEY_CLEAR', - 0x164: 'KEY_POWER2', - 0x165: 'KEY_OPTION', - 0x166: 'KEY_INFO', - 0x167: 'KEY_TIME', - 0x168: 'KEY_VENDOR', - 0x169: 'KEY_ARCHIVE', - 0x16a: 'KEY_PROGRAM', - 0x16b: 'KEY_CHANNEL', - 0x16c: 'KEY_FAVORITES', - 0x16d: 'KEY_EPG', - 0x16e: 'KEY_PVR', - 0x16f: 'KEY_MHP', - 0x170: 'KEY_LANGUAGE', - 0x171: 'KEY_TITLE', - 0x172: 'KEY_SUBTITLE', - 0x173: 'KEY_ANGLE', - 0x174: 'KEY_ZOOM', - 0x175: 'KEY_MODE', - 0x176: 'KEY_KEYBOARD', - 0x177: 'KEY_SCREEN', - 0x178: 'KEY_PC', - 0x179: 'KEY_TV', - 0x17a: 'KEY_TV2', - 0x17b: 'KEY_VCR', - 0x17c: 'KEY_VCR2', - 0x17d: 'KEY_SAT', - 0x17e: 'KEY_SAT2', - 0x17f: 'KEY_CD', - 0x180: 'KEY_TAPE', - 0x181: 'KEY_RADIO', - 0x182: 'KEY_TUNER', - 0x183: 'KEY_PLAYER', - 0x184: 'KEY_TEXT', - 0x185: 'KEY_DVD', - 0x186: 'KEY_AUX', - 0x187: 'KEY_MP3', - 0x188: 'KEY_AUDIO', - 0x189: 'KEY_VIDEO', - 0x18a: 'KEY_DIRECTORY', - 0x18b: 'KEY_LIST', - 0x18c: 'KEY_MEMO', - 0x18d: 'KEY_CALENDAR', - 0x18e: 'KEY_RED', - 0x18f: 'KEY_GREEN', - 0x190: 'KEY_YELLOW', - 0x191: 'KEY_BLUE', - 0x192: 'KEY_CHANNELUP', - 0x193: 'KEY_CHANNELDOWN', - 0x194: 'KEY_FIRST', - 0x195: 'KEY_LAST', - 0x196: 'KEY_AB', - 0x197: 'KEY_NEXT', - 0x198: 'KEY_RESTART', - 0x199: 'KEY_SLOW', - 0x19a: 'KEY_SHUFFLE', - 0x19b: 'KEY_BREAK', - 0x19c: 'KEY_PREVIOUS', - 0x19d: 'KEY_DIGITS', - 0x19e: 'KEY_TEEN', - 0x19f: 'KEY_TWEN', - 0x1a0: 'KEY_VIDEOPHONE', - 0x1a1: 'KEY_GAMES', - 0x1a2: 'KEY_ZOOMIN', - 0x1a3: 'KEY_ZOOMOUT', - 0x1a4: 'KEY_ZOOMRESET', - 0x1a5: 'KEY_WORDPROCESSOR', - 0x1a6: 'KEY_EDITOR', - 0x1a7: 'KEY_SPREADSHEET', - 0x1a8: 'KEY_GRAPHICSEDITOR', - 0x1a9: 'KEY_PRESENTATION', - 0x1aa: 'KEY_DATABASE', - 0x1ab: 'KEY_NEWS', - 0x1ac: 'KEY_VOICEMAIL', - 0x1ad: 'KEY_ADDRESSBOOK', - 0x1ae: 'KEY_MESSENGER', - 0x1af: 'KEY_DISPLAYTOGGLE', - 0x1b0: 'KEY_SPELLCHECK', - 0x1b1: 'KEY_LOGOFF', - 0x1b2: 'KEY_DOLLAR', - 0x1b3: 'KEY_EURO', - 0x1b4: 'KEY_FRAMEBACK', - 0x1b5: 'KEY_FRAMEFORWARD', - 0x1b6: 'KEY_CONTEXT_MENU', - 0x1b7: 'KEY_MEDIA_REPEAT', - 0x1b8: 'KEY_10CHANNELSUP', - 0x1b9: 'KEY_10CHANNELSDOWN', - 0x1ba: 'KEY_IMAGES', - 0x1c0: 'KEY_DEL_EOL', - 0x1c1: 
'KEY_DEL_EOS', - 0x1c2: 'KEY_INS_LINE', - 0x1c3: 'KEY_DEL_LINE', - 0x1d0: 'KEY_FN', - 0x1d1: 'KEY_FN_ESC', - 0x1d2: 'KEY_FN_F1', - 0x1d3: 'KEY_FN_F2', - 0x1d4: 'KEY_FN_F3', - 0x1d5: 'KEY_FN_F4', - 0x1d6: 'KEY_FN_F5', - 0x1d7: 'KEY_FN_F6', - 0x1d8: 'KEY_FN_F7', - 0x1d9: 'KEY_FN_F8', - 0x1da: 'KEY_FN_F9', - 0x1db: 'KEY_FN_F10', - 0x1dc: 'KEY_FN_F11', - 0x1dd: 'KEY_FN_F12', - 0x1de: 'KEY_FN_1', - 0x1df: 'KEY_FN_2', - 0x1e0: 'KEY_FN_D', - 0x1e1: 'KEY_FN_E', - 0x1e2: 'KEY_FN_F', - 0x1e3: 'KEY_FN_S', - 0x1e4: 'KEY_FN_B', - 0x1f1: 'KEY_BRL_DOT1', - 0x1f2: 'KEY_BRL_DOT2', - 0x1f3: 'KEY_BRL_DOT3', - 0x1f4: 'KEY_BRL_DOT4', - 0x1f5: 'KEY_BRL_DOT5', - 0x1f6: 'KEY_BRL_DOT6', - 0x1f7: 'KEY_BRL_DOT7', - 0x1f8: 'KEY_BRL_DOT8', - 0x1f9: 'KEY_BRL_DOT9', - 0x1fa: 'KEY_BRL_DOT10', - 0x200: 'KEY_NUMERIC_0', - 0x201: 'KEY_NUMERIC_1', - 0x202: 'KEY_NUMERIC_2', - 0x203: 'KEY_NUMERIC_3', - 0x204: 'KEY_NUMERIC_4', - 0x205: 'KEY_NUMERIC_5', - 0x206: 'KEY_NUMERIC_6', - 0x207: 'KEY_NUMERIC_7', - 0x208: 'KEY_NUMERIC_8', - 0x209: 'KEY_NUMERIC_9', - 0x20a: 'KEY_NUMERIC_STAR', - 0x20b: 'KEY_NUMERIC_POUND', - 0x210: 'KEY_CAMERA_FOCUS', - 0x211: 'KEY_WPS_BUTTON', - 0x212: 'KEY_TOUCHPAD_TOGGLE', - 0x213: 'KEY_TOUCHPAD_ON', - 0x214: 'KEY_TOUCHPAD_OFF', - 0x215: 'KEY_CAMERA_ZOOMIN', - 0x216: 'KEY_CAMERA_ZOOMOUT', - 0x217: 'KEY_CAMERA_UP', - 0x218: 'KEY_CAMERA_DOWN', - 0x219: 'KEY_CAMERA_LEFT', - 0x21a: 'KEY_CAMERA_RIGHT', - 0x21b: 'KEY_ATTENDANT_ON', - 0x21c: 'KEY_ATTENDANT_OFF', - 0x21d: 'KEY_ATTENDANT_TOGGLE', - 0x21e: 'KEY_LIGHTS_TOGGLE', - 0x231: 'KEY_ROTATE_LOCK_TOGGLE', - 0x240: 'KEY_BUTTONCONFIG', - 0x243: 'KEY_CONTROLPANEL', - 0x246: 'KEY_VOICECOMMAND', - 0x250: 'KEY_BRIGHTNESS_MIN', - 0x278: 'KEY_ONSCREEN_KEYBOARD', - 0x2c0: 'BTN_TRIGGER_HAPPY1', # alias: BTN_TRIGGER_HAPPY - 0x2c1: 'BTN_TRIGGER_HAPPY2', - 0x2c2: 'BTN_TRIGGER_HAPPY3', - 0x2c3: 'BTN_TRIGGER_HAPPY4', - 0x2c4: 'BTN_TRIGGER_HAPPY5', - 0x2c5: 'BTN_TRIGGER_HAPPY6', - 0x2c6: 'BTN_TRIGGER_HAPPY7', - 0x2c7: 'BTN_TRIGGER_HAPPY8', - 0x2c8: 'BTN_TRIGGER_HAPPY9', - 0x2c9: 'BTN_TRIGGER_HAPPY10', - 0x2ca: 'BTN_TRIGGER_HAPPY11', - 0x2cb: 'BTN_TRIGGER_HAPPY12', - 0x2cc: 'BTN_TRIGGER_HAPPY13', - 0x2cd: 'BTN_TRIGGER_HAPPY14', - 0x2ce: 'BTN_TRIGGER_HAPPY15', - 0x2cf: 'BTN_TRIGGER_HAPPY16', - 0x2d0: 'BTN_TRIGGER_HAPPY17', - 0x2d1: 'BTN_TRIGGER_HAPPY18', - 0x2d2: 'BTN_TRIGGER_HAPPY19', - 0x2d3: 'BTN_TRIGGER_HAPPY20', - 0x2d4: 'BTN_TRIGGER_HAPPY21', - 0x2d5: 'BTN_TRIGGER_HAPPY22', - 0x2d6: 'BTN_TRIGGER_HAPPY23', - 0x2d7: 'BTN_TRIGGER_HAPPY24', - 0x2d8: 'BTN_TRIGGER_HAPPY25', - 0x2d9: 'BTN_TRIGGER_HAPPY26', - 0x2da: 'BTN_TRIGGER_HAPPY27', - 0x2db: 'BTN_TRIGGER_HAPPY28', - 0x2dc: 'BTN_TRIGGER_HAPPY29', - 0x2dd: 'BTN_TRIGGER_HAPPY30', - 0x2de: 'BTN_TRIGGER_HAPPY31', - 0x2df: 'BTN_TRIGGER_HAPPY32', - 0x2e0: 'BTN_TRIGGER_HAPPY33', - 0x2e1: 'BTN_TRIGGER_HAPPY34', - 0x2e2: 'BTN_TRIGGER_HAPPY35', - 0x2e3: 'BTN_TRIGGER_HAPPY36', - 0x2e4: 'BTN_TRIGGER_HAPPY37', - 0x2e5: 'BTN_TRIGGER_HAPPY38', - 0x2e6: 'BTN_TRIGGER_HAPPY39', - 0x2e7: 'BTN_TRIGGER_HAPPY40' + 0: "KEY_RESERVED", + 1: "KEY_ESC", + 2: "KEY_1", + 3: "KEY_2", + 4: "KEY_3", + 5: "KEY_4", + 6: "KEY_5", + 7: "KEY_6", + 8: "KEY_7", + 9: "KEY_8", + 10: "KEY_9", + 11: "KEY_0", + 12: "KEY_MINUS", + 13: "KEY_EQUAL", + 14: "KEY_BACKSPACE", + 15: "KEY_TAB", + 16: "KEY_Q", + 17: "KEY_W", + 18: "KEY_E", + 19: "KEY_R", + 20: "KEY_T", + 21: "KEY_Y", + 22: "KEY_U", + 23: "KEY_I", + 24: "KEY_O", + 25: "KEY_P", + 26: "KEY_LEFTBRACE", + 27: "KEY_RIGHTBRACE", + 28: "KEY_ENTER", + 29: "KEY_LEFTCTRL", + 30: "KEY_A", + 31: "KEY_S", + 32: "KEY_D", 
+ 33: "KEY_F", + 34: "KEY_G", + 35: "KEY_H", + 36: "KEY_J", + 37: "KEY_K", + 38: "KEY_L", + 39: "KEY_SEMICOLON", + 40: "KEY_APOSTROPHE", + 41: "KEY_GRAVE", + 42: "KEY_LEFTSHIFT", + 43: "KEY_BACKSLASH", + 44: "KEY_Z", + 45: "KEY_X", + 46: "KEY_C", + 47: "KEY_V", + 48: "KEY_B", + 49: "KEY_N", + 50: "KEY_M", + 51: "KEY_COMMA", + 52: "KEY_DOT", + 53: "KEY_SLASH", + 54: "KEY_RIGHTSHIFT", + 55: "KEY_KPASTERISK", + 56: "KEY_LEFTALT", + 57: "KEY_SPACE", + 58: "KEY_CAPSLOCK", + 59: "KEY_F1", + 60: "KEY_F2", + 61: "KEY_F3", + 62: "KEY_F4", + 63: "KEY_F5", + 64: "KEY_F6", + 65: "KEY_F7", + 66: "KEY_F8", + 67: "KEY_F9", + 68: "KEY_F10", + 69: "KEY_NUMLOCK", + 70: "KEY_SCROLLLOCK", + 71: "KEY_KP7", + 72: "KEY_KP8", + 73: "KEY_KP9", + 74: "KEY_KPMINUS", + 75: "KEY_KP4", + 76: "KEY_KP5", + 77: "KEY_KP6", + 78: "KEY_KPPLUS", + 79: "KEY_KP1", + 80: "KEY_KP2", + 81: "KEY_KP3", + 82: "KEY_KP0", + 83: "KEY_KPDOT", + 85: "KEY_ZENKAKUHANKAKU", + 86: "KEY_102ND", + 87: "KEY_F11", + 88: "KEY_F12", + 89: "KEY_RO", + 90: "KEY_KATAKANA", + 91: "KEY_HIRAGANA", + 92: "KEY_HENKAN", + 93: "KEY_KATAKANAHIRAGANA", + 94: "KEY_MUHENKAN", + 95: "KEY_KPJPCOMMA", + 96: "KEY_KPENTER", + 97: "KEY_RIGHTCTRL", + 98: "KEY_KPSLASH", + 99: "KEY_SYSRQ", + 100: "KEY_RIGHTALT", + 101: "KEY_LINEFEED", + 102: "KEY_HOME", + 103: "KEY_UP", + 104: "KEY_PAGEUP", + 105: "KEY_LEFT", + 106: "KEY_RIGHT", + 107: "KEY_END", + 108: "KEY_DOWN", + 109: "KEY_PAGEDOWN", + 110: "KEY_INSERT", + 111: "KEY_DELETE", + 112: "KEY_MACRO", + 113: "KEY_MUTE", + 114: "KEY_VOLUMEDOWN", + 115: "KEY_VOLUMEUP", + 116: "KEY_POWER", + 117: "KEY_KPEQUAL", + 118: "KEY_KPPLUSMINUS", + 119: "KEY_PAUSE", + 120: "KEY_SCALE", + 121: "KEY_KPCOMMA", + 122: "KEY_HANGEUL", + 123: "KEY_HANJA", + 124: "KEY_YEN", + 125: "KEY_LEFTMETA", + 126: "KEY_RIGHTMETA", + 127: "KEY_COMPOSE", + 128: "KEY_STOP", + 129: "KEY_AGAIN", + 130: "KEY_PROPS", + 131: "KEY_UNDO", + 132: "KEY_FRONT", + 133: "KEY_COPY", + 134: "KEY_OPEN", + 135: "KEY_PASTE", + 136: "KEY_FIND", + 137: "KEY_CUT", + 138: "KEY_HELP", + 139: "KEY_MENU", + 140: "KEY_CALC", + 141: "KEY_SETUP", + 142: "KEY_SLEEP", + 143: "KEY_WAKEUP", + 144: "KEY_FILE", + 145: "KEY_SENDFILE", + 146: "KEY_DELETEFILE", + 147: "KEY_XFER", + 148: "KEY_PROG1", + 149: "KEY_PROG2", + 150: "KEY_WWW", + 151: "KEY_MSDOS", + 152: "KEY_SCREENLOCK", # alias: KEY_COFFEE + 153: "KEY_DIRECTION", + 154: "KEY_CYCLEWINDOWS", + 155: "KEY_MAIL", + 156: "KEY_BOOKMARKS", + 157: "KEY_COMPUTER", + 158: "KEY_BACK", + 159: "KEY_FORWARD", + 160: "KEY_CLOSECD", + 161: "KEY_EJECTCD", + 162: "KEY_EJECTCLOSECD", + 163: "KEY_NEXTSONG", + 164: "KEY_PLAYPAUSE", + 165: "KEY_PREVIOUSSONG", + 166: "KEY_STOPCD", + 167: "KEY_RECORD", + 168: "KEY_REWIND", + 169: "KEY_PHONE", + 170: "KEY_ISO", + 171: "KEY_CONFIG", + 172: "KEY_HOMEPAGE", + 173: "KEY_REFRESH", + 174: "KEY_EXIT", + 175: "KEY_MOVE", + 176: "KEY_EDIT", + 177: "KEY_SCROLLUP", + 178: "KEY_SCROLLDOWN", + 179: "KEY_KPLEFTPAREN", + 180: "KEY_KPRIGHTPAREN", + 181: "KEY_NEW", + 182: "KEY_REDO", + 183: "KEY_F13", + 184: "KEY_F14", + 185: "KEY_F15", + 186: "KEY_F16", + 187: "KEY_F17", + 188: "KEY_F18", + 189: "KEY_F19", + 190: "KEY_F20", + 191: "KEY_F21", + 192: "KEY_F22", + 193: "KEY_F23", + 194: "KEY_F24", + 200: "KEY_PLAYCD", + 201: "KEY_PAUSECD", + 202: "KEY_PROG3", + 203: "KEY_PROG4", + 204: "KEY_DASHBOARD", + 205: "KEY_SUSPEND", + 206: "KEY_CLOSE", + 207: "KEY_PLAY", + 208: "KEY_FASTFORWARD", + 209: "KEY_BASSBOOST", + 210: "KEY_PRINT", + 211: "KEY_HP", + 212: "KEY_CAMERA", + 213: "KEY_SOUND", + 214: "KEY_QUESTION", + 215: 
"KEY_EMAIL", + 216: "KEY_CHAT", + 217: "KEY_SEARCH", + 218: "KEY_CONNECT", + 219: "KEY_FINANCE", + 220: "KEY_SPORT", + 221: "KEY_SHOP", + 222: "KEY_ALTERASE", + 223: "KEY_CANCEL", + 224: "KEY_BRIGHTNESSDOWN", + 225: "KEY_BRIGHTNESSUP", + 226: "KEY_MEDIA", + 227: "KEY_SWITCHVIDEOMODE", + 228: "KEY_KBDILLUMTOGGLE", + 229: "KEY_KBDILLUMDOWN", + 230: "KEY_KBDILLUMUP", + 231: "KEY_SEND", + 232: "KEY_REPLY", + 233: "KEY_FORWARDMAIL", + 234: "KEY_SAVE", + 235: "KEY_DOCUMENTS", + 236: "KEY_BATTERY", + 237: "KEY_BLUETOOTH", + 238: "KEY_WLAN", + 239: "KEY_UWB", + 240: "KEY_UNKNOWN", + 241: "KEY_VIDEO_NEXT", + 242: "KEY_VIDEO_PREV", + 243: "KEY_BRIGHTNESS_CYCLE", + 244: "KEY_BRIGHTNESS_ZERO", + 245: "KEY_DISPLAY_OFF", + 246: "KEY_WIMAX", + 247: "KEY_RFKILL", + 248: "KEY_MICMUTE", + 0x100: "BTN_0", # alias: BTN_MISC + 0x101: "BTN_1", + 0x102: "BTN_2", + 0x103: "BTN_3", + 0x104: "BTN_4", + 0x105: "BTN_5", + 0x106: "BTN_6", + 0x107: "BTN_7", + 0x108: "BTN_8", + 0x109: "BTN_9", + 0x110: "BTN_LEFT", # alias: BTN_MOUSE + 0x111: "BTN_RIGHT", + 0x112: "BTN_MIDDLE", + 0x113: "BTN_SIDE", + 0x114: "BTN_EXTRA", + 0x115: "BTN_FORWARD", + 0x116: "BTN_BACK", + 0x117: "BTN_TASK", + 0x120: "BTN_TRIGGER", # alias: BTN_JOYSTICK + 0x121: "BTN_THUMB", + 0x122: "BTN_THUMB2", + 0x123: "BTN_TOP", + 0x124: "BTN_TOP2", + 0x125: "BTN_PINKIE", + 0x126: "BTN_BASE", + 0x127: "BTN_BASE2", + 0x128: "BTN_BASE3", + 0x129: "BTN_BASE4", + 0x12A: "BTN_BASE5", + 0x12B: "BTN_BASE6", + 0x12F: "BTN_DEAD", + 0x130: "BTN_A", # alias: BTN_GAMEPAD + 0x131: "BTN_B", + 0x132: "BTN_C", + 0x133: "BTN_X", + 0x134: "BTN_Y", + 0x135: "BTN_Z", + 0x136: "BTN_TL", + 0x137: "BTN_TR", + 0x138: "BTN_TL2", + 0x139: "BTN_TR2", + 0x13A: "BTN_SELECT", + 0x13B: "BTN_START", + 0x13C: "BTN_MODE", + 0x13D: "BTN_THUMBL", + 0x13E: "BTN_THUMBR", + 0x140: "BTN_TOOL_PEN", # alias: BTN_DIGI + 0x141: "BTN_TOOL_RUBBER", + 0x142: "BTN_TOOL_BRUSH", + 0x143: "BTN_TOOL_PENCIL", + 0x144: "BTN_TOOL_AIRBRUSH", + 0x145: "BTN_TOOL_FINGER", + 0x146: "BTN_TOOL_MOUSE", + 0x147: "BTN_TOOL_LENS", + 0x148: "BTN_TOOL_QUINTTAP", + 0x149: "BTN_STYLUS3", + 0x14A: "BTN_TOUCH", + 0x14B: "BTN_STYLUS", + 0x14C: "BTN_STYLUS2", + 0x14D: "BTN_TOOL_DOUBLETAP", + 0x14E: "BTN_TOOL_TRIPLETAP", + 0x14F: "BTN_TOOL_QUADTAP", + 0x150: "BTN_GEAR_DOWN", # alias: BTN_WHEEL + 0x151: "BTN_GEAR_UP", + 0x160: "KEY_OK", + 0x161: "KEY_SELECT", + 0x162: "KEY_GOTO", + 0x163: "KEY_CLEAR", + 0x164: "KEY_POWER2", + 0x165: "KEY_OPTION", + 0x166: "KEY_INFO", + 0x167: "KEY_TIME", + 0x168: "KEY_VENDOR", + 0x169: "KEY_ARCHIVE", + 0x16A: "KEY_PROGRAM", + 0x16B: "KEY_CHANNEL", + 0x16C: "KEY_FAVORITES", + 0x16D: "KEY_EPG", + 0x16E: "KEY_PVR", + 0x16F: "KEY_MHP", + 0x170: "KEY_LANGUAGE", + 0x171: "KEY_TITLE", + 0x172: "KEY_SUBTITLE", + 0x173: "KEY_ANGLE", + 0x174: "KEY_ZOOM", + 0x175: "KEY_MODE", + 0x176: "KEY_KEYBOARD", + 0x177: "KEY_SCREEN", + 0x178: "KEY_PC", + 0x179: "KEY_TV", + 0x17A: "KEY_TV2", + 0x17B: "KEY_VCR", + 0x17C: "KEY_VCR2", + 0x17D: "KEY_SAT", + 0x17E: "KEY_SAT2", + 0x17F: "KEY_CD", + 0x180: "KEY_TAPE", + 0x181: "KEY_RADIO", + 0x182: "KEY_TUNER", + 0x183: "KEY_PLAYER", + 0x184: "KEY_TEXT", + 0x185: "KEY_DVD", + 0x186: "KEY_AUX", + 0x187: "KEY_MP3", + 0x188: "KEY_AUDIO", + 0x189: "KEY_VIDEO", + 0x18A: "KEY_DIRECTORY", + 0x18B: "KEY_LIST", + 0x18C: "KEY_MEMO", + 0x18D: "KEY_CALENDAR", + 0x18E: "KEY_RED", + 0x18F: "KEY_GREEN", + 0x190: "KEY_YELLOW", + 0x191: "KEY_BLUE", + 0x192: "KEY_CHANNELUP", + 0x193: "KEY_CHANNELDOWN", + 0x194: "KEY_FIRST", + 0x195: "KEY_LAST", + 0x196: "KEY_AB", + 0x197: "KEY_NEXT", + 0x198: 
"KEY_RESTART", + 0x199: "KEY_SLOW", + 0x19A: "KEY_SHUFFLE", + 0x19B: "KEY_BREAK", + 0x19C: "KEY_PREVIOUS", + 0x19D: "KEY_DIGITS", + 0x19E: "KEY_TEEN", + 0x19F: "KEY_TWEN", + 0x1A0: "KEY_VIDEOPHONE", + 0x1A1: "KEY_GAMES", + 0x1A2: "KEY_ZOOMIN", + 0x1A3: "KEY_ZOOMOUT", + 0x1A4: "KEY_ZOOMRESET", + 0x1A5: "KEY_WORDPROCESSOR", + 0x1A6: "KEY_EDITOR", + 0x1A7: "KEY_SPREADSHEET", + 0x1A8: "KEY_GRAPHICSEDITOR", + 0x1A9: "KEY_PRESENTATION", + 0x1AA: "KEY_DATABASE", + 0x1AB: "KEY_NEWS", + 0x1AC: "KEY_VOICEMAIL", + 0x1AD: "KEY_ADDRESSBOOK", + 0x1AE: "KEY_MESSENGER", + 0x1AF: "KEY_DISPLAYTOGGLE", + 0x1B0: "KEY_SPELLCHECK", + 0x1B1: "KEY_LOGOFF", + 0x1B2: "KEY_DOLLAR", + 0x1B3: "KEY_EURO", + 0x1B4: "KEY_FRAMEBACK", + 0x1B5: "KEY_FRAMEFORWARD", + 0x1B6: "KEY_CONTEXT_MENU", + 0x1B7: "KEY_MEDIA_REPEAT", + 0x1B8: "KEY_10CHANNELSUP", + 0x1B9: "KEY_10CHANNELSDOWN", + 0x1BA: "KEY_IMAGES", + 0x1C0: "KEY_DEL_EOL", + 0x1C1: "KEY_DEL_EOS", + 0x1C2: "KEY_INS_LINE", + 0x1C3: "KEY_DEL_LINE", + 0x1D0: "KEY_FN", + 0x1D1: "KEY_FN_ESC", + 0x1D2: "KEY_FN_F1", + 0x1D3: "KEY_FN_F2", + 0x1D4: "KEY_FN_F3", + 0x1D5: "KEY_FN_F4", + 0x1D6: "KEY_FN_F5", + 0x1D7: "KEY_FN_F6", + 0x1D8: "KEY_FN_F7", + 0x1D9: "KEY_FN_F8", + 0x1DA: "KEY_FN_F9", + 0x1DB: "KEY_FN_F10", + 0x1DC: "KEY_FN_F11", + 0x1DD: "KEY_FN_F12", + 0x1DE: "KEY_FN_1", + 0x1DF: "KEY_FN_2", + 0x1E0: "KEY_FN_D", + 0x1E1: "KEY_FN_E", + 0x1E2: "KEY_FN_F", + 0x1E3: "KEY_FN_S", + 0x1E4: "KEY_FN_B", + 0x1F1: "KEY_BRL_DOT1", + 0x1F2: "KEY_BRL_DOT2", + 0x1F3: "KEY_BRL_DOT3", + 0x1F4: "KEY_BRL_DOT4", + 0x1F5: "KEY_BRL_DOT5", + 0x1F6: "KEY_BRL_DOT6", + 0x1F7: "KEY_BRL_DOT7", + 0x1F8: "KEY_BRL_DOT8", + 0x1F9: "KEY_BRL_DOT9", + 0x1FA: "KEY_BRL_DOT10", + 0x200: "KEY_NUMERIC_0", + 0x201: "KEY_NUMERIC_1", + 0x202: "KEY_NUMERIC_2", + 0x203: "KEY_NUMERIC_3", + 0x204: "KEY_NUMERIC_4", + 0x205: "KEY_NUMERIC_5", + 0x206: "KEY_NUMERIC_6", + 0x207: "KEY_NUMERIC_7", + 0x208: "KEY_NUMERIC_8", + 0x209: "KEY_NUMERIC_9", + 0x20A: "KEY_NUMERIC_STAR", + 0x20B: "KEY_NUMERIC_POUND", + 0x210: "KEY_CAMERA_FOCUS", + 0x211: "KEY_WPS_BUTTON", + 0x212: "KEY_TOUCHPAD_TOGGLE", + 0x213: "KEY_TOUCHPAD_ON", + 0x214: "KEY_TOUCHPAD_OFF", + 0x215: "KEY_CAMERA_ZOOMIN", + 0x216: "KEY_CAMERA_ZOOMOUT", + 0x217: "KEY_CAMERA_UP", + 0x218: "KEY_CAMERA_DOWN", + 0x219: "KEY_CAMERA_LEFT", + 0x21A: "KEY_CAMERA_RIGHT", + 0x21B: "KEY_ATTENDANT_ON", + 0x21C: "KEY_ATTENDANT_OFF", + 0x21D: "KEY_ATTENDANT_TOGGLE", + 0x21E: "KEY_LIGHTS_TOGGLE", + 0x231: "KEY_ROTATE_LOCK_TOGGLE", + 0x240: "KEY_BUTTONCONFIG", + 0x243: "KEY_CONTROLPANEL", + 0x246: "KEY_VOICECOMMAND", + 0x250: "KEY_BRIGHTNESS_MIN", + 0x278: "KEY_ONSCREEN_KEYBOARD", + 0x2C0: "BTN_TRIGGER_HAPPY1", # alias: BTN_TRIGGER_HAPPY + 0x2C1: "BTN_TRIGGER_HAPPY2", + 0x2C2: "BTN_TRIGGER_HAPPY3", + 0x2C3: "BTN_TRIGGER_HAPPY4", + 0x2C4: "BTN_TRIGGER_HAPPY5", + 0x2C5: "BTN_TRIGGER_HAPPY6", + 0x2C6: "BTN_TRIGGER_HAPPY7", + 0x2C7: "BTN_TRIGGER_HAPPY8", + 0x2C8: "BTN_TRIGGER_HAPPY9", + 0x2C9: "BTN_TRIGGER_HAPPY10", + 0x2CA: "BTN_TRIGGER_HAPPY11", + 0x2CB: "BTN_TRIGGER_HAPPY12", + 0x2CC: "BTN_TRIGGER_HAPPY13", + 0x2CD: "BTN_TRIGGER_HAPPY14", + 0x2CE: "BTN_TRIGGER_HAPPY15", + 0x2CF: "BTN_TRIGGER_HAPPY16", + 0x2D0: "BTN_TRIGGER_HAPPY17", + 0x2D1: "BTN_TRIGGER_HAPPY18", + 0x2D2: "BTN_TRIGGER_HAPPY19", + 0x2D3: "BTN_TRIGGER_HAPPY20", + 0x2D4: "BTN_TRIGGER_HAPPY21", + 0x2D5: "BTN_TRIGGER_HAPPY22", + 0x2D6: "BTN_TRIGGER_HAPPY23", + 0x2D7: "BTN_TRIGGER_HAPPY24", + 0x2D8: "BTN_TRIGGER_HAPPY25", + 0x2D9: "BTN_TRIGGER_HAPPY26", + 0x2DA: "BTN_TRIGGER_HAPPY27", + 0x2DB: "BTN_TRIGGER_HAPPY28", + 0x2DC: 
"BTN_TRIGGER_HAPPY29", + 0x2DD: "BTN_TRIGGER_HAPPY30", + 0x2DE: "BTN_TRIGGER_HAPPY31", + 0x2DF: "BTN_TRIGGER_HAPPY32", + 0x2E0: "BTN_TRIGGER_HAPPY33", + 0x2E1: "BTN_TRIGGER_HAPPY34", + 0x2E2: "BTN_TRIGGER_HAPPY35", + 0x2E3: "BTN_TRIGGER_HAPPY36", + 0x2E4: "BTN_TRIGGER_HAPPY37", + 0x2E5: "BTN_TRIGGER_HAPPY38", + 0x2E6: "BTN_TRIGGER_HAPPY39", + 0x2E7: "BTN_TRIGGER_HAPPY40", } EV_REL_CODES = { - 0x00: 'REL_X', - 0x01: 'REL_Y', - 0x02: 'REL_Z', - 0x03: 'REL_RX', - 0x04: 'REL_RY', - 0x05: 'REL_RZ', - 0x06: 'REL_HWHEEL', - 0x07: 'REL_DIAL', - 0x08: 'REL_WHEEL', - 0x09: 'REL_MISC' + 0x00: "REL_X", + 0x01: "REL_Y", + 0x02: "REL_Z", + 0x03: "REL_RX", + 0x04: "REL_RY", + 0x05: "REL_RZ", + 0x06: "REL_HWHEEL", + 0x07: "REL_DIAL", + 0x08: "REL_WHEEL", + 0x09: "REL_MISC", } EV_ABS_CODES = { - 0x00: 'ABS_X', - 0x01: 'ABS_Y', - 0x02: 'ABS_Z', - 0x03: 'ABS_RX', - 0x04: 'ABS_RY', - 0x05: 'ABS_RZ', - 0x06: 'ABS_THROTTLE', - 0x07: 'ABS_RUDDER', - 0x08: 'ABS_WHEEL', - 0x09: 'ABS_GAS', - 0x0a: 'ABS_BRAKE', - 0x10: 'ABS_HAT0X', - 0x11: 'ABS_HAT0Y', - 0x12: 'ABS_HAT1X', - 0x13: 'ABS_HAT1Y', - 0x14: 'ABS_HAT2X', - 0x15: 'ABS_HAT2Y', - 0x16: 'ABS_HAT3X', - 0x17: 'ABS_HAT3Y', - 0x18: 'ABS_PRESSURE', - 0x19: 'ABS_DISTANCE', - 0x1a: 'ABS_TILT_X', - 0x1b: 'ABS_TILT_Y', - 0x1c: 'ABS_TOOL_WIDTH', - 0x20: 'ABS_VOLUME', - 0x28: 'ABS_MISC', - 0x2f: 'ABS_MT_SLOT', - 0x30: 'ABS_MT_TOUCH_MAJOR', - 0x31: 'ABS_MT_TOUCH_MINOR', - 0x32: 'ABS_MT_WIDTH_MAJOR', - 0x33: 'ABS_MT_WIDTH_MINOR', - 0x34: 'ABS_MT_ORIENTATION', - 0x35: 'ABS_MT_POSITION_X', - 0x36: 'ABS_MT_POSITION_Y', - 0x37: 'ABS_MT_TOOL_TYPE', - 0x38: 'ABS_MT_BLOB_ID', - 0x39: 'ABS_MT_TRACKING_ID', - 0x3a: 'ABS_MT_PRESSURE', - 0x3b: 'ABS_MT_DISTANCE', - 0x3c: 'ABS_MT_TOOL_X', - 0x3d: 'ABS_MT_TOOL_Y' + 0x00: "ABS_X", + 0x01: "ABS_Y", + 0x02: "ABS_Z", + 0x03: "ABS_RX", + 0x04: "ABS_RY", + 0x05: "ABS_RZ", + 0x06: "ABS_THROTTLE", + 0x07: "ABS_RUDDER", + 0x08: "ABS_WHEEL", + 0x09: "ABS_GAS", + 0x0A: "ABS_BRAKE", + 0x10: "ABS_HAT0X", + 0x11: "ABS_HAT0Y", + 0x12: "ABS_HAT1X", + 0x13: "ABS_HAT1Y", + 0x14: "ABS_HAT2X", + 0x15: "ABS_HAT2Y", + 0x16: "ABS_HAT3X", + 0x17: "ABS_HAT3Y", + 0x18: "ABS_PRESSURE", + 0x19: "ABS_DISTANCE", + 0x1A: "ABS_TILT_X", + 0x1B: "ABS_TILT_Y", + 0x1C: "ABS_TOOL_WIDTH", + 0x20: "ABS_VOLUME", + 0x28: "ABS_MISC", + 0x2F: "ABS_MT_SLOT", + 0x30: "ABS_MT_TOUCH_MAJOR", + 0x31: "ABS_MT_TOUCH_MINOR", + 0x32: "ABS_MT_WIDTH_MAJOR", + 0x33: "ABS_MT_WIDTH_MINOR", + 0x34: "ABS_MT_ORIENTATION", + 0x35: "ABS_MT_POSITION_X", + 0x36: "ABS_MT_POSITION_Y", + 0x37: "ABS_MT_TOOL_TYPE", + 0x38: "ABS_MT_BLOB_ID", + 0x39: "ABS_MT_TRACKING_ID", + 0x3A: "ABS_MT_PRESSURE", + 0x3B: "ABS_MT_DISTANCE", + 0x3C: "ABS_MT_TOOL_X", + 0x3D: "ABS_MT_TOOL_Y", } EV_MSC_CODES = { - 0x00: 'MSC_SERIAL', - 0x01: 'MSC_PULSELED', - 0x02: 'MSC_GESTURE', - 0x03: 'MSC_RAW', - 0x04: 'MSC_SCAN', - 0x05: 'MSC_TIMESTAMP' + 0x00: "MSC_SERIAL", + 0x01: "MSC_PULSELED", + 0x02: "MSC_GESTURE", + 0x03: "MSC_RAW", + 0x04: "MSC_SCAN", + 0x05: "MSC_TIMESTAMP", } EV_LED_CODES = { - 0x00: 'LED_NUML', - 0x01: 'LED_CAPSL', - 0x02: 'LED_SCROLLL', - 0x03: 'LED_COMPOSE', - 0x04: 'LED_KANA', - 0x05: 'LED_SLEEP', - 0x06: 'LED_SUSPEND', - 0x07: 'LED_MUTE', - 0x08: 'LED_MISC', - 0x09: 'LED_MAIL', - 0x0a: 'LED_CHARGING' + 0x00: "LED_NUML", + 0x01: "LED_CAPSL", + 0x02: "LED_SCROLLL", + 0x03: "LED_COMPOSE", + 0x04: "LED_KANA", + 0x05: "LED_SLEEP", + 0x06: "LED_SUSPEND", + 0x07: "LED_MUTE", + 0x08: "LED_MISC", + 0x09: "LED_MAIL", + 0x0A: "LED_CHARGING", } -EV_REP_CODES = { - 0x00: 'REP_DELAY', - 0x01: 'REP_PERIOD' -} 
+EV_REP_CODES = {0x00: "REP_DELAY", 0x01: "REP_PERIOD"} EV_CODE_MAP = { 0x00: EV_SYN_CODES, @@ -667,54 +658,54 @@ def error_notify(error, dev=None): 0x03: EV_ABS_CODES, 0x04: EV_MSC_CODES, 0x11: EV_LED_CODES, - 0x14: EV_REP_CODES + 0x14: EV_REP_CODES, } def parse_procfs_entry(): - entry_path = '/proc/bus/input/devices' + entry_path = "/proc/bus/input/devices" devs = [] - with open(entry_path, 'r') as info: + with open(entry_path, "r") as info: dev_info = {} for line in info: - item = line.rstrip().split(' ', 1) - if not len(item[0]): # new section + item = line.rstrip().split(" ", 1) + if not len(item[0]): # new section devs.append(dev_info) dev_info = {} - elif item[0] == 'I:': # id info + elif item[0] == "I:": # id info id_info = {} - for key_pair in item[1].split(' '): - key, val = key_pair.split('=') + for key_pair in item[1].split(" "): + key, val = key_pair.split("=") id_info[key.lower()] = int(val, 16) - dev_info['id'] = id_info - elif item[0] == 'N:': # dev name - val = item[1].split('=', 1)[1] - dev_info['name'] = val.strip('\'"') - elif item[0] == 'P:': # phys path - val = item[1].split('=', 1)[1] - dev_info['phys'] = val - elif item[0] == 'S:': # sysfs path - val = item[1].split('=', 1)[1] - dev_info['sysfs'] = val - elif item[0] == 'U:': # unique id - val = item[1].split('=', 1)[1] - dev_info['uniq'] = val - elif item[0] == 'H:': # list of handlers - val = item[1].split('=', 1)[1] - dev_info['handlers'] = val.split() - elif item[0] == 'B:': # bitmaps info - bitmaps = dev_info.setdefault('bitmaps', {}) - key, val = item[1].split('=', 1) + dev_info["id"] = id_info + elif item[0] == "N:": # dev name + val = item[1].split("=", 1)[1] + dev_info["name"] = val.strip("'\"") + elif item[0] == "P:": # phys path + val = item[1].split("=", 1)[1] + dev_info["phys"] = val + elif item[0] == "S:": # sysfs path + val = item[1].split("=", 1)[1] + dev_info["sysfs"] = val + elif item[0] == "U:": # unique id + val = item[1].split("=", 1)[1] + dev_info["uniq"] = val + elif item[0] == "H:": # list of handlers + val = item[1].split("=", 1)[1] + dev_info["handlers"] = val.split() + elif item[0] == "B:": # bitmaps info + bitmaps = dev_info.setdefault("bitmaps", {}) + key, val = item[1].split("=", 1) bitmaps[key.lower()] = val return devs def open_dev(name): - path = '/dev/input/%s' % name + path = f"/dev/input/{name}" try: fd = os.open(path, os.O_RDONLY) except Exception as details: - msg = 'could not open device: %s' % str(details) + msg = f"could not open device: {str(details)}" error_notify(msg, name) sys.exit(-1) return fd @@ -733,15 +724,15 @@ def get_devs(): for info in parse_procfs_entry(): dev = None is_target = False - for handler in info['handlers']: - if handler.startswith('event'): + for handler in info["handlers"]: + if handler.startswith("event"): dev = handler - elif handler.startswith('mouse'): + elif handler.startswith("mouse"): is_target = True - elif handler.startswith('kbd'): + elif handler.startswith("kbd"): # we assume that a keyboard device must support: # EV_SYN, EV_KEY, EV_LED and EV_REP (mask 0x120003) - hexcode = '0x%s' % info['bitmaps']['ev'] + hexcode = "0x{}".format(info["bitmaps"]["ev"]) flags = int(hexcode, 16) if (~flags & 0x120003) == 0: is_target = True @@ -754,7 +745,8 @@ def get_devs(): def is_graphical_env(): import subprocess - child = subprocess.Popen('runlevel', stdout=subprocess.PIPE) + + child = subprocess.Popen("runlevel", stdout=subprocess.PIPE) try: stdout = child.communicate()[0] runlevel = int(stdout.decode().strip().split()[1]) @@ -768,33 +760,31 @@ def 
is_graphical_env(): if GRAPHICAL: - os.environ['DISPLAY'] = ':0' + os.environ["DISPLAY"] = ":0" USING_GI = True try: import gi - gi.require_version('Gtk', '3.0') - from gi.repository import Gtk - from gi.repository import Gdk - from gi.repository import GObject + + gi.require_version("Gtk", "3.0") + from gi.repository import Gdk, GObject, Gtk except ImportError: USING_GI = False + import gobject as GObject import gtk as Gtk import gtk.gdk as Gdk - import gobject as GObject class DesktopCover(Gtk.Window): - def __init__(self): - super(DesktopCover, self).__init__() + super().__init__() self.notified = False self.set_keep_above(True) - self.connect('destroy', Gtk.main_quit) - self.connect('window-state-event', self.on_fullscreen) + self.connect("destroy", Gtk.main_quit) + self.connect("window-state-event", self.on_fullscreen) - self.label = Gtk.Label('label1') - self.label.set_text('listening input events') + self.label = Gtk.Label("label1") + self.label.set_text("listening input events") self.add(self.label) self.fullscreen() @@ -811,18 +801,20 @@ def on_fullscreen(self, window, event): def launch_cover(): GObject.threads_init() - app = DesktopCover() + DesktopCover() Gtk.main() def format_event(raw_event): tv_sec, tv_usec, ev_type, ev_code, ev_value = raw_event - event = {'typeNum': ev_type, - 'typeName': EV_TYPES.get(ev_type, 'UNKNOWN'), - 'codeNum': ev_code, - 'codeName': EV_CODE_MAP.get(ev_type, {}).get(ev_code, 'UNKNOWN'), - 'value': ev_value, - 'timestamp': (tv_sec * (10 ** 6) + tv_usec)} + event = { + "typeNum": ev_type, + "typeName": EV_TYPES.get(ev_type, "UNKNOWN"), + "codeNum": ev_code, + "codeName": EV_CODE_MAP.get(ev_type, {}).get(ev_code, "UNKNOWN"), + "value": ev_value, + "timestamp": (tv_sec * (10**6) + tv_usec), + } return event @@ -836,7 +828,7 @@ def listen(devs): data = os.read(fd, EV_PACK_SIZE) raw_event = struct.unpack(EV_PACK_FMT, data) except Exception as details: - msg = 'failed to get event: %s' % str(details) + msg = f"failed to get event: {str(details)}" error_notify(msg, dev) watch.remove(fd) event = format_event(raw_event) diff --git a/deps/input_event/input_event_win.py b/deps/input_event/input_event_win.py index df436ad6d0..36c5fa1a55 100755 --- a/deps/input_event/input_event_win.py +++ b/deps/input_event/input_event_win.py @@ -1,9 +1,10 @@ +import atexit +import ctypes import json import os import sys -import atexit -import ctypes from ctypes import wintypes + try: import Tkinter as tkinter except ImportError: @@ -12,17 +13,16 @@ import win32api import win32con - -TYPE_SYNC = 'SYNC' -TYPE_INFO = 'INFO' -TYPE_READY = 'READY' -TYPE_EVENT = 'EVENT' -TYPE_ERROR = 'ERROR' +TYPE_SYNC = "SYNC" +TYPE_INFO = "INFO" +TYPE_READY = "READY" +TYPE_EVENT = "EVENT" +TYPE_ERROR = "ERROR" EMPTY_CONTENT = {} def send_message(mtype, content): - message = {'type': mtype, 'content': content} + message = {"type": mtype, "content": content} sys.stdout.write(json.dumps(message)) sys.stdout.write(os.linesep) sys.stdout.flush() @@ -33,7 +33,7 @@ def sync_notify(): def info_notify(dev, info): - send_message(TYPE_INFO, {'device': dev, 'info': info}) + send_message(TYPE_INFO, {"device": dev, "info": info}) def ready_notify(): @@ -41,11 +41,11 @@ def ready_notify(): def event_notify(dev, event): - send_message(TYPE_EVENT, {'device': dev, 'event': event}) + send_message(TYPE_EVENT, {"device": dev, "event": event}) def error_notify(error, dev=None): - send_message(TYPE_ERROR, {'device': dev, 'message': error}) + send_message(TYPE_ERROR, {"device": dev, "message": error}) # input 
notifications @@ -53,243 +53,241 @@ def error_notify(error, dev=None): # https://docs.microsoft.com/en-us/windows/desktop/inputdev/mouse-input-notifications WPARAM_TYPES = { - 0x0100: 'WM_KEYDOWN', - 0x0101: 'WM_KEYUP', - 0x0104: 'WM_SYSKEYDOWN', - 0x0105: 'WM_SYSKEYUP', - 0x0200: 'WM_MOUSEMOVE', - 0x0201: 'WM_LBUTTONDOWN', - 0x0202: 'WM_LBUTTONUP', - 0x0204: 'WM_RBUTTONDOWN', - 0x0205: 'WM_RBUTTONUP', - 0x0207: 'WM_MBUTTONDOWN', - 0x0208: 'WM_MBUTTONUP', - 0x020A: 'WM_MOUSEWHEEL', - 0x020B: 'WM_XBUTTONDOWN', - 0x020C: 'WM_XBUTTONUP', - 0x020E: 'WM_MOUSEHWHEEL' + 0x0100: "WM_KEYDOWN", + 0x0101: "WM_KEYUP", + 0x0104: "WM_SYSKEYDOWN", + 0x0105: "WM_SYSKEYUP", + 0x0200: "WM_MOUSEMOVE", + 0x0201: "WM_LBUTTONDOWN", + 0x0202: "WM_LBUTTONUP", + 0x0204: "WM_RBUTTONDOWN", + 0x0205: "WM_RBUTTONUP", + 0x0207: "WM_MBUTTONDOWN", + 0x0208: "WM_MBUTTONUP", + 0x020A: "WM_MOUSEWHEEL", + 0x020B: "WM_XBUTTONDOWN", + 0x020C: "WM_XBUTTONUP", + 0x020E: "WM_MOUSEHWHEEL", } # virtual-key codes # https://docs.microsoft.com/en-us/windows/desktop/inputdev/virtual-key-codes VK_CODES = { - 0x01: 'VK_LBUTTON', - 0x02: 'VK_RBUTTON', - 0x03: 'VK_CANCEL', - 0x04: 'VK_MBUTTON', - 0x05: 'VK_XBUTTON1', - 0x06: 'VK_XBUTTON2', - 0x08: 'VK_BACK', - 0x09: 'VK_TAB', - 0x0C: 'VK_CLEAR', - 0x0D: 'VK_RETURN', - 0x10: 'VK_SHIFT', - 0x11: 'VK_CONTROL', - 0x12: 'VK_MENU', - 0x13: 'VK_PAUSE', - 0x14: 'VK_CAPITAL', - 0x15: 'VK_KANA', - 0x15: 'VK_HANGUEL', - 0x15: 'VK_HANGUL', - 0x17: 'VK_JUNJA', - 0x18: 'VK_FINAL', - 0x19: 'VK_HANJA', - 0x19: 'VK_KANJI', - 0x1B: 'VK_ESCAPE', - 0x1C: 'VK_CONVERT', - 0x1D: 'VK_NONCONVERT', - 0x1E: 'VK_ACCEPT', - 0x1F: 'VK_MODECHANGE', - 0x20: 'VK_SPACE', - 0x21: 'VK_PRIOR', - 0x22: 'VK_NEXT', - 0x23: 'VK_END', - 0x24: 'VK_HOME', - 0x25: 'VK_LEFT', - 0x26: 'VK_UP', - 0x27: 'VK_RIGHT', - 0x28: 'VK_DOWN', - 0x29: 'VK_SELECT', - 0x2A: 'VK_PRINT', - 0x2B: 'VK_EXECUTE', - 0x2C: 'VK_SNAPSHOT', - 0x2D: 'VK_INSERT', - 0x2E: 'VK_DELETE', - 0x2F: 'VK_HELP', - 0x30: 'VK_0', - 0x31: 'VK_1', - 0x32: 'VK_2', - 0x33: 'VK_3', - 0x34: 'VK_4', - 0x35: 'VK_5', - 0x36: 'VK_6', - 0x37: 'VK_7', - 0x38: 'VK_8', - 0x39: 'VK_9', - 0x41: 'VK_A', - 0x42: 'VK_B', - 0x43: 'VK_C', - 0x44: 'VK_D', - 0x45: 'VK_E', - 0x46: 'VK_F', - 0x47: 'VK_G', - 0x48: 'VK_H', - 0x49: 'VK_I', - 0x4A: 'VK_J', - 0x4B: 'VK_K', - 0x4C: 'VK_L', - 0x4D: 'VK_M', - 0x4E: 'VK_N', - 0x4F: 'VK_O', - 0x50: 'VK_P', - 0x51: 'VK_Q', - 0x52: 'VK_R', - 0x53: 'VK_S', - 0x54: 'VK_T', - 0x55: 'VK_U', - 0x56: 'VK_V', - 0x57: 'VK_W', - 0x58: 'VK_X', - 0x59: 'VK_Y', - 0x5A: 'VK_Z', - 0x5B: 'VK_LWIN', - 0x5C: 'VK_RWIN', - 0x5D: 'VK_APPS', - 0x5F: 'VK_SLEEP', - 0x60: 'VK_NUMPAD0', - 0x61: 'VK_NUMPAD1', - 0x62: 'VK_NUMPAD2', - 0x63: 'VK_NUMPAD3', - 0x64: 'VK_NUMPAD4', - 0x65: 'VK_NUMPAD5', - 0x66: 'VK_NUMPAD6', - 0x67: 'VK_NUMPAD7', - 0x68: 'VK_NUMPAD8', - 0x69: 'VK_NUMPAD9', - 0x6A: 'VK_MULTIPLY', - 0x6B: 'VK_ADD', - 0x6C: 'VK_SEPARATOR', - 0x6D: 'VK_SUBTRACT', - 0x6E: 'VK_DECIMAL', - 0x6F: 'VK_DIVIDE', - 0x70: 'VK_F1', - 0x71: 'VK_F2', - 0x72: 'VK_F3', - 0x73: 'VK_F4', - 0x74: 'VK_F5', - 0x75: 'VK_F6', - 0x76: 'VK_F7', - 0x77: 'VK_F8', - 0x78: 'VK_F9', - 0x79: 'VK_F10', - 0x7A: 'VK_F11', - 0x7B: 'VK_F12', - 0x7C: 'VK_F13', - 0x7D: 'VK_F14', - 0x7E: 'VK_F15', - 0x7F: 'VK_F16', - 0x80: 'VK_F17', - 0x81: 'VK_F18', - 0x82: 'VK_F19', - 0x83: 'VK_F20', - 0x84: 'VK_F21', - 0x85: 'VK_F22', - 0x86: 'VK_F23', - 0x87: 'VK_F24', - 0x90: 'VK_NUMLOCK', - 0x91: 'VK_SCROLL', - 0x92: 'VK_OEM_0x92', - 0x93: 'VK_OEM_0x93', - 0x94: 'VK_OEM_0x94', - 0x95: 'VK_OEM_0x95', - 0x96: 'VK_OEM_0x96', - 
0xA0: 'VK_LSHIFT', - 0xA1: 'VK_RSHIFT', - 0xA2: 'VK_LCONTROL', - 0xA3: 'VK_RCONTROL', - 0xA4: 'VK_LMENU', - 0xA5: 'VK_RMENU', - 0xA6: 'VK_BROWSER_BACK', - 0xA7: 'VK_BROWSER_FORWARD', - 0xA8: 'VK_BROWSER_REFRESH', - 0xA9: 'VK_BROWSER_STOP', - 0xAA: 'VK_BROWSER_SEARCH', - 0xAB: 'VK_BROWSER_FAVORITES', - 0xAC: 'VK_BROWSER_HOME', - 0xAD: 'VK_VOLUME_MUTE', - 0xAE: 'VK_VOLUME_DOWN', - 0xAF: 'VK_VOLUME_UP', - 0xB0: 'VK_MEDIA_NEXT_TRACK', - 0xB1: 'VK_MEDIA_PREV_TRACK', - 0xB2: 'VK_MEDIA_STOP', - 0xB3: 'VK_MEDIA_PLAY_PAUSE', - 0xB4: 'VK_LAUNCH_MAIL', - 0xB5: 'VK_LAUNCH_MEDIA_SELECT', - 0xB6: 'VK_LAUNCH_APP1', - 0xB7: 'VK_LAUNCH_APP2', - 0xBA: 'VK_OEM_1', - 0xBB: 'VK_OEM_PLUS', - 0xBC: 'VK_OEM_COMMA', - 0xBD: 'VK_OEM_MINUS', - 0xBE: 'VK_OEM_PERIOD', - 0xBF: 'VK_OEM_2', - 0xC0: 'VK_OEM_3', - 0xDB: 'VK_OEM_4', - 0xDC: 'VK_OEM_5', - 0xDD: 'VK_OEM_6', - 0xDE: 'VK_OEM_7', - 0xDF: 'VK_OEM_8', - 0xE1: 'VK_OEM_0xE1', - 0xE2: 'VK_OEM_102', - 0xE3: 'VK_OEM_0xE3', - 0xE4: 'VK_OEM_0xE4', - 0xE5: 'VK_PROCESSKEY', - 0xE6: 'VK_OEM_0xE6', - 0xE7: 'VK_PACKET', - 0xE9: 'VK_OEM_0xE9', - 0xEA: 'VK_OEM_0xEA', - 0xEB: 'VK_OEM_0xEB', - 0xEC: 'VK_OEM_0xEC', - 0xED: 'VK_OEM_0xED', - 0xEE: 'VK_OEM_0xEE', - 0xEF: 'VK_OEM_0xEF', - 0xF0: 'VK_OEM_0xF0', - 0xF1: 'VK_OEM_0xF1', - 0xF2: 'VK_OEM_0xF2', - 0xF3: 'VK_OEM_0xF3', - 0xF4: 'VK_OEM_0xF4', - 0xF5: 'VK_OEM_0xF5', - 0xF6: 'VK_ATTN', - 0xF7: 'VK_CRSEL', - 0xF8: 'VK_EXSEL', - 0xF9: 'VK_EREOF', - 0xFA: 'VK_PLAY', - 0xFB: 'VK_ZOOM', - 0xFC: 'VK_NONAME', - 0xFD: 'VK_PA1', - 0xFE: 'VK_OEM_CLEAR' + 0x01: "VK_LBUTTON", + 0x02: "VK_RBUTTON", + 0x03: "VK_CANCEL", + 0x04: "VK_MBUTTON", + 0x05: "VK_XBUTTON1", + 0x06: "VK_XBUTTON2", + 0x08: "VK_BACK", + 0x09: "VK_TAB", + 0x0C: "VK_CLEAR", + 0x0D: "VK_RETURN", + 0x10: "VK_SHIFT", + 0x11: "VK_CONTROL", + 0x12: "VK_MENU", + 0x13: "VK_PAUSE", + 0x14: "VK_CAPITAL", + 0x15: "VK_HANGUL", + 0x17: "VK_JUNJA", + 0x18: "VK_FINAL", + 0x19: "VK_KANJI", + 0x1B: "VK_ESCAPE", + 0x1C: "VK_CONVERT", + 0x1D: "VK_NONCONVERT", + 0x1E: "VK_ACCEPT", + 0x1F: "VK_MODECHANGE", + 0x20: "VK_SPACE", + 0x21: "VK_PRIOR", + 0x22: "VK_NEXT", + 0x23: "VK_END", + 0x24: "VK_HOME", + 0x25: "VK_LEFT", + 0x26: "VK_UP", + 0x27: "VK_RIGHT", + 0x28: "VK_DOWN", + 0x29: "VK_SELECT", + 0x2A: "VK_PRINT", + 0x2B: "VK_EXECUTE", + 0x2C: "VK_SNAPSHOT", + 0x2D: "VK_INSERT", + 0x2E: "VK_DELETE", + 0x2F: "VK_HELP", + 0x30: "VK_0", + 0x31: "VK_1", + 0x32: "VK_2", + 0x33: "VK_3", + 0x34: "VK_4", + 0x35: "VK_5", + 0x36: "VK_6", + 0x37: "VK_7", + 0x38: "VK_8", + 0x39: "VK_9", + 0x41: "VK_A", + 0x42: "VK_B", + 0x43: "VK_C", + 0x44: "VK_D", + 0x45: "VK_E", + 0x46: "VK_F", + 0x47: "VK_G", + 0x48: "VK_H", + 0x49: "VK_I", + 0x4A: "VK_J", + 0x4B: "VK_K", + 0x4C: "VK_L", + 0x4D: "VK_M", + 0x4E: "VK_N", + 0x4F: "VK_O", + 0x50: "VK_P", + 0x51: "VK_Q", + 0x52: "VK_R", + 0x53: "VK_S", + 0x54: "VK_T", + 0x55: "VK_U", + 0x56: "VK_V", + 0x57: "VK_W", + 0x58: "VK_X", + 0x59: "VK_Y", + 0x5A: "VK_Z", + 0x5B: "VK_LWIN", + 0x5C: "VK_RWIN", + 0x5D: "VK_APPS", + 0x5F: "VK_SLEEP", + 0x60: "VK_NUMPAD0", + 0x61: "VK_NUMPAD1", + 0x62: "VK_NUMPAD2", + 0x63: "VK_NUMPAD3", + 0x64: "VK_NUMPAD4", + 0x65: "VK_NUMPAD5", + 0x66: "VK_NUMPAD6", + 0x67: "VK_NUMPAD7", + 0x68: "VK_NUMPAD8", + 0x69: "VK_NUMPAD9", + 0x6A: "VK_MULTIPLY", + 0x6B: "VK_ADD", + 0x6C: "VK_SEPARATOR", + 0x6D: "VK_SUBTRACT", + 0x6E: "VK_DECIMAL", + 0x6F: "VK_DIVIDE", + 0x70: "VK_F1", + 0x71: "VK_F2", + 0x72: "VK_F3", + 0x73: "VK_F4", + 0x74: "VK_F5", + 0x75: "VK_F6", + 0x76: "VK_F7", + 0x77: "VK_F8", + 0x78: "VK_F9", + 0x79: "VK_F10", + 0x7A: "VK_F11", + 
0x7B: "VK_F12", + 0x7C: "VK_F13", + 0x7D: "VK_F14", + 0x7E: "VK_F15", + 0x7F: "VK_F16", + 0x80: "VK_F17", + 0x81: "VK_F18", + 0x82: "VK_F19", + 0x83: "VK_F20", + 0x84: "VK_F21", + 0x85: "VK_F22", + 0x86: "VK_F23", + 0x87: "VK_F24", + 0x90: "VK_NUMLOCK", + 0x91: "VK_SCROLL", + 0x92: "VK_OEM_0x92", + 0x93: "VK_OEM_0x93", + 0x94: "VK_OEM_0x94", + 0x95: "VK_OEM_0x95", + 0x96: "VK_OEM_0x96", + 0xA0: "VK_LSHIFT", + 0xA1: "VK_RSHIFT", + 0xA2: "VK_LCONTROL", + 0xA3: "VK_RCONTROL", + 0xA4: "VK_LMENU", + 0xA5: "VK_RMENU", + 0xA6: "VK_BROWSER_BACK", + 0xA7: "VK_BROWSER_FORWARD", + 0xA8: "VK_BROWSER_REFRESH", + 0xA9: "VK_BROWSER_STOP", + 0xAA: "VK_BROWSER_SEARCH", + 0xAB: "VK_BROWSER_FAVORITES", + 0xAC: "VK_BROWSER_HOME", + 0xAD: "VK_VOLUME_MUTE", + 0xAE: "VK_VOLUME_DOWN", + 0xAF: "VK_VOLUME_UP", + 0xB0: "VK_MEDIA_NEXT_TRACK", + 0xB1: "VK_MEDIA_PREV_TRACK", + 0xB2: "VK_MEDIA_STOP", + 0xB3: "VK_MEDIA_PLAY_PAUSE", + 0xB4: "VK_LAUNCH_MAIL", + 0xB5: "VK_LAUNCH_MEDIA_SELECT", + 0xB6: "VK_LAUNCH_APP1", + 0xB7: "VK_LAUNCH_APP2", + 0xBA: "VK_OEM_1", + 0xBB: "VK_OEM_PLUS", + 0xBC: "VK_OEM_COMMA", + 0xBD: "VK_OEM_MINUS", + 0xBE: "VK_OEM_PERIOD", + 0xBF: "VK_OEM_2", + 0xC0: "VK_OEM_3", + 0xDB: "VK_OEM_4", + 0xDC: "VK_OEM_5", + 0xDD: "VK_OEM_6", + 0xDE: "VK_OEM_7", + 0xDF: "VK_OEM_8", + 0xE1: "VK_OEM_0xE1", + 0xE2: "VK_OEM_102", + 0xE3: "VK_OEM_0xE3", + 0xE4: "VK_OEM_0xE4", + 0xE5: "VK_PROCESSKEY", + 0xE6: "VK_OEM_0xE6", + 0xE7: "VK_PACKET", + 0xE9: "VK_OEM_0xE9", + 0xEA: "VK_OEM_0xEA", + 0xEB: "VK_OEM_0xEB", + 0xEC: "VK_OEM_0xEC", + 0xED: "VK_OEM_0xED", + 0xEE: "VK_OEM_0xEE", + 0xEF: "VK_OEM_0xEF", + 0xF0: "VK_OEM_0xF0", + 0xF1: "VK_OEM_0xF1", + 0xF2: "VK_OEM_0xF2", + 0xF3: "VK_OEM_0xF3", + 0xF4: "VK_OEM_0xF4", + 0xF5: "VK_OEM_0xF5", + 0xF6: "VK_ATTN", + 0xF7: "VK_CRSEL", + 0xF8: "VK_EXSEL", + 0xF9: "VK_EREOF", + 0xFA: "VK_PLAY", + 0xFB: "VK_ZOOM", + 0xFC: "VK_NONAME", + 0xFD: "VK_PA1", + 0xFE: "VK_OEM_CLEAR", } ULONG_PTR = wintypes.WPARAM LRESULT = wintypes.LPARAM -HookProc = wintypes.WINFUNCTYPE( - LRESULT, ctypes.c_int, wintypes.WPARAM, wintypes.LPARAM) +HookProc = wintypes.WINFUNCTYPE(LRESULT, ctypes.c_int, wintypes.WPARAM, wintypes.LPARAM) # keyboard facilities -KEYBOARD_DEV = 'keyboard' +KEYBOARD_DEV = "keyboard" KEYBOARD_HOOK = None class KBDLLHOOKSTRUCT(ctypes.Structure): - _fields_ = (('vkCode', wintypes.DWORD), - ('scanCode', wintypes.DWORD), - ('flags', wintypes.DWORD), - ('time', wintypes.DWORD), - ('dwExtraInfo', ULONG_PTR)) + _fields_ = ( + ("vkCode", wintypes.DWORD), + ("scanCode", wintypes.DWORD), + ("flags", wintypes.DWORD), + ("time", wintypes.DWORD), + ("dwExtraInfo", ULONG_PTR), + ) LPKBDLLHOOKSTRUCT = ctypes.POINTER(KBDLLHOOKSTRUCT) @@ -299,31 +297,31 @@ class KBDLLHOOKSTRUCT(ctypes.Structure): def LowLevelKeyboardProc(nCode, wParam, lParam): global KEYBOARD_HOOK if nCode != win32con.HC_ACTION: - return ctypes.windll.user32.CallNextHookEx( - KEYBOARD_HOOK, nCode, wParam, lParam) + return ctypes.windll.user32.CallNextHookEx(KEYBOARD_HOOK, nCode, wParam, lParam) raw_event = ctypes.cast(lParam, LPKBDLLHOOKSTRUCT)[0] flags = raw_event.flags flags_text = [] if flags & 1: - flags_text.append('EXTENDED') + flags_text.append("EXTENDED") if (flags >> 5) & 1: - flags_text.append('ALTDOWN') + flags_text.append("ALTDOWN") if (flags >> 7) & 1: - flags_text.append('UP') - event = {'typeNum': wParam, - 'typeName': WPARAM_TYPES.get(wParam, 'UNKNOWN'), - 'vkCode': raw_event.vkCode, - 'vkCodeName': VK_CODES.get(raw_event.vkCode, 'UNKNOWN'), - 'scanCode': raw_event.scanCode, - 'flags': flags, - 'flagsText': 
flags_text, - 'timestamp': raw_event.time} + flags_text.append("UP") + event = { + "typeNum": wParam, + "typeName": WPARAM_TYPES.get(wParam, "UNKNOWN"), + "vkCode": raw_event.vkCode, + "vkCodeName": VK_CODES.get(raw_event.vkCode, "UNKNOWN"), + "scanCode": raw_event.scanCode, + "flags": flags, + "flagsText": flags_text, + "timestamp": raw_event.time, + } event_notify(KEYBOARD_DEV, event) if raw_event.vkCode == win32con.VK_F11: - return ctypes.windll.user32.CallNextHookEx( - KEYBOARD_HOOK, nCode, wParam, lParam) + return ctypes.windll.user32.CallNextHookEx(KEYBOARD_HOOK, nCode, wParam, lParam) else: return 1 @@ -332,7 +330,8 @@ def register_keyboard_hook(): global KEYBOARD_HOOK handle = win32api.GetModuleHandle(None) KEYBOARD_HOOK = ctypes.windll.user32.SetWindowsHookExA( - win32con.WH_KEYBOARD_LL, LowLevelKeyboardProc, handle, 0) + win32con.WH_KEYBOARD_LL, LowLevelKeyboardProc, handle, 0 + ) atexit.register(ctypes.windll.user32.UnhookWindowsHookEx, KEYBOARD_HOOK) @@ -344,16 +343,18 @@ def disable_hotkeys(): # pointer facilities -MOUSE_DEV = 'pointer' +MOUSE_DEV = "pointer" MOUSE_HOOK = None class MSLLHOOKSTRUCT(ctypes.Structure): - _fields_ = (('pt', wintypes.POINT), - ('mouseData', wintypes.DWORD), - ('flags', wintypes.DWORD), - ('time', wintypes.DWORD), - ('dwExtraInfo', ULONG_PTR)) + _fields_ = ( + ("pt", wintypes.POINT), + ("mouseData", wintypes.DWORD), + ("flags", wintypes.DWORD), + ("time", wintypes.DWORD), + ("dwExtraInfo", ULONG_PTR), + ) LPMSLLHOOKSTRUCT = ctypes.POINTER(MSLLHOOKSTRUCT) @@ -363,63 +364,62 @@ class MSLLHOOKSTRUCT(ctypes.Structure): def LowLevelMouseProc(nCode, wParam, lParam): global MOUSE_HOOK if nCode != win32con.HC_ACTION: - return ctypes.windll.user32.CallNextHookEx( - MOUSE_HOOK, nCode, wParam, lParam) + return ctypes.windll.user32.CallNextHookEx(MOUSE_HOOK, nCode, wParam, lParam) raw_event = ctypes.cast(lParam, LPMSLLHOOKSTRUCT)[0] mouse_data = raw_event.mouseData - mouse_data_text = '' - if wParam in (0x020A, 0x020E): # WM_MOUSEWHEEL and WM_MOUSEHWHEEL - value = (mouse_data >> 16) - if value == 0x0078: # delta value is 120 - mouse_data_text = 'WHEELFORWARD' - elif value == 0xFF88: # delta value is -120 - mouse_data_text = 'WHEELBACKWARD' - elif wParam in (0x020B, 0x020C): # WM_XBUTTONDOWN and WM_XBUTTONUP - value = (mouse_data >> 16) + mouse_data_text = "" + if wParam in (0x020A, 0x020E): # WM_MOUSEWHEEL and WM_MOUSEHWHEEL + value = mouse_data >> 16 + if value == 0x0078: # delta value is 120 + mouse_data_text = "WHEELFORWARD" + elif value == 0xFF88: # delta value is -120 + mouse_data_text = "WHEELBACKWARD" + elif wParam in (0x020B, 0x020C): # WM_XBUTTONDOWN and WM_XBUTTONUP + value = mouse_data >> 16 if value & 0x0001: - mouse_data_text = 'XBUTTON1' + mouse_data_text = "XBUTTON1" elif value & 0x0002: - mouse_data_text = 'XBUTTON2' - event = {'typeNum': wParam, - 'typeName': WPARAM_TYPES.get(wParam, 'UNKNOWN'), - 'xPos': raw_event.pt.x, - 'yPos': raw_event.pt.y, - 'mouseData': mouse_data, - 'mouseDataText': mouse_data_text, - 'flags': raw_event.flags, - 'timestamp': raw_event.time} + mouse_data_text = "XBUTTON2" + event = { + "typeNum": wParam, + "typeName": WPARAM_TYPES.get(wParam, "UNKNOWN"), + "xPos": raw_event.pt.x, + "yPos": raw_event.pt.y, + "mouseData": mouse_data, + "mouseDataText": mouse_data_text, + "flags": raw_event.flags, + "timestamp": raw_event.time, + } event_notify(MOUSE_DEV, event) - return ctypes.windll.user32.CallNextHookEx( - MOUSE_HOOK, nCode, wParam, lParam) + return ctypes.windll.user32.CallNextHookEx(MOUSE_HOOK, nCode, wParam, lParam) 
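The constants tested in LowLevelMouseProc above (0x0078 and 0xFF88) are the high word of mouseData read as a signed 16-bit wheel delta, one notch being 120 on Windows. A minimal sketch of that decoding, illustrative only and not part of this patch:

    def wheel_delta(mouse_data):
        """Decode the signed 16-bit wheel delta from the high word of mouseData."""
        hi = (mouse_data >> 16) & 0xFFFF
        return hi - 0x10000 if hi >= 0x8000 else hi

    assert wheel_delta(0x00780000) == 120    # one notch forward (WHEELFORWARD)
    assert wheel_delta(0xFF880000) == -120   # one notch backward (WHEELBACKWARD)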
def register_pointer_hook(): global MOUSE_HOOK handle = win32api.GetModuleHandle(None) MOUSE_HOOK = ctypes.windll.user32.SetWindowsHookExA( - win32con.WH_MOUSE_LL, LowLevelMouseProc, handle, 0) + win32con.WH_MOUSE_LL, LowLevelMouseProc, handle, 0 + ) atexit.register(ctypes.windll.user32.UnhookWindowsHookEx, MOUSE_HOOK) -class DesktopCover(object): - +class DesktopCover: def __init__(self): self.notified = False self.tk = tkinter.Tk() - self.tk.attributes('-fullscreen', True) - self.tk.attributes('-topmost', True) + self.tk.attributes("-fullscreen", True) + self.tk.attributes("-topmost", True) self.tk.focus() self.tk.grab_set_global() - self.tk.bind('', self.on_fullscreen) + self.tk.bind("", self.on_fullscreen) self.frame = tkinter.Frame(self.tk) self.frame.pack() - self.label = tkinter.Label( - self.frame, text='listening input events') + self.label = tkinter.Label(self.frame, text="listening input events") self.label.grid() def on_fullscreen(self, event=None): @@ -431,7 +431,7 @@ def mainloop(self): self.tk.mainloop() -if __name__ == '__main__': +if __name__ == "__main__": sync_notify() disable_hotkeys() diff --git a/generic/tests/arm_kvm_unit_tests.py b/generic/tests/arm_kvm_unit_tests.py index 6d9e2e167f..57c7b9f1f7 100644 --- a/generic/tests/arm_kvm_unit_tests.py +++ b/generic/tests/arm_kvm_unit_tests.py @@ -2,9 +2,7 @@ from glob import glob from shutil import rmtree -from avocado.utils import git -from avocado.utils import process - +from avocado.utils import git, process from virttest import error_context @@ -25,26 +23,30 @@ def run(test, params, env): :type env: virttest.utils_env.Env """ - repo_url = params['repo_url'] - sub_type = params['sub_type'] + repo_url = params["repo_url"] + sub_type = params["sub_type"] repo_dir = git.get_repo( - repo_url, destination_dir=os.path.join(test.tmpdir, 'kvm-unit-tests')) - tests_dir = os.path.join(repo_dir, 'tests') + repo_url, destination_dir=os.path.join(test.tmpdir, "kvm-unit-tests") + ) + tests_dir = os.path.join(repo_dir, "tests") failed_tests = [] try: - error_context.base_context(f'Run {sub_type} sub tests', test.log.info) - process.system(f'cd {repo_dir} && ./configure && make standalone', - verbose=False, shell=True) - for test_file in glob(os.path.join(tests_dir, sub_type + '*')): + error_context.base_context(f"Run {sub_type} sub tests", test.log.info) + process.system( + f"cd {repo_dir} && ./configure && make standalone", + verbose=False, + shell=True, + ) + for test_file in glob(os.path.join(tests_dir, sub_type + "*")): test_name = os.path.basename(test_file) s, o = process.getstatusoutput(test_file) - test.log.debug(f'Output of "{test_name}":\n{o}') + test.log.debug('Output of "%s":\n%s', test_name, o) if s and s != 77: failed_tests.append(os.path.basename(test_file)) if failed_tests: - test.fail(f'Certain {sub_type} test cases fail: {failed_tests}') - test.log.info(f'All {sub_type} tests passed') + test.fail(f"Certain {sub_type} test cases fail: {failed_tests}") + test.log.info("All %s tests passed", sub_type) finally: rmtree(repo_dir, ignore_errors=True) diff --git a/generic/tests/autotest_control.py b/generic/tests/autotest_control.py index 1ea7001eda..24ecaa818f 100644 --- a/generic/tests/autotest_control.py +++ b/generic/tests/autotest_control.py @@ -1,9 +1,8 @@ -import os import logging +import os import sys from autotest.client.shared import error - from virttest import utils_test @@ -23,18 +22,25 @@ def run(test, params, env): # Collect test parameters timeout = int(params.get("test_timeout", 300)) control_args = 
params.get("control_args") - control_path = os.path.join(test.virtdir, "control", - params.get("test_control_file")) + control_path = os.path.join( + test.virtdir, "control", params.get("test_control_file") + ) ignore_sess_terminated = params.get("ignore_session_terminated") == "yes" outputdir = test.outputdir - utils_test.run_autotest(vm, session, control_path, timeout, outputdir, - params, control_args=control_args, - ignore_session_terminated=ignore_sess_terminated) + utils_test.run_autotest( + vm, + session, + control_path, + timeout, + outputdir, + params, + control_args=control_args, + ignore_session_terminated=ignore_sess_terminated, + ) -def run_autotest_control_background(test, params, env, - test_control_file="control"): +def run_autotest_control_background(test, params, env, test_control_file="control"): """ Wrapper of run() and make it run in the background through fork() and let it run in the child process. @@ -51,6 +57,7 @@ def run_autotest_control_background(test, params, env, :param env: Dictionary with test environment. :param test_control_file: The control file of autotest running in the guest """ + def flush(): sys.stdout.flush() sys.stderr.flush() @@ -63,9 +70,9 @@ def flush(): return pid flag_fname = "/tmp/autotest-flag-file-pid-" + str(os.getpid()) - open(flag_fname, 'w').close() + open(flag_fname, "w").close() try: - params['test_control_file'] = test_control_file + params["test_control_file"] = test_control_file # Launch autotest run(test, params, env) os.remove(flag_fname) diff --git a/generic/tests/autotest_distro_detect.py b/generic/tests/autotest_distro_detect.py index b5b7d4e409..702cf10008 100644 --- a/generic/tests/autotest_distro_detect.py +++ b/generic/tests/autotest_distro_detect.py @@ -1,6 +1,6 @@ import os -import tempfile import string +import tempfile from virttest import utils_test @@ -85,5 +85,4 @@ def run(test, params, env): control_path = generate_control_file(params) outputdir = test.outputdir - utils_test.run_autotest(vm, session, control_path, timeout, outputdir, - params) + utils_test.run_autotest(vm, session, control_path, timeout, outputdir, params) diff --git a/generic/tests/autotest_regression.py b/generic/tests/autotest_regression.py index a86eadbc46..6bdd45d6cd 100644 --- a/generic/tests/autotest_regression.py +++ b/generic/tests/autotest_regression.py @@ -1,9 +1,7 @@ import logging import aexpect - from autotest.client.shared import error - from virttest import utils_misc @@ -29,18 +27,17 @@ def run(test, params, env): :param params: Dictionary with the test parameters :param env: Dictionary with test environment. 
""" - github_repo = 'git://github.com/autotest/autotest.git' + github_repo = "git://github.com/autotest/autotest.git" step_failures = [] - autotest_repo = params.get('autotest_repo', github_repo) - autotest_branch = params['autotest_branch'] - autotest_commit = params['autotest_commit'] - password = params['password'] - autotest_install_timeout = int( - params.get('autotest_install_timeout', 1800)) - unittests_run_timeout = int(params.get('unittests_run_timeout', 1800)) - unittests_args = params.get('unittests_args', '') - pylint_run_timeout = int(params.get('pylint_run_timeout', 1800)) + autotest_repo = params.get("autotest_repo", github_repo) + autotest_branch = params["autotest_branch"] + autotest_commit = params["autotest_commit"] + password = params["password"] + autotest_install_timeout = int(params.get("autotest_install_timeout", 1800)) + unittests_run_timeout = int(params.get("unittests_run_timeout", 1800)) + unittests_args = params.get("unittests_args", "") + pylint_run_timeout = int(params.get("pylint_run_timeout", 1800)) vm_names = params["vms"].split() has_client_vm = len(vm_names) > 1 server_name = vm_names[0] @@ -62,57 +59,70 @@ def run(test, params, env): installer_file = "install-autotest-server.sh" if autotest_repo == github_repo: - installer_url = ("https://raw.github.com/autotest/autotest/%s" - "/contrib/%s" % (autotest_branch, installer_file)) + installer_url = ( + f"https://raw.github.com/autotest/autotest/{autotest_branch}" + f"/contrib/{installer_file}" + ) else: - installer_url = ("https://raw.github.com/autotest/autotest/master" - "/contrib/%s" % installer_file) + installer_url = ( + "https://raw.github.com/autotest/autotest/master" + f"/contrib/{installer_file}" + ) # Download the install script and execute it - download_cmd = ("python -c 'from urllib2 import urlopen; " - "r = urlopen(\"%s\"); " - "f = open(\"%s\", \"w\"); " - "f.write(r.read())'" % (installer_url, - installer_file)) + download_cmd = ( + "python -c 'from urllib2 import urlopen; " + f'r = urlopen("{installer_url}"); ' + f'f = open("{installer_file}", "w"); ' + "f.write(r.read())'" + ) session_server.cmd(download_cmd) - permission_cmd = ("chmod +x install-autotest-server.sh") + permission_cmd = "chmod +x install-autotest-server.sh" session_server.cmd(permission_cmd) - install_cmd = ("./install-autotest-server.sh -u Aut0t3st -d Aut0t3st " - "-g %s -b %s" % (autotest_repo, autotest_branch)) + install_cmd = ( + "./install-autotest-server.sh -u Aut0t3st -d Aut0t3st " + f"-g {autotest_repo} -b {autotest_branch}" + ) if autotest_commit: - install_cmd += " -c %s" % autotest_commit + install_cmd += f" -c {autotest_commit}" session_server.cmd(install_cmd, timeout=autotest_install_timeout) except aexpect.ShellCmdError as e: for line in e.output.splitlines(): logging.error(line) step_failures.append(step1) - vm_server.copy_files_from(guest_path="/tmp/install-autotest-server*log", - host_path=test.resultsdir) + vm_server.copy_files_from( + guest_path="/tmp/install-autotest-server*log", host_path=test.resultsdir + ) top_commit = None try: session_server.cmd("test -d /usr/local/autotest/.git") session_server.cmd("cd /usr/local/autotest") - top_commit = session_server.cmd( - "echo `git log -n 1 --pretty=format:%H`") + top_commit = session_server.cmd("echo `git log -n 1 --pretty=format:%H`") top_commit = top_commit.strip() - logging.info("Autotest top commit for repo %s, branch %s: %s", - autotest_repo, autotest_branch, top_commit) + logging.info( + "Autotest top commit for repo %s, branch %s: %s", + autotest_repo, 
+ autotest_branch, + top_commit, + ) except aexpect.ShellCmdError as e: for line in e.output.splitlines(): logging.error(line) if top_commit is not None: session_server.close() - session_server = vm_server.wait_for_login(timeout=timeout, - username='autotest', - password='Aut0t3st') + session_server = vm_server.wait_for_login( + timeout=timeout, username="autotest", password="Aut0t3st" + ) step2 = "unittests" try: session_server.cmd("cd /usr/local/autotest") - session_server.cmd("utils/unittest_suite.py %s" % unittests_args, - timeout=unittests_run_timeout) + session_server.cmd( + f"utils/unittest_suite.py {unittests_args}", + timeout=unittests_run_timeout, + ) except aexpect.ShellCmdError as e: for line in e.output.splitlines(): logging.error(line) @@ -121,8 +131,9 @@ def run(test, params, env): step3 = "pylint" try: session_server.cmd("cd /usr/local/autotest") - session_server.cmd("utils/check_patch.py --full --yes", - timeout=pylint_run_timeout) + session_server.cmd( + "utils/check_patch.py --full --yes", timeout=pylint_run_timeout + ) except aexpect.ShellCmdError as e: for line in e.output.splitlines(): logging.error(line) @@ -131,8 +142,9 @@ def run(test, params, env): step4 = "client_run" try: session_server.cmd("cd /usr/local/autotest/client") - session_server.cmd("./autotest-local run sleeptest", - timeout=pylint_run_timeout) + session_server.cmd( + "./autotest-local run sleeptest", timeout=pylint_run_timeout + ) session_server.cmd("rm -rf results/default") except aexpect.ShellCmdError as e: for line in e.output.splitlines(): @@ -144,11 +156,12 @@ def run(test, params, env): try: session_client.cmd("iptables -F") session_server.cmd("cd /usr/local/autotest") - session_server.cmd("server/autotest-remote -m %s --ssh-user root " - "--ssh-pass %s " - "-c client/tests/sleeptest/control" % - (client_ip, password), - timeout=pylint_run_timeout) + session_server.cmd( + f"server/autotest-remote -m {client_ip} --ssh-user root " + f"--ssh-pass {password} " + "-c client/tests/sleeptest/control", + timeout=pylint_run_timeout, + ) session_server.cmd("rm -rf results-*") except aexpect.ShellCmdError as e: for line in e.output.splitlines(): @@ -157,37 +170,46 @@ def run(test, params, env): step6 = "registering_client_cli" try: - label_name = "label-%s" % utils_misc.generate_random_id() - create_label_cmd = ("/usr/local/autotest/cli/autotest-rpc-client " - "label create -t %s -w %s" % - (label_name, server_ip)) + label_name = f"label-{utils_misc.generate_random_id()}" + create_label_cmd = ( + "/usr/local/autotest/cli/autotest-rpc-client " + f"label create -t {label_name} -w {server_ip}" + ) session_server.cmd(create_label_cmd) - list_labels_cmd = ("/usr/local/autotest/cli/autotest-rpc-client " - "label list -a -w %s" % server_ip) + list_labels_cmd = ( + "/usr/local/autotest/cli/autotest-rpc-client " + f"label list -a -w {server_ip}" + ) list_labels_output = session_server.cmd(list_labels_cmd) for line in list_labels_output.splitlines(): logging.debug(line) if label_name not in list_labels_output: - raise ValueError("No label %s in the output of %s" % - (label_name, list_labels_cmd)) - - create_host_cmd = ("/usr/local/autotest/cli/autotest-rpc-client " - "host create -t %s %s -w %s" % - (label_name, client_ip, server_ip)) + raise ValueError( + f"No label {label_name} in the output of {list_labels_cmd}" + ) + + create_host_cmd = ( + "/usr/local/autotest/cli/autotest-rpc-client " + f"host create -t {label_name} {client_ip} -w {server_ip}" + ) session_server.cmd(create_host_cmd) - list_hosts_cmd = 
("/usr/local/autotest/cli/autotest-rpc-client " - "host list -w %s" % server_ip) + list_hosts_cmd = ( + "/usr/local/autotest/cli/autotest-rpc-client " + f"host list -w {server_ip}" + ) list_hosts_output = session_server.cmd(list_hosts_cmd) for line in list_hosts_output.splitlines(): logging.debug(line) if client_ip not in list_hosts_output: - raise ValueError("No client %s in the output of %s" % - (client_ip, create_label_cmd)) + raise ValueError( + f"No client {client_ip} in the output of {create_label_cmd}" + ) if label_name not in list_hosts_output: - raise ValueError("No label %s in the output of %s" % - (label_name, create_label_cmd)) + raise ValueError( + f"No label {label_name} in the output of {create_label_cmd}" + ) except (aexpect.ShellCmdError, ValueError) as e: if isinstance(e, aexpect.ShellCmdError): @@ -201,11 +223,13 @@ def run(test, params, env): try: session_client.cmd("iptables -F") - job_name = "Sleeptest %s" % utils_misc.generate_random_id() + job_name = f"Sleeptest {utils_misc.generate_random_id()}" def job_is_status(status): - list_jobs_cmd = ("/usr/local/autotest/cli/autotest-rpc-client " - "job list -a -w %s" % server_ip) + list_jobs_cmd = ( + "/usr/local/autotest/cli/autotest-rpc-client " + f"job list -a -w {server_ip}" + ) list_jobs_output = session_server.cmd(list_jobs_cmd) if job_name in list_jobs_output: if status in list_jobs_output: @@ -217,8 +241,10 @@ def job_is_status(status): else: return False else: - raise ValueError("Job %s does not show in the " - "output of %s" % (job_name, list_jobs_cmd)) + raise ValueError( + f"Job {job_name} does not show in the " + f"output of {list_jobs_cmd}" + ) def job_is_completed(): return job_is_status("Completed") @@ -226,33 +252,37 @@ def job_is_completed(): def job_is_running(): return job_is_status("Running") - job_create_cmd = ("/usr/local/autotest/cli/autotest-rpc-client " - "job create --test sleeptest -m %s '%s' -w %s" % - (client_ip, job_name, server_ip)) + job_create_cmd = ( + "/usr/local/autotest/cli/autotest-rpc-client " + f"job create --test sleeptest -m {client_ip} '{job_name}' -w {server_ip}" + ) session_server.cmd(job_create_cmd) - if not utils_misc.wait_for(job_is_running, 300, 0, 10, - "Waiting for job to start running"): + if not utils_misc.wait_for( + job_is_running, 300, 0, 10, "Waiting for job to start running" + ): raise ValueError("Job did not start running") # Wait for the session to become unresponsive if not utils_misc.wait_for( - lambda: not session_client.is_responsive(), - timeout=300): + lambda: not session_client.is_responsive(), timeout=300 + ): raise ValueError("Client machine did not reboot") # Establish a new client session session_client = vm_client.wait_for_login(timeout=timeout) # Wait for the job to complete - if not utils_misc.wait_for(job_is_completed, 300, 0, 10, - "Waiting for job to complete"): + if not utils_misc.wait_for( + job_is_completed, 300, 0, 10, "Waiting for job to complete" + ): raise ValueError("Job did not complete") # Copy logs back so we can analyze them vm_server.copy_files_from( guest_path="/usr/local/autotest/results/*", - host_path=test.resultsdir) + host_path=test.resultsdir, + ) except (aexpect.ShellCmdError, ValueError) as e: if isinstance(e, aexpect.ShellCmdError): @@ -271,8 +301,10 @@ def report_version(): if step_failures: logging.error("The autotest regression testing failed") report_version() - raise error.TestFail("The autotest regression testing had the " - "following steps failed: %s" % step_failures) + raise error.TestFail( + "The autotest 
regression testing had the " + f"following steps failed: {step_failures}" + ) else: logging.info("The autotest regression testing passed") report_version() diff --git a/generic/tests/avocado_guest.py b/generic/tests/avocado_guest.py index e6ba6ce44b..3f68fd9765 100644 --- a/generic/tests/avocado_guest.py +++ b/generic/tests/avocado_guest.py @@ -14,23 +14,30 @@ def run(test, params, env): timeout = int(params.get("test_timeout", 3600)) testlist = [] avocadoinstalltype = params.get("avocadoinstalltype", "pip") - avocadotestrepo = params.get("avocadotestrepo", - "https://github.com/avocado-framework-tests/avocado-misc-tests.git") + avocadotestrepo = params.get( + "avocadotestrepo", + "https://github.com/avocado-framework-tests/avocado-misc-tests.git", + ) avocadotest = params.get("avocadotest", "cpu/ebizzy.py") avocadomux = params.get("avocadomux", "") avocadotestargs = params.get("avocadotestargs", "") - for index, item in enumerate(avocadotest.split(',')): + for index, item in enumerate(avocadotest.split(",")): try: - mux = '' - mux = avocadomux.split(',')[index] + mux = "" + mux = avocadomux.split(",")[index] except IndexError: pass testlist.append((item, mux)) - avocado_obj = utils_test.AvocadoGuest(vm, params, test, testlist, - timeout=timeout, - testrepo=avocadotestrepo, - installtype=avocadoinstalltype, - reinstall=False, - add_args=avocadotestargs, - ignore_result=False) + avocado_obj = utils_test.AvocadoGuest( + vm, + params, + test, + testlist, + timeout=timeout, + testrepo=avocadotestrepo, + installtype=avocadoinstalltype, + reinstall=False, + add_args=avocadotestargs, + ignore_result=False, + ) avocado_obj.run_avocado() diff --git a/generic/tests/boot.py b/generic/tests/boot.py index 577d36dfcf..22fab45b9f 100644 --- a/generic/tests/boot.py +++ b/generic/tests/boot.py @@ -1,7 +1,6 @@ import time -from virttest import error_context -from virttest import utils_test +from virttest import error_context, utils_test @error_context.context_aware @@ -22,8 +21,7 @@ def run(test, params, env): serial_login = params.get("serial_login", "no") == "yes" vms = env.get_all_vms() for vm in vms: - error_context.context("Try to log into guest '%s'." % vm.name, - test.log.info) + error_context.context(f"Try to log into guest '{vm.name}'.", test.log.info) if serial_login: session = vm.wait_for_serial_login(timeout=timeout) else: @@ -40,8 +38,7 @@ def run(test, params, env): session.close() if params.get("reboot_method"): for vm in vms: - error_context.context("Reboot guest '%s'." 
% vm.name, - test.log.info) + error_context.context(f"Reboot guest '{vm.name}'.", test.log.info) if params["reboot_method"] == "system_reset": time.sleep(int(params.get("sleep_before_reset", 10))) # Reboot the VM @@ -50,9 +47,7 @@ def run(test, params, env): else: session = vm.wait_for_login(timeout=timeout) for i in range(int(params.get("reboot_count", 1))): - session = vm.reboot(session, - params["reboot_method"], - 0, - timeout, - serial_login) + session = vm.reboot( + session, params["reboot_method"], 0, timeout, serial_login + ) session.close() diff --git a/generic/tests/boot_savevm.py b/generic/tests/boot_savevm.py index bf71dfc4d7..5be9380aeb 100644 --- a/generic/tests/boot_savevm.py +++ b/generic/tests/boot_savevm.py @@ -1,10 +1,8 @@ -import time -import tempfile import os +import tempfile +import time -from virttest import qemu_storage -from virttest import data_dir -from virttest import utils_misc +from virttest import data_dir, qemu_storage, utils_misc def run(test, params, env): @@ -25,14 +23,16 @@ def run(test, params, env): vm = env.get_vm(params["main_vm"]) if params.get("with_floppy") == "yes": floppy_name = params.get("floppies", "fl") - floppy_params = {"image_format": params.get("floppy_format", "qcow2"), - "image_size": params.get("floppy_size", "1.4M"), - "image_name": params.get("%s_name" % floppy_name, - "images/test"), - "vm_type": params.get("vm_type"), - "qemu_img_binary": utils_misc.get_qemu_img_binary(params)} - floppy = qemu_storage.QemuImg(floppy_params, - data_dir.get_data_dir(), floppy_name) + floppy_params = { + "image_format": params.get("floppy_format", "qcow2"), + "image_size": params.get("floppy_size", "1.4M"), + "image_name": params.get(f"{floppy_name}_name", "images/test"), + "vm_type": params.get("vm_type"), + "qemu_img_binary": utils_misc.get_qemu_img_binary(params), + } + floppy = qemu_storage.QemuImg( + floppy_params, data_dir.get_data_dir(), floppy_name + ) floppy.create(floppy_params) floppy_orig_info = floppy.snapshot_list() vm.create(params=params) @@ -43,8 +43,8 @@ def run(test, params, env): savevm_login_timeout = float(params["savevm_timeout"]) savevm_statedir = params.get("savevm_statedir", tempfile.gettempdir()) fd, savevm_statefile = tempfile.mkstemp( - suffix='.img', prefix=vm.name + '-', - dir=savevm_statedir) + suffix=".img", prefix=vm.name + "-", dir=savevm_statedir + ) os.close(fd) # save_to_file doesn't need the file open start_time = time.time() cycles = 0 @@ -54,7 +54,7 @@ def run(test, params, env): test.log.info("Save/Restore cycle %d", cycles + 1) time.sleep(savevm_delay) vm.pause() - if params['save_method'] == 'save_to_file': + if params["save_method"] == "save_to_file": vm.save_to_file(savevm_statefile) # Re-use same filename vm.restore_from_file(savevm_statefile) else: @@ -74,7 +74,7 @@ def run(test, params, env): time_elapsed = int(time.time() - start_time) info = "after %s s, %d load/save cycles" % (time_elapsed, cycles + 1) if not successful_login: - test.fail("Can't log on '%s' %s" % (vm.name, info)) + test.fail(f"Can't log on '{vm.name}' {info}") else: test.log.info("Test ended %s", info) @@ -82,7 +82,8 @@ def run(test, params, env): vm.destroy() floppy_info = floppy.snapshot_list() if floppy_info == floppy_orig_info: - test.fail("savevm didn't create snapshot in floppy." - " original snapshot list is: %s" - " now snapshot list is: %s" - % (floppy_orig_info, floppy_info)) + test.fail( + "savevm didn't create snapshot in floppy." 
+ f" original snapshot list is: {floppy_orig_info}" + f" now snapshot list is: {floppy_info}" + ) diff --git a/generic/tests/build.py b/generic/tests/build.py index e37dbd31da..0e9763925b 100644 --- a/generic/tests/build.py +++ b/generic/tests/build.py @@ -22,11 +22,12 @@ def run(test, params, env): installer_obj.write_version_keyval(test) if installer_obj.minor_failure is True: minor_failure = True - reason = "%s_%s: %s" % (installer_obj.name, - installer_obj.mode, - installer_obj.minor_failure_reason) + reason = f"{installer_obj.name}_{installer_obj.mode}: {installer_obj.minor_failure_reason}" minor_failure_reasons.append(reason) if minor_failure: - test.error("Minor (worked around) failures during build " - "test: %s" % ", ".join(minor_failure_reasons)) + test.error( + "Minor (worked around) failures during build " "test: {}".format( + ", ".join(minor_failure_reasons) + ) + ) diff --git a/generic/tests/clock_getres.py b/generic/tests/clock_getres.py index b7c2ad188c..07bed92786 100644 --- a/generic/tests/clock_getres.py +++ b/generic/tests/clock_getres.py @@ -1,8 +1,6 @@ import os -from virttest import error_context -from virttest import utils_test -from virttest import data_dir +from virttest import data_dir, error_context, utils_test @error_context.context_aware @@ -23,22 +21,24 @@ def run(test, params, env): getres_cmd = params.get("getres_cmd") - if not getres_cmd or session.cmd_status("test -x %s" % getres_cmd): + if not getres_cmd or session.cmd_status(f"test -x {getres_cmd}"): source_name = "clock_getres/clock_getres.c" source_name = os.path.join(data_dir.get_deps_dir(), source_name) getres_cmd = "/tmp/clock_getres" dest_name = "/tmp/clock_getres.c" if not os.path.isfile(source_name): - test.error("Could not find %s" % source_name) + test.error(f"Could not find {source_name}") vm.copy_files_to(source_name, dest_name) - session.cmd("gcc -lrt -o %s %s" % (getres_cmd, dest_name)) + session.cmd(f"gcc -lrt -o {getres_cmd} {dest_name}") session.cmd(getres_cmd) test.log.info("PASS: Guest reported appropriate clock resolution") sub_test = params.get("sub_test") if sub_test: - error_context.context("Run sub test '%s' after checking" - " clock resolution" % sub_test, test.log.info) + error_context.context( + f"Run sub test '{sub_test}' after checking" " clock resolution", + test.log.info, + ) utils_test.run_virt_sub_test(test, params, env, sub_test) diff --git a/generic/tests/dd_test.py b/generic/tests/dd_test.py index 292f272722..31d70cb9fd 100644 --- a/generic/tests/dd_test.py +++ b/generic/tests/dd_test.py @@ -4,14 +4,12 @@ :author: Lukas Doktor :copyright: 2012 Red Hat, Inc. """ + import os import re -import aexpect -from virttest import utils_misc -from virttest import utils_disk -from virttest import utils_numeric -from virttest import error_context +import aexpect +from virttest import error_context, utils_disk, utils_misc, utils_numeric try: from itertools import zip_longest as zip_longest @@ -29,8 +27,9 @@ def run(test, params, env): 2). run dd command in guest with special params(eg. oflag, bs and so on) 3). 
check command exit stauts and output """ + def _get_file(filename, select, test=test): - """ Picks the actual file based on select value """ + """Picks the actual file based on select value""" if filename == "NULL": return "/dev/null" elif filename == "ZERO": @@ -40,110 +39,119 @@ def _get_file(filename, select, test=test): elif filename == "URANDOM": return "/dev/urandom" elif filename in params.objects("images"): - drive_id = params["blk_extra_params_%s" % filename].split("=")[1] + drive_id = params[f"blk_extra_params_{filename}"].split("=")[1] drive_path = utils_misc.get_linux_drive_path(session, drive_id) if drive_path: return drive_path - test.error("Failed to get '%s' drive path" % filename) + test.error(f"Failed to get '{filename}' drive path") else: # get all matching filenames try: - disks = sorted(session.cmd("ls -1d %s" % filename).split('\n')) - except aexpect.ShellCmdError: # No matching file (creating new?) + disks = sorted(session.cmd(f"ls -1d {filename}").split("\n")) + except aexpect.ShellCmdError: # No matching file (creating new?) disks = [filename] - if disks[-1] == '': + if disks[-1] == "": disks = disks[:-1] try: return disks[select] except IndexError: - err = ("Incorrect cfg: dd_select out of the range (disks=%s," - " select=%s)" % (disks, select)) + err = ( + f"Incorrect cfg: dd_select out of the range (disks={disks}," + f" select={select})" + ) test.log.error(err) test.error(err) def _check_disk_partitions_number(): - """ Check the data disk partitions number. """ - del partitions[:] # pylint: disable=E0606 - partitions.extend(re.findall( - r'%s\d+' % dev_id, ' '.join(utils_disk.get_linux_disks(session, True)))) + """Check the data disk partitions number.""" + del partitions[:] # pylint: disable=E0606 + partitions.extend( + re.findall( + rf"{dev_id}\d+", " ".join(utils_disk.get_linux_disks(session, True)) + ) + ) return len(partitions) == bs_count - vm = env.get_vm(params['main_vm']) + vm = env.get_vm(params["main_vm"]) timeout = int(params.get("login_timeout", 360)) error_context.context("Wait guest boot up", test.log.info) session = vm.wait_for_login(timeout=timeout) - dd_keys = ['dd_if', 'dd_of', 'dd_bs', 'dd_count', 'dd_iflag', - 'dd_oflag', 'dd_skip', 'dd_seek'] + dd_keys = [ + "dd_if", + "dd_of", + "dd_bs", + "dd_count", + "dd_iflag", + "dd_oflag", + "dd_skip", + "dd_seek", + ] dd_params = {key: params.get(key, None) for key in dd_keys} - if dd_params['dd_bs'] is None: - dd_params['dd_bs'] = '512' - dd_params['dd_bs'] = dd_params['dd_bs'].split() - bs_count = len(dd_params['dd_bs']) + if dd_params["dd_bs"] is None: + dd_params["dd_bs"] = "512" + dd_params["dd_bs"] = dd_params["dd_bs"].split() + bs_count = len(dd_params["dd_bs"]) dd_timeout = int(params.get("dd_timeout", 180)) dd_output = params.get("dd_output", "") dd_stat = int(params.get("dd_stat", 0)) dev_partitioned = [] - for arg in ['dd_if', 'dd_of']: + for arg in ["dd_if", "dd_of"]: filename = dd_params[arg] - path = _get_file(filename, - int(params.get('%s_select' % arg, '-1'))) - if (bs_count > 1 - and filename in params.objects('images')): + path = _get_file(filename, int(params.get(f"{arg}_select", "-1"))) + if bs_count > 1 and filename in params.objects("images"): psize = float( - utils_numeric.normalize_data_size( - params.get("partition_size", '2G') - ) + utils_numeric.normalize_data_size(params.get("partition_size", "2G")) ) start = 0.0 dev_id = os.path.split(path)[-1] dev_partitioned.append(dev_id) - utils_disk.create_partition_table_linux(session, dev_id, 'gpt') + 
utils_disk.create_partition_table_linux(session, dev_id, "gpt") for i in range(bs_count): - utils_disk.create_partition_linux(session, dev_id, - '%fM' % psize, - '%fM' % start) + utils_disk.create_partition_linux( + session, dev_id, f"{psize:f}M", f"{start:f}M" + ) start += psize partitions = [] if not utils_misc.wait_for(_check_disk_partitions_number, 30, step=3.0): - test.error('Failed to get %d partitions on %s.' % (bs_count, dev_id)) + test.error("Failed to get %d partitions on %s." % (bs_count, dev_id)) partitions.sort() - dd_params[arg] = [path.replace(dev_id, part) - for part in partitions] + dd_params[arg] = [path.replace(dev_id, part) for part in partitions] else: dd_params[arg] = [path] if bs_count > 1 and not dev_partitioned: - test.error('with multiple bs, either dd_if or \ - dd_of must be a block device') + test.error( + "with multiple bs, either dd_if or \ + dd_of must be a block device" + ) - dd_cmd = ['dd'] + dd_cmd = ["dd"] for key in dd_keys: value = dd_params[key] if value is None: continue - arg = key.split('_')[-1] - if key in ['dd_if', 'dd_of', 'dd_bs']: - part = '%s=%s' % (arg, '{}') + arg = key.split("_")[-1] + if key in ["dd_if", "dd_of", "dd_bs"]: + part = "{}={}".format(arg, "{}") else: - part = '%s=%s' % (arg, value) + part = f"{arg}={value}" dd_cmd.append(part) - dd_cmd = ' '.join(dd_cmd) + dd_cmd = " ".join(dd_cmd) - remaining = [dd_params[key] for key in ['dd_if', 'dd_of', 'dd_bs']] - if len(dd_params['dd_if']) != bs_count: - fillvalue = dd_params['dd_if'][-1] + remaining = [dd_params[key] for key in ["dd_if", "dd_of", "dd_bs"]] + if len(dd_params["dd_if"]) != bs_count: + fillvalue = dd_params["dd_if"][-1] else: - fillvalue = dd_params['dd_of'][-1] - cmd = [dd_cmd.format(*t) for t in - zip_longest(*remaining, fillvalue=fillvalue)] - cmd = ' & '.join(cmd) + fillvalue = dd_params["dd_of"][-1] + cmd = [dd_cmd.format(*t) for t in zip_longest(*remaining, fillvalue=fillvalue)] + cmd = " & ".join(cmd) test.log.info("Using '%s' cmd", cmd) try: @@ -151,28 +159,26 @@ def _check_disk_partitions_number(): try: (stat, out) = session.cmd_status_output(cmd, timeout=dd_timeout) except aexpect.ShellTimeoutError: - err = ("dd command timed-out (cmd='%s', timeout=%d)" - % (cmd, dd_timeout)) + err = "dd command timed-out (cmd='%s', timeout=%d)" % (cmd, dd_timeout) test.fail(err) except aexpect.ShellCmdError as details: stat = details.status out = details.output - error_context.context("Check command exit status and output", - test.log.info) - test.log.debug("Returned dd_status: %s\nReturned output:\n%s", - stat, out) + error_context.context("Check command exit status and output", test.log.info) + test.log.debug("Returned dd_status: %s\nReturned output:\n%s", stat, out) if stat != dd_stat: - err = ("Return code doesn't match (expected=%s, actual=%s)\n" - "Output:\n%s" % (dd_stat, stat, out)) + err = ( + f"Return code doesn't match (expected={dd_stat}, actual={stat})\n" + f"Output:\n{out}" + ) test.fail(err) if dd_output not in out: - err = ("Output doesn't match:\nExpected:\n%s\nActual:\n%s" - % (dd_output, out)) + err = f"Output doesn't match:\nExpected:\n{dd_output}\nActual:\n{out}" test.fail(err) test.log.info("dd test succeeded.") finally: - #login again in case the previous session expired + # login again in case the previous session expired session = vm.wait_for_login(timeout=timeout) for dev_id in dev_partitioned: utils_disk.clean_partition_linux(session, dev_id) diff --git a/generic/tests/downgrade_qcow2_version.py b/generic/tests/downgrade_qcow2_version.py index 
ad587b4c05..9648bf76be 100644 --- a/generic/tests/downgrade_qcow2_version.py +++ b/generic/tests/downgrade_qcow2_version.py @@ -1,7 +1,4 @@ -from virttest import utils_test -from virttest import error_context -from virttest import qemu_storage -from virttest import data_dir +from virttest import data_dir, error_context, qemu_storage, utils_test @error_context.context_aware @@ -18,8 +15,7 @@ def run(test, params, env): :param env: Dictionary with test environment """ ver_to = params.get("lower_version_qcow2", "0.10") - error_context.context("Downgrade qcow2 image version to '%s'" - % ver_to, test.log.info) + error_context.context(f"Downgrade qcow2 image version to '{ver_to}'", test.log.info) image = params.get("images").split()[0] t_params = params.object_params(image) qemu_image = qemu_storage.QemuImg(t_params, data_dir.get_data_dir(), image) @@ -28,5 +24,5 @@ def run(test, params, env): actual_compat = utils_test.get_image_version(qemu_image) if actual_compat != ver_to: err_msg = "Fail to downgrade qcow2 image version!" - err_msg += "Actual: %s, expect: %s" % (actual_compat, ver_to) + err_msg += f"Actual: {actual_compat}, expect: {ver_to}" test.fail(err_msg) diff --git a/generic/tests/ethtool.py b/generic/tests/ethtool.py index bd730459a5..e767385432 100644 --- a/generic/tests/ethtool.py +++ b/generic/tests/ethtool.py @@ -2,11 +2,7 @@ import aexpect from avocado.utils import crypto, process - -from virttest import utils_net -from virttest import utils_misc -from virttest import remote -from virttest import error_context +from virttest import error_context, remote, utils_misc, utils_net @error_context.context_aware @@ -36,23 +32,23 @@ def run(test, params, env): find a way to get it installed using yum/apt-get/ whatever """ + def ethtool_get(session): feature_pattern = { - 'tx': 'tx.*checksumming', - 'rx': 'rx.*checksumming', - 'sg': 'scatter.*gather', - 'tso': 'tcp.*segmentation.*offload', - 'gso': 'generic.*segmentation.*offload', - 'gro': 'generic.*receive.*offload', - 'lro': 'large.*receive.*offload', + "tx": "tx.*checksumming", + "rx": "rx.*checksumming", + "sg": "scatter.*gather", + "tso": "tcp.*segmentation.*offload", + "gso": "generic.*segmentation.*offload", + "gro": "generic.*receive.*offload", + "lro": "large.*receive.*offload", } - o = session.cmd("ethtool -k %s" % ethname) + o = session.cmd(f"ethtool -k {ethname}") status = {} for f in feature_pattern.keys(): try: - temp = re.findall( - "%s: (.*)" % feature_pattern.get(f), o)[0] + temp = re.findall(f"{feature_pattern.get(f)}: (.*)", o)[0] if temp.find("[fixed]") != -1: test.log.debug("%s is fixed", f) continue @@ -71,23 +67,25 @@ def ethtool_set(session, status): :param status: New status will be changed to """ txt = "Set offload status for device " - txt += "'%s': %s" % (ethname, str(status)) + txt += f"'{ethname}': {str(status)}" error_context.context(txt, test.log.info) - cmd = "ethtool -K %s " % ethname - cmd += " ".join([o + ' ' + s for o, s in status.items()]) - err_msg = "Failed to set offload status for device '%s'" % ethname + cmd = f"ethtool -K {ethname} " + cmd += " ".join([o + " " + s for o, s in status.items()]) + err_msg = f"Failed to set offload status for device '{ethname}'" try: session.cmd_output_safe(cmd) except aexpect.ShellCmdError as e: test.log.error("%s, detail: %s", err_msg, e) return False - curr_status = dict((k, v) for k, v in ethtool_get(session).items() - if k in status.keys()) + curr_status = dict( + (k, v) for k, v in ethtool_get(session).items() if k in status.keys() + ) if curr_status != 
status: - test.log.error("%s, got: '%s', expect: '%s'", err_msg, - str(curr_status), str(status)) + test.log.error( + "%s, got: '%s', expect: '%s'", err_msg, str(curr_status), str(status) + ) return False return True @@ -107,7 +105,7 @@ def compare_md5sum(name): error_context.context(txt, test.log.info) host_result = crypto.hash_file(name, algorithm="md5") try: - o = session.cmd_output("md5sum %s" % name) + o = session.cmd_output(f"md5sum {name}") guest_result = re.findall(r"\w+", o)[0] except IndexError: test.log.error("Could not get file md5sum in guest") @@ -124,51 +122,54 @@ def transfer_file(src): :return: Tuple (status, error msg/tcpdump result) """ sess = vm.wait_for_login(timeout=login_timeout) - session.cmd_output("rm -rf %s" % filename) - dd_cmd = ("dd if=/dev/urandom of=%s bs=1M count=%s" % - (filename, params.get("filesize"))) - failure = (False, "Failed to create file using dd, cmd: %s" % dd_cmd) - txt = "Creating file in source host, cmd: %s" % dd_cmd + session.cmd_output(f"rm -rf {filename}") + dd_cmd = "dd if=/dev/urandom of={} bs=1M count={}".format( + filename, + params.get("filesize"), + ) + failure = (False, f"Failed to create file using dd, cmd: {dd_cmd}") + txt = f"Creating file in source host, cmd: {dd_cmd}" error_context.context(txt, test.log.info) - ethname = utils_net.get_linux_ifname(session, - vm.get_mac_address(0)) - tcpdump_cmd = "tcpdump -lep -i %s -s 0 tcp -vv port ssh" % ethname + ethname = utils_net.get_linux_ifname(session, vm.get_mac_address(0)) + tcpdump_cmd = f"tcpdump -lep -i {ethname} -s 0 tcp -vv port ssh" if src == "guest": - tcpdump_cmd += " and src %s" % guest_ip + tcpdump_cmd += f" and src {guest_ip}" copy_files_func = vm.copy_files_from try: sess.cmd_output(dd_cmd, timeout=360) - except aexpect.ShellCmdError as e: + except aexpect.ShellCmdError: return failure else: - tcpdump_cmd += " and dst %s" % guest_ip + tcpdump_cmd += f" and dst {guest_ip}" copy_files_func = vm.copy_files_to try: process.system(dd_cmd, shell=True) - except process.CmdError as e: + except process.CmdError: return failure # only capture the new tcp port after offload setup original_tcp_ports = re.findall( - r"tcp.*:(\d+).*%s" % guest_ip, - process.system_output("/bin/netstat -nap").decode()) + rf"tcp.*:(\d+).*{guest_ip}", + process.system_output("/bin/netstat -nap").decode(), + ) for i in original_tcp_ports: - tcpdump_cmd += " and not port %s" % i + tcpdump_cmd += f" and not port {i}" - txt = "Listening traffic using command: %s" % tcpdump_cmd + txt = f"Listening traffic using command: {tcpdump_cmd}" error_context.context(txt, test.log.info) sess.sendline(tcpdump_cmd) if not utils_misc.wait_for( - lambda: session.cmd_status("pgrep tcpdump") == 0, 30): + lambda: session.cmd_status("pgrep tcpdump") == 0, 30 + ): return (False, "Tcpdump process wasn't launched") - txt = "Transferring file %s from %s" % (filename, src) + txt = f"Transferring file {filename} from {src}" error_context.context(txt, test.log.info) try: copy_files_func(filename, filename) except remote.SCPError as e: - return (False, "File transfer failed (%s)" % e) + return (False, f"File transfer failed ({e})") session.cmd("killall tcpdump") try: @@ -201,8 +202,9 @@ def so_callback(status="on"): return False error_context.context("Check if contained large frame", test.log.info) # MTU: default IPv4 MTU is 1500 Bytes, ethernet header is 14 Bytes - return (status == "on") ^ (len([i for i in re.findall( - r"length (\d*):", o) if int(i) > mtu]) == 0) + return (status == "on") ^ ( + len([i for i in re.findall(r"length 
(\d*):", o) if int(i) > mtu]) == 0 + ) def ro_callback(status="on"): s, o = transfer_file("host") @@ -239,7 +241,14 @@ def ro_callback(status="on"): "tx": (tx_callback, (), ()), "rx": (rx_callback, (), ()), "sg": (tx_callback, ("tx",), ()), - "tso": (so_callback, ("tx", "sg",), ("gso",)), + "tso": ( + so_callback, + ( + "tx", + "sg", + ), + ("gso",), + ), "gso": (so_callback, (), ("tso",)), "gro": (ro_callback, ("rx",), ("lro",)), "lro": (rx_callback, (), ("gro",)), @@ -254,15 +263,20 @@ def ro_callback(status="on"): offload_stat.update(dict.fromkeys(test_matrix[f_type][1], "on")) # lro is fixed for e1000 and e1000e, while trying to exclude # lro by setting "lro off", the command of ethtool returns error - if not (f_type == "gro" and (vm.virtnet[0].nic_model == "e1000e" - or vm.virtnet[0].nic_model == "e1000")): + if not ( + f_type == "gro" + and ( + vm.virtnet[0].nic_model == "e1000e" + or vm.virtnet[0].nic_model == "e1000" + ) + ): offload_stat.update(dict.fromkeys(test_matrix[f_type][2], "off")) if not ethtool_set(session, offload_stat): e_msg = "Failed to set offload status" test.log.error(e_msg) failed_tests.append(e_msg) - txt = "Run callback function %s" % callback.__name__ + txt = f"Run callback function {callback.__name__}" error_context.context(txt, test.log.info) # Some older kernel versions split packets by GSO @@ -270,23 +284,23 @@ def ro_callback(status="on"): # corrupts our results. Disable check when GSO is # enabled. if not callback(status="on") and f_type != "gso": - e_msg = "Callback failed after enabling %s" % f_type + e_msg = f"Callback failed after enabling {f_type}" test.log.error(e_msg) failed_tests.append(e_msg) if not ethtool_set(session, {f_type: "off"}): - e_msg = "Failed to disable %s" % f_type + e_msg = f"Failed to disable {f_type}" test.log.error(e_msg) failed_tests.append(e_msg) - txt = "Run callback function %s" % callback.__name__ + txt = f"Run callback function {callback.__name__}" error_context.context(txt, test.log.info) if not callback(status="off"): - e_msg = "Callback failed after disabling %s" % f_type + e_msg = f"Callback failed after disabling {f_type}" test.log.error(e_msg) failed_tests.append(e_msg) if failed_tests: - test.fail("Failed tests: %s" % failed_tests) + test.fail(f"Failed tests: {failed_tests}") finally: try: @@ -299,5 +313,4 @@ def ro_callback(status="on"): session = vm.wait_for_serial_login(timeout=login_timeout) ethtool_restore_params(session, pretest_status) except Exception as detail: - test.log.warn("Could not restore parameter of" - " eth card: '%s'", detail) + test.log.warning("Could not restore parameter of" " eth card: '%s'", detail) diff --git a/generic/tests/fillup_disk.py b/generic/tests/fillup_disk.py index a73ee570c3..af2a1fe689 100644 --- a/generic/tests/fillup_disk.py +++ b/generic/tests/fillup_disk.py @@ -28,8 +28,7 @@ def run(test, params, env): number = 0 try: - error_context.context("Start filling the disk in %s" % fill_dir, - test.log.info) + error_context.context(f"Start filling the disk in {fill_dir}", test.log.info) cmd = params.get("fillup_cmd") while not filled: # As we want to test the backing file, so bypass the cache @@ -40,7 +39,7 @@ def run(test, params, env): test.log.debug("Successfully filled up the disk") filled = True elif s != 0: - test.fail("Command dd failed to execute: %s" % o) + test.fail(f"Command dd failed to execute: {o}") number += 1 finally: error_context.context("Cleaning the temporary files...", test.log.info) diff --git a/generic/tests/guest_suspend.py b/generic/tests/guest_suspend.py 
index d67801a21e..6b7f51c058 100644 --- a/generic/tests/guest_suspend.py +++ b/generic/tests/guest_suspend.py @@ -1,9 +1,7 @@ -from virttest import error_context -from virttest import utils_test +from virttest import error_context, utils_test class GuestSuspendBaseTest(utils_test.qemu.GuestSuspend): - def do_guest_suspend(self, **args): suspend_type = args.get("suspend_type", self.SUSPEND_TYPE_MEM) @@ -65,7 +63,8 @@ def guest_suspend_mem(self, params): suspend_bg_program_chk_cmd=params.get("s3_bg_program_chk_cmd"), suspend_bg_program_kill_cmd=params.get("s3_bg_program_kill_cmd"), suspend_start_cmd=params.get("s3_start_cmd"), - suspend_log_chk_cmd=params.get("s3_log_chk_cmd")) + suspend_log_chk_cmd=params.get("s3_log_chk_cmd"), + ) def guest_suspend_disk(self, params): """ @@ -91,11 +90,11 @@ def guest_suspend_disk(self, params): suspend_bg_program_chk_cmd=params.get("s4_bg_program_chk_cmd"), suspend_bg_program_kill_cmd=params.get("s4_bg_program_kill_cmd"), suspend_start_cmd=params.get("s4_start_cmd"), - suspend_log_chk_cmd=params.get("s4_log_chk_cmd")) + suspend_log_chk_cmd=params.get("s4_log_chk_cmd"), + ) class GuestSuspendNegativeTest(GuestSuspendBaseTest): - """ This class is used to test the situation which sets 'disable_s3/s4' to '1' in qemu cli. Guest should disable suspend function in this case. @@ -104,8 +103,10 @@ class GuestSuspendNegativeTest(GuestSuspendBaseTest): def do_guest_suspend(self, **args): s, o = self._check_guest_suspend_log(**args) if not s: - self.test.fail("Guest reports support Suspend even if it's" - " disabled in qemu. Output:\n '%s'" % o) + self.test.fail( + "Guest reports support Suspend even if it's" + f" disabled in qemu. Output:\n '{o}'" + ) @error_context.context_aware @@ -131,5 +132,6 @@ def run(test, params, env): elif suspend_type == gs.SUSPEND_TYPE_DISK: gs.guest_suspend_disk(params) else: - test.error("Unknown guest suspend type, Check your" - " 'guest_suspend_type' config.") + test.error( + "Unknown guest suspend type, Check your" " 'guest_suspend_type' config." + ) diff --git a/generic/tests/guest_test.py b/generic/tests/guest_test.py index dfc7bd3674..69a3a47349 100644 --- a/generic/tests/guest_test.py +++ b/generic/tests/guest_test.py @@ -1,11 +1,11 @@ -import os import logging +import os import sys from avocado.core import exceptions from virttest import utils_misc -LOG_JOB = logging.getLogger('avocado.test') +LOG_JOB = logging.getLogger("avocado.test") def run(test, params, env): @@ -35,8 +35,7 @@ def run(test, params, env): if reboot_before_test: test.log.debug("Rebooting guest before test ...") - session = vm.reboot(session, timeout=login_timeout, - serial=serial_login) + session = vm.reboot(session, timeout=login_timeout, serial=serial_login) try: test.log.info("Starting script...") @@ -58,37 +57,39 @@ def run(test, params, env): dst_rsc_dir = params.get("dst_rsc_dir") # Change dir to dst_rsc_dir, and remove the guest script dir there - rm_cmd = "cd %s && (rmdir /s /q %s || del /s /q %s)" % \ - (dst_rsc_dir, rsc_dir, rsc_dir) + rm_cmd = ( + f"cd {dst_rsc_dir} && (rmdir /s /q {rsc_dir} || del /s /q {rsc_dir})" + ) session.cmd(rm_cmd, timeout=test_timeout) test.log.debug("Clean directory succeeded.") # then download the resource. 
- rsc_cmd = "cd %s && %s %s" % ( - dst_rsc_dir, download_cmd, rsc_server) + rsc_cmd = f"cd {dst_rsc_dir} && {download_cmd} {rsc_server}" session.cmd(rsc_cmd, timeout=test_timeout) test.log.info("Download resource finished.") else: - session.cmd_output("del /f %s || rm -r %s" % (dst_rsc_path, dst_rsc_path), - internal_timeout=0) + session.cmd_output( + f"del /f {dst_rsc_path} || rm -r {dst_rsc_path}", + internal_timeout=0, + ) script_path = utils_misc.get_path(test.virtdir, script) vm.copy_files_to(script_path, dst_rsc_path, timeout=60) - cmd = "%s %s %s" % (interpreter, dst_rsc_path, script_params) + cmd = f"{interpreter} {dst_rsc_path} {script_params}" try: test.log.info("------------ Script output ------------") - s, o = session.cmd_status_output(cmd, print_func=test.log.info, - timeout=test_timeout, safe=True) + s, o = session.cmd_status_output( + cmd, print_func=test.log.info, timeout=test_timeout, safe=True + ) if s != 0: - test.fail("Run script '%s' failed, script output is: %s" % (cmd, o)) + test.fail(f"Run script '{cmd}' failed, script output is: {o}") finally: test.log.info("------------ End of script output ------------") if reboot_after_test: test.log.debug("Rebooting guest after test ...") - session = vm.reboot(session, timeout=login_timeout, - serial=serial_login) + session = vm.reboot(session, timeout=login_timeout, serial=serial_login) test.log.debug("guest test PASSED.") finally: @@ -124,7 +125,7 @@ def flush(): return pid flag_fname = "/tmp/guest_test-flag-file-pid-" + str(os.getpid()) - open(flag_fname, 'w').close() + open(flag_fname, "w").close() try: # Launch guest_test run(test, params, env) diff --git a/generic/tests/hwclock.py b/generic/tests/hwclock.py index cb54dfb48e..453c31fdfc 100644 --- a/generic/tests/hwclock.py +++ b/generic/tests/hwclock.py @@ -13,15 +13,16 @@ def run(test, params, env): :param env: Dictionary with the test environment. """ vm = env.get_vm(params["main_vm"]) - date_pattern = params.get("date_pattern", 'Sat *Feb *2 *03:04:.. 1980') + date_pattern = params.get("date_pattern", "Sat *Feb *2 *03:04:.. 1980") vm.verify_alive() session = vm.wait_for_login(timeout=240) test.log.info("Setting hwclock to 2/2/80 03:04:00") session.cmd('/sbin/hwclock --set --date "2/2/80 03:04:00"') - date = session.cmd_output('LC_ALL=C /sbin/hwclock') + date = session.cmd_output("LC_ALL=C /sbin/hwclock") if not re.match(date_pattern, date): - test.fail("Fail to set hwclock back to the 80s. " - "Output of hwclock is '%s'. " - "Expected output pattern is '%s'." % (date.rstrip(), - date_pattern)) + test.fail( + "Fail to set hwclock back to the 80s. " + f"Output of hwclock is '{date.rstrip()}'. " + f"Expected output pattern is '{date_pattern}'." 
+ ) diff --git a/generic/tests/invalid_para_mq.py b/generic/tests/invalid_para_mq.py index 499662ccbf..bbef86ad1b 100644 --- a/generic/tests/invalid_para_mq.py +++ b/generic/tests/invalid_para_mq.py @@ -1,8 +1,6 @@ import re -from virttest import utils_net -from virttest import env_process -from virttest import error_context +from virttest import env_process, error_context, utils_net @error_context.context_aware @@ -22,8 +20,7 @@ def run(test, params, env): params["start_vm"] = "yes" nic_queues = int(params["queues"]) try: - error_context.context("Boot the vm using queues %s'" % nic_queues, - test.log.info) + error_context.context(f"Boot the vm using queues {nic_queues}'", test.log.info) env_process.preprocess_vm(test, params, env, vm_name) vm = env.get_vm(vm_name) vm.destroy() @@ -32,19 +29,22 @@ def run(test, params, env): message = str(exp) # clean up tap device when qemu coredump to ensure, # to ensure next test has clean network envrioment - if (hasattr(exp, 'ifname') and exp.ifname and - exp.ifname in utils_net.get_host_iface()): + if ( + hasattr(exp, "ifname") + and exp.ifname + and exp.ifname in utils_net.get_host_iface() + ): try: bridge = params.get("netdst", "switch") utils_net.del_from_bridge(exp.ifname, bridge) except Exception as warning: - test.log.warn("Error occurent when clean tap device(%s)", - str(warning)) + test.log.warning( + "Error occurent when clean tap device(%s)", str(warning) + ) error_context.context("Check Qemu not coredump", test.log.info) if "(core dumped)" in message: test.fail("Qemu core dumped when boot with invalid parameters.") - error_context.context("Check Qemu quit with except message", - test.log.info) - if not re.search(params['key_words'], message, re.M | re.I): + error_context.context("Check Qemu quit with except message", test.log.info) + if not re.search(params["key_words"], message, re.M | re.I): test.log.info("Error message: %s", message) test.fail("Can't detect expect error") diff --git a/generic/tests/iofuzz.py b/generic/tests/iofuzz.py index 0fa832070d..0371074250 100644 --- a/generic/tests/iofuzz.py +++ b/generic/tests/iofuzz.py @@ -1,13 +1,8 @@ -import re import random +import re import aexpect - -from virttest import data_dir -from virttest import qemu_storage -from virttest import qemu_vm -from virttest import storage -from virttest import virt_vm +from virttest import data_dir, qemu_storage, qemu_vm, storage, virt_vm def run(test, params, env): @@ -28,11 +23,12 @@ def run(test, params, env): :param params: Dictionary with the test parameters :param env: Dictionary with test environment. """ + def qemu_img_check(): """ Check guest disk image, and backup image when error occured """ - params["backup_image_on_check_error"] = 'yes' + params["backup_image_on_check_error"] = "yes" base_dir = data_dir.get_data_dir() image_name = storage.get_image_filename(params, base_dir) image = qemu_storage.QemuImg(params, base_dir, image_name) @@ -48,8 +44,10 @@ def outb(session, port, data): value will be converted to octal before its written. """ test.log.debug("outb(0x%x, 0x%x)", port, data) - outb_cmd = ("echo -e '\\%s' | dd of=/dev/port seek=%d bs=1 count=1" % - (oct(data), port)) + outb_cmd = "echo -e '\\%s' | dd of=/dev/port seek=%d bs=1 count=1" % ( + oct(data), + port, + ) try: session.cmd(outb_cmd) except aexpect.ShellError as err: @@ -81,24 +79,24 @@ def fuzz(test, session, inst_list): :raise error.TestFail: If the VM process dies in the middle of the fuzzing procedure. 
""" - for (wr_op, operand) in inst_list: + for wr_op, operand in inst_list: if wr_op == "read": inb(session, operand[0]) elif wr_op == "write": outb(session, operand[0], operand[1]) else: - test.error("Unknown command %s" % wr_op) + test.error(f"Unknown command {wr_op}") if not session.is_responsive(): test.log.debug("Session is not responsive") try: vm.verify_alive() except qemu_vm.QemuSegFaultError as err: - test.fail("Qemu crash, error info: %s" % err) + test.fail(f"Qemu crash, error info: {err}") except virt_vm.VMDeadKernelCrashError as err: - test.fail("Guest kernel crash, info: %s" % err) + test.fail(f"Guest kernel crash, info: {err}") else: - test.log.warn("Guest is not alive during test") + test.log.warning("Guest is not alive during test") if vm.process.is_alive(): test.log.debug("VM is alive, try to re-login") @@ -109,8 +107,7 @@ def fuzz(test, session, inst_list): qemu_img_check() session = vm.reboot(method="system_reset") else: - test.fail("VM has quit abnormally during " - "%s: %s" % (wr_op, operand)) + test.fail("VM has quit abnormally during " f"{wr_op}: {operand}") login_timeout = float(params.get("login_timeout", 240)) vm = env.get_vm(params["main_vm"]) @@ -129,10 +126,10 @@ def fuzz(test, session, inst_list): skip_devices = params.get("skip_devices", "") fuzz_count = int(params.get("fuzz_count", 10)) - for (beg, end, name) in devices: + for beg, end, name in devices: ports[(int(beg, base=16), int(end, base=16))] = name.strip() - for (beg, end) in ports.keys(): + for beg, end in ports.keys(): name = ports[(beg, end)] if name in skip_devices: test.log.info("Skipping device %s", name) @@ -151,8 +148,9 @@ def fuzz(test, session, inst_list): # Write random values to random ports of the range for _ in range(fuzz_count * (end - beg + 1)): - inst.append(("write", [o_random.randint(beg, end), - o_random.randint(0, 255)])) + inst.append( + ("write", [o_random.randint(beg, end), o_random.randint(0, 255)]) + ) fuzz(test, session, inst) vm.verify_alive() diff --git a/generic/tests/iometer_windows.py b/generic/tests/iometer_windows.py index 067b22992e..ff931ea52e 100644 --- a/generic/tests/iometer_windows.py +++ b/generic/tests/iometer_windows.py @@ -2,14 +2,13 @@ :author: Golita Yue :author: Amos Kong """ -import time -import re + import os +import re +import time + +from virttest import data_dir, error_context, utils_misc, utils_test -from virttest import data_dir -from virttest import error_context -from virttest import utils_misc -from virttest import utils_test from provider import win_driver_utils @@ -29,6 +28,7 @@ def run(test, params, env): :param params: Dictionary with the test parameters :param env: Dictionary with test environment. 
""" + def install_iometer(): error_context.context("Install Iometer", test.log.info) session.cmd(re.sub("WIN_UTILS", vol_utils, ins_cmd), cmd_timeout) @@ -37,19 +37,22 @@ def install_iometer(): def register_iometer(): error_context.context("Register Iometer", test.log.info) session.cmd_output( - re.sub("WIN_UTILS", vol_utils, params["register_cmd"]), cmd_timeout) + re.sub("WIN_UTILS", vol_utils, params["register_cmd"]), cmd_timeout + ) def prepare_ifc_file(): error_context.context("Prepare icf for Iometer", test.log.info) icf_file = os.path.join(data_dir.get_deps_dir(), "iometer", icf_name) - vm.copy_files_to(icf_file, "%s\\%s" % (ins_path, icf_name)) + vm.copy_files_to(icf_file, f"{ins_path}\\{icf_name}") def _is_iometer_alive(): cmd = 'TASKLIST /FI "IMAGENAME eq Iometer.exe' _session = vm.wait_for_login(timeout=360) if not utils_misc.wait_for( - lambda: 'Iometer.exe' in _session.cmd_output( - cmd, timeout=180), 600, step=3.0): + lambda: "Iometer.exe" in _session.cmd_output(cmd, timeout=180), + 600, + step=3.0, + ): test.fail("Iometer is not alive!") _session.close() @@ -61,23 +64,23 @@ def _run_backgroud(args): def run_iometer(): error_context.context("Start Iometer", test.log.info) args = ( - ' && '.join((("cd %s" % ins_path), run_cmd % (icf_name, res_file))), - run_timeout) - if params.get('bg_mode', 'no') == 'yes': + " && ".join(((f"cd {ins_path}"), run_cmd % (icf_name, res_file))), + run_timeout, + ) + if params.get("bg_mode", "no") == "yes": _run_backgroud(args) _is_iometer_alive() - time.sleep(int(params.get('sleep_time', '180'))) + time.sleep(int(params.get("sleep_time", "180"))) _is_iometer_alive() else: session.cmd(*args) - error_context.context( - "Copy result '%s' to host" % res_file, test.log.info) + error_context.context(f"Copy result '{res_file}' to host", test.log.info) vm.copy_files_from(res_file, test.resultsdir) def change_vm_status(): - method, command = params.get('command_opts').split(',') - test.log.info('Sending command(%s): %s', method, command) - if method == 'shell': + method, command = params.get("command_opts").split(",") + test.log.info("Sending command(%s): %s", method, command) + if method == "shell": vm.wait_for_login(timeout=360).sendline(command) else: getattr(vm.monitor, command)() @@ -86,9 +89,9 @@ def change_vm_status(): raise test.fail("Not received SHUTDOWN QMP event.") def check_vm_status(timeout=600): - action = 'shutdown' if shutdown_vm else 'login' - if not getattr(vm, 'wait_for_%s' % action)(timeout=timeout): - test.fail('Failed to %s vm.' % action) + action = "shutdown" if shutdown_vm else "login" + if not getattr(vm, f"wait_for_{action}")(timeout=timeout): + test.fail(f"Failed to {action} vm.") def format_multi_disks(): disk_letters = params["disk_letters"].split() @@ -105,8 +108,8 @@ def format_multi_disks(): res_file = params["result_file"] run_cmd = params["run_cmd"] run_timeout = int(params.get("run_timeout", 1000)) - shutdown_vm = params.get('shutdown_vm', 'no') == 'yes' - reboot_vm = params.get('reboot_vm', 'no') == 'yes' + shutdown_vm = params.get("shutdown_vm", "no") == "yes" + reboot_vm = params.get("reboot_vm", "no") == "yes" vm = env.get_vm(params["main_vm"]) vm.verify_alive() session = vm.wait_for_login(timeout=360) @@ -118,7 +121,7 @@ def format_multi_disks(): # events ready, add 10s to wait events done. 
time.sleep(10) # format the target disk - if params.get('format_multi_disks', 'no') == 'yes': + if params.get("format_multi_disks", "no") == "yes": format_multi_disks() else: utils_test.run_virt_sub_test(test, params, env, "format_disk") diff --git a/generic/tests/ioquit.py b/generic/tests/ioquit.py index ef0771390c..89688cb617 100644 --- a/generic/tests/ioquit.py +++ b/generic/tests/ioquit.py @@ -1,12 +1,9 @@ -import time import random -import six +import time -from virttest import qemu_storage -from virttest import data_dir -from virttest import error_context -from virttest import utils_misc +import six from avocado.utils import process +from virttest import data_dir, error_context, qemu_storage, utils_misc @error_context.context_aware @@ -54,8 +51,7 @@ def run(test, params, env): except Exception as exc: if "Leaked clusters" not in six.text_type(exc): raise - error_context.context("Detected cluster leaks, try to repair it", - test.log.info) + error_context.context("Detected cluster leaks, try to repair it", test.log.info) restore_cmd = params.get("image_restore_cmd") % image.image_filename cmd_status = process.system(restore_cmd, shell=True) if cmd_status: diff --git a/generic/tests/iozone_windows.py b/generic/tests/iozone_windows.py index e5e9abdd34..af37132e5b 100644 --- a/generic/tests/iozone_windows.py +++ b/generic/tests/iozone_windows.py @@ -2,11 +2,14 @@ import re import time -from virttest import postprocess_iozone -from virttest import utils_misc -from virttest import utils_test -from virttest import utils_disk -from virttest import error_context +from virttest import ( + error_context, + postprocess_iozone, + utils_disk, + utils_misc, + utils_test, +) + from provider import win_driver_utils @@ -31,11 +34,13 @@ def post_result(results_path, analysisdir): :params results_path: iozone test result path :params analysisdir: output of analysis result """ - a = postprocess_iozone.IOzoneAnalyzer(list_files=[results_path], - output_dir=analysisdir) + a = postprocess_iozone.IOzoneAnalyzer( + list_files=[results_path], output_dir=analysisdir + ) a.analyze() - p = postprocess_iozone.IOzonePlotter(results_file=results_path, - output_dir=analysisdir) + p = postprocess_iozone.IOzonePlotter( + results_file=results_path, output_dir=analysisdir + ) p.plot_all() def get_driver(): @@ -54,7 +59,7 @@ def get_driver(): return driver_name def run_iozone_parallel(timeout): - """ Run the iozone parallel. """ + """Run the iozone parallel.""" iozone_sessions = [] iozone_threads = [] thread_maps = {} @@ -78,42 +83,43 @@ def run_iozone_parallel(timeout): thread_name = iozone_thread.name iozone_threads.remove(iozone_thread) iozone_threads.append( - utils_misc.InterruptedThread(thread_maps[thread_name][0], - thread_maps[thread_name][1])) + utils_misc.InterruptedThread( + thread_maps[thread_name][0], thread_maps[thread_name][1] + ) + ) iozone_threads[-1].name = thread_name iozone_threads[-1].start() for iozone_thread in iozone_threads: iozone_thread.join() - test.log.info('All the iozone threads are done.') + test.log.info("All the iozone threads are done.") def check_gpt_labletype(disk_index): """ Check the disk is gpt labletype. 
""" cmd = "echo list disk > {0} && diskpart /s {0} && del {0}" - pattern = r'Disk %s.+?B.{8}\*' % disk_index + pattern = rf"Disk {disk_index}.+?B.{{8}}\*" return re.search(pattern, session.cmd_output(cmd.format("test.dp"))) timeout = int(params.get("login_timeout", 360)) iozone_timeout = int(params.get("iozone_timeout")) - disk_letters = params.get("disk_letter", 'C').split() + disk_letters = params.get("disk_letter", "C").split() disk_indexes = params.get("disk_index", "2").split() disk_fstypes = params.get("disk_fstype", "ntfs").split() labletype = params.get("labletype", "msdos") - results_path = os.path.join(test.resultsdir, - 'raw_output_%s' % test.iteration) - analysisdir = os.path.join(test.resultsdir, 'analysis_%s' % test.iteration) + results_path = os.path.join(test.resultsdir, f"raw_output_{test.iteration}") + analysisdir = os.path.join(test.resultsdir, f"analysis_{test.iteration}") vm = env.get_vm(params["main_vm"]) vm.verify_alive() session = vm.wait_for_login(timeout=timeout) driver_name = get_driver() if driver_name: - session = utils_test.qemu.windrv_check_running_verifier(session, vm, - test, driver_name, - timeout) + session = utils_test.qemu.windrv_check_running_verifier( + session, vm, test, driver_name, timeout + ) if params.get("format_disk", "no") == "yes": for index, letter, fstype in zip(disk_indexes, disk_letters, disk_fstypes): @@ -121,14 +127,16 @@ def check_gpt_labletype(disk_index): if orig_letters: orig_letter = orig_letters[0] if orig_letter != letter: - test.log.info("Change the drive letter from %s to %s", - orig_letter, letter) + test.log.info( + "Change the drive letter from %s to %s", orig_letter, letter + ) utils_disk.drop_drive_letter(session, orig_letter) utils_disk.set_drive_letter(session, index, target_letter=letter) else: error_context.context("Format disk", test.log.info) - utils_misc.format_windows_disk(session, index, letter, fstype=fstype, - labletype=labletype) + utils_misc.format_windows_disk( + session, index, letter, fstype=fstype, labletype=labletype + ) if params.get("gpt_check", "no") == "yes": if not check_gpt_labletype(disk_indexes[0]): @@ -136,23 +144,23 @@ def check_gpt_labletype(disk_index): cmd = params["iozone_cmd"] iozone_cmd = utils_misc.set_winutils_letter(session, cmd) - error_context.context("Running IOzone command on guest, timeout %ss" - % iozone_timeout, test.log.info) + error_context.context( + f"Running IOzone command on guest, timeout {iozone_timeout}s", test.log.info + ) - if params.get('run_iozone_parallel', 'no') == 'yes': - disk_letters.append('C') - run_iozone_parallel(int(params['stress_timeout'])) + if params.get("run_iozone_parallel", "no") == "yes": + disk_letters.append("C") + run_iozone_parallel(int(params["stress_timeout"])) if params.get("need_memory_leak_check", "no") == "yes": win_driver_utils.memory_leak_check(vm, test, params) return - status, results = session.cmd_status_output(cmd=iozone_cmd, - timeout=iozone_timeout) - error_context.context("Write results to %s" % results_path, test.log.info) + status, results = session.cmd_status_output(cmd=iozone_cmd, timeout=iozone_timeout) + error_context.context(f"Write results to {results_path}", test.log.info) if status != 0: - test.fail("iozone test failed: %s" % results) + test.fail(f"iozone test failed: {results}") - with open(results_path, 'w') as file: + with open(results_path, "w") as file: file.write(results) if params.get("post_result", "no") == "yes": diff --git a/generic/tests/jumbo.py b/generic/tests/jumbo.py index 7af16c0102..e489fd92b7 100644 --- 
a/generic/tests/jumbo.py +++ b/generic/tests/jumbo.py @@ -2,12 +2,14 @@ import re from avocado.utils import process -from virttest import utils_misc -from virttest import utils_test -from virttest import utils_net -from virttest import utils_sriov -from virttest import error_context -from virttest import env_process +from virttest import ( + env_process, + error_context, + utils_misc, + utils_net, + utils_sriov, + utils_test, +) @error_context.context_aware @@ -30,25 +32,24 @@ def run(test, params, env): :param params: Dictionary with the test parameters. :param env: Dictionary with test environment. """ + def get_ovs_ports(ovs): - ''' + """ get the ovs bridge all Interface list. :param ovs: Ovs bridge name - ''' - cmd = "ovs-vsctl list-ports %s" % ovs + """ + cmd = f"ovs-vsctl list-ports {ovs}" return process.getoutput(cmd, shell=True) netdst = params.get("netdst", "switch") host_bridges = utils_net.find_bridge_manager(netdst) if not isinstance(host_bridges, utils_net.Bridge): - ovs = host_bridges host_hw_interface = get_ovs_ports(netdst) tmp_ports = re.findall(r"t[0-9]{1,}-[a-zA-Z0-9]{6}", host_hw_interface) if tmp_ports: for p in tmp_ports: - process.system_output("ovs-vsctl del-port %s %s" % - (netdst, p)) + process.system_output(f"ovs-vsctl del-port {netdst} {p}") params["start_vm"] = "yes" env_process.preprocess_vm(test, params, env, params["main_vm"]) @@ -57,8 +58,7 @@ def get_ovs_ports(ovs): mtu_default = 1500 mtu = params.get("mtu", "1500") def_max_icmp_size = int(mtu) - 28 - max_icmp_pkt_size = int(params.get("max_icmp_pkt_size", - def_max_icmp_size)) + max_icmp_pkt_size = int(params.get("max_icmp_pkt_size", def_max_icmp_size)) flood_time = params.get("flood_time", "300") os_type = params.get("os_type") os_variant = params.get("os_variant") @@ -82,8 +82,7 @@ def get_ovs_ports(ovs): ifaces_in_use = host_bridges.list_iface() target_ifaces = set(ifaces_in_use) - set(br_in_use) - error_context.context("Change all Bridge NICs MTU to %s" % - mtu, test.log.info) + error_context.context(f"Change all Bridge NICs MTU to {mtu}", test.log.info) for iface in target_ifaces: process.run(host_mtu_cmd % (iface, mtu), shell=True) @@ -93,25 +92,27 @@ def get_ovs_ports(ovs): mac = vm.get_mac_address(0) if os_type == "linux": ethname = utils_net.get_linux_ifname(session, mac) - guest_mtu_cmd = "ifconfig %s mtu %s" % (ethname, mtu) + guest_mtu_cmd = f"ifconfig {ethname} mtu {mtu}" else: connection_id = utils_net.get_windows_nic_attribute( - session, "macaddress", mac, "netconnectionid") + session, "macaddress", mac, "netconnectionid" + ) index = utils_net.get_windows_nic_attribute( - session, "netconnectionid", connection_id, "index") + session, "netconnectionid", connection_id, "index" + ) if os_variant == "winxp": pnpdevice_id = utils_net.get_windows_nic_attribute( - session, "netconnectionid", connection_id, "pnpdeviceid") + session, "netconnectionid", connection_id, "pnpdeviceid" + ) cd_num = utils_misc.get_winutils_vol(session) - copy_cmd = r"xcopy %s:\devcon\wxp_x86\devcon.exe c:\ " % cd_num + copy_cmd = rf"xcopy {cd_num}:\devcon\wxp_x86\devcon.exe c:\ " session.cmd(copy_cmd) reg_set_mtu_pattern = params.get("reg_mtu_cmd") mtu_key_word = params.get("mtu_key", "MTU") - reg_set_mtu = reg_set_mtu_pattern % (int(index), mtu_key_word, - int(mtu)) - guest_mtu_cmd = "%s " % reg_set_mtu + reg_set_mtu = reg_set_mtu_pattern % (int(index), mtu_key_word, int(mtu)) + guest_mtu_cmd = f"{reg_set_mtu} " session.cmd(guest_mtu_cmd) if os_type == "windows": @@ -119,9 +120,9 @@ def get_ovs_ports(ovs): if os_variant 
== "winxp": connection_id = pnpdevice_id.split("&")[-1] mode = "devcon" - utils_net.restart_windows_guest_network(session_serial, - connection_id, - mode=mode) + utils_net.restart_windows_guest_network( + session_serial, connection_id, mode=mode + ) error_context.context("Chaning the MTU of host tap ...", test.log.info) host_mtu_cmd = "ifconfig %s mtu %s" @@ -131,95 +132,104 @@ def get_ovs_ports(ovs): process.run(host_mtu_cmd % (base_if, mtu), shell=True) process.run(host_mtu_cmd % (ifname, mtu), shell=True) - error_context.context("Add a temporary static ARP entry ...", - test.log.info) - arp_add_cmd = "arp -s %s %s -i %s" % (guest_ip, mac, ifname) + error_context.context("Add a temporary static ARP entry ...", test.log.info) + arp_add_cmd = f"arp -s {guest_ip} {mac} -i {ifname}" process.run(arp_add_cmd, shell=True) def is_mtu_ok(): - status, _ = utils_test.ping(guest_ip, 1, - packetsize=max_icmp_pkt_size, - hint="do", timeout=2) + status, _ = utils_test.ping( + guest_ip, 1, packetsize=max_icmp_pkt_size, hint="do", timeout=2 + ) return status == 0 def verify_mtu(): test.log.info("Verify the path MTU") - status, output = utils_test.ping(guest_ip, 10, - packetsize=max_icmp_pkt_size, - hint="do", timeout=15) + status, output = utils_test.ping( + guest_ip, 10, packetsize=max_icmp_pkt_size, hint="do", timeout=15 + ) if status != 0: test.log.error(output) test.fail("Path MTU is not as expected") if utils_test.get_loss_ratio(output) != 0: test.log.error(output) - test.fail("Packet loss ratio during MTU " - "verification is not zero") + test.fail("Packet loss ratio during MTU " "verification is not zero") def flood_ping(): test.log.info("Flood with large frames") - utils_test.ping(guest_ip, - packetsize=max_icmp_pkt_size, - flood=True, timeout=float(flood_time)) + utils_test.ping( + guest_ip, + packetsize=max_icmp_pkt_size, + flood=True, + timeout=float(flood_time), + ) def large_frame_ping(count=100): test.log.info("Large frame ping") - _, output = utils_test.ping(guest_ip, count, - packetsize=max_icmp_pkt_size, - timeout=float(count) * 2) + _, output = utils_test.ping( + guest_ip, count, packetsize=max_icmp_pkt_size, timeout=float(count) * 2 + ) ratio = utils_test.get_loss_ratio(output) if ratio != 0: - test.fail("Loss ratio of large frame ping is %s" % ratio) + test.fail(f"Loss ratio of large frame ping is {ratio}") def size_increase_ping(step=random.randrange(90, 110)): test.log.info("Size increase ping") for size in range(0, max_icmp_pkt_size + 1, step): test.log.info("Ping %s with size %s", guest_ip, size) - status, output = utils_test.ping(guest_ip, 1, - packetsize=size, - hint=hint, timeout=1) + status, output = utils_test.ping( + guest_ip, 1, packetsize=size, hint=hint, timeout=1 + ) if status != 0: - status, output = utils_test.ping(guest_ip, 10, - packetsize=size, - adaptive=True, - hint=hint, - timeout=20) + status, output = utils_test.ping( + guest_ip, + 10, + packetsize=size, + adaptive=True, + hint=hint, + timeout=20, + ) fail_ratio = int(params.get("fail_ratio", 50)) if utils_test.get_loss_ratio(output) > fail_ratio: - test.fail("Ping loss ratio is greater " - "than 50% for size %s" % size) + test.fail( + "Ping loss ratio is greater " + "than 50{: f}or size {}".format(*size) + ) test.log.info("Waiting for the MTU to be OK") wait_mtu_ok = 20 if not utils_misc.wait_for(is_mtu_ok, wait_mtu_ok, 0, 1): - test.log.debug(process.getoutput("ifconfig -a", - verbose=False, - ignore_status=True, - shell=True)) - test.error("MTU is not as expected even after %s " - "seconds" % wait_mtu_ok) + 
test.log.debug( + process.getoutput( + "ifconfig -a", verbose=False, ignore_status=True, shell=True + ) + ) + test.error(f"MTU is not as expected even after {wait_mtu_ok} " "seconds") # Functional Test - error_context.context("Checking whether MTU change is ok", - test.log.info) + error_context.context("Checking whether MTU change is ok", test.log.info) if params.get("emulate_vf") == "yes": error_context.context("Create emulate VFs devices", test.log.info) pci_id = params.get("get_pci_id") nic_pci = session.cmd_output(pci_id).strip() check_vf_num = params.get("get_vf_num") sriov_numvfs = int(session.cmd_output(check_vf_num % nic_pci)) - utils_sriov.set_vf(f'/sys/bus/pci/devices/{nic_pci}', vf_no=sriov_numvfs, session=session) + utils_sriov.set_vf( + f"/sys/bus/pci/devices/{nic_pci}", vf_no=sriov_numvfs, session=session + ) ifnames = utils_net.get_linux_ifname(session) for i in range(1, len(ifnames)): - set_vf_mtu_cmd = params.get('set_vf_mtu') - status, output = session.cmd_status_output(set_vf_mtu_cmd % (ifnames[i], mtu)) + set_vf_mtu_cmd = params.get("set_vf_mtu") + status, output = session.cmd_status_output( + set_vf_mtu_cmd % (ifnames[i], mtu) + ) if status != 0: - test.log.info("Setup vf device's mtu failed with: %s" % output) + test.log.info("Setup vf device's mtu failed with: %s", output) ifname = ifnames[1] vf_mac = utils_net.get_linux_mac(session, ifname) - session.cmd_output_safe("ip link set dev %s up" % ifname) + session.cmd_output_safe(f"ip link set dev {ifname} up") session.cmd_output_safe("dhclient -r") - session.cmd_output_safe("dhclient %s" % ifname) + session.cmd_output_safe(f"dhclient {ifname}") guest_ip = utils_net.get_guest_ip_addr(session, vf_mac) if guest_ip is None: test.error("VF can no got ip address") @@ -238,10 +248,9 @@ def size_increase_ping(step=random.randrange(90, 110)): if params.get("emulate_vf") == "yes": ifname = vm.get_ifname(0) guest_ip = vm.get_address(0) - grep_cmd = "grep '%s.*%s' /proc/net/arp" % (guest_ip, ifname) - if process.system(grep_cmd, shell=True) == '0': - process.run("arp -d %s -i %s" % (guest_ip, ifname), - shell=True) + grep_cmd = f"grep '{guest_ip}.*{ifname}' /proc/net/arp" + if process.system(grep_cmd, shell=True) == "0": + process.run(f"arp -d {guest_ip} -i {ifname}", shell=True) test.log.info("Removing the temporary ARP entry successfully") test.log.info("Change back Bridge NICs MTU to %s", mtu_default) diff --git a/generic/tests/kdump.py b/generic/tests/kdump.py index 376590167e..503bb82fb6 100644 --- a/generic/tests/kdump.py +++ b/generic/tests/kdump.py @@ -2,12 +2,9 @@ import os from avocado.utils import process -from virttest import utils_conn -from virttest import utils_misc -from virttest import utils_net -from virttest import error_context +from virttest import error_context, utils_conn, utils_misc, utils_net -LOG_JOB = logging.getLogger('avocado.test') +LOG_JOB = logging.getLogger("avocado.test") @error_context.context_aware @@ -19,14 +16,13 @@ def preprocess_kdump(test, vm, timeout): """ kdump_cfg_path = vm.params.get("kdump_cfg_path", "/etc/kdump.conf") auth_key_path = vm.params.get("auth_key_path") - backup_key_cmd = ("/bin/cp -f %s %s-bk" % - (auth_key_path, auth_key_path)) - cp_kdumpcf_cmd = "/bin/cp -f %s %s-bk" % (kdump_cfg_path, kdump_cfg_path) + backup_key_cmd = f"/bin/cp -f {auth_key_path} {auth_key_path}-bk" + cp_kdumpcf_cmd = f"/bin/cp -f {kdump_cfg_path} {kdump_cfg_path}-bk" cp_kdumpcf_cmd = vm.params.get("cp_kdumpcf_cmd", cp_kdumpcf_cmd) session = vm.wait_for_login(timeout=timeout) if auth_key_path: - 
create_key_cmd = ("/bin/touch %s" % auth_key_path) + create_key_cmd = f"/bin/touch {auth_key_path}" if not os.path.exists("/root/.ssh"): process.run("mkdir /root/.ssh", shell=True) test.log.info("Create authorized_keys file if it not existed.") @@ -52,15 +48,12 @@ def postprocess_kdump(test, vm, timeout): """ kdump_cfg_path = vm.params.get("kdump_cfg_path", "/etc/kdump.conf") auth_key_path = vm.params.get("auth_key_path") - restore_kdumpcf_cmd = ("/bin/cp -f %s-bk %s" % - (kdump_cfg_path, kdump_cfg_path)) - restore_kdumpcf_cmd = vm.params.get("restore_kdumpcf_cmd", - restore_kdumpcf_cmd) + restore_kdumpcf_cmd = f"/bin/cp -f {kdump_cfg_path}-bk {kdump_cfg_path}" + restore_kdumpcf_cmd = vm.params.get("restore_kdumpcf_cmd", restore_kdumpcf_cmd) session = vm.wait_for_login(timeout=timeout) if auth_key_path: - restore_key_cmd = ("/bin/cp -f %s-bk %s" % - (auth_key_path, auth_key_path)) + restore_key_cmd = f"/bin/cp -f {auth_key_path}-bk {auth_key_path}" test.log.info("Restore authorized_keys file.") process.run(restore_key_cmd, shell=True) @@ -74,8 +67,9 @@ def postprocess_kdump(test, vm, timeout): @error_context.context_aware -def kdump_enable(vm, vm_name, crash_kernel_prob_cmd, - kernel_param_cmd, kdump_enable_cmd, timeout): +def kdump_enable( + vm, vm_name, crash_kernel_prob_cmd, kernel_param_cmd, kdump_enable_cmd, timeout +): """ Check, configure and enable the kdump in guest. @@ -89,36 +83,35 @@ def kdump_enable(vm, vm_name, crash_kernel_prob_cmd, kdump_config = vm.params.get("kdump_config") vmcore_path = vm.params.get("vmcore_path", "/var/crash") kdump_method = vm.params.get("kdump_method", "basic") - kdump_propagate_cmd = vm.params.get("kdump_propagate_cmd", - 'kdumpctl propagate') + kdump_propagate_cmd = vm.params.get("kdump_propagate_cmd", "kdumpctl propagate") kdump_enable_timeout = int(vm.params.get("kdump_enable_timeout", 360)) - error_context.context("Try to log into guest '%s'." % vm_name, - LOG_JOB.info) + error_context.context(f"Try to log into guest '{vm_name}'.", LOG_JOB.info) session = vm.wait_for_login(timeout=timeout) - error_context.context("Checking the existence of crash kernel in %s" % - vm_name, LOG_JOB.info) + error_context.context( + f"Checking the existence of crash kernel in {vm_name}", LOG_JOB.info + ) try: session.cmd(crash_kernel_prob_cmd) except Exception: - error_context.context("Crash kernel is not loaded. Trying to load it", - LOG_JOB.info) + error_context.context( + "Crash kernel is not loaded. 
Trying to load it", LOG_JOB.info + ) session.cmd(kernel_param_cmd) session = vm.reboot(session, timeout=timeout) if kdump_config: if kdump_method == "ssh": - host_ip = utils_net.get_ip_address_by_interface( - vm.params.get('netdst')) + host_ip = utils_net.get_ip_address_by_interface(vm.params.get("netdst")) kdump_config = kdump_config % (host_ip, vmcore_path) - error_context.context("Configuring the Core Collector...", - LOG_JOB.info) + error_context.context("Configuring the Core Collector...", LOG_JOB.info) - session.cmd("cat /dev/null > %s" % kdump_cfg_path) - session.cmd("echo 'core_collector makedumpfile -F -c -d 31' > %s" - % kdump_cfg_path) + session.cmd(f"cat /dev/null > {kdump_cfg_path}") + session.cmd( + f"echo 'core_collector makedumpfile -F -c -d 31' > {kdump_cfg_path}" + ) for config_line in kdump_config.split(";"): config_cmd = "echo -e '%s' >> %s " config_con = config_line.strip() @@ -129,26 +122,25 @@ def kdump_enable(vm, vm_name, crash_kernel_prob_cmd, guest_pwd = vm.params.get("guest_pwd", "redhat") guest_ip = vm.get_address() - error_context.context("Setup ssh login without password...", - LOG_JOB.info) + error_context.context("Setup ssh login without password...", LOG_JOB.info) session.cmd("rm -rf /root/.ssh/*") - ssh_connection = utils_conn.SSHConnection(server_ip=host_ip, - server_pwd=host_pwd, - client_ip=guest_ip, - client_pwd=guest_pwd) + ssh_connection = utils_conn.SSHConnection( + server_ip=host_ip, + server_pwd=host_pwd, + client_ip=guest_ip, + client_pwd=guest_pwd, + ) try: ssh_connection.conn_check() except utils_conn.ConnectionError: ssh_connection.conn_setup() ssh_connection.conn_check() - LOG_JOB.info("Trying to propagate with command '%s'", - kdump_propagate_cmd) + LOG_JOB.info("Trying to propagate with command '%s'", kdump_propagate_cmd) session.cmd(kdump_propagate_cmd, timeout=120) - error_context.context("Enabling kdump service...", - LOG_JOB.info) + error_context.context("Enabling kdump service...", LOG_JOB.info) # the initrd may be rebuilt here so we need to wait a little more session.cmd(kdump_enable_cmd, timeout=kdump_enable_timeout) @@ -168,20 +160,24 @@ def crash_test(test, vm, vcpu, crash_cmd, timeout): kdump_method = vm.params.get("kdump_method", "basic") vmcore_rm_cmd = vm.params.get("vmcore_rm_cmd", "rm -rf %s/*") vmcore_rm_cmd = vmcore_rm_cmd % vmcore_path - kdump_restart_cmd = vm.params.get("kdump_restart_cmd", - "systemctl restart kdump.service") - kdump_status_cmd = vm.params.get("kdump_status_cmd", - "systemctl status kdump.service") - kdump_propagate_cmd = vm.params.get("kdump_propagate_cmd", - 'kdumpctl propagate') + kdump_restart_cmd = vm.params.get( + "kdump_restart_cmd", "systemctl restart kdump.service" + ) + kdump_status_cmd = vm.params.get( + "kdump_status_cmd", "systemctl status kdump.service" + ) + kdump_propagate_cmd = vm.params.get("kdump_propagate_cmd", "kdumpctl propagate") session = vm.wait_for_login(timeout=timeout) test.log.info("Delete the vmcore file.") if kdump_method == "ssh": - output = session.cmd("cat %s" % vm.params["kdump_rsa_path"]) + output = session.cmd("cat {}".format(vm.params["kdump_rsa_path"])) process.run(vmcore_rm_cmd, shell=True) - process.run("cat /dev/null > %s" % vm.params["auth_key_path"], - shell=True, sudo=True) + process.run( + "cat /dev/null > {}".format(vm.params["auth_key_path"]), + shell=True, + sudo=True, + ) authorized_key_cmd = vm.params["authorized_key_cmd"] process.run(authorized_key_cmd % output, shell=True, sudo=True) session.cmd(kdump_propagate_cmd, timeout=120) @@ -223,25 +219,25 @@ 
def check_vmcore(test, vm, session, timeout): if not utils_misc.wait_for(lambda: not session.is_responsive(), 240, 0, 1): test.fail("Could not trigger crash.") - error_context.context("Waiting for kernel crash dump to complete", - test.log.info) + error_context.context("Waiting for kernel crash dump to complete", test.log.info) if vm.params.get("kdump_method") != "ssh": session = vm.wait_for_login(timeout=timeout) error_context.context("Probing vmcore file...", test.log.info) if vm.params.get("kdump_method") == "ssh": test.log.info("Checking vmcore file on host") - status = utils_misc.wait_for(lambda: - process.system(vmcore_chk_cmd, - shell=True) == 0, - ignore_errors=True, - timeout=200) + status = utils_misc.wait_for( + lambda: process.system(vmcore_chk_cmd, shell=True) == 0, + ignore_errors=True, + timeout=200, + ) else: test.log.info("Checking vmcore file on guest") - status = utils_misc.wait_for(lambda: - session.cmd_status(vmcore_chk_cmd) == 0, - ignore_errors=True, - timeout=200) + status = utils_misc.wait_for( + lambda: session.cmd_status(vmcore_chk_cmd) == 0, + ignore_errors=True, + timeout=200, + ) if not status: postprocess_kdump(test, vm, timeout) test.fail("Could not found vmcore file.") @@ -269,8 +265,9 @@ def run(test, params, env): def_kdump_enable_cmd = "chkconfig kdump on && service kdump restart" kdump_enable_cmd = params.get("kdump_enable_cmd", def_kdump_enable_cmd) def_crash_kernel_prob_cmd = "grep -q 1 /sys/kernel/kexec_crash_loaded" - crash_kernel_prob_cmd = params.get("crash_kernel_prob_cmd", - def_crash_kernel_prob_cmd) + crash_kernel_prob_cmd = params.get( + "crash_kernel_prob_cmd", def_crash_kernel_prob_cmd + ) kdump_cfg_path = params.get("kdump_cfg_path", "/etc/kdump.conf") vms = params.get("vms", "vm1 vm2").split() @@ -284,24 +281,32 @@ def run(test, params, env): vm_list.append(vm) preprocess_kdump(test, vm, timeout) - vm.copy_files_from(kdump_cfg_path, - os.path.join(test.debugdir, - "kdump.conf-%s" % vm_name)) - - session = kdump_enable(vm, vm_name, crash_kernel_prob_cmd, - kernel_param_cmd, kdump_enable_cmd, timeout) + vm.copy_files_from( + kdump_cfg_path, os.path.join(test.debugdir, f"kdump.conf-{vm_name}") + ) + + session = kdump_enable( + vm, + vm_name, + crash_kernel_prob_cmd, + kernel_param_cmd, + kdump_enable_cmd, + timeout, + ) session_list.append(session) for vm in vm_list: - error_context.context("Kdump Testing, force the Linux kernel" - " to crash", test.log.info) + error_context.context( + "Kdump Testing, force the Linux kernel" " to crash", test.log.info + ) crash_cmd = params.get("crash_cmd", "echo c > /proc/sysrq-trigger") session = vm.wait_for_login(timeout=timeout) - vm.copy_files_from(kdump_cfg_path, - os.path.join(test.debugdir, - "kdump.conf-%s-test" % vm.name)) + vm.copy_files_from( + kdump_cfg_path, + os.path.join(test.debugdir, f"kdump.conf-{vm.name}-test"), + ) if crash_cmd == "nmi": crash_test(test, vm, None, crash_cmd, timeout) else: @@ -311,8 +316,9 @@ def run(test, params, env): crash_test(test, vm, i, crash_cmd, timeout) for i in range(len(vm_list)): - error_context.context("Check the vmcore file after triggering" - " a crash", test.log.info) + error_context.context( + "Check the vmcore file after triggering" " a crash", test.log.info + ) check_vmcore(test, vm_list[i], session_list[i], crash_timeout) finally: for s in session_list: diff --git a/generic/tests/ksm_services.py b/generic/tests/ksm_services.py index 3b2db95051..cda6c8baa6 100644 --- a/generic/tests/ksm_services.py +++ b/generic/tests/ksm_services.py @@ -1,5 +1,5 @@ 
-import re import os +import re import shutil from avocado.utils import process @@ -34,8 +34,9 @@ def test_setting_params(test, ksmctler, params): continue else: set_values[key] = default_values[key] + value_delta - test.log.debug("\nDefault parameters:%s\n" - "Set parameters:%s", default_values, set_values) + test.log.debug( + "\nDefault parameters:%s\n" "Set parameters:%s", default_values, set_values + ) try: # Setting new value @@ -44,7 +45,7 @@ def test_setting_params(test, ksmctler, params): # Restart ksm service to check ksmctler.restart_ksm() except process.CmdError as detail: - test.fail("Set parameters failed:%s" % str(detail)) + test.fail(f"Set parameters failed:{str(detail)}") fail_flag = 0 for key, value in set_values.items(): @@ -65,32 +66,33 @@ def test_ksmtuned_service(test, ksmctler, params): 1.Set debug options for ksmtuned 2.Check if debug log is created """ + def backup_config(ksmtuned_conf): - shutil.copy(ksmtuned_conf, "%s.bak" % ksmtuned_conf) - return "%s.bak" % ksmtuned_conf + shutil.copy(ksmtuned_conf, f"{ksmtuned_conf}.bak") + return f"{ksmtuned_conf}.bak" def debug_ksmtuned(log_path, debug, ksmtuned_conf="/etc/ksmtuned.conf"): try: - fd = open(ksmtuned_conf, 'r') + fd = open(ksmtuned_conf, "r") contents = fd.readlines() fd.close() - except IOError as e: - test.fail("Open ksmtuned config file failed:%s" % e) + except OSError as e: + test.fail(f"Open ksmtuned config file failed:{e}") new_contents = [] for con in contents: if re.match("^.*LOGFILE.*", con): - con = "LOGFILE=%s\n" % log_path + con = f"LOGFILE={log_path}\n" elif re.match("^.*DEBUG.*", con): - con = "DEBUG=%s\n" % debug + con = f"DEBUG={debug}\n" new_contents.append(con) test.log.debug("\nksmtuned configures:\n%s", new_contents) try: - fd = open(ksmtuned_conf, 'w') + fd = open(ksmtuned_conf, "w") fd.writelines(new_contents) fd.close() - except IOError as e: - test.fail("Write options to config file failed:%s" % e) + except OSError as e: + test.fail(f"Write options to config file failed:{e}") log_path = params.get("ksmtuned_log_path", "/var/log/test_ksmtuned") debug = params.get("ksmtuned_debug", 1) @@ -114,7 +116,7 @@ def debug_ksmtuned(log_path, debug, ksmtuned_conf="/etc/ksmtuned.conf"): try: os.remove(log_path) except OSError: - pass # file do not exists + pass # file do not exists def run(test, params, env): diff --git a/generic/tests/linux_stress.py b/generic/tests/linux_stress.py index 425b739e60..20b6118f8f 100644 --- a/generic/tests/linux_stress.py +++ b/generic/tests/linux_stress.py @@ -2,9 +2,7 @@ import time from avocado.core import exceptions - -from virttest import utils_test -from virttest import data_dir +from virttest import data_dir, utils_test def run(test, params, env): @@ -42,8 +40,12 @@ def run(test, params, env): try: up_time[vm.name] = vm.uptime() stress_server[vm.name] = utils_test.VMStress( - vm, stress_type, params, download_type="tarball", - downloaded_file_path=stress_file) + vm, + stress_type, + params, + download_type="tarball", + downloaded_file_path=stress_file, + ) stress_server[vm.name].load_stress_tool() except exceptions.TestError as err_msg: error = True @@ -53,18 +55,17 @@ def run(test, params, env): time.sleep(stress_duration) for vm in vms: try: - s_ping, o_ping = utils_test.ping( - vm.get_address(), count=5, timeout=20) + s_ping, o_ping = utils_test.ping(vm.get_address(), count=5, timeout=20) if s_ping != 0: error = True - test.log.error( - "%s seem to have gone out of network", vm.name) + test.log.error("%s seem to have gone out of network", vm.name) continue 
uptime = vm.uptime() if up_time[vm.name] > uptime: error = True test.log.error( - "%s seem to have rebooted during the stress run", vm.name) + "%s seem to have rebooted during the stress run", vm.name + ) stress_server[vm.name].unload_stress() stress_server[vm.name].clean() vm.verify_dmesg() diff --git a/generic/tests/lvm.py b/generic/tests/lvm.py index ae77fae3a6..149f6ee7cb 100644 --- a/generic/tests/lvm.py +++ b/generic/tests/lvm.py @@ -1,25 +1,28 @@ -import os import logging -from virttest import error_context -from virttest import utils_misc -from virttest import utils_test +import os + +from virttest import error_context, utils_misc, utils_test -LOG_JOB = logging.getLogger('avocado.test') +LOG_JOB = logging.getLogger("avocado.test") @error_context.context_aware def mount_lv(lv_path, session): - error_context.context("mounting filesystem made on logical volume %s" - % os.path.basename(lv_path), LOG_JOB.info) + error_context.context( + f"mounting filesystem made on logical volume {os.path.basename(lv_path)}", + LOG_JOB.info, + ) session.cmd("mkdir -p /mnt/kvm_test_lvm") - session.cmd("mount %s /mnt/kvm_test_lvm" % lv_path) + session.cmd(f"mount {lv_path} /mnt/kvm_test_lvm") @error_context.context_aware def umount_lv(lv_path, session): - error_context.context("umounting filesystem made on logical volume " - "%s" % os.path.basename(lv_path), LOG_JOB.info) - session.cmd("umount %s" % lv_path) + error_context.context( + "umounting filesystem made on logical volume " f"{os.path.basename(lv_path)}", + LOG_JOB.info, + ) + session.cmd(f"umount {lv_path}") session.cmd("rm -rf /mnt/kvm_test_lvm") @@ -54,7 +57,7 @@ def run(test, params, env): vg_name = "vg_kvm_test" lv_name = "lv_kvm_test" - lv_path = "/dev/%s/%s" % (vg_name, lv_name) + lv_path = f"/dev/{vg_name}/{lv_name}" clean = params.get("clean", "yes") timeout = params.get("lvm_timeout", "600") check_mount = params.get("check_mount", "mountpoint /mnt/kvm_test_lvm") @@ -64,32 +67,36 @@ def run(test, params, env): if sub_type == "lvm_create": disk_list = [] for disk in params.objects("images")[-2:]: - d_id = params["blk_extra_params_%s" % disk].split("=")[1] + d_id = params[f"blk_extra_params_{disk}"].split("=")[1] d_path = utils_misc.get_linux_drive_path(session, d_id) if not d_path: - test.error("Failed to get '%s' drive path" % d_id) + test.error(f"Failed to get '{d_id}' drive path") disk_list.append(d_path) disks = " ".join(disk_list) - error_context.context("adding physical volumes %s" % disks, - test.log.info) - session.cmd("pvcreate %s" % disks) - error_context.context("creating a volume group out of %s" % disks, - test.log.info) - session.cmd("vgcreate %s %s" % (vg_name, disks)) - error_context.context("activating volume group %s" % vg_name, - test.log.info) - session.cmd("vgchange -ay %s" % vg_name) - error_context.context("creating logical volume on volume group %s" - % vg_name, test.log.info) - session.cmd("lvcreate -L2000 -n %s %s" % (lv_name, vg_name)) - error_context.context("creating %s filesystem on logical volume" - " %s" % (fs_type, lv_name), test.log.info) - session.cmd("yes | mkfs.%s %s" % (fs_type, lv_path), timeout=int(timeout)) + error_context.context(f"adding physical volumes {disks}", test.log.info) + session.cmd(f"pvcreate {disks}") + error_context.context( + f"creating a volume group out of {disks}", test.log.info + ) + session.cmd(f"vgcreate {vg_name} {disks}") + error_context.context(f"activating volume group {vg_name}", test.log.info) + session.cmd(f"vgchange -ay {vg_name}") + error_context.context( + f"creating 
logical volume on volume group {vg_name}", test.log.info + ) + session.cmd(f"lvcreate -L2000 -n {lv_name} {vg_name}") + error_context.context( + f"creating {fs_type} filesystem on logical volume" f" {lv_name}", + test.log.info, + ) + session.cmd(f"yes | mkfs.{fs_type} {lv_path}", timeout=int(timeout)) mount_lv(lv_path, session) umount_lv(lv_path, session) - error_context.context("checking %s filesystem made on logical " - "volume %s" % (fs_type, lv_name), test.log.info) - session.cmd("fsck %s" % lv_path, timeout=int(timeout)) + error_context.context( + f"checking {fs_type} filesystem made on logical " f"volume {lv_name}", + test.log.info, + ) + session.cmd(f"fsck {lv_path}", timeout=int(timeout)) if clean == "no": mount_lv(lv_path, session) elif sub_type == "fillup_disk" or sub_type == "ioquit": @@ -105,13 +112,10 @@ def run(test, params, env): if clean == "yes": if check_mount_lv(check_mount, session): umount_lv(lv_path, session) - error_context.context("removing logical volume %s" % lv_path, - test.log.info) - session.cmd("yes | lvremove %s" % lv_path) - error_context.context("disabling volume group %s" % vg_name, - test.log.info) - session.cmd("vgchange -a n %s" % vg_name) - error_context.context("removing volume group %s" % vg_name, - test.log.info) - session.cmd("vgremove -f %s" % vg_name) + error_context.context(f"removing logical volume {lv_path}", test.log.info) + session.cmd(f"yes | lvremove {lv_path}") + error_context.context(f"disabling volume group {vg_name}", test.log.info) + session.cmd(f"vgchange -a n {vg_name}") + error_context.context(f"removing volume group {vg_name}", test.log.info) + session.cmd(f"vgremove -f {vg_name}") session.close() diff --git a/generic/tests/mac_change.py b/generic/tests/mac_change.py index d6b20c2e59..2885ec673f 100644 --- a/generic/tests/mac_change.py +++ b/generic/tests/mac_change.py @@ -1,9 +1,6 @@ import re -from virttest import error_context -from virttest import utils_misc -from virttest import utils_net -from virttest import utils_test +from virttest import error_context, utils_misc, utils_net, utils_test @error_context.context_aware @@ -23,11 +20,11 @@ def check_guest_mac(test, mac, vm, device_id=None): if device_id not in network_info: err = "Could not find device '%s' from query-network monitor command.\n" - err += "query-network command output: %s" % network_info + err += f"query-network command output: {network_info}" test.error(err) - if not re.search(("%s.*%s" % (device_id, mac)), network_info, re.M | re.I): + if not re.search((f"{device_id}.*{mac}"), network_info, re.M | re.I): err = "Could not get correct mac from qmp command!\n" - err += "query-network command output: %s" % network_info + err += f"query-network command output: {network_info}" test.fail(err) @@ -71,65 +68,59 @@ def run(test, params, env): if os_type == "linux": interface = utils_net.get_linux_ifname(session_serial, old_mac) if params.get("shutdown_int", "yes") == "yes": - int_shutdown_cmd = params.get("int_shutdown_cmd", - "ifconfig %s down") + int_shutdown_cmd = params.get("int_shutdown_cmd", "ifconfig %s down") session_serial.cmd_output_safe(int_shutdown_cmd % interface) else: - - connection_id = utils_net.get_windows_nic_attribute(session_serial, - "macaddress", - old_mac, - "netconnectionid") - nic_index = utils_net.get_windows_nic_attribute(session_serial, - "netconnectionid", - connection_id, - "index") + connection_id = utils_net.get_windows_nic_attribute( + session_serial, "macaddress", old_mac, "netconnectionid" + ) + nic_index = 
utils_net.get_windows_nic_attribute( + session_serial, "netconnectionid", connection_id, "index" + ) if os_variant == "winxp" and session is not None: - pnpdevice_id = utils_net.get_windows_nic_attribute(session, - "netconnectionid", - connection_id, - "pnpdeviceid") + pnpdevice_id = utils_net.get_windows_nic_attribute( + session, "netconnectionid", connection_id, "pnpdeviceid" + ) cd_drive = utils_misc.get_winutils_vol(session) - copy_cmd = r"xcopy %s:\devcon\wxp_x86\devcon.exe c:\ " % cd_drive + copy_cmd = rf"xcopy {cd_drive}:\devcon\wxp_x86\devcon.exe c:\ " session.cmd(copy_cmd) # Start change MAC address - error_context.context("Changing MAC address to %s" % new_mac, test.log.info) + error_context.context(f"Changing MAC address to {new_mac}", test.log.info) if os_type == "linux": change_cmd = change_cmd_pattern % (interface, new_mac) else: - change_cmd = change_cmd_pattern % (int(nic_index), - "".join(new_mac.split(":"))) + change_cmd = change_cmd_pattern % (int(nic_index), "".join(new_mac.split(":"))) try: session_serial.cmd_output_safe(change_cmd) # Verify whether MAC address was changed to the new one - error_context.context("Verify the new mac address, and restart the network", - test.log.info) + error_context.context( + "Verify the new mac address, and restart the network", test.log.info + ) if os_type == "linux": if params.get("shutdown_int", "yes") == "yes": - int_activate_cmd = params.get("int_activate_cmd", - "ifconfig %s up") + int_activate_cmd = params.get("int_activate_cmd", "ifconfig %s up") session_serial.cmd_output_safe(int_activate_cmd % interface) - session_serial.cmd_output_safe("ifconfig | grep -i %s" % new_mac) + session_serial.cmd_output_safe(f"ifconfig | grep -i {new_mac}") test.log.info("Mac address change successfully, net restart...") - dhclient_cmd = "dhclient -r && dhclient %s" % interface + dhclient_cmd = f"dhclient -r && dhclient {interface}" session_serial.sendline(dhclient_cmd) else: mode = "netsh" if os_variant == "winxp": connection_id = pnpdevice_id.split("&")[-1] mode = "devcon" - utils_net.restart_windows_guest_network(session_serial, - connection_id, - mode=mode) + utils_net.restart_windows_guest_network( + session_serial, connection_id, mode=mode + ) o = session_serial.cmd_output_safe("ipconfig /all") if params.get("ctrl_mac_addr") == "off": mac_check = old_mac else: mac_check = new_mac - if not re.findall("%s" % "-".join(mac_check.split(":")), o, re.I): + if not re.findall("{}".format("-".join(mac_check.split(":"))), o, re.I): test.fail("Guest mac change failed") test.log.info("Guest mac have been modified successfully") @@ -139,7 +130,7 @@ def run(test, params, env): # Just warning when failed to see the session become dead, # because there is a little chance the ip does not change. msg = "The session is still responsive, settings may fail." 
- test.log.warn(msg) + test.log.warning(msg) session.close() # In the following case, mac address should not change, @@ -156,8 +147,10 @@ def run(test, params, env): if not session.is_responsive(): test.error("The new session is not responsive.") if params.get("reboot_vm_after_mac_changed") == "yes": - error_context.context("Reboot guest and check the the mac address by " - "monitor", test.log.info) + error_context.context( + "Reboot guest and check the the mac address by " "monitor", + test.log.info, + ) mac_check = new_mac if os_type == "linux": nic = vm.virtnet[0] @@ -171,8 +164,9 @@ def run(test, params, env): session_serial = vm.reboot(session_serial, serial=True) check_guest_mac(test, mac_check, vm) if params.get("file_transfer", "no") == "yes": - error_context.context("File transfer between host and guest.", - test.log.info) + error_context.context( + "File transfer between host and guest.", test.log.info + ) utils_test.run_file_transfer(test, params, env) else: if params.get("ctrl_mac_addr") == "off": @@ -184,9 +178,9 @@ def run(test, params, env): clean_cmd_pattern = params.get("clean_cmd") clean_cmd = clean_cmd_pattern % int(nic_index) session_serial.cmd_output_safe(clean_cmd) - utils_net.restart_windows_guest_network(session_serial, - connection_id, - mode=mode) + utils_net.restart_windows_guest_network( + session_serial, connection_id, mode=mode + ) nic = vm.virtnet[0] nic.mac = old_mac vm.virtnet.update_db() diff --git a/generic/tests/module_probe.py b/generic/tests/module_probe.py index 659a26eecd..5dba76b1c7 100644 --- a/generic/tests/module_probe.py +++ b/generic/tests/module_probe.py @@ -1,5 +1,5 @@ -from virttest import base_installer from avocado.utils import linux_modules +from virttest import base_installer def run(test, params, env): @@ -13,19 +13,19 @@ def run(test, params, env): if vm: vm.destroy() env.unregister_vm(vm.name) - installer_object = base_installer.NoopInstaller('noop', - 'module_probe', - test, params) - test.log.debug('installer object: %r', installer_object) + installer_object = base_installer.NoopInstaller( + "noop", "module_probe", test, params + ) + test.log.debug("installer object: %r", installer_object) submodules = [] modules_str = " " for module in installer_object.module_list: - if " %s " % module in modules_str: + if f" {module} " in modules_str: continue tmp_list = [module] if linux_modules.module_is_loaded(module): tmp_list += linux_modules.get_submodules(module) - modules_str += "%s " % " ".join(tmp_list) + modules_str += "{} ".format(" ".join(tmp_list)) if len(tmp_list) > 1: for _ in submodules: if _[0] in tmp_list: @@ -48,8 +48,9 @@ def run(test, params, env): test.log.error(e) break except Exception as e: - test.fail("Failed to load modules [%r]: %s" % - (installer_object.module_list, e)) + test.fail( + f"Failed to load modules [{installer_object.module_list!r}]: {e}" + ) installer_object.unload_modules() finally: try: diff --git a/generic/tests/monotonic_time.py b/generic/tests/monotonic_time.py index 0f2fe7f13e..2896a73590 100644 --- a/generic/tests/monotonic_time.py +++ b/generic/tests/monotonic_time.py @@ -1,15 +1,13 @@ -import os import logging - +import os from inspect import ismethod -from virttest import data_dir -from virttest import error_context +from virttest import data_dir, error_context -LOG_JOB = logging.getLogger('avocado.test') +LOG_JOB = logging.getLogger("avocado.test") -class TimeClientTest(object): +class TimeClientTest: def __init__(self, test, params, env, test_name): self.test = test self.vm = 
env.get_vm(params["main_vm"]) @@ -20,7 +18,7 @@ def __init__(self, test, params, env, test_name): def setUp(self): LOG_JOB.info("Copy files to guest") self.vm.copy_files_to(self.host_dir, os.path.dirname(self.src_dir)) - self.session.cmd("cd %s && make clobber && make" % self.src_dir) + self.session.cmd(f"cd {self.src_dir} && make clobber && make") def runTest(self): for attr in dir(self): @@ -31,7 +29,7 @@ def runTest(self): func() def cleanUp(self): - self.session.cmd("rm -rf %s" % self.src_dir) + self.session.cmd(f"rm -rf {self.src_dir}") self.session.close() @@ -46,35 +44,34 @@ def _test(self, test_type=None, duration=300, threshold=None): :params threshold: Same resolution as clock source. """ if not test_type: - self.test.error('missing test type') + self.test.error("missing test type") LOG_JOB.info("Test type: %s", test_type) timeout = float(duration) + 100.0 - cmd = self.src_dir + '/time_test' - cmd += ' --duration ' + str(duration) + cmd = self.src_dir + "/time_test" + cmd += " --duration " + str(duration) if threshold: - cmd += ' --threshold ' + str(threshold) - cmd += ' ' + test_type + cmd += " --threshold " + str(threshold) + cmd += " " + test_type (exit_status, stdout) = self.session.cmd_status_output(cmd, timeout=timeout) - LOG_JOB.info('Time test command exit status: %s', - exit_status) + LOG_JOB.info("Time test command exit status: %s", exit_status) if exit_status != 0: for line in stdout.splitlines(): - if line.startswith('ERROR:'): + if line.startswith("ERROR:"): self.test.error(line) - if line.startswith('FAIL:'): + if line.startswith("FAIL:"): self.test.fail(line) - self.test.error('unknown test failure') + self.test.error("unknown test failure") def test_Gtod(self): - self._test(test_type='gtod', threshold=0) + self._test(test_type="gtod", threshold=0) def test_Tsc_lfence(self): - self._test(test_type='tsc_lfence', threshold=0) + self._test(test_type="tsc_lfence", threshold=0) def test_Clock(self): - self._test(test_type='clock', threshold=0) + self._test(test_type="clock", threshold=0) @error_context.context_aware @@ -93,7 +90,7 @@ def run(test, params, env): :param env: Dictionary with the test environment. """ - monotonic_test = MonotonicTime(test, params, env, 'monotonic_time') + monotonic_test = MonotonicTime(test, params, env, "monotonic_time") monotonic_test.setUp() monotonic_test.runTest() monotonic_test.cleanUp() diff --git a/generic/tests/multi_queues_test.py b/generic/tests/multi_queues_test.py index 146943b480..e9ae786d54 100644 --- a/generic/tests/multi_queues_test.py +++ b/generic/tests/multi_queues_test.py @@ -2,10 +2,7 @@ import time from avocado.utils import process -from virttest import utils_net -from virttest import utils_misc -from virttest import utils_test -from virttest import error_context +from virttest import error_context, utils_misc, utils_net, utils_test @error_context.context_aware @@ -24,6 +21,7 @@ def run(test, params, env): :param params: Dictionary with the test parameters. :param env: Dictionary with test environment. 
""" + def get_virtio_queues_irq(session): """ Return multi queues input irq list @@ -38,7 +36,7 @@ def get_cpu_affinity_hint(session, irq_number): """ Return the cpu affinity_hint of irq_number """ - cmd_get_cpu_affinity = r"cat /proc/irq/%s/affinity_hint" % irq_number + cmd_get_cpu_affinity = rf"cat /proc/irq/{irq_number}/affinity_hint" return session.cmd_output(cmd_get_cpu_affinity).strip() def get_cpu_index(cpu_id): @@ -66,7 +64,7 @@ def get_cpu_irq_statistics(session, irq_number, cpu_id=None): Get guest interrupts statistics """ online_cpu_number_cmd = r"cat /proc/interrupts | head -n 1 | wc -w" - cmd = r"cat /proc/interrupts | sed -n '/^\s*%s:/p'" % irq_number + cmd = rf"cat /proc/interrupts | sed -n '/^\s*{irq_number}:/p'" online_cpu_number = int(session.cmd_output_safe(online_cpu_number_cmd)) irq_statics = session.cmd_output(cmd) irq_statics_list = list(map(int, irq_statics.split()[1:online_cpu_number])) @@ -90,25 +88,22 @@ def get_cpu_irq_statistics(session, irq_number, cpu_id=None): vm.verify_alive() session = vm.wait_for_login(timeout=login_timeout) for i, nic in enumerate(vm.virtnet): - if "virtio" in nic['nic_model']: - ifname = utils_net.get_linux_ifname(session, - vm.get_mac_address(i)) - session.cmd_output("ethtool -L %s combined %d" % (ifname, - queues)) - o = session.cmd_output("ethtool -l %s" % ifname) + if "virtio" in nic["nic_model"]: + ifname = utils_net.get_linux_ifname(session, vm.get_mac_address(i)) + session.cmd_output("ethtool -L %s combined %d" % (ifname, queues)) + o = session.cmd_output(f"ethtool -l {ifname}") if len(re.findall(r"Combined:\s+%d\s" % queues, o)) != 2: - test.error("Fail to enable MQ feature of (%s)" % - nic.nic_name) + test.error(f"Fail to enable MQ feature of ({nic.nic_name})") test.log.info("MQ feature of (%s) is enabled", nic.nic_name) taskset_cpu = params.get("netperf_taskset_cpu") if taskset_cpu: - taskset_cmd = "taskset -c %s " % " ".join(taskset_cpu) + taskset_cmd = "taskset -c {} ".format(" ".join(taskset_cpu)) params["netperf_cmd_prefix"] = taskset_cmd - check_cpu_affinity = params.get("check_cpu_affinity", 'no') - check_vhost = params.get("check_vhost_threads", 'yes') - if check_cpu_affinity == 'yes' and (vm.cpuinfo.smp == queues): + check_cpu_affinity = params.get("check_cpu_affinity", "no") + check_vhost = params.get("check_vhost_threads", "yes") + if check_cpu_affinity == "yes" and (vm.cpuinfo.smp == queues): process.system("systemctl stop irqbalance.service") session.cmd("systemctl stop irqbalance.service") set_cpu_affinity(session) @@ -117,36 +112,40 @@ def get_cpu_irq_statistics(session, irq_number, cpu_id=None): n_instance = int(params.get("netperf_para_sessions", queues)) try: if bg_sub_test: - error_context.context("Run test %s background" % bg_sub_test, - test.log.info) + error_context.context( + f"Run test {bg_sub_test} background", test.log.info + ) # Set flag, when the sub test really running, will change this # flag to True stress_thread = utils_misc.InterruptedThread( - utils_test.run_virt_sub_test, (test, params, env), - {"sub_type": bg_sub_test}) + utils_test.run_virt_sub_test, + (test, params, env), + {"sub_type": bg_sub_test}, + ) stress_thread.start() - if params.get("vhost") == 'vhost=on' and check_vhost == 'yes': - vhost_thread_pattern = params.get("vhost_thread_pattern", - r"\w+\s+(\d+)\s.*\[vhost-%s\]") + if params.get("vhost") == "vhost=on" and check_vhost == "yes": + vhost_thread_pattern = params.get( + "vhost_thread_pattern", r"\w+\s+(\d+)\s.*\[vhost-%s\]" + ) vhost_threads = 
vm.get_vhost_threads(vhost_thread_pattern) time.sleep(120) - error_context.context("Check vhost threads on host", - test.log.info) - top_cmd = (r'top -n 1 -bis | tail -n +7 | grep -E "^ *%s "' - % ' |^ *'.join(map(str, vhost_threads))) + error_context.context("Check vhost threads on host", test.log.info) + top_cmd = r'top -n 1 -bis | tail -n +7 | grep -E "^ *{} "'.format( + " |^ *".join(map(str, vhost_threads)) + ) top_info = None while session.cmd_status("ps -C netperf") == 0: - top_info = process.system_output(top_cmd, ignore_status=True, - shell=True).decode() + top_info = process.system_output( + top_cmd, ignore_status=True, shell=True + ).decode() if top_info: break test.log.info(top_info) vhost_re = re.compile(r"(0:00.\d{2}).*vhost-\d+[\d|+]") invalid_vhost_thread = len(vhost_re.findall(top_info, re.I)) - running_threads = (len(top_info.splitlines()) - - int(invalid_vhost_thread)) + running_threads = len(top_info.splitlines()) - int(invalid_vhost_thread) n_instance = min(n_instance, int(queues), int(vm.cpuinfo.smp)) if running_threads != n_instance: @@ -154,7 +153,7 @@ def get_cpu_irq_statistics(session, irq_number, cpu_id=None): test.fail(err_msg % (n_instance, running_threads)) # check cpu affinity - if check_cpu_affinity == 'yes' and (vm.cpuinfo.smp == queues): + if check_cpu_affinity == "yes" and (vm.cpuinfo.smp == queues): error_context.context("Check cpu affinity", test.log.info) vectors = params.get("vectors", None) enable_msix_vectors = params.get("enable_msix_vectors") @@ -168,7 +167,7 @@ def get_cpu_irq_statistics(session, irq_number, cpu_id=None): cpu_index = get_cpu_index(cpu_id) if cpu_index: for cpu in cpu_index: - cpu_irq_affinity["%s" % cpu] = irq + cpu_irq_affinity[f"{cpu}"] = irq else: test.error("Can not get the cpu") @@ -196,5 +195,5 @@ def get_cpu_irq_statistics(session, irq_number, cpu_id=None): finally: if session: session.close() - if check_cpu_affinity == 'yes' and (vm.cpuinfo.smp == queues): + if check_cpu_affinity == "yes" and (vm.cpuinfo.smp == queues): process.system("systemctl start irqbalance.service") diff --git a/generic/tests/multicast.py b/generic/tests/multicast.py index 717d76a7d1..6ab953693b 100644 --- a/generic/tests/multicast.py +++ b/generic/tests/multicast.py @@ -2,7 +2,6 @@ import re import aexpect - from avocado.utils import process from virttest import utils_test @@ -29,7 +28,7 @@ def run_guest(cmd): try: session.cmd(cmd) except aexpect.ShellError as e: - test.log.warn(e) + test.log.warning(e) def run_host_guest(cmd): run_guest(cmd) @@ -37,8 +36,10 @@ def run_host_guest(cmd): # flush the firewall rules cmd_flush = "iptables -F" - cmd_selinux = ("if [ -e /selinux/enforce ]; then setenforce 0; " - "else echo 'no /selinux/enforce file present'; fi") + cmd_selinux = ( + "if [ -e /selinux/enforce ]; then setenforce 0; " + "else echo 'no /selinux/enforce file present'; fi" + ) run_host_guest(cmd_flush) run_host_guest(cmd_selinux) # make sure guest replies to broadcasts @@ -58,14 +59,15 @@ def run_host_guest(cmd): # copy python script to guest for joining guest to multicast groups mcast_path = os.path.join(test.virtdir, "scripts/multicast_guest.py") vm.copy_files_to(mcast_path, "/tmp") - output = session.cmd_output("python /tmp/multicast_guest.py %d %s %d" % - (mgroup_count, prefix, suffix)) + output = session.cmd_output( + "python /tmp/multicast_guest.py %d %s %d" % (mgroup_count, prefix, suffix) + ) # if success to join multicast, the process will be paused, and return PID. 
try: pid = re.findall(r"join_mcast_pid:(\d+)", output)[0] except IndexError: - test.fail("Can't join multicast groups,output:%s" % output) + test.fail(f"Can't join multicast groups,output:{output}") try: for i in range(mgroup_count): @@ -75,18 +77,24 @@ def run_host_guest(cmd): test.log.info("Initial ping test, mcast: %s", mcast) s, o = utils_test.ping(mcast, 10, interface=ifname, timeout=20) if s != 0: - test.fail(" Ping return non-zero value %s" % o) + test.fail(f" Ping return non-zero value {o}") test.log.info("Flood ping test, mcast: %s", mcast) - utils_test.ping(mcast, None, interface=ifname, flood=True, - output_func=None, timeout=flood_minutes * 60) + utils_test.ping( + mcast, + None, + interface=ifname, + flood=True, + output_func=None, + timeout=flood_minutes * 60, + ) test.log.info("Final ping test, mcast: %s", mcast) s, o = utils_test.ping(mcast, 10, interface=ifname, timeout=20) if s != 0: - test.fail("Ping failed, status: %s, output: %s" % (s, o)) + test.fail(f"Ping failed, status: {s}, output: {o}") finally: test.log.debug(session.cmd_output("ipmaddr show")) - session.cmd_output("kill -s SIGCONT %s" % pid) + session.cmd_output(f"kill -s SIGCONT {pid}") session.close() diff --git a/generic/tests/netperf.py b/generic/tests/netperf.py index 5b31465c80..44126c3403 100644 --- a/generic/tests/netperf.py +++ b/generic/tests/netperf.py @@ -1,21 +1,15 @@ import logging import os -import threading import re +import threading import time from avocado.utils import process +from virttest import error_context, remote, utils_misc, utils_net, utils_test, virt_vm -from virttest import virt_vm -from virttest import utils_test -from virttest import utils_misc -from virttest import utils_net -from virttest import remote -from virttest import error_context -from provider import netperf_base -from provider import win_driver_utils +from provider import netperf_base, win_driver_utils -LOG_JOB = logging.getLogger('avocado.test') +LOG_JOB = logging.getLogger("avocado.test") _netserver_started = False @@ -24,10 +18,14 @@ def start_netserver_win(session, start_cmd, test): check_reg = re.compile(r"NETSERVER.*EXE", re.I) if not check_reg.findall(session.cmd_output("tasklist")): session.sendline(start_cmd) - if not utils_misc.wait_for(lambda: check_reg.findall( - session.cmd_output("tasklist")), - 30, 5, 1, "Wait netserver start"): - msg = "Can not start netserver with command %s" % start_cmd + if not utils_misc.wait_for( + lambda: check_reg.findall(session.cmd_output("tasklist")), + 30, + 5, + 1, + "Wait netserver start", + ): + msg = f"Can not start netserver with command {start_cmd}" test.fail(msg) @@ -45,6 +43,7 @@ def run(test, params, env): :param params: Dictionary with the test parameters. :param env: Dictionary with test environment. 
""" + def mtu_set(mtu): """ Set server/client/host's mtu @@ -61,12 +60,14 @@ def mtu_set(mtu): netperf_base.ssh_cmd(server_ctl, server_mtu_cmd % (ethname, mtu)) elif params.get("os_type") == "windows": connection_id = utils_net.get_windows_nic_attribute( - server_ctl, "macaddress", mac, "netconnectionid") + server_ctl, "macaddress", mac, "netconnectionid" + ) netperf_base.ssh_cmd(server_ctl, server_mtu_cmd % (connection_id, mtu)) error_context.context("Changing the MTU of client", test.log.info) - netperf_base.ssh_cmd(client, client_mtu_cmd - % (params.get("client_physical_nic"), mtu)) + netperf_base.ssh_cmd( + client, client_mtu_cmd % (params.get("client_physical_nic"), mtu) + ) netdst = params.get("netdst", "switch") host_bridges = utils_net.Bridge() @@ -75,8 +76,14 @@ def mtu_set(mtu): if netdst in br_in_use: ifaces_in_use = host_bridges.list_iface() target_ifaces = list(ifaces_in_use + br_in_use) - if process.system("which ovs-vsctl && systemctl status openvswitch.service", - ignore_status=True, shell=True) == 0: + if ( + process.system( + "which ovs-vsctl && systemctl status openvswitch.service", + ignore_status=True, + shell=True, + ) + == 0 + ): ovs_br_all = netperf_base.ssh_cmd(host, "ovs-vsctl list-br") ovs_br = [] if ovs_br_all: @@ -85,39 +92,41 @@ def mtu_set(mtu): ovs_br.append(nic.netdst) elif nic.nettype == "vdpa": vf_pci = netperf_base.ssh_cmd( - host, - "vdpa dev show |grep %s | grep -o 'pci/[^[:space:]]*' | awk -F/ '{print $2}'" - % nic.netdst) + host, + f"vdpa dev show |grep {nic.netdst} | grep -o 'pci/[^[:space:]]*' | awk -F/ '{{print $2}}'", + ) pf_pci = netperf_base.ssh_cmd( - host, - "grep PCI_SLOT_NAME /sys/bus/pci/devices/%s/physfn/uevent | cut -d'=' -f2" - % vf_pci) + host, + f"grep PCI_SLOT_NAME /sys/bus/pci/devices/{vf_pci}/physfn/uevent | cut -d'=' -f2", + ) port = netperf_base.ssh_cmd( - host, - "ls /sys/bus/pci/devices/%s/net/ | head -n 1" - % pf_pci) - ovs_br_vdpa = netperf_base.ssh_cmd(host, "ovs-vsctl port-to-br %s" % port) - cmd = "ovs-ofctl add-flow {} 'in_port=1,idle_timeout=0 actions=output:2'".format(ovs_br_vdpa) - cmd += "&& ovs-ofctl add-flow {} 'in_port=2,idle_timeout=0 actions=output:1'".format(ovs_br_vdpa) - cmd += "&& ovs-ofctl dump-flows {}".format(ovs_br_vdpa) + host, f"ls /sys/bus/pci/devices/{pf_pci}/net/ | head -n 1" + ) + ovs_br_vdpa = netperf_base.ssh_cmd( + host, f"ovs-vsctl port-to-br {port}" + ) + cmd = f"ovs-ofctl add-flow {ovs_br_vdpa} 'in_port=1,idle_timeout=0 actions=output:2'" + cmd += f"&& ovs-ofctl add-flow {ovs_br_vdpa} 'in_port=2,idle_timeout=0 actions=output:1'" + cmd += f"&& ovs-ofctl dump-flows {ovs_br_vdpa}" netperf_base.ssh_cmd(host, cmd) ovs_br.append(ovs_br_vdpa) for br in ovs_br: - ovs_list = "ovs-vsctl list-ports %s" % br + ovs_list = f"ovs-vsctl list-ports {br}" ovs_port = netperf_base.ssh_cmd(host, ovs_list) target_ifaces.extend(ovs_port.split() + [br]) if vm.virtnet[0].nettype == "macvtap": target_ifaces.extend([vm.virtnet[0].netdst, vm.get_ifname(0)]) - error_context.context("Change all Bridge NICs MTU to %s" - % mtu, test.log.info) + error_context.context(f"Change all Bridge NICs MTU to {mtu}", test.log.info) for iface in target_ifaces: try: - process.run(host_mtu_cmd % (iface, mtu), ignore_status=False, - shell=True) + process.run( + host_mtu_cmd % (iface, mtu), ignore_status=False, shell=True + ) except process.CmdError as err: if "SIOCSIFMTU" in err.result.stderr.decode(): - test.cancel("The ethenet device does not support jumbo," - "cancel test") + test.cancel( + "The ethenet device does not support jumbo," 
"cancel test" + ) vm = env.get_vm(params["main_vm"]) vm.verify_alive() @@ -137,10 +146,10 @@ def mtu_set(mtu): status, output = session.cmd_status_output(verifier_clear_cmd) test.log.info(output) if ".sys" in output: - msg = "%s does not work correctly" % verifier_clear_cmd + msg = f"{verifier_clear_cmd} does not work correctly" test.error(msg) elif s != 0: - msg = "Config command %s failed. Output: %s" % (cmd, o) + msg = f"Config command {cmd} failed. Output: {o}" test.error(msg) session.close() if params.get("reboot_after_config", "yes") == "yes": @@ -157,55 +166,63 @@ def mtu_set(mtu): session = vm.wait_for_serial_login(timeout=login_timeout) ifname = utils_net.get_linux_ifname(session) for i in ifname: - cmd = "ethtool -i %s |grep driver| awk -F': ' '{print $2}'" % i + cmd = f"ethtool -i {i} |grep driver| awk -F': ' '{{print $2}}'" driver = session.cmd_output(cmd).strip() if driver == "net_failover": - session.cmd_output("dhclient -r && dhclient %s" % i) + session.cmd_output(f"dhclient -r && dhclient {i}") break if params.get("os_type") == "windows" and params.get("install_vioprot_cmd"): media_type = params["virtio_win_media_type"] driver_name = params["driver_name"] session = vm.wait_for_login(nic_index=2, timeout=login_timeout) for driver_name in driver_name.split(): - inf_path = win_driver_utils.get_driver_inf_path(session, test, - media_type, - driver_name) + inf_path = win_driver_utils.get_driver_inf_path( + session, test, media_type, driver_name + ) if driver_name == "netkvm": device_name = params.get("device_name") device_hwid = params.get("device_hwid") devcon_path = utils_misc.set_winutils_letter( - session, params.get("devcon_path")) - status, output = session.cmd_status_output("dir %s" % - devcon_path) + session, params.get("devcon_path") + ) + status, output = session.cmd_status_output(f"dir {devcon_path}") if status: - test.error("Not found devcon.exe, details: %s" % output) - - error_context.context("Uninstall %s driver" % driver_name, - test.log.info) - win_driver_utils.uninstall_driver(session, test, - devcon_path, driver_name, - device_name, device_hwid) + test.error(f"Not found devcon.exe, details: {output}") + + error_context.context( + f"Uninstall {driver_name} driver", test.log.info + ) + win_driver_utils.uninstall_driver( + session, + test, + devcon_path, + driver_name, + device_name, + device_hwid, + ) for hwid in device_hwid.split(): - install_driver_cmd = "%s install %s %s" % (devcon_path, - inf_path, - hwid) + install_driver_cmd = f"{devcon_path} install {inf_path} {hwid}" status, output = session.cmd_status_output( - install_driver_cmd, - timeout=login_timeout) + install_driver_cmd, timeout=login_timeout + ) if status: - test.fail("Failed to install driver '%s', " - "details:\n%s" % (driver_name, output)) + test.fail( + f"Failed to install driver '{driver_name}', " + f"details:\n{output}" + ) if driver_name == "VIOPROT": test.log.info("Will install inf file found at '%s'", inf_path) install_cmd = params.get("install_vioprot_cmd") % inf_path status, output = session.cmd_status_output(install_cmd) if status: - test.error("Install inf file failed, output=%s" % output) + test.error(f"Install inf file failed, output={output}") session.cmd_output_safe("ipconfig /renew", timeout=login_timeout) session.close() else: try: - vm.wait_for_serial_login(timeout=login_timeout, restart_network=True).close() + vm.wait_for_serial_login( + timeout=login_timeout, restart_network=True + ).close() except virt_vm.VMIPAddressMissingError: pass @@ -220,15 +237,14 @@ def mtu_set(mtu): 
queues = int(params.get("queues", 1)) if queues > 1: if params.get("os_type") == "linux": - session.cmd_status_output("ethtool -L %s combined %s" % - (ethname, queues)) + session.cmd_status_output(f"ethtool -L {ethname} combined {queues}") else: test.log.info("FIXME: support to enable MQ for Windows guest!") if params.get("server_private_ip") and params.get("os_type") == "linux": server_ip = params.get("server_private_ip") cmd = "systemctl stop NetworkManager.service" - cmd += " && ifconfig %s %s up" % (ethname, server_ip) + cmd += f" && ifconfig {ethname} {server_ip} up" session.cmd_output(cmd) else: server_ip = vm.wait_for_get_address(0, timeout=90) @@ -244,8 +260,7 @@ def mtu_set(mtu): utils_test.service_setup(vm, session, test.virtdir) session.close() - if (params.get("os_type") == "windows" and - params.get("use_cygwin") == "yes"): + if params.get("os_type") == "windows" and params.get("use_cygwin") == "yes": cygwin_prompt = params.get("cygwin_prompt", r"\$\s+$") cygwin_start = params.get("cygwin_start") server_cyg = vm.wait_for_login(timeout=login_timeout) @@ -254,12 +269,16 @@ def mtu_set(mtu): else: server_cyg = None - test.log.debug(process.system_output("numactl --hardware", - verbose=False, ignore_status=True, - shell=True).decode()) - test.log.debug(process.system_output("numactl --show", - verbose=False, ignore_status=True, - shell=True).decode()) + test.log.debug( + process.system_output( + "numactl --hardware", verbose=False, ignore_status=True, shell=True + ).decode() + ) + test.log.debug( + process.system_output( + "numactl --show", verbose=False, ignore_status=True, shell=True + ).decode() + ) # pin guest vcpus/memory/vhost threads to last numa node of host by default numa_node = netperf_base.pin_vm_threads(vm, params.get("numa_node")) @@ -267,12 +286,14 @@ def mtu_set(mtu): host_ip = host if host != "localhost": params_host = params.object_params("host") - host = remote.wait_for_login(params_host.get("shell_client"), - host_ip, - params_host.get("shell_port"), - params_host.get("username"), - params_host.get("password"), - params_host.get("shell_prompt")) + host = remote.wait_for_login( + params_host.get("shell_client"), + host_ip, + params_host.get("shell_port"), + params_host.get("username"), + params_host.get("password"), + params_host.get("shell_prompt"), + ) client = params.get("client", "localhost") client_ip = client @@ -285,14 +306,17 @@ def mtu_set(mtu): client_ip = vm_client.wait_for_get_address(0, timeout=5) elif client != "localhost" and params.get("os_type_client") == "linux": client_pub_ip = params.get("client_public_ip") - tmp = remote.wait_for_login(params.get("shell_client_client"), - client_pub_ip, - params.get("shell_port_client"), - params.get("username_client"), - params.get("password_client"), - params.get("shell_prompt_client")) - cmd = "ifconfig %s %s up" % (params.get("client_physical_nic"), - client_ip) + tmp = remote.wait_for_login( + params.get("shell_client_client"), + client_pub_ip, + params.get("shell_port_client"), + params.get("username_client"), + params.get("password_client"), + params.get("shell_prompt_client"), + ) + cmd = "ifconfig {} {} up".format( + params.get("client_physical_nic"), client_ip + ) netperf_base.ssh_cmd(tmp, cmd) else: tmp = "localhost" @@ -315,19 +339,18 @@ def mtu_set(mtu): prepare_list = set([server_ctl, client, host]) tag_dict = {server_ctl: "server", client: "client", host: "host"} if client_pub_ip: - ip_dict = {server_ctl: server_ctl_ip, client: client_pub_ip, - host: host_ip} + ip_dict = {server_ctl: 
server_ctl_ip, client: client_pub_ip, host: host_ip} else: - ip_dict = {server_ctl: server_ctl_ip, client: client_ip, - host: host_ip} + ip_dict = {server_ctl: server_ctl_ip, client: client_ip, host: host_ip} for i in prepare_list: params_tmp = params.object_params(tag_dict[i]) if params_tmp.get("os_type") == "linux": shell_port = int(params_tmp["shell_port"]) password = params_tmp["password"] username = params_tmp["username"] - netperf_base.env_setup(test, params, i, ip_dict[i], - username, shell_port, password) + netperf_base.env_setup( + test, params, i, ip_dict[i], username, shell_port, password + ) elif params_tmp.get("os_type") == "windows": windows_disable_firewall = params.get("windows_disable_firewall") netperf_base.ssh_cmd(i, windows_disable_firewall) @@ -339,21 +362,28 @@ def mtu_set(mtu): try: error_context.context("Start netperf testing", test.log.info) - start_test(server_ip, server_ctl, host, clients, test.resultsdir, - test_duration=int(params.get('l')), - sessions_rr=params.get('sessions_rr'), - sessions=params.get('sessions'), - sizes_rr=params.get('sizes_rr'), - sizes=params.get('sizes'), - protocols=params.get('protocols'), - netserver_port=params.get('netserver_port', "12865"), - params=params, server_cyg=server_cyg, test=test) + start_test( + server_ip, + server_ctl, + host, + clients, + test.resultsdir, + test_duration=int(params.get("l")), + sessions_rr=params.get("sessions_rr"), + sessions=params.get("sessions"), + sizes_rr=params.get("sizes_rr"), + sizes=params.get("sizes"), + protocols=params.get("protocols"), + netserver_port=params.get("netserver_port", "12865"), + params=params, + server_cyg=server_cyg, + test=test, + ) if params.get("log_hostinfo_script"): src = os.path.join(test.virtdir, params.get("log_hostinfo_script")) path = os.path.join(test.resultsdir, "systeminfo") - process.system_output("bash %s %s &> %s" % ( - src, test.resultsdir, path), shell=True) + process.system_output(f"bash {src} {test.resultsdir} &> {path}", shell=True) if params.get("log_guestinfo_script") and params.get("log_guestinfo_exec"): src = os.path.join(test.virtdir, params.get("log_guestinfo_script")) @@ -361,31 +391,44 @@ def mtu_set(mtu): destpath = params.get("log_guestinfo_path", "/tmp/log_guestinfo.sh") vm.copy_files_to(src, destpath, nic_index=1) logexec = params.get("log_guestinfo_exec", "bash") - output = server_ctl.cmd_output("%s %s" % (logexec, destpath)) + output = server_ctl.cmd_output(f"{logexec} {destpath}") logfile = open(path, "a+") logfile.write(output) logfile.close() finally: if mtu != 1500: mtu_default = 1500 - error_context.context("Change back server, client and host's mtu to %s" - % mtu_default) + error_context.context( + f"Change back server, client and host's mtu to {mtu_default}" + ) mtu_set(mtu_default) - if params.get("client_physical_nic") and params.get( - "os_type_client") == "linux": - cmd = 'ifconfig %s 0.0.0.0' % params.get("client_physical_nic") + if ( + params.get("client_physical_nic") + and params.get("os_type_client") == "linux" + ): + cmd = "ifconfig {} 0.0.0.0".format(params.get("client_physical_nic")) netperf_base.ssh_cmd(client, cmd) # FIXME: `test` should be a mandatory argument here @error_context.context_aware -def start_test(server, server_ctl, host, clients, resultsdir, test_duration=60, - sessions_rr="50 100 250 500", - sessions="1 2 4", - sizes_rr="64 256 512 1024 2048", - sizes="64 256 512 1024 2048 4096", - protocols="TCP_STREAM TCP_MAERTS TCP_RR TCP_CRR", - netserver_port=None, params=None, server_cyg=None, test=None): +def 
start_test( + server, + server_ctl, + host, + clients, + resultsdir, + test_duration=60, + sessions_rr="50 100 250 500", + sessions="1 2 4", + sizes_rr="64 256 512 1024 2048", + sizes="64 256 512 1024 2048 4096", + protocols="TCP_STREAM TCP_MAERTS TCP_RR TCP_CRR", + netserver_port=None, + params=None, + server_cyg=None, + test=None, +): """ Start to test with different kind of configurations @@ -407,20 +450,31 @@ def start_test(server, server_ctl, host, clients, resultsdir, test_duration=60, if params is None: params = {} - fd = open("%s/netperf-result.%s.RHS" % (resultsdir, time.time()), "w") - netperf_base.record_env_version(test, params, host, server_ctl, - fd, test_duration) - - record_list = ['size', 'sessions', 'throughput', 'trans.rate', 'CPU', - 'thr_per_CPU', 'rx_pkts', 'tx_pkts', 'rx_byts', 'tx_byts', - 're_pkts', 'exits', 'tpkt_per_exit'] + fd = open(f"{resultsdir}/netperf-result.{time.time()}.RHS", "w") + netperf_base.record_env_version(test, params, host, server_ctl, fd, test_duration) + + record_list = [ + "size", + "sessions", + "throughput", + "trans.rate", + "CPU", + "thr_per_CPU", + "rx_pkts", + "tx_pkts", + "rx_byts", + "tx_byts", + "re_pkts", + "exits", + "tpkt_per_exit", + ] for i in range(int(params.get("queues", 0))): - record_list.append('rx_intr_%s' % i) - record_list.append('rx_intr_sum') + record_list.append(f"rx_intr_{i}") + record_list.append("rx_intr_sum") for i in range(int(params.get("queues", 0))): - record_list.append('tx_intr_%s' % i) - record_list.append('tx_intr_sum') + record_list.append(f"tx_intr_{i}") + record_list.append("tx_intr_sum") base = params.get("format_base", "12") fbase = params.get("format_fbase", "2") @@ -433,7 +487,7 @@ def start_test(server, server_ctl, host, clients, resultsdir, test_duration=60, mpstat_index = 0 for protocol in protocols.split(): - error_context.context("Testing %s protocol" % protocol, test.log.info) + error_context.context(f"Testing {protocol} protocol", test.log.info) protocol_log = "" if protocol in ("TCP_RR", "TCP_CRR"): sessions_test = sessions_rr.split() @@ -452,46 +506,54 @@ def start_test(server, server_ctl, host, clients, resultsdir, test_duration=60, for i in sizes_test: for j in sessions_test: if protocol in ("TCP_RR", "TCP_CRR"): - nf_args = "-t %s -v 1 -- -r %s,%s" % (protocol, i, i) - elif (protocol == "TCP_MAERTS"): - nf_args = "-C -c -t %s -- -m ,%s" % (protocol, i) + nf_args = f"-t {protocol} -v 1 -- -r {i},{i}" + elif protocol == "TCP_MAERTS": + nf_args = f"-C -c -t {protocol} -- -m ,{i}" else: - nf_args = "-C -c -t %s -- -m %s" % (protocol, i) - - ret = launch_client(j, server, server_ctl, host, clients, - test_duration, nf_args, netserver_port, - params, server_cyg, test) + nf_args = f"-C -c -t {protocol} -- -m {i}" + + ret = launch_client( + j, + server, + server_ctl, + host, + clients, + test_duration, + nf_args, + netserver_port, + params, + server_cyg, + test, + ) if ret: - thu = float(ret['thu']) - cpu = 100 - float(ret['mpstat'].split()[mpstat_index]) + thu = float(ret["thu"]) + cpu = 100 - float(ret["mpstat"].split()[mpstat_index]) normal = thu / cpu - if ret.get('tx_pkt') and ret.get('exits'): - ret['tpkt_per_exit'] = float( - ret['tx_pkts']) / float(ret['exits']) + if ret.get("tx_pkt") and ret.get("exits"): + ret["tpkt_per_exit"] = float(ret["tx_pkts"]) / float( + ret["exits"] + ) - ret['size'] = int(i) - ret['sessions'] = int(j) + ret["size"] = int(i) + ret["sessions"] = int(j) if protocol in ("TCP_RR", "TCP_CRR"): - ret['trans.rate'] = thu + ret["trans.rate"] = thu else: - 
ret['throughput'] = thu - ret['CPU'] = cpu - ret['thr_per_CPU'] = normal + ret["throughput"] = thu + ret["CPU"] = cpu + ret["thr_per_CPU"] = normal row, key_list = netperf_base.netperf_record( - ret, record_list, - header=record_header, - base=base, - fbase=fbase) + ret, record_list, header=record_header, base=base, fbase=fbase + ) category = "" if record_header: record_header = False - category = row.split('\n')[0] + category = row.split("\n")[0] - test.write_test_keyval({'category': category}) - prefix = '%s--%s--%s' % (protocol, i, j) + test.write_test_keyval({"category": category}) + prefix = f"{protocol}--{i}--{j}" for key in key_list: - test.write_test_keyval( - {'%s--%s' % (prefix, key): ret[key]}) + test.write_test_keyval({f"{prefix}--{key}": ret[key]}) test.log.info(row) fd.write(row + "\n") @@ -499,26 +561,42 @@ def start_test(server, server_ctl, host, clients, resultsdir, test_duration=60, fd.flush() test.log.debug("Remove temporary files") - process.system_output("rm -f /tmp/netperf.%s.nf" % ret['pid'], - verbose=False, ignore_status=True, - shell=True) + process.system_output( + "rm -f /tmp/netperf.{}.nf".format(ret["pid"]), + verbose=False, + ignore_status=True, + shell=True, + ) test.log.info("Netperf thread completed successfully") else: test.log.debug( "Not all netperf clients start to work, please enlarge" - " '%s' number or skip this tests", int(j)) + " '%s' number or skip this tests", + int(j), + ) continue fd.close() @error_context.context_aware -def launch_client(sessions, server, server_ctl, host, clients, l, nf_args, - port, params, server_cyg, test): - """ Launch netperf clients """ +def launch_client( + sessions, + server, + server_ctl, + host, + clients, + l, + nf_args, + port, + params, + server_cyg, + test, +): + """Launch netperf clients""" netperf_version = params.get("netperf_version", "2.6.0") - client_path = "/tmp/netperf-%s/src/netperf" % netperf_version - server_path = "/tmp/netperf-%s/src/netserver" % netperf_version + client_path = f"/tmp/netperf-{netperf_version}/src/netperf" + server_path = f"/tmp/netperf-{netperf_version}/src/netserver" get_status_flag = params.get("get_status_in_guest", "no") == "yes" global _netserver_started # Start netserver @@ -536,42 +614,43 @@ def launch_client(sessions, server, server_ctl, host, clients, l, nf_args, netserver_path = params.get("netserver_path") netperf_install_cmd = params.get("netperf_install_cmd") start_session = server_cyg - test.log.info("Start netserver with cygwin, cmd is: %s", - netserv_start_cmd) + test.log.info( + "Start netserver with cygwin, cmd is: %s", netserv_start_cmd + ) if "netserver" not in server_ctl.cmd_output("tasklist"): - netperf_pack = "netperf-%s" % params.get("netperf_version") - s_check_cmd = "dir %s" % netserver_path - p_check_cmd = "dir %s" % cygwin_root - if not ("netserver.exe" in server_ctl.cmd(s_check_cmd) and - netperf_pack in server_ctl.cmd(p_check_cmd)): + netperf_pack = "netperf-{}".format(params.get("netperf_version")) + s_check_cmd = f"dir {netserver_path}" + p_check_cmd = f"dir {cygwin_root}" + if not ( + "netserver.exe" in server_ctl.cmd(s_check_cmd) + and netperf_pack in server_ctl.cmd(p_check_cmd) + ): error_context.context( - "Install netserver in Windows guest cygwin", - test.log.info) - cmd = "xcopy %s %s /S /I /Y" % ( - netperf_src, cygwin_root) + "Install netserver in Windows guest cygwin", test.log.info + ) + cmd = f"xcopy {netperf_src} {cygwin_root} /S /I /Y" server_ctl.cmd(cmd) - server_cyg.cmd_output( - netperf_install_cmd, timeout=timeout) + 
server_cyg.cmd_output(netperf_install_cmd, timeout=timeout) if "netserver.exe" not in server_ctl.cmd(s_check_cmd): err_msg = "Install netserver cygwin failed" test.error(err_msg) - test.log.info( - "Install netserver in cygwin successfully") + test.log.info("Install netserver in cygwin successfully") else: start_session = server_ctl netserv_start_cmd = params.get("netserv_start_cmd") % cdrom_drv - test.log.info("Start netserver without cygwin, cmd is: %s", - netserv_start_cmd) + test.log.info( + "Start netserver without cygwin, cmd is: %s", netserv_start_cmd + ) - error_context.context("Start netserver on windows guest", - test.log.info) + error_context.context("Start netserver on windows guest", test.log.info) start_netserver_win(start_session, netserv_start_cmd, test) else: test.log.info("Netserver start cmd is '%s'", server_path) - netperf_base.ssh_cmd(server_ctl, "pidof netserver || %s" % server_path) + netperf_base.ssh_cmd(server_ctl, f"pidof netserver || {server_path}") ncpu = netperf_base.ssh_cmd( - server_ctl, "cat /proc/cpuinfo |grep processor |wc -l") + server_ctl, "cat /proc/cpuinfo |grep processor |wc -l" + ) ncpu = re.findall(r"\d+", ncpu)[-1] test.log.info("Netserver start successfully") @@ -584,7 +663,7 @@ def count_interrupt(name): """ sum = 0 intr = [] - stat = netperf_base.ssh_cmd(server_ctl, "grep %s /proc/interrupts" % name) + stat = netperf_base.ssh_cmd(server_ctl, f"grep {name} /proc/interrupts") for i in stat.strip().split("\n"): for cpu in range(int(ncpu)): sum += int(i.split()[cpu + 1]) @@ -600,10 +679,11 @@ def get_state(): if ifname is None: raise RuntimeError(f"no available iface associated with {server}") - path = "find /sys/devices|grep net/%s/statistics" % ifname - cmd = "%s/rx_packets|xargs cat;%s/tx_packets|xargs cat;" \ - "%s/rx_bytes|xargs cat;%s/tx_bytes|xargs cat" % (path, - path, path, path) + path = f"find /sys/devices|grep net/{ifname}/statistics" + cmd = ( + f"{path}/rx_packets|xargs cat;{path}/tx_packets|xargs cat;" + f"{path}/rx_bytes|xargs cat;{path}/tx_bytes|xargs cat" + ) output = netperf_base.ssh_cmd(server_ctl, cmd).split()[-4:] nrx = int(output[0]) @@ -611,51 +691,69 @@ def get_state(): nrxb = int(output[2]) ntxb = int(output[3]) - nre = int(netperf_base.ssh_cmd( - server_ctl, "grep Tcp /proc/net/snmp|tail -1").split()[12]) - state_list = ['rx_pkts', nrx, 'tx_pkts', ntx, 'rx_byts', nrxb, - 'tx_byts', ntxb, 're_pkts', nre] + nre = int( + netperf_base.ssh_cmd(server_ctl, "grep Tcp /proc/net/snmp|tail -1").split()[ + 12 + ] + ) + state_list = [ + "rx_pkts", + nrx, + "tx_pkts", + ntx, + "rx_byts", + nrxb, + "tx_byts", + ntxb, + "re_pkts", + nre, + ] try: nrx_intr = count_interrupt("virtio.-input") ntx_intr = count_interrupt("virtio.-output") sum = 0 for i in range(len(nrx_intr)): - state_list.append('rx_intr_%s' % i) + state_list.append(f"rx_intr_{i}") state_list.append(nrx_intr[i]) sum += nrx_intr[i] - state_list.append('rx_intr_sum') + state_list.append("rx_intr_sum") state_list.append(sum) sum = 0 for i in range(len(ntx_intr)): - state_list.append('tx_intr_%s' % i) + state_list.append(f"tx_intr_{i}") state_list.append(ntx_intr[i]) sum += ntx_intr[i] - state_list.append('tx_intr_sum') + state_list.append("tx_intr_sum") state_list.append(sum) except IndexError: ninit = count_interrupt("virtio.") - state_list.append('intr') + state_list.append("intr") state_list.append(ninit) exits = int(netperf_base.ssh_cmd(host, "cat /sys/kernel/debug/kvm/exits")) - state_list.append('exits') + state_list.append("exits") state_list.append(exits) return 
state_list def thread_cmd(params, i, numa_enable, client_s, timeout): - fname = "/tmp/netperf.%s.nf" % pid + fname = f"/tmp/netperf.{pid}.nf" option = "`command -v python python3 | head -1 ` " option += "/tmp/netperf_agent.py %d %s -D 1 -H %s -l %s %s" % ( - i, client_path, server, int(l) * 1.5, nf_args) - option += " >> %s" % fname + i, + client_path, + server, + int(l) * 1.5, + nf_args, + ) + option += f" >> {fname}" netperf_base.netperf_thread(params, numa_enable, client_s, option, fname) def all_clients_up(): try: - content = netperf_base.ssh_cmd(clients[-1], "cat %s" % fname) + content = netperf_base.ssh_cmd(clients[-1], f"cat {fname}") except: content = "" return False @@ -665,11 +763,13 @@ def all_clients_up(): def stop_netperf_clients(): if params.get("os_type_client") == "linux": - netperf_base.ssh_cmd(clients[-1], params.get("client_kill_linux"), - ignore_status=True) + netperf_base.ssh_cmd( + clients[-1], params.get("client_kill_linux"), ignore_status=True + ) else: - netperf_base.ssh_cmd(clients[-1], params.get("client_kill_windows"), - ignore_status=True) + netperf_base.ssh_cmd( + clients[-1], params.get("client_kill_windows"), ignore_status=True + ) def parse_demo_result(fname, sessions): """ @@ -688,12 +788,13 @@ def parse_demo_result(fname, sessions): break nresult = i - 1 if nresult < int(sessions): - test.error("We couldn't expect this parallism, expect %s get %s" - % (sessions, nresult)) + test.error( + f"We couldn't expect this parallism, expect {sessions} get {nresult}" + ) niteration = nresult // sessions result = 0.0 - for this in lines[-sessions * niteration:]: + for this in lines[-sessions * niteration :]: if "Interim" in this: result += float(re.findall(r"Interim result: *(\S+)", this)[0]) result = result / niteration @@ -704,31 +805,41 @@ def parse_demo_result(fname, sessions): while tries > 0: error_context.context("Start netperf client threads", test.log.info) pid = str(os.getpid()) - fname = "/tmp/netperf.%s.nf" % pid - netperf_base.ssh_cmd(clients[-1], "rm -f %s" % fname) + fname = f"/tmp/netperf.{pid}.nf" + netperf_base.ssh_cmd(clients[-1], f"rm -f {fname}") numa_enable = params.get("netperf_with_numa", "yes") == "yes" timeout_netperf_start = int(l) * 0.5 client_thread = threading.Thread( - target=thread_cmd, - kwargs={"params": params, - "i": int(sessions), - "numa_enable": numa_enable, - "client_s": clients[0], - "timeout": timeout_netperf_start}) + target=thread_cmd, + kwargs={ + "params": params, + "i": int(sessions), + "numa_enable": numa_enable, + "client_s": clients[0], + "timeout": timeout_netperf_start, + }, + ) client_thread.start() ret = {} - ret['pid'] = pid - - if utils_misc.wait_for(all_clients_up, timeout_netperf_start, 0.0, 0.2, - "Wait until all netperf clients start to work"): + ret["pid"] = pid + + if utils_misc.wait_for( + all_clients_up, + timeout_netperf_start, + 0.0, + 0.2, + "Wait until all netperf clients start to work", + ): test.log.debug("All netperf clients start to work.") # real & effective test starts if get_status_flag: start_state = get_state() - ret['mpstat'] = netperf_base.ssh_cmd(host, "mpstat 1 %d |tail -n 1" % (l - 1)) - finished_result = netperf_base.ssh_cmd(clients[-1], "cat %s" % fname) + ret["mpstat"] = netperf_base.ssh_cmd( + host, "mpstat 1 %d |tail -n 1" % (l - 1) + ) + finished_result = netperf_base.ssh_cmd(clients[-1], f"cat {fname}") # stop netperf clients stop_netperf_clients() @@ -738,22 +849,22 @@ def parse_demo_result(fname, sessions): end_state = get_state() if len(start_state) != len(end_state): msg = 
"Initial state not match end state:\n" - msg += " start state: %s\n" % start_state - msg += " end state: %s\n" % end_state - test.log.warn(msg) + msg += f" start state: {start_state}\n" + msg += f" end state: {end_state}\n" + test.log.warning(msg) else: for i in range(len(end_state) // 2): - ret[end_state[i * 2]] = (end_state[i * 2 + 1] - - start_state[i * 2 + 1]) + ret[end_state[i * 2]] = ( + end_state[i * 2 + 1] - start_state[i * 2 + 1] + ) client_thread.join() - error_context.context("Testing Results Treatment and Report", - test.log.info) + error_context.context("Testing Results Treatment and Report", test.log.info) f = open(fname, "w") f.write(finished_result) f.close() - ret['thu'] = parse_demo_result(fname, int(sessions)) + ret["thu"] = parse_demo_result(fname, int(sessions)) return ret break else: diff --git a/generic/tests/netstress_kill_guest.py b/generic/tests/netstress_kill_guest.py index 2787338c1e..76475aa220 100644 --- a/generic/tests/netstress_kill_guest.py +++ b/generic/tests/netstress_kill_guest.py @@ -2,12 +2,14 @@ import time from avocado.utils import process -from virttest import data_dir -from virttest import utils_misc -from virttest import utils_net -from virttest import utils_netperf -from virttest import env_process -from virttest import error_context +from virttest import ( + data_dir, + env_process, + error_context, + utils_misc, + utils_net, + utils_netperf, +) @error_context.context_aware @@ -32,6 +34,7 @@ def run(test, params, env): :param params: Dictionary with the test parameters :param env: Dictionary with test environment. """ + def get_ethernet_driver(session): """ Get driver of network cards. @@ -60,24 +63,27 @@ def netperf_stress(test, params, vm): n_client = utils_netperf.NetperfClient( vm.get_address(), params.get("client_path"), - netperf_source=os.path.join(data_dir.get_deps_dir("netperf"), - params.get("netperf_client_link")), + netperf_source=os.path.join( + data_dir.get_deps_dir("netperf"), params.get("netperf_client_link") + ), client=params.get("shell_client"), port=params.get("shell_port"), username=params.get("username"), password=params.get("password"), prompt=params.get("shell_prompt"), - linesep=params.get("shell_linesep", "\n").encode().decode( - 'unicode_escape'), + linesep=params.get("shell_linesep", "\n").encode().decode("unicode_escape"), status_test_command=params.get("status_test_command", ""), - compile_option=params.get("compile_option", "")) + compile_option=params.get("compile_option", ""), + ) n_server = utils_netperf.NetperfServer( utils_net.get_host_ip_address(params), params.get("server_path", "/var/tmp"), - netperf_source=os.path.join(data_dir.get_deps_dir("netperf"), - params.get("netperf_server_link")), + netperf_source=os.path.join( + data_dir.get_deps_dir("netperf"), params.get("netperf_server_link") + ), password=params.get("hostpassword"), - compile_option=params.get("compile_option", "")) + compile_option=params.get("compile_option", ""), + ) try: n_server.start() @@ -86,21 +92,24 @@ def netperf_stress(test, params, vm): test_protocols = params.get("test_protocol") netperf_output_unit = params.get("netperf_output_unit") test_option = params.get("test_option", "") - test_option += " -l %s" % test_duration + test_option += f" -l {test_duration}" if params.get("netperf_remote_cpu") == "yes": test_option += " -C" if params.get("netperf_local_cpu") == "yes": test_option += " -c" if netperf_output_unit in "GMKgmk": - test_option += " -f %s" % netperf_output_unit - t_option = "%s -t %s" % (test_option, test_protocols) - 
n_client.bg_start(utils_net.get_host_ip_address(params), - t_option, - params.get_numeric("netperf_para_sessions"), - params.get("netperf_cmd_prefix", ""), - package_sizes=params.get("netperf_sizes")) - if utils_misc.wait_for(n_client.is_netperf_running, 10, 0, 1, - "Wait netperf test start"): + test_option += f" -f {netperf_output_unit}" + t_option = f"{test_option} -t {test_protocols}" + n_client.bg_start( + utils_net.get_host_ip_address(params), + t_option, + params.get_numeric("netperf_para_sessions"), + params.get("netperf_cmd_prefix", ""), + package_sizes=params.get("netperf_sizes"), + ) + if utils_misc.wait_for( + n_client.is_netperf_running, 10, 0, 1, "Wait netperf test start" + ): test.log.info("Netperf test start successfully.") else: test.error("Can not start netperf client.") @@ -123,18 +132,21 @@ def netload_kill_problem(test, session_serial): test.log.warning("Could not stop firewall in guest") try: - error_context.context(("Run subtest netperf_stress between" - " host and guest.", test.log.info)) + error_context.context( + ("Run subtest netperf_stress between" " host and guest.", test.log.info) + ) stress_thread = None wait_time = int(params.get("wait_bg_time", 60)) bg_stress_run_flag = params.get("bg_stress_run_flag") vm_wait_time = int(params.get("wait_before_kill_vm")) env[bg_stress_run_flag] = False stress_thread = utils_misc.InterruptedThread( - netperf_stress, (test, params, vm)) + netperf_stress, (test, params, vm) + ) stress_thread.start() - utils_misc.wait_for(lambda: wait_time, 0, 1, - "Wait netperf_stress test start") + utils_misc.wait_for( + lambda: wait_time, 0, 1, "Wait netperf_stress test start" + ) test.log.info("Sleep %ss before killing the VM", vm_wait_time) time.sleep(vm_wait_time) msg = "During netperf running, Check that we can kill VM with signal 0" @@ -150,20 +162,17 @@ def netdriver_kill_problem(test, session_serial): times = params.get_numeric("repeat_times", 10) modules = get_ethernet_driver(session_serial) test.log.debug("Guest network driver(s): %s", modules) - msg = "Repeatedly load/unload network driver(s) for %s times." % times + msg = f"Repeatedly load/unload network driver(s) for {times} times." error_context.context(msg, test.log.info) for i in range(times): for module in modules: - error_context.context("Unload driver %s. Repeat: %s/%s" % - (module, i, times)) - session_serial.cmd_output_safe("rmmod %s" % module) + error_context.context(f"Unload driver {module}. Repeat: {i}/{times}") + session_serial.cmd_output_safe(f"rmmod {module}") for module in modules: - error_context.context("Load driver %s. Repeat: %s/%s" % - (module, i, times)) - session_serial.cmd_output_safe("modprobe %s" % module) + error_context.context(f"Load driver {module}. 
Repeat: {i}/{times}") + session_serial.cmd_output_safe(f"modprobe {module}") - error_context.context("Check that we can kill VM with signal 0.", - test.log.info) + error_context.context("Check that we can kill VM with signal 0.", test.log.info) kill_and_check(test, vm) vm = env.get_vm(params["main_vm"]) diff --git a/generic/tests/nfs_corrupt.py b/generic/tests/nfs_corrupt.py index a83652a540..50fc574259 100644 --- a/generic/tests/nfs_corrupt.py +++ b/generic/tests/nfs_corrupt.py @@ -1,36 +1,37 @@ -import os import logging - +import os from functools import partial -from avocado.utils import process -from avocado.utils import service -from virttest import utils_misc -from virttest import utils_net -from virttest import error_context -from virttest import utils_disk -from virttest import utils_numeric -from virttest import virt_vm -from virttest import qemu_monitor +from avocado.utils import process, service +from virttest import ( + error_context, + qemu_monitor, + utils_disk, + utils_misc, + utils_net, + utils_numeric, + virt_vm, +) from virttest.qemu_storage import QemuImg -LOG_JOB = logging.getLogger('avocado.test') +LOG_JOB = logging.getLogger("avocado.test") class NFSCorruptError(Exception): - def __init__(self, *args): Exception.__init__(self, *args) -class NFSCorruptConfig(object): - +class NFSCorruptConfig: """ This class sets up nfs_corrupt test environment. """ - iptables_template = ("iptables -t filter -{{op}} OUTPUT -d {ip} -m state" - " --state NEW,RELATED,ESTABLISHED -p tcp --dport 2049" - " -j REJECT") + + iptables_template = ( + "iptables -t filter -{{op}} OUTPUT -d {ip} -m state" + " --state NEW,RELATED,ESTABLISHED -p tcp --dport 2049" + " -j REJECT" + ) def __init__(self, test, params, ip="localhost"): self.nfs_dir = os.path.join(test.tmpdir, "nfs_dir") @@ -46,14 +47,18 @@ def __init__(self, test, params, ip="localhost"): self.service_name = name break else: - msg = ("Fail to set up NFS for this host, service " - "with name 'nfs' and 'nfs-server' not exist.") + msg = ( + "Fail to set up NFS for this host, service " + "with name 'nfs' and 'nfs-server' not exist." 
+ ) raise NFSCorruptError(msg) for attrname in ["start", "stop", "restart", "status"]: - setattr(self, attrname, - partial(getattr(self.service_manager, attrname), - self.service_name)) + setattr( + self, + attrname, + partial(getattr(self.service_manager, attrname), self.service_name), + ) @error_context.context_aware def setup(self, force_start=False): @@ -70,21 +75,14 @@ def setup(self, force_start=False): except OSError: pass - error_context.context("Checking available space to export", - LOG_JOB.info) + error_context.context("Checking available space to export", LOG_JOB.info) stat = os.statvfs(self.nfs_dir) free = stat.f_bsize * stat.f_bfree required = float( - utils_misc.normalize_data_size( - self.required_size, - order_magnitude="B" - ) - ) + utils_misc.normalize_data_size(self.required_size, order_magnitude="B") + ) if free < required: - msg = "Space available: %s, space needed: %s" % ( - utils_numeric.format_size_human_readable(free), - self.required_size - ) + msg = f"Space available: {utils_numeric.format_size_human_readable(free)}, space needed: {self.required_size}" raise NFSCorruptError(msg) if force_start: @@ -93,17 +91,20 @@ def setup(self, force_start=False): if not self.status(): self.start() - process.run("exportfs %s:%s -o rw,no_root_squash" % - (self.nfs_ip, self.nfs_dir), shell=True) - process.run("mount %s:%s %s -o rw,soft,timeo=30,retrans=1,vers=3" % - (self.nfs_ip, self.nfs_dir, self.mnt_dir), shell=True) + process.run( + f"exportfs {self.nfs_ip}:{self.nfs_dir} -o rw,no_root_squash", + shell=True, + ) + process.run( + f"mount {self.nfs_ip}:{self.nfs_dir} {self.mnt_dir} -o rw,soft,timeo=30,retrans=1,vers=3", + shell=True, + ) @error_context.context_aware def cleanup(self, force_stop=False): error_context.context("Cleaning up test NFS share", LOG_JOB.info) - process.run("umount -l -f %s" % self.mnt_dir, shell=True) - process.run("exportfs -u %s:%s" % (self.nfs_ip, self.nfs_dir), - shell=True) + process.run(f"umount -l -f {self.mnt_dir}", shell=True) + process.run(f"exportfs -u {self.nfs_ip}:{self.nfs_dir}", shell=True) if force_stop: self.stop() @@ -125,7 +126,7 @@ def is_mounted_dir_acessible(self): return False return True - def iptables_rule_gen(self, op='A'): + def iptables_rule_gen(self, op="A"): """ Generate iptables rules to block/accept nfs connection. """ @@ -150,6 +151,7 @@ def run(test, params, env): :param params: Dictionary with the test parameters. :param env: Dictionary with test environment. """ + def get_nfs_devname(params, session): """ Get the possbile name of nfs storage dev name in guest. 
@@ -215,11 +217,10 @@ def check_vm_status(vm, status): test.error("failed to create VM") session = vm.wait_for_login(timeout=int(params.get("login_timeout", 360))) - nfs_devname = utils_misc.get_linux_drive_path(session, stg_params[ - "nfs_serial"]) + nfs_devname = utils_misc.get_linux_drive_path(session, stg_params["nfs_serial"]) # Write disk on NFS server error_context.context("Write disk that image on NFS", test.log.info) - write_disk_cmd = "dd if=/dev/zero of=%s oflag=direct" % nfs_devname + write_disk_cmd = f"dd if=/dev/zero of={nfs_devname} oflag=direct" test.log.info("dd with command: %s", write_disk_cmd) session.sendline(write_disk_cmd) try: @@ -229,31 +230,27 @@ def check_vm_status(vm, status): pass try: - error_context.context("Make sure guest is running before test", - test.log.info) + error_context.context("Make sure guest is running before test", test.log.info) vm.resume() vm.verify_status("running") try: - error_context.context("Reject NFS connection on host", - test.log.info) - process.system(config.iptables_rule_gen('A')) + error_context.context("Reject NFS connection on host", test.log.info) + process.system(config.iptables_rule_gen("A")) - error_context.context("Check if VM status is 'paused'", - test.log.info) + error_context.context("Check if VM status is 'paused'", test.log.info) if not utils_misc.wait_for( lambda: check_vm_status(vm, "paused"), - int(params.get('wait_paused_timeout', 240))): + int(params.get("wait_paused_timeout", 240)), + ): test.error("Guest is not paused after stop NFS") finally: - error_context.context("Accept NFS connection on host", - test.log.info) - process.system(config.iptables_rule_gen('D')) + error_context.context("Accept NFS connection on host", test.log.info) + process.system(config.iptables_rule_gen("D")) error_context.context("Ensure nfs is resumed", test.log.info) - nfs_resume_timeout = int(params.get('nfs_resume_timeout', 240)) - if not utils_misc.wait_for(config.is_mounted_dir_acessible, - nfs_resume_timeout): + nfs_resume_timeout = int(params.get("nfs_resume_timeout", 240)) + if not utils_misc.wait_for(config.is_mounted_dir_acessible, nfs_resume_timeout): test.error("NFS connection does not resume") error_context.context("Continue guest", test.log.info) diff --git a/generic/tests/nic_promisc.py b/generic/tests/nic_promisc.py index be68434386..2ce12a1db0 100644 --- a/generic/tests/nic_promisc.py +++ b/generic/tests/nic_promisc.py @@ -1,9 +1,7 @@ import os -from virttest import utils_misc -from virttest import utils_test -from virttest import utils_net from virttest import error_context as error +from virttest import utils_misc, utils_net, utils_test @error.context_aware @@ -19,10 +17,11 @@ def run(test, params, env): :param params: Dictionary with the test parameters. :param env: Dictionary with test environment. 
""" + def set_nic_promisc_onoff(session): if os_type == "linux": - session.cmd_output_safe("ip link set %s promisc on" % ethname) - session.cmd_output_safe("ip link set %s promisc off" % ethname) + session.cmd_output_safe(f"ip link set {ethname} promisc on") + session.cmd_output_safe(f"ip link set {ethname} promisc off") else: cmd = "c:\\set_win_promisc.py" session.cmd(cmd) @@ -43,13 +42,15 @@ def set_nic_promisc_onoff(session): try: transfer_thread = utils_misc.InterruptedThread( - utils_test.run_file_transfer, (test, params, env)) + utils_test.run_file_transfer, (test, params, env) + ) error.context("Run utils_test.file_transfer ...", test.log.info) transfer_thread.start() - error.context("Perform file transfer while turning nic promisc on/off", - test.log.info) + error.context( + "Perform file transfer while turning nic promisc on/off", test.log.info + ) while transfer_thread.is_alive(): set_nic_promisc_onoff(session_serial) except Exception: diff --git a/generic/tests/nicdriver_unload.py b/generic/tests/nicdriver_unload.py index fc64df15bf..745832ee04 100644 --- a/generic/tests/nicdriver_unload.py +++ b/generic/tests/nicdriver_unload.py @@ -1,12 +1,9 @@ import os -import time import random +import time from avocado.utils import crypto, process -from virttest import utils_misc -from virttest import utils_net -from virttest import data_dir -from virttest import error_context +from virttest import data_dir, error_context, utils_misc, utils_net @error_context.context_aware @@ -24,12 +21,13 @@ def run(test, params, env): :param params: Dictionary with the test parameters. :param env: Dictionary with test environment. """ + def reset_guest_udevrules(session, rules_file, rules_content): """ Write guest udev rules, then reboot the guest and return the new session """ - set_cmd = "echo '%s' > %s" % (rules_content, rules_file) + set_cmd = f"echo '{rules_content}' > {rules_file}" session.cmd_output_safe(set_cmd) return vm.reboot() @@ -65,7 +63,7 @@ def all_threads_alive(threads): vm_mac_address = vm.get_mac_address() udev_rules_file = "/etc/udev/rules.d/70-persistent-net.rules" rules = params.get("rules") - if not session.cmd_status("[ -e %s ]" % udev_rules_file): + if not session.cmd_status(f"[ -e {udev_rules_file} ]"): if not rules: test.cancel("You must set udev rules before test") rules = rules % vm_mac_address @@ -80,26 +78,29 @@ def all_threads_alive(threads): sys_path = params.get("sys_path") % (ethname) # readlink in RHEL4.8 doesn't have '-e' param, should use '-f' in RHEL4.8. 
readlink_cmd = params.get("readlink_command", "readlink -e") - driver = os.path.basename(session.cmd("%s %s" % (readlink_cmd, - sys_path)).strip()) + driver = os.path.basename(session.cmd(f"{readlink_cmd} {sys_path}").strip()) test.log.info("The guest interface %s using driver %s", ethname, driver) - error_context.context("Host test file prepare, create %dMB file on host" % - filesize, test.log.info) + error_context.context( + "Host test file prepare, create %dMB file on host" % filesize, test.log.info + ) tmp_dir = data_dir.get_tmp_dir() - host_path = os.path.join(tmp_dir, "host_file_%s" % - utils_misc.generate_random_string(8)) - guest_path = os.path.join("/home", "guest_file_%s" % - utils_misc.generate_random_string(8)) + host_path = os.path.join( + tmp_dir, f"host_file_{utils_misc.generate_random_string(8)}" + ) + guest_path = os.path.join( + "/home", f"guest_file_{utils_misc.generate_random_string(8)}" + ) cmd = "dd if=/dev/zero of=%s bs=1M count=%d" % (host_path, filesize) process.run(cmd) file_checksum = crypto.hash_file(host_path, algorithm="md5") - error_context.context("Guest test file prepare, Copy file %s from host to " - "guest" % host_path, test.log.info) + error_context.context( + f"Guest test file prepare, Copy file {host_path} from host to " "guest", + test.log.info, + ) vm.copy_files_to(host_path, guest_path, timeout=transfer_timeout) - if session.cmd_status("md5sum %s | grep %s" % - (guest_path, file_checksum)): + if session.cmd_status(f"md5sum {guest_path} | grep {file_checksum}"): test.cancel("File MD5SUMs changed after copy to guest") test.log.info("Test env prepare successfully") @@ -111,16 +112,18 @@ def all_threads_alive(threads): file_paths = [] host_file_paths = [] for sess_index in range(int(params.get("sessions_num", "10"))): - sess_path = os.path.join("/home", "dst-%s" % sess_index) - host_sess_path = os.path.join(tmp_dir, "dst-%s" % sess_index) + sess_path = os.path.join("/home", f"dst-{sess_index}") + host_sess_path = os.path.join(tmp_dir, f"dst-{sess_index}") thread1 = utils_misc.InterruptedThread( - vm.copy_files_to, (host_path, sess_path), - {"timeout": transfer_timeout}) + vm.copy_files_to, (host_path, sess_path), {"timeout": transfer_timeout} + ) thread2 = utils_misc.InterruptedThread( - vm.copy_files_from, (guest_path, host_sess_path), - {"timeout": transfer_timeout}) + vm.copy_files_from, + (guest_path, host_sess_path), + {"timeout": transfer_timeout}, + ) thread1.start() threads.append(thread1) thread2.start() @@ -131,35 +134,40 @@ def all_threads_alive(threads): utils_misc.wait_for(lambda: all_threads_alive(threads), 60, 10, 1) time.sleep(5) - error_context.context("Repeatedly unload/load NIC driver during file " - "transfer", test.log.info) + error_context.context( + "Repeatedly unload/load NIC driver during file " "transfer", test.log.info + ) while not all_threads_done(threads): - error_context.context("Shutdown the driver for NIC interface.", - test.log.info) + error_context.context( + "Shutdown the driver for NIC interface.", test.log.info + ) sleep_time = random.randint(1, 5) error_context.context("Unload NIC driver.", test.log.info) - session_serial.cmd_output_safe("modprobe -r %s" % driver) + session_serial.cmd_output_safe(f"modprobe -r {driver}") time.sleep(sleep_time) error_context.context("Load NIC driver.", test.log.info) - session_serial.cmd_output_safe("modprobe %s" % driver) + session_serial.cmd_output_safe(f"modprobe {driver}") time.sleep(sleep_time) # files md5sums check - error_context.context("File transfer finished, checking 
files md5sums", - test.log.info) + error_context.context( + "File transfer finished, checking files md5sums", test.log.info + ) err_info = [] for copied_file in file_paths: - if session_serial.cmd_status("md5sum %s | grep %s" % - (copied_file, file_checksum)): + if session_serial.cmd_status( + f"md5sum {copied_file} | grep {file_checksum}" + ): err_msg = "Guest file %s md5sum changed" err_info.append(err_msg % copied_file) for copied_file in host_file_paths: - if process.system("md5sum %s | grep %s" % - (copied_file, file_checksum), shell=True): + if process.system( + f"md5sum {copied_file} | grep {file_checksum}", shell=True + ): err_msg = "Host file %s md5sum changed" err_info.append(err_msg % copied_file) if err_info: - test.error("files MD5SUMs changed after copying %s" % err_info) + test.error(f"files MD5SUMs changed after copying {err_info}") except Exception: for thread in threads: thread.join(suppress_exception=True) @@ -168,10 +176,10 @@ def all_threads_alive(threads): for thread in threads: thread.join() for copied_file in file_paths: - session_serial.cmd("rm -rf %s" % copied_file) + session_serial.cmd(f"rm -rf {copied_file}") for copied_file in host_file_paths: - process.system("rm -rf %s" % copied_file) - session_serial.cmd("%s %s" % ("rm -rf", guest_path)) + process.system(f"rm -rf {copied_file}") + session_serial.cmd("{} {}".format("rm -rf", guest_path)) os.remove(host_path) session.close() session_serial.close() diff --git a/generic/tests/ntpd.py b/generic/tests/ntpd.py index 8bd4a51dfb..eb2aa2c1ac 100644 --- a/generic/tests/ntpd.py +++ b/generic/tests/ntpd.py @@ -1,15 +1,12 @@ import time import aexpect - from avocado.utils import process -from virttest import remote -from virttest import utils_test +from virttest import remote, utils_test from virttest.staging import service -class NTPTest(object): - +class NTPTest: """ This class is for ntpd test """ @@ -35,11 +32,14 @@ def __init__(self, test, params, env): self.long_sleep = int(params.get("long_sleep", "0")) self.vm = env.get_vm(self.vm_name) try: - self.server_session = remote.wait_for_login('ssh', - self.server_ip, "22", - self.server_user, - self.server_password, - r"[\$#]\s*$") + self.server_session = remote.wait_for_login( + "ssh", + self.server_ip, + "22", + self.server_user, + self.server_password, + r"[\$#]\s*$", + ) self.session = self.vm.wait_for_login() except remote.LoginTimeoutError as detail: self.test.cancel(str(detail)) @@ -60,38 +60,36 @@ def server_config(self): 3.restrict the host and guest """ self.test.log.info("waiting for login server.....") - self.server_hostname = self.server_session.\ - cmd_output('hostname').strip() + self.server_hostname = self.server_session.cmd_output("hostname").strip() self.test.log.debug("service hostname is %s", self.server_hostname) - cmd = 'echo \'ZONE = "America/New_York"\' > /etc/sysconfig/clock' + cmd = "echo 'ZONE = \"America/New_York\"' > /etc/sysconfig/clock" status = self.server_session.cmd_status(cmd) if status: self.test.error("set ZONE in server failed.") - cmd_ln = 'ln -sf /usr/share/zoneinfo/America/New_York /etc/localtime' + cmd_ln = "ln -sf /usr/share/zoneinfo/America/New_York /etc/localtime" self.server_session.cmd_status(cmd_ln) # Add server of local clock - output = self.server_session.cmd_output("grep '^server %s'" - " /etc/ntp.conf" - % self.local_clock).strip() + output = self.server_session.cmd_output( + f"grep '^server {self.local_clock}'" " /etc/ntp.conf" + ).strip() if not output: - status = self.server_session.cmd_status("echo 'server %s' >> " 
- "/etc/ntp.conf" - % self.local_clock) + status = self.server_session.cmd_status( + f"echo 'server {self.local_clock}' >> " "/etc/ntp.conf" + ) if status: self.test.error("config local_clock failed.") # Add host and guest in restrict - output = self.server_session.cmd_output("grep '^restrict %s'" - " /etc/ntp.conf" - % self.net_range).strip() + output = self.server_session.cmd_output( + f"grep '^restrict {self.net_range}'" " /etc/ntp.conf" + ).strip() if not output: - status = self.server_session.cmd_status("echo 'restrict %s " - "mask %s %s' " - ">> /etc/ntp.conf" - % (self.net_range, - self.mask, - self.restrict_option)) + status = self.server_session.cmd_status( + f"echo 'restrict {self.net_range} " + f"mask {self.mask} {self.restrict_option}' " + ">> /etc/ntp.conf" + ) if status: self.test.error("config restrict failed.") @@ -110,12 +108,12 @@ def host_config(self): 4.start ntpd service """ # Set the time zone to New_York - cmd = ('echo \'ZONE = "America/New_York"\' > /etc/sysconfig/clock;') + cmd = "echo 'ZONE = \"America/New_York\"' > /etc/sysconfig/clock;" try: process.run(cmd, ignore_status=False, shell=True) except process.CmdError as detail: - self.test.fail("set Zone on host failed.%s" % detail) - cmd_ln = 'ln -sf /usr/share/zoneinfo/America/New_York /etc/localtime' + self.test.fail(f"set Zone on host failed.{detail}") + cmd_ln = "ln -sf /usr/share/zoneinfo/America/New_York /etc/localtime" process.run(cmd_ln, ignore_status=True, shell=True) # Check the cpu info of constant_tsc @@ -140,19 +138,21 @@ def host_config(self): self.test.fail("timing by ntpdate on host failed!!") # Delete server of local clock - result = process.run("grep '^server %s' /etc/ntp.conf" % - self.local_clock, ignore_status=True, shell=True) + result = process.run( + f"grep '^server {self.local_clock}' /etc/ntp.conf", + ignore_status=True, + shell=True, + ) if result.stdout.strip(): - process.run("sed -i '/%s/d' /etc/ntp.conf" % self.local_clock, - shell=True) + process.run(f"sed -i '/{self.local_clock}/d' /etc/ntp.conf", shell=True) # Check the ntp.conf and add server ip into it - cmd = "grep '^server %s' /etc/ntp.conf" % self.server_ip + cmd = f"grep '^server {self.server_ip}' /etc/ntp.conf" result = process.run(cmd, ignore_status=True, shell=True) if not result.stdout.strip(): - cmd = "echo 'server %s' >> /etc/ntp.conf" % self.server_ip + cmd = f"echo 'server {self.server_ip}' >> /etc/ntp.conf" try: process.run(cmd, ignore_status=False, shell=True) - except process.CmdError as detail: + except process.CmdError: self.test.fail("config /etc/ntp.conf on host failed!!") # Start ntpd service @@ -168,9 +168,9 @@ def guest_config(self): 4.restart ntpd service """ # Set the time zone to american new york - cmd = ('echo \'ZONE = "America/New_York"\' > /etc/sysconfig/clock;') + cmd = "echo 'ZONE = \"America/New_York\"' > /etc/sysconfig/clock;" self.session.cmd(cmd) - cmd_ln = 'ln -sf /usr/share/zoneinfo/America/New_York /etc/localtime' + cmd_ln = "ln -sf /usr/share/zoneinfo/America/New_York /etc/localtime" self.session.cmd(cmd_ln) # Timing by ntpdate @@ -190,15 +190,17 @@ def guest_config(self): self.test.fail("timing by ntpdate on guest failed!!") # Delete server of local clock - output = self.session.cmd_output("grep '%s' /etc/ntp.conf" - % self.local_clock).strip() + output = self.session.cmd_output( + f"grep '{self.local_clock}' /etc/ntp.conf" + ).strip() if not output: - self.session.cmd("sed -i '/%s/d' /etc/ntp.conf" % self.local_clock) + self.session.cmd(f"sed -i '/{self.local_clock}/d' /etc/ntp.conf") # 
Check the ntp.conf and add server ip into it - output = self.session.cmd_output("grep '^server %s' /etc/ntp.conf" - % self.server_ip) + output = self.session.cmd_output( + f"grep '^server {self.server_ip}' /etc/ntp.conf" + ) if not output: - cmd = "echo 'server %s' >> /etc/ntp.conf" % self.server_ip + cmd = f"echo 'server {self.server_ip}' >> /etc/ntp.conf" status = self.session.cmd_status(cmd) if status: self.test.fail("config /etc/ntp.conf on server failed!!") @@ -214,21 +216,19 @@ def ntpq_test(self): self.test.log.info("waiting for ntpd timing : %s s", self.ntpd_sleep) time.sleep(self.ntpd_sleep) # Test on host - cmd_ip = "ntpq -p | grep '^*%s'" % self.server_ip + cmd_ip = f"ntpq -p | grep '^*{self.server_ip}'" cmd_name = "" if self.server_hostname: - cmd_name = "ntpq -p | grep '^*%s'" % self.server_hostname + cmd_name = f"ntpq -p | grep '^*{self.server_hostname}'" result_ntpq_ip = process.run(cmd_ip, ignore_status=True, shell=True) - result_ntpq_name = process.run(cmd_name, ignore_status=True, - shell=True) - if (not result_ntpq_ip.stdout.strip() and - not result_ntpq_name.stdout.strip()): - self.test.fail("ntpd setting failed of %s host !!" % self.vm_name) + result_ntpq_name = process.run(cmd_name, ignore_status=True, shell=True) + if not result_ntpq_ip.stdout.strip() and not result_ntpq_name.stdout.strip(): + self.test.fail(f"ntpd setting failed of {self.vm_name} host !!") # Test on guest output_ip = self.session.cmd_output(cmd_ip).strip() output_name = self.session.cmd_output(cmd_name).strip() if not output_ip and not output_name: - self.test.fail("ntpd setting failed of %s guest !!" % self.vm_name) + self.test.fail(f"ntpd setting failed of {self.vm_name} guest !!") def long_time_test(self): """ @@ -257,8 +257,9 @@ def run(test, params, env): """ ntp_test = NTPTest(test, params, env) - ping_s, _ = utils_test.ping(ntp_test.server_ip, count=1, - timeout=5, session=ntp_test.session) + ping_s, _ = utils_test.ping( + ntp_test.server_ip, count=1, timeout=5, session=ntp_test.session + ) if ping_s: ntp_test.close_session() test.cancel("Please make sure the guest can ping server!") @@ -269,7 +270,7 @@ def run(test, params, env): try: ntp_test.server_config() except (aexpect.ShellError, remote.LoginTimeoutError) as detail: - test.fail("server config failed. %s" % detail) + test.fail(f"server config failed. {detail}") test.log.info("waiting for ntp server : %s s", ntp_test.ntpdate_sleep) # Host and Guest will use server's ntpd service to set time. 
# here wait for some seconds for server ntpd service valid @@ -279,24 +280,24 @@ def run(test, params, env): try: ntp_test.host_config() except (aexpect.ShellError, remote.LoginTimeoutError) as detail: - test.fail("host config failed.%s" % detail) + test.fail(f"host config failed.{detail}") # Guest configuration try: ntp_test.guest_config() except (aexpect.ShellError, remote.LoginTimeoutError) as detail: - test.fail("guest config failed.%s" % detail) + test.fail(f"guest config failed.{detail}") try: # Wait 20min for ntpq test ntp_test.ntpq_test() except (aexpect.ShellError, remote.LoginTimeoutError) as detail: - test.fail("ntpq test failed.%s" % detail) + test.fail(f"ntpq test failed.{detail}") try: # Wait 24h for test ntp_test.long_time_test() except (aexpect.ShellError, remote.LoginTimeoutError) as detail: - test.fail("long time test failed.%s" % detail) + test.fail(f"long time test failed.{detail}") finally: ntp_test.close_session() diff --git a/generic/tests/ntttcp.py b/generic/tests/ntttcp.py index 2ade11ec72..08bda485b6 100644 --- a/generic/tests/ntttcp.py +++ b/generic/tests/ntttcp.py @@ -1,13 +1,10 @@ -import os import glob +import os import re import aexpect - from avocado.utils import process -from virttest import utils_misc -from virttest import utils_test -from virttest import error_context +from virttest import error_context, utils_misc, utils_test _receiver_ready = False @@ -23,13 +20,14 @@ def _verify_vm_driver(vm, test, driver_name, timeout=360): :param timeout: the timeout for the login and verify operation """ - error_context.context("Check if driver is installed" - " and verified for vm: %s" % vm.name, test.log.info) + error_context.context( + "Check if driver is installed" f" and verified for vm: {vm.name}", + test.log.info, + ) session = vm.wait_for_login(timeout=timeout) - session = utils_test.qemu.windrv_check_running_verifier(session, vm, - test, - driver_name, - timeout) + session = utils_test.qemu.windrv_check_running_verifier( + session, vm, test, driver_name, timeout + ) session.close() @@ -46,8 +44,7 @@ def run(test, params, env): :param env: Dictionary with test environment. 
""" login_timeout = int(params.get("login_timeout", 360)) - results_path = os.path.join(test.resultsdir, - 'raw_output_%s' % test.iteration) + results_path = os.path.join(test.resultsdir, f"raw_output_{test.iteration}") platform = "x86" if "64" in params["vm_arch_name"]: platform = "x64" @@ -62,13 +59,11 @@ def run(test, params, env): # verify driver _verify_vm_driver(vm_sender, test, driver_verifier) - test.log.debug(process.system("numactl --hardware", ignore_status=True, - shell=True)) - test.log.debug(process.system("numactl --show", ignore_status=True, - shell=True)) + test.log.debug(process.system("numactl --hardware", ignore_status=True, shell=True)) + test.log.debug(process.system("numactl --show", ignore_status=True, shell=True)) # pin guest vcpus/memory/vhost threads to last numa node of host by default - if params.get('numa_node'): - numa_node = int(params.get('numa_node')) + if params.get("numa_node"): + numa_node = int(params.get("numa_node")) node = utils_misc.NumaNode(numa_node) utils_test.qemu.pin_vm_threads(vm_sender, node) @@ -80,9 +75,8 @@ def run(test, params, env): sess = vm_receiver.wait_for_login(timeout=login_timeout) receiver_addr = vm_receiver.get_address() if not receiver_addr: - test.error("Can't get receiver(%s) ip address" % - vm_receiver.name) - if params.get('numa_node'): + test.error(f"Can't get receiver({vm_receiver.name}) ip address") + if params.get("numa_node"): utils_test.qemu.pin_vm_threads(vm_receiver, node) finally: if sess: @@ -90,7 +84,7 @@ def run(test, params, env): @error_context.context_aware def install_ntttcp(session): - """ Install ntttcp through a remote session """ + """Install ntttcp through a remote session""" test.log.info("Installing NTttcp ...") try: # Don't install ntttcp if it's already installed @@ -98,61 +92,59 @@ def install_ntttcp(session): session.cmd(params.get("check_ntttcp_cmd")) except aexpect.ShellCmdError: ntttcp_install_cmd = params.get("ntttcp_install_cmd") - ntttcp_install_cmd = utils_misc.set_winutils_letter(session, ntttcp_install_cmd) + ntttcp_install_cmd = utils_misc.set_winutils_letter( + session, ntttcp_install_cmd + ) error_context.context("Installing NTttcp on guest") session.cmd(ntttcp_install_cmd % (platform, platform), timeout=200) def receiver(): - """ Receive side """ + """Receive side""" test.log.info("Starting receiver process on %s", receiver_addr) session = vm_receiver.wait_for_login(timeout=login_timeout) install_ntttcp(session) ntttcp_receiver_cmd = params.get("ntttcp_receiver_cmd") global _receiver_ready - f = open(results_path + ".receiver", 'a') + f = open(results_path + ".receiver", "a") for b in buffers: utils_misc.wait_for(lambda: not _wait(), timeout) _receiver_ready = True rbuf = params.get("fixed_rbuf", b) - cmd = ntttcp_receiver_cmd % ( - session_num, receiver_addr, rbuf, buf_num) - r = session.cmd_output(cmd, timeout=timeout, - print_func=test.log.debug) - f.write("Send buffer size: %s\n%s\n%s" % (b, cmd, r)) + cmd = ntttcp_receiver_cmd % (session_num, receiver_addr, rbuf, buf_num) + r = session.cmd_output(cmd, timeout=timeout, print_func=test.log.debug) + f.write(f"Send buffer size: {b}\n{cmd}\n{r}") f.close() session.close() def _wait(): - """ Check if receiver is ready """ + """Check if receiver is ready""" global _receiver_ready if _receiver_ready: return _receiver_ready return False def sender(): - """ Send side """ + """Send side""" test.log.info("Sarting sender process ...") session = vm_sender.wait_for_serial_login(timeout=login_timeout) install_ntttcp(session) ntttcp_sender_cmd = 
params.get("ntttcp_sender_cmd") - f = open(results_path + ".sender", 'a') + f = open(results_path + ".sender", "a") try: global _receiver_ready for b in buffers: - cmd = ntttcp_sender_cmd % ( - session_num, receiver_addr, b, buf_num) + cmd = ntttcp_sender_cmd % (session_num, receiver_addr, b, buf_num) # Wait until receiver ready utils_misc.wait_for(_wait, timeout) - r = session.cmd_output(cmd, timeout=timeout, - print_func=test.log.debug) + r = session.cmd_output(cmd, timeout=timeout, print_func=test.log.debug) _receiver_ready = False - f.write("Send buffer size: %s\n%s\n%s" % (b, cmd, r)) + f.write(f"Send buffer size: {b}\n{cmd}\n{r}") finally: f.close() session.close() def parse_file(resultfile): - """ Parse raw result files and generate files with standard format """ + """Parse raw result files and generate files with standard format""" fileobj = open(resultfile, "r") lst = [] found = False @@ -184,20 +176,24 @@ def parse_file(resultfile): else: test.error("Can't start backgroud receiver thread") finally: - for i in glob.glob("%s.receiver" % results_path): - f = open("%s.RHS" % results_path, "w") + for i in glob.glob(f"{results_path}.receiver"): + f = open(f"{results_path}.RHS", "w") raw = " buf(k)| throughput(Mbit/s)" test.log.info(raw) - f.write("#ver# %s\n#ver# host kernel: %s\n" % - (process.system_output("rpm -q qemu-kvm", shell=True, - verbose=False, ignore_status=True), - os.uname()[2])) - desc = """#desc# The tests are sessions of "NTttcp", send buf" -" number is %s. 'throughput' was taken from ntttcp's report. + f.write( + "#ver# {}\n#ver# host kernel: {}\n".format( + process.system_output( + "rpm -q qemu-kvm", shell=True, verbose=False, ignore_status=True + ), + os.uname()[2], + ) + ) + desc = f"""#desc# The tests are sessions of "NTttcp", send buf" +" number is {buf_num}. 'throughput' was taken from ntttcp's report. #desc# How to read the results: #desc# - The Throughput is measured in Mbit/sec. 
#desc# -""" % (buf_num) +""" f.write(desc) f.write(raw + "\n") for j in parse_file(i): diff --git a/generic/tests/os_update.py b/generic/tests/os_update.py index 803aa685ca..6e22359e37 100644 --- a/generic/tests/os_update.py +++ b/generic/tests/os_update.py @@ -18,5 +18,5 @@ def run(test, params, env): session = vm.wait_for_login() test.log.debug("Performing %s on VM %s", cmd, vm.name) if session.cmd_status(cmd, timeout=timeout) != 0: - test.fail("Failed to update VM %s using %s" % (vm.name, cmd)) + test.fail(f"Failed to update VM {vm.name} using {cmd}") session.close() diff --git a/generic/tests/ping.py b/generic/tests/ping.py index bf170618ae..475f8601e6 100644 --- a/generic/tests/ping.py +++ b/generic/tests/ping.py @@ -1,31 +1,43 @@ from avocado.utils import process -from virttest import utils_test -from virttest import utils_net -from virttest import error_context +from virttest import error_context, utils_net, utils_test -def _ping_with_params(test, params, dest, interface=None, - packet_size=None, interval=None, - count=0, session=None, flood=False): +def _ping_with_params( + test, + params, + dest, + interface=None, + packet_size=None, + interval=None, + count=0, + session=None, + flood=False, +): if flood: cmd = "ping " + dest + " -f -q" if interface: - cmd += " -S %s" % interface + cmd += f" -S {interface}" flood_minutes = float(params.get("flood_minutes", 10)) - status, output = utils_net.raw_ping(cmd, flood_minutes * 60, - session, test.log.debug) + status, output = utils_net.raw_ping( + cmd, flood_minutes * 60, session, test.log.debug + ) else: timeout = float(count) * 1.5 - status, output = utils_net.ping(dest, count, interval, interface, - packet_size, session=session, - timeout=timeout) + status, output = utils_net.ping( + dest, + count, + interval, + interface, + packet_size, + session=session, + timeout=timeout, + ) if status != 0: - test.fail("Ping failed, status: %s," - " output: %s" % (status, output)) + test.fail(f"Ping failed, status: {status}," f" output: {output}") if params.get("strict_check", "no") == "yes": ratio = utils_test.get_loss_ratio(output) if ratio != 0: - test.fail("Loss ratio is %s" % ratio) + test.fail(f"Loss ratio is {ratio}") @error_context.context_aware @@ -72,9 +84,12 @@ def run(test, params, env): ext_host = process.system_output(ext_host_get_cmd, shell=True) ext_host = ext_host.decode() except process.CmdError: - test.log.warn("Can't get specified host with cmd '%s'," - " Fallback to default host '%s'", - ext_host_get_cmd, ext_host) + test.log.warning( + "Can't get specified host with cmd '%s'," + " Fallback to default host '%s'", + ext_host_get_cmd, + ext_host, + ) dest_ips = [ext_host] sessions = [session] interfaces = [None] @@ -91,33 +106,45 @@ def run(test, params, env): interface = None nic_name = nic.get("nic_name") if not ip: - test.fail("Could not get the ip of nic index %d: %s", - i, nic_name) + test.fail("Could not get the ip of nic index %d: %s", i, nic_name) dest_ips.append(ip) sessions.append(None) interfaces.append(interface) - for (ip, interface, session) in zip(dest_ips, interfaces, sessions): - error_context.context("Ping test with dest: %s" % ip, test.log.info) + for ip, interface, session in zip(dest_ips, interfaces, sessions): + error_context.context(f"Ping test with dest: {ip}", test.log.info) # ping with different size & interval for size in packet_sizes: for interval in interval_times: - test.log.info("Ping with packet size: %s and interval: %s", - size, interval) - _ping_with_params(test, params, ip, interface, size, - 
interval, session=session, count=counts) + test.log.info( + "Ping with packet size: %s and interval: %s", size, interval + ) + _ping_with_params( + test, + params, + ip, + interface, + size, + interval, + session=session, + count=counts, + ) # ping with flood if params.get_boolean("flood_ping"): if not ping_ext_host or params.get("os_type") == "linux": error_context.context("Flood ping test", test.log.info) - _ping_with_params(test, params, ip, interface, - session=session, flood=True) + _ping_with_params( + test, params, ip, interface, session=session, flood=True + ) # ping to check whether the network is alive - error_context.context("Ping test after flood ping," - " Check if the network is still alive", - test.log.info) - _ping_with_params(test, params, ip, interface, - session=session, count=counts) + error_context.context( + "Ping test after flood ping," + " Check if the network is still alive", + test.log.info, + ) + _ping_with_params( + test, params, ip, interface, session=session, count=counts + ) diff --git a/generic/tests/pktgen_perf.py b/generic/tests/pktgen_perf.py index 4c3127795b..96c8a43d2a 100644 --- a/generic/tests/pktgen_perf.py +++ b/generic/tests/pktgen_perf.py @@ -3,16 +3,15 @@ import time from avocado.utils import process +from virttest import env_process, error_context, utils_misc, utils_test from provider import pktgen_utils -from provider.vdpa_sim_utils import VhostVdpaNetSimulatorTest, VirtioVdpaNetSimulatorTest +from provider.vdpa_sim_utils import ( + VhostVdpaNetSimulatorTest, + VirtioVdpaNetSimulatorTest, +) -from virttest import env_process -from virttest import utils_test -from virttest import utils_misc -from virttest import error_context - -LOG_JOB = logging.getLogger('avocado.test') +LOG_JOB = logging.getLogger("avocado.test") @error_context.context_aware @@ -59,7 +58,7 @@ def _pin_vm_threads(node): def init_vm_and_login(test, params, env, result_file, pktgen_runner): error_context.context("Init the VM, and try to login", test.log.info) - params["start_vm"] = 'yes' + params["start_vm"] = "yes" env_process.preprocess_vm(test, params, env, params.get("main_vm")) vm = env.get_vm(params["main_vm"]) vm.verify_alive() @@ -69,9 +68,11 @@ def init_vm_and_login(test, params, env, result_file, pktgen_runner): process.system_output("numactl --show") _pin_vm_threads(params.get("numa_node")) guest_ver = session_serial.cmd_output(guest_ver_cmd) - result_file.write("### guest-kernel-ver :%s" % guest_ver) - if pktgen_runner.is_version_lt_rhel7(session_serial.cmd('uname -r')): - pktgen_runner.install_package(guest_ver.strip(), vm=vm, session_serial=session_serial) + result_file.write(f"### guest-kernel-ver :{guest_ver}") + if pktgen_runner.is_version_lt_rhel7(session_serial.cmd("uname -r")): + pktgen_runner.install_package( + guest_ver.strip(), vm=vm, session_serial=session_serial + ) return vm, session_serial # get parameter from dictionary @@ -87,15 +88,15 @@ def init_vm_and_login(test, params, env, result_file, pktgen_runner): result_file = open(result_path, "w") kvm_ver = process.system_output(kvm_ver_chk_cmd, shell=True).decode() host_ver = os.uname()[2] - result_file.write("### kvm-userspace-ver : %s\n" % kvm_ver) - result_file.write("### kvm_version : %s\n" % host_ver) + result_file.write(f"### kvm-userspace-ver : {kvm_ver}\n") + result_file.write(f"### kvm_version : {host_ver}\n") if disable_iptables_rules_cmd: error_context.context("disable iptables rules on host") process.system(disable_iptables_rules_cmd, shell=True) pktgen_runner = pktgen_utils.PktgenRunner() - 
if pktgen_runner.is_version_lt_rhel7(process.getoutput('uname -r')): + if pktgen_runner.is_version_lt_rhel7(process.getoutput("uname -r")): pktgen_runner.install_package(host_ver) vdpa_net_test = None @@ -107,22 +108,36 @@ def init_vm_and_login(test, params, env, result_file, pktgen_runner): interface = vdpa_net_test.add_dev(params.get("netdst"), params.get("mac")) LOG_JOB.info("The virtio_vdpa device name is: '%s'", interface) LOG_JOB.info("Test virtio_vdpa with the simulator on the host") - pktgen_utils.run_tests_for_category(params, result_file, interface=interface) + pktgen_utils.run_tests_for_category( + params, result_file, interface=interface + ) elif vdpa_test and test_vm: vdpa_net_test = VhostVdpaNetSimulatorTest() vdpa_net_test.setup() - dev = vdpa_net_test.add_dev(params.get("netdst_nic2"), params.get("mac_nic2")) + dev = vdpa_net_test.add_dev( + params.get("netdst_nic2"), params.get("mac_nic2") + ) LOG_JOB.info("The vhost_vdpa device name is: '%s'", dev) LOG_JOB.info("Test vhost_vdpa with the simulator on the vm") process.system_output("cat /sys/module/vdpa_sim/parameters/use_va") - vm, session_serial = init_vm_and_login(test, params, env, result_file, pktgen_runner) - pktgen_utils.run_tests_for_category(params, result_file, test_vm, vm, session_serial) + vm, session_serial = init_vm_and_login( + test, params, env, result_file, pktgen_runner + ) + pktgen_utils.run_tests_for_category( + params, result_file, test_vm, vm, session_serial + ) elif not vdpa_test: - vm, session_serial = init_vm_and_login(test, params, env, result_file, pktgen_runner) + vm, session_serial = init_vm_and_login( + test, params, env, result_file, pktgen_runner + ) if vp_vdpa: - pktgen_utils.run_tests_for_category(params, result_file, test_vm, vm, session_serial, vp_vdpa) + pktgen_utils.run_tests_for_category( + params, result_file, test_vm, vm, session_serial, vp_vdpa + ) else: - pktgen_utils.run_tests_for_category(params, result_file, test_vm, vm, session_serial) + pktgen_utils.run_tests_for_category( + params, result_file, test_vm, vm, session_serial + ) finally: if test_vm: vm.verify_kernel_crash() @@ -134,5 +149,6 @@ def init_vm_and_login(test, params, env, result_file, pktgen_runner): time.sleep(5) vdpa_net_test.remove_dev(params.get("netdst_nic2")) vdpa_net_test.cleanup() - error_context.context("Verify Host and guest kernel no error" - "and call trace", test.log.info) + error_context.context( + "Verify Host and guest kernel no error" "and call trace", test.log.info + ) diff --git a/generic/tests/pxe_boot.py b/generic/tests/pxe_boot.py index 262c61196b..accaf0880d 100644 --- a/generic/tests/pxe_boot.py +++ b/generic/tests/pxe_boot.py @@ -1,5 +1,4 @@ import aexpect - from virttest import error_context @@ -22,14 +21,15 @@ def run(test, params, env): timeout = int(params.get("pxe_timeout", 60)) error_context.context("Snoop packet in the tap device", test.log.info) - tcpdump_cmd = "tcpdump -nli %s port '(tftp or bootps)'" % vm.get_ifname() + tcpdump_cmd = f"tcpdump -nli {vm.get_ifname()} port '(tftp or bootps)'" try: - tcpdump_process = aexpect.run_bg(command=tcpdump_cmd, - output_func=test.log.debug, - output_prefix="(pxe capture) ") - if not tcpdump_process.read_until_output_matches(['tftp'], - timeout=timeout): - test.fail("Couldn't find any TFTP packets after %s seconds" % timeout) + tcpdump_process = aexpect.run_bg( + command=tcpdump_cmd, + output_func=test.log.debug, + output_prefix="(pxe capture) ", + ) + if not tcpdump_process.read_until_output_matches(["tftp"], timeout=timeout): + 
test.fail(f"Couldn't find any TFTP packets after {timeout} seconds") test.log.info("Found TFTP packet") finally: try: diff --git a/generic/tests/rtc.py b/generic/tests/rtc.py index d88da6b79b..cbe8ed2adc 100644 --- a/generic/tests/rtc.py +++ b/generic/tests/rtc.py @@ -5,18 +5,19 @@ class RtcTest(TimeClientTest): def __init__(self, test, params, env, test_name, rtc_path): - super(RtcTest, self).__init__(test, params, env, test_name) + super().__init__(test, params, env, test_name) self.def_rtc = rtc_path self.maxfreq = 64 def _test(self): - if self.session.cmd_status("ls %s" % self.def_rtc): - self.test.cancel("RTC device %s does not exist" % self.def_rtc) - (exit_status, output) = self.session.cmd_status_output("cd %s && ./rtctest %s %s" % - (self.src_dir, self.def_rtc, - self.maxfreq), timeout=240) + if self.session.cmd_status(f"ls {self.def_rtc}"): + self.test.cancel(f"RTC device {self.def_rtc} does not exist") + (exit_status, output) = self.session.cmd_status_output( + f"cd {self.src_dir} && ./rtctest {self.def_rtc} {self.maxfreq}", + timeout=240, + ) if exit_status != 0: - self.test.fail("Test fail on RTC device, output: %s" % output) + self.test.fail(f"Test fail on RTC device, output: {output}") def test_RTC(self): self._test() @@ -33,7 +34,7 @@ def run(test, params, env): :param env: Dictionary with the test environment. """ - rtc_test = RtcTest(test, params, env, 'rtc', '/dev/rtc0') + rtc_test = RtcTest(test, params, env, "rtc", "/dev/rtc0") rtc_test.setUp() rtc_test.runTest() rtc_test.cleanUp() diff --git a/generic/tests/save_restore.py b/generic/tests/save_restore.py index 2e5427dbfd..ca9b5587cb 100644 --- a/generic/tests/save_restore.py +++ b/generic/tests/save_restore.py @@ -1,6 +1,6 @@ -import time -import tempfile import os.path +import tempfile +import time def run(test, params, env): @@ -53,8 +53,7 @@ def check_system(test, vm, timeout): session = vm.wait_for_login(timeout=timeout) result = session.is_responsive(timeout=timeout / 10.0) if not result: - test.log.warning( - "Login session established, but non-responsive") + test.log.warning("Login session established, but non-responsive") # assume guest is just busy with stuff except: test.fail("VM check timed out and/or VM non-responsive") @@ -70,34 +69,35 @@ def check_system(test, vm, timeout): repeat = int(params.get("save_restore_repeat", "1")) path = os.path.abspath(params.get("save_restore_path", "/tmp")) - file_pfx = vm.name + '-' + file_pfx = vm.name + "-" save_file = get_save_filename(path, file_pfx) save_restore_bg_command = params.get("save_restore_bg_command") bg_command_pid = None if save_restore_bg_command: - session.cmd(save_restore_bg_command + ' &') + session.cmd(save_restore_bg_command + " &") try: # assume sh-like shell, try to get background process's pid - bg_command_pid = int(session.cmd('jobs -rp')) + bg_command_pid = int(session.cmd("jobs -rp")) except ValueError: - test.log.warning( - "Background guest command 'job -rp' output not PID") + test.log.warning("Background guest command 'job -rp' output not PID") del session # don't leave stray ssh session lying around over save/restore start_time = time.time() # 'now' needs outside scope for error.TestFail() at end # especially if exception thrown in loop before completion - now = time_to_stop = (start_time + save_restore_duration) + now = time_to_stop = start_time + save_restore_duration while True: try: vm.verify_kernel_crash() check_system(test, vm, 120) # networking needs time to recover - test.log.info("Save/restores left: %d (or %0.4f more seconds)", 
- repeat, (time_to_stop - time.time())) + test.log.info( + "Save/restores left: %d (or %0.4f more seconds)", + repeat, + (time_to_stop - time.time()), + ) if start_delay: - test.log.debug("Sleeping %0.4f seconds start_delay", - start_delay) + test.log.debug("Sleeping %0.4f seconds start_delay", start_delay) time.sleep(start_delay) vm.pause() vm.verify_kernel_crash() @@ -105,8 +105,7 @@ def check_system(test, vm, timeout): vm.save_to_file(save_file) vm.verify_kernel_crash() if restore_delay: - test.log.debug("Sleeping %0.4f seconds restore_delay", - restore_delay) + test.log.debug("Sleeping %0.4f seconds restore_delay", restore_delay) time.sleep(restore_delay) vm.restore_from_file(save_file) vm.verify_kernel_crash() @@ -127,12 +126,13 @@ def check_system(test, vm, timeout): test.log.info("Save/Restore itteration(s) complete.") if save_restore_bg_command and bg_command_pid: session = vm.wait_for_login(timeout=120) - status = session.cmd_status('kill %d' % bg_command_pid) + status = session.cmd_status("kill %d" % bg_command_pid) if status != 0: - test.log.warning("Background guest command kill %d failed", - bg_command_pid) + test.log.warning("Background guest command kill %d failed", bg_command_pid) del session if repeat > 0: # time_to_stop reached but itterations didn't complete - test.fail("Save/Restore save_restore_duration" - " exceeded by %0.4f seconds with %d itterations" - " remaining." % (now - time_to_stop, repeat + 1)) + test.fail( + "Save/Restore save_restore_duration" + " exceeded by %0.4f seconds with %d iterations" + " remaining." % (now - time_to_stop, repeat + 1) + ) diff --git a/generic/tests/shutdown.py b/generic/tests/shutdown.py index 0a25fc8e7f..152d69221a 100644 --- a/generic/tests/shutdown.py +++ b/generic/tests/shutdown.py @@ -1,9 +1,7 @@ -import time import re +import time -from virttest import utils_test -from virttest import env_process -from virttest import error_context +from virttest import env_process, error_context, utils_test @error_context.context_aware @@ -30,8 +28,9 @@ def run(test, params, env): vm = env.get_vm(params["main_vm"]) vm.verify_alive() session = vm.wait_for_login(timeout=timeout) - error_context.base_context("shutting down the VM %s/%s" % - (i + 1, shutdown_count), test.log.info) + error_context.base_context( + f"shutting down the VM {i + 1}/{shutdown_count}", test.log.info + ) if params.get("setup_runlevel") == "yes": error_context.context("Setup the runlevel for guest", test.log.info) utils_test.qemu.setup_runlevel(params, session) @@ -39,16 +38,17 @@ def run(test, params, env): if shutdown_method == "shell": # Send a shutdown command to the guest's shell session.sendline(shutdown_command) - error_context.context("waiting VM to go down (shutdown shell cmd)", - test.log.info) + error_context.context( + "waiting VM to go down (shutdown shell cmd)", test.log.info + ) elif shutdown_method == "system_powerdown": # Sleep for a while -- give the guest a chance to finish booting time.sleep(sleep_time) # Send a system_powerdown monitor command vm.monitor.system_powerdown() - error_context.context("waiting VM to go down " - "(system_powerdown monitor cmd)", - test.log.info) + error_context.context( + "waiting VM to go down " "(system_powerdown monitor cmd)", test.log.info + ) if not vm.wait_for_shutdown(360): test.fail("Guest refuses to go down") @@ -57,13 +57,13 @@ def run(test, params, env): check_failed = False vm_status = vm.monitor.get_status() if vm.monitor.protocol == "qmp": - if vm_status['status'] != "shutdown": + if vm_status["status"] !=
"shutdown": check_failed = True else: if not re.findall(r"paused\s+\(shutdown\)", vm_status): check_failed = True if check_failed: - test.fail("Status check from monitor is: %s" % str(vm_status)) + test.fail(f"Status check from monitor is: {str(vm_status)}") if params.get("disable_shutdown") == "yes": # Quit the qemu process vm.destroy(gracefully=False) diff --git a/generic/tests/stress_boot.py b/generic/tests/stress_boot.py index 0e736b59ec..be5628b726 100644 --- a/generic/tests/stress_boot.py +++ b/generic/tests/stress_boot.py @@ -1,6 +1,5 @@ -from virttest import env_process -from virttest import error_context from avocado.utils import process +from virttest import env_process, error_context @error_context.context_aware @@ -17,8 +16,7 @@ def run(test, params, env): :param params: Dictionary with the test parameters :param env: Dictionary with test environment. """ - error_context.base_context("waiting for the first guest to be up", - test.log.info) + error_context.base_context("waiting for the first guest to be up", test.log.info) host_cpu_cnt_cmd = params.get("host_cpu_cnt_cmd") host_cpu_num = int(process.getoutput(host_cpu_cnt_cmd).strip()) @@ -32,7 +30,7 @@ def run(test, params, env): params["mem"] = vm_mem_size params["start_vm"] = "yes" - vm_name = params['main_vm'] + vm_name = params["main_vm"] env_process.preprocess_vm(test, params, env, vm_name) vm = env.get_vm(vm_name) @@ -48,8 +46,7 @@ def run(test, params, env): try: while num <= int(params.get("max_vms")): # Clone vm according to the first one - error_context.base_context("booting guest #%d" % num, - test.log.info) + error_context.base_context("booting guest #%d" % num, test.log.info) vm_name = "vm%d" % num vm_params = vm.params.copy() curr_vm = vm.clone(vm_name, vm_params) @@ -63,14 +60,18 @@ def run(test, params, env): # Check whether all previous shell sessions are responsive for i, se in enumerate(sessions): - error_context.context("checking responsiveness of guest" - " #%d" % (i + 1), test.log.debug) + error_context.context( + "checking responsiveness of guest" " #%d" % (i + 1), + test.log.debug, + ) se.cmd(params.get("alive_test_cmd")) num += 1 except Exception as emsg: - test.fail("Expect to boot up %s guests." - "Failed to boot up #%d guest with " - "error: %s." % (params["max_vms"], num, emsg)) + test.fail( + "Expect to boot up %s guests." + "Failed to boot up #%d guest with " + "error: %s." 
% (params["max_vms"], num, emsg) + ) finally: for se in sessions: se.close() diff --git a/generic/tests/syzkaller.py b/generic/tests/syzkaller.py index 9d5764998a..2ad2e5c09e 100644 --- a/generic/tests/syzkaller.py +++ b/generic/tests/syzkaller.py @@ -1,15 +1,11 @@ -import os import json -import time +import os import shutil +import time -from avocado.utils.software_manager import manager from avocado.utils import process - -from virttest import utils_misc -from virttest import ssh_key -from virttest import storage -from virttest import data_dir +from avocado.utils.software_manager import manager +from virttest import data_dir, ssh_key, storage, utils_misc def run(test, params, env): @@ -29,18 +25,21 @@ def run(test, params, env): test.cancel("golang package install failed") home = os.environ["HOME"] if not ("goroot/bin" in os.environ["PATH"] and "go/bin" in os.environ["PATH"]): - process.run('echo "PATH=%s/goroot/bin:%s/go/bin:$PATH" >> %s/.bashrc' % (home, home, home), shell=True) - process.run("source %s/.bashrc" % home, shell=True) + process.run( + f'echo "PATH={home}/goroot/bin:{home}/go/bin:$PATH" >> {home}/.bashrc', + shell=True, + ) + process.run(f"source {home}/.bashrc", shell=True) process.run("go get -u -d github.com/google/syzkaller/...", shell=True) - process.run("cd %s/go/src/github.com/google/syzkaller;make" % home, shell=True) - syzkaller_path = "%s/go/src/github.com/google/syzkaller" % home + process.run(f"cd {home}/go/src/github.com/google/syzkaller;make", shell=True) + syzkaller_path = f"{home}/go/src/github.com/google/syzkaller" # Step 2: Setup Guest for passwordless ssh from host vm = env.get_vm(params["main_vm"]) session = vm.wait_for_login() - ssh_key.setup_ssh_key(vm.get_address(), - params.get("username"), - params.get("password")) + ssh_key.setup_ssh_key( + vm.get_address(), params.get("username"), params.get("password") + ) session.close() vm.destroy() @@ -49,17 +48,26 @@ def run(test, params, env): guest_kernel_branch = params.get("syz_kernel_branch") guest_kernel_config = params.get("syz_kernel_config") guest_kernel_build_path = utils_misc.get_path(test.debugdir, "linux") - process.run("git clone --depth 1 %s -b %s %s" % (guest_kernel_repo, guest_kernel_branch, guest_kernel_build_path), shell=True) - process.run("cd %s;git log -1;make %s" % (guest_kernel_build_path, guest_kernel_config), shell=True) - process.run('cd %s; echo "CONFIG_KCOV=y\nCONFIG_GCC_PLUGINS=y" >> .config; make olddefconfig' % guest_kernel_build_path, shell=True) - process.run("cd %s;make -j 40" % guest_kernel_build_path, shell=True) + process.run( + f"git clone --depth 1 {guest_kernel_repo} -b {guest_kernel_branch} {guest_kernel_build_path}", + shell=True, + ) + process.run( + f"cd {guest_kernel_build_path};git log -1;make {guest_kernel_config}", + shell=True, + ) + process.run( + f'cd {guest_kernel_build_path}; echo "CONFIG_KCOV=y\nCONFIG_GCC_PLUGINS=y" >> .config; make olddefconfig', + shell=True, + ) + process.run(f"cd {guest_kernel_build_path};make -j 40", shell=True) # Step 4: Prepare syzkaller config with qemu params and guest params syz_config_path = utils_misc.get_path(test.debugdir, "syzkaller_config") - os.makedirs("%s/syzkaller" % test.debugdir) - workdir = "%s/syzkaller" % test.debugdir - sshkey = "%s/.ssh/id_rsa" % os.environ["HOME"] - kernel_path = "%s/vmlinux" % guest_kernel_build_path + os.makedirs(f"{test.debugdir}/syzkaller") + workdir = f"{test.debugdir}/syzkaller" + sshkey = "{}/.ssh/id_rsa".format(os.environ["HOME"]) + kernel_path = f"{guest_kernel_build_path}/vmlinux" 
vm_config = { "count": int(params.get("syz_count")), @@ -67,33 +75,36 @@ def run(test, params, env): "mem": int(params.get("mem")), "kernel": kernel_path, "cmdline": params.get("kernel_args"), - "qemu_args": params.get("syz_qemu_args") - } + "qemu_args": params.get("syz_qemu_args"), + } syz_config = { - 'target': params.get("syz_target"), - 'workdir': workdir, + "target": params.get("syz_target"), + "workdir": workdir, "http": params.get("syz_http"), "image": storage.get_image_filename(params, data_dir.get_data_dir()), "syzkaller": syzkaller_path, "procs": int(params.get("syz_procs")), "type": "qemu", "sshkey": sshkey, - "vm": vm_config + "vm": vm_config, } try: with open(syz_config_path, "w") as fp: json.dump(syz_config, fp) - except IOError as err: + except OSError as err: test.error("Unable to update syzkaller config: %s", err) end_time = time.time() # Step 5: Start sykaller config with specified time # Let's calculate the syzkaller timeout from # test timeout excluding current elapsed time + buffer testtimeout = int(params.get("test_timeout")) - (int(end_time - start_time) + 10) - cmd = "%s/bin/syz-manager -config %s %s" % (syzkaller_path, syz_config_path, params.get("syz_cmd_params")) - process.run(cmd, timeout=testtimeout, - ignore_status=True, shell=True) + cmd = "{}/bin/syz-manager -config {} {}".format( + syzkaller_path, + syz_config_path, + params.get("syz_cmd_params"), + ) + process.run(cmd, timeout=testtimeout, ignore_status=True, shell=True) # Let's delete linux kernel folder from test-results as it would # consume lot of space and test log have all the information about # it incase to retrieve it back. diff --git a/generic/tests/trans_hugepage.py b/generic/tests/trans_hugepage.py index 33ea250898..1a04d72bf1 100644 --- a/generic/tests/trans_hugepage.py +++ b/generic/tests/trans_hugepage.py @@ -2,8 +2,7 @@ import re from avocado.utils import process -from virttest import funcatexit -from virttest import error_context +from virttest import error_context, funcatexit def cleanup(debugfs_path, session): @@ -11,7 +10,7 @@ def cleanup(debugfs_path, session): Umount the debugfs and close the session """ if os.path.ismount(debugfs_path): - process.run("umount %s" % debugfs_path, shell=True) + process.run(f"umount {debugfs_path}", shell=True) if os.path.isdir(debugfs_path): os.removedirs(debugfs_path) session.close() @@ -28,6 +27,7 @@ def run(test, params, env): :param params: Dictionary with test parameters. :param env: Dictionary with the test environment. 
""" + def get_mem_status(params, role): if role == "host": info = process.getoutput("cat /proc/meminfo") @@ -35,20 +35,20 @@ def get_mem_status(params, role): info = session.cmd("cat /proc/meminfo") output = None for h in re.split("\n+", info): - if h.startswith("%s" % params): - output = re.split(r'\s+', h)[1] + if h.startswith(f"{params}"): + output = re.split(r"\s+", h)[1] if output is None: raise ValueError(f"unsupported meminfo param: {params}") return int(output) dd_timeout = float(params.get("dd_timeout", 900)) - mem = params.get_numeric('mem') - largepages_files = params.objects('largepages_files') + mem = params.get_numeric("mem") + largepages_files = params.objects("largepages_files") failures = [] debugfs_flag = 1 - debugfs_path = os.path.join(test.tmpdir, 'debugfs') - mem_path = os.path.join("/tmp", 'thp_space') + debugfs_path = os.path.join(test.tmpdir, "debugfs") + mem_path = os.path.join("/tmp", "thp_space") login_timeout = float(params.get("login_timeout", "3600")) @@ -56,51 +56,52 @@ def get_mem_status(params, role): if not os.path.ismount(debugfs_path): if not os.path.isdir(debugfs_path): os.makedirs(debugfs_path) - process.run("mount -t debugfs none %s" % debugfs_path, shell=True) + process.run(f"mount -t debugfs none {debugfs_path}", shell=True) vm = env.get_vm(params.get("main_vm")) session = vm.wait_for_login(timeout=login_timeout) - funcatexit.register(env, params.get("type"), cleanup, debugfs_path, - session) + funcatexit.register(env, params.get("type"), cleanup, debugfs_path, session) test.log.info("Smoke test start") error_context.context("smoke test") - nr_ah_before = get_mem_status('AnonHugePages', 'host') + nr_ah_before = get_mem_status("AnonHugePages", "host") if nr_ah_before <= 0: - e_msg = 'smoke: Host is not using THP' + e_msg = "smoke: Host is not using THP" test.log.error(e_msg) failures.append(e_msg) # Protect system from oom killer - if get_mem_status('MemFree', 'guest') // 1024 < mem: - mem = get_mem_status('MemFree', 'guest') // 1024 + if get_mem_status("MemFree", "guest") // 1024 < mem: + mem = get_mem_status("MemFree", "guest") // 1024 - session.cmd("mkdir -p %s" % mem_path) + session.cmd(f"mkdir -p {mem_path}") - session.cmd("mount -t tmpfs -o size=%sM none %s" % (str(mem), mem_path)) + session.cmd(f"mount -t tmpfs -o size={str(mem)}M none {mem_path}") count = mem // 4 - session.cmd("dd if=/dev/zero of=%s/1 bs=4000000 count=%s" % - (mem_path, count), timeout=dd_timeout) + session.cmd( + f"dd if=/dev/zero of={mem_path}/1 bs=4000000 count={count}", + timeout=dd_timeout, + ) - nr_ah_after = get_mem_status('AnonHugePages', 'host') + nr_ah_after = get_mem_status("AnonHugePages", "host") if nr_ah_after <= nr_ah_before: - e_msg = ('smoke: Host did not use new THP during dd') + e_msg = "smoke: Host did not use new THP during dd" test.log.error(e_msg) failures.append(e_msg) if debugfs_flag == 1: largepages = 0 for largepages_file in largepages_files: - largepages_path = '%s/kvm/%s' % (debugfs_path, largepages_file) + largepages_path = f"{debugfs_path}/kvm/{largepages_file}" if os.path.exists(largepages_path): - largepages += int(open(largepages_path, 'r').read()) + largepages += int(open(largepages_path, "r").read()) if largepages <= 0: - e_msg = 'smoke: KVM is not using THP' + e_msg = "smoke: KVM is not using THP" test.log.error(e_msg) failures.append(e_msg) @@ -110,8 +111,8 @@ def get_mem_status(params, role): count = count // 3 test.log.info("Stress test start") error_context.context("stress test") - cmd = "rm -rf %s/*; for i in `seq %s`; do dd " % 
(mem_path, count) - cmd += "if=/dev/zero of=%s/$i bs=4000000 count=1& done;wait" % mem_path + cmd = f"rm -rf {mem_path}/*; for i in `seq {count}`; do dd " + cmd += f"if=/dev/zero of={mem_path}/$i bs=4000000 count=1& done;wait" output = session.cmd_output(cmd, timeout=dd_timeout) if len(re.findall("No space", output)) > count * 0.05: @@ -120,19 +121,22 @@ def get_mem_status(params, role): failures.append(e_msg) try: - output = session.cmd('pidof dd') + output = session.cmd("pidof dd") except Exception: output = None if output is not None: - for i in re.split('\n+', output): - session.cmd('kill -9 %s' % i) + for i in re.split("\n+", output): + session.cmd(f"kill -9 {i}") - session.cmd("umount %s" % mem_path) + session.cmd(f"umount {mem_path}") test.log.info("Stress test finished") error_context.context("") if failures: - test.fail("THP base test reported %s failures:\n%s" % - (len(failures), "\n".join(failures))) + test.fail( + "THP base test reported {} failures:\n{}".format( + len(failures), "\n".join(failures) + ) + ) diff --git a/generic/tests/trans_hugepage_defrag.py b/generic/tests/trans_hugepage_defrag.py index 9b601e8391..6aca212b86 100644 --- a/generic/tests/trans_hugepage_defrag.py +++ b/generic/tests/trans_hugepage_defrag.py @@ -1,9 +1,7 @@ -import time import os +import time -from virttest import test_setup -from virttest import error_context -from virttest import kernel_interface +from virttest import error_context, kernel_interface, test_setup from provider import thp_fragment_tool @@ -51,8 +49,7 @@ def change_feature_status(status, feature_path, test_config): off_action = "0" else: raise ValueError( - "Uknown possible values for file %s: %s" - % (test_config.thp_path, possible_values) + f"Unknown possible values for file {test_config.thp_path}: {possible_values}" ) if status == "on": @@ -74,20 +71,20 @@ def change_feature_status(status, feature_path, test_config): change_feature_status("off", "defrag", test_config) thps_defrag_off = int(thp_fragment_tool.get_tool_output().split()[1]) - test.log.debug("THPs allocated with defrag off: %d" % thps_defrag_off) + test.log.debug("THPs allocated with defrag off: %d", thps_defrag_off) error_context.context("activating khugepaged defrag functionality", test.log.info) change_feature_status("on", "khugepaged/defrag", test_config) change_feature_status("on", "defrag", test_config) thps_defrag_on = int(thp_fragment_tool.get_tool_output().split()[1]) - test.log.debug("THPs allocated with defrag on: %d" % thps_defrag_on) + test.log.debug("THPs allocated with defrag on: %d", thps_defrag_on) if thps_defrag_off >= thps_defrag_on: test.fail( "No memory defragmentation on host: " - "%s THPs before turning " - "khugepaged defrag on, %s after it" % (thps_defrag_off, thps_defrag_on) + f"{thps_defrag_off} THPs before turning " + f"khugepaged defrag on, {thps_defrag_on} after it" ) test.log.info("Defrag test succeeded") thp_fragment_tool.clean() diff --git a/generic/tests/trans_hugepage_memory_stress.py b/generic/tests/trans_hugepage_memory_stress.py index c75c48105f..ca679ad889 100644 --- a/generic/tests/trans_hugepage_memory_stress.py +++ b/generic/tests/trans_hugepage_memory_stress.py @@ -3,9 +3,8 @@ import time from avocado.utils import process -from virttest import utils_test +from virttest import error_context, utils_test from virttest.staging import utils_memory -from virttest import error_context @error_context.context_aware @@ -21,7 +20,7 @@ def run(test, params, env): nr_ah = [] debugfs_flag = 1 - debugfs_path = os.path.join(test.tmpdir,
'debugfs') + debugfs_path = os.path.join(test.tmpdir, "debugfs") mem = int(params.get("mem")) qemu_mem = int(params.get("qemu_mem", "64")) hugetlbfs_path = params.get("hugetlbfs_path", "/proc/sys/vm/nr_hugepages") @@ -32,17 +31,15 @@ def run(test, params, env): if not os.path.isdir(debugfs_path): os.makedirs(debugfs_path) try: - process.system("mount -t debugfs none %s" % debugfs_path, - shell=True) + process.system(f"mount -t debugfs none {debugfs_path}", shell=True) except Exception: debugfs_flag = 0 try: # Allocated free memory to hugetlbfs - mem_free = int(utils_memory.read_from_meminfo('MemFree')) / 1024 - mem_swap = int(utils_memory.read_from_meminfo('SwapFree')) / 1024 - hugepage_size = (int(utils_memory.read_from_meminfo('Hugepagesize')) / - 1024) + mem_free = int(utils_memory.read_from_meminfo("MemFree")) / 1024 + mem_swap = int(utils_memory.read_from_meminfo("SwapFree")) / 1024 + hugepage_size = int(utils_memory.read_from_meminfo("Hugepagesize")) / 1024 nr_hugetlbfs = math.ceil((mem_free + mem_swap - mem - qemu_mem) / hugepage_size) fd = open(hugetlbfs_path, "w") fd.write(str(nr_hugetlbfs)) @@ -50,25 +47,24 @@ def run(test, params, env): error_context.context("Memory stress test") - nr_ah.append(int(utils_memory.read_from_meminfo('AnonHugePages'))) + nr_ah.append(int(utils_memory.read_from_meminfo("AnonHugePages"))) if nr_ah[0] <= 0: test.fail("VM is not using transparent hugepage") # Run stress memory heavy in guest - test_mem = float(mem)*float(params.get("mem_ratio", 0.8)) - stress_args = "--cpu 4 --io 4 --vm 2 --vm-bytes %sM" % int(test_mem / 2) + test_mem = float(mem) * float(params.get("mem_ratio", 0.8)) + stress_args = f"--cpu 4 --io 4 --vm 2 --vm-bytes {int(test_mem / 2)}M" stress_test = utils_test.VMStress(vm, "stress", params, stress_args=stress_args) stress_test.load_stress_tool() time.sleep(int(params.get("stress_time", 120))) - nr_ah.append(int(utils_memory.read_from_meminfo('AnonHugePages'))) + nr_ah.append(int(utils_memory.read_from_meminfo("AnonHugePages"))) test.log.debug("The huge page using for guest is: %s", nr_ah) if nr_ah[1] <= nr_ah[0]: - test.log.warn( - "VM don't use transparent hugepage while memory stress") + test.log.warning("VM don't use transparent hugepage while memory stress") if debugfs_flag == 1: - if int(open(hugetlbfs_path, 'r').read()) <= 0: + if int(open(hugetlbfs_path, "r").read()) <= 0: test.fail("KVM doesn't use transparenthugepage") test.log.info("memory stress test finished") @@ -80,6 +76,6 @@ def run(test, params, env): fd.write("0") fd.close() if os.path.ismount(debugfs_path): - process.run("umount %s" % debugfs_path, shell=True) + process.run(f"umount {debugfs_path}", shell=True) if os.path.isdir(debugfs_path): os.removedirs(debugfs_path) diff --git a/generic/tests/trans_hugepage_relocated.py b/generic/tests/trans_hugepage_relocated.py index 8c1ef70b20..47a350419f 100644 --- a/generic/tests/trans_hugepage_relocated.py +++ b/generic/tests/trans_hugepage_relocated.py @@ -1,8 +1,7 @@ -import time import re +import time from avocado.utils import process - from virttest import utils_test from virttest.staging import utils_memory @@ -84,7 +83,7 @@ def nr_hugepage_check(sleep_time, wait_time): guest_mem_free = int(guest_mem_free) / 1024 / 1024 file_size = min(1024, int(guest_mem_free / 2)) - cmd = "mount -t tmpfs -o size=%sM none /mnt" % file_size + cmd = f"mount -t tmpfs -o size={file_size}M none /mnt" s, o = session.cmd_status_output(cmd) if nr_hugetlbfs: with open(hugetlbfs_path, "w") as hugepage_cfg: @@ -98,7 +97,7 @@ def 
nr_hugepage_check(sleep_time, wait_time): while bg.is_alive(): count = file_size / 2 - cmd = "dd if=/dev/urandom of=/mnt/test bs=2M count=%s" % count + cmd = f"dd if=/dev/urandom of=/mnt/test bs=2M count={count}" s, o = session.cmd_status_output(cmd, dd_timeout) if bg: @@ -122,8 +121,7 @@ def nr_hugepage_check(sleep_time, wait_time): test.log.debug("The total mem_increase: %d", mem_increase) if mem_increase < file_size * 0.5: test.error( - "Hugepages allocated can not reach a half: %s/%s" - % (mem_increase, file_size) + f"Hugepages allocated can not reach a half: {mem_increase}/{file_size}" ) session.close() test.log.info("Relocated test succeed") diff --git a/generic/tests/trans_hugepage_swapping.py b/generic/tests/trans_hugepage_swapping.py index 48a95a9adb..881ffc4b8d 100644 --- a/generic/tests/trans_hugepage_swapping.py +++ b/generic/tests/trans_hugepage_swapping.py @@ -1,9 +1,8 @@ -import os import math +import os from avocado.utils import process -from virttest import env_process -from virttest import error_context +from virttest import env_process, error_context from virttest.staging import utils_memory from virttest.utils_misc import normalize_data_size @@ -26,7 +25,7 @@ def get_mem_info(mem_check_list): mem_info = {} for key in mem_check_list: value = utils_memory.read_from_meminfo(key) - mem_info[key] = int(float(normalize_data_size("%s kB" % value))) + mem_info[key] = int(float(normalize_data_size(f"{value} kB"))) return mem_info try: @@ -38,8 +37,13 @@ def get_mem_info(mem_check_list): # @swap_size: Swap size - SwapTotal # @swap_free: Free swap size - SwapFree # @hugepage_size: Page size of one hugepage - Hugepagesize - mem_check_list = ["MemTotal", "MemFree", "SwapTotal", - "SwapFree", "Hugepagesize"] + mem_check_list = [ + "MemTotal", + "MemFree", + "SwapTotal", + "SwapFree", + "Hugepagesize", + ] mem_info = get_mem_info(mem_check_list) total = mem_info["MemTotal"] free = mem_info["MemFree"] @@ -48,7 +52,7 @@ def get_mem_info(mem_check_list): hugepage_size = mem_info["Hugepagesize"] login_timeout = params.get_numeric("login_timeout", 360) check_cmd_timeout = params.get_numeric("check_cmd_timeout", 900) - mem_path = os.path.join(test.tmpdir, 'thp_space') + mem_path = os.path.join(test.tmpdir, "thp_space") # If swap is enough fill all memory with dd if swap_free_initial > (total - free): @@ -62,21 +66,23 @@ def get_mem_info(mem_check_list): try: if not os.path.isdir(mem_path): os.makedirs(mem_path) - process.run("mount -t tmpfs -o size=%sM none %s" % - (tmpfs_size, mem_path), shell=True) + process.run( + f"mount -t tmpfs -o size={tmpfs_size}M none {mem_path}", + shell=True, + ) # Set the memory size of vm # To ignore the oom killer set it to the free swap size vm = env.get_vm(params.get("main_vm")) vm.verify_alive() - if params.get_numeric('mem') > swap_free_initial: + if params.get_numeric("mem") > swap_free_initial: vm.destroy() - vm_name = 'vmsw' + vm_name = "vmsw" vm0 = params.get("main_vm") vm0_key = env.get_vm(vm0) - params['vms'] = params['vms'] + " " + vm_name + params["vms"] = params["vms"] + " " + vm_name # For ppc, vm mem must align to 256MB, apply it for all arch - params['mem'] = math.floor(swap_free_initial / 256) * 256 + params["mem"] = math.floor(swap_free_initial / 256) * 256 vm_key = vm0_key.clone(vm0, params) env.register_vm(vm_name, vm_key) env_process.preprocess_vm(test, params, env, vm_name) @@ -85,29 +91,34 @@ def get_mem_info(mem_check_list): session = vm.wait_for_login(timeout=login_timeout) error_context.context("Disable swap in the guest", 
test.log.info) - s, o = session.cmd_status_output('swapoff -a') + s, o = session.cmd_status_output("swapoff -a") if s != 0: - test.error("Disable swap in guest failed as %s" % o) + test.error(f"Disable swap in guest failed as {o}") error_context.context("making guest to swap memory", test.log.debug) free = mem_info["MemFree"] count = free // hugepage_size - cmd = ("dd if=/dev/zero of=%s/zero bs=%sM count=%s" % - (mem_path, hugepage_size, count)) + cmd = ( + f"dd if=/dev/zero of={mem_path}/zero bs={hugepage_size}M count={count}" + ) process.run(cmd, shell=True) mem_info = get_mem_info(mem_check_list) swap_free_after = mem_info["SwapFree"] - error_context.context("Swap after filling memory: %d" % swap_free_after, test.log.debug) + error_context.context( + "Swap after filling memory: %d" % swap_free_after, test.log.debug + ) if swap_free_after - swap_free_initial >= 0: test.fail("No data was swapped to memory") # Try harder to make guest memory to be swapped - session.cmd("find / -name \"*\"", timeout=check_cmd_timeout, ignore_all_errors=True) + session.cmd( + 'find / -name "*"', timeout=check_cmd_timeout, ignore_all_errors=True + ) finally: if session is not None: - process.run("umount %s" % mem_path, shell=True) + process.run(f"umount {mem_path}", shell=True) test.log.info("Swapping test succeed") diff --git a/generic/tests/tsc.py b/generic/tests/tsc.py index cb7ccff6cd..78c497e4f6 100644 --- a/generic/tests/tsc.py +++ b/generic/tests/tsc.py @@ -7,28 +7,27 @@ class TscTest(TimeClientTest): def __init__(self, test, params, env, test_name): - super(TscTest, self).__init__(test, params, env, test_name) - self.args = '-t 650' + super().__init__(test, params, env, test_name) + self.args = "-t 650" def _test(self): - cmd = self.src_dir + '/checktsc ' + cmd = self.src_dir + "/checktsc " cmd += self.args (exit_status, result) = self.session.cmd_status_output(cmd) if exit_status != 0: self.test.log.error("Program checktsc exit status is %s", exit_status) - default_fail = ("UNKNOWN FAILURE: rc=%d from %s" % (exit_status, cmd)) + default_fail = "UNKNOWN FAILURE: rc=%d from %s" % (exit_status, cmd) if exit_status == 1: - if result.strip('\n').endswith('FAIL'): + if result.strip("\n").endswith("FAIL"): max_delta = 0 - reason = '' + reason = "" threshold = int(self.args.split()[1]) - latencies = re.findall(r"CPU \d+ - CPU \d+ =\s+-*\d+", - result) + latencies = re.findall(r"CPU \d+ - CPU \d+ =\s+-*\d+", result) for ln in latencies: - cur_delta = int(ln.split('=', 2)[1]) + cur_delta = int(ln.split("=", 2)[1]) if abs(cur_delta) > max_delta: max_delta = abs(cur_delta) reason = ln diff --git a/generic/tests/unattended_install.py b/generic/tests/unattended_install.py index 4c12c55e2c..f81aee6eba 100644 --- a/generic/tests/unattended_install.py +++ b/generic/tests/unattended_install.py @@ -1,6 +1,6 @@ +import random import threading import time -import random from virttest.tests import unattended_install @@ -40,8 +40,7 @@ def thread_func(vm): error_flag = True if not params.get("master_images_clone"): - test.cancel("provide the param `master_images_clone` to clone" - "images for vms") + test.cancel("provide the param `master_images_clone` to clone " "images for vms") trigger_time = int(params.get("install_trigger_time", 0)) random_trigger = params.get("random_trigger", "no") == "yes" diff --git a/generic/tests/vlan.py b/generic/tests/vlan.py index 7c8993cbce..1ac46d7e8c 100644 --- a/generic/tests/vlan.py +++ b/generic/tests/vlan.py @@ -1,10 +1,8 @@ -import time import re +import time import aexpect -from
virttest import utils_test -from virttest import utils_net -from virttest import error_context +from virttest import error_context, utils_net, utils_test from virttest.utils_windows import virtio_win @@ -35,23 +33,23 @@ def run(test, params, env): :param params: Dictionary with the test parameters. :param env: Dictionary with test environment. """ + def add_vlan(test, session, v_id, iface="eth0", cmd_type="ip"): """ Creates a vlan-device on iface by cmd that assigned by cmd_type now only support 'ip' and 'vconfig' """ - vlan_if = '%s.%s' % (iface, v_id) - txt = "Create vlan interface '%s' on %s" % (vlan_if, iface) + vlan_if = f"{iface}.{v_id}" + txt = f"Create vlan interface '{vlan_if}' on {iface}" error_context.context(txt, test.log.info) cmd = "" if cmd_type == "vconfig": - cmd = "vconfig add %s %s" % (iface, v_id) + cmd = f"vconfig add {iface} {v_id}" elif cmd_type == "ip": - v_name = "%s.%s" % (iface, v_id) - cmd = "ip link add link %s %s type vlan id %s " % (iface, - v_name, v_id) + v_name = f"{iface}.{v_id}" + cmd = f"ip link add link {iface} {v_name} type vlan id {v_id} " else: - err_msg = "Unexpected vlan operation command: %s, " % cmd_type + err_msg = f"Unexpected vlan operation command: {cmd_type}, " err_msg += "only support 'ip' and 'vconfig' now" test.error(err_msg) session.cmd(cmd) @@ -60,17 +58,18 @@ def set_ip_vlan(session, v_id, vlan_ip, iface="eth0"): """ Set ip address of vlan interface """ - iface = "%s.%s" % (iface, v_id) - txt = "Assign IP '%s' to vlan interface '%s'" % (vlan_ip, iface) + iface = f"{iface}.{v_id}" + txt = f"Assign IP '{vlan_ip}' to vlan interface '{iface}'" error_context.context(txt, test.log.info) - session.cmd("ifconfig %s %s" % (iface, vlan_ip)) + session.cmd(f"ifconfig {iface} {vlan_ip}") def set_arp_ignore(session): """ Enable arp_ignore for all ipv4 device in guest """ - error_context.context("Enable arp_ignore for all ipv4 device in guest", - test.log.info) + error_context.context( + "Enable arp_ignore for all ipv4 device in guest", test.log.info + ) ignore_cmd = "echo 1 > /proc/sys/net/ipv4/conf/all/arp_ignore" session.cmd(ignore_cmd) @@ -78,18 +77,17 @@ def rem_vlan(test, session, v_id, iface="eth0", cmd_type="ip"): """ Removes the named vlan interface(iface+v_id) """ - v_iface = '%s.%s' % (iface, v_id) + v_iface = f"{iface}.{v_id}" rem_vlan_cmd = "" if cmd_type == "vconfig": - rem_vlan_cmd = "vconfig rem %s" % v_iface + rem_vlan_cmd = f"vconfig rem {v_iface}" elif cmd_type == "ip": - rem_vlan_cmd = "ip link delete %s" % v_iface + rem_vlan_cmd = f"ip link delete {v_iface}" else: - err_msg = "Unexpected vlan operation command: %s, " % cmd_type + err_msg = f"Unexpected vlan operation command: {cmd_type}, " err_msg += "only support 'ip' and 'vconfig' now" test.error(err_msg) - error_context.context("Remove vlan interface '%s'." 
% v_iface, - test.log.info) + error_context.context(f"Remove vlan interface '{v_iface}'.", test.log.info) return session.cmd_status(rem_vlan_cmd) def find_free_port(dst): @@ -122,11 +120,10 @@ def nc_transfer(test, src, dst): except aexpect.ExpectError: # kill server session_ctl[dst].cmd_output_safe("killall -9 nc") - test.fail("Fail to receive file" - " from vm%s to vm%s" % (src + 1, dst + 1)) + test.fail("Fail to receive file" f" from vm{src + 1} to vm{dst + 1}") # check MD5 message digest of receive file in dst output = sessions[dst].cmd_output("md5sum receive").strip() - digest_receive = re.findall(r'(\w+)', output)[0] + digest_receive = re.findall(r"(\w+)", output)[0] if digest_receive == digest_origin[src]: test.log.info("File succeed received in vm %s", vlan_ip[dst]) else: @@ -142,14 +139,18 @@ def flood_ping(src, dst): # does not have the other method to interrupt the process in # the guest rather than close the session. """ - txt = "Flood ping from %s interface %s to %s" % (vms[src].name, - ifname[src], - vlan_ip[dst]) + txt = ( + f"Flood ping from {vms[src].name} interface {ifname[src]} to {vlan_ip[dst]}" + ) error_context.context(txt, test.log.info) session_flood = vms[src].wait_for_login(timeout=60) - utils_test.ping(vlan_ip[dst], flood=True, - interface=ifname[src], - session=session_flood, timeout=10) + utils_test.ping( + vlan_ip[dst], + flood=True, + interface=ifname[src], + session=session_flood, + timeout=10, + ) session_flood.close() def get_netkvmco_path(session): @@ -173,9 +174,9 @@ def get_netkvmco_path(session): err = "Could not get architecture dirname of the vm" test.error(err) - middle_path = "%s\\%s" % (guest_name, guest_arch) + middle_path = f"{guest_name}\\{guest_arch}" find_cmd = 'dir /b /s %s\\netkvmco.dll | findstr "\\%s\\\\"' - find_cmd %= (viowin_ltr, middle_path) + find_cmd %= (viowin_ltr, middle_path) netkvmco_path = session.cmd(find_cmd).strip() test.log.info("Found netkvmco.dll file at %s", netkvmco_path) return netkvmco_path @@ -186,8 +187,8 @@ def get_netkvmco_path(session): ifname = [] vm_ip = [] digest_origin = [] - vlan_ip = ['', ''] - ip_unit = ['1', '2'] + vlan_ip = ["", ""] + ip_unit = ["1", "2"] subnet = params.get("subnet", "192.168") vlan_num = int(params.get("vlan_num", 5)) maximal = int(params.get("maximal", 4094)) @@ -205,38 +206,39 @@ def get_netkvmco_path(session): for vm_index, vm in enumerate(vms): if params["os_type"] == "windows": session = vm.wait_for_login(timeout=login_timeout) - session = utils_test.qemu.windrv_check_running_verifier(session, vm, - test, - driver_verifier) + session = utils_test.qemu.windrv_check_running_verifier( + session, vm, test, driver_verifier + ) session = vm.wait_for_serial_login(timeout=login_timeout) dev_mac = vm.virtnet[0].mac connection_id = utils_net.get_windows_nic_attribute( - session, "macaddress", dev_mac, "netconnectionid") + session, "macaddress", dev_mac, "netconnectionid" + ) session.cmd(set_vlan_cmd % connection_id) - utils_net.restart_windows_guest_network( - session, connection_id) + utils_net.restart_windows_guest_network(session, connection_id) time.sleep(10) nicid = utils_net.get_windows_nic_attribute( - session=session, key="netenabled", value=True, - target="netconnectionID") + session=session, key="netenabled", value=True, target="netconnectionID" + ) ifname.append(nicid) - vm_ip.append(utils_net.get_guest_ip_addr(session, dev_mac, - os_type="windows", - linklocal=True)) + vm_ip.append( + utils_net.get_guest_ip_addr( + session, dev_mac, os_type="windows", linklocal=True + ) + ) 
test.log.debug("IP address is %s in %s", vm_ip, vm.name) session_ctl.append(session) continue - error_context.base_context("Prepare test env on %s" % vm.name) + error_context.base_context(f"Prepare test env on {vm.name}") session = vm.wait_for_login(timeout=login_timeout) if not session: - err_msg = "Could not log into guest %s" % vm.name + err_msg = f"Could not log into guest {vm.name}" test.error(err_msg) sessions.append(session) test.log.info("Logged in %s successful", vm.name) session_ctl.append(vm.wait_for_login(timeout=login_timeout)) - ifname.append(utils_net.get_linux_ifname(session, - vm.get_mac_address())) + ifname.append(utils_net.get_linux_ifname(session, vm.get_mac_address())) # get guest ip vm_ip.append(vm.get_address()) test.log.debug("IP address is %s in %s", vm_ip, vm.name) @@ -245,34 +247,37 @@ def get_netkvmco_path(session): session.cmd(dd_cmd % file_size) # record MD5 message digest of file md5sum_output = session.cmd("md5sum file", timeout=60) - digest_origin.append(re.findall(r'(\w+)', md5sum_output)[0]) + digest_origin.append(re.findall(r"(\w+)", md5sum_output)[0]) # stop firewall in vm stop_firewall_cmd = "systemctl stop firewalld||service firewalld stop" session.cmd_output_safe(stop_firewall_cmd) - error_context.context("Load 8021q module in guest %s" % vm.name, - test.log.info) + error_context.context(f"Load 8021q module in guest {vm.name}", test.log.info) session.cmd_output_safe("modprobe 8021q") - error_context.context("Setup vlan environment in guest %s" % vm.name, - test.log.info) + error_context.context( + f"Setup vlan environment in guest {vm.name}", test.log.info + ) for vlan_i in range(1, vlan_num + 1): add_vlan(test, session, vlan_i, ifname[vm_index], cmd_type) - v_ip = "%s.%s.%s" % (subnet, vlan_i, ip_unit[vm_index]) + v_ip = f"{subnet}.{vlan_i}.{ip_unit[vm_index]}" set_ip_vlan(session, vlan_i, v_ip, ifname[vm_index]) set_arp_ignore(session) if params["os_type"] == "windows": for vm_index, vm in enumerate(vms): - status, output = utils_test.ping(dest=vm_ip[(vm_index + 1) % 2], count=10, - session=session_ctl[vm_index], - timeout=30) + status, output = utils_test.ping( + dest=vm_ip[(vm_index + 1) % 2], + count=10, + session=session_ctl[vm_index], + timeout=30, + ) loss = utils_test.get_loss_ratio(output) if not loss and ("TTL=" in output): pass # window get loss=0 when ping fail sometimes, need further check else: - test.fail("Guests ping test hit unexpected loss, error info: %s" % output) + test.fail(f"Guests ping test hit unexpected loss, error info: {output}") for sess in session_ctl: if sess: @@ -281,21 +286,23 @@ def get_netkvmco_path(session): try: for vlan in range(1, vlan_num + 1): - error_context.base_context("Test for vlan %s" % vlan, test.log.info) + error_context.base_context(f"Test for vlan {vlan}", test.log.info) error_context.context("Ping test between vlans", test.log.info) - interface = ifname[0] + '.' + str(vlan) + interface = ifname[0] + "." + str(vlan) for vm_index, vm in enumerate(vms): for vlan2 in range(1, vlan_num + 1): - interface = ifname[vm_index] + '.' + str(vlan) - dest = ".".join((subnet, str(vlan2), - ip_unit[(vm_index + 1) % 2])) - status, output = utils_test.ping(dest, count=2, - interface=interface, - session=sessions[vm_index], - timeout=30) - if ((vlan == vlan2) ^ (status == 0)): - err_msg = "%s ping %s unexpected, " % (interface, dest) - err_msg += "error info: %s" % output + interface = ifname[vm_index] + "." 
+ str(vlan) + dest = ".".join((subnet, str(vlan2), ip_unit[(vm_index + 1) % 2])) + status, output = utils_test.ping( + dest, + count=2, + interface=interface, + session=sessions[vm_index], + timeout=30, + ) + if (vlan == vlan2) ^ (status == 0): + err_msg = f"{interface} ping {dest} unexpected, " + err_msg += f"error info: {output}" test.fail(err_msg) error_context.context("Flood ping between vlans", test.log.info) @@ -304,8 +311,9 @@ def get_netkvmco_path(session): flood_ping(0, 1) flood_ping(1, 0) - error_context.context("Transferring data between vlans by nc", - test.log.info) + error_context.context( + "Transferring data between vlans by nc", test.log.info + ) nc_transfer(test, 0, 1) nc_transfer(test, 1, 0) @@ -316,8 +324,9 @@ def get_netkvmco_path(session): error_context.base_context("Remove vlan") for vm_index, vm in enumerate(vms): for vlan in range(1, vlan_num + 1): - status = rem_vlan(test, sessions[vm_index], vlan, - ifname[vm_index], cmd_type) + status = rem_vlan( + test, sessions[vm_index], vlan, ifname[vm_index], cmd_type + ) if status: test.log.error("Remove vlan %s failed", vlan) @@ -326,8 +335,7 @@ def get_netkvmco_path(session): bound = maximal + 1 try: error_context.base_context("Vlan scalability test") - error_context.context("Testing the plumb of vlan interface", - test.log.info) + error_context.context("Testing the plumb of vlan interface", test.log.info) for vlan_index in range(1, bound): add_vlan(test, sessions[0], vlan_index, ifname[0], cmd_type) vlan_added = vlan_index @@ -339,11 +347,10 @@ def get_netkvmco_path(session): test.log.error("Remove vlan %s failed", vlan_index) error_context.base_context("Vlan negative test") - error_context.context("Create vlan with ID %s in guest" % bound, - test.log.info) + error_context.context(f"Create vlan with ID {bound} in guest", test.log.info) try: add_vlan(test, sessions[0], bound, ifname[0], cmd_type) - test.fail("Maximal ID allow to vlan is %s" % maximal) + test.fail(f"Maximal ID allow to vlan is {maximal}") except aexpect.ShellCmdError as detail: pattern = params["msg_pattern"] if not re.search(pattern, detail.output, re.M | re.I): diff --git a/generic/tests/whql_client_install.py b/generic/tests/whql_client_install.py index f16c136970..8b845357c6 100644 --- a/generic/tests/whql_client_install.py +++ b/generic/tests/whql_client_install.py @@ -1,12 +1,8 @@ -import time import os +import time from aexpect import rss_client - -from virttest import utils_misc -from virttest import utils_test -from virttest import remote -from virttest import data_dir +from virttest import data_dir, remote, utils_misc, utils_test def run(test, params, env): @@ -35,14 +31,17 @@ def run(test, params, env): server_address = params.get("server_address") server_shell_port = int(params.get("server_shell_port")) server_file_transfer_port = int(params.get("server_file_transfer_port")) - server_studio_path = params.get("server_studio_path", "%programfiles%\\ " - "Microsoft Driver Test Manager\\Studio") + server_studio_path = params.get( + "server_studio_path", + "%programfiles%\\ " "Microsoft Driver Test Manager\\Studio", + ) server_username = params.get("server_username") server_password = params.get("server_password") client_username = params.get("client_username") client_password = params.get("client_password") - dsso_bin = params.get("dsso_delete_machine_binary", - "whql/whql_delete_machine_15.exe") + dsso_bin = params.get( + "dsso_delete_machine_binary", "whql/whql_delete_machine_15.exe" + ) dsso_delete_machine_binary = 
os.path.join(data_dir.get_deps_dir(), dsso_bin) install_timeout = float(params.get("install_timeout", 600)) install_cmd = params.get("install_cmd") @@ -53,14 +52,18 @@ def run(test, params, env): utils_test.stop_windows_service(session, svc) # Copy dsso_delete_machine_binary to server - rss_client.upload(server_address, server_file_transfer_port, - dsso_delete_machine_binary, server_studio_path, - timeout=60) + rss_client.upload( + server_address, + server_file_transfer_port, + dsso_delete_machine_binary, + server_studio_path, + timeout=60, + ) # Open a shell session with server - server_session = remote.remote_login("nc", server_address, - server_shell_port, "", "", - session.prompt, session.linesep) + server_session = remote.remote_login( + "nc", server_address, server_shell_port, "", "", session.prompt, session.linesep + ) server_session.set_status_test_command(session.status_test_command) # Get server and client information @@ -71,7 +74,7 @@ def run(test, params, env): server_workgroup = server_session.cmd_output(cmd).strip() server_workgroup = server_workgroup.splitlines()[-1] regkey = r"HKLM\SYSTEM\CurrentControlSet\Services\Tcpip\Parameters" - cmd = "reg query %s /v Domain" % regkey + cmd = f"reg query {regkey} /v Domain" o = server_session.cmd_output(cmd).strip().splitlines()[-1] try: server_dns_suffix = o.split(None, 2)[2] @@ -79,28 +82,28 @@ def run(test, params, env): server_dns_suffix = "" # Delete the client machine from the server's data store (if it's there) - server_session.cmd("cd %s" % server_studio_path) - cmd = "%s %s %s" % (os.path.basename(dsso_delete_machine_binary), - server_name, client_name) + server_session.cmd(f"cd {server_studio_path}") + cmd = f"{os.path.basename(dsso_delete_machine_binary)} {server_name} {client_name}" server_session.cmd(cmd, print_func=test.log.info) server_session.close() # Rename the client machine - client_name = "autotest_%s" % utils_misc.generate_random_string(4) + client_name = f"autotest_{utils_misc.generate_random_string(4)}" test.log.info("Renaming client machine to '%s'", client_name) - cmd = ('wmic computersystem where name="%%computername%%" rename name="%s"' - % client_name) + cmd = f'wmic computersystem where name="%computername%" rename name="{client_name}"' session.cmd(cmd, timeout=600) # Join the server's workgroup test.log.info("Joining workgroup '%s'", server_workgroup) - cmd = ('wmic computersystem where name="%%computername%%" call ' - 'joindomainorworkgroup name="%s"' % server_workgroup) + cmd = ( + 'wmic computersystem where name="%computername%" call ' + f'joindomainorworkgroup name="{server_workgroup}"' + ) session.cmd(cmd, timeout=600) # Set the client machine's DNS suffix test.log.info("Setting DNS suffix to '%s'", server_dns_suffix) - cmd = 'reg add %s /v Domain /d "%s" /f' % (regkey, server_dns_suffix) + cmd = f'reg add {regkey} /v Domain /d "{server_dns_suffix}" /f' session.cmd(cmd, timeout=300) # Reboot @@ -108,8 +111,7 @@ def run(test, params, env): # Access shared resources on the server machine test.log.info("Attempting to access remote share on server") - cmd = r"net use \\%s /user:%s %s" % (server_name, server_username, - server_password) + cmd = rf"net use \\{server_name} /user:{server_username} {server_password}" end_time = time.time() + 120 while time.time() < end_time: try: @@ -123,14 +125,16 @@ def run(test, params, env): # Install test.log.info("Installing DTM client (timeout=%ds)", install_timeout) - install_cmd = r"cmd /c \\%s\%s" % (server_name, install_cmd.lstrip("\\")) + install_cmd = r"cmd /c 
\\{}\{}".format(server_name, install_cmd.lstrip("\\")) session.cmd(install_cmd, timeout=install_timeout) # Setup auto logon test.log.info("Setting up auto logon for user '%s'", client_username) - cmd = ('reg add ' - '"HKLM\\Software\\Microsoft\\Windows NT\\CurrentVersion\\winlogon" ' - '/v "%s" /d "%s" /t REG_SZ /f') + cmd = ( + "reg add " + '"HKLM\\Software\\Microsoft\\Windows NT\\CurrentVersion\\winlogon" ' + '/v "%s" /d "%s" /t REG_SZ /f' + ) session.cmd(cmd % ("AutoAdminLogon", "1")) session.cmd(cmd % ("DefaultUserName", client_username)) session.cmd(cmd % ("DefaultPassword", client_password)) diff --git a/generic/tests/whql_env_setup.py b/generic/tests/whql_env_setup.py index 786bcb785f..337cda4317 100644 --- a/generic/tests/whql_env_setup.py +++ b/generic/tests/whql_env_setup.py @@ -1,14 +1,10 @@ -import time import os import re +import time -from avocado.utils import process from avocado.utils import download as utils_download -from virttest import utils_misc -from virttest import utils_test -from virttest import env_process -from virttest import data_dir -from virttest import error_context +from avocado.utils import process +from virttest import data_dir, env_process, error_context, utils_misc, utils_test @error_context.context_aware @@ -28,11 +24,11 @@ def run(test, params, env): :param params: Dictionary with the test parameters :param env: Dictionary with test environment. """ - log_path = "%s/../debug" % test.resultsdir + log_path = f"{test.resultsdir}/../debug" # Prepare the tools iso error_context.context("Prepare the tools iso", test.log.info) src_list = params.get("src_list") - src_path = params.get("src_path", "%s/whql_src" % test.tmpdir) + src_path = params.get("src_path", f"{test.tmpdir}/whql_src") if not os.path.exists(src_path): os.makedirs(src_path) if src_list is not None: @@ -47,7 +43,7 @@ def run(test, params, env): cdrom_whql_dir = os.path.split(cdrom_whql)[0] if not os.path.exists(cdrom_whql_dir): os.makedirs(cdrom_whql_dir) - cmd = "mkisofs -J -o %s %s" % (cdrom_whql, src_path) + cmd = f"mkisofs -J -o {cdrom_whql} {src_path}" process.system(cmd, shell=True) params["cdroms"] += " whql" @@ -59,8 +55,7 @@ def run(test, params, env): timeout = float(params.get("login_timeout", 240)) session = vm.wait_for_login(timeout=timeout) error_log = utils_misc.get_path(log_path, "whql_setup_error_log") - run_guest_log = params.get( - "run_guest_log", "%s/whql_qemu_comman" % test.tmpdir) + run_guest_log = params.get("run_guest_log", f"{test.tmpdir}/whql_qemu_comman") # Record qmmu command line in a log file error_context.context("Record qemu command line", test.log.info) @@ -69,7 +64,7 @@ def run(test, params, env): fd.read() else: fd = open(run_guest_log, "w") - fd.write("%s\n" % vm.qemu_command) + fd.write(f"{vm.qemu_command}\n") fd.close() # Get set up commands @@ -91,29 +86,36 @@ def run(test, params, env): disk_driver_install = params.get("disk_driver_install", "") vm_ma_cmd = "wmic computersystem set AutomaticManagedPagefile=False" - vm_cmd = "wmic pagefileset where name=\"C:\\\\pagefile.sys\" set " - vm_cmd += "InitialSize=%s,MaximumSize=%s" % (vm_size, vm_size) + vm_cmd = 'wmic pagefileset where name="C:\\\\pagefile.sys" set ' + vm_cmd += f"InitialSize={vm_size},MaximumSize={vm_size}" vm_ma_cmd = "" vm_cmd = "" if symbol_files: symbol_cmd = "del C:\\\\symbols &&" - symbol_cmd += "git clone %s C:\\\\symbol_files C:\\\\symbols" % \ - symbol_files + symbol_cmd += f"git clone {symbol_files} C:\\\\symbol_files C:\\\\symbols" else: symbol_cmd = "" wmic_prepare_cmd = "echo 
exit > cmd && cmd /s wmic error_context.context("Configure guest system", test.log.info) - cmd_list = [wmic_prepare_cmd, auto_restart, disable_uas, symbol_cmd, - vm_ma_cmd, vm_cmd, dbgview_cmd, qxl_install, disable_firewall, - timezone_cmd] + cmd_list = [ + wmic_prepare_cmd, + auto_restart, + disable_uas, + symbol_cmd, + vm_ma_cmd, + vm_cmd, + dbgview_cmd, + qxl_install, + disable_firewall, + timezone_cmd, + ] if nic_cmd: for index, nic in enumerate(re.split(r"\s+", params.get("nics"))): - setup_params = params.get("nic_setup_params_%s" % nic, "") + setup_params = params.get(f"nic_setup_params_{nic}", "") if params.get("vm_arch_name", "") == "x86_64": nic_cmd = re.sub("set", "set_64", nic_cmd) - cmd_list.append("%s %s %s" % (nic_cmd, str(index + 1), - setup_params)) + cmd_list.append(f"{nic_cmd} {str(index + 1)} {setup_params}") if disk_init_cmd: disk_num = len(re.split(r"\s+", params.get("images"))) if disk_driver_install: @@ -121,13 +123,13 @@ def run(test, params, env): labels = "IJKLMNOPQRSTUVWXYZ" for index, images in enumerate(re.split(r"\s+", params.get("images"))): if index > 0: - cmd_list.append(disk_init_cmd % (str(index), - labels[index - 1])) - format_cmd_image = format_cmd % (labels[index - 1], - params.get("win_format_%s" % images)) - if params.get("win_extra_%s" % images): - format_cmd_image += " %s" % params.get( - "win_extra_%s" % images) + cmd_list.append(disk_init_cmd % (str(index), labels[index - 1])) + format_cmd_image = format_cmd % ( + labels[index - 1], + params.get(f"win_format_{images}"), + ) + if params.get(f"win_extra_{images}"): + format_cmd_image += " {}".format(params.get(f"win_extra_{images}")) cmd_list.append(format_cmd_image) cmd_list += [update_cmd, disable_update] @@ -138,8 +140,7 @@ def run(test, params, env): if symbol_files: error_context.context("Update symbol files", test.log.info) install_check_tool = False - check_tool_chk = params.get("check_tool_chk", - "C:\\debuggers\\symchk.exe") + check_tool_chk = params.get("check_tool_chk", "C:\\debuggers\\symchk.exe") output = session.cmd_output(check_tool_chk) if "cannot find" in output: install_check_tool = True @@ -151,9 +152,9 @@ def run(test, params, env): symbol_check_pattern = params.get("symbol_check_pattern") symbol_pid_pattern = params.get("symbol_pid_pattern") - download = utils_test.BackgroundTest(session.cmd, - (symbol_file_download, - setup_timeout)) + download = utils_test.BackgroundTest( + session.cmd, (symbol_file_download, setup_timeout) + ) sessioncheck = vm.wait_for_login(timeout=timeout) download.start() @@ -165,7 +166,7 @@ def run(test, params, env): s, o = sessioncheck.cmd_status_output(cmd) pid = re.findall(symbol_pid_pattern, o, re.S) if pid: - cmd = "taskkill /PID %s /F" % pid[0] + cmd = f"taskkill /PID {pid[0]} /F" try: sessioncheck.cmd(cmd) except Exception: @@ -182,11 +183,10 @@ def run(test, params, env): s, o = session.cmd_status_output(cmd, timeout=setup_timeout) except Exception as err: failed_flag += 1 - utils_misc.log_line( - error_log, "Unexpected exception: %s" % err) + utils_misc.log_line(error_log, f"Unexpected exception: {err}") if s != 0: failed_flag += 1 utils_misc.log_line(error_log, o) if failed_flag != 0: - test.fail("Have %s setup fialed. Please check the log." % failed_flag) + test.fail(f"Have {failed_flag} setup failed. 
Please check the log.") diff --git a/generic/tests/whql_hck_client_install.py b/generic/tests/whql_hck_client_install.py index 256e5172b7..f71371a578 100644 --- a/generic/tests/whql_hck_client_install.py +++ b/generic/tests/whql_hck_client_install.py @@ -1,9 +1,8 @@ import logging -from virttest import remote -from virttest import error_context +from virttest import error_context, remote -LOG_JOB = logging.getLogger('avocado.test') +LOG_JOB = logging.getLogger("avocado.test") @error_context.context_aware @@ -30,9 +29,9 @@ def run_whql_hck_client_install(test, params, env): client_password = params["client_password"] server_domname = params["server_domname"] - server_session = remote.remote_login("nc", server_address, - server_shell_port, "", "", - session.prompt, session.linesep) + server_session = remote.remote_login( + "nc", server_address, server_shell_port, "", "", session.prompt, session.linesep + ) client_name = session.cmd_output("echo %computername%").strip() install_timeout = float(params.get("install_timeout", 1800)) @@ -44,16 +43,20 @@ def run_whql_hck_client_install(test, params, env): # Join the server's workgroup if params.get("join_domain") == "yes": error_context.context("Join the workgroup", LOG_JOB.info) - cmd = ("netdom join %s /domain:%s /UserD:%s " - "/PasswordD:%s" % (client_name, server_domname, - client_username, client_password)) + cmd = ( + f"netdom join {client_name} /domain:{server_domname} /UserD:{client_username} " + f"/PasswordD:{client_password}" + ) session.cmd(cmd, timeout=600) - error_context.context(("Setting up auto logon for user '%s'" % - client_username), LOG_JOB.info) - cmd = ('reg add ' - '"HKLM\\Software\\Microsoft\\Windows NT\\CurrentVersion\\winlogon"' - ' /v "%s" /d "%s" /t REG_SZ /f') + error_context.context( + (f"Setting up auto logon for user '{client_username}'"), LOG_JOB.info + ) + cmd = ( + "reg add " + '"HKLM\\Software\\Microsoft\\Windows NT\\CurrentVersion\\winlogon"' + ' /v "%s" /d "%s" /t REG_SZ /f' + ) session.cmd(cmd % ("AutoAdminLogon", "1")) session.cmd(cmd % ("DefaultUserName", server_username)) session.cmd(cmd % ("DefaultPassword", server_password)) @@ -61,14 +64,16 @@ def run_whql_hck_client_install(test, params, env): session = vm.reboot(session) if params.get("pre_hck_install"): - error_context.context("Install some program before install HCK client", - LOG_JOB.info) + error_context.context( + "Install some program before install HCK client", LOG_JOB.info + ) install_cmd = params.get("pre_hck_install") session.cmd(install_cmd, timeout=install_timeout) install_cmd = params["install_cmd"] - error_context.context(("Installing HCK client (timeout=%ds)" % - install_timeout), LOG_JOB.info) + error_context.context( + ("Installing HCK client (timeout=%ds)" % install_timeout), LOG_JOB.info + ) session.cmd(install_cmd, timeout=install_timeout) reboot_timeout = login_timeout + 1500 session = vm.reboot(session, timeout=reboot_timeout) diff --git a/generic/tests/whql_submission.py b/generic/tests/whql_submission.py index 46b7b6c378..015e19028b 100644 --- a/generic/tests/whql_submission.py +++ b/generic/tests/whql_submission.py @@ -3,9 +3,7 @@ import aexpect from aexpect import rss_client - -from virttest import utils_misc -from virttest import remote +from virttest import remote, utils_misc def run(test, params, env): @@ -43,27 +41,40 @@ def run(test, params, env): server_address = params.get("server_address") server_shell_port = int(params.get("server_shell_port")) server_file_transfer_port = int(params.get("server_file_transfer_port")) 
- server_studio_path = params.get("server_studio_path", "%programfiles%\\ " - "Microsoft Driver Test Manager\\Studio") - dsso_test_binary = params.get("dsso_test_binary", - "deps/whql_submission_15.exe") + server_studio_path = params.get( + "server_studio_path", + "%programfiles%\\ " "Microsoft Driver Test Manager\\Studio", + ) + dsso_test_binary = params.get("dsso_test_binary", "deps/whql_submission_15.exe") dsso_test_binary = utils_misc.get_path(test.virtdir, dsso_test_binary) - dsso_delete_machine_binary = params.get("dsso_delete_machine_binary", - "deps/whql_delete_machine_15.exe") - dsso_delete_machine_binary = utils_misc.get_path(test.virtdir, - dsso_delete_machine_binary) + dsso_delete_machine_binary = params.get( + "dsso_delete_machine_binary", "deps/whql_delete_machine_15.exe" + ) + dsso_delete_machine_binary = utils_misc.get_path( + test.virtdir, dsso_delete_machine_binary + ) test_timeout = float(params.get("test_timeout", 600)) # Copy dsso binaries to the server for filename in dsso_test_binary, dsso_delete_machine_binary: - rss_client.upload(server_address, server_file_transfer_port, - filename, server_studio_path, timeout=60) + rss_client.upload( + server_address, + server_file_transfer_port, + filename, + server_studio_path, + timeout=60, + ) # Open a shell session with the server - server_session = remote.remote_login("nc", server_address, - server_shell_port, "", "", - sessions[0].prompt, - sessions[0].linesep) + server_session = remote.remote_login( + "nc", + server_address, + server_shell_port, + "", + "", + sessions[0].prompt, + sessions[0].linesep, + ) server_session.set_status_test_command(sessions[0].status_test_command) # Get the computer names of the server and clients @@ -72,15 +83,15 @@ def run(test, params, env): client_names = [session.cmd_output(cmd).strip() for session in sessions] # Delete all client machines from the server's data store - server_session.cmd("cd %s" % server_studio_path) + server_session.cmd(f"cd {server_studio_path}") for client_name in client_names: - cmd = "%s %s %s" % (os.path.basename(dsso_delete_machine_binary), - server_name, client_name) + cmd = f"{os.path.basename(dsso_delete_machine_binary)} {server_name} {client_name}" server_session.cmd(cmd, print_func=test.log.debug) # Reboot the client machines - sessions = utils_misc.parallel((vm.reboot, (session,)) - for vm, session in zip(vms, sessions)) + sessions = utils_misc.parallel( + (vm.reboot, (session,)) for vm, session in zip(vms, sessions) + ) # Check the NICs again for vm in vms: @@ -92,32 +103,40 @@ def run(test, params, env): # Run whql_pre_command and close the sessions if params.get("whql_pre_command"): for session in sessions: - session.cmd(params.get("whql_pre_command"), - int(params.get("whql_pre_command_timeout", 600))) + session.cmd( + params.get("whql_pre_command"), + int(params.get("whql_pre_command_timeout", 600)), + ) session.close() # Run the automation program on the server - pool_name = "%s_pool" % client_names[0] - submission_name = "%s_%s" % (client_names[0], - params.get("submission_name")) - cmd = "%s %s %s %s %s %s" % (os.path.basename(dsso_test_binary), - server_name, pool_name, submission_name, - test_timeout, " ".join(client_names)) + pool_name = f"{client_names[0]}_pool" + submission_name = "{}_{}".format(client_names[0], params.get("submission_name")) + cmd = "{} {} {} {} {} {}".format( + os.path.basename(dsso_test_binary), + server_name, + pool_name, + submission_name, + test_timeout, + " ".join(client_names), + ) server_session.sendline(cmd) # Helper 
function: wait for a given prompt and raise an exception if an # error occurs def find_prompt(test, prompt): m, o = server_session.read_until_last_line_matches( - [prompt, server_session.prompt], print_func=test.log.info, - timeout=600) + [prompt, server_session.prompt], print_func=test.log.info, timeout=600 + ) if m != 0: errors = re.findall("^Error:.*$", o, re.I | re.M) if errors: test.error(errors[0]) else: - test.error("Error running automation program: " - "could not find '%s' prompt" % prompt) + test.error( + "Error running automation program: " + f"could not find '{prompt}' prompt" + ) # Tell the automation program which device to test find_prompt(test, "Device to test:") @@ -171,8 +190,9 @@ def find_prompt(test, prompt): # Wait for the automation program to terminate try: - o = server_session.read_up_to_prompt(print_func=test.log.info, - timeout=test_timeout + 300) + o = server_session.read_up_to_prompt( + print_func=test.log.info, timeout=test_timeout + 300 + ) # (test_timeout + 300 is used here because the automation program is # supposed to terminate cleanly on its own when test_timeout expires) done = True @@ -192,16 +212,19 @@ def find_prompt(test, prompt): for r in results: if "report" in r: try: - rss_client.download(server_address, - server_file_transfer_port, - r["report"], test.debugdir) + rss_client.download( + server_address, + server_file_transfer_port, + r["report"], + test.debugdir, + ) except rss_client.FileTransferNotFoundError: pass if "logs" in r: try: - rss_client.download(server_address, - server_file_transfer_port, - r["logs"], test.debugdir) + rss_client.download( + server_address, server_file_transfer_port, r["logs"], test.debugdir + ) except rss_client.FileTransferNotFoundError: pass else: @@ -209,11 +232,13 @@ def find_prompt(test, prompt): # Create symlinks to test log dirs to make it easier # to access them (their original names are not human # readable) - link_name = "logs_%s" % r["report"].split("\\")[-1] + link_name = "logs_{}".format(r["report"].split("\\")[-1]) link_name = link_name.replace(" ", "_") link_name = link_name.replace("/", "_") - os.symlink(r["logs"].split("\\")[-1], - os.path.join(test.debugdir, link_name)) + os.symlink( + r["logs"].split("\\")[-1], + os.path.join(test.debugdir, link_name), + ) except (KeyError, OSError): pass @@ -222,6 +247,7 @@ def find_prompt(test, prompt): def print_summary_line(f, line): test.log.info(line) f.write(line + "\n") + if results: # Make sure all results have the required keys for r in results: @@ -233,9 +259,10 @@ def print_summary_line(f, line): r["notrun"] = int(r.get("notrun", 0)) r["notapplicable"] = int(r.get("notapplicable", 0)) # Sort the results by failures and total test count in descending order - results = [(r["fail"], - r["pass"] + r["fail"] + r["notrun"] + r["notapplicable"], - r) for r in results] + results = [ + (r["fail"], r["pass"] + r["fail"] + r["notrun"] + r["notapplicable"], r) + for r in results + ] results.sort(reverse=True) results = [r[-1] for r in results] # Print results @@ -244,14 +271,26 @@ def print_summary_line(f, line): name_length = max(len(r["job"]) for r in results) fmt = "%%-6s %%-%ds %%-15s %%-8s %%-8s %%-8s %%-15s" % name_length f = open(os.path.join(test.debugdir, "summary"), "w") - print_summary_line(f, fmt % ("ID", "Job", "Status", "Pass", "Fail", - "NotRun", "NotApplicable")) - print_summary_line(f, fmt % ("--", "---", "------", "----", "----", - "------", "-------------")) + print_summary_line( + f, fmt % ("ID", "Job", "Status", "Pass", "Fail", "NotRun", 
"NotApplicable") + ) + print_summary_line( + f, fmt % ("--", "---", "------", "----", "----", "------", "-------------") + ) for r in results: - print_summary_line(f, fmt % (r["id"], r["job"], r["status"], - r["pass"], r["fail"], r["notrun"], - r["notapplicable"])) + print_summary_line( + f, + fmt + % ( + r["id"], + r["job"], + r["status"], + r["pass"], + r["fail"], + r["notrun"], + r["notapplicable"], + ), + ) f.close() test.log.info("(see logs and HTML reports in %s)", test.debugdir) @@ -263,16 +302,14 @@ def print_summary_line(f, line): # Fail if there are failed or incomplete jobs (kill the client VMs if there # are incomplete jobs) - failed_jobs = [r["job"] for r in results - if r["status"].lower() == "investigate"] - running_jobs = [r["job"] for r in results - if r["status"].lower() == "inprogress"] + failed_jobs = [r["job"] for r in results if r["status"].lower() == "investigate"] + running_jobs = [r["job"] for r in results if r["status"].lower() == "inprogress"] errors = [] if failed_jobs: - errors += ["Jobs failed: %s." % failed_jobs] + errors += [f"Jobs failed: {failed_jobs}."] if running_jobs: for vm in vms: vm.destroy() - errors += ["Jobs did not complete on time: %s." % running_jobs] + errors += [f"Jobs did not complete on time: {running_jobs}."] if errors: test.fail(" ".join(errors)) diff --git a/multi_host_migration/tests/migration_multi_host.py b/multi_host_migration/tests/migration_multi_host.py index 13c5933535..3140a04a74 100644 --- a/multi_host_migration/tests/migration_multi_host.py +++ b/multi_host_migration/tests/migration_multi_host.py @@ -1,6 +1,7 @@ import logging -import time import random +import time + from autotest.client.shared import error from virttest import utils_test from virttest.utils_test.qemu import migration @@ -34,10 +35,8 @@ def run(test, params, env): mig_type = migration.MultihostMigrationRdma class TestMultihostMigration(mig_type): - def __init__(self, test, params, env): - super(TestMultihostMigration, self).__init__(test, params, env, - preprocess_env) + super().__init__(test, params, env, preprocess_env) self.srchost = self.params.get("hosts")[0] self.dsthost = self.params.get("hosts")[1] self.is_src = params["hostid"] == self.srchost @@ -47,12 +46,15 @@ def __init__(self, test, params, env): self.pre_sub_test = params.get("pre_sub_test") self.post_sub_test = params.get("post_sub_test") self.login_before_pre_tests = params.get("login_before_pre_tests", "no") - self.mig_bg_command = params.get("migration_bg_command", - "cd /tmp; nohup ping localhost &") - self.mig_bg_check_command = params.get("migration_bg_check_command", - "pgrep ping") - self.mig_bg_kill_command = params.get("migration_bg_kill_command", - "pkill -9 ping") + self.mig_bg_command = params.get( + "migration_bg_command", "cd /tmp; nohup ping localhost &" + ) + self.mig_bg_check_command = params.get( + "migration_bg_check_command", "pgrep ping" + ) + self.mig_bg_kill_command = params.get( + "migration_bg_kill_command", "pkill -9 ping" + ) self.need_to_login = params.get("need_to_login", "no") def run_pre_sub_test(self): @@ -62,19 +64,21 @@ def run_pre_sub_test(self): if self.login_before_pre_tests == "yes": vm = env.get_vm(params["main_vm"]) vm.wait_for_login(timeout=self.login_timeout) - error.context("Run sub test '%s' before migration on src" - % self.pre_sub_test, logging.info) - utils_test.run_virt_sub_test(test, params, env, - self.pre_sub_test) + error.context( + f"Run sub test '{self.pre_sub_test}' before migration on src", + logging.info, + ) + 
utils_test.run_virt_sub_test(test, params, env, self.pre_sub_test) def run_post_sub_test(self): # is destination host if not self.is_src: if self.post_sub_test: - error.context("Run sub test '%s' after migration on dst" - % self.post_sub_test, logging.info) - utils_test.run_virt_sub_test(test, params, env, - self.post_sub_test) + error.context( + f"Run sub test '{self.post_sub_test}' after migration on dst", + logging.info, + ) + utils_test.run_virt_sub_test(test, params, env, self.post_sub_test) def migration_scenario(self, worker=None): def start_worker(mig_data): @@ -85,8 +89,9 @@ def start_worker(mig_data): logging.debug("Sending command: '%s'", self.mig_bg_command) s, o = session.cmd_status_output(self.mig_bg_command) if s != 0: - raise error.TestError("Failed to run bg cmd in guest," - " Output is '%s'." % o) + raise error.TestError( + "Failed to run bg cmd in guest," f" Output is '{o}'." + ) time.sleep(5) def check_worker(mig_data): @@ -102,8 +107,9 @@ def check_worker(mig_data): logging.info("Check the background command in the guest.") s, o = session.cmd_status_output(self.mig_bg_check_command) if s: - raise error.TestFail("Background command not found," - " Output is '%s'." % o) + raise error.TestFail( + "Background command not found," f" Output is '{o}'." + ) logging.info("Kill the background command in the guest.") session.sendline(self.mig_bg_kill_command) @@ -117,15 +123,21 @@ def check_worker(mig_data): max_t = int(params.get("max_random_timeout", 5)) random_timeout = random.randint(min_t, max_t) params["start_migration_timeout"] = random_timeout - error.context("Wait for %d seconds, then do migration." - % random_timeout, logging.info) + error.context( + "Wait for %d seconds, then do migration." % random_timeout, + logging.info, + ) self.run_pre_sub_test() if self.need_to_login == "yes": - self.migrate_wait([self.vm], self.srchost, self.dsthost, - start_work=start_worker, - check_work=check_worker) + self.migrate_wait( + [self.vm], + self.srchost, + self.dsthost, + start_work=start_worker, + check_work=check_worker, + ) else: self.migrate_wait([self.vm], self.srchost, self.dsthost) diff --git a/multi_host_migration/tests/migration_multi_host_auto_converge.py b/multi_host_migration/tests/migration_multi_host_auto_converge.py index 4a2ed20b70..8a2e3918dc 100644 --- a/multi_host_migration/tests/migration_multi_host_auto_converge.py +++ b/multi_host_migration/tests/migration_multi_host_auto_converge.py @@ -1,10 +1,8 @@ import logging import time -from autotest.client.shared import error -from autotest.client.shared import utils -from virttest import virt_vm -from virttest import utils_test -from virttest import utils_misc + +from autotest.client.shared import error, utils +from virttest import utils_misc, utils_test, virt_vm from virttest.utils_test.qemu import migration @@ -52,19 +50,15 @@ def run(test, params, env): mig_type = migration.MultihostMigrationRdma class TestMultihostMigration(mig_type, migration.MigrationBase): - """ multihost migration test """ def __init__(self, test, params, env): - - super(TestMultihostMigration, self).__init__(test, params, env) + super().__init__(test, params, env) self.srchost = self.params.get("hosts")[0] self.dsthost = self.params.get("hosts")[1] - super(TestMultihostMigration, self).__setup__(test, params, env, - self.srchost, - self.dsthost) + super().__setup__(test, params, env, self.srchost, self.dsthost) self.load_host_cmd = params.get("load_host_cmd") self.need_stress = params.get("need_stress") == "yes" self.need_cleanup = 
self.need_stress @@ -92,8 +86,7 @@ def set_auto_converge(self): """ for i in range(1, len(self.sub_test)): - self.set_migration_capability(self.sub_test[i], - "auto-converge") + self.set_migration_capability(self.sub_test[i], "auto-converge") self.capabilitys.append("auto-converge") self.capabilitys_state.append(self.sub_test[i]) self.get_migration_capability(len(self.capabilitys) - 1) @@ -125,8 +118,9 @@ def check_running(): if self.check_running_cmd: if not utils_misc.wait_for(check_running, timeout=360): - raise error.TestFail("Failed to start '%s' in guest." % - self.bg_stress_test) + raise error.TestFail( + f"Failed to start '{self.bg_stress_test}' in guest." + ) @error.context_aware def load_host(self): @@ -162,10 +156,10 @@ def analysis_sar_output(self, output): all_items = set(vars()) interested_items = set(["cpu_average", "memory_average"]) if not interested_items.issubset(all_items): - raise error.TestFail("Failed to get '%s' '%s' in " - "sar output: '%s'" % (sar_cpu_str, - sar_memory_str, - output)) + raise error.TestFail( + f"Failed to get '{sar_cpu_str}' '{sar_memory_str}' in " + f"sar output: '{output}'" + ) logging.info("cpu average list: %s", cpu_average) logging.info("memory average list: %s", memory_average) sar_output.append(cpu_average) @@ -179,15 +173,15 @@ def get_sar_output(self): :param vm: vm object """ - error.context("Get output of command sar during migration", - logging.info) + error.context("Get output of command sar during migration", logging.info) vm = self.env.get_vm(self.params["main_vm"]) session = vm.wait_for_login(timeout=self.login_timeout) while vm.is_alive(): - s, o = session.cmd_status_output(get_sar_output_cmd) # pylint: disable=E0606 + s, o = session.cmd_status_output(get_sar_output_cmd) # pylint: disable=E0606 if s != 0: - raise error.TestFail("Failed to get sar output in guest." - "The detail is: %s" % o) + raise error.TestFail( + "Failed to get sar output in guest." f"The detail is: {o}" + ) session.close() self.analysis_sar_output(o) @@ -204,14 +198,18 @@ def check_sar_output(self): memory_average = zip(sar_output[1], sar_output[3]) for i in cpu_average: if abs(i[0] - i[1]) > 30: - raise error.TestFail("The guest performance should " - "not be effected obviously with " - "auto-converge on.") + raise error.TestFail( + "The guest performance should " + "not be effected obviously with " + "auto-converge on." + ) for i in memory_average: if abs(i[0] - i[1]) > 30: - raise error.TestFail("The guest performance should " - "not be effected obviously with " - "auto-converge on.") + raise error.TestFail( + "The guest performance should " + "not be effected obviously with " + "auto-converge on." 
+ ) @error.context_aware def get_mig_cpu_throttling_percentage(self, vm): @@ -221,12 +219,15 @@ def get_mig_cpu_throttling_percentage(self, vm): :param vm: vm object """ - error.context("Get cpu throttling percentage during migration", - logging.info) + error.context( + "Get cpu throttling percentage during migration", logging.info + ) cpu_throttling_percentage = vm.monitor.info("migrate").get( - "cpu-throttle-percentage") - logging.info("The cpu throttling percentage is %s%%", - cpu_throttling_percentage) + "cpu-throttle-percentage" + ) + logging.info( + "The cpu throttling percentage is %s%%", cpu_throttling_percentage + ) return cpu_throttling_percentage @error.context_aware @@ -235,25 +236,30 @@ def check_mig_cpu_throttling_percentage(self): check if cpu throttling percentage equal to given value """ - error.context("check cpu throttling percentage during migration", - logging.info) - logging.info("The cpu throttling percentage list is %s", - cpu_throttling_percentage_list) - if ((self.parameters_value[0] not in cpu_throttling_percentage_list) or - (sum(self.parameters_value) not in cpu_throttling_percentage_list)): - raise error.TestFail("The value of cpu throttling percentage " - "should include: %s %s" % - (self.parameters_value[0], - sum(self.parameters_value))) + error.context( + "check cpu throttling percentage during migration", logging.info + ) + logging.info( + "The cpu throttling percentage list is %s", + cpu_throttling_percentage_list, + ) + if (self.parameters_value[0] not in cpu_throttling_percentage_list) or ( + sum(self.parameters_value) not in cpu_throttling_percentage_list + ): + raise error.TestFail( + "The value of cpu throttling percentage " + f"should include: {self.parameters_value[0]} {sum(self.parameters_value)}" + ) if min(cpu_throttling_percentage_list) != self.parameters_value[0]: - raise error.TestFail("The expected cpu-throttle-initial is %s," - " but the actual value is %s" % - (self.parameters_value[0], - min(cpu_throttling_percentage_list))) + raise error.TestFail( + f"The expected cpu-throttle-initial is {self.parameters_value[0]}," + f" but the actual value is {min(cpu_throttling_percentage_list)}" + ) if max(cpu_throttling_percentage_list) > 99: - raise error.TestFail("The expected max cpu-throttling percentage" - "is %s, but the actual value is %s" % - (99, max(cpu_throttling_percentage_list))) + raise error.TestFail( + "The expected max cpu-throttling percentage" + f"is {99}, but the actual value is {max(cpu_throttling_percentage_list)}" + ) def thread_check_mig_cpu_throttling_percentage(self): """ @@ -311,8 +317,16 @@ def before_migration_load_host(self, mig_data): @error.context_aware def post_migration_capability( - self, vm, cancel_delay, mig_offline, dsthost, - vm_ports, not_wait_for_migration, fd, mig_data): + self, + vm, + cancel_delay, + mig_offline, + dsthost, + vm_ports, + not_wait_for_migration, + fd, + mig_data, + ): """ set auto-converge off/on during migration set/get parameter cpu-throttle-initial 30 @@ -333,26 +347,29 @@ def post_migration_capability( if set_auto_converge == "yes": mig_thread = utils.InterruptedThread( - self.thread_check_mig_cpu_throttling_percentage) + self.thread_check_mig_cpu_throttling_percentage + ) mig_thread.start() try: vm.wait_for_migration(self.migration_timeout) logging.info("Migration completed with auto-converge on") except virt_vm.VMMigrateTimeoutError: if set_auto_converge == "yes": - raise error.TestFail("Migration failed with " - "auto-converge on") + raise error.TestFail("Migration failed with " 
"auto-converge on") else: - logging.info("migration would never finish with " - "auto-converge off") + logging.info( + "migration would never finish with " "auto-converge off" + ) if self.need_cleanup: self.clean_up(self.kill_bg_stress_cmd, vm) try: vm.wait_for_migration(self.migration_timeout) except virt_vm.VMMigrateTimeoutError: - raise error.TestFail("After kill stessapptest, " - "migration failed with " - "auto-converge off") + raise error.TestFail( + "After kill stessapptest, " + "migration failed with " + "auto-converge off" + ) finally: if self.session: self.session.close() @@ -360,8 +377,16 @@ def post_migration_capability( @error.context_aware def post_migration_capability_load_host( - self, vm, cancel_delay, mig_offline, dsthost, - vm_ports, not_wait_for_migration, fd, mig_data): + self, + vm, + cancel_delay, + mig_offline, + dsthost, + vm_ports, + not_wait_for_migration, + fd, + mig_data, + ): """ set auto-converge off/on during migration @@ -385,19 +410,21 @@ def post_migration_capability_load_host( logging.info("Migration completed with auto-converge on") except virt_vm.VMMigrateTimeoutError: if set_auto_converge == "yes": - raise error.TestFail("Migration failed with " - "auto-converge on") + raise error.TestFail("Migration failed with " "auto-converge on") else: - logging.info("migration would never finish with " - "auto-converge off") + logging.info( + "migration would never finish with " "auto-converge off" + ) if self.need_cleanup: self.clean_up(self.kill_bg_stress_cmd, vm) try: vm.wait_for_migration(self.migration_timeout) except virt_vm.VMMigrateTimeoutError: - raise error.TestFail("After kill stessapptest, " - "migration failed with " - "auto-converge off") + raise error.TestFail( + "After kill stessapptest, " + "migration failed with " + "auto-converge off" + ) finally: if self.session: self.session.close() @@ -406,8 +433,16 @@ def post_migration_capability_load_host( @error.context_aware def post_migration_capability_load_host_io( - self, vm, cancel_delay, mig_offline, dsthost, - vm_ports, not_wait_for_migration, fd, mig_data): + self, + vm, + cancel_delay, + mig_offline, + dsthost, + vm_ports, + not_wait_for_migration, + fd, + mig_data, + ): """ set auto-converge off/on during migration @@ -428,11 +463,14 @@ def post_migration_capability_load_host_io( mig_thread.start() try: vm.wait_for_migration(self.migration_timeout) - logging.info("Migration completed with set auto-converge: " - "%s", set_auto_converge) + logging.info( + "Migration completed with set auto-converge: " "%s", + set_auto_converge, + ) except virt_vm.VMMigrateTimeoutError: - raise error.TestFail("Migration failed with set auto-converge" - ": %s" % set_auto_converge) + raise error.TestFail( + "Migration failed with set auto-converge" f": {set_auto_converge}" + ) finally: if self.session: self.session.close() @@ -441,10 +479,10 @@ def post_migration_capability_load_host_io( @error.context_aware def migration_scenario(self): - - error.context("Migration from %s to %s over protocol %s." 
% - (self.srchost, self.dsthost, mig_protocol), - logging.info) + error.context( + f"Migration from {self.srchost} to {self.dsthost} over protocol {mig_protocol}.", + logging.info, + ) def start_worker(mig_data): """ @@ -466,18 +504,17 @@ def check_worker(mig_data): if vm.is_paused(): vm.resume() if not utils_test.qemu.guest_active(vm): - raise error.TestFail("Guest not active " - "after migration") + raise error.TestFail("Guest not active " "after migration") if self.need_cleanup: self.clean_up(self.kill_bg_stress_cmd, vm) else: - logging.info("No need to kill the background " - "test in guest.") + logging.info("No need to kill the background " "test in guest.") vm.reboot() vm.destroy() - self.migrate_wait([self.vm], self.srchost, self.dsthost, - start_worker, check_worker) + self.migrate_wait( + [self.vm], self.srchost, self.dsthost, start_worker, check_worker + ) set_auto_converge_list = params.objects("need_set_auto_converge") sar_log_name = params.get("sar_log_name", "") @@ -491,10 +528,8 @@ def check_worker(mig_data): sar_log_index = str(set_auto_converge_list.index(set_auto_converge)) tmp_sar_log_name = sar_log_name sar_log_name += sar_log_index - sar_cmd_in_guest = sar_cmd_in_guest.replace(tmp_sar_log_name, - sar_log_name) - get_sar_output_cmd = params.get("get_sar_output_cmd", - "tail -n 200 %s") + sar_cmd_in_guest = sar_cmd_in_guest.replace(tmp_sar_log_name, sar_log_name) + get_sar_output_cmd = params.get("get_sar_output_cmd", "tail -n 200 %s") get_sar_output_cmd %= sar_log_name mig = TestMultihostMigration(test, params, env) mig.run() diff --git a/multi_host_migration/tests/migration_multi_host_cancel.py b/multi_host_migration/tests/migration_multi_host_cancel.py index cb14868050..84237da7e7 100644 --- a/multi_host_migration/tests/migration_multi_host_cancel.py +++ b/multi_host_migration/tests/migration_multi_host_cancel.py @@ -1,8 +1,7 @@ import logging from autotest.client.shared import error -from virttest import remote -from virttest import virt_vm +from virttest import remote, virt_vm from virttest.utils_test.qemu import migration @@ -34,26 +33,24 @@ def run(test, params, env): base_class = migration.MultihostMigrationRdma class TestMultihostMigrationCancel(base_class): - def __init__(self, test, params, env): - super(TestMultihostMigrationCancel, self).__init__(test, params, - env, - preprocess_env) + super().__init__(test, params, env, preprocess_env) self.srchost = self.params.get("hosts")[0] self.dsthost = self.params.get("hosts")[1] self.vms = params["vms"].split() self.vm = params["vms"].split()[0] - self.id = {'src': self.srchost, - 'dst': self.dsthost, - "type": "cancel_migration"} + self.id = { + "src": self.srchost, + "dst": self.dsthost, + "type": "cancel_migration", + } def check_guest(self): broken_vms = [] for vm in self.vms: try: vm = env.get_vm(vm) - stress_kill_cmd = params.get("stress_kill_cmd", - "killall -9 stress") + stress_kill_cmd = params.get("stress_kill_cmd", "killall -9 stress") error.context("Kill load and reboot vm.", logging.info) session = vm.wait_for_login(timeout=self.login_timeout) session.sendline(stress_kill_cmd) @@ -61,9 +58,11 @@ def check_guest(self): except (remote.LoginError, virt_vm.VMError): broken_vms.append(vm) if broken_vms: - raise error.TestError("VMs %s should work on src" - " host after canceling of" - " migration." % (broken_vms)) + raise error.TestError( + f"VMs {broken_vms} should work on src" + " host after canceling of" + " migration." 
+ ) def migration_scenario(self): @error.context_aware @@ -82,8 +81,9 @@ def worker(mig_data): if params.get("hostid") == self.master_id(): self.check_guest() - self._hosts_barrier(self.hosts, self.id, - 'wait_for_cancel', self.login_timeout) + self._hosts_barrier( + self.hosts, self.id, "wait_for_cancel", self.login_timeout + ) params["cancel_delay"] = None error.context("Do migration again", logging.info) diff --git a/multi_host_migration/tests/migration_multi_host_downtime_and_speed.py b/multi_host_migration/tests/migration_multi_host_downtime_and_speed.py index 7689b7f249..71d04855e0 100644 --- a/multi_host_migration/tests/migration_multi_host_downtime_and_speed.py +++ b/multi_host_migration/tests/migration_multi_host_downtime_and_speed.py @@ -1,12 +1,7 @@ import logging -from autotest.client.shared import error -from autotest.client.shared import utils - -from virttest import qemu_migration -from virttest import utils_misc -from virttest import utils_test -from virttest import virt_vm +from autotest.client.shared import error, utils +from virttest import qemu_migration, utils_misc, utils_test, virt_vm from virttest.utils_test.qemu import migration @@ -38,9 +33,8 @@ def run(test, params, env): base_class = migration.MultihostMigrationRdma class TestMultihostMigration(base_class): - def __init__(self, test, params, env): - super(TestMultihostMigration, self).__init__(test, params, env) + super().__init__(test, params, env) self.srchost = self.params.get("hosts")[0] self.dsthost = self.params.get("hosts")[1] self.is_src = params["hostid"] == self.srchost @@ -57,8 +51,7 @@ def __init__(self, test, params, env): self.min_speed = utils.convert_data_size(self.min_speed, "M") self.max_speed = utils.convert_data_size(self.max_speed, "M") - self.speed_step = int((self.max_speed - self.min_speed) / - speed_count) + self.speed_step = int((self.max_speed - self.min_speed) / speed_count) if self.sub_type == "before_migrate": self.before_migration = self.before_migration_downtime @@ -70,12 +63,10 @@ def __init__(self, test, params, env): elif self.sub_type == "stop_during": self.post_migration = self.post_migration_stop else: - error.TestFail("Wrong subtest type selected %s" % - (self.sub_type)) + error.TestFail(f"Wrong subtest type selected {self.sub_type}") def clean_up(self, vm): - kill_bg_stress_cmd = params.get("kill_bg_stress_cmd", - "killall -9 stress") + kill_bg_stress_cmd = params.get("kill_bg_stress_cmd", "killall -9 stress") logging.info("Kill the background stress test in the guest.") session = vm.wait_for_login(timeout=self.login_timeout) @@ -88,8 +79,10 @@ def check_mig_downtime(self, vm): actual_downtime = int(vm.monitor.info("migrate").get("downtime")) if actual_downtime > self.mig_downtime * 1000: error = "Migration failed for setting downtime, " - error += "Expected: '%d', Actual: '%d'" % (self.mig_downtime, - actual_downtime) + error += "Expected: '%d', Actual: '%d'" % ( + self.mig_downtime, + actual_downtime, + ) raise error.TestFail(error) @error.context_aware @@ -100,26 +93,44 @@ def before_migration_downtime(self, mig_data): qemu_migration.set_downtime(vm, self.mig_downtime) @error.context_aware - def post_migration_before_downtime(self, vm, cancel_delay, mig_offline, - dsthost, vm_ports, - not_wait_for_migration, - fd, mig_data): + def post_migration_before_downtime( + self, + vm, + cancel_delay, + mig_offline, + dsthost, + vm_ports, + not_wait_for_migration, + fd, + mig_data, + ): try: vm.wait_for_migration(self.mig_timeout) except virt_vm.VMMigrateTimeoutError: - raise 
error.TestFail("Migration failed with setting " - " downtime to %ds." % self.mig_downtime) + raise error.TestFail( + "Migration failed with setting " + " downtime to %ds." % self.mig_downtime + ) - logging.info("Migration completed with downtime " - "is %s seconds.", self.mig_downtime) + logging.info( + "Migration completed with downtime " "is %s seconds.", self.mig_downtime + ) self.check_mig_downtime(vm) vm.destroy(gracefully=False) @error.context_aware - def post_migration_downtime(self, vm, cancel_delay, mig_offline, - dsthost, vm_ports, not_wait_for_migration, - fd, mig_data): + def post_migration_downtime( + self, + vm, + cancel_delay, + mig_offline, + dsthost, + vm_ports, + not_wait_for_migration, + fd, + mig_data, + ): logging.info("Set downtime after migration.") downtime = 0 for downtime in range(1, self.max_downtime): @@ -133,44 +144,61 @@ def post_migration_downtime(self, vm, cancel_delay, mig_offline, try: vm.wait_for_migration(self.mig_timeout) except virt_vm.VMMigrateTimeoutError: - raise error.TestFail("Migration failed with setting " - " downtime to %ds." % downtime) + raise error.TestFail( + "Migration failed with setting " " downtime to %ds." % downtime + ) self.mig_downtime = downtime - 1 - logging.info("Migration completed with downtime " - "is %s seconds.", self.mig_downtime) + logging.info( + "Migration completed with downtime " "is %s seconds.", self.mig_downtime + ) self.check_mig_downtime(vm) vm.destroy(gracefully=False) - def post_migration_speed(self, vm, cancel_delay, mig_offline, dsthost, - vm_ports, not_wait_for_migration, - fd, mig_data): + def post_migration_speed( + self, + vm, + cancel_delay, + mig_offline, + dsthost, + vm_ports, + not_wait_for_migration, + fd, + mig_data, + ): mig_speed = None - for mig_speed in range(self.min_speed, - self.max_speed, - self.speed_step): + for mig_speed in range(self.min_speed, self.max_speed, self.speed_step): try: vm.wait_for_migration(self.wait_mig_timeout) break except virt_vm.VMMigrateTimeoutError: - qemu_migration.set_speed(vm, "%sB" % (mig_speed)) + qemu_migration.set_speed(vm, f"{mig_speed}B") # Test migration status. If migration is not completed then # it kill program which creates guest load. try: vm.wait_for_migration(self.mig_timeout) except virt_vm.VMMigrateTimeoutError: - raise error.TestFail("Migration failed with setting " - " mig_speed to %sB." % mig_speed) + raise error.TestFail( + "Migration failed with setting " f" mig_speed to {mig_speed}B." + ) logging.debug("Migration passed with mig_speed %sB", mig_speed) vm.destroy(gracefully=False) - def post_migration_stop(self, vm, cancel_delay, mig_offline, dsthost, - vm_ports, not_wait_for_migration, - fd, mig_data): + def post_migration_stop( + self, + vm, + cancel_delay, + mig_offline, + dsthost, + vm_ports, + not_wait_for_migration, + fd, + mig_data, + ): wait_before_mig = int(vm.params.get("wait_before_stop", "5")) try: @@ -192,25 +220,31 @@ def start_worker(mig_data): bg_stress_test = params.get("bg_stress_test") check_running_cmd = params.get("check_running_cmd") - bg = utils.InterruptedThread(utils_test.run_virt_sub_test, - args=(test, params, env,), - kwargs={"sub_type": bg_stress_test}) + bg = utils.InterruptedThread( + utils_test.run_virt_sub_test, + args=( + test, + params, + env, + ), + kwargs={"sub_type": bg_stress_test}, + ) bg.start() def is_stress_running(): return session.cmd_status(check_running_cmd) == 0 if not utils_misc.wait_for(is_stress_running, timeout=360): - raise error.TestFail("Failed to start %s in guest." 
% - bg_stress_test) + raise error.TestFail(f"Failed to start {bg_stress_test} in guest.") def check_worker(mig_data): if not self.is_src: vm = env.get_vm(params["main_vm"]) self.clean_up(vm) - self.migrate_wait(self.vms, self.srchost, self.dsthost, - start_worker, check_worker) + self.migrate_wait( + self.vms, self.srchost, self.dsthost, start_worker, check_worker + ) mig = TestMultihostMigration(test, params, env) diff --git a/multi_host_migration/tests/migration_multi_host_firewall_block.py b/multi_host_migration/tests/migration_multi_host_firewall_block.py index a686aa14bb..2705c5b13e 100644 --- a/multi_host_migration/tests/migration_multi_host_firewall_block.py +++ b/multi_host_migration/tests/migration_multi_host_firewall_block.py @@ -1,14 +1,13 @@ import logging import os -import six import time -from autotest.client.shared import error + +import six from autotest.client import utils -from virttest import utils_test -from virttest import virt_vm -from virttest import utils_misc -from virttest import qemu_monitor +from autotest.client.shared import error +from virttest import qemu_monitor, utils_misc, utils_test, virt_vm from virttest.utils_test.qemu import migration + from provider import cpuflags @@ -37,8 +36,10 @@ def run(test, params, env): def wait_for_migration(vm, timeout): def mig_finished(): ret = True - if (vm.params["display"] == "spice" and - vm.get_spice_var("spice_seamless_migration") == "on"): + if ( + vm.params["display"] == "spice" + and vm.get_spice_var("spice_seamless_migration") == "on" + ): s = vm.monitor.info("spice") if isinstance(s, six.string_types): ret = "migrated: true" in s @@ -50,16 +51,16 @@ def mig_finished(): else: return ret and (o.get("status") != "active") - if not utils_misc.wait_for(mig_finished, timeout, 2, 2, - "Waiting for migration to complete"): - raise virt_vm.VMMigrateTimeoutError("Timeout expired while waiting " - "for migration to finish") + if not utils_misc.wait_for( + mig_finished, timeout, 2, 2, "Waiting for migration to complete" + ): + raise virt_vm.VMMigrateTimeoutError( + "Timeout expired while waiting " "for migration to finish" + ) class TestMultihostMigrationLongWait(base_class): - def __init__(self, test, params, env): - super(TestMultihostMigrationLongWait, self).__init__( - test, params, env) + super().__init__(test, params, env) self.install_path = params.get("cpuflags_install_path", "/tmp") self.vm_mem = int(params.get("mem", "512")) @@ -71,20 +72,23 @@ def __init__(self, test, params, env): self.vms = params.get("vms").split() def firewall_block_port(self, port): - utils.run("iptables -A INPUT -p tcp --dport %s" - " -j REJECT" % (port), ignore_status=True) + utils.run( + f"iptables -A INPUT -p tcp --dport {port}" " -j REJECT", + ignore_status=True, + ) def clean_firewall(self): utils.run("iptables -F", ignore_status=True) def migrate_vms_src(self, mig_data): - super(TestMultihostMigrationLongWait, - self).migrate_vms_src(mig_data) - self._hosts_barrier(self.hosts, mig_data.mig_id, 'mig_started', - self.mig_timeout) + super().migrate_vms_src(mig_data) + self._hosts_barrier( + self.hosts, mig_data.mig_id, "mig_started", self.mig_timeout + ) vm = mig_data.vms[0] - self._hosts_barrier(self.hosts, mig_data.mig_id, 'mig_interrupted', - self.mig_timeout) + self._hosts_barrier( + self.hosts, mig_data.mig_id, "mig_interrupted", self.mig_timeout + ) session = vm.wait_for_login(timeout=self.login_timeout) session.cmd("killall cpuflags-test") @@ -102,14 +106,16 @@ def migrate_vms_src(self, mig_data): break time.sleep(1) else: - raise 
error.TestWarn("Firewall block migraiton timeout" - " is too short: %s. For completing" - " the test increase mig_timeout in" - " variant dest-problem-test." % - (self.mig_fir_timeout)) + raise error.TestWarn( + "Firewall block migraiton timeout" + f" is too short: {self.mig_fir_timeout}. For completing" + " the test increase mig_timeout in" + " variant dest-problem-test." + ) - self._hosts_barrier(self.hosts, mig_data.mig_id, 'mig_interfynish', - self.mig_timeout) + self._hosts_barrier( + self.hosts, mig_data.mig_id, "mig_interfynish", self.mig_timeout + ) def migrate_vms_dest(self, mig_data): """ @@ -118,22 +124,27 @@ def migrate_vms_dest(self, mig_data): :param mig_Data: Data for migration. """ - self._hosts_barrier(self.hosts, mig_data.mig_id, 'mig_started', - self.mig_timeout) + self._hosts_barrier( + self.hosts, mig_data.mig_id, "mig_started", self.mig_timeout + ) time.sleep(3) for vm in mig_data.vms: self.firewall_block_port(mig_data.vm_ports[vm.name]) - self._hosts_barrier(self.hosts, mig_data.mig_id, 'mig_interrupted', - self.mig_timeout) - self._hosts_barrier(self.hosts, mig_data.mig_id, 'mig_interfynish', - self.mig_fir_timeout + 10) + self._hosts_barrier( + self.hosts, mig_data.mig_id, "mig_interrupted", self.mig_timeout + ) + self._hosts_barrier( + self.hosts, + mig_data.mig_id, + "mig_interfynish", + self.mig_fir_timeout + 10, + ) try: stat = [] for vm in mig_data.vms: stat.append(vm.monitor.get_status()) - except (qemu_monitor.MonitorProtocolError, - qemu_monitor.QMPCmdError): + except (qemu_monitor.MonitorProtocolError, qemu_monitor.QMPCmdError): logging.debug("Guest %s not working", vm) def check_vms_src(self, mig_data): @@ -167,29 +178,35 @@ def check_vms_dst(self, mig_data): try: vm.resume() if utils_test.qemu.guest_active(vm): - raise error.TestFail("Guest can't be active after" - " interrupted migration.") - except (qemu_monitor.MonitorProtocolError, - qemu_monitor.MonitorLockError, - qemu_monitor.QMPCmdError): + raise error.TestFail( + "Guest can't be active after" " interrupted migration." + ) + except ( + qemu_monitor.MonitorProtocolError, + qemu_monitor.MonitorLockError, + qemu_monitor.QMPCmdError, + ): pass def migration_scenario(self, worker=None): - error.context("Migration from %s to %s over protocol %s." 
% - (self.srchost, self.dsthost, mig_protocol), - logging.info) + error.context( + f"Migration from {self.srchost} to {self.dsthost} over protocol {mig_protocol}.", + logging.info, + ) def worker_func(mig_data): vm = mig_data.vms[0] session = vm.wait_for_login(timeout=self.login_timeout) - cpuflags.install_cpuflags_util_on_vm(test, vm, - self.install_path, - extra_flags="-msse3 -msse2") + cpuflags.install_cpuflags_util_on_vm( + test, vm, self.install_path, extra_flags="-msse3 -msse2" + ) - cmd = ("nohup %s/cpuflags-test --stressmem %d,%d &" % - (os.path.join(self.install_path, "cpu_flags"), - self.vm_mem * 100, self.vm_mem / 2)) + cmd = "nohup %s/cpuflags-test --stressmem %d,%d &" % ( + os.path.join(self.install_path, "cpu_flags"), + self.vm_mem * 100, + self.vm_mem / 2, + ) logging.debug("Sending command: %s", cmd) session.sendline(cmd) time.sleep(3) @@ -198,33 +215,34 @@ def worker_func(mig_data): worker = worker_func try: - self.migrate_wait(self.vms, self.srchost, self.dsthost, - start_work=worker) + self.migrate_wait( + self.vms, self.srchost, self.dsthost, start_work=worker + ) finally: self.clean_firewall() class TestMultihostMigrationShortInterrupt(TestMultihostMigrationLongWait): - def __init__(self, test, params, env): - super(TestMultihostMigrationShortInterrupt, self).__init__( - test, params, env) + super().__init__(test, params, env) def migrate_vms_src(self, mig_data): - super(TestMultihostMigrationShortInterrupt, - self).migrate_vms_src(mig_data) - self._hosts_barrier(self.hosts, mig_data.mig_id, 'mig_started', - self.mig_timeout) + super().migrate_vms_src(mig_data) + self._hosts_barrier( + self.hosts, mig_data.mig_id, "mig_started", self.mig_timeout + ) vm = mig_data.vms[0] - self._hosts_barrier(self.hosts, mig_data.mig_id, 'mig_interrupted', - self.mig_timeout) + self._hosts_barrier( + self.hosts, mig_data.mig_id, "mig_interrupted", self.mig_timeout + ) session = vm.wait_for_login(timeout=self.login_timeout) session.cmd("killall cpuflags-test") wait_for_migration(vm, self.mig_timeout) - self._hosts_barrier(self.hosts, mig_data.mig_id, 'mig_done', - self.mig_timeout) + self._hosts_barrier( + self.hosts, mig_data.mig_id, "mig_done", self.mig_timeout + ) def migrate_vms_dest(self, mig_data): """ @@ -233,22 +251,24 @@ def migrate_vms_dest(self, mig_data): :param mig_Data: Data for migration. """ - self._hosts_barrier(self.hosts, mig_data.mig_id, 'mig_started', - self.mig_timeout) + self._hosts_barrier( + self.hosts, mig_data.mig_id, "mig_started", self.mig_timeout + ) time.sleep(3) for vm in mig_data.vms: self.firewall_block_port(mig_data.vm_ports[vm.name]) - self._hosts_barrier(self.hosts, mig_data.mig_id, 'mig_interrupted', - self.mig_timeout) + self._hosts_barrier( + self.hosts, mig_data.mig_id, "mig_interrupted", self.mig_timeout + ) self.clean_firewall() - self._hosts_barrier(self.hosts, mig_data.mig_id, 'mig_done', - self.mig_fir_timeout) + self._hosts_barrier( + self.hosts, mig_data.mig_id, "mig_done", self.mig_fir_timeout + ) try: for vm in mig_data.vms: vm.monitor.get_status() - except (qemu_monitor.MonitorProtocolError, - qemu_monitor.QMPCmdError): + except (qemu_monitor.MonitorProtocolError, qemu_monitor.QMPCmdError): logging.debug("Guest %s not working", vm) def check_vms_dst(self, mig_data): @@ -257,7 +277,7 @@ def check_vms_dst(self, mig_data): :param mig_data: object with migration data. 
""" - super(TestMultihostMigrationShortInterrupt, self).check_vms_dst(mig_data) + super().check_vms_dst(mig_data) def check_vms_src(self, mig_data): """ @@ -265,7 +285,7 @@ def check_vms_src(self, mig_data): :param mig_data: object with migration data. """ - super(TestMultihostMigrationShortInterrupt, self).check_vms_src(mig_data) + super().check_vms_src(mig_data) mig = None if sub_type == "long_wait": @@ -273,5 +293,5 @@ def check_vms_src(self, mig_data): elif sub_type == "short_interrupt": mig = TestMultihostMigrationShortInterrupt(test, params, env) else: - raise error.TestNAError("Unsupported sub_type = '%s'." % sub_type) + raise error.TestNAError(f"Unsupported sub_type = '{sub_type}'.") mig.run() diff --git a/multi_host_migration/tests/migration_multi_host_helper_tests.py b/multi_host_migration/tests/migration_multi_host_helper_tests.py index 9dfbd1e380..7823ea231e 100644 --- a/multi_host_migration/tests/migration_multi_host_helper_tests.py +++ b/multi_host_migration/tests/migration_multi_host_helper_tests.py @@ -4,10 +4,9 @@ from autotest.client.shared import error -class MiniSubtest(object): - +class MiniSubtest: def __new__(cls, *args, **kargs): - self = super(MiniSubtest, cls).__new__(cls) + self = super().__new__(cls) ret = None if args is None: args = [] @@ -30,7 +29,6 @@ def run(test, params, env): """ class hot_unplug_block_dev(MiniSubtest): - def test(self): attempts = int(params.get("attempts", "100")) attempt_timeout = int(params.get("attempt_timeout", "1")) @@ -40,20 +38,25 @@ def test(self): for block in params.objects("unplug_block"): for _ in range(attempts): - if vm.devices.simple_unplug(vm.devices["drive_%s" % block], - vm.monitor)[1] is True: + if ( + vm.devices.simple_unplug( + vm.devices[f"drive_{block}"], vm.monitor + )[1] + is True + ): break else: time.sleep(attempt_timeout) for _ in range(attempts): - if vm.devices.simple_unplug(vm.devices[block], - vm.monitor)[1] is True: + if ( + vm.devices.simple_unplug(vm.devices[block], vm.monitor)[1] + is True + ): break else: time.sleep(attempt_timeout) class hot_plug_block_dev(MiniSubtest): - def test(self): def get_index(vm, index): while vm.index_in_use.get(str(index)): @@ -78,47 +81,43 @@ def get_index(vm, index): if drive_index: index = drive_index else: - vm.last_driver_index = get_index(vm, - vm.last_driver_index) + vm.last_driver_index = get_index(vm, vm.last_driver_index) index = str(vm.last_driver_index) vm.last_driver_index += 1 else: index = None image_bootindex = None image_boot = image_params.get("image_boot") - if not re.search(r"boot=on\|off", devices.get_help_text(), - re.MULTILINE): - if image_boot in ['yes', 'on', True]: + if not re.search( + r"boot=on\|off", devices.get_help_text(), re.MULTILINE + ): + if image_boot in ["yes", "on", True]: image_bootindex = str(vm.last_boot_index) vm.last_boot_index += 1 image_boot = "unused" - image_bootindex = image_params.get('bootindex', - image_bootindex) + image_bootindex = image_params.get("bootindex", image_bootindex) else: - if image_boot in ['yes', 'on', True]: + if image_boot in ["yes", "on", True]: if vm.last_boot_index > 0: image_boot = False vm.last_boot_index += 1 image_params = params.object_params(image_name) if image_params.get("boot_drive") == "no": continue - devs = vm.devices.images_define_by_params(image_name, - image_params, - 'disk', - index, - image_boot, - image_bootindex) + devs = vm.devices.images_define_by_params( + image_name, image_params, "disk", index, image_boot, image_bootindex + ) for dev in devs: for _ in range(attempts): - if 
(vm.devices.simple_hotplug(dev, - vm.monitor)[1] is True): + if vm.devices.simple_hotplug(dev, vm.monitor)[1] is True: return time.sleep(attempt_timeout) test_type = params.get("helper_test") - if (test_type in locals()): + if test_type in locals(): tests_group = locals()[test_type] tests_group() else: - raise error.TestFail("Test group '%s' is not defined in" - " cpuflags test" % test_type) + raise error.TestFail( + f"Test group '{test_type}' is not defined in" " cpuflags test" + ) diff --git a/multi_host_migration/tests/migration_multi_host_ping_pong.py b/multi_host_migration/tests/migration_multi_host_ping_pong.py index 5914ef2080..d793cdbc85 100644 --- a/multi_host_migration/tests/migration_multi_host_ping_pong.py +++ b/multi_host_migration/tests/migration_multi_host_ping_pong.py @@ -1,11 +1,11 @@ import logging import os + from autotest.client.shared import error from autotest.client.shared.syncdata import SyncData -from virttest import env_process -from virttest import utils_test -from virttest import utils_misc +from virttest import env_process, utils_misc, utils_test from virttest.utils_test.qemu import migration + from provider import cpuflags @@ -42,9 +42,8 @@ def run(test, params, env): base_class = migration.MultihostMigrationRdma class TestMultihostMigration(base_class): - def __init__(self, test, params, env): - super(TestMultihostMigration, self).__init__(test, params, env) + super().__init__(test, params, env) self.srchost = self.params.get("hosts")[0] self.dsthost = self.params.get("hosts")[1] self.vms = params["vms"].split() @@ -54,12 +53,13 @@ def __init__(self, test, params, env): self.stress_memory = int(params.get("stress_memory", "128")) self.stress_type = params.get("stress_type", "none") self.migrate_count = int(params.get("migrate_count", "3")) - 1 - self.migration_timeout = int(params.get("migration_timeout", - "240")) + self.migration_timeout = int(params.get("migration_timeout", "240")) self.disk_usage = int(params.get("disk_usage", "512")) - self.id = {'src': self.srchost, - 'dst': self.dsthost, - "type": "file_transfer"} + self.id = { + "src": self.srchost, + "dst": self.dsthost, + "type": "file_transfer", + } self.vmaddr = None self.cpuflags_test_out = os.path.join("/tmp", "cpuflags_test.out") self.disktest_out = os.path.join("/tmp", "disktest.out") @@ -94,20 +94,21 @@ def check_vms(self, mig_data): cpu_flags_out = "" disk_out = "" if "cpuflags-test" in run_error: - cpu_flags_out = ("\ncpuflags_test_output: \n" + - session.cmd_output("cat %s" % - (self.cpuflags_test_out))) + cpu_flags_out = ( + "\ncpuflags_test_output: \n" + + session.cmd_output(f"cat {self.cpuflags_test_out}") + ) if "disktest" in run_error: - disk_out = ("\ndisk_test_output: \n" + - session.cmd_output("cat %s" % - (self.disktest_out))) - raise error.TestFail("Something wrong happened" - " during migration %s" - " should be running all time" - " during this test." - " outputs%s%s" % - (run_error, cpu_flags_out, - disk_out)) + disk_out = "\ndisk_test_output: \n" + session.cmd_output( + f"cat {self.disktest_out}" + ) + raise error.TestFail( + "Something wrong happened" + f" during migration {run_error}" + " should be running all time" + " during this test." 
+ f" outputs{cpu_flags_out}{disk_out}" + ) def _prepare_vm(self, vm_name): """ @@ -119,12 +120,16 @@ def _prepare_vm(self, vm_name): """ new_params = self.params.copy() - new_params['migration_mode'] = None - new_params['start_vm'] = 'yes' + new_params["migration_mode"] = None + new_params["start_vm"] = "yes" self.vm_lock.acquire() - env_process.process(self.test, new_params, self.env, - env_process.preprocess_image, - env_process.preprocess_vm) + env_process.process( + self.test, + new_params, + self.env, + env_process.preprocess_image, + env_process.preprocess_vm, + ) self.vm_lock.release() vm = self.env.get_vm(vm_name) vm.wait_for_login(timeout=self.login_timeout) @@ -132,76 +137,80 @@ def _prepare_vm(self, vm_name): def ping_pong_migrate(self, sync, worker): for _ in range(self.migrate_count): - logging.info("File transfer not ended, starting" - " a round of migration...") + logging.info( + "File transfer not ended, starting" " a round of migration..." + ) sync.sync(True, timeout=self.migration_timeout) - self.migrate_wait([self.vm], - self.srchost, - self.dsthost) + self.migrate_wait([self.vm], self.srchost, self.dsthost) tmp = self.dsthost self.dsthost = self.srchost self.srchost = tmp def install_disktest(self): - test.job.setup_dep(['disktest']) - self.disk_srcdir = os.path.join(test.autodir, "deps", - "disktest", "src") + test.job.setup_dep(["disktest"]) + self.disk_srcdir = os.path.join(test.autodir, "deps", "disktest", "src") def migration_scenario(self): - error.context("Migration from %s to %s over protocol %s." % - (self.srchost, self.dsthost, mig_protocol), - logging.info) - sync = SyncData(self.master_id(), self.hostid, self.hosts, - self.id, self.sync_server) + error.context( + f"Migration from {self.srchost} to {self.dsthost} over protocol {mig_protocol}.", + logging.info, + ) + sync = SyncData( + self.master_id(), self.hostid, self.hosts, self.id, self.sync_server + ) address_cache = env.get("address_cache") def worker_cpu_mem(mig_data): vm = mig_data.vms[0] session = vm.wait_for_login(timeout=self.login_timeout) - cpuflags.install_cpuflags_util_on_vm(test, vm, - self.install_path, - extra_flags="-msse3 -msse2") + cpuflags.install_cpuflags_util_on_vm( + test, vm, self.install_path, extra_flags="-msse3 -msse2" + ) - cmd = ("nohup %s/cpuflags-test --stressmem %d,32" - " > %s &" % - (os.path.join(self.install_path, "cpu_flags"), - self.stress_memory, - self.cpuflags_test_out)) + cmd = "nohup %s/cpuflags-test --stressmem %d,32" " > %s &" % ( + os.path.join(self.install_path, "cpu_flags"), + self.stress_memory, + self.cpuflags_test_out, + ) logging.debug("Sending command: %s", cmd) session.sendline(cmd) if session.cmd_status("killall -s 0 cpuflags-test") != 0: - cpu_flags_out = ("\n cpuflags_test_output: \n" + - session.cmd_output("cat %s" % - (self.cpuflags_test_out))) - raise error.TestFail("Something wrong happened" - " during migration cpuflags-test" - " should be running all time" - " during this test.\n%s" % - (cpu_flags_out)) + cpu_flags_out = "\n cpuflags_test_output: \n" + session.cmd_output( + f"cat {self.cpuflags_test_out}" + ) + raise error.TestFail( + "Something wrong happened" + " during migration cpuflags-test" + " should be running all time" + f" during this test.\n{cpu_flags_out}" + ) def worker_disk(mig_data): vm = mig_data.vms[0] session = vm.wait_for_login(timeout=self.login_timeout) - utils_misc.install_disktest_on_vm(test, vm, self.disk_srcdir, - self.install_path) + utils_misc.install_disktest_on_vm( + test, vm, self.disk_srcdir, self.install_path + ) - 
cmd = ("nohup %s/disktest -m %s -L -S > %s &" % - (os.path.join(self.install_path, "disktest", "src"), - self.disk_usage, - self.disktest_out)) + cmd = "nohup {}/disktest -m {} -L -S > {} &".format( + os.path.join(self.install_path, "disktest", "src"), + self.disk_usage, + self.disktest_out, + ) logging.debug("Sending command: %s", cmd) session.sendline(cmd) if session.cmd_status("killall -s 0 disktest") != 0: - disk_out = ("\n cpuflags_test_output: \n" + - session.cmd_output("cat %s" % - (self.disktest_out))) - raise error.TestFail("Something wrong happened" - " during migration disktest" - " should be running all time" - " during this test.\n%s" % - (disk_out)) + disk_out = "\n cpuflags_test_output: \n" + session.cmd_output( + f"cat {self.disktest_out}" + ) + raise error.TestFail( + "Something wrong happened" + " during migration disktest" + " should be running all time" + f" during this test.\n{disk_out}" + ) def worker_all(mig_data): worker_cpu_mem(mig_data) @@ -212,16 +221,16 @@ def worker_all(mig_data): self.worker = worker_cpu_mem elif self.stress_type == "disk": - if (self.hostid == self.master_id()): + if self.hostid == self.master_id(): self.install_disktest() self.worker = worker_disk elif self.stress_type == "all": - if (self.hostid == self.master_id()): + if self.hostid == self.master_id(): self.install_disktest() self.worker = worker_all - if (self.hostid == self.master_id()): + if self.hostid == self.master_id(): self.vm_addr = self._prepare_vm(self.vm).get_address() self._hosts_barrier(self.hosts, self.id, "befor_mig", 120) sync.sync(address_cache, timeout=120) @@ -229,8 +238,9 @@ def worker_all(mig_data): self._hosts_barrier(self.hosts, self.id, "befor_mig", 260) address_cache.update(sync.sync(timeout=120)[self.master_id()]) - self.migrate_wait([self.vm], self.srchost, self.dsthost, - start_work=self.worker) + self.migrate_wait( + [self.vm], self.srchost, self.dsthost, start_work=self.worker + ) sync.sync(True, timeout=self.migration_timeout) tmp = self.dsthost self.dsthost = self.srchost diff --git a/multi_host_migration/tests/migration_multi_host_timedrift.py b/multi_host_migration/tests/migration_multi_host_timedrift.py index 24fddb6188..01e87affc6 100644 --- a/multi_host_migration/tests/migration_multi_host_timedrift.py +++ b/multi_host_migration/tests/migration_multi_host_timedrift.py @@ -1,6 +1,7 @@ import logging import re import time + from autotest.client.shared import error, utils from autotest.client.shared.syncdata import SyncData from virttest import utils_test @@ -29,9 +30,8 @@ def run(test, params, env): base_class = migration.MultihostMigrationRdma class TestMultihostMigration(base_class): - def __init__(self, test, params, env): - super(TestMultihostMigration, self).__init__(test, params, env) + super().__init__(test, params, env) self.srchost = self.params.get("hosts")[0] self.dsthost = self.params.get("hosts")[1] self.is_src = params["hostid"] == self.srchost @@ -49,12 +49,11 @@ def __init__(self, test, params, env): self.start_gt = {} self.diff_ht = {} self.diff_gt = {} - self.id = {'src': self.srchost, - 'dst': self.dsthost, - "type": "timedrift"} + self.id = {"src": self.srchost, "dst": self.dsthost, "type": "timedrift"} - self.sync = SyncData(self.master_id(), self.hostid, self.hosts, - self.id, self.sync_server) + self.sync = SyncData( + self.master_id(), self.hostid, self.hosts, self.id, self.sync_server + ) @error.context_aware def check_diff(self, mig_data): @@ -65,37 +64,42 @@ def check_diff(self, mig_data): session = vm.wait_for_login() if 
self.is_src: - error.context("Check the clocksource in guest.", - logging.info) + error.context("Check the clocksource in guest.", logging.info) check_clocksource_cmd = params.get("check_clocksource_cmd") clocksource = params.get("clocksource", "kvm-clock") current_clocksource = session.cmd(check_clocksource_cmd) current_clocksource = re.findall(clocksource, current_clocksource) current_clocksource = "".join(current_clocksource) - logging.info("current_clocksource in guest is: '%s'", - current_clocksource) + logging.info( + "current_clocksource in guest is: '%s'", current_clocksource + ) if clocksource == "kvm-clock": s = current_clocksource == "kvm-clock" else: s = current_clocksource != "kvm-clock" if not s: - raise error.TestFail("Guest didn't use '%s' " - "clocksource" % clocksource) - - error.context("Check the system time on guest and host.", - logging.info) - (ht, gt) = utils_test.get_time(session, self.time_command, - self.time_filter_re, - self.time_format) + raise error.TestFail( + f"Guest didn't use '{clocksource}' " "clocksource" + ) + + error.context("Check the system time on guest and host.", logging.info) + (ht, gt) = utils_test.get_time( + session, self.time_command, self.time_filter_re, self.time_format + ) session.cmd(self.create_file) if vm.name not in self.start_ht.keys(): (self.start_ht[vm.name], self.start_gt[vm.name]) = (ht, gt) if abs(ht - gt) > self.diff_limit: - logging.warning("Host and %s time diff %s is greater " - "than time_diff_limit:%s", - vm.name, abs(ht - gt), self.diff_limit) - logging.warning("Host time:%s Guest %s time:%s", - ht, vm.name, gt) + logging.warning( + "Host and %s time diff %s is greater " + "than time_diff_limit:%s", + vm.name, + abs(ht - gt), + self.diff_limit, + ) + logging.warning( + "Host time:%s Guest %s time:%s", ht, vm.name, gt + ) else: self.diff_ht[vm.name] = ht - self.start_ht[vm.name] self.diff_gt[vm.name] = gt - self.start_gt[vm.name] @@ -106,15 +110,14 @@ def check_diff(self, mig_data): if time_drifted: difs = "" for vm in mig_data.vms: - difs += ("\n VM=%s HOST=%ss GUEST=%ss" - " DIFF=%s" % - (vm.name, self.diff_ht[vm.name], - self.diff_gt[vm.name], - (self.diff_ht[vm.name] - - self.diff_gt[vm.name]))) - raise error.TestError("Time DIFFERENCE for VM is greater than" - " LIMIT:%ss.%s\n" % (self.diff_limit, - difs)) + difs += ( + f"\n VM={vm.name} HOST={self.diff_ht[vm.name]}s GUEST={self.diff_gt[vm.name]}s" + f" DIFF={self.diff_ht[vm.name] - self.diff_gt[vm.name]}" + ) + raise error.TestError( + "Time DIFFERENCE for VM is greater than" + f" LIMIT:{self.diff_limit}s.{difs}\n" + ) def before_migration(self, mig_data): """ @@ -126,17 +129,22 @@ def before_migration(self, mig_data): def ping_pong_migrate(self): for _ in range(self.migrate_count): self.sync.sync(True, timeout=self.migration_timeout) - self.migrate_wait(self.vms, self.srchost, self.dsthost, - start_work=self.check_diff, - check_work=self.check_diff) + self.migrate_wait( + self.vms, + self.srchost, + self.dsthost, + start_work=self.check_diff, + check_work=self.check_diff, + ) tmp = self.dsthost self.dsthost = self.srchost self.srchost = tmp def migration_scenario(self, worker=None): - error.context("Migration from %s to %s over protocol %s." 
% - (self.srchost, self.dsthost, mig_protocol), - logging.info) + error.context( + f"Migration from {self.srchost} to {self.dsthost} over protocol {mig_protocol}.", + logging.info, + ) self.ping_pong_migrate() diff --git a/multi_host_migration/tests/migration_multi_host_with_file_transfer.py b/multi_host_migration/tests/migration_multi_host_with_file_transfer.py index 93030b9d74..d4b7a993d7 100644 --- a/multi_host_migration/tests/migration_multi_host_with_file_transfer.py +++ b/multi_host_migration/tests/migration_multi_host_with_file_transfer.py @@ -2,14 +2,9 @@ import threading from autotest.client import utils as client_utils -from autotest.client.shared import utils -from autotest.client.shared import error +from autotest.client.shared import error, utils from autotest.client.shared.syncdata import SyncData - -from virttest import env_process -from virttest import utils_test -from virttest import remote -from virttest import utils_misc +from virttest import env_process, remote, utils_misc, utils_test from virttest.utils_test.qemu import migration @@ -71,9 +66,9 @@ def run(test, params, env): # Path where file is stored on guest. guest_path = params.get("guest_path", "/tmp/file") # Path where file is generated. - host_path = "/tmp/file-%s" % utils_misc.generate_random_string(6) + host_path = f"/tmp/file-{utils_misc.generate_random_string(6)}" # Path on host for file copied from vm. - host_path_returned = "%s-returned" % host_path + host_path_returned = f"{host_path}-returned" file_size = params.get("file_size", "500") transfer_timeout = int(params.get("transfer_timeout", "240")) transfer_speed = int(params.get("transfer_speed", "100")) * 1000 @@ -84,17 +79,14 @@ def run(test, params, env): migrate_count = int(params.get("migrate_count", "3")) class TestMultihostMigration(base_class): - def __init__(self, test, params, env): - super(TestMultihostMigration, self).__init__(test, params, env) + super().__init__(test, params, env) self.vm = None self.vm_addr = None self.srchost = self.params.get("hosts")[0] self.dsthost = self.params.get("hosts")[1] self.slave = self.dsthost - self.id = {'src': self.srchost, - 'dst': self.dsthost, - "type": "file_trasfer"} + self.id = {"src": self.srchost, "dst": self.dsthost, "type": "file_trasfer"} self.file_check_sums = [] def check_vms(self, mig_data): @@ -124,12 +116,16 @@ def _prepare_vm(self, vm_name): """ new_params = self.params.copy() - new_params['migration_mode'] = None - new_params['start_vm'] = 'yes' + new_params["migration_mode"] = None + new_params["start_vm"] = "yes" self.vm_lock.acquire() - env_process.process(self.test, new_params, self.env, - env_process.preprocess_image, - env_process.preprocess_vm) + env_process.process( + self.test, + new_params, + self.env, + env_process.preprocess_image, + env_process.preprocess_vm, + ) self.vm_lock.release() vm = self.env.get_vm(vm_name) vm.wait_for_login(timeout=self.login_timeout) @@ -139,19 +135,33 @@ def _copy_until_end(self, end_event): # Copy until migration not end. 
while not end_event.is_set(): logging.info("Copy file to guest %s.", self.vm_addr) - remote.copy_files_to(self.vm_addr, "scp", guest_root, - guest_pass, 22, host_path, - guest_path, limit=transfer_speed, - verbose=True, - timeout=transfer_timeout) + remote.copy_files_to( + self.vm_addr, + "scp", + guest_root, + guest_pass, + 22, + host_path, + guest_path, + limit=transfer_speed, + verbose=True, + timeout=transfer_timeout, + ) logging.info("Copy file to guests %s done.", self.vm_addr) logging.info("Copy file from guest %s.", self.vm_addr) - remote.copy_files_from(self.vm_addr, "scp", guest_root, - guest_pass, 22, guest_path, - host_path_returned, - limit=transfer_speed, verbose=True, - timeout=transfer_timeout) + remote.copy_files_from( + self.vm_addr, + "scp", + guest_root, + guest_pass, + 22, + guest_path, + host_path_returned, + limit=transfer_speed, + verbose=True, + timeout=transfer_timeout, + ) logging.info("Copy file from guests %s done.", self.vm_addr) check_sum = client_utils.hash_file(host_path_returned) # store checksum for later check. @@ -161,17 +171,16 @@ def _run_and_migrate(self, bg, end_event, sync, migrate_count): bg.start() try: while bg.is_alive(): - logging.info("File transfer not ended, starting" - " a round of migration...") + logging.info( + "File transfer not ended, starting" " a round of migration..." + ) sync.sync(True, timeout=d_transfer_timeout) - self.migrate_wait([self.vm], - self.srchost, - self.dsthost) + self.migrate_wait([self.vm], self.srchost, self.dsthost) tmp = self.dsthost self.dsthost = self.srchost self.srchost = tmp migrate_count -= 1 - if (migrate_count <= 0): + if migrate_count <= 0: end_event.set() bg.join() @@ -189,45 +198,52 @@ def _slave_migrate(self, sync): done = sync.sync(timeout=d_transfer_timeout)[self.master_id()] if not done: break - logging.info("File transfer not ended, starting" - " a round of migration...") - self.migrate_wait([self.vm], - self.srchost, - self.dsthost) + logging.info( + "File transfer not ended, starting" " a round of migration..." + ) + self.migrate_wait([self.vm], self.srchost, self.dsthost) tmp = self.dsthost self.dsthost = self.srchost self.srchost = tmp def migration_scenario(self): - sync = SyncData(self.master_id(), self.hostid, self.hosts, - self.id, self.sync_server) + sync = SyncData( + self.master_id(), self.hostid, self.hosts, self.id, self.sync_server + ) self.vm = params.get("vms").split()[0] address_cache = env.get("address_cache") - if (self.hostid == self.master_id()): + if self.hostid == self.master_id(): try: - utils.run("dd if=/dev/zero of=%s bs=1M" - " count=%s" % (host_path, file_size)) + utils.run( + f"dd if=/dev/zero of={host_path} bs=1M" f" count={file_size}" + ) self.vm_addr = self._prepare_vm(self.vm).get_address() end_event = threading.Event() - bg = utils.InterruptedThread(self._copy_until_end, - (end_event,)) + bg = utils.InterruptedThread(self._copy_until_end, (end_event,)) self._hosts_barrier(self.hosts, self.id, "befor_mig", 120) sync.sync(address_cache, timeout=120) - error.context("ping-pong migration during file transfer " - "between host and guest.", logging.info) + error.context( + "ping-pong migration during file transfer " + "between host and guest.", + logging.info, + ) self._run_and_migrate(bg, end_event, sync, migrate_count) # Check if guest lives. 
- remote.wait_for_login(shell_client, self.vm_addr, - shell_port, guest_root, - guest_pass, shell_prompt) - self._hosts_barrier(self.hosts, self.id, - "After_check", 120) + remote.wait_for_login( + shell_client, + self.vm_addr, + shell_port, + guest_root, + guest_pass, + shell_prompt, + ) + self._hosts_barrier(self.hosts, self.id, "After_check", 120) error.context("comparing hashes", logging.info) orig_hash = client_utils.hash_file(host_path) @@ -239,23 +255,25 @@ def migration_scenario(self): check_sum = self.file_check_sums[i] if check_sum != orig_hash: wrong_check_sum = True - logging.error("Checksum in transfer number" - " %d if wrong.", i) + logging.error( + "Checksum in transfer number" " %d if wrong.", i + ) if wrong_check_sum: - raise error.TestFail("Returned file hash (%s) differs" - "from original one (%s)" % - (returned_hash, orig_hash)) + raise error.TestFail( + f"Returned file hash ({returned_hash}) differs" + f"from original one ({orig_hash})" + ) else: # clean temp - utils.run("rm -rf %s" % (host_path)) - utils.run("rm -rf %s" % (host_path_returned)) + utils.run(f"rm -rf {host_path}") + utils.run(f"rm -rf {host_path_returned}") error.context() finally: if del_file_with_err == "yes": - utils.run("rm -rf %s" % (host_path)) - utils.run("rm -rf %s" % (host_path_returned)) + utils.run(f"rm -rf {host_path}") + utils.run(f"rm -rf {host_path_returned}") else: self._hosts_barrier(self.hosts, self.id, "befor_mig", 260) address_cache.update(sync.sync(timeout=120)[self.master_id()]) diff --git a/multi_host_migration/tests/migration_multi_host_with_kdump.py b/multi_host_migration/tests/migration_multi_host_with_kdump.py index 43d219199c..077e886bba 100644 --- a/multi_host_migration/tests/migration_multi_host_with_kdump.py +++ b/multi_host_migration/tests/migration_multi_host_with_kdump.py @@ -1,8 +1,10 @@ import logging + from autotest.client.shared import error from autotest.client.shared.syncdata import SyncData from virttest import utils_test from virttest.utils_test.qemu import migration + from generic.tests import kdump @@ -36,46 +38,48 @@ def run(test, params, env): mig_type = migration.MultihostMigrationRdma class TestMultihostMigration(mig_type, migration.MigrationBase): - """ multihost migration test """ def __init__(self, test, params, env): - - super(TestMultihostMigration, self).__init__(test, params, env) + super().__init__(test, params, env) self.srchost = self.params.get("hosts")[0] self.dsthost = self.params.get("hosts")[1] - super(TestMultihostMigration, self).__setup__(test, params, env, - self.srchost, - self.dsthost) + super().__setup__(test, params, env, self.srchost, self.dsthost) self.crash_timeout = float(params.get("crash_timeout", 360)) - self.def_kernel_param_cmd = ("grubby --update-kernel=`grubby" - " --default-kernel`" - " --args=crashkernel=128M@16M") - self.kernel_param_cmd = params.get("kernel_param_cmd", - self.def_kernel_param_cmd) - def_kdump_enable_cmd = ("chkconfig kdump on &&" - " service kdump restart") - self.kdump_enable_cmd = params.get("kdump_enable_cmd", - def_kdump_enable_cmd) - def_crash_kernel_prob_cmd = ("grep -q 1 /sys/kernel/" - "kexec_crash_loaded") - self.crash_kernel_prob_cmd = params.get("crash_kernel_prob_cmd", - def_crash_kernel_prob_cmd) - self.crash_cmd = params.get("crash_cmd", - "echo c > /proc/sysrq-trigger") - self.vmcore_chk_cmd = params.get("vmcore_chk_cmd", - "ls -R /var/crash | grep vmcore") + self.def_kernel_param_cmd = ( + "grubby --update-kernel=`grubby" + " --default-kernel`" + " --args=crashkernel=128M@16M" + ) + 
self.kernel_param_cmd = params.get( + "kernel_param_cmd", self.def_kernel_param_cmd + ) + def_kdump_enable_cmd = "chkconfig kdump on &&" " service kdump restart" + self.kdump_enable_cmd = params.get("kdump_enable_cmd", def_kdump_enable_cmd) + def_crash_kernel_prob_cmd = "grep -q 1 /sys/kernel/" "kexec_crash_loaded" + self.crash_kernel_prob_cmd = params.get( + "crash_kernel_prob_cmd", def_crash_kernel_prob_cmd + ) + self.crash_cmd = params.get("crash_cmd", "echo c > /proc/sysrq-trigger") + self.vmcore_chk_cmd = params.get( + "vmcore_chk_cmd", "ls -R /var/crash | grep vmcore" + ) self.vmcore_incomplete = "vmcore-incomplete" self.nvcpu = 1 @error.context_aware - def start_worker_guest_kdump(self, mig_data, login_timeout, - crash_kernel_prob_cmd, - kernel_param_cmd, - kdump_enable_cmd, - nvcpu, crash_cmd): + def start_worker_guest_kdump( + self, + mig_data, + login_timeout, + crash_kernel_prob_cmd, + kernel_param_cmd, + kdump_enable_cmd, + nvcpu, + crash_cmd, + ): """ force the Linux kernel to crash before migration @@ -90,11 +94,17 @@ def start_worker_guest_kdump(self, mig_data, login_timeout, vm = mig_data.vms[0] kdump.preprocess_kdump(test, vm, login_timeout) - kdump.kdump_enable(vm, vm.name, crash_kernel_prob_cmd, - kernel_param_cmd, kdump_enable_cmd, - login_timeout) - error.context("Kdump Testing, force the Linux kernel to crash", - logging.info) + kdump.kdump_enable( + vm, + vm.name, + crash_kernel_prob_cmd, + kernel_param_cmd, + kdump_enable_cmd, + login_timeout, + ) + error.context( + "Kdump Testing, force the Linux kernel to crash", logging.info + ) kdump.crash_test(test, vm, nvcpu, crash_cmd, login_timeout) @error.context_aware @@ -112,16 +122,12 @@ def check_worker_kdump(self, mig_data, vmcore_chk_cmd, vmcore_incomplete): if vm.is_paused(): vm.resume() if not utils_test.qemu.guest_active(vm): - raise error.TestFail("Guest not active " - "after migration") - logging.info("Logging into migrated guest after " - "migration") + raise error.TestFail("Guest not active " "after migration") + logging.info("Logging into migrated guest after " "migration") session = vm.wait_for_login(timeout=self.login_timeout) - error.context("Checking vmcore file in guest", - logging.info) + error.context("Checking vmcore file in guest", logging.info) if session is not None: - logging.info("kdump completed, no need ping-pong" - " migration") + logging.info("kdump completed, no need ping-pong" " migration") self.stop_migrate = True output = session.cmd_output(vmcore_chk_cmd) session.close() @@ -135,12 +141,13 @@ def check_worker_kdump(self, mig_data, vmcore_chk_cmd, vmcore_incomplete): @error.context_aware def migration_scenario(self): - - error.context("Migration from %s to %s over protocol %s." 
% - (self.srchost, self.dsthost, mig_protocol), - logging.info) - sync = SyncData(self.master_id(), self.hostid, self.hosts, - self.id, self.sync_server) + error.context( + f"Migration from {self.srchost} to {self.dsthost} over protocol {mig_protocol}.", + logging.info, + ) + sync = SyncData( + self.master_id(), self.hostid, self.hosts, self.id, self.sync_server + ) def start_worker(mig_data): """ @@ -149,12 +156,15 @@ def start_worker(mig_data): :param mig_data: Data for migration """ - self.start_worker_guest_kdump(mig_data, self.login_timeout, - self.crash_kernel_prob_cmd, - self.kernel_param_cmd, - self.kdump_enable_cmd, - self.nvcpu, - self.crash_cmd) + self.start_worker_guest_kdump( + mig_data, + self.login_timeout, + self.crash_kernel_prob_cmd, + self.kernel_param_cmd, + self.kdump_enable_cmd, + self.nvcpu, + self.crash_cmd, + ) def check_worker(mig_data): """ @@ -163,11 +173,11 @@ def check_worker(mig_data): :param mig_data: Data for migration """ - self.check_worker_kdump(mig_data, self.vmcore_chk_cmd, - self.vmcore_incomplete) + self.check_worker_kdump( + mig_data, self.vmcore_chk_cmd, self.vmcore_incomplete + ) - super(TestMultihostMigration, self).ping_pong_migrate( - mig_type, sync, start_worker, check_worker) + super().ping_pong_migrate(mig_type, sync, start_worker, check_worker) mig = TestMultihostMigration(test, params, env) mig.run() diff --git a/multi_host_migration/tests/migration_multi_host_with_reboot.py b/multi_host_migration/tests/migration_multi_host_with_reboot.py index 2b1175cfb0..f990919e5a 100644 --- a/multi_host_migration/tests/migration_multi_host_with_reboot.py +++ b/multi_host_migration/tests/migration_multi_host_with_reboot.py @@ -1,8 +1,8 @@ import logging -import time import random -from autotest.client.shared import error -from autotest.client.shared import utils +import time + +from autotest.client.shared import error, utils from virttest import utils_misc from virttest.utils_test.qemu import migration @@ -35,10 +35,8 @@ def run(test, params, env): mig_type = migration.MultihostMigrationRdma class TestMultihostMigration(mig_type): - def __init__(self, test, params, env): - super(TestMultihostMigration, self).__init__(test, params, env, - preprocess_env) + super().__init__(test, params, env, preprocess_env) self.srchost = self.params.get("hosts")[0] self.dsthost = self.params.get("hosts")[1] self.is_src = params["hostid"] == self.srchost @@ -50,8 +48,7 @@ def __init__(self, test, params, env): @error.context_aware def before_migration(self, mig_data): def do_reboot(vm): - reboot_method = mig_data.params.get("reboot_method", - "system_reset") + reboot_method = mig_data.params.get("reboot_method", "system_reset") reboot_timeout = float(mig_data.params.get("reboot_timeout", 30)) if self.is_src: logging.info("Do '%s' before migraion...", reboot_method) @@ -60,12 +57,14 @@ def do_reboot(vm): while time.time() < end_time: vm.monitor.clear_event("RESET") vm.monitor.cmd(reboot_method) - reseted = utils_misc.wait_for(lambda: - vm.monitor.get_event("RESET"), - timeout=self.login_timeout) + reseted = utils_misc.wait_for( + lambda: vm.monitor.get_event("RESET"), + timeout=self.login_timeout, + ) if not reseted: - raise error.TestFail("Not found RESET event after " - "execute 'system_reset'") + raise error.TestFail( + "Not found RESET event after " "execute 'system_reset'" + ) vm.monitor.clear_event("RESET") time.sleep(self.random_timeout) @@ -86,8 +85,10 @@ def migration_scenario(self, worker=None): max_t = int(params.get("max_random_timeout", 5)) 
self.random_timeout = random.randint(min_t, max_t) params["start_migration_timeout"] = self.random_timeout - error.context("Start migration after %d seconds" % - self.random_timeout, logging.info) + error.context( + "Start migration after %d seconds" % self.random_timeout, + logging.info, + ) self.migrate_wait([self.vm], self.srchost, self.dsthost) diff --git a/multi_host_migration/tests/migration_multi_host_with_speed_measurement.py b/multi_host_migration/tests/migration_multi_host_with_speed_measurement.py index bf4a446861..0acbe9cd77 100644 --- a/multi_host_migration/tests/migration_multi_host_with_speed_measurement.py +++ b/multi_host_migration/tests/migration_multi_host_with_speed_measurement.py @@ -1,14 +1,16 @@ +import logging import os import re -import logging -import time import socket +import time + import six from autotest.client.shared import error, utils from autotest.client.shared.barrier import listen_server from autotest.client.shared.syncdata import SyncData from virttest import utils_misc from virttest.utils_test.qemu import migration + from provider import cpuflags @@ -45,8 +47,7 @@ def run(test, params, env): vm_mem = int(params.get("mem", "512")) - get_mig_speed = re.compile(r"^transferred ram: (\d+) kbytes$", - re.MULTILINE) + get_mig_speed = re.compile(r"^transferred ram: (\d+) kbytes$", re.MULTILINE) mig_speed = params.get("mig_speed", "1G") mig_speed_accuracy = float(params.get("mig_speed_accuracy", "0.2")) @@ -57,11 +58,14 @@ def get_migration_statistic(vm): mig_stat = utils.Statistic() for _ in range(30): o = vm.monitor.info("migrate") - warning_msg = ("Migration already ended. Migration speed is" - " probably too high and will block vm while" - " filling its memory.") - fail_msg = ("Could not determine the transferred memory from" - " monitor data: %s" % o) + warning_msg = ( + "Migration already ended. Migration speed is" + " probably too high and will block vm while" + " filling its memory." + ) + fail_msg = ( + "Could not determine the transferred memory from" f" monitor data: {o}" + ) if isinstance(o, six.string_types): if "status: active" not in o: raise error.TestWarn(warning_msg) @@ -88,15 +92,16 @@ def get_migration_statistic(vm): return mig_stat class TestMultihostMigration(base_class): - def __init__(self, test, params, env): - super(TestMultihostMigration, self).__init__(test, params, env) + super().__init__(test, params, env) self.mig_stat = None self.srchost = self.params.get("hosts")[0] self.dsthost = self.params.get("hosts")[1] - self.id = {'src': self.srchost, - 'dst': self.dsthost, - "type": "speed_measurement"} + self.id = { + "src": self.srchost, + "dst": self.dsthost, + "type": "speed_measurement", + } self.link_speed = 0 def check_vms(self, mig_data): @@ -116,14 +121,15 @@ def migrate_vms_src(self, mig_data): For change way how machine migrates is necessary re implement this method. 
""" - super_cls = super(TestMultihostMigration, self) + super_cls = super() super_cls.migrate_vms_src(mig_data) vm = mig_data.vms[0] self.mig_stat = get_migration_statistic(vm) def migration_scenario(self): - sync = SyncData(self.master_id(), self.hostid, self.hosts, - self.id, self.sync_server) + sync = SyncData( + self.master_id(), self.hostid, self.hosts, self.id, self.sync_server + ) srchost = self.params.get("hosts")[0] dsthost = self.params.get("hosts")[1] vms = [params.get("vms").split()[0]] @@ -132,12 +138,15 @@ def worker(mig_data): vm = mig_data.vms[0] session = vm.wait_for_login(timeout=self.login_timeout) - cpuflags.install_cpuflags_util_on_vm(test, vm, install_path, - extra_flags="-msse3 -msse2") + cpuflags.install_cpuflags_util_on_vm( + test, vm, install_path, extra_flags="-msse3 -msse2" + ) - cmd = ("%s/cpuflags-test --stressmem %d,%d" % - (os.path.join(install_path, "cpu_flags"), - vm_mem * 4, vm_mem / 2)) + cmd = "%s/cpuflags-test --stressmem %d,%d" % ( + os.path.join(install_path, "cpu_flags"), + vm_mem * 4, + vm_mem / 2, + ) logging.debug("Sending command: %s", cmd) session.sendline(cmd) @@ -154,11 +163,14 @@ def worker(mig_data): server.close() self.link_speed = data_len / (30 * 1024 * 1024) logging.info("Link speed %d MB/s", self.link_speed) - ms = utils.convert_data_size(mig_speed, 'M') - if (ms > data_len / 30): - logging.warn("Migration speed %s MB/s is set faster than " - "real link speed %d MB/s", - mig_speed, self.link_speed) + ms = utils.convert_data_size(mig_speed, "M") + if ms > data_len / 30: + logging.warning( + "Migration speed %s MB/s is set faster than " + "real link speed %d MB/s", + mig_speed, + self.link_speed, + ) else: self.link_speed = ms / (1024 * 1024) else: @@ -166,8 +178,7 @@ def worker(mig_data): for _ in range(10000): data += "i" server_port = sync.sync(timeout=120)[self.master_id()] - sock = socket.socket(socket.AF_INET, - socket.SOCK_STREAM) + sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) sock.connect((self.master_id(), server_port)) try: endtime = time.time() + 10 @@ -194,22 +205,22 @@ def worker(mig_data): logging.info("Target migration speed: %d MB/s", mig_speed) logging.info("Real Link speed: %d MB/s", mig.link_speed) - logging.info( - "Average migration speed: %d MB/s", mig_stat.get_average()) + logging.info("Average migration speed: %d MB/s", mig_stat.get_average()) logging.info("Minimum migration speed: %d MB/s", mig_stat.get_min()) logging.info("Maximum migration speed: %d MB/s", mig_stat.get_max()) - logging.info("Maximum tolerable divergence: %3.1f%%", - mig_speed_accuracy * 100) + logging.info("Maximum tolerable divergence: %3.1f%%", mig_speed_accuracy * 100) if real_speed < mig_speed - ack_speed: divergence = (1 - float(real_speed) / float(mig_speed)) * 100 - raise error.TestWarn("Average migration speed (%s MB/s) " - "is %3.1f%% lower than target (%s MB/s)" % - (real_speed, divergence, mig_speed)) + raise error.TestWarn( + f"Average migration speed ({real_speed} MB/s) " + f"is {divergence:3.1f}% lower than target ({mig_speed} MB/s)" + ) if real_speed > mig_speed + ack_speed: divergence = (1 - float(mig_speed) / float(real_speed)) * 100 - raise error.TestWarn("Average migration speed (%s MB/s) " - "is %3.1f%% higher than target (%s MB/s)" % - (real_speed, divergence, mig_speed)) + raise error.TestWarn( + f"Average migration speed ({real_speed} MB/s) " + f"is {divergence:3.1f}% higher than target ({mig_speed} MB/s)" + ) diff --git a/multi_host_migration/tests/migration_multi_host_with_stress.py 
b/multi_host_migration/tests/migration_multi_host_with_stress.py index 01ac42fb1b..63989db826 100644 --- a/multi_host_migration/tests/migration_multi_host_with_stress.py +++ b/multi_host_migration/tests/migration_multi_host_with_stress.py @@ -1,10 +1,7 @@ import logging -from autotest.client.shared import error -from autotest.client.shared import utils - -from virttest import utils_misc -from virttest import utils_test +from autotest.client.shared import error, utils +from virttest import utils_misc, utils_test from virttest.utils_test.qemu import migration @@ -34,10 +31,8 @@ def run(test, params, env): mig_type = migration.MultihostMigrationRdma class TestMultihostMigration(mig_type): - def __init__(self, test, params, env): - super(TestMultihostMigration, self).__init__(test, params, env, - preprocess_env) + super().__init__(test, params, env, preprocess_env) self.srchost = self.params.get("hosts")[0] self.dsthost = self.params.get("hosts")[1] self.is_src = params["hostid"] == self.srchost @@ -54,12 +49,14 @@ def __init__(self, test, params, env): def migration_scenario(self): def clean_up(vm): - kill_bg_stress_cmd = params.get("kill_bg_stress_cmd", - "killall -9 stress") + kill_bg_stress_cmd = params.get( + "kill_bg_stress_cmd", "killall -9 stress" + ) logging.info("Kill the background test in guest.") - session = vm.wait_for_login(timeout=self.login_timeout, - nic_index=self.nic_index) + session = vm.wait_for_login( + timeout=self.login_timeout, nic_index=self.nic_index + ) if self.params.get("bg_stress_test") == "driver_load": if self.bg and self.bg.is_alive(): self.bg.join() @@ -69,8 +66,9 @@ def clean_up(vm): else: s, o = session.cmd_status_output(kill_bg_stress_cmd) if s: - raise error.TestFail("Failed to kill the background" - " test in guest: %s" % o) + raise error.TestFail( + "Failed to kill the background" f" test in guest: {o}" + ) session.close() @error.context_aware @@ -78,15 +76,22 @@ def start_worker(mig_data): logging.info("Try to login guest before migration test.") vm = env.get_vm(params["main_vm"]) bg_stress_test = self.params.get("bg_stress_test") - session = vm.wait_for_login(timeout=self.login_timeout, - nic_index=self.nic_index) + session = vm.wait_for_login( + timeout=self.login_timeout, nic_index=self.nic_index + ) error.context("Do stress test before migration.", logging.info) check_running_cmd = params.get("check_running_cmd") - self.bg = utils.InterruptedThread(utils_test.run_virt_sub_test, - args=(test, params, env,), - kwargs={"sub_type": bg_stress_test}) + self.bg = utils.InterruptedThread( + utils_test.run_virt_sub_test, + args=( + test, + params, + env, + ), + kwargs={"sub_type": bg_stress_test}, + ) self.bg.start() @@ -95,8 +100,9 @@ def check_running(): if check_running_cmd: if not utils_misc.wait_for(check_running, timeout=360): - raise error.TestFail("Failed to start %s in guest." % - bg_stress_test) + raise error.TestFail( + f"Failed to start {bg_stress_test} in guest." 
+ ) def check_worker(mig_data): if not self.is_src and self.need_cleanup: @@ -106,8 +112,9 @@ def check_worker(mig_data): if params.get("check_vm_before_migration", "yes") == "no": params["check_vm_needs_restart"] = "no" - self.migrate_wait(self.vms, self.srchost, self.dsthost, - start_worker, check_worker) + self.migrate_wait( + self.vms, self.srchost, self.dsthost, start_worker, check_worker + ) mig = TestMultihostMigration(test, params, env) mig.run() diff --git a/multi_host_migration/tests/migration_multi_host_with_xbzrle.py b/multi_host_migration/tests/migration_multi_host_with_xbzrle.py index 0c01d7ade2..fe1d95615e 100644 --- a/multi_host_migration/tests/migration_multi_host_with_xbzrle.py +++ b/multi_host_migration/tests/migration_multi_host_with_xbzrle.py @@ -1,7 +1,7 @@ import logging + from autotest.client.shared import error -from virttest import utils_test -from virttest import virt_vm +from virttest import utils_test, virt_vm from virttest.utils_test.qemu import migration @@ -41,19 +41,15 @@ def run(test, params, env): mig_type = migration.MultihostMigrationRdma class TestMultihostMigration(mig_type, migration.MigrationBase): - """ multihost migration test """ def __init__(self, test, params, env): - - super(TestMultihostMigration, self).__init__(test, params, env) + super().__init__(test, params, env) self.srchost = self.params.get("hosts")[0] self.dsthost = self.params.get("hosts")[1] - super(TestMultihostMigration, self).__setup__(test, params, env, - self.srchost, - self.dsthost) + super().__setup__(test, params, env, self.srchost, self.dsthost) self.need_set_cache_size = set_cache_size == "yes" self.need_stress = need_stress == "yes" self.need_cleanup = self.need_stress @@ -69,13 +65,10 @@ def __init__(self, test, params, env): self.post_migration = self.post_migration_capability if self.sub_type == "before_migrate_cache_size": self.before_migration = self.before_migration_cache_size_speed - self.post_migration = \ - self.post_migration_capability_with_xbzrle_off_on + self.post_migration = self.post_migration_capability_with_xbzrle_off_on if self.sub_type == "after_migrate_cache_size": - self.before_migration = \ - self.before_migration_capability_with_xbzrle_on - self.post_migration = \ - self.post_migration_set_cache_size + self.before_migration = self.before_migration_capability_with_xbzrle_on + self.post_migration = self.post_migration_set_cache_size def set_xbzrle(self): """ @@ -98,18 +91,25 @@ def get_mig_totaltime_downtime_transferred_ram(self, vm): :param vm: vm object """ - error.context("Get total time, downtime and transferred ram " - "after migration.", logging.info) + error.context( + "Get total time, downtime and transferred ram " "after migration.", + logging.info, + ) downtime = int(vm.monitor.info("migrate").get("downtime")) total_time = int(vm.monitor.info("migrate").get("total-time")) - transferred_ram = \ - int(vm.monitor.info("migrate").get("ram").get("transferred")) + transferred_ram = int( + vm.monitor.info("migrate").get("ram").get("transferred") + ) mig_downtime_list.append(downtime) mig_total_time_list.append(total_time) transferred_ram_list.append(transferred_ram) - logging.info("The total time is %d, downtime is %d and " - "transferred ram is %d after migration", - total_time, downtime, transferred_ram) + logging.info( + "The total time is %d, downtime is %d and " + "transferred ram is %d after migration", + total_time, + downtime, + transferred_ram, + ) @error.context_aware def check_mig_totaltime_downtime_transferred_ram(self): @@ -119,35 
+119,36 @@ def check_mig_totaltime_downtime_transferred_ram(self): """ if self.is_src: - error.context("Check total time, downtime and transferred ram" - " after migration.", logging.info) + error.context( + "Check total time, downtime and transferred ram" + " after migration.", + logging.info, + ) logging.info("Total time list: %s", str(mig_total_time_list)) logging.info("Downtime list: %s", str(mig_downtime_list)) - logging.info("Transferred ram list: %s", - str(transferred_ram_list)) + logging.info("Transferred ram list: %s", str(transferred_ram_list)) for i in range(len(mig_total_time_list)): if min(mig_total_time_list) != mig_total_time_list[-1]: - raise error.TestFail("The total time of migration is " - "error, %s should be minimum, " - "but actual is %s" % - (mig_total_time_list[-1], - min(mig_total_time_list))) + raise error.TestFail( + "The total time of migration is " + f"error, {mig_total_time_list[-1]} should be minimum, " + f"but actual is {min(mig_total_time_list)}" + ) else: mig_total_time_list.pop() if min(mig_downtime_list) != mig_downtime_list[-1]: - raise error.TestFail("The downtime of migration is " - "error, %s should be minimum, " - "but actual is %s" % - (mig_downtime_list[-1], - min(mig_downtime_list))) + raise error.TestFail( + "The downtime of migration is " + f"error, {mig_downtime_list[-1]} should be minimum, " + f"but actual is {min(mig_downtime_list)}" + ) else: mig_downtime_list.pop() if min(transferred_ram_list) != transferred_ram_list[-1]: raise error.TestFail( "The transferred ram of migration is error, " - "%s should be minimum, but actual is %s" % - (transferred_ram_list[-1], - min(transferred_ram_list))) + f"{transferred_ram_list[-1]} should be minimum, but actual is {min(transferred_ram_list)}" + ) else: transferred_ram_list.pop() @@ -194,10 +195,17 @@ def before_migration_capability_with_xbzrle_on(self, mig_data): self.set_migration_speed(self.max_speed) @error.context_aware - def post_migration_capability(self, vm, cancel_delay, mig_offline, - dsthost, vm_ports, - not_wait_for_migration, - fd, mig_data): + def post_migration_capability( + self, + vm, + cancel_delay, + mig_offline, + dsthost, + vm_ports, + not_wait_for_migration, + fd, + mig_data, + ): """ get total time, downtime and transferred ram after migration @@ -217,16 +225,25 @@ def post_migration_capability(self, vm, cancel_delay, mig_offline, try: vm.wait_for_migration(self.migration_timeout) except virt_vm.VMMigrateTimeoutError: - raise error.TestFail("Migration failed with setting " - "xbzrle to false.") + raise error.TestFail( + "Migration failed with setting " "xbzrle to false." + ) logging.info("Migration completed with xbzrle false") self.get_mig_totaltime_downtime_transferred_ram(vm) vm.destroy(gracefully=False) @error.context_aware def post_migration_capability_with_xbzrle_off_on( - self, vm, cancel_delay, mig_offline, dsthost, - vm_ports, not_wait_for_migration, fd, mig_data): + self, + vm, + cancel_delay, + mig_offline, + dsthost, + vm_ports, + not_wait_for_migration, + fd, + mig_data, + ): """ get total time, downtime and transferred ram after migration @@ -250,17 +267,25 @@ def post_migration_capability_with_xbzrle_off_on( try: vm.wait_for_migration(self.mig_timeout) except virt_vm.VMMigrateTimeoutError: - raise error.TestFail("Migration failed with setting cache " - "size to %s." % cache_size) - logging.info("Migration completed with cache size %s" - "", cache_size) + raise error.TestFail( + "Migration failed with setting cache " f"size to {cache_size}." 
+ ) + logging.info("Migration completed with cache size %s" "", cache_size) self.get_mig_totaltime_downtime_transferred_ram(vm) vm.destroy(gracefully=False) @error.context_aware def post_migration_set_cache_size( - self, vm, cancel_delay, mig_offline, dsthost, - vm_ports, not_wait_for_migration, fd, mig_data): + self, + vm, + cancel_delay, + mig_offline, + dsthost, + vm_ports, + not_wait_for_migration, + fd, + mig_data, + ): """ set cache size during migration get cache size after migration @@ -283,16 +308,20 @@ def post_migration_set_cache_size( try: vm.wait_for_migration(5) except virt_vm.VMMigrateTimeoutError: - logging.info("Set cache size to %s during migration" - ".", self.cache_size[1]) + logging.info( + "Set cache size to %s during migration" ".", self.cache_size[1] + ) self.set_migration_cache_size(int(self.cache_size[1])) try: vm.wait_for_migration(self.mig_timeout) except virt_vm.VMMigrateTimeoutError: - raise error.TestFail("Migration failed with setting cache " - "size to %s." % self.cache_size[1]) - logging.info("Migration completed with cache size %s" - "", self.cache_size[1]) + raise error.TestFail( + "Migration failed with setting cache " + f"size to {self.cache_size[1]}." + ) + logging.info( + "Migration completed with cache size %s" "", self.cache_size[1] + ) self.get_migration_cache_size(1) self.get_mig_totaltime_downtime_transferred_ram(vm) self.get_migration_info(vm) @@ -300,10 +329,10 @@ def post_migration_set_cache_size( @error.context_aware def migration_scenario(self): - - error.context("Migration from %s to %s over protocol %s." % - (self.srchost, self.dsthost, mig_protocol), - logging.info) + error.context( + f"Migration from {self.srchost} to {self.dsthost} over protocol {mig_protocol}.", + logging.info, + ) def start_worker(mig_data): """ @@ -325,18 +354,17 @@ def check_worker(mig_data): if vm.is_paused(): vm.resume() if not utils_test.qemu.guest_active(vm): - raise error.TestFail("Guest not active " - "after migration") + raise error.TestFail("Guest not active " "after migration") if self.need_cleanup: self.clean_up(self.kill_bg_stress_cmd, vm) else: - logging.info("No need to kill the background " - "test in guest.") + logging.info("No need to kill the background " "test in guest.") vm.reboot() vm.destroy() - self.migrate_wait([self.vm], self.srchost, self.dsthost, - start_worker, check_worker) + self.migrate_wait( + [self.vm], self.srchost, self.dsthost, start_worker, check_worker + ) set_cache_size_list = params.objects("set_cache_size") need_stress_list = params.objects("need_stress") diff --git a/openvswitch/tests/load_module.py b/openvswitch/tests/load_module.py index d8c2514f04..0773d498ea 100644 --- a/openvswitch/tests/load_module.py +++ b/openvswitch/tests/load_module.py @@ -1,11 +1,9 @@ import sys import traceback -import six +import six from avocado.utils import process -from virttest import openvswitch -from virttest import versionable_class -from virttest import error_context +from virttest import error_context, openvswitch, versionable_class @error_context.context_aware @@ -43,8 +41,8 @@ def run(test, params, env): if _e is None: raise else: - test.log.error("Cleaning function raised exception too: \n%s", - "".join(traceback.format_exception(e[0], - e[1], - e[2]))) + test.log.error( + "Cleaning function raised exception too: \n%s", + "".join(traceback.format_exception(e[0], e[1], e[2])), + ) six.reraise(_e[0], _e[1], _e[2]) diff --git a/openvswitch/tests/ovs_basic.py b/openvswitch/tests/ovs_basic.py index 77ee4f95b9..5c1f235094 100644 --- 
a/openvswitch/tests/ovs_basic.py +++ b/openvswitch/tests/ovs_basic.py @@ -1,19 +1,20 @@ import logging -import time import os +import time import aexpect - from avocado.utils import process -from virttest import utils_misc -from virttest import utils_net -from virttest import openvswitch -from virttest import ovs_utils -from virttest import versionable_class -from virttest import data_dir -from virttest import error_context +from virttest import ( + data_dir, + error_context, + openvswitch, + ovs_utils, + utils_misc, + utils_net, + versionable_class, +) -LOG_JOB = logging.getLogger('avocado.test') +LOG_JOB = logging.getLogger("avocado.test") def allow_iperf_firewall(machine): @@ -21,10 +22,9 @@ def allow_iperf_firewall(machine): machine.cmd("iptables -I INPUT -p udp --dport 5001 --j ACCEPT") -class MiniSubtest(object): - +class MiniSubtest: def __new__(cls, *args, **kargs): - self = super(MiniSubtest, cls).__new__(cls) + self = super().__new__(cls) ret = None if args is None: args = [] @@ -40,11 +40,10 @@ def __new__(cls, *args, **kargs): class InfrastructureInit(MiniSubtest): - def setup(self, test, params, env): - self.br0_name = "br0-%s" % (utils_misc.generate_random_string(3)) + self.br0_name = f"br0-{utils_misc.generate_random_string(3)}" while self.br0_name in utils_net.get_net_if(): - self.br0_name = "br0-%s" % (utils_misc.generate_random_string(3)) + self.br0_name = f"br0-{utils_misc.generate_random_string(3)}" self.br0_ip = params.get("bridge_ip", "192.168.250.1") self.ovs = None @@ -58,19 +57,17 @@ def setup(self, test, params, env): self.ovs = versionable_class.factory(openvswitch.OpenVSwitchSystem)() self.ovs.init_system() self.ovs.check() - error_context.context("Add new bridge %s." % (self.br0_name)) + error_context.context(f"Add new bridge {self.br0_name}.") self.ovs.add_br(self.br0_name) utils_net.set_net_if_ip(self.br0_name, self.br0_ip) utils_net.bring_up_ifname(self.br0_name) - self.dns_pidf = (utils_net.check_add_dnsmasq_to_br(self.br0_name, - test.tmpdir)) - error_context.context("Add new ports from vms %s to bridge %s." % - (self.vms, self.br0_name)) + self.dns_pidf = utils_net.check_add_dnsmasq_to_br(self.br0_name, test.tmpdir) + error_context.context( + f"Add new ports from vms {self.vms} to bridge {self.br0_name}." + ) for vm in self.vms: - utils_net.change_iface_bridge(vm.virtnet[1], - self.br0_name, - self.ovs) + utils_net.change_iface_bridge(vm.virtnet[1], self.br0_name, self.ovs) LOG_JOB.debug(self.ovs.status()) self.host = ovs_utils.Machine(src=test.workdir) @@ -86,8 +83,9 @@ def clean(self, test, params, env): if self.ovs: try: if self.dns_pidf is not None: - utils_misc.signal_program(self.dns_pidf[0:-4], - pid_files_dir=test.tmpdir) + utils_misc.signal_program( + self.dns_pidf[0:-4], pid_files_dir=test.tmpdir + ) except: pass try: @@ -103,8 +101,8 @@ def run(test, params, env): """ Run basic test of OpenVSwitch driver. 
""" - class test_ping(InfrastructureInit): + class test_ping(InfrastructureInit): def test(self, test, params, env): count = params.get("ping_count", 10) for mvm in self.mvms: @@ -121,20 +119,16 @@ def test(self, test, params, env): mvm.ping(addr, self.br0_name, count) class test_iperf(InfrastructureInit): - def start_servers(self): - utils_misc.ForAllP( - self.machines).cmd_in_src("%s -s &> /dev/null &" % - (self.iperf_b_path)) - utils_misc.ForAllP( - self.machines).cmd_in_src("%s -s -u &> /dev/null &" % - (self.iperf_b_path)) + utils_misc.ForAllP(self.machines).cmd_in_src( + f"{self.iperf_b_path} -s &> /dev/null &" + ) + utils_misc.ForAllP(self.machines).cmd_in_src( + f"{self.iperf_b_path} -s -u &> /dev/null &" + ) def iperf_client(self, machine, server_ip, add_params): - out = machine.cmd_in_src("%s -c %s %s" % - (self.iperf_b_path, - server_ip, - add_params)) + out = machine.cmd_in_src(f"{self.iperf_b_path} -c {server_ip} {add_params}") return " ".join(out.splitlines()[-1].split()[6:8]) def test_bandwidth(self, add_params=None): @@ -142,20 +136,23 @@ def test_bandwidth(self, add_params=None): add_params = "" speeds = [] - speeds.append(self.iperf_client(self.mvms[0], - self.host.addrs[ - self.br0_name]["ipv4"][0], - add_params)) - - speeds.append(self.iperf_client(self.host, - self.mvms[0].virtnet[ - 1].ip["ipv4"][0], - add_params)) - - speeds.append(self.iperf_client(self.mvms[0], - self.mvms[1].virtnet[ - 1].ip["ipv4"][0], - add_params)) + speeds.append( + self.iperf_client( + self.mvms[0], self.host.addrs[self.br0_name]["ipv4"][0], add_params + ) + ) + + speeds.append( + self.iperf_client( + self.host, self.mvms[0].virtnet[1].ip["ipv4"][0], add_params + ) + ) + + speeds.append( + self.iperf_client( + self.mvms[0], self.mvms[1].virtnet[1].ip["ipv4"][0], add_params + ) + ) return speeds @@ -164,9 +161,9 @@ def test(self, test, params, env): self.iperf_b_path = os.path.join("iperf-2.0.4", "src", "iperf") error_context.context("Install iperf to vms machine.") - utils_misc.ForAllP( - self.machines).compile_autotools_app_tar(iperf_src_path, - "iperf-2.0.4.tar.gz") + utils_misc.ForAllP(self.machines).compile_autotools_app_tar( + iperf_src_path, "iperf-2.0.4.tar.gz" + ) allow_iperf_firewall(self.host) utils_misc.ForAllP(self.mvms).cmd("iptables -F") @@ -189,19 +186,17 @@ def test(self, test, params, env): def clean(self, test, params, env): self.host.cmd("killall -9 iperf") - super(test_iperf, self).clean(test, params, env) + super().clean(test, params, env) class test_vlan_ping(InfrastructureInit): - def test(self, test, params, env): count = params.get("ping_count", 10) ret = utils_misc.ForAllPSE(self.mvms).ping( - self.host.addrs[self.br0_name]["ipv6"][0], - 1, count) + self.host.addrs[self.br0_name]["ipv6"][0], 1, count + ) for ret, vm in zip(ret, self.mvms): if "exception" in ret: - test.error("VM %s can't ping host:\n %s" % - (vm.name, ret.exception)) + test.error(f"VM {vm.name} can't ping host:\n {ret.exception}") error_context.context("Add OpenVSwitch device to vlan.") self.ovs.add_port_tag(self.mvms[0].virtnet[1].ifname, "1") @@ -217,10 +212,8 @@ def test(self, test, params, env): self.mvms[1].ping(self.mvms[0].virtnet[1].ip["ipv6"][0], 1, 1) try: - self.mvms[0].ping(self.mvms[2].virtnet[1].ip["ipv6"][0], - 1, 2) - test.error("VM %s can't ping host:\n %s" % - (vm.name, ret.exception)) + self.mvms[0].ping(self.mvms[2].virtnet[1].ip["ipv6"][0], 1, 2) + test.error(f"VM {vm.name} can't ping host:\n {ret.exception}") except (process.CmdError, aexpect.ShellError): pass @@ -232,20 +225,17 
@@ def test(self, test, params, env): time.sleep(1) error_context.context("Ping all devices in vlan.") - self.mvms[0].ping(self.mvms[1].virtnet[1].ip["ipv6"][0], 1, - count, vlan=1) - self.mvms[0].ping(self.mvms[2].virtnet[1].ip["ipv6"][0], 1, - count, vlan=2) - self.mvms[1].ping(self.mvms[0].virtnet[1].ip["ipv6"][0], 1, - count) - self.mvms[2].ping(self.mvms[0].virtnet[1].ip["ipv6"][0], 1, - count) + self.mvms[0].ping(self.mvms[1].virtnet[1].ip["ipv6"][0], 1, count, vlan=1) + self.mvms[0].ping(self.mvms[2].virtnet[1].ip["ipv6"][0], 1, count, vlan=2) + self.mvms[1].ping(self.mvms[0].virtnet[1].ip["ipv6"][0], 1, count) + self.mvms[2].ping(self.mvms[0].virtnet[1].ip["ipv6"][0], 1, count) try: - self.mvms[0].ping(self.mvms[2].virtnet[1].ip["ipv6"][0], - 1, 2) - test.error("VM %s shouldn't be able to ping" - " host:\n %s" % (vm.name, ret.exception)) + self.mvms[0].ping(self.mvms[2].virtnet[1].ip["ipv6"][0], 1, 2) + test.error( + f"VM {vm.name} shouldn't be able to ping" + f" host:\n {ret.exception}" + ) except (process.CmdError, aexpect.ShellError): pass @@ -253,18 +243,17 @@ def test(self, test, params, env): self.ovs.add_port_tag(self.mvms[0].virtnet[1].ifname, "[]") self.ovs.add_port_trunk(self.mvms[0].virtnet[1].ifname, [i]) - self.ovs.add_port_trunk(self.mvms[0].virtnet[1].ifname, - list(range(4095))) + self.ovs.add_port_trunk(self.mvms[0].virtnet[1].ifname, list(range(4095))) self.ovs.add_port_trunk(self.mvms[0].virtnet[1].ifname, [1]) - self.mvms[0].ping(self.mvms[1].virtnet[1].ip["ipv6"][0], 1, - count, vlan=1) + self.mvms[0].ping(self.mvms[1].virtnet[1].ip["ipv6"][0], 1, count, vlan=1) test_type = "test_" + params.get("test_type") - if (test_type in locals()): + if test_type in locals(): tests_group = locals()[test_type] tests_group(test, params, env) else: - test.fail("Test type '%s' is not defined in" - " OpenVSwitch basic test" % test_type) + test.fail( + f"Test type '{test_type}' is not defined in" " OpenVSwitch basic test" + ) diff --git a/provider/ansible.py b/provider/ansible.py index 23cac71fc8..5ff534987e 100644 --- a/provider/ansible.py +++ b/provider/ansible.py @@ -1,16 +1,13 @@ -import os import logging +import os from aexpect.client import Expect - -from avocado.utils import path -from avocado.utils import process -from avocado.utils.wait import wait_for +from avocado.utils import path, process from avocado.utils.software_manager.backends.yum import YumBackend - +from avocado.utils.wait import wait_for from virttest import utils_package -LOG_JOB = logging.getLogger('avocado.test') +LOG_JOB = logging.getLogger("avocado.test") class SyntaxCheckError(Exception): @@ -19,8 +16,10 @@ def __init__(self, cmd, output): self.output = output def __str__(self): - return ('The ansible-playbook command "{}" cannot pass syntax check: ' - '{}'.format(self.cmd, self.output)) + return ( + f'The ansible-playbook command "{self.cmd}" cannot pass syntax check: ' + f"{self.output}" + ) class ExecutorTimeoutError(Exception): @@ -28,8 +27,15 @@ class ExecutorTimeoutError(Exception): class PlaybookExecutor(Expect): - def __init__(self, inventory, site_yml, remote_user=None, extra_vars=None, - callback_plugin=None, addl_opts=None): + def __init__( + self, + inventory, + site_yml, + remote_user=None, + extra_vars=None, + callback_plugin=None, + addl_opts=None, + ): """ The wrapper of Ansible-playbook. @@ -40,13 +46,12 @@ def __init__(self, inventory, site_yml, remote_user=None, extra_vars=None, :param callback_plugin: The plugin of the main manager of console output. 
:param addl_opts: Other ansible-playbook common options. """ - self.program = path.find_command('ansible-playbook') + self.program = path.find_command("ansible-playbook") self.inventory = inventory self.site_yml = site_yml self.remote_user = remote_user self.callback_plugin = callback_plugin - super(PlaybookExecutor, self).__init__(self._generate_cmd(extra_vars, - addl_opts)) + super().__init__(self._generate_cmd(extra_vars, addl_opts)) LOG_JOB.info("Command of ansible playbook: '%s'", self.command) def _generate_cmd(self, extra_vars=None, addl_opts=None): @@ -59,17 +64,14 @@ def _generate_cmd(self, extra_vars=None, addl_opts=None): """ playbook_cmd_options = [] if self.callback_plugin: - playbook_cmd_options = [ - 'ANSIBLE_STDOUT_CALLBACK={}'.format(self.callback_plugin)] - playbook_cmd_options.extend([self.program, - self.site_yml, - '-i {}'.format(self.inventory)]) - not self.remote_user or playbook_cmd_options.append( - '-u {}'.format(self.remote_user)) - not extra_vars or playbook_cmd_options.append( - "-e '{}'".format(extra_vars)) + playbook_cmd_options = [f"ANSIBLE_STDOUT_CALLBACK={self.callback_plugin}"] + playbook_cmd_options.extend( + [self.program, self.site_yml, f"-i {self.inventory}"] + ) + not self.remote_user or playbook_cmd_options.append(f"-u {self.remote_user}") + not extra_vars or playbook_cmd_options.append(f"-e '{extra_vars}'") not addl_opts or playbook_cmd_options.append(addl_opts) - playbook_cmd = r' '.join(playbook_cmd_options) + playbook_cmd = r" ".join(playbook_cmd_options) self._syntax_check(playbook_cmd) return playbook_cmd @@ -81,7 +83,7 @@ def _syntax_check(cmd): :param cmd: The generated ansible-playbook command line. """ try: - process.run(cmd + ' --syntax-check', verbose=False, shell=True) + process.run(cmd + " --syntax-check", verbose=False, shell=True) except process.CmdError as err: raise SyntaxCheckError(cmd, err.result.stdout_text) @@ -92,13 +94,18 @@ def wait_for_completed(self, timeout, step_time=10): :param timeout: Timeout in seconds. :param step_time: Time to sleep between attempts in seconds. """ - if not wait_for(lambda: not self.is_alive(), timeout, step=step_time, - text='Waiting for the ansible-playbook process to ' - 'complete...'): + if not wait_for( + lambda: not self.is_alive(), + timeout, + step=step_time, + text="Waiting for the ansible-playbook process to " "complete...", + ): self.kill() - raise ExecutorTimeoutError('ansible-playbook cannot complete all ' - 'tasks within the expected time.') - LOG_JOB.info('ansible-playbook execution is completed.') + raise ExecutorTimeoutError( + "ansible-playbook cannot complete all " + "tasks within the expected time." + ) + LOG_JOB.info("ansible-playbook execution is completed.") def store_playbook_log(self, log_dir, filename): """ @@ -107,7 +114,7 @@ def store_playbook_log(self, log_dir, filename): :param log_dir: Path of the log directory. :param filename: the log file name. """ - with open(os.path.join(log_dir, filename), 'w') as log_file: + with open(os.path.join(log_dir, filename), "w") as log_file: log_file.write(self.get_output()) log_file.flush() @@ -124,8 +131,8 @@ def _pip_binary(): """ Define pip binary """ - for binary in ['pip', 'pip3', 'pip2']: - if process.system("which %s" % binary, ignore_status=True) == 0: + for binary in ["pip", "pip3", "pip2"]: + if process.system(f"which {binary}", ignore_status=True) == 0: return binary LOG_JOB.error("Failed to get available pip binary") return False @@ -134,7 +141,7 @@ def python_install(): """ Install python ansible. 
""" - install_cmd = '%s install ansible' % pip_bin # pylint: disable=E0606 + install_cmd = f"{pip_bin} install ansible" # pylint: disable=E0606 status, output = process.getstatusoutput(install_cmd, verbose=True) if status != 0: LOG_JOB.error("Install python ansible failed as: %s", output) @@ -150,44 +157,43 @@ def distro_install(packages="ansible"): repo_options = { "priority": "1", "gpgcheck": "0", - "skip_if_unavailable": "1" + "skip_if_unavailable": "1", } yum_backend = YumBackend() if yum_backend.add_repo(params["ansible_repo"], **repo_options): - LOG_JOB.info(f"Ansible repo was added: {params['ansible_repo']}") + LOG_JOB.info("Ansible repo was added: %s", params["ansible_repo"]) else: LOG_JOB.error("Ansible repo was required, but failed to be added.") return False install_status = utils_package.package_install(packages) if not install_status: - LOG_JOB.error(f"Failed to install {packages}.") + LOG_JOB.error("Failed to install %s.", packages) # Remove custom dnf repo when it is no longer used if params.get("ansible_repo"): yum_backend.remove_repo(params["ansible_repo"]) return install_status - policy_map = {"distro_install": distro_install, - "python_install": python_install} + policy_map = {"distro_install": distro_install, "python_install": python_install} - ansible_install_policy = params.get('ansible_install_policy') + ansible_install_policy = params.get("ansible_install_policy") if ansible_install_policy: if ansible_install_policy not in policy_map: - LOG_JOB.error(f"No valid install policy: {ansible_install_policy}.") + LOG_JOB.error("No valid install policy: %s.", ansible_install_policy) return False - package_list = params.get_list("package_list", 'sshpass') + package_list = params.get_list("package_list", "sshpass") try: check_cmd = params.get("ansible_check_cmd") - if ansible_install_policy == 'python_install': + if ansible_install_policy == "python_install": global pip_bin pip_bin = _pip_binary() check_cmd = rf"{pip_bin} freeze | grep -v ansible-core | grep -q ansible=" - elif ansible_install_policy == 'distro_install': - package_list.insert(0, 'ansible') + elif ansible_install_policy == "distro_install": + package_list.insert(0, "ansible") if check_cmd: - LOG_JOB.debug(f"Is full ansible version installed: '{check_cmd}'") + LOG_JOB.debug("Is full ansible version installed: %s", check_cmd) process.run(check_cmd, verbose=False, shell=True) else: - path.find_command('ansible-playbook') + path.find_command("ansible-playbook") except (path.CmdNotFoundError, process.CmdError): # If except block is reached and no ansible install policy # is defined it is not possible to install ansible at all @@ -197,7 +203,7 @@ def distro_install(packages="ansible"): return False # Install ansible depended packages that can't be installed # by pip (or are not a dependency) when installing ansible - if not policy_map['distro_install'](package_list): + if not policy_map["distro_install"](package_list): return False # If ansible and dependents packages are installed correctly return True diff --git a/provider/backup_utils.py b/provider/backup_utils.py index 97b10577b9..d2ff88ef6f 100644 --- a/provider/backup_utils.py +++ b/provider/backup_utils.py @@ -5,19 +5,19 @@ from avocado import fail_on from avocado.utils import process - -from virttest import data_dir -from virttest import qemu_storage -from virttest import utils_libguestfs -from virttest import utils_numeric -from virttest import utils_misc -from virttest import utils_disk -from virttest import utils_version +from virttest import ( + 
data_dir, + qemu_storage, + utils_disk, + utils_libguestfs, + utils_misc, + utils_numeric, + utils_version, +) from provider import block_dirty_bitmap as block_bitmap -from provider.virt_storage.storage_admin import sp_admin from provider import job_utils - +from provider.virt_storage.storage_admin import sp_admin BACKING_MASK_PROTOCOL_VERSION_SCOPE = "[9.0.0, )" @@ -44,13 +44,7 @@ def set_default_block_job_options(obj, arguments): def generate_log2_value(start, end, step=1, blacklist=None): if blacklist is None: blacklist = list() - outlist = list( - filter( - lambda x: math.log2(x).is_integer(), - range( - start, - end, - step))) + outlist = list(filter(lambda x: math.log2(x).is_integer(), range(start, end, step))) pool = set(outlist) - set(blacklist) return random.choice(list(pool)) @@ -84,7 +78,13 @@ def copy_out_dict_if_exists(params_in, keys): if key in ["speed", "granularity", "buf-size", "timeout"]: params_out[key] = int(val) continue - if key in ["auto-finalize", "auto-dismiss", "unmap", "persistent", "backing-mask-protocol"]: + if key in [ + "auto-finalize", + "auto-dismiss", + "unmap", + "persistent", + "backing-mask-protocol", + ]: if val in ["yes", "true", "on", True]: params_out[key] = True continue @@ -100,20 +100,19 @@ def generate_tempfile(vm, root_dir, filename, size="10M", timeout=720): """Generate temp data file in VM""" session = vm.wait_for_login() if vm.params["os_type"] == "windows": - file_path = "%s\\%s" % (root_dir, filename) - mk_file_cmd = "fsutil file createnew %s %s" % (file_path, size) - md5_cmd = "certutil -hashfile %s MD5 > %s.md5" % (file_path, file_path) + file_path = f"{root_dir}\\{filename}" + mk_file_cmd = f"fsutil file createnew {file_path} {size}" + md5_cmd = f"certutil -hashfile {file_path} MD5 > {file_path}.md5" else: - file_path = "%s/%s" % (root_dir, filename) + file_path = f"{root_dir}/{filename}" count = int( - utils_numeric.normalize_data_size( - size, - order_magnitude="M", - factor=1024)) + utils_numeric.normalize_data_size(size, order_magnitude="M", factor=1024) + ) dd_cmd = vm.params.get( - "dd_cmd", "dd if=/dev/urandom of=%s bs=1M count=%s oflag=direct") + "dd_cmd", "dd if=/dev/urandom of=%s bs=1M count=%s oflag=direct" + ) mk_file_cmd = dd_cmd % (file_path, count) - md5_cmd = "md5sum %s > %s.md5 && sync" % (file_path, file_path) + md5_cmd = f"md5sum {file_path} > {file_path}.md5 && sync" try: session.cmd(mk_file_cmd, timeout=timeout) session.cmd(md5_cmd, timeout=timeout) @@ -124,34 +123,29 @@ def generate_tempfile(vm, root_dir, filename, size="10M", timeout=720): @fail_on def verify_file_md5(vm, root_dir, filename, timeout=720): if vm.params["os_type"] == "windows": - file_path = "%s\\%s" % (root_dir, filename) - md5_cmd = "certutil -hashfile %s MD5" % file_path - cat_cmd = "type %s.md5" % file_path + file_path = f"{root_dir}\\{filename}" + md5_cmd = f"certutil -hashfile {file_path} MD5" + cat_cmd = f"type {file_path}.md5" else: - file_path = "%s/%s" % (root_dir, filename) - md5_cmd = "md5sum %s" % file_path - cat_cmd = "cat %s.md5" % file_path + file_path = f"{root_dir}/{filename}" + md5_cmd = f"md5sum {file_path}" + cat_cmd = f"cat {file_path}.md5" session = vm.wait_for_login() try: status1, output1 = session.cmd_status_output(md5_cmd, timeout=timeout) now = output1.strip() - assert status1 == 0, "Get file ('%s') MD5 with error: %s" % ( - filename, output1) + assert status1 == 0, f"Get file ('{filename}') MD5 with error: {output1}" status2, output2 = session.cmd_status_output(cat_cmd, timeout=timeout) saved = output2.strip() - assert 
status2 == 0, "Read file ('%s') MD5 file with error: %s" % ( - filename, output2) - assert now == saved, "File's ('%s') MD5 is mismatch! (%s, %s)" % ( - filename, now, saved) + assert status2 == 0, f"Read file ('{filename}') MD5 file with error: {output2}" + assert now == saved, f"File's ('{filename}') MD5 is mismatch! ({now}, {saved})" finally: session.close() def blockdev_snapshot_qmp_cmd(source, target, **extra_options): - options = [ - "node", - "overlay"] + options = ["node", "overlay"] arguments = copy_out_dict_if_exists(extra_options, options) arguments["node"] = source arguments["overlay"] = target @@ -160,7 +154,7 @@ def blockdev_snapshot_qmp_cmd(source, target, **extra_options): def blockdev_mirror_qmp_cmd(source, target, **extra_options): random_id = utils_misc.generate_random_string(4) - job_id = "%s_%s" % (source, random_id) + job_id = f"{source}_{random_id}" options = [ "format", "node-name", @@ -176,7 +170,8 @@ def blockdev_mirror_qmp_cmd(source, target, **extra_options): "auto-finalize", "auto-dismiss", "filter-node-name", - "unmap"] + "unmap", + ] arguments = copy_out_dict_if_exists(extra_options, options) arguments["device"] = source arguments["target"] = target @@ -186,19 +181,19 @@ def blockdev_mirror_qmp_cmd(source, target, **extra_options): def block_commit_qmp_cmd(device, **extra_options): random_id = utils_misc.generate_random_string(4) - job_id = "%s_%s" % (device, random_id) + job_id = f"{device}_{random_id}" options = [ - 'base-node', - 'base', - 'top-node', - 'top', - 'backing-file', - 'speed', - 'on-error', - 'filter-node-name', - 'auto-finalize', - 'auto-dismiss', - 'backing-mask-protocol', + "base-node", + "base", + "top-node", + "top", + "backing-file", + "speed", + "on-error", + "filter-node-name", + "auto-finalize", + "auto-dismiss", + "backing-mask-protocol", ] arguments = copy_out_dict_if_exists(extra_options, options) arguments["device"] = device @@ -210,12 +205,21 @@ def blockdev_stream_qmp_cmd(device, **extra_options): if not isinstance(extra_options, dict): extra_options = dict() random_id = utils_misc.generate_random_string(4) - job_id = "%s_%s" % (device, random_id) + job_id = f"{device}_{random_id}" arguments = {"device": device, "job-id": job_id} # TODO: we may have to sync the block-stream options with libvirt - options = ["speed", "base", "base-node", "snapshot-file", - "filter-node-name", "on-error", "backing-file", - "auto-dismiss", "auto-finalize", 'backing-mask-protocol'] + options = [ + "speed", + "base", + "base-node", + "snapshot-file", + "filter-node-name", + "on-error", + "backing-file", + "auto-dismiss", + "auto-finalize", + "backing-mask-protocol", + ] args = copy_out_dict_if_exists(extra_options, options) if args: arguments.update(args) @@ -227,17 +231,15 @@ def blockdev_backup_qmp_cmd(source, target, **extra_options): if not isinstance(extra_options, dict): extra_options = dict() random_id = utils_misc.generate_random_string(4) - job_id = "%s_%s" % (source, random_id) + job_id = f"{source}_{random_id}" arguments = {"device": source, "target": target, "job-id": job_id} arguments["sync"] = extra_options.get("sync", "full") arguments["speed"] = int(extra_options.get("speed", 0)) arguments["compress"] = extra_options.get("compress", False) arguments["auto-finalize"] = extra_options.get("auto-finalize", True) arguments["auto-dismiss"] = extra_options.get("auto-dismiss", True) - arguments["on-source-error"] = extra_options.get( - "on-source-error", "report") - arguments["on-target-error"] = extra_options.get( - "on-target-error", 
"report") + arguments["on-source-error"] = extra_options.get("on-source-error", "report") + arguments["on-target-error"] = extra_options.get("on-target-error", "report") if "bitmap" in extra_options: arguments["bitmap"] = extra_options["bitmap"] if "bitmap-mode" in extra_options: @@ -246,9 +248,9 @@ def blockdev_backup_qmp_cmd(source, target, **extra_options): arguments["filter-node-name"] = extra_options["filter-node-name"] x_perf_ops = ["use-copy-range", "max-workers", "max-chunk"] if any(item in extra_options for item in x_perf_ops): - arguments["x-perf"] = {x: extra_options[x] - for x in x_perf_ops - if x in extra_options} + arguments["x-perf"] = { + x: extra_options[x] for x in x_perf_ops if x in extra_options + } return "blockdev-backup", arguments @@ -263,7 +265,7 @@ def blockdev_create(vm, **options): def blockdev_snapshot(vm, source, target, **extra_options): cmd, arguments = blockdev_snapshot_qmp_cmd(source, target, **extra_options) out = vm.monitor.cmd(cmd, arguments) - assert out == {}, 'blockdev-snapshot-sync faild: %s' % out + assert out == {}, f"blockdev-snapshot-sync faild: {out}" @fail_on @@ -314,12 +316,13 @@ def blockdev_backup(vm, source, target, **extra_options): timeout = int(extra_options.pop("timeout", 600)) if "bitmap" in arguments: info = block_bitmap.get_bitmap_by_name(vm, source, arguments["bitmap"]) - assert info, "Bitmap '%s' not exists in device '%s'" % ( - arguments["bitmap"], source) + assert info, "Bitmap '{}' not exists in device '{}'".format( + arguments["bitmap"], + source, + ) auto_disable_bitmap = extra_options.pop("auto_disable_bitmap", True) if auto_disable_bitmap and info.get("status") != "disabled": - block_bitmap.block_dirty_bitmap_disable( - vm, source, arguments["bitmap"]) + block_bitmap.block_dirty_bitmap_disable(vm, source, arguments["bitmap"]) vm.monitor.cmd(cmd, arguments) job_id = arguments.get("job-id", source) job_utils.wait_until_block_job_completed(vm, job_id, timeout) @@ -332,16 +335,18 @@ def blockdev_batch_snapshot(vm, source_lst, target_lst, **extra_options): jobs_id = [] for idx, src in enumerate(source_lst): snapshot_cmd, arguments = blockdev_snapshot_qmp_cmd( - src, target_lst[idx], **extra_options) + src, target_lst[idx], **extra_options + ) actions.append({"type": snapshot_cmd, "data": arguments}) arguments = {"actions": actions} vm.monitor.cmd("transaction", arguments) - list(map(lambda x: job_utils.wait_until_block_job_completed(vm, x, timeout), jobs_id)) + list( + map(lambda x: job_utils.wait_until_block_job_completed(vm, x, timeout), jobs_id) + ) @fail_on -def blockdev_batch_backup(vm, source_lst, target_lst, - bitmap_lst, **extra_options): +def blockdev_batch_backup(vm, source_lst, target_lst, bitmap_lst, **extra_options): actions = [] jobs_id = [] bitmap_add_cmd = "block-dirty-bitmap-add" @@ -360,15 +365,17 @@ def blockdev_batch_backup(vm, source_lst, target_lst, for idx, src in enumerate(source_lst): if sync_mode in ["incremental", "bitmap"]: assert len(bitmap_lst) == len( - source_lst), "must provide a valid bitmap name for 'incremental' sync mode" + source_lst + ), "must provide a valid bitmap name for 'incremental' sync mode" extra_options["bitmap"] = bitmap_lst[idx] backup_cmd, arguments = blockdev_backup_qmp_cmd( - src, target_lst[idx], **extra_options) + src, target_lst[idx], **extra_options + ) job_id = arguments.get("job-id", src) jobs_id.append(job_id) actions.append({"type": backup_cmd, "data": arguments}) - if bitmap_lst and (sync_mode == 'full' or sync_mode == 'none'): + if bitmap_lst and (sync_mode == 
"full" or sync_mode == "none"): bitmap_data = {"node": source_lst[idx], "name": bitmap_lst[idx]} granularity = extra_options.get("granularity") persistent = extra_options.get("persistent") @@ -382,19 +389,21 @@ def blockdev_batch_backup(vm, source_lst, target_lst, actions.append({"type": bitmap_add_cmd, "data": bitmap_data}) if disabled_bitmap_lst: - bitmap_data = {"node": source_lst[idx], - "name": disabled_bitmap_lst[idx]} + bitmap_data = {"node": source_lst[idx], "name": disabled_bitmap_lst[idx]} actions.append({"type": bitmap_disable_cmd, "data": bitmap_data}) arguments = {"actions": actions} - if completion_mode == 'grouped': - arguments['properties'] = {"completion-mode": "grouped"} + if completion_mode == "grouped": + arguments["properties"] = {"completion-mode": "grouped"} vm.monitor.cmd("transaction", arguments) if wait_job_complete: - list(map( - lambda x: job_utils.wait_until_block_job_completed(vm, x, timeout), - jobs_id)) + list( + map( + lambda x: job_utils.wait_until_block_job_completed(vm, x, timeout), + jobs_id, + ) + ) @fail_on @@ -417,7 +426,7 @@ def incremental_backup(vm, source, target, bitmap, **extra_options): @fail_on def full_backup(vm, source, target, **extra_options): - """ Do full backup for node""" + """Do full backup for node""" if extra_options is None: extra_options = dict() extra_options["sync"] = "full" @@ -467,9 +476,10 @@ def format_storage_volume(img, filesystem, partition="mbr"): img.image_filename, filesystem=filesystem, image_format=img.image_format, - partition="mbr") + partition="mbr", + ) finally: - process.system("setenforce %s" % selinux_mode, shell=True) + process.system(f"setenforce {selinux_mode}", shell=True) def copyif(params, nbd_image, target_image, bitmap=None): @@ -480,31 +490,27 @@ def copyif(params, nbd_image, target_image, bitmap=None): :params target_image: target image tag :params bitmap: bitmap name """ + def _qemu_io_read(qemu_io, s, l, img): - cmd = '{io} -C -c "r {s} {l}" -f {fmt} {f}'.format( - io=qemu_io, s=s, l=l, fmt=img.image_format, - f=img.image_filename - ) + cmd = f'{qemu_io} -C -c "r {s} {l}" -f {img.image_format} {img.image_filename}' process.system(cmd, ignore_status=False, shell=True) qemu_io = utils_misc.get_qemu_io_binary(params) qemu_img = utils_misc.get_qemu_img_binary(params) - img_obj = qemu_storage.QemuImg(params.object_params(target_image), - data_dir.get_data_dir(), target_image) - nbd_img_obj = qemu_storage.QemuImg(params.object_params(nbd_image), - None, nbd_image) - max_len = int(params.get('qemu_io_max_len', 2147483136)) + img_obj = qemu_storage.QemuImg( + params.object_params(target_image), data_dir.get_data_dir(), target_image + ) + nbd_img_obj = qemu_storage.QemuImg(params.object_params(nbd_image), None, nbd_image) + max_len = int(params.get("qemu_io_max_len", 2147483136)) if bitmap is None: - args = '-f %s %s' % (nbd_img_obj.image_format, - nbd_img_obj.image_filename) + args = f"-f {nbd_img_obj.image_format} {nbd_img_obj.image_filename}" state = True else: - opts = qemu_storage.filename_to_file_opts( - nbd_img_obj.image_filename) - opt = params.get('dirty_bitmap_opt', 'x-dirty-bitmap') - opts[opt] = 'qemu:dirty-bitmap:%s' % bitmap - args = "'json:%s'" % json.dumps(opts) + opts = qemu_storage.filename_to_file_opts(nbd_img_obj.image_filename) + opt = params.get("dirty_bitmap_opt", "x-dirty-bitmap") + opts[opt] = f"qemu:dirty-bitmap:{bitmap}" + args = f"'json:{json.dumps(opts)}'" state = False img_obj.base_image_filename = nbd_img_obj.image_filename @@ -512,25 +518,24 @@ def _qemu_io_read(qemu_io, s, 
l, img): img_obj.base_tag = nbd_img_obj.tag img_obj.rebase(img_obj.params) - map_cmd = '{qemu_img} map --output=json {args}'.format( - qemu_img=qemu_img, args=args) + map_cmd = f"{qemu_img} map --output=json {args}" result = process.run(map_cmd, ignore_status=False, shell=True) for item in json.loads(result.stdout.decode().strip()): - if item['data'] is not state: + if item["data"] is not state: continue # qemu-io can only handle length less than 2147483136, # so here we need to split 'large length' into several parts - start, length = item['start'], item['length'] + start, length = item["start"], item["length"] while length > max_len: _qemu_io_read(qemu_io, start, max_len, img_obj) - start, length = start+max_len, length-max_len + start, length = start + max_len, length - max_len else: if length > 0: _qemu_io_read(qemu_io, start, length, img_obj) - img_obj.base_tag = 'null' + img_obj.base_tag = "null" img_obj.rebase(img_obj.params) @@ -555,26 +560,27 @@ def get_disk_info_by_param(tag, params, session): info = None drive_path = None image_params = params.object_params(tag) - if image_params.get('blk_extra_params'): + if image_params.get("blk_extra_params"): # get disk by serial or wwn # utils_disk.get_linux_disks can also get serial, but for # virtio-scsi ID_SERIAL is a long string including serial # e.g. ID_SERIAL=0QEMU_QEMU_HARDDISK_DATA_DISK2 instead of # ID_SERIAL=DATA_DISK2 - m = re.search(r"(serial|wwn)=(\w+)", - image_params["blk_extra_params"], re.M) + m = re.search(r"(serial|wwn)=(\w+)", image_params["blk_extra_params"], re.M) if m is not None: drive_path = utils_misc.get_linux_drive_path(session, m.group(2)) if drive_path: - info = {'kname': drive_path[5:], 'size': image_params['image_size']} + info = {"kname": drive_path[5:], "size": image_params["image_size"]} else: # get disk by disk size - conds = {'type': image_params.get('disk_type', 'disk'), - 'size': image_params['image_size']} + conds = { + "type": image_params.get("disk_type", "disk"), + "size": image_params["image_size"], + } disks = utils_disk.get_linux_disks(session, True) for kname, attr in disks.items(): - d = dict(zip(['kname', 'size', 'type'], attr)) + d = dict(zip(["kname", "size", "type"], attr)) if all([conds[k] == d[k] for k in conds]): info = d break @@ -594,8 +600,8 @@ def refresh_mounts(mounts, params, session): """ # always refresh disks info when count of data disks >= 1 for tag, mount in mounts.items(): - if tag == 'image1': + if tag == "image1": continue info = get_disk_info_by_param(tag, params, session) - assert info, 'Failed to get the kname for device: %s' % tag - mount[0] = '/dev/%s1' % info['kname'] + assert info, f"Failed to get the kname for device: {tag}" + mount[0] = "/dev/{}1".format(info["kname"]) diff --git a/provider/block_devices_plug.py b/provider/block_devices_plug.py index 9433f4bf7f..e4dc677d2c 100644 --- a/provider/block_devices_plug.py +++ b/provider/block_devices_plug.py @@ -20,24 +20,25 @@ import time from avocado import TestError - from six import reraise from six.moves import xrange - from virttest import utils_misc from virttest.qemu_capabilities import Flags from virttest.qemu_devices import qdevices -from virttest.qemu_devices.utils import (DeviceError, DeviceHotplugError, - DeviceUnplugError) +from virttest.qemu_devices.utils import ( + DeviceError, + DeviceHotplugError, + DeviceUnplugError, +) from virttest.qemu_monitor import MonitorLockError -LOG_JOB = logging.getLogger('avocado.test') +LOG_JOB = logging.getLogger("avocado.test") -HOTPLUG, UNPLUG = ('hotplug', 'unplug') 
+HOTPLUG, UNPLUG = ("hotplug", "unplug") HOTPLUGGED_HBAS = {} -DELETED_EVENT = 'DEVICE_DELETED' -DISK = {'name': 'images', 'media': 'disk'} -CDROM = {'name': 'cdroms', 'media': 'cdrom'} +DELETED_EVENT = "DEVICE_DELETED" +DISK = {"name": "images", "media": "disk"} +CDROM = {"name": "cdroms", "media": "cdrom"} _LOCK = threading.Lock() _QMP_OUTPUT = {} @@ -47,37 +48,46 @@ def _verify_plugged_num(action): """ Verify if the number of changed disks is equal to the plugged ones. """ + def decorator(func): def wrapper(self, *args, **kwargs): orig_disks = self._list_all_disks() - LOG_JOB.debug('The index of disks before %s:\n %s', action, orig_disks) + LOG_JOB.debug("The index of disks before %s:\n %s", action, orig_disks) result = func(self, *args, **kwargs) if self._dev_type != CDROM: for dev in HOTPLUGGED_HBAS.values(): - if dev.get_param('hotplug') == 'off': + if dev.get_param("hotplug") == "off": return result - if not utils_misc.wait_for(lambda: len(self._imgs) == len( - self._list_all_disks() ^ orig_disks), self._timeout, step=1.5): - disks_info_win = ('wmic logicaldisk get drivetype,name,description ' - '& wmic diskdrive list brief /format:list') - disks_info_linux = 'lsblk -a' + if not utils_misc.wait_for( + lambda: len(self._imgs) == len(self._list_all_disks() ^ orig_disks), + self._timeout, + step=1.5, + ): + disks_info_win = ( + "wmic logicaldisk get drivetype,name,description " + "& wmic diskdrive list brief /format:list" + ) + disks_info_linux = "lsblk -a" _session = self.vm.wait_for_login(timeout=360) disks_info = _session.cmd( - disks_info_win if self._iswindows else disks_info_linux) + disks_info_win if self._iswindows else disks_info_linux + ) LOG_JOB.debug("The details of disks:\n %s", disks_info) _session.close() raise TestError( - "%s--> Actual: %s disks. Expected: %s disks." % - (action, len(self._all_disks ^ orig_disks), len(self._imgs))) + f"{action}--> Actual: {len(self._all_disks ^ orig_disks)} disks. Expected: {len(self._imgs)} disks." + ) self._plugged_disks = sorted( - [disk.split('/')[-1] for disk in list(self._all_disks ^ orig_disks)]) + [disk.split("/")[-1] for disk in list(self._all_disks ^ orig_disks)] + ) return result + return wrapper + return decorator class _PlugThread(threading.Thread): - """ Plug Thread that define a plug thread. """ @@ -103,13 +113,13 @@ def run(self): getattr(self._plug_manager, method)(*args) except Exception as e: LOG_JOB.error( - '%s %s failed: %s', self._action.capitalize(), self._images, str(e)) + "%s %s failed: %s", self._action.capitalize(), self._images, str(e) + ) self.exc_info = sys.exc_info() self.exit_event.set() -class _ThreadManager(object): - +class _ThreadManager: """ Thread Manager that provides interfaces about threads for plugging devices. """ @@ -120,44 +130,50 @@ def __init__(self, vm): self.exit_event = threading.Event() def _initial_threads(self, action, imgs, bus=None, interval=0): - """ Initial the threads. """ + """Initial the threads.""" max_threads = min(len(imgs), 2 * multiprocessing.cpu_count()) for i in xrange(max_threads): mon = self._vm.monitors[i % len(self._vm.monitors)] - args = (self._vm, action, imgs[i::max_threads], - mon, self.exit_event, bus, interval) + args = ( + self._vm, + action, + imgs[i::max_threads], + mon, + self.exit_event, + bus, + interval, + ) self._threads.append(_PlugThread(*args)) def _start_threads(self): - """ Start the threads. """ + """Start the threads.""" for thread in self._threads: thread.start() def _join_threads(self, timeout): - """ Join the threads. 
""" + """Join the threads.""" for thread in self._threads: thread.join(timeout) def run_threads(self, action, imgs, bus, timeout, interval=0): - """ Run the threads. """ + """Run the threads.""" self._initial_threads(action, imgs, bus, interval) self._start_threads() self._join_threads(timeout) def raise_threads(self): - """ Raise the exception information of threads. """ + """Raise the exception information of threads.""" for thread in self._threads: if thread.exc_info: reraise(*thread.exc_info) def clean_threads(self): - """ Clean the env threads. """ + """Clean the env threads.""" del self.exit_event del self._threads[:] -class BlockDevicesPlug(object): - +class BlockDevicesPlug: """ The Block Devices Plug. """ @@ -170,60 +186,62 @@ def __init__(self, vm): self._imgs = vm.params.get("images").split()[1:] self._hotplugged_devs = {} self._unplugged_devs = {} - self._islinux = vm.params['os_type'] == 'linux' - self._iswindows = vm.params['os_type'] == 'windows' + self._islinux = vm.params["os_type"] == "linux" + self._iswindows = vm.params["os_type"] == "windows" self._plugged_disks = [] self._orig_disks = set() self._all_disks = set() self._event_devs = [] self._dev_type = DISK - self._qdev_type = qdevices.QBlockdevNode if vm.check_capability( - Flags.BLOCKDEV) else qdevices.QDrive + self._qdev_type = ( + qdevices.QBlockdevNode + if vm.check_capability(Flags.BLOCKDEV) + else qdevices.QDrive + ) self._timeout = 300 self._interval = 0 self._qemu_version = self.vm.devices.qemu_version def __getitem__(self, index): - """ Get the hot plugged disk index. """ + """Get the hot plugged disk index.""" return self._plugged_disks[index] def __len__(self): - """ Get the len of the hot plugged disks. """ + """Get the len of the hot plugged disks.""" return len(self._plugged_disks) def __iter__(self): - """ Iterate the the hot plugged disks. """ - for disk in self._plugged_disks: - yield disk + """Iterate the the hot plugged disks.""" + yield from self._plugged_disks def _list_all_disks(self): - """ List all the disks. """ + """List all the disks.""" session = self.vm.wait_for_login(timeout=360) if self._islinux: self._all_disks = utils_misc.list_linux_guest_disks(session) else: - self._all_disks = set(session.cmd('wmic diskdrive get index').split()[1:]) + self._all_disks = set(session.cmd("wmic diskdrive get index").split()[1:]) session.close() return self._all_disks def _check_qmp_outputs(self, action): - """ Check the output of qmp commands. """ + """Check the output of qmp commands.""" for dev_id in list(_QMP_OUTPUT.keys()): output = _QMP_OUTPUT.pop(dev_id) if output[1] is False: - err = "Failed to %s device %s. " % (action, dev_id) - if not output[0] and action == 'unplug': - err += 'No deleted event generated and %s still in qtree' % dev_id + err = f"Failed to {action} device {dev_id}. " + if not output[0] and action == "unplug": + err += f"No deleted event generated and {dev_id} still in qtree" else: err += output[0] raise TestError(err) def _get_events_deleted(self): - """ Get the device deleted events. 
""" + """Get the device deleted events.""" self.event_devs = [img for img in self._unplugged_devs.keys()] for event in self.vm.monitor.get_events(): - if DELETED_EVENT in event.get("event") and 'device' in event.get('data'): - name = event.get('data')['device'] + if DELETED_EVENT in event.get("event") and "device" in event.get("data"): + name = event.get("data")["device"] if name in self.event_devs: self.event_devs.remove(name) self.vm.monitor.clear_event(DELETED_EVENT) @@ -235,30 +253,35 @@ def _wait_events_deleted(self, timeout=300): """ if not utils_misc.wait_for(self._get_events_deleted, timeout): raise TestError( - 'No \"DEVICE DELETED\" event generated after unplug \"%s\".' % - (';'.join(self.event_devs))) + 'No "DEVICE DELETED" event generated after unplug "{}".'.format( + ";".join(self.event_devs) + ) + ) def _create_devices(self, images, pci_bus={"aobject": "pci.0"}): - """ Create the block devcies. """ + """Create the block devcies.""" self._hotplugged_devs.clear() for img in images: bus_name = None self._hotplugged_devs[img] = [] img_params = self.vm.params.object_params(img) devices_created = getattr( - self.vm.devices, '%s_define_by_params' % self._dev_type['name'])( - img, img_params, self._dev_type['media'], pci_bus=pci_bus) + self.vm.devices, "{}_define_by_params".format(self._dev_type["name"]) + )(img, img_params, self._dev_type["media"], pci_bus=pci_bus) for dev in reversed(devices_created): qid = dev.get_qid() - if isinstance(dev, qdevices.QObject) and dev.get_param( - 'backend') == 'secret' and qid.startswith('%s_' % img): + if ( + isinstance(dev, qdevices.QObject) + and dev.get_param("backend") == "secret" + and qid.startswith(f"{img}_") + ): self._hotplugged_devs[img].insert(0, dev) - elif qid.endswith('_%s' % img) or qid == img: + elif qid.endswith(f"_{img}") or qid == img: self._hotplugged_devs[img].insert(0, dev) - bus = dev.get_param('bus') + bus = dev.get_param("bus") if bus: - bus_name = bus.rsplit('.')[0] + bus_name = bus.rsplit(".")[0] # Search the corresponding HBA device to be plugged. elif bus_name == dev.get_qid() and dev not in self.vm.devices: self._hotplugged_devs[img].insert(-1, dev) @@ -268,37 +291,37 @@ def _plug(self, plug_func, monitor, action): end = time.time() + self.ACQUIRE_LOCK_TIMEOUT while time.time() < end: try: - return plug_func( - monitor - ) if action == UNPLUG else plug_func( - monitor, self._qemu_version + return ( + plug_func(monitor) + if action == UNPLUG + else plug_func(monitor, self._qemu_version) ) except MonitorLockError: pass else: - return plug_func( - monitor - ) if action == UNPLUG else plug_func( - monitor, self._qemu_version + return ( + plug_func(monitor) + if action == UNPLUG + else plug_func(monitor, self._qemu_version) ) def _hotplug_atomic(self, device, monitor, bus=None): - """ Function hot plug device to devices representation. 
""" + """Function hot plug device to devices representation.""" self.vm.devices.set_dirty() - qdev_out = '' + qdev_out = "" if isinstance(device, qdevices.QDevice): - dev_bus = device.get_param('bus') + dev_bus = device.get_param("bus") if bus is None: - if self.vm.devices.is_pci_device(device['driver']): - bus = self.vm.devices.get_buses({'aobject': 'pci.0'})[0] + if self.vm.devices.is_pci_device(device["driver"]): + bus = self.vm.devices.get_buses({"aobject": "pci.0"})[0] if not isinstance(device.parent_bus, (list, tuple)): device.parent_bus = [device.parent_bus] for parent_bus in device.parent_bus: for _bus in self.vm.devices.get_buses(parent_bus): - if _bus.bus_item == 'bus': + if _bus.bus_item == "bus": if dev_bus: - dev_bus_name = dev_bus.rsplit('.')[0] + dev_bus_name = dev_bus.rsplit(".")[0] if _bus.busid: if dev_bus_name == _bus.busid: bus = _bus @@ -322,24 +345,29 @@ def _hotplug_atomic(self, device, monitor, bus=None): qdev_out = self.vm.devices.insert(device) if not isinstance(qdev_out, list) or len(qdev_out) != 1: raise NotImplementedError( - "This device %s require to hotplug multiple devices %s, " - "which is not supported." % (device, out)) + f"This device {device} require to hotplug multiple devices {out}, " + "which is not supported." + ) if ver_out is True: self.vm.devices.set_clean() except DeviceError as exc: raise DeviceHotplugError( - device, 'According to qemu_device: %s' % exc, self, ver_out) + device, f"According to qemu_device: {exc}", self, ver_out + ) return out, ver_out def _unplug_atomic(self, device, monitor): - """ Function unplug device to devices representation. """ + """Function unplug device to devices representation.""" device = self.vm.devices[device] self.vm.devices.set_dirty() out = self._plug(device.unplug, monitor, UNPLUG) if not utils_misc.wait_for( - lambda: device.verify_unplug(out, monitor) is True, - first=1, step=5, timeout=self.VERIFY_UNPLUG_TIMEOUT): + lambda: device.verify_unplug(out, monitor) is True, + first=1, + step=5, + timeout=self.VERIFY_UNPLUG_TIMEOUT, + ): self.vm.devices.set_clean() return out, device.verify_unplug(out, monitor) ver_out = device.verify_unplug(out, monitor) @@ -364,9 +392,11 @@ def _unplug_atomic(self, device, monitor): child_nodes = node.get_child_nodes() recursive = True if len(child_nodes) > 0 else False if not node.verify_unplug( - self._plug(node.unplug, monitor, UNPLUG), monitor): + self._plug(node.unplug, monitor, UNPLUG), monitor + ): raise DeviceUnplugError( - node, "Failed to unplug blockdev node.", self) + node, "Failed to unplug blockdev node.", self + ) self.vm.devices.remove(node, recursive) if parent_node: parent_node.del_child_node(node) @@ -377,34 +407,43 @@ def _unplug_atomic(self, device, monitor): self.vm.devices.set_clean() elif out is False: raise DeviceUnplugError( - device, "Device wasn't unplugged in qemu, but it was " - "unplugged in device representation.", self) + device, + "Device wasn't unplugged in qemu, but it was " + "unplugged in device representation.", + self, + ) except (DeviceError, KeyError) as exc: device.unplug_unhook() raise DeviceUnplugError(device, exc, self) return out, ver_out def _plug_devs(self, action, devices_dict, monitor, bus=None, interval=0): - """ Plug devices. 
""" + """Plug devices.""" for img, devices in devices_dict.items(): for device in devices: args = (device, monitor) - if (isinstance(device, qdevices.QDevice) and - bus is not None and - self.vm.devices.is_pci_device(device['driver'])): + if ( + isinstance(device, qdevices.QDevice) + and bus is not None + and self.vm.devices.is_pci_device(device["driver"]) + ): args += (bus,) with _LOCK: - _QMP_OUTPUT[device.get_qid()] = getattr( - self, '_%s_atomic' % action)(*args) + _QMP_OUTPUT[device.get_qid()] = getattr(self, f"_{action}_atomic")( + *args + ) time.sleep(interval) def _hotplug_devs(self, images, monitor, bus=None, interval=0): """ Hot plug the block devices which are defined by images. """ - LOG_JOB.info("Start to hotplug devices \"%s\" by monitor %s.", - ' '.join(images), monitor.name) - args = (images, {'aobject': 'pci.0' if bus is None else bus.aobject}) + LOG_JOB.info( + 'Start to hotplug devices "%s" by monitor %s.', + " ".join(images), + monitor.name, + ) + args = (images, {"aobject": "pci.0" if bus is None else bus.aobject}) self._create_devices(*args) self._plug_devs(HOTPLUG, self._hotplugged_devs, monitor, bus, interval) @@ -413,38 +452,46 @@ def _unplug_devs(self, images, monitor, interval=0): Unplug the block devices which are defined by images. """ self._unplugged_devs.clear() - devs = [dev for dev in self.vm.devices if isinstance( - dev, (qdevices.QDevice, qdevices.QObject))] + devs = [ + dev + for dev in self.vm.devices + if isinstance(dev, (qdevices.QDevice, qdevices.QObject)) + ] for img in images: self._unplugged_devs[img] = [] for dev in devs: qid = dev.get_qid() - if qid == img or qid.startswith('%s_' % img): + if qid == img or qid.startswith(f"{img}_"): self._unplugged_devs[img].insert(0, dev) if qid == img: break else: - raise TestError('No such device \'%s\' in VM\'s devices.' % img) + raise TestError(f"No such device '{img}' in VM's devices.") # Search the corresponding HBA device to be unplugged. for img in list(self._unplugged_devs.keys()): - _dev = next((_ for _ in self._unplugged_devs[img] if _.get_qid() == img)) - _dev_bus = _dev.get_param('bus') + _dev = next(_ for _ in self._unplugged_devs[img] if _.get_qid() == img) + _dev_bus = _dev.get_param("bus") if _dev_bus: - bus_name = _dev_bus.rsplit('.')[0] + bus_name = _dev_bus.rsplit(".")[0] for parent_bus in _dev.parent_bus: for bus in self.vm.devices.get_buses(parent_bus, True): - if bus_name == bus.busid.rsplit('.')[0]: + if bus_name == bus.busid.rsplit(".")[0]: if len(bus) == 1 and img in HOTPLUGGED_HBAS: - self._unplugged_devs[img].append(HOTPLUGGED_HBAS.pop(img)) + self._unplugged_devs[img].append( + HOTPLUGGED_HBAS.pop(img) + ) break - LOG_JOB.info("Start to unplug devices \"%s\" by monitor %s.", - ' '.join(images), monitor.name) + LOG_JOB.info( + 'Start to unplug devices "%s" by monitor %s.', + " ".join(images), + monitor.name, + ) self._plug_devs(UNPLUG, self._unplugged_devs, monitor, interval=interval) def _plug_devs_threads(self, action, images, bus, timeout, interval=0): - """ Threads that plug blocks devices. 
""" + """Threads that plug blocks devices.""" self._orig_disks = self._list_all_disks() if images: self._imgs = images.split() @@ -456,8 +503,9 @@ def _plug_devs_threads(self, action, images, bus, timeout, interval=0): LOG_JOB.info("All %s threads finished.", action) @_verify_plugged_num(action=HOTPLUG) - def hotplug_devs_serial(self, images=None, monitor=None, bus=None, - timeout=300, interval=0): + def hotplug_devs_serial( + self, images=None, monitor=None, bus=None, timeout=300, interval=0 + ): """ Hot plug the block devices by serial. @@ -477,7 +525,7 @@ def hotplug_devs_serial(self, images=None, monitor=None, bus=None, monitor = self.vm.monitor if images: self._imgs = [img for img in images.split()] - if set(self._imgs) <= set(self.vm.params['cdroms'].split()): + if set(self._imgs) <= set(self.vm.params["cdroms"].split()): self._dev_type = CDROM self._hotplug_devs(self._imgs, monitor, bus, interval) self._check_qmp_outputs(HOTPLUG) @@ -501,7 +549,7 @@ def unplug_devs_serial(self, images=None, monitor=None, timeout=300, interval=0) monitor = self.vm.monitor if images: self._imgs = [img for img in images.split()] - if set(self._imgs) <= set(self.vm.params['cdroms'].split()): + if set(self._imgs) <= set(self.vm.params["cdroms"].split()): self._dev_type = CDROM self._unplug_devs(self._imgs, monitor, interval) self._check_qmp_outputs(UNPLUG) diff --git a/provider/block_dirty_bitmap.py b/provider/block_dirty_bitmap.py index 0203047fb3..e9ccb7cc12 100644 --- a/provider/block_dirty_bitmap.py +++ b/provider/block_dirty_bitmap.py @@ -1,18 +1,15 @@ """ Module to provide functions related to block dirty bitmap operations. """ -import time + import logging +import time from functools import partial from avocado import fail_on +from virttest import data_dir, qemu_monitor, qemu_storage, storage -from virttest import data_dir -from virttest import storage -from virttest import qemu_storage -from virttest import qemu_monitor - -LOG_JOB = logging.getLogger('avocado.test') +LOG_JOB = logging.getLogger("avocado.test") def parse_params(vm, params): @@ -26,12 +23,13 @@ def parse_params(vm, params): json_backend_list = ["ceph", "iscsi_direct"] if target_image_params["image_backend"] in json_backend_list: get_image_name = qemu_storage.get_image_json - target_image_filename = get_image_name(target_image, - target_image_params, - data_dir.get_data_dir()) + target_image_filename = get_image_name( + target_image, target_image_params, data_dir.get_data_dir() + ) else: target_image_filename = storage.get_image_filename( - target_image_params, data_dir.get_data_dir()) + target_image_params, data_dir.get_data_dir() + ) target_device = vm.get_block({"file": target_image_filename}) bitmap_params["target_device"] = target_device bitmaps.append(bitmap_params) @@ -66,8 +64,9 @@ def check_bitmap_existence(bitmaps, bitmap_params, expected_existence=True): """ bname = bitmap_params.get("bitmap_name") dev = bitmap_params.get("target_device") - bitmap = (dev in bitmaps and - next((b for b in bitmaps[dev] if b["name"] == bname), {})) + bitmap = dev in bitmaps and next( + (b for b in bitmaps[dev] if b["name"] == bname), {} + ) return bool(bitmap) == expected_existence @@ -80,13 +79,13 @@ def block_dirty_bitmap_add(vm, bitmap_params): mapping = {} for item in ["persistent", "disabled"]: mapping[item] = { - "on": { - item: True}, "off": { - item: False}, "default": { - item: None}} + "on": {item: True}, + "off": {item: False}, + "default": {item: None}, + } kargs = dict(node=target_device, name=bitmap) - if 
bitmap_params.get('bitmap_granularity'): - kargs['granularity'] = bitmap_params['bitmap_granularity'] + if bitmap_params.get("bitmap_granularity"): + kargs["granularity"] = bitmap_params["bitmap_granularity"] for item in ["persistent", "disabled"]: kargs.update(mapping[item][bitmap_params.get(item, "default")]) vm.monitor.block_dirty_bitmap_add(**kargs) @@ -101,8 +100,7 @@ def debug_block_dirty_bitmap_sha256(vm, device, bitmap): :param bitmap: bitmap name :return: sha256 string or None if bitmap is not exists """ - func = qemu_monitor.get_monitor_function( - vm, "debug-block-dirty-bitmap-sha256") + func = qemu_monitor.get_monitor_function(vm, "debug-block-dirty-bitmap-sha256") return func(device, bitmap).get("sha256") @@ -147,10 +145,9 @@ def get_bitmap_by_name(vm, device, name): @fail_on def block_dirty_bitmap_clear(vm, device, name): - qemu_monitor.get_monitor_function( - vm, "block-dirty-bitmap-clear")(device, name) + qemu_monitor.get_monitor_function(vm, "block-dirty-bitmap-clear")(device, name) count = int(get_bitmap_by_name(vm, device, name)["count"]) - msg = "Count of '%s' in device '%s'" % (name, device) + msg = f"Count of '{name}' in device '{device}'" msg += "is '%d' not equal '0' after clear it" % count assert count == 0, msg @@ -167,10 +164,9 @@ def clear_all_bitmaps_in_device(vm, device): @fail_on def block_dirty_bitmap_remove(vm, device, name): """Remove bitmaps on the device one by one""" - qemu_monitor.get_monitor_function( - vm, "block-dirty-bitmap-remove")(device, name) + qemu_monitor.get_monitor_function(vm, "block-dirty-bitmap-remove")(device, name) time.sleep(0.3) - msg = "Bitmap '%s' in device '%s' still exists!" % (name, device) + msg = f"Bitmap '{name}' in device '{device}' still exists!" assert get_bitmap_by_name(vm, device, name) is None, msg @@ -189,8 +185,8 @@ def block_dirty_bitmap_disable(vm, node, name): func = qemu_monitor.get_monitor_function(vm, "block-dirty-bitmap-disable") func(node, name) bitmap = get_bitmap_by_name(vm, node, name) - msg = "block dirty bitmap '%s' is not disabled" % name - assert (bitmap["recording"] is False), msg + msg = f"block dirty bitmap '{name}' is not disabled" + assert bitmap["recording"] is False, msg @fail_on @@ -199,8 +195,8 @@ def block_dirty_bitmap_enable(vm, node, name): func = qemu_monitor.get_monitor_function(vm, "block-dirty-bitmap-enable") func(node, name) bitmap = get_bitmap_by_name(vm, node, name) - msg = "block dirty bitmap '%s' is not enabled" % name - assert (bitmap["recording"] is True), msg + msg = f"block dirty bitmap '{name}' is not enabled" + assert bitmap["recording"] is True, msg def get_bitmaps_in_device(vm, device): @@ -211,9 +207,9 @@ def get_bitmaps_in_device(vm, device): @fail_on -def handle_block_dirty_bitmap_transaction(vm, disabled_params=None, - added_params=None, - merged_params=None): +def handle_block_dirty_bitmap_transaction( + vm, disabled_params=None, added_params=None, merged_params=None +): """ Add/disable/merge bitmaps in one transaction. 
:param vm: an active VM object @@ -231,34 +227,41 @@ def handle_block_dirty_bitmap_transaction(vm, disabled_params=None, actions = [] if disabled_params: - bitmap_disable_cmd = disabled_params.get('bitmap_disable_cmd', - 'block-dirty-bitmap-disable') - bitmap_data = {"node": disabled_params['bitmap_device_node'], - "name": disabled_params['bitmap_name']} + bitmap_disable_cmd = disabled_params.get( + "bitmap_disable_cmd", "block-dirty-bitmap-disable" + ) + bitmap_data = { + "node": disabled_params["bitmap_device_node"], + "name": disabled_params["bitmap_name"], + } actions.append({"type": bitmap_disable_cmd, "data": bitmap_data}) if added_params: - bitmap_add_cmd = added_params.get('bitmap_add_cmd', - 'block-dirty-bitmap-add') - bitmap_data = {"node": added_params['bitmap_device_node'], - "name": added_params['bitmap_name']} - if added_params.get('bitmap_granularity'): - bitmap_data['granularity'] = added_params['bitmap_granularity'] - - mapping = {'on': True, 'yes': True, 'off': False, 'no': False} - if added_params.get('bitmap_persistent'): - bitmap_data['persistent'] = mapping[added_params['bitmap_persistent']] - if added_params.get('bitmap_disabled'): - bitmap_data['disabled'] = mapping[added_params['bitmap_disabled']] - actions.append({'type': bitmap_add_cmd, 'data': bitmap_data}) + bitmap_add_cmd = added_params.get("bitmap_add_cmd", "block-dirty-bitmap-add") + bitmap_data = { + "node": added_params["bitmap_device_node"], + "name": added_params["bitmap_name"], + } + if added_params.get("bitmap_granularity"): + bitmap_data["granularity"] = added_params["bitmap_granularity"] + + mapping = {"on": True, "yes": True, "off": False, "no": False} + if added_params.get("bitmap_persistent"): + bitmap_data["persistent"] = mapping[added_params["bitmap_persistent"]] + if added_params.get("bitmap_disabled"): + bitmap_data["disabled"] = mapping[added_params["bitmap_disabled"]] + actions.append({"type": bitmap_add_cmd, "data": bitmap_data}) if merged_params: - bitmap_merge_cmd = merged_params.get('bitmap_merge_cmd', - 'block-dirty-bitmap-merge') - bitmap_data = {'node': merged_params['bitmap_device_node'], - 'target': merged_params['bitmap_target'], - 'bitmaps': merged_params['bitmap_sources']} - actions.append({'type': bitmap_merge_cmd, 'data': bitmap_data}) + bitmap_merge_cmd = merged_params.get( + "bitmap_merge_cmd", "block-dirty-bitmap-merge" + ) + bitmap_data = { + "node": merged_params["bitmap_device_node"], + "target": merged_params["bitmap_target"], + "bitmaps": merged_params["bitmap_sources"], + } + actions.append({"type": bitmap_merge_cmd, "data": bitmap_data}) if actions: arguments = {"actions": actions} diff --git a/provider/blockdev_backup_base.py b/provider/blockdev_backup_base.py index 5290440080..1b7706a592 100644 --- a/provider/blockdev_backup_base.py +++ b/provider/blockdev_backup_base.py @@ -2,23 +2,23 @@ from avocado.core import exceptions from avocado.utils import memory - -from virttest import data_dir -from virttest import env_process -from virttest import qemu_storage -from virttest import error_context -from virttest import utils_disk -from virttest import qemu_vm +from virttest import ( + data_dir, + env_process, + error_context, + qemu_storage, + qemu_vm, + utils_disk, +) from virttest.qemu_capabilities import Flags from provider import backup_utils from provider.virt_storage.storage_admin import sp_admin -LOG_JOB = logging.getLogger('avocado.test') - +LOG_JOB = logging.getLogger("avocado.test") -class BlockdevBackupBaseTest(object): +class BlockdevBackupBaseTest: def 
__init__(self, test, params, env): self.main_vm = None self.clone_vm = None @@ -54,13 +54,10 @@ def __source_disk_define_by_params(self, params, image_name): def __target_disk_define_by_params(self, params, image_name): if params.get("random_cluster_size") == "yes": - blacklist = list( - map(int, params.objects("cluster_size_blacklist"))) + blacklist = list(map(int, params.objects("cluster_size_blacklist"))) cluster_size = backup_utils.generate_random_cluster_size(blacklist) params["image_cluster_size"] = cluster_size - LOG_JOB.info( - "set target image cluster size to '%s'", - cluster_size) + LOG_JOB.info("set target image cluster size to '%s'", cluster_size) params.setdefault("target_path", data_dir.get_data_dir()) return sp_admin.volume_define_by_params(image_name, params) @@ -68,13 +65,10 @@ def preprocess_data_disks(self): for tag in self.source_disks: params = self.params.object_params(tag) if params.get("random_cluster_size") == "yes": - blacklist = list( - map(int, params.objects("cluster_size_blacklist"))) + blacklist = list(map(int, params.objects("cluster_size_blacklist"))) cluster_size = backup_utils.generate_random_cluster_size(blacklist) params["image_cluster_size"] = cluster_size - LOG_JOB.info( - "set image cluster size to '%s'", - cluster_size) + LOG_JOB.info("set image cluster size to '%s'", cluster_size) disk = self.__source_disk_define_by_params(params, tag) disk.create(params) @@ -94,7 +88,8 @@ def prepare_data_disks(self): for tag in self.source_disks: self.format_data_disk(tag) backup_utils.generate_tempfile( - self.main_vm, self.disks_info[tag][1], "data") + self.main_vm, self.disks_info[tag][1], "data" + ) def verify_data_files(self): session = self.clone_vm.wait_for_login() @@ -135,9 +130,10 @@ def format_data_disk(self, tag): break else: raise exceptions.TestFail("disk not found in guest ...") - disk_path = "/dev/%s1" % kname + disk_path = f"/dev/{kname}1" mount_point = utils_disk.configure_empty_linux_disk( - session, disk_id, disk_size)[0] + session, disk_id, disk_size + )[0] self.disks_info[tag] = [disk_path, mount_point] finally: session.close() @@ -157,29 +153,31 @@ def prepare_test(self): @error_context.context_aware def blockdev_backup(self): - assert len( - self.target_disks) >= len( - self.source_disks), "No enough target disks define in cfg!" - source_lst = list(map(lambda x: "drive_%s" % x, self.source_disks)) - target_lst = list(map(lambda x: "drive_%s" % x, self.target_disks)) - bitmap_lst = list(map(lambda x: "bitmap_%s" % - x, range(len(self.source_disks)))) + assert len(self.target_disks) >= len( + self.source_disks + ), "Not enough target disks defined in cfg!" 
+ source_lst = list(map(lambda x: f"drive_{x}", self.source_disks)) + target_lst = list(map(lambda x: f"drive_{x}", self.target_disks)) + bitmap_lst = list(map(lambda x: f"bitmap_{x}", range(len(self.source_disks)))) try: if len(source_lst) > 1: error_context.context( - "backup %s to %s, options: %s" % - (source_lst, target_lst, self.backup_options)) + f"backup {source_lst} to {target_lst}, options: {self.backup_options}" + ) backup_utils.blockdev_batch_backup( - self.main_vm, source_lst, target_lst, bitmap_lst, **self.backup_options) + self.main_vm, + source_lst, + target_lst, + bitmap_lst, + **self.backup_options, + ) else: error_context.context( - "backup %s to %s, options: %s" % - (source_lst[0], target_lst[0], self.backup_options)) + f"backup {source_lst[0]} to {target_lst[0]}, options: {self.backup_options}" + ) backup_utils.blockdev_backup( - self.main_vm, - source_lst[0], - target_lst[0], - **self.backup_options) + self.main_vm, source_lst[0], target_lst[0], **self.backup_options + ) finally: memory.drop_caches() diff --git a/provider/blockdev_backup_parallel.py b/provider/blockdev_backup_parallel.py index 5dce259ef5..8b6f4daa53 100644 --- a/provider/blockdev_backup_parallel.py +++ b/provider/blockdev_backup_parallel.py @@ -4,11 +4,9 @@ class BlockdevBackupParallelTest(BlockdevBackupBaseTest): - def blockdev_backup(self): parallel_tests = self.params.objects("parallel_tests") - targets = list([getattr(self, t) - for t in parallel_tests if hasattr(self, t)]) - backup_func = super(BlockdevBackupParallelTest, self).blockdev_backup + targets = list([getattr(self, t) for t in parallel_tests if hasattr(self, t)]) + backup_func = super().blockdev_backup targets.append(backup_func) utils_misc.parallel(targets) diff --git a/provider/blockdev_base.py b/provider/blockdev_base.py index 8c417ee459..76a070dd69 100644 --- a/provider/blockdev_base.py +++ b/provider/blockdev_base.py @@ -2,24 +2,23 @@ from avocado.core import exceptions from avocado.utils import memory - -from virttest import data_dir -from virttest import utils_misc -from virttest import env_process -from virttest import qemu_storage -from virttest import error_context -from virttest import utils_disk +from virttest import ( + data_dir, + env_process, + error_context, + qemu_storage, + utils_disk, + utils_misc, +) from virttest.qemu_capabilities import Flags -from provider import backup_utils -from provider import job_utils +from provider import backup_utils, job_utils from provider.virt_storage.storage_admin import sp_admin -LOG_JOB = logging.getLogger('avocado.test') - +LOG_JOB = logging.getLogger("avocado.test") -class BlockdevBaseTest(object): +class BlockdevBaseTest: def __init__(self, test, params, env): self.main_vm = None self.params = params @@ -45,13 +44,10 @@ def source_disk_define_by_params(self, params, image_name): def target_disk_define_by_params(self, params, image_name): if params.get("random_cluster_size") == "yes": - blacklist = list( - map(int, params.objects("cluster_size_blacklist"))) + blacklist = list(map(int, params.objects("cluster_size_blacklist"))) cluster_size = backup_utils.generate_random_cluster_size(blacklist) params["image_cluster_size"] = cluster_size - LOG_JOB.info( - "set target image cluster size to '%s'", - cluster_size) + LOG_JOB.info("set target image cluster size to '%s'", cluster_size) params.setdefault("target_path", data_dir.get_data_dir()) vol = sp_admin.volume_define_by_params(image_name, params) return vol @@ -60,14 +56,10 @@ def preprocess_data_disks(self): for tag in 
self.params.objects("source_images"): params = self.params.object_params(tag) if params.get("random_cluster_size") == "yes": - blacklist = list( - map(int, params.objects("cluster_size_blacklist"))) - cluster_size = backup_utils.generate_random_cluster_size( - blacklist) + blacklist = list(map(int, params.objects("cluster_size_blacklist"))) + cluster_size = backup_utils.generate_random_cluster_size(blacklist) params["image_cluster_size"] = cluster_size - LOG_JOB.info( - "set image cluster size to '%s'", - cluster_size) + LOG_JOB.info("set image cluster size to '%s'", cluster_size) disk = self.source_disk_define_by_params(params, tag) disk.create(params) self.trash.append(disk) @@ -97,7 +89,8 @@ def generate_data_file(self, tag, filename=None): image_size = params.get("tempfile_size", "10M") timeout = params.get_numeric("create_tempfile_timeout", 720) backup_utils.generate_tempfile( - self.main_vm, self.disks_info[tag][1], filename, image_size, timeout) + self.main_vm, self.disks_info[tag][1], filename, image_size, timeout + ) if tag not in self.files_info: self.files_info[tag] = [filename] @@ -130,12 +123,11 @@ def verify_data_files(self): try: backup_utils.refresh_mounts(self.disks_info, self.params, session) for tag, info in self.disks_info.items(): - if tag != 'image1': + if tag != "image1": LOG_JOB.debug("mount target disk in VM!") utils_disk.mount(info[0], info[1], session=session) for data_file in self.files_info[tag]: - backup_utils.verify_file_md5( - self.clone_vm, info[1], data_file) + backup_utils.verify_file_md5(self.clone_vm, info[1], data_file) finally: session.close() @@ -143,13 +135,13 @@ def verify_data_files(self): def format_data_disk(self, tag): session = self.main_vm.wait_for_login() try: - info = backup_utils.get_disk_info_by_param(tag, self.params, - session) + info = backup_utils.get_disk_info_by_param(tag, self.params, session) if info is None: raise exceptions.TestFail("disk not found in guest ...") - disk_path = "/dev/%s1" % info['kname'] + disk_path = "/dev/{}1".format(info["kname"]) mount_point = utils_disk.configure_empty_linux_disk( - session, info['kname'], info['size'])[0] + session, info["kname"], info["size"] + )[0] self.disks_info[tag] = [disk_path, mount_point] finally: session.close() @@ -207,7 +199,7 @@ def clean_images(self): # A StorageVolume object sp_admin.remove_volume(img) except Exception as e: - LOG_JOB.warn(str(e)) + LOG_JOB.warning(str(e)) def check_block_jobs_started(self, jobid_list, tmo=10): """ diff --git a/provider/blockdev_commit_base.py b/provider/blockdev_commit_base.py index 87745aa2e1..8273f7dcca 100644 --- a/provider/blockdev_commit_base.py +++ b/provider/blockdev_commit_base.py @@ -1,19 +1,14 @@ import logging -from virttest import qemu_storage -from virttest import data_dir -from virttest import utils_disk - -from provider import backup_utils -from provider import job_utils +from virttest import data_dir, qemu_storage, utils_disk +from provider import backup_utils, job_utils from provider.virt_storage.storage_admin import sp_admin -LOG_JOB = logging.getLogger('avocado.test') - +LOG_JOB = logging.getLogger("avocado.test") -class BlockDevCommitTest(object): +class BlockDevCommitTest: def __init__(self, test, params, env): self.env = env self.test = test @@ -24,7 +19,7 @@ def __init__(self, test, params, env): @staticmethod def get_node_name(tag): - return "drive_%s" % tag + return f"drive_{tag}" def prepare_main_vm(self): return self.env.get_vm(self.params["main_vm"]) @@ -35,8 +30,7 @@ def get_image_by_tag(self, name): return 
qemu_storage.QemuImg(image_params, image_dir, name) def prepare_snapshot_file(self, snapshot_tags): - self.snapshot_images = list( - map(self.get_image_by_tag, snapshot_tags)) + self.snapshot_images = list(map(self.get_image_by_tag, snapshot_tags)) params = self.params.copy() params.setdefault("target_path", data_dir.get_data_dir()) for tag in snapshot_tags: @@ -46,8 +40,7 @@ def prepare_snapshot_file(self, snapshot_tags): def verify_data_file(self): for info in self.files_info: mount_point, filename = info[0], info[1] - backup_utils.verify_file_md5( - self.main_vm, mount_point, filename) + backup_utils.verify_file_md5(self.main_vm, mount_point, filename) def create_snapshots(self, snapshot_tags, device): options = ["node", "overlay"] @@ -59,8 +52,7 @@ def create_snapshots(self, snapshot_tags, device): if idx == 0: arguments["node"] = self.device_node else: - arguments["node"] = self.get_node_name( - snapshot_tags[idx - 1]) + arguments["node"] = self.get_node_name(snapshot_tags[idx - 1]) self.main_vm.monitor.cmd(cmd, dict(arguments)) for info in self.disks_info: if device in info: @@ -119,24 +111,21 @@ def configure_data_disk(self, tag): disk_id = self.get_linux_disk_path(session, disk_size) assert disk_id, "Disk not found in guest!" mount_point = utils_disk.configure_empty_linux_disk( - session, disk_id, disk_size)[0] - self.disks_info.append([ - r"/dev/%s1" % - disk_id, mount_point, tag]) + session, disk_id, disk_size + )[0] + self.disks_info.append([rf"/dev/{disk_id}1", mount_point, tag]) else: - disk_id = utils_disk.get_windows_disks_index( - session, disk_size) + disk_id = utils_disk.get_windows_disks_index(session, disk_size) driver_letter = utils_disk.configure_empty_windows_disk( - session, disk_id, disk_size)[0] - mount_point = r"%s:\\" % driver_letter + session, disk_id, disk_size + )[0] + mount_point = rf"{driver_letter}:\\" self.disks_info.append([disk_id, mount_point, tag]) finally: session.close() - def generate_tempfile(self, root_dir, filename="data", - size="10M", timeout=360): - backup_utils.generate_tempfile( - self.main_vm, root_dir, filename, size, timeout) + def generate_tempfile(self, root_dir, filename="data", size="10M", timeout=360): + backup_utils.generate_tempfile(self.main_vm, root_dir, filename, size, timeout) self.files_info.append([root_dir, filename]) def pre_test(self): diff --git a/provider/blockdev_full_backup_base.py b/provider/blockdev_full_backup_base.py index 98075dec7a..34b0aaae1b 100644 --- a/provider/blockdev_full_backup_base.py +++ b/provider/blockdev_full_backup_base.py @@ -2,11 +2,8 @@ class BlockdevFullBackupBaseTest(BlockdevBackupBaseTest): - def get_backup_options(self, params): - extra_options = super( - BlockdevFullBackupBaseTest, - self).get_backup_options(params) + extra_options = super().get_backup_options(params) extra_options["sync"] = "full" return extra_options diff --git a/provider/blockdev_full_backup_parallel.py b/provider/blockdev_full_backup_parallel.py index d2b1b932e4..ab1aa89489 100644 --- a/provider/blockdev_full_backup_parallel.py +++ b/provider/blockdev_full_backup_parallel.py @@ -3,5 +3,6 @@ class BlockdevFullBackupParallelTest( - BlockdevFullBackupBaseTest, BlockdevBackupParallelTest): + BlockdevFullBackupBaseTest, BlockdevBackupParallelTest +): pass diff --git a/provider/blockdev_live_backup_base.py b/provider/blockdev_live_backup_base.py index 6a869b5157..f360e99846 100644 --- a/provider/blockdev_live_backup_base.py +++ b/provider/blockdev_live_backup_base.py @@ -16,8 +16,8 @@ granularity: granularity in 
BlockDirtyInfo persistent: persistent in BlockDirtyInfo """ -import json +import json from functools import partial from provider.backup_utils import blockdev_batch_backup @@ -28,11 +28,11 @@ class BlockdevLiveBackupBaseTest(BlockdevBaseTest): """Live backup base test module""" def __init__(self, test, params, env): - super(BlockdevLiveBackupBaseTest, self).__init__(test, params, env) + super().__init__(test, params, env) self.clone_vm = None self._target_images = [] self._source_images = params.objects("source_images") - self._source_nodes = ["drive_%s" % src for src in self._source_images] + self._source_nodes = [f"drive_{src}" for src in self._source_images] self._full_backup_options = self._get_full_backup_options() self._full_bk_images = [] self._full_bk_nodes = [] @@ -43,8 +43,8 @@ def _init_arguments_by_params(self, tag): image_params = self.params.object_params(tag) image_chain = image_params.objects("image_backup_chain") self._full_bk_images.append(image_chain[0]) - self._full_bk_nodes.append("drive_%s" % image_chain[0]) - self._bitmaps.append("bitmap_%s" % tag) + self._full_bk_nodes.append(f"drive_{image_chain[0]}") + self._bitmaps.append(f"bitmap_{tag}") self._target_images.append(image_chain[-1]) def _convert_args(self, backup_options): @@ -60,20 +60,20 @@ def _get_full_backup_options(self): return options def _configure_system_disk(self, tag): - self.disks_info[tag] = [ - "system", self.params.get("mnt_on_sys_dsk", "/var/tmp")] + self.disks_info[tag] = ["system", self.params.get("mnt_on_sys_dsk", "/var/tmp")] def _configure_data_disk(self, tag): self.format_data_disk(tag) def remove_files_from_system_image(self, tmo=60): """Remove testing files from system image""" - tag_dir_list = [(t, d[1]) - for t, d in self.disks_info.items() if d[0] == "system"] + tag_dir_list = [ + (t, d[1]) for t, d in self.disks_info.items() if d[0] == "system" + ] if tag_dir_list: tag, root_dir = tag_dir_list[0] - files = ["%s/%s" % (root_dir, f) for f in self.files_info[tag]] - rm_cmd = "rm -f %s" % " ".join(files) + files = [f"{root_dir}/{f}" for f in self.files_info[tag]] + rm_cmd = "rm -f {}".format(" ".join(files)) if self.clone_vm and self.clone_vm.is_alive(): self.clone_vm.destroy() @@ -93,9 +93,9 @@ def prepare_data_disk(self, tag): self._configure_system_disk(tag) else: self._configure_data_disk(tag) - self.generate_data_file(tag, filename='base') + self.generate_data_file(tag, filename="base") - def generate_inc_files(self, filename='inc'): + def generate_inc_files(self, filename="inc"): """Create new files on data disks""" f = partial(self.generate_data_file, filename=filename) list(map(f, self._source_images)) @@ -112,13 +112,17 @@ def prepare_clone_vm(self): self.clone_vm = self.main_vm.clone(params=clone_params) self.clone_vm.create() self.clone_vm.verify_alive() - self.env.register_vm("%s_clone" % self.clone_vm.name, self.clone_vm) + self.env.register_vm(f"{self.clone_vm.name}_clone", self.clone_vm) def do_full_backup(self): - blockdev_batch_backup(self.main_vm, self._source_nodes, - self._full_bk_nodes, self._bitmaps, - **self._full_backup_options) + blockdev_batch_backup( + self.main_vm, + self._source_nodes, + self._full_bk_nodes, + self._bitmaps, + **self._full_backup_options, + ) def post_test(self): self.remove_files_from_system_image() - super(BlockdevLiveBackupBaseTest, self).post_test() + super().post_test() diff --git a/provider/blockdev_mirror_base.py b/provider/blockdev_mirror_base.py index 75c9711714..65801bb83a 100644 --- a/provider/blockdev_mirror_base.py +++ 
b/provider/blockdev_mirror_base.py @@ -37,14 +37,13 @@ class BlockdevMirrorBaseTest(blockdev_base.BlockdevBaseTest): """ def __init__(self, test, params, env): - super(BlockdevMirrorBaseTest, self).__init__(test, params, env) + super().__init__(test, params, env) self.clone_vm = None self._source_images = params.objects("source_images") self._target_images = params.objects("target_images") - self._source_nodes = ["drive_%s" % src for src in self._source_images] - self._target_nodes = ["drive_%s" % tgt for tgt in self._target_images] - self._backup_options = list(map(self._get_backup_options, - self._source_images)) + self._source_nodes = [f"drive_{src}" for src in self._source_images] + self._target_nodes = [f"drive_{tgt}" for tgt in self._target_images] + self._backup_options = list(map(self._get_backup_options, self._source_images)) def _get_backup_options(self, source_image): params = self.params.object_params(source_image) @@ -60,19 +59,20 @@ def _get_backup_options(self, source_image): return backup_options def _configure_system_disk(self, tag): - self.disks_info[tag] = [ - "system", self.params.get("mnt_on_sys_dsk", "/var/tmp")] + self.disks_info[tag] = ["system", self.params.get("mnt_on_sys_dsk", "/var/tmp")] def _configure_data_disk(self, tag): self.format_data_disk(tag) def remove_files_from_system_image(self, tmo=60): """Remove testing files from system image""" - tag_dir_list = [(t, d[1]) for t, d in six.iteritems(self.disks_info) if d[0] == "system"] + tag_dir_list = [ + (t, d[1]) for t, d in six.iteritems(self.disks_info) if d[0] == "system" + ] if tag_dir_list: tag, root_dir = tag_dir_list[0] - files = ["%s/%s" % (root_dir, f) for f in self.files_info[tag]] - rm_cmd = "rm -f %s" % " ".join(files) + files = [f"{root_dir}/{f}" for f in self.files_info[tag]] + rm_cmd = "rm -f {}".format(" ".join(files)) # restart main vm for the original system image is offlined # and the mirror image is attached after block-mirror @@ -100,39 +100,45 @@ def clone_vm_with_mirrored_images(self): params = self.main_vm.params.copy() system_image = params.objects("images")[0] - images = [system_image] + \ - self._target_images if self._source_images[0] != system_image else self._target_images + images = ( + [system_image] + self._target_images + if self._source_images[0] != system_image + else self._target_images + ) params["images"] = " ".join(images) self.clone_vm = self.main_vm.clone(params=params) self.clone_vm.create() self.clone_vm.verify_alive() - self.env.register_vm("%s_clone" % self.clone_vm.name, self.clone_vm) + self.env.register_vm(f"{self.clone_vm.name}_clone", self.clone_vm) def add_target_data_disks(self): """Hot plug target disks to VM with qmp monitor""" for tag in self._target_images: disk = self.target_disk_define_by_params( - self.params.object_params(tag), tag) + self.params.object_params(tag), tag + ) disk.hotplug(self.main_vm) self.trash.append(disk) def _check_mirrored_block_node_attached(self, source_qdev, target_node): out = self.main_vm.monitor.query("block") for item in out: - if (source_qdev in item["qdev"] - and item["inserted"].get("node-name") == target_node): + if ( + source_qdev in item["qdev"] + and item["inserted"].get("node-name") == target_node + ): break else: - self.test.fail("Device(%s) is not attached to target node(%s)" - % (source_qdev, target_node)) + self.test.fail( + f"Device({source_qdev}) is not attached to target node({target_node})" + ) def check_mirrored_block_nodes_attached(self): """All source devices attach to the mirrored nodes""" for idx, 
target in enumerate(self._target_nodes): - self._check_mirrored_block_node_attached( - self._source_images[idx], target) + self._check_mirrored_block_node_attached(self._source_images[idx], target) def blockdev_mirror(self): """Need to be implemented in specific test case""" diff --git a/provider/blockdev_mirror_nowait.py b/provider/blockdev_mirror_nowait.py index f9843ee909..66b391f26a 100644 --- a/provider/blockdev_mirror_nowait.py +++ b/provider/blockdev_mirror_nowait.py @@ -6,12 +6,9 @@ from functools import partial from avocado.utils import memory - from virttest import utils_misc -from provider import backup_utils -from provider import blockdev_mirror_base -from provider import job_utils +from provider import backup_utils, blockdev_mirror_base, job_utils class BlockdevMirrorNowaitTest(blockdev_mirror_base.BlockdevMirrorBaseTest): @@ -20,7 +17,7 @@ class BlockdevMirrorNowaitTest(blockdev_mirror_base.BlockdevMirrorBaseTest): """ def __init__(self, test, params, env): - super(BlockdevMirrorNowaitTest, self).__init__(test, params, env) + super().__init__(test, params, env) self._jobs = [] def blockdev_mirror(self): @@ -28,16 +25,19 @@ def blockdev_mirror(self): for idx, source_node in enumerate(self._source_nodes): self._jobs.append( backup_utils.blockdev_mirror_nowait( - self.main_vm, source_node, + self.main_vm, + source_node, self._target_nodes[idx], - **self._backup_options[idx] + **self._backup_options[idx], ) ) def wait_mirror_jobs_completed(self): """Wait till all mirror jobs completed in parallel""" - targets = [partial(job_utils.wait_until_block_job_completed, - vm=self.main_vm, job_id=j) for j in self._jobs] + targets = [ + partial(job_utils.wait_until_block_job_completed, vm=self.main_vm, job_id=j) + for j in self._jobs + ] try: utils_misc.parallel(targets) finally: diff --git a/provider/blockdev_mirror_parallel.py b/provider/blockdev_mirror_parallel.py index 6a311ef687..3bc94ef72b 100644 --- a/provider/blockdev_mirror_parallel.py +++ b/provider/blockdev_mirror_parallel.py @@ -8,11 +8,9 @@ from functools import partial from avocado.utils import memory - from virttest import utils_misc -from provider import backup_utils -from provider import blockdev_mirror_base +from provider import backup_utils, blockdev_mirror_base class BlockdevMirrorParallelTest(blockdev_mirror_base.BlockdevMirrorBaseTest): @@ -26,15 +24,18 @@ def blockdev_mirror(self): # e.g. 
parallel_tests = 'stress_test', we should define stress_test # function with no argument parallel_tests = self.params.objects("parallel_tests") - targets = list([getattr(self, t) - for t in parallel_tests if hasattr(self, t)]) + targets = list([getattr(self, t) for t in parallel_tests if hasattr(self, t)]) # block-mirror on all source nodes is in parallel too for idx, source_node in enumerate(self._source_nodes): targets.append( - partial(backup_utils.blockdev_mirror, vm=self.main_vm, - source=source_node, target=self._target_nodes[idx], - **self._backup_options[idx]) + partial( + backup_utils.blockdev_mirror, + vm=self.main_vm, + source=source_node, + target=self._target_nodes[idx], + **self._backup_options[idx], + ) ) try: diff --git a/provider/blockdev_mirror_wait.py b/provider/blockdev_mirror_wait.py index 51c85f4d9f..a97f73b070 100644 --- a/provider/blockdev_mirror_wait.py +++ b/provider/blockdev_mirror_wait.py @@ -5,8 +5,7 @@ from avocado.utils import memory -from provider import backup_utils -from provider import blockdev_mirror_base +from provider import backup_utils, blockdev_mirror_base class BlockdevMirrorWaitTest(blockdev_mirror_base.BlockdevMirrorBaseTest): @@ -18,8 +17,11 @@ def blockdev_mirror(self): """Run block-mirror and wait job done""" try: for idx, source_node in enumerate(self._source_nodes): - backup_utils.blockdev_mirror(self.main_vm, source_node, - self._target_nodes[idx], - **self._backup_options[idx]) + backup_utils.blockdev_mirror( + self.main_vm, + source_node, + self._target_nodes[idx], + **self._backup_options[idx], + ) finally: memory.drop_caches() diff --git a/provider/blockdev_snapshot_base.py b/provider/blockdev_snapshot_base.py index bd4a175885..df3b87841a 100644 --- a/provider/blockdev_snapshot_base.py +++ b/provider/blockdev_snapshot_base.py @@ -1,19 +1,15 @@ import logging -from virttest import qemu_storage -from virttest import data_dir -from virttest import utils_disk +from virttest import data_dir, qemu_storage, utils_disk from virttest.qemu_capabilities import Flags from provider import backup_utils - from provider.virt_storage.storage_admin import sp_admin -LOG_JOB = logging.getLogger('avocado.test') - +LOG_JOB = logging.getLogger("avocado.test") -class BlockDevSnapshotTest(object): +class BlockDevSnapshotTest: def __init__(self, test, params, env): self.env = env self.test = test @@ -35,8 +31,7 @@ def prepare_main_vm(self): def prepare_clone_vm(self): vm_params = self.main_vm.params.copy() - images = self.main_vm.params["images"].replace( - self.base_tag, self.snapshot_tag) + images = self.main_vm.params["images"].replace(self.base_tag, self.snapshot_tag) vm_params["images"] = images return self.main_vm.clone(params=vm_params) @@ -99,7 +94,7 @@ def create_snapshot(self): if not self.is_blockdev_mode(): arguments["snapshot-file"] = self.snapshot_image.image_filename else: - arguments.setdefault("overlay", "drive_%s" % self.snapshot_tag) + arguments.setdefault("overlay", f"drive_{self.snapshot_tag}") return self.main_vm.monitor.cmd(cmd, dict(arguments)) @staticmethod @@ -120,23 +115,21 @@ def configure_data_disk(self): disk_id = self.get_linux_disk_path(session, disk_size) assert disk_id, "Disk not found in guest!" 
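The parallel variants above build a list of zero-argument callables with functools.partial and hand them to virttest's utils_misc.parallel. A minimal sketch of the same pattern using only the standard library; ThreadPoolExecutor stands in for utils_misc.parallel, and mirror_one plus its arguments are hypothetical placeholders, not the provider's real API:

from concurrent.futures import ThreadPoolExecutor
from functools import partial


def mirror_one(vm, source, target, **options):
    # Hypothetical stand-in for a per-device mirror call: issue one
    # blockdev-mirror job and wait for it to converge.
    print(f"mirroring {source} -> {target} on {vm} with {options}")


# Freeze per-device arguments into no-argument callables, mirroring how
# the parallel test classes build their `targets` list.
sources = ["drive_data1", "drive_data2"]
targets = ["drive_mirror1", "drive_mirror2"]
jobs = [
    partial(mirror_one, "vm1", src, tgt, sync="full")
    for src, tgt in zip(sources, targets)
]

# utils_misc.parallel runs such callables concurrently; a plain thread
# pool gives the same effect for illustration.
with ThreadPoolExecutor(max_workers=len(jobs)) as pool:
    for future in [pool.submit(job) for job in jobs]:
        future.result()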
mount_point = utils_disk.configure_empty_linux_disk( - session, disk_id, disk_size)[0] - self.disks_info[self.base_tag] = [r"/dev/%s1" % disk_id, - mount_point] + session, disk_id, disk_size + )[0] + self.disks_info[self.base_tag] = [rf"/dev/{disk_id}1", mount_point] else: - disk_id = utils_disk.get_windows_disks_index( - session, disk_size) + disk_id = utils_disk.get_windows_disks_index(session, disk_size) driver_letter = utils_disk.configure_empty_windows_disk( - session, disk_id, disk_size)[0] - mount_point = r"%s:\\" % driver_letter + session, disk_id, disk_size + )[0] + mount_point = rf"{driver_letter}:\\" self.disks_info[self.base_tag] = [disk_id, mount_point] finally: session.close() - def generate_tempfile(self, root_dir, filename="data", - size="10M", timeout=360): - backup_utils.generate_tempfile( - self.main_vm, root_dir, filename, size, timeout) + def generate_tempfile(self, root_dir, filename="data", size="10M", timeout=360): + backup_utils.generate_tempfile(self.main_vm, root_dir, filename, size, timeout) self.files_info.append([root_dir, filename]) def snapshot_test(self): diff --git a/provider/blockdev_stream_base.py b/provider/blockdev_stream_base.py index 3c4c494689..8597e9c1d5 100644 --- a/provider/blockdev_stream_base.py +++ b/provider/blockdev_stream_base.py @@ -1,20 +1,20 @@ -import time import json +import time from provider import backup_utils from provider.blockdev_snapshot_base import BlockDevSnapshotTest class BlockDevStreamTest(BlockDevSnapshotTest): - def __init__(self, test, params, env): - super(BlockDevStreamTest, self).__init__(test, params, env) + super().__init__(test, params, env) self._stream_options = {} - self._top_device = "drive_%s" % self.snapshot_tag + self._top_device = f"drive_{self.snapshot_tag}" self._init_stream_options() if self.base_tag == self.params.objects("images")[0]: self.disks_info[self.base_tag] = [ - "system", self.params.get("mnt_on_sys_dsk", "/var/tmp") + "system", + self.params.get("mnt_on_sys_dsk", "/var/tmp"), ] def _init_stream_options(self): @@ -33,8 +33,7 @@ def _init_stream_options(self): if self.params.get("backing_file"): self._stream_options["backing-file"] = self.params["backing_file"] if self.params.get("block_stream_timeout"): - self._stream_options["timeout"] = int( - self.params["block_stream_timeout"]) + self._stream_options["timeout"] = int(self.params["block_stream_timeout"]) def snapshot_test(self): for info in self.disks_info.values(): @@ -47,8 +46,9 @@ def blockdev_stream(self): if not self.is_blockdev_mode(): self._stream_options["base"] = self.base_image.image_filename self._top_device = self.params["device"] - backup_utils.blockdev_stream(self.main_vm, self._top_device, - **self._stream_options) + backup_utils.blockdev_stream( + self.main_vm, self._top_device, **self._stream_options + ) time.sleep(0.5) def check_backing_file(self): @@ -56,16 +56,16 @@ def check_backing_file(self): out = self.snapshot_image.info(output="json") info = json.loads(out) backing_file = info.get("backing-filename") - assert not backing_file, "Unexpect backing file(%s) found!" % backing_file + assert not backing_file, f"Unexpect backing file({backing_file}) found!" 
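check_backing_file above relies on the image info being reported as JSON: after a successful block-stream the overlay should no longer list a backing file. A minimal standalone sketch of that check, assuming qemu-img is on PATH; the image path in the commented usage line is hypothetical:

import json
import subprocess


def has_backing_file(image_path):
    # `qemu-img info --output=json` prints image metadata as JSON;
    # "backing-filename" is only present while a backing file is attached.
    out = subprocess.run(
        ["qemu-img", "info", "--output=json", image_path],
        check=True,
        capture_output=True,
        text=True,
    ).stdout
    info = json.loads(out)
    return info.get("backing-filename") is not None


# Example: after streaming, the snapshot overlay should be standalone.
# assert not has_backing_file("/var/tmp/sn1.qcow2"), "unexpected backing file"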
def mount_data_disks(self): if self.base_tag != self.params.objects("images")[0]: - super(BlockDevStreamTest, self).mount_data_disks() + super().mount_data_disks() def remove_files_from_system_image(self, tmo=60): """Remove testing files from system image""" if self.base_tag == self.params.objects("images")[0]: - files = ["%s/%s" % (info[0], info[1]) for info in self.files_info] + files = [f"{info[0]}/{info[1]}" for info in self.files_info] if files: self.main_vm = self.main_vm.clone() self.main_vm.create() @@ -73,7 +73,7 @@ def remove_files_from_system_image(self, tmo=60): try: session = self.main_vm.wait_for_login() - session.cmd("rm -f %s" % " ".join(files), timeout=tmo) + session.cmd("rm -f {}".format(" ".join(files)), timeout=tmo) session.close() finally: self.main_vm.destroy() diff --git a/provider/blockdev_stream_nowait.py b/provider/blockdev_stream_nowait.py index 439b3737e6..822e2ac283 100644 --- a/provider/blockdev_stream_nowait.py +++ b/provider/blockdev_stream_nowait.py @@ -4,9 +4,7 @@ from avocado.utils import memory -from provider import backup_utils -from provider import blockdev_stream_base -from provider import job_utils +from provider import backup_utils, blockdev_stream_base, job_utils class BlockdevStreamNowaitTest(blockdev_stream_base.BlockDevStreamTest): @@ -15,14 +13,14 @@ class BlockdevStreamNowaitTest(blockdev_stream_base.BlockDevStreamTest): """ def __init__(self, test, params, env): - super(BlockdevStreamNowaitTest, self).__init__(test, params, env) + super().__init__(test, params, env) self._job = None def blockdev_stream(self): """Run block-stream without waiting job completed""" - self._job = backup_utils.blockdev_stream_nowait(self.main_vm, - self._top_device, - **self._stream_options) + self._job = backup_utils.blockdev_stream_nowait( + self.main_vm, self._top_device, **self._stream_options + ) def wait_stream_job_completed(self): """Wait till the stream job completed""" diff --git a/provider/blockdev_stream_parallel.py b/provider/blockdev_stream_parallel.py index f2a75cbe33..739f039f20 100644 --- a/provider/blockdev_stream_parallel.py +++ b/provider/blockdev_stream_parallel.py @@ -7,11 +7,9 @@ from functools import partial from avocado.utils import memory - from virttest import utils_misc -from provider import backup_utils -from provider import blockdev_stream_base +from provider import backup_utils, blockdev_stream_base class BlockdevStreamParallelTest(blockdev_stream_base.BlockDevStreamTest): @@ -28,11 +26,14 @@ def blockdev_stream(self): function with no argument """ parallel_tests = self.params.objects("parallel_tests") - targets = list([getattr(self, t) - for t in parallel_tests if hasattr(self, t)]) + targets = list([getattr(self, t) for t in parallel_tests if hasattr(self, t)]) targets.append( - partial(backup_utils.blockdev_stream, vm=self.main_vm, - device=self._top_device, **self._stream_options) + partial( + backup_utils.blockdev_stream, + vm=self.main_vm, + device=self._top_device, + **self._stream_options, + ) ) try: diff --git a/provider/cdrom.py b/provider/cdrom.py index aa5e08faa5..80403ae97d 100644 --- a/provider/cdrom.py +++ b/provider/cdrom.py @@ -5,13 +5,14 @@ procedures. Generally, functions here should provide layers of extra event check for cdrom related operations. 
""" + import logging from avocado import fail_on from virttest import utils_misc from virttest.qemu_capabilities import Flags -LOG_JOB = logging.getLogger('avocado.test') +LOG_JOB = logging.getLogger("avocado.test") class CDRomError(Exception): @@ -25,9 +26,7 @@ def __init__(self, device, operation, status): self.status = status def __str__(self): - return "Device %s tray-open status: %s after %s" % (self.device, - self.status, - self.operation) + return f"Device {self.device} tray-open status: {self.status} after {self.operation}" class CDRomEventCountError(CDRomError): @@ -38,8 +37,12 @@ def __init__(self, device, operation, event, count): self.count = count def __str__(self): - return "%d '%s' received after %s %s" % (self.count, self.event, - self.operation, self.device) + return "%d '%s' received after %s %s" % ( + self.count, + self.event, + self.operation, + self.device, + ) def is_device_tray_opened(vm, device_id): @@ -50,14 +53,14 @@ def is_device_tray_opened(vm, device_id): : param vm: VM object : device_id: block device identifier """ - blocks_info = vm.monitor.info('block') + blocks_info = vm.monitor.info("block") if vm.check_capability(Flags.BLOCKDEV): device_id = vm.devices.get_qdev_by_drive(device_id) if isinstance(blocks_info, str): - open_str = 'tray open' - close_str = 'tray closed' + open_str = "tray open" + close_str = "tray closed" for block in blocks_info.splitlines(): if device_id in block: if open_str in block: @@ -66,18 +69,18 @@ def is_device_tray_opened(vm, device_id): return False else: for block in blocks_info: - if device_id in str(block) and block.get('tray_open'): - return block['tray_open'] + if device_id in str(block) and block.get("tray_open"): + return block["tray_open"] return False -class QMPEventCheck(object): +class QMPEventCheck: """ base context manager class. """ def __init__(self, *args, **kargs): - super(QMPEventCheck, self).__init__(*args, **kargs) + super().__init__(*args, **kargs) def __enter__(self): """ @@ -102,7 +105,8 @@ class QMPEventCheckCD(QMPEventCheck): """ context manager class to handle checking of event "DEVICE_TRAY_MOVED" """ - event_to_check = u"DEVICE_TRAY_MOVED" + + event_to_check = "DEVICE_TRAY_MOVED" def __init__(self, vm, device_id, operation): self.vm = vm @@ -137,31 +141,30 @@ def _event_check(self): # count is 2 if closed before, 1 if opened before. 
""" if not len(self.vm.qmp_monitors): - LOG_JOB.warn("unable to check %s due to no qmp_monitor available", - self.event_to_check) + LOG_JOB.warning( + "unable to check %s due to no qmp_monitor available", + self.event_to_check, + ) return m = self.vm.qmp_monitors[0] events = utils_misc.wait_for(m.get_events, timeout=20) if not events: events = [] - LOG_JOB.info('Event list:\n%s', events) + LOG_JOB.info("Event list:\n%s", events) self.count = 0 for event in events: - if event['event'] == u"DEVICE_TRAY_MOVED": + if event["event"] == "DEVICE_TRAY_MOVED": self.count += 1 - self.status_after = bool(event['data']['tray-open']) + self.status_after = bool(event["data"]["tray-open"]) if self.is_status_after_incorrect(): - raise CDRomStatusError(self.device_id, - self.status_after, - self.operation) + raise CDRomStatusError(self.device_id, self.status_after, self.operation) if self.is_events_count_incorrect(): - raise CDRomEventCountError(self.device_id, - self.operation, - self.event_to_check, - self.count) + raise CDRomEventCountError( + self.device_id, self.operation, self.event_to_check, self.count + ) class QMPEventCheckCDEject(QMPEventCheckCD): @@ -170,15 +173,15 @@ class to check for eject_cdrom """ def __init__(self, vm, device_id): - super(QMPEventCheckCDEject, self).__init__(vm, device_id, - 'eject_cdrom') + super().__init__(vm, device_id, "eject_cdrom") def is_status_after_incorrect(self): return not self.status_after def is_events_count_incorrect(self): - return ((not self.status_before and self.count != 1) or - (self.status_before and self.count != 0)) + return (not self.status_before and self.count != 1) or ( + self.status_before and self.count != 0 + ) class QMPEventCheckCDChange(QMPEventCheckCD): @@ -187,12 +190,12 @@ class to check for change_media """ def __init__(self, vm, device_id): - super(QMPEventCheckCDChange, self).__init__(vm, device_id, - 'change_media') + super().__init__(vm, device_id, "change_media") def is_status_after_incorrect(self): return self.status_after def is_events_count_incorrect(self): - return ((not self.status_before and self.count != 2) or - (self.status_before and self.count != 1)) + return (not self.status_before and self.count != 2) or ( + self.status_before and self.count != 1 + ) diff --git a/provider/chardev_utils.py b/provider/chardev_utils.py index 74f3a3a2a0..be422d0390 100644 --- a/provider/chardev_utils.py +++ b/provider/chardev_utils.py @@ -3,8 +3,7 @@ from avocado.utils import process from virttest import utils_net - -from virttest.utils_conn import build_server_key, build_CA, build_client_key +from virttest.utils_conn import build_CA, build_client_key, build_server_key def setup_certs(params): @@ -19,38 +18,47 @@ def setup_certs(params): shutil.rmtree(cert_dir) # Setup the certificate authority. 
- hostname = process.run('hostname', ignore_status=False, - shell=True, verbose=True).stdout_text.strip() + hostname = process.run( + "hostname", ignore_status=False, shell=True, verbose=True + ).stdout_text.strip() server_ip = utils_net.get_host_ip_address() cn = hostname ca_credential_dict = {} - ca_credential_dict['cakey'] = 'ca-key.pem' - ca_credential_dict['cacert'] = 'ca-cert.pem' + ca_credential_dict["cakey"] = "ca-key.pem" + ca_credential_dict["cacert"] = "ca-cert.pem" if not os.path.exists(cert_dir): os.makedirs(cert_dir) - build_CA(cert_dir, cn, certtool="certtool", - credential_dict=ca_credential_dict) + build_CA(cert_dir, cn, certtool="certtool", credential_dict=ca_credential_dict) # Setup server certificates server_credential_dict = {} - server_credential_dict['cakey'] = 'ca-key.pem' - server_credential_dict['cacert'] = 'ca-cert.pem' - server_credential_dict['serverkey'] = 'server-key.pem' - server_credential_dict['servercert'] = 'server-cert.pem' - server_credential_dict['ca_cakey_path'] = cert_dir + server_credential_dict["cakey"] = "ca-key.pem" + server_credential_dict["cacert"] = "ca-cert.pem" + server_credential_dict["serverkey"] = "server-key.pem" + server_credential_dict["servercert"] = "server-cert.pem" + server_credential_dict["ca_cakey_path"] = cert_dir # Build a server key. - build_server_key(cert_dir, cn, server_ip, certtool="certtool", - credential_dict=server_credential_dict, on_local=True) + build_server_key( + cert_dir, + cn, + server_ip, + certtool="certtool", + credential_dict=server_credential_dict, + on_local=True, + ) # Setup client certificates client_credential_dict = {} - client_credential_dict['cakey'] = 'ca-key.pem' - client_credential_dict['cacert'] = 'ca-cert.pem' - client_credential_dict['clientkey'] = 'client-key.pem' - client_credential_dict['clientcert'] = 'client-cert.pem' - server_credential_dict['ca_cakey_path'] = cert_dir + client_credential_dict["cakey"] = "ca-key.pem" + client_credential_dict["cacert"] = "ca-cert.pem" + client_credential_dict["clientkey"] = "client-key.pem" + client_credential_dict["clientcert"] = "client-cert.pem" + server_credential_dict["ca_cakey_path"] = cert_dir # build a client key. 
- build_client_key(cert_dir, - client_cn=cn, certtool="certtool", - credential_dict=client_credential_dict) + build_client_key( + cert_dir, + client_cn=cn, + certtool="certtool", + credential_dict=client_credential_dict, + ) diff --git a/provider/cpu_utils.py b/provider/cpu_utils.py index 13f75557bb..13692ff096 100644 --- a/provider/cpu_utils.py +++ b/provider/cpu_utils.py @@ -1,13 +1,12 @@ -import re import logging +import re from avocado.utils import process - from virttest import utils_misc -from virttest.utils_test import VMStress, StressError +from virttest.utils_test import StressError, VMStress from virttest.utils_version import VersionInterval -LOG_JOB = logging.getLogger('avocado.test') +LOG_JOB = logging.getLogger("avocado.test") class VMStressBinding(VMStress): @@ -16,8 +15,7 @@ class VMStressBinding(VMStress): """ def __init__(self, vm, params, stress_args=""): - super(VMStressBinding, self).__init__(vm, "stress", params, - stress_args=stress_args) + super().__init__(vm, "stress", params, stress_args=stress_args) self.install() def load_stress_tool(self, cpu_id): @@ -26,16 +24,17 @@ def load_stress_tool(self, cpu_id): :param cpu_id: CPU id you want to bind """ - cmd = "setsid taskset -c {} {} {} > /dev/null".format(cpu_id, - self.stress_cmds, - self.stress_args) + cmd = f"setsid taskset -c {cpu_id} {self.stress_cmds} {self.stress_args} > /dev/null" LOG_JOB.info("Launch stress with command: %s", cmd) self.cmd_launch(cmd) # wait for stress to start and then check, if not raise StressError - if not utils_misc.wait_for(self.app_running, - self.stress_wait_for_timeout, - first=2.0, step=1.0, - text="wait for stress app to start"): + if not utils_misc.wait_for( + self.app_running, + self.stress_wait_for_timeout, + first=2.0, + step=1.0, + text="wait for stress app to start", + ): raise StressError("Stress does not running as expected.") @@ -53,8 +52,7 @@ def get_guest_cpu_ids(session, os_type): return set() cmd = "grep processor /proc/cpuinfo" output = session.cmd_output(cmd) - return set(map(int, re.findall(r"processor\s+(?::\s)?(\d+)", - output, re.M))) + return set(map(int, re.findall(r"processor\s+(?::\s)?(\d+)", output, re.M))) def check_if_vm_vcpu_topology_match(session, os_type, cpuinfo, test, devices=None): @@ -71,7 +69,7 @@ def check_if_vm_vcpu_topology_match(session, os_type, cpuinfo, test, devices=Non if os_type == "linux": out = session.cmd_output_safe("lscpu") cpu_info = dict(re.findall(r"([A-Z].+):\s+(.+)", out, re.M)) - if str(cpu_info["Architecture"]) == 's390x': + if str(cpu_info["Architecture"]) == "s390x": sockets = int(cpu_info["Socket(s) per book"]) else: sockets = int(cpu_info["Socket(s)"]) @@ -79,30 +77,42 @@ def check_if_vm_vcpu_topology_match(session, os_type, cpuinfo, test, devices=Non threads = int(cpu_info["Thread(s) per core"]) threads_matched = cpuinfo.threads == threads else: - cmd = ('powershell "Get-WmiObject Win32_processor | Format-List ' - 'NumberOfCores,ThreadCount"') + cmd = ( + 'powershell "Get-WmiObject Win32_processor | Format-List ' + 'NumberOfCores,ThreadCount"' + ) out = session.cmd_output_safe(cmd).strip() try: - cpu_info = [dict(re.findall(r"(\w+)\s+:\s(\d+)", cpu_out, re.M)) - for cpu_out in out.split("\n\n")] + cpu_info = [ + dict(re.findall(r"(\w+)\s+:\s(\d+)", cpu_out, re.M)) + for cpu_out in out.split("\n\n") + ] sockets = len(cpu_info) cores = int(cpu_info[0]["NumberOfCores"]) threads = int(cpu_info[0]["ThreadCount"]) except KeyError: - LOG_JOB.warning("Attempt to get output via 'powershell' failed, " - "output returned by 
guest:\n%s", out) + LOG_JOB.warning( + "Attempt to get output via 'powershell' failed, " + "output returned by guest:\n%s", + out, + ) LOG_JOB.info("Try again via 'wmic'") - cmd = 'wmic CPU get NumberOfCores,ThreadCount /Format:list' + cmd = "wmic CPU get NumberOfCores,ThreadCount /Format:list" out = session.cmd_output_safe(cmd).strip() try: - cpu_info = [dict(re.findall(r"(\w+)=(\d+)", cpu_out, re.M)) - for cpu_out in out.split("\n\n")] + cpu_info = [ + dict(re.findall(r"(\w+)=(\d+)", cpu_out, re.M)) + for cpu_out in out.split("\n\n") + ] sockets = len(cpu_info) cores = int(cpu_info[0]["NumberOfCores"]) threads = int(cpu_info[0]["ThreadCount"]) except KeyError: - LOG_JOB.error("Attempt to get output via 'wmic' failed, output" - " returned by guest:\n%s", out) + LOG_JOB.error( + "Attempt to get output via 'wmic' failed, output" + " returned by guest:\n%s", + out, + ) return False if devices: # Until QEMU 8.1 there was a different behaviour for thread count in case @@ -113,12 +123,13 @@ def check_if_vm_vcpu_topology_match(session, os_type, cpuinfo, test, devices=Non LOG_JOB.warning("ThreadCount is disabled for Windows guests") threads_matched = True else: - threads_matched = threads//cores == cpuinfo.threads + threads_matched = threads // cores == cpuinfo.threads else: test.fail("Variable 'devices' must be defined for Windows guest.") - is_matched = (cpuinfo.sockets == sockets and cpuinfo.cores == cores and - threads_matched) # pylint: disable=E0606 + is_matched = ( + cpuinfo.sockets == sockets and cpuinfo.cores == cores and threads_matched + ) # pylint: disable=E0606 if not is_matched: LOG_JOB.debug("CPU infomation of guest:\n%s", out) @@ -143,16 +154,16 @@ def check_cpu_flags(params, flags, test, session=None): if session: LOG_JOB.info("Check cpu flags inside guest") if missing: - test.fail("Flag %s not in guest" % missing) + test.fail(f"Flag {missing} not in guest") no_flags = params.get("no_flags") if no_flags: err_flags = [f for f in no_flags.split() if f in out] if err_flags: - test.fail("Flag %s should not be present in guest" % err_flags) + test.fail(f"Flag {err_flags} should not be present in guest") else: LOG_JOB.info("Check cpu flags on host") if missing: - test.cancel("This host doesn't support flag %s" % missing) + test.cancel(f"This host doesn't support flag {missing}") # Copied from unstable module "virttest/cpu.py" @@ -169,8 +180,11 @@ def check_if_vm_vcpu_match(vcpu_desire, vm): if isinstance(vcpu_desire, str) and vcpu_desire.isdigit(): vcpu_desire = int(vcpu_desire) if vcpu_desire != vcpu_actual: - LOG_JOB.debug("CPU quantity mismatched !!! guest said it got %s " - "but we assigned %s", vcpu_actual, vcpu_desire) + LOG_JOB.debug( + "CPU quantity mismatched !!! 
guest said it got %s " "but we assigned %s", + vcpu_actual, + vcpu_desire, + ) return False LOG_JOB.info("CPU quantity matched: %s", vcpu_actual) return True diff --git a/provider/cpuflags.py b/provider/cpuflags.py index 8889ac80e8..3e06d6273d 100644 --- a/provider/cpuflags.py +++ b/provider/cpuflags.py @@ -1,6 +1,7 @@ """ Shared code for tests that make use of cpuflags """ + import os from virttest import data_dir @@ -20,12 +21,10 @@ def install_cpuflags_util_on_vm(test, vm, dst_dir, extra_flags=None): cpuflags_src = data_dir.get_deps_dir("cpu_flags") cpuflags_dst = os.path.join(dst_dir, "cpu_flags") session = vm.wait_for_login() - session.cmd("rm -rf %s" % - (cpuflags_dst)) + session.cmd(f"rm -rf {cpuflags_dst}") session.cmd("sync") vm.copy_files_to(cpuflags_src, dst_dir) session.cmd("sync") - session.cmd("cd %s; cd src; make EXTRA_FLAGS='%s';" % - (cpuflags_dst, extra_flags)) + session.cmd(f"cd {cpuflags_dst}; cd src; make EXTRA_FLAGS='{extra_flags}';") session.cmd("sync") session.close() diff --git a/provider/dpdk_utils.py b/provider/dpdk_utils.py index 01f7fdac12..7b52786bd3 100644 --- a/provider/dpdk_utils.py +++ b/provider/dpdk_utils.py @@ -1,12 +1,13 @@ +import re import subprocess import sys -import re # Ensure paramiko is installed -for pip in ['pip3', 'pip']: +for pip in ["pip3", "pip"]: try: - subprocess.check_call([pip, 'install', '--default-timeout=100', 'paramiko']) + subprocess.check_call([pip, "install", "--default-timeout=100", "paramiko"]) import paramiko + break except ImportError: continue @@ -23,7 +24,7 @@ def install_dpdk(params, session): :param session: the session of guest or host """ - cmd = 'yum install -y %s' % params.get("env_pkg") + cmd = "yum install -y {}".format(params.get("env_pkg")) session.cmd(cmd, timeout=360, ignore_all_errors=True) @@ -57,14 +58,14 @@ def bind_pci_device_to_vfio(session, pci_id): :param pci_id: PCI ID of the device to bind """ - cmd = "dpdk-devbind.py --bind=vfio-pci %s" % pci_id + cmd = f"dpdk-devbind.py --bind=vfio-pci {pci_id}" status, output = session.cmd_status_output(cmd) if status == 0: - print("PCI device %s bound to vfio-pci successfully." % pci_id) + print(f"PCI device {pci_id} bound to vfio-pci successfully.") elif "already bound to driver vfio-pci" in output: - print("PCI device %s is already bound to vfio-pci." % pci_id) + print(f"PCI device {pci_id} is already bound to vfio-pci.") else: - print("Failed to bind PCI device %s to vfio-pci" % pci_id) + print(f"Failed to bind PCI device {pci_id} to vfio-pci") class TestPMD: @@ -95,7 +96,7 @@ def _expect_prompt(self, timeout=10): while True: data = self.dpdk_channel.recv(16384).decode() output += data - print(data, end='') + print(data, end="") if "testpmd>" in output: return output @@ -128,19 +129,23 @@ def login(self): self.session.set_missing_host_key_policy(paramiko.AutoAddPolicy()) try: - self.session.connect(self.host, username=self.username, password=self.password) - print("Successfully logged in to %s." % self.host) + self.session.connect( + self.host, username=self.username, password=self.password + ) + print(f"Successfully logged in to {self.host}.") return self.session except paramiko.AuthenticationException: - print("Failed to authenticate with SSH on %s." 
% self.host) + print(f"Failed to authenticate with SSH on {self.host}.") except paramiko.SSHException as e: - print("SSH error occurred while connecting to %s: %s" % (self.host, str(e))) + print(f"SSH error occurred while connecting to {self.host}: {str(e)}") except paramiko.ssh_exception.NoValidConnectionsError: - print("Failed to connect to %s." % self.host) + print(f"Failed to connect to {self.host}.") except Exception as e: - print("Error occurred while logging in to %s: %s" % (self.host, str(e))) + print(f"Error occurred while logging in to {self.host}: {str(e)}") - def launch_testpmd(self, dpdk_tool_path, cpu_cores, pci_id, forward_mode, queue, pkts, mac=None): + def launch_testpmd( + self, dpdk_tool_path, cpu_cores, pci_id, forward_mode, queue, pkts, mac=None + ): """ Launch the testpmd tool with the specified parameters. @@ -153,22 +158,31 @@ def launch_testpmd(self, dpdk_tool_path, cpu_cores, pci_id, forward_mode, queue, :param mac: MAC address (optional) """ - base_cmd = ("{} -l 0-{} -a {} --file-prefix {} -- " - "--port-topology=chained --disable-rss -i " - "--rxq={} --txq={} --rxd=256 --txd=256 " - "--nb-cores={} --burst=64 --auto-start " - "--forward-mode={} --{}pkts={} ") - - eth_peer = "--eth-peer={} ".format(mac) if mac else "" - - cmd = base_cmd.format(dpdk_tool_path, int(cpu_cores) - 1, - pci_id, - 'tx' if forward_mode == 'txonly' else 'rx', - queue, queue, - int(cpu_cores) - 1, forward_mode, - 'tx' if forward_mode == 'txonly' else 'rx', - pkts - ) + eth_peer + base_cmd = ( + "{} -l 0-{} -a {} --file-prefix {} -- " + "--port-topology=chained --disable-rss -i " + "--rxq={} --txq={} --rxd=256 --txd=256 " + "--nb-cores={} --burst=64 --auto-start " + "--forward-mode={} --{}pkts={} " + ) + + eth_peer = f"--eth-peer={mac} " if mac else "" + + cmd = ( + base_cmd.format( + dpdk_tool_path, + int(cpu_cores) - 1, + pci_id, + "tx" if forward_mode == "txonly" else "rx", + queue, + queue, + int(cpu_cores) - 1, + forward_mode, + "tx" if forward_mode == "txonly" else "rx", + pkts, + ) + + eth_peer + ) if forward_mode == "txonly": cmd += "--txonly-multi-flow " diff --git a/provider/hostdev/__init__.py b/provider/hostdev/__init__.py index f092fde040..31ce9c3703 100644 --- a/provider/hostdev/__init__.py +++ b/provider/hostdev/__init__.py @@ -129,11 +129,11 @@ def bind_one(self, slot_id, driver): if current_driver: if current_driver == driver: LOG_JOB.info( - f"Notice: {slot_id} already bound to driver " f"{driver}, skipping" + "Notice: %s already bound to driver %s, skipping", slot_id, driver ) return self.unbind_one(slot_id) - LOG_JOB.info(f"Binding driver for device {slot_id}") + LOG_JOB.info("Binding driver for device %s", slot_id) # For kernels >= 3.15 driver_override can be used to specify the driver # for a device rather than relying on the driver to provide a positive # match of the device. 
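The hostdev hunks here consistently trade f-string logging for logging's own deferred %-style interpolation, so arguments are only formatted when a record is actually emitted. A minimal sketch of the two forms; the slot and driver values are illustrative:

import logging

LOG = logging.getLogger("avocado.test")
logging.basicConfig(level=logging.INFO)

slot_id, driver = "0000:3b:00.0", "vfio-pci"

# Eagerly formatted: the f-string is built even if INFO is filtered out.
LOG.info(f"Binding driver for device {slot_id}")

# Deferred: logging interpolates %s only when the record is emitted,
# which is the form these call sites are converted to.
LOG.info("Notice: %s already bound to driver %s, skipping", slot_id, driver)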
@@ -198,7 +198,7 @@ def unbind_one(self, slot_id): """ current_driver = self._get_current_driver(slot_id) if current_driver: - LOG_JOB.info(f'Unbinding current driver "{current_driver}"') + LOG_JOB.info('Unbinding current driver "%s"', current_driver) driver_path = PCI_DRV_PATH / current_driver try: with (driver_path / "unbind").open("a") as unbind_f: @@ -236,7 +236,7 @@ def _config_net_vfs(self, params): f"hostdev_vf{idx}_mac", utils_net.generate_mac_address_simple() ) self.mac_addresses.append(mac) - LOG_JOB.info(f'Assigning MAC address "{mac}" to VF "{vf}"') + LOG_JOB.info('Assigning MAC address "%s" to VF "%s"', mac, vf) process.run(f"ip link set dev {dev_name} vf {idx} mac {mac}") def bind_all(self, driver): @@ -256,8 +256,7 @@ def config(self, params): super().config(params) if (self.slot_path / "class").read_text()[2:4] == "02": LOG_JOB.info( - f'Device "{self.slot_id}" is a network device, configure MAC address ' - f"for VFs" + '"%s" is a network device, configure MAC address for VFs', self.slot_id ) self._config_net_vfs(params) diff --git a/provider/hostdev/dev_setup.py b/provider/hostdev/dev_setup.py index a83e88c9a5..3474777a12 100644 --- a/provider/hostdev/dev_setup.py +++ b/provider/hostdev/dev_setup.py @@ -50,7 +50,7 @@ def hostdev_setup(params): for slot in host_pci_slots: if not (PCI_DEV_PATH / slot).exists(): LOG_JOB.warning( - f"The provided slot({slot}) does not exist, " f"skipping setup it." + "The provided slot(%s) does not exist, skipping setup it.", slot ) continue hostdev_params = params.object_params(slot) diff --git a/provider/hostdev/utils.py b/provider/hostdev/utils.py index e19733b85b..0186ade678 100644 --- a/provider/hostdev/utils.py +++ b/provider/hostdev/utils.py @@ -105,7 +105,7 @@ def get_ifname_from_pci(pci_slot): try: return next((PCI_DEV_PATH / pci_slot / "net").iterdir()).name except OSError as e: - LOG_JOB.error(f"Cannot get the NIC name of {pci_slot}: str({e})") + LOG_JOB.error("Cannot get the NIC name of %s: %s", pci_slot, str(e)) return "" @@ -163,7 +163,7 @@ def get_guest_ip_from_mac(vm, mac, ip_version=4): raise ValueError("Unknown os type") finally: serial_session.close() - LOG_JOB.info(f"IP address of MAC address({mac}) is: {ip_addr}") + LOG_JOB.info("IP address of MAC address(%s) is: %s", mac, ip_addr) return ip_addr diff --git a/provider/in_place_upgrade_base.py b/provider/in_place_upgrade_base.py index 1f6fc0ccac..2d71969354 100644 --- a/provider/in_place_upgrade_base.py +++ b/provider/in_place_upgrade_base.py @@ -1,14 +1,13 @@ +import logging import re import time -import logging - from virttest import utils_package -LOG_JOB = logging.getLogger('avocado.test') +LOG_JOB = logging.getLogger("avocado.test") -class IpuTest(object): +class IpuTest: """ Class for in_place_upgrade test in the vm """ @@ -34,8 +33,7 @@ def run_guest_cmd(self, cmd, check_status=True, timeout=1200): """ status, output = self.session.cmd_status_output(cmd, timeout=timeout) if check_status and status != 0: - self.test.fail("Execute command %s failed, output: %s" - % (cmd, output)) + self.test.fail(f"Execute command {cmd} failed, output: {output}") return output.strip() def upgrade_process(self, cmd, timeout=6000): @@ -64,7 +62,7 @@ def yum_update_no_rhsm(self, test, old_custom): update_vm = self.params.get("yum_update") self.session.cmd(update_vm, timeout=3000) except Exception as error: - test.fail("Failed to do yum update in the vm : %s" % str(error)) + test.fail(f"Failed to do yum update in the vm : {str(error)}") def rhsm(self, test): """ @@ -91,7 +89,7 @@ def 
rhsm(self, test): update_vm = self.params.get("yum_update") self.session.cmd(update_vm, timeout=6000) except Exception as error: - test.fail("Failed to register rhsm : %s" % str(error)) + test.fail(f"Failed to register rhsm : {str(error)}") def create_ipuser(self, test): """ @@ -107,7 +105,7 @@ def create_ipuser(self, test): self.session.cmd(add_passwd_ipuser) self.session.cmd(no_passwd_for_sudo) except Exception as error: - test.fail("Failed to create ipuser : %s" % str(error)) + test.fail(f"Failed to create ipuser : {str(error)}") def pre_upgrade_whitelist(self, test): """ @@ -125,13 +123,12 @@ def pre_upgrade_whitelist(self, test): self.session.cmd(self.params.get("fix_permit")) # New kernel is not used erase_old_kernel = self.params.get("clean_up_old_kernel") - s, output = self.session.cmd_status_output(erase_old_kernel, - timeout=1200) + s, output = self.session.cmd_status_output(erase_old_kernel, timeout=1200) error_info = self.params.get("error_info") if re.search(error_info, output): pass except Exception as error: - test.fail("Failed to fix issues in advance: %s" % str(error)) + test.fail(f"Failed to fix issues in advance: {str(error)}") def post_upgrade_check(self, test, post_release): """ @@ -141,16 +138,18 @@ def post_upgrade_check(self, test, post_release): release = self.params.get("release_check") status, output_release = self.session.cmd_status_output(release) if not re.search(post_release, output_release): - test.fail("Post_release: %s, expected result: %s" - % (post_release, output_release)) + test.fail( + f"Post_release: {post_release}, expected result: {output_release}" + ) new_kernel = self.params.get("new_kernel_ver") check_kernel = self.params.get("check_kernel") s, actual_new_kernel = self.session.cmd_status_output(check_kernel) if not re.search(new_kernel, actual_new_kernel): - test.fail("kernel is not right, expected is %s and new is %s" - % (new_kernel, actual_new_kernel)) + test.fail( + f"kernel is not right, expected is {new_kernel} and new is {actual_new_kernel}" + ) except Exception as error: - test.fail("Post upgrade checking failed : %s" % str(error)) + test.fail(f"Post upgrade checking failed : {str(error)}") def post_upgrade_restore(self, test): """ @@ -166,13 +165,13 @@ def post_upgrade_restore(self, test): if re.search("answerfile", o): s, o = self.session.cmd_status_output(re_permit) if s: - test.fail("Failed to restore permit: %s" % o) + test.fail(f"Failed to restore permit: {o}") re_sshd_service = self.params.get("restart_sshd") break else: test.fail("upgrade is in proress, please add waiting time") s, o = self.session.cmd_status_output(re_sshd_service) if s != 0: - test.fail("Failed to restart sshd: %s" % o) + test.fail(f"Failed to restart sshd: {o}") except Exception as error: - test.fail("Failed to restore permit: %s" % str(error)) + test.fail(f"Failed to restore permit: {str(error)}") diff --git a/provider/input_event_proxy.py b/provider/input_event_proxy.py index 7f45f0eb15..2810aeb901 100644 --- a/provider/input_event_proxy.py +++ b/provider/input_event_proxy.py @@ -1,95 +1,88 @@ -import os import json import logging +import os + try: from queue import Queue except ImportError: from Queue import Queue -from virttest import data_dir -from virttest import utils_misc +from virttest import data_dir, utils_misc -LOG_JOB = logging.getLogger('avocado.test') +LOG_JOB = logging.getLogger("avocado.test") -DEP_DIR = data_dir.get_deps_dir('input_event') +DEP_DIR = data_dir.get_deps_dir("input_event") class AgentMessageType: + """Agent message types.""" - 
'''Agent message types.''' - - SYNC = 'SYNC' - INFO = 'INFO' - READY = 'READY' - EVENT = 'EVENT' - ERROR = 'ERROR' + SYNC = "SYNC" + INFO = "INFO" + READY = "READY" + EVENT = "EVENT" + ERROR = "ERROR" class AgentState: - - '''Agent state codes.''' + """Agent state codes.""" STOPPED = 0 GREETING = 1 LISTENING = 2 -EventTypeKey = 'type' -DevNameKey = 'device' +EventTypeKey = "type" +DevNameKey = "device" class EventType: + """Event types.""" - '''Event types.''' - - KEYDOWN = 'KEYDOWN' - KEYUP = 'KEYUP' - POINTERMOVE = 'POINTERMOVE' - WHEELFORWARD = 'WHEELFORWARD' - WHEELBACKWARD = 'WHEELBACKWARD' - UNKNOWN = 'UNKNOWN' + KEYDOWN = "KEYDOWN" + KEYUP = "KEYUP" + POINTERMOVE = "POINTERMOVE" + WHEELFORWARD = "WHEELFORWARD" + WHEELBACKWARD = "WHEELBACKWARD" + UNKNOWN = "UNKNOWN" class KeyEventData: + """Key event schema.""" - '''Key event schema.''' - - KEYCODE = 'keyCode' - SCANCODE = 'scanCode' + KEYCODE = "keyCode" + SCANCODE = "scanCode" class PointerEventData: + """Pointer event schema.""" - '''Pointer event schema.''' - - XPOS = 'xPos' - YPOS = 'yPos' - ABS = 'abs' + XPOS = "xPos" + YPOS = "yPos" + ABS = "abs" class WheelEventData: + """Wheel event schema.""" - '''Wheel event schema.''' - - HSCROLL = 'hScroll' # horizontal scroll wheel - ABS = 'abs' + HSCROLL = "hScroll" # horizontal scroll wheel + ABS = "abs" -class _EventListener(object): +class _EventListener: + """Base implementation for the event listener class.""" - '''Base implementation for the event listener class.''' - - agent_source = '' - agent_target = '' - python_bin = '' + agent_source = "" + agent_target = "" + python_bin = "" def __init__(self, vm): - ''' + """ Initialize the event listener. :param vm: VM object. - ''' + """ self.events = Queue() self.targets = {} self._vm = vm @@ -100,26 +93,26 @@ def __init__(self, vm): self._listen() def _install(self): - '''Install (copy) the agent into VM.''' + """Install (copy) the agent into VM.""" if not os.path.exists(self.agent_source): - raise IOError('agent program is missing') + raise OSError("agent program is missing") self._vm.copy_files_to(self.agent_source, self.agent_target) def _uninstall(self): - '''Uninstall the agent.''' + """Uninstall the agent.""" raise NotImplementedError() def _launch(self): - '''Launch the agent.''' + """Launch the agent.""" self._agent_sh = self._vm.wait_for_login() self._agent_sh.set_output_func(self._parse_output) self._agent_sh.set_output_params(tuple()) - cmd = ' '.join((self.python_bin, self.agent_target)) + cmd = " ".join((self.python_bin, self.agent_target)) self._agent_sh.sendline(cmd) def _terminate(self): - '''Terminate the agent.''' - self._agent_sh.sendcontrol('c') + """Terminate the agent.""" + self._agent_sh.sendcontrol("c") self._agent_sh.close() # session objects wants `output_func` to be serializable, # but it does not make sense currently, drop the value to @@ -127,39 +120,39 @@ def _terminate(self): self._agent_sh.set_output_func(None) self._agent_sh = None self._agent_state = AgentState.STOPPED - LOG_JOB.info('Stopped listening input events on %s', self._vm.name) + LOG_JOB.info("Stopped listening input events on %s", self._vm.name) def is_listening(self): - '''Return `True` if listening.''' + """Return `True` if listening.""" return self._agent_state == AgentState.LISTENING def _listen(self): - '''Listen events in VM.''' + """Listen events in VM.""" self._launch() if not utils_misc.wait_for(self.is_listening, timeout=10, step=1): - raise AssertionError('agent program is not running') - LOG_JOB.info('Listening input events on 
%s', self._vm.name) + raise AssertionError("agent program is not running") + LOG_JOB.info("Listening input events on %s", self._vm.name) def cleanup(self): - '''Cleanup the event listener.''' + """Cleanup the event listener.""" self._terminate() self._uninstall() self._ctrl_sh.close() def clear_events(self): - '''Clear all the queued events.''' + """Clear all the queued events.""" while not self.events.empty(): self.events.get() def _parse_output(self, line): - '''Parse output of the agent.''' + """Parse output of the agent.""" try: message = json.loads(line) except: # garbage line, skip it return - mtype = message['type'] - content = message['content'] + mtype = message["type"] + content = message["content"] if mtype == AgentMessageType.SYNC: self._agent_state = AgentState.GREETING elif mtype == AgentMessageType.INFO: @@ -171,65 +164,64 @@ def _parse_output(self, line): elif mtype == AgentMessageType.ERROR: self._report_error(content) else: - LOG_JOB.error('Input event listener received unknown message') + LOG_JOB.error("Input event listener received unknown message") def _report_info(self, content): - '''Report information of devices.''' - dev = content['device'] - info = content['info'] + """Report information of devices.""" + dev = content["device"] + info = content["info"] self.targets[dev] = info def _report_error(self, content): - '''Report errors.''' + """Report errors.""" pass def _parse_platform_event(self, content): - '''Parse events of the certian platform.''' + """Parse events of the certian platform.""" raise NotImplementedError() class EventListenerLinux(_EventListener): + """Linux implementation for the event listener class.""" - '''Linux implementation for the event listener class.''' - - agent_source = os.path.join(DEP_DIR, 'input_event_linux.py') - agent_target = '/tmp/input_event.py' - python_bin = '`command -v python python3 | head -1`' + agent_source = os.path.join(DEP_DIR, "input_event_linux.py") + agent_target = "/tmp/input_event.py" + python_bin = "`command -v python python3 | head -1`" KEYDOWN = 1 KEYUP = 0 WHEELFORWARD = 0x00000001 - WHEELBACKWARD = 0xffffffff + WHEELBACKWARD = 0xFFFFFFFF def __init__(self, vm): - super(EventListenerLinux, self).__init__(vm) + super().__init__(vm) self._buffers = {} def _uninstall(self): - cmd = ' '.join(('rm', '-f', self.agent_target)) + cmd = " ".join(("rm", "-f", self.agent_target)) self._ctrl_sh.cmd(cmd, ignore_all_errors=True) def _report_info(self, content): - super(EventListenerLinux, self)._report_info(content) - dev = content['device'] + super()._report_info(content) + dev = content["device"] self._buffers[dev] = {} def _parse_platform_event(self, content): - dev = content['device'] - nevent = content['event'] - etype = nevent['typeName'] - value = nevent['value'] + dev = content["device"] + nevent = content["event"] + etype = nevent["typeName"] + value = nevent["value"] ebuf = self._buffers[dev] - if etype == 'EV_SYN': - subtype = nevent['codeName'] - if subtype == 'SYN_REPORT': + if etype == "EV_SYN": + subtype = nevent["codeName"] + if subtype == "SYN_REPORT": # end of event, report it ebuf[DevNameKey] = dev self.events.put(ebuf) ebuf = {EventTypeKey: EventType.UNKNOWN} - elif etype == 'EV_KEY': - keycode = nevent['codeName'] + elif etype == "EV_KEY": + keycode = nevent["codeName"] if value == self.KEYDOWN: mtype = EventType.KEYDOWN elif value == self.KEYUP: @@ -239,49 +231,49 @@ def _parse_platform_event(self, content): if mtype: ebuf[EventTypeKey] = mtype ebuf[KeyEventData.KEYCODE] = keycode - elif etype == 
'EV_REL': - subtype = nevent['codeName'] - if subtype in ('REL_X', 'REL_Y'): + elif etype == "EV_REL": + subtype = nevent["codeName"] + if subtype in ("REL_X", "REL_Y"): ebuf[EventTypeKey] = EventType.POINTERMOVE - if subtype.endswith('X'): + if subtype.endswith("X"): ebuf[PointerEventData.XPOS] = value - else: # 'Y' + else: # 'Y' ebuf[PointerEventData.YPOS] = value ebuf[PointerEventData.ABS] = 0 - elif subtype in ('REL_HWHEEL', 'REL_WHEEL'): + elif subtype in ("REL_HWHEEL", "REL_WHEEL"): if value == self.WHEELFORWARD: ebuf[EventTypeKey] = EventType.WHEELFORWARD elif value == self.WHEELBACKWARD: ebuf[EventTypeKey] = EventType.WHEELBACKWARD - if subtype.endswith('HWHEEL'): + if subtype.endswith("HWHEEL"): ebuf[WheelEventData.HSCROLL] = 1 else: ebuf[WheelEventData.HSCROLL] = 0 ebuf[WheelEventData.ABS] = 0 - elif etype == 'EV_ABS': - subtype = nevent['codeName'] - if subtype in ('ABS_X', 'ABS_Y'): + elif etype == "EV_ABS": + subtype = nevent["codeName"] + if subtype in ("ABS_X", "ABS_Y"): ebuf[EventTypeKey] = EventType.POINTERMOVE - if subtype.endswith('X'): + if subtype.endswith("X"): ebuf[PointerEventData.XPOS] = value - else: # 'Y' + else: # 'Y' ebuf[PointerEventData.YPOS] = value ebuf[PointerEventData.ABS] = 1 - elif subtype == 'ABS_WHEEL': + elif subtype == "ABS_WHEEL": if value == self.WHEELFORWARD: ebuf[EventTypeKey] = EventType.WHEELFORWARD elif value == self.WHEELBACKWARD: ebuf[EventTypeKey] = EventType.WHEELBACKWARD ebuf[WheelEventData.HSCROLL] = 0 ebuf[WheelEventData.ABS] = 1 - elif etype == 'EV_MSC': - subtype = nevent['codeName'] - if subtype == 'MSC_SCAN': + elif etype == "EV_MSC": + subtype = nevent["codeName"] + if subtype == "MSC_SCAN": ebuf[KeyEventData.SCANCODE] = value - elif etype == 'EV_LED': + elif etype == "EV_LED": # TODO: handle this kind of events when necessary pass - elif etype == 'EV_REP': + elif etype == "EV_REP": # FIXME: handle this kind of events pass else: @@ -292,265 +284,269 @@ def _parse_platform_event(self, content): # XXX: we may need different map tables for different keyboard layouts, # or even the best solution is not using any mapping, but let us pick # the current implementation since I can only realize this. 
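The mapping defined just below translates Windows virtual-key names to Linux evdev KEY_ names, with UNMAPPED marking keys that have no evdev equivalent. A trimmed sketch of how such a table is typically consulted; only a few entries are reproduced here, the complete mapping follows:

UNMAPPED = "UNMAPPED"

# Excerpt of the VK -> evdev table; the full mapping is defined below.
VK2LINUX_SAMPLE = {
    "VK_RETURN": "KEY_ENTER",
    "VK_ESCAPE": "KEY_ESC",
    "VK_SPACE": "KEY_SPACE",
    "VK_JUNJA": UNMAPPED,
}


def vk_to_evdev(vk_name):
    # Unknown or deliberately unmapped keys both come back as UNMAPPED,
    # letting callers skip events they cannot translate.
    return VK2LINUX_SAMPLE.get(vk_name, UNMAPPED)


assert vk_to_evdev("VK_RETURN") == "KEY_ENTER"
assert vk_to_evdev("VK_JUNJA") == UNMAPPED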
-UNMAPPED = 'UNMAPPED' +UNMAPPED = "UNMAPPED" VK2Linux = { - 'VK_BACK': 'KEY_BACKSPACE', - 'VK_TAB': 'KEY_TAB', - 'VK_CLEAR': 'KEY_CLEAR', - 'VK_RETURN': 'KEY_ENTER', - 'VK_SHIFT': 'KEY_LEFTSHIFT', - 'VK_CONTROL': 'KEY_LEFTCTRL', - 'VK_MENU': 'KEY_LEFTALT', - 'VK_PAUSE': 'KEY_PAUSE', - 'VK_CAPITAL': 'KEY_CAPSLOCK', - 'VK_KANA': 'KEY_KATAKANA', - 'VK_HANGUEL': 'KEY_HANGEUL', - 'VK_HANGUL': 'KEY_HANGEUL', - 'VK_JUNJA': UNMAPPED, - 'VK_FINAL': UNMAPPED, - 'VK_HANJA': 'KEY_HANJA', - 'VK_KANJI': UNMAPPED, - 'VK_ESCAPE': 'KEY_ESC', - 'VK_CONVERT': 'KEY_HENKAN', - 'VK_NONCONVERT': 'KEY_MUHENKAN', - 'VK_ACCEPT': UNMAPPED, - 'VK_MODECHANGE': UNMAPPED, - 'VK_SPACE': 'KEY_SPACE', - 'VK_PRIOR': 'KEY_PAGEUP', - 'VK_NEXT': 'KEY_PAGEDOWN', - 'VK_END': 'KEY_END', - 'VK_HOME': 'KEY_HOME', - 'VK_LEFT': 'KEY_LEFT', - 'VK_UP': 'KEY_UP', - 'VK_RIGHT': 'KEY_RIGHT', - 'VK_DOWN': 'KEY_DOWN', - 'VK_SELECT': 'KEY_SELECT', - 'VK_PRINT': 'KEY_PRINT', - 'VK_EXECUTE': UNMAPPED, - 'VK_SNAPSHOT': 'KEY_SYSRQ', - 'VK_INSERT': 'KEY_INSERT', - 'VK_DELETE': 'KEY_DELETE', - 'VK_HELP': 'KEY_HELP', - 'VK_0': 'KEY_0', - 'VK_1': 'KEY_1', - 'VK_2': 'KEY_2', - 'VK_3': 'KEY_3', - 'VK_4': 'KEY_4', - 'VK_5': 'KEY_5', - 'VK_6': 'KEY_6', - 'VK_7': 'KEY_7', - 'VK_8': 'KEY_8', - 'VK_9': 'KEY_9', - 'VK_A': 'KEY_A', - 'VK_B': 'KEY_B', - 'VK_C': 'KEY_C', - 'VK_D': 'KEY_D', - 'VK_E': 'KEY_E', - 'VK_F': 'KEY_F', - 'VK_G': 'KEY_G', - 'VK_H': 'KEY_H', - 'VK_I': 'KEY_I', - 'VK_J': 'KEY_J', - 'VK_K': 'KEY_K', - 'VK_L': 'KEY_L', - 'VK_M': 'KEY_M', - 'VK_N': 'KEY_N', - 'VK_O': 'KEY_O', - 'VK_P': 'KEY_P', - 'VK_Q': 'KEY_Q', - 'VK_R': 'KEY_R', - 'VK_S': 'KEY_S', - 'VK_T': 'KEY_T', - 'VK_U': 'KEY_U', - 'VK_V': 'KEY_V', - 'VK_W': 'KEY_W', - 'VK_X': 'KEY_X', - 'VK_Y': 'KEY_Y', - 'VK_Z': 'KEY_Z', - 'VK_LWIN': 'KEY_LEFTMETA', - 'VK_RWIN': 'KEY_RIGHTMETA', - 'VK_APPS': 'KEY_COMPOSE', - 'VK_SLEEP': 'KEY_SLEEP', - 'VK_NUMPAD0': 'KEY_KP0', - 'VK_NUMPAD1': 'KEY_KP1', - 'VK_NUMPAD2': 'KEY_KP2', - 'VK_NUMPAD3': 'KEY_KP3', - 'VK_NUMPAD4': 'KEY_KP4', - 'VK_NUMPAD5': 'KEY_KP5', - 'VK_NUMPAD6': 'KEY_KP6', - 'VK_NUMPAD7': 'KEY_KP7', - 'VK_NUMPAD8': 'KEY_KP8', - 'VK_NUMPAD9': 'KEY_KP9', - 'VK_MULTIPLY': 'KEY_KPASTERISK', - 'VK_ADD': 'KEY_KPPLUS', - 'VK_SEPARATOR': 'KEY_KPCOMMA', - 'VK_SUBTRACT': 'KEY_KPMINUS', - 'VK_DECIMAL': 'KEY_KPDOT', - 'VK_DIVIDE': 'KEY_KPSLASH', - 'VK_F1': 'KEY_F1', - 'VK_F2': 'KEY_F2', - 'VK_F3': 'KEY_F3', - 'VK_F4': 'KEY_F4', - 'VK_F5': 'KEY_F5', - 'VK_F6': 'KEY_F6', - 'VK_F7': 'KEY_F7', - 'VK_F8': 'KEY_F8', - 'VK_F9': 'KEY_F9', - 'VK_F10': 'KEY_F10', - 'VK_F11': 'KEY_F11', - 'VK_F12': 'KEY_F12', - 'VK_F13': 'KEY_F13', - 'VK_F14': 'KEY_F14', - 'VK_F15': 'KEY_F15', - 'VK_F16': 'KEY_F16', - 'VK_F17': 'KEY_F17', - 'VK_F18': 'KEY_F18', - 'VK_F19': 'KEY_F19', - 'VK_F20': 'KEY_F20', - 'VK_F21': 'KEY_F21', - 'VK_F22': 'KEY_F22', - 'VK_F23': 'KEY_F23', - 'VK_F24': 'KEY_F24', - 'VK_NUMLOCK': 'KEY_NUMLOCK', - 'VK_SCROLL': 'KEY_SCROLLLOCK', - 'VK_OEM_0x92': UNMAPPED, - 'VK_OEM_0x93': UNMAPPED, - 'VK_OEM_0x94': UNMAPPED, - 'VK_OEM_0x95': UNMAPPED, - 'VK_OEM_0x96': UNMAPPED, - 'VK_LSHIFT': 'KEY_LEFTSHIFT', - 'VK_RSHIFT': 'KEY_RIGHTSHIFT', - 'VK_LCONTROL': 'KEY_LEFTCTRL', - 'VK_RCONTROL': 'KEY_RIGHTCTRL', - 'VK_LMENU': 'KEY_LEFTALT', - 'VK_RMENU': 'KEY_RIGHTALT', - 'VK_BROWSER_BACK': 'KEY_BACK', - 'VK_BROWSER_FORWARD': 'KEY_FORWARD', - 'VK_BROWSER_REFRESH': 'KEY_REFRESH', - 'VK_BROWSER_STOP': 'KEY_STOP', - 'VK_BROWSER_SEARCH': 'KEY_SEARCH', - 'VK_BROWSER_FAVORITES': 'KEY_FAVORITES', - 'VK_BROWSER_HOME': 'KEY_HOMEPAGE', - 'VK_VOLUME_MUTE': 'KEY_MUTE', - 
'VK_VOLUME_DOWN': 'KEY_VOLUMEDOWN', - 'VK_VOLUME_UP': 'KEY_VOLUMEUP', - 'VK_MEDIA_NEXT_TRACK': 'KEY_NEXTSONG', - 'VK_MEDIA_PREV_TRACK': 'KEY_PREVIOUSSONG', - 'VK_MEDIA_STOP': 'KEY_STOPCD', - 'VK_MEDIA_PLAY_PAUSE': 'KEY_PLAYPAUSE', - 'VK_LAUNCH_MAIL': 'KEY_EMAIL', - 'VK_LAUNCH_MEDIA_SELECT': UNMAPPED, - 'VK_LAUNCH_APP1': UNMAPPED, - 'VK_LAUNCH_APP2': UNMAPPED, - 'VK_OEM_1': 'KEY_SEMICOLON', - 'VK_OEM_PLUS': 'KEY_EQUAL', - 'VK_OEM_COMMA': 'KEY_COMMA', - 'VK_OEM_MINUS': 'KEY_MINUS', - 'VK_OEM_PERIOD': 'KEY_DOT', - 'VK_OEM_2': 'KEY_SLASH', - 'VK_OEM_3': 'KEY_GRAVE', - 'VK_OEM_4': 'KEY_LEFTBRACE', - 'VK_OEM_5': 'KEY_BACKSLASH', - 'VK_OEM_6': 'KEY_RIGHTBRACE', - 'VK_OEM_7': 'KEY_APOSTROPHE', - 'VK_OEM_8': UNMAPPED, - 'VK_OEM_0xE1': UNMAPPED, - 'VK_OEM_102': 'KEY_102ND', - 'VK_OEM_0xE3': UNMAPPED, - 'VK_OEM_0xE4': UNMAPPED, - 'VK_PROCESSKEY': UNMAPPED, - 'VK_OEM_0xE6': UNMAPPED, - 'VK_PACKET': UNMAPPED, - 'VK_OEM_0xE9': UNMAPPED, - 'VK_OEM_0xEA': UNMAPPED, - 'VK_OEM_0xEB': UNMAPPED, - 'VK_OEM_0xEC': UNMAPPED, - 'VK_OEM_0xED': UNMAPPED, - 'VK_OEM_0xEE': UNMAPPED, - 'VK_OEM_0xEF': UNMAPPED, - 'VK_OEM_0xF0': UNMAPPED, - 'VK_OEM_0xF1': UNMAPPED, - 'VK_OEM_0xF2': UNMAPPED, - 'VK_OEM_0xF3': UNMAPPED, - 'VK_OEM_0xF4': UNMAPPED, - 'VK_OEM_0xF5': UNMAPPED, - 'VK_ATTN': UNMAPPED, - 'VK_CRSEL': UNMAPPED, - 'VK_EXSEL': UNMAPPED, - 'VK_EREOF': UNMAPPED, - 'VK_PLAY': 'KEY_PLAY', - 'VK_ZOOM': 'KEY_ZOOM', - 'VK_NONAME': UNMAPPED, - 'VK_PA1': UNMAPPED, - 'VK_OEM_CLEAR': UNMAPPED + "VK_BACK": "KEY_BACKSPACE", + "VK_TAB": "KEY_TAB", + "VK_CLEAR": "KEY_CLEAR", + "VK_RETURN": "KEY_ENTER", + "VK_SHIFT": "KEY_LEFTSHIFT", + "VK_CONTROL": "KEY_LEFTCTRL", + "VK_MENU": "KEY_LEFTALT", + "VK_PAUSE": "KEY_PAUSE", + "VK_CAPITAL": "KEY_CAPSLOCK", + "VK_KANA": "KEY_KATAKANA", + "VK_HANGUEL": "KEY_HANGEUL", + "VK_HANGUL": "KEY_HANGEUL", + "VK_JUNJA": UNMAPPED, + "VK_FINAL": UNMAPPED, + "VK_HANJA": "KEY_HANJA", + "VK_KANJI": UNMAPPED, + "VK_ESCAPE": "KEY_ESC", + "VK_CONVERT": "KEY_HENKAN", + "VK_NONCONVERT": "KEY_MUHENKAN", + "VK_ACCEPT": UNMAPPED, + "VK_MODECHANGE": UNMAPPED, + "VK_SPACE": "KEY_SPACE", + "VK_PRIOR": "KEY_PAGEUP", + "VK_NEXT": "KEY_PAGEDOWN", + "VK_END": "KEY_END", + "VK_HOME": "KEY_HOME", + "VK_LEFT": "KEY_LEFT", + "VK_UP": "KEY_UP", + "VK_RIGHT": "KEY_RIGHT", + "VK_DOWN": "KEY_DOWN", + "VK_SELECT": "KEY_SELECT", + "VK_PRINT": "KEY_PRINT", + "VK_EXECUTE": UNMAPPED, + "VK_SNAPSHOT": "KEY_SYSRQ", + "VK_INSERT": "KEY_INSERT", + "VK_DELETE": "KEY_DELETE", + "VK_HELP": "KEY_HELP", + "VK_0": "KEY_0", + "VK_1": "KEY_1", + "VK_2": "KEY_2", + "VK_3": "KEY_3", + "VK_4": "KEY_4", + "VK_5": "KEY_5", + "VK_6": "KEY_6", + "VK_7": "KEY_7", + "VK_8": "KEY_8", + "VK_9": "KEY_9", + "VK_A": "KEY_A", + "VK_B": "KEY_B", + "VK_C": "KEY_C", + "VK_D": "KEY_D", + "VK_E": "KEY_E", + "VK_F": "KEY_F", + "VK_G": "KEY_G", + "VK_H": "KEY_H", + "VK_I": "KEY_I", + "VK_J": "KEY_J", + "VK_K": "KEY_K", + "VK_L": "KEY_L", + "VK_M": "KEY_M", + "VK_N": "KEY_N", + "VK_O": "KEY_O", + "VK_P": "KEY_P", + "VK_Q": "KEY_Q", + "VK_R": "KEY_R", + "VK_S": "KEY_S", + "VK_T": "KEY_T", + "VK_U": "KEY_U", + "VK_V": "KEY_V", + "VK_W": "KEY_W", + "VK_X": "KEY_X", + "VK_Y": "KEY_Y", + "VK_Z": "KEY_Z", + "VK_LWIN": "KEY_LEFTMETA", + "VK_RWIN": "KEY_RIGHTMETA", + "VK_APPS": "KEY_COMPOSE", + "VK_SLEEP": "KEY_SLEEP", + "VK_NUMPAD0": "KEY_KP0", + "VK_NUMPAD1": "KEY_KP1", + "VK_NUMPAD2": "KEY_KP2", + "VK_NUMPAD3": "KEY_KP3", + "VK_NUMPAD4": "KEY_KP4", + "VK_NUMPAD5": "KEY_KP5", + "VK_NUMPAD6": "KEY_KP6", + "VK_NUMPAD7": "KEY_KP7", + "VK_NUMPAD8": "KEY_KP8", + "VK_NUMPAD9": 
"KEY_KP9", + "VK_MULTIPLY": "KEY_KPASTERISK", + "VK_ADD": "KEY_KPPLUS", + "VK_SEPARATOR": "KEY_KPCOMMA", + "VK_SUBTRACT": "KEY_KPMINUS", + "VK_DECIMAL": "KEY_KPDOT", + "VK_DIVIDE": "KEY_KPSLASH", + "VK_F1": "KEY_F1", + "VK_F2": "KEY_F2", + "VK_F3": "KEY_F3", + "VK_F4": "KEY_F4", + "VK_F5": "KEY_F5", + "VK_F6": "KEY_F6", + "VK_F7": "KEY_F7", + "VK_F8": "KEY_F8", + "VK_F9": "KEY_F9", + "VK_F10": "KEY_F10", + "VK_F11": "KEY_F11", + "VK_F12": "KEY_F12", + "VK_F13": "KEY_F13", + "VK_F14": "KEY_F14", + "VK_F15": "KEY_F15", + "VK_F16": "KEY_F16", + "VK_F17": "KEY_F17", + "VK_F18": "KEY_F18", + "VK_F19": "KEY_F19", + "VK_F20": "KEY_F20", + "VK_F21": "KEY_F21", + "VK_F22": "KEY_F22", + "VK_F23": "KEY_F23", + "VK_F24": "KEY_F24", + "VK_NUMLOCK": "KEY_NUMLOCK", + "VK_SCROLL": "KEY_SCROLLLOCK", + "VK_OEM_0x92": UNMAPPED, + "VK_OEM_0x93": UNMAPPED, + "VK_OEM_0x94": UNMAPPED, + "VK_OEM_0x95": UNMAPPED, + "VK_OEM_0x96": UNMAPPED, + "VK_LSHIFT": "KEY_LEFTSHIFT", + "VK_RSHIFT": "KEY_RIGHTSHIFT", + "VK_LCONTROL": "KEY_LEFTCTRL", + "VK_RCONTROL": "KEY_RIGHTCTRL", + "VK_LMENU": "KEY_LEFTALT", + "VK_RMENU": "KEY_RIGHTALT", + "VK_BROWSER_BACK": "KEY_BACK", + "VK_BROWSER_FORWARD": "KEY_FORWARD", + "VK_BROWSER_REFRESH": "KEY_REFRESH", + "VK_BROWSER_STOP": "KEY_STOP", + "VK_BROWSER_SEARCH": "KEY_SEARCH", + "VK_BROWSER_FAVORITES": "KEY_FAVORITES", + "VK_BROWSER_HOME": "KEY_HOMEPAGE", + "VK_VOLUME_MUTE": "KEY_MUTE", + "VK_VOLUME_DOWN": "KEY_VOLUMEDOWN", + "VK_VOLUME_UP": "KEY_VOLUMEUP", + "VK_MEDIA_NEXT_TRACK": "KEY_NEXTSONG", + "VK_MEDIA_PREV_TRACK": "KEY_PREVIOUSSONG", + "VK_MEDIA_STOP": "KEY_STOPCD", + "VK_MEDIA_PLAY_PAUSE": "KEY_PLAYPAUSE", + "VK_LAUNCH_MAIL": "KEY_EMAIL", + "VK_LAUNCH_MEDIA_SELECT": UNMAPPED, + "VK_LAUNCH_APP1": UNMAPPED, + "VK_LAUNCH_APP2": UNMAPPED, + "VK_OEM_1": "KEY_SEMICOLON", + "VK_OEM_PLUS": "KEY_EQUAL", + "VK_OEM_COMMA": "KEY_COMMA", + "VK_OEM_MINUS": "KEY_MINUS", + "VK_OEM_PERIOD": "KEY_DOT", + "VK_OEM_2": "KEY_SLASH", + "VK_OEM_3": "KEY_GRAVE", + "VK_OEM_4": "KEY_LEFTBRACE", + "VK_OEM_5": "KEY_BACKSLASH", + "VK_OEM_6": "KEY_RIGHTBRACE", + "VK_OEM_7": "KEY_APOSTROPHE", + "VK_OEM_8": UNMAPPED, + "VK_OEM_0xE1": UNMAPPED, + "VK_OEM_102": "KEY_102ND", + "VK_OEM_0xE3": UNMAPPED, + "VK_OEM_0xE4": UNMAPPED, + "VK_PROCESSKEY": UNMAPPED, + "VK_OEM_0xE6": UNMAPPED, + "VK_PACKET": UNMAPPED, + "VK_OEM_0xE9": UNMAPPED, + "VK_OEM_0xEA": UNMAPPED, + "VK_OEM_0xEB": UNMAPPED, + "VK_OEM_0xEC": UNMAPPED, + "VK_OEM_0xED": UNMAPPED, + "VK_OEM_0xEE": UNMAPPED, + "VK_OEM_0xEF": UNMAPPED, + "VK_OEM_0xF0": UNMAPPED, + "VK_OEM_0xF1": UNMAPPED, + "VK_OEM_0xF2": UNMAPPED, + "VK_OEM_0xF3": UNMAPPED, + "VK_OEM_0xF4": UNMAPPED, + "VK_OEM_0xF5": UNMAPPED, + "VK_ATTN": UNMAPPED, + "VK_CRSEL": UNMAPPED, + "VK_EXSEL": UNMAPPED, + "VK_EREOF": UNMAPPED, + "VK_PLAY": "KEY_PLAY", + "VK_ZOOM": "KEY_ZOOM", + "VK_NONAME": UNMAPPED, + "VK_PA1": UNMAPPED, + "VK_OEM_CLEAR": UNMAPPED, } class EventListenerWin(_EventListener): + """Windows implementation for the event listener class.""" - '''Windows implementation for the event listener class.''' - - agent_source = os.path.join(DEP_DIR, 'input_event_win.py') - agent_target = r'%TEMP%\input_event.py' - python_bin = 'python' + agent_source = os.path.join(DEP_DIR, "input_event_win.py") + agent_target = r"%TEMP%\input_event.py" + python_bin = "python" def _uninstall(self): - cmd = ' '.join(('del', self.agent_target)) + cmd = " ".join(("del", self.agent_target)) self._ctrl_sh.cmd(cmd, ignore_all_errors=True) def _parse_platform_event(self, content): - dev = content['device'] - nevent = 
content['event'] - etype = nevent['typeName'] + dev = content["device"] + nevent = content["event"] + etype = nevent["typeName"] event = {} mtype = EventType.UNKNOWN - if etype in ('WM_KEYDOWN', 'WM_KEYUP', - 'WM_SYSKEYDOWN', 'WM_SYSKEYUP'): - keycode = VK2Linux[nevent['vkCodeName']] - scancode = nevent['scanCode'] - if etype.endswith('DOWN'): + if etype in ("WM_KEYDOWN", "WM_KEYUP", "WM_SYSKEYDOWN", "WM_SYSKEYUP"): + keycode = VK2Linux[nevent["vkCodeName"]] + scancode = nevent["scanCode"] + if etype.endswith("DOWN"): mtype = EventType.KEYDOWN - else: # 'UP' + else: # 'UP' mtype = EventType.KEYUP event[KeyEventData.KEYCODE] = keycode event[KeyEventData.SCANCODE] = scancode - elif etype in ('WM_LBUTTONDOWN', 'WM_LBUTTONUP', - 'WM_RBUTTONDOWN', 'WM_RBUTTONUP', - 'WM_MBUTTONDOWN', 'WM_MBUTTONUP', - 'WM_XBUTTONDOWN', 'WM_XBUTTONUP'): - if etype.endswith('DOWN'): + elif etype in ( + "WM_LBUTTONDOWN", + "WM_LBUTTONUP", + "WM_RBUTTONDOWN", + "WM_RBUTTONUP", + "WM_MBUTTONDOWN", + "WM_MBUTTONUP", + "WM_XBUTTONDOWN", + "WM_XBUTTONUP", + ): + if etype.endswith("DOWN"): mtype = EventType.KEYDOWN - else: # 'UP' + else: # 'UP' mtype = EventType.KEYUP button = etype[3] - if button == 'L': - keycode = 'BTN_LEFT' - elif button == 'R': - keycode = 'BTN_RIGHT' - elif button == 'M': - keycode = 'BTN_MIDDLE' - else: # 'X' - xbutton = nevent['mouseDataText'] - if xbutton == 'XBUTTON1': - keycode = 'BTN_SIDE' - elif xbutton == 'XBUTTON2': - keycode = 'BTN_EXTRA' + if button == "L": + keycode = "BTN_LEFT" + elif button == "R": + keycode = "BTN_RIGHT" + elif button == "M": + keycode = "BTN_MIDDLE" + else: # 'X' + xbutton = nevent["mouseDataText"] + if xbutton == "XBUTTON1": + keycode = "BTN_SIDE" + elif xbutton == "XBUTTON2": + keycode = "BTN_EXTRA" else: keycode = xbutton event[KeyEventData.KEYCODE] = keycode - elif etype in ('WM_MOUSEWHEEL', 'WM_MOUSEHWHEEL'): - direction = nevent['mouseDataText'] - if direction == 'WHEELFORWARD': + elif etype in ("WM_MOUSEWHEEL", "WM_MOUSEHWHEEL"): + direction = nevent["mouseDataText"] + if direction == "WHEELFORWARD": mtype = EventType.WHEELFORWARD - elif direction == 'WHEELBACKWARD': + elif direction == "WHEELBACKWARD": mtype = EventType.WHEELBACKWARD - if etype.endswith('MOUSEHWHEEL'): + if etype.endswith("MOUSEHWHEEL"): event[WheelEventData.HSCROLL] = 1 else: event[WheelEventData.HSCROLL] = 0 - elif etype == 'WM_MOUSEMOVE': - xpos = nevent['xPos'] - ypos = nevent['yPos'] + elif etype == "WM_MOUSEMOVE": + xpos = nevent["xPos"] + ypos = nevent["yPos"] mtype = EventType.POINTERMOVE event[PointerEventData.XPOS] = xpos event[PointerEventData.YPOS] = ypos @@ -560,20 +556,20 @@ def _parse_platform_event(self, content): def EventListener(vm): - ''' + """ Event listener factory. This function creates an event listener instance by respecting the OS type of the given VM. :param vm: VM object. :return: Event listener object. 
- ''' + """ klass = None - os_type = vm.params['os_type'] - if os_type == 'linux': + os_type = vm.params["os_type"] + if os_type == "linux": klass = EventListenerLinux - elif os_type == 'windows': + elif os_type == "windows": klass = EventListenerWin if not klass: - raise ValueError('unsupported guest os type') + raise ValueError("unsupported guest os type") return klass(vm) diff --git a/provider/input_tests.py b/provider/input_tests.py index 790cbfa991..be5a8d6fd9 100644 --- a/provider/input_tests.py +++ b/provider/input_tests.py @@ -1,17 +1,16 @@ """Input test related functions""" import json +import logging import os import time -import logging - from collections import Counter -from virttest import error_context -from virttest import graphical_console -from virttest import data_dir + +from virttest import data_dir, error_context, graphical_console + from provider import input_event_proxy -LOG_JOB = logging.getLogger('avocado.test') +LOG_JOB = logging.getLogger("avocado.test") def get_keycode_cfg(filename): @@ -42,12 +41,14 @@ def key_tap_test(test, params, console, listener, wait_time): keys_file = params.get("key_table_file") keys_dict = get_keycode_cfg(keys_file) for key in keys_dict.keys(): - error_context.context("Send %s key tap event" % key, LOG_JOB.info) + error_context.context(f"Send {key} key tap event", LOG_JOB.info) console.key_tap(key) time.sleep(wait_time) - LOG_JOB.info("Check guest received %s key event is " - "matched with expected key event", key) + LOG_JOB.info( + "Check guest received %s key event is " "matched with expected key event", + key, + ) keycode = keys_dict[key] exp_events = [(keycode, "KEYDOWN"), (keycode, "KEYUP")] event_queue = listener.events @@ -59,9 +60,10 @@ def key_tap_test(test, params, console, listener, wait_time): key_events.append((event["keyCode"], event["type"])) if key_events != exp_events: - test.fail("Received key event didn't match expected event.\n" - "Received key event as: %s\n Expected event as: %s" - % (key_events, exp_events)) + test.fail( + "Received key event didn't match expected event.\n" + f"Received key event as: {key_events}\n Expected event as: {exp_events}" + ) @error_context.context_aware @@ -76,14 +78,16 @@ def mouse_btn_test(test, params, console, listener, wait_time): :param listener: listening the mouse button event in guest. :param wait_time: wait event received in listener event queue. 
""" - mouse_btn_map = {'left': 'BTN_LEFT', - 'right': 'BTN_RIGHT', - 'middle': 'BTN_MIDDLE', - 'side': 'BTN_SIDE', - 'extra': 'BTN_EXTRA'} + mouse_btn_map = { + "left": "BTN_LEFT", + "right": "BTN_RIGHT", + "middle": "BTN_MIDDLE", + "side": "BTN_SIDE", + "extra": "BTN_EXTRA", + } btns = params.objects("btns") for btn in btns: - error_context.context("Click mouse %s button" % btn, LOG_JOB.info) + error_context.context(f"Click mouse {btn} button", LOG_JOB.info) console.btn_click(btn) keycode = mouse_btn_map[btn] @@ -92,8 +96,7 @@ def mouse_btn_test(test, params, console, listener, wait_time): events_queue = listener.events btn_event = list() - error_context.context("Check correct button event is received", - LOG_JOB.info) + error_context.context("Check correct button event is received", LOG_JOB.info) while not events_queue.empty(): events = events_queue.get() # some windows os will return pointer move event first @@ -103,9 +106,10 @@ def mouse_btn_test(test, params, console, listener, wait_time): btn_event.append((events["keyCode"], events["type"])) if btn_event != exp_events: - test.fail("Received btn events don't match expected events.\n" - "Received btn events as: %s\n Expected events as: %s" - % (btn_event, exp_events)) + test.fail( + "Received btn events don't match expected events.\n" + f"Received btn events as: {btn_event}\n Expected events as: {exp_events}" + ) @error_context.context_aware @@ -121,10 +125,9 @@ def mouse_scroll_test(test, params, console, listener, wait_time, count=1): :param count: wheel event counts, default count=1. """ scrolls = params.objects("scrolls") - exp_events = {'wheel-up': ("WHEELFORWARD", 0), - 'wheel-down': ('WHEELBACKWARD', 0)} + exp_events = {"wheel-up": ("WHEELFORWARD", 0), "wheel-down": ("WHEELBACKWARD", 0)} for scroll in scrolls: - error_context.context("Scroll mouse %s" % scroll, LOG_JOB.info) + error_context.context(f"Scroll mouse {scroll}", LOG_JOB.info) if "up" in scroll: console.scroll_forward(count) else: @@ -132,8 +135,7 @@ def mouse_scroll_test(test, params, console, listener, wait_time, count=1): events_queue = listener.events time.sleep(wait_time) - error_context.context("Check correct scroll event is received", - LOG_JOB.info) + error_context.context("Check correct scroll event is received", LOG_JOB.info) exp_event = exp_events.get(scroll) samples = [] while not events_queue.empty(): @@ -147,17 +149,19 @@ def mouse_scroll_test(test, params, console, listener, wait_time, count=1): counter = Counter(samples) num = counter.pop(exp_event, 0) if num != count: - test.fail("Received scroll number %s don't match expected" - "scroll count %s" % (num, count)) + test.fail( + f"Received scroll number {num} don't match expected" + f"scroll count {count}" + ) if counter: - test.fail("Received scroll events don't match expected events" - "Received scroll events as: %s\n Expected events as: %s" - % (counter, exp_event)) + test.fail( + "Received scroll events don't match expected events" + f"Received scroll events as: {counter}\n Expected events as: {exp_event}" + ) @error_context.context_aware -def mouse_move_test(test, params, console, listener, - wait_time, end_pos, absolute): +def mouse_move_test(test, params, console, listener, wait_time, end_pos, absolute): """ Mouse move test, default move trace is uniform linear motion. 
@@ -186,13 +190,11 @@ def mouse_move_test(test, params, console, listener, else: vertical = 1 - error_context.context("Moving pointer from %s to %s" - % (start_pos, end_pos), LOG_JOB.info) + error_context.context(f"Moving pointer from {start_pos} to {end_pos}", LOG_JOB.info) console.pointer_move(end_pos, motion=line, absolute=absolute) time.sleep(wait_time) - error_context.context("Collecting all pointer move events from guest", - LOG_JOB.info) + error_context.context("Collecting all pointer move events from guest", LOG_JOB.info) while not events_queue.empty(): event = events_queue.get() xpos, ypos = event["xPos"], event["yPos"] @@ -203,42 +205,56 @@ def mouse_move_test(test, params, console, listener, xn_guest, yn_guest = event_lst[-1] tolerance = int(params.get("tolerance")) - error_context.context("Compare if pointer move to destination pos (%s, %s)" - "the missed value should in tolerance scope." - % end_pos, LOG_JOB.info) + error_context.context( + "Compare if pointer move to destination pos ({}, {})" + "the missed value should in tolerance scope.".format(*end_pos), + LOG_JOB.info, + ) if (abs(xn - xn_guest) > tolerance) or (abs(yn - yn_guest) > tolerance): - test.fail("pointer did not move to destination position." - "it move to pos (%s, %s) in guest, but exepected pos is" - "(%s, %s)" % (xn_guest, yn_guest, xn, yn)) - - error_context.context("Compare if pointer move trace nearby destination line," - "the missed value should in tolerance scope.", - LOG_JOB.info) + test.fail( + "pointer did not move to destination position." + f"it move to pos ({xn_guest}, {yn_guest}) in guest, but exepected pos is" + f"({xn}, {yn})" + ) + + error_context.context( + "Compare if pointer move trace nearby destination line," + "the missed value should in tolerance scope.", + LOG_JOB.info, + ) for i, (x, y) in enumerate(event_lst): if not vertical: - if abs((k * x + b) - y) > tolerance: # pylint: disable=E0606 - test.fail("Received pointer pos beyond line's tolerance scope " - "when move from {0} to {1}. Received pos is ({2}, {3})," - "it didn't nearby the expected line " - "y={4}x+{5}.".format(start_pos, end_pos, x, y, k, b)) + if abs((k * x + b) - y) > tolerance: # pylint: disable=E0606 + test.fail( + "Received pointer pos beyond line's tolerance scope " + f"when move from {start_pos} to {end_pos}. Received pos is ({x}, {y})," + "it didn't nearby the expected line " + f"y={k}x+{b}." + ) elif k == 0: # for horizontal direction line, only x value will change. if i > 0: - dx = [x2 - x1 for x1, x2 in zip(event_lst[i-1], event_lst[i])][0] - if (xn - x0 > 0 and dx <= 0): - test.fail("pointer move direction is wrong when " - "move from {0} to {1}.".format(start_pos, end_pos)) - elif (xn - x0 < 0 and dx >= 0): - test.fail("pointer move direction is wrong when " - "move from {0} to {1}.".format(start_pos, end_pos)) + dx = [x2 - x1 for x1, x2 in zip(event_lst[i - 1], event_lst[i])][0] + if xn - x0 > 0 and dx <= 0: + test.fail( + "pointer move direction is wrong when " + f"move from {start_pos} to {end_pos}." + ) + elif xn - x0 < 0 and dx >= 0: + test.fail( + "pointer move direction is wrong when " + f"move from {start_pos} to {end_pos}." + ) else: # for vertical direction line, only y value will change. 
if i > 0: - dy = [y2 - y1 for y1, y2 in zip(event_lst[i-1], event_lst[i])][1] + dy = [y2 - y1 for y1, y2 in zip(event_lst[i - 1], event_lst[i])][1] if (yn - y0 > 0 and dy <= 0) or (yn - y0 < 0 and dy >= 0): - test.fail("pointer move to incorrect direction when " - "move from {0} to {1}.".format(start_pos, end_pos)) + test.fail( + "pointer move to incorrect direction when " + f"move from {start_pos} to {end_pos}." + ) def query_mice_status(vm, mice_name): @@ -250,7 +266,7 @@ def query_mice_status(vm, mice_name): """ events = vm.monitor.query_mice() for event in events: - if event['name'] == mice_name: + if event["name"] == mice_name: return event @@ -285,22 +301,20 @@ def mouse_test(test, params, vm, wait_time, count=1): mice_name = params.get("mice_name", "QEMU PS/2 Mouse") mice_info = query_mice_status(vm, mice_name) absolute = True if mice_info["absolute"] else False - error_context.context("Check if %s device is working" % mice_name, - LOG_JOB.info) + error_context.context(f"Check if {mice_name} device is working", LOG_JOB.info) if not mice_info["current"]: - test.fail("%s does not worked currently" % mice_name) + test.fail(f"{mice_name} does not worked currently") mouse_btn_test(test, params, console, listener, wait_time) mouse_scroll_test(test, params, console, listener, wait_time, count=count) if not params.get("target_pos", None): width, height = console.screen_size - x_max, y_max = width-1, height-1 + x_max, y_max = width - 1, height - 1 target_pos = [(1, 0), (x_max, 0), (1, y_max), (x_max, y_max)] else: # suggest set target_pos if want to test one target position. target_pos = [tuple([int(i) for i in params.objects("target_pos")])] for end_pos in target_pos: - mouse_move_test(test, params, console, listener, wait_time, - end_pos, absolute) + mouse_move_test(test, params, console, listener, wait_time, end_pos, absolute) listener.clear_events() listener.cleanup() diff --git a/provider/job_utils.py b/provider/job_utils.py index 1715713f33..2fe2ad0f40 100644 --- a/provider/job_utils.py +++ b/provider/job_utils.py @@ -4,7 +4,7 @@ from avocado import fail_on from virttest import utils_misc -LOG_JOB = logging.getLogger('avocado.test') +LOG_JOB = logging.getLogger("avocado.test") BLOCK_JOB_COMPLETED_EVENT = "BLOCK_JOB_COMPLETED" BLOCK_JOB_CANCELLED_EVENT = "BLOCK_JOB_CANCELLED" @@ -36,15 +36,15 @@ def wait_until_job_status_match(vm, status, device, timeout): :param timeout: blocked timeout """ matched = utils_misc.wait_for( - lambda: get_job_status(vm, device) == status, - timeout=timeout) - assert matched, "wait job status to '%s' timeout in %s seconds" % ( - status, timeout) + lambda: get_job_status(vm, device) == status, timeout=timeout + ) + assert matched, f"wait job status to '{status}' timeout in {timeout} seconds" @fail_on def wait_until_block_job_completed(vm, job_id, timeout=900): """Block until block job completed""" + def _wait_until_block_job_completed(): finished = False status = get_job_status(vm, job_id) @@ -63,7 +63,7 @@ def _wait_until_block_job_completed(): data = event.get("data", dict()) if job_id in [data.get("id"), data.get("device")]: error = data.get("error") - assert not error, "block backup job finished with error: %s" % error + assert not error, f"block backup job finished with error: {error}" finished = True break finally: @@ -73,10 +73,9 @@ def _wait_until_block_job_completed(): return finished finished = utils_misc.wait_for( - _wait_until_block_job_completed, - first=0.1, - timeout=timeout) - assert finished, "wait for block job complete event timeout in %s 
seconds" % timeout + _wait_until_block_job_completed, first=0.1, timeout=timeout + ) + assert finished, f"wait for block job complete event timeout in {timeout} seconds" @fail_on @@ -101,7 +100,7 @@ def block_job_dismiss(vm, job_id, timeout=120): _job_dismiss(vm, job_id, timeout) time.sleep(0.1) job = get_block_job_by_id(vm, job_id) - assert not job, "Block job '%s' exists" % job_id + assert not job, f"Block job '{job_id}' exists" @fail_on @@ -110,7 +109,7 @@ def job_dismiss(vm, job_id, timeout=120): _job_dismiss(vm, job_id, timeout) time.sleep(0.1) job = get_job_by_id(vm, job_id) - assert not job, "Job '%s' exists" % job_id + assert not job, f"Job '{job_id}' exists" def _job_dismiss(vm, job_id, timeout=120): @@ -199,10 +198,14 @@ def get_event_by_condition(vm, event_name, tmo=30, **condition): event = None for i in range(tmo): all_events = vm.monitor.get_events() - events = [e for e in all_events if e.get('event') == event_name] + events = [e for e in all_events if e.get("event") == event_name] if condition: - events = [e for e in events if e.get('data') and all( - item in e['data'].items() for item in condition.items())] + events = [ + e + for e in events + if e.get("data") + and all(item in e["data"].items() for item in condition.items()) + ] if events: event = events[0] break @@ -218,13 +221,13 @@ def is_block_job_started(vm, jobid, tmo=10): for i in range(tmo): job = get_block_job_by_id(vm, jobid) if not job: - LOG_JOB.debug('job %s was not found', jobid) + LOG_JOB.debug("job %s was not found", jobid) break - elif job['offset'] > 0: + elif job["offset"] > 0: return True time.sleep(1) else: - LOG_JOB.debug('block job %s never starts in %s', jobid, tmo) + LOG_JOB.debug("block job %s never starts in %s", jobid, tmo) return False @@ -232,8 +235,7 @@ def check_block_jobs_started(vm, jobid_list, tmo=10): """ Test failed if any block job failed to start """ - started = all(list(map(lambda j: is_block_job_started(vm, j, tmo), - jobid_list))) + started = all(list(map(lambda j: is_block_job_started(vm, j, tmo), jobid_list))) assert started, "Not all block jobs start successfully" @@ -246,22 +248,21 @@ def is_block_job_running(vm, jobid, tmo=200): for i in range(tmo): job = get_block_job_by_id(vm, jobid) if not job: - LOG_JOB.debug('job %s cancelled unexpectedly', jobid) + LOG_JOB.debug("job %s cancelled unexpectedly", jobid) break - elif job['status'] not in ["running", "pending", "ready"]: - LOG_JOB.debug('job %s is not in running status', jobid) + elif job["status"] not in ["running", "pending", "ready"]: + LOG_JOB.debug("job %s is not in running status", jobid) return False elif offset is None: - if job['status'] in ["pending", "ready"]: + if job["status"] in ["pending", "ready"]: return True else: - offset = job['offset'] - elif job['offset'] > offset: + offset = job["offset"] + elif job["offset"] > offset: return True time.sleep(1) else: - LOG_JOB.debug('offset never changed for block job %s in %s', - jobid, tmo) + LOG_JOB.debug("offset never changed for block job %s in %s", jobid, tmo) return False @@ -269,8 +270,7 @@ def check_block_jobs_running(vm, jobid_list, tmo=200): """ Test failed if any block job's offset never increased """ - running = all(list(map(lambda j: is_block_job_running(vm, j, tmo), - jobid_list))) + running = all(list(map(lambda j: is_block_job_running(vm, j, tmo), jobid_list))) assert running, "Not all block jobs are running" @@ -285,16 +285,15 @@ def is_block_job_paused(vm, jobid, tmo=50): for i in range(tmo): job = get_block_job_by_id(vm, jobid) if not job: - 
LOG_JOB.debug('job %s cancelled unexpectedly', jobid) + LOG_JOB.debug("job %s cancelled unexpectedly", jobid) return False - elif job['status'] != "running": - LOG_JOB.debug('job %s is not in running status', jobid) + elif job["status"] != "running": + LOG_JOB.debug("job %s is not in running status", jobid) return False elif offset is None: - offset = job['offset'] - elif offset != job['offset']: - LOG_JOB.debug('offset %s changed for job %s in %s', - offset, jobid, tmo) + offset = job["offset"] + elif offset != job["offset"]: + LOG_JOB.debug("offset %s changed for job %s in %s", offset, jobid, tmo) return False time.sleep(1) return True @@ -304,6 +303,5 @@ def check_block_jobs_paused(vm, jobid_list, tmo=50): """ Test failed if any block job's offset changed """ - paused = all(list(map(lambda j: is_block_job_paused(vm, j, tmo), - jobid_list))) + paused = all(list(map(lambda j: is_block_job_paused(vm, j, tmo), jobid_list))) assert paused, "Not all block jobs are paused" diff --git a/provider/message_queuing.py b/provider/message_queuing.py index f65fbfa0fa..ac3d8565ac 100644 --- a/provider/message_queuing.py +++ b/provider/message_queuing.py @@ -4,7 +4,7 @@ from aexpect import client -LOG_JOB = logging.getLogger('avocado.test') +LOG_JOB = logging.getLogger("avocado.test") DEFAULT_MONITOR_TIMEOUT = 60 @@ -17,8 +17,10 @@ def __init__(self, message, output): self.output = output def __str__(self): - return ('No matching message("{}") was found. ' - 'Output: {}'.format(self.message, self.output)) + return ( + f'No matching message("{self.message}") was found. ' + f"Output: {self.output}" + ) class UnknownEventError(Exception): @@ -26,7 +28,7 @@ def __init__(self, event): self.event = event def __str__(self): - return 'Got unknown event: "{}"'.format(self.event) + return f'Got unknown event: "{self.event}"' class MQBase(client.Expect): @@ -36,7 +38,7 @@ def __init__(self, cmd): :param cmd: The MQ command line. """ - super(MQBase, self).__init__(cmd) + super().__init__(cmd) LOG_JOB.info("MQ command line: %s", self.command) def _confirm_message(self, message): @@ -45,7 +47,7 @@ def _confirm_message(self, message): :param message: The message you need to send. """ - self.sendline('CONFIRM-' + message) + self.sendline("CONFIRM-" + message) def _monitor_message(self, message, timeout=DEFAULT_MONITOR_TIMEOUT): """ @@ -56,21 +58,20 @@ def _monitor_message(self, message, timeout=DEFAULT_MONITOR_TIMEOUT): """ try: self.read_until_last_line_matches([message], timeout) - LOG_JOB.info('The message "{}" has been monitored.'.format(message)) + LOG_JOB.info('The message "%s" has been monitored.', message) except client.ExpectTimeoutError as err: raise MessageNotFoundError(message, err.output) - def _monitor_confirm_message(self, message, - timeout=DEFAULT_MONITOR_TIMEOUT): - return self._monitor_message('CONFIRM-' + message, timeout) + def _monitor_confirm_message(self, message, timeout=DEFAULT_MONITOR_TIMEOUT): + return self._monitor_message("CONFIRM-" + message, timeout) def close(self): """ Check and close the message queuing environment. """ if self.is_alive(): - self.send_ctrl('^C') - super(MQBase, self).close() + self.send_ctrl("^C") + super().close() def send_message(self, msg): """ @@ -80,8 +81,9 @@ def send_message(self, msg): class MQPublisher(MQBase): - def __init__(self, port=None, udp=False, multiple_connections=False, - other_options=""): + def __init__( + self, port=None, udp=False, multiple_connections=False, other_options="" + ): """ MQ publisher. 
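The .format()-to-lazy-argument conversion applied to _monitor_message above is not purely cosmetic: passing the value as a separate argument lets the logging module defer interpolation until a handler actually emits the record. A minimal standalone comparison, reusing the message text from the hunk above (the sample value "APPROVE" is illustrative):

    import logging

    LOG_JOB = logging.getLogger("avocado.test")
    message = "APPROVE"
    # Eager: the string is built even if INFO records are filtered out.
    LOG_JOB.info('The message "{}" has been monitored.'.format(message))
    # Lazy: interpolation happens inside logging, only when the record is emitted.
    LOG_JOB.info('The message "%s" has been monitored.', message)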
@@ -90,32 +92,32 @@ def __init__(self, port=None, udp=False, multiple_connections=False, :param multiple_connections: Accept multiple connections in listen mode. :param other_options: extra options for the server. """ - cmd_options = ['nc', '-l'] - port and cmd_options.append('-p ' + str(port)) - udp and cmd_options.append('--udp') - multiple_connections and cmd_options.append('--keep-open') + cmd_options = ["nc", "-l"] + port and cmd_options.append("-p " + str(port)) + udp and cmd_options.append("--udp") + multiple_connections and cmd_options.append("--keep-open") cmd_options.append(other_options) - super(MQPublisher, self).__init__(' '.join(cmd_options)) + super().__init__(" ".join(cmd_options)) def confirm_access(self, timeout=DEFAULT_MONITOR_TIMEOUT): - self._monitor_message('ACCESS', timeout) - self._confirm_message('ACCESS') + self._monitor_message("ACCESS", timeout) + self._confirm_message("ACCESS") def approve(self, timeout=DEFAULT_MONITOR_TIMEOUT): - self.sendline('APPROVE') - self._monitor_confirm_message('APPROVE', timeout) + self.sendline("APPROVE") + self._monitor_confirm_message("APPROVE", timeout) def notify(self, timeout=DEFAULT_MONITOR_TIMEOUT): - self.sendline('NOTIFY') - self._monitor_confirm_message('NOTIFY', timeout) + self.sendline("NOTIFY") + self._monitor_confirm_message("NOTIFY", timeout) def alert(self, timeout=DEFAULT_MONITOR_TIMEOUT): - self.sendline('ALERT') - self._monitor_confirm_message('ALERT', timeout) + self.sendline("ALERT") + self._monitor_confirm_message("ALERT", timeout) def refuse(self, timeout=DEFAULT_MONITOR_TIMEOUT): - self.sendline('REFUSE') - self._monitor_message('REFUSE', timeout) + self.sendline("REFUSE") + self._monitor_message("REFUSE", timeout) class MQSubscriber(MQBase): @@ -127,35 +129,35 @@ def __init__(self, server_address, port=None, udp=False): :param port: The listening port of the MQ server. :param udp: Use UDP instead of default TCP. """ - cmd_options = ['nc', server_address] + cmd_options = ["nc", server_address] port and cmd_options.append(str(port)) - udp and cmd_options.append('--udp') - super(MQSubscriber, self).__init__(' '.join(cmd_options)) + udp and cmd_options.append("--udp") + super().__init__(" ".join(cmd_options)) self._access() def _access(self): - self.sendline('ACCESS') - self._monitor_confirm_message('ACCESS') + self.sendline("ACCESS") + self._monitor_confirm_message("ACCESS") def confirm_approve(self): - self._confirm_message('APPROVE') + self._confirm_message("APPROVE") def confirm_notify(self): - self._confirm_message('NOTIFY') + self._confirm_message("NOTIFY") def confirm_alert(self): - self._confirm_message('ALERT') + self._confirm_message("ALERT") def confirm_refuse(self): - self._confirm_message('REFUSE') + self._confirm_message("REFUSE") def receive_event(self, timeout=DEFAULT_MONITOR_TIMEOUT): - event_pattern = ['APPROVE', 'NOTIFY', 'ALERT', 'REFUSE'] + event_pattern = ["APPROVE", "NOTIFY", "ALERT", "REFUSE"] try: event = self.read_until_last_line_matches(event_pattern, timeout)[1] if len(event.splitlines()) > 1: event = event.splitlines()[-1] - getattr(self, 'confirm_' + event.strip().lower())() + getattr(self, "confirm_" + event.strip().lower())() except client.ExpectTimeoutError as err: raise UnknownEventError(err.output.strip()) return event.strip() @@ -175,11 +177,11 @@ def __init__(self, server_address, port=None): :param server_address: The address of remote/local MQ server. :param port: The listening port of the MQ server. 
""" - cmd_options = ['nc', server_address] + cmd_options = ["nc", server_address] port and cmd_options.append(str(port)) self.msg_loop_flag = True self.msg_callback = {} - super(MQClient, self).__init__(' '.join(cmd_options)) + super().__init__(" ".join(cmd_options)) def match_patterns(self, lines, patterns): matches = [] @@ -215,12 +217,12 @@ def filter_msg(self, msgs=None, timeout=DEFAULT_MONITOR_TIMEOUT): if not msgs: msgs = self.msg_callback.keys() - output = self.read_until_output_matches(msgs, - lambda x: x.splitlines(), - timeout, 0.1) + output = self.read_until_output_matches( + msgs, lambda x: x.splitlines(), timeout, 0.1 + ) if output: - LOG_JOB.debug('Monitor The message "{}"'.format(output)) + LOG_JOB.debug('Monitor The message "%s"', output) return output[0] except client.ExpectTimeoutError as err: diff --git a/provider/nbd_image_export.py b/provider/nbd_image_export.py index c3eabeb2e3..8ae3619131 100644 --- a/provider/nbd_image_export.py +++ b/provider/nbd_image_export.py @@ -48,27 +48,21 @@ will be exported """ +import logging import os import signal -import logging -from avocado.utils import process from avocado.core import exceptions - -from virttest import nbd -from virttest import data_dir -from virttest import qemu_storage -from virttest import utils_misc -from virttest import qemu_devices - +from avocado.utils import process +from virttest import data_dir, nbd, qemu_devices, qemu_storage, utils_misc from virttest.qemu_monitor import MonitorNotSupportedCmdError from provider.job_utils import get_event_by_condition -LOG_JOB = logging.getLogger('avocado.test') +LOG_JOB = logging.getLogger("avocado.test") -class NBDExportImage(object): +class NBDExportImage: """NBD local image export base class""" def __init__(self, params, local_image): @@ -83,19 +77,19 @@ def __init__(self, params, local_image): def create_image(self): result = None - if self._image_params.get('create_image_cmd'): - result = process.run(self._image_params['create_image_cmd'], - ignore_status=True, shell=True) + if self._image_params.get("create_image_cmd"): + result = process.run( + self._image_params["create_image_cmd"], ignore_status=True, shell=True + ) elif not self._image_params.get_boolean("force_create_image"): _, result = qemu_storage.QemuImg( - self._image_params, - data_dir.get_data_dir(), - self._tag + self._image_params, data_dir.get_data_dir(), self._tag ).create(self._image_params) if result and result.exit_status != 0: - raise exceptions.TestFail('Failed to create image, error: %s' - % result.stderr.decode()) + raise exceptions.TestFail( + f"Failed to create image, error: {result.stderr.decode()}" + ) def export_image(self): raise NotImplementedError() @@ -108,30 +102,33 @@ class QemuNBDExportImage(NBDExportImage): """Export local image with qemu-nbd command""" def __init__(self, params, local_image): - super(QemuNBDExportImage, self).__init__(params, local_image) + super().__init__(params, local_image) self._qemu_nbd = utils_misc.get_qemu_nbd_binary(self._params) - filename_repr = 'json' if self._image_params.get( - 'nbd_export_format') == 'luks' else 'filename' + filename_repr = ( + "json" + if self._image_params.get("nbd_export_format") == "luks" + else "filename" + ) self._local_filename = qemu_storage.get_image_repr( - self._tag, self._image_params, - data_dir.get_data_dir(), filename_repr) + self._tag, self._image_params, data_dir.get_data_dir(), filename_repr + ) self._nbd_server_pid = None def export_image(self): LOG_JOB.info("Export image with qemu-nbd") - self._nbd_server_pid = 
nbd.export_image(self._qemu_nbd, - self._local_filename, - self._tag, self._image_params) + self._nbd_server_pid = nbd.export_image( + self._qemu_nbd, self._local_filename, self._tag, self._image_params + ) if self._nbd_server_pid is None: - raise exceptions.TestFail('Failed to export image') + raise exceptions.TestFail("Failed to export image") def list_exported_image(self, nbd_image, nbd_image_params): LOG_JOB.info("List the nbd image with qemu-nbd") - result = nbd.list_exported_image(self._qemu_nbd, nbd_image, - nbd_image_params) + result = nbd.list_exported_image(self._qemu_nbd, nbd_image, nbd_image_params) if result.exit_status != 0: - raise exceptions.TestFail('Failed to list nbd image: %s' - % result.stderr.decode()) + raise exceptions.TestFail( + f"Failed to list nbd image: {result.stderr.decode()}" + ) def stop_export(self): if self._nbd_server_pid is not None: @@ -139,8 +136,7 @@ def stop_export(self): # when qemu-nbd crashes unexpectedly, we can handle it os.kill(self._nbd_server_pid, signal.SIGKILL) except Exception as e: - LOG_JOB.warn("Error occurred when killing nbd server: %s", - str(e)) + LOG_JOB.warning("Error occurred when killing nbd server: %s", str(e)) finally: self._nbd_server_pid = None @@ -150,8 +146,9 @@ def suspend_export(self): try: os.kill(self._nbd_server_pid, signal.SIGSTOP) except Exception as e: - LOG_JOB.warning("Error occurred when suspending" - "nbd server: %s", str(e)) + LOG_JOB.warning( + "Error occurred when suspending" "nbd server: %s", str(e) + ) def resume_export(self): if self._nbd_server_pid is not None: @@ -159,15 +156,14 @@ def resume_export(self): try: os.kill(self._nbd_server_pid, signal.SIGCONT) except Exception as e: - LOG_JOB.warning("Error occurred when resuming nbd server: %s", - str(e)) + LOG_JOB.warning("Error occurred when resuming nbd server: %s", str(e)) class InternalNBDExportImage(NBDExportImage): """Export image with qemu internal nbd server""" def __init__(self, vm, params, local_image): - super(InternalNBDExportImage, self).__init__(params, local_image) + super().__init__(params, local_image) self._tls_creds_id = None self._node_name = None self._image_devices = None @@ -176,14 +172,17 @@ def __init__(self, vm, params, local_image): def get_export_name(self): """export name is the node name if nbd_export_name is not set""" - return self._image_params['nbd_export_name'] if self._image_params.get( - 'nbd_export_name') else self._node_name + return ( + self._image_params["nbd_export_name"] + if self._image_params.get("nbd_export_name") + else self._node_name + ) def hotplug_image(self): """Hotplug the image to be exported""" - devices = self._vm.devices.images_define_by_params(self._tag, - self._image_params, - 'disk') + devices = self._vm.devices.images_define_by_params( + self._tag, self._image_params, "disk" + ) # Only hotplug protocol and format node and the related objects devices.pop() @@ -194,35 +193,38 @@ def hotplug_image(self): for dev in devices: ret = self._vm.devices.simple_hotplug(dev, self._vm.monitor) if not ret[1]: - raise exceptions.TestFail("Failed to hotplug device '%s': %s." - % (dev, ret[0])) + raise exceptions.TestFail( + f"Failed to hotplug device '{dev}': {ret[0]}." 
+ ) def hotplug_tls(self): """Hotplug tls creds object for nbd server""" - if self._image_params.get('nbd_unix_socket'): - LOG_JOB.info('TLS is only supported with IP') - elif self._image_params.get('nbd_server_tls_creds'): + if self._image_params.get("nbd_unix_socket"): + LOG_JOB.info("TLS is only supported with IP") + elif self._image_params.get("nbd_server_tls_creds"): LOG_JOB.info("Plug server tls creds device") - self._tls_creds_id = '%s_server_tls_creds' % self._tag - dev = qemu_devices.qdevices.QObject('tls-creds-x509') + self._tls_creds_id = f"{self._tag}_server_tls_creds" + dev = qemu_devices.qdevices.QObject("tls-creds-x509") dev.set_param("id", self._tls_creds_id) dev.set_param("endpoint", "server") - dev.set_param("dir", self._image_params['nbd_server_tls_creds']) + dev.set_param("dir", self._image_params["nbd_server_tls_creds"]) ret = self._vm.devices.simple_hotplug(dev, self._vm.monitor) if not ret[1]: - raise exceptions.TestFail("Failed to hotplug device '%s': %s." - % (dev, ret[0])) + raise exceptions.TestFail( + f"Failed to hotplug device '{dev}': {ret[0]}." + ) def start_nbd_server(self): """Start internal nbd server""" - server = { - 'type': 'unix', - 'path': self._image_params['nbd_unix_socket'] - } if self._image_params.get('nbd_unix_socket') else { - 'type': 'inet', - 'host': '0.0.0.0', - 'port': self._image_params.get('nbd_port', '10809') - } + server = ( + {"type": "unix", "path": self._image_params["nbd_unix_socket"]} + if self._image_params.get("nbd_unix_socket") + else { + "type": "inet", + "host": "0.0.0.0", + "port": self._image_params.get("nbd_port", "10809"), + } + ) LOG_JOB.info("Start internal nbd server") return self._vm.monitor.nbd_server_start(server, self._tls_creds_id) @@ -230,39 +232,52 @@ def start_nbd_server(self): def _block_export_add(self): # block export arguments self._export_uid = self._image_params.get( - 'block_export_uid', 'block_export_%s' % self._node_name) - iothread = self._image_params.get('block_export_iothread') - writethrough = self._image_params['block_export_writethrough'] == 'yes' \ - if self._image_params.get('block_export_writethrough') else None - fixed = self._image_params['block_export_fixed_iothread'] == 'yes' \ - if self._image_params.get('block_export_fixed_iothread') else None + "block_export_uid", f"block_export_{self._node_name}" + ) + iothread = self._image_params.get("block_export_iothread") + writethrough = ( + self._image_params["block_export_writethrough"] == "yes" + if self._image_params.get("block_export_writethrough") + else None + ) + fixed = ( + self._image_params["block_export_fixed_iothread"] == "yes" + if self._image_params.get("block_export_fixed_iothread") + else None + ) # to be compatible with the original test cases using nbd-server-add export_writable = self._image_params.get( - 'block_export_writable', - self._image_params.get('nbd_export_writable') + "block_export_writable", self._image_params.get("nbd_export_writable") ) - writable = export_writable == 'yes' if export_writable else None + writable = export_writable == "yes" if export_writable else None # nbd specified arguments kw = { - 'name': self._image_params.get('nbd_export_name'), - 'description': self._image_params.get('nbd_export_description') + "name": self._image_params.get("nbd_export_name"), + "description": self._image_params.get("nbd_export_description"), } - if self._image_params.get('nbd_export_bitmaps') is not None: - kw['bitmaps'] = self._image_params.objects('nbd_export_bitmaps') - if 
self._image_params.get('nbd_allocation_exported') is not None: - kw['allocation-depth'] = self._image_params['nbd_allocation_exported'] == 'yes' + if self._image_params.get("nbd_export_bitmaps") is not None: + kw["bitmaps"] = self._image_params.objects("nbd_export_bitmaps") + if self._image_params.get("nbd_allocation_exported") is not None: + kw["allocation-depth"] = ( + self._image_params["nbd_allocation_exported"] == "yes" + ) - return self._vm.monitor.block_export_add(self._export_uid, 'nbd', - self._node_name, iothread, - fixed, writable, writethrough, - **kw) + return self._vm.monitor.block_export_add( + self._export_uid, + "nbd", + self._node_name, + iothread, + fixed, + writable, + writethrough, + **kw, + ) def _block_export_del(self): return self._vm.monitor.block_export_del( - self._export_uid, - self._image_params.get('block_export_remove_mode') + self._export_uid, self._image_params.get("block_export_remove_mode") ) def wait_till_export_removed(self): @@ -272,14 +287,13 @@ def wait_till_export_removed(self): emitted when a block export is removed and its id can be reused. """ if self._export_uid is not None: - cond = {'id': self._export_uid} - tmo = self._image_params.get_numeric('block_export_del_timeout', - 60) - event = get_event_by_condition(self._vm, 'BLOCK_EXPORT_DELETED', - tmo, **cond) + cond = {"id": self._export_uid} + tmo = self._image_params.get_numeric("block_export_del_timeout", 60) + event = get_event_by_condition( + self._vm, "BLOCK_EXPORT_DELETED", tmo, **cond + ) if event is None: - raise exceptions.TestFail( - 'Failed to receive BLOCK_EXPORT_DELETED') + raise exceptions.TestFail("Failed to receive BLOCK_EXPORT_DELETED") self._export_uid = None def add_nbd_image(self, node_name=None): @@ -299,9 +313,9 @@ def add_nbd_image(self, node_name=None): self._export_uid = None return self._vm.monitor.nbd_server_add( self._node_name, - self._image_params.get('nbd_export_name'), - self._image_params.get('nbd_export_writable'), - self._image_params.get('nbd_export_bitmaps') + self._image_params.get("nbd_export_name"), + self._image_params.get("nbd_export_writable"), + self._image_params.get("nbd_export_bitmaps"), ) def remove_nbd_image(self): @@ -311,8 +325,7 @@ def remove_nbd_image(self): return self._block_export_del() except MonitorNotSupportedCmdError: return self._vm.monitor.nbd_server_remove( - self.get_export_name(), - self._image_params.get('nbd_remove_mode') + self.get_export_name(), self._image_params.get("nbd_remove_mode") ) def stop_nbd_server(self): @@ -336,5 +349,5 @@ def stop_export(self): def query_nbd_export(self): """Get the nbd export info""" exports = self._vm.monitor.query_block_exports() - nbd_exports = [e for e in exports if e['id'] == self._export_uid] + nbd_exports = [e for e in exports if e["id"] == self._export_uid] return nbd_exports[0] if nbd_exports else None diff --git a/provider/netperf.py b/provider/netperf.py index 664c11a65a..1181a14551 100644 --- a/provider/netperf.py +++ b/provider/netperf.py @@ -1,16 +1,12 @@ import logging import os -from virttest import data_dir -from virttest import utils_misc -from virttest import utils_net -from virttest import utils_netperf +from virttest import data_dir, utils_misc, utils_net, utils_netperf -LOG_JOB = logging.getLogger('avocado.test') +LOG_JOB = logging.getLogger("avocado.test") -class NetperfTest(object): - +class NetperfTest: def __init__(self, params, vm): self.params = params self.vm = vm @@ -24,17 +20,20 @@ def netperf_client(self): self.client = utils_netperf.NetperfClient( 
self.vm.get_address(), self.params.get("netperf_client_path"), - netperf_source=os.path.join(data_dir.get_deps_dir("netperf"), - self.params.get("netperf_client_bin")), + netperf_source=os.path.join( + data_dir.get_deps_dir("netperf"), self.params.get("netperf_client_bin") + ), client=self.params.get("shell_client"), port=self.params.get("shell_port"), username=self.params.get("username"), password=self.params.get("password"), prompt=self.params.get("shell_prompt"), - linesep=self.params.get("shell_linesep", "\n").encode().decode( - 'unicode_escape'), + linesep=self.params.get("shell_linesep", "\n") + .encode() + .decode("unicode_escape"), status_test_command=self.params.get("status_test_command", ""), - compile_option=self.params.get("compile_option", "")) + compile_option=self.params.get("compile_option", ""), + ) return self.client def netperf_server(self, server_ip="localhost", server_passwd=None): @@ -47,10 +46,12 @@ def netperf_server(self, server_ip="localhost", server_passwd=None): self.server = utils_netperf.NetperfServer( server_ip, self.params.get("server_path", "/var/tmp"), - netperf_source=os.path.join(data_dir.get_deps_dir("netperf"), - self.params.get("netperf_server_link")), + netperf_source=os.path.join( + data_dir.get_deps_dir("netperf"), self.params.get("netperf_server_link") + ), password=server_passwd, - compile_option=self.params.get("compile_option", "")) + compile_option=self.params.get("compile_option", ""), + ) return self.server def start_netperf_test(self): @@ -62,19 +63,21 @@ def start_netperf_test(self): netperf_output_unit = self.params.get("netperf_output_unit") extra_netperf_option = self.params.get("extra_netperf_option", "") test_protocols = self.params.get("test_protocol") - extra_netperf_option += " -l %s" % test_duration + extra_netperf_option += f" -l {test_duration}" if self.params.get("netperf_remote_cpu") == "yes": extra_netperf_option += " -C" elif self.params.get("netperf_local_cpu") == "yes": extra_netperf_option += " -c" if netperf_output_unit in "GMKgmk": - extra_netperf_option += " -f %s" % netperf_output_unit - option = "%s -t %s" % (extra_netperf_option, test_protocols) - self.client.bg_start(utils_net.get_host_ip_address(self.params), - option, - self.params.get_numeric("netperf_para_sessions"), - self.params.get("netperf_cmd_prefix", ""), - package_sizes=self.params.get("netperf_sizes")) + extra_netperf_option += f" -f {netperf_output_unit}" + option = f"{extra_netperf_option} -t {test_protocols}" + self.client.bg_start( + utils_net.get_host_ip_address(self.params), + option, + self.params.get_numeric("netperf_para_sessions"), + self.params.get("netperf_cmd_prefix", ""), + package_sizes=self.params.get("netperf_sizes"), + ) if utils_misc.wait_for(self.netperf_status, 30, 0, 5): LOG_JOB.info("Netperf test start successfully.") return True diff --git a/provider/netperf_base.py b/provider/netperf_base.py index 875db85953..2c9c2b1653 100644 --- a/provider/netperf_base.py +++ b/provider/netperf_base.py @@ -1,17 +1,11 @@ import logging import os -import six +import six from avocado.utils import process +from virttest import data_dir, error_context, remote, utils_misc, utils_test - -from virttest import utils_test -from virttest import utils_misc -from virttest import remote -from virttest import data_dir -from virttest import error_context - -LOG_JOB = logging.getLogger('avocado.test') +LOG_JOB = logging.getLogger("avocado.test") def pin_vm_threads(vm, node): @@ -36,17 +30,15 @@ def record_env_version(test, params, host, server_ctl, fd, 
test_duration): ver_cmd = params.get("ver_cmd", "rpm -q qemu-kvm") guest_ver_cmd = params.get("guest_ver_cmd", "uname -r") - test.write_test_keyval({'kvm-userspace-ver': ssh_cmd( - host, ver_cmd).strip()}) - test.write_test_keyval({'guest-kernel-ver': ssh_cmd( - server_ctl, guest_ver_cmd).strip()}) - test.write_test_keyval({'session-length': test_duration}) - fd.write('### kvm-userspace-ver : %s\n' % ssh_cmd( - host, ver_cmd).strip()) - fd.write('### guest-kernel-ver : %s\n' % ssh_cmd( - server_ctl, guest_ver_cmd).strip()) - fd.write('### kvm_version : %s\n' % os.uname()[2]) - fd.write('### session-length : %s\n' % test_duration) + test.write_test_keyval({"kvm-userspace-ver": ssh_cmd(host, ver_cmd).strip()}) + test.write_test_keyval( + {"guest-kernel-ver": ssh_cmd(server_ctl, guest_ver_cmd).strip()} + ) + test.write_test_keyval({"session-length": test_duration}) + fd.write(f"### kvm-userspace-ver : {ssh_cmd(host, ver_cmd).strip()}\n") + fd.write(f"### guest-kernel-ver : {ssh_cmd(server_ctl, guest_ver_cmd).strip()}\n") + fd.write(f"### kvm_version : {os.uname()[2]}\n") + fd.write(f"### session-length : {test_duration}\n") def env_setup(test, params, session, ip, username, shell_port, password): @@ -54,7 +46,7 @@ def env_setup(test, params, session, ip, username, shell_port, password): Prepare the test environment in server/client/host """ - error_context.context("Setup env for %s" % ip) + error_context.context(f"Setup env for {ip}") if params.get("env_setup_cmd"): ssh_cmd(session, params.get("env_setup_cmd"), ignore_status=True) @@ -64,8 +56,7 @@ def env_setup(test, params, session, ip, username, shell_port, password): ssh_cmd(session, params.get("setup_cmd")) agent_path = os.path.join(test.virtdir, "scripts/netperf_agent.py") - remote.scp_to_remote(ip, shell_port, username, password, - agent_path, "/tmp") + remote.scp_to_remote(ip, shell_port, username, password, agent_path, "/tmp") def tweak_tuned_profile(params, server_ctl, client, host): @@ -82,8 +73,7 @@ def tweak_tuned_profile(params, server_ctl, client, host): if server_tuned_profile: ssh_cmd(server_ctl, server_tuned_profile) - error_context.context("Changing tune profile of client/host", - LOG_JOB.info) + error_context.context("Changing tune profile of client/host", LOG_JOB.info) if client_tuned_profile: ssh_cmd(client, client_tuned_profile) if host_tuned_profile: @@ -99,9 +89,9 @@ def ssh_cmd(session, cmd, timeout=120, ignore_status=False): :param timeout: timeout for the command """ if session == "localhost": - o = process.system_output(cmd, timeout=timeout, - ignore_status=ignore_status, - shell=True).decode() + o = process.system_output( + cmd, timeout=timeout, ignore_status=ignore_status, shell=True + ).decode() else: o = session.cmd(cmd, timeout=timeout, ignore_all_errors=ignore_status) return o @@ -115,9 +105,9 @@ def netperf_thread(params, numa_enable, client_s, option, fname): cmd = "" if numa_enable: n = abs(int(params.get("numa_node"))) - 1 - cmd += "numactl --cpunodebind=%s --membind=%s " % (n, n) + cmd += f"numactl --cpunodebind={n} --membind={n} " cmd += option - cmd += " >> %s" % fname + cmd += f" >> {fname}" LOG_JOB.info("Start netperf thread by cmd '%s'", cmd) ssh_cmd(client_s, cmd) @@ -141,8 +131,7 @@ def format_result(result, base="17", fbase="2"): return value % result -def netperf_record(results, filter_list, header=False, - base="17", fbase="2"): +def netperf_record(results, filter_list, header=False, base="17", fbase="2"): """ Record the results in a certain format. 
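As a rough illustration of the record layout assembled by netperf_record (whose body follows in the next hunk): a pipe-separated header row plus matching value rows, each column padded by format_result. The pad() helper and the sample values below are made up for the sketch, and the exact width and precision handling driven by the base and fbase parameters is simplified.

    def pad(value, width=17, precision=2):
        # Stand-in for format_result(): right-align every column to a fixed width.
        if isinstance(value, float):
            return "%*.*f" % (width, precision, value)
        return "%*s" % (width, value)

    results = {"size": 64, "sessions": 4, "Throughput": 936.42}
    keys = list(results)
    print("|".join(pad(k) for k in keys))           # header row
    print("|".join(pad(results[k]) for k in keys))  # value row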
@@ -161,10 +150,10 @@ def netperf_record(results, filter_list, header=False, record = "" if header: for key in key_list: - record += "%s|" % format_result(key, base=base, fbase=fbase) + record += f"{format_result(key, base=base, fbase=fbase)}|" record = record.rstrip("|") record += "\n" for key in key_list: - record += "%s|" % format_result(results[key], base=base, fbase=fbase) + record += f"{format_result(results[key], base=base, fbase=fbase)}|" record = record.rstrip("|") return record, key_list diff --git a/provider/netperf_test.py b/provider/netperf_test.py index a2a7d55deb..453e12c357 100644 --- a/provider/netperf_test.py +++ b/provider/netperf_test.py @@ -2,13 +2,9 @@ import os import time -from virttest import error_context -from virttest import data_dir -from virttest import utils_misc -from virttest import utils_net -from virttest import utils_netperf +from virttest import data_dir, error_context, utils_misc, utils_net, utils_netperf -LOG_JOB = logging.getLogger('avocado.test') +LOG_JOB = logging.getLogger("avocado.test") @error_context.context_aware @@ -19,24 +15,27 @@ def netperf_stress(test, params, vm): n_client = utils_netperf.NetperfClient( vm.get_address(), params.get("client_path"), - netperf_source=os.path.join(data_dir.get_deps_dir("netperf"), - params.get("netperf_client_link")), + netperf_source=os.path.join( + data_dir.get_deps_dir("netperf"), params.get("netperf_client_link") + ), client=params.get("shell_client"), port=params.get("shell_port"), username=params.get("username"), password=params.get("password"), prompt=params.get("shell_prompt"), - linesep=params.get("shell_linesep", "\n").encode().decode( - 'unicode_escape'), + linesep=params.get("shell_linesep", "\n").encode().decode("unicode_escape"), status_test_command=params.get("status_test_command", ""), - compile_option=params.get("compile_option", "")) + compile_option=params.get("compile_option", ""), + ) n_server = utils_netperf.NetperfServer( utils_net.get_host_ip_address(params), params.get("server_path", "/var/tmp"), - netperf_source=os.path.join(data_dir.get_deps_dir("netperf"), - params.get("netperf_server_link")), + netperf_source=os.path.join( + data_dir.get_deps_dir("netperf"), params.get("netperf_server_link") + ), password=params.get("hostpassword"), - compile_option=params.get("compile_option", "")) + compile_option=params.get("compile_option", ""), + ) try: n_server.start() @@ -46,21 +45,24 @@ def netperf_stress(test, params, vm): test_protocols = params.get("test_protocol") netperf_output_unit = params.get("netperf_output_unit") test_option = params.get("test_option", "") - test_option += " -l %s" % test_duration + test_option += f" -l {test_duration}" if params.get("netperf_remote_cpu") == "yes": test_option += " -C" if params.get("netperf_local_cpu") == "yes": test_option += " -c" if netperf_output_unit in "GMKgmk": - test_option += " -f %s" % netperf_output_unit - t_option = "%s -t %s" % (test_option, test_protocols) - n_client.bg_start(utils_net.get_host_ip_address(params), - t_option, - params.get_numeric("netperf_para_sessions"), - params.get("netperf_cmd_prefix", ""), - package_sizes=params.get("netperf_sizes")) - if utils_misc.wait_for(n_client.is_netperf_running, 10, 0, 3, - "Wait netperf test start"): + test_option += f" -f {netperf_output_unit}" + t_option = f"{test_option} -t {test_protocols}" + n_client.bg_start( + utils_net.get_host_ip_address(params), + t_option, + params.get_numeric("netperf_para_sessions"), + params.get("netperf_cmd_prefix", ""), + 
package_sizes=params.get("netperf_sizes"), + ) + if utils_misc.wait_for( + n_client.is_netperf_running, 10, 0, 3, "Wait netperf test start" + ): LOG_JOB.info("Netperf test start successfully.") else: test.error("Can not start netperf client.") diff --git a/provider/pktgen_utils.py b/provider/pktgen_utils.py index bf2373130b..b8a213d463 100644 --- a/provider/pktgen_utils.py +++ b/provider/pktgen_utils.py @@ -1,17 +1,14 @@ import logging import os import re -import six -import aexpect import time +import aexpect +import six from avocado.utils import process +from virttest import data_dir, utils_misc, utils_net -from virttest import data_dir -from virttest import utils_net -from virttest import utils_misc - -LOG_JOB = logging.getLogger('avocado.test') +LOG_JOB = logging.getLogger("avocado.test") class PktgenConfig: @@ -23,8 +20,9 @@ def __init__(self, interface=None, dsc=None, runner=None): def vp_vdpa_bind(self, session_serial): try: LOG_JOB.info("Starting the binding process for Virtio 1.0 network devices.") - pci_list = utils_misc.get_pci_id_using_filter("Virtio 1.0 network", - session_serial) + pci_list = utils_misc.get_pci_id_using_filter( + "Virtio 1.0 network", session_serial + ) if not pci_list: raise ValueError("No PCI devices found matching 'Virtio 1.0 network'.") @@ -40,7 +38,7 @@ def vp_vdpa_bind(self, session_serial): LOG_JOB.debug("Device %s bound to vp-vdpa driver", pci_id) # Add vDPA device - vdpa_cmd = "vdpa dev add name vdpa0 mgmtdev pci/%s" % pci_id + vdpa_cmd = f"vdpa dev add name vdpa0 mgmtdev pci/{pci_id}" session_serial.cmd(vdpa_cmd) LOG_JOB.debug("vDPA device added successfully") @@ -48,14 +46,23 @@ def vp_vdpa_bind(self, session_serial): cmd = "vdpa dev list" output = session_serial.cmd_output_safe(cmd) if "vdpa0" not in output: - raise ValueError("vDPA device 'vdpa0' not found in 'vdpa dev list' output") + raise ValueError( + "vDPA device 'vdpa0' not found in 'vdpa dev list' output" + ) except Exception as err: LOG_JOB.error("Error during vDPA binding process: %s", err) - def configure_pktgen(self, params, script, pkt_cate, test_vm, - vm=None, session_serial=None, interface=None): - local_path = os.path.join(data_dir.get_shared_dir(), - "scripts/pktgen_perf") + def configure_pktgen( + self, + params, + script, + pkt_cate, + test_vm, + vm=None, + session_serial=None, + interface=None, + ): + local_path = os.path.join(data_dir.get_shared_dir(), "scripts/pktgen_perf") remote_path = "/tmp/" if pkt_cate == "tx": @@ -68,9 +75,9 @@ def configure_pktgen(self, params, script, pkt_cate, test_vm, self.runner = session_serial.cmd elif pkt_cate == "rx": LOG_JOB.info("test guest rx pps performance") - process.run("cp -r %s %s" % (local_path, remote_path)) + process.run(f"cp -r {local_path} {remote_path}") host_bridge = params.get("netdst", "switch") - host_nic = utils_net.Interface(host_bridge) + utils_net.Interface(host_bridge) if script == "pktgen_perf": self.dsc = vm.wait_for_get_address(0, timeout=5) else: @@ -87,26 +94,35 @@ def configure_pktgen(self, params, script, pkt_cate, test_vm, self.runner = session_serial.cmd elif not test_vm: LOG_JOB.info("test loopback pps performance on host") - process.run("cp -r %s %s" % (local_path, remote_path)) + process.run(f"cp -r {local_path} {remote_path}") self.interface = interface self.dsc = params.get("mac") self.runner = process.system_output return self - def generate_pktgen_cmd(self, script, pkt_cate, interface, dsc, - threads, size, burst, session_serial=None): - script_path = "/tmp/pktgen_perf/%s.sh" % script + def 
generate_pktgen_cmd( + self, + script, + pkt_cate, + interface, + dsc, + threads, + size, + burst, + session_serial=None, + ): + script_path = f"/tmp/pktgen_perf/{script}.sh" if script in "pktgen_perf": - dsc_option = '-m' if pkt_cate == 'tx' else '-d' - cmd = "%s -i %s %s %s -t %s -s %s" % ( - script_path, interface, dsc_option, dsc, threads, size) + dsc_option = "-m" if pkt_cate == "tx" else "-d" + cmd = f"{script_path} -i {interface} {dsc_option} {dsc} -t {threads} -s {size}" else: - cmd = "%s -i %s -m %s -n 0 -t %s -s %s -b %s -c 0" % ( - script_path, interface, dsc, threads, size, burst) + cmd = f"{script_path} -i {interface} -m {dsc} -n 0 -t {threads} -s {size} -b {burst} -c 0" - if session_serial and \ - hasattr(self.runner, '__name__') and \ - self.runner.__name__ == session_serial.cmd.__name__: + if ( + session_serial + and hasattr(self.runner, "__name__") + and self.runner.__name__ == session_serial.cmd.__name__ + ): cmd += " &" return cmd @@ -125,7 +141,7 @@ def run_test(self, script, cmd, runner, interface, timeout): :return: The calculated MPPS (Million Packets Per Second) """ - packets = "cat /sys/class/net/%s/statistics/tx_packets" % interface + packets = f"cat /sys/class/net/{interface}/statistics/tx_packets" LOG_JOB.info("Start pktgen test by cmd '%s'", cmd) try: packet_b = runner(packets) @@ -134,8 +150,10 @@ def run_test(self, script, cmd, runner, interface, timeout): except aexpect.ShellTimeoutError: # when pktgen script is running on guest, the pktgen process # need to be killed. - kill_cmd = "kill -9 `ps -ef | grep %s --color | grep -v grep | "\ - "awk '{print $2}'`" % script + kill_cmd = ( + f"kill -9 `ps -ef | grep {script} --color | grep -v grep | " + "awk '{print $2}'`" + ) runner(kill_cmd) packet_a = runner(packets) except process.CmdError: @@ -148,28 +166,28 @@ def run_test(self, script, cmd, runner, interface, timeout): # convert pps to mpps power = 10**6 mpps_results = float(pps_results) / float(power) - mpps_results = "%.2f" % mpps_results + mpps_results = f"{mpps_results:.2f}" return mpps_results def install_package(self, ver, vm=None, session_serial=None): """Check module pktgen, install kernel-modules-internal package""" output_cmd = process.getoutput - kernel_ver = "kernel-modules-internal-%s" % ver - cmd_download = "cd /tmp && brew download-build %s --rpm" % kernel_ver - cmd_install = "cd /tmp && rpm -ivh %s.rpm --force --nodeps" % kernel_ver + kernel_ver = f"kernel-modules-internal-{ver}" + cmd_download = f"cd /tmp && brew download-build {kernel_ver} --rpm" + cmd_install = f"cd /tmp && rpm -ivh {kernel_ver}.rpm --force --nodeps" output_cmd(cmd_download) - cmd_clean = "rm -rf /tmp/%s.rpm" % kernel_ver + cmd_clean = f"rm -rf /tmp/{kernel_ver}.rpm" if session_serial: output_cmd = session_serial.cmd_output - local_path = "/tmp/%s.rpm" % kernel_ver + local_path = f"/tmp/{kernel_ver}.rpm" remote_path = "/tmp/" vm.copy_files_to(local_path, remote_path) output_cmd(cmd_install) output_cmd(cmd_clean) def is_version_lt_rhel7(self, uname_str): - ver = re.findall('el(\\d+)', uname_str) + ver = re.findall("el(\\d+)", uname_str) if ver: return int(ver[0]) > 7 return False @@ -194,7 +212,15 @@ def format_result(result, base, fbase): return value % result -def run_tests_for_category(params, result_file, test_vm=None, vm=None, session_serial=None, vp_vdpa=None, interface=None): +def run_tests_for_category( + params, + result_file, + test_vm=None, + vm=None, + session_serial=None, + vp_vdpa=None, + interface=None, +): """ Run Pktgen tests for a specific category. 
:param params: Dictionary with the test parameters @@ -207,18 +233,18 @@ def run_tests_for_category(params, result_file, test_vm=None, vm=None, session_s timeout = float(params.get("pktgen_test_timeout", "240")) category = params.get("category") - pkt_size = params.get("pkt_size") - run_threads = params.get("pktgen_threads") + params.get("pkt_size") + params.get("pktgen_threads") burst = params.get("burst") record_list = params.get("record_list") - pktgen_script = params.get('pktgen_script') + pktgen_script = params.get("pktgen_script") base = params.get("format_base", "12") fbase = params.get("format_fbase", "2") # get record_list record_line = "" for record in record_list.split(): - record_line += "%s|" % format_result(record, base, fbase) + record_line += f"{format_result(record, base, fbase)}|" pktgen_config = PktgenConfig() pktgen_runner = PktgenRunner() @@ -227,41 +253,98 @@ def run_tests_for_category(params, result_file, test_vm=None, vm=None, session_s for script in pktgen_script.split(): for pkt_cate in category.split(): - result_file.write("Script:%s " % script) - result_file.write("Category:%s\n" % pkt_cate) - result_file.write("%s\n" % record_line.rstrip("|")) + result_file.write(f"Script:{script} ") + result_file.write(f"Category:{pkt_cate}\n") + result_file.write("{}\n".format(record_line.rstrip("|"))) for size in params.get("pkt_size", "").split(): for threads in params.get("pktgen_threads", "").split(): for burst in params.get("burst", "").split(): if pkt_cate != "loopback": pktgen_config = pktgen_config.configure_pktgen( - params, script, pkt_cate, test_vm, vm, session_serial) + params, script, pkt_cate, test_vm, vm, session_serial + ) exec_cmd = pktgen_config.generate_pktgen_cmd( - script, pkt_cate, pktgen_config.interface, pktgen_config.dsc, - threads, size, burst, session_serial) + script, + pkt_cate, + pktgen_config.interface, + pktgen_config.dsc, + threads, + size, + burst, + session_serial, + ) else: if not test_vm: pktgen_config = pktgen_config.configure_pktgen( - params, script, pkt_cate, test_vm, interface=interface) + params, + script, + pkt_cate, + test_vm, + interface=interface, + ) exec_cmd = pktgen_config.generate_pktgen_cmd( - script, pkt_cate, pktgen_config.interface, pktgen_config.dsc, - threads, size, burst) + script, + pkt_cate, + pktgen_config.interface, + pktgen_config.dsc, + threads, + size, + burst, + ) else: pktgen_config = pktgen_config.configure_pktgen( - params, script, pkt_cate, test_vm, vm, session_serial) + params, + script, + pkt_cate, + test_vm, + vm, + session_serial, + ) exec_cmd = pktgen_config.generate_pktgen_cmd( - script, pkt_cate, pktgen_config.interface, pktgen_config.dsc, - threads, size, burst, session_serial) + script, + pkt_cate, + pktgen_config.interface, + pktgen_config.dsc, + threads, + size, + burst, + session_serial, + ) pkt_cate_r = pktgen_runner.run_test( - script, exec_cmd, pktgen_config.runner, pktgen_config.interface, timeout) - - line = "%s|" % format_result( - size, params.get("format_base", "12"), params.get("format_fbase", "2")) - line += "%s|" % format_result( - threads, params.get("format_base", "12"), params.get("format_fbase", "2")) - line += "%s|" % format_result( - burst, params.get("format_base", "12"), params.get("format_fbase", "2")) - line += "%s" % format_result( - pkt_cate_r, params.get("format_base", "12"), params.get("format_fbase", "2")) - result_file.write(("%s\n" % line)) + script, + exec_cmd, + pktgen_config.runner, + pktgen_config.interface, + timeout, + ) + + line = "{}|".format( + format_result( 
+ size, + params.get("format_base", "12"), + params.get("format_fbase", "2"), + ) + ) + line += "{}|".format( + format_result( + threads, + params.get("format_base", "12"), + params.get("format_fbase", "2"), + ) + ) + line += "{}|".format( + format_result( + burst, + params.get("format_base", "12"), + params.get("format_fbase", "2"), + ) + ) + line += "{}".format( + format_result( + pkt_cate_r, + params.get("format_base", "12"), + params.get("format_fbase", "2"), + ) + ) + result_file.write(f"{line}\n") diff --git a/provider/qemu_img_utils.py b/provider/qemu_img_utils.py index 6706e1602e..b918a800d8 100644 --- a/provider/qemu_img_utils.py +++ b/provider/qemu_img_utils.py @@ -1,16 +1,14 @@ """qemu-img related functions.""" -import avocado + import contextlib import logging import tempfile -from avocado.utils import path -from virttest import env_process -from virttest import utils_misc - -from avocado.utils import process +import avocado +from avocado.utils import path, process +from virttest import env_process, utils_misc -LOG_JOB = logging.getLogger('avocado.test') +LOG_JOB = logging.getLogger("avocado.test") def boot_vm_with_images(test, params, env, images=None, vm_name=None): @@ -48,22 +46,23 @@ def save_random_file_to_vm(vm, save_path, count, sync_bin, blocksize=512): sync_bin = utils_misc.set_winutils_letter(session, sync_bin) status, out = session.cmd_status_output(sync_bin, timeout=240) if status: - raise EnvironmentError("Fail to execute %s: %s" % (sync_bin, out)) + raise OSError(f"Fail to execute {sync_bin}: {out}") session.close() @avocado.fail_on(exceptions=(ValueError,)) def check_md5sum(filepath, md5sum_bin, session, md5_value_to_check=None): """Check md5sum value of the file specified.""" - md5cmd = "%s %s" % (md5sum_bin, filepath) + md5cmd = f"{md5sum_bin} {filepath}" status, out = session.cmd_status_output(md5cmd, timeout=240) if status: - raise EnvironmentError("Fail to get md5 value of file: %s" % filepath) + raise OSError(f"Fail to get md5 value of file: {filepath}") md5_value = out.split()[0] LOG_JOB.debug("md5sum value of %s: %s", filepath, md5_value) if md5_value_to_check and md5_value != md5_value_to_check: - raise ValueError("md5 values mismatch, got: %s, expected: %s" % - (md5_value, md5_value_to_check)) + raise ValueError( + f"md5 values mismatch, got: {md5_value}, expected: {md5_value_to_check}" + ) return md5_value diff --git a/provider/qsd.py b/provider/qsd.py index ca19b5bd16..bb506ad46d 100644 --- a/provider/qsd.py +++ b/provider/qsd.py @@ -39,47 +39,48 @@ import copy import json import logging +import os import re import signal import subprocess -import os - from enum import Enum, auto + from avocado.utils import process -from virttest.utils_params import Params +from virttest import data_dir, qemu_monitor, qemu_storage, utils_misc +from virttest.qemu_capabilities import Capabilities from virttest.qemu_devices import qdevices from virttest.qemu_devices.qdevices import QDaemonDev, QUnixSocketBus -from virttest import data_dir, utils_misc, qemu_monitor, qemu_storage -from virttest.qemu_capabilities import Capabilities from virttest.storage import get_image_filename +from virttest.utils_params import Params -LOG_JOB = logging.getLogger('avocado.test') +LOG_JOB = logging.getLogger("avocado.test") class Flags(Enum): - """ Enumerate the flags of QSD capabilities. 
""" + """Enumerate the flags of QSD capabilities.""" + DAEMONIZE = auto() PIDFILE = auto() class QsdError(Exception): """Generic QSD Error.""" + pass def add_vubp_into_boot(img_name, params, addr=15, opts=""): - """ Add vhost-user-blk-pci device into boot command line """ + """Add vhost-user-blk-pci device into boot command line""" devs = create_vubp_devices(None, img_name, params) cmd = "" for dev in devs: - cmd += " %s" % dev.cmdline() + cmd += f" {dev.cmdline()}" if cmd: machine_type = params.get("machine_type", "") if machine_type.startswith("q35") or machine_type.startswith("arm64"): busid = "pcie_vubp_root_port_%d" % addr - bus = "-device pcie-root-port,id=%s,bus=pcie.0,addr=%d " % ( - busid, addr) - cmd = bus + cmd + ",bus=%s" % busid + bus = "-device pcie-root-port,id=%s,bus=pcie.0,addr=%d " % (busid, addr) + cmd = bus + cmd + f",bus={busid}" elif "i440fx" in machine_type or machine_type == "pc": cmd += ",bus=pci.0,addr=%d" % addr @@ -91,8 +92,8 @@ def add_vubp_into_boot(img_name, params, addr=15, opts=""): def get_qsd_name_by_image(img_name, params): - """Get QSD name by image """ - if params.get("drive_format_%s" % img_name) != "vhost-user-blk-pci": + """Get QSD name by image""" + if params.get(f"drive_format_{img_name}") != "vhost-user-blk-pci": return qsd_name = "" @@ -111,17 +112,20 @@ def create_vubp_devices(vm, img_name, params, bus=None): raise QsdError("Can not find QSD") qsd_params = params.object_params(qsd_name) - qsd_basedir = qsd_params.get("qsd_basedir", - data_dir.get_data_dir() + "/qsd/%s" % qsd_name) + qsd_basedir = qsd_params.get( + "qsd_basedir", data_dir.get_data_dir() + f"/qsd/{qsd_name}" + ) - qid = "qsd_%s" % qsd_name - img_sock = "%s/%s_vhost_user_%s.sock" % (qsd_basedir, qsd_name, img_name) + qid = f"qsd_{qsd_name}" + img_sock = f"{qsd_basedir}/{qsd_name}_vhost_user_{img_name}.sock" devices = [] if vm and not vm.devices.get_by_qid(qid): # Declare virtual QSD daemon - qsd = qdevices.QDaemonDev("qsd", aobject=qsd_name, - child_bus=qdevices.QUnixSocketBus(img_sock, - qsd_name)) + qsd = qdevices.QDaemonDev( + "qsd", + aobject=qsd_name, + child_bus=qdevices.QUnixSocketBus(img_sock, qsd_name), + ) vm.devices.insert(qsd) machine_type = params.get("machine_type", "") @@ -131,9 +135,9 @@ def create_vubp_devices(vm, img_name, params, bus=None): char_params = Params() char_params["backend"] = "socket" - char_params["id"] = 'char_qsd_%s' % qsd_name + char_params["id"] = f"char_qsd_{qsd_name}" char_params["path"] = img_sock - sock_bus = {'busid': img_sock} + sock_bus = {"busid": img_sock} char = qdevices.CharDevice(char_params, parent_bus=sock_bus) char.set_param("server", "off") char.set_aid(char_params["id"]) @@ -144,11 +148,8 @@ def create_vubp_devices(vm, img_name, params, bus=None): if bus is None: bus = {"type": qbus_type} - dev_params = {"id": "vubp_%s" % img_name, - "chardev": char.get_qid() - } - vubp_driver_props = json.loads( - params.get("image_vubp_props_%s" % img_name, "{}")) + dev_params = {"id": f"vubp_{img_name}", "chardev": char.get_qid()} + vubp_driver_props = json.loads(params.get(f"image_vubp_props_{img_name}", "{}")) dev_params.update(vubp_driver_props) vubp = qdevices.QDevice(qdriver, params=dev_params, parent_bus=bus) vubp.set_aid(dev_params["id"]) @@ -158,7 +159,7 @@ def create_vubp_devices(vm, img_name, params, bus=None): def plug_vubp_devices(vm, img_name, params, bus=None): - """Hotplug vhost-user-blk-pci into VM """ + """Hotplug vhost-user-blk-pci into VM""" LOG_JOB.info("Ready plug vubp image:%s", img_name) devs = create_vubp_devices(vm, 
img_name, params, bus) @@ -167,7 +168,7 @@ def plug_vubp_devices(vm, img_name, params, bus=None): def unplug_vubp_devices(vm, img_name, params, bus=None): - """Unplug vhost-user-blk-pci from VM """ + """Unplug vhost-user-blk-pci from VM""" LOG_JOB.info("Ready unplug vubp image:%s", img_name) devs = create_vubp_devices(vm, img_name, params, bus) devs = devs[::-1] @@ -180,8 +181,11 @@ def unplug_vubp_devices(vm, img_name, params, bus=None): if not dev.verify_unplug(None, vm.monitor): out = dev.unplug(vm.monitor) utils_misc.wait_for( - lambda: dev.verify_unplug(out, vm.monitor) is True, first=1, - step=5, timeout=20) + lambda: dev.verify_unplug(out, vm.monitor) is True, + first=1, + step=5, + timeout=20, + ) else: LOG_JOB.info("Ignore device %s Can not be found", dev.get_qid()) @@ -189,8 +193,13 @@ def unplug_vubp_devices(vm, img_name, params, bus=None): class QsdDaemonDev(QDaemonDev): # Default data struct of raw image data. raw_image_data = { - "protocol": {"driver": "file", "node-name": "tbd", "filename": "tbd", - "auto-read-only": True, "discard": "unmap"}, + "protocol": { + "driver": "file", + "node-name": "tbd", + "filename": "tbd", + "auto-read-only": True, + "discard": "unmap", + }, "format": {"driver": "raw", "node-name": "tbd", "file": "tbd"}, "filter": {"driver": None, "node-name": "tbd", "file": "tbd"}, "name": "", @@ -198,24 +207,23 @@ class QsdDaemonDev(QDaemonDev): "inet": {"type": "inet", "host": "0.0.0.0"}, "nbd-server": {"addr": {}}, "export": {"type": "", "id": "", "node-name": "", "writable": True}, - "image_object": None + "image_object": None, } def __init__(self, name, params): qsd_params = params.object_params(name) - basedir = qsd_params.get("qsd_basedir", - data_dir.get_data_dir() + "/qsd/%s" % name) + basedir = qsd_params.get( + "qsd_basedir", data_dir.get_data_dir() + f"/qsd/{name}" + ) if not os.path.exists(basedir): LOG_JOB.info("Create QSD basedir %s", basedir) os.makedirs(basedir) binary = qsd_params.get("qsd_binary", "/usr/bin/qemu-storage-daemon") - sock_path = qsd_params.get("qsd_sock_path", - "%s/%s_monitor.sock" % (basedir, name)) - qsd_monitor_id = "qsd_monitor_%s" % name - qid = "qsd_%s" % name - super(QsdDaemonDev, self).__init__('QSD', aobject=name, - child_bus=QUnixSocketBus(qid, qid)) + sock_path = qsd_params.get("qsd_sock_path", f"{basedir}/{name}_monitor.sock") + qsd_monitor_id = f"qsd_monitor_{name}" + qid = f"qsd_{name}" + super().__init__("QSD", aobject=name, child_bus=QUnixSocketBus(qid, qid)) self.name = name self.basedir = basedir self.monitor = None @@ -224,14 +232,12 @@ def __init__(self, name, params): self.binary = binary self.sock_path = sock_path self.qsd_monitor_id = qsd_monitor_id - self.qsd_version = process.run("%s -V" % binary, - verbose=False, - ignore_status=True, - shell=True).stdout_text.split()[2] - self.__qsd_help = process.run("%s -h" % binary, - verbose=False, - ignore_status=True, - shell=True).stdout_text + self.qsd_version = process.run( + f"{binary} -V", verbose=False, ignore_status=True, shell=True + ).stdout_text.split()[2] + self.__qsd_help = process.run( + f"{binary} -h", verbose=False, ignore_status=True, shell=True + ).stdout_text LOG_JOB.info(self.qsd_version) self.caps = Capabilities() @@ -252,7 +258,7 @@ def _remove_images(self): img["image_object"].remove() def _fulfil_image_props(self, name, params): - """Fulfil image property and prepare image file """ + """Fulfil image property and prepare image file""" img = copy.deepcopy(QsdDaemonDev.raw_image_data) img["name"] = name img["protocol"]["node-name"] = 
"prot_" + name @@ -264,42 +270,45 @@ def _fulfil_image_props(self, name, params): img["filter"]["node-name"] = "flt_" + name img["filter"]["file"] = img["format"]["node-name"] - img["protocol"].update( - json.loads(params.get("qsd_image_protocol", "{}"))) + img["protocol"].update(json.loads(params.get("qsd_image_protocol", "{}"))) img["format"].update(json.loads(params.get("qsd_image_format", "{}"))) img["filter"].update(json.loads(params.get("qsd_image_filter", "{}"))) img["export"]["id"] = "id_" + name - img["export"]["node-name"] = img["filter"]["node-name"] if \ - img["filter"]["driver"] else img["format"]["node-name"] + img["export"]["node-name"] = ( + img["filter"]["node-name"] + if img["filter"]["driver"] + else img["format"]["node-name"] + ) img["export"].update(json.loads(params.get("qsd_image_export", "{}"))) if img["export"]["type"] == "nbd": # The name is necessary. empty value to simply setting in nbd client img["export"]["name"] = "" - addr = json.loads( - params.get("qsd_image_export_nbd", '{"type":"unix"')) + addr = json.loads(params.get("qsd_image_export_nbd", '{"type":"unix"')) img[addr["type"]].update(addr) if addr["type"] == "unix": if not img[addr["type"]]["path"]: - img[addr["type"]]["path"] = "%s/%s_nbd_%s.sock" % ( - self.basedir, self.name, name) + img[addr["type"]]["path"] = ( + f"{self.basedir}/{self.name}_nbd_{name}.sock" + ) img["nbd-server"]["addr"].update(img[addr["type"]]) elif img["export"]["type"] == "vhost-user-blk": - img["unix"]["path"] = "%s/%s_vhost_user_%s.sock" % ( - self.basedir, self.name, name) + img["unix"]["path"] = f"{self.basedir}/{self.name}_vhost_user_{name}.sock" img["export"]["addr"] = img["unix"] else: - raise QsdError("Unknown export type %s " % img["export"]["type"]) + raise QsdError("Unknown export type {} ".format(img["export"]["type"])) # Prepare image image_created = False if name in params.get("images").split(): - if params.get("force_create_image") == "yes" or params.get( - "create_image") == "yes": + if ( + params.get("force_create_image") == "yes" + or params.get("create_image") == "yes" + ): image_created = True if image_created: @@ -311,7 +320,7 @@ def _fulfil_image_props(self, name, params): LOG_JOB.info("QSD ready to create image %s", name) _, result = obj.create(params) if result.exit_status != 0: - raise QsdError("Failed create image %s " % name) + raise QsdError(f"Failed create image {name} ") img["image_object"] = obj self.images.update({name: img}) @@ -322,21 +331,20 @@ def has_option(self, option): :param option: Desired option :return: Is the desired option supported by current qemu? """ - return bool(re.search(r"%s" % option, self.__qsd_help, - re.MULTILINE)) + return bool(re.search(rf"{option}", self.__qsd_help, re.MULTILINE)) def _probe_capabilities(self): - """ Probe capabilities. 
""" + """Probe capabilities.""" - if self.has_option('--daemonize'): + if self.has_option("--daemonize"): LOG_JOB.info("--daemonize") self.caps.set_flag(Flags.DAEMONIZE) - if self.has_option('--pidfile'): + if self.has_option("--pidfile"): LOG_JOB.info("--pidfile") self.caps.set_flag(Flags.PIDFILE) def get_pid(self): - """ Get QSD pid""" + """Get QSD pid""" if self.daemonize: return self.pid if self.daemon_process: @@ -346,7 +354,7 @@ def start_daemon(self): """Start the QSD daemon in background.""" params = self.qsd_params.object_params(self.name) # check exist QSD - get_pid_cmd = "ps -e ww|grep qemu-storage-d|grep %s|" % self.sock_path + get_pid_cmd = f"ps -e ww|grep qemu-storage-d|grep {self.sock_path}|" get_pid_cmd += "grep -v grep|awk '{print $1}'" pid = process.system_output(get_pid_cmd, shell=True).decode() @@ -356,14 +364,11 @@ def start_daemon(self): LOG_JOB.info("Find running QSD:%s, force killing", pid) utils_misc.kill_process_tree(int(pid), 9, timeout=60) else: - raise QsdError("Find running QSD:%s" % pid) + raise QsdError(f"Find running QSD:{pid}") # QSD monitor - qsd_cmd = '%s --chardev socket,server=on,wait=off,path=%s,id=%s' % ( - self.binary, - self.sock_path, - self.qsd_monitor_id) - qsd_cmd += ' --monitor chardev=%s,mode=control ' % self.qsd_monitor_id + qsd_cmd = f"{self.binary} --chardev socket,server=on,wait=off,path={self.sock_path},id={self.qsd_monitor_id}" + qsd_cmd += f" --monitor chardev={self.qsd_monitor_id},mode=control " # QSD raw command lines cmds = self.qsd_params.get("qsd_cmd_lines", "") @@ -376,14 +381,15 @@ def start_daemon(self): for img in qsd_imgs: params = self.qsd_params.object_params(img) img_info = self._fulfil_image_props(img, params) - qsd_cmd += " --blockdev '%s'" % json.dumps(img_info["protocol"]) - qsd_cmd += " --blockdev '%s'" % json.dumps(img_info["format"]) + qsd_cmd += " --blockdev '{}'".format(json.dumps(img_info["protocol"])) + qsd_cmd += " --blockdev '{}'".format(json.dumps(img_info["format"])) if img_info["filter"]["driver"]: - qsd_cmd += " --blockdev '%s'" % json.dumps(img_info["filter"]) + qsd_cmd += " --blockdev '{}'".format(json.dumps(img_info["filter"])) if img_info["nbd-server"]["addr"]: - qsd_cmd += " --nbd-server '%s'" % json.dumps( - img_info["nbd-server"]) - qsd_cmd += " --export '%s'" % json.dumps(img_info["export"]) + qsd_cmd += " --nbd-server '{}'".format( + json.dumps(img_info["nbd-server"]) + ) + qsd_cmd += " --export '{}'".format(json.dumps(img_info["export"])) # QSD daemonize @@ -398,13 +404,13 @@ def start_daemon(self): # QSD pidfile if params.get("qsd_enable_pidfile", "yes") == "yes": if self.check_capability(Flags.PIDFILE): - self.pidfile = "%s/%s.pid" % (self.basedir, self.name) - qsd_cmd += " --pidfile %s" % self.pidfile + self.pidfile = f"{self.basedir}/{self.name}.pid" + qsd_cmd += f" --pidfile {self.pidfile}" else: LOG_JOB.info("Ignore option --pidfile ") LOG_JOB.info(qsd_cmd.replace(" --", " \\\n --")) - self.set_param('cmd', qsd_cmd) + self.set_param("cmd", qsd_cmd) # run QSD if self.daemonize: @@ -414,13 +420,15 @@ def start_daemon(self): if qsd.returncode: raise QsdError("Failed run QSD daemonize: %d" % qsd.returncode) else: - super(QsdDaemonDev, self).start_daemon() + super().start_daemon() if not self.is_daemon_alive(): output = self.daemon_process.get_output() self.close_daemon_process() - raise QsdError("Failed to run QSD daemon: %s" % output) - LOG_JOB.info("Created QSD daemon process with parent PID %d.", - self.daemon_process.get_pid()) + raise QsdError(f"Failed to run QSD daemon: {output}") + 
LOG_JOB.info( + "Created QSD daemon process with parent PID %d.", + self.daemon_process.get_pid(), + ) pid = process.system_output(get_pid_cmd, shell=True).decode() @@ -428,10 +436,9 @@ def start_daemon(self): LOG_JOB.info("Can not Find running QSD %s ", self.name) if self.pidfile: - file_pid = process.system_output("cat %s" % self.pidfile, - shell=True).decode() + file_pid = process.system_output(f"cat {self.pidfile}", shell=True).decode() if file_pid != pid: - raise QsdError("Find mismatch pid: %s %s" % (pid, file_pid)) + raise QsdError(f"Find mismatch pid: {pid} {file_pid}") self.pid = pid @@ -441,13 +448,14 @@ def start_daemon(self): def is_daemon_alive(self): if self.daemonize: - check_pid_cmd = "ps -q %s" % self.pid + check_pid_cmd = f"ps -q {self.pid}" if self.pid: - return process.system(check_pid_cmd, shell=True, - ignore_status=True) == 0 + return ( + process.system(check_pid_cmd, shell=True, ignore_status=True) == 0 + ) return False - return super(QsdDaemonDev, self).is_daemon_alive() + return super().is_daemon_alive() def _destroy(self): # Is it already dead? @@ -466,18 +474,17 @@ def _destroy(self): except Exception as e: LOG_JOB.warning(e) if not self.is_daemon_alive(): - LOG_JOB.warning("QSD %s down during try to kill it " - "by monitor", self.name) + LOG_JOB.warning( + "QSD %s down during try to kill it " "by monitor", self.name + ) return else: # Wait for the QSD to be really dead - if utils_misc.wait_for(lambda: not self.is_daemon_alive(), - timeout=10): + if utils_misc.wait_for(lambda: not self.is_daemon_alive(), timeout=10): LOG_JOB.debug("QSD %s down (monitor)", self.name) return else: - LOG_JOB.debug("QSD %s failed to go down (monitor)", - self.name) + LOG_JOB.debug("QSD %s failed to go down (monitor)", self.name) LOG_JOB.debug("Killing QSD %s process (killing PID %s)", self.name, pid) try: @@ -485,8 +492,7 @@ def _destroy(self): utils_misc.kill_process_tree(int(pid), signal.SIGTERM, timeout=60) if self.is_daemon_alive(): LOG_JOB.debug("Ready to kill qsd:%s", pid) - utils_misc.kill_process_tree(int(pid), signal.SIGKILL, - timeout=60) + utils_misc.kill_process_tree(int(pid), signal.SIGKILL, timeout=60) LOG_JOB.debug("VM %s down (process killed)", self.name) except RuntimeError: # If all else fails, we've got a zombie... @@ -495,7 +501,7 @@ def _destroy(self): def stop_daemon(self): try: self._destroy() - super(QsdDaemonDev, self).stop_daemon() + super().stop_daemon() finally: self._remove_images() @@ -503,6 +509,6 @@ def check_capability(self, flag): return flag in self.caps def __eq__(self, other): - if super(QsdDaemonDev, self).__eq__(other): + if super().__eq__(other): return self.sock_path == other.sock_path return False diff --git a/provider/sgx.py b/provider/sgx.py index d0b30607ed..f854977c7a 100644 --- a/provider/sgx.py +++ b/provider/sgx.py @@ -1,6 +1,7 @@ """ Module for sgx relevant operations. 
""" + import logging import re @@ -8,11 +9,12 @@ from virttest.staging import utils_memory from virttest.utils_misc import normalize_data_size -LOG_JOB = logging.getLogger('avocado.test') +LOG_JOB = logging.getLogger("avocado.test") class SGXError(Exception): - """ General SGX error""" + """General SGX error""" + pass @@ -25,15 +27,17 @@ def _get_epc_size(output): """ epc_size = 0 for line in output.splitlines(): - tmp_epc = re.search(r'\b0x[0-9a-fA-F]*-0x[0-9a-fA-F]*\b', - str(line)) + tmp_epc = re.search(r"\b0x[0-9a-fA-F]*-0x[0-9a-fA-F]*\b", str(line)) if tmp_epc: - epc_size += int(tmp_epc[0].split('-')[1], 16) - int( - tmp_epc[0].split("-")[0], 16) + 1 + epc_size += ( + int(tmp_epc[0].split("-")[1], 16) + - int(tmp_epc[0].split("-")[0], 16) + + 1 + ) return epc_size -class SGXHostCapability(object): +class SGXHostCapability: """ Hypervisor sgx capabilities check. """ @@ -53,9 +57,10 @@ def validate_sgx_cap(self): """ try: host_sgx_msg = process.system_output( - "journalctl --boot|grep -i 'sgx: EPC section'", shell=True) + "journalctl --boot|grep -i 'sgx: EPC section'", shell=True + ) except Exception as e: - self._test.cancel("Host sgx capability check fail %s" % e) + self._test.cancel(f"Host sgx capability check fail {e}") else: self.host_epc_size = _get_epc_size(host_sgx_msg) @@ -68,17 +73,19 @@ def validate_numa_node_count(self): node_list = [] numa_info = process.getoutput("numactl -H") for i in host_numa: - node_size = re.findall(r"node %d size: " - r"\d+ \w" % i, numa_info)[0].split()[-2] - if node_size != '0': + node_size = re.findall(r"node %d size: " r"\d+ \w" % i, numa_info)[ + 0 + ].split()[-2] + if node_size != "0": node_list.append(str(i)) monitor_expect_nodes = int(self._params["monitor_expect_nodes"]) if len(node_list) < monitor_expect_nodes: - self._test.cancel("host numa nodes %s isn't enough for " - "testing." % node_list) + self._test.cancel( + f"host numa nodes {node_list} isn't enough for " "testing." + ) -class SGXChecker(object): +class SGXChecker: """ Basic verification on sgx capabilities for both host and guest. """ @@ -103,7 +110,7 @@ def verify_sgx_flags(self, qmp_command, flags): """ for ele in flags: if qmp_command[ele] is not True: - self._test.fail("%s is not enabled, qmp check failed." 
% ele) + self._test.fail(f"{ele} is not enabled, qmp check failed.") def get_config_total_epc_size(self): """ @@ -115,10 +122,10 @@ def get_config_total_epc_size(self): config_epc_size = 0 for ele in epc_list: epc_params = self._params.object_params(ele) - ele_memdev = self._params.object_params( - epc_params["vm_sgx_epc_memdev"]) - tmp_epc_size = int(float(normalize_data_size( - ele_memdev["size_mem"], "B", 1024))) + ele_memdev = self._params.object_params(epc_params["vm_sgx_epc_memdev"]) + tmp_epc_size = int( + float(normalize_data_size(ele_memdev["size_mem"], "B", 1024)) + ) config_epc_size += tmp_epc_size return config_epc_size @@ -132,56 +139,59 @@ def get_config_epc_numa_info(self): tmp_epc_dict = {} for ele in guest_sgx_epc_list: epc_params = self._params.object_params(ele) - ele_memdev = self._params.object_params( - epc_params["vm_sgx_epc_memdev"]) - tmp_epc_size = int(float(normalize_data_size( - ele_memdev["size_mem"], "B", 1024))) + ele_memdev = self._params.object_params(epc_params["vm_sgx_epc_memdev"]) + tmp_epc_size = int( + float(normalize_data_size(ele_memdev["size_mem"], "B", 1024)) + ) epc_numa_id = int(epc_params["vm_sgx_epc_node"]) - tmp_epc_dict = {'size': tmp_epc_size, 'node': epc_numa_id} + tmp_epc_dict = {"size": tmp_epc_size, "node": epc_numa_id} return tmp_epc_dict def verify_qmp_host_sgx_cap(self, host_epc_size): """ Verify query host sgx capabilities qmp cmd in sgx flags and epc size """ - sgx_flags = self._params['sgx_flags'].split() + sgx_flags = self._params["sgx_flags"].split() host_sgx_info = self._monitor.query_sgx_capabilities() self.verify_sgx_flags(host_sgx_info, sgx_flags) - host_qmp_sections = host_sgx_info['sections'] + host_qmp_sections = host_sgx_info["sections"] host_qmp_section_size = 0 for section in host_qmp_sections: - host_qmp_section_size += int(section['size']) + host_qmp_section_size += int(section["size"]) if host_epc_size != host_qmp_section_size: - self._test.fail("Host epc size %s is not equal to query sgx" - "capabilities section size %s" - % (host_epc_size, host_qmp_section_size)) + self._test.fail( + f"Host epc size {host_epc_size} is not equal to query sgx" + f"capabilities section size {host_qmp_section_size}" + ) LOG_JOB.debug("Guest query host capability verified successfully") def verify_qmp_guest_sgx_cap(self): """ Verify query guest sgx capabilities qmp cmd in sgx flags and epc size """ - sgx_flags = self._params['sgx_flags'].split() + sgx_flags = self._params["sgx_flags"].split() guest_sgx_info = self._monitor.query_sgx() self.verify_sgx_flags(guest_sgx_info, sgx_flags) LOG_JOB.debug("Guest query SGX flags %s verified done", sgx_flags) - epc_sections_info = guest_sgx_info['sections'] + epc_sections_info = guest_sgx_info["sections"] numa_epc_dict = self.get_config_epc_numa_info() if numa_epc_dict == epc_sections_info: - self._test.fail("Guest epc sized on each numa mis-matched, " - "qmp check failed.") + self._test.fail( + "Guest epc sized on each numa mis-matched, " "qmp check failed." 
+ ) - sgx_sections = guest_sgx_info['sections'] + sgx_sections = guest_sgx_info["sections"] sgx_section_size = 0 for section in sgx_sections: - sgx_section_size += int(section['size']) + sgx_section_size += int(section["size"]) config_epc_size = self.get_config_total_epc_size() if config_epc_size != sgx_section_size: - self._test.fail("Guest epc size %s is not equal to query_sgx" - " section size %s" % (config_epc_size, - sgx_section_size)) + self._test.fail( + f"Guest epc size {config_epc_size} is not equal to query_sgx" + f" section size {sgx_section_size}" + ) LOG_JOB.debug("Guest query SGX verified successfully") def verify_guest_epc_size(self, cmd_output): @@ -193,7 +203,8 @@ def verify_guest_epc_size(self, cmd_output): guest_total_epc_size = self.get_config_total_epc_size() guest_msg_epc_size = _get_epc_size(cmd_output) if guest_msg_epc_size != int(guest_total_epc_size): - self._test.fail("Guest epc size %s is not equal to qemu set " - "section size %s" % (guest_msg_epc_size, - guest_total_epc_size)) + self._test.fail( + f"Guest epc size {guest_msg_epc_size} is not equal to qemu set " + f"section size {guest_total_epc_size}" + ) LOG_JOB.debug("Guest SGX size verified successfully") diff --git a/provider/slof.py b/provider/slof.py index e95325f818..59e5b503e6 100644 --- a/provider/slof.py +++ b/provider/slof.py @@ -12,17 +12,17 @@ - check_error: Check if there are error info in the SLOF content. """ -import re -import time import logging import os +import re +import time from virttest import utils_misc -LOG_JOB = logging.getLogger('avocado.test') +LOG_JOB = logging.getLogger("avocado.test") -START_PATTERN = r'\s+SLOF\S+\s+\*+' -END_PATTERN = r'\s+Successfully loaded$' +START_PATTERN = r"\s+SLOF\S+\s+\*+" +END_PATTERN = r"\s+Successfully loaded$" def get_boot_content(vm, start_pos=0, start_str=START_PATTERN, end_str=END_PATTERN): @@ -59,8 +59,9 @@ def get_boot_content(vm, start_pos=0, start_str=START_PATTERN, end_str=END_PATTE return None, start_str_pos -def wait_for_loaded(vm, test, start_pos=0, start_str=START_PATTERN, - end_str=END_PATTERN, timeout=300): +def wait_for_loaded( + vm, test, start_pos=0, start_str=START_PATTERN, end_str=END_PATTERN, timeout=300 +): """ Wait for loading the SLOF. @@ -80,19 +81,18 @@ def wait_for_loaded(vm, test, start_pos=0, start_str=START_PATTERN, :rtype: tuple(list, int) """ file_timeout = 30 - if not utils_misc.wait_for(lambda: os.path.isfile(vm.serial_console_log), - file_timeout): - test.error('No found serial log in %s sec.' % file_timeout) + if not utils_misc.wait_for( + lambda: os.path.isfile(vm.serial_console_log), file_timeout + ): + test.error(f"No found serial log in {file_timeout} sec.") end_time = timeout + time.time() while time.time() < end_time: content, start_pos = get_boot_content(vm, start_pos, start_str, end_str) if content: - LOG_JOB.info('Output of SLOF:\n%s', ''.join(content)) + LOG_JOB.info("Output of SLOF:\n%s", "".join(content)) return content, start_pos - test.fail( - 'No found corresponding SLOF info in serial log during %s sec.' 
% - timeout) + test.fail(f"No found corresponding SLOF info in serial log during {timeout} sec.") def get_booted_devices(content): @@ -107,16 +107,21 @@ def get_booted_devices(content): position = 0 devices = {} for line in content: - ret = re.search(r'(\s+Trying to load:\s+from:\s)(/.+)(\s+\.\.\.)', - line) + ret = re.search(r"(\s+Trying to load:\s+from:\s)(/.+)(\s+\.\.\.)", line) if ret: devices[position] = ret.group(2) position += 1 return devices -def verify_boot_device(content, parent_bus_type, child_bus_type, child_addr, - sub_child_addr=None, position=0): +def verify_boot_device( + content, + parent_bus_type, + child_bus_type, + child_addr, + sub_child_addr=None, + position=0, +): """ Verify whether the vm is booted from the specified device. @@ -135,51 +140,54 @@ def verify_boot_device(content, parent_bus_type, child_bus_type, child_addr, :return: true if booted from the specified device :rtype: bool """ - pattern = re.compile(r'^0x0?') - addr = pattern.sub('', child_addr) + pattern = re.compile(r"^0x0?") + addr = pattern.sub("", child_addr) sub_addr = "" if sub_child_addr: - sub_addr = pattern.sub('', sub_child_addr) + sub_addr = pattern.sub("", sub_child_addr) - pattern = re.compile(r'/\w+.{1}\w+@') + pattern = re.compile(r"/\w+.{1}\w+@") devices = get_booted_devices(content) for k, v in devices.items(): if int(k) == position: - LOG_JOB.info('Position [%d]: %s', k, v) + LOG_JOB.info("Position [%d]: %s", k, v) break if position in devices: name = devices[position] - info = ('Check whether the device({0}@{1}@{2}) is the {3} bootable ' - 'device.'.format(parent_bus_type, child_bus_type, - child_addr, position)) + info = ( + f"Check whether the device({parent_bus_type}@{child_bus_type}@{child_addr}) is the {position} bootable " + "device." + ) if sub_child_addr: - info = ('Check whether the device({0}@{1}@{2}@{3}) is the {4} ' - 'bootable device.'.format(parent_bus_type, child_bus_type, - child_addr, sub_child_addr, - position)) + info = ( + f"Check whether the device({parent_bus_type}@{child_bus_type}@{child_addr}@{sub_child_addr}) is the {position} " + "bootable device." + ) LOG_JOB.info(info) - if parent_bus_type == 'pci': + if parent_bus_type == "pci": # virtio-blk, virtio-scsi and ethernet device. - if child_bus_type == 'scsi' or child_bus_type == 'ethernet': + if child_bus_type == "scsi" or child_bus_type == "ethernet": if addr == pattern.split(name)[2]: return True # pci-bridge, usb device. - elif child_bus_type == 'pci-bridge' or child_bus_type == 'usb': - if (addr == pattern.split(name)[2] and - sub_addr == pattern.split(name)[3]): + elif child_bus_type == "pci-bridge" or child_bus_type == "usb": + if ( + addr == pattern.split(name)[2] + and sub_addr == pattern.split(name)[3] + ): return True - elif parent_bus_type == 'vdevice': + elif parent_bus_type == "vdevice": # v-scsi device, spapr-vlan device. 
- if child_bus_type == 'v-scsi' or child_bus_type == 'l-lan': + if child_bus_type == "v-scsi" or child_bus_type == "l-lan": if addr == pattern.split(name)[1]: return True else: return False else: LOG_JOB.debug( - 'No such device at position %s in all devices in SLOF contents.', - position) + "No such device at position %s in all devices in SLOF contents.", position + ) return False @@ -192,6 +200,6 @@ def check_error(test, content): :type content: list """ for line in content: - if re.search(r'error', line, re.IGNORECASE): - test.fail('Found errors: %s' % line) + if re.search(r"error", line, re.IGNORECASE): + test.fail(f"Found errors: {line}") LOG_JOB.info("No errors in SLOF content.") diff --git a/provider/storage_benchmark.py b/provider/storage_benchmark.py index 79960931a2..5c119a9321 100644 --- a/provider/storage_benchmark.py +++ b/provider/storage_benchmark.py @@ -16,45 +16,49 @@ import logging import os import re - from functools import wraps - -from platform import machine from operator import attrgetter - -from virttest import utils_misc -from virttest import data_dir -from virttest.remote import scp_to_remote +from platform import machine from avocado import TestError +from virttest import data_dir, utils_misc +from virttest.remote import scp_to_remote -LOG_JOB = logging.getLogger('avocado.test') +LOG_JOB = logging.getLogger("avocado.test") -GIT_DOWNLOAD = 'git' -CURL_DOWNLOAD = 'curl' +GIT_DOWNLOAD = "git" +CURL_DOWNLOAD = "curl" -TAR_UNPACK = 'tar' +TAR_UNPACK = "tar" -class StorageBenchmark(object): +class StorageBenchmark: """ Create a Benchmark class which provides common interface(method) for using Benchmark tool to run test. """ - cmds = {'linux': {'_symlinks': 'ln -s -f %s %s', - '_list_pid': 'pgrep -xl %s', - '_kill_pid': 'killall -s SIGKILL %s', - '_rm_file': 'rm -rf {}'}, - 'windows': {'_symlinks': 'mklink %s %s', - '_list_pid': 'TASKLIST /FI "IMAGENAME eq %s', - '_kill_pid': 'TASKKILL /F /IM %s /T', - '_rm_file': 'RD /S /Q "{}"'}} - tar_map = {'.tar': '-xvf', '.tar.gz': '-xzf', - '.tar.bz2': '-xjf', '.tar.Z': '-xZf'} - download_cmds = {GIT_DOWNLOAD: 'rm -rf {0} && git clone {1} {0}', - CURL_DOWNLOAD: 'curl -o {0} {1}'} - unpack_cmds = {TAR_UNPACK: '_tar_unpack_file'} + + cmds = { + "linux": { + "_symlinks": "ln -s -f %s %s", + "_list_pid": "pgrep -xl %s", + "_kill_pid": "killall -s SIGKILL %s", + "_rm_file": "rm -rf {}", + }, + "windows": { + "_symlinks": "mklink %s %s", + "_list_pid": 'TASKLIST /FI "IMAGENAME eq %s', + "_kill_pid": "TASKKILL /F /IM %s /T", + "_rm_file": 'RD /S /Q "{}"', + }, + } + tar_map = {".tar": "-xvf", ".tar.gz": "-xzf", ".tar.bz2": "-xjf", ".tar.Z": "-xZf"} + download_cmds = { + GIT_DOWNLOAD: "rm -rf {0} && git clone {1} {0}", + CURL_DOWNLOAD: "curl -o {0} {1}", + } + unpack_cmds = {TAR_UNPACK: "_tar_unpack_file"} def __init__(self, os_type, vm, name): """ @@ -106,15 +110,20 @@ def __wait_procs_done(self, session, timeout=1800): :param timeout: timeout for waiting :type timeout: float """ - proc_name = self.name if self.os_type == 'linux' else ( - self.name.upper() + '.EXE') - LOG_JOB.info('Checking the running %s processes.', self.name) + proc_name = ( + self.name if self.os_type == "linux" else (self.name.upper() + ".EXE") + ) + LOG_JOB.info("Checking the running %s processes.", self.name) if not utils_misc.wait_for( - lambda: not re.search( - proc_name.lower(), session.cmd_output( - self._list_pid % proc_name), re.I | re.M), timeout, step=3.0): - raise TestError( - 'Not all %s processes done in %s sec.' 
% (proc_name, timeout)) + lambda: not re.search( + proc_name.lower(), + session.cmd_output(self._list_pid % proc_name), + re.I | re.M, + ), + timeout, + step=3.0, + ): + raise TestError(f"Not all {proc_name} processes done in {timeout} sec.") def __kill_procs(self, session): """ @@ -123,7 +132,7 @@ def __kill_procs(self, session): :param session: vm session :type session: aexpect.client.ShellSession """ - LOG_JOB.info('Killing all %s processes by force.', self.name) + LOG_JOB.info("Killing all %s processes by force.", self.name) session.cmd_output(self._kill_pid % self.name, timeout=120) def __remove_env_files(self, session, timeout=300): @@ -136,9 +145,9 @@ def __remove_env_files(self, session, timeout=300): :param timeout: timeout for removing :type timeout: float """ - LOG_JOB.info('Removing the environment files.') + LOG_JOB.info("Removing the environment files.") cmds = (self._rm_file.format(f) for f in self.env_files) - session.cmd(' && '.join(cmds), timeout=timeout) + session.cmd(" && ".join(cmds), timeout=timeout) def download_benchmark(self, mode, url, dst, timeout=300): """ @@ -155,18 +164,21 @@ def download_benchmark(self, mode, url, dst, timeout=300): self.session.cmd(self.download_cmds[mode].format(dst, url), timeout) self.env_files.append(dst) - def scp_benckmark(self, username, password, host_path, guest_path, port='22'): + def scp_benckmark(self, username, password, host_path, guest_path, port="22"): """ Scp a benchmark tool from the local host to the guest. """ - scp_to_remote(self.vm.get_address(), port, username, password, host_path, guest_path) + scp_to_remote( + self.vm.get_address(), port, username, password, host_path, guest_path + ) self.env_files.append(guest_path) def _tar_unpack_file(self, src, dst, timeout=300): """Unpack file by tar.""" - cmd = 'mkdir -p {0} && tar {1} {2} -C {0}'.format( - dst, self.tar_map[re.search(r'\.tar\.?(\w+)?$', src).group()], src) + cmd = "mkdir -p {0} && tar {1} {2} -C {0}".format( + dst, self.tar_map[re.search(r"\.tar\.?(\w+)?$", src).group()], src + ) self.session.cmd(cmd, timeout=timeout) def unpack_file(self, mode, src, dst, timeout=300): @@ -190,29 +202,31 @@ def _install_linux(self, src, dst, timeout): Install a package from source file to destination directory in linux. """ self.session.cmd( - "cd %s && ./configure --prefix=%s && make && make install" % ( - src, dst), timeout=timeout) + f"cd {src} && ./configure --prefix={dst} && make && make install", + timeout=timeout, + ) def _install_win(self, src, dst, timeout): """ Install a package from source file to destination directory in windows. """ + def _find_exe_file(): """ Find the path of the given executable file in windows. """ - cmd_dir = r'DIR /S /B "%s" | find "%s.exe"' % (dst, self.name) + cmd_dir = rf'DIR /S /B "{dst}" | find "{self.name}.exe"' s, o = self.session.cmd_status_output(cmd_dir, timeout=timeout) if not s: - return '"{}"'.format(o.splitlines()[0]) + return f'"{o.splitlines()[0]}"' return None cmd = utils_misc.set_winutils_letter( - self.session, r'msiexec /a "%s" /qn TARGETDIR="%s"' % (src, dst)) + self.session, rf'msiexec /a "{src}" /qn TARGETDIR="{dst}"' + ) self.session.cmd_output(cmd, timeout=timeout) - if not utils_misc.wait_for( - lambda: _find_exe_file(), timeout, step=3.0): - raise TestError('Failed to install fio under %.2f.' 
% timeout) + if not utils_misc.wait_for(lambda: _find_exe_file(), timeout, step=3.0): + raise TestError(f"Failed to install fio under {timeout:.2f}.") def install(self, src, dst, timeout=300): """ @@ -225,7 +239,7 @@ def install(self, src, dst, timeout=300): :param timeout: timeout for installing :type timeout: float """ - install_map = {'linux': '_install_linux', 'windows': '_install_win'} + install_map = {"linux": "_install_linux", "windows": "_install_win"} getattr(self, install_map[self.os_type])(src, dst, timeout) self.env_files.append(dst) @@ -266,7 +280,8 @@ def clean(self, timeout=1800, force=False): @staticmethod def _clean_env(func): - """ Decorator that clean the env files. """ + """Decorator that clean the env files.""" + @wraps(func) def __clean_env(self, *args, **kwargs): try: @@ -274,58 +289,66 @@ def __clean_env(self, *args, **kwargs): except Exception as e: self.clean() raise TestError(str(e)) + return __clean_env -class IozoneLinuxCfg(object): +class IozoneLinuxCfg: def __init__(self, params, session): - iozone_pkg = params.get("iozone_pkg", 'iozone3_490.tar.bz2') - host_path = os.path.join(data_dir.get_deps_dir(), 'iozone', iozone_pkg) - self.download_path = os.path.join('/home', iozone_pkg) - self.iozone_inst = os.path.join('/home', 'iozone_inst') - if 'ppc64' in machine(): - self.arch = 'linux-powerpc64' - elif 'aarch64' in machine(): - self.arch = 'linux-arm' - elif 's390' in machine(): - self.arch = 'linux-S390X' + iozone_pkg = params.get("iozone_pkg", "iozone3_490.tar.bz2") + host_path = os.path.join(data_dir.get_deps_dir(), "iozone", iozone_pkg) + self.download_path = os.path.join("/home", iozone_pkg) + self.iozone_inst = os.path.join("/home", "iozone_inst") + if "ppc64" in machine(): + self.arch = "linux-powerpc64" + elif "aarch64" in machine(): + self.arch = "linux-arm" + elif "s390" in machine(): + self.arch = "linux-S390X" else: - self.arch = 'linux-AMD64' - self.cmd = 'cd %s/src/current && make clean && make %s' % (self.iozone_inst, - self.arch) - self.iozone_path = '%s/src/current/iozone' % self.iozone_inst - scp_benckmark = attrgetter('scp_benckmark') - unpack_file = attrgetter('unpack_file') - session_cmd = attrgetter('session.cmd') - self.setups = {scp_benckmark: (params.get('username'), params.get('password'), - host_path, self.download_path), - unpack_file: (TAR_UNPACK, self.download_path, self.iozone_inst), - session_cmd: (self.cmd, 300)} + self.arch = "linux-AMD64" + self.cmd = ( + f"cd {self.iozone_inst}/src/current && make clean && make {self.arch}" + ) + self.iozone_path = f"{self.iozone_inst}/src/current/iozone" + scp_benckmark = attrgetter("scp_benckmark") + unpack_file = attrgetter("unpack_file") + session_cmd = attrgetter("session.cmd") + self.setups = { + scp_benckmark: ( + params.get("username"), + params.get("password"), + host_path, + self.download_path, + ), + unpack_file: (TAR_UNPACK, self.download_path, self.iozone_inst), + session_cmd: (self.cmd, 300), + } self.setup_orders = (scp_benckmark, unpack_file, session_cmd) -class IozoneWinCfg(object): +class IozoneWinCfg: def __init__(self, params, session): - label = params.get('win_utils_label', 'WIN_UTILS') + label = params.get("win_utils_label", "WIN_UTILS") drive_letter = utils_misc.get_winutils_vol(session, label) - self.cmd = 'set nodosfilewarning=1 && set CYGWIN=nodosfilewarning' - self.iozone_path = drive_letter + r':\Iozone\iozone.exe' - session_cmd = attrgetter('session.cmd') + self.cmd = "set nodosfilewarning=1 && set CYGWIN=nodosfilewarning" + self.iozone_path = drive_letter + 
r":\Iozone\iozone.exe" + session_cmd = attrgetter("session.cmd") self.setups = {session_cmd: (self.cmd, 300)} - self.setup_orders = (session_cmd, ) + self.setup_orders = (session_cmd,) class Iozone(StorageBenchmark): @StorageBenchmark._clean_env def __init__(self, params, vm): - self.os_type = params['os_type'] - super(Iozone, self).__init__(self.os_type, vm, 'iozone') - self.cfg_map = {'linux': IozoneLinuxCfg, 'windows': IozoneWinCfg} + self.os_type = params["os_type"] + super().__init__(self.os_type, vm, "iozone") + self.cfg_map = {"linux": IozoneLinuxCfg, "windows": IozoneWinCfg} self.cfg = self.cfg_map[self.os_type](params, self.session) for method in self.cfg.setup_orders: method(self)(*self.cfg.setups[method]) - def run(self, cmd_options='-a', timeout=1800): + def run(self, cmd_options="-a", timeout=1800): """ Run iozone test inside guest. @@ -333,62 +356,71 @@ def run(self, cmd_options='-a', timeout=1800): 1G -M -f /home/test :type cmd_options: str """ - cmd = ' '.join((self.cfg.iozone_path, cmd_options)) - return super(Iozone, self).run(cmd, timeout) + cmd = " ".join((self.cfg.iozone_path, cmd_options)) + return super().run(cmd, timeout) -class FioLinuxCfg(object): +class FioLinuxCfg: def __init__(self, params, session): - #fio_resource accept 'distro' or one specified fio package. + # fio_resource accept 'distro' or one specified fio package. #'distro' means use the fio binary provides by os, and the specified - #package means use the specified package in deps. - fio_resource = params.get("fio_resource", 'fio-3.13-48-ga819.tar.bz2') - if fio_resource == 'distro': - status, output = session.cmd_status_output('which fio') + # package means use the specified package in deps. + fio_resource = params.get("fio_resource", "fio-3.13-48-ga819.tar.bz2") + if fio_resource == "distro": + status, output = session.cmd_status_output("which fio") if status == 0: self.fio_path = output.strip() self.setup_orders = () else: - raise TestError('No available fio in the distro') + raise TestError("No available fio in the distro") else: - host_path = os.path.join(data_dir.get_deps_dir(), 'fio', - fio_resource) - self.download_path = os.path.join('/home', fio_resource) - self.fio_inst = os.path.join('/home', 'fio_inst') - self.fio_path = '%s/bin/fio' % self.fio_inst - scp_benckmark = attrgetter('scp_benckmark') - unpack_file = attrgetter('unpack_file') - install_timeout = params.get_numeric('fio_install_timeout', 300) - install = attrgetter('install') - self.setups = {scp_benckmark: (params.get('username'), params.get('password'), - host_path, self.download_path), - unpack_file: (TAR_UNPACK, self.download_path, self.fio_inst), - install: (self.fio_inst, self.fio_inst, install_timeout)} + host_path = os.path.join(data_dir.get_deps_dir(), "fio", fio_resource) + self.download_path = os.path.join("/home", fio_resource) + self.fio_inst = os.path.join("/home", "fio_inst") + self.fio_path = f"{self.fio_inst}/bin/fio" + scp_benckmark = attrgetter("scp_benckmark") + unpack_file = attrgetter("unpack_file") + install_timeout = params.get_numeric("fio_install_timeout", 300) + install = attrgetter("install") + self.setups = { + scp_benckmark: ( + params.get("username"), + params.get("password"), + host_path, + self.download_path, + ), + unpack_file: (TAR_UNPACK, self.download_path, self.fio_inst), + install: (self.fio_inst, self.fio_inst, install_timeout), + } self.setup_orders = (scp_benckmark, unpack_file, install) -class FioWinCfg(object): +class FioWinCfg: def __init__(self, params, session): - label = 
params.get('win_utils_label', 'WIN_UTILS') + label = params.get("win_utils_label", "WIN_UTILS") utils_letter = utils_misc.get_winutils_vol(session, label) - arch = params.get('vm_arch_name', 'x84_64') - fio_ver = params.get('fio_ver', 'fio-latest') - self.fio_inst = {'x86_64': r'C:\Program Files (x86)\fio', - 'i686': r'C:\Program Files\fio'} - self.fio_msi = {'x86_64': r'%s:\%s-x64.msi' % (utils_letter, fio_ver), - 'i686': r'%s:\%s-x86.msi' % (utils_letter, fio_ver)} - self.fio_path = r'"%s\fio\fio.exe"' % self.fio_inst[arch] - install = attrgetter('install') + arch = params.get("vm_arch_name", "x84_64") + fio_ver = params.get("fio_ver", "fio-latest") + self.fio_inst = { + "x86_64": r"C:\Program Files (x86)\fio", + "i686": r"C:\Program Files\fio", + } + self.fio_msi = { + "x86_64": rf"{utils_letter}:\{fio_ver}-x64.msi", + "i686": rf"{utils_letter}:\{fio_ver}-x86.msi", + } + self.fio_path = rf'"{self.fio_inst[arch]}\fio\fio.exe"' + install = attrgetter("install") self.setups = {install: (self.fio_msi[arch], self.fio_inst[arch], 300)} - self.setup_orders = (install, ) + self.setup_orders = (install,) class Fio(StorageBenchmark): @StorageBenchmark._clean_env def __init__(self, params, vm): - self.os_type = params['os_type'] - super(Fio, self).__init__(self.os_type, vm, 'fio') - self.cfg_map = {'linux': FioLinuxCfg, 'windows': FioWinCfg} + self.os_type = params["os_type"] + super().__init__(self.os_type, vm, "fio") + self.cfg_map = {"linux": FioLinuxCfg, "windows": FioWinCfg} self.cfg = self.cfg_map[self.os_type](params, self.session) for method in self.cfg.setup_orders: method(self)(*self.cfg.setups[method]) @@ -402,8 +434,8 @@ def run(self, cmd_options, timeout=1800): --name=test :type cmd_options: str """ - cmd = ' '.join((self.cfg.fio_path, cmd_options)) - return super(Fio, self).run(cmd, timeout) + cmd = " ".join((self.cfg.fio_path, cmd_options)) + return super().run(cmd, timeout) def generate_instance(params, vm, name): @@ -417,4 +449,4 @@ def generate_instance(params, vm, name): :return: instance with the given name class :rtype: StorageBenchmark object """ - return {'fio': Fio, 'iozone': Iozone}[name](params, vm) + return {"fio": Fio, "iozone": Iozone}[name](params, vm) diff --git a/provider/thp_fragment_tool.py b/provider/thp_fragment_tool.py index 5b92edf560..b0a8ed3dec 100644 --- a/provider/thp_fragment_tool.py +++ b/provider/thp_fragment_tool.py @@ -3,21 +3,20 @@ This module is meant to copy, build and execute the thp_fragment tool. 
""" + import os import shutil from avocado.utils import process - from virttest import data_dir - dst_dir = "/var/tmp" test_bin = "/var/tmp/thp_fragment" source_file = "thp_fragment.c" def clean(): - process.system("rm -rf %s %s/%s" % (test_bin, dst_dir, source_file)) + process.system(f"rm -rf {test_bin} {dst_dir}/{source_file}") def copy_tool(): @@ -26,10 +25,10 @@ def copy_tool(): def build_tool(test): - build_cmd = "cd %s; gcc -lrt %s -o %s" % (dst_dir, source_file, test_bin) - test.log.info("Build binary file '%s'" % test_bin) + build_cmd = f"cd {dst_dir}; gcc -lrt {source_file} -o {test_bin}" + test.log.info("Build binary file '%s'", test_bin) if process.system(build_cmd, ignore_status=True, shell=True) != 0: - test.fail("Failed building the the tool binary: %s" % test_bin) + test.fail(f"Failed building the the tool binary: {test_bin}") def get_tool_output(): diff --git a/provider/throttle_utils.py b/provider/throttle_utils.py index b4e2b9f154..56cbc5c75f 100644 --- a/provider/throttle_utils.py +++ b/provider/throttle_utils.py @@ -1,6 +1,7 @@ """ Module for IO throttling relevant interfaces. """ + import copy import json import logging @@ -12,23 +13,21 @@ from multiprocessing.pool import ThreadPool from time import sleep -from virttest.utils_version import VersionInterval - -from virttest.utils_misc import get_linux_drive_path - -from virttest.qemu_monitor import QMPCmdError - from virttest.qemu_devices.qdevices import QThrottleGroup +from virttest.qemu_monitor import QMPCmdError +from virttest.utils_misc import get_linux_drive_path +from virttest.utils_version import VersionInterval -LOG_JOB = logging.getLogger('avocado.test') +LOG_JOB = logging.getLogger("avocado.test") class ThrottleError(Exception): - """ General Throttle error""" + """General Throttle error""" + pass -class ThrottleGroupManager(object): +class ThrottleGroupManager: """ General operations for Throttle group. 
""" @@ -140,11 +139,16 @@ def change_throttle_group(self, image, group_id): throttle_blockdev = self._vm.devices.get_by_qid(node_name)[0] old_throttle_group = self._vm.devices.get_by_qid( - throttle_blockdev.get_param("throttle-group"))[0] + throttle_blockdev.get_param("throttle-group") + )[0] new_throttle_group = self._vm.devices.get_by_qid(group_id)[0] file = throttle_blockdev.get_param("file") - args = {"driver": "throttle", "node-name": node_name, "file": file, - "throttle-group": group_id} + args = { + "driver": "throttle", + "node-name": node_name, + "file": file, + "throttle-group": group_id, + } if self._vm.devices.qemu_version in VersionInterval("[6.1.0, )"): self._monitor.blockdev_reopen({"options": [args]}) else: @@ -153,8 +157,7 @@ def change_throttle_group(self, image, group_id): for bus in old_throttle_group.child_bus: bus.remove(throttle_blockdev) - throttle_blockdev.parent_bus = ( - {"busid": group_id}, {"type": "ThrottleGroup"}) + throttle_blockdev.parent_bus = ({"busid": group_id}, {"type": "ThrottleGroup"}) throttle_blockdev.set_param("throttle-group", group_id) for bus in new_throttle_group.child_bus: @@ -171,8 +174,7 @@ def _online_disk_windows(session, index, timeout=360): :return: The output of cmd """ - disk = "disk_" + ''.join( - random.sample(string.ascii_letters + string.digits, 4)) + disk = "disk_" + "".join(random.sample(string.ascii_letters + string.digits, 4)) online_cmd = "echo select disk %s > " + disk online_cmd += " && echo online disk noerr >> " + disk online_cmd += " && echo clean >> " + disk @@ -194,7 +196,7 @@ def _get_drive_path(session, params, image): """ image_params = params.object_params(image) - os_type = params['os_type'] + os_type = params["os_type"] extra_params = image_params["blk_extra_params"] serial = re.search(r"(serial|wwn)=(\w+)", extra_params, re.M).group(2) if os_type == "windows": @@ -209,7 +211,7 @@ def _get_drive_path(session, params, image): return get_linux_drive_path(session, serial) -class ThrottleTester(object): +class ThrottleTester: """ FIO test for in throttle group disks, It contains building general fio command and check the result of fio command output. @@ -223,11 +225,18 @@ class ThrottleTester(object): tt.start() """ + # Default data struct of expected result. raw_expected = { - "burst": {"read": 0, "write": 0, "total": 0, "burst_time": 0, - "burst_empty_time": 0}, - "normal": {"read": 0, "write": 0, "total": 0}} + "burst": { + "read": 0, + "write": 0, + "total": 0, + "burst_time": 0, + "burst_empty_time": 0, + }, + "normal": {"read": 0, "write": 0, "total": 0}, + } # Default data struct of raw image data. 
raw_image_data = {"name": "", "fio_option": "", "output": {}} @@ -253,9 +262,11 @@ def __init__(self, test, params, vm, session, group, images=None): self._fio_option = "" self.images = images.copy() if images else [] self._throttle = { - "images": {image: copy.deepcopy(ThrottleTester.raw_image_data) for - image in images}, - "expected": copy.deepcopy(ThrottleTester.raw_expected)} + "images": { + image: copy.deepcopy(ThrottleTester.raw_image_data) for image in images + }, + "expected": copy.deepcopy(ThrottleTester.raw_expected), + } self._margin = 0.3 @staticmethod @@ -323,7 +334,7 @@ def run_fio(self, *args): image_info = args[0] fio_option = image_info["fio_option"] session = self._vm.wait_for_login() - cmd = ' '.join((self._fio.cfg.fio_path, fio_option)) + cmd = " ".join((self._fio.cfg.fio_path, fio_option)) burst = self._throttle["expected"]["burst"] expected_burst = burst["read"] + burst["write"] + burst["total"] if expected_burst: @@ -361,39 +372,40 @@ def check_output(self, images): LOG_JOB.debug("Check %s in total %d images.", image, num_images) if expected_burst: if num_samples < 2: - self._test.error( - "At lease 2 Data samples:%d" % num_samples) + self._test.error("At lease 2 Data samples:%d" % num_samples) read = output[1]["jobs"][0]["read"]["iops"] write = output[1]["jobs"][0]["write"]["iops"] total = read + write sum_burst += total else: if num_samples < 1: - self._test.error( - "At lease 1 Data samples:%d" % num_samples) + self._test.error("At lease 1 Data samples:%d" % num_samples) read = output[num_samples]["jobs"][0]["read"]["iops"] write = output[num_samples]["jobs"][0]["write"]["iops"] total = read + write sum_normal += total - LOG_JOB.debug("expected_burst:%d %d expected_normal:%d %d", - expected_burst, sum_burst, expected_normal, sum_normal) + LOG_JOB.debug( + "expected_burst:%d %d expected_normal:%d %d", + expected_burst, + sum_burst, + expected_normal, + sum_normal, + ) if expected_burst: real_gap = abs(expected_burst - sum_burst) if real_gap <= expected_burst * self._margin: - LOG_JOB.debug( - "Passed burst %d %d", expected_burst, sum_burst) + LOG_JOB.debug("Passed burst %d %d", expected_burst, sum_burst) else: - self._test.fail( - "Failed burst %d %d", expected_burst, sum_burst) + self._test.fail("Failed burst %d %d", expected_burst, sum_burst) if abs(expected_normal - sum_normal) <= expected_normal * self._margin: - LOG_JOB.debug("Passed normal verification %d %d", - expected_normal, sum_normal) + LOG_JOB.debug( + "Passed normal verification %d %d", expected_normal, sum_normal + ) else: - self._test.fail( - "Failed normal %d %d" % (expected_normal, sum_normal)) + self._test.fail("Failed normal %d %d" % (expected_normal, sum_normal)) return True @@ -472,8 +484,7 @@ def set_throttle_expected(self, expected, reset=False): """ if reset: - self._throttle["expected"] = copy.deepcopy( - ThrottleTester.raw_expected) + self._throttle["expected"] = copy.deepcopy(ThrottleTester.raw_expected) if expected: for k, v in expected.items(): if isinstance(v, dict): @@ -538,8 +549,8 @@ def build_default_option(self): self.set_throttle_expected(None, True) def _count_normal_iops(variables, iops_type): - iops_val = variables["iops_%s" % iops_type] - bps_val = variables["bps_%s" % iops_type] + iops_val = variables[f"iops_{iops_type}"] + bps_val = variables[f"bps_{iops_type}"] normal_iops = 0 if iops_val != 0 or bps_val != 0: bps = int(bps_val / iops_size) @@ -552,11 +563,11 @@ def _count_normal_iops(variables, iops_type): return normal_iops def _count_burst_iops(variables, iops_type): 
- iops_max = variables["iops_%s_max" % iops_type] - iops_length = variables["iops_%s_max_length" % iops_type] - bps_max = variables["bps_%s_max" % iops_type] - bps_length = variables["bps_%s_max_length" % iops_type] - normal_iops = variables["normal_%s_iops" % iops_type] + iops_max = variables[f"iops_{iops_type}_max"] + iops_length = variables[f"iops_{iops_type}_max_length"] + bps_max = variables[f"bps_{iops_type}_max"] + bps_length = variables[f"bps_{iops_type}_max_length"] + normal_iops = variables[f"normal_{iops_type}_iops"] burst_iops = 0 empty_time = burst_empty_time full_time = burst_time @@ -588,18 +599,26 @@ def _count_burst_iops(variables, iops_type): # count burst property local_vars = locals() burst_write_iops, burst_empty_time, burst_time = _count_burst_iops( - local_vars, "write") + local_vars, "write" + ) burst_read_iops, burst_empty_time, burst_time = _count_burst_iops( - local_vars, "read") + local_vars, "read" + ) burst_total_iops, burst_empty_time, burst_time = _count_burst_iops( - local_vars, "total") + local_vars, "total" + ) runtime = self._params.get("throttle_runtime", 60) if burst_time: runtime = burst_time - self.set_throttle_expected({"burst": { - "burst_time": burst_time, - "burst_empty_time": burst_empty_time}}) + self.set_throttle_expected( + { + "burst": { + "burst_time": burst_time, + "burst_empty_time": burst_empty_time, + } + } + ) if (normal_read_iops and normal_write_iops) or normal_total_iops: mode = "randrw" @@ -625,13 +644,14 @@ def build_image_fio_option(self, image): """ if image not in self._throttle["images"].keys(): - self._throttle["images"].update({image: copy.deepcopy( - ThrottleTester.raw_image_data)}) + self._throttle["images"].update( + {image: copy.deepcopy(ThrottleTester.raw_image_data)} + ) name = _get_drive_path(self._session, self._params, image) image_data = self._throttle["images"][image] image_data["name"] = name - image_data["fio_option"] = self._fio_option + " --filename=%s" % name + image_data["fio_option"] = self._fio_option + f" --filename={name}" return image_data def build_images_fio_option(self): @@ -664,7 +684,7 @@ def detach_image(self, image): self.images.remove(image) -class ThrottleGroupsTester(object): +class ThrottleGroupsTester: """ This class mainly testing multi groups parallel or specified group Example of usage: diff --git a/provider/vdpa_sim_utils.py b/provider/vdpa_sim_utils.py index d6a1e8406e..96158fc5ad 100644 --- a/provider/vdpa_sim_utils.py +++ b/provider/vdpa_sim_utils.py @@ -1,26 +1,27 @@ """ Module for VDPA block/net device interfaces. 
""" -import logging -import time + import glob +import logging import os +import time from aexpect.utils import wait from avocado.utils import process -from virttest.vdpa_blk import get_image_filename from virttest.utils_kernel_module import KernelModuleHandler +from virttest.vdpa_blk import get_image_filename -LOG = logging.getLogger('avocado.test') +LOG = logging.getLogger("avocado.test") class VDPABlkNetSimulatorError(Exception): - """ General VDPA BLK/Net error""" - pass + """General VDPA BLK/Net error""" + pass -class VDPABlkNetSimulatorTest(object): +class VDPABlkNetSimulatorTest: def __init__(self): self._modules = [] @@ -59,12 +60,12 @@ def remove_dev(self, name): Remove vDPA device :param name: device name """ - cmd = "vdpa dev del %s" % name + cmd = f"vdpa dev del {name}" process.run(cmd, shell=True, ignore_status=True) - cmd = "vdpa dev list -jp %s" % name + cmd = f"vdpa dev list -jp {name}" cmd_result = process.run(cmd, shell=True, ignore_status=True) if cmd_result.exit_status == 0: - raise VDPABlkNetSimulatorError("The vdpa device %s still exist" % name) + raise VDPABlkNetSimulatorError(f"The vdpa device {name} still exist") def setup(self, opts={}): """ @@ -84,8 +85,8 @@ def cleanup(self): class VhostVdpaBlkSimulatorTest(VDPABlkNetSimulatorTest): def __init__(self): - super(VhostVdpaBlkSimulatorTest, self).__init__() - self._modules = ['vhost-vdpa', 'vdpa-sim-blk'] + super().__init__() + self._modules = ["vhost-vdpa", "vdpa-sim-blk"] def add_dev(self, name): """ @@ -94,24 +95,23 @@ def add_dev(self, name): :return : host device name ,eg. /dev/vhost-vdpa-X """ - cmd = "vdpa dev add mgmtdev vdpasim_blk name %s" % name + cmd = f"vdpa dev add mgmtdev vdpasim_blk name {name}" process.run(cmd, shell=True) - cmd = "vdpa dev list -jp %s" % name + cmd = f"vdpa dev list -jp {name}" process.run(cmd, shell=True) time.sleep(2) try: dev = get_image_filename(name).replace("vdpa://", "") except Exception as e: - raise VDPABlkNetSimulatorError( - "vdpa dev add %s failed:%s" % (name, str(e))) + raise VDPABlkNetSimulatorError(f"vdpa dev add {name} failed:{str(e)}") return dev class VirtioVdpaBlkSimulatorTest(VDPABlkNetSimulatorTest): def __init__(self): - super(VirtioVdpaBlkSimulatorTest, self).__init__() - self._modules = ['virtio-vdpa', 'vdpa-sim-blk'] + super().__init__() + self._modules = ["virtio-vdpa", "vdpa-sim-blk"] def add_dev(self, name): """ @@ -123,23 +123,23 @@ def add_dev(self, name): disk_cmd = "lsblk -nd -o name " disks = process.system_output(disk_cmd, shell=True).decode().split() dev_before = set(disks) - cmd = "vdpa dev add mgmtdev vdpasim_blk name %s" % name + cmd = f"vdpa dev add mgmtdev vdpasim_blk name {name}" process.run(cmd, shell=True) - cmd = "vdpa dev list -jp %s" % name + cmd = f"vdpa dev list -jp {name}" process.run(cmd, shell=True) time.sleep(2) disks = process.system_output(disk_cmd, shell=True).decode().split() dev_after = set(disks) host_dev = list(dev_after - dev_before) if not host_dev: - raise VDPABlkNetSimulatorError("vdpa dev add %s failed" % name) + raise VDPABlkNetSimulatorError(f"vdpa dev add {name} failed") return host_dev[0] class VhostVdpaNetSimulatorTest(VDPABlkNetSimulatorTest): def __init__(self): - super(VhostVdpaNetSimulatorTest, self).__init__() - self._modules = ['vhost-vdpa', 'vdpa-sim', 'vdpa-sim-net'] + super().__init__() + self._modules = ["vhost-vdpa", "vdpa-sim", "vdpa-sim-net"] def add_dev(self, name, mac): """ @@ -149,24 +149,23 @@ def add_dev(self, name, mac): :return : host device name ,eg. 
/dev/vhost-vdpa-X """ - cmd = "vdpa dev add name %s mgmtdev vdpasim_net mac %s" % (name, mac) + cmd = f"vdpa dev add name {name} mgmtdev vdpasim_net mac {mac}" process.run(cmd, shell=True) - cmd = "vdpa dev list -jp %s" % name + cmd = f"vdpa dev list -jp {name}" process.run(cmd, shell=True) time.sleep(2) try: dev = get_image_filename(name).replace("vdpa://", "") except Exception as e: - raise VDPABlkNetSimulatorError( - "vdpa dev add %s failed:%s" % (name, str(e))) + raise VDPABlkNetSimulatorError(f"vdpa dev add {name} failed:{str(e)}") return dev class VirtioVdpaNetSimulatorTest(VDPABlkNetSimulatorTest): def __init__(self): - super(VirtioVdpaNetSimulatorTest, self).__init__() - self._modules = ['vdpa', 'virtio-vdpa', 'vdpa_sim', 'vdpa-sim-net'] + super().__init__() + self._modules = ["vdpa", "virtio-vdpa", "vdpa_sim", "vdpa-sim-net"] def add_dev(self, name, mac): """ @@ -176,13 +175,13 @@ def add_dev(self, name, mac): :return : host device name ,eg. eth0 """ - cmd = "vdpa dev add name %s mgmtdev vdpasim_net mac %s" % (name, mac) + cmd = f"vdpa dev add name {name} mgmtdev vdpasim_net mac {mac}" process.run(cmd, shell=True) - cmd = "vdpa dev list -jp %s" % name + cmd = f"vdpa dev list -jp {name}" process.run(cmd, shell=True) - virtio_dir = "/sys/bus/vdpa/devices/{}/virtio*/net/*".format(name) + virtio_dir = f"/sys/bus/vdpa/devices/{name}/virtio*/net/*" virtio_path = wait.wait_for(lambda: glob.glob(virtio_dir), 2) if virtio_path: return os.path.basename(virtio_path[0]) else: - raise VDPABlkNetSimulatorError("vdpa dev add %s failed:%s" % name) + raise VDPABlkNetSimulatorError("vdpa dev add {} failed:{}".format(*name)) diff --git a/provider/vioinput_basic.py b/provider/vioinput_basic.py index 18216d1b9f..77c1b3e6cd 100644 --- a/provider/vioinput_basic.py +++ b/provider/vioinput_basic.py @@ -2,9 +2,8 @@ import os import time -from virttest import error_context -from virttest import graphical_console -from virttest import data_dir +from virttest import data_dir, error_context, graphical_console + from provider import input_event_proxy @@ -36,8 +35,8 @@ def key_check(key): """ events_queue = listener.events - if '-' in key: - key_lst = [key_check_cfg[k] for k in key.split('-')] + if "-" in key: + key_lst = [key_check_cfg[k] for k in key.split("-")] else: key_lst = [key_check_cfg[key]] key_num = len(key_lst) @@ -48,29 +47,33 @@ def key_check(key): key_event_lst.append((events["keyCode"], events["type"])) if len(key_event_lst) < 2 * key_num: - test.fail("Reveived key events %s were not enough" % key_event_lst) + test.fail(f"Reveived key events {key_event_lst} were not enough") key_down_lst = list() for k, v in key_event_lst[:-key_num]: - if v != 'KEYDOWN': - test.fail("Received key {0} event type {1} was not KEYDOWN").format(k, v) + if v != "KEYDOWN": + test.fail("Received key {0} event type {1} was not KEYDOWN").format( + k, v + ) key_down_lst.append(k) if len(key_down_lst) != key_num or set(key_down_lst) != set(key_lst): - test.fail("Key down event keycode error, received:{0}," - "expect:{1}").format(key_down_lst, key_lst) + test.fail( + "Key down event keycode error, received:{0}," "expect:{1}" + ).format(key_down_lst, key_lst) key_up_lst = list() for k, v in key_event_lst[-key_num:]: - if v != 'KEYUP': + if v != "KEYUP": test.fail("Received key {0} event type {1} was not KEYUP").format(k, v) key_up_lst.append(k) if set(key_up_lst) != set(key_lst): - test.fail("Key up event keycode error, received:{0}," - "expect:{1}").format(key_up_lst, key_lst) + test.fail("Key up event keycode error, 
received:{0}," "expect:{1}").format( + key_up_lst, key_lst + ) - key_table_file = params.get('key_table_file') + key_table_file = params.get("key_table_file") key_check_cfg = get_keycode_cfg(key_table_file) wait_time = float(params.get("wait_time", 0.2)) @@ -79,10 +82,11 @@ def key_check(key): console = graphical_console.GraphicalConsole(vm) for key in key_check_cfg.keys(): - error_context.context("Send %s key tap to guest" % key, test.log.info) + error_context.context(f"Send {key} key tap to guest", test.log.info) console.key_tap(key) - error_context.context("Check %s key tap event received" - "correct in guest" % key, test.log.info) + error_context.context( + f"Check {key} key tap event received" "correct in guest", test.log.info + ) time.sleep(wait_time) key_check(key) diff --git a/provider/virt_storage/backend/base.py b/provider/virt_storage/backend/base.py index 2d6b7ad584..f9bcdf0927 100644 --- a/provider/virt_storage/backend/base.py +++ b/provider/virt_storage/backend/base.py @@ -1,11 +1,10 @@ import re import uuid -from provider.virt_storage import virt_source -from provider.virt_storage import virt_target +from provider.virt_storage import virt_source, virt_target -class BaseStoragePool(object): +class BaseStoragePool: TYPE = "none" def __init__(self, name): @@ -37,7 +36,8 @@ def pool_define_by_params(cls, name, params): if params.get("source"): source_params = params.object_params(params.get("source")) inst.source = virt_source.PoolSource.source_define_by_params( - params.get("source"), source_params) + params.get("source"), source_params + ) inst.set_special_opts_by_params(params) return inst @@ -99,12 +99,9 @@ def __get_volume_by_attr(self, attr, val): :raise: """ - matched_volumes = list(filter( - lambda x: str( - getattr( - x, - attr)) == str(val), - self.get_volumes())) + matched_volumes = list( + filter(lambda x: str(getattr(x, attr)) == str(val), self.get_volumes()) + ) return matched_volumes[0] if matched_volumes else None def get_volumes(self): @@ -133,4 +130,4 @@ def info(self): return out def __str__(self): - return "%s:%s" % (self.__class__.__name__, self.name) + return f"{self.__class__.__name__}:{self.name}" diff --git a/provider/virt_storage/backend/directory.py b/provider/virt_storage/backend/directory.py index b433b6356e..6b0a810b1a 100644 --- a/provider/virt_storage/backend/directory.py +++ b/provider/virt_storage/backend/directory.py @@ -1,9 +1,8 @@ import os -from provider.virt_storage.helper import fscli - from provider.virt_storage import storage_volume from provider.virt_storage.backend import base +from provider.virt_storage.helper import fscli from provider.virt_storage.utils import storage_util @@ -30,9 +29,7 @@ def delete(self): self.helper.remove() def refresh(self): - files = filter( - lambda x: not self.find_volume_by_path, - self.find_sources()) + files = filter(lambda x: not self.find_volume_by_path, self.find_sources()) return map(self.create_volume_from_local, files) def create_volume_from_local(self, path): @@ -61,7 +58,7 @@ def remove_volume(self, volume): def get_volume_path_by_param(self, params): image_name = params.get("image_name", self.name) image_format = params.get("image_format", "qcow2") - filename = "%s.%s" % (image_name, image_format) + filename = f"{image_name}.{image_format}" return os.path.join(self.target.path, filename) def get_volume_by_params(self, params, name): diff --git a/provider/virt_storage/backend/rbd.py b/provider/virt_storage/backend/rbd.py index af7fe9f7d3..89930cfcee 100644 --- 
a/provider/virt_storage/backend/rbd.py +++ b/provider/virt_storage/backend/rbd.py @@ -1,11 +1,8 @@ import os -from provider.virt_storage.helper import rbdcli - -from provider.virt_storage import storage_volume -from provider.virt_storage import virt_source -from provider.virt_storage import virt_target +from provider.virt_storage import storage_volume, virt_source, virt_target from provider.virt_storage.backend import base +from provider.virt_storage.helper import rbdcli class RBDPool(base.BaseStoragePool): @@ -14,7 +11,7 @@ class RBDPool(base.BaseStoragePool): def __init__(self, name): self.image = None self.server = None - super(RBDPool, self).__init__(name) + super().__init__(name) @property def helper(self): @@ -32,9 +29,7 @@ def stop(self): pass def refresh(self): - files = filter( - lambda x: not self.find_volume_by_path, - self.find_sources()) + files = filter(lambda x: not self.find_volume_by_path, self.find_sources()) return map(self.create_volume_on_rbd, files) def create_volume_on_rbd(self, path): @@ -55,7 +50,7 @@ def remove_volume(self, volume): def get_volume_path_by_param(self, params): image_name = params.get("image_name", self.name) image_format = params.get("image_format", "qcow2") - filename = "%s.%s" % (image_name, image_format) + filename = f"{image_name}.{image_format}" return os.path.join(self.target.path, filename) def get_volume_by_params(self, params, name): @@ -73,9 +68,10 @@ def get_volume_by_params(self, params, name): def pool_define_by_params(cls, name, params): inst = cls(name) inst.target = virt_target.PoolTarget.target_define_by_params(params) - inst.target.path = params['rbd_pool_name'] + inst.target.path = params["rbd_pool_name"] source_params = params.object_params(name) inst.source = virt_source.PoolSource.source_define_by_params( - name, source_params) + name, source_params + ) inst.set_special_opts_by_params(params) return inst diff --git a/provider/virt_storage/exception.py b/provider/virt_storage/exception.py index e377410a02..9b06acfa56 100644 --- a/provider/virt_storage/exception.py +++ b/provider/virt_storage/exception.py @@ -1,10 +1,8 @@ class UnsupportedStoragePoolException(Exception): - def __init__(self, sp_manager, sp_type): self.sp_manager = sp_manager self.sp_type = sp_type - self.message = "Unsupported StoragePool type '%s', supported type are: %s" % ( - self.sp_type, sp_manager.supported_storage_backend.keys()) + self.message = f"Unsupported StoragePool type '{self.sp_type}', supported type are: {sp_manager.supported_storage_backend.keys()}" def __str__(self): - return "UnsupportedStoragePoolException:%s" % self.message + return f"UnsupportedStoragePoolException:{self.message}" diff --git a/provider/virt_storage/helper/fscli.py b/provider/virt_storage/helper/fscli.py index 658e4a3263..4dfd4e68bc 100644 --- a/provider/virt_storage/helper/fscli.py +++ b/provider/virt_storage/helper/fscli.py @@ -4,8 +4,7 @@ from avocado.utils import process -class FsCli(object): - +class FsCli: def __init__(self, dir_path): self.dir_path = dir_path self._is_export = None @@ -23,7 +22,7 @@ def remove(self): @staticmethod def remove_file(path): - return process.system("rm -f %s" % path, shell=True) + return process.system(f"rm -f {path}", shell=True) def get_path_by_name(self, name): path = os.path.join(self.dir_path, name) @@ -58,10 +57,10 @@ def get_size(path): def path_to_url(self, path): """Get url schema path""" - return "%s%s" % (self._protocol, os.path.realpath(path)) + return f"{self._protocol}{os.path.realpath(path)}" def url_to_path(self, url): - 
return url[len(self._protocol):] + return url[len(self._protocol) :] @property def is_exists(self): @@ -71,12 +70,12 @@ def is_exists(self): @property def capacity(self): - cmd = "df -k --output=size %s |tail -n1" % self.dir_path + cmd = f"df -k --output=size {self.dir_path} |tail -n1" output = process.system_output(cmd, shell=True) return int(output) * 1024 @property def available(self): - cmd = "df -k --output=avail %s |tail -n1" % self.dir_path + cmd = f"df -k --output=avail {self.dir_path} |tail -n1" output = process.system_output(cmd, shell=True) return int(output) * 1024 diff --git a/provider/virt_storage/helper/rbdcli.py b/provider/virt_storage/helper/rbdcli.py index a3c5f2eef3..2acc3666e4 100644 --- a/provider/virt_storage/helper/rbdcli.py +++ b/provider/virt_storage/helper/rbdcli.py @@ -3,8 +3,7 @@ from avocado.utils import process -class RBDCli(object): - +class RBDCli: def __init__(self, pool_name): self.pool_name = pool_name self._protocol = r"rbd:" @@ -12,7 +11,7 @@ def __init__(self, pool_name): @staticmethod def remove_image(path): - return process.system("rbd rm %s" % path, shell=True) + return process.system(f"rbd rm {path}", shell=True) def get_path_by_name(self, name): path = os.path.join(self.pool_name, name) @@ -24,13 +23,13 @@ def get_url_by_name(self, name): def list_images(self): """List all images""" - cmd = "rbd ls %s" % self.pool_name + cmd = f"rbd ls {self.pool_name}" images = process.system_output(cmd).decode().split() return images def path_to_url(self, path): """Get url schema path""" - return "%s%s" % (self._protocol, path) + return f"{self._protocol}{path}" def url_to_path(self, url): - return url[len(self._protocol):] + return url[len(self._protocol) :] diff --git a/provider/virt_storage/storage_admin.py b/provider/virt_storage/storage_admin.py index 0f89c39ff4..40199fb677 100644 --- a/provider/virt_storage/storage_admin.py +++ b/provider/virt_storage/storage_admin.py @@ -2,14 +2,13 @@ from functools import reduce from . 
import exception -from .backend import rbd -from .backend import directory +from .backend import directory, rbd from .utils import state -LOG_JOB = logging.getLogger('avocado.test') +LOG_JOB = logging.getLogger("avocado.test") -class StoragePoolAdmin(object): +class StoragePoolAdmin: supported_storage_backend = { "directory": directory.DirectoryPool, "rbd": rbd.RBDPool, @@ -49,14 +48,14 @@ def pool_define_by_params(cls, name, params): def pools_define_by_params(cls, params): lst_names = params.objects("storage_pools") lst_params = map(params.object_params, lst_names) - return map(lambda x: cls.pool_define_by_params( - *x), zip(lst_names, lst_params)) + return map(lambda x: cls.pool_define_by_params(*x), zip(lst_names, lst_params)) @classmethod def list_volumes(cls): """List all volumes in host""" - out = reduce(lambda x, y: x.union( - y), [p.get_volumes() for p in sp_admin.list_pools()]) + out = reduce( + lambda x, y: x.union(y), [p.get_volumes() for p in sp_admin.list_pools()] + ) return list(out) @classmethod @@ -77,10 +76,7 @@ def find_pool_by_volume(volume): @classmethod def find_pool_by_path(cls, path): try: - pools = list( - filter( - lambda x: x.target.path == path, - cls.list_pools())) + pools = list(filter(lambda x: x.target.path == path, cls.list_pools())) return pools[0] except IndexError: LOG_JOB.warning("no storage pool with matching path '%s'", path) @@ -109,8 +105,9 @@ def release_volume(cls, volume): @classmethod def volumes_define_by_params(cls, params): - return map(lambda x: cls.volume_define_by_params( - x, params), params.objects("images")) + return map( + lambda x: cls.volume_define_by_params(x, params), params.objects("images") + ) @classmethod def volume_define_by_params(cls, volume_name, test_params): @@ -130,8 +127,7 @@ def _volume_define_by_params(name, params): if backing_name: backing_store = cls.get_volume_by_name(backing_name) if not backing_store: - backing_store = _volume_define_by_params( - backing_name, params) + backing_store = _volume_define_by_params(backing_name, params) volume.backing = backing_store volume.refresh_with_params(volume_params) return volume diff --git a/provider/virt_storage/storage_volume.py b/provider/virt_storage/storage_volume.py index a978c039eb..2ebc5d3a48 100644 --- a/provider/virt_storage/storage_volume.py +++ b/provider/virt_storage/storage_volume.py @@ -1,14 +1,12 @@ -from virttest import utils_misc -from virttest import utils_qemu -from virttest import utils_version +from virttest import utils_misc, utils_qemu, utils_version from virttest.qemu_devices import qdevices from provider import backup_utils -from . import virt_encryption +from . 
import virt_encryption -class StorageVolume(object): +class StorageVolume: def __init__(self, pool): self.name = None self.pool = pool @@ -103,9 +101,7 @@ def capacity(self): @capacity.setter def capacity(self, size): - self._capacity = float( - utils_misc.normalize_data_size( - str(size), 'B', '1024')) + self._capacity = float(utils_misc.normalize_data_size(str(size), "B", "1024")) @property def auth(self): @@ -127,7 +123,7 @@ def refresh_with_params(self, params): qemu_binary = utils_misc.get_qemu_binary(params) qemu_version = utils_qemu.get_qemu_version(qemu_binary)[0] if qemu_version in utils_version.VersionInterval( - backup_utils.BACKING_MASK_PROTOCOL_VERSION_SCOPE + backup_utils.BACKING_MASK_PROTOCOL_VERSION_SCOPE ): self.raw_format_node_eliminated = True @@ -153,21 +149,20 @@ def refresh_format_by_params(self, params): if self.format.TYPE == "qcow2": encrypt = params.get("image_encryption") if encrypt and encrypt != "off": - self.encrypt = virt_encryption.VolumeEncryption.encryption_define_by_params( - params) - self.format.set_param( - "encrypt.key-secret", - self.encrypt.secret.name) + self.encrypt = ( + virt_encryption.VolumeEncryption.encryption_define_by_params(params) + ) + self.format.set_param("encrypt.key-secret", self.encrypt.secret.name) self.format.set_param("encrypt.format", self.encrypt.format) backing = params.get("backing") if backing: - backing_node = "drive_%s" % backing + backing_node = f"drive_{backing}" self.format.set_param("backing", backing_node) data_file_name = params.get("image_data_file") if data_file_name: - data_file_node = "drive_%s" % data_file_name + data_file_node = f"drive_{data_file_name}" self.format.set_param("data-file", data_file_node) self.format.set_param("file", self.protocol.get_param("node-name")) @@ -205,7 +200,9 @@ def info(self): out["path"] = self.path out["key"] = self.key # __hash__ uses it when adding a volume object - out["format"] = self._params.get("image_format", "qcow2") if self._params else None + out["format"] = ( + self._params.get("image_format", "qcow2") if self._params else None + ) out["auth"] = str(self.auth) out["capacity"] = self.capacity out["preallocation"] = self.preallocation @@ -219,13 +216,12 @@ def generate_qemu_img_options(self): if fmt == "qcow2": backing_store = self.backing if backing_store: - options += " -b %s" % backing_store.key + options += f" -b {backing_store.key}" encrypt = self.format.get_param("encrypt") if encrypt: secret = encrypt.secret - options += " -%s " % secret.as_qobject().cmdline() - options += " -o encrypt.format=%s,encrypt.key-secret=%s" % ( - encrypt.format, secret.name) + options += f" -{secret.as_qobject().cmdline()} " + options += f" -o encrypt.format={encrypt.format},encrypt.key-secret={secret.name}" return options def hotplug(self, vm): @@ -260,17 +256,16 @@ def create_protocol_by_qmp(self, vm, timeout=120): options["size"] = self.capacity else: raise NotImplementedError - arguments = { - "options": options, - "job-id": node_name, - "timeout": timeout} + arguments = {"options": options, "job-id": node_name, "timeout": timeout} return backup_utils.blockdev_create(vm, **arguments) def format_protocol_by_qmp(self, vm, timeout=120): node_name = self.format.get_param("node-name") - options = {"driver": self.format.TYPE, - "file": self.protocol.get_param("node-name"), - "size": self.capacity} + options = { + "driver": self.format.TYPE, + "file": self.protocol.get_param("node-name"), + "size": self.capacity, + } if self.format.TYPE == "qcow2": if self.backing: options["backing-fmt"] = 
self.backing.format.TYPE @@ -284,22 +279,17 @@ def format_protocol_by_qmp(self, vm, timeout=120): if encrypt_format: options["encrypt"]["format"] = encrypt_format if self._params and self._params.get("image_cluster_size"): - options["cluster-size"] = int( - self._params["image_cluster_size"]) + options["cluster-size"] = int(self._params["image_cluster_size"]) if self._params.get("image_data_file"): options["data-file"] = self.format.get_param("data-file") data_file_raw_set = self._params.get("image_data_file_raw") data_file_raw = data_file_raw_set in ("on", "yes", "true") options["data-file-raw"] = data_file_raw - arguments = { - "options": options, - "job-id": node_name, - "timeout": timeout} + arguments = {"options": options, "job-id": node_name, "timeout": timeout} backup_utils.blockdev_create(vm, **arguments) def __str__(self): - return "%s-%s(%s)" % (self.__class__.__name__, - self.name, str(self.key)) + return f"{self.__class__.__name__}-{self.name}({str(self.key)})" def __eq__(self, vol): if not isinstance(vol, StorageVolume): @@ -311,7 +301,7 @@ def __hash__(self): return hash(str(self.info())) def __repr__(self): - return "'%s'" % self.name + return f"'{self.name}'" def as_json(self): if not self.raw_format_node_eliminated: @@ -319,4 +309,4 @@ def as_json(self): else: _, options = self.protocol.hotplug_qmp() - return "json: %s" % options + return f"json: {options}" diff --git a/provider/virt_storage/utils/state.py b/provider/virt_storage/utils/state.py index b7c3989fae..2f8a6a76ea 100644 --- a/provider/virt_storage/utils/state.py +++ b/provider/virt_storage/utils/state.py @@ -2,24 +2,23 @@ def register_pool_state_machine(instance): - states = ['dead', 'ready', 'running'] + states = ["dead", "ready", "running"] transitions = [ - {'trigger': 'start_pool', - 'source': ['dead', 'ready'], - 'dest': 'running', - 'after': 'start'}, - {'trigger': 'stop_pool', - 'source': 'running', - 'dest': 'ready', - 'after': 'stop'}, - {'trigger': 'destroy_pool', - 'source': ['stop', 'ready'], - 'dest': 'dead', - 'after': 'destroy'} + { + "trigger": "start_pool", + "source": ["dead", "ready"], + "dest": "running", + "after": "start", + }, + {"trigger": "stop_pool", "source": "running", "dest": "ready", "after": "stop"}, + { + "trigger": "destroy_pool", + "source": ["stop", "ready"], + "dest": "dead", + "after": "destroy", + }, ] machine = Machine( - model=instance, - states=states, - transitions=transitions, - initial="dead") + model=instance, states=states, transitions=transitions, initial="dead" + ) return machine diff --git a/provider/virt_storage/utils/storage_util.py b/provider/virt_storage/utils/storage_util.py index 8c9dfaf719..3a3fa556d1 100644 --- a/provider/virt_storage/utils/storage_util.py +++ b/provider/virt_storage/utils/storage_util.py @@ -3,22 +3,22 @@ from avocado.core import exceptions from avocado.utils import process -LOG_JOB = logging.getLogger('avocado.test') +LOG_JOB = logging.getLogger("avocado.test") def create_volume(volume): if volume.preallocation == "full": if volume.pool.available < volume.capacity: raise exceptions.TestError( - "No enough free space, request '%s' but available in %s is '%s'" % ( - volume.capacity, volume.pool.name, volume.pool.available)) + f"No enough free space, request '{volume.capacity}' but available in {volume.pool.name} is '{volume.pool.available}'" + ) else: if volume.format == "qcow2": if volume.pool.available * 1.2 < volume.capacity: raise exceptions.TestError( - "No enough free space, request '%s' but available in %s is '%s'" % ( - 
volume.capacity, volume.pool.name, volume.pool.available)) + f"No enough free space, request '{volume.capacity}' but available in {volume.pool.name} is '{volume.pool.available}'" + ) options = volume.generate_qemu_img_options() - cmd = "qemu-img create %s %s %sB" % (options, volume.key, volume.capacity) + cmd = f"qemu-img create {options} {volume.key} {volume.capacity}B" LOG_JOB.debug("create volume cmd: %s", cmd) process.system(cmd, shell=True, ignore_status=False) diff --git a/provider/virt_storage/virt_auth.py b/provider/virt_storage/virt_auth.py index 1d087dff41..6f8f3bea2c 100644 --- a/provider/virt_storage/virt_auth.py +++ b/provider/virt_storage/virt_auth.py @@ -1,12 +1,10 @@ -import os - import configparser +import os from .virt_secret import secret_admin -class StorageAuthation(object): - +class StorageAuthation: def __init__(self, _type=None, username=None, password=None, secret=None): self.type = _type self.username = username @@ -40,6 +38,7 @@ def auth_define_by_params(cls, params): if not secret: secret_params = params.object_params(secret_name) secret = secret_admin.secret_define_by_params( - secret_name, secret_params) + secret_name, secret_params + ) instance.secret = secret return instance diff --git a/provider/virt_storage/virt_device.py b/provider/virt_storage/virt_device.py index 235e7a000c..d7c63429be 100644 --- a/provider/virt_storage/virt_device.py +++ b/provider/virt_storage/virt_device.py @@ -1,5 +1,4 @@ -class HostAdapter(object): - +class HostAdapter: def __init__(self): self.name = None self.type = None @@ -24,8 +23,7 @@ def adapter_define_by_params(cls, params): return inst -class StorageDevice(object): - +class StorageDevice: def __init__(self, path): self.path = path @@ -34,8 +32,7 @@ def device_define_by_params(cls, params): return cls(params.get("device_path")) -class StorageHost(object): - +class StorageHost: def __init__(self, hostname, port=None): self.hostname = hostname self.port = port diff --git a/provider/virt_storage/virt_encryption.py b/provider/virt_storage/virt_encryption.py index 5e89ebce5e..21d1d913c4 100644 --- a/provider/virt_storage/virt_encryption.py +++ b/provider/virt_storage/virt_encryption.py @@ -1,9 +1,9 @@ import json -from .virt_secret import secret_admin +from .virt_secret import secret_admin -class VolumeEncryption(object): +class VolumeEncryption: def __init__(self, encrypt_format=None, secret=None): self.format = encrypt_format self.secret = secret @@ -19,7 +19,7 @@ def __repr__(self): return "'%s'" % {"format": self.format, "key-secret": self.secret.name} def __str__(self): - return "%s: %s" % (self.__class__.__name__, self.format) + return f"{self.__class__.__name__}: {self.format}" @classmethod def encryption_define_by_params(cls, params): @@ -33,7 +33,6 @@ def encryption_define_by_params(cls, params): secret = secret_admin.find_secret_by_name(secret_name) if not secret: secret_params = params.object_params(secret_name) - secret = secret_admin.secret_define_by_params( - secret_name, secret_params) + secret = secret_admin.secret_define_by_params(secret_name, secret_params) instance.secret = secret return instance diff --git a/provider/virt_storage/virt_secret.py b/provider/virt_storage/virt_secret.py index 61c0c9e510..ab89774e44 100644 --- a/provider/virt_storage/virt_secret.py +++ b/provider/virt_storage/virt_secret.py @@ -6,8 +6,7 @@ from virttest.qemu_devices import qdevices -class StorageSecret(object): - +class StorageSecret: def __init__(self, name, data, stype=None): self.name = name self._data = data @@ -48,7 +47,7 @@ 
def as_qobject(self): return dev -class StorageSecretAdmin(object): +class StorageSecretAdmin: __secrets = list() @classmethod diff --git a/provider/virt_storage/virt_source.py b/provider/virt_storage/virt_source.py index 1a5ddcbcf3..3f62c4314c 100644 --- a/provider/virt_storage/virt_source.py +++ b/provider/virt_storage/virt_source.py @@ -1,11 +1,8 @@ from .virt_auth import StorageAuthation -from .virt_device import HostAdapter -from .virt_device import StorageDevice -from .virt_device import StorageHost +from .virt_device import HostAdapter, StorageDevice, StorageHost -class PoolSource(object): - +class PoolSource: def __init__(self): self.name = None self.pool_name = None @@ -57,4 +54,4 @@ def source_define_by_params(cls, name, params): return instance def __str__(self): - return "%s: %s" % (self.__class__.__name__, self.name) + return f"{self.__class__.__name__}: {self.name}" diff --git a/provider/virt_storage/virt_target.py b/provider/virt_storage/virt_target.py index a283b94cbd..73045028a6 100644 --- a/provider/virt_storage/virt_target.py +++ b/provider/virt_storage/virt_target.py @@ -1,5 +1,4 @@ -class PoolTarget(object): - +class PoolTarget: def __init__(self): self.path = None self.format = None @@ -12,4 +11,4 @@ def target_define_by_params(cls, params): return instance def __str__(self): - return "%s: %s" % (self.__class__.__name__, self.path) + return f"{self.__class__.__name__}: {self.path}" diff --git a/provider/virtio_fs_utils.py b/provider/virtio_fs_utils.py index f0b2a994c9..2e1efd692f 100644 --- a/provider/virtio_fs_utils.py +++ b/provider/virtio_fs_utils.py @@ -2,14 +2,10 @@ import os from avocado.utils import process - -from virttest import data_dir -from virttest import error_context -from virttest import utils_misc - +from virttest import data_dir, error_context, utils_misc from virttest.utils_windows import virtio_win -LOG_JOB = logging.getLogger('avocado.test') +LOG_JOB = logging.getLogger("avocado.test") def get_virtiofs_driver_letter(test, fs_target, session): @@ -21,8 +17,10 @@ def get_virtiofs_driver_letter(test, fs_target, session): :param session: the session of guest :return driver_letter: the driver letter of the virtiofs """ - error_context.context("Get driver letter of virtio fs target, " - "the driver label is %s." 
% fs_target, LOG_JOB.info) + error_context.context( + "Get driver letter of virtio fs target, " f"the driver label is {fs_target}.", + LOG_JOB.info, + ) driver_letter = utils_misc.get_winutils_vol(session, fs_target) if driver_letter is None: test.fail("Could not get virtio-fs mounted driver letter.") @@ -40,57 +38,61 @@ def basic_io_test(test, params, session): :param session: the session from guest """ error_context.context("Running viofs basic io test", LOG_JOB.info) - test_file = params.get('virtio_fs_test_file', "virtio_fs_test_file") + test_file = params.get("virtio_fs_test_file", "virtio_fs_test_file") windows = params.get("os_type", "windows") == "windows" io_timeout = params.get_numeric("fs_io_timeout", 120) fs_source = params.get("fs_source_dir", "virtio_fs_test/") fs_target = params.get("fs_target", "myfs") - base_dir = params.get("fs_source_base_dir", - data_dir.get_data_dir()) + base_dir = params.get("fs_source_base_dir", data_dir.get_data_dir()) if not os.path.isabs(fs_source): fs_source = os.path.join(base_dir, fs_source) host_data = os.path.join(fs_source, test_file) try: if windows: - cmd_dd = params.get("virtio_fs_cmd_dd", - 'dd if=/dev/random of=%s bs=1M count=100') + cmd_dd = params.get( + "virtio_fs_cmd_dd", "dd if=/dev/random of=%s bs=1M count=100" + ) driver_letter = get_virtiofs_driver_letter(test, fs_target, session) - fs_dest = "%s:" % driver_letter + fs_dest = f"{driver_letter}:" else: - cmd_dd = params.get("virtio_fs_cmd_dd", - 'dd if=/dev/urandom of=%s bs=1M ' - 'count=100 iflag=fullblock') + cmd_dd = params.get( + "virtio_fs_cmd_dd", + "dd if=/dev/urandom of=%s bs=1M " "count=100 iflag=fullblock", + ) fs_dest = params.get("fs_dest", "/mnt/" + fs_target) guest_file = os.path.join(fs_dest, test_file) - error_context.context("The guest file in shared dir is %s" % - guest_file, LOG_JOB.info) - error_context.context("Creating file under %s inside guest." 
% fs_dest, - LOG_JOB.info) + error_context.context( + f"The guest file in shared dir is {guest_file}", LOG_JOB.info + ) + error_context.context( + f"Creating file under {fs_dest} inside guest.", LOG_JOB.info + ) session.cmd(cmd_dd % guest_file, io_timeout) if windows: guest_file_win = guest_file.replace("/", "\\") - cmd_md5 = params.get("cmd_md5", '%s: && md5sum.exe %s') + cmd_md5 = params.get("cmd_md5", "%s: && md5sum.exe %s") cmd_md5_vm = cmd_md5 % (driver_letter, guest_file_win) else: - cmd_md5 = params.get("cmd_md5", 'md5sum %s') + cmd_md5 = params.get("cmd_md5", "md5sum %s") cmd_md5_vm = cmd_md5 % guest_file - md5_guest = session.cmd_output(cmd_md5_vm, - io_timeout).strip().split()[0] - error_context.context("md5 of the guest file: %s" % md5_guest, - LOG_JOB.info) - md5_host = process.run("md5sum %s" % host_data, - io_timeout).stdout_text.strip().split()[0] - error_context.context("md5 of the host file: %s" % md5_host, - LOG_JOB.info) + md5_guest = session.cmd_output(cmd_md5_vm, io_timeout).strip().split()[0] + error_context.context(f"md5 of the guest file: {md5_guest}", LOG_JOB.info) + md5_host = ( + process.run(f"md5sum {host_data}", io_timeout) + .stdout_text.strip() + .split()[0] + ) + error_context.context(f"md5 of the host file: {md5_host}", LOG_JOB.info) if md5_guest != md5_host: - test.fail('The md5 value of host is not same to guest.') + test.fail("The md5 value of host is not same to guest.") else: - error_context.context("The md5 of host is as same as md5 of " - "guest.", LOG_JOB.info) + error_context.context( + "The md5 of host is as same as md5 of " "guest.", LOG_JOB.info + ) finally: if not windows: - session.cmd("rm -rf %s" % guest_file) + session.cmd(f"rm -rf {guest_file}") create_sub_folder_test(params, session, fs_dest, fs_source) @@ -107,26 +109,29 @@ def create_sub_folder_test(params, session, guest_dest, host_dir): os_type = params.get("os_type") folder_name = params.get("sub_folder_name", "virtio_fs_folder_test") try: - error_context.context("Create the sub folder on shared directory " - "of guest: ", LOG_JOB.info) + error_context.context( + "Create the sub folder on shared directory " "of guest: ", LOG_JOB.info + ) if os_type == "linux": - session.cmd("mkdir -p %s" % - (guest_dest + "/" + folder_name + "/a")) + session.cmd("mkdir -p %s" % (guest_dest + "/" + folder_name + "/a")) else: fs_dest = guest_dest.replace("/", "\\") session.cmd("md %s" % (fs_dest + "\\" + folder_name + "\\a")) - error_context.context("Check the sub folder on shared directory " - "of host: ", LOG_JOB.info) + error_context.context( + "Check the sub folder on shared directory " "of host: ", LOG_JOB.info + ) if os.path.exists(host_dir + "/" + folder_name + "/a"): - error_context.context("Find the %s on the host." % - (host_dir + "/" + folder_name + "/a"), - LOG_JOB.info) + error_context.context( + "Find the %s on the host." 
% (host_dir + "/" + folder_name + "/a"), + LOG_JOB.info, + ) else: LOG_JOB.error("Do NOT find the sub folder on the host.") finally: - error_context.context("Delete the sub folder on shared directory " - "of guest: ", LOG_JOB.info) + error_context.context( + "Delete the sub folder on shared directory " "of guest: ", LOG_JOB.info + ) if os_type == "linux": session.cmd("rm -rf %s" % (guest_dest + "/" + folder_name)) else: @@ -157,85 +162,101 @@ def basic_io_test_via_psexec(test, params, vm, usernm, pwd): :param pwd: the password used to execute the cmd """ if params.get("os_type", "windows") == "windows": - error_context.context("Running viofs basic io test via psexec", - LOG_JOB.info) - cmd_dd_win = params.get("virtio_fs_cmd_dd_win", - "C:\\tools\\dd.exe if=/dev/random of=%s " - "bs=1M count=100") - test_file = params.get('virtio_fs_test_file', "virtio_fs_test_file") + error_context.context("Running viofs basic io test via psexec", LOG_JOB.info) + cmd_dd_win = params.get( + "virtio_fs_cmd_dd_win", + "C:\\tools\\dd.exe if=/dev/random of=%s " "bs=1M count=100", + ) + test_file = params.get("virtio_fs_test_file", "virtio_fs_test_file") io_timeout = params.get_numeric("fs_io_timeout", 120) fs_source = params.get("fs_source_dir", "virtio_fs_test/") fs_target = params.get("fs_target", "myfs") - base_dir = params.get("fs_source_base_dir", - data_dir.get_data_dir()) + base_dir = params.get("fs_source_base_dir", data_dir.get_data_dir()) if not os.path.isabs(fs_source): fs_source = os.path.join(base_dir, fs_source) host_data = os.path.join(fs_source, test_file) session = vm.wait_for_login() driver_letter = get_virtiofs_driver_letter(test, fs_target, session) - fs_dest = "%s:" % driver_letter + fs_dest = f"{driver_letter}:" guest_file = os.path.join(fs_dest, test_file) cmd_io_test = "%systemdrive%\\cmd_io_test.bat" - error_context.context("Creating the test file(cmd_io_test.bat) " - "on guest", LOG_JOB.info) - session.cmd("echo " + cmd_dd_win % guest_file + " > " + cmd_io_test, - io_timeout) + error_context.context( + "Creating the test file(cmd_io_test.bat) " "on guest", LOG_JOB.info + ) + session.cmd("echo " + cmd_dd_win % guest_file + " > " + cmd_io_test, io_timeout) psexec_path = install_psexec(vm) try: - error_context.context("Execute the cmd_io_test.bat on guest", - LOG_JOB.info) + error_context.context("Execute the cmd_io_test.bat on guest", LOG_JOB.info) domain_dns = params.get("domain_dns", "") domain_dns += "\\" if domain_dns else "" - session.cmd(psexec_path + " /accepteula -u " + domain_dns + - usernm + " -p " + pwd + " " + cmd_io_test) + session.cmd( + psexec_path + + " /accepteula -u " + + domain_dns + + usernm + + " -p " + + pwd + + " " + + cmd_io_test + ) guest_file_win = guest_file.replace("/", "\\") - cmd_md5 = params.get("cmd_md5", '%s: && md5sum.exe %s') + cmd_md5 = params.get("cmd_md5", "%s: && md5sum.exe %s") cmd_md5_vm = cmd_md5 % (driver_letter, guest_file_win) - md5_guest = session.cmd_output(cmd_md5_vm, - io_timeout).strip().split()[0] - error_context.context("md5 of the guest file: %s" % md5_guest, - LOG_JOB.info) - md5_host = process.run("md5sum %s" % host_data, - io_timeout).stdout_text.strip().split()[0] - error_context.context("md5 of the host file: %s" % md5_host, - LOG_JOB.info) + md5_guest = session.cmd_output(cmd_md5_vm, io_timeout).strip().split()[0] + error_context.context(f"md5 of the guest file: {md5_guest}", LOG_JOB.info) + md5_host = ( + process.run(f"md5sum {host_data}", io_timeout) + .stdout_text.strip() + .split()[0] + ) + error_context.context(f"md5 of the 
host file: {md5_host}", LOG_JOB.info) if md5_guest != md5_host: - test.fail('The md5 value of host is not same to guest.') + test.fail("The md5 value of host is not same to guest.") else: - error_context.context("The md5 of host is as same as md5 of " - "guest.", LOG_JOB.info) + error_context.context( + "The md5 of host is as same as md5 of " "guest.", LOG_JOB.info + ) finally: - error_context.context("Delete the test file from host.", - LOG_JOB.info) + error_context.context("Delete the test file from host.", LOG_JOB.info) os.remove(host_data) - error_context.context("Start to test creating/deleting folder...", - LOG_JOB.info) + error_context.context("Start to test creating/deleting folder...", LOG_JOB.info) bat_create_folder_test = "%systemdrive%\\cmd_create_folder_test.bat" folder_name = params.get("sub_folder_name", "virtio_fs_folder_test") cmd_create_folder = "md %s" % (fs_dest + "\\" + folder_name + "\\a") try: - session.cmd("echo " + cmd_create_folder + " > " + - bat_create_folder_test) - error_context.context("Create the sub folder on shared directory " - "of guest: ", LOG_JOB.info) - session.cmd(psexec_path + " /accepteula -u " + domain_dns + - usernm + " -p " + pwd + " " + bat_create_folder_test) - error_context.context("Check the sub folder on shared directory " - "of host: ", LOG_JOB.info) + session.cmd("echo " + cmd_create_folder + " > " + bat_create_folder_test) + error_context.context( + "Create the sub folder on shared directory " "of guest: ", LOG_JOB.info + ) + session.cmd( + psexec_path + + " /accepteula -u " + + domain_dns + + usernm + + " -p " + + pwd + + " " + + bat_create_folder_test + ) + error_context.context( + "Check the sub folder on shared directory " "of host: ", LOG_JOB.info + ) if os.path.exists(fs_source + "/" + folder_name + "/a"): - error_context.context("Find the %s on the host." % - (fs_source + "/" + folder_name + "/a"), - LOG_JOB.info) + error_context.context( + "Find the %s on the host." 
% (fs_source + "/" + folder_name + "/a"), + LOG_JOB.info, + ) else: LOG_JOB.error("Do NOT find the sub folder on the host.") finally: - error_context.context("Delete the sub folder on shared directory " - "of guest: ", LOG_JOB.info) + error_context.context( + "Delete the sub folder on shared directory " "of guest: ", LOG_JOB.info + ) session.cmd("rmdir /s /q %s" % (fs_dest + "\\" + folder_name)) @@ -250,10 +271,9 @@ def get_viofs_exe_path(test, params, session): error_context.context("Get virtiofs exe full path.", test.log.info) media_type = params["virtio_win_media_type"] try: - get_drive_letter = getattr(virtio_win, "drive_letter_%s" % media_type) - get_product_dirname = getattr(virtio_win, - "product_dirname_%s" % media_type) - get_arch_dirname = getattr(virtio_win, "arch_dirname_%s" % media_type) + get_drive_letter = getattr(virtio_win, f"drive_letter_{media_type}") + get_product_dirname = getattr(virtio_win, f"product_dirname_{media_type}") + get_arch_dirname = getattr(virtio_win, f"arch_dirname_{media_type}") except AttributeError: test.error("Not supported virtio win media type '%s'", media_type) viowin_ltr = get_drive_letter(session) @@ -266,9 +286,9 @@ def get_viofs_exe_path(test, params, session): if not guest_arch: test.error("Could not get architecture dirname of the vm") - exe_middle_path = ("{name}\\{arch}" if media_type == "iso" - else "{arch}\\{name}").format(name=guest_name, - arch=guest_arch) + exe_middle_path = ( + "{name}\\{arch}" if media_type == "iso" else "{arch}\\{name}" + ).format(name=guest_name, arch=guest_arch) exe_file_name = "virtiofs.exe" exe_find_cmd = 'dir /b /s %s\\%s | findstr "\\%s\\\\"' exe_find_cmd %= (viowin_ltr, exe_file_name, exe_middle_path) @@ -286,40 +306,43 @@ def create_viofs_service(test, params, session, service="VirtioFsSvc"): """ install_winfsp(test, params, session) exe_path = get_viofs_exe_path(test, params, session) - viofs_exe_copy_cmd_default = 'xcopy %s C:\\ /Y' - viofs_exe_copy_cmd = params.get("viofs_exe_copy_cmd", - viofs_exe_copy_cmd_default) + viofs_exe_copy_cmd_default = "xcopy %s C:\\ /Y" + viofs_exe_copy_cmd = params.get("viofs_exe_copy_cmd", viofs_exe_copy_cmd_default) if service == "VirtioFsSvc": - error_context.context("Create virtiofs own service in" - " Windows guest.", - test.log.info) + error_context.context( + "Create virtiofs own service in" " Windows guest.", test.log.info + ) output = query_viofs_service(test, params, session) if "not exist as an installed service" in output: session.cmd(viofs_exe_copy_cmd % exe_path) - viofs_sc_create_cmd_default = 'sc create VirtioFsSvc ' \ - 'binpath="c:\\virtiofs.exe" ' \ - 'start=auto ' \ - 'depend="WinFsp.Launcher/VirtioFsDrv" ' \ - 'DisplayName="Virtio FS Service"' - viofs_sc_create_cmd = params.get("viofs_sc_create_cmd", - viofs_sc_create_cmd_default) - sc_create_s, sc_create_o = session.cmd_status_output( - viofs_sc_create_cmd) + viofs_sc_create_cmd_default = ( + "sc create VirtioFsSvc " + 'binpath="c:\\virtiofs.exe" ' + "start=auto " + 'depend="WinFsp.Launcher/VirtioFsDrv" ' + 'DisplayName="Virtio FS Service"' + ) + viofs_sc_create_cmd = params.get( + "viofs_sc_create_cmd", viofs_sc_create_cmd_default + ) + sc_create_s, sc_create_o = session.cmd_status_output(viofs_sc_create_cmd) if sc_create_s != 0: - test.fail("Failed to create virtiofs service, " - "output is %s" % sc_create_o) + test.fail( + "Failed to create virtiofs service, " f"output is {sc_create_o}" + ) if service == "WinFSP.Launcher": - error_context.context("Stop virtiofs own service, " - "using WinFsp.Launcher 
service instead.", - test.log.info) + error_context.context( + "Stop virtiofs own service, " "using WinFsp.Launcher service instead.", + test.log.info, + ) stop_viofs_service(test, params, session) session.cmd(viofs_exe_copy_cmd % exe_path) - error_context.context("Config WinFsp.Launcher for multifs.", - test.log.info) + error_context.context("Config WinFsp.Launcher for multifs.", test.log.info) output = session.cmd_output(params["viofs_sc_create_cmd"]) if "completed successfully" not in output.lower(): - test.fail("MultiFS: Config WinFsp.Launcher failed, " - "the output is %s." % output) + test.fail( + "MultiFS: Config WinFsp.Launcher failed, " f"the output is {output}." + ) def delete_viofs_serivce(test, params, session): @@ -329,13 +352,13 @@ def delete_viofs_serivce(test, params, session): :param params: Dictionary with the test parameters :param session: the session of guest """ - viofs_sc_delete_cmd = params.get("viofs_sc_delete_cmd", - "sc delete VirtioFsSvc") + viofs_sc_delete_cmd = params.get("viofs_sc_delete_cmd", "sc delete VirtioFsSvc") error_context.context("Deleting the viofs service...", test.log.info) output = query_viofs_service(test, params, session) if "not exist as an installed service" in output: - test.log.info("The viofs service was NOT found at the guest." - " Skipping delete...") + test.log.info( + "The viofs service was NOT found at the guest." " Skipping delete..." + ) else: status = session.cmd_status(viofs_sc_delete_cmd) if status == 0: @@ -351,8 +374,7 @@ def start_viofs_service(test, params, session): :param params: Dictionary with the test parameters :param session: the session of guest """ - viofs_sc_start_cmd = params.get("viofs_sc_start_cmd", - "sc start VirtioFsSvc") + viofs_sc_start_cmd = params.get("viofs_sc_start_cmd", "sc start VirtioFsSvc") error_context.context("Start the viofs service...", test.log.info) test.log.info("Check if virtiofs service is started.") output = query_viofs_service(test, params, session) @@ -374,10 +396,8 @@ def query_viofs_service(test, params, session): :param session: the session of guest :return output: the output of cmd return """ - viofs_sc_query_cmd = params.get("viofs_sc_query_cmd", - "sc query VirtioFsSvc") - error_context.context("Query the status of viofs service...", - test.log.info) + viofs_sc_query_cmd = params.get("viofs_sc_query_cmd", "sc query VirtioFsSvc") + error_context.context("Query the status of viofs service...", test.log.info) return session.cmd_output(viofs_sc_query_cmd) @@ -417,12 +437,12 @@ def install_winfsp(test, params, session): """ cmd_timeout = params.get_numeric("cmd_timeout", 120) install_path = params["install_winfsp_path"] - check_installed_cmd = params.get("check_winfsp_installed_cmd", - r'dir "%s" |findstr /I winfsp') + check_installed_cmd = params.get( + "check_winfsp_installed_cmd", r'dir "%s" |findstr /I winfsp' + ) check_installed_cmd = check_installed_cmd % install_path - install_winfsp_cmd = r'msiexec /i WIN_UTILS:\winfsp.msi /qn' - install_cmd = params.get("install_winfsp_cmd", - install_winfsp_cmd) + install_winfsp_cmd = r"msiexec /i WIN_UTILS:\winfsp.msi /qn" + install_cmd = params.get("install_winfsp_cmd", install_winfsp_cmd) # install winfsp tool test.log.info("Install winfsp for windows guest.") installed = session.cmd_status(check_installed_cmd) == 0 @@ -431,8 +451,9 @@ def install_winfsp(test, params, session): else: install_cmd = utils_misc.set_winutils_letter(session, install_cmd) session.cmd(install_cmd, cmd_timeout) - if not utils_misc.wait_for(lambda: not 
session.cmd_status( - check_installed_cmd), 60): + if not utils_misc.wait_for( + lambda: not session.cmd_status(check_installed_cmd), 60 + ): test.error("Winfsp tool is not installed.") @@ -447,10 +468,8 @@ def operate_debug_log(test, params, session, vm, operation): :param operation: enable or disable :return: session """ - error_context.context("%s virtiofs debug log in guest." % operation, - test.log.info) - query_cmd = params.get('viofs_reg_query_cmd', - r'reg query HKLM\Software\VirtIO-FS') + error_context.context(f"{operation} virtiofs debug log in guest.", test.log.info) + query_cmd = params.get("viofs_reg_query_cmd", r"reg query HKLM\Software\VirtIO-FS") ret = session.cmd_output(query_cmd) run_reg_cmd = [] @@ -468,10 +487,10 @@ def operate_debug_log(test, params, session, vm, operation): test.error("Please give a right operation.") for reg_cmd in run_reg_cmd: - test.log.info("Set %s " % reg_cmd) + test.log.info("Set %s ", reg_cmd) s, o = session.cmd_status_output(reg_cmd) if s: - test.fail("Fail command: %s. Output: %s" % (reg_cmd, o)) + test.fail(f"Fail command: {reg_cmd}. Output: {o}") if run_reg_cmd: session = vm.reboot() return session diff --git a/provider/virtio_mem_utils.py b/provider/virtio_mem_utils.py index 834051f5f4..700a87095b 100644 --- a/provider/virtio_mem_utils.py +++ b/provider/virtio_mem_utils.py @@ -4,10 +4,10 @@ This module is meant to reduce code size on virtio_mem cases avoiding repeat functions implementation. """ + import re from avocado.utils.wait import wait_for - from virttest import error_context from virttest.utils_misc import normalize_data_size @@ -132,7 +132,7 @@ def count_memslots(vm, mem_object_id): :param mem_object_id: the ID of the memory object device """ output = vm.monitor.info("mtree") - return len(set(re.findall(r"(memslot.+%s)" % mem_object_id, output, re.MULTILINE))) + return len(set(re.findall(rf"(memslot.+{mem_object_id})", output, re.MULTILINE))) def validate_memslots(expected_memslots, test, vm, mem_object_id, timeout=10): diff --git a/provider/win_dev.py b/provider/win_dev.py index 2154df4868..39642ff835 100644 --- a/provider/win_dev.py +++ b/provider/win_dev.py @@ -2,10 +2,10 @@ windows device and driver utility functions. """ + import re -from virttest import error_context -from virttest import utils_misc +from virttest import error_context, utils_misc @error_context.context_aware @@ -19,14 +19,15 @@ def get_hwids(session, device_name, devcon_folder, timeout=300): :param timeout: Timeout in seconds. :rtype: list """ + def _get_hwid_once(): """ Return a list of hardware id of specific devices according to device name. 
""" - hwid_cmd = '%sdevcon.exe find *' % devcon_folder + hwid_cmd = f"{devcon_folder}devcon.exe find *" output = session.cmd_output(hwid_cmd) return re.findall(hwid_pattern, output, re.M) - hwid_pattern = r"(\S+)\s*:\s%s$" % device_name + hwid_pattern = rf"(\S+)\s*:\s{device_name}$" hwids = utils_misc.wait_for(_get_hwid_once, timeout, 0, 5) return hwids diff --git a/provider/win_driver_installer_test.py b/provider/win_driver_installer_test.py index 8a9751d3bf..16cfdc9467 100644 --- a/provider/win_driver_installer_test.py +++ b/provider/win_driver_installer_test.py @@ -1,51 +1,60 @@ import logging +import os import random import re import time -import os from avocado.utils import process +from virttest import error_context, utils_disk, utils_misc, utils_net -from virttest import utils_net -from virttest import error_context -from virttest import utils_disk -from virttest import utils_misc - -from provider import win_driver_utils -from provider import virtio_fs_utils +from provider import virtio_fs_utils, win_driver_utils from provider.storage_benchmark import generate_instance from qemu.tests.virtio_serial_file_transfer import transfer_data -from provider.vioinput_basic import key_tap_test as vioinput_test # pylint: disable=W0611 - -LOG_JOB = logging.getLogger('avocado.test') - - -driver_name_list = ['balloon', 'viostor', 'vioscsi', - 'viorng', 'viofs', 'vioser', - 'pvpanic', 'netkvm', 'vioinput', - 'fwcfg'] - -device_hwid_list = ['"PCI\\VEN_1AF4&DEV_1002" "PCI\\VEN_1AF4&DEV_1045"', - '"PCI\\VEN_1AF4&DEV_1001" "PCI\\VEN_1AF4&DEV_1042"', - '"PCI\\VEN_1AF4&DEV_1004" "PCI\\VEN_1AF4&DEV_1048"', - '"PCI\\VEN_1AF4&DEV_1005" "PCI\\VEN_1AF4&DEV_1044"', - '"PCI\\VEN_1AF4&DEV_105A"', - '"PCI\\VEN_1AF4&DEV_1003" "PCI\\VEN_1AF4&DEV_1043"', - '"ACPI\\QEMU0001"', - '"PCI\\VEN_1AF4&DEV_1000" "PCI\\VEN_1AF4&DEV_1041"', - '"PCI\\VEN_1AF4&DEV_1052"', - '"ACPI\\VEN_QEMU&DEV_0002"'] - -device_name_list = ["VirtIO Balloon Driver", "Red Hat VirtIO SCSI controller", - "Red Hat VirtIO SCSI pass-through controller", - "VirtIO RNG Device", "VirtIO FS Device", - "VirtIO Serial Driver", "QEMU PVPanic Device", - "Red Hat VirtIO Ethernet Adapter", "VirtIO Input Driver", - "QEMU FwCfg Device"] - -def install_gagent(session, test, qemu_ga_pkg, gagent_install_cmd, - gagent_pkg_info_cmd): +LOG_JOB = logging.getLogger("avocado.test") + + +driver_name_list = [ + "balloon", + "viostor", + "vioscsi", + "viorng", + "viofs", + "vioser", + "pvpanic", + "netkvm", + "vioinput", + "fwcfg", +] + +device_hwid_list = [ + '"PCI\\VEN_1AF4&DEV_1002" "PCI\\VEN_1AF4&DEV_1045"', + '"PCI\\VEN_1AF4&DEV_1001" "PCI\\VEN_1AF4&DEV_1042"', + '"PCI\\VEN_1AF4&DEV_1004" "PCI\\VEN_1AF4&DEV_1048"', + '"PCI\\VEN_1AF4&DEV_1005" "PCI\\VEN_1AF4&DEV_1044"', + '"PCI\\VEN_1AF4&DEV_105A"', + '"PCI\\VEN_1AF4&DEV_1003" "PCI\\VEN_1AF4&DEV_1043"', + '"ACPI\\QEMU0001"', + '"PCI\\VEN_1AF4&DEV_1000" "PCI\\VEN_1AF4&DEV_1041"', + '"PCI\\VEN_1AF4&DEV_1052"', + '"ACPI\\VEN_QEMU&DEV_0002"', +] + +device_name_list = [ + "VirtIO Balloon Driver", + "Red Hat VirtIO SCSI controller", + "Red Hat VirtIO SCSI pass-through controller", + "VirtIO RNG Device", + "VirtIO FS Device", + "VirtIO Serial Driver", + "QEMU PVPanic Device", + "Red Hat VirtIO Ethernet Adapter", + "VirtIO Input Driver", + "QEMU FwCfg Device", +] + + +def install_gagent(session, test, qemu_ga_pkg, gagent_install_cmd, gagent_pkg_info_cmd): """ Install guest agent. 
@@ -58,12 +67,11 @@ def install_gagent(session, test, qemu_ga_pkg, gagent_install_cmd, LOG_JOB.info("Install 'qemu-guest-agent' package in guest.") vol_virtio_key = "VolumeName like '%virtio-win%'" vol_virtio = utils_misc.get_win_disk_vol(session, vol_virtio_key) - qemu_ga_pkg_path = r"%s:\%s\%s" % (vol_virtio, "guest-agent", qemu_ga_pkg) + qemu_ga_pkg_path = r"{}:\{}\{}".format(vol_virtio, "guest-agent", qemu_ga_pkg) gagent_install_cmd = gagent_install_cmd % qemu_ga_pkg_path s_inst, o_inst = session.cmd_status_output(gagent_install_cmd) if s_inst != 0: - test.fail("qemu-guest-agent install failed," - " the detailed info:\n%s." % o_inst) + test.fail("qemu-guest-agent install failed," f" the detailed info:\n{o_inst}.") gagent_version = session.cmd_output(gagent_pkg_info_cmd).split()[-2] return gagent_version @@ -91,18 +99,18 @@ def win_uninstall_all_drivers(session, test, params): :param params: the dict used for parameters. """ devcon_path = params["devcon_path"] - for driver_name, device_name, device_hwid in zip(driver_name_list, - device_name_list, - device_hwid_list): - win_driver_utils.uninstall_driver(session, test, devcon_path, - driver_name, device_name, - device_hwid) + for driver_name, device_name, device_hwid in zip( + driver_name_list, device_name_list, device_hwid_list + ): + win_driver_utils.uninstall_driver( + session, test, devcon_path, driver_name, device_name, device_hwid + ) @error_context.context_aware -def run_installer_with_interaction(vm, session, test, params, - run_installer_cmd, - copy_files_params=None): +def run_installer_with_interaction( + vm, session, test, params, run_installer_cmd, copy_files_params=None +): """ Install/uninstall/repair virtio-win drivers and qxl,spice and qemu-ga-win by installer. @@ -115,16 +123,16 @@ def run_installer_with_interaction(vm, session, test, params, :param copy_files_params: copy files params. :return session: a new session after restart of installer """ - error_context.context("Run virtio-win-guest-tools.exe by %s." - % run_installer_cmd, LOG_JOB.info) - vm.send_key('meta_l-d') + error_context.context( + f"Run virtio-win-guest-tools.exe by {run_installer_cmd}.", LOG_JOB.info + ) + vm.send_key("meta_l-d") time.sleep(30) if copy_files_params: - win_driver_utils.copy_file_to_samepath(session, test, - copy_files_params) - session = win_driver_utils.run_installer(vm, session, - test, params, - run_installer_cmd) + win_driver_utils.copy_file_to_samepath(session, test, copy_files_params) + session = win_driver_utils.run_installer( + vm, session, test, params, run_installer_cmd + ) return session @@ -137,11 +145,12 @@ def win_installer_test(session, test, params): :param test: kvm test object :param params: the dict used for parameters. 
""" - error_context.context("Check if virtio-win-guest-too.exe " - "is signed by redhat", LOG_JOB.info) + error_context.context( + "Check if virtio-win-guest-too.exe " "is signed by redhat", LOG_JOB.info + ) status = session.cmd_status(params["signed_check_cmd"]) if status != 0: - test.fail('Installer not signed by redhat.') + test.fail("Installer not signed by redhat.") @error_context.context_aware @@ -162,18 +171,20 @@ def driver_check(session, test, params): driver_name_list = [params["driver_name"]] device_name_list = [params["device_name"]] for driver_name, device_name in zip(driver_name_list, device_name_list): - error_context.context("%s Driver Check" % driver_name, LOG_JOB.info) - inf_path = win_driver_utils.get_driver_inf_path(session, test, - media_type, - driver_name) - expected_ver = session.cmd("type %s | findstr /i /r DriverVer.*=" % - inf_path, timeout=360) + error_context.context(f"{driver_name} Driver Check", LOG_JOB.info) + inf_path = win_driver_utils.get_driver_inf_path( + session, test, media_type, driver_name + ) + expected_ver = session.cmd( + f"type {inf_path} | findstr /i /r DriverVer.*=", timeout=360 + ) expected_ver = expected_ver.strip().split(",", 1)[-1] if not expected_ver: test.error("Failed to find driver version from inf file") LOG_JOB.info("Target version is '%s'", expected_ver) - ver_list = win_driver_utils._pnpdrv_info(session, device_name, - ["DriverVersion"]) + ver_list = win_driver_utils._pnpdrv_info( + session, device_name, ["DriverVersion"] + ) if expected_ver not in ver_list: wrong_ver_driver.append(driver_name) chk_cmd = params["vio_driver_chk_cmd"] % device_name[0:30] @@ -181,16 +192,15 @@ def driver_check(session, test, params): if "FALSE" in chk_output: not_signed_driver.append(driver_name) elif "TRUE" not in chk_output: - test.error("Device %s is not found in guest" % device_name) + test.error(f"Device {device_name} is not found in guest") if wrong_ver_driver: - test.fail("%s not the expected driver version" % wrong_ver_driver) + test.fail(f"{wrong_ver_driver} not the expected driver version") if not_signed_driver: - test.fail("%s not digitally signed!" % not_signed_driver) + test.fail(f"{not_signed_driver} not digitally signed!") @error_context.context_aware -def check_gagent_version(session, test, gagent_pkg_info_cmd, - expected_gagent_version): +def check_gagent_version(session, test, gagent_pkg_info_cmd, expected_gagent_version): """ Check whether guest agent version is right. @@ -199,12 +209,12 @@ def check_gagent_version(session, test, gagent_pkg_info_cmd, :param gagent_pkg_info_cmd: guest-agent pkg info check command. :param expected_gagent_version: expected gagent version. 
""" - error_context.context("Check if gagent version is correct.", - LOG_JOB.info) + error_context.context("Check if gagent version is correct.", LOG_JOB.info) actual_gagent_version = session.cmd_output(gagent_pkg_info_cmd).split()[-2] if actual_gagent_version != expected_gagent_version: - test.fail("gagent version is not right, expected is %s but got %s" - % (expected_gagent_version, actual_gagent_version)) + test.fail( + f"gagent version is not right, expected is {expected_gagent_version} but got {actual_gagent_version}" + ) @error_context.context_aware @@ -220,11 +230,12 @@ def get_drive_letter(test, vm, img_size): error_context.context("Format data disk", test.log.info) disk_index = utils_disk.get_windows_disks_index(session, img_size) if not disk_index: - test.error("Failed to get the disk index of size %s" % img_size) + test.error(f"Failed to get the disk index of size {img_size}") if not utils_disk.update_windows_disk_attributes(session, disk_index): - test.error("Failed to enable data disk %s" % disk_index) + test.error(f"Failed to enable data disk {disk_index}") drive_letter_list = utils_disk.configure_empty_windows_disk( - session, disk_index[0], img_size) + session, disk_index[0], img_size + ) if not drive_letter_list: test.error("Failed to format the data disk") return drive_letter_list[0] @@ -240,14 +251,12 @@ def rng_test(test, params, vm): :param vm: vm object. """ session = vm.wait_for_login() - read_rng_cmd = params['read_rng_cmd'] + read_rng_cmd = params["read_rng_cmd"] read_rng_cmd = utils_misc.set_winutils_letter(session, read_rng_cmd) - error_context.context("Read virtio-rng device to get random number", - LOG_JOB.info) + error_context.context("Read virtio-rng device to get random number", LOG_JOB.info) output = session.cmd_output(read_rng_cmd) - if len(re.findall(r'0x\w', output, re.M)) < 2: - test.fail("Unable to read random numbers " - "from guest: %s" % output) + if len(re.findall(r"0x\w", output, re.M)) < 2: + test.fail("Unable to read random numbers " f"from guest: {output}") @error_context.context_aware @@ -260,14 +269,13 @@ def iozone_test(test, params, vm, images): :param vm: vm object. :param img_size: image size. 
""" - iozone = generate_instance(params, vm, 'iozone') + iozone = generate_instance(params, vm, "iozone") for img in images.split(): - drive_letter = get_drive_letter(test, vm, params['image_size_%s' % img]) + drive_letter = get_drive_letter(test, vm, params[f"image_size_{img}"]) try: - error_context.context("Running IOzone command on guest", - LOG_JOB.info) - iozone.run(params['iozone_cmd_opitons'] % drive_letter) + error_context.context("Running IOzone command on guest", LOG_JOB.info) + iozone.run(params["iozone_cmd_opitons"] % drive_letter) finally: iozone.clean() @@ -302,7 +310,7 @@ def balloon_test(test, params, vm, balloon_test_win): tag = "evict" min_sz, max_sz = balloon_test_win.get_memory_boundary(tag) - error_context.context("Running %s test" % tag, test.log.info) + error_context.context(f"Running {tag} test", test.log.info) expect_mem = int(random.uniform(min_sz, max_sz)) balloon_test_win.run_ballooning_test(expect_mem, tag) @@ -331,8 +339,9 @@ def pvpanic_test(test, params, vm): set_panic_cmd = params.get("set_panic_cmd") status, output = session.cmd_status_output(set_panic_cmd) if status: - test.error("Command '%s' failed, status: %s, output: %s" % - (set_panic_cmd, status, output)) + test.error( + f"Command '{set_panic_cmd}' failed, status: {status}, output: {output}" + ) session = vm.reboot(session) # triger a crash in guest @@ -341,7 +350,7 @@ def pvpanic_test(test, params, vm): # check qmp event expect_event = params.get("expect_event") if not utils_misc.wait_for(lambda: vm.monitor.get_event(expect_event), 60): - test.fail("Not found expect event: %s" % expect_event) + test.fail(f"Not found expect event: {expect_event}") @error_context.context_aware @@ -356,9 +365,9 @@ def vioser_test(test, params, vm): """ error_context.context("Transfer data between host and guest", test.log.info) - result = transfer_data(params, vm, sender='both') + result = transfer_data(params, vm, sender="both") if result is not True: - test.fail("Test failed. %s" % result[1]) + test.fail(f"Test failed. {result[1]}") def netkvm_test(test, params, vm): @@ -388,17 +397,16 @@ def fwcfg_test(test, params, vm): """ tmp_dir = params["tmp_dir"] if not os.path.isdir(tmp_dir): - process.system("mkdir %s" % tmp_dir) + process.system(f"mkdir {tmp_dir}") dump_name = utils_misc.generate_random_string(4) + "Memory.dmp" dump_file = tmp_dir + "/" + dump_name - output = vm.monitor.human_monitor_cmd('dump-guest-memory -w %s' - % dump_file) + output = vm.monitor.human_monitor_cmd(f"dump-guest-memory -w {dump_file}") if output and "warning" not in output: - test.fail("Save dump file failed as: %s" % output) + test.fail(f"Save dump file failed as: {output}") else: - cmd = "ls -l %s | awk '{print $5}'" % dump_file + cmd = f"ls -l {dump_file} | awk '{{print $5}}'" dump_size = int(process.getoutput(cmd)) - process.system("rm -rf %s" % dump_file, shell=True) + process.system(f"rm -rf {dump_file}", shell=True) if dump_size == 0: test.fail("The dump file is empty") diff --git a/provider/win_driver_utils.py b/provider/win_driver_utils.py index c16c20d495..ca2e60e693 100644 --- a/provider/win_driver_utils.py +++ b/provider/win_driver_utils.py @@ -3,49 +3,74 @@ :copyright: Red Hat Inc. 
""" + import logging -import re import os +import re import time -import aexpect -from virttest import error_context -from virttest import utils_misc -from virttest import utils_test -from virttest import data_dir -from virttest.utils_windows import virtio_win, wmic, system +import aexpect +from virttest import data_dir, error_context, utils_misc, utils_test from virttest.utils_version import VersionInterval +from virttest.utils_windows import system, virtio_win, wmic -LOG_JOB = logging.getLogger('avocado.test') +LOG_JOB = logging.getLogger("avocado.test") QUERY_TIMEOUT = 360 INSTALL_TIMEOUT = 360 OPERATION_TIMEOUT = 120 -driver_info_dict = {"netkvm": {"hwid": '"PCI\\VEN_1AF4&DEV_1000" "PCI\\VEN_1AF4&DEV_1041"', "device_name": "Red Hat VirtIO Ethernet Adapter"}, - "viorng": {"hwid": '"PCI\\VEN_1AF4&DEV_1005" "PCI\\VEN_1AF4&DEV_1044"', "device_name": "VirtIO RNG Device"}, - "vioser": {"hwid": '"PCI\\VEN_1AF4&DEV_1003" "PCI\\VEN_1AF4&DEV_1043"', "device_name": "VirtIO Serial Driver"}, - "balloon": {"hwid": '"PCI\\VEN_1AF4&DEV_1002" "PCI\\VEN_1AF4&DEV_1045"', "device_name": "VirtIO Balloon Driver"}, - "pvpanic": {"hwid": '"ACPI\\QEMU0001"', "device_name": "QEMU PVPanic Device"}, - "vioinput": {"hwid": '"PCI\\VEN_1AF4&DEV_1052"', "device_name": "VirtIO Input Driver"}, - "viofs": {"hwid": '"PCI\\VEN_1AF4&DEV_105A"', "device_name": "VirtIO FS Device"}, - "viostor": {"hwid": '"PCI\\VEN_1AF4&DEV_1001" "PCI\\VEN_1AF4&DEV_1042"', "device_name": "Red Hat VirtIO SCSI controller"}, - "vioscsi": {"hwid": '"PCI\\VEN_1AF4&DEV_1004" "PCI\\VEN_1AF4&DEV_1048"', "device_name": "Red Hat VirtIO SCSI pass-through controller"}, - "fwcfg": {"hwid": '"ACPI\\VEN_QEMU&DEV_0002" "ACPI\\QEMU0002"', "device_name": "QEMU FWCfg Device"} - } +driver_info_dict = { + "netkvm": { + "hwid": '"PCI\\VEN_1AF4&DEV_1000" "PCI\\VEN_1AF4&DEV_1041"', + "device_name": "Red Hat VirtIO Ethernet Adapter", + }, + "viorng": { + "hwid": '"PCI\\VEN_1AF4&DEV_1005" "PCI\\VEN_1AF4&DEV_1044"', + "device_name": "VirtIO RNG Device", + }, + "vioser": { + "hwid": '"PCI\\VEN_1AF4&DEV_1003" "PCI\\VEN_1AF4&DEV_1043"', + "device_name": "VirtIO Serial Driver", + }, + "balloon": { + "hwid": '"PCI\\VEN_1AF4&DEV_1002" "PCI\\VEN_1AF4&DEV_1045"', + "device_name": "VirtIO Balloon Driver", + }, + "pvpanic": {"hwid": '"ACPI\\QEMU0001"', "device_name": "QEMU PVPanic Device"}, + "vioinput": { + "hwid": '"PCI\\VEN_1AF4&DEV_1052"', + "device_name": "VirtIO Input Driver", + }, + "viofs": {"hwid": '"PCI\\VEN_1AF4&DEV_105A"', "device_name": "VirtIO FS Device"}, + "viostor": { + "hwid": '"PCI\\VEN_1AF4&DEV_1001" "PCI\\VEN_1AF4&DEV_1042"', + "device_name": "Red Hat VirtIO SCSI controller", + }, + "vioscsi": { + "hwid": '"PCI\\VEN_1AF4&DEV_1004" "PCI\\VEN_1AF4&DEV_1048"', + "device_name": "Red Hat VirtIO SCSI pass-through controller", + }, + "fwcfg": { + "hwid": '"ACPI\\VEN_QEMU&DEV_0002" "ACPI\\QEMU0002"', + "device_name": "QEMU FWCfg Device", + }, +} def _pnpdrv_info(session, name_pattern, props=None): """Get the driver props eg: InfName""" - cmd = wmic.make_query("path win32_pnpsigneddriver", - "DeviceName like '%s'" % name_pattern, - props=props, get_swch=wmic.FMT_TYPE_LIST) + cmd = wmic.make_query( + "path win32_pnpsigneddriver", + f"DeviceName like '{name_pattern}'", + props=props, + get_swch=wmic.FMT_TYPE_LIST, + ) return wmic.parse_list(session.cmd(cmd, timeout=QUERY_TIMEOUT)) -def uninstall_driver(session, test, devcon_path, driver_name, - device_name, device_hwid): +def uninstall_driver(session, test, devcon_path, driver_name, device_name, device_hwid): """ 
Uninstall driver. @@ -57,10 +82,11 @@ def uninstall_driver(session, test, devcon_path, driver_name, :param device_hwid: device hardware id. """ devcon_path = utils_misc.set_winutils_letter(session, devcon_path) - status, output = session.cmd_status_output("dir %s" % devcon_path, - timeout=OPERATION_TIMEOUT) + status, output = session.cmd_status_output( + f"dir {devcon_path}", timeout=OPERATION_TIMEOUT + ) if status: - test.error("Not found devcon.exe, details: %s" % output) + test.error(f"Not found devcon.exe, details: {output}") LOG_JOB.info("Uninstalling previous installed driver") # find the inf name and remove the repeated one inf_list_all = _pnpdrv_info(session, device_name, ["InfName"]) @@ -68,23 +94,24 @@ def uninstall_driver(session, test, devcon_path, driver_name, # pnputil flags available starting in Windows 10, # version 1607, build 14393 later - build_ver = system.version(session).split('.')[2] + build_ver = system.version(session).split(".")[2] if int(build_ver) > 14393: - uninst_store_cmd = ("pnputil /delete-driver %s /uninstall /force" - % inf_list[0]) + uninst_store_cmd = f"pnputil /delete-driver {inf_list[0]} /uninstall /force" else: - uninst_store_cmd = "pnputil /f /d %s" % inf_list[0] - status, output = session.cmd_status_output(uninst_store_cmd, - INSTALL_TIMEOUT) + uninst_store_cmd = f"pnputil /f /d {inf_list[0]}" + status, output = session.cmd_status_output(uninst_store_cmd, INSTALL_TIMEOUT) if status: - test.error("Failed to uninstall driver '%s' from store, " - "details:\n%s" % (driver_name, output)) - uninst_cmd = "%s remove %s" % (devcon_path, device_hwid) + test.error( + f"Failed to uninstall driver '{driver_name}' from store, " + f"details:\n{output}" + ) + uninst_cmd = f"{devcon_path} remove {device_hwid}" status, output = session.cmd_status_output(uninst_cmd, INSTALL_TIMEOUT) # acceptable status: OK(0), REBOOT(1) if status > 1: - test.error("Failed to uninstall driver '%s', details:\n" - "%s" % (driver_name, output)) + test.error( + f"Failed to uninstall driver '{driver_name}', details:\n" f"{output}" + ) def get_driver_inf_path(session, test, media_type, driver_name): @@ -97,12 +124,11 @@ def get_driver_inf_path(session, test, media_type, driver_name): :param driver_name: driver name. 
""" try: - get_drive_letter = getattr(virtio_win, "drive_letter_%s" % media_type) - get_product_dirname = getattr(virtio_win, - "product_dirname_%s" % media_type) - get_arch_dirname = getattr(virtio_win, "arch_dirname_%s" % media_type) + get_drive_letter = getattr(virtio_win, f"drive_letter_{media_type}") + get_product_dirname = getattr(virtio_win, f"product_dirname_{media_type}") + get_arch_dirname = getattr(virtio_win, f"arch_dirname_{media_type}") except AttributeError: - test.error("Not supported virtio win media type '%s'" % media_type) + test.error(f"Not supported virtio win media type '{media_type}'") viowin_ltr = get_drive_letter(session) if not viowin_ltr: test.error("Could not find virtio-win drive in guest") @@ -113,9 +139,9 @@ def get_driver_inf_path(session, test, media_type, driver_name): if not guest_arch: test.error("Could not get architecture dirname of the vm") - inf_middle_path = ("{name}\\{arch}" if media_type == "iso" - else "{arch}\\{name}").format(name=guest_name, - arch=guest_arch) + inf_middle_path = ( + "{name}\\{arch}" if media_type == "iso" else "{arch}\\{name}" + ).format(name=guest_name, arch=guest_arch) inf_find_cmd = 'dir /b /s %s\\%s.inf | findstr "\\%s\\\\"' inf_find_cmd %= (viowin_ltr, driver_name, inf_middle_path) inf_path = session.cmd(inf_find_cmd, timeout=OPERATION_TIMEOUT).strip() @@ -124,8 +150,9 @@ def get_driver_inf_path(session, test, media_type, driver_name): @error_context.context_aware -def install_driver_by_virtio_media(session, test, devcon_path, media_type, - driver_name, device_hwid): +def install_driver_by_virtio_media( + session, test, devcon_path, media_type, driver_name, device_hwid +): """ Install driver by virtio media. @@ -137,27 +164,28 @@ def install_driver_by_virtio_media(session, test, devcon_path, media_type, :param device_hwid: device hardware id. """ devcon_path = utils_misc.set_winutils_letter(session, devcon_path) - status, output = session.cmd_status_output("dir %s" % devcon_path, - timeout=OPERATION_TIMEOUT) + status, output = session.cmd_status_output( + f"dir {devcon_path}", timeout=OPERATION_TIMEOUT + ) if status: - test.error("Not found devcon.exe, details: %s" % output) + test.error(f"Not found devcon.exe, details: {output}") error_context.context("Installing target driver", LOG_JOB.info) installed_any = False for hwid in device_hwid.split(): - output = session.cmd_output("%s find %s" % (devcon_path, hwid)) + output = session.cmd_output(f"{devcon_path} find {hwid}") if re.search("No matching devices found", output, re.I): continue inf_path = get_driver_inf_path(session, test, media_type, driver_name) - inst_cmd = "%s updateni %s %s" % (devcon_path, inf_path, hwid) + inst_cmd = f"{devcon_path} updateni {inf_path} {hwid}" status, output = session.cmd_status_output(inst_cmd, INSTALL_TIMEOUT) # acceptable status: OK(0), REBOOT(1) if status > 1: - test.fail("Failed to install driver '%s', " - "details:\n%s" % (driver_name, output)) + test.fail( + f"Failed to install driver '{driver_name}', " f"details:\n{output}" + ) installed_any |= True if not installed_any: - test.error("Failed to find target devices " - "by hwids: '%s'" % device_hwid) + test.error("Failed to find target devices " f"by hwids: '{device_hwid}'") def autoit_installer_check(params, session): @@ -167,13 +195,16 @@ def autoit_installer_check(params, session): :param session: The guest session object. :return: True if it is running. 
""" - autoit_check_cmd = params.get("autoit_check_cmd", - "tasklist |findstr /i autoit3_.*exe") + autoit_check_cmd = params.get( + "autoit_check_cmd", "tasklist |findstr /i autoit3_.*exe" + ) try: return session.cmd_status(autoit_check_cmd) == 0 - except (aexpect.ShellTimeoutError, - aexpect.ShellProcessTerminatedError, - aexpect.ShellStatusError): + except ( + aexpect.ShellTimeoutError, + aexpect.ShellProcessTerminatedError, + aexpect.ShellStatusError, + ): LOG_JOB.info("VM is rebooting...") return False @@ -195,28 +226,27 @@ def run_installer(vm, session, test, params, run_installer_cmd): :return session: a new session after restart of installer """ cdrom_virtio = params["cdrom_virtio"] - installer_restart_version = params.get("installer_restart_version", - "[1.9.37.0,)") - cdrom_virtio_path = os.path.basename(utils_misc.get_path( - data_dir.get_data_dir(), cdrom_virtio)) - match = re.search(r"virtio-win-(\d+\.\d+(?:\.\d+)?-\d+)", - cdrom_virtio_path) - cdrom_virtio_version = re.sub('-', '.', match.group(1)) + installer_restart_version = params.get("installer_restart_version", "[1.9.37.0,)") + cdrom_virtio_path = os.path.basename( + utils_misc.get_path(data_dir.get_data_dir(), cdrom_virtio) + ) + match = re.search(r"virtio-win-(\d+\.\d+(?:\.\d+)?-\d+)", cdrom_virtio_path) + cdrom_virtio_version = re.sub("-", ".", match.group(1)) # run installer cmd - run_installer_cmd = utils_misc.set_winutils_letter( - session, run_installer_cmd) + run_installer_cmd = utils_misc.set_winutils_letter(session, run_installer_cmd) session.cmd(run_installer_cmd) - if not utils_misc.wait_for(lambda: not autoit_installer_check( - params, session), 240, 2, 2): - test.fail("Autoit exe stop there for 240s," - " please have a check.") + if not utils_misc.wait_for( + lambda: not autoit_installer_check(params, session), 240, 2, 2 + ): + test.fail("Autoit exe stop there for 240s," " please have a check.") if cdrom_virtio_version in VersionInterval(installer_restart_version): - if not utils_misc.wait_for(lambda: not session.is_responsive(), - 120, 5, 5): - test.fail("The previous session still exists," - "seems that the vm doesn't restart.") + if not utils_misc.wait_for(lambda: not session.is_responsive(), 120, 5, 5): + test.fail( + "The previous session still exists," + "seems that the vm doesn't restart." + ) session = vm.wait_for_login(timeout=360) else: session = vm.reboot(session) @@ -233,11 +263,11 @@ def remove_driver_by_msi(session, vm, params): :return: a new session after restart os """ media_type = params.get("virtio_win_media_type", "iso") - get_drive_letter = getattr(virtio_win, "drive_letter_%s" % media_type) + get_drive_letter = getattr(virtio_win, f"drive_letter_{media_type}") drive_letter = get_drive_letter(session) msi_path = drive_letter + "\\" + params["msi_name"] msi_uninstall_cmd = params["msi_uninstall_cmd"] % msi_path - vm.send_key('meta_l-d') + vm.send_key("meta_l-d") # msi uninstall cmd will restart os. session.cmd(msi_uninstall_cmd) time.sleep(15) @@ -252,35 +282,41 @@ def copy_file_to_samepath(session, test, params): :param test: kvm test object :param params: the dict used for parameters """ - LOG_JOB.info("Copy autoit scripts and virtio-win-guest-tools.exe " - "to the same path.") + LOG_JOB.info( + "Copy autoit scripts and virtio-win-guest-tools.exe " "to the same path." 
+ ) dst_path = r"C:\\" vol_virtio_key = "VolumeName like '%virtio-win%'" vol_virtio = utils_misc.get_win_disk_vol(session, vol_virtio_key) - installer_path = r"%s:\%s" % (vol_virtio, "virtio-win-guest-tools.exe") - install_script_path = utils_misc.set_winutils_letter(session, - params["install_script_path"]) - repair_script_path = utils_misc.set_winutils_letter(session, - params["repair_script_path"]) - uninstall_script_path = utils_misc.set_winutils_letter(session, - params["uninstall_script_path"]) - src_files = [installer_path, install_script_path, - repair_script_path, uninstall_script_path] + installer_path = r"{}:\{}".format(vol_virtio, "virtio-win-guest-tools.exe") + install_script_path = utils_misc.set_winutils_letter( + session, params["install_script_path"] + ) + repair_script_path = utils_misc.set_winutils_letter( + session, params["repair_script_path"] + ) + uninstall_script_path = utils_misc.set_winutils_letter( + session, params["uninstall_script_path"] + ) + src_files = [ + installer_path, + install_script_path, + repair_script_path, + uninstall_script_path, + ] if params.get("msi_name"): - msi_path = r"%s:\%s" % (vol_virtio, params["msi_name"]) + msi_path = r"{}:\{}".format(vol_virtio, params["msi_name"]) uninstall_msi_script_path = utils_misc.set_winutils_letter( - session, - params["uninstall_msi_script_path"] + session, params["uninstall_msi_script_path"] ) src_files.extend([msi_path, uninstall_msi_script_path]) for src_file in src_files: - copy_cmd = "xcopy %s %s /Y" % (src_file, dst_path) + copy_cmd = f"xcopy {src_file} {dst_path} /Y" status, output = session.cmd_status_output(copy_cmd) if status != 0: - test.error("Copy file error," - " the detailed info:\n%s." % output) + test.error("Copy file error," f" the detailed info:\n{output}.") def enable_driver(session, test, cmd): @@ -294,7 +330,7 @@ def enable_driver(session, test, cmd): cmd = utils_misc.set_winutils_letter(session, cmd) status, output = session.cmd_status_output(cmd) if status != 0: - test.fail("failed to enable driver, %s" % output) + test.fail(f"failed to enable driver, {output}") def disable_driver(session, vm, test, cmd): @@ -312,7 +348,7 @@ def disable_driver(session, vm, test, cmd): if "reboot" in output: session = vm.reboot(session) else: - test.fail("failed to disable driver, %s" % output) + test.fail(f"failed to disable driver, {output}") return session @@ -330,18 +366,18 @@ def get_device_id(session, test, driver_name): output = _pnpdrv_info(session, device_name, ["DeviceID"]) # workaround for viostor/vioscsi to get data device id device_id = output[0] - device_id = '&'.join(device_id.split('&')) + device_id = "&".join(device_id.split("&")) find_devices = False for hwid in device_hwid.split(): hwid = hwid.split('"')[1] if hwid in device_id: find_devices = True if not find_devices: - test.fail("Didn't find driver info from guest %s" % output) + test.fail(f"Didn't find driver info from guest {output}") return device_id -def load_driver(session, test, params, load_method='enable'): +def load_driver(session, test, params, load_method="enable"): """ Load driver. 
@@ -352,20 +388,19 @@ def load_driver(session, test, params, load_method='enable'): """ driver_name = params["driver_name"] devcon_path = params["devcon_path"] - if load_method != 'enable': - media_type = params.get("virtio_win_media_type", "iso") + if load_method != "enable": + params.get("virtio_win_media_type", "iso") device_hwid = driver_info_dict[driver_name]["hwid"] - install_driver_by_virtio_media(session, test, devcon_path, - device_hwid) + install_driver_by_virtio_media(session, test, devcon_path, device_hwid) else: device_id = get_device_id(session, test, driver_name) - cmd = '%s enable "@%s"' % (devcon_path, device_id) + cmd = f'{devcon_path} enable "@{device_id}"' enable_driver(session, test, cmd) utils_test.qemu.windrv_verify_running(session, test, driver_name) -def unload_driver(session, vm, test, params, load_method='enable'): +def unload_driver(session, vm, test, params, load_method="enable"): """ Unload driver. @@ -377,19 +412,20 @@ def unload_driver(session, vm, test, params, load_method='enable'): """ driver_name = params["driver_name"] devcon_path = params["devcon_path"] - if load_method != 'enable': + if load_method != "enable": device_name = driver_info_dict[driver_name]["device_name"] device_hwid = driver_info_dict[driver_name]["hwid"] - uninstall_driver(session, test, devcon_path, driver_name, - device_name, device_hwid) + uninstall_driver( + session, test, devcon_path, driver_name, device_name, device_hwid + ) else: device_id = get_device_id(session, test, driver_name) - cmd = '%s disable "@%s"' % (devcon_path, device_id) + cmd = f'{devcon_path} disable "@{device_id}"' session = disable_driver(session, vm, test, cmd) return session -def memory_leak_check(vm, test, params, load_method='enable'): +def memory_leak_check(vm, test, params, load_method="enable"): """ In order to let the driver verifier to catch memory leaks, driver should be unloaded after driver function.Note that if want to use @@ -404,9 +440,11 @@ def memory_leak_check(vm, test, params, load_method='enable'): session = unload_driver(session, vm, test, params, load_method) time.sleep(10) if vm.is_alive() is False: - test.fail("VM is not alive after uninstall driver," - "please check if it is a memory leak") - if load_method != 'enable': + test.fail( + "VM is not alive after uninstall driver," + "please check if it is a memory leak" + ) + if load_method != "enable": session = vm.reboot(session) load_driver(session, test, params, load_method) session.close() diff --git a/provider/win_dump_utils.py b/provider/win_dump_utils.py index 3e56e7a780..7ff5bd439d 100644 --- a/provider/win_dump_utils.py +++ b/provider/win_dump_utils.py @@ -1,14 +1,14 @@ """ Windows dump related utilities. 
""" -import os + import logging +import os from avocado.utils import process -from virttest import env_process -from virttest import utils_misc +from virttest import env_process, utils_misc -LOG_JOB = logging.getLogger('avocado.test') +LOG_JOB = logging.getLogger("avocado.test") def set_vm_for_dump(test, params): @@ -18,15 +18,17 @@ def set_vm_for_dump(test, params): :param test: kvm test object :param params: Params object """ - host_free_mem = utils_misc.get_mem_info(attr='MemFree') + host_free_mem = utils_misc.get_mem_info(attr="MemFree") host_avail_disk = int(process.getoutput(params["get_avail_disk"])) - sys_image_size = int(float( - utils_misc.normalize_data_size(params["image_size"], "G"))) + sys_image_size = int( + float(utils_misc.normalize_data_size(params["image_size"], "G")) + ) if host_avail_disk < (host_free_mem // 1024**2) * 1.2 + sys_image_size: avail_dump_size = host_avail_disk - sys_image_size params["mem"] = str(int((avail_dump_size) * 0.8 // 2.4) * 1024) - image_size_stg = int(float( - utils_misc.normalize_data_size("%sM" % params["mem"], "G")) * 1.4) + image_size_stg = int( + float(utils_misc.normalize_data_size("{}M".format(params["mem"]), "G")) * 1.4 + ) params["image_size_stg"] = str(image_size_stg) + "G" params["force_create_image_stg"] = "yes" @@ -44,23 +46,21 @@ def generate_mem_dump(test, params, vm): """ tmp_dir = params["tmp_dir"] if not os.path.isdir(tmp_dir): - process.system("mkdir %s" % tmp_dir) + process.system(f"mkdir {tmp_dir}") dump_name = utils_misc.generate_random_string(4) + "Memory.dmp" dump_file = tmp_dir + "/" + dump_name - output = vm.monitor.human_monitor_cmd('dump-guest-memory -w %s' - % dump_file) + output = vm.monitor.human_monitor_cmd(f"dump-guest-memory -w {dump_file}") if output and "warning" not in output: - test.fail("Save dump file failed as: %s" % output) + test.fail(f"Save dump file failed as: {output}") else: - cmd = "ls -l %s | awk '{print $5}'" % dump_file + cmd = f"ls -l {dump_file} | awk '{{print $5}}'" dump_size = int(process.getoutput(cmd)) if dump_size == 0: test.fail("The size of dump file is %d" % dump_size) - dump_name_zip = "%s.zip" % dump_name - process.system("cd %s && zip %s %s" % (tmp_dir, dump_name_zip, - dump_name), shell=True) + dump_name_zip = f"{dump_name}.zip" + process.system(f"cd {tmp_dir} && zip {dump_name_zip} {dump_name}", shell=True) dump_file_zip = tmp_dir + "/" + dump_name_zip return dump_file, dump_file_zip @@ -76,13 +76,14 @@ def install_windbg(test, params, session, timeout=600): """ LOG_JOB.info("Install Windows Debug Tools in guest.") windbg_install_cmd = params["windbg_install_cmd"] - windbg_install_cmd = utils_misc.set_winutils_letter(session, - windbg_install_cmd - % params["feature"]) + windbg_install_cmd = utils_misc.set_winutils_letter( + session, windbg_install_cmd % params["feature"] + ) session.cmd(windbg_install_cmd) - if not utils_misc.wait_for(lambda: check_windbg_installed(params, session), - timeout=timeout, step=5): + if not utils_misc.wait_for( + lambda: check_windbg_installed(params, session), timeout=timeout, step=5 + ): test.fail("windbg tool has not been installed") else: LOG_JOB.info("windbg tool installation completed") @@ -111,7 +112,7 @@ def disable_security_alert(params, session): output = session.cmd_output(query_cmd) cmd = 'reg add "%s\\Software\\Microsoft\\Windows\\CurrentVersion' cmd += '\\Internet Settings" /v WarnonZoneCrossing /d 0 /t ' - cmd += 'REG_DWORD /f' + cmd += "REG_DWORD /f" session.cmd(cmd % output) @@ -126,22 +127,22 @@ def dump_windbg_check(test, params, 
session): LOG_JOB.info("Check the dump file can be opened by windbg tool") chk_dump_cmd = params["chk_dump_cmd"] log_file = params["dump_analyze_file"] - chk_dump_cmd = utils_misc.set_winutils_letter(session, - chk_dump_cmd) + chk_dump_cmd = utils_misc.set_winutils_letter(session, chk_dump_cmd) status, output = session.cmd_status_output(chk_dump_cmd) if status: - test.fail("Failed to check dump file by windbg,command out is %s" - % output) - if not utils_misc.wait_for(lambda: check_log_exist(session, log_file), - timeout=480, step=10): + test.fail(f"Failed to check dump file by windbg,command out is {output}") + if not utils_misc.wait_for( + lambda: check_log_exist(session, log_file), timeout=480, step=10 + ): test.error("Cannot generate dump analyze log.") chk_id_cmd = params["chk_id_cmd"] % log_file - if utils_misc.wait_for(lambda: not session.cmd_status(chk_id_cmd), - timeout=60, step=5): + if utils_misc.wait_for( + lambda: not session.cmd_status(chk_id_cmd), timeout=60, step=5 + ): LOG_JOB.info("Check dump file passed") else: - output = session.cmd_output("type %s" % log_file) - test.fail("Check dump file failed, output as %s" % output) + output = session.cmd_output(f"type {log_file}") + test.fail(f"Check dump file failed, output as {output}") def check_log_exist(session, log_file): @@ -151,6 +152,6 @@ def check_log_exist(session, log_file): :param session: The guest session object. :param log_file: The log file of dump analyze. """ - chk_log_exist = "dir %s" % log_file + chk_log_exist = f"dir {log_file}" status, _ = session.cmd_status_output(chk_log_exist) return False if status else True diff --git a/provider/win_hlk_suite.py b/provider/win_hlk_suite.py index e4c9ca06d3..38a13f95d1 100644 --- a/provider/win_hlk_suite.py +++ b/provider/win_hlk_suite.py @@ -4,17 +4,13 @@ import re import time -from avocado.utils import archive -from avocado.utils import process +from avocado.utils import archive, process +from virttest import data_dir, utils_misc, utils_net -from virttest import data_dir -from virttest import utils_misc -from virttest import utils_net +LOG_JOB = logging.getLogger("avocado.test") -LOG_JOB = logging.getLogger('avocado.test') - -STATE_READY = 'Ready' -STATE_NOT_READY = 'NotReady' +STATE_READY = "Ready" +STATE_NOT_READY = "NotReady" class HLKError(Exception): @@ -27,7 +23,7 @@ def __init__(self, *args): Exception.__init__(self, *args) -class HLKServer(object): +class HLKServer: def __init__(self, test, vm_server): """ Initial HLKServer instance. @@ -37,15 +33,15 @@ def __init__(self, test, vm_server): :param vm_server: VM Server object. 
:type vm_server: qemu_vm.VM """ - src_link = os.path.join(data_dir.get_deps_dir("hlk"), 'hlk_studio.ps1') + src_link = os.path.join(data_dir.get_deps_dir("hlk"), "hlk_studio.ps1") self._test = test self._vm = vm_server self._vm.copy_files_to(src_link, "c:\\", timeout=60) self._session = self._vm.wait_for_login(timeout=360) - LOG_JOB.info('Getting HLK Server hostname:') - hostname = self._session.cmd('hostname').strip() - self._session.set_prompt(r'toolsHLK@%s' % hostname) - LOG_JOB.info('Starting to run HLK Server powershell script:') + LOG_JOB.info("Getting HLK Server hostname:") + hostname = self._session.cmd("hostname").strip() + self._session.set_prompt(rf"toolsHLK@{hostname}") + LOG_JOB.info("Starting to run HLK Server powershell script:") self._session.cmd_output('powershell -command "c:\\hlk_studio.ps1"') def close(self): @@ -54,8 +50,8 @@ def close(self): def get_default_pool(self): """Get default pool.""" - LOG_JOB.info('Getting default pool:') - machines = self._session.cmd_output('getdefaultpool') + LOG_JOB.info("Getting default pool:") + machines = self._session.cmd_output("getdefaultpool") LOG_JOB.info(machines) return [json.loads(machine) for machine in machines.splitlines()] @@ -67,7 +63,7 @@ def create_pool(self, name): :type name: str """ LOG_JOB.info('Creating pool "%s":', name) - self._session.cmd_output('createpool %s' % name) + self._session.cmd_output(f"createpool {name}") def move_machine_from_default_pool(self, machine_name, dst_pool_name): """ @@ -78,10 +74,12 @@ def move_machine_from_default_pool(self, machine_name, dst_pool_name): :param dst_pool_name: Destination pool name. :type dst_pool_name: str """ - LOG_JOB.info('Moving machine "%s" from default pool to pool "%s":', - machine_name, dst_pool_name) - cmd = 'movemachinefromdefaultpool %s %s' % (machine_name, - dst_pool_name) + LOG_JOB.info( + 'Moving machine "%s" from default pool to pool "%s":', + machine_name, + dst_pool_name, + ) + cmd = f"movemachinefromdefaultpool {machine_name} {dst_pool_name}" self._session.cmd_output(cmd) def set_machine_state(self, machine_name, pool_name, state, timeout=360): @@ -97,9 +95,13 @@ def set_machine_state(self, machine_name, pool_name, state, timeout=360): :param timeout: Timeout for setting in seconds. :type timeout: int """ - LOG_JOB.info('Setting machine "%s" of pool "%s" to state "%s":', - machine_name, pool_name, state) - cmd = 'setmachinestate %s %s %s' % (machine_name, pool_name, state) + LOG_JOB.info( + 'Setting machine "%s" of pool "%s" to state "%s":', + machine_name, + pool_name, + state, + ) + cmd = f"setmachinestate {machine_name} {pool_name} {state}" self._session.cmd_output(cmd, timeout) def list_machine_targets(self, machine_name, pool_name, timeout=60): @@ -117,7 +119,7 @@ def list_machine_targets(self, machine_name, pool_name, timeout=60): "$Target1_Name,$Target1_Key,$Target1_Type", ...] 
:type: list """ - cmd = 'listmachinetargets %s %s' % (machine_name, pool_name) + cmd = f"listmachinetargets {machine_name} {pool_name}" targets = self._session.cmd_output(cmd, timeout) LOG_JOB.info(targets) return [target for target in targets.splitlines()] @@ -138,13 +140,17 @@ def get_machine_target(self, target_name, machine_name, pool_name, timeout=60): format: ["$Target0_Name", "$Target0_Key", "$Target0_Type"] :type: list """ - LOG_JOB.info('Getting target "%s" of machine "%s" of pool "%s":', - target_name, machine_name, pool_name) + LOG_JOB.info( + 'Getting target "%s" of machine "%s" of pool "%s":', + target_name, + machine_name, + pool_name, + ) targets = self.list_machine_targets(machine_name, pool_name, timeout) for target in targets: if target_name in target: - target = target.split(',') - LOG_JOB.info('key: %s, type: %s', target[1], target[2]) + target = target.split(",") + LOG_JOB.info("key: %s, type: %s", target[1], target[2]) return target def get_machine_target_key(self, target_name, machine_name, pool_name, timeout=60): @@ -162,8 +168,9 @@ def get_machine_target_key(self, target_name, machine_name, pool_name, timeout=6 :return: Target key. :type: str """ - return self.get_machine_target( - target_name, machine_name, pool_name, timeout)[1].replace('&', '"&"') + return self.get_machine_target(target_name, machine_name, pool_name, timeout)[ + 1 + ].replace("&", '"&"') def list_projects(self, timeout=60): """ @@ -180,7 +187,7 @@ def list_projects(self, timeout=60): ...}, ...] :rtype: list """ - projects = self._session.cmd_output('listprojects', timeout) + projects = self._session.cmd_output("listprojects", timeout) return [json.loads(project) for project in projects.splitlines()] def get_project(self, name): @@ -197,7 +204,7 @@ def get_project(self, name): :rtype: dict """ for project in self.list_projects(): - if project['project_name'] == name: + if project["project_name"] == name: LOG_JOB.info(project) return project @@ -209,10 +216,11 @@ def create_project(self, name): :type name: str """ LOG_JOB.info('Creating project "%s":', name) - self._session.cmd_output('createproject %s' % name) + self._session.cmd_output(f"createproject {name}") - def create_project_target(self, target_key, project_name, - machine_name, pool_name, timeout=60): + def create_project_target( + self, target_key, project_name, machine_name, pool_name, timeout=60 + ): """ Create project target. @@ -227,10 +235,12 @@ def create_project_target(self, target_key, project_name, :param timeout: Timeout for creating in seconds. :type timeout: int """ - LOG_JOB.info('Creating project target by target key "%s" of "%s":', - target_key, project_name) - cmd = 'createprojecttarget %s %s %s %s' % (target_key, project_name, - machine_name, pool_name) + LOG_JOB.info( + 'Creating project target by target key "%s" of "%s":', + target_key, + project_name, + ) + cmd = f"createprojecttarget {target_key} {project_name} {machine_name} {pool_name}" self._session.cmd_output(cmd, timeout) def list_tests(self, target_key, project_name, machine_name, pool_name, timeout=60): @@ -260,13 +270,13 @@ def list_tests(self, target_key, project_name, machine_name, pool_name, timeout= ...}, ...] 
:rtype: list """ - cmd = 'listtests %s %s %s %s' % (target_key, project_name, - machine_name, pool_name) + cmd = f"listtests {target_key} {project_name} {machine_name} {pool_name}" tests = self._session.cmd_output(cmd, timeout) return [json.loads(test) for test in tests.splitlines()] - def get_target_test(self, test_name, target_key, project_name, - machine_name, pool_name, timeout=60): + def get_target_test( + self, test_name, target_key, project_name, machine_name, pool_name, timeout=60 + ): """ Get target test. @@ -293,14 +303,16 @@ def get_target_test(self, test_name, target_key, project_name, "execution_state": "$Test_ExecutionState"} :rtype: dict """ - tests = self.list_tests(target_key, project_name, - machine_name, pool_name, timeout) + tests = self.list_tests( + target_key, project_name, machine_name, pool_name, timeout + ) for test in tests: - if test['test_name'] == test_name: + if test["test_name"] == test_name: return test - def get_target_test_id(self, test_name, target_key, project_name, - machine_name, pool_name, timeout=60): + def get_target_test_id( + self, test_name, target_key, project_name, machine_name, pool_name, timeout=60 + ): """ Get target test ID. @@ -320,8 +332,9 @@ def get_target_test_id(self, test_name, target_key, project_name, :rtype: str """ LOG_JOB.info('Getting target id of test "%s":', test_name) - test_id = self.get_target_test(test_name, target_key, project_name, - machine_name, pool_name, timeout)['test_id'] + test_id = self.get_target_test( + test_name, target_key, project_name, machine_name, pool_name, timeout + )["test_id"] LOG_JOB.info(test_id) return test_id @@ -341,12 +354,12 @@ def queue_test(self, test_id, target_key, project_name, machine_name, pool_name) :type pool_name: str """ LOG_JOB.info('Queuing a test, test id "%s":', test_id) - cmd = 'queuetest %s %s %s %s %s' % (test_id, target_key, project_name, - machine_name, pool_name) + cmd = f"queuetest {test_id} {target_key} {project_name} {machine_name} {pool_name}" self._session.cmd_output(cmd) - def zip_test_result_logs(self, result_index, test_id, target_key, - project_name, machine_name, pool_name): + def zip_test_result_logs( + self, result_index, test_id, target_key, project_name, machine_name, pool_name + ): """ Zip test result logs. @@ -365,17 +378,19 @@ def zip_test_result_logs(self, result_index, test_id, target_key, :return: Output of command. :rtype: str """ - cmd = 'ziptestresultlogs %s %s %s %s %s %s' % (result_index, test_id, - target_key, project_name, - machine_name, pool_name) - LOG_JOB.info('Zipping the index %s of test result logs of test id "%s":', - result_index, test_id) + cmd = f"ziptestresultlogs {result_index} {test_id} {target_key} {project_name} {machine_name} {pool_name}" + LOG_JOB.info( + 'Zipping the index %s of test result logs of test id "%s":', + result_index, + test_id, + ) output = self._session.cmd_output(cmd) LOG_JOB.info(output) return output - def list_test_results(self, test_id, target_key, project_name, - machine_name, pool_name): + def list_test_results( + self, test_id, target_key, project_name, machine_name, pool_name + ): """ List test results. @@ -392,13 +407,12 @@ def list_test_results(self, test_id, target_key, project_name, :return: Output of command. 
:rtype: str """ - cmd = 'listtestresults %s %s %s %s %s' % (test_id, target_key, - project_name, machine_name, - pool_name) + cmd = f"listtestresults {test_id} {target_key} {project_name} {machine_name} {pool_name}" return self._session.cmd_output(cmd) - def list_tests_results(self, tests_id, target_key, project_name, - machine_name, pool_name): + def list_tests_results( + self, tests_id, target_key, project_name, machine_name, pool_name + ): """ List tests results. @@ -416,28 +430,44 @@ def list_tests_results(self, tests_id, target_key, project_name, :rtype: str """ results = "" - LOG_JOB.info('Getting tests results:') + LOG_JOB.info("Getting tests results:") host_path = os.path.join(self._test.resultsdir, "hlk_test_result_logs") if not os.path.exists(host_path): os.makedirs(host_path) for test_id in tests_id: - output = self.list_test_results(test_id, target_key, project_name, - machine_name, pool_name) - results_index = re.findall(r'Test result index :\s+(\d+)', output, re.M) + output = self.list_test_results( + test_id, target_key, project_name, machine_name, pool_name + ) + results_index = re.findall(r"Test result index :\s+(\d+)", output, re.M) for result_index in results_index: - o = self.zip_test_result_logs(result_index, test_id, target_key, - project_name, machine_name, pool_name) + o = self.zip_test_result_logs( + result_index, + test_id, + target_key, + project_name, + machine_name, + pool_name, + ) zip_path = o.splitlines()[-1] - LOG_JOB.info('Uploading the test result from %s to %s:', - zip_path, host_path) + LOG_JOB.info( + "Uploading the test result from %s to %s:", zip_path, host_path + ) self._vm.copy_files_from(zip_path, host_path) results = results + output LOG_JOB.info(results) return results - def run_tests(self, tests_id, target_key, project_name, - machine_name, pool_name, timeout=600, step=3): + def run_tests( + self, + tests_id, + target_key, + project_name, + machine_name, + pool_name, + timeout=600, + step=3, + ): """ Run tests. @@ -458,25 +488,28 @@ def run_tests(self, tests_id, target_key, project_name, :raise: HLKRunError, if run timeout or found error messages. """ for test_id in tests_id: - self.queue_test(test_id, target_key, project_name, - machine_name, pool_name) + self.queue_test(test_id, target_key, project_name, machine_name, pool_name) if not utils_misc.wait_for( - lambda: "NotRunning" == self.get_project(project_name)['status'], - timeout, step=step): - raise HLKRunError('Timeout for running tests.') - - resutls = self.list_tests_results(tests_id, target_key, project_name, - machine_name, pool_name) + lambda: "NotRunning" == self.get_project(project_name)["status"], + timeout, + step=step, + ): + raise HLKRunError("Timeout for running tests.") + + resutls = self.list_tests_results( + tests_id, target_key, project_name, machine_name, pool_name + ) err_msg = [] for result in resutls.splitlines(): if "Task error message" in result: err_msg.append(result.strip()) if err_msg: - raise HLKRunError('Found task error messages:%s' % err_msg) + raise HLKRunError(f"Found task error messages:{err_msg}") - def simple_run_test(self, pool_name, project_name, target_name, - tests_name, timeout=14400, step=600): + def simple_run_test( + self, pool_name, project_name, target_name, tests_name, timeout=14400, step=600 + ): """ Simple run test. 
@@ -494,7 +527,7 @@ def simple_run_test(self, pool_name, project_name, target_name, :type step: int """ default_pool = self.get_default_pool()[0] - machine_name = default_pool['machine_name'] + machine_name = default_pool["machine_name"] self.create_pool(pool_name) self.move_machine_from_default_pool(machine_name, pool_name) self.set_machine_state(machine_name, pool_name, STATE_READY) @@ -506,13 +539,14 @@ def simple_run_test(self, pool_name, project_name, target_name, tests_id = [] for test_name in tests_name: - test_id = self.get_target_test_id(test_name, target_key, - project_name, machine_name, - pool_name) + test_id = self.get_target_test_id( + test_name, target_key, project_name, machine_name, pool_name + ) tests_id.append(test_id) - self.run_tests(tests_id, target_key, project_name, - machine_name, pool_name, timeout, step) + self.run_tests( + tests_id, target_key, project_name, machine_name, pool_name, timeout, step + ) def install_hlk_client(vm_client, vm_server, timeout=1200): @@ -529,9 +563,9 @@ def install_hlk_client(vm_client, vm_server, timeout=1200): client_session = vm_client.wait_for_login(timeout=600) server_session = vm_server.wait_for_login(timeout=600) server_mac = vm_server.virtnet[0].mac - server_ip = utils_net.get_guest_ip_addr(server_session, server_mac, 'windows') - client_session.cmd(r'REG DELETE HKCR\pysFile /f') - inst_cmd = r'\\%s\HLKInstall\Client\Setup.cmd /qn ICFAGREE=Yes' % server_ip + server_ip = utils_net.get_guest_ip_addr(server_session, server_mac, "windows") + client_session.cmd(r"REG DELETE HKCR\pysFile /f") + inst_cmd = rf"\\{server_ip}\HLKInstall\Client\Setup.cmd /qn ICFAGREE=Yes" client_session.cmd(inst_cmd, timeout) @@ -553,37 +587,40 @@ def download_hlk_server_image(params, src_img_uri, timeout=1800): :raise: HLKError, if URI is invalid or not supported. """ if re.search(r'(^http:)|(^https:)|(^ftp")|(^ftps:")', src_img_uri): - src_img_name = src_img_uri.split('/')[-1] + src_img_name = src_img_uri.split("/")[-1] dst_img_path = os.path.join(data_dir.DATA_DIR, "images", src_img_name) dst_img_dir = os.path.dirname(dst_img_path) if not os.path.exists(dst_img_path): - LOG_JOB.info('Checking HLK Server URI %s:', src_img_uri) - curl_check_cmd = 'curl -I -L -k -m 120 %s' % src_img_uri + LOG_JOB.info("Checking HLK Server URI %s:", src_img_uri) + curl_check_cmd = f"curl -I -L -k -m 120 {src_img_uri}" output = process.run(curl_check_cmd).stdout_text - if 'File not found' in output: - raise HLKError('Invalid URI %s.' 
% src_img_uri) + if "File not found" in output: + raise HLKError(f"Invalid URI {src_img_uri}.") - LOG_JOB.info('Downloading HLK Server from %s to %s/:', - src_img_uri, dst_img_dir) - curl_download_cmd = 'curl -o %s %s' % (dst_img_path, src_img_uri) + LOG_JOB.info( + "Downloading HLK Server from %s to %s/:", src_img_uri, dst_img_dir + ) + curl_download_cmd = f"curl -o {dst_img_path} {src_img_uri}" process.run(curl_download_cmd, timeout) else: - LOG_JOB.info('Found HLK Server image: %s.', dst_img_path) + LOG_JOB.info("Found HLK Server image: %s.", dst_img_path) if archive.is_archive(dst_img_path): LOG_JOB.info("Uncompressing %s :", dst_img_path) img_name = archive.uncompress(dst_img_path, dst_img_dir) dst_img_path = os.path.join(dst_img_dir, img_name) - LOG_JOB.info('The uncompressed destination path: %s', dst_img_path) + LOG_JOB.info("The uncompressed destination path: %s", dst_img_path) qemu_binary = utils_misc.get_qemu_img_binary(params) - info_cmd = "%s info %s --output=json" % (qemu_binary, dst_img_path) + info_cmd = f"{qemu_binary} info {dst_img_path} --output=json" info_dict = json.loads(process.run(info_cmd).stdout_text) - dst_img = {'image_name': info_dict['filename'].split('.')[0], - 'image_size': info_dict['virtual-size'], - 'image_format': info_dict['format']} + dst_img = { + "image_name": info_dict["filename"].split(".")[0], + "image_size": info_dict["virtual-size"], + "image_format": info_dict["format"], + } return dst_img else: - raise HLKError('No supported URI: %s.' % src_img_uri) + raise HLKError(f"No supported URI: {src_img_uri}.") diff --git a/provider/win_wora.py b/provider/win_wora.py index a4337dad2e..a47a6c5fba 100644 --- a/provider/win_wora.py +++ b/provider/win_wora.py @@ -2,13 +2,13 @@ windows workaround functions. """ + import logging import re -from virttest import error_context -from virttest import utils_misc +from virttest import error_context, utils_misc -LOG_JOB = logging.getLogger('avocado.test') +LOG_JOB = logging.getLogger("avocado.test") @error_context.context_aware @@ -20,17 +20,19 @@ def modify_driver(params, session): issue details please refer to: https://support.huawei.com/enterprise/zh/doc/EDOC1100034211/5ba99a60. 
""" - devcon_path = utils_misc.set_winutils_letter(session, - params["devcon_path"]) + devcon_path = utils_misc.set_winutils_letter(session, params["devcon_path"]) dev_hwid = params["dev_hwid"] - chk_cmd = '%s find %s' % (devcon_path, dev_hwid) - chk_pat = r'ACPI\\ACPI0010.*\: Generic Bus' + chk_cmd = f"{devcon_path} find {dev_hwid}" + chk_pat = r"ACPI\\ACPI0010.*\: Generic Bus" if not re.search(chk_pat, session.cmd(chk_cmd)): - error_context.context("Install 'HID Button over Interrupt Driver' " - "to Generic Bus", LOG_JOB.info) - inst_cmd = '%s install %s %s' % (devcon_path, - params["driver_inf_file"], - dev_hwid) + error_context.context( + "Install 'HID Button over Interrupt Driver' " "to Generic Bus", LOG_JOB.info + ) + inst_cmd = "{} install {} {}".format( + devcon_path, + params["driver_inf_file"], + dev_hwid, + ) if session.cmd_status(inst_cmd, timeout=60): LOG_JOB.error("'HID Button over Interrupt Driver' modify failed") LOG_JOB.info("'HID Button over Interrupt Driver' modify finished") diff --git a/qemu/deps/cdrom/tray_open.py b/qemu/deps/cdrom/tray_open.py index 4bafcb5fcd..f7ab090b73 100644 --- a/qemu/deps/cdrom/tray_open.py +++ b/qemu/deps/cdrom/tray_open.py @@ -3,8 +3,10 @@ if len(sys.argv) > 1: if "linux" in sys.platform: - import CDROM import fcntl + + import CDROM + fd = os.open(sys.argv[1], os.O_RDONLY | os.O_NONBLOCK) if CDROM.CDS_TRAY_OPEN == fcntl.ioctl(fd, CDROM.CDROM_DRIVE_STATUS): @@ -15,9 +17,10 @@ os.close(fd) else: import ctypes - msg = u"open %s: type cdaudio alias d_drive" % sys.argv[1] + + msg = f"open {sys.argv[1]}: type cdaudio alias d_drive" ctypes.windll.WINMM.mciSendStringW(msg, None, 0, None) - msg = u"status d_drive length" + msg = "status d_drive length" if ctypes.windll.WINMM.mciSendStringW(msg, None, 0, None) == 0: print("cdrom is close") else: diff --git a/qemu/deps/performance/start_testpmd.py b/qemu/deps/performance/start_testpmd.py index 7778aa3629..2f1093ab85 100644 --- a/qemu/deps/performance/start_testpmd.py +++ b/qemu/deps/performance/start_testpmd.py @@ -1,13 +1,12 @@ +import locale import logging +import subprocess import sys import time -import locale -import pexpect -import subprocess +import pexpect from six import string_types - nic1_driver = sys.argv[1] nic2_driver = sys.argv[2] whitelist_option = sys.argv[3] @@ -20,26 +19,36 @@ ENCODING = locale.getpreferredencoding() -class TestPMD(object): - +class TestPMD: def __init__(self): - self.proc = None testpmd_cmd = subprocess.check_output( - "rpm -ql dpdk |grep testpmd", shell=True).decode() + "rpm -ql dpdk |grep testpmd", shell=True + ).decode() self.testpmd_exec = testpmd_cmd - def launch(self, nic1_driver, nic2_driver, - whitelist_option, nic1, nic2, cores, queues): - - cmd = ("-l 1,2,3 -n 4 -d %s -d %s" - " %s %s %s %s " - " -- " - " -i --nb-cores=%d " - " --disable-rss --rxd=512 --txd=512 " - " --rxq=%d --txq=%d" % ( - nic1_driver, nic2_driver, whitelist_option, - nic1, whitelist_option, nic2, cores, queues, queues)) + def launch( + self, nic1_driver, nic2_driver, whitelist_option, nic1, nic2, cores, queues + ): + cmd = ( + "-l 1,2,3 -n 4 -d %s -d %s" + " %s %s %s %s " + " -- " + " -i --nb-cores=%d " + " --disable-rss --rxd=512 --txd=512 " + " --rxq=%d --txq=%d" + % ( + nic1_driver, + nic2_driver, + whitelist_option, + nic1, + whitelist_option, + nic2, + cores, + queues, + queues, + ) + ) cmd_str = self.testpmd_exec + cmd logging.info("[cmd] %s", cmd_str) try: @@ -71,25 +80,25 @@ def set_port_stats(self): self.command("show port stats all") def set_portlist(self, portlist): - 
self.command("set portlist %s" % portlist) + self.command(f"set portlist {portlist}") def get_config_fwd(self): self.command("show config fwd") def set_fwd_mac_retry(self): - self.command('set fwd mac retry') + self.command("set fwd mac retry") def set_vlan_0(self): - self.command('vlan set strip on 0') + self.command("vlan set strip on 0") def set_vlan_1(self): - self.command('vlan set strip on 1') + self.command("vlan set strip on 1") def command(self, cmd): self.proc.sendline(cmd) self.proc.expect("testpmd>") logging.info("testpmd> %s", cmd) - print("testpmd> %s" % cmd) + print(f"testpmd> {cmd}") logging.info(self.proc.before) line_list = to_text(self.proc.before).split("\n") for subline in line_list: @@ -99,9 +108,9 @@ def command(self, cmd): return to_text(self.proc.before) -def start_testpmd(nic1_driver, nic2_driver, whitelist_option, - nic1, nic2, cores, queues): - +def start_testpmd( + nic1_driver, nic2_driver, whitelist_option, nic1, nic2, cores, queues +): my_testpmd = TestPMD() my_testpmd.launch( nic1_driver=nic1_driver, @@ -110,7 +119,8 @@ def start_testpmd(nic1_driver, nic2_driver, whitelist_option, nic1=nic1, nic2=nic2, cores=cores, - queues=queues) + queues=queues, + ) my_testpmd.set_fwd_mac_retry() my_testpmd.set_vlan_0() @@ -124,7 +134,7 @@ def start_testpmd(nic1_driver, nic2_driver, whitelist_option, end_time = start_time + running_time while time.time() < end_time: time.sleep(10) - print("time.time=%s" % time.time) + print(f"time.time={time.time}") my_testpmd.stop() my_testpmd.set_port_stats() my_testpmd.quit() @@ -145,12 +155,8 @@ def to_text(data): if isinstance(data, bytes): return data.decode(ENCODING) elif not isinstance(data, string_types): - if sys.version_info[0] < 3: - return unicode(data) # pylint: disable=E0602 - else: - return str(data) + return str(data) return data -start_testpmd(nic1_driver, nic2_driver, whitelist_option, - nic1, nic2, cores, queues) +start_testpmd(nic1_driver, nic2_driver, whitelist_option, nic1, nic2, cores, queues) diff --git a/qemu/deps/softlockup/heartbeat_slu.py b/qemu/deps/softlockup/heartbeat_slu.py index 0568bb2684..539dd13d0d 100755 --- a/qemu/deps/softlockup/heartbeat_slu.py +++ b/qemu/deps/softlockup/heartbeat_slu.py @@ -2,11 +2,11 @@ Heartbeat server/client to detect soft lockups """ -import socket +import getopt import os +import socket import sys import time -import getopt def daemonize(output_file): @@ -24,14 +24,14 @@ def daemonize(output_file): sys.stderr.flush() if output_file: - output_handle = open(output_file, 'a+') + output_handle = open(output_file, "a+") # autoflush stdout/stderr - sys.stdout = os.fdopen(sys.stdout.fileno(), 'w') - sys.stderr = os.fdopen(sys.stderr.fileno(), 'w') + sys.stdout = os.fdopen(sys.stdout.fileno(), "w") + sys.stderr = os.fdopen(sys.stderr.fileno(), "w") else: - output_handle = open('/dev/null', 'a+') + output_handle = open("/dev/null", "a+") - stdin_handle = open('/dev/null', 'r') + stdin_handle = open("/dev/null", "r") os.dup2(output_handle.fileno(), sys.stdout.fileno()) os.dup2(output_handle.fileno(), sys.stderr.fileno()) os.dup2(stdin_handle.fileno(), sys.stdin.fileno()) @@ -44,7 +44,7 @@ def recv_all(sock): if not data: break total_data.append(data) - return ''.join(total_data) + return "".join(total_data) def run_server(host, port, daemon, path, queue_size, threshold, drift): @@ -58,8 +58,7 @@ def run_server(host, port, daemon, path, queue_size, threshold, drift): c_sock, _ = sock.accept() heartbeat = recv_all(c_sock) local_timestamp = float(time.time()) - drift = 
check_heartbeat(heartbeat, local_timestamp, threshold, - check_drift) + drift = check_heartbeat(heartbeat, local_timestamp, threshold, check_drift) # NOTE: this doesn't work if the only client is the one that timed # out, but anything more complete would require another thread and # a lock for client_prev_timestamp. @@ -68,9 +67,9 @@ def run_server(host, port, daemon, path, queue_size, threshold, drift): prev_check_timestamp = local_timestamp if verbose: if check_drift: - print("%.2f: %s (%s)" % (local_timestamp, heartbeat, drift)) + print(f"{local_timestamp:.2f}: {heartbeat} ({drift})") else: - print("%.2f: %s" % (local_timestamp, heartbeat)) + print(f"{local_timestamp:.2f}: {heartbeat}") def run_client(host, port, daemon, path, interval): @@ -86,9 +85,8 @@ def run_client(host, port, daemon, path, interval): sock.close() if verbose: print(heartbeat) - except socket.error as message: - print("%.2f: ERROR - %s" - % (float(time.time()), message)) + except OSError as message: + print(f"{float(time.time()):.2f}: ERROR - {message}") seq += 1 time.sleep(interval) @@ -104,8 +102,10 @@ def check_heartbeat(heartbeat, local_timestamp, threshold, check_drift): if hostname in client_prev_timestamp: delta = local_timestamp - client_prev_timestamp[hostname] if delta > threshold: - print("%.2f: ALERT, SLU detected on host %s, delta %ds" % - (float(time.time()), hostname, delta)) + print( + "%.2f: ALERT, SLU detected on host %s, delta %ds" + % (float(time.time()), hostname, delta) + ) client_prev_timestamp[hostname] = local_timestamp @@ -116,7 +116,7 @@ def check_heartbeat(heartbeat, local_timestamp, threshold, check_drift): drift = timestamp - local_timestamp - client_clock_offset[hostname] drift_delta = drift - client_prev_drift[hostname] client_prev_drift[hostname] = drift - return "drift %+4.2f (%+4.2f)" % (drift, drift_delta) + return f"drift {drift:+4.2f} ({drift_delta:+4.2f})" def check_for_timeouts(threshold, check_drift): @@ -126,8 +126,10 @@ def check_for_timeouts(threshold, check_drift): timestamp = client_prev_timestamp[hostname] delta = local_timestamp - timestamp if delta > threshold * 2: - print("%.2f: ALERT, SLU detected on host %s, no heartbeat for %ds" - % (local_timestamp, hostname, delta)) + print( + "%.2f: ALERT, SLU detected on host %s, no heartbeat for %ds" + % (local_timestamp, hostname, delta) + ) del client_prev_timestamp[hostname] if check_drift: del client_clock_offset[hostname] @@ -156,7 +158,7 @@ def usage(): # default param values host_port = 9001 -host_address = '' +host_address = "" interval = 1 # seconds between heartbeats threshold = 10 # seconds late till alert is_server = False @@ -170,12 +172,26 @@ def usage(): # process cmdline opts try: - opts, args = getopt.getopt(sys.argv[1:], "vhsfd:p:a:i:t:", [ - "server", "client", "no-daemon", "address=", "port=", - "file=", "server", "interval=", "threshold=", "verbose", - "check-drift", "help"]) + opts, args = getopt.getopt( + sys.argv[1:], + "vhsfd:p:a:i:t:", + [ + "server", + "client", + "no-daemon", + "address=", + "port=", + "file=", + "server", + "interval=", + "threshold=", + "verbose", + "check-drift", + "help", + ], + ) except getopt.GetoptError as e: - print("error: %s" % str(e)) + print(f"error: {str(e)}") usage() sys.exit(1) @@ -204,15 +220,22 @@ def usage(): usage() sys.exit(0) else: - print("error: unrecognized option: %s" % value) + print(f"error: unrecognized option: {value}") usage() sys.exit(1) # run until we're terminated if is_server: file_server = file_selected or file_server - run_server(host_address, 
host_port, is_daemon, file_server, queue_size, - threshold, check_drift) + run_server( + host_address, + host_port, + is_daemon, + file_server, + queue_size, + threshold, + check_drift, + ) else: file_client = file_selected or file_client run_client(host_address, host_port, is_daemon, file_client, interval) diff --git a/qemu/deps/spice/build_install.py b/qemu/deps/spice/build_install.py index 1f94614787..18c0136e22 100755 --- a/qemu/deps/spice/build_install.py +++ b/qemu/deps/spice/build_install.py @@ -1,22 +1,29 @@ #!/usr/bin/python -''' +""" Script to build and install packages from git in VMs -''' +""" +import optparse import os -import sys import re -import optparse import subprocess +import sys def run_subprocess_cmd(args): - output = subprocess.Popen(args, shell=False, - stdin=subprocess.PIPE, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - close_fds=True).stdout.read().strip() + output = ( + subprocess.Popen( + args, + shell=False, + stdin=subprocess.PIPE, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + close_fds=True, + ) + .stdout.read() + .strip() + ) return output @@ -34,9 +41,11 @@ def run_subprocess_cmd(args): git_repo["spice-server"] = "git://anongit.freedesktop.org/spice/spice" # options to pass -autogen_options["spice-gtk"] = "--disable-gtk-doc --disable-werror --disable-vala --enable-smartcard" +autogen_options["spice-gtk"] = ( + "--disable-gtk-doc --disable-werror --disable-vala --enable-smartcard" +) autogen_options["spice-vd-agent"] = "--libdir=/usr/lib64 --sysconfdir=/etc" -autogen_options["xf86-video-qxl"] = "--libdir=\"/usr/lib64\"" +autogen_options["xf86-video-qxl"] = '--libdir="/usr/lib64"' autogen_options["virt-viewer"] = "--with-spice-gtk --disable-update-mimedb" autogen_options["spice-server"] = "--enable-smartcard" prefix_defaults["spice-protocol"] = "/usr/local" @@ -59,22 +68,33 @@ def run_subprocess_cmd(args): # Getting all parameters parser = optparse.OptionParser(usage=usageMsg) -parser.add_option("-p", "--package", dest="pkgName", - help="Name of package to build. Required.") -parser.add_option("-g", "--gitRepo", dest="gitRepo", - help="Repo to download and build package from") -parser.add_option("-b", "--branch", dest="branch", default="master", - help="Branch to checkout and use") -parser.add_option("-d", "--destDir", dest="destDir", - help="Destination Dir to store repo at") -parser.add_option("-c", "--commit", dest="commit", - help="Specific commit to download") -parser.add_option("-l", "--prefix", dest="prefix", - help="Location to store built binaries/libraries") -parser.add_option("-o", "--buildOptions", dest="buildOptions", - help="Options to pass to autogen.sh while building") -parser.add_option("--tarball", dest="tarballLocation", - help="Option to build from tarball. Pass tarball location") +parser.add_option( + "-p", "--package", dest="pkgName", help="Name of package to build. Required." 
+) +parser.add_option( + "-g", "--gitRepo", dest="gitRepo", help="Repo to download and build package from" +) +parser.add_option( + "-b", "--branch", dest="branch", default="master", help="Branch to checkout and use" +) +parser.add_option( + "-d", "--destDir", dest="destDir", help="Destination Dir to store repo at" +) +parser.add_option("-c", "--commit", dest="commit", help="Specific commit to download") +parser.add_option( + "-l", "--prefix", dest="prefix", help="Location to store built binaries/libraries" +) +parser.add_option( + "-o", + "--buildOptions", + dest="buildOptions", + help="Options to pass to autogen.sh while building", +) +parser.add_option( + "--tarball", + dest="tarballLocation", + help="Option to build from tarball. Pass tarball location", +) (options, args) = parser.parse_args() @@ -98,7 +118,7 @@ def run_subprocess_cmd(args): f = open("/etc/redhat-release", "r") rhelVersion = f.read() -print("OS: %s" % rhelVersion) +print(f"OS: {rhelVersion}") if re.findall("release 6", rhelVersion): if pkgName in ("spice-gtk", "virt-viewer"): autogen_options[pkgName] += " --with-gtk=2.0" @@ -106,7 +126,6 @@ def run_subprocess_cmd(args): autogen_options[pkgName] += " --disable-kms" if not tarballLocation: - # If spice-gtk & not tarball, then disable spice controller if pkgName == "spice-gtk": autogen_options[pkgName] += " --disable-controller" @@ -122,70 +141,75 @@ def run_subprocess_cmd(args): destDir = os.path.join("/tmp", basename) if os.path.exists(destDir): print("Deleting existing destination directory") - subprocess.check_call(("rm -rf %s" % destDir).split()) + subprocess.check_call((f"rm -rf {destDir}").split()) # If destination directory doesn't exist, create it if not os.path.exists(destDir): - print("Creating directory %s for git repo %s" % (destDir, git_repo[pkgName])) + print(f"Creating directory {destDir} for git repo {git_repo[pkgName]}") os.makedirs(destDir) # Switch to the directory os.chdir(destDir) # If git repo already exists, reset. 
If not, initialize - if os.path.exists('.git'): - print("Resetting previously existing git repo at %s for receiving git repo %s" % (destDir, git_repo[pkgName])) + if os.path.exists(".git"): + print( + f"Resetting previously existing git repo at {destDir} for receiving git repo {git_repo[pkgName]}" + ) subprocess.check_call("git reset --hard".split()) else: - print("Initializing new git repo at %s for receiving git repo %s" % (destDir, git_repo[pkgName])) + print( + f"Initializing new git repo at {destDir} for receiving git repo {git_repo[pkgName]}" + ) subprocess.check_call("git init".split()) # Fetch the contents of the repo - print("Fetching git [REP '%s' BRANCH '%s'] -> %s" % (git_repo[pkgName], branch, destDir)) - subprocess.check_call(("git fetch -q -f -u -t %s %s:%s" % - (git_repo[pkgName], branch, branch)).split()) + print(f"Fetching git [REP '{git_repo[pkgName]}' BRANCH '{branch}'] -> {destDir}") + subprocess.check_call( + (f"git fetch -q -f -u -t {git_repo[pkgName]} {branch}:{branch}").split() + ) # checkout the branch specified, master by default - print("Checking out branch %s" % branch) - subprocess.check_call(("git checkout %s" % branch).split()) + print(f"Checking out branch {branch}") + subprocess.check_call((f"git checkout {branch}").split()) # If a certain commit is specified, checkout that commit if commit is not None: - print("Checking out commit %s" % commit) - subprocess.check_call(("git checkout %s" % commit).split()) + print(f"Checking out commit {commit}") + subprocess.check_call((f"git checkout {commit}").split()) else: print("Specific commit not specified") # Adding remote origin print("Adding remote origin") - args = ("git remote add origin %s" % git_repo[pkgName]).split() + args = (f"git remote add origin {git_repo[pkgName]}").split() output = run_subprocess_cmd(args) # Get the commit and tag which repo is at - args = 'git log --pretty=format:%H -1'.split() + args = "git log --pretty=format:%H -1".split() print("Running 'git log --pretty=format:%H -1' to get top commit") top_commit = run_subprocess_cmd(args) - args = 'git describe'.split() + args = "git describe".split() print("Running 'git describe' to get top tag") top_tag = run_subprocess_cmd(args) if top_tag is None: - top_tag_desc = 'no tag found' + top_tag_desc = "no tag found" else: - top_tag_desc = 'tag %s' % top_tag - print("git commit ID is %s (%s)" % (top_commit, top_tag_desc)) + top_tag_desc = f"tag {top_tag}" + print(f"git commit ID is {top_commit} ({top_tag_desc})") # If tarball is not specified else: tarballName = tarballLocation.split("/")[-1] - args = ('wget -O /tmp/%s %s' % (tarballName, tarballLocation)).split() + args = (f"wget -O /tmp/{tarballName} {tarballLocation}").split() output = run_subprocess_cmd(args) - args = ('tar xf /tmp/%s -C /tmp' % tarballName).split() + args = (f"tar xf /tmp/{tarballName} -C /tmp").split() output = run_subprocess_cmd(args) tarballName = re.sub(".tar.bz2", "", tarballName) - destDir = "/tmp/%s" % tarballName + destDir = f"/tmp/{tarballName}" os.chdir(destDir) # If prefix to be passed to autogen.sh is in the defaults, use that @@ -195,12 +219,15 @@ def run_subprocess_cmd(args): # if no prefix is set, the use default PKG_CONFIG_PATH. 
If not, set to # prefix's PKG_CONFIG_PATH if prefix is None: - env_vars = ("PKG_CONFIG_PATH=$PKG_CONFIG_PATH:/usr/local/share/pkgconfig:" - "/usr/local/lib:/usr/local/lib/pkgconfig:/usr/local/lib/pkg-config:") + env_vars = ( + "PKG_CONFIG_PATH=$PKG_CONFIG_PATH:/usr/local/share/pkgconfig:" + "/usr/local/lib:/usr/local/lib/pkgconfig:/usr/local/lib/pkg-config:" + ) else: - env_vars = ("PKG_CONFIG_PATH=$PKG_CONFIG_PATH:%s/share/pkgconfig:%s/lib:" - "/usr/local/share/pkgconfig:%s/lib/pkgconfig:%s/lib/pkg-config:" - % (prefix, prefix, prefix, prefix)) + env_vars = ( + f"PKG_CONFIG_PATH=$PKG_CONFIG_PATH:{prefix}/share/pkgconfig:{prefix}/lib:" + f"/usr/local/share/pkgconfig:{prefix}/lib/pkgconfig:{prefix}/lib/pkg-config:" + ) # Running autogen.sh with prefix and any other options # Using os.system because subprocess.Popen would not work @@ -211,37 +238,39 @@ def run_subprocess_cmd(args): if not os.path.exists(cmd): cmd = destDir + "/configure" if not os.path.exists(cmd): - print("%s doesn't exist! Something's wrong!" % cmd) + print(f"{cmd} doesn't exist! Something's wrong!") sys.exit(1) if prefix is not None: - cmd += " --prefix=\"" + prefix + "\"" + cmd += ' --prefix="' + prefix + '"' if pkgName in autogen_options.keys(): cmd += " " + autogen_options[pkgName] -print("Running '%s %s'" % (env_vars, cmd)) +print(f"Running '{env_vars} {cmd}'") ret = os.system(env_vars + " " + cmd) if ret != 0: - print("Return code: %s! Autogen.sh failed! Exiting!" % ret) + print(f"Return code: {ret}! Autogen.sh failed! Exiting!") sys.exit(1) # Temporary workaround for building spice-vdagent if pkgName == "spice-vd-agent": - os.system("sed -i '/^src_spice_vdagent_CFLAGS/ s/$/ -fno-strict-aliasing/g' Makefile.am") + os.system( + "sed -i '/^src_spice_vdagent_CFLAGS/ s/$/ -fno-strict-aliasing/g' Makefile.am" + ) os.system("sed -i '/(PCIACCESS_CFLAGS)/ s/$/ -fno-strict-aliasing/g' Makefile.am") # Running 'make' to build and using os.system again cmd = "make" -print("Running '%s %s'" % (env_vars, cmd)) -ret = os.system("%s %s" % (env_vars, cmd)) +print(f"Running '{env_vars} {cmd}'") +ret = os.system(f"{env_vars} {cmd}") if ret != 0: - print("Return code: %s! make failed! Exiting!" % ret) + print(f"Return code: {ret}! make failed! Exiting!") sys.exit(1) # Running 'make install' to install the built libraries/binaries cmd = "make install" -print("Running '%s %s'" % (env_vars, cmd)) -ret = os.system("%s %s" % (env_vars, cmd)) +print(f"Running '{env_vars} {cmd}'") +ret = os.system(f"{env_vars} {cmd}") if ret != 0: - print("Return code: %s! make install failed! Exiting!" % ret) + print(f"Return code: {ret}! make install failed! 
Exiting!") sys.exit(ret) diff --git a/qemu/deps/spice/key_event_form.py b/qemu/deps/spice/key_event_form.py index 55433d4eef..e48682e5fa 100644 --- a/qemu/deps/spice/key_event_form.py +++ b/qemu/deps/spice/key_event_form.py @@ -2,9 +2,8 @@ class TestForm(gtk.Window): - def __init__(self): - super(TestForm, self).__init__() + super().__init__() self.set_title("Key test") self.set_size_request(200, 200) @@ -28,7 +27,7 @@ def __init__(self): def on_key_press_event(self, widget, event): # Store caught keycodes into text file input_file = open("/tmp/autotest-rv_input", "a") - input_file.write("{0} ".format(event.keyval)) + input_file.write(f"{event.keyval} ") input_file.close() diff --git a/qemu/deps/win_driver_install/win_driver_install.py b/qemu/deps/win_driver_install/win_driver_install.py index 78224eac94..953856f8cb 100644 --- a/qemu/deps/win_driver_install/win_driver_install.py +++ b/qemu/deps/win_driver_install/win_driver_install.py @@ -1,10 +1,10 @@ +import argparse +import logging import os import re -import sys import shutil -import logging -import argparse import subprocess +import sys logger = logging.getLogger(f"avocado.test.{__name__}") @@ -17,10 +17,11 @@ def cmd_output(cmd): """ logger.debug("Sending command: %s", cmd) try: - p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + p = subprocess.Popen( + cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE + ) except Exception as err: - error_msg = (f"Failed to execute cmd {cmd}!\n" - f"Details refers: {err}") + error_msg = f"Failed to execute cmd {cmd}!\n" f"Details refers: {err}" logger.error(error_msg) sys.exit(1) stdoutput = p.stdout.readlines() @@ -33,16 +34,16 @@ def getdpinst(vol_utils): :param vol_utils: Volume of Win_utils. """ - if os.environ.get('PROCESSOR_ARCHITECTURE') == "AMD64": - dpinst_dir = r"%s\dpinst_64.exe" % vol_utils + if os.environ.get("PROCESSOR_ARCHITECTURE") == "AMD64": + dpinst_dir = rf"{vol_utils}\dpinst_64.exe" else: - dpinst_dir = r"%s\dpinst_32.exe" % vol_utils + dpinst_dir = rf"{vol_utils}\dpinst_32.exe" if not os.path.exists(r"C:\dpinst.exe"): shutil.copy(dpinst_dir, r"C:\dpinst.exe") else: logger.debug("dpinst.exe is already existed") if not os.path.exists(r"C:\dpinst.xml"): - dpinst_xml = r"%s\dpinst.xml" % vol_utils + dpinst_xml = rf"{vol_utils}\dpinst.xml" shutil.copy(dpinst_xml, r"C:\dpinst.xml") else: logger.debug("dpinst.xml is already existed") @@ -54,11 +55,11 @@ def certutil(vol_utils): :param vol_utils: Volume of Win_utils. """ - certutil_cmd = r"certutil -addstore -f TrustedPublisher %s\redhat.cer" % vol_utils + certutil_cmd = rf"certutil -addstore -f TrustedPublisher {vol_utils}\redhat.cer" if not os.path.exists(r"C:\certutil.exe"): - shutil.copy(r"%s\certutil.exe" % vol_utils, r"C:\certutil.exe") + shutil.copy(rf"{vol_utils}\certutil.exe", r"C:\certutil.exe") if not os.path.exists(r"C:\certadm.dll"): - shutil.copy(r"%s\certadm.dll" % vol_utils, r"C:\certadm.dll") + shutil.copy(rf"{vol_utils}\certadm.dll", r"C:\certadm.dll") logger.info("Install certificate!") cmd_output(certutil_cmd) @@ -71,7 +72,7 @@ def install_driver(driver_path, driver_name, vol_utils): :param driver_name: Driver name which will be installed. :param vol_utils: Volume of Win_utils. 
""" - install_driver_cmd = r"C:\dpinst.exe /A /PATH %s /C /LM /Q /F" % driver_path + install_driver_cmd = rf"C:\dpinst.exe /A /PATH {driver_path} /C /LM /Q /F" certutil(vol_utils) logger.info("Install driver %s!", driver_name) cmd_output(install_driver_cmd) @@ -85,12 +86,11 @@ def get_inf_files(driver_path, driver_name): :param driver_name: Driver name which will be installed. :return inf_files: Inf file path. """ - inf_name = ("%s.inf" % driver_name).lower() + inf_name = (f"{driver_name}.inf").lower() inf_files = [] for root, dirs, files in os.walk(driver_path): files_path = map(lambda x: os.path.join(root, x), files) - inf_files += list( - filter(lambda x: x.lower().endswith(inf_name), files_path)) + inf_files += list(filter(lambda x: x.lower().endswith(inf_name), files_path)) return inf_files @@ -121,8 +121,10 @@ def get_current_driver_ver(device_name): :return: Current driver version. """ key = r"\d*\.\d*\.\d*\.\d*" - get_driver_ver_cmd = ("wmic path win32_pnpsigneddriver where" - " Devicename='%s' get driverversion" % device_name) + get_driver_ver_cmd = ( + "wmic path win32_pnpsigneddriver where" + f" Devicename='{device_name}' get driverversion" + ) driver_version = os.popen(get_driver_ver_cmd).read() if not driver_version.strip(): return "" @@ -155,9 +157,11 @@ def verify_driver_ver(driver_path, device_name, driver_name): expected_driver_ver = get_expected_driver_ver(driver_path, driver_name) logger.info("Compare whether driver version is same as expected.") if current_driver_ver != expected_driver_ver: - error_msg = ("Driver installation failed !\n" - "Current driver version %s is not equal" - " to the expected %s." % (current_driver_ver, expected_driver_ver)) + error_msg = ( + "Driver installation failed !\n" + f"Current driver version {current_driver_ver} is not equal" + f" to the expected {expected_driver_ver}." 
+ ) logger.error(error_msg) sys.exit(1) logger.info("Current driver version %s is same as expected.", current_driver_ver) @@ -174,33 +178,57 @@ def show_log_output(result_file): if __name__ == "__main__": - parser = argparse.ArgumentParser( - description='Windows Driver Operation') - parser.add_argument('-i', '--install_driver', - help='operation for install driver', - dest='install_driver', action='store_true') - parser.add_argument('-u', '--uninstall_driver', - help='operation for uninstall driver', - dest='uninstall_driver', action='store_true') - parser.add_argument('-q', '--query_driver', - help='operation for query driver', - dest='query_driver', action='store_true') - parser.add_argument('-v', '--verify_driver', - help='operation for verify driver', - dest='verify_driver', action='store_true') - parser.add_argument('-o', '--log_output', - help='operation for show log output', - dest='log_output', action='store_true') - parser.add_argument('--driver_path', - help='driver path', dest='driver_path', action='store') - parser.add_argument('--driver_name', - help='driver name', dest='driver_name', action='store') - parser.add_argument('--device_name', - help='the corresponding device name with driver', - dest='device_name', action='store') - parser.add_argument('--vol_utils', - help='volume of WIN_UTILS', - dest='vol_utils', action='store') + parser = argparse.ArgumentParser(description="Windows Driver Operation") + parser.add_argument( + "-i", + "--install_driver", + help="operation for install driver", + dest="install_driver", + action="store_true", + ) + parser.add_argument( + "-u", + "--uninstall_driver", + help="operation for uninstall driver", + dest="uninstall_driver", + action="store_true", + ) + parser.add_argument( + "-q", + "--query_driver", + help="operation for query driver", + dest="query_driver", + action="store_true", + ) + parser.add_argument( + "-v", + "--verify_driver", + help="operation for verify driver", + dest="verify_driver", + action="store_true", + ) + parser.add_argument( + "-o", + "--log_output", + help="operation for show log output", + dest="log_output", + action="store_true", + ) + parser.add_argument( + "--driver_path", help="driver path", dest="driver_path", action="store" + ) + parser.add_argument( + "--driver_name", help="driver name", dest="driver_name", action="store" + ) + parser.add_argument( + "--device_name", + help="the corresponding device name with driver", + dest="device_name", + action="store", + ) + parser.add_argument( + "--vol_utils", help="volume of WIN_UTILS", dest="vol_utils", action="store" + ) arguments = parser.parse_args() result_file = r"C:\driver_install.log" @@ -208,7 +236,9 @@ def show_log_output(result_file): logger.setLevel(logging.DEBUG) fh = logging.FileHandler(result_file, mode="a+") fh.setLevel(logging.DEBUG) - formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') + formatter = logging.Formatter( + "%(asctime)s - %(name)s - %(levelname)s - %(message)s" + ) fh.setFormatter(formatter) logger.addHandler(fh) @@ -221,16 +251,17 @@ def show_log_output(result_file): uninstall_driver(arguments.driver_name) elif arguments.install_driver: getdpinst(arguments.vol_utils) - install_driver(arguments.driver_path, arguments.driver_name, - arguments.vol_utils) + install_driver( + arguments.driver_path, arguments.driver_name, arguments.vol_utils + ) elif arguments.query_driver: current_driver_ver = get_current_driver_ver(arguments.device_name) - msg = "Current driver version for %s is %s" % 
(arguments.driver_name, - current_driver_ver) + msg = f"Current driver version for {arguments.driver_name} is {current_driver_ver}" logger.debug(msg) elif arguments.verify_driver: - verify_driver_ver(arguments.driver_path, arguments.device_name, - arguments.driver_name) + verify_driver_ver( + arguments.driver_path, arguments.device_name, arguments.driver_name + ) elif arguments.log_output: print("Execution log:\n") show_log_output(result_file) diff --git a/qemu/deps/win_serial/VirtIoChannel_guest_recieve.py b/qemu/deps/win_serial/VirtIoChannel_guest_recieve.py index 3ef78d5eb3..e0bbfdcf89 100755 --- a/qemu/deps/win_serial/VirtIoChannel_guest_recieve.py +++ b/qemu/deps/win_serial/VirtIoChannel_guest_recieve.py @@ -9,10 +9,10 @@ # import os -import sys +import platform import socket import struct -import platform +import sys class Message: @@ -44,14 +44,13 @@ class Message: class VirtIoChannel: - # Python on Windows 7 return 'Microsoft' rather than 'Windows' as documented. - is_windows = ((platform.system() == 'Windows') or - (platform.system() == 'Microsoft')) + is_windows = (platform.system() == "Windows") or (platform.system() == "Microsoft") def __init__(self, vport_name): if self.is_windows: from windows_support import WinBufferedReadFile + self._vport = WinBufferedReadFile(vport_name) else: self._vport = os.open(vport_name, os.O_RDWR) @@ -66,7 +65,7 @@ def read(self): cmd = struct.unpack("%ds" % len(rest), rest)[0] return cmd - def write(self, message, arg=''): + def write(self, message, arg=""): if not isinstance(message, int): raise TypeError("1nd arg must be a known message type.") if not isinstance(arg, str): @@ -82,26 +81,28 @@ def _read_header(self): hdr = self._vport.read(READ_HEADER_LEN) else: hdr = os.read(self._vport, (READ_HEADER_LEN)) - if hdr == '': + if hdr == "": return 0 return socket.ntohl(struct.unpack(READ_HEADER, hdr)[2]) - READ_HEADER_LEN def _pack_message(self, message, arg): size = WRITE_HEADER_LEN + len(arg) - stream = struct.pack(WRITE_HEADER + "%ds" % len(arg), - socket.htonl(1), - socket.htonl(3), - socket.htonl(size), - socket.htonl(message), - arg) + stream = struct.pack( + WRITE_HEADER + "%ds" % len(arg), + socket.htonl(1), + socket.htonl(3), + socket.htonl(size), + socket.htonl(message), + arg, + ) return stream def test(path): - if (platform.system() == 'Windows') or (platform.system() == 'Microsoft'): - vport_name = '\\\\.\\Global\\' + path + if (platform.system() == "Windows") or (platform.system() == "Microsoft"): + vport_name = "\\\\.\\Global\\" + path else: - vport_name = '/dev/virtio-ports/' + path + vport_name = "/dev/virtio-ports/" + path vio = VirtIoChannel(vport_name) print(vio.read()) diff --git a/qemu/deps/win_serial/serial-host-send.py b/qemu/deps/win_serial/serial-host-send.py index aee203d9dd..2e27c83481 100755 --- a/qemu/deps/win_serial/serial-host-send.py +++ b/qemu/deps/win_serial/serial-host-send.py @@ -1,8 +1,8 @@ #!/usr/bin/python -import sys import socket import struct +import sys WRITE_HEADER = "III" WRITE_HEADER_LEN = struct.calcsize(WRITE_HEADER) @@ -12,11 +12,13 @@ def pack_message(arg): size = WRITE_HEADER_LEN + len(arg) - stream = struct.pack(WRITE_HEADER + "%ds" % len(arg), - socket.htonl(1), - socket.htonl(3), - socket.htonl(size), - arg) + stream = struct.pack( + WRITE_HEADER + "%ds" % len(arg), + socket.htonl(1), + socket.htonl(3), + socket.htonl(size), + arg, + ) return stream @@ -33,7 +35,7 @@ def main(): vport.connect(sys.argv[1]) data_file = sys.argv[2] - with open(data_file, 'rb') as ff: + with open(data_file, "rb") 
as ff: arg = ff.read(65535) stream = pack_message(arg) vport.send(stream) diff --git a/qemu/deps/win_serial/windows_support.py b/qemu/deps/win_serial/windows_support.py index 9fb9fc4b49..3c62d623f3 100755 --- a/qemu/deps/win_serial/windows_support.py +++ b/qemu/deps/win_serial/windows_support.py @@ -1,23 +1,24 @@ - -import win32security -import win32file -import win32event -import win32con -import win32api import pywintypes +import win32api +import win32con +import win32event +import win32file +import win32security -class WinBufferedReadFile(object): +class WinBufferedReadFile: verbose = False def __init__(self, filename): - self._hfile = win32file.CreateFile(filename, - win32con.GENERIC_READ | win32con.GENERIC_WRITE, - win32con.FILE_SHARE_READ | win32con.FILE_SHARE_WRITE, - win32security.SECURITY_ATTRIBUTES(), - win32con.OPEN_EXISTING, - win32con.FILE_FLAG_OVERLAPPED, - 0) + self._hfile = win32file.CreateFile( + filename, + win32con.GENERIC_READ | win32con.GENERIC_WRITE, + win32con.FILE_SHARE_READ | win32con.FILE_SHARE_WRITE, + win32security.SECURITY_ATTRIBUTES(), + win32con.OPEN_EXISTING, + win32con.FILE_FLAG_OVERLAPPED, + 0, + ) self._read_ovrlpd = pywintypes.OVERLAPPED() self._read_ovrlpd.hEvent = win32event.CreateEvent(None, True, False, None) self._write_ovrlpd = pywintypes.OVERLAPPED() @@ -42,21 +43,28 @@ def read(self, n): frags = [] aux = 0 if self.verbose: - print("get %s, | bufs = %s [%s]" % (n, self._n, - ','.join(map(lambda x: str(len(x)), - self._bufs)))) + print( + "get {}, | bufs = {} [{}]".format( + n, self._n, ",".join(map(lambda x: str(len(x)), self._bufs)) + ) + ) while aux < n: frags.append(self._bufs.pop(0)) aux += len(frags[-1]) self._n -= n - whole = ''.join(frags) + whole = "".join(frags) ret, rest = whole[:n], whole[n:] if len(rest) > 0: self._bufs.append(rest) if self.verbose: - print("return %s(%s), | bufs = %s [%s]" % (len(ret), n, self._n, - ','.join(map(lambda x: str(len(x)), - self._bufs)))) + print( + "return {}({}), | bufs = {} [{}]".format( + len(ret), + n, + self._n, + ",".join(map(lambda x: str(len(x)), self._bufs)), + ) + ) return ret try: # 4096 is the largest result viosdev will return right now. 
@@ -66,9 +74,14 @@ def read(self, n): self._bufs.append(b[:nr]) self._n += nr if self.verbose: - print("read %s, err %s | bufs = %s [%s]" % (nr, err, self._n, - ','.join(map(lambda x: str(len(x)), - self._bufs)))) + print( + "read {}, err {} | bufs = {} [{}]".format( + nr, + err, + self._n, + ",".join(map(lambda x: str(len(x)), self._bufs)), + ) + ) except: pass # Never Reached diff --git a/qemu/deps/windows_ga_install/get_package.py b/qemu/deps/windows_ga_install/get_package.py index 04de818048..d1323acf1a 100644 --- a/qemu/deps/windows_ga_install/get_package.py +++ b/qemu/deps/windows_ga_install/get_package.py @@ -1,20 +1,28 @@ #!/bin/env python -import sys import re - -import commands +import sys from argparse import ArgumentParser +import commands -class GuestAgentPkg(object): +class GuestAgentPkg: """ A guest agent package class """ - def __init__(self, build_tag, build_name, url, authtype='', server='', - topdir='', weburl='', topurl=''): + def __init__( + self, + build_tag, + build_name, + url, + authtype="", + server="", + topdir="", + weburl="", + topurl="", + ): self.build_tag = build_tag self.build_name = build_name self.server_url = url @@ -25,25 +33,25 @@ def __init__(self, build_tag, build_name, url, authtype='', server='', self.topurl = topurl def _run_brew_cmd(self, cmd): - brew_cmd = 'brew ' + brew_cmd = "brew " if self.authtype: - brew_cmd += '--authtype=%s ' % self.authtype + brew_cmd += f"--authtype={self.authtype} " if self.server: - brew_cmd += '--server=%s ' % self.server + brew_cmd += f"--server={self.server} " if self.topdir: - brew_cmd += '--topdir=%s ' % self.topdir + brew_cmd += f"--topdir={self.topdir} " if self.weburl: - brew_cmd += '--weburl=%s ' % self.weburl + brew_cmd += f"--weburl={self.weburl} " if self.topurl: - brew_cmd += '--topurl=%s ' % self.topurl + brew_cmd += f"--topurl={self.topurl} " brew_cmd += cmd (status, output) = commands.getstatusoutput(brew_cmd) if status: - raise Exception("the cmd %s didn't run successfully" % brew_cmd) + raise Exception(f"the cmd {brew_cmd} didn't run successfully") return (status, output) def get_latest_build(self): - cmd = 'latest-build %s %s' % (self.build_tag, self.build_name) + cmd = f"latest-build {self.build_tag} {self.build_name}" (status, output) = self._run_brew_cmd(cmd) for line in output.splitlines(): if self.build_name in line: @@ -52,11 +60,11 @@ def get_latest_build(self): def get_build_url(self): build_name = self.get_latest_build() - cmd = 'buildinfo %s | grep msi' % build_name + cmd = f"buildinfo {build_name} | grep msi" (status, output) = self._run_brew_cmd(cmd) url_list = [] for package in output.splitlines(): - url = re.sub(r'/mnt/redhat', self.server_url, package) + url = re.sub(r"/mnt/redhat", self.server_url, package) url_list.append(url) return url_list @@ -68,9 +76,8 @@ def download_package(self): for url in url_list: (status, output) = commands.getstatusoutput(cmd % url) if status: - raise Exception("the download from %s didn't run successfully" - % url) - print("\033[32m %s download successfully\033[0m" % url) + raise Exception(f"the download from {url} didn't run successfully") + print(f"\033[32m {url} download successfully\033[0m") def parse_params(program): @@ -78,25 +85,41 @@ def parse_params(program): parse the params passed to the application """ parser = ArgumentParser(prog=program) - option_list = [('build_tag', "the tag of the build"), - ('build_name', "the name of the build")] - brew_conf_list = [('-s', '--server', "url of XMLRPC server"), - ('-a', '--authtype', "the type of 
authentication"), - ('-t', '--topdir', "specify topdir"), - ('-w', '--weburl', "url of the Koji web interface"), - ('-T', '--topurl', "url for Koji file access")] + option_list = [ + ("build_tag", "the tag of the build"), + ("build_name", "the name of the build"), + ] + brew_conf_list = [ + ("-s", "--server", "url of XMLRPC server"), + ("-a", "--authtype", "the type of authentication"), + ("-t", "--topdir", "specify topdir"), + ("-w", "--weburl", "url of the Koji web interface"), + ("-T", "--topurl", "url for Koji file access"), + ] for option in option_list: parser.add_argument(dest=option[0], help=option[1]) for brew_conf in brew_conf_list: parser.add_argument(brew_conf[0], brew_conf[1], help=brew_conf[2]) - parser.add_argument('-u', '--url', required=True, dest='download_url', - help='the server url which we can download package') + parser.add_argument( + "-u", + "--url", + required=True, + dest="download_url", + help="the server url which we can download package", + ) return parser.parse_args() -if __name__ == '__main__': +if __name__ == "__main__": args = parse_params(sys.argv[0]) - guestagent = GuestAgentPkg(args.build_tag, args.build_name, - args.download_url, args.authtype, args.server, - args.topdir, args.weburl, args.topurl) + guestagent = GuestAgentPkg( + args.build_tag, + args.build_name, + args.download_url, + args.authtype, + args.server, + args.topdir, + args.weburl, + args.topurl, + ) guestagent.download_package() diff --git a/qemu/tests/9p.py b/qemu/tests/9p.py index b188bd9081..af574cf9f7 100644 --- a/qemu/tests/9p.py +++ b/qemu/tests/9p.py @@ -21,7 +21,7 @@ def run(test, params, env): if mount_dir is None: test.log.info("User Variable for mount dir is not set") else: - session.cmd("mkdir -p %s" % mount_dir) + session.cmd(f"mkdir -p {mount_dir}") mount_option = " trans=virtio" @@ -37,19 +37,19 @@ def run(test, params, env): mount_option += ",posixacl" test.log.info("Mounting 9p mount point with options %s", mount_option) - cmd = "mount -t 9p -o %s autotest_tag %s" % (mount_option, mount_dir) + cmd = f"mount -t 9p -o {mount_option} autotest_tag {mount_dir}" mount_status = session.cmd_status(cmd) - if (mount_status != 0): + if mount_status != 0: test.log.error("mount failed") - test.fail('mount failed.') + test.fail("mount failed.") # Collect test parameters timeout = int(params.get("test_timeout", 14400)) - control_path = os.path.join(test.virtdir, "autotest_control", - params.get("test_control_file")) + control_path = os.path.join( + test.virtdir, "autotest_control", params.get("test_control_file") + ) outputdir = test.outputdir - utils_test.run_autotest(vm, session, control_path, - timeout, outputdir, params) + utils_test.run_autotest(vm, session, control_path, timeout, outputdir, params) diff --git a/qemu/tests/aio_test.py b/qemu/tests/aio_test.py index f8f418280d..582db298cf 100644 --- a/qemu/tests/aio_test.py +++ b/qemu/tests/aio_test.py @@ -5,17 +5,11 @@ import os import re -from avocado import TestCancel -from avocado import TestFail +from avocado import TestCancel, TestFail +from avocado.utils import cpu, path, process +from virttest import data_dir, utils_misc -from avocado.utils import cpu -from avocado.utils import path -from avocado.utils import process - -from virttest import data_dir -from virttest import utils_misc - -LOG_JOB = logging.getLogger('avocado.test') +LOG_JOB = logging.getLogger("avocado.test") def which(cmd): @@ -29,11 +23,13 @@ def which(cmd): def coroutine(func): """Start coroutine.""" + @functools.wraps(func) def start(*args, **kargs): cr = 
func(*args, **kargs) cr.send(None) return cr + return start @@ -52,7 +48,7 @@ def get_qemu_version(params, target): """Get installed QEMU version.""" LOG_JOB.debug("check QEMU version") qemu_binary = utils_misc.get_qemu_binary(params) - cmd = "%s --version" % qemu_binary + cmd = f"{qemu_binary} --version" line = process.run(cmd).stdout_text.splitlines()[0] version = line.split()[-1].strip("()") LOG_JOB.debug("QEMU version: %s", version) @@ -64,14 +60,13 @@ def brew_download_build(target): """Download source rpm.""" while True: version = yield - filename = "%s.src.rpm" % version + filename = f"{version}.src.rpm" root_dir = data_dir.get_data_dir() save_path = os.path.join(root_dir, filename) LOG_JOB.debug("download source rpm to %s", save_path) if not os.path.isfile(save_path): with chcwd(root_dir): - cmd = "brew download-build -q --rpm {filename}".format( - filename=filename) + cmd = f"brew download-build -q --rpm {filename}" process.run(cmd) target.send(save_path) @@ -82,10 +77,10 @@ def unpack_source(target): while True: path = yield LOG_JOB.debug("unpack source rpm") - process.run("rpm -ivhf {path}".format(path=path)) + process.run(f"rpm -ivhf {path}") process.run("rpmbuild -bp /root/rpmbuild/SPECS/qemu-kvm.spec --nodeps") version = re.search(r"\d+.\d+.\d+", path).group() - src_path = glob.glob("/root/rpmbuild/BUILD/qemu*%s" % version)[0] + src_path = glob.glob(f"/root/rpmbuild/BUILD/qemu*{version}")[0] target.send(src_path) @@ -99,8 +94,7 @@ def run_aio_tests(target): process.run("./configure") cpu_count = cpu.online_count() aio_path = "tests/test-aio" - make_cmd = "make {aio_path} -j{cpu_count}".format( - aio_path=aio_path, cpu_count=cpu_count) + make_cmd = f"make {aio_path} -j{cpu_count}" process.run(make_cmd) LOG_JOB.debug("run aio tests") result = process.run(aio_path) @@ -137,8 +131,6 @@ def run(test, params, env): # check if command brew and rpmbuild is presented which("brew") which("rpmbuild") - get_qemu_version(params, - brew_download_build( - unpack_source( - run_aio_tests( - parse_result())))) + get_qemu_version( + params, brew_download_build(unpack_source(run_aio_tests(parse_result()))) + ) diff --git a/qemu/tests/ansible_test.py b/qemu/tests/ansible_test.py index 8adab247da..fee131cecc 100644 --- a/qemu/tests/ansible_test.py +++ b/qemu/tests/ansible_test.py @@ -1,10 +1,8 @@ -import os import json +import os from avocado.utils import process - -from virttest import env_process -from virttest import error_context +from virttest import env_process, error_context from provider import ansible @@ -38,13 +36,14 @@ def run(test, params, env): custom_extra_vars = params.objects("custom_extra_vars") playbook_repo = params["playbook_repo"] playbook_timeout = params.get_numeric("playbook_timeout") - playbook_dir = params.get("playbook_dir", - os.path.join(test.workdir, "ansible_playbook")) + playbook_dir = params.get( + "playbook_dir", os.path.join(test.workdir, "ansible_playbook") + ) toplevel_playbook = os.path.join(playbook_dir, params["toplevel_playbook"]) # Use this directory to copy some logs back from the guest test_harness_log_dir = test.logdir - params['start_vm'] = 'yes' + params["start_vm"] = "yes" env_process.preprocess(test, params, env) vms = env.get_all_vms() guest_ip_list = [] @@ -54,14 +53,14 @@ def run(test, params, env): guest_ip_list.append(vm.get_address()) test.log.info("Cloning %s", playbook_repo) - process.run("git clone {src} {dst}".format(src=playbook_repo, - dst=playbook_dir), verbose=False) + process.run(f"git clone {playbook_repo} {playbook_dir}", 
verbose=False) - error_context.base_context("Generate playbook related options.", - test.log.info) - extra_vars = {"ansible_ssh_extra_args": ansible_ssh_extra_args, - "ansible_ssh_pass": guest_passwd, - "test_harness_log_dir": test_harness_log_dir} + error_context.base_context("Generate playbook related options.", test.log.info) + extra_vars = { + "ansible_ssh_extra_args": ansible_ssh_extra_args, + "ansible_ssh_pass": guest_passwd, + "test_harness_log_dir": test_harness_log_dir, + } extra_vars.update(json.loads(ansible_extra_vars)) custom_params = params.object_params("extra_vars") for cev in custom_extra_vars: @@ -74,7 +73,7 @@ def run(test, params, env): remote_user=guest_user, extra_vars=json.dumps(extra_vars), callback_plugin=ansible_callback_plugin, - addl_opts=ansible_addl_opts + addl_opts=ansible_addl_opts, ) ansible_log = "ansible_playbook.log" @@ -84,8 +83,10 @@ def run(test, params, env): test.error(str(err)) else: if playbook_executor.get_status() != 0: - test.fail("Ansible playbook execution failed, please check the {} " - "for details.".format(ansible_log)) + test.fail( + f"Ansible playbook execution failed, please check the {ansible_log} " + "for details." + ) test.log.info("Ansible playbook execution passed.") finally: playbook_executor.store_playbook_log(test_harness_log_dir, ansible_log) diff --git a/qemu/tests/ansible_with_responsive_migration.py b/qemu/tests/ansible_with_responsive_migration.py index 0f7e1b63bc..c88cd39e02 100644 --- a/qemu/tests/ansible_with_responsive_migration.py +++ b/qemu/tests/ansible_with_responsive_migration.py @@ -1,15 +1,12 @@ -import os import json +import os from avocado.utils import process from avocado.utils.network.ports import find_free_port - -from virttest import env_process -from virttest import error_context +from virttest import env_process, error_context from virttest.virt_vm import VMMigrateFailedError -from provider import ansible -from provider import message_queuing +from provider import ansible, message_queuing @error_context.context_aware @@ -41,8 +38,9 @@ def run(test, params, env): custom_extra_vars = params.objects("custom_extra_vars") playbook_repo = params["playbook_repo"] playbook_timeout = params.get_numeric("playbook_timeout") - playbook_dir = params.get("playbook_dir", - os.path.join(test.workdir, "ansible_playbook")) + playbook_dir = params.get( + "playbook_dir", os.path.join(test.workdir, "ansible_playbook") + ) toplevel_playbook = os.path.join(playbook_dir, params["toplevel_playbook"]) # Use this directory to copy some logs back from the guest test_harness_log_dir = test.logdir @@ -51,7 +49,7 @@ def run(test, params, env): mq_listen_port = params.get_numeric("mq_listen_port", find_free_port()) wait_response_timeout = params.get_numeric("wait_response_timeout", 600) - params['start_vm'] = 'yes' + params["start_vm"] = "yes" env_process.preprocess(test, params, env) vms = env.get_all_vms() guest_ip_list = [] @@ -61,15 +59,15 @@ def run(test, params, env): guest_ip_list.append(vm.get_address()) test.log.info("Cloning %s", playbook_repo) - process.run("git clone {src} {dst}".format(src=playbook_repo, - dst=playbook_dir), verbose=False) + process.run(f"git clone {playbook_repo} {playbook_dir}", verbose=False) - error_context.base_context("Generate playbook related options.", - test.log.info) - extra_vars = {"ansible_ssh_extra_args": ansible_ssh_extra_args, - "ansible_ssh_pass": guest_passwd, - "mq_port": mq_listen_port, - "test_harness_log_dir": test_harness_log_dir} + error_context.base_context("Generate playbook 
related options.", test.log.info) + extra_vars = { + "ansible_ssh_extra_args": ansible_ssh_extra_args, + "ansible_ssh_pass": guest_passwd, + "mq_port": mq_listen_port, + "test_harness_log_dir": test_harness_log_dir, + } extra_vars.update(json.loads(ansible_extra_vars)) custom_params = params.object_params("extra_vars") for cev in custom_extra_vars: @@ -82,13 +80,15 @@ def run(test, params, env): remote_user=guest_user, extra_vars=json.dumps(extra_vars), callback_plugin=ansible_callback_plugin, - addl_opts=ansible_addl_opts + addl_opts=ansible_addl_opts, ) mq_publisher = message_queuing.MQPublisher(mq_listen_port) try: - error_context.base_context('Confirm remote subscriber has accessed to ' - 'activate migrating guests.', test.log.info) + error_context.base_context( + "Confirm remote subscriber has accessed to " "activate migrating guests.", + test.log.info, + ) try: mq_publisher.confirm_access(wait_response_timeout) except message_queuing.MessageNotFoundError as err: @@ -96,18 +96,25 @@ def run(test, params, env): test.fail("Failed to capture the 'ACCESS' message.") test.log.info("Already captured the 'ACCESS' message.") - error_context.context("Migrate guests after subscriber accessed.", - test.log.info) + error_context.context( + "Migrate guests after subscriber accessed.", test.log.info + ) for vm in vms: vm.migrate() except VMMigrateFailedError: - error_context.context("Send the 'ALERT' message to notify the remote " - "subscriber to stop the test.", test.log.info) + error_context.context( + "Send the 'ALERT' message to notify the remote " + "subscriber to stop the test.", + test.log.info, + ) mq_publisher.alert() raise else: - error_context.context("Send the 'APPROVE' message to notify the remote " - "subscriber to continue the test.", test.log.info) + error_context.context( + "Send the 'APPROVE' message to notify the remote " + "subscriber to continue the test.", + test.log.info, + ) mq_publisher.approve() finally: ansible_log = "ansible_playbook.log" @@ -117,11 +124,12 @@ def run(test, params, env): test.error(str(err)) else: if playbook_executor.get_status() != 0: - test.fail("Ansible playbook execution failed, please check the " - "{} for details.".format(ansible_log)) + test.fail( + "Ansible playbook execution failed, please check the " + f"{ansible_log} for details." + ) test.log.info("Ansible playbook execution passed.") finally: - playbook_executor.store_playbook_log(test_harness_log_dir, - ansible_log) + playbook_executor.store_playbook_log(test_harness_log_dir, ansible_log) playbook_executor.close() mq_publisher.close() diff --git a/qemu/tests/apicv_test.py b/qemu/tests/apicv_test.py index a31481c7e2..b30104edda 100644 --- a/qemu/tests/apicv_test.py +++ b/qemu/tests/apicv_test.py @@ -1,13 +1,9 @@ -import re import os +import re import time from avocado.utils import process -from virttest import error_context -from virttest import env_process -from virttest import data_dir -from virttest import utils_net -from virttest import utils_netperf +from virttest import data_dir, env_process, error_context, utils_net, utils_netperf @error_context.context_aware @@ -26,12 +22,13 @@ def run(test, params, env): :param params: Dictionary with the test parameters :param env: Dictionary with test environment. 
""" + def reload_module(value): """ Reload module """ - process.system("rmmod %s" % module) - cmd = "modprobe %s %s=%s" % (module, mod_param, value) + process.system(f"rmmod {module}") + cmd = f"modprobe {module} {mod_param}={value}" process.system(cmd) def run_netperf(): @@ -43,24 +40,24 @@ def run_netperf(): n_client.session = session throughput = 0 for i in range(repeat_times): - output = n_client.start(server_address=host_ip, - test_option=params.get("test_option")) - throughput += float(re.findall(r"580\s+\d+\.?\d+\s+(\d+\.?\d+)", - output)[0]) + output = n_client.start( + server_address=host_ip, test_option=params.get("test_option") + ) + throughput += float(re.findall(r"580\s+\d+\.?\d+\s+(\d+\.?\d+)", output)[0]) time.sleep(1) n_server.stop() - return throughput/repeat_times + return throughput / repeat_times module = params["module_name"] mod_param = params["mod_param"] error_context.context("Enable apicv on host", test.log.info) - cmd = "cat /sys/module/%s/parameters/%s" % (module, mod_param) + cmd = f"cat /sys/module/{module}/parameters/{mod_param}" ori_apicv = process.getoutput(cmd) - if ori_apicv != 'Y': + if ori_apicv != "Y": reload_module("Y") params["start_vm"] = "yes" - env_process.preprocess_vm(test, params, env, params['main_vm']) + env_process.preprocess_vm(test, params, env, params["main_vm"]) vm = env.get_vm(params["main_vm"]) vm.verify_alive() session = vm.wait_for_login() @@ -69,21 +66,25 @@ def run_netperf(): n_server = utils_netperf.NetperfServer( address=host_ip, netperf_path=params["server_path"], - netperf_source=os.path.join(data_dir.get_deps_dir("netperf"), - params.get("netperf_server_link")), + netperf_source=os.path.join( + data_dir.get_deps_dir("netperf"), params.get("netperf_server_link") + ), username=params.get("host_username", "root"), - password=params.get("host_password")) + password=params.get("host_password"), + ) n_client = utils_netperf.NetperfClient( address=vm.get_address(), netperf_path=params["client_path"], - netperf_source=os.path.join(data_dir.get_deps_dir("netperf"), - params.get("netperf_client_link")), + netperf_source=os.path.join( + data_dir.get_deps_dir("netperf"), params.get("netperf_client_link") + ), client=params.get("shell_client", "ssh"), port=params.get("shell_port"), username=params.get("username"), password=params.get("password"), - prompt=params.get("shell_prompt", r"^root@.*[\#\$]\s*$|#")) + prompt=params.get("shell_prompt", r"^root@.*[\#\$]\s*$|#"), + ) repeat_times = params.get_numeric("repeat_times", 10) try: @@ -98,7 +99,7 @@ def run_netperf(): value_off = run_netperf() test.log.info("When disable apicv, average throughput is %s", value_off) threshold = float(params.get("threshold", 0.9)) - if value_on <= value_off*threshold: + if value_on <= value_off * threshold: test.fail("Throughput is smaller when apicv is on than off") finally: n_server.cleanup(True) diff --git a/qemu/tests/arm_cpu_test_clusters.py b/qemu/tests/arm_cpu_test_clusters.py index 0c2aeb7f01..69f312d559 100644 --- a/qemu/tests/arm_cpu_test_clusters.py +++ b/qemu/tests/arm_cpu_test_clusters.py @@ -17,25 +17,29 @@ def run(test, params, env): """ vm = env.get_vm(params["main_vm"]) vcpu_clusters_list = [2, 4] - params['vcpu_clusters'] = random.choice(vcpu_clusters_list) - params['start_vm'] = 'yes' + params["vcpu_clusters"] = random.choice(vcpu_clusters_list) + params["start_vm"] = "yes" vm.create(params=params) vm.verify_alive() session = vm.wait_for_login() - check_cluster_id = params['check_cluster_id'] - check_cluster_cpus_list = 
params['check_cluster_cpus_list'] + check_cluster_id = params["check_cluster_id"] + check_cluster_cpus_list = params["check_cluster_cpus_list"] vcpu_sockets = vm.cpuinfo.sockets vcpu_clusters = vm.cpuinfo.clusters clusters_id = session.cmd_output(check_cluster_id).strip().splitlines() - clusters_cpus_list = session.cmd_output( - check_cluster_cpus_list).strip().splitlines() + clusters_cpus_list = ( + session.cmd_output(check_cluster_cpus_list).strip().splitlines() + ) if len(clusters_id) != int(vcpu_clusters): - test.fail("cluster_id is not right: %d != %d" - % (len(clusters_id), int(vcpu_clusters))) - if len(clusters_cpus_list) != int(vcpu_sockets)*int(vcpu_clusters): - test.fail("cluster_cpus_list is not right: %d != %d" - % (len(clusters_cpus_list), int(vcpu_sockets)*int(vcpu_clusters))) + test.fail( + "cluster_id is not right: %d != %d" % (len(clusters_id), int(vcpu_clusters)) + ) + if len(clusters_cpus_list) != int(vcpu_sockets) * int(vcpu_clusters): + test.fail( + "cluster_cpus_list is not right: %d != %d" + % (len(clusters_cpus_list), int(vcpu_sockets) * int(vcpu_clusters)) + ) vm.verify_kernel_crash() session.close() diff --git a/qemu/tests/arm_gic.py b/qemu/tests/arm_gic.py index 930bb71258..e949a46ea5 100644 --- a/qemu/tests/arm_gic.py +++ b/qemu/tests/arm_gic.py @@ -1,5 +1,4 @@ from avocado.utils import process - from virttest import error_context @@ -16,19 +15,22 @@ def run(test, params, env): :param env: Dictionary with test environment. :type env: virttest.utils_env.Env """ - gic_version = params['gic_version'] - irq_cmd = params['irq_cmd'] - gic_version = (gic_version if gic_version != 'host' else - process.getoutput(irq_cmd).strip()) + gic_version = params["gic_version"] + irq_cmd = params["irq_cmd"] + gic_version = ( + gic_version if gic_version != "host" else process.getoutput(irq_cmd).strip() + ) - vm = env.get_vm(params['main_vm']) + vm = env.get_vm(params["main_vm"]) vm.verify_alive() session = vm.wait_for_login() - error_context.context('Get GIC version in the guest', test.log.info) + error_context.context("Get GIC version in the guest", test.log.info) guest_gic_version = session.cmd_output(irq_cmd).strip() - test.log.info(f'Guest GIC version: {guest_gic_version}') + test.log.info("Guest GIC version: %s", guest_gic_version) if guest_gic_version != gic_version: - test.fail(f'GIC version mismatch, expected version is "{gic_version}" ' - f'but the guest GIC version is "{guest_gic_version}"') - test.log.info('GIC version match') + test.fail( + f'GIC version mismatch, expected version is "{gic_version}" ' + f'but the guest GIC version is "{guest_gic_version}"' + ) + test.log.info("GIC version match") diff --git a/qemu/tests/audio.py b/qemu/tests/audio.py index a9c3366ebb..bdaea0ed1a 100644 --- a/qemu/tests/audio.py +++ b/qemu/tests/audio.py @@ -20,7 +20,8 @@ def run(test, params, env): audio_device = params.get("audio_device") error_context.context("Verifying whether /dev/dsp is present") - session.cmd("test -c %s" % audio_device) + session.cmd(f"test -c {audio_device}") error_context.context("Trying to write to the device") - session.cmd("dd if=/dev/urandom of=%s bs=%s count=1" % - (audio_device, random_content_size)) + session.cmd( + f"dd if=/dev/urandom of={audio_device} bs={random_content_size} count=1" + ) diff --git a/qemu/tests/avic_test.py b/qemu/tests/avic_test.py index 911d3f333e..2bad131bb9 100644 --- a/qemu/tests/avic_test.py +++ b/qemu/tests/avic_test.py @@ -1,4 +1,5 @@ from virttest import env_process + from provider.cpu_utils import check_cpu_flags diff 
--git a/qemu/tests/balloon_boot_in_pause.py b/qemu/tests/balloon_boot_in_pause.py index e4dd12deca..cfe3dfe10c 100644 --- a/qemu/tests/balloon_boot_in_pause.py +++ b/qemu/tests/balloon_boot_in_pause.py @@ -1,23 +1,20 @@ import random from avocado.core import exceptions - -from virttest import error_context -from virttest import utils_misc +from virttest import error_context, utils_misc from provider import win_driver_utils from qemu.tests.balloon_check import BallooningTest class BallooningTestPause(BallooningTest): - """ Basic functions of memory ballooning test for guest booted in paused status """ def __init__(self, test, params, env): - super(BallooningTestPause, self).__init__(test, params, env) + super().__init__(test, params, env) self.vm = env.get_vm(params["main_vm"]) # ori_mem is the original memory @@ -41,21 +38,21 @@ def memory_check(self, step, changed_mem): :return: memory size get from monitor and guest :rtype: tuple """ - error_context.context("Check memory status %s" % step, self.test.log.info) + error_context.context(f"Check memory status {step}", self.test.log.info) mmem = self.get_ballooned_memory() gmem = self.get_memory_status() if self.pre_gmem: # for rhel guest, the gmem is total memory in guest; # for windows guest, the gmem is used memory in guest. - if self.params['os_type'] == 'windows': + if self.params["os_type"] == "windows": guest_ballooned_mem = self.pre_gmem - gmem else: guest_ballooned_mem = gmem - self.pre_gmem - if (mmem - self.pre_mem) != changed_mem or (self.pre_gmem and abs( - guest_ballooned_mem - changed_mem) > 100): # pylint: disable=E0606 - self.error_report(step, self.pre_mem + changed_mem, - mmem, gmem) - self.test.fail("Balloon test failed %s" % step) + if (mmem - self.pre_mem) != changed_mem or ( + self.pre_gmem and abs(guest_ballooned_mem - changed_mem) > 100 + ): # pylint: disable=E0606 + self.error_report(step, self.pre_mem + changed_mem, mmem, gmem) + self.test.fail(f"Balloon test failed {step}") return (mmem, gmem) @error_context.context_aware @@ -66,11 +63,11 @@ def balloon_memory(self, new_mem): :param new_mem: New desired memory. 
:type new_mem: int """ - error_context.context("Change VM memory to %s" % new_mem, self.test.log.info) + error_context.context(f"Change VM memory to {new_mem}", self.test.log.info) try: self.vm.balloon(new_mem) except Exception as e: - if self.vm.monitor.verify_status('paused'): + if self.vm.monitor.verify_status("paused"): # Make sure memory not changed before the guest resumed if self.get_ballooned_memory() != self.pre_mem: self.test.fail("Memory changed before guest resumed") @@ -80,15 +77,16 @@ def balloon_memory(self, new_mem): elif new_mem == self.get_ballooned_memory(): pass else: - self.test.fail("Balloon memory fail with error message:%s" % e) + self.test.fail(f"Balloon memory fail with error message:{e}") compare_mem = new_mem balloon_timeout = float(self.params.get("balloon_timeout", 240)) - status = utils_misc.wait_for((lambda: compare_mem == - self.get_ballooned_memory()), - balloon_timeout) + status = utils_misc.wait_for( + (lambda: compare_mem == self.get_ballooned_memory()), balloon_timeout + ) if status is None: - self.test.fail("Failed to balloon memory to expect value during " - "%ss" % balloon_timeout) + self.test.fail( + "Failed to balloon memory to expect value during " f"{balloon_timeout}s" + ) def get_memory_boundary(self): """ @@ -116,8 +114,7 @@ def error_report(self, step, expect_value, monitor_value, guest_value): """ self.test.log.error("Memory size mismatch %s:\n", step) error_msg = "Wanted to be changed: %s\n" % (expect_value - self.pre_mem) - error_msg += "Changed in monitor: %s\n" % (monitor_value - - self.pre_mem) + error_msg += "Changed in monitor: %s\n" % (monitor_value - self.pre_mem) if self.pre_gmem: error_msg += "Changed in guest: %s\n" % (guest_value - self.pre_gmem) self.test.log.error(error_msg) @@ -139,7 +136,6 @@ def get_memory_status(self): class BallooningTestPauseLinux(BallooningTestPause): - """ Linux memory ballooning test for guest booted in paused status """ @@ -176,6 +172,7 @@ def run(test, params, env): :param params: Dictionary with the test parameters :param env: Dictionary with test environment. 
""" + def _memory_check_after_sub_test(): """ Check memory status after subtest, the changed_mem is 0 @@ -185,56 +182,59 @@ def _memory_check_after_sub_test(): except exceptions.TestFail: return None - if params['os_type'] == 'windows': + if params["os_type"] == "windows": balloon_test = BallooningTestPauseWin(test, params, env) else: balloon_test = BallooningTestPauseLinux(test, params, env) min_sz, max_sz = balloon_test.get_memory_boundary() - for tag in params.objects('test_tags'): + for tag in params.objects("test_tags"): vm = env.get_vm(params["main_vm"]) - if vm.monitor.verify_status('paused'): - error_context.context("Running balloon %s test when" - " the guest in paused status" % tag, - test.log.info) + if vm.monitor.verify_status("paused"): + error_context.context( + f"Running balloon {tag} test when" " the guest in paused status", + test.log.info, + ) else: - error_context.context("Running balloon %s test after" - " the guest turned to running status" % tag, - test.log.info) + error_context.context( + f"Running balloon {tag} test after" + " the guest turned to running status", + test.log.info, + ) params_tag = params.object_params(tag) - balloon_type = params_tag['balloon_type'] - if balloon_type == 'evict': + balloon_type = params_tag["balloon_type"] + if balloon_type == "evict": expect_mem = int(random.uniform(min_sz, balloon_test.pre_mem)) else: expect_mem = int(random.uniform(balloon_test.pre_mem, max_sz)) balloon_test.balloon_memory(expect_mem) changed_memory = expect_mem - balloon_test.pre_mem - mmem, gmem = balloon_test.memory_check("after %s memory" % tag, - changed_memory) + mmem, gmem = balloon_test.memory_check(f"after {tag} memory", changed_memory) balloon_test.pre_mem = mmem balloon_test.pre_gmem = gmem subtest = params.get("sub_test_after_balloon") if subtest: - error_context.context("Running subtest after guest balloon test", - test.log.info) - qemu_should_quit = balloon_test.run_balloon_sub_test(test, params, - env, subtest) + error_context.context("Running subtest after guest balloon test", test.log.info) + qemu_should_quit = balloon_test.run_balloon_sub_test(test, params, env, subtest) if qemu_should_quit == 1: return sleep_before_check = int(params.get("sleep_before_check", 0)) timeout = int(params.get("balloon_timeout", 100)) + sleep_before_check - msg = "Wait memory balloon back after %s " % subtest - output = utils_misc.wait_for(_memory_check_after_sub_test, timeout, - sleep_before_check, 5, msg) + msg = f"Wait memory balloon back after {subtest} " + output = utils_misc.wait_for( + _memory_check_after_sub_test, timeout, sleep_before_check, 5, msg + ) if output is None: - test.fail("Check memory status failed after subtest " - "after %s seconds" % timeout) + test.fail( + "Check memory status failed after subtest " f"after {timeout} seconds" + ) - error_context.context("Reset guest memory to original one after all the " - "test", test.log.info) + error_context.context( + "Reset guest memory to original one after all the " "test", test.log.info + ) balloon_test.reset_memory() # for windows guest, disable/uninstall driver to get memory leak based on # driver verifier is enabled diff --git a/qemu/tests/balloon_check.py b/qemu/tests/balloon_check.py index 37d940a92f..74f20c32cd 100644 --- a/qemu/tests/balloon_check.py +++ b/qemu/tests/balloon_check.py @@ -1,38 +1,35 @@ -import time -import re import random +import re +import time from avocado.core import exceptions from avocado.utils import process - -from virttest import qemu_monitor -from virttest import 
utils_test -from virttest import utils_misc -from virttest import error_context +from virttest import error_context, qemu_monitor, utils_misc, utils_test from virttest.utils_numeric import normalize_data_size from virttest.utils_test.qemu import MemoryBaseTest + from provider import win_driver_utils class BallooningTest(MemoryBaseTest): - """ Provide basic functions for memory ballooning test cases """ def __init__(self, test, params, env): self.test_round = 0 - super(BallooningTest, self).__init__(test, params, env) + super().__init__(test, params, env) self.vm = env.get_vm(params["main_vm"]) if params.get("paused_after_start_vm") != "yes": self.params["balloon_test_setup_ready"] = False - if self.params.get('os_type') == 'windows': + if self.params.get("os_type") == "windows": sleep_time = 180 else: sleep_time = 90 - self.test.log.info("Waiting %d seconds for guest's " - "applications up", sleep_time) + self.test.log.info( + "Waiting %d seconds for guest's " "applications up", sleep_time + ) time.sleep(sleep_time) self.params["balloon_test_setup_ready"] = True # ori_mem/gmem is original memory @@ -56,10 +53,11 @@ def get_ballooned_memory(self): output = self.vm.monitor.info("balloon") ballooned_mem = int(re.findall(r"\d+", str(output))[0]) if self.vm.monitor.protocol == "qmp": - ballooned_mem = ballooned_mem / (1024 ** 2) + ballooned_mem = ballooned_mem / (1024**2) else: - self.test.log.info('could not get balloon_memory, cause ' - 'vm.monitor is None') + self.test.log.info( + "could not get balloon_memory, cause " "vm.monitor is None" + ) return 0 except qemu_monitor.MonitorError as emsg: self.test.log.error(emsg) @@ -78,15 +76,15 @@ def memory_check(self, step, ballooned_mem): :return: memory size get from monitor and guest :rtype: tuple """ - error_context.context("Check memory status %s" % step, self.test.log.info) + error_context.context(f"Check memory status {step}", self.test.log.info) mmem = self.get_ballooned_memory() gmem = self.get_memory_status() - gcompare_threshold = int(self.params.get("guest_compare_threshold", - 100)) + gcompare_threshold = int(self.params.get("guest_compare_threshold", 100)) guest_mem_ratio = self.params.get("guest_mem_ratio") if guest_mem_ratio: - gcompare_threshold = max(gcompare_threshold, - float(guest_mem_ratio) * self.pre_mem) + gcompare_threshold = max( + gcompare_threshold, float(guest_mem_ratio) * self.pre_mem + ) # if set windows guest balloon in (1,100),free # memory of the OS should be as small as possible. if self.pre_mem + ballooned_mem <= 100: @@ -94,23 +92,25 @@ def memory_check(self, step, ballooned_mem): session = self.vm.wait_for_login(timeout=timeout) try: if self.get_win_mon_free_mem(session) > gcompare_threshold: - self.test.fail("Balloon_min test failed %s" % step) + self.test.fail(f"Balloon_min test failed {step}") finally: session.close() else: # for rhel guest, the gmem is total memory in guest; # for windows guest or balloon_opt_deflate_on_oom condition, the gmem is # used memory in guest. 
- if self.params['os_type'] == 'windows' or \ - self.params.get("balloon_opt_deflate_on_oom") == "yes": + if ( + self.params["os_type"] == "windows" + or self.params.get("balloon_opt_deflate_on_oom") == "yes" + ): guest_ballooned_mem = self.pre_gmem - gmem else: guest_ballooned_mem = gmem - self.pre_gmem - if (mmem - self.pre_mem != ballooned_mem or - (abs(guest_ballooned_mem - ballooned_mem) > gcompare_threshold)): - self.error_report(step, self.pre_mem + ballooned_mem, mmem, - gmem) - raise exceptions.TestFail("Balloon test failed %s" % step) + if mmem - self.pre_mem != ballooned_mem or ( + abs(guest_ballooned_mem - ballooned_mem) > gcompare_threshold + ): + self.error_report(step, self.pre_mem + ballooned_mem, mmem, gmem) + raise exceptions.TestFail(f"Balloon test failed {step}") return mmem, gmem def enable_polling(self, device_path): @@ -122,8 +122,9 @@ def enable_polling(self, device_path): polling_interval = int(self.params.get("polling_interval", 2)) sleep_time = int(self.params.get("polling_sleep_time", 20)) error_context.context("Enable polling", self.test.log.info) - self.vm.monitor.qom_set(device_path, "guest-stats-polling-interval", - polling_interval) + self.vm.monitor.qom_set( + device_path, "guest-stats-polling-interval", polling_interval + ) time.sleep(sleep_time) def get_memory_stat(self, device_path): @@ -143,8 +144,9 @@ def _memory_stats_compare(self, keyname, memory_stat_qmp): """ check_mem_ratio = float(self.params.get("check_mem_ratio", 0.1)) check_mem_diff = float(self.params.get("check_mem_diff", 150)) - error_context.context("Get memory from guest aligned" - " with %s." % keyname, self.test.log.info) + error_context.context( + "Get memory from guest aligned" f" with {keyname}.", self.test.log.info + ) if keyname == "stat-free-memory": guest_mem = self.get_guest_free_mem(self.vm) elif keyname == "stat-total-memory": @@ -154,18 +156,17 @@ def _memory_stats_compare(self, keyname, memory_stat_qmp): else: raise ValueError(f"unexpected keyname: {keyname}") - memory_stat_qmp = "%sB" % memory_stat_qmp - memory_stat_qmp = int(float(utils_misc.normalize_data_size( - memory_stat_qmp, order_magnitude="M"))) + memory_stat_qmp = f"{memory_stat_qmp}B" + memory_stat_qmp = int( + float(utils_misc.normalize_data_size(memory_stat_qmp, order_magnitude="M")) + ) mem_diff = float(abs(guest_mem - memory_stat_qmp)) if mem_diff > guest_mem * check_mem_ratio and mem_diff > check_mem_diff: - self.test.fail("%s of guest %s is not equal to %s" - " in qmp,the acceptable ratio/diff" - " is %s/%s" % (keyname, - guest_mem, - memory_stat_qmp, - check_mem_ratio, - check_mem_diff)) + self.test.fail( + f"{keyname} of guest {guest_mem} is not equal to {memory_stat_qmp}" + " in qmp,the acceptable ratio/diff" + f" is {check_mem_ratio}/{check_mem_diff}" + ) def memory_stats_check(self, keyname, enabled): """ @@ -178,15 +179,14 @@ def memory_stats_check(self, keyname, enabled): base_path = self.params.get("base_path", "/machine/peripheral/") device = self.params["balloon"] device_path = base_path + device - mem_stat_disabled = 0xffffffffffffffff + mem_stat_disabled = 0xFFFFFFFFFFFFFFFF self.enable_polling(device_path) - memory_stat_qmp = self.get_memory_stat(device_path)['stats'][keyname] + memory_stat_qmp = self.get_memory_stat(device_path)["stats"][keyname] - stat_enabled = (memory_stat_qmp != mem_stat_disabled) + stat_enabled = memory_stat_qmp != mem_stat_disabled if stat_enabled != enabled: - self.test.fail("Memory statistics reporting is not working as" - " expected") + self.test.fail("Memory 
statistics reporting is not working as" " expected") elif enabled: self._memory_stats_compare(keyname, memory_stat_qmp) @@ -200,15 +200,18 @@ def balloon_memory(self, new_mem): :type new_mem: int """ self.env["balloon_test"] = 0 - error_context.context("Change VM memory to %s" % new_mem, self.test.log.info) + error_context.context(f"Change VM memory to {new_mem}", self.test.log.info) try: self.vm.balloon(new_mem) self.env["balloon_test"] = 1 except Exception as e: - if (self.params.get('illegal_value_check', 'no') == 'no' - and new_mem != self.get_ballooned_memory()): - raise exceptions.TestFail("Balloon memory fail with error" - " message: %s" % e) + if ( + self.params.get("illegal_value_check", "no") == "no" + and new_mem != self.get_ballooned_memory() + ): + raise exceptions.TestFail( + "Balloon memory fail with error" f" message: {e}" + ) if new_mem > self.ori_mem: compare_mem = self.ori_mem elif new_mem == 0: @@ -220,12 +223,13 @@ def balloon_memory(self, new_mem): compare_mem = new_mem balloon_timeout = float(self.params.get("balloon_timeout", 480)) - status = utils_misc.wait_for(( - lambda: compare_mem == self.get_ballooned_memory()), - balloon_timeout) + status = utils_misc.wait_for( + (lambda: compare_mem == self.get_ballooned_memory()), balloon_timeout + ) if status is None: - raise exceptions.TestFail("Failed to balloon memory to expect" - " value during %ss" % balloon_timeout) + raise exceptions.TestFail( + "Failed to balloon memory to expect" f" value during {balloon_timeout}s" + ) def run_balloon_sub_test(self, test, params, env, test_tag): """ @@ -244,8 +248,7 @@ def run_balloon_sub_test(self, test, params, env, test_tag): 1 means the process quit after sub test. :rtype: int """ - utils_test.run_virt_sub_test(test, params, env, - sub_type=test_tag) + utils_test.run_virt_sub_test(test, params, env, sub_type=test_tag) qemu_quit_after_test = -1 if "shutdown" in test_tag: self.test.log.info("Guest shutdown normally after balloon") @@ -278,13 +281,15 @@ def wait_for_balloon_complete(self, timeout): self.test.log.info("Wait until guest memory don't change") threshold = int(self.params.get("guest_stable_threshold", 100)) is_stable = self._mem_state(threshold) - ret = utils_misc.wait_for(lambda: next(is_stable), timeout, - step=float(self.params.get("guest_check_step", - 10.0))) + ret = utils_misc.wait_for( + lambda: next(is_stable), + timeout, + step=float(self.params.get("guest_check_step", 10.0)), + ) if not ret: self.test.log.warning("guest memory is not stable after %ss", timeout) - def get_memory_boundary(self, balloon_type=''): + def get_memory_boundary(self, balloon_type=""): """ Get the legal memory boundary for balloon operation. 
@@ -297,21 +302,19 @@ def get_memory_boundary(self, balloon_type=''): min_size = self.params.get("minmem", "512M") min_size = int(float(utils_misc.normalize_data_size(min_size))) balloon_buffer = int(self.params.get("balloon_buffer", 300)) - if self.params.get('os_type') == 'windows': + if self.params.get("os_type") == "windows": self.test.log.info("Get windows miminum balloon value:") self.vm.balloon(1) balloon_timeout = self.params.get("balloon_timeout", 900) self.wait_for_balloon_complete(balloon_timeout) - used_size = min((self.get_ballooned_memory() + balloon_buffer), - max_size) + used_size = min((self.get_ballooned_memory() + balloon_buffer), max_size) self.vm.balloon(max_size) self.wait_for_balloon_complete(balloon_timeout) self.ori_gmem = self.get_memory_status() else: - vm_total = self.get_memory_status() + self.get_memory_status() vm_mem_free = self.get_free_mem() - used_size = min((self.ori_mem - vm_mem_free + balloon_buffer), - max_size) + used_size = min((self.ori_mem - vm_mem_free + balloon_buffer), max_size) current_mem = self.get_ballooned_memory() if balloon_type == "enlarge": min_size = current_mem @@ -332,6 +335,7 @@ def run_ballooning_test(self, expect_mem, tag): :return: If test should quit after test :rtype: bool """ + def _memory_check_after_sub_test(): try: output = self.memory_check("after subtest", ballooned_mem) # pylint: disable=E0606 @@ -351,13 +355,14 @@ def _memory_check_after_sub_test(): # for illegal enlarge test if expect_mem > self.ori_mem: ballooned_memory = self.ori_mem - self.pre_mem - self.memory_check("after %s memory" % tag, - ballooned_memory) - if (params_tag.get("run_sub_test_after_balloon", "no") == "yes" and - params_tag.get('sub_test_after_balloon')): - sub_type = params_tag['sub_test_after_balloon'] - should_quit = self.run_balloon_sub_test(self.test, params_tag, - self.env, sub_type) + self.memory_check(f"after {tag} memory", ballooned_memory) + if params_tag.get( + "run_sub_test_after_balloon", "no" + ) == "yes" and params_tag.get("sub_test_after_balloon"): + sub_type = params_tag["sub_test_after_balloon"] + should_quit = self.run_balloon_sub_test( + self.test, params_tag, self.env, sub_type + ) if should_quit == 1: return True # s4 after balloon test @@ -365,13 +370,13 @@ def _memory_check_after_sub_test(): expect_mem = self.ori_mem sleep_before_check = int(self.params.get("sleep_before_check", 0)) - timeout = (int(self.params.get("balloon_timeout", 100)) + - sleep_before_check) + timeout = int(self.params.get("balloon_timeout", 100)) + sleep_before_check ballooned_mem = expect_mem - self.pre_mem msg = "Wait memory balloon back after " - msg += params_tag['sub_test_after_balloon'] - ret = utils_misc.wait_for(_memory_check_after_sub_test, - timeout, sleep_before_check, 5, msg) + msg += params_tag["sub_test_after_balloon"] + ret = utils_misc.wait_for( + _memory_check_after_sub_test, timeout, sleep_before_check, 5, msg + ) if not ret: self.test.fail("After sub test, memory check failed") return False @@ -428,7 +433,6 @@ def _balloon_post_action(self): class BallooningTestWin(BallooningTest): - """ Windows memory ballooning test """ @@ -454,8 +458,7 @@ def error_report(self, step, expect_value, monitor_value, guest_value): self.test.log.error("Memory size mismatch %s:\n", step) error_msg = "Wanted to be changed: %s\n" % (expect_value - self.pre_mem) if monitor_value: - error_msg += "Changed in monitor: %s\n" % (monitor_value - - self.pre_mem) + error_msg += "Changed in monitor: %s\n" % (monitor_value - self.pre_mem) error_msg += "Changed in 
guest: %s\n" % (guest_value - self.pre_gmem) self.test.log.error(error_msg) @@ -478,9 +481,8 @@ def get_win_mon_free_mem(self, session): cmd = r'typeperf "\Memory\Free & Zero Page List Bytes" -sc 1' status, output = session.cmd_status_output(cmd) if status == 0: - free = "%s" % re.findall(r"\d+\.\d+", output)[2] - free = float(utils_misc.normalize_data_size(free, - order_magnitude="M")) + free = "{}".format(re.findall(r"\d+\.\d+", output)[2]) + free = float(utils_misc.normalize_data_size(free, order_magnitude="M")) return int(free) else: self.test.fail("Failed to get windows guest free memory") @@ -493,8 +495,7 @@ def get_disk_vol(self, session): """ key = "VolumeName like 'virtio-win%'" try: - return utils_misc.get_win_disk_vol(session, - condition=key) + return utils_misc.get_win_disk_vol(session, condition=key) except Exception: self.test.error("Could not get virtio-win disk vol!") @@ -508,19 +509,18 @@ def operate_balloon_service(self, session, operation): uninstall/stop :return: cmd execution output """ - error_context.context("%s Balloon Service in guest." % operation, - self.test.log.info) + error_context.context( + f"{operation} Balloon Service in guest.", self.test.log.info + ) drive_letter = self.get_disk_vol(session) try: - operate_cmd = self.params["%s_balloon_service" - % operation] % drive_letter + operate_cmd = self.params[f"{operation}_balloon_service"] % drive_letter if operation == "status": output = session.cmd_output(operate_cmd) else: output = session.cmd(operate_cmd) except Exception as err: - self.test.error("%s balloon service failed! Error msg is:\n%s" - % (operation, err)) + self.test.error(f"{operation} balloon service failed! Error msg is:\n{err}") return output @error_context.context_aware @@ -532,8 +532,9 @@ def configure_balloon_service(self, session): :param operation: operation against balloon serive, e.g. run/status/ uninstall/stop """ - error_context.context("Check Balloon Service status before install" - "service", self.test.log.info) + error_context.context( + "Check Balloon Service status before install" "service", self.test.log.info + ) output = self.operate_balloon_service(session, "status") if re.search("running", output.lower(), re.M): self.test.log.info("Balloon service is already running !") @@ -546,7 +547,6 @@ def configure_balloon_service(self, session): class BallooningTestLinux(BallooningTest): - """ Linux memory ballooning test """ @@ -563,11 +563,11 @@ def error_report(self, step, expect_value, monitor_value, guest_value): None """ self.test.log.error("Memory size mismatch %s:\n", step) - error_msg = "Assigner to VM: %s\n" % expect_value + error_msg = f"Assigner to VM: {expect_value}\n" if monitor_value: - error_msg += "Reported by monitor: %s\n" % monitor_value + error_msg += f"Reported by monitor: {monitor_value}\n" if guest_value: - error_msg += "Reported by guest OS: %s\n" % guest_value + error_msg += f"Reported by guest OS: {guest_value}\n" self.test.log.error(error_msg) def get_memory_status(self): @@ -592,7 +592,7 @@ def run(test, params, env): :param params: Dictionary with the test parameters :param env: Dictionary with test environment. 
""" - if params['os_type'] == 'windows': + if params["os_type"] == "windows": balloon_test = BallooningTestWin(test, params, env) else: balloon_test = BallooningTestLinux(test, params, env) @@ -600,38 +600,43 @@ def run(test, params, env): if params.get("balloon_opt_deflate_on_oom") == "yes": guest_ori_mem = balloon_test.get_total_mem() - for tag in params.objects('test_tags'): - error_context.context("Running %s test" % tag, test.log.info) + for tag in params.objects("test_tags"): + error_context.context(f"Running {tag} test", test.log.info) params_tag = params.object_params(tag) - if params_tag.get('expect_memory'): - expect_mem = int(params_tag.get('expect_memory')) - elif params_tag.get('expect_memory_ratio'): - expect_mem = int(balloon_test.ori_mem * - float(params_tag.get('expect_memory_ratio'))) + if params_tag.get("expect_memory"): + expect_mem = int(params_tag.get("expect_memory")) + elif params_tag.get("expect_memory_ratio"): + expect_mem = int( + balloon_test.ori_mem * float(params_tag.get("expect_memory_ratio")) + ) # set evict illegal value to "0" for both linux and windows - elif params_tag.get('illegal_value_check', 'no') == 'yes' and tag == 'enlarge': + elif params_tag.get("illegal_value_check", "no") == "yes" and tag == "enlarge": expect_mem = int(balloon_test.ori_mem + random.uniform(1, 1000)) else: - balloon_type = params_tag['balloon_type'] + balloon_type = params_tag["balloon_type"] min_sz, max_sz = balloon_test.get_memory_boundary(balloon_type) expect_mem = int(random.uniform(min_sz, max_sz)) - if params_tag.get('minimum_value_check', 'no') == 'yes': + if params_tag.get("minimum_value_check", "no") == "yes": expect_mem = int(min_sz) quit_after_test = balloon_test.run_ballooning_test(expect_mem, tag) if params.get("balloon_opt_deflate_on_oom") == "yes": guest_curr_mem = balloon_test.get_total_mem() if guest_ori_mem != guest_curr_mem: - balloon_test.error_report("after %s memory" % tag, - expect_value=guest_ori_mem, - guest_value=guest_curr_mem) - test.fail("Balloon test failed %s" % tag) + balloon_test.error_report( + f"after {tag} memory", + expect_value=guest_ori_mem, + guest_value=guest_curr_mem, + ) + test.fail(f"Balloon test failed {tag}") if quit_after_test: return try: balloon_test.reset_memory() - if params.get("balloon_opt_free_page_reporting") == "yes" and \ - params.get("os_type") == "linux": + if ( + params.get("balloon_opt_free_page_reporting") == "yes" + and params.get("os_type") == "linux" + ): get_res_cmd = params["get_res_cmd"] % balloon_test.vm.get_pid() memhog_cmd = params["memhog_cmd"] consumed_mem = float(normalize_data_size(params["consumed_mem"])) @@ -641,8 +646,9 @@ def run(test, params, env): res2 = float(normalize_data_size(process.getoutput(get_res_cmd))) time.sleep(30) res3 = float(normalize_data_size(process.getoutput(get_res_cmd))) - test.log.info("The RES values are %sM, %sM, and %sM sequentially", - res1, res2, res3) + test.log.info( + "The RES values are %sM, %sM, and %sM sequentially", res1, res2, res3 + ) if res2 - res1 < consumed_mem * 0.5: test.error("QEMU should consume more memory") if res3 - res1 > res1 * 0.1: diff --git a/qemu/tests/balloon_disable.py b/qemu/tests/balloon_disable.py index 86efdccbdc..da9711f59d 100644 --- a/qemu/tests/balloon_disable.py +++ b/qemu/tests/balloon_disable.py @@ -13,13 +13,14 @@ def run(test, params, env): """ vm = env.get_vm(params["main_vm"]) vm.verify_alive() - session = vm.wait_for_login( - timeout=float(params.get("login_timeout", 240))) + session = 
vm.wait_for_login(timeout=float(params.get("login_timeout", 240))) try: output = vm.monitor.info("balloon") except qemu_monitor.QMPCmdError as e: output = str(e) - if not ("has not been activated" in output or - "No balloon device has been activated" in output): + if not ( + "has not been activated" in output + or "No balloon device has been activated" in output + ): test.fail("Balloon driver still on when disable it on command line") session.close() diff --git a/qemu/tests/balloon_hotplug.py b/qemu/tests/balloon_hotplug.py index 7cb585b18c..22b7b475d5 100644 --- a/qemu/tests/balloon_hotplug.py +++ b/qemu/tests/balloon_hotplug.py @@ -2,12 +2,11 @@ import re import time +from virttest import error_context, utils_test from virttest.qemu_devices import qdevices -from virttest import error_context -from virttest import utils_test -from qemu.tests.balloon_check import BallooningTestWin -from qemu.tests.balloon_check import BallooningTestLinux + from provider import win_driver_utils +from qemu.tests.balloon_check import BallooningTestLinux, BallooningTestWin @error_context.context_aware @@ -36,23 +35,25 @@ def run_pm_test(pm_test, plug_type): :param pm_test: power management test name,e.g. reboot/shutdown :param plug_type:balloon device plug operation,e.g.hot_plug or hot_unplug """ - error_context.context("Run %s test after %s balloon device" - % (pm_test, plug_type), test.log.info) + error_context.context( + f"Run {pm_test} test after {plug_type} balloon device", test.log.info + ) utils_test.run_virt_sub_test(test, params, env, pm_test) def enable_balloon_service(): """ Install balloon service and check its status in windows guests """ - if params['os_type'] != 'windows': + if params["os_type"] != "windows": return - error_context.context("Install and check balloon service in windows " - "guest", test.log.info) + error_context.context( + "Install and check balloon service in windows " "guest", test.log.info + ) session = vm.wait_for_login() driver_name = params.get("driver_name", "balloon") - session = utils_test.qemu.windrv_check_running_verifier(session, - vm, test, - driver_name) + session = utils_test.qemu.windrv_check_running_verifier( + session, vm, test, driver_name + ) balloon_test.configure_balloon_service(session) output = balloon_test.operate_balloon_service(session, "status") @@ -68,30 +69,30 @@ def enable_balloon_service(): vm.verify_alive() balloon_device = params.get("balloon_device", "virtio-balloon-pci") - error_context.context("Hotplug and unplug balloon device in a loop", - test.log.info) + error_context.context("Hotplug and unplug balloon device in a loop", test.log.info) for i in range(int(params.get("balloon_repeats", 3))): vm.devices.set_dirty() - new_dev = qdevices.QDevice(balloon_device, - {'id': 'balloon%d' % idx}, - parent_bus={'aobject': params.get( - "balloon_bus", 'pci.0')}) - - error_context.context("Hotplug balloon device for %d times" % (i+1), - test.log.info) + new_dev = qdevices.QDevice( + balloon_device, + {"id": "balloon%d" % idx}, + parent_bus={"aobject": params.get("balloon_bus", "pci.0")}, + ) + + error_context.context( + "Hotplug balloon device for %d times" % (i + 1), test.log.info + ) out = vm.devices.simple_hotplug(new_dev, vm.monitor) if out[1] is False: - test.fail("Failed to hotplug balloon in iteration %s, %s" - % (i, out[0])) + test.fail(f"Failed to hotplug balloon in iteration {i}, {out[0]}") # temporary workaround for migration vm.params["balloon"] = "balloon%d" % idx vm.params["balloon_dev_devid"] = "balloon%d" % idx 
vm.params["balloon_dev_add_bus"] = "yes" - devs = vm.devices.get_by_params({"id": 'balloon%d' % idx}) + devs = vm.devices.get_by_params({"id": "balloon%d" % idx}) vm.params["balloon_pci_bus"] = devs[0]["bus"] - if params['os_type'] == 'windows': + if params["os_type"] == "windows": balloon_test = BallooningTestWin(test, params, env) else: balloon_test = BallooningTestLinux(test, params, env) @@ -99,8 +100,9 @@ def enable_balloon_service(): enable_balloon_service() - error_context.context("Check whether balloon device work after hotplug", - test.log.info) + error_context.context( + "Check whether balloon device work after hotplug", test.log.info + ) balloon_test.balloon_memory(int(random.uniform(min_sz, max_sz))) if pm_test_after_plug: @@ -112,22 +114,24 @@ def enable_balloon_service(): else: return - if params['os_type'] == 'windows': + if params["os_type"] == "windows": time.sleep(10) - error_context.context("Unplug balloon device for %d times" % (i+1), - test.log.info) + error_context.context( + "Unplug balloon device for %d times" % (i + 1), test.log.info + ) - out = vm.devices.simple_unplug(devs[0].get_aid(), vm.monitor, - timeout=unplug_timeout) + out = vm.devices.simple_unplug( + devs[0].get_aid(), vm.monitor, timeout=unplug_timeout + ) if out[1] is False: - test.fail("Failed to unplug balloon in iteration %s, %s" - % (i, out[0])) + test.fail(f"Failed to unplug balloon in iteration {i}, {out[0]}") time.sleep(2) if params.get("migrate_after_unplug", "no") == "yes": - error_context.context("Migrate after hotunplug balloon device", - test.log.info) + error_context.context( + "Migrate after hotunplug balloon device", test.log.info + ) # temporary workaround for migration del vm.params["balloon"] del vm.params["balloon_dev_devid"] @@ -145,8 +149,7 @@ def enable_balloon_service(): if params.get("os_type") == "windows": out = vm.devices.simple_hotplug(new_dev, vm.monitor) if out[1] is False: - test.fail("Failed to hotplug balloon at last, " - "output is %s" % out[0]) + test.fail("Failed to hotplug balloon at last, " f"output is {out[0]}") win_driver_utils.memory_leak_check(vm, test, params) error_context.context("Verify guest alive!", test.log.info) vm.verify_kernel_crash() diff --git a/qemu/tests/balloon_memhp.py b/qemu/tests/balloon_memhp.py index 9e4391ccce..b63465d768 100644 --- a/qemu/tests/balloon_memhp.py +++ b/qemu/tests/balloon_memhp.py @@ -1,12 +1,10 @@ import random -from virttest import utils_test -from virttest import error_context -from virttest import utils_numeric +from virttest import error_context, utils_numeric, utils_test from virttest.utils_test.qemu import MemoryHotplugTest -from qemu.tests.balloon_check import BallooningTestWin -from qemu.tests.balloon_check import BallooningTestLinux + from provider import win_driver_utils +from qemu.tests.balloon_check import BallooningTestLinux, BallooningTestWin @error_context.context_aware @@ -25,20 +23,22 @@ def run(test, params, env): 10) uninstall balloon service and clear driver verifier(only for windows guest) """ + def check_memory(): """ Check guest memory """ - if params['os_type'] == 'windows': + if params["os_type"] == "windows": memhp_test.check_memory(vm, wait_time=3) else: expected_mem = new_mem + mem_dev_sz guest_mem_size = memhp_test.get_guest_total_mem(vm) threshold = float(params.get("threshold", 0.1)) - if expected_mem - guest_mem_size > guest_mem_size*threshold: - msg = ("Assigned '%s MB' memory to '%s', " - "but '%s MB' memory detect by OS" % - (expected_mem, vm.name, guest_mem_size)) + if expected_mem - 
guest_mem_size > guest_mem_size * threshold: + msg = ( + f"Assigned '{expected_mem} MB' memory to '{vm.name}', " + f"but '{guest_mem_size} MB' memory detect by OS" + ) test.fail(msg) error_context.context("Boot guest with balloon device", test.log.info) @@ -46,15 +46,15 @@ def check_memory(): vm.verify_alive() session = vm.wait_for_login() - if params['os_type'] == 'linux': + if params["os_type"] == "linux": balloon_test = BallooningTestLinux(test, params, env) else: driver_name = params.get("driver_name", "balloon") - session = utils_test.qemu.windrv_check_running_verifier(session, vm, - test, driver_name) + session = utils_test.qemu.windrv_check_running_verifier( + session, vm, test, driver_name + ) balloon_test = BallooningTestWin(test, params, env) - error_context.context("Config balloon service in guest", - test.log.info) + error_context.context("Config balloon service in guest", test.log.info) balloon_test.configure_balloon_service(session) memhp_test = MemoryHotplugTest(test, params, env) diff --git a/qemu/tests/balloon_minimum.py b/qemu/tests/balloon_minimum.py index 0808deaa27..eaa247c596 100644 --- a/qemu/tests/balloon_minimum.py +++ b/qemu/tests/balloon_minimum.py @@ -1,10 +1,10 @@ import time -from virttest import utils_test -from virttest import error_context +from virttest import error_context, utils_test from virttest.qemu_monitor import QMPEventError -from qemu.tests.balloon_check import BallooningTestWin + from provider import win_driver_utils +from qemu.tests.balloon_check import BallooningTestWin @error_context.context_aware @@ -26,8 +26,9 @@ def run(test, params, env): vm = env.get_vm(params["main_vm"]) session = vm.wait_for_login() driver_name = params.get("driver_name", "balloon") - session = utils_test.qemu.windrv_check_running_verifier(session, vm, - test, driver_name) + session = utils_test.qemu.windrv_check_running_verifier( + session, vm, test, driver_name + ) balloon_test = BallooningTestWin(test, params, env) expect_mem = int(params["expect_memory"]) balloon_test.pre_mem = balloon_test.get_ballooned_memory() diff --git a/qemu/tests/balloon_sc_interrogate.py b/qemu/tests/balloon_sc_interrogate.py index ce6f0a58d3..ece0115ef2 100644 --- a/qemu/tests/balloon_sc_interrogate.py +++ b/qemu/tests/balloon_sc_interrogate.py @@ -1,9 +1,9 @@ import re -from virttest import utils_test -from virttest import error_context -from qemu.tests.balloon_check import BallooningTestWin +from virttest import error_context, utils_test + from provider import win_driver_utils +from qemu.tests.balloon_check import BallooningTestWin @error_context.context_aware @@ -35,8 +35,9 @@ def interrogate_balloon_service(session): session = vm.wait_for_login() driver_name = params.get("driver_name", "balloon") - session = utils_test.qemu.windrv_check_running_verifier(session, vm, - test, driver_name) + session = utils_test.qemu.windrv_check_running_verifier( + session, vm, test, driver_name + ) balloon_test = BallooningTestWin(test, params, env) err = None try: @@ -49,16 +50,18 @@ def interrogate_balloon_service(session): # Check ballloon serivce status again output = balloon_test.operate_balloon_service(session, "status") if not re.search("running", output.lower(), re.M): - test.fail("Balloon service is not running after sc interrogate!" - "Output is: \n %s" % output) + test.fail( + "Balloon service is not running after sc interrogate!" 
+ f"Output is: \n {output}" + ) # for windows guest, disable/uninstall driver to get memory leak based on # driver verifier is enabled if params.get("os_type") == "windows": win_driver_utils.memory_leak_check(vm, test, params) - except Exception as err: + except Exception: pass finally: session.close() if err: - raise err # pylint: disable=E0702 + raise err # pylint: disable=E0702 diff --git a/qemu/tests/balloon_service.py b/qemu/tests/balloon_service.py index 8624eefdbb..cc577f7514 100644 --- a/qemu/tests/balloon_service.py +++ b/qemu/tests/balloon_service.py @@ -1,11 +1,10 @@ -import time import random +import time + +from virttest import error_context, utils_test -from virttest import utils_test -from virttest import error_context -from qemu.tests.balloon_check import BallooningTestWin -from qemu.tests.balloon_check import BallooningTestLinux from provider import win_driver_utils +from qemu.tests.balloon_check import BallooningTestLinux, BallooningTestWin @error_context.context_aware @@ -38,25 +37,27 @@ def balloon_memory(vm, mem_check, min_sz, max_sz): test.log.info("repeat times: %d", repeat_times) while repeat_times: - for tag in params.objects('test_tags'): - error_context.context("Running %s test" % tag, test.log.info) + for tag in params.objects("test_tags"): + error_context.context(f"Running {tag} test", test.log.info) params_tag = params.object_params(tag) - balloon_type = params_tag['balloon_type'] - if balloon_type == 'evict': - expect_mem = int(random.uniform( - min_sz, balloon_test.get_ballooned_memory())) + balloon_type = params_tag["balloon_type"] + if balloon_type == "evict": + expect_mem = int( + random.uniform(min_sz, balloon_test.get_ballooned_memory()) + ) else: - expect_mem = int(random.uniform( - balloon_test.get_ballooned_memory(), max_sz)) + expect_mem = int( + random.uniform(balloon_test.get_ballooned_memory(), max_sz) + ) - quit_after_test = balloon_test.run_ballooning_test(expect_mem, - tag) + quit_after_test = balloon_test.run_ballooning_test(expect_mem, tag) time.sleep(20) if mem_check == "yes": check_list = params["mem_stat_check_list"].split() for mem_check_name in check_list: - balloon_test.memory_stats_check(mem_check_name, - mem_stat_working) + balloon_test.memory_stats_check( + mem_check_name, mem_stat_working + ) if quit_after_test: return @@ -70,13 +71,13 @@ def balloon_memory(vm, mem_check, min_sz, max_sz): vm.verify_alive() session = vm.wait_for_login() - if params['os_type'] == 'windows': + if params["os_type"] == "windows": driver_name = params.get("driver_name", "balloon") - session = utils_test.qemu.windrv_check_running_verifier(session, vm, - test, driver_name) + session = utils_test.qemu.windrv_check_running_verifier( + session, vm, test, driver_name + ) balloon_test = BallooningTestWin(test, params, env) - error_context.context("Config balloon service in guest", - test.log.info) + error_context.context("Config balloon service in guest", test.log.info) balloon_test.configure_balloon_service(session) else: balloon_test = BallooningTestLinux(test, params, env) @@ -87,11 +88,12 @@ def balloon_memory(vm, mem_check, min_sz, max_sz): blnsrv_operation = params.objects("blnsrv_operation") mem_stat_working = False for bln_oper in blnsrv_operation: - error_context.context("%s balloon service" % bln_oper, test.log.info) + error_context.context(f"{bln_oper} balloon service", test.log.info) balloon_test.operate_balloon_service(session, bln_oper) - error_context.context("Balloon vm memory after %s balloon service" - % bln_oper, test.log.info) + 
error_context.context( + f"Balloon vm memory after {bln_oper} balloon service", test.log.info + ) balloon_memory(vm, mem_check, min_sz, max_sz) mem_stat_working = True # for windows guest, disable/uninstall driver to get memory leak based on diff --git a/qemu/tests/balloon_stop_continue.py b/qemu/tests/balloon_stop_continue.py index af74b28618..8c9919f3af 100644 --- a/qemu/tests/balloon_stop_continue.py +++ b/qemu/tests/balloon_stop_continue.py @@ -1,5 +1,5 @@ -import time import random +import time from virttest import error_context @@ -28,7 +28,7 @@ def run(test, params, env): vm.monitor.info("balloon") error_context.context("Stop and continue vm from monitor", test.log.info) vm.monitor.cmd("stop") - vm.monitor.cmd('cont') + vm.monitor.cmd("cont") vm.verify_alive() time.sleep(random.randint(0, 3)) diff --git a/qemu/tests/balloon_stress.py b/qemu/tests/balloon_stress.py index 69a2dcab3e..9a8bae81d1 100644 --- a/qemu/tests/balloon_stress.py +++ b/qemu/tests/balloon_stress.py @@ -1,12 +1,10 @@ import random import re -from virttest import utils_misc -from virttest import utils_test -from virttest import error_context -from qemu.tests.balloon_check import BallooningTestWin -from qemu.tests.balloon_check import BallooningTestLinux +from virttest import error_context, utils_misc, utils_test + from provider import win_driver_utils +from qemu.tests.balloon_check import BallooningTestLinux, BallooningTestWin @error_context.context_aware @@ -28,7 +26,7 @@ def check_bg_running(): Check the background test status in guest. :return: return True if find the process name; otherwise False """ - if params['os_type'] == 'windows': + if params["os_type"] == "windows": list_cmd = params.get("list_cmd", "wmic process get name") output = session.cmd_output_safe(list_cmd, timeout=60) process = re.findall("mplayer", output, re.M | re.I) @@ -43,22 +41,26 @@ def check_bg_running(): timeout = float(params.get("login_timeout", 360)) session = vm.wait_for_login(timeout=timeout) - if params['os_type'] == 'windows': + if params["os_type"] == "windows": driver_name = params["driver_name"] - session = utils_test.qemu.windrv_check_running_verifier(session, vm, - test, driver_name, - timeout) + session = utils_test.qemu.windrv_check_running_verifier( + session, vm, test, driver_name, timeout + ) balloon_test = BallooningTestWin(test, params, env) else: balloon_test = BallooningTestLinux(test, params, env) error_context.context("Run stress background", test.log.info) stress_test = params.get("stress_test") - if params['os_type'] == 'windows': + if params["os_type"] == "windows": utils_test.run_virt_sub_test(test, params, env, stress_test) - if not utils_misc.wait_for(check_bg_running, first=2.0, - text="wait for stress app to start", - step=1.0, timeout=60): + if not utils_misc.wait_for( + check_bg_running, + first=2.0, + text="wait for stress app to start", + step=1.0, + timeout=60, + ): test.error("Run stress background failed") else: stress_bg = utils_test.VMStress(vm, "stress", params) @@ -69,7 +71,7 @@ def check_bg_running(): error_context.context("balloon vm memory in loop", test.log.info) try: - for i in range(1, int(repeat_times+1)): + for i in range(1, int(repeat_times + 1)): test.log.info("repeat times: %d", i) balloon_test.balloon_memory(int(random.uniform(min_sz, max_sz))) if not check_bg_running(): diff --git a/qemu/tests/balloon_thp.py b/qemu/tests/balloon_thp.py index 9445449723..dd99caf236 100644 --- a/qemu/tests/balloon_thp.py +++ b/qemu/tests/balloon_thp.py @@ -1,7 +1,6 @@ import time -from virttest 
import funcatexit
-from virttest import utils_misc
+from virttest import funcatexit, utils_misc
 from virttest.staging import utils_memory
 
 
@@ -11,7 +10,7 @@ def clean_env(session, file_name):
     :param session: The vm session
     :param file_name: The file name
     """
-    session.cmd_output_safe('rm -rf %s' % file_name)
+    session.cmd_output_safe(f"rm -rf {file_name}")
 
 
 def run(test, params, env):
@@ -28,28 +27,29 @@ def run(test, params, env):
     :param params: Dictionary with the test parameters
     :param env: Dictionary with test environment.
     """
+
     def get_buddy_info(sleep=False):
         """Get buddy info"""
         if sleep:
             time.sleep(10)
-        buddy_info = utils_memory.get_buddy_info('0', session=session)['0']
-        test.log.info('Checked buddy info, value is %s', buddy_info)
+        buddy_info = utils_memory.get_buddy_info("0", session=session)["0"]
+        test.log.info("Checked buddy info, value is %s", buddy_info)
         return buddy_info
 
-    fragement_dir = params['fragement_dir']
+    fragement_dir = params["fragement_dir"]
     vm = env.get_vm(params["main_vm"])
     session = vm.wait_for_login()
     buddy_info_bf = get_buddy_info()
-    test.log.info('Making fragement on guest...')
-    session.cmd_output_safe(params['cmd_make_fragement'], timeout=600)
+    test.log.info("Making fragement on guest...")
+    session.cmd_output_safe(params["cmd_make_fragement"], timeout=600)
     for i in range(1, 10, 2):
-        session.cmd_output_safe('rm -f %s/*%s' % (fragement_dir, i))
-    funcatexit.register(env, params['type'], clean_env, session, fragement_dir)
+        session.cmd_output_safe(f"rm -f {fragement_dir}/*{i}")
+    funcatexit.register(env, params["type"], clean_env, session, fragement_dir)
     buddy_info_af_fragement = get_buddy_info(sleep=True)
     if buddy_info_bf >= buddy_info_af_fragement:
-        test.fail('Buddy info should increase.')
-    mem = int(float(utils_misc.normalize_data_size("%sM" % params["mem"])))
+        test.fail("Buddy info should increase.")
+    mem = int(float(utils_misc.normalize_data_size("{}M".format(params["mem"]))))
     vm.balloon(mem - 1024)
     buddy_info_af_balloon = get_buddy_info(sleep=True)
     if buddy_info_af_balloon >= buddy_info_af_fragement:
-        test.fail('Buddy info should decrease.')
+        test.fail("Buddy info should decrease.")
diff --git a/qemu/tests/balloon_uniqueness.py b/qemu/tests/balloon_uniqueness.py
index c8c6b8120b..0f29938a11 100644
--- a/qemu/tests/balloon_uniqueness.py
+++ b/qemu/tests/balloon_uniqueness.py
@@ -1,8 +1,7 @@
 from virttest import env_process
 from virttest.qemu_devices.qdevices import QDevice
-
-from virttest.virt_vm import VMCreateError
 from virttest.qemu_monitor import QMPCmdError
+from virttest.virt_vm import VMCreateError
 
 
 def run(test, params, env):
@@ -18,10 +17,10 @@ def run(test, params, env):
     :param params: Dictionary with the test parameters
     :param env: Dictionary with test environment.
""" - params['start_vm'] = 'yes' - vm_name = params['main_vm'] - error_msg = params.get('error_msg') - num_boot_devices = len(params.objects('balloon')) + params["start_vm"] = "yes" + vm_name = params["main_vm"] + error_msg = params.get("error_msg") + num_boot_devices = len(params.objects("balloon")) try: env_process.preprocess_vm(test, params, env, vm_name) except VMCreateError as e: @@ -29,28 +28,28 @@ def run(test, params, env): raise else: if num_boot_devices > 1: - test.fail('The guest should not start with two balloon devices.') + test.fail("The guest should not start with two balloon devices.") - machine_type = params['machine_type'] - bus = {'aobject': 'pci.0'} - if 's390' in machine_type: # For s390x platform - model = 'virtio-balloon-ccw' - bus = {'type': 'virtual-css'} + machine_type = params["machine_type"] + bus = {"aobject": "pci.0"} + if "s390" in machine_type: # For s390x platform + model = "virtio-balloon-ccw" + bus = {"type": "virtual-css"} else: - model = 'virtio-balloon-pci' - num_hotplug_devices = int(params.get('num_hotplug_devices', 0)) + model = "virtio-balloon-pci" + num_hotplug_devices = int(params.get("num_hotplug_devices", 0)) for i in range(num_hotplug_devices): dev = QDevice(model, parent_bus=bus) - dev.set_param('id', 'hotplugged_balloon%s' % i) - dev_num = len(params.objects('balloon')) + i + dev.set_param("id", f"hotplugged_balloon{i}") + dev_num = len(params.objects("balloon")) + i try: vm = env.get_vm(vm_name) vm.devices.simple_hotplug(dev, vm.monitor) except QMPCmdError as e: if dev_num < 1: - test.fail("Fail to hotplug the balloon device: %s" % str(e)) - elif error_msg not in e.data['desc']: + test.fail(f"Fail to hotplug the balloon device: {str(e)}") + elif error_msg not in e.data["desc"]: raise else: if dev_num >= 1: - test.fail('Qemu should reject the second balloon device.') + test.fail("Qemu should reject the second balloon device.") diff --git a/qemu/tests/bitmap_boundary_test.py b/qemu/tests/bitmap_boundary_test.py index 880830422b..a0703e735d 100644 --- a/qemu/tests/bitmap_boundary_test.py +++ b/qemu/tests/bitmap_boundary_test.py @@ -1,11 +1,8 @@ import json -from virttest import data_dir -from virttest import qemu_storage +from virttest import data_dir, qemu_storage -from provider import backup_utils -from provider import job_utils -from provider import block_dirty_bitmap +from provider import backup_utils, block_dirty_bitmap, job_utils from provider.virt_storage.storage_admin import sp_admin @@ -23,13 +20,15 @@ def run(test, params, env): :param params: Dictionary with the test parameters :param env: Dictionary with test environment. """ + def full_backup(vm, source_node, target_node, bitmap_count): """start full backup job with 65535 bitmaps""" test.log.info("Begin full backup %s to %s", source_node, target_node) actions, extra_options = [], {"sync": "full"} cmd, args = backup_utils.blockdev_backup_qmp_cmd( - source_node, target_node, **extra_options) + source_node, target_node, **extra_options + ) backup_action = {"type": cmd, "data": args} actions.append(backup_action) bitmap_data = {"node": source_node, "persistent": True} @@ -48,9 +47,9 @@ def verify_bitmap_counts(vm, source_node, bitmap_count): out = vm.monitor.query("block") bitmaps_dict = block_dirty_bitmap.get_bitmaps(out) if source_node not in bitmaps_dict: - raise test.fail("device '%s' not found!" 
% source_node) + raise test.fail(f"device '{source_node}' not found!") bitmap_len = len(bitmaps_dict[source_node]) - msg = "bitmap count mismatch, %s != %s" % (bitmap_len, bitmap_count) + msg = f"bitmap count mismatch, {bitmap_len} != {bitmap_count}" assert bitmap_len == bitmap_count, msg def verify_persistent_bitmaps(params, image_name, bitmap_count): @@ -63,14 +62,13 @@ def verify_persistent_bitmaps(params, image_name, bitmap_count): output = data_img.info(output="json") info = json.loads(output) bitmap_len = len(info["format-specific"]["data"]["bitmaps"]) - msg = "bitmap losts after destory VM, %s != %s" % ( - bitmap_len, bitmap_count) + msg = f"bitmap losts after destory VM, {bitmap_len} != {bitmap_count}" assert bitmap_len == bitmap_count, msg source_image = params.get("source_image") target_image = params.get("target_image") - source_node = "drive_%s" % source_image - target_node = "drive_%s" % target_image + source_node = f"drive_{source_image}" + target_node = f"drive_{target_image}" bitmap_count = int(params.get("bitmap_count", 65535)) vm = env.get_vm(params["main_vm"]) vm.verify_alive() diff --git a/qemu/tests/bitmaps_merge_with_nospace.py b/qemu/tests/bitmaps_merge_with_nospace.py index e23e4053d0..1bec56b178 100644 --- a/qemu/tests/bitmaps_merge_with_nospace.py +++ b/qemu/tests/bitmaps_merge_with_nospace.py @@ -1,9 +1,8 @@ from avocado.utils import process - from virttest.data_dir import get_data_dir from virttest.lvm import EmulatedLVM -from virttest.qemu_storage import QemuImg from virttest.qemu_io import QemuIOSystem +from virttest.qemu_storage import QemuImg def run(test, params, env): @@ -25,6 +24,7 @@ def run(test, params, env): :param params: Dictionary with the test parameters. :param env: Dictionary with test environment. """ + def _image_create(image_name): """Create an image.""" img_param = params.object_params(image_name) @@ -37,8 +37,7 @@ def _qemu_io(img, cmd): try: QemuIOSystem(test, params, img.image_filename).cmd_output(cmd, 120) except process.CmdError as err: - test.fail( - "qemu-io to '%s' failed: %s." 
% (img.image_filename, str(err))) + test.fail(f"qemu-io to '{img.image_filename}' failed: {str(err)}.") def _clean_images(img_list): """Remove images from image_list.""" @@ -64,9 +63,13 @@ def _clean_images(img_list): top_image.commit(params.get("cache_mode"), base=base) base_image.bitmap_add(params.get("new_bitmap_base")) try: - base_image.bitmap_merge(params, get_data_dir(), - params["bitmap_name_top"], - params["new_bitmap_base"], top) + base_image.bitmap_merge( + params, + get_data_dir(), + params["bitmap_name_top"], + params["new_bitmap_base"], + top, + ) except process.CmdError as err: err_msg = err.result.stderr.decode() err_msg_cfg = params.get("error_msg").split(",") diff --git a/qemu/tests/blk_commit.py b/qemu/tests/blk_commit.py index d15803947e..742d37e29c 100644 --- a/qemu/tests/blk_commit.py +++ b/qemu/tests/blk_commit.py @@ -4,7 +4,6 @@ class BlockCommit(block_copy.BlockCopy): - def start(self): """ start block device committing job; @@ -24,12 +23,15 @@ def start(self): else: self.test.cancel("hmp command is not supportable.") self.test.log.info("start to commit block device") - self.vm.block_commit(self.device, default_speed, base_image, top_image, - backing_file) + self.vm.block_commit( + self.device, default_speed, base_image, top_image, backing_file + ) status = self.get_status() if not status: self.test.fail("no active job found") - self.test.log.info("block commit job running, with limited speed {0} B/s".format(default_speed)) + self.test.log.info( + "block commit job running, with limited speed %s B/s", default_speed + ) def create_snapshots(self): """ @@ -41,10 +43,12 @@ def create_snapshots(self): for snapshot in snapshots: snapshot = utils_misc.get_path(self.data_dir, snapshot) image_file = self.get_image_file() - self.test.log.info("snapshot {0}, base {1}".format(snapshot, image_file)) + self.test.log.info("snapshot %s, base %s", snapshot, image_file) device = self.vm.live_snapshot(image_file, snapshot, image_format) if device != self.device: image_file = self.get_image_file() - self.test.log.info("expect file: {0}, opening file: {1}".format(snapshot, image_file)) - self.test.fail("create snapshot '%s' failed" % snapshot) + self.test.log.info( + "expect file: %s, opening file: %s", snapshot, image_file + ) + self.test.fail(f"create snapshot '{snapshot}' failed") self.trash_files.append(snapshot) diff --git a/qemu/tests/blk_stream.py b/qemu/tests/blk_stream.py index 8cf0b0fc8b..45d45eb844 100644 --- a/qemu/tests/blk_stream.py +++ b/qemu/tests/blk_stream.py @@ -1,26 +1,26 @@ -from virttest import error_context -from virttest import utils_misc +from virttest import error_context, utils_misc from qemu.tests import block_copy class BlockStream(block_copy.BlockCopy): - """ base class for block stream tests; """ def __init__(self, test, params, env, tag): - super(BlockStream, self).__init__(test, params, env, tag) + super().__init__(test, params, env, tag) self.base_image = None self.ext_args = {} def parser_test_args(self): - default_params = {"wait_finished": "yes", - "snapshot_format": "qcow2", - "snapshot_chain": ""} + default_params = { + "wait_finished": "yes", + "snapshot_format": "qcow2", + "snapshot_chain": "", + } self.default_params.update(default_params) - return super(BlockStream, self).parser_test_args() + return super().parser_test_args() @error_context.context_aware def start(self): @@ -36,7 +36,7 @@ def start(self): if not status: self.test.fail("no active job found") msg = "block stream job running, " - msg += "with limited speed %s B/s" % default_speed 
+ msg += f"with limited speed {default_speed} B/s" self.test.log.info(msg) @error_context.context_aware @@ -54,9 +54,10 @@ def create_snapshots(self): device = self.vm.live_snapshot(image_file, snapshot, image_format) if device != self.device: image_file = self.get_image_file() - self.test.log.info("expect file: %s" % snapshot + - "opening file: %s" % image_file) - self.test.fail("create snapshot '%s' fail" % snapshot) + self.test.log.info( + "expect file: %s opening file: %s", snapshot, image_file + ) + self.test.fail(f"create snapshot '{snapshot}' fail") self.trash_files.append(snapshot) def action_when_streaming(self): diff --git a/qemu/tests/block_aio_io_uring.py b/qemu/tests/block_aio_io_uring.py index 62892a4226..4708118b10 100644 --- a/qemu/tests/block_aio_io_uring.py +++ b/qemu/tests/block_aio_io_uring.py @@ -1,11 +1,11 @@ """Blockdev aio=io_uring basic test""" - -from provider.block_devices_plug import BlockDevicesPlug from virttest import utils_disk, utils_misc from virttest.utils_misc import get_linux_drive_path from virttest.utils_windows.drive import get_disk_props_by_serial_number +from provider.block_devices_plug import BlockDevicesPlug + def run(test, params, env): """ @@ -24,22 +24,22 @@ def _get_window_disk_index_by_serial(serial): idx_info = get_disk_props_by_serial_number(session, serial, ["Index"]) if idx_info: return idx_info["Index"] - test.fail("Not find expected disk %s" % serial) + test.fail(f"Not find expected disk {serial}") def _check_disk_in_guest(img): os_type = params["os_type"] - logger.debug("Check disk %s in guest" % img) - if os_type == 'windows': - img_size = params.get("image_size_%s" % img) + logger.debug("Check disk %s in guest", img) + if os_type == "windows": + img_size = params.get(f"image_size_{img}") cmd = utils_misc.set_winutils_letter(session, guest_cmd) disk = _get_window_disk_index_by_serial(img) utils_disk.update_windows_disk_attributes(session, disk) logger.info("Clean disk:%s", disk) utils_disk.clean_partition_windows(session, disk) logger.info("Formatting disk:%s", disk) - driver = \ - utils_disk.configure_empty_disk(session, disk, img_size, - os_type)[0] + driver = utils_disk.configure_empty_disk(session, disk, img_size, os_type)[ + 0 + ] output_path = driver + ":\\test.dat" cmd = cmd.format(output_path) else: @@ -73,5 +73,5 @@ def hotplug_unplug_test(): locals_var = locals() if guest_operation: - logger.debug("Execute guest operation %s" % guest_operation) + logger.debug("Execute guest operation %s", guest_operation) locals_var[guest_operation]() diff --git a/qemu/tests/block_auto_detect_size_increased.py b/qemu/tests/block_auto_detect_size_increased.py index 553fa9132b..bbb99369e7 100644 --- a/qemu/tests/block_auto_detect_size_increased.py +++ b/qemu/tests/block_auto_detect_size_increased.py @@ -1,12 +1,9 @@ import re -from virttest import data_dir -from virttest import storage -from virttest import utils_disk -from virttest import utils_test +from virttest import data_dir, storage, utils_disk, utils_test +from virttest.qemu_capabilities import Flags from virttest.qemu_storage import get_image_json from virttest.utils_numeric import normalize_data_size -from virttest.qemu_capabilities import Flags def run(test, params, env): @@ -25,12 +22,17 @@ def run(test, params, env): :param params: Dictionary with the test parameters. :param env: Dictionary with test environment. 
""" + def increase_block_device(dev): """Increase the block device.""" - test.log.info("Start to increase image '%s' to %s.", - img, img_resize_size) - resize_size = int(float(normalize_data_size(re.search( - r'(\d+\.?(\d+)?\w)', img_resize_size).group(1), "B"))) + test.log.info("Start to increase image '%s' to %s.", img, img_resize_size) + resize_size = int( + float( + normalize_data_size( + re.search(r"(\d+\.?(\d+)?\w)", img_resize_size).group(1), "B" + ) + ) + ) args = (dev, resize_size) if vm.check_capability(Flags.BLOCKDEV): args = (None, resize_size, dev) @@ -39,27 +41,35 @@ def increase_block_device(dev): def get_disk_size_by_diskpart(index): """Get the disk size by the diskpart.""" - cmd = ' && '.join( - ("echo list disk > {0}", "echo exit >> {0}", - "diskpart /s {0}", "del /f {0}")).format('disk_script') - pattern = r'Disk\s+%s\s+Online\s+(\d+\s+\w+)\s+\d+\s+\w+' % index + cmd = " && ".join( + ( + "echo list disk > {0}", + "echo exit >> {0}", + "diskpart /s {0}", + "del /f {0}", + ) + ).format("disk_script") + pattern = rf"Disk\s+{index}\s+Online\s+(\d+\s+\w+)\s+\d+\s+\w+" return re.search(pattern, session.cmd_output(cmd), re.M).group(1) def check_disk_size(index): """Check the disk size after increasing inside guest.""" - test.log.info('Check whether the size of disk %s is equal to %s after ' - 'increasing inside guest.', index, img_resize_size) + test.log.info( + "Check whether the size of disk %s is equal to %s after " + "increasing inside guest.", + index, + img_resize_size, + ) v, u = re.search(r"(\d+\.?\d*)\s*(\w?)", img_resize_size).groups() size = get_disk_size_by_diskpart(index) - test.log.info('The size of disk %s is %s', index, size) + test.log.info("The size of disk %s is %s", index, size) if normalize_data_size(size, u) != v: - test.fail('The size of disk %s is not equal to %s' % - (index, img_resize_size)) + test.fail(f"The size of disk {index} is not equal to {img_resize_size}") img = params.get("images").split()[-1] img_params = params.object_params(img) img_size = img_params.get("image_size") - img_resize_size = img_params.get('image_resize_size') + img_resize_size = img_params.get("image_resize_size") root_dir = data_dir.get_data_dir() img_filename = storage.get_image_filename(img_params, root_dir) @@ -67,15 +77,15 @@ def check_disk_size(index): vm.verify_alive() session = utils_test.qemu.windrv_check_running_verifier( - vm.wait_for_login(), vm, test, 'viostor', 300) + vm.wait_for_login(), vm, test, "viostor", 300 + ) indices = utils_disk.get_windows_disks_index(session, img_size) utils_disk.update_windows_disk_attributes(session, indices) index = indices[0] - mpoint = utils_disk.configure_empty_windows_disk(session, index, - img_size)[0] + mpoint = utils_disk.configure_empty_windows_disk(session, index, img_size)[0] - if img_params.get("image_format") == 'luks': + if img_params.get("image_format") == "luks": img_filename = get_image_json(img, img_params, root_dir) increase_block_device(vm.get_block({"filename": img_filename})) - vm.copy_files_to('/home/dd_file', "%s:\\dd_file" % mpoint) + vm.copy_files_to("/home/dd_file", f"{mpoint}:\\dd_file") check_disk_size(index) diff --git a/qemu/tests/block_boot_multi_disks.py b/qemu/tests/block_boot_multi_disks.py index 8bf0ad97aa..a1bc00e397 100644 --- a/qemu/tests/block_boot_multi_disks.py +++ b/qemu/tests/block_boot_multi_disks.py @@ -1,7 +1,8 @@ """Test booting with multi disks""" + import re -from virttest import error_context, env_process, utils_misc, utils_disk +from virttest import env_process, error_context, 
utils_disk, utils_misc from virttest.utils_misc import get_linux_drive_path from virttest.utils_windows.drive import get_disk_props_by_serial_number @@ -29,30 +30,29 @@ def _prepare_images(): for idx_ in range(stg_image_num): name = "stg%d" % idx_ params["images"] = params["images"] + " " + name - params["image_name_%s" % name] = image_name % name - params["image_size_%s" % name] = image_size - params["image_format_%s" % name] = image_format - params["drive_format_%s" % name] = drive_format - params["boot_drive_%s" % name] = "yes" - params["blk_extra_params_%s" % name] = "serial=%s" % name + params[f"image_name_{name}"] = image_name % name + params[f"image_size_{name}"] = image_size + params[f"image_format_{name}"] = image_format + params[f"drive_format_{name}"] = drive_format + params[f"boot_drive_{name}"] = "yes" + params[f"blk_extra_params_{name}"] = f"serial={name}" def _get_window_disk_index_by_serial(serial): idx_info = get_disk_props_by_serial_number(session, serial, ["Index"]) if idx_info: return idx_info["Index"] - test.fail("Not find expected disk %s" % serial) + test.fail(f"Not find expected disk {serial}") def _check_disk_in_guest(img): - logger.debug("Check disk %s in guest" % img) - if os_type == 'windows': - + logger.debug("Check disk %s in guest", img) + if os_type == "windows": cmd = utils_misc.set_winutils_letter(session, guest_cmd) disk = _get_window_disk_index_by_serial(img) utils_disk.update_windows_disk_attributes(session, disk) logger.info("Formatting disk:%s", disk) - driver = \ - utils_disk.configure_empty_disk(session, disk, image_size, - os_type)[0] + driver = utils_disk.configure_empty_disk( + session, disk, image_size, os_type + )[0] output_path = driver + ":\\test.dat" cmd = cmd.format(output_path) else: @@ -63,18 +63,19 @@ def _check_disk_in_guest(img): session.cmd(cmd) logger = test.log - stg_image_num = params.get_numeric('stg_image_num') + stg_image_num = params.get_numeric("stg_image_num") os_type = params["os_type"] image_size = params.get("stg_image_size", "512M") guest_cmd = params["guest_cmd"] - logger.info('Prepare images ...%s', params["stg_image_num"]) + logger.info("Prepare images ...%s", params["stg_image_num"]) _prepare_images() - logger.info('Booting vm...') - params['start_vm'] = 'yes' - vm = env.get_vm(params['main_vm']) - env_process.process(test, params, env, env_process.preprocess_image, - env_process.preprocess_vm) + logger.info("Booting vm...") + params["start_vm"] = "yes" + vm = env.get_vm(params["main_vm"]) + env_process.process( + test, params, env, env_process.preprocess_image, env_process.preprocess_vm + ) timeout = params.get_numeric("login_timeout", 360) logger.debug("Login in guest...") @@ -83,12 +84,12 @@ def _check_disk_in_guest(img): check_message = params.get("check_message") if check_message: logger.debug("Check warning message in BIOS log...") - logs = vm.logsessions['seabios'].get_output() + logs = vm.logsessions["seabios"].get_output() result = re.search(check_message, logs, re.S) result = "yes" if result else "no" expect_find = params.get("expect_find") if result != expect_find: - test.fail("Get unexpected find %s %s" % (result, expect_find)) + test.fail(f"Get unexpected find {result} {expect_find}") logger.debug("Check disk number in guest...") check_num_cmd = params["check_num_cmd"] @@ -96,8 +97,10 @@ def _check_disk_in_guest(img): guest_disk_num = int(guest_cmd_output.strip()) expected_disk_number = stg_image_num + 1 if guest_disk_num != expected_disk_number: - test.fail("Guest disk number is wrong,expected: %d 
actually: %d" % ( - guest_disk_num, expected_disk_number)) + test.fail( + "Guest disk number is wrong,expected: %d actually: %d" + % (guest_disk_num, expected_disk_number) + ) logger.debug("Check IO on guest disk...") for idx in range(params.get_numeric("check_disk_num", 3)): diff --git a/qemu/tests/block_boot_under_low_speed.py b/qemu/tests/block_boot_under_low_speed.py index 9bcb00b194..a359b6609d 100644 --- a/qemu/tests/block_boot_under_low_speed.py +++ b/qemu/tests/block_boot_under_low_speed.py @@ -1,12 +1,12 @@ """QEMU Low Speed Booting Test""" -import threading -import time + import os import shutil -from avocado.utils import process +import threading +import time -from virttest import error_context, storage, env_process, nfs -from virttest import data_dir +from avocado.utils import process +from virttest import data_dir, env_process, error_context, nfs, storage # This decorator makes the test function aware of context strings @@ -37,11 +37,12 @@ def _setup_env(): org_img = storage.get_image_filename(params, data_dir.DATA_DIR) logger.info(org_img) file_name = os.path.basename(org_img) - if not os.path.exists(params['export_dir'] + "/" + file_name): - logger.info("Copy file %s %s", org_img, params['export_dir']) - shutil.copy(org_img, params['export_dir']) - params["image_name"] = params['nfs_mount_dir'] + "/" + \ - os.path.splitext(file_name)[0] + if not os.path.exists(params["export_dir"] + "/" + file_name): + logger.info("Copy file %s %s", org_img, params["export_dir"]) + shutil.copy(org_img, params["export_dir"]) + params["image_name"] = ( + params["nfs_mount_dir"] + "/" + os.path.splitext(file_name)[0] + ) logger = test.log nfs_local = nfs.Nfs(params) @@ -52,11 +53,12 @@ def _setup_env(): thread = threading.Thread(target=_limit_daemon) thread.start() time.sleep(2) - logger.info('Booting vm...%s', params["image_name"]) - params['start_vm'] = 'yes' - vm = env.get_vm(params['main_vm']) - env_process.process(test, params, env, env_process.preprocess_image, - env_process.preprocess_vm) + logger.info("Booting vm...%s", params["image_name"]) + params["start_vm"] = "yes" + vm = env.get_vm(params["main_vm"]) + env_process.process( + test, params, env, env_process.preprocess_image, env_process.preprocess_vm + ) timeout = int(params.get("login_timeout", 360)) vm.wait_for_login(timeout=timeout) finally: diff --git a/qemu/tests/block_check_event.py b/qemu/tests/block_check_event.py index 79117746ea..c42c6b41f3 100644 --- a/qemu/tests/block_check_event.py +++ b/qemu/tests/block_check_event.py @@ -15,17 +15,19 @@ def run(test, params, env): :param params: Dictionary with the test parameters. :param env: Dictionary with test environment. """ + def query_system_events(filter_options): """Query the system events in filter options.""" test.log.info("Query the system event log.") - cmd = 'wevtutil qe system /q:\"%s\" /f:text' % filter_options + cmd = f'wevtutil qe system /q:"{filter_options}" /f:text' return session.cmd(cmd).strip() vm = env.get_vm(params["main_vm"]) vm.verify_alive() session = utils_test.qemu.windrv_check_running_verifier( - vm.wait_for_login(), vm, test, 'viostor', 300) + vm.wait_for_login(), vm, test, "viostor", 300 + ) - if query_system_events(params['filter_options']): - test.fail('Found the error event(id: %s).' 
% params['event_id']) + if query_system_events(params["filter_options"]): + test.fail("Found the error event(id: {}).".format(params["event_id"])) diff --git a/qemu/tests/block_check_fds.py b/qemu/tests/block_check_fds.py index 8e0fe116ae..9ced05e685 100644 --- a/qemu/tests/block_check_fds.py +++ b/qemu/tests/block_check_fds.py @@ -1,5 +1,4 @@ from avocado.utils import process - from virttest import error_context from provider.block_devices_plug import BlockDevicesPlug @@ -22,6 +21,7 @@ def run(test, params, env): :param params: Dictionary with the test parameters. :param env: Dictionary with test environment. """ + def _get_aio_fds_num(pid): """Get the number of AIO file descriptors.""" return int(process.system_output(lsof_cmd % pid, shell=True)) @@ -30,24 +30,23 @@ def hotplug_unplug_block_repeatedly(times): """Hot plug then unplug block devices repeatedly.""" vm_pid = vm.get_pid() plug = BlockDevicesPlug(vm) - info = ('The number of AIO file descriptors is %s ' - 'after %s block device.') + info = "The number of AIO file descriptors is %s " "after %s block device." for i in range(times): - test.log.info('Iteration %d: Hot plug then unplug ' - 'block device.', i) + test.log.info("Iteration %d: Hot plug then unplug " "block device.", i) plug.hotplug_devs_serial() orig_fds_num = _get_aio_fds_num(vm_pid) - test.log.info(info, orig_fds_num, 'hot plugging') + test.log.info(info, orig_fds_num, "hot plugging") plug.unplug_devs_serial() new_fds_num = _get_aio_fds_num(vm_pid) - test.log.info(info, new_fds_num, 'unplugging') + test.log.info(info, new_fds_num, "unplugging") if new_fds_num != orig_fds_num: - test.fail('The the number of AIO descriptors is ' - 'changed, from %s to %s.' % (orig_fds_num, - new_fds_num)) + test.fail( + "The the number of AIO descriptors is " + f"changed, from {orig_fds_num} to {new_fds_num}." 
+ ) - lsof_cmd = params['lsof_cmd'] + lsof_cmd = params["lsof_cmd"] vm = env.get_vm(params["main_vm"]) vm.verify_alive() vm.wait_for_login() - hotplug_unplug_block_repeatedly(int(params['repeat_times'])) + hotplug_unplug_block_repeatedly(int(params["repeat_times"])) diff --git a/qemu/tests/block_check_max_tranfer_length.py b/qemu/tests/block_check_max_tranfer_length.py index d4a093f4c1..6b08893536 100644 --- a/qemu/tests/block_check_max_tranfer_length.py +++ b/qemu/tests/block_check_max_tranfer_length.py @@ -1,8 +1,6 @@ import re -from virttest import error_context -from virttest import utils_disk -from virttest import utils_misc +from virttest import error_context, utils_disk, utils_misc @error_context.context_aware @@ -23,25 +21,29 @@ def run(test, params, env): vm = env.get_vm(params["main_vm"]) session = vm.wait_for_login() - check_cmd = utils_misc.set_winutils_letter(session, - params["check_cmd"]) + check_cmd = utils_misc.set_winutils_letter(session, params["check_cmd"]) error_context.context("Format data disk", test.log.info) disk_index = utils_disk.get_windows_disks_index(session, img_size) if not disk_index: - test.error("Failed to get the disk index of size %s" % img_size) + test.error(f"Failed to get the disk index of size {img_size}") if not utils_disk.update_windows_disk_attributes(session, disk_index): - test.error("Failed to enable data disk %s" % disk_index) + test.error(f"Failed to enable data disk {disk_index}") drive_letter_list = utils_disk.configure_empty_windows_disk( - session, disk_index[0], img_size) + session, disk_index[0], img_size + ) if not drive_letter_list: test.error("Failed to format the data disk") drive_letter = drive_letter_list[0] - error_context.context("Check the maximum transfer length if " - "VIRTIO_BLK_F_SEG_MAX flag is on", test.log.info) + error_context.context( + "Check the maximum transfer length if " "VIRTIO_BLK_F_SEG_MAX flag is on", + test.log.info, + ) output = session.cmd_output(check_cmd % drive_letter) - actual_max_transfer_length = re.findall( - r"MaximumTransferLength: ([\w]+)", output)[0] + actual_max_transfer_length = re.findall(r"MaximumTransferLength: ([\w]+)", output)[ + 0 + ] if actual_max_transfer_length != expect_max_transfer_length: - test.error("maximum transfer length %s is not expected" - % actual_max_transfer_length) + test.error( + f"maximum transfer length {actual_max_transfer_length} is not expected" + ) diff --git a/qemu/tests/block_check_memory_leak.py b/qemu/tests/block_check_memory_leak.py index 3631c4617e..0f60574bf0 100644 --- a/qemu/tests/block_check_memory_leak.py +++ b/qemu/tests/block_check_memory_leak.py @@ -5,10 +5,9 @@ import time from avocado.utils import process +from virttest import arch, error_context from virttest import data_dir as virttest_data_dir -from virttest import error_context from virttest.utils_misc import get_linux_drive_path -from virttest import arch @error_context.context_aware @@ -44,26 +43,28 @@ def _execute_io_in_guest(): logger.info("Execute io:%s", guest_io_cmd) session.sendline("$SHELL " + guest_io_cmd) - if arch.ARCH in ('ppc64', 'ppc64le'): + if arch.ARCH in ("ppc64", "ppc64le"): output = process.system_output("lscfg --list firmware -v", shell=True).decode() - ver = float(re.findall(r'\d\.\d', output)[0]) + ver = float(re.findall(r"\d\.\d", output)[0]) if ver >= 6.3: - #bz2235228,cancel test due to known product bug. - test.cancel("Skip test for xive kvm interrupt guest due to" - " known host crash issue.") + # bz2235228,cancel test due to known product bug. 
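# Illustrative sketch (values invented) of the implicit string concatenation
# that the reflowed messages above rely on: adjacent literals are fused at
# compile time, so a missing trailing space silently glues words together,
# as in the "...after" "execute..." message later in block_copy.py.
orig_fds_num, new_fds_num = 8, 9
msg = ("The number of AIO descriptors is "
       f"changed, from {orig_fds_num} to {new_fds_num}.")
assert "is changed" in msg

timeout = 60
fused = "No RESET event received after" f"execute system_reset {timeout}s"
assert "afterexecute" in fused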
+ test.cancel( + "Skip test for xive kvm interrupt guest due to" + " known host crash issue." + ) logger = test.log - data_images = params['data_images'].split() + data_images = params["data_images"].split() error_context.context("Get the main VM", logger.info) vm = env.get_vm(params["main_vm"]) vm.verify_alive() - timeout = params.get_numeric('login_timeout', 360) + timeout = params.get_numeric("login_timeout", 360) session = vm.wait_for_login(timeout=timeout) time.sleep(60) logger.info("Start to IO in guest") _execute_io_in_guest() logger.info("Wait ...") - time.sleep(params.get_numeric('io_timeout', 300)) + time.sleep(params.get_numeric("io_timeout", 300)) logger.info("Try to cancel IO.") session = vm.wait_for_login(timeout=timeout) @@ -71,11 +72,11 @@ def _execute_io_in_guest(): logger.info("Ready to destroy vm") vm.destroy() logger.info("Ready to check vm...") - cp_cmd = "cp %s %s" % (params["valgrind_log"], test.logdir) + cp_cmd = "cp {} {}".format(params["valgrind_log"], test.logdir) process.system_output(cp_cmd, shell=True) check_cmd = params["check_cmd"] out = process.system_output(check_cmd, shell=True).decode() - leak_threshold = params.get_numeric('leak_threshold') + leak_threshold = params.get_numeric("leak_threshold") logger.info("Find leak:%s,threshold: %d", out, leak_threshold) if len(out) and int(out) > leak_threshold: - test.fail("Find memory leak %s,Please check valgrind.log" % out) + test.fail(f"Find memory leak {out},Please check valgrind.log") diff --git a/qemu/tests/block_check_serial.py b/qemu/tests/block_check_serial.py index d35cb4f8a6..44cc80ddae 100644 --- a/qemu/tests/block_check_serial.py +++ b/qemu/tests/block_check_serial.py @@ -1,5 +1,4 @@ -"""Test serial length """ - +"""Test serial length""" from virttest import error_context from virttest.utils_misc import get_linux_drive_path @@ -27,20 +26,20 @@ def _find_disks_by_serial(): wrong_disks = [] for img in images: image_params = params.object_params(img) - serial = image_params['image_serial'] + serial = image_params["image_serial"] test.log.info("Try to Find the image %s by %s", img, serial) - os_type = params['os_type'] - cmd = params['cmd_get_disk_id'] + os_type = params["os_type"] + cmd = params["cmd_get_disk_id"] if os_type == "windows": cmd = cmd.format(serial) status, output = session.cmd_status_output(cmd) if status != 0: - test.fail("Execute command fail: %s" % output) + test.fail(f"Execute command fail: {output}") disk = output.strip() else: disk = get_linux_drive_path(session, serial) if disk: - tmp_file = "/tmp/%s.vpd" % img + tmp_file = f"/tmp/{img}.vpd" cmd = cmd.format(disk, tmp_file, serial) status, output = session.cmd_status_output(cmd) if status != 0: @@ -53,9 +52,9 @@ def _find_disks_by_serial(): wrong_disks.append(img) if len(wrong_disks): - test.fail("Can not get disks %s by serial or uid" % wrong_disks) + test.fail(f"Can not get disks {wrong_disks} by serial or uid") - images = params['data_images'].split() + images = params["data_images"].split() vm = env.get_vm(params["main_vm"]) vm.verify_alive() session = vm.wait_for_login() diff --git a/qemu/tests/block_commit_reboot.py b/qemu/tests/block_commit_reboot.py index 1b53e19045..b25c3c9af2 100644 --- a/qemu/tests/block_commit_reboot.py +++ b/qemu/tests/block_commit_reboot.py @@ -1,24 +1,25 @@ -import time import random +import time from qemu.tests import blk_commit class BlockCommitReboot(blk_commit.BlockCommit): - def reboot(self): """ Reset guest with system_reset; """ - return super(BlockCommitReboot, self).reboot(boot_check=False) + 
return super().reboot(boot_check=False) def action_when_start(self): """ start pre-action in new threads; """ - super(BlockCommitReboot, self).action_when_start() - self.test.log.info("sleep for random time between 0 to 20, to perform " - "the block job during different stage of rebooting") + super().action_when_start() + self.test.log.info( + "sleep for random time between 0 to 20, to perform " + "the block job during different stage of rebooting" + ) time.sleep(random.randint(0, 20)) @@ -44,4 +45,4 @@ def run(test, params, env): try: reboot_test.clean() except Exception as e: - test.log.warn(e) + test.log.warning(e) diff --git a/qemu/tests/block_commit_stress.py b/qemu/tests/block_commit_stress.py index ce0665b536..8b74f75209 100644 --- a/qemu/tests/block_commit_stress.py +++ b/qemu/tests/block_commit_stress.py @@ -1,11 +1,9 @@ -from virttest import utils_misc -from virttest import utils_test +from virttest import utils_misc, utils_test from qemu.tests import blk_commit class BlockCommitStress(blk_commit.BlockCommit): - def load_stress(self): """ load IO/CPU/Memoery stress in guest @@ -23,6 +21,7 @@ def unload_stress(self): """ stop stress app """ + def _unload_stress(): session = self.get_session() cmd = self.params.get("stop_cmd") @@ -31,11 +30,15 @@ def _unload_stress(): return self.app_running() self.test.log.info("stop stress app in guest") - stopped = utils_misc.wait_for(_unload_stress, first=2.0, - text="wait stress app quit", step=1.0, - timeout=self.params["wait_timeout"]) + stopped = utils_misc.wait_for( + _unload_stress, + first=2.0, + text="wait stress app quit", + step=1.0, + timeout=self.params["wait_timeout"], + ) if not stopped: - self.test.log.warn("stress app is still running") + self.test.log.warning("stress app is still running") def app_running(self): """ @@ -54,17 +57,24 @@ def verify_backingfile(self): self.test.log.info("Check image backing-file") exp_img_file = self.params["expected_image_file"] exp_img_file = utils_misc.get_path(self.data_dir, exp_img_file) - self.test.log.debug("Expected image file read from config file is '%s'", exp_img_file) + self.test.log.debug( + "Expected image file read from config file is '%s'", exp_img_file + ) backingfile = self.get_backingfile("monitor") if backingfile: - self.test.log.info("Got backing-file: #{0}# by 'info/query block' in #{1}# " - "monitor".format(backingfile, self.vm.monitor.protocol)) + self.test.log.info( + "Got backing-file: #%s# by 'info/query block' in #%s# " "monitor", + backingfile, + self.vm.monitor.protocol, + ) if exp_img_file == backingfile: self.test.log.info("check backing file with monitor passed") else: - self.test.fail("backing file is different with the expected one. " - "expecting: %s, actual: %s" % (exp_img_file, backingfile)) + self.test.fail( + "backing file is different with the expected one. 
" + f"expecting: {exp_img_file}, actual: {backingfile}" + ) def run(test, params, env): diff --git a/qemu/tests/block_copy.py b/qemu/tests/block_copy.py index d3621f8a61..667726793a 100644 --- a/qemu/tests/block_copy.py +++ b/qemu/tests/block_copy.py @@ -1,16 +1,17 @@ +import random import re import time -import random -import six +import six from avocado.utils import process - -from virttest import data_dir -from virttest import error_context -from virttest import storage -from virttest import qemu_storage -from virttest import utils_misc -from virttest import qemu_monitor +from virttest import ( + data_dir, + error_context, + qemu_monitor, + qemu_storage, + storage, + utils_misc, +) def speed2byte(speed): @@ -18,22 +19,24 @@ def speed2byte(speed): convert speed to Bytes/s """ if str(speed).isdigit(): - speed = "%sB" % speed + speed = f"{speed}B" speed = utils_misc.normalize_data_size(speed, "B") return int(float(speed)) -class BlockCopy(object): - +class BlockCopy: """ Base class for block copy test; """ - default_params = {"cancel_timeout": 6, - "wait_timeout": 600, - "login_timeout": 360, - "check_timeout": 3, - "max_speed": 0, - "default_speed": 0} + + default_params = { + "cancel_timeout": 6, + "wait_timeout": 600, + "login_timeout": 360, + "check_timeout": 3, + "max_speed": 0, + "default_speed": 0, + } trash_files = [] opening_sessions = [] processes = [] @@ -77,8 +80,7 @@ def get_device(self): """ according configuration get target device ID; """ - image_file = storage.get_image_filename(self.parser_test_args(), - self.data_dir) + image_file = storage.get_image_filename(self.parser_test_args(), self.data_dir) self.test.log.info("image filename: %s", image_file) return self.vm.get_block({"file": image_file}) @@ -100,7 +102,7 @@ def get_status(self): try: return self.vm.get_job_status(self.device) except qemu_monitor.MonitorLockError as e: - self.test.log.warn(e) + self.test.log.warning(e) time.sleep(random.uniform(1, 5)) count += 1 return {} @@ -113,15 +115,16 @@ def do_steps(self, tag=None): fun = getattr(self, step) fun() else: - self.test.error("undefined step %s" % step) + self.test.error(f"undefined step {step}") except KeyError: - self.test.log.warn("Undefined test phase '%s'", tag) + self.test.log.warning("Undefined test phase '%s'", tag) @error_context.context_aware def cancel(self): """ cancel active job on given image; """ + def is_cancelled(): ret = not bool(self.get_status()) ret &= bool(self.vm.monitor.get_event("BLOCK_JOB_CANCELLED")) @@ -134,7 +137,7 @@ def is_cancelled(): self.vm.cancel_block_job(self.device) cancelled = utils_misc.wait_for(is_cancelled, timeout=timeout) if not cancelled: - msg = "Cancel block job timeout in %ss" % timeout + msg = f"Cancel block job timeout in {timeout}s" self.test.fail(msg) self.vm.monitor.clear_event("BLOCK_JOB_CANCELLED") @@ -189,16 +192,15 @@ def set_speed(self): params = self.parser_test_args() max_speed = params.get("max_speed") expected_speed = int(params.get("expected_speed", max_speed)) - error_context.context("set speed to %s B/s" % expected_speed, - self.test.log.info) + error_context.context(f"set speed to {expected_speed} B/s", self.test.log.info) self.vm.set_job_speed(self.device, expected_speed) status = self.get_status() if not status: self.test.fail("Unable to query job status.") speed = status["speed"] if speed != expected_speed: - msg = "Set speed fail. (expected speed: %s B/s," % expected_speed - msg += "actual speed: %s B/s)" % speed + msg = f"Set speed fail. 
(expected speed: {expected_speed} B/s," + msg += f"actual speed: {speed} B/s)" self.test.fail(msg) @error_context.context_aware @@ -212,17 +214,17 @@ def reboot(self, method="shell", boot_check=True): if boot_check: session = self.get_session() - return self.vm.reboot(session=session, - timeout=timeout, method=method) + return self.vm.reboot(session=session, timeout=timeout, method=method) error_context.context("reset guest via system_reset", self.test.log.info) self.vm.monitor.clear_event("RESET") self.vm.monitor.cmd("system_reset") - reseted = utils_misc.wait_for(lambda: - self.vm.monitor.get_event("RESET"), - timeout=timeout) + reseted = utils_misc.wait_for( + lambda: self.vm.monitor.get_event("RESET"), timeout=timeout + ) if not reseted: - self.test.fail("No RESET event received after" - "execute system_reset %ss" % timeout) + self.test.fail( + "No RESET event received after" f"execute system_reset {timeout}s" + ) self.vm.monitor.clear_event("RESET") return None @@ -263,26 +265,25 @@ def get_image_file(self): try: if isinstance(blocks, six.string_types): # ide0-hd0: removable=1 locked=0 file=/tmp/test.img - image_regex = r'%s.*\s+file=(\S*)' % self.device + image_regex = rf"{self.device}.*\s+file=(\S*)" image_file = re.findall(image_regex, blocks) if image_file: return image_file[0] # ide0-hd0 (#block184): a b c # or # ide0-hd0 (#block184): a b c (raw) - image_file = re.findall(r"%s[^:]+: ([^(]+)\(?" % self.device, - blocks) + image_file = re.findall(rf"{self.device}[^:]+: ([^(]+)\(?", blocks) if image_file: - if image_file[0][-1] == ' ': + if image_file[0][-1] == " ": return image_file[0][:-1] else: return image_file[0] for block in blocks: - if block['device'] == self.device: - return block['inserted']['file'] + if block["device"] == self.device: + return block["inserted"]["file"] except KeyError: - self.test.log.warn("Image file not found for device '%s'", self.device) + self.test.log.warning("Image file not found for device '%s'", self.device) self.test.log.debug("Blocks info: '%s'", blocks) return None @@ -300,7 +301,7 @@ def get_backingfile(self, method="monitor"): matched = re.search(r"backing file: +(.*)", info, re.M) return matched.group(1) except AttributeError: - self.test.log.warn("No backingfile found, cmd output: %s", info) + self.test.log.warning("No backingfile found, cmd output: %s", info) def action_before_start(self): """ @@ -337,7 +338,7 @@ def wait_for_finished(self): timeout = params.get("wait_timeout") finished = utils_misc.wait_for(self.job_finished, timeout=timeout) if not finished: - self.test.fail("Job not finished in %s seconds" % timeout) + self.test.fail(f"Job not finished in {timeout} seconds") time_end = time.time() self.test.log.info("Block job done.") return time_end - time_start @@ -371,10 +372,11 @@ def wait_for_steady(self): params = self.parser_test_args() timeout = params.get("wait_timeout") self.vm.monitor.clear_event("BLOCK_JOB_READY") - steady = utils_misc.wait_for(self.is_steady, first=3.0, - step=3.0, timeout=timeout) + steady = utils_misc.wait_for( + self.is_steady, first=3.0, step=3.0, timeout=timeout + ) if not steady: - self.test.fail("Wait mirroring job ready timeout in %ss" % timeout) + self.test.fail(f"Wait mirroring job ready timeout in {timeout}s") def action_before_steady(self): """ @@ -409,7 +411,7 @@ def clean(self): self.vm.destroy() while self.trash_files: tmp_file = self.trash_files.pop() - process.system("rm -f %s" % tmp_file, ignore_status=True) + process.system(f"rm -f {tmp_file}", ignore_status=True) def create_file(self, 
file_name): """ @@ -423,7 +425,7 @@ def create_file(self, file_name): test_exists_cmd = params.get("test_exists_cmd", "test -f FILE") if session.cmd_status(test_exists_cmd.replace("FILE", file_name)): session.cmd(file_create_cmd.replace("FILE", file_name), timeout=200) - session.cmd("md5sum %s > %s.md5" % (file_name, file_name), timeout=200) + session.cmd(f"md5sum {file_name} > {file_name}.md5", timeout=200) sync_cmd = params.get("sync_cmd", "sync") sync_cmd = utils_misc.set_winutils_letter(session, sync_cmd) session.cmd(sync_cmd) @@ -435,11 +437,11 @@ def verify_md5(self, file_name): :param file_name: the file need to be verified. """ session = self.get_session() - status, output = session.cmd_status_output("md5sum -c %s.md5" % file_name, - timeout=200) + status, output = session.cmd_status_output( + f"md5sum -c {file_name}.md5", timeout=200 + ) if status != 0: - self.test.fail("File %s changed, md5sum check output: %s" - % (file_name, output)) + self.test.fail(f"File {file_name} changed, md5sum check output: {output}") def reopen(self, reopen_image): """ @@ -447,7 +449,7 @@ def reopen(self, reopen_image): :param reopen_image: the image that vm reopen with. """ self.vm.destroy() - self.params["image_name_%s" % self.tag] = reopen_image + self.params[f"image_name_{self.tag}"] = reopen_image self.vm.create(params=self.params) self.vm.verify_alive() @@ -482,7 +484,7 @@ def is_unplugged(): timeout = self.params.get("cancel_timeout", 10) unplugged = utils_misc.wait_for(is_unplugged, timeout=timeout) if not unplugged: - self.test.fail("Unplug timeout in %ss" % timeout) + self.test.fail(f"Unplug timeout in {timeout}s") def create_files(self): """ diff --git a/qemu/tests/block_detect_zeroes.py b/qemu/tests/block_detect_zeroes.py index 2ea474c067..6b9e0d5d08 100644 --- a/qemu/tests/block_detect_zeroes.py +++ b/qemu/tests/block_detect_zeroes.py @@ -1,12 +1,13 @@ """blockdev detect-zeroes option test""" -import time -from provider.block_devices_plug import BlockDevicesPlug -from virttest import utils_misc, utils_disk, storage, data_dir +import time +from virttest import data_dir, storage, utils_disk, utils_misc from virttest.utils_misc import get_linux_drive_path from virttest.utils_windows.drive import get_disk_props_by_serial_number +from provider.block_devices_plug import BlockDevicesPlug + def run(test, params, env): """ @@ -30,29 +31,29 @@ def _get_window_disk_index_by_serial(serial): idx_info = get_disk_props_by_serial_number(session, serial, ["Index"]) if idx_info: return idx_info["Index"] - test.fail("Not find expected disk %s" % serial) + test.fail(f"Not find expected disk {serial}") def _check_disk_in_guest(img): nonlocal guest_cmd os_type = params["os_type"] pre_guest_cmd = params.get("pre_guest_cmd") post_guest_cmd = params.get("post_guest_cmd") - logger.debug("Check disk %s in guest" % img) - if os_type == 'windows': - img_size = params.get("image_size_%s" % img) + logger.debug("Check disk %s in guest", img) + if os_type == "windows": + img_size = params.get(f"image_size_{img}") cmd = utils_misc.set_winutils_letter(session, guest_cmd) disk = _get_window_disk_index_by_serial(img) utils_disk.update_windows_disk_attributes(session, disk) logger.info("Formatting disk:%s", disk) - driver = \ - utils_disk.configure_empty_disk(session, disk, img_size, - os_type)[0] + driver = utils_disk.configure_empty_disk(session, disk, img_size, os_type)[ + 0 + ] output_path = driver + ":\\test.dat" guest_cmd = cmd.format(output_path) else: driver = get_linux_drive_path(session, img) if not driver: - 
test.fail("Can not find disk by %s" % img) + test.fail(f"Can not find disk by {img}") logger.debug(driver) if pre_guest_cmd: pre_guest_cmd = pre_guest_cmd.format(driver) @@ -60,10 +61,10 @@ def _check_disk_in_guest(img): session.cmd(pre_guest_cmd) if post_guest_cmd: post_guest_cmd = post_guest_cmd.format(driver) - output_path = "/home/{}/test.dat".format(driver) + output_path = f"/home/{driver}/test.dat" guest_cmd = guest_cmd.format(output_path) - logger.debug("Ready execute cmd:" + guest_cmd) + logger.debug("Ready execute cmd: %s", guest_cmd) session.cmd(guest_cmd) if post_guest_cmd: logger.debug(post_guest_cmd) @@ -81,9 +82,10 @@ def hotplug_unplug_test(): def block_resize_test(): image_params = params.object_params(data_img) image_size = params.get_numeric("new_image_size_stg1") - image_filename = storage.get_image_filename(image_params, - data_dir.get_data_dir()) - image_dev = vm.get_block({'file': image_filename}) + image_filename = storage.get_image_filename( + image_params, data_dir.get_data_dir() + ) + image_dev = vm.get_block({"file": image_filename}) if not image_dev: blocks_info = vm.monitor.human_monitor_cmd("info block") logger.debug(blocks_info) @@ -93,7 +95,7 @@ def block_resize_test(): logger.debug("Find %s node:%s", image_filename, image_dev) break if not image_dev: - test.fail("Can not find dev by %s" % image_filename) + test.fail(f"Can not find dev by {image_filename}") args = (None, image_size, image_dev) vm.monitor.block_resize(*args) @@ -111,7 +113,7 @@ def block_resize_test(): locals_var = locals() if guest_operation: - logger.debug("Execute guest operation %s" % guest_operation) + logger.debug("Execute guest operation %s", guest_operation) locals_var[guest_operation]() vm.destroy() diff --git a/qemu/tests/block_discard.py b/qemu/tests/block_discard.py index f0702a3b83..e86935fddc 100644 --- a/qemu/tests/block_discard.py +++ b/qemu/tests/block_discard.py @@ -1,14 +1,9 @@ import os import re -from avocado.utils import genio +from avocado.utils import genio, process from avocado.utils import path as utils_path -from avocado.utils import process - -from virttest import env_process -from virttest import error_context -from virttest import utils_disk -from virttest import utils_misc +from virttest import env_process, error_context, utils_disk, utils_misc @error_context.context_aware @@ -34,14 +29,14 @@ def get_host_scsi_disk(): Get latest scsi disk which emulated by scsi_debug module. """ scsi_disk_info = process.system_output("lsscsi").decode().splitlines() - scsi_debug = [_ for _ in scsi_disk_info if 'scsi_debug' in _][-1] + scsi_debug = [_ for _ in scsi_disk_info if "scsi_debug" in _][-1] scsi_debug = scsi_debug.split() host_id = scsi_debug[0][1:-1] device_name = scsi_debug[-1] return (host_id, device_name) def get_guest_discard_disk(session): - """" + """ " Get disk without partitions in guest. """ list_disk_cmd = "ls /dev/[sh]d*|sed 's/[0-9]//p'|uniq -u" @@ -54,8 +49,8 @@ def get_provisioning_mode(device, host_id): on params for scsi_debug module. 
""" device_name = os.path.basename(device) - path = "/sys/block/%s/device/scsi_disk" % device_name - path += "/%s/provisioning_mode" % host_id + path = f"/sys/block/{device_name}/device/scsi_disk" + path += f"/{host_id}/provisioning_mode" return genio.read_one_line(path).strip() def get_allocation_bitmap(): @@ -65,14 +60,14 @@ def get_allocation_bitmap(): path = "/sys/bus/pseudo/drivers/scsi_debug/map" try: return genio.read_one_line(path).strip() - except IOError: - test.log.warn("block allocation bitmap not exists") + except OSError: + test.log.warning("block allocation bitmap not exists") return "" def _check_disk_partitions_number(): - """ Check the data disk partitions number. """ + """Check the data disk partitions number.""" disks = utils_disk.get_linux_disks(session, True) - return len(re.findall(r'%s\d+' % device_name[5:], ' '.join(disks))) == 1 + return len(re.findall(rf"{device_name[5:]}\d+", " ".join(disks))) == 1 # destroy all vms to avoid emulated disk marked drity before start test for vm in env.get_all_vms(): @@ -93,14 +88,13 @@ def _check_disk_partitions_number(): vm_name = params["main_vm"] test_image = "scsi_debug" params["start_vm"] = "yes" - params["image_name_%s" % test_image] = disk_name - params["image_format_%s" % test_image] = "raw" - params["image_raw_device_%s" % test_image] = "yes" - params["force_create_image_%s" % test_image] = "no" + params[f"image_name_{test_image}"] = disk_name + params[f"image_format_{test_image}"] = "raw" + params[f"image_raw_device_{test_image}"] = "yes" + params[f"force_create_image_{test_image}"] = "no" params["images"] = " ".join([params["images"], test_image]) - error_context.context("boot guest with disk '%s'" % disk_name, - test.log.info) + error_context.context(f"boot guest with disk '{disk_name}'", test.log.info) # boot guest with scsi_debug disk env_process.preprocess_vm(test, params, env, vm_name) vm = env.get_vm(vm_name) @@ -108,8 +102,7 @@ def _check_disk_partitions_number(): timeout = float(params.get("login_timeout", 240)) session = vm.wait_for_login(timeout=timeout) - error_context.context("Fresh block allocation bitmap before test.", - test.log.info) + error_context.context("Fresh block allocation bitmap before test.", test.log.info) device_name = get_guest_discard_disk(session) rewrite_disk_cmd = params["rewrite_disk_cmd"] rewrite_disk_cmd = rewrite_disk_cmd.replace("DISK", device_name) @@ -120,18 +113,20 @@ def _check_disk_partitions_number(): test.log.debug("bitmap before test: %s", bitmap_before_trim) test.fail("bitmap should be continuous before fstrim") - error_context.context("Create partition on '%s' in guest" % device_name, - test.log.info) - session.cmd(params['create_partition_cmd'].replace("DISK", device_name)) + error_context.context( + f"Create partition on '{device_name}' in guest", test.log.info + ) + session.cmd(params["create_partition_cmd"].replace("DISK", device_name)) if not utils_misc.wait_for(_check_disk_partitions_number, 30, step=3.0): - test.error('Failed to get a partition on %s.' 
% device_name) + test.error(f"Failed to get a partition on {device_name}.") - error_context.context("format disk '%s' in guest" % device_name, test.log.info) + error_context.context(f"format disk '{device_name}' in guest", test.log.info) session.cmd(params["format_disk_cmd"].replace("DISK", device_name)) - error_context.context("mount disk with discard options '%s'" % device_name, - test.log.info) + error_context.context( + f"mount disk with discard options '{device_name}'", test.log.info + ) mount_disk_cmd = params["mount_disk_cmd"] mount_disk_cmd = mount_disk_cmd.replace("DISK", device_name) session.cmd(mount_disk_cmd) @@ -143,7 +138,9 @@ def _check_disk_partitions_number(): bitmap_after_trim = get_allocation_bitmap() if not re.match(r"\d+-\d+,.*\d+-\d+$", bitmap_after_trim): test.log.debug("bitmap after test: %s", bitmap_before_trim) - test.fail("discard command doesn't issue" - "to scsi_debug disk, please report bug for qemu") + test.fail( + "discard command doesn't issue" + "to scsi_debug disk, please report bug for qemu" + ) if vm: vm.destroy() diff --git a/qemu/tests/block_discard_hotplug.py b/qemu/tests/block_discard_hotplug.py index f253ddfc10..a77a4037ec 100644 --- a/qemu/tests/block_discard_hotplug.py +++ b/qemu/tests/block_discard_hotplug.py @@ -3,11 +3,9 @@ """ from avocado.utils import process +from virttest import data_dir, error_context, storage -from virttest import storage -from virttest import error_context from provider.block_devices_plug import BlockDevicesPlug -from virttest import data_dir @error_context.context_aware @@ -32,8 +30,8 @@ def run(test, params, env): """ def get_scsi_debug_disk(): - """" - Get scsi debug disk on host which created as scsi-block. + """ " + Get scsi debug disk on host which created as scsi-block. """ cmd = "lsblk -S -n -p|grep scsi_debug" status, output = process.getstatusoutput(cmd) @@ -64,21 +62,19 @@ def check_disk_allocation(): if scsi_debug == "yes": disk_name = get_scsi_debug_disk() - vm.params["image_name_%s" % data_tag] = disk_name + vm.params[f"image_name_{data_tag}"] = disk_name else: image_params = params.object_params(data_tag) - disk_name = storage.get_image_filename(image_params, - data_dir.get_data_dir()) + disk_name = storage.get_image_filename(image_params, data_dir.get_data_dir()) timeout = float(params.get("login_timeout", 240)) session = vm.wait_for_login(timeout=timeout) plug = BlockDevicesPlug(vm) error_context.context("Hot-plug discarded disk in guest.", test.log.info) plug.hotplug_devs_serial(data_tag) - guest_disk_name = '/dev/' + plug[0] + guest_disk_name = "/dev/" + plug[0] - guest_format_command = params["guest_format_command"].format( - guest_disk_name) + guest_format_command = params["guest_format_command"].format(guest_disk_name) guest_dd_command = params["guest_dd_command"] guest_rm_command = params["guest_rm_command"] diff --git a/qemu/tests/block_discard_write_same.py b/qemu/tests/block_discard_write_same.py index 4a9488584b..f9cc5a8214 100644 --- a/qemu/tests/block_discard_write_same.py +++ b/qemu/tests/block_discard_write_same.py @@ -1,13 +1,11 @@ """sg_write_same command testing for discard feature""" + import os from avocado.utils import process -from virttest import data_dir -from virttest import env_process -from virttest import error_context -from virttest import storage -from virttest.utils_misc import get_linux_drive_path +from virttest import data_dir, env_process, error_context, storage from virttest import data_dir as virttest_data_dir +from virttest.utils_misc import get_linux_drive_path 
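# The import reshuffling in these hunks follows the isort-style grouping that
# a ruff/isort setup typically enforces: standard library first, third-party
# (avocado, virttest) next, local packages (provider, qemu.tests) last, each
# group alphabetized, merged into single "from x import a, b" statements and
# separated by a blank line.  Schematic header built only from module names
# that already appear in the hunks above; it resolves inside this test
# repository's environment:
import os
import re

from avocado.utils import process
from virttest import data_dir, env_process, error_context, storage

from provider.block_devices_plug import BlockDevicesPlug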
@error_context.context_aware @@ -35,14 +33,13 @@ def _run_sg_write_same(dev): host_file = os.path.join(deps_dir, file_name) guest_file = guest_dir + file_name vm.copy_files_to(host_file, guest_dir) - status, output = session.cmd_status_output( - "$SHELL " + guest_file + " " + dev) + status, output = session.cmd_status_output("$SHELL " + guest_file + " " + dev) if status != 0: test.fail("run sg_write_same failed:" + output) test.log.debug(output) def _get_scsi_debug_disk(guest_session=None): - """" + """ " Get scsi debug disk on host or guest which created as scsi-block. """ cmd = "lsblk -S -n -p|grep scsi_debug" @@ -58,7 +55,7 @@ def _get_scsi_debug_disk(guest_session=None): return output.split()[0] def _get_sha1sum(target, guest_session=None): - cmd = "sha1sum %s | awk '{print $1}'" % target + cmd = f"sha1sum {target} | awk '{{print $1}}'" if guest_session: return guest_session.cmd_output(cmd).strip() return process.system_output(cmd, shell=True).decode() @@ -79,21 +76,19 @@ def _show_blocks_info(target): if scsi_debug == "yes": params["start_vm"] = "yes" disk_name = _get_scsi_debug_disk() - params["image_name_%s" % data_tag] = disk_name + params[f"image_name_{data_tag}"] = disk_name # boot guest with scsi_debug disk env_process.preprocess_vm(test, params, env, vm_name) else: image_params = params.object_params(data_tag) - disk_name = storage.get_image_filename(image_params, - data_dir.get_data_dir()) + disk_name = storage.get_image_filename(image_params, data_dir.get_data_dir()) vm = env.get_vm(vm_name) vm.verify_alive() timeout = float(params.get("login_timeout", 240)) session = vm.wait_for_login(timeout=timeout) - error_context.context("Boot guest with disk '%s'" % disk_name, - test.log.info) + error_context.context(f"Boot guest with disk '{disk_name}'", test.log.info) guest_disk_drive = get_linux_drive_path(session, disk_serial) if not guest_disk_drive: test.fail("Can not get data disk in guest.") @@ -111,4 +106,4 @@ def _show_blocks_info(target): host_sha1sum = _get_sha1sum(disk_name) if guest_sha1sum != host_sha1sum: - test.fail("Unmatched sha1sum %s:%s" % (guest_sha1sum, host_sha1sum)) + test.fail(f"Unmatched sha1sum {guest_sha1sum}:{host_sha1sum}") diff --git a/qemu/tests/block_during_io.py b/qemu/tests/block_during_io.py index 1617127a73..1f7dc3a2e5 100644 --- a/qemu/tests/block_during_io.py +++ b/qemu/tests/block_during_io.py @@ -1,12 +1,10 @@ import re import time -from virttest import utils_test -from virttest import utils_misc -from virttest import utils_disk -from virttest import error_context -from provider.storage_benchmark import generate_instance +from virttest import error_context, utils_disk, utils_misc, utils_test + from provider import win_driver_utils +from provider.storage_benchmark import generate_instance @error_context.context_aware @@ -18,10 +16,11 @@ def run(test, params, env): :param params: Dictionary with the test parameters :param env: Dictionary with test environment. """ + def _get_data_disks_linux(): - """ Get the data disks by serial or wwn options in linux. 
""" - for data_image in params['images'].split()[1:]: - extra_params = params.get("blk_extra_params_%s" % data_image, '') + """Get the data disks by serial or wwn options in linux.""" + for data_image in params["images"].split()[1:]: + extra_params = params.get(f"blk_extra_params_{data_image}", "") match = re.search(r"(serial|wwn)=(\w+)", extra_params, re.M) if match: drive_id = match.group(2) @@ -29,66 +28,72 @@ def _get_data_disks_linux(): continue drive_path = utils_misc.get_linux_drive_path(session, drive_id) if not drive_path: - test.error("Failed to get '%s' drive path" % data_image) - yield drive_path[5:], params.object_params(data_image)['image_size'] + test.error(f"Failed to get '{data_image}' drive path") + yield drive_path[5:], params.object_params(data_image)["image_size"] def _get_data_disks_win(): - """ Get the data disks in windows. """ - for data_image in params['images'].split()[1:]: - size = params.object_params(data_image)['image_size'] + """Get the data disks in windows.""" + for data_image in params["images"].split()[1:]: + size = params.object_params(data_image)["image_size"] yield utils_disk.get_windows_disks_index(session, size)[0], size def get_data_disks(): - """ Get the data disks. """ + """Get the data disks.""" _get_disks = _get_data_disks_win if windows else _get_data_disks_linux - for disk, size in _get_disks(): - yield disk, size + yield from _get_disks() def configure_data_disks(): - """ Configure the data disks. """ + """Configure the data disks.""" if windows: utils_disk.update_windows_disk_attributes( - session, (disk for disk, _ in get_data_disks())) - return [utils_disk.configure_empty_disk( - session, disk, size, os_type)[0] for disk, size in get_data_disks()] + session, (disk for disk, _ in get_data_disks()) + ) + return [ + utils_disk.configure_empty_disk(session, disk, size, os_type)[0] + for disk, size in get_data_disks() + ] def get_win_drive_letters_after_reboot(): - """ Get the drive letters after reboot in windows. """ + """Get the drive letters after reboot in windows.""" new_mount_points = utils_misc.get_windows_drive_letters( - vm.wait_for_login(timeout=362)) + vm.wait_for_login(timeout=362) + ) for mount_point in fixed_mount_points: # pylint: disable=E0606 new_mount_points.remove(mount_point) diff_num = len(orig_mount_points) - len(new_mount_points) if diff_num != 0: - test.error('No found the corresponding drive letters ' - 'in %s disks.' % diff_num) + test.error( + "No found the corresponding drive letters " f"in {diff_num} disks." + ) return new_mount_points def run_iozone(mount_points): - """ Run iozone inside guest. """ - iozone = generate_instance(params, vm, 'iozone') + """Run iozone inside guest.""" + iozone = generate_instance(params, vm, "iozone") try: for mount_point in mount_points: - iozone.run(params['iozone_cmd_opitons'] % mount_point, - int(params['iozone_timeout'])) + iozone.run( + params["iozone_cmd_opitons"] % mount_point, + int(params["iozone_timeout"]), + ) finally: iozone.clean() def run_stress(name, mount_points): - """ Run the stress inside guest. """ + """Run the stress inside guest.""" run_stress_maps[name](mount_points) def is_stress_alive(session, name): - """ Check whether the stress is alive. 
""" - name = name.upper() + '.EXE' if windows else name - chk_cmd = 'TASKLIST /FI "IMAGENAME eq %s' if windows else 'pgrep -xl %s' + """Check whether the stress is alive.""" + name = name.upper() + ".EXE" if windows else name + chk_cmd = 'TASKLIST /FI "IMAGENAME eq %s' if windows else "pgrep -xl %s" return re.search(name, session.cmd_output(chk_cmd % name), re.I | re.M) def _change_vm_power(): - """ Change the vm power. """ - method, command = params['command_opts'].split(',') - test.log.info('Sending command(%s): %s', method, command) - if method == 'shell': + """Change the vm power.""" + method, command = params["command_opts"].split(",") + test.log.info("Sending command(%s): %s", method, command) + if method == "shell": power_session = vm.wait_for_login(timeout=360) power_session.sendline(command) else: @@ -98,36 +103,36 @@ def _change_vm_power(): raise test.fail("Not received SHUTDOWN QMP event.") def _check_vm_status(): - """ Check the status of vm. """ - action = 'shutdown' if shutdown_vm else 'login' - if not getattr(vm, 'wait_for_%s' % action)(timeout=362): - test.fail('Failed to %s vm.' % action) - test.log.info('%s vm successfully.', action.capitalize()) + """Check the status of vm.""" + action = "shutdown" if shutdown_vm else "login" + if not getattr(vm, f"wait_for_{action}")(timeout=362): + test.fail(f"Failed to {action} vm.") + test.log.info("%s vm successfully.", action.capitalize()) def run_power_management_test(): - """ Run power management test inside guest. """ + """Run power management test inside guest.""" _change_vm_power() _check_vm_status() def run_bg_test(target, args=(), kwargs={}): - """ Run the test background. """ + """Run the test background.""" error_context.context(target.__doc__, test.log.info) thread = utils_misc.InterruptedThread(target, args, kwargs) thread.daemon = True thread.start() return thread - shutdown_vm = params.get('shutdown_vm', 'no') == 'yes' - reboot = params.get('reboot_vm', 'no') == 'yes' + shutdown_vm = params.get("shutdown_vm", "no") == "yes" + reboot = params.get("reboot_vm", "no") == "yes" - with_data_disks = params.get('with_data_disks', 'yes') == 'yes' - stress_name = params['stress_name'] - run_stress_maps = {'iozone': run_iozone} - stress_thread_timeout = int(params.get('stress_thread_timeout', 60)) - bg_test_thread_timeout = int(params.get('bg_test_thread_timeout', 600)) - sleep_time = int(params.get('sleep_time', 30)) - os_type = params['os_type'] - windows = os_type == 'windows' + with_data_disks = params.get("with_data_disks", "yes") == "yes" + stress_name = params["stress_name"] + run_stress_maps = {"iozone": run_iozone} + stress_thread_timeout = int(params.get("stress_thread_timeout", 60)) + bg_test_thread_timeout = int(params.get("bg_test_thread_timeout", 600)) + sleep_time = int(params.get("sleep_time", 30)) + os_type = params["os_type"] + windows = os_type == "windows" vm = env.get_vm(params["main_vm"]) vm.verify_alive() @@ -135,24 +140,26 @@ def run_bg_test(target, args=(), kwargs={}): if windows: session = utils_test.qemu.windrv_check_running_verifier( - session, vm, test, params["driver_name"]) + session, vm, test, params["driver_name"] + ) if with_data_disks: orig_mount_points = configure_data_disks() - fixed_mount_points = set(utils_misc.get_windows_drive_letters - (session)) ^ set(orig_mount_points) + fixed_mount_points = set(utils_misc.get_windows_drive_letters(session)) ^ set( + orig_mount_points + ) else: - orig_mount_points = ['C'] if windows else ['/home'] + orig_mount_points = ["C"] if windows else ["/home"] 
mount_points = orig_mount_points stress_thread = run_bg_test(run_stress, (stress_name, mount_points)) if not utils_misc.wait_for( - lambda: is_stress_alive(session, stress_name), 60, step=3.0): - test.error('The %s stress is not alive.' % stress_name) + lambda: is_stress_alive(session, stress_name), 60, step=3.0 + ): + test.error(f"The {stress_name} stress is not alive.") time.sleep(sleep_time) if not is_stress_alive(session, stress_name): - test.error( - 'The %s stress is not alive after %s.' % (stress_name, sleep_time)) + test.error(f"The {stress_name} stress is not alive after {sleep_time}.") if shutdown_vm or reboot: bg_test_target = run_power_management_test diff --git a/qemu/tests/block_hotplug.py b/qemu/tests/block_hotplug.py index 506193a249..07e415b45a 100644 --- a/qemu/tests/block_hotplug.py +++ b/qemu/tests/block_hotplug.py @@ -2,43 +2,42 @@ import re from avocado.utils import process - -from virttest import error_context -from virttest import utils_disk -from virttest import utils_misc -from virttest import utils_numeric -from virttest import utils_test - +from virttest import error_context, utils_disk, utils_misc, utils_numeric, utils_test from virttest.qemu_capabilities import Flags from virttest.qemu_devices import qdevices -LOG_JOB = logging.getLogger('avocado.test') +LOG_JOB = logging.getLogger("avocado.test") def find_all_disks(session, windows): - """ Find all disks in guest. """ + """Find all disks in guest.""" global all_disks if windows: - all_disks = set(session.cmd('wmic diskdrive get index').split()[1:]) + all_disks = set(session.cmd("wmic diskdrive get index").split()[1:]) else: all_disks = utils_misc.list_linux_guest_disks(session) return all_disks -def wait_plug_disks(session, action, disks_before_plug, excepted_num, - windows, test): - """ Wait plug disks completely. """ - if not utils_misc.wait_for(lambda: len(disks_before_plug ^ find_all_disks( - session, windows)) == excepted_num, 60, step=1.5): - disks_info_win = ('wmic logicaldisk get drivetype,name,description ' - '& wmic diskdrive list brief /format:list') - disks_info_linux = 'lsblk -a' - disks_info = session.cmd( - disks_info_win if windows else disks_info_linux) +def wait_plug_disks(session, action, disks_before_plug, excepted_num, windows, test): + """Wait plug disks completely.""" + if not utils_misc.wait_for( + lambda: len(disks_before_plug ^ find_all_disks(session, windows)) + == excepted_num, + 60, + step=1.5, + ): + disks_info_win = ( + "wmic logicaldisk get drivetype,name,description " + "& wmic diskdrive list brief /format:list" + ) + disks_info_linux = "lsblk -a" + disks_info = session.cmd(disks_info_win if windows else disks_info_linux) LOG_JOB.debug("The details of disks:\n %s", disks_info) - test.fail("Failed to {0} devices from guest, need to {0}: {1}, " - "actual {0}: {2}".format(action, excepted_num, - len(disks_before_plug ^ all_disks))) + test.fail( + f"Failed to {action} devices from guest, need to {action}: {excepted_num}, " + f"actual {action}: {len(disks_before_plug ^ all_disks)}" + ) return disks_before_plug ^ all_disks @@ -58,58 +57,68 @@ def run(test, params, env): """ def run_sub_test(test_name): - """ Run subtest before/after hotplug/unplug device. """ - error_context.context("Running sub test '%s'." % test_name, test.log.info) + """Run subtest before/after hotplug/unplug device.""" + error_context.context(f"Running sub test '{test_name}'.", test.log.info) utils_test.run_virt_sub_test(test, params, env, test_name) def create_block_devices(image): - """ Create block devices. 
""" + """Create block devices.""" return vm.devices.images_define_by_params( - image, params.object_params(image), 'disk') + image, params.object_params(image), "disk" + ) def get_block_devices(objs): - """ Get block devices. """ + """Get block devices.""" if isinstance(objs, str): return [dev for dev in vm.devices if dev.get_param("id") == objs] - dtype = qdevices.QBlockdevNode if vm.check_capability( - Flags.BLOCKDEV) else qdevices.QDrive + dtype = ( + qdevices.QBlockdevNode + if vm.check_capability(Flags.BLOCKDEV) + else qdevices.QDrive + ) return [dev for dev in objs if not isinstance(dev, dtype)] def plug_block_devices(action, plug_devices): - """ Plug block devices. """ - error_context.context("%s block device (iteration %d)" % - (action.capitalize(), iteration), test.log.info) + """Plug block devices.""" + error_context.context( + "%s block device (iteration %d)" % (action.capitalize(), iteration), + test.log.info, + ) session = vm.wait_for_login(timeout=timeout) disks_before_plug = find_all_disks(session, windows) - plug_devices = plug_devices if action == 'hotplug' else plug_devices[::-1] + plug_devices = plug_devices if action == "hotplug" else plug_devices[::-1] for dev in plug_devices: - ret = getattr(vm.devices, 'simple_%s' % action)(dev, vm.monitor) + ret = getattr(vm.devices, f"simple_{action}")(dev, vm.monitor) if ret[1] is False: - test.fail("Failed to %s device '%s', %s." % (action, dev, ret[0])) + test.fail(f"Failed to {action} device '{dev}', {ret[0]}.") - num = 1 if action == 'hotplug' else len(data_imgs) - plugged_disks = wait_plug_disks(session, action, disks_before_plug, num, windows, test) + num = 1 if action == "hotplug" else len(data_imgs) + plugged_disks = wait_plug_disks( + session, action, disks_before_plug, num, windows, test + ) session.close() return plugged_disks def format_disk_win(): - """ Format disk in windows. """ - error_context.context("Format disk %s in windows." % new_disk, test.log.info) # pylint: disable=E0606 + """Format disk in windows.""" + error_context.context(f"Format disk {new_disk} in windows.", test.log.info) # pylint: disable=E0606 session = vm.wait_for_login(timeout=timeout) if disk_index is None and disk_letter is None: drive_letters.append( utils_disk.configure_empty_windows_disk( - session, new_disk, params['image_size_%s' % img])[0]) + session, new_disk, params[f"image_size_{img}"] + )[0] + ) elif disk_index and disk_letter: utils_misc.format_windows_disk( - session, disk_index[index], disk_letter[index]) + session, disk_index[index], disk_letter[index] + ) drive_letters.append(disk_letter[index]) session.close() def run_io_test(): - """ Run io test on the hot plugged disks. 
""" - error_context.context( - "Run io test on the hot plugged disks.", test.log.info) + """Run io test on the hot plugged disks.""" + error_context.context("Run io test on the hot plugged disks.", test.log.info) session = vm.wait_for_login(timeout=timeout) if windows: drive_letter = drive_letters[index] @@ -129,14 +138,16 @@ def get_disk_size(did): """ session = vm.wait_for_login(timeout=timeout) if windows: - script = '{}_{}'.format("disk", utils_misc.generate_random_string(6)) - cmd = "echo %s > {0} && diskpart /s {0} && del /f {0}".format(script) - p = r'Disk\s+%s\s+[A-Z]+\s+(?P\d+\s+[A-Z]+)\s+' - disk_info = session.cmd(cmd % 'list disk') - size = re.search(p % did, disk_info, re.I | re.M).groupdict()['size'].strip() + script = "{}_{}".format("disk", utils_misc.generate_random_string(6)) + cmd = f"echo %s > {script} && diskpart /s {script} && del /f {script}" + p = r"Disk\s+%s\s+[A-Z]+\s+(?P\d+\s+[A-Z]+)\s+" + disk_info = session.cmd(cmd % "list disk") + size = ( + re.search(p % did, disk_info, re.I | re.M).groupdict()["size"].strip() + ) else: size = utils_disk.get_linux_disks(session)[did][1].strip() - test.log.info('The size of disk %s is %s', did, size) + test.log.info("The size of disk %s is %s", did, size) session.close() return size @@ -148,12 +159,15 @@ def check_disk_size(did, excepted_size): :param excepted_size: the excepted size """ error_context.context( - 'Check whether the size of the disk[%s] hot plugged is equal to ' - 'excepted size(%s).' % (did, excepted_size), test.log.info) + f"Check whether the size of the disk[{did}] hot plugged is equal to " + f"excepted size({excepted_size}).", + test.log.info, + ) value, unit = re.search(r"(\d+\.?\d*)\s*(\w?)", excepted_size).groups() if utils_numeric.normalize_data_size(get_disk_size(did), unit) != value: - test.fail('The size of disk %s is not equal to excepted size(%s).' - % (did, excepted_size)) + test.fail( + f"The size of disk {did} is not equal to excepted size({excepted_size})." 
+ ) data_imgs = params.get("images").split()[1:] disk_index = params.objects("disk_index") @@ -161,14 +175,14 @@ def check_disk_size(did, excepted_size): disk_op_cmd = params.get("disk_op_cmd") disk_op_timeout = int(params.get("disk_op_timeout", 360)) timeout = int(params.get("login_timeout", 360)) - windows = params["os_type"] == 'windows' + windows = params["os_type"] == "windows" sub_test_after_plug = params.get("sub_type_after_plug") sub_test_after_unplug = params.get("sub_type_after_unplug") sub_test_before_unplug = params.get("sub_type_before_unplug") - shutdown_after_plug = sub_test_after_plug == 'shutdown' - need_plug = params.get("need_plug", 'no') == "yes" - need_check_disk_size = params.get('check_disk_size', 'no') == 'yes' + shutdown_after_plug = sub_test_after_plug == "shutdown" + need_plug = params.get("need_plug", "no") == "yes" + need_check_disk_size = params.get("check_disk_size", "no") == "yes" drive_letters = [] unplug_devs = [] @@ -183,19 +197,24 @@ def check_disk_size(did, excepted_size): for index, img in enumerate(data_imgs): data_devs = create_block_devices(img) if need_plug: - new_disk = plug_block_devices('hotplug', data_devs).pop() + new_disk = plug_block_devices("hotplug", data_devs).pop() if windows: if iteration == 0: format_disk_win() if need_check_disk_size: - check_disk_size(new_disk if windows else new_disk[5:], - params['image_size_%s' % img]) + check_disk_size( + new_disk if windows else new_disk[5:], + params[f"image_size_{img}"], + ) if disk_op_cmd: run_io_test() - unplug_devs.extend(get_block_devices( - data_devs) if need_plug else get_block_devices(img)) + unplug_devs.extend( + get_block_devices(data_devs) + if need_plug + else get_block_devices(img) + ) if sub_test_after_plug: run_sub_test(sub_test_after_plug) @@ -205,7 +224,7 @@ def check_disk_size(did, excepted_size): if sub_test_before_unplug: run_sub_test(sub_test_before_unplug) - plug_block_devices('unplug', unplug_devs) + plug_block_devices("unplug", unplug_devs) del unplug_devs[:] if sub_test_after_unplug: @@ -215,10 +234,11 @@ def check_disk_size(did, excepted_size): test.log.debug("Find %s Exception:'%s'.", pid, str(e)) if pid: logdir = test.logdir - process.getoutput("gstack %s > %s/gstack.log" % (pid, logdir)) + process.getoutput(f"gstack {pid} > {logdir}/gstack.log") process.getoutput( - "timeout 20 strace -tt -T -v -f -s 32 -p %s -o " - "%s/strace.log" % (pid, logdir)) + f"timeout 20 strace -tt -T -v -f -s 32 -p {pid} -o " + f"{logdir}/strace.log" + ) else: test.log.debug("VM dead...") raise e diff --git a/qemu/tests/block_hotplug_in_pause.py b/qemu/tests/block_hotplug_in_pause.py index 7d620b11ed..226436f01e 100644 --- a/qemu/tests/block_hotplug_in_pause.py +++ b/qemu/tests/block_hotplug_in_pause.py @@ -1,12 +1,11 @@ import re import time -from virttest import error_context -from virttest import utils_misc -from virttest import utils_disk -from virttest.qemu_devices import qdevices +from virttest import error_context, utils_disk, utils_misc from virttest.qemu_capabilities import Flags -from virttest.qemu_devices.utils import (DeviceError, DeviceUnplugError) +from virttest.qemu_devices import qdevices +from virttest.qemu_devices.utils import DeviceError, DeviceUnplugError + from provider import win_driver_utils @@ -29,6 +28,7 @@ def run(test, params, env): :param params: Dictionary with the test parameters. :param env: Dictionary with test environment. """ + def find_disk(session, cmd): """ Find all disks in guest. 
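# A minimal sketch of the polling idiom used throughout these hotplug tests.
# The real helper is virttest's utils_misc.wait_for; the signature and behaviour
# below are assumptions for illustration only, not the library implementation.
import time

def wait_for(func, timeout, first=0.0, step=1.0):
    """Call func every `step` seconds until it returns a truthy value or `timeout` elapses."""
    time.sleep(first)
    deadline = time.monotonic() + timeout
    while time.monotonic() < deadline:
        result = func()
        if result:
            return result
        time.sleep(step)
    return None

# Usage mirroring the tests above (names are placeholders taken from the surrounding diff):
# wait_for(lambda: len(disks_before_plug ^ find_all_disks(session, windows)) == 1, 60, step=1.5)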
@@ -41,10 +41,10 @@ def find_disk(session, cmd): elif params.get("os_type") == "windows": pattern = r"^\d+\s+\d+" else: - test.cancel("Unsupported OS type '%s'" % params.get("os_type")) + test.cancel("Unsupported OS type '{}'".format(params.get("os_type"))) output = session.cmd_output_safe(cmd) - disks = re.findall(pattern, output, re.M) # pylint: disable=E0606 + disks = re.findall(pattern, output, re.M) # pylint: disable=E0606 return disks def get_plug_unplug_disks(disk1, disk2): @@ -66,24 +66,27 @@ def block_hotplug(image_name): :return: List of objects for hotplug disk. """ image_params = params.object_params(image_name) - devs = vm.devices.images_define_by_params(image_name, - image_params, 'disk') + devs = vm.devices.images_define_by_params(image_name, image_params, "disk") for dev in devs: ret = vm.devices.simple_hotplug(dev, vm.monitor) if ret[1] is False: - test.fail("Failed to hotplug device '%s'." - "Output:\n%s" % (dev, ret[0])) - dtype = qdevices.QBlockdevNode if vm.check_capability( - Flags.BLOCKDEV) else qdevices.QDrive + test.fail(f"Failed to hotplug device '{dev}'." f"Output:\n{ret[0]}") + dtype = ( + qdevices.QBlockdevNode + if vm.check_capability(Flags.BLOCKDEV) + else qdevices.QDrive + ) devs = [dev for dev in devs if not isinstance(dev, dtype)] return devs def verify_deleted_event(device_list, timeout=120): def get_deleted_event(dev_qid): for event in vm.monitor.get_events(): - if ('DEVICE_DELETED' in event.get("event") and - 'device' in event.get('data') and - dev_qid == event.get('data')['device']): + if ( + "DEVICE_DELETED" in event.get("event") + and "device" in event.get("data") + and dev_qid == event.get("data")["device"] + ): return True return False @@ -94,17 +97,21 @@ def get_deleted_event(dev_qid): continue dev_qid = dev.get_qid() if not utils_misc.wait_for( - lambda: get_deleted_event(dev_qid), timeout, 0, 0): - test.fail('Failed to get deleted event of %s ' - 'during %s sec.' % (dev_qid, timeout)) - vm.monitor.clear_event('DEVICE_DELETED') + lambda: get_deleted_event(dev_qid), timeout, 0, 0 + ): + test.fail( + f"Failed to get deleted event of {dev_qid} " + f"during {timeout} sec." + ) + vm.monitor.clear_event("DEVICE_DELETED") def verify_unplug_devices_by_qtree(device_list, timeout=30): """verify the unplug devices in qtree""" for dev in device_list: if not utils_misc.wait_for( - lambda: dev.verify_unplug('', vm.monitor), timeout, 1, 5): - test.error('The %s is still in qtree after unplugging.' 
% dev) + lambda: dev.verify_unplug("", vm.monitor), timeout, 1, 5 + ): + test.error(f"The {dev} is still in qtree after unplugging.") def unplug_backend_devices(device_list): """Unplug the backend devices""" @@ -116,17 +123,21 @@ def unplug_backend_devices(device_list): if Flags.BLOCKDEV in vm.devices.caps: format_node = vm.devices[drive] nodes = [format_node] - nodes.extend((n for n in format_node.get_child_nodes())) + nodes.extend(n for n in format_node.get_child_nodes()) for node in nodes: if not node.verify_unplug( - node.unplug(vm.monitor), vm.monitor): + node.unplug(vm.monitor), vm.monitor + ): raise DeviceUnplugError( - node, "Failed to unplug blockdev node.", - vm.devices) - vm.devices.remove(node, True if isinstance( - node, qdevices.QBlockdevFormatNode) else False) - if not isinstance(node, - qdevices.QBlockdevFormatNode): + node, "Failed to unplug blockdev node.", vm.devices + ) + vm.devices.remove( + node, + True + if isinstance(node, qdevices.QBlockdevFormatNode) + else False, + ) + if not isinstance(node, qdevices.QBlockdevFormatNode): format_node.del_child_node(node) else: vm.devices.remove(drive) @@ -136,8 +147,9 @@ def unplug_backend_devices(device_list): dev.unplug_unhook() raise DeviceUnplugError(dev, exc, vm.devices) - def block_unplug(device_list, verify_del_event=True, - verify_qtree=True, unplug_backend=True): + def block_unplug( + device_list, verify_del_event=True, verify_qtree=True, unplug_backend=True + ): """ Unplug disks and verify it in qtree @@ -146,8 +158,7 @@ def block_unplug(device_list, verify_del_event=True, for dev in reversed(device_list): out = dev.unplug(vm.monitor) if out: - test.fail("Failed to unplug device '%s'.Ouptut:\n%s" % (dev, - out)) + test.fail(f"Failed to unplug device '{dev}'.Ouptut:\n{out}") if verify_del_event: verify_deleted_event(device_list) @@ -158,8 +169,7 @@ def block_unplug(device_list, verify_del_event=True, if unplug_backend: unplug_backend_devices(device_list) - def block_check_in_guest(session, disks, blk_num, - get_disk_cmd, plug_tag="hotplug"): + def block_check_in_guest(session, disks, blk_num, get_disk_cmd, plug_tag="hotplug"): """ Check hotplug/unplug disks in guest @@ -172,13 +182,16 @@ def block_check_in_guest(session, disks, blk_num, test.log.info("Check block device in guest after %s.", plug_tag) pause = float(params.get("virtio_block_pause", 30.0)) status = utils_misc.wait_for( - lambda: len( - get_plug_unplug_disks( - disks, find_disk(session, get_disk_cmd))) == blk_num, pause) + lambda: len(get_plug_unplug_disks(disks, find_disk(session, get_disk_cmd))) + == blk_num, + pause, + ) disks = get_plug_unplug_disks(disks, find_disk(session, get_disk_cmd)) if not status: - test.fail("Failed to %s device to guest, expected: %d," - "actual: %d" % (plug_tag, blk_num, len(disks))) + test.fail( + "Failed to %s device to guest, expected: %d," + "actual: %d" % (plug_tag, blk_num, len(disks)) + ) def get_windows_drive_letters(session, index_sizes): """ @@ -191,14 +204,15 @@ def get_windows_drive_letters(session, index_sizes): for item in index_sizes: drive_indexs.append(item.split()[0]) if not utils_disk.update_windows_disk_attributes(session, drive_indexs): - test.fail("Failed to clear readonly for all disks and online " - "them in guest") + test.fail( + "Failed to clear readonly for all disks and online " "them in guest" + ) error_context.context("Format disk", test.log.info) for item in index_sizes: did, size = item.split() - drive_letter = utils_disk.configure_empty_windows_disk(session, - did, - size + "B") + drive_letter 
= utils_disk.configure_empty_windows_disk( + session, did, size + "B" + ) windows_drive_letters.extend(drive_letter) def rw_disk_in_guest(session, plug_disks, iteration): @@ -218,16 +232,16 @@ def rw_disk_in_guest(session, plug_disks, iteration): disk_op_timeout = int(params.get("disk_op_timeout", 360)) for disk in plug_disks: if params.get("os_type") not in ["linux", "windows"]: - test.cancel("Unsupported OS type '%s'" % params.get("os_type")) + test.cancel("Unsupported OS type '{}'".format(params.get("os_type"))) else: test_cmd = params.get("disk_op_cmd") % (disk, disk) if params.get("os_type") == "windows": test_cmd = utils_misc.set_winutils_letter(session, test_cmd) - status, output = session.cmd_status_output(test_cmd, - timeout=disk_op_timeout) + status, output = session.cmd_status_output( + test_cmd, timeout=disk_op_timeout + ) if status: - test.fail("Check for block device rw failed." - "Output: %s" % output) + test.fail("Check for block device rw failed." f"Output: {output}") blk_num = int(params.get("blk_num", 1)) repeat_times = int(params.get("repeat_times", 3)) @@ -244,8 +258,10 @@ def rw_disk_in_guest(session, plug_disks, iteration): for iteration in range(repeat_times): device_list = [] if params.get("need_plug") == "yes": - error_context.context("Run block hotplug/unplug for iteration:" - "%d" % iteration, test.log.info) + error_context.context( + "Run block hotplug/unplug for iteration:" "%d" % iteration, + test.log.info, + ) error_context.context("Plug device", test.log.info) disks_before_plug = find_disk(session, get_disk_cmd) @@ -260,18 +276,16 @@ def rw_disk_in_guest(session, plug_disks, iteration): if devs: device_list.extend(devs) - if is_vm_paused and params.get("resume_vm_after_hotplug", - "yes") == "yes": + if is_vm_paused and params.get("resume_vm_after_hotplug", "yes") == "yes": error_context.context("Resume vm after hotplug") vm.resume() is_vm_paused = False - block_check_in_guest(session, disks_before_plug, blk_num, - get_disk_cmd) + block_check_in_guest(session, disks_before_plug, blk_num, get_disk_cmd) if params.get("disk_op_cmd"): - plug_disks = get_plug_unplug_disks(disks_before_plug, - find_disk(session, - get_disk_cmd)) + plug_disks = get_plug_unplug_disks( + disks_before_plug, find_disk(session, get_disk_cmd) + ) rw_disk_in_guest(session, plug_disks, iteration) else: @@ -290,8 +304,7 @@ def rw_disk_in_guest(session, plug_disks, iteration): else: blk_num = 0 disks_before_unplug = disks_before_plug - block_unplug(device_list, not is_vm_paused, - not is_vm_paused, not is_vm_paused) + block_unplug(device_list, not is_vm_paused, not is_vm_paused, not is_vm_paused) if is_vm_paused: error_context.context("Resume vm after unplug") @@ -303,8 +316,9 @@ def rw_disk_in_guest(session, plug_disks, iteration): verify_unplug_devices_by_qtree(device_list) unplug_backend_devices(device_list) - block_check_in_guest(session, disks_before_unplug, - blk_num, get_disk_cmd, plug_tag="unplug") + block_check_in_guest( + session, disks_before_unplug, blk_num, get_disk_cmd, plug_tag="unplug" + ) # for windows guest, disable/uninstall driver to get memory leak based on # driver verifier is enabled diff --git a/qemu/tests/block_hotplug_negative.py b/qemu/tests/block_hotplug_negative.py index 7e7c53be5d..f4975385b0 100644 --- a/qemu/tests/block_hotplug_negative.py +++ b/qemu/tests/block_hotplug_negative.py @@ -19,6 +19,7 @@ def run(test, params, env): :param params: Dictionary with the test parameters. :param env: Dictionary with test environment. 
""" + def drive_unplug_plug(drive, vm): """ Unplug drive then replug it. @@ -39,23 +40,22 @@ def drive_unplug_plug(drive, vm): img_list = params.get("images").split() image_name = img_list[-1] image_params = params.object_params(image_name) - devs = vm.devices.images_define_by_params(image_name, - image_params, 'disk') + devs = vm.devices.images_define_by_params(image_name, image_params, "disk") drive = devs[-2] for dev in devs: try: - ret = vm.devices.simple_hotplug(dev, vm.monitor) + vm.devices.simple_hotplug(dev, vm.monitor) except Exception as e: if "QMP command 'device_add' failed" in str(e): test.log.info("Failed to hotplug device with invalid params") try: drive_unplug_plug(drive, vm) except Exception as e: - test.fail("Failed to hotplug/unplug drive with error:" - "%s") % e + test.fail("Failed to hotplug/unplug drive with error:" "%s") % e - error_context.context("Check vm is alive after drive unplug/hotplug test", - test.log.info) + error_context.context( + "Check vm is alive after drive unplug/hotplug test", test.log.info + ) session = vm.wait_for_login() if not session.is_responsive(): session.close() diff --git a/qemu/tests/block_hotplug_passthrough.py b/qemu/tests/block_hotplug_passthrough.py index 6dfb20fca6..a8887188a3 100644 --- a/qemu/tests/block_hotplug_passthrough.py +++ b/qemu/tests/block_hotplug_passthrough.py @@ -1,9 +1,8 @@ -from virttest import error_context -from virttest import utils_test -from virttest import utils_disk from avocado.utils import process -from provider.storage_benchmark import generate_instance +from virttest import error_context, utils_disk, utils_test + from provider.block_devices_plug import BlockDevicesPlug +from provider.storage_benchmark import generate_instance @error_context.context_aware @@ -24,11 +23,12 @@ def run(test, params, env): """ def create_path_disk(): - """Create a passthrough disk with scsi_debug """ + """Create a passthrough disk with scsi_debug""" process.getoutput(params["pre_command"], shell=True) disks_old = process.getoutput("ls -1d /dev/sd*", shell=True).split() - process.system_output(params["create_command"], timeout=300, - shell=True, verbose=False) + process.system_output( + params["create_command"], timeout=300, shell=True, verbose=False + ) disks_new = process.getoutput("ls -1d /dev/sd*", shell=True).split() return list(set(disks_new) - set(disks_old))[0] @@ -46,26 +46,26 @@ def format_plug_disk(session, did): ostype = params["os_type"] if ostype == "windows": if not utils_disk.update_windows_disk_attributes(session, did): - test.fail("Failed to clear readonly for all disks and online " - "them in guest") - partition = utils_disk.configure_empty_disk(session, did, - stg_image_size, ostype) + test.fail( + "Failed to clear readonly for all disks and online " "them in guest" + ) + partition = utils_disk.configure_empty_disk( + session, did, stg_image_size, ostype + ) if not partition: test.fail("Fail to format disks.") return partition[0] def run_io_test(session, partition): - """ Run io test on the hot plugged disk. 
""" - iozone_options = params.get('iozone_options') - dd_test = params.get('dd_test') + """Run io test on the hot plugged disk.""" + iozone_options = params.get("iozone_options") + dd_test = params.get("dd_test") if iozone_options: - error_context.context( - "Run iozone test on the plugged disk.", test.log.info) - iozone = generate_instance(params, vm, 'iozone') + error_context.context("Run iozone test on the plugged disk.", test.log.info) + iozone = generate_instance(params, vm, "iozone") iozone.run(iozone_options.format(partition[0])) if dd_test: - error_context.context( - "Do dd test on the plugged disk", test.log.info) + error_context.context("Do dd test on the plugged disk", test.log.info) partition = partition.split("/")[-1] session.cmd(dd_test.format(partition)) @@ -81,7 +81,8 @@ def unplug_path_disk(vm): if params["os_type"] == "windows": session = utils_test.qemu.windrv_check_running_verifier( - session, vm, test, params["driver_name"]) + session, vm, test, params["driver_name"] + ) drive_index = hotplug_path_disk(vm, create_path_disk()) run_io_test(session, format_plug_disk(session, drive_index)) diff --git a/qemu/tests/block_hotplug_scsi_hba.py b/qemu/tests/block_hotplug_scsi_hba.py index b72ea47a3a..5aa9bf4a24 100644 --- a/qemu/tests/block_hotplug_scsi_hba.py +++ b/qemu/tests/block_hotplug_scsi_hba.py @@ -1,11 +1,10 @@ import re -from virttest import error_context -from virttest import utils_misc +from virttest import error_context, utils_misc from virttest.utils_test import qemu -from provider.block_devices_plug import BlockDevicesPlug from provider import win_driver_utils +from provider.block_devices_plug import BlockDevicesPlug @error_context.context_aware @@ -27,25 +26,31 @@ def run(test, params, env): :param params: Dictionary with the test parameters. :param env: Dictionary with test environment. 
""" + def list_all_disks(session): """List all disks inside guest.""" if is_linux: return utils_misc.list_linux_guest_disks(session) - return set(session.cmd('wmic diskdrive get index').split()[1:]) + return set(session.cmd("wmic diskdrive get index").split()[1:]) def _get_scsi_host_id(session): test.log.info("Get the scsi host id which is hot plugged.") - output = session.cmd("dmesg | grep \"scsi host\" | " - "awk 'END{print}' | awk '{print $4}'") - return re.search(r'(\d+)', output).group(1) + output = session.cmd( + 'dmesg | grep "scsi host" | ' "awk 'END{print}' | awk '{print $4}'" + ) + return re.search(r"(\d+)", output).group(1) def _rescan_hba_controller_linux(session): - session.cmd('echo "- - -" > /sys/class/scsi_host/host%s/scan' % - _get_scsi_host_id(session)) + session.cmd( + f'echo "- - -" > /sys/class/scsi_host/host{_get_scsi_host_id(session)}/scan' + ) def _rescan_hba_controller_windows(session): - session.cmd('echo rescan > {0} && echo exit >> {0} && diskpart / {0} ' - '&& del /f {0}'.format('diskpart_script'), 300) + session.cmd( + "echo rescan > {0} && echo exit >> {0} && diskpart / {0} " + "&& del /f {0}".format("diskpart_script"), + 300, + ) def rescan_hba_controller(session): """Rescan the scsi hba controller.""" @@ -55,24 +60,28 @@ def rescan_hba_controller(session): else: _rescan_hba_controller_windows(session) - is_linux = params['os_type'] == 'linux' + is_linux = params["os_type"] == "linux" vm = env.get_vm(params["main_vm"]) vm.verify_alive() session = vm.wait_for_login(timeout=360) if not is_linux: session = qemu.windrv_check_running_verifier( - session, vm, test, params['driver_name'], 360) + session, vm, test, params["driver_name"], 360 + ) orig_disks = list_all_disks(session) plug = BlockDevicesPlug(vm) - plug.hotplug_devs_serial(interval=int(params['hotplug_interval'])) - if params['need_rescan_hba'] == 'yes': + plug.hotplug_devs_serial(interval=int(params["hotplug_interval"])) + if params["need_rescan_hba"] == "yes": if utils_misc.wait_for( - lambda: bool(list_all_disks(session) - orig_disks), 30, step=3): - test.log.debug('The all disks: %s.', list_all_disks(session)) - test.fail('Found a new disk with virtio-scsi-pci.hotplug=off ' - 'before rescan scsi hba controller.') + lambda: bool(list_all_disks(session) - orig_disks), 30, step=3 + ): + test.log.debug("The all disks: %s.", list_all_disks(session)) + test.fail( + "Found a new disk with virtio-scsi-pci.hotplug=off " + "before rescan scsi hba controller." 
+ ) rescan_hba_controller(session) if not is_linux: win_driver_utils.memory_leak_check(vm, test, params) diff --git a/qemu/tests/block_hotplug_with_cpu_hotplug.py b/qemu/tests/block_hotplug_with_cpu_hotplug.py index 324d0470b8..930f496276 100644 --- a/qemu/tests/block_hotplug_with_cpu_hotplug.py +++ b/qemu/tests/block_hotplug_with_cpu_hotplug.py @@ -1,10 +1,8 @@ -from virttest import error_context -from virttest import utils_disk, utils_misc +from virttest import error_context, utils_disk, utils_misc from virttest.utils_misc import get_linux_drive_path from virttest.utils_windows.drive import get_disk_props_by_serial_number -from provider import cpu_utils -from provider import win_wora +from provider import cpu_utils, win_wora from provider.block_devices_plug import BlockDevicesPlug @@ -30,22 +28,22 @@ def _get_window_disk_index_by_serial(serial): idx_info = get_disk_props_by_serial_number(session, serial, ["Index"]) if idx_info: return idx_info["Index"] - test.fail("Not find expected disk %s" % serial) + test.fail(f"Not find expected disk {serial}") def _check_disk_in_guest(img): os_type = params["os_type"] - test.log.debug("Check disk %s in guest" % img) - if os_type == 'windows': - img_size = params.get("image_size_%s" % img) + test.log.debug("Check disk %s in guest", img) + if os_type == "windows": + img_size = params.get(f"image_size_{img}") cmd = utils_misc.set_winutils_letter(session, guest_cmd) disk = _get_window_disk_index_by_serial(img) utils_disk.update_windows_disk_attributes(session, disk) test.log.info("Clean disk:%s", disk) utils_disk.clean_partition_windows(session, disk) test.log.info("Formatting disk:%s", disk) - driver = \ - utils_disk.configure_empty_disk(session, disk, img_size, - os_type)[0] + driver = utils_disk.configure_empty_disk(session, disk, img_size, os_type)[ + 0 + ] output_path = driver + ":\\test.dat" cmd = cmd.format(output_path) else: @@ -70,12 +68,9 @@ def _check_disk_in_guest(img): _check_disk_in_guest(img_name_list[1]) for vcpu_dev in vcpu_devices: - error_context.context("Hotplug vcpu device: %s" % vcpu_dev, - test.log.info) + error_context.context(f"Hotplug vcpu device: {vcpu_dev}", test.log.info) vm.hotplug_vcpu_device(vcpu_dev) - if not utils_misc.wait_for( - lambda: cpu_utils.check_if_vm_vcpus_match_qemu(vm), - 60): + if not utils_misc.wait_for(lambda: cpu_utils.check_if_vm_vcpus_match_qemu(vm), 60): test.fail("Actual number of guest CPUs is not equal to expected") # FIXME: win2016 guest will reboot once hotplug a cpu @@ -83,8 +78,7 @@ def _check_disk_in_guest(img): if params.get_boolean("workaround_need"): session = vm.wait_for_login() - error_context.context("Plug another device", - test.log.info) + error_context.context("Plug another device", test.log.info) plug.hotplug_devs_serial(img_name_list[2]) _check_disk_in_guest(img_name_list[2]) diff --git a/qemu/tests/block_io_with_unaligned_offset.py b/qemu/tests/block_io_with_unaligned_offset.py index cc95770ffd..a833abaafa 100644 --- a/qemu/tests/block_io_with_unaligned_offset.py +++ b/qemu/tests/block_io_with_unaligned_offset.py @@ -1,4 +1,5 @@ """qemu-io with unaligned offset""" + from avocado.utils import process @@ -11,7 +12,7 @@ def run(test, params, env): """ logger = test.log io_cmd = params["io_cmd"] - cmd = "cat %s" % params["loop_dev"] + cmd = "cat {}".format(params["loop_dev"]) loop_dev = process.system_output(cmd, shell=True).decode() logger.debug("Create loop device on:%s", loop_dev) diff --git a/qemu/tests/block_iothread_test.py b/qemu/tests/block_iothread_test.py index 
fb521e8ea7..9b35023621 100644 --- a/qemu/tests/block_iothread_test.py +++ b/qemu/tests/block_iothread_test.py @@ -1,12 +1,13 @@ """Block device iothread relevant test""" + import re -from provider.block_devices_plug import BlockDevicesPlug -from virttest import error_context -from virttest import env_process, utils_disk, utils_misc, virt_vm +from aexpect import ShellCmdError +from virttest import env_process, error_context, utils_disk, utils_misc, virt_vm from virttest.utils_misc import get_linux_drive_path from virttest.utils_windows.drive import get_disk_props_by_serial_number -from aexpect import ShellCmdError + +from provider.block_devices_plug import BlockDevicesPlug # This decorator makes the test function aware of context strings @@ -30,22 +31,22 @@ def _get_window_disk_index_by_serial(serial): idx_info = get_disk_props_by_serial_number(session, serial, ["Index"]) if idx_info: return idx_info["Index"] - test.fail("Not find expected disk %s" % serial) + test.fail(f"Not find expected disk {serial}") def _check_disk_in_guest(img): os_type = params["os_type"] - logger.debug("Check disk %s in guest" % img) - if os_type == 'windows': - img_size = params.get("image_size_%s" % img) + logger.debug("Check disk %s in guest", img) + if os_type == "windows": + img_size = params.get(f"image_size_{img}") cmd = utils_misc.set_winutils_letter(session, guest_cmd) disk = _get_window_disk_index_by_serial(img) utils_disk.update_windows_disk_attributes(session, disk) logger.info("Clean disk:%s", disk) utils_disk.clean_partition_windows(session, disk) logger.info("Formatting disk:%s", disk) - driver = \ - utils_disk.configure_empty_disk(session, disk, img_size, - os_type)[0] + driver = utils_disk.configure_empty_disk(session, disk, img_size, os_type)[ + 0 + ] output_path = driver + ":\\test.dat" cmd = cmd.format(output_path) else: @@ -63,8 +64,7 @@ def check_image_iothread(): name = attr[0] num = int(attr[1]) if attr[1] else 0 expect_iothreads = set(attr[2].split(",") if attr[2] else []) - logger.debug("Expected %s iothread :%s %s", name, num, - expect_iothreads) + logger.debug("Expected %s iothread :%s %s", name, num, expect_iothreads) parent_bus = vm.monitor.qom_get(name, "parent_bus") parent_bus_type = vm.monitor.qom_get(parent_bus, "type") check_name = name @@ -85,12 +85,14 @@ def check_image_iothread(): if expect_iothreads: if real_iothreads != expect_iothreads: - test.fail("Get unexpeced %s iothread list:%s %s" % ( - name, expect_iothreads, real_iothreads)) + test.fail( + f"Get unexpeced {name} iothread list:{expect_iothreads} {real_iothreads}" + ) else: if len(real_iothreads) != num: - test.fail("Get unexpeced %s iothread len:%s %s" % ( - name, num, len(real_iothreads))) + test.fail( + f"Get unexpeced {name} iothread len:{num} {len(real_iothreads)}" + ) def hotplug_disks_test(): plug = BlockDevicesPlug(vm) @@ -109,17 +111,21 @@ def hotplug_disks_test(): err_msg = params.get("err_msg", "undefined unknown error") start_vm = params.get("start_vm") try: - timeout = params.get_numeric("login_timeout", 180) test_images = params.get_list("test_images") host_cmd = params.get("host_cmd") guest_cmd = params.get("guest_cmd") guest_operation = params.get("guest_operation") - if params.get("not_preprocess", "no") == 'yes': - logger.debug("Ready boot VM : %s", params['images']) - env_process.process(test, params, env, env_process.preprocess_image, - env_process.preprocess_vm) + if params.get("not_preprocess", "no") == "yes": + logger.debug("Ready boot VM : %s", params["images"]) + env_process.process( + test, + 
params, + env, + env_process.preprocess_image, + env_process.preprocess_vm, + ) error_context.context("Get the main VM", test.log.info) vm = env.get_vm(params["main_vm"]) @@ -129,7 +135,7 @@ def hotplug_disks_test(): check_image_iothread() locals_var = locals() if guest_operation: - logger.debug("Execute guest operation %s" % guest_operation) + logger.debug("Execute guest operation %s", guest_operation) locals_var[guest_operation]() logger.debug("Destroy VM...") @@ -137,10 +143,10 @@ def hotplug_disks_test(): vm.destroy() vm = None except (virt_vm.VMCreateError, virt_vm.VMStartError, ShellCmdError) as e: - logger.debug("Find exception %s" % e) + logger.debug("Find exception %s", str(e)) match = re.search(err_msg, e.output) if expect_to_fail == "yes" and match: - logger.info("%s is expected " % err_msg) + logger.info("%s is expected", err_msg) # reset expect_to_fail expect_to_fail = "no" else: @@ -150,4 +156,4 @@ def hotplug_disks_test(): vm.destroy() if expect_to_fail != "no": - test.fail("Expected '%s' not happened" % err_msg) + test.fail(f"Expected '{err_msg}' not happened") diff --git a/qemu/tests/block_iscsi_4kdisk.py b/qemu/tests/block_iscsi_4kdisk.py index b463cc73cf..aa7e16c0ec 100644 --- a/qemu/tests/block_iscsi_4kdisk.py +++ b/qemu/tests/block_iscsi_4kdisk.py @@ -1,9 +1,7 @@ """Test to install the Windows OS on the 4k disk.""" from avocado.utils import process - -from virttest import data_dir -from virttest import utils_misc +from virttest import data_dir, utils_misc from virttest.iscsi import Iscsi from virttest.tests import unattended_install @@ -23,31 +21,31 @@ def run(test, params, env): """ def _prepare(): - cmd_prepare = params['cmd_prepare'].format(dev_name) + cmd_prepare = params["cmd_prepare"].format(dev_name) process.run(cmd_prepare, 600, shell=True) def _cleanup(): if vm and vm.is_alive(): vm.destroy() - cmd_cleanup = params['cmd_cleanup'] % base_dir + cmd_cleanup = params["cmd_cleanup"] % base_dir process.run(cmd_cleanup, 600, shell=True) try: vm = None - params['image_size'] = params['emulated_image_size'] + params["image_size"] = params["emulated_image_size"] base_dir = data_dir.get_data_dir() iscsi = Iscsi.create_iSCSI(params, base_dir) iscsi.login() dev_name = utils_misc.wait_for(lambda: iscsi.get_device_name(), 60) if not dev_name: - test.error('Can not get the iSCSI device.') + test.error("Can not get the iSCSI device.") - test.log.info('Prepare env on: %s', dev_name) + test.log.info("Prepare env on: %s", dev_name) _prepare() - test.log.info('Start to install ...') + test.log.info("Start to install ...") vm = env.get_vm(params["main_vm"]) unattended_install.run(test, params, env) - test.log.info('Install completed') + test.log.info("Install completed") vm.destroy() vm = None finally: diff --git a/qemu/tests/block_iscsi_fault_disk.py b/qemu/tests/block_iscsi_fault_disk.py index 83cc0da3c4..d7711b2059 100644 --- a/qemu/tests/block_iscsi_fault_disk.py +++ b/qemu/tests/block_iscsi_fault_disk.py @@ -1,14 +1,11 @@ -"""Test to kill vm should non-infinite """ +"""Test to kill vm should non-infinite""" -import time import random import string +import time from avocado.utils import process -from virttest import env_process -from virttest import data_dir -from virttest import utils_misc - +from virttest import data_dir, env_process, utils_misc from virttest.iscsi import Iscsi from virttest.utils_misc import get_linux_drive_path @@ -31,32 +28,32 @@ def run(test, params, env): """ def _prepare_fault_disk(): - cmd = params['cmd_get_scsi_debug'] + cmd = 
params["cmd_get_scsi_debug"] process.run(cmd, shell=True) - cmd = "cat " + params['dev_scsi_debug'] - params['scsi_debug_disk'] = process.getoutput(cmd, shell=True) - if not params['scsi_debug_disk']: - test.fail("Can not find scsi_debug disk %s" % cmd) - - cmd_dmsetup = params['cmd_dmsetup'].format(params['dev_mapper'], - params['scsi_debug_disk']) + cmd = "cat " + params["dev_scsi_debug"] + params["scsi_debug_disk"] = process.getoutput(cmd, shell=True) + if not params["scsi_debug_disk"]: + test.fail(f"Can not find scsi_debug disk {cmd}") + + cmd_dmsetup = params["cmd_dmsetup"].format( + params["dev_mapper"], params["scsi_debug_disk"] + ) process.run(cmd_dmsetup, shell=True) - cmd = "dmsetup info " + params['dev_mapper'] + cmd = "dmsetup info " + params["dev_mapper"] process.run(cmd, shell=True) - params['mapper_disk'] = "/dev/mapper/" + params['dev_mapper'] - params['emulated_image'] = params['mapper_disk'] + params["mapper_disk"] = "/dev/mapper/" + params["dev_mapper"] + params["emulated_image"] = params["mapper_disk"] def _cleanup(): if vm and vm.is_alive(): vm.destroy() - if params['mapper_disk']: - cmd_cleanup = params['cmd_cleanup'] + if params["mapper_disk"]: + cmd_cleanup = params["cmd_cleanup"] process.run(cmd_cleanup, 600, shell=True) def _online_disk_windows(index): - disk = "disk_" + ''.join( - random.sample(string.ascii_letters + string.digits, 4)) + disk = "disk_" + "".join(random.sample(string.ascii_letters + string.digits, 4)) online_cmd = "echo select disk %s > " + disk online_cmd += " && echo online disk noerr >> " + disk online_cmd += " && echo clean >> " + disk @@ -67,11 +64,11 @@ def _online_disk_windows(index): return session.cmd(online_cmd % index, timeout=timeout) def _get_window_disk_index_by_uid(wwn): - cmd = "powershell -command \"get-disk|?" - cmd += " {$_.UniqueId -eq '%s'}|select number|FL\"" % wwn + cmd = 'powershell -command "get-disk|?' 
+ cmd += f" {{$_.UniqueId -eq '{wwn}'}}|select number|FL\"" status, output = session.cmd_status_output(cmd) if status != 0: - test.fail("execute command fail: %s" % output) + test.fail(f"execute command fail: {output}") test.log.debug(output) output = "".join([s for s in output.splitlines(True) if s.strip()]) @@ -79,7 +76,7 @@ def _get_window_disk_index_by_uid(wwn): if len(info) > 1: return info[1].strip() - cmd = "powershell -command \"get-disk| FL\"" + cmd = 'powershell -command "get-disk| FL"' output = session.cmd_output(cmd) test.log.debug(output) test.fail("Not find expected disk:" + wwn) @@ -92,8 +89,8 @@ def _get_disk_wwn(devname): vm = None iscsi = None - params['scsi_debug_disk'] = None - params['mapper_disk'] = None + params["scsi_debug_disk"] = None + params["mapper_disk"] = None timeout = params.get_numeric("timeout", 360) kill_max_timeout = params.get_numeric("kill_max_timeout", 240) kill_min_timeout = params.get_numeric("kill_min_timeout", 60) @@ -111,20 +108,21 @@ def _get_disk_wwn(devname): dev_name = utils_misc.wait_for(lambda: iscsi.get_device_name(), 60) if not dev_name: - test.error('Can not get the iSCSI device.') + test.error("Can not get the iSCSI device.") - test.log.info('Create host disk %s', dev_name) + test.log.info("Create host disk %s", dev_name) disk_wwn = _get_disk_wwn(dev_name) params["image_name_stg0"] = dev_name - test.log.info('Booting vm...') - params['start_vm'] = 'yes' - vm = env.get_vm(params['main_vm']) - env_process.process(test, params, env, env_process.preprocess_image, - env_process.preprocess_vm) + test.log.info("Booting vm...") + params["start_vm"] = "yes" + vm = env.get_vm(params["main_vm"]) + env_process.process( + test, params, env, env_process.preprocess_image, env_process.preprocess_vm + ) session = vm.wait_for_login(timeout=600) - if os_type == 'windows': + if os_type == "windows": guest_cmd = utils_misc.set_winutils_letter(session, guest_cmd) disk_drive = _get_window_disk_index_by_uid(disk_wwn) _online_disk_windows(disk_drive) @@ -141,14 +139,17 @@ def _get_disk_wwn(devname): test.log.info("Ready to kill vm...") process.system_output(host_kill_command, shell=True).decode() - real_timeout = int(process.system_output(params["get_timeout_command"], - shell=True).decode()) + real_timeout = int( + process.system_output(params["get_timeout_command"], shell=True).decode() + ) if kill_min_timeout < real_timeout < kill_max_timeout: test.log.info("Succeed kill timeout: %d", real_timeout) else: - test.fail("Kill timeout %d not in range (%d , %d)" % ( - real_timeout, kill_min_timeout, kill_max_timeout)) + test.fail( + "Kill timeout %d not in range (%d , %d)" + % (real_timeout, kill_min_timeout, kill_max_timeout) + ) vm = None finally: test.log.info("cleanup") diff --git a/qemu/tests/block_iscsi_format_large_size_disk.py b/qemu/tests/block_iscsi_format_large_size_disk.py index c13607f2ce..dfaad58b1b 100644 --- a/qemu/tests/block_iscsi_format_large_size_disk.py +++ b/qemu/tests/block_iscsi_format_large_size_disk.py @@ -3,10 +3,7 @@ import time from avocado.utils import process -from virttest import env_process, utils_disk -from virttest import data_dir -from virttest import utils_misc - +from virttest import data_dir, env_process, utils_disk, utils_misc from virttest.iscsi import Iscsi @@ -26,11 +23,11 @@ def run(test, params, env): """ def _get_window_disk_index_by_wwn(uid): - cmd = "powershell -command \"get-disk| Where-Object" - cmd += " {$_.UniqueId -eq '%s'}|select number|FL\"" % uid + cmd = 'powershell -command "get-disk| Where-Object' + cmd += 
f" {{$_.UniqueId -eq '{uid}'}}|select number|FL\"" status, output = session.cmd_status_output(cmd) if status != 0: - test.fail("execute command fail: %s" % output) + test.fail(f"execute command fail: {output}") output = "".join([s for s in output.splitlines(True) if s.strip()]) logger.debug(output) info = output.split(":") @@ -40,22 +37,21 @@ def _get_window_disk_index_by_wwn(uid): def _set_max_sector(dev): if params.get("set_max_sector"): - cmd = params['cmd_set_max_sector'].format(dev.replace("/dev/", "")) + cmd = params["cmd_set_max_sector"].format(dev.replace("/dev/", "")) process.run(cmd, shell=True) def _set_max_segment(dev): if params.get("set_max_segment"): - cmd = params['cmd_get_max_segment'].format( - dev.replace("/dev/", "")) + cmd = params["cmd_get_max_segment"].format(dev.replace("/dev/", "")) out = process.getoutput(cmd, shell=True) logger.info("run check segment %s,%s", cmd, out) - params["bus_extra_params_stg1"] = "max_sectors=%s" % out + params["bus_extra_params_stg1"] = f"max_sectors={out}" return out def _get_disk_serial(dev): if params.get("serial"): return params["serial"] - cmd = "lsblk -dno wwn %s" % dev + cmd = f"lsblk -dno wwn {dev}" logger.info("run check serial %s", cmd) out = process.getoutput(cmd).replace("0x", "").strip() logger.info("serial : %s", out) @@ -69,18 +65,18 @@ def _get_disk_serial(dev): timeout = params.get_numeric("timeout", 180) clean_cmd = params["clean_cmd"] - backend_image_name = params['image_name_stg1'] + backend_image_name = params["image_name_stg1"] guest_cmd = params["guest_cmd"] try: logger.info("Create iscsi disk.") base_dir = data_dir.get_data_dir() - params['image_size'] = params['emulated_image_size'] + params["image_size"] = params["emulated_image_size"] iscsi = Iscsi.create_iSCSI(params, base_dir) iscsi.login() dev_name = utils_misc.wait_for(lambda: iscsi.get_device_name(), 60) time.sleep(2) if not dev_name: - test.error('Can not get the iSCSI device.') + test.error("Can not get the iSCSI device.") serial = _get_disk_serial(dev_name) _set_max_sector(dev_name) @@ -90,12 +86,13 @@ def _get_disk_serial(dev): logger.info("run clean cmd %s", clean_cmd) process.run(clean_cmd, shell=True) - params['image_name_stg1'] = dev_name - params['image_raw_device_stg1'] = "yes" - params['start_vm'] = 'yes' - vm = env.get_vm(params['main_vm']) - env_process.process(test, params, env, env_process.preprocess_image, - env_process.preprocess_vm) + params["image_name_stg1"] = dev_name + params["image_raw_device_stg1"] = "yes" + params["start_vm"] = "yes" + vm = env.get_vm(params["main_vm"]) + env_process.process( + test, params, env, env_process.preprocess_image, env_process.preprocess_vm + ) session = vm.wait_for_login(timeout=timeout) img_size = params.get("image_size_stg1") os_type = params["os_type"] @@ -107,12 +104,12 @@ def _get_disk_serial(dev): logger.info("Format disk %s", disk) utils_disk.update_windows_disk_attributes(session, disk) - driver = utils_disk.configure_empty_disk(session, disk, img_size, - os_type, fstype=fstype, - labeltype=labeltype)[0] + driver = utils_disk.configure_empty_disk( + session, disk, img_size, os_type, fstype=fstype, labeltype=labeltype + )[0] output_path = driver + ":\\test.dat" guest_cmd = guest_cmd.format(output_path) - logger.info('Start IO: %s', guest_cmd) + logger.info("Start IO: %s", guest_cmd) session.cmd(guest_cmd, timeout=360) finally: @@ -121,5 +118,5 @@ def _get_disk_serial(dev): vm.destroy() if iscsi: iscsi.cleanup(True) - params['image_name_stg1'] = backend_image_name - params['image_raw_device_stg1'] 
= "no" + params["image_name_stg1"] = backend_image_name + params["image_raw_device_stg1"] = "no" diff --git a/qemu/tests/block_iscsi_lvm.py b/qemu/tests/block_iscsi_lvm.py index 3e12d89f19..4e66abeb9d 100644 --- a/qemu/tests/block_iscsi_lvm.py +++ b/qemu/tests/block_iscsi_lvm.py @@ -1,7 +1,5 @@ from avocado.utils import process - -from virttest import data_dir -from virttest import utils_misc +from virttest import data_dir, utils_misc from virttest.env_process import preprocess_vm from virttest.iscsi import Iscsi from virttest.lvm import LVM @@ -23,25 +21,33 @@ def run(test, params, env): :param params: Dictionary with the test parameters. :param env: Dictionary with test environment. """ - params['image_size'] = params['emulated_image_size'] + params["image_size"] = params["emulated_image_size"] post_commands = [] try: iscsi = Iscsi.create_iSCSI(params, data_dir.get_data_dir()) - post_commands.extend((params['post_commands_iscsi'] % - (iscsi.emulated_image, iscsi.id)).split(',')) + post_commands.extend( + (params["post_commands_iscsi"] % (iscsi.emulated_image, iscsi.id)).split( + "," + ) + ) lvm = LVM(params) iscsi.login() dev_name = utils_misc.wait_for(lambda: iscsi.get_device_name(), 60) if not dev_name: - test.error('Can not get the iSCSI device.') - process.run(params['cmd_fdisk'] % dev_name, 600, shell=True) - params['pv_name'] = process.system_output( - params['cmd_get_partition'].format(dev_name), - 60, shell=True).decode().strip() - post_commands.extend((params['post_commands_lvm'] % - params['pv_name']).split(',')) + test.error("Can not get the iSCSI device.") + process.run(params["cmd_fdisk"] % dev_name, 600, shell=True) + params["pv_name"] = ( + process.system_output( + params["cmd_get_partition"].format(dev_name), 60, shell=True + ) + .decode() + .strip() + ) + post_commands.extend( + (params["post_commands_lvm"] % params["pv_name"]).split(",") + ) lvm.setup() preprocess_vm(test, params, env, params["main_vm"]) unattended_install.run(test, params, env) finally: - params['post_command'] = ' ; '.join(post_commands[::-1]) + params["post_command"] = " ; ".join(post_commands[::-1]) diff --git a/qemu/tests/block_iscsi_with_specical_max_sectors.py b/qemu/tests/block_iscsi_with_specical_max_sectors.py index c34eb094e7..e2c5433490 100644 --- a/qemu/tests/block_iscsi_with_specical_max_sectors.py +++ b/qemu/tests/block_iscsi_with_specical_max_sectors.py @@ -1,12 +1,9 @@ -"""Test IO on specific max_sector_kb of disk """ +"""Test IO on specific max_sector_kb of disk""" import logging from avocado.utils import process -from virttest import env_process -from virttest import data_dir -from virttest import utils_misc - +from virttest import data_dir, env_process, utils_misc from virttest.iscsi import Iscsi from virttest.utils_misc import get_linux_drive_path @@ -27,14 +24,14 @@ def run(test, params, env): """ def _setup_lvs(dev): - cmd = params['cmd_set_max_sector'].format(dev.replace("/dev/", "")) + cmd = params["cmd_set_max_sector"].format(dev.replace("/dev/", "")) process.run(cmd, shell=True) - cmd = params['cmd_setup_vg'].format(dev) + cmd = params["cmd_setup_vg"].format(dev) process.run(cmd, shell=True) for lv in lvs: - cmd = params['cmd_setup_lv'].format(lv) + cmd = params["cmd_setup_lv"].format(lv) process.run(cmd, shell=True) - cmd = params['cmd_build_img'].format(lv) + cmd = params["cmd_build_img"].format(lv) process.run(cmd, shell=True) def _cleanup_lvs(dev): @@ -44,9 +41,9 @@ def _cleanup_lvs(dev): if not dev: return - cmd = params['cmd_clean_lv'] + cmd = params["cmd_clean_lv"] 
process.run(cmd, shell=True) - cmd = params['cmd_clean_vg'].format(dev) + cmd = params["cmd_clean_vg"].format(dev) process.run(cmd, shell=True) def _execute_io_in_guest(): @@ -64,13 +61,13 @@ def _execute_io_in_guest(): vm = None iscsi = None dev_name = None - log = logging.getLogger('avocado.test') - lvs = params['lvs_name'].split(",") + log = logging.getLogger("avocado.test") + lvs = params["lvs_name"].split(",") timeout = params.get_numeric("timeout", 180) guest_cmd = params["guest_cmd"] try: - params['image_size'] = params['emulated_image_size'] + params["image_size"] = params["emulated_image_size"] log.info("Create iscsi disk.") base_dir = data_dir.get_data_dir() iscsi = Iscsi.create_iSCSI(params, base_dir) @@ -78,27 +75,29 @@ def _execute_io_in_guest(): dev_name = utils_misc.wait_for(lambda: iscsi.get_device_name(), 60) if not dev_name: - test.error('Can not get the iSCSI device.') + test.error("Can not get the iSCSI device.") log.info("Prepare lvs disks on %s", dev_name) _setup_lvs(dev_name) - log.info('Booting vm...') - params['start_vm'] = 'yes' - vm = env.get_vm(params['main_vm']) - env_process.process(test, params, env, env_process.preprocess_image, - env_process.preprocess_vm) + log.info("Booting vm...") + params["start_vm"] = "yes" + vm = env.get_vm(params["main_vm"]) + env_process.process( + test, params, env, env_process.preprocess_image, env_process.preprocess_vm + ) session = vm.wait_for_login(timeout=timeout) - log.info('Execute IO in guest ...') + log.info("Execute IO in guest ...") _execute_io_in_guest() - log.info('Check guest status.') - if utils_misc.wait_for(lambda: not vm.monitor.verify_status("running"), - 600, first=10, step=20): + log.info("Check guest status.") + if utils_misc.wait_for( + lambda: not vm.monitor.verify_status("running"), 600, first=10, step=20 + ): if vm.is_dead(): test.fail("Vm in dead status.") - test.fail("VM not in running: %s" % vm.monitor.get_status()) + test.fail(f"VM not in running: {vm.monitor.get_status()}") finally: log.info("cleanup") diff --git a/qemu/tests/block_kill_reconnect_with_remote_storage.py b/qemu/tests/block_kill_reconnect_with_remote_storage.py index 2b5ff3b1cd..c2d72a423e 100644 --- a/qemu/tests/block_kill_reconnect_with_remote_storage.py +++ b/qemu/tests/block_kill_reconnect_with_remote_storage.py @@ -1,14 +1,14 @@ -import socket import logging +import socket import time -from provider.nbd_image_export import QemuNBDExportImage -from provider.blockdev_base import BlockdevBaseTest +from avocado.utils import process from virttest import error_context -from avocado.utils import process +from provider.blockdev_base import BlockdevBaseTest +from provider.nbd_image_export import QemuNBDExportImage -LOG_JOB = logging.getLogger('avocado.test') +LOG_JOB = logging.getLogger("avocado.test") class BlockReconnectTest(BlockdevBaseTest): @@ -21,34 +21,36 @@ def __init__(self, test, params, env): self.disk_op_cmd = params["disk_op_cmd"] self.disk_op_timeout = int(params.get("disk_op_timeout", 360)) localhost = socket.gethostname() - params['nbd_server_%s' % params['nbd_image_tag']] = localhost \ - if localhost else 'localhost' + params["nbd_server_{}".format(params["nbd_image_tag"])] = ( + localhost if localhost else "localhost" + ) self.timeout = int(params.get("login_timeout", 360)) self.repeat_times = int(params["repeat_times"]) self.reconnect_time_wait = int(params["reconnect_time_wait"]) self.vm = env.get_vm(params["main_vm"]) - super(BlockReconnectTest, self).__init__(test, params, env) + super().__init__(test, params, env) def 
prepare_test(self): try: self.create_local_image() self.export_local_image_with_nbd() - super(BlockReconnectTest, self).prepare_test() + super().prepare_test() except Exception: self.clean_images() raise def create_local_image(self): - image_params = self.params.object_params( - self.params["local_image_tag"]) + image_params = self.params.object_params(self.params["local_image_tag"]) local_image = self.source_disk_define_by_params( - image_params, self.params["local_image_tag"]) + image_params, self.params["local_image_tag"] + ) local_image.create(image_params) self.trash.append(local_image) def export_local_image_with_nbd(self): self.nbd_export = QemuNBDExportImage( - self.params, self.params["local_image_tag"]) + self.params, self.params["local_image_tag"] + ) self.nbd_export.export_image() def get_disk_storage_name(self, system_disk_cmd, data_disk_cmd): @@ -57,21 +59,22 @@ def get_disk_storage_name(self, system_disk_cmd, data_disk_cmd): return: data disk name e.g. /dev/sdb """ error_context.context("Identify data disk.", LOG_JOB.info) - LOG_JOB.info('Identify data disk') + LOG_JOB.info("Identify data disk") session = self.vm.wait_for_login(timeout=self.timeout) - system_disk_name = session.cmd(system_disk_cmd, - timeout=self.disk_op_timeout).strip() + system_disk_name = session.cmd( + system_disk_cmd, timeout=self.disk_op_timeout + ).strip() find_disk_cmd = data_disk_cmd % system_disk_name - data_disk_name = session.cmd(find_disk_cmd, - timeout=self.disk_op_timeout).strip() - LOG_JOB.info('The data disk is %s', data_disk_name) + data_disk_name = session.cmd( + find_disk_cmd, timeout=self.disk_op_timeout + ).strip() + LOG_JOB.info("The data disk is %s", data_disk_name) session.close() return system_disk_name, data_disk_name def run_io_test(self, test_disk): - """ Run io test on given disks. """ - error_context.context( - "Run io test on %s." 
% test_disk, LOG_JOB.info) + """Run io test on given disks.""" + error_context.context(f"Run io test on {test_disk}.", LOG_JOB.info) session = self.vm.wait_for_login(timeout=self.timeout) test_cmd = self.disk_op_cmd % (test_disk, test_disk) session.cmd(test_cmd, timeout=self.disk_op_timeout) @@ -80,28 +83,27 @@ def run_io_test(self, test_disk): def run_iptables(self, cmd): result = process.run(cmd, ignore_status=True, shell=True) if result.exit_status != 0: - LOG_JOB.error('command error: %s', result.stderr.decode()) + LOG_JOB.error("command error: %s", result.stderr.decode()) def break_net_with_iptables(self): - self.run_iptables(self.params['net_break_cmd']) + self.run_iptables(self.params["net_break_cmd"]) self.net_down = True def resume_net_with_iptables(self): - self.run_iptables(self.params['net_resume_cmd']) + self.run_iptables(self.params["net_resume_cmd"]) self.net_down = False def reconnect_loop_io(self): - error_context.context( - "Run IO test when in reconnecting loop", LOG_JOB.info) + error_context.context("Run IO test when in reconnecting loop", LOG_JOB.info) for iteration in range(self.repeat_times): - error_context.context("Wait %s seconds" % self.reconnect_time_wait, - LOG_JOB.info) + error_context.context( + f"Wait {self.reconnect_time_wait} seconds", LOG_JOB.info + ) time.sleep(self.reconnect_time_wait) self.run_io_test("test_file") def check_data_disk_resume(self, test_disk): - error_context.context( - "check data disk resumed", LOG_JOB.info) + error_context.context("check data disk resumed", LOG_JOB.info) for iteration in range(self.repeat_times): LOG_JOB.info("Wait %s seconds", self.reconnect_time_wait) time.sleep(self.reconnect_time_wait) @@ -116,7 +118,7 @@ def clean_images(self): self.stop_export_local_image_with_nbd() - super(BlockReconnectTest, self).clean_images() + super().clean_images() def stop_export_local_image_with_nbd(self): LOG_JOB.info("Stop export nbd data disk image.") @@ -124,8 +126,8 @@ def stop_export_local_image_with_nbd(self): def do_test(self): disk_storage_name = self.get_disk_storage_name( - self.params["find_system_disk_cmd"], - self.params["find_data_disk_cmd"]) + self.params["find_system_disk_cmd"], self.params["find_data_disk_cmd"] + ) data_disk = disk_storage_name[1] self.run_io_test(data_disk) self.stop_export_local_image_with_nbd() diff --git a/qemu/tests/block_libblkio_release.py b/qemu/tests/block_libblkio_release.py index 01331c4c9d..c6d56077d2 100644 --- a/qemu/tests/block_libblkio_release.py +++ b/qemu/tests/block_libblkio_release.py @@ -1,5 +1,7 @@ -""" Verify libblkio release""" +"""Verify libblkio release""" + import os + from virttest import data_dir as virttest_data_dir diff --git a/qemu/tests/block_lvm_read_only.py b/qemu/tests/block_lvm_read_only.py index b551876bd0..bf926e2837 100644 --- a/qemu/tests/block_lvm_read_only.py +++ b/qemu/tests/block_lvm_read_only.py @@ -1,5 +1,4 @@ from avocado.utils import process - from virttest import env_process from virttest.lvm import LVM @@ -17,19 +16,19 @@ def run(test, params, env): :param env: Dictionary with test environment """ - params["start_vm"] = 'yes' + params["start_vm"] = "yes" params["pv_name"] = process.getoutput(params["get_devname_command"]) lvm = LVM(params) lvm.setup() env_process.preprocess_vm(test, params, env, params["main_vm"]) - vm = env.get_vm(params['main_vm']) - session = vm.wait_for_login() + vm = env.get_vm(params["main_vm"]) + vm.wait_for_login() qmp_port = vm.monitor qdev = vm.devices - device = qdev.get_by_params({"id": 'stg0'})[0] + device = 
qdev.get_by_params({"id": "stg0"})[0] qdev.simple_unplug(device, qmp_port) image_name = params["data_tag"] image_params = params.object_params(image_name) - devs = qdev.images_define_by_params(image_name, image_params, 'disk') + devs = qdev.images_define_by_params(image_name, image_params, "disk") for dev in devs: qdev.simple_hotplug(dev, qmp_port) diff --git a/qemu/tests/block_multifunction.py b/qemu/tests/block_multifunction.py index f5a3ed1c03..2b228e6c65 100644 --- a/qemu/tests/block_multifunction.py +++ b/qemu/tests/block_multifunction.py @@ -2,21 +2,17 @@ import time from avocado.core import exceptions - -from virttest import env_process -from virttest import error_context -from virttest import utils_misc -from virttest import utils_disk -from virttest.qemu_monitor import QMPCmdError +from virttest import env_process, error_context, utils_disk, utils_misc from virttest.qemu_devices.qdevices import QDevice, QDrive +from virttest.qemu_monitor import QMPCmdError -from qemu.tests import block_hotplug from provider.block_devices_plug import BlockDevicesPlug +from qemu.tests import block_hotplug -LOG_JOB = logging.getLogger('avocado.test') +LOG_JOB = logging.getLogger("avocado.test") -def set_addr(image_name, slot, function, params, multifunction='on'): +def set_addr(image_name, slot, function, params, multifunction="on"): """ Specify the multifunciton address for image device @@ -26,26 +22,25 @@ def set_addr(image_name, slot, function, params, multifunction='on'): :param params: Params object :param multifunction: on/off """ - if params['drive_format'].startswith('scsi'): - param_name = 'bus_extra_params_%s' % image_name + if params["drive_format"].startswith("scsi"): + param_name = f"bus_extra_params_{image_name}" else: - param_name = 'blk_extra_params_%s' % image_name + param_name = f"blk_extra_params_{image_name}" if function % 8 == 0: - LOG_JOB.info('Set multifunction=on for %s', image_name) - params[param_name] = 'multifunction=%s' % multifunction + LOG_JOB.info("Set multifunction=on for %s", image_name) + params[param_name] = f"multifunction={multifunction}" if function == 0: return - addr_pattern = 'addr=%s.%s' % (hex(slot), hex(function % 8)) - LOG_JOB.info('Set addr of %s to %s', image_name, addr_pattern) + addr_pattern = f"addr={hex(slot)}.{hex(function % 8)}" + LOG_JOB.info("Set addr of %s to %s", image_name, addr_pattern) extra_param = params.get(param_name) if extra_param: - params[param_name] = extra_param + ',' + addr_pattern + params[param_name] = extra_param + "," + addr_pattern else: params[param_name] = addr_pattern -def io_test(session, disk_op_cmd, disks, - windows=False, image_size=None): +def io_test(session, disk_op_cmd, disks, windows=False, image_size=None): """ Perform io test on disks :param session: vm session @@ -57,10 +52,12 @@ def io_test(session, disk_op_cmd, disks, for index, disk in enumerate(disks): if windows: if not utils_disk.update_windows_disk_attributes(session, disk): - raise exceptions.TestError("Failed to clear readonly for all" - " disks and online them in guest") + raise exceptions.TestError( + "Failed to clear readonly for all" " disks and online them in guest" + ) partition = utils_disk.configure_empty_windows_disk( - session, disk, image_size) + session, disk, image_size + ) test_cmd = disk_op_cmd % (partition[0], partition[0]) test_cmd = utils_misc.set_winutils_letter(session, test_cmd) else: @@ -95,48 +92,49 @@ def get_image_device(qdev, img_name): """ dev = qdev.get(img_name) devs = [dev] - if params['drive_format'].startswith('scsi'): 
- devs.append(qdev.get_by_properties( - {'aid': dev.get_param('bus').split('.')[0]})[0]) + if params["drive_format"].startswith("scsi"): + devs.append( + qdev.get_by_properties({"aid": dev.get_param("bus").split(".")[0]})[0] + ) return devs - image = params.objects('images')[0] - vm_name = params['main_vm'] + image = params.objects("images")[0] + vm_name = params["main_vm"] set_addr(image, 0, 0, params) # Add multifunction=on option before start vm - params['start_vm'] = 'yes' + params["start_vm"] = "yes" env_process.preprocess_vm(test, params, env, vm_name) vm = env.get_vm(vm_name) qdev = vm.devices - windows = params["os_type"] == 'windows' + windows = params["os_type"] == "windows" disk_op_cmd = params.get("disk_op_cmd") session = vm.wait_for_login() time.sleep(60) pcie_port_set = False - if "q35" in params['machine_type'] or "arm64-pci" in params['machine_type']: + if "q35" in params["machine_type"] or "arm64-pci" in params["machine_type"]: pcie_port_set = True dev_slot = 0 if pcie_port_set else 9 - parent_bus = 'pcie_extra_root_port_0' if pcie_port_set else 'pci.0' - image_size = '1G' + parent_bus = "pcie_extra_root_port_0" if pcie_port_set else "pci.0" + image_size = "1G" # Generate the data disk devices to be plugged for i in range(1, 9): - stg = 'stg%s' % i - vm.params['images'] += ' %s' % stg - vm.params['image_name_%s' % stg] = 'images/%s' % stg - vm.params['image_size_%s' % stg] = image_size - vm.params['remove_image_%s' % stg] = 'yes' - vm.params['force_create_image_%s' % stg] = 'yes' - vm.params['boot_drive_%s' % stg] = 'no' + stg = f"stg{i}" + vm.params["images"] += f" {stg}" + vm.params[f"image_name_{stg}"] = f"images/{stg}" + vm.params[f"image_size_{stg}"] = image_size + vm.params[f"remove_image_{stg}"] = "yes" + vm.params[f"force_create_image_{stg}"] = "yes" + vm.params[f"boot_drive_{stg}"] = "no" # Specify the address of the device, plug them into same slot set_addr(stg, dev_slot, i, vm.params) - if params['drive_format'].startswith('scsi'): + if params["drive_format"].startswith("scsi"): # Create oen new scsi bus for each block device - vm.params['drive_bus_%s' % stg] = i + vm.params[f"drive_bus_{stg}"] = i # To create those image files env_process.process_images(env_process.preprocess_image, test, vm.params) plug = BlockDevicesPlug(vm) - parent_bus_obj = qdev.get_buses({'aobject': parent_bus})[0] + parent_bus_obj = qdev.get_buses({"aobject": parent_bus})[0] plug.hotplug_devs_serial(bus=parent_bus_obj) # Run io test on all the plugged disks @@ -146,21 +144,24 @@ def get_image_device(qdev, img_name): disks_before_reboot = block_hotplug.find_all_disks(session, windows) session = vm.reboot(session) block_hotplug.wait_plug_disks( - session, 'check', disks_before_reboot, 0, windows, test) + session, "check", disks_before_reboot, 0, windows, test + ) session.close() # Unplug the disk on function 7 and 0, and check if all the disks been removed - images = vm.params.objects('images') + images = vm.params.objects("images") unplug_dev = images[-1] - unplug_timeout = params['unplug_timeout'] + unplug_timeout = params["unplug_timeout"] try: plug.unplug_devs_serial(images=unplug_dev, timeout=unplug_timeout) except exceptions.TestError as e: - if 'Actual: 8 disks. Expected: ' not in str(e): + if "Actual: 8 disks. Expected: " not in str(e): raise else: - test.fail('All the plugged disks should be removed when' - ' the device at function 0 is removed.') + test.fail( + "All the plugged disks should be removed when" + " the device at function 0 is removed." 
+ ) # replug disk 2-7 rest_dev = images[1:-1] @@ -169,17 +170,17 @@ def get_image_device(qdev, img_name): for img in rest_dev: devs_rm = get_image_device(qdev, img) list(map(lambda x: qdev.remove(x, recursive=False), devs_rm)) - plug._create_devices(rest_dev, {'aobject': parent_bus}) + plug._create_devices(rest_dev, {"aobject": parent_bus}) for img, devs in plug._hotplugged_devs.items(): if img not in rest_dev: continue for dev in devs: args = (dev, vm.monitor) if isinstance(dev, QDevice): - pci_device = qdev.is_pci_device(dev['driver']) + pci_device = qdev.is_pci_device(dev["driver"]) if pci_device: args += (parent_bus_obj,) - elif not dev['driver'].startswith('scsi'): + elif not dev["driver"].startswith("scsi"): continue elif not isinstance(dev, QDrive): continue @@ -191,23 +192,27 @@ def get_image_device(qdev, img_name): pass # Replug disk 8 on slot 0 with multifunction='off' - set_addr(images[-1], dev_slot, 0, vm.params, multifunction='off') - plug._create_devices(unplug_dev.split(), {'aobject': parent_bus}) + set_addr(images[-1], dev_slot, 0, vm.params, multifunction="off") + plug._create_devices(unplug_dev.split(), {"aobject": parent_bus}) for img, devs in plug._hotplugged_devs.items(): for dev in devs: - if (img == images[-1] and - isinstance(dev, QDevice) and - qdev.is_pci_device(dev['driver'])): - dev['addr'] = hex(dev_slot) # for pci bus addr might be reset + if ( + img == images[-1] + and isinstance(dev, QDevice) + and qdev.is_pci_device(dev["driver"]) + ): + dev["addr"] = hex(dev_slot) # for pci bus addr might be reset try: parent_bus_obj.prepare_hotplug(dev) dev.hotplug(vm.monitor, vm.devices.qemu_version) except QMPCmdError as e: - if 'single function' not in str(e): + if "single function" not in str(e): raise else: - test.fail('It should fail to hotplug a single function device' - ' to the address where multifunction already on.') + test.fail( + "It should fail to hotplug a single function device" + " to the address where multifunction already on." 
+ ) break else: plug._hotplug_atomic(dev, vm.monitor) diff --git a/qemu/tests/block_multifunction_scale.py b/qemu/tests/block_multifunction_scale.py index 6be201ff70..960e45183a 100644 --- a/qemu/tests/block_multifunction_scale.py +++ b/qemu/tests/block_multifunction_scale.py @@ -1,8 +1,7 @@ -from virttest import env_process -from virttest import error_context +from virttest import env_process, error_context from provider.block_devices_plug import BlockDevicesPlug -from qemu.tests.block_multifunction import set_addr, io_test +from qemu.tests.block_multifunction import io_test, set_addr @error_context.context_aware @@ -34,46 +33,45 @@ def generate_image(dev_slots, plug, params, qdev, image_size, pcie, test): disks = [] for slot in dev_slots: scsi_bus = 1 - parent_bus = 'pcie_extra_root_port_%s' % slot if pcie else 'pci.0' + parent_bus = f"pcie_extra_root_port_{slot}" if pcie else "pci.0" images = [] for i in range(1, 9): - stg = 'stg%s%s' % (slot, i) + stg = f"stg{slot}{i}" images.append(stg) - params['images'] += ' %s' % stg - params['image_name_%s' % stg] = 'images/%s' % stg - params['image_size_%s' % stg] = image_size - params['remove_image_%s' % stg] = 'yes' - params['force_create_image_%s' % stg] = 'no' - params['create_image_%s' % stg] = 'yes' - params['boot_drive_%s' % stg] = 'no' + params["images"] += f" {stg}" + params[f"image_name_{stg}"] = f"images/{stg}" + params[f"image_size_{stg}"] = image_size + params[f"remove_image_{stg}"] = "yes" + params[f"force_create_image_{stg}"] = "no" + params[f"create_image_{stg}"] = "yes" + params[f"boot_drive_{stg}"] = "no" # Specify the address of the device, plug them into same slot addr = 0 if pcie else slot set_addr(stg, addr, i, params) - if params['drive_format'].startswith('scsi'): + if params["drive_format"].startswith("scsi"): # Create oen new scsi bus for each block device - params['drive_bus_%s' % stg] = scsi_bus + params[f"drive_bus_{stg}"] = scsi_bus scsi_bus += 1 env_process.process_images(env_process.preprocess_image, test, params) - parent_bus_obj = qdev.get_buses({'aobject': parent_bus})[0] + parent_bus_obj = qdev.get_buses({"aobject": parent_bus})[0] plug._hotplug_devs(images, vm.monitor, bus=parent_bus_obj) disks.extend(plug) return disks - image_size = '500M' - vm_name = params['main_vm'] + image_size = "500M" + vm_name = params["main_vm"] env_process.preprocess_vm(test, params, env, vm_name) vm = env.get_vm(vm_name) qdev = vm.devices - windows = params["os_type"] == 'windows' + windows = params["os_type"] == "windows" disk_op_cmd = params.get("disk_op_cmd") session = vm.wait_for_login() pcie = False - if "q35" in params['machine_type'] or "arm64-pci" in params['machine_type']: + if "q35" in params["machine_type"] or "arm64-pci" in params["machine_type"]: pcie = True dev_slots = range(0, 3) if pcie else (7, 10) plug = BlockDevicesPlug(vm) - disks = generate_image( - dev_slots, plug, vm.params, qdev, image_size, pcie, test) + disks = generate_image(dev_slots, plug, vm.params, qdev, image_size, pcie, test) if windows: io_test(session, disk_op_cmd, disks, windows, image_size) diff --git a/qemu/tests/block_performance_test.py b/qemu/tests/block_performance_test.py index b235f5034d..07fba70fe6 100644 --- a/qemu/tests/block_performance_test.py +++ b/qemu/tests/block_performance_test.py @@ -1,16 +1,17 @@ import copy +import itertools import json -import time import re import statistics as st -import itertools +import time from avocado.utils import process -from provider.storage_benchmark import generate_instance -from virttest import 
utils_disk, env_process +from virttest import env_process, utils_disk from virttest.utils_misc import get_linux_drive_path from virttest.utils_windows.drive import get_disk_props_by_serial_number +from provider.storage_benchmark import generate_instance + def run(test, params, env): """ @@ -29,10 +30,10 @@ def _get_window_disk_index_by_serial(serial): idx_info = get_disk_props_by_serial_number(session, serial, ["Index"]) if idx_info: return idx_info["Index"] - test.fail("Not find expected disk %s" % serial) + test.fail(f"Not find expected disk {serial}") def preprocess_fio_opts(results): - """expand fio options """ + """expand fio options""" fio_rw = params.get("fio_rw", "null").split() fio_bs = params.get("fio_bs", "null").split() fio_iodepth = params.get("fio_iodepth", "null-").split() @@ -44,16 +45,16 @@ def preprocess_fio_opts(results): rw = sub_fio[0] bs = sub_fio[1] iodepth = sub_fio[2] - name = "%s-%s-%s" % (rw, bs, iodepth) + name = f"{rw}-{bs}-{iodepth}" name = name.replace("null-", "") if rw != "null": - cmd += " --rw=%s " % rw + cmd += f" --rw={rw} " if bs != "null": - cmd += " --bs=%s " % bs + cmd += f" --bs={bs} " if iodepth != "null-": - cmd += " --iodepth=%s " % iodepth + cmd += f" --iodepth={iodepth} " if cmd: - fio_combination += " --stonewall --name=%s" % name + cmd + fio_combination += f" --stonewall --name={name}" + cmd fio_opts = params["fio_cmd"] @@ -65,11 +66,11 @@ def preprocess_fio_opts(results): return fio_opts def preprcess_fio_filename(img): - """get filename for img """ + """get filename for img""" - disk_size = params["image_size_%s" % img] - fio_raw_device = params.get("fio_raw_device_%s" % img, "no") - fio_filename = params.get("fio_filename_%s" % img) + disk_size = params[f"image_size_{img}"] + fio_raw_device = params.get(f"fio_raw_device_{img}", "no") + fio_filename = params.get(f"fio_filename_{img}") if fio_filename: return fio_filename if os_type == "windows": @@ -77,25 +78,24 @@ def preprcess_fio_filename(img): disk_id = _get_window_disk_index_by_serial(img) if not utils_disk.update_windows_disk_attributes(session, disk_id): - test.error("Failed to enable data disk %s" % disk_id) + test.error(f"Failed to enable data disk {disk_id}") if fio_raw_device == "yes": - return r"\\.\PHYSICALDRIVE%s" % disk_id + return rf"\\.\PHYSICALDRIVE{disk_id}" - disk_letter = utils_disk.configure_empty_windows_disk(session, - disk_id, - disk_size)[0] + disk_letter = utils_disk.configure_empty_windows_disk( + session, disk_id, disk_size + )[0] fio_filename = disk_letter + ":\\test.dat" else: dev = get_linux_drive_path(session, img) logger.debug(dev) if fio_raw_device == "yes": return dev - mount_dir = "/home/%s" % (dev.replace("/dev/", "")) - cmd = "mkfs.xfs {0} && mkdir -p {1} && mount {0} {1}".format(dev, - mount_dir) + mount_dir = "/home/{}".format(dev.replace("/dev/", "")) + cmd = f"mkfs.xfs {dev} && mkdir -p {mount_dir} && mount {dev} {mount_dir}" session.cmd_output(cmd) - fio_filename = "%s/test.img" % mount_dir + fio_filename = f"{mount_dir}/test.img" if not fio_filename: test.fail("Can not get output file path in guest.") @@ -138,11 +138,16 @@ def preprocess_fio_data(results): results["images"] = params["compare_images"].split() opts = preprocess_fio_opts(results) for img in results["images"]: - results[img] = {"filename": "", "global_options": {}, "jobs": {}, - "cmd": "", "cmds": [], "location": "", - "results": []} - results[img]["location"] = params.get("fio_cmd_location_%s" % img, - "vm") + results[img] = { + "filename": "", + "global_options": {}, + "jobs": {}, 
+ "cmd": "", + "cmds": [], + "location": "", + "results": [], + } + results[img]["location"] = params.get(f"fio_cmd_location_{img}", "vm") # guest fio if results[img]["location"] == "vm": fio_bin = fio.cfg.fio_path @@ -153,7 +158,7 @@ def preprocess_fio_data(results): fio_bin = "fio" filename = preprcess_fio_filename(img) - results[img]["cmd"] = "%s %s" % (fio_bin, opts % filename) + results[img]["cmd"] = f"{fio_bin} {opts % filename}" cmds = results[img]["cmd"].split("--stonewall") if len(cmds) > 2 and fio_run_mode == "separate": for i in range(1, len(cmds)): @@ -183,7 +188,7 @@ def run_fio_test(results): if results[img]["cmds"]: cmd_num = len(results[img]["cmds"]) for idx, cmd in enumerate(results[img]["cmds"]): - logger.debug(f"Run sub-cmd {idx}/{cmd_num}:{cmd}") + logger.debug("Run sub-cmd %s/%s:%s", idx, cmd_num, cmd) img_output = runner(cmd, cmd_timeout) if i > 0: # discard first result @@ -210,10 +215,11 @@ def parse_fio_result(cmd_output, img, results, record=False): filename = json_output["global options"]["directory"] else: filename = json_output["global options"]["filename"] - if img_result.get('filename'): + if img_result.get("filename"): if filename != img_result["filename"]: test.fail( - "Wrong data %s %s" % (filename, img_result["filename"])) + "Wrong data {} {}".format(filename, img_result["filename"]) + ) else: # init global info global_options = copy.deepcopy(json_output["global options"]) @@ -232,19 +238,31 @@ def parse_fio_result(cmd_output, img, results, record=False): if jobname not in jobs: # init job info logger.debug("Add job: %s %s", filename, jobname) - jobs[jobname] = {"options": job["job options"].copy(), - "iops": [], "iops_avg": 0, "lat": [], - "lat_avg": 0, - "job_runtime": 0, "bw": []} + jobs[jobname] = { + "options": job["job options"].copy(), + "iops": [], + "iops_avg": 0, + "lat": [], + "lat_avg": 0, + "job_runtime": 0, + "bw": [], + } read = int(job["read"]["iops"]) write = int(job["write"]["iops"]) iops = read + write bw = int(job["read"]["bw"]) + int(job["write"]["bw"]) lat = int(job["read"]["lat_ns"]["mean"]) + int( - job["write"]["lat_ns"]["mean"]) + job["write"]["lat_ns"]["mean"] + ) logger.debug( "Get %s %s runtime:%s IOPS read:%s write:%s sum:%s", - filename, jobname, job["job_runtime"], read, write, iops) + filename, + jobname, + job["job_runtime"], + read, + write, + iops, + ) img_result["jobs"][jobname]["iops"].append(iops) img_result["jobs"][jobname]["lat"].append(lat) img_result["jobs"][jobname]["bw"].append(bw) @@ -295,9 +313,15 @@ def compare_fio_result(results): job["iops_std"] = 0 if sample_num == 1 else st.stdev(iops) job["iops_dispersion"] = round(job["iops_std"] / iops_avg, 6) job["lat_avg"] = int(sum(lat) / sample_num) - logger.debug("%s smooth %s iops:%s AVG:%s lat:%s V:%s%%", img, - key, iops, iops_avg, job["lat_avg"], - job["iops_dispersion"] * 100) + logger.debug( + "%s smooth %s iops:%s AVG:%s lat:%s V:%s%%", + img, + key, + iops, + iops_avg, + job["lat_avg"], + job["iops_dispersion"] * 100, + ) # compare data unexpected_result = {} warning_result = {} @@ -316,13 +340,26 @@ def compare_fio_result(results): obj2_v = obj2_job["iops_dispersion"] if (obj1_v > dispersion) or (obj2_v > dispersion): logger.warning( - "Test result %s is unstable(>%s) %s:%s %s:%s", key, - dispersion, obj1_name, obj1_v, obj2_name, obj2_v) + "Test result %s is unstable(>%s) %s:%s %s:%s", + key, + dispersion, + obj1_name, + obj1_v, + obj2_name, + obj2_v, + ) gap = round(((obj2_avg - obj1_avg) / obj1_avg * 100), 1) ratio = round((obj1_avg / obj2_avg * 100), 
1) logger.debug( "%s-%s: %-20s: %-10s %-10s (ratio: %-5s%%) (gap: %-5s%%)", - obj1_name, obj2_name, key, obj1_avg, obj2_avg, ratio, gap) + obj1_name, + obj2_name, + key, + obj1_avg, + obj2_avg, + ratio, + gap, + ) if obj1_avg > obj2_avg: r = (obj1_name, obj2_name, obj1_avg, obj2_avg) @@ -341,7 +378,7 @@ def compare_fio_result(results): # final result if unexpected_result: - test.fail("Get Unexpected: %s" % unexpected_result) + test.fail(f"Get Unexpected: {unexpected_result}") if warning_result: logger.warning("Get Warning :%s", warning_result) @@ -362,7 +399,7 @@ def get_disk_iops(disk): raise err def choose_fastest_disk(disks): - logger.debug("Choose disk in: %s" % disks) + logger.debug("Choose disk in: %s", disks) if len(disks) < 2: return disks[0] @@ -381,26 +418,25 @@ def check_host_iops(disk, iops_req): iops = get_disk_iops(disk) logger.debug("Checking performance %s : %s", iops, iops_req) if iops < iops_req: - test.cancel("IO Performance is too low %s < %s" % (iops, iops_req)) + test.cancel(f"IO Performance is too low {iops} < {iops_req}") def process_selected_disk(disk): - """ format and mount disk - """ - out = process.getoutput("lsblk -s -p %s -O -J" % disk) + """format and mount disk""" + out = process.getoutput(f"lsblk -s -p {disk} -O -J") out = json.loads(out) device = out["blockdevices"][0] if device.get("fstype"): logger.debug("%s fstype:%s", disk, device.get("fstype")) else: - execute_operation("host", "mkfs.xfs -f %s " % disk) + execute_operation("host", f"mkfs.xfs -f {disk} ") - execute_operation("host", "mount %s %s && mount" % (disk, fio_dir)) - umount_cmd = "umount -fl %s;" % fio_dir + execute_operation("host", f"mount {disk} {fio_dir} && mount") + umount_cmd = f"umount -fl {fio_dir};" params["post_command"] = umount_cmd + params.get("post_command", "") def auto_select_disk(): - """select empty disk """ + """select empty disk""" if select_disk_request != "yes": return disks = [] @@ -409,8 +445,7 @@ def auto_select_disk(): if select_disk_name: logger.debug("Checking specified disk:%s", select_disk_name) - status_out = process.getstatusoutput( - "lsblk -p -O -J %s" % select_disk_name) + status_out = process.getstatusoutput(f"lsblk -p -O -J {select_disk_name}") if status_out[0] == 0: out = json.loads(status_out[1]) disk = out["blockdevices"][0] if out["blockdevices"] else None @@ -420,10 +455,12 @@ def auto_select_disk(): process_disk = False test.cancel("Please use mpath instead of raw device") if disk.get("mountpoint") or ( - disk.get("children") and disk["type"] != "mpath"): + disk.get("children") and disk["type"] != "mpath" + ): process_disk = False - logger.debug("Skip %s due to mounted or not empty", - select_disk_name) + logger.debug( + "Skip %s due to mounted or not empty", select_disk_name + ) if process_disk: return process_selected_disk(select_disk_name) else: @@ -433,11 +470,11 @@ def auto_select_disk(): out = json.loads(process.getoutput("lsblk -p -b -O -J ")) for disk in out["blockdevices"]: name = disk["name"] - logger.debug("Checking %s: type:%s fstype:%s", name, disk["type"], - disk["fstype"]) + logger.debug( + "Checking %s: type:%s fstype:%s", name, disk["type"], disk["fstype"] + ) if disk["type"] != "disk" and disk["type"] != "mpath": - logger.debug("Skip %s the type:%s is not support", name, - disk["type"]) + logger.debug("Skip %s the type:%s is not support", name, disk["type"]) continue if disk.get("mountpoint"): logger.debug("Skip %s due to mounted or not empty", name) @@ -487,12 +524,15 @@ def check_default_mq(): check_default_mq_cmd %= dev output = 
session.cmd_output(check_default_mq_cmd) logger.debug(output) - output = output.split('\n')[0] + output = output.split("\n")[0] default_mq_nums = len(re.split(r"[ ]+", output)) if default_mq_nums != int(params["vcpu_maxcpus"]): - test.fail("Default num-queue value(%s) not equal vcpu nums(%s)" - % (default_mq_nums, int(params["vcpu_maxcpus"]))) + test.fail( + "Default num-queue value({}) not equal vcpu nums({})".format( + default_mq_nums, int(params["vcpu_maxcpus"]) + ) + ) def execute_operation(where, cmd): # function @@ -531,14 +571,13 @@ def execute_operation(where, cmd): boot_wait_time = params.get_numeric("boot_wait_time", 60) select_disk_request = params.get("select_disk_request") select_disk_name = params.get("select_disk_name", "") - select_disk_minimum_size = params.get_numeric("select_disk_minimum_size", - 20) + select_disk_minimum_size = params.get_numeric("select_disk_minimum_size", 20) vm = None locals_var = locals() if host_init_operation: - logger.debug("Execute host init : %s" % host_init_operation) + logger.debug("Execute host init : %s", host_init_operation) execute_operation("host", host_init_operation) auto_select_disk() @@ -546,10 +585,11 @@ def execute_operation(where, cmd): if check_host_iops_req > 0: check_host_iops(host_test_file, check_host_iops_req) - if params["not_preprocess"] != 'no': - logger.debug("Ready boot VM : %s", params['images']) - env_process.process(test, params, env, env_process.preprocess_image, - env_process.preprocess_vm) + if params["not_preprocess"] != "no": + logger.debug("Ready boot VM : %s", params["images"]) + env_process.process( + test, params, env, env_process.preprocess_image, env_process.preprocess_vm + ) vm = env.get_vm(params["main_vm"]) vm.verify_alive() @@ -557,24 +597,24 @@ def execute_operation(where, cmd): # Wait system init time.sleep(boot_wait_time) - fio = generate_instance(params, vm, 'fio') + fio = generate_instance(params, vm, "fio") if guest_init_operation: - logger.debug("Execute guest init: %s" % guest_init_operation) + logger.debug("Execute guest init: %s", guest_init_operation) execute_operation("guest", guest_init_operation) if guest_operation: - logger.debug("Execute guest: %s" % guest_operation) + logger.debug("Execute guest: %s", guest_operation) execute_operation("guest", guest_operation) preprocess_fio_data(test_results) run_fio_test(test_results) if guest_deinit_operation: - logger.debug("Execute guest deinit : %s" % guest_deinit_operation) + logger.debug("Execute guest deinit : %s", guest_deinit_operation) execute_operation("guest", guest_deinit_operation) if host_deinit_operation: - logger.debug("Execute host deinit: %s" % host_deinit_operation) + logger.debug("Execute host deinit: %s", host_deinit_operation) execute_operation("host", host_deinit_operation) compare_fio_result(test_results) diff --git a/qemu/tests/block_repeat_blockdev_add.py b/qemu/tests/block_repeat_blockdev_add.py index cc2c6f3e70..6488945a87 100644 --- a/qemu/tests/block_repeat_blockdev_add.py +++ b/qemu/tests/block_repeat_blockdev_add.py @@ -1,9 +1,9 @@ """Repeatedly blockdev_add/del iothread enabled node""" + import os -from avocado.utils import process -from virttest import error_context -from virttest import data_dir +from avocado.utils import process +from virttest import data_dir, error_context # This decorator makes the test function aware of context strings diff --git a/qemu/tests/block_resize.py b/qemu/tests/block_resize.py index f113ac90ab..479793f080 100644 --- a/qemu/tests/block_resize.py +++ b/qemu/tests/block_resize.py @@ 
-1,20 +1,21 @@ import json import re -from avocado.utils import wait, process - -from virttest import error_context -from virttest import utils_numeric -from virttest import utils_test -from virttest import utils_disk -from virttest import utils_misc -from virttest import storage -from virttest import data_dir -from virttest.utils_windows import drive +from avocado.utils import process, wait +from virttest import ( + data_dir, + error_context, + storage, + utils_disk, + utils_misc, + utils_numeric, + utils_test, +) +from virttest.qemu_capabilities import Flags from virttest.qemu_storage import QemuImg +from virttest.utils_windows import drive from provider.storage_benchmark import generate_instance -from virttest.qemu_capabilities import Flags @error_context.context_aware @@ -48,11 +49,15 @@ def verify_disk_size(session, os_type, disk): global current_size current_size = utils_disk.get_disk_size(session, os_type, disk) accept_ratio = float(params.get("accept_ratio", 0)) - if (current_size <= block_size and - current_size >= block_size * (1 - accept_ratio)): - test.log.info("Block Resizing Finished !!! \n" - "Current size %s is same as the expected %s", - current_size, block_size) + if current_size <= block_size and current_size >= block_size * ( + 1 - accept_ratio + ): + test.log.info( + "Block Resizing Finished !!! \n" + "Current size %s is same as the expected %s", + current_size, + block_size, + ) return True def create_md5_file(filename): @@ -60,7 +65,7 @@ def create_md5_file(filename): Create the file to verify md5 value. """ test.log.debug("create md5 file %s", filename) - if os_type == 'windows': + if os_type == "windows": vm.copy_files_to(params["tmp_md5_file"], filename) else: session.cmd(params["dd_cmd"] % filename) @@ -69,7 +74,7 @@ def get_md5_of_file(filename): """ Get the md5 value of filename. """ - ex_args = (mpoint, filename) if os_type == 'windows' else filename + ex_args = (mpoint, filename) if os_type == "windows" else filename return session.cmd(md5_cmd % ex_args).split()[0] def check_shrink_completion(vol_id, size): @@ -78,11 +83,12 @@ def check_shrink_completion(vol_id, size): :param vol_id: Drive letter. :param size: shrink size. """ + def _check_event_cmd(cmd): status, output = session.cmd_status_output(cmd) - test.log.debug("Get event: %s" % output) - test.log.debug("Expect contain msg: %s" % msg) # pylint: disable=E0606 - return status == 0 and regexp.search(output) # pylint: disable=E0606 + test.log.debug("Get event: %s", output) + test.log.debug("Expect contain msg: %s", msg) # pylint: disable=E0606 + return status == 0 and regexp.search(output) # pylint: disable=E0606 drive.shrink_volume(session, mpoint, size) check_event = params.get("check_258_event") @@ -90,11 +96,14 @@ def _check_event_cmd(cmd): msg = params.get("event_msg") % vol_id regexp = re.compile(msg) defrag_event_received = utils_misc.wait_for( - lambda: _check_event_cmd(check_event), 180) + lambda: _check_event_cmd(check_event), 180 + ) if not defrag_event_received: - test.fail("Did not receive the defrag finished event, " - "disk shrink failed in guest.") + test.fail( + "Did not receive the defrag finished event, " + "disk shrink failed in guest." 
+ ) vm = env.get_vm(params["main_vm"]) vm.verify_alive() @@ -110,23 +119,25 @@ def _check_event_cmd(cmd): md5_file = params.get("md5_file", "md5.dat") data_image = params.get("images").split()[-1] data_image_params = params.object_params(data_image) - data_image_filename = storage.get_image_filename(data_image_params, - data_dir.get_data_dir()) - data_image_dev = vm.get_block({'file': data_image_filename}) + data_image_filename = storage.get_image_filename( + data_image_params, data_dir.get_data_dir() + ) + data_image_dev = vm.get_block({"file": data_image_filename}) img = QemuImg(data_image_params, data_dir.get_data_dir(), data_image) - block_virtual_size = json.loads(img.info(force_share=True, - output="json"))["virtual-size"] + block_virtual_size = json.loads(img.info(force_share=True, output="json"))[ + "virtual-size" + ] session = vm.wait_for_login(timeout=timeout) try: - if os_type == 'windows' and driver_name: - session = utils_test.qemu.windrv_check_running_verifier(session, vm, - test, driver_name, - timeout) + if os_type == "windows" and driver_name: + session = utils_test.qemu.windrv_check_running_verifier( + session, vm, test, driver_name, timeout + ) if params.get("format_disk") == "yes": - if os_type == 'linux': + if os_type == "linux": disk_dict = utils_disk.get_linux_disks(session) disk = sorted(disk_dict.keys())[0] disk_serial = disk_dict[disk][3] @@ -134,10 +145,10 @@ def _check_event_cmd(cmd): disk = utils_disk.get_windows_disks_index(session, img_size)[0] utils_disk.update_windows_disk_attributes(session, disk) error_context.context("Formatting disk", test.log.info) - mpoint = utils_disk.configure_empty_disk(session, disk, img_size, - os_type, fstype=fstype, - labeltype=labeltype)[0] - partition = mpoint.replace('mnt', 'dev') if 'mnt' in mpoint else None + mpoint = utils_disk.configure_empty_disk( + session, disk, img_size, os_type, fstype=fstype, labeltype=labeltype + )[0] + partition = mpoint.replace("mnt", "dev") if "mnt" in mpoint else None for ratio in params.objects("disk_change_ratio"): block_size = int(int(block_virtual_size) * float(ratio)) @@ -146,8 +157,8 @@ def _check_event_cmd(cmd): block_size = int(block_size / 512) * 512 # Record md5 - if params.get('md5_test') == 'yes': - junction = ":\\" if os_type == 'windows' else "/" + if params.get("md5_test") == "yes": + junction = ":\\" if os_type == "windows" else "/" md5_filename = mpoint + junction + md5_file create_md5_file(md5_filename) md5 = get_md5_of_file(md5_filename) @@ -155,21 +166,29 @@ def _check_event_cmd(cmd): # We need shrink the disk in guest first, then in monitor if float(ratio) < 1.0: - error_context.context("Shrink disk size to %s in guest" - % block_size, test.log.info) - if os_type == 'windows': - shr_size = utils_numeric.normalize_data_size(str( - utils_disk.get_disk_size(session, os_type, disk) - - block_size), 'M').split(".")[0] + error_context.context( + f"Shrink disk size to {block_size} in guest", test.log.info + ) + if os_type == "windows": + shr_size = utils_numeric.normalize_data_size( + str( + utils_disk.get_disk_size(session, os_type, disk) + - block_size + ), + "M", + ).split(".")[0] check_shrink_completion(mpoint, shr_size) else: - utils_disk.resize_filesystem_linux(session, partition, - str(block_size)) - utils_disk.resize_partition_linux(session, partition, - str(block_size)) + utils_disk.resize_filesystem_linux( + session, partition, str(block_size) + ) + utils_disk.resize_partition_linux( + session, partition, str(block_size) + ) - error_context.context("Change disk size to 
%s in monitor" - % block_size, test.log.info) + error_context.context( + f"Change disk size to {block_size} in monitor", test.log.info + ) if vm.check_capability(Flags.BLOCKDEV): args = (None, block_size, data_image_dev) else: @@ -190,31 +209,41 @@ def _check_event_cmd(cmd): # We need extend disk in monitor first then extend it in guest if float(ratio) > 1.0: - error_context.context("Extend disk to %s in guest" - % block_size, test.log.info) - if os_type == 'windows': + error_context.context( + f"Extend disk to {block_size} in guest", test.log.info + ) + if os_type == "windows": max_block_size = int(params["max_block_size"]) if int(block_size) >= max_block_size: test.cancel( - "Cancel the test for more than maximum %dB disk." % - max_block_size) + "Cancel the test for more than maximum %dB disk." + % max_block_size + ) drive.extend_volume(session, mpoint) else: - utils_disk.resize_partition_linux(session, partition, - str(block_size)) - utils_disk.resize_filesystem_linux(session, partition, - utils_disk.SIZE_AVAILABLE) + utils_disk.resize_partition_linux( + session, partition, str(block_size) + ) + utils_disk.resize_filesystem_linux( + session, partition, utils_disk.SIZE_AVAILABLE + ) global current_size current_size = 0 steps = params.get_numeric("verify_disk_size_steps", 1) - if not wait.wait_for(lambda: verify_disk_size(session, os_type, - disk), 20, 0, steps, - "Block Resizing"): - test.fail("Block size get from guest is not same as expected.\n" - "Reported: %s\nExpect: %s\n" % (current_size, block_size)) + if not wait.wait_for( + lambda: verify_disk_size(session, os_type, disk), + 20, + 0, + steps, + "Block Resizing", + ): + test.fail( + "Block size get from guest is not same as expected.\n" + f"Reported: {current_size}\nExpect: {block_size}\n" + ) session = vm.reboot(session=session) - if os_type == 'linux': + if os_type == "linux": # After guest reboot, reget the disk letter, if it changed, replace # variables, i.e 'mpoint', 'partition', 'disk' and 'md5_filename' new_disk = utils_misc.get_linux_drive_path(session, disk_serial) @@ -223,31 +252,32 @@ def _check_event_cmd(cmd): mpoint = mpoint.replace(disk, new_disk) partition = partition.replace(disk, new_disk) disk = new_disk - if params.get('md5_test') == 'yes': + if params.get("md5_test") == "yes": md5_filename = mpoint + junction + md5_file - session.cmd("mkdir -p %s" % mpoint) + session.cmd(f"mkdir -p {mpoint}") - if not utils_disk.is_mount(partition, dst=mpoint, - fstype=fstype, session=session): - res = utils_disk.mount(partition, mpoint, - fstype=fstype, session=session) + if not utils_disk.is_mount( + partition, dst=mpoint, fstype=fstype, session=session + ): + res = utils_disk.mount( + partition, mpoint, fstype=fstype, session=session + ) if not res: test.fail("Mounting data disk was failed! 
") - if params.get('iozone_test') == 'yes': - iozone_timeout = params.get_numeric("iozone_timeout", 1800, - float) + if params.get("iozone_test") == "yes": + iozone_timeout = params.get_numeric("iozone_timeout", 1800, float) iozone_cmd_options = params.get("iozone_option") % mpoint - io_test = generate_instance(params, vm, 'iozone') + io_test = generate_instance(params, vm, "iozone") try: io_test.run(iozone_cmd_options, iozone_timeout) finally: io_test.clean() # Verify md5 - if params.get('md5_test') == 'yes': + if params.get("md5_test") == "yes": new_md5 = get_md5_of_file(md5_filename) - test.assertTrue(new_md5 == md5, "Unmatched md5: %s" % new_md5) + test.assertTrue(new_md5 == md5, f"Unmatched md5: {new_md5}") session.close() except Exception as e: @@ -255,10 +285,10 @@ def _check_event_cmd(cmd): test.log.debug("Find %s Exception:'%s'.", pid, str(e)) if pid: logdir = test.logdir - process.getoutput("gstack %s > %s/gstack.log" % (pid, logdir)) + process.getoutput(f"gstack {pid} > {logdir}/gstack.log") process.getoutput( - "timeout 20 strace -tt -T -v -f -s 32 -p %s -o %s/strace.log" % ( - pid, logdir)) + f"timeout 20 strace -tt -T -v -f -s 32 -p {pid} -o {logdir}/strace.log" + ) else: test.log.debug("VM dead...") raise e diff --git a/qemu/tests/block_resize_unplug.py b/qemu/tests/block_resize_unplug.py index f128bd2c29..8ef68d7bfb 100644 --- a/qemu/tests/block_resize_unplug.py +++ b/qemu/tests/block_resize_unplug.py @@ -1,18 +1,19 @@ import json import re -from virttest import data_dir -from virttest import error_context -from virttest import qemu_storage -from virttest import storage -from virttest import utils_misc -from virttest import utils_test +from virttest import ( + data_dir, + error_context, + qemu_storage, + storage, + utils_misc, + utils_test, +) from virttest.qemu_capabilities import Flags from virttest.utils_numeric import normalize_data_size -from provider.block_devices_plug import BlockDevicesPlug from provider import win_driver_utils - +from provider.block_devices_plug import BlockDevicesPlug ENLARGE, SHRINK = ("enlarge", "shrink") @@ -36,35 +37,41 @@ def run(test, params, env): :param params: Dictionary with the test parameters. :param env: Dictionary with test environment. """ + def _change_vm_power(): - """ Change the vm power. """ - method, command = params['command_opts'].split(',') - test.log.info('Sending command(%s): %s', method, command) - if method == 'shell': + """Change the vm power.""" + method, command = params["command_opts"].split(",") + test.log.info("Sending command(%s): %s", method, command) + if method == "shell": power_session = vm.wait_for_login() power_session.sendline(command) else: getattr(vm.monitor, command)() if shutdown_vm: - if not utils_misc.wait_for( - lambda: vm.monitor.get_event("SHUTDOWN"), 600): + if not utils_misc.wait_for(lambda: vm.monitor.get_event("SHUTDOWN"), 600): raise test.fail("Not received SHUTDOWN QMP event.") def _check_vm_status(timeout=600): - """ Check the status of vm. """ - action = 'shutdown' if shutdown_vm else 'login' - if not getattr(vm, 'wait_for_%s' % action)(timeout=timeout): - test.fail('Failed to %s vm.' % action) + """Check the status of vm.""" + action = "shutdown" if shutdown_vm else "login" + if not getattr(vm, f"wait_for_{action}")(timeout=timeout): + test.fail(f"Failed to {action} vm.") def _block_resize(dev): - """ Resize the block size. 
""" - resize_size = int(float(normalize_data_size(re.search( - r'(\d+\.?(\d+)?\w)', params['resize_size']).group(1), "B"))) - size = str( - data_image_size + resize_size) if resize_op == ENLARGE else str( - data_image_size - resize_size) - test.log.info("Start to %s image '%s' to %sB.", - resize_op, data_image, size) + """Resize the block size.""" + resize_size = int( + float( + normalize_data_size( + re.search(r"(\d+\.?(\d+)?\w)", params["resize_size"]).group(1), "B" + ) + ) + ) + size = ( + str(data_image_size + resize_size) + if resize_op == ENLARGE + else str(data_image_size - resize_size) + ) + test.log.info("Start to %s image '%s' to %sB.", resize_op, data_image, size) if vm.check_capability(Flags.BLOCKDEV): args = (None, size, dev) else: @@ -73,23 +80,25 @@ def _block_resize(dev): return size def _check_img_size(size): - """ Check the size of image after resize. """ + """Check the size of image after resize.""" img = qemu_storage.QemuImg( - data_image_params, data_dir.get_data_dir(), data_image) - if json.loads(img.info(True, 'json'))['virtual-size'] != int(size): - test.fail('The virtual size is not equal to %sB after %s.' % - (size, resize_op)) + data_image_params, data_dir.get_data_dir(), data_image + ) + if json.loads(img.info(True, "json"))["virtual-size"] != int(size): + test.fail(f"The virtual size is not equal to {size}B after {resize_op}.") - shutdown_vm = params.get('shutdown_vm', 'no') == 'yes' - reboot = params.get('reboot_vm', 'no') == 'yes' + shutdown_vm = params.get("shutdown_vm", "no") == "yes" + reboot = params.get("reboot_vm", "no") == "yes" data_image = params.get("images").split()[-1] data_image_params = params.object_params(data_image) - data_image_size = int(float(normalize_data_size( - data_image_params.get("image_size"), "B"))) + data_image_size = int( + float(normalize_data_size(data_image_params.get("image_size"), "B")) + ) data_image_filename = storage.get_image_filename( - data_image_params, data_dir.get_data_dir()) - resize_op = SHRINK if '-' in params['resize_size'] else ENLARGE - is_windows = params['os_type'] == 'windows' + data_image_params, data_dir.get_data_dir() + ) + resize_op = SHRINK if "-" in params["resize_size"] else ENLARGE + is_windows = params["os_type"] == "windows" vm = env.get_vm(params["main_vm"]) vm.verify_alive() @@ -98,8 +107,9 @@ def _check_img_size(size): if is_windows: utils_test.qemu.windrv_check_running_verifier( - session, vm, test, params['driver_name'], 300) - _check_img_size(_block_resize(vm.get_block({'file': data_image_filename}))) + session, vm, test, params["driver_name"], 300 + ) + _check_img_size(_block_resize(vm.get_block({"file": data_image_filename}))) if reboot: _change_vm_power() diff --git a/qemu/tests/block_scsi_device.py b/qemu/tests/block_scsi_device.py index 7804d87365..e61d0a1afc 100644 --- a/qemu/tests/block_scsi_device.py +++ b/qemu/tests/block_scsi_device.py @@ -21,32 +21,34 @@ def run(test, params, env): """ def scan_scsi_device(scsi_addr): - """ Scan the scsi device. """ - error_context.context( - "Scan the scsi driver @%s." % scsi_addr, test.log.info) - session.cmd('echo "- - -" > /sys/class/scsi_host/host%s/scan' % - scsi_addr.split(':')[0]) + """Scan the scsi device.""" + error_context.context(f"Scan the scsi driver @{scsi_addr}.", test.log.info) + session.cmd( + 'echo "- - -" > /sys/class/scsi_host/host{}/scan'.format( + scsi_addr.split(":")[0] + ) + ) def delete_scsi_device(scsi_addr): - """ Delete the scsi drive. """ - error_context.context( - "Delete the scsi driver @%s." 
% scsi_addr, test.log.info) - session.cmd('echo 1 > /sys/class/scsi_device/%s/device/delete' % scsi_addr) + """Delete the scsi drive.""" + error_context.context(f"Delete the scsi driver @{scsi_addr}.", test.log.info) + session.cmd(f"echo 1 > /sys/class/scsi_device/{scsi_addr}/device/delete") def get_scsi_addr_by_product(product_name): - """ Get the scsi address by virtio_scsi product option. """ + """Get the scsi address by virtio_scsi product option.""" test.log.info("Get the scsi address by qemu product option.") - addr_info = session.cmd("lsscsi | grep %s | awk '{print $1}'" % product_name) - addr = re.search(r'((\d+\:){3}\d+)', addr_info).group(1) - test.log.info( - "The scsi address of the product %s is %s.", product_name, addr) + addr_info = session.cmd(f"lsscsi | grep {product_name} | awk '{{print $1}}'") + addr = re.search(r"((\d+\:){3}\d+)", addr_info).group(1) + test.log.info("The scsi address of the product %s is %s.", product_name, addr) return addr def check_scsi_disk_by_address(scsi_addr): - """ Check whether the scsi disk is inside guest. """ - error_context.context("Check whether the scsi disk(@%s) is inside guest." - % scsi_addr, test.log.info) - scsi_info = session.cmd('lsscsi') + """Check whether the scsi disk is inside guest.""" + error_context.context( + f"Check whether the scsi disk(@{scsi_addr}) is inside guest.", + test.log.info, + ) + scsi_info = session.cmd("lsscsi") test.log.info(scsi_info) return scsi_addr in scsi_info @@ -54,15 +56,18 @@ def check_scsi_disk_by_address(scsi_addr): vm.verify_alive() session = vm.wait_for_login(timeout=360) - product_name = params['product_name'] + product_name = params["product_name"] scsi_addr = get_scsi_addr_by_product(product_name) delete_scsi_device(scsi_addr) if check_scsi_disk_by_address(scsi_addr): - test.fail("The scsi disk(@%s) appears in guest " - "after disable scsi drive." % scsi_addr) + test.fail( + f"The scsi disk(@{scsi_addr}) appears in guest " "after disable scsi drive." + ) scan_scsi_device(scsi_addr) if not check_scsi_disk_by_address(scsi_addr): - test.fail("The scsi disk(@%s) does not appear in guest " - "after enable scsi drive." % scsi_addr) + test.fail( + f"The scsi disk(@{scsi_addr}) does not appear in guest " + "after enable scsi drive." + ) diff --git a/qemu/tests/block_scsi_generic_inquiry.py b/qemu/tests/block_scsi_generic_inquiry.py index bb3b6c7ce7..198325faf1 100644 --- a/qemu/tests/block_scsi_generic_inquiry.py +++ b/qemu/tests/block_scsi_generic_inquiry.py @@ -1,9 +1,5 @@ from avocado.utils import process - -from virttest import data_dir -from virttest import env_process -from virttest import utils_misc - +from virttest import data_dir, env_process, utils_misc from virttest.iscsi import Iscsi from virttest.utils_disk import get_linux_disks @@ -29,8 +25,9 @@ def run(test, params, env): :param params: Dictionary with the test parameters. :param env: Dictionary with test environment. 
""" + def fetch_sg_info(device, session=None): - cmd = params['cmd_sg_inq'] % device + cmd = params["cmd_sg_inq"] % device if session: return session.cmd_output(cmd) return process.getoutput(cmd, 60, ignore_status=False) @@ -39,18 +36,17 @@ def fetch_sg_info(device, session=None): try: iscsi.login() if not utils_misc.wait_for(lambda: iscsi.get_device_name(), 60): - test.error('Can not get the iSCSI device.') + test.error("Can not get the iSCSI device.") - cmd_get_disk_path = params['cmd_get_disk_path'] - disk_path = process.system_output(cmd_get_disk_path, 60, - shell=True).decode() + cmd_get_disk_path = params["cmd_get_disk_path"] + disk_path = process.system_output(cmd_get_disk_path, 60, shell=True).decode() host_sg_info = fetch_sg_info(disk_path) - test.log.info('The scsi generic info from host: %s', host_sg_info) + test.log.info("The scsi generic info from host: %s", host_sg_info) - image_data_tag = params['image_data_tag'] - params['image_name_%s' % image_data_tag] = disk_path - params['image_size'] = params['emulated_image_size'] + image_data_tag = params["image_data_tag"] + params[f"image_name_{image_data_tag}"] = disk_path + params["image_size"] = params["emulated_image_size"] image_params = params.object_params(image_data_tag) env_process.preprocess_image(test, image_params, image_data_tag) @@ -60,22 +56,23 @@ def fetch_sg_info(device, session=None): vm.verify_alive() session = vm.wait_for_login() - data_disk = '/dev/' + list(get_linux_disks(session).keys()).pop() + data_disk = "/dev/" + list(get_linux_disks(session).keys()).pop() guest_sg_info = fetch_sg_info(data_disk, session) - test.log.info('The scsi generic info from guest: %s', guest_sg_info) + test.log.info("The scsi generic info from guest: %s", guest_sg_info) for info in guest_sg_info.split(): if info not in host_sg_info: - test.fail('The guest scsi generic info is not similar to host.') + test.fail("The guest scsi generic info is not similar to host.") iscsi.logout() - if params['sg_fail_info'] not in fetch_sg_info(data_disk, session): - test.fail('No found the fail information after logout iscsi server.') + if params["sg_fail_info"] not in fetch_sg_info(data_disk, session): + test.fail("No found the fail information after logout iscsi server.") - session.cmd_output(params['cmd_dd'] % data_disk) - vm_status_paused = params['vm_status_paused'] + session.cmd_output(params["cmd_dd"] % data_disk) + vm_status_paused = params["vm_status_paused"] if not utils_misc.wait_for( - lambda: vm.monitor.verify_status(vm_status_paused), 120, step=3): - test.fail('The vm status is not %s.' 
% vm_status_paused) + lambda: vm.monitor.verify_status(vm_status_paused), 120, step=3 + ): + test.fail(f"The vm status is not {vm_status_paused}.") finally: iscsi.delete_target() diff --git a/qemu/tests/block_stream.py b/qemu/tests/block_stream.py index cc9ee30cdc..c5b37c5087 100644 --- a/qemu/tests/block_stream.py +++ b/qemu/tests/block_stream.py @@ -1,29 +1,28 @@ -import re import logging +import re from avocado.utils import process - -from virttest import error_context -from virttest import env_process, utils_misc +from virttest import env_process, error_context, utils_misc from qemu.tests import blk_stream -LOG_JOB = logging.getLogger('avocado.test') +LOG_JOB = logging.getLogger("avocado.test") class BlockStreamTest(blk_stream.BlockStream): - def get_image_size(self, image_file): try: qemu_img = utils_misc.get_qemu_img_binary(self.params) - cmd = "%s info %s" % (qemu_img, image_file) + cmd = f"{qemu_img} info {image_file}" LOG_JOB.info("Try to get image size via qemu-img info") info = process.system_output(cmd) size = int(re.findall(r"(\d+) bytes", info)[0]) except process.CmdError: - LOG_JOB.info("qemu-img info failed(it happens because later qemu" - " distributions prevent it access a running image.)." - " Now get image size via qmp interface 'query-block'") + LOG_JOB.info( + "qemu-img info failed(it happens because later qemu" + " distributions prevent it access a running image.)." + " Now get image size via qmp interface 'query-block'" + ) blocks_info = self.vm.monitor.info("block") for block in blocks_info: info = block["inserted"] @@ -53,23 +52,22 @@ def run(test, params, env): stream_test.create_snapshots() backingfile = stream_test.get_backingfile() if not backingfile: - test.fail("Backing file is not available in the " - "backdrive image") + test.fail("Backing file is not available in the " "backdrive image") test.log.info("Image file: %s", stream_test.get_image_file()) test.log.info("Backing file: %s", backingfile) stream_test.start() stream_test.wait_for_finished() backingfile = stream_test.get_backingfile() if backingfile: - test.fail("Backing file is still available in the " - "backdrive image") + test.fail("Backing file is still available in the " "backdrive image") target_file = stream_test.get_image_file() target_size = stream_test.get_image_size(target_file) error_context.context("Compare image size", test.log.info) if image_size < target_size: - test.fail("Compare %s image, size of %s increased" - "(%s -> %s)" % (image_file, target_file, - image_size, target_size)) + test.fail( + f"Compare {image_file} image, size of {target_file} increased" + f"({image_size} -> {target_size})" + ) stream_test.verify_alive() stream_test.vm.destroy() vm_name = params["main_vm"] diff --git a/qemu/tests/block_stream_check_backingfile.py b/qemu/tests/block_stream_check_backingfile.py index 94c884c4e3..6b1f8d920a 100644 --- a/qemu/tests/block_stream_check_backingfile.py +++ b/qemu/tests/block_stream_check_backingfile.py @@ -1,18 +1,16 @@ -import os import logging +import os from virttest import utils_misc from qemu.tests import blk_stream -LOG_JOB = logging.getLogger('avocado.test') +LOG_JOB = logging.getLogger("avocado.test") class BlockStreamCheckBackingfile(blk_stream.BlockStream): - def __init__(self, test, params, env, tag): - super(BlockStreamCheckBackingfile, self).__init__(test, - params, env, tag) + super().__init__(test, params, env, tag) def check_backingfile(self): """ @@ -23,18 +21,20 @@ def check_backingfile(self): backingfile = self.get_backingfile("qemu-img") if 
backingfile: img_file = self.get_image_file() - LOG_JOB.debug("Got backing-file: %s" % backingfile + - "by 'qemu-img info %s'" % img_file) + LOG_JOB.debug( + "Got backing-file: %s by 'qemu-img info %s'", backingfile, img_file + ) fail |= bool(backingfile) backingfile = self.get_backingfile("monitor") if backingfile: - LOG_JOB.debug("Got backing-file: %s" % backingfile + - "by 'info/query block' " + - "in %s monitor" % self.vm.monitor.protocol) + LOG_JOB.debug( + "Got backing-file: %s by 'info/query block' in %s monitor", + backingfile, + self.vm.monitor.protocol, + ) fail |= bool(backingfile) if fail: - msg = ("Unexpected backing file found, there should be " - "no backing file") + msg = "Unexpected backing file found, there should be " "no backing file" self.test.fail(msg) def check_backingfile_exist(self): @@ -43,7 +43,7 @@ def check_backingfile_exist(self): backingfile = self.get_backingfile() if backingfile != self.base_image: msg = "The backing file from monitor does not meet expectation. " - msg += "It should be %s, now is %s." % (self.base_image, backingfile) + msg += f"It should be {self.base_image}, now is {backingfile}." self.test.fail(msg) def check_imagefile(self): @@ -56,8 +56,8 @@ def check_imagefile(self): LOG_JOB.info("Check image file is '%s'", exp_img_file) img_file = self.get_image_file() if exp_img_file != img_file: - msg = "Excepted image file: %s," % exp_img_file - msg += "Actual image file: %s" % img_file + msg = f"Excepted image file: {exp_img_file}," + msg += f"Actual image file: {img_file}" self.test.fail(msg) def set_backingfile(self): diff --git a/qemu/tests/block_stream_drop_backingfile.py b/qemu/tests/block_stream_drop_backingfile.py index acdcb163bc..0755116d43 100644 --- a/qemu/tests/block_stream_drop_backingfile.py +++ b/qemu/tests/block_stream_drop_backingfile.py @@ -2,11 +2,7 @@ import re from avocado.utils import process - -from virttest import error_context -from virttest import storage -from virttest import utils_misc -from virttest import data_dir +from virttest import data_dir, error_context, storage, utils_misc @error_context.context_aware @@ -41,11 +37,17 @@ def wait_job_done(timeout=3600): """ Wait for job on the device done, raise TestFail exception if timeout; """ - if utils_misc.wait_for(lambda: - not vm.monitor.query_block_job(device_id), - timeout, first=0.2, step=2.0, - text="Wait for canceling block job") is None: - test.fail("Wait job finish timeout in %ss" % timeout) + if ( + utils_misc.wait_for( + lambda: not vm.monitor.query_block_job(device_id), + timeout, + first=0.2, + step=2.0, + text="Wait for canceling block job", + ) + is None + ): + test.fail(f"Wait job finish timeout in {timeout}s") def verify_backingfile(expect_backingfile): """ @@ -54,7 +56,7 @@ def verify_backingfile(expect_backingfile): """ backing_file = vm.monitor.get_backingfile(device_id) if backing_file != expect_backingfile: - test.fail("Unexpect backingfile(%s)" % backing_file) + test.fail(f"Unexpect backingfile({backing_file})") def get_openingfiles(): """ @@ -62,18 +64,18 @@ def get_openingfiles(): """ pid = vm.get_pid() cmd = params.get("snapshot_check_cmd") % pid - return set(process.system_output(cmd, ignore_status=True, - shell=True).splitlines()) + return set( + process.system_output(cmd, ignore_status=True, shell=True).splitlines() + ) snapshots = list(map(lambda x: os.path.join(image_dir, x), ["sn1", "sn2"])) try: - error_context.context("Create snapshots-chain(base->sn1->sn2)", - test.log.info) + error_context.context("Create 
snapshots-chain(base->sn1->sn2)", test.log.info) for index, snapshot in enumerate(snapshots): base_file = index and snapshots[index - 1] or image_file device_id = vm.live_snapshot(base_file, snapshot) if not device_id: - test.fail("Fail to create %s" % snapshot) + test.fail(f"Fail to create {snapshot}") error_context.context("Check backing-file of sn2", test.log.info) verify_backingfile(snapshots[0]) @@ -82,32 +84,31 @@ def get_openingfiles(): wait_job_done(wait_timeout) error_context.context("Check backing-file of sn2", test.log.info) verify_backingfile(image_file) - error_context.context("Check sn1 is not opening by qemu process", - test.log.info) + error_context.context("Check sn1 is not opening by qemu process", test.log.info) if snapshots[0] in get_openingfiles(): - test.fail("sn1 (%s) is opening by qemu" % snapshots[0]) + test.fail(f"sn1 ({snapshots[0]}) is opening by qemu") error_context.context("Merge base to sn2", test.log.info) vm.monitor.block_stream(device_id) wait_job_done(wait_timeout) error_context.context("Check backing-file of sn2", test.log.info) verify_backingfile(None) - error_context.context("check sn1 and base are not opening " - "by qemu process", test.log.info) + error_context.context( + "check sn1 and base are not opening " "by qemu process", test.log.info + ) if set([snapshots[0], image_file]).issubset(get_openingfiles()): - test.fail("%s is opening by qemu" - % set([snapshots[0], image_file])) + test.fail(f"{set([snapshots[0], image_file])} is opening by qemu") error_context.context("Reboot VM to check it works fine", test.log.info) session = vm.reboot(session=session, timeout=timeout) session.cmd(alive_check_cmd) vm.destroy() - error_context.context( - "Check backing-file of sn2 by qemu-img", - test.log.info) - cmd = "%s info %s" % (qemu_img, snapshots[1]) - if re.search("backing file", - process.system_output(cmd, ignore_status=True).decode('utf-8')): + error_context.context("Check backing-file of sn2 by qemu-img", test.log.info) + cmd = f"{qemu_img} info {snapshots[1]}" + if re.search( + "backing file", + process.system_output(cmd, ignore_status=True).decode("utf-8"), + ): test.fail("should no backing-file in this step") finally: files = " ".join(snapshots) - process.system(r"\rm -rf %s" % files) + process.system(rf"\rm -rf {files}") diff --git a/qemu/tests/block_stream_installation.py b/qemu/tests/block_stream_installation.py index 4d6d42756f..8fc1b829f4 100644 --- a/qemu/tests/block_stream_installation.py +++ b/qemu/tests/block_stream_installation.py @@ -1,8 +1,7 @@ -import time import random +import time -from virttest import utils_test -from virttest import utils_misc +from virttest import utils_misc, utils_test from qemu.tests import blk_stream @@ -19,8 +18,9 @@ def run(test, params, env): """ args = (test, params, env) - bg = utils_misc.InterruptedThread(utils_test.run_virt_sub_test, args, - {"sub_type": "unattended_install"}) + bg = utils_misc.InterruptedThread( + utils_test.run_virt_sub_test, args, {"sub_type": "unattended_install"} + ) bg.start() utils_misc.wait_for(bg.is_alive, timeout=10) time.sleep(random.uniform(60, 200)) diff --git a/qemu/tests/block_stream_negative.py b/qemu/tests/block_stream_negative.py index c16848f9ff..033d122a17 100644 --- a/qemu/tests/block_stream_negative.py +++ b/qemu/tests/block_stream_negative.py @@ -4,13 +4,12 @@ from qemu.tests import blk_stream -LOG_JOB = logging.getLogger('avocado.test') +LOG_JOB = logging.getLogger("avocado.test") class BlockStreamNegative(blk_stream.BlockStream): - def __init__(self, test, 
params, env, tag): - super(BlockStreamNegative, self).__init__(test, params, env, tag) + super().__init__(test, params, env, tag) @error_context.context_aware def set_speed(self): @@ -23,19 +22,16 @@ def set_speed(self): expected_speed = params.get("expected_speed", default_speed) if params.get("need_convert_to_int", "no") == "yes": expected_speed = int(expected_speed) - error_context.context("set speed to %s B/s" % expected_speed, - LOG_JOB.info) - args = {"device": self.device, - "speed": expected_speed} + error_context.context(f"set speed to {expected_speed} B/s", LOG_JOB.info) + args = {"device": self.device, "speed": expected_speed} response = str(self.vm.monitor.cmd_qmp("block-job-set-speed", args)) if "(core dump)" in response: - self.test.fail("Qemu core dump when reset " - "speed to a negative value.") + self.test.fail("Qemu core dump when reset " "speed to a negative value.") if match_str not in response: - self.test.fail("Fail to get expected result. %s is expected in %s" - % (match_str, response)) - LOG_JOB.info("Keyword '%s' is found in QMP output '%s'.", - match_str, response) + self.test.fail( + f"Fail to get expected result. {match_str} is expected in {response}" + ) + LOG_JOB.info("Keyword '%s' is found in QMP output '%s'.", match_str, response) def run(test, params, env): diff --git a/qemu/tests/block_stream_reboot.py b/qemu/tests/block_stream_reboot.py index 2de049692a..570a29d8d3 100644 --- a/qemu/tests/block_stream_reboot.py +++ b/qemu/tests/block_stream_reboot.py @@ -1,11 +1,12 @@ -import time import random +import time + from virttest import error_context + from qemu.tests import blk_stream class BlockStreamReboot(blk_stream.BlockStream): - @error_context.context_aware def reboot(self): """ @@ -13,7 +14,7 @@ def reboot(self): """ params = self.parser_test_args() method = params.get("reboot_method", "system_reset") - super(BlockStreamReboot, self).reboot(method=method) + super().reboot(method=method) time.sleep(random.randint(0, 20)) diff --git a/qemu/tests/block_stream_simple.py b/qemu/tests/block_stream_simple.py index 77b965cec5..c0768d860b 100644 --- a/qemu/tests/block_stream_simple.py +++ b/qemu/tests/block_stream_simple.py @@ -4,13 +4,12 @@ from qemu.tests import blk_stream -LOG_JOB = logging.getLogger('avocado.test') +LOG_JOB = logging.getLogger("avocado.test") class BlockStreamSimple(blk_stream.BlockStream): - def __init__(self, test, params, env, tag): - super(BlockStreamSimple, self).__init__(test, params, env, tag) + super().__init__(test, params, env, tag) @error_context.context_aware def query_status(self): diff --git a/qemu/tests/block_stream_stress.py b/qemu/tests/block_stream_stress.py index eb12813301..2416a4ec93 100644 --- a/qemu/tests/block_stream_stress.py +++ b/qemu/tests/block_stream_stress.py @@ -1,17 +1,14 @@ -import time import logging +import time -from virttest import utils_misc -from virttest import utils_test -from virttest import error_context +from virttest import error_context, utils_misc, utils_test from qemu.tests import blk_stream -LOG_JOB = logging.getLogger('avocado.test') +LOG_JOB = logging.getLogger("avocado.test") class BlockStreamStress(blk_stream.BlockStream): - @error_context.context_aware def load_stress(self): """ @@ -34,6 +31,7 @@ def unload_stress(self): """ stop stress app """ + def _unload_stress(): session = self.get_session() cmd = self.params.get("stop_cmd") @@ -42,11 +40,15 @@ def _unload_stress(): return self.app_running() error_context.context("stop stress app in guest", LOG_JOB.info) - stopped = 
utils_misc.wait_for(_unload_stress, first=2.0, - text="wait stress app quit", - step=1.0, timeout=120) + stopped = utils_misc.wait_for( + _unload_stress, + first=2.0, + text="wait stress app quit", + step=1.0, + timeout=120, + ) if not stopped: - LOG_JOB.warn("stress app is still running") + LOG_JOB.warning("stress app is still running") def app_running(self): """ diff --git a/qemu/tests/block_transfer_parameters_check.py b/qemu/tests/block_transfer_parameters_check.py index af3661ff06..1288fe925c 100644 --- a/qemu/tests/block_transfer_parameters_check.py +++ b/qemu/tests/block_transfer_parameters_check.py @@ -1,17 +1,14 @@ -"""Verify Maximum transfer length and max_sector_kb in guest """ +"""Verify Maximum transfer length and max_sector_kb in guest""" +import copy +import json import time from os.path import basename from avocado.utils import process -from virttest import env_process -from virttest import data_dir -from virttest import utils_misc - +from virttest import data_dir, env_process, utils_misc from virttest.iscsi import Iscsi from virttest.utils_misc import get_linux_drive_path -import json -import copy def run(test, params, env): @@ -30,7 +27,7 @@ def run(test, params, env): """ def _verify_transfer_info(host_info, guest_info): - errmsg = "The guest get unexpected transfer %s" % guest_info + errmsg = f"The guest get unexpected transfer {guest_info}" if guest_info["sectors_kb"] > host_info["sectors_kb"]: test.fail(errmsg) min_host = min(host_info["sectors_kb"], host_info["segments"] * 4) @@ -42,15 +39,15 @@ def _verify_transfer_info(host_info, guest_info): def _get_target_devices(trans_type, size=None): devs = [] - cond = "$2==\"%s\"" % trans_type + cond = f'$2=="{trans_type}"' if size: - cond += " && $3==\"%s\"" % size - cmd = "lsblk -Spo 'NAME,TRAN,SIZE' |awk '{if(%s) print $1}'" % cond + cond += f' && $3=="{size}"' + cmd = f"lsblk -Spo 'NAME,TRAN,SIZE' |awk '{{if({cond}) print $1}}'" logger.debug(cmd) status, output = process.getstatusoutput(cmd) devs_str = output.strip().replace("\n", " ") if devs_str: - cmd = "lsblk -Jpo 'NAME,HCTL,SERIAL,TRAN,FSTYPE,WWN' %s" % devs_str + cmd = f"lsblk -Jpo 'NAME,HCTL,SERIAL,TRAN,FSTYPE,WWN' {devs_str}" status, output = process.getstatusoutput(cmd) devs = copy.deepcopy(json.loads(output)["blockdevices"]) @@ -83,7 +80,7 @@ def _get_transfer_parameters(dev, conn=None): try: tran_type = params["tran_type"] - params['image_size'] = params.get('emulated_image_size', "") + params["image_size"] = params.get("emulated_image_size", "") if tran_type == "iscsi": logger.debug("Create iscsi disk.") base_dir = data_dir.get_data_dir() @@ -91,17 +88,17 @@ def _get_transfer_parameters(dev, conn=None): iscsi.login() dev_name = utils_misc.wait_for(lambda: iscsi.get_device_name(), 60) if not dev_name: - test.error('Can not get the iSCSI device.') + test.error("Can not get the iSCSI device.") logger.debug(dev_name) time.sleep(2) - logger.debug(_run_cmd("lsblk -JO %s" % dev_name)) + logger.debug(_run_cmd(f"lsblk -JO {dev_name}")) - target_devs = _get_target_devices(tran_type, params['image_size']) + target_devs = _get_target_devices(tran_type, params["image_size"]) if not len(target_devs): if tran_type == "fc": - test.cancel("No FC device:%s" % params['image_size']) + test.cancel("No FC device:{}".format(params["image_size"])) else: - test.error("No ISCSI device:%s" % params['image_size']) + test.error("No ISCSI device:{}".format(params["image_size"])) target_dev = target_devs[0] logger.debug(target_dev) @@ -111,7 +108,7 @@ def _get_transfer_parameters(dev, 
conn=None): set_max_sector_cmd = set_max_sector_cmd % dev_name _run_cmd(set_max_sector_cmd) - vm = env.get_vm(params['main_vm']) + vm = env.get_vm(params["main_vm"]) timeout = float(params.get("timeout", 240)) guest_cmd = params["guest_cmd"] @@ -127,9 +124,10 @@ def _get_transfer_parameters(dev, conn=None): logger.debug("Get host transfer info of %s ", dev_name) host_tran_info = _get_transfer_parameters(dev_name) - params['start_vm'] = 'yes' - env_process.process(test, params, env, env_process.preprocess_image, - env_process.preprocess_vm) + params["start_vm"] = "yes" + env_process.process( + test, params, env, env_process.preprocess_image, env_process.preprocess_vm + ) session = vm.wait_for_login(timeout=timeout) @@ -143,7 +141,7 @@ def _get_transfer_parameters(dev, conn=None): logger.debug("Verify transfer info of %s", target_path) _verify_transfer_info(host_tran_info, guest_tran_info) guest_cmd = guest_cmd % target_path - logger.debug('Start IO: %s', guest_cmd) + logger.debug("Start IO: %s", guest_cmd) session.cmd(guest_cmd, timeout=360) vm.monitor.verify_status("running") diff --git a/qemu/tests/block_vhost_vdpa_test.py b/qemu/tests/block_vhost_vdpa_test.py index 7084a03eb2..8a95f72a7e 100644 --- a/qemu/tests/block_vhost_vdpa_test.py +++ b/qemu/tests/block_vhost_vdpa_test.py @@ -1,13 +1,14 @@ """VDPA blk vhost vdpa test""" + +from aexpect import ShellCmdError from avocado.core import exceptions from avocado.utils import process - -from provider.block_devices_plug import BlockDevicesPlug -from provider.vdpa_sim_utils import VhostVdpaBlkSimulatorTest from virttest import env_process, utils_disk, utils_misc, virt_vm from virttest.utils_misc import get_linux_drive_path from virttest.utils_windows.drive import get_disk_props_by_serial_number -from aexpect import ShellCmdError + +from provider.block_devices_plug import BlockDevicesPlug +from provider.vdpa_sim_utils import VhostVdpaBlkSimulatorTest def run(test, params, env): @@ -41,7 +42,7 @@ def run(test, params, env): def _setup_vdpa_disks(): for img in vdpa_blk_images: dev = vdpa_blk_test.add_dev(img) - logger.debug("Add vhost device %s %s" % (img, dev)) + logger.debug("Add vhost device %s %s", img, dev) def _cleanup_vdpa_disks(): for img in vdpa_blk_images: @@ -51,22 +52,22 @@ def _get_window_disk_index_by_serial(serial): idx_info = get_disk_props_by_serial_number(session, serial, ["Index"]) if idx_info: return idx_info["Index"] - test.fail("Not find expected disk %s" % serial) + test.fail(f"Not find expected disk {serial}") def _check_disk_in_guest(img): os_type = params["os_type"] - logger.debug("Check disk %s in guest" % img) - if os_type == 'windows': - img_size = params.get("image_size_%s" % img) + logger.debug("Check disk %s in guest", img) + if os_type == "windows": + img_size = params.get(f"image_size_{img}") cmd = utils_misc.set_winutils_letter(session, guest_cmd) disk = _get_window_disk_index_by_serial(img) utils_disk.update_windows_disk_attributes(session, disk) logger.info("Clean disk:%s", disk) utils_disk.clean_partition_windows(session, disk) logger.info("Formatting disk:%s", disk) - driver = \ - utils_disk.configure_empty_disk(session, disk, img_size, - os_type)[0] + driver = utils_disk.configure_empty_disk(session, disk, img_size, os_type)[ + 0 + ] output_path = driver + ":\\test.dat" cmd = cmd.format(output_path) else: @@ -89,7 +90,7 @@ def hotplug_unplug_test(): def discard_test(): for img in vdpa_blk_images: - cmd = "blkdiscard -f %s && echo 'it works!' 
" % vdpa_blk_info[img] + cmd = f"blkdiscard -f {vdpa_blk_info[img]} && echo 'it works!' " process.run(cmd, shell=True) logger = test.log @@ -116,11 +117,11 @@ def discard_test(): locals_var = locals() if host_operation: - logger.debug("Execute operation %s" % host_operation) + logger.debug("Execute operation %s", host_operation) locals_var[host_operation]() logger.debug("Ready boot VM...") - params["start_vm"] = 'yes' + params["start_vm"] = "yes" login_timeout = params.get_numeric("login_timeout", 360) env_process.preprocess_vm(test, params, env, params.get("main_vm")) vm = env.get_vm(params["main_vm"]) @@ -128,7 +129,7 @@ def discard_test(): session = vm.wait_for_login(timeout=login_timeout) if guest_operation: - logger.debug("Execute guest operation %s" % guest_operation) + logger.debug("Execute guest operation %s", guest_operation) locals_var[guest_operation]() logger.debug("Destroy VM...") @@ -136,9 +137,9 @@ def discard_test(): vm.destroy() vm = None except (virt_vm.VMCreateError, ShellCmdError) as e: - logger.debug("Find exception %s" % e) + logger.debug("Find exception %s", str(e)) if expect_to_fail == "yes" and err_msg in e.output: - logger.info("%s is expected " % err_msg) + logger.info("%s is expected", err_msg) # reset expect_to_fail expect_to_fail = "no" else: @@ -153,4 +154,4 @@ def discard_test(): vdpa_blk_test.cleanup() if expect_to_fail != "no": - raise exceptions.TestFail("Expected '%s' not happened" % err_msg) + raise exceptions.TestFail(f"Expected '{err_msg}' not happened") diff --git a/qemu/tests/block_with_iommu.py b/qemu/tests/block_with_iommu.py index 5978833229..d0ad472064 100644 --- a/qemu/tests/block_with_iommu.py +++ b/qemu/tests/block_with_iommu.py @@ -1,7 +1,6 @@ import re -from virttest import cpu -from virttest import error_context +from virttest import cpu, error_context @error_context.context_aware @@ -22,7 +21,7 @@ def run(test, params, env): def _get_boot_file(cmd_get_boot_file): """Get the boot file.""" - current_kernel = session.cmd_output(params.get('cmd_get_kernel_ver')) + current_kernel = session.cmd_output(params.get("cmd_get_kernel_ver")) boot_files = session.cmd_output(cmd_get_boot_file).splitlines() if len(boot_files) > 1: for boot_file in boot_files: @@ -32,25 +31,27 @@ def _get_boot_file(cmd_get_boot_file): def reload_kernel(session): """Reload kernel.""" - error_context.context('Reload kernel.', test.log.info) - vmlinuz = _get_boot_file(params.get('cmd_get_boot_vmlinuz')) - initrd = _get_boot_file(params.get('cmd_get_boot_initramfs')) - orig_cmdline = session.cmd_output(params.get('cmd_get_boot_cmdline')) - new_cmdline = re.sub(r'vmlinuz\S+', vmlinuz, orig_cmdline).strip() - session.cmd(params.get('reload_kernel_cmd') % (vmlinuz, initrd, new_cmdline)) + error_context.context("Reload kernel.", test.log.info) + vmlinuz = _get_boot_file(params.get("cmd_get_boot_vmlinuz")) + initrd = _get_boot_file(params.get("cmd_get_boot_initramfs")) + orig_cmdline = session.cmd_output(params.get("cmd_get_boot_cmdline")) + new_cmdline = re.sub(r"vmlinuz\S+", vmlinuz, orig_cmdline).strip() + session.cmd(params.get("reload_kernel_cmd") % (vmlinuz, initrd, new_cmdline)) def verify_iommu_enabled(): - """ Verify whether the iommu is enabled. 
""" + """Verify whether the iommu is enabled.""" error_context.context( - 'Verify whether IOMMU is enabled in the guest.', test.log.info) - for key_words in params['check_key_words'].split(';'): - output = session.cmd_output("journalctl -k | grep -i \"%s\"" % key_words) + "Verify whether IOMMU is enabled in the guest.", test.log.info + ) + for key_words in params["check_key_words"].split(";"): + output = session.cmd_output(f'journalctl -k | grep -i "{key_words}"') if not output: - test.fail("No found the info \"%s\" " - "from the systemd journal log." % key_words) + test.fail( + f'No found the info "{key_words}" ' "from the systemd journal log." + ) test.log.debug(output) - if cpu.get_cpu_vendor(verbose=False) != 'GenuineIntel': + if cpu.get_cpu_vendor(verbose=False) != "GenuineIntel": test.cancel("This case only support Intel platform.") vm = env.get_vm(params["main_vm"]) @@ -58,6 +59,6 @@ def verify_iommu_enabled(): session = vm.wait_for_login(timeout=360) verify_iommu_enabled() - if params.get('reload_kernel_cmd'): + if params.get("reload_kernel_cmd"): reload_kernel(session) vm.reboot(session) diff --git a/qemu/tests/block_with_share_rw.py b/qemu/tests/block_with_share_rw.py index 4061f6e32e..12432c9ac9 100644 --- a/qemu/tests/block_with_share_rw.py +++ b/qemu/tests/block_with_share_rw.py @@ -1,10 +1,7 @@ import re from avocado.utils import process - -from virttest import error_context -from virttest import env_process -from virttest import virt_vm +from virttest import env_process, error_context, virt_vm @error_context.context_aware @@ -25,18 +22,19 @@ def run(test, params, env): :param params: Dictionary with the test parameters :param env: Dictionary with test environment. """ - msgs = ['"write" lock', 'Is another process using the image'] - modprobe_cmd = params.get('modprobe_cmd') - disk_check_cmd = params.get('disk_check_cmd') - indirect_image_blacklist = params.get('indirect_image_blacklist').split() + msgs = ['"write" lock', "Is another process using the image"] + modprobe_cmd = params.get("modprobe_cmd") + disk_check_cmd = params.get("disk_check_cmd") + indirect_image_blacklist = params.get("indirect_image_blacklist").split() # In some environments, The image in indirect_image_blacklist # does not exist in host. So we need to check the host env # first and update the blacklist. 
if disk_check_cmd: - image_stg_blacklist = params.get('image_stg_blacklist').split() - matching_images = process.run(disk_check_cmd, ignore_status=True, - shell=True).stdout_text + image_stg_blacklist = params.get("image_stg_blacklist").split() + matching_images = process.run( + disk_check_cmd, ignore_status=True, shell=True + ).stdout_text for disk in image_stg_blacklist: if not re.search(disk, matching_images): indirect_image_blacklist.remove(disk) @@ -46,21 +44,20 @@ def run(test, params, env): params["image_raw_device_stg"] = "yes" params["indirect_image_select_stg"] = "-1" params["start_vm"] = "yes" - env_process.preprocess_vm(test, params, env, - params["main_vm"]) + env_process.preprocess_vm(test, params, env, params["main_vm"]) vm1 = env.get_vm(params["main_vm"]) vm1.verify_alive() vm1.wait_for_login(timeout=360) try: - error_context.context('Start another vm with the data image.', test.log.info) - params['images'] = params['images'].split()[-1] + error_context.context("Start another vm with the data image.", test.log.info) + params["images"] = params["images"].split()[-1] env_process.preprocess_vm(test, params, env, "avocado-vt-vm2") vm2 = env.get_vm("avocado-vt-vm2") vm2.verify_alive() except virt_vm.VMCreateError as e: - if params['share_rw'] == 'off': + if params["share_rw"] == "off": if not all(msg in str(e) for msg in msgs): test.fail("Image lock information is not as expected.") else: diff --git a/qemu/tests/block_with_write_threshold.py b/qemu/tests/block_with_write_threshold.py index 44e0ebee99..ec4c29c59b 100644 --- a/qemu/tests/block_with_write_threshold.py +++ b/qemu/tests/block_with_write_threshold.py @@ -1,12 +1,7 @@ import re from avocado.utils.wait import wait_for - -from virttest import data_dir -from virttest import error_context -from virttest import utils_disk -from virttest import utils_test -from virttest import utils_misc +from virttest import data_dir, error_context, utils_disk, utils_misc, utils_test from virttest.qemu_storage import QemuImg, get_image_json from provider.storage_benchmark import generate_instance @@ -32,53 +27,58 @@ def run(test, params, env): :param params: Dictionary with the test parameters. :param env: Dictionary with test environment. """ + def get_node_name(image_tag): - """ Get the node name. """ + """Get the node name.""" img_params = params.object_params(image_tag) root_dir = data_dir.get_data_dir() img = QemuImg(img_params, root_dir, image_tag) filename = img.image_filename - if img.image_format == 'luks': + if img.image_format == "luks": filename = get_image_json(image_tag, img_params, root_dir) return vm.get_block({"filename": filename}) def set_block_write_threshold(monitor, node_name, size): - """ Set block write threshold for the block drive. """ - error_context.context("Set block write threshold to %s for the block " - "drive in QMP." % size, test.log.info) - monitor.cmd('block-set-write-threshold', - {"node-name": node_name, "write-threshold": size}) + """Set block write threshold for the block drive.""" + error_context.context( + f"Set block write threshold to {size} for the block " "drive in QMP.", + test.log.info, + ) + monitor.cmd( + "block-set-write-threshold", + {"node-name": node_name, "write-threshold": size}, + ) def verify_block_write_threshold_event(monitor): - """ Verify the event 'BLOCK_WRITE_THRESHOLD' in QMP. 
""" - return wait_for(lambda: monitor.get_event('BLOCK_WRITE_THRESHOLD'), 30) + """Verify the event 'BLOCK_WRITE_THRESHOLD' in QMP.""" + return wait_for(lambda: monitor.get_event("BLOCK_WRITE_THRESHOLD"), 30) def get_data_disk(session): - """ Get the data disk. """ + """Get the data disk.""" if is_linux: - extra_params = params["blk_extra_params_%s" % data_img_tag] + extra_params = params[f"blk_extra_params_{data_img_tag}"] drive_id = re.search(r"(serial|wwn)=(\w+)", extra_params, re.M).group(2) return utils_misc.get_linux_drive_path(session, drive_id) - return sorted(session.cmd('wmic diskdrive get index').split()[1:])[-1] + return sorted(session.cmd("wmic diskdrive get index").split()[1:])[-1] def _io_stress_linux(target): - session.cmd(params['dd_cmd'] % target, 180) + session.cmd(params["dd_cmd"] % target, 180) def _io_stress_windows(target): - fio = generate_instance(params, vm, 'fio') + fio = generate_instance(params, vm, "fio") try: - fio.run(params['fio_opts'] % target) + fio.run(params["fio_opts"] % target) finally: fio.clean() def run_io_stress(stress_func, target): - """ Run io stress inside guest. """ + """Run io stress inside guest.""" error_context.context("Run io stress inside guest.", test.log.info) stress_func(target) - is_linux = params['os_type'] == 'linux' - data_img_tag = params['images'].split()[-1] - data_img_size = params['image_size_%s' % data_img_tag] + is_linux = params["os_type"] == "linux" + data_img_tag = params["images"].split()[-1] + data_img_size = params[f"image_size_{data_img_tag}"] vm = env.get_vm(params["main_vm"]) vm.verify_alive() @@ -86,21 +86,23 @@ def run_io_stress(stress_func, target): target = get_data_disk(session) if not is_linux: session = utils_test.qemu.windrv_check_running_verifier( - session, vm, test, params['driver_name']) + session, vm, test, params["driver_name"] + ) utils_disk.update_windows_disk_attributes(session, target) target = utils_disk.configure_empty_windows_disk( - session, target, data_img_size)[-1] + session, target, data_img_size + )[-1] qmp_monitor = vm.monitors[0] node_name = get_node_name(data_img_tag) - set_block_write_threshold(qmp_monitor, node_name, int(params['threshold_size'])) - stress_func = locals()['_io_stress_%s' % ('linux' if is_linux else 'windows')] + set_block_write_threshold(qmp_monitor, node_name, int(params["threshold_size"])) + stress_func = locals()["_io_stress_%s" % ("linux" if is_linux else "windows")] run_io_stress(stress_func, target) if not verify_block_write_threshold_event(qmp_monitor): - test.fail('Failed to get the event \'BLOCK_WRITE_THRESHOLD\'.') + test.fail("Failed to get the event 'BLOCK_WRITE_THRESHOLD'.") - qmp_monitor.clear_event('BLOCK_WRITE_THRESHOLD') + qmp_monitor.clear_event("BLOCK_WRITE_THRESHOLD") set_block_write_threshold(qmp_monitor, node_name, 0) run_io_stress(stress_func, target) if verify_block_write_threshold_event(qmp_monitor): - test.fail('Failed to disable threshold.') + test.fail("Failed to disable threshold.") diff --git a/qemu/tests/blockdev_commit.py b/qemu/tests/blockdev_commit.py index 1814e9c060..a33673dfa6 100644 --- a/qemu/tests/blockdev_commit.py +++ b/qemu/tests/blockdev_commit.py @@ -1,22 +1,21 @@ from virttest import utils_disk -from provider.blockdev_commit_base import BlockDevCommitTest from provider import backup_utils +from provider.blockdev_commit_base import BlockDevCommitTest class BlockDevCommitBase(BlockDevCommitTest): - def configure_data_disk(self, tag): session = self.main_vm.wait_for_login() try: - info = 
backup_utils.get_disk_info_by_param(tag, - self.params, - session) + info = backup_utils.get_disk_info_by_param(tag, self.params, session) assert info, "Disk not found in guest!" mount_point = utils_disk.configure_empty_linux_disk( - session, info["kname"], info["size"])[0] - self.disks_info.append([ - r"/dev/%s1" % info["kname"], mount_point, tag]) + session, info["kname"], info["size"] + )[0] + self.disks_info.append( + [r"/dev/{}1".format(info["kname"]), mount_point, tag] + ) finally: session.close() diff --git a/qemu/tests/blockdev_commit_auto_readonly.py b/qemu/tests/blockdev_commit_auto_readonly.py index 545d9d346d..c9d45dd2f0 100644 --- a/qemu/tests/blockdev_commit_auto_readonly.py +++ b/qemu/tests/blockdev_commit_auto_readonly.py @@ -1,10 +1,8 @@ -from provider import backup_utils -from provider import job_utils +from provider import backup_utils, job_utils from provider.blockdev_commit_base import BlockDevCommitTest class BlockdevCommitAutoReadonly(BlockDevCommitTest): - def commit_snapshots(self): device = self.params.get("device_tag") device_params = self.params.object_params(device) diff --git a/qemu/tests/blockdev_commit_backing_file.py b/qemu/tests/blockdev_commit_backing_file.py index dcdc3242c1..8035654a94 100644 --- a/qemu/tests/blockdev_commit_backing_file.py +++ b/qemu/tests/blockdev_commit_backing_file.py @@ -1,6 +1,4 @@ -from virttest import data_dir -from virttest import storage -from virttest import qemu_storage +from virttest import data_dir, qemu_storage, storage from provider import backup_utils from provider.blockdev_commit_base import BlockDevCommitTest @@ -12,13 +10,11 @@ def check_backing_file(self): self.main_vm.destroy() device = self.params["snapshot_tags"].split()[-1] device_params = self.params.object_params(device) - image_obj = qemu_storage.QemuImg(device_params, - data_dir.get_data_dir(), device) + image_obj = qemu_storage.QemuImg(device_params, data_dir.get_data_dir(), device) output = image_obj.info() self.test.log.info(output) if self.backing_file not in output: - self.test.fail("The backing file info of % is not correct" - % device) + self.test.fail("The backing file info of %s is not correct" % device) def commit_snapshots(self): device = self.params.get("device_tag") @@ -29,8 +25,9 @@ def commit_snapshots(self): arguments = self.params.copy_from_keys(options) arguments["base-node"] = self.get_node_name(device) backing_file = self.params.object_params(snapshot_tags[-2]) - self.backing_file = storage.get_image_filename(backing_file, - data_dir.get_data_dir()) + self.backing_file = storage.get_image_filename( + backing_file, data_dir.get_data_dir() + ) arguments["backing-file"] = self.backing_file arguments["top-node"] = self.get_node_name(snapshot_tags[-2]) device = self.get_node_name(snapshot_tags[-1]) diff --git a/qemu/tests/blockdev_commit_cor.py b/qemu/tests/blockdev_commit_cor.py index ce9b626e9c..470d9b010a 100644 --- a/qemu/tests/blockdev_commit_cor.py +++ b/qemu/tests/blockdev_commit_cor.py @@ -1,10 +1,8 @@ -from provider import job_utils -from provider import backup_utils +from provider import backup_utils, job_utils from provider.blockdev_commit_base import BlockDevCommitTest class BlockdevCommitCOR(BlockDevCommitTest): - def create_snapshots(self, snapshot_tags, device): options = ["node", "overlay"] cmd = "blockdev-snapshot" diff --git a/qemu/tests/blockdev_commit_filter_node_name.py b/qemu/tests/blockdev_commit_filter_node_name.py index 1df10507a9..3e8f231216 100644 --- a/qemu/tests/blockdev_commit_filter_node_name.py +++ 
b/qemu/tests/blockdev_commit_filter_node_name.py @@ -1,10 +1,8 @@ -from provider import job_utils -from provider import backup_utils +from provider import backup_utils, job_utils from provider.blockdev_commit_base import BlockDevCommitTest class BlockdevCommitFilter(BlockDevCommitTest): - def commit_snapshots(self): device = self.params.get("device_tag") device_params = self.params.object_params(device) @@ -23,15 +21,16 @@ def commit_snapshots(self): job_id = args.get("job-id", device) block_info = self.main_vm.monitor.info_block() if filter_node_name not in block_info: - self.test.fail("Block info not correct,node-name should be '%s'" - % filter_node_name) - self.main_vm.monitor.cmd("block-job-set-speed", - {'device': job_id, 'speed': 0}) + self.test.fail( + f"Block info not correct,node-name should be '{filter_node_name}'" + ) + self.main_vm.monitor.cmd("block-job-set-speed", {"device": job_id, "speed": 0}) job_utils.wait_until_block_job_completed(self.main_vm, job_id) block_info = self.main_vm.monitor.info_block() if filter_node_name in block_info: - self.test.fail("Block info not correct,node-name should not" - "be '%s'" % filter_node_name) + self.test.fail( + "Block info not correct,node-name should not " f"be '{filter_node_name}'" + ) def run(test, params, env): diff --git a/qemu/tests/blockdev_commit_fio.py b/qemu/tests/blockdev_commit_fio.py index 3127dd3fcb..abd4de1c75 100644 --- a/qemu/tests/blockdev_commit_fio.py +++ b/qemu/tests/blockdev_commit_fio.py @@ -1,12 +1,11 @@ -import time import random +import time from virttest import utils_test -from provider import job_utils -from provider import backup_utils -from provider.storage_benchmark import generate_instance +from provider import backup_utils, job_utils from provider.blockdev_commit_base import BlockDevCommitTest +from provider.storage_benchmark import generate_instance class BlockdevCommitFio(BlockDevCommitTest): @@ -14,7 +13,7 @@ def fio_thread(self): fio_options = self.params.get("fio_options") if fio_options: self.test.log.info("Start to run fio") - self.fio = generate_instance(self.params, self.main_vm, 'fio') + self.fio = generate_instance(self.params, self.main_vm, "fio") fio_run_timeout = self.params.get_numeric("fio_timeout", 2400) self.fio.run(fio_options, fio_run_timeout) @@ -30,9 +29,7 @@ def commit_snapshots(self): job_id = args.get("job-id", device) self.main_vm.monitor.cmd(cmd, args) job_timeout = self.params.get_numeric("commit_job_timeout", 1800) - job_utils.wait_until_block_job_completed(self.main_vm, - job_id, - job_timeout) + job_utils.wait_until_block_job_completed(self.main_vm, job_id, job_timeout) def run_test(self): self.pre_test() diff --git a/qemu/tests/blockdev_commit_firewall.py b/qemu/tests/blockdev_commit_firewall.py index 0dda114659..ae7f434512 100644 --- a/qemu/tests/blockdev_commit_firewall.py +++ b/qemu/tests/blockdev_commit_firewall.py @@ -2,27 +2,25 @@ from avocado.utils import process -from provider.nbd_image_export import QemuNBDExportImage -from provider import backup_utils -from provider import job_utils -from provider import qemu_img_utils - +from provider import backup_utils, job_utils, qemu_img_utils from provider.blockdev_commit_base import BlockDevCommitTest +from provider.nbd_image_export import QemuNBDExportImage class BlockdevCommitFirewall(BlockDevCommitTest): - def __init__(self, test, params, env): localhost = socket.gethostname() - params['nbd_server_%s' % params['nbd_image_tag']] = localhost \ if localhost else 'localhost' + 
params["nbd_server_{}".format(params["nbd_image_tag"])] = ( + localhost if localhost else "localhost" + ) self._offset = None self._net_down = False - super(BlockdevCommitFirewall, self).__init__(test, params, env) + super().__init__(test, params, env) def _export_local_image_with_nbd(self): - self._nbd_export = QemuNBDExportImage(self.params, - self.params["local_image_tag"]) + self._nbd_export = QemuNBDExportImage( + self.params, self.params["local_image_tag"] + ) self._nbd_export.create_image() self._nbd_export.export_image() @@ -31,23 +29,24 @@ def pre_test(self): self._export_local_image_with_nbd() boot_vm_cmd = qemu_img_utils.boot_vm_with_images self.main_vm = boot_vm_cmd(self.test, self.params, self.env) - super(BlockdevCommitFirewall, self).pre_test() + super().pre_test() except: self.clean_images() def _run_iptables(self, cmd): cmd = cmd.format( - s=self.params['nbd_server_%s' % self.params['nbd_image_tag']]) + s=self.params["nbd_server_{}".format(self.params["nbd_image_tag"])] + ) result = process.run(cmd, ignore_status=True, shell=True) if result.exit_status != 0: - self.test.error('command error: %s' % result.stderr.decode()) + self.test.error(f"command error: {result.stderr.decode()}") def break_net_with_iptables(self): - self._run_iptables(self.params['net_break_cmd']) + self._run_iptables(self.params["net_break_cmd"]) self._net_down = True def resume_net_with_iptables(self): - self._run_iptables(self.params['net_resume_cmd']) + self._run_iptables(self.params["net_resume_cmd"]) self._net_down = False def clean_images(self): @@ -62,10 +61,8 @@ def clean_images(self): nbd_image = self.get_image_by_tag(self.params["local_image_tag"]) nbd_image.remove() - def generate_tempfile(self, root_dir, filename="data", - size="1000M", timeout=360): - backup_utils.generate_tempfile( - self.main_vm, root_dir, filename, size, timeout) + def generate_tempfile(self, root_dir, filename="data", size="1000M", timeout=360): + backup_utils.generate_tempfile(self.main_vm, root_dir, filename, size, timeout) self.files_info.append([root_dir, filename]) def commit_snapshots(self): @@ -84,7 +81,7 @@ def commit_snapshots(self): self.job_id = args.get("job-id", device) def post_test(self): - super(BlockdevCommitFirewall, self).post_test() + super().post_test() self.clean_images() def run_test(self): diff --git a/qemu/tests/blockdev_commit_forbidden_actions.py b/qemu/tests/blockdev_commit_forbidden_actions.py index eb475efe17..8357b967a5 100644 --- a/qemu/tests/blockdev_commit_forbidden_actions.py +++ b/qemu/tests/blockdev_commit_forbidden_actions.py @@ -1,12 +1,10 @@ from virttest.qemu_monitor import QMPCmdError -from provider import job_utils -from provider import backup_utils +from provider import backup_utils, job_utils from provider.blockdev_commit_base import BlockDevCommitTest class BlockdevCommitForbiddenActions(BlockDevCommitTest): - def commit_snapshots(self): device = self.params.get("device_tag") device_params = self.params.object_params(device) @@ -23,50 +21,45 @@ def commit_snapshots(self): self.main_vm.monitor.cmd(cmd, args) job_id = args.get("job-id", self.active_node) self.do_forbidden_actions() - self.main_vm.monitor.cmd("block-job-set-speed", - {'device': job_id, 'speed': 0}) + self.main_vm.monitor.cmd("block-job-set-speed", {"device": job_id, "speed": 0}) job_utils.wait_until_block_job_completed(self.main_vm, job_id) def commit(self): - self.main_vm.monitor.cmd( - "block-commit", {'device': self.active_node} - ) + self.main_vm.monitor.cmd("block-commit", {"device": self.active_node}) def 
resize(self): self.main_vm.monitor.cmd( - "block_resize", - {'node-name': self.active_node, 'size': 1024*1024*1024} + "block_resize", {"node-name": self.active_node, "size": 1024 * 1024 * 1024} ) def mirror(self): self.main_vm.monitor.cmd( "blockdev-mirror", - {'device': self.active_node, - 'target': self.forbidden_node, 'sync': 'full'} + {"device": self.active_node, "target": self.forbidden_node, "sync": "full"}, ) def snapshot(self): self.main_vm.monitor.cmd( "blockdev-snapshot", - {'node': self.active_node, 'overlay': self.forbidden_node} + {"node": self.active_node, "overlay": self.forbidden_node}, ) def stream(self): - self.main_vm.monitor.cmd("block-stream", {'device': self.active_node}) + self.main_vm.monitor.cmd("block-stream", {"device": self.active_node}) def do_forbidden_actions(self): """Run the qmp commands one by one, all should fail""" self.prepare_snapshot_file(self.params["fnode"].split()) - for action in self.params.objects('forbidden_actions'): - error_msg = self.params['error_msg_%s' % action] + for action in self.params.objects("forbidden_actions"): + error_msg = self.params[f"error_msg_{action}"] f = getattr(self, action) try: f() except QMPCmdError as e: if error_msg not in str(e): - self.test.fail('Unexpected error: %s' % str(e)) + self.test.fail(f"Unexpected error: {str(e)}") else: - self.test.fail('Unexpected qmp command success') + self.test.fail("Unexpected qmp command success") def run(test, params, env): diff --git a/qemu/tests/blockdev_commit_general_operation.py b/qemu/tests/blockdev_commit_general_operation.py index 9c1f20569e..d407c6a443 100644 --- a/qemu/tests/blockdev_commit_general_operation.py +++ b/qemu/tests/blockdev_commit_general_operation.py @@ -1,10 +1,8 @@ -from provider import job_utils -from provider import backup_utils +from provider import backup_utils, job_utils from provider.blockdev_commit_base import BlockDevCommitTest class BlockdevCommitGeneralOperation(BlockDevCommitTest): - def commit_op(self, cmd, args=None): self.main_vm.monitor.cmd(cmd, args) job_status = job_utils.query_block_jobs(self.main_vm) @@ -26,25 +24,26 @@ def commit_snapshots(self): backup_utils.set_default_block_job_options(self.main_vm, args) self.main_vm.monitor.cmd(cmd, args) job_id = args.get("job-id", device) - self.main_vm.monitor.cmd("block-job-set-speed", - {'device': job_id, 'speed': 10240}) + self.main_vm.monitor.cmd( + "block-job-set-speed", {"device": job_id, "speed": 10240} + ) self.commit_op("stop") self.commit_op("cont") - self.commit_op("job-pause", {'id': job_id}) - self.commit_op("job-resume", {'id': job_id}) - self.commit_op("job-cancel", {'id': job_id}) + self.commit_op("job-pause", {"id": job_id}) + self.commit_op("job-resume", {"id": job_id}) + self.commit_op("job-cancel", {"id": job_id}) event = job_utils.get_event_by_condition( - self.main_vm, 'BLOCK_JOB_CANCELLED', - self.params.get_numeric('job_cancelled_timeout', 60), - device=job_id + self.main_vm, + "BLOCK_JOB_CANCELLED", + self.params.get_numeric("job_cancelled_timeout", 60), + device=job_id, ) if event is None: - self.test.fail('Job failed to cancel') + self.test.fail("Job failed to cancel") if not self.params.get_boolean("dismiss"): - self.commit_op("job-dismiss", {'id': job_id}) + self.commit_op("job-dismiss", {"id": job_id}) self.main_vm.monitor.cmd(cmd, args) - self.main_vm.monitor.cmd("block-job-set-speed", - {'device': job_id, 'speed': 0}) + self.main_vm.monitor.cmd("block-job-set-speed", {"device": job_id, "speed": 0}) job_utils.wait_until_block_job_completed(self.main_vm, job_id) 
diff --git a/qemu/tests/blockdev_commit_hotunplug.py b/qemu/tests/blockdev_commit_hotunplug.py index 1f7b5ae561..d1cebc1c41 100644 --- a/qemu/tests/blockdev_commit_hotunplug.py +++ b/qemu/tests/blockdev_commit_hotunplug.py @@ -1,12 +1,10 @@ from virttest import utils_misc -from provider import job_utils -from provider import backup_utils +from provider import backup_utils, job_utils from provider.blockdev_commit_base import BlockDevCommitTest class BlockdevCommitHotunplug(BlockDevCommitTest): - def is_device_deleted(self, device): return device not in str(self.main_vm.monitor.info_block()) @@ -23,18 +21,17 @@ def commit_snapshots(self): cmd, args = commit_cmd(device, **arguments) backup_utils.set_default_block_job_options(self.main_vm, args) self.main_vm.monitor.cmd(cmd, args) - self.main_vm.monitor.cmd('device_del', - {'id': self.params["device_tag"]}) - unplug_s = utils_misc.wait_for(lambda: self.is_device_deleted(device), - timeout=60, step=1.0) + self.main_vm.monitor.cmd("device_del", {"id": self.params["device_tag"]}) + unplug_s = utils_misc.wait_for( + lambda: self.is_device_deleted(device), timeout=60, step=1.0 + ) if not unplug_s: self.test.fail("Hotunplug device failed") job_id = args.get("job-id", device) job_status = job_utils.get_job_status(self.main_vm, job_id) if job_status not in self.params["expect_status"]: - self.test.fail("Job status %s is not correct" % job_status) - self.main_vm.monitor.cmd("block-job-set-speed", - {'device': job_id, 'speed': 0}) + self.test.fail(f"Job status {job_status} is not correct") + self.main_vm.monitor.cmd("block-job-set-speed", {"device": job_id, "speed": 0}) job_utils.wait_until_block_job_completed(self.main_vm, job_id) def run_test(self): diff --git a/qemu/tests/blockdev_commit_install.py b/qemu/tests/blockdev_commit_install.py index c71d23cc20..300cee0a82 100644 --- a/qemu/tests/blockdev_commit_install.py +++ b/qemu/tests/blockdev_commit_install.py @@ -1,9 +1,8 @@ -import time import random import re +import time -from virttest import utils_test -from virttest import utils_misc +from virttest import utils_misc, utils_test from virttest.tests import unattended_install from provider.blockdev_commit_base import BlockDevCommitTest @@ -23,6 +22,7 @@ def run(test, params, env): 3. commit snapshot 3 to base 4. 
installation can be finished after commit """ + def tag_for_install(vm, tag): if vm.serial_console: serial_output = vm.serial_console.get_output() @@ -37,7 +37,9 @@ def tag_for_install(vm, tag): bg.start() if bg.is_alive(): tag = params.get("tag_for_install_start", "Starting Login Service") - if utils_misc.wait_for(lambda: tag_for_install(block_test.main_vm, tag), 240, 10, 5): + if utils_misc.wait_for( + lambda: tag_for_install(block_test.main_vm, tag), 240, 10, 5 + ): test.log.info("sleep random time before do snapshots") time.sleep(random.randint(10, 120)) block_test.pre_test() diff --git a/qemu/tests/blockdev_commit_non_existed_node.py b/qemu/tests/blockdev_commit_non_existed_node.py index d5824c4495..e0415902e0 100644 --- a/qemu/tests/blockdev_commit_non_existed_node.py +++ b/qemu/tests/blockdev_commit_non_existed_node.py @@ -6,11 +6,10 @@ from provider import backup_utils from provider.blockdev_commit_base import BlockDevCommitTest -LOG_JOB = logging.getLogger('avocado.test') +LOG_JOB = logging.getLogger("avocado.test") class BlockdevCommitNonExistedNode(BlockDevCommitTest): - def commit_snapshots(self): device = self.params.get("device_tag") device_params = self.params.object_params(device) @@ -37,9 +36,11 @@ def commit_snapshots(self): if not re.search(qmp_error_msg, str(e.data)): self.test.fail("Error message not as expected") else: - self.test.fail("Block commit should fail with " - "'Cannot find device= nor node_name=sn0'" - ",but block commit succeeded unexpectedly") + self.test.fail( + "Block commit should fail with " + "'Cannot find device= nor node_name=sn0'" + ",but block commit succeeded unexpectedly" + ) def run_test(self): self.pre_test() diff --git a/qemu/tests/blockdev_commit_powerdown.py b/qemu/tests/blockdev_commit_powerdown.py index 31003bc8b3..6f74c67e7f 100644 --- a/qemu/tests/blockdev_commit_powerdown.py +++ b/qemu/tests/blockdev_commit_powerdown.py @@ -1,12 +1,10 @@ from virttest import env_process -from provider import job_utils -from provider import backup_utils +from provider import backup_utils, job_utils from provider.blockdev_commit_base import BlockDevCommitTest class BlockdevCommitHotunplug(BlockDevCommitTest): - def commit_snapshot_and_destory_vm(self): device = self.params.get("device_tag") device_params = self.params.object_params(device) diff --git a/qemu/tests/blockdev_commit_query_named_block_nodes.py b/qemu/tests/blockdev_commit_query_named_block_nodes.py index 428460f9b5..b3602d46c1 100644 --- a/qemu/tests/blockdev_commit_query_named_block_nodes.py +++ b/qemu/tests/blockdev_commit_query_named_block_nodes.py @@ -1,11 +1,8 @@ -from provider import backup_utils -from provider import job_utils - +from provider import backup_utils, job_utils from provider.blockdev_commit_base import BlockDevCommitTest class BlockdevCommitQueryNamedBlockNodes(BlockDevCommitTest): - def commit_snapshots(self): device = self.params.get("device_tag") device_params = self.params.object_params(device) diff --git a/qemu/tests/blockdev_commit_reboot.py b/qemu/tests/blockdev_commit_reboot.py index 6e4701e1b0..7216f0f846 100644 --- a/qemu/tests/blockdev_commit_reboot.py +++ b/qemu/tests/blockdev_commit_reboot.py @@ -1,6 +1,4 @@ -from provider import backup_utils -from provider import job_utils - +from provider import backup_utils, job_utils from provider.blockdev_commit_base import BlockDevCommitTest diff --git a/qemu/tests/blockdev_commit_server_down.py b/qemu/tests/blockdev_commit_server_down.py index 706b05d7a8..37d4e46a3f 100644 --- 
a/qemu/tests/blockdev_commit_server_down.py +++ b/qemu/tests/blockdev_commit_server_down.py @@ -1,45 +1,41 @@ -import time import socket +import time -from provider import job_utils -from provider import backup_utils +from virttest import env_process +from provider import backup_utils, job_utils from provider.blockdev_commit_base import BlockDevCommitTest from provider.nbd_image_export import QemuNBDExportImage -from virttest import env_process - class BlockdevCommitServerDown(BlockDevCommitTest): - def __init__(self, test, params, env): - params['nbd_export_format'] = params['image_format'] + params["nbd_export_format"] = params["image_format"] self.nbd_export = QemuNBDExportImage(params, params["local_image_tag"]) self.nbd_export.create_image() self.nbd_export.export_image() localhost = socket.gethostname() - params['nbd_server'] = localhost if localhost else 'localhost' - params['images'] += ' %s' % params['nbd_image_tag'] + params["nbd_server"] = localhost if localhost else "localhost" + params["images"] += " {}".format(params["nbd_image_tag"]) env_process.preprocess_vm(test, params, env, params["main_vm"]) - super(BlockdevCommitServerDown, self).__init__(test, params, env) + super().__init__(test, params, env) def check_commit_running(self): - tmo = self.params.get_numeric('commit_start_timeout', 5) + tmo = self.params.get_numeric("commit_start_timeout", 5) # make sure commit is running, i.e. offset > 0 for i in range(tmo): time.sleep(1) - job = job_utils.get_block_job_by_id(self.main_vm, - self.commit_job) - if job['offset'] > 0: + job = job_utils.get_block_job_by_id(self.main_vm, self.commit_job) + if job["offset"] > 0: break else: - self.test.fail("offset is 0 after %s seconds" % tmo) + self.test.fail(f"offset is 0 after {tmo} seconds") def check_commit_process(self): offset = None - tmo = self.params.get_numeric('server_down_elapsed_time') + tmo = self.params.get_numeric("server_down_elapsed_time") # stop nbd server self.nbd_export.stop_export() @@ -47,15 +43,15 @@ def check_commit_process(self): # check commit job should hang for i in range(tmo): time.sleep(1) - job = job_utils.get_block_job_by_id(self.main_vm, - self.commit_job) + job = job_utils.get_block_job_by_id(self.main_vm, self.commit_job) if not job: self.test.fail("job cancelled in %d seconds" % tmo) if offset is None: - offset = job['offset'] - elif offset != job['offset']: - self.test.fail("offset changed: %s vs. %s" - % (offset, job['offset'])) + offset = job["offset"] + elif offset != job["offset"]: + self.test.fail( + "offset changed: {} vs. 
{}".format(offset, job["offset"]) + ) # resume nbd access self.nbd_export.export_image() @@ -66,23 +62,23 @@ def check_commit_process(self): job_utils.wait_until_block_job_completed(self.main_vm, self.commit_job) def commit_snapshots(self): - device_params = self.params.object_params(self.params['nbd_image_tag']) + device_params = self.params.object_params(self.params["nbd_image_tag"]) snapshot_tags = device_params["snapshot_tags"].split() - args = self.params.copy_from_keys(['speed']) + args = self.params.copy_from_keys(["speed"]) device = self.get_node_name(snapshot_tags[-1]) cmd, arguments = backup_utils.block_commit_qmp_cmd(device, **args) backup_utils.set_default_block_job_options(self.main_vm, arguments) self.main_vm.monitor.cmd(cmd, arguments) job = job_utils.query_block_jobs(self.main_vm)[0] - self.commit_job = job['device'] + self.commit_job = job["device"] self.check_commit_running() self.check_commit_process() def post_test(self): - self.params['images'] += ' %s' % self.params.get("local_image_tag") + self.params["images"] += " {}".format(self.params.get("local_image_tag")) self.nbd_export.stop_export() - super(BlockdevCommitServerDown, self).post_test() + super().post_test() def run(test, params, env): diff --git a/qemu/tests/blockdev_commit_specify_node.py b/qemu/tests/blockdev_commit_specify_node.py index b49989efcf..4d4d8b8f7f 100644 --- a/qemu/tests/blockdev_commit_specify_node.py +++ b/qemu/tests/blockdev_commit_specify_node.py @@ -5,7 +5,6 @@ class BlockdevCommitSpecifyNode(BlockDevCommitTest): - def commit_snapshots(self): device = self.params.get("device_tag") device_params = self.params.object_params(device) @@ -14,11 +13,11 @@ def commit_snapshots(self): options = ["base-node", "top-node", "speed"] arguments = self.params.copy_from_keys(options) test_scenario = self.params["test_scenario"] - if test_scenario == 'base_same_with_top': + if test_scenario == "base_same_with_top": arguments["base-node"] = self.get_node_name(snapshot_tags[-2]) arguments["top-node"] = self.get_node_name(snapshot_tags[-2]) device = self.get_node_name(snapshot_tags[-1]) - if test_scenario == 'parent_as_top_child_as_base': + if test_scenario == "parent_as_top_child_as_base": arguments["base-node"] = self.get_node_name(snapshot_tags[-2]) arguments["top-node"] = self.get_node_name(snapshot_tags[0]) device = self.get_node_name(snapshot_tags[-1]) @@ -32,9 +31,12 @@ def commit_snapshots(self): if self.params.get("error_msg") not in str(e.data): self.test.fail("Error message not as expected") else: - self.test.fail("Block commit should fail with %s,but " - "block commit succeeded unexpectedly" - % self.params.get("error_msg")) + self.test.fail( + "Block commit should fail with {},but " + "block commit succeeded unexpectedly".format( + self.params.get("error_msg") + ) + ) def run_test(self): self.pre_test() diff --git a/qemu/tests/blockdev_commit_speed_limit.py b/qemu/tests/blockdev_commit_speed_limit.py index 568312bf72..79b82114d6 100644 --- a/qemu/tests/blockdev_commit_speed_limit.py +++ b/qemu/tests/blockdev_commit_speed_limit.py @@ -2,13 +2,11 @@ from virttest.qemu_monitor import QMPCmdError -from provider import backup_utils -from provider import job_utils +from provider import backup_utils, job_utils from provider.blockdev_commit_base import BlockDevCommitTest class BlockdevCommitSpeedLimit(BlockDevCommitTest): - def commit_snapshots(self): device = self.params.get("device_tag") device_params = self.params.object_params(device) @@ -26,15 +24,15 @@ def commit_snapshots(self): 
job_utils.check_block_jobs_started(self.main_vm, [job_id]) small_speed = self.params.get_numeric("small_speed") large_speed = self.params.get_numeric("large_speed") - commit_speed = self.params.get("commit_speed", - random.randint(small_speed, - large_speed)) + commit_speed = self.params.get( + "commit_speed", random.randint(small_speed, large_speed) + ) if self.params.get_boolean("speed_is_int", True): commit_speed = int(commit_speed) try: - self.main_vm.monitor.cmd("block-job-set-speed", - {'device': job_id, - 'speed': commit_speed}) + self.main_vm.monitor.cmd( + "block-job-set-speed", {"device": job_id, "speed": commit_speed} + ) except QMPCmdError as e: self.test.log.info("Error message is %s", e.data) if self.params.get("error_msg") not in str(e.data): @@ -43,9 +41,9 @@ def commit_snapshots(self): output = job_utils.query_block_jobs(self.main_vm) if output[0]["speed"] != commit_speed: self.test.fail("Commit speed set failed") - self.main_vm.monitor.cmd("block-job-set-speed", - {'device': job_id, - 'speed': 0}) + self.main_vm.monitor.cmd( + "block-job-set-speed", {"device": job_id, "speed": 0} + ) job_utils.wait_until_block_job_completed(self.main_vm, job_id) def run_test(self): diff --git a/qemu/tests/blockdev_commit_standby.py b/qemu/tests/blockdev_commit_standby.py index a353beb995..f7582d82fb 100644 --- a/qemu/tests/blockdev_commit_standby.py +++ b/qemu/tests/blockdev_commit_standby.py @@ -1,6 +1,4 @@ -from provider import backup_utils -from provider import job_utils - +from provider import backup_utils, job_utils from provider.blockdev_commit_base import BlockDevCommitTest @@ -16,9 +14,13 @@ def commit_snapshots(self): backup_utils.set_default_block_job_options(self.main_vm, args) self.main_vm.monitor.cmd(cmd, args) job_id = args.get("job-id", device) - job_utils.wait_until_job_status_match(self.main_vm, "ready", job_id, timeout=120) + job_utils.wait_until_job_status_match( + self.main_vm, "ready", job_id, timeout=120 + ) self.main_vm.monitor.cmd("job-pause", {"id": job_id}) - job_utils.wait_until_job_status_match(self.main_vm, "standby", job_id, timeout=120) + job_utils.wait_until_job_status_match( + self.main_vm, "standby", job_id, timeout=120 + ) self.main_vm.monitor.cmd("job-complete", {"id": job_id}) self.main_vm.monitor.cmd("job-resume", {"id": job_id}) if not job_utils.get_event_by_condition(self.main_vm, "BLOCK_JOB_COMPLETED"): diff --git a/qemu/tests/blockdev_commit_stop_cont.py b/qemu/tests/blockdev_commit_stop_cont.py index 85c5a91019..6d68d98006 100644 --- a/qemu/tests/blockdev_commit_stop_cont.py +++ b/qemu/tests/blockdev_commit_stop_cont.py @@ -1,11 +1,8 @@ -from provider import backup_utils -from provider import job_utils - +from provider import backup_utils, job_utils from provider.blockdev_commit_base import BlockDevCommitTest class BlockdevCommitStopCont(BlockDevCommitTest): - def commit_snapshots(self): device = self.params.get("device_tag") device_params = self.params.object_params(device) diff --git a/qemu/tests/blockdev_commit_stress.py b/qemu/tests/blockdev_commit_stress.py index 6f2f293c95..2cd18986af 100644 --- a/qemu/tests/blockdev_commit_stress.py +++ b/qemu/tests/blockdev_commit_stress.py @@ -5,8 +5,7 @@ class BlockdevCommitStress(BlockDevCommitTest): def run_stress_test(self): - self.stress_test = utils_test.VMStress(self.main_vm, "stress", - self.params) + self.stress_test = utils_test.VMStress(self.main_vm, "stress", self.params) self.stress_test.load_stress_tool() def stress_running_check(self): diff --git a/qemu/tests/blockdev_commit_throttle.py 
b/qemu/tests/blockdev_commit_throttle.py index 1faca00973..b47caf3d68 100644 --- a/qemu/tests/blockdev_commit_throttle.py +++ b/qemu/tests/blockdev_commit_throttle.py @@ -3,9 +3,7 @@ class BlockdevCommitThrottle(BlockDevCommitTest): - def commit_snapshots(self): - def _commit_snapshots(device, base_node=None, top_node=None): arguments = {} if base_node: diff --git a/qemu/tests/blockdev_commit_to_nospace.py b/qemu/tests/blockdev_commit_to_nospace.py index e34f334068..e0f07f8861 100644 --- a/qemu/tests/blockdev_commit_to_nospace.py +++ b/qemu/tests/blockdev_commit_to_nospace.py @@ -3,11 +3,8 @@ class BlockdevCommitToNospace(BlockDevCommitTest): - - def generate_tempfile(self, root_dir, filename="data", - size="1000M", timeout=360): - backup_utils.generate_tempfile( - self.main_vm, root_dir, filename, size, timeout) + def generate_tempfile(self, root_dir, filename="data", size="1000M", timeout=360): + backup_utils.generate_tempfile(self.main_vm, root_dir, filename, size, timeout) self.files_info.append([root_dir, filename]) def commit_snapshots(self): diff --git a/qemu/tests/blockdev_commit_top.py b/qemu/tests/blockdev_commit_top.py index b1170094f4..a97c7746fa 100644 --- a/qemu/tests/blockdev_commit_top.py +++ b/qemu/tests/blockdev_commit_top.py @@ -1,10 +1,8 @@ from provider import backup_utils - from provider.blockdev_commit_base import BlockDevCommitTest class BlockdevCommitTop(BlockDevCommitTest): - def commit_snapshots(self): device = self.params.get("device_tag") device_params = self.params.object_params(device) diff --git a/qemu/tests/blockdev_commit_with_ignore.py b/qemu/tests/blockdev_commit_with_ignore.py index 571b5bf510..c1e6faa4f3 100644 --- a/qemu/tests/blockdev_commit_with_ignore.py +++ b/qemu/tests/blockdev_commit_with_ignore.py @@ -1,16 +1,12 @@ from avocado.utils import process -from provider import backup_utils -from provider import job_utils +from provider import backup_utils, job_utils from provider.blockdev_commit_base import BlockDevCommitTest class BlockdevCommitWithIgnore(BlockDevCommitTest): - - def generate_tempfile(self, root_dir, filename="data", - size="1000M", timeout=360): - backup_utils.generate_tempfile( - self.main_vm, root_dir, filename, size, timeout) + def generate_tempfile(self, root_dir, filename="data", size="1000M", timeout=360): + backup_utils.generate_tempfile(self.main_vm, root_dir, filename, size, timeout) self.files_info.append([root_dir, filename]) def commit_snapshots(self): @@ -27,11 +23,15 @@ def commit_snapshots(self): self.main_vm.monitor.cmd(cmd, arguments) job_id = arguments.get("job-id", device) get_event = job_utils.get_event_by_condition - event = get_event(self.main_vm, job_utils.BLOCK_JOB_ERROR_EVENT, - timeout, device=job_id, action='ignore') + event = get_event( + self.main_vm, + job_utils.BLOCK_JOB_ERROR_EVENT, + timeout, + device=job_id, + action="ignore", + ) if not event: - self.test.fail("Commit job can't reach error after %s seconds", - timeout) + self.test.fail("Commit job can't reach error after %s seconds" % timeout) process.system(self.params["extend_backend_space"]) process.system(self.params["resize_backend_size"]) job_utils.wait_until_block_job_completed(self.main_vm, job_id, timeout) diff --git a/qemu/tests/blockdev_commit_with_ioerror.py b/qemu/tests/blockdev_commit_with_ioerror.py index f3fd6a8703..039d7cf6df 100644 --- a/qemu/tests/blockdev_commit_with_ioerror.py +++ b/qemu/tests/blockdev_commit_with_ioerror.py @@ -5,19 +5,19 @@ class BlockdevCommitWithIoerror(BlockDevCommitTest): - def dd_io_error(self, root_dir, 
ori_filename, tar_filename, timeout=20): """dd file in snapshot""" self.session = self.main_vm.wait_for_login() self.file_info = self.files_info[0] - ori_file_path = "%s/%s" % (root_dir, ori_filename) - tar_file_path = "%s/%s" % (root_dir, tar_filename) + ori_file_path = f"{root_dir}/{ori_filename}" + tar_file_path = f"{root_dir}/{tar_filename}" dd_cmd = self.main_vm.params.get( - "dd_cmd", "dd if=%s of=%s bs=1M count=500 oflag=direct") + "dd_cmd", "dd if=%s of=%s bs=1M count=500 oflag=direct" + ) mk_file_cmd = dd_cmd % (ori_file_path, tar_file_path) try: self.session.cmd(mk_file_cmd, timeout=timeout) - except ShellTimeoutError as e: + except ShellTimeoutError: self.main_vm.verify_status("io-error") self.file_info.append(tar_filename) else: @@ -36,8 +36,7 @@ def create_snapshots(self, snapshot_tags, device): if idx == 0: arguments["node"] = self.device_node else: - arguments["node"] = self.get_node_name( - snapshot_tags[idx - 1]) + arguments["node"] = self.get_node_name(snapshot_tags[idx - 1]) self.main_vm.monitor.cmd(cmd, dict(arguments)) for info in self.disks_info: if device in info: @@ -46,11 +45,11 @@ def create_snapshots(self, snapshot_tags, device): def md5_io_error_file(self): if not self.session: self.session = self.main_vm.wait_for_login() - output = self.session.cmd_output('\n', timeout=120) + output = self.session.cmd_output("\n", timeout=120) if self.params["dd_done"] not in output: self.test.fail("dd not continue to run after vm resume") - tar_file_path = "%s/%s" % (self.file_info[0], self.file_info[2]) - md5_cmd = "md5sum %s > %s.md5 && sync" % (tar_file_path, tar_file_path) + tar_file_path = f"{self.file_info[0]}/{self.file_info[2]}" + md5_cmd = f"md5sum {tar_file_path} > {tar_file_path}.md5 && sync" self.session.cmd(md5_cmd, timeout=120) def commit_snapshots(self): @@ -66,14 +65,14 @@ def verify_data_file(self): self.session = self.main_vm.wait_for_login() ori_file_md5 = "" for info in [self.file_info[1], self.file_info[2]]: - file_path = "%s/%s" % (self.file_info[0], info) - cat_cmd = "cat %s.md5" % file_path + file_path = f"{self.file_info[0]}/{info}" + cat_cmd = f"cat {file_path}.md5" output = self.session.cmd_output(cat_cmd, timeout=120).split()[0] if not ori_file_md5: ori_file_md5 = output if ori_file_md5 != output: - msg = "file ('%s' '%s') md5 mismatch" % (ori_file_md5, output) - msg += "with value ('%s', '%s')" % (ori_file_md5, output) + msg = f"file ('{ori_file_md5}' '{output}') md5 mismatch" + msg += f"with value ('{ori_file_md5}', '{output}')" self.test.fail(msg) def op_after_commit(self): diff --git a/qemu/tests/blockdev_commit_with_stop.py b/qemu/tests/blockdev_commit_with_stop.py index 99a5093266..3229feb4e0 100644 --- a/qemu/tests/blockdev_commit_with_stop.py +++ b/qemu/tests/blockdev_commit_with_stop.py @@ -1,16 +1,12 @@ from avocado.utils import process -from provider import backup_utils -from provider import job_utils +from provider import backup_utils, job_utils from provider.blockdev_commit_base import BlockDevCommitTest class BlockdevCommitWithStop(BlockDevCommitTest): - - def generate_tempfile(self, root_dir, filename="data", - size="1000M", timeout=360): - backup_utils.generate_tempfile( - self.main_vm, root_dir, filename, size, timeout) + def generate_tempfile(self, root_dir, filename="data", size="1000M", timeout=360): + backup_utils.generate_tempfile(self.main_vm, root_dir, filename, size, timeout) self.files_info.append([root_dir, filename]) def commit_snapshots(self): @@ -25,8 +21,7 @@ def commit_snapshots(self): timeout = 
self.params.get("job_timeout", 600) self.main_vm.monitor.cmd(cmd, arguments) job_id = arguments.get("job-id", device) - job_utils.wait_until_job_status_match(self.main_vm, "paused", - job_id, timeout) + job_utils.wait_until_job_status_match(self.main_vm, "paused", job_id, timeout) process.system(self.params["extend_backend_space"]) process.system(self.params["resize_backend_size"]) args = {"id": job_id} diff --git a/qemu/tests/blockdev_full_backup_invalid_max_workers.py b/qemu/tests/blockdev_full_backup_invalid_max_workers.py index 7c6196f21a..1e9886c456 100644 --- a/qemu/tests/blockdev_full_backup_invalid_max_workers.py +++ b/qemu/tests/blockdev_full_backup_invalid_max_workers.py @@ -5,14 +5,12 @@ class BlkFullBackupInvalidMaxWorkers(BlockdevLiveBackupBaseTest): - def do_full_backup(self): max_workers = int(self.params["invalid_max_workers"]) extra_options = {"max-workers": max_workers} cmd, arguments = backup_utils.blockdev_backup_qmp_cmd( - self._source_nodes[0], - self._full_bk_nodes[0], - **extra_options) + self._source_nodes[0], self._full_bk_nodes[0], **extra_options + ) try: self.main_vm.monitor.cmd(cmd, arguments) except QMPCmdError as e: diff --git a/qemu/tests/blockdev_full_backup_invalid_sync_mode.py b/qemu/tests/blockdev_full_backup_invalid_sync_mode.py index b7e81f0591..226bd494cb 100644 --- a/qemu/tests/blockdev_full_backup_invalid_sync_mode.py +++ b/qemu/tests/blockdev_full_backup_invalid_sync_mode.py @@ -1,7 +1,6 @@ import re -from virttest import utils_qemu -from virttest import utils_misc +from virttest import utils_misc, utils_qemu from virttest.qemu_monitor import QMPCmdError from virttest.utils_version import VersionInterval @@ -10,7 +9,6 @@ class BlockdevFullBackupInvalidSyncTest(BlockdevLiveBackupBaseTest): - def do_full_backup(self): """ Backup source image to target image diff --git a/qemu/tests/blockdev_full_backup_multi_disks.py b/qemu/tests/blockdev_full_backup_multi_disks.py index db3cc23353..5ebe24d468 100644 --- a/qemu/tests/blockdev_full_backup_multi_disks.py +++ b/qemu/tests/blockdev_full_backup_multi_disks.py @@ -5,17 +5,15 @@ class BlockdevFullBackupMultiDisks(BlockdevFullBackupBaseTest): - def format_data_disk(self, tag): session = self.main_vm.wait_for_login() try: - info = backup_utils.get_disk_info_by_param(tag, - self.params, - session) + info = backup_utils.get_disk_info_by_param(tag, self.params, session) assert info, "Disk not found in guest!" - disk_path = "/dev/%s1" % info["kname"] + disk_path = "/dev/{}1".format(info["kname"]) mount_point = utils_disk.configure_empty_linux_disk( - session, info["kname"], info["size"])[0] + session, info["kname"], info["size"] + )[0] self.disks_info[tag] = [disk_path, mount_point] finally: session.close() diff --git a/qemu/tests/blockdev_full_backup_nonexist_target.py b/qemu/tests/blockdev_full_backup_nonexist_target.py index fbe8a5af37..fa1ff8d817 100644 --- a/qemu/tests/blockdev_full_backup_nonexist_target.py +++ b/qemu/tests/blockdev_full_backup_nonexist_target.py @@ -7,7 +7,6 @@ class BlockdevFullBackupNonexistTargetTest(BlockdevFullBackupBaseTest): - def prepare_test(self): self.prepare_main_vm() self.prepare_data_disks() @@ -16,11 +15,11 @@ def do_backup(self): """ Backup source image to target image """ - assert len( - self.target_disks) >= len( - self.source_disks), "No enough target disks define in cfg!" 
- src_lst = ["drive_%s" % x for x in self.source_disks] - dst_lst = ["drive_%s" % x for x in self.target_disks] + assert len(self.target_disks) >= len( + self.source_disks + ), "No enough target disks define in cfg!" + src_lst = [f"drive_{x}" for x in self.source_disks] + dst_lst = [f"drive_{x}" for x in self.target_disks] backup_cmd = backup_utils.blockdev_backup_qmp_cmd cmd, arguments = backup_cmd(src_lst[0], dst_lst[0], **self.backup_options) try: diff --git a/qemu/tests/blockdev_full_backup_reboot.py b/qemu/tests/blockdev_full_backup_reboot.py index f2a2c20411..1eb17076ee 100644 --- a/qemu/tests/blockdev_full_backup_reboot.py +++ b/qemu/tests/blockdev_full_backup_reboot.py @@ -2,7 +2,6 @@ class BlockdevFullBackupRebootTest(BlockdevFullBackupParallelTest): - def vm_reset(self): self.main_vm.reboot(method="system_reset") diff --git a/qemu/tests/blockdev_full_backup_stress.py b/qemu/tests/blockdev_full_backup_stress.py index b008ba17b9..90df478b1e 100644 --- a/qemu/tests/blockdev_full_backup_stress.py +++ b/qemu/tests/blockdev_full_backup_stress.py @@ -1,18 +1,16 @@ -import time import random +import time -from virttest import utils_test -from virttest import error_context +from virttest import error_context, utils_test from provider.blockdev_full_backup_parallel import BlockdevFullBackupParallelTest class BlockdevFullBackupStressTest(BlockdevFullBackupParallelTest): - def blockdev_backup(self): """sleep some secondes to wait VM load stress then do blockdev backup test.""" time.sleep(random.randint(1, 4)) - super(BlockdevFullBackupStressTest, self).blockdev_backup() + super().blockdev_backup() @error_context.context_aware def load_stress(self): diff --git a/qemu/tests/blockdev_full_backup_with_bitmap.py b/qemu/tests/blockdev_full_backup_with_bitmap.py index 2d56bc97ce..705a5f0c28 100644 --- a/qemu/tests/blockdev_full_backup_with_bitmap.py +++ b/qemu/tests/blockdev_full_backup_with_bitmap.py @@ -8,10 +8,11 @@ class BlockdevFullBackupWithBitmapTest(BlockdevLiveBackupBaseTest): - def add_bitmap(self): - kargs = {'bitmap_name': self._bitmaps[0], - 'target_device': self._source_nodes[0]} + kargs = { + "bitmap_name": self._bitmaps[0], + "target_device": self._source_nodes[0], + } block_dirty_bitmap_add(self.main_vm, kargs) def _get_full_backup_options(self): diff --git a/qemu/tests/blockdev_full_backup_x_perf.py b/qemu/tests/blockdev_full_backup_x_perf.py index c6f949f5a8..dfd0053761 100644 --- a/qemu/tests/blockdev_full_backup_x_perf.py +++ b/qemu/tests/blockdev_full_backup_x_perf.py @@ -1,5 +1,5 @@ -import re import json +import re from random import randrange from provider import backup_utils @@ -7,13 +7,11 @@ class BlkFullBackupXperf(BlockdevLiveBackupBaseTest): - def get_image_cluster_size(self): csize_parttern = self.params.get("cluster_size_pattern") image_name = self._source_images[0] image_params = self.params.object_params(image_name) - image = self.source_disk_define_by_params(image_params, - image_name) + image = self.source_disk_define_by_params(image_params, image_name) output = image.info(force_share=True) match = re.findall(csize_parttern, output) if match: @@ -28,9 +26,9 @@ def do_full_backup(self): extra_options = {"max-workers": max_workers, "max-chunk": max_chunk} else: extra_options = {"max-workers": max_workers} - backup_utils.blockdev_backup(self.main_vm, self._source_nodes[0], - self._full_bk_nodes[0], - **extra_options) + backup_utils.blockdev_backup( + self.main_vm, self._source_nodes[0], self._full_bk_nodes[0], **extra_options + ) def do_test(self): 
self.do_full_backup() diff --git a/qemu/tests/blockdev_full_mirror.py b/qemu/tests/blockdev_full_mirror.py index 343c26fc2e..f2c4a29831 100644 --- a/qemu/tests/blockdev_full_mirror.py +++ b/qemu/tests/blockdev_full_mirror.py @@ -1,37 +1,32 @@ import logging from avocado.utils import memory - from virttest import error_context -from provider import backup_utils -from provider import blockdev_full_backup_base - -LOG_JOB = logging.getLogger('avocado.test') +from provider import backup_utils, blockdev_full_backup_base +LOG_JOB = logging.getLogger("avocado.test") -class BlockDevFullMirrorTest( - blockdev_full_backup_base.BlockdevFullBackupBaseTest): +class BlockDevFullMirrorTest(blockdev_full_backup_base.BlockdevFullBackupBaseTest): @error_context.context_aware def blockdev_mirror(self): - source = "drive_%s" % self.source_disks[0] - target = "drive_%s" % self.target_disks[0] + source = f"drive_{self.source_disks[0]}" + target = f"drive_{self.target_disks[0]}" try: error_context.context( - "backup %s to %s, options: %s" % - (source, target, self.backup_options), LOG_JOB.info) + f"backup {source} to {target}, options: {self.backup_options}", + LOG_JOB.info, + ) backup_utils.blockdev_mirror( - self.main_vm, - source, - target, - **self.backup_options) + self.main_vm, source, target, **self.backup_options + ) finally: memory.drop_caches() def verify_blockdev_mirror(self): out = self.main_vm.monitor.query("block") - target_node = "drive_%s" % self.target_disks[0] + target_node = f"drive_{self.target_disks[0]}" for item in out: inserted = item["inserted"] if self.is_blockdev_mode(): @@ -40,7 +35,7 @@ def verify_blockdev_mirror(self): device = inserted.get("device") if device == target_node: return - self.test.fail("target node(%s) is not opening" % target_node) + self.test.fail(f"target node({target_node}) is not opening") @error_context.context_aware def do_backup(self): diff --git a/qemu/tests/blockdev_inc_backup_add_bitmap_to_raw.py b/qemu/tests/blockdev_inc_backup_add_bitmap_to_raw.py index 824448d7bd..c1118731ef 100644 --- a/qemu/tests/blockdev_inc_backup_add_bitmap_to_raw.py +++ b/qemu/tests/blockdev_inc_backup_add_bitmap_to_raw.py @@ -11,17 +11,18 @@ def prepare_test(self): def add_bitmap_to_raw_image(self): try: - kargs = {'node': self._source_nodes[0], - 'name': self._bitmaps[0], - 'persistent': self._full_backup_options['persistent']} + kargs = { + "node": self._source_nodes[0], + "name": self._bitmaps[0], + "persistent": self._full_backup_options["persistent"], + } self.main_vm.monitor.block_dirty_bitmap_add(**kargs) except QMPCmdError as e: - error_msg = self.params['error_msg'].format( - node=self._source_nodes[0]) + error_msg = self.params["error_msg"].format(node=self._source_nodes[0]) if error_msg not in str(e): - self.test.fail('Unexpected error: %s' % str(e)) + self.test.fail(f"Unexpected error: {str(e)}") else: - self.test.fail('Adding bitmap succeeded unexpectedly') + self.test.fail("Adding bitmap succeeded unexpectedly") def do_test(self): self.add_bitmap_to_raw_image() diff --git a/qemu/tests/blockdev_inc_backup_add_disabled_bitmap.py b/qemu/tests/blockdev_inc_backup_add_disabled_bitmap.py index 0d7921ad19..42da62717d 100644 --- a/qemu/tests/blockdev_inc_backup_add_disabled_bitmap.py +++ b/qemu/tests/blockdev_inc_backup_add_disabled_bitmap.py @@ -7,12 +7,22 @@ class BlockdevIncbkAddDisabledBitmapTest(BlockdevLiveBackupBaseTest): def check_disabled_bitmaps(self): """count of bitmaps should be 0""" - bitmaps = list(map( - lambda n, b: get_bitmap_by_name(self.main_vm, n, b), - 
self._source_nodes, self._bitmaps)) - if not all(list(map(lambda b: b and (b['recording'] is False) - and b['count'] == 0, bitmaps))): - self.test.fail('disabled bitmaps changed.') + bitmaps = list( + map( + lambda n, b: get_bitmap_by_name(self.main_vm, n, b), + self._source_nodes, + self._bitmaps, + ) + ) + if not all( + list( + map( + lambda b: b and (b["recording"] is False) and b["count"] == 0, + bitmaps, + ) + ) + ): + self.test.fail("disabled bitmaps changed.") def do_test(self): self.do_full_backup() diff --git a/qemu/tests/blockdev_inc_backup_add_persistent_bitmap_when_paused.py b/qemu/tests/blockdev_inc_backup_add_persistent_bitmap_when_paused.py index 5c60717364..5f9acd0e82 100644 --- a/qemu/tests/blockdev_inc_backup_add_persistent_bitmap_when_paused.py +++ b/qemu/tests/blockdev_inc_backup_add_persistent_bitmap_when_paused.py @@ -8,36 +8,38 @@ class BlockdevIncbkAddPersistentBitmapVMPaused(BlockdevLiveBackupBaseTest): """Add disabled bitmaps test""" def __init__(self, test, params, env): - super(BlockdevIncbkAddPersistentBitmapVMPaused, self).__init__( - test, params, env) + super().__init__(test, params, env) self._data_image_obj = self.source_disk_define_by_params( - self.params, self._source_images[0]) + self.params, self._source_images[0] + ) def prepare_test(self): self.prepare_main_vm() def add_persistent_bitmap(self): - kargs = {'bitmap_name': self._bitmaps[0], - 'target_device': self._source_nodes[0], - 'persistent': 'on'} + kargs = { + "bitmap_name": self._bitmaps[0], + "target_device": self._source_nodes[0], + "persistent": "on", + } block_dirty_bitmap_add(self.main_vm, kargs) def _get_image_bitmap_info(self): try: - out = json.loads(self._data_image_obj.info(True, 'json')) - return out['format-specific']['data']['bitmaps'][0] + out = json.loads(self._data_image_obj.info(True, "json")) + return out["format-specific"]["data"]["bitmaps"][0] except Exception as e: - self.test.fail('Failed to get bitmap info: %s' % str(e)) + self.test.fail(f"Failed to get bitmap info: {str(e)}") def check_image_bitmap_existed(self): bitmap = self._get_image_bitmap_info() - if bitmap['name'] != self._bitmaps[0]: - self.test.fail('Persistent bitmap should exist in image') + if bitmap["name"] != self._bitmaps[0]: + self.test.fail("Persistent bitmap should exist in image") def check_image_bitmap_in_use(self): bitmap = self._get_image_bitmap_info() - if 'in-use' not in bitmap['flags']: - self.test.fail('Failed to check bitmap in-use flag') + if "in-use" not in bitmap["flags"]: + self.test.fail("Failed to check bitmap in-use flag") def do_test(self): self.main_vm.pause() diff --git a/qemu/tests/blockdev_inc_backup_after_commit.py b/qemu/tests/blockdev_inc_backup_after_commit.py index dd273c3313..d437ead3d5 100644 --- a/qemu/tests/blockdev_inc_backup_after_commit.py +++ b/qemu/tests/blockdev_inc_backup_after_commit.py @@ -1,14 +1,11 @@ from virttest.qemu_devices.qdevices import QBlockdevFormatNode -from provider import backup_utils -from provider import blockdev_base -from provider import block_dirty_bitmap +from provider import backup_utils, block_dirty_bitmap, blockdev_base class BlockdevIncbkAfterCommitTest(blockdev_base.BlockdevBaseTest): - def __init__(self, test, params, env): - super(BlockdevIncbkAfterCommitTest, self).__init__(test, params, env) + super().__init__(test, params, env) self._source_nodes = [] self._full_bk_nodes = [] self._inc_bk_nodes = [] @@ -22,22 +19,21 @@ def __init__(self, test, params, env): def _init_arguments_by_params(self, tag): image_params = 
self.params.object_params(tag) image_chain = image_params.objects("image_backup_chain") - self._source_nodes.append("drive_%s" % tag) - self._full_bk_nodes.append("drive_%s" % image_chain[0]) - self._inc_bk_nodes.append("drive_%s" % image_chain[1]) + self._source_nodes.append(f"drive_{tag}") + self._full_bk_nodes.append(f"drive_{image_chain[0]}") + self._inc_bk_nodes.append(f"drive_{image_chain[1]}") self._inc_bk_images.append(image_chain[1]) self._snap_images.append(image_params["snap_image"]) - self._snap_nodes.append("drive_%s" % self._snap_images[-1]) - self._bitmaps.append("bitmap_%s" % tag) + self._snap_nodes.append(f"drive_{self._snap_images[-1]}") + self._bitmaps.append(f"bitmap_{tag}") # Add the full backup image only before full backup - self.params["image_backup_chain_%s" % tag] = image_chain[0] + self.params[f"image_backup_chain_{tag}"] = image_chain[0] def add_images_for_incremental_backup(self): """add incremental backup images with qmp command""" for idx, tag in enumerate(self._source_images): - self.params["image_backup_chain_%s" % - tag] = self._inc_bk_images[idx] + self.params[f"image_backup_chain_{tag}"] = self._inc_bk_images[idx] self.add_target_data_disks() def add_images_for_data_image_snapshots(self): @@ -50,20 +46,16 @@ def add_images_for_data_image_snapshots(self): # hotplug image with blockdev-add(format and protocol only) params = self.params.object_params(tag) - devices = self.main_vm.devices.images_define_by_params(tag, - params, - 'disk') + devices = self.main_vm.devices.images_define_by_params(tag, params, "disk") devices.pop() for dev in devices: if self.main_vm.devices.get_by_qid(dev.get_qid()): continue if isinstance(dev, QBlockdevFormatNode): dev.params["backing"] = None - ret = self.main_vm.devices.simple_hotplug(dev, - self.main_vm.monitor) + ret = self.main_vm.devices.simple_hotplug(dev, self.main_vm.monitor) if not ret[1]: - self.test.fail("Failed to hotplug '%s': %s." 
- % (dev, ret[0])) + self.test.fail(f"Failed to hotplug '{dev}': {ret[0]}.") def do_full_backup(self): """full backup: data->base""" @@ -73,7 +65,8 @@ def do_full_backup(self): self._source_nodes, self._full_bk_nodes, self._bitmaps, - **extra_options) + **extra_options, + ) def generate_new_files(self): return list(map(self.generate_data_file, self._source_images)) @@ -86,7 +79,8 @@ def do_incremental_backup(self): self._source_nodes, self._inc_bk_nodes, self._bitmaps, - **extra_options) + **extra_options, + ) def clone_vm_with_incremental_images(self): """clone VM with incremental backup images as vm's data images""" @@ -101,32 +95,32 @@ def clone_vm_with_incremental_images(self): self.clone_vm.create() self.clone_vm.verify_alive() - self.env.register_vm("%s_clone" % self.clone_vm.name, self.clone_vm) + self.env.register_vm(f"{self.clone_vm.name}_clone", self.clone_vm) def take_snapshots_on_data_images(self): """take snapshots on data images""" snapshot_options = {} for idx, source_node in enumerate(self._source_nodes): - backup_utils.blockdev_snapshot(self.main_vm, source_node, - self._snap_nodes[idx], - **snapshot_options) + backup_utils.blockdev_snapshot( + self.main_vm, source_node, self._snap_nodes[idx], **snapshot_options + ) def commit_snapshots_on_data_images(self): """commit snapshots onto data images""" commit_options = {} for idx, snap_node in enumerate(self._snap_nodes): - backup_utils.block_commit(self.main_vm, snap_node, - **commit_options) + backup_utils.block_commit(self.main_vm, snap_node, **commit_options) def check_bitmaps(self): for idx, bitmap in enumerate(self._bitmaps): info = block_dirty_bitmap.get_bitmap_by_name( - self.main_vm, self._source_nodes[idx], bitmap) + self.main_vm, self._source_nodes[idx], bitmap + ) if info: if info["count"] <= 0: self.test.fail("count in bitmap must be greater than 0") else: - self.test.fail("Failed to find bitmap %s" % bitmap) + self.test.fail(f"Failed to find bitmap {bitmap}") def do_test(self): self.do_full_backup() diff --git a/qemu/tests/blockdev_inc_backup_bitmap_inuse.py b/qemu/tests/blockdev_inc_backup_bitmap_inuse.py index 8b3c466912..a53d248de4 100644 --- a/qemu/tests/blockdev_inc_backup_bitmap_inuse.py +++ b/qemu/tests/blockdev_inc_backup_bitmap_inuse.py @@ -11,47 +11,51 @@ class BlockdevIncbkBitmapInuseTest(BlockdevLiveBackupBaseTest): def __init__(self, test, params, env): self._inc1_bk_nodes = [] self._inc2_bk_nodes = [] - super(BlockdevIncbkBitmapInuseTest, self).__init__(test, params, env) + super().__init__(test, params, env) self._forbidden_actions = [ partial(self.handle_bitmap, op=op) - for op in self.params.objects('bitmap_forbidden_actions') + for op in self.params.objects("bitmap_forbidden_actions") ] self._forbidden_actions.append( partial(self.do_inc_backup, self._inc2_bk_nodes[0]) ) def _init_arguments_by_params(self, tag): - super(BlockdevIncbkBitmapInuseTest, - self)._init_arguments_by_params(tag) + super()._init_arguments_by_params(tag) image_params = self.params.object_params(tag) image_chain = image_params.objects("image_backup_chain") - self._inc1_bk_nodes.append("drive_%s" % image_chain[1]) - self._inc2_bk_nodes.append("drive_%s" % image_chain[2]) + self._inc1_bk_nodes.append(f"drive_{image_chain[1]}") + self._inc2_bk_nodes.append(f"drive_{image_chain[2]}") def handle_bitmap(self, op): self.main_vm.monitor.cmd( - op, {'node': self._source_nodes[0], 'name': self._bitmaps[0]} + op, {"node": self._source_nodes[0], "name": self._bitmaps[0]} ) def do_inc_backup(self, target, speed=0): - 
self.main_vm.monitor.cmd('blockdev-backup', - {'device': self._source_nodes[0], - 'sync': 'incremental', - 'target': target, 'speed': speed, - 'bitmap': self._bitmaps[0], - 'job-id': 'job_%s' % target}) + self.main_vm.monitor.cmd( + "blockdev-backup", + { + "device": self._source_nodes[0], + "sync": "incremental", + "target": target, + "speed": speed, + "bitmap": self._bitmaps[0], + "job-id": f"job_{target}", + }, + ) def do_forbidden_actions(self): """All these actions should fail with proper error message""" - error_msg = self.params['error_msg'] % self._bitmaps[0] + error_msg = self.params["error_msg"] % self._bitmaps[0] for action in self._forbidden_actions: try: action() except QMPCmdError as e: if error_msg not in str(e): - self.test.fail('Unexpected error: %s' % str(e)) + self.test.fail(f"Unexpected error: {str(e)}") else: - self.test.fail('Unexpectedly succeeded') + self.test.fail("Unexpectedly succeeded") def do_test(self): self.do_full_backup() diff --git a/qemu/tests/blockdev_inc_backup_bitmap_max_len_name.py b/qemu/tests/blockdev_inc_backup_bitmap_max_len_name.py index 8bcd6b70eb..93be03c0d4 100644 --- a/qemu/tests/blockdev_inc_backup_bitmap_max_len_name.py +++ b/qemu/tests/blockdev_inc_backup_bitmap_max_len_name.py @@ -3,8 +3,7 @@ from avocado.utils import process -from provider.block_dirty_bitmap import block_dirty_bitmap_add -from provider.block_dirty_bitmap import get_bitmap_by_name +from provider.block_dirty_bitmap import block_dirty_bitmap_add, get_bitmap_by_name from provider.blockdev_live_backup_base import BlockdevLiveBackupBaseTest @@ -12,56 +11,65 @@ class BlockdevIncbkAddBitmapMaxLenName(BlockdevLiveBackupBaseTest): """Add a bitmap with the max len name(1023 chars)""" def __init__(self, test, params, env): - super(BlockdevIncbkAddBitmapMaxLenName, self).__init__(test, - params, - env) + super().__init__(test, params, env) self._max_len_name = self._make_bitmap_name() def _make_bitmap_name(self): - length = self.params.get_numeric( - 'bitmap_name_max_len') - len(self.params['prefix_name']) - return self.params['prefix_name'] + process.run( - self.params['create_bitmap_name_cmd'].format(length=length), - ignore_status=True, shell=True - ).stdout.decode().strip() + length = self.params.get_numeric("bitmap_name_max_len") - len( + self.params["prefix_name"] + ) + return ( + self.params["prefix_name"] + + process.run( + self.params["create_bitmap_name_cmd"].format(length=length), + ignore_status=True, + shell=True, + ) + .stdout.decode() + .strip() + ) def prepare_test(self): self.prepare_main_vm() def add_persistent_bitmap(self): - kargs = {'bitmap_name': self._max_len_name, - 'target_device': self._source_nodes[0], - 'persistent': 'on'} + kargs = { + "bitmap_name": self._max_len_name, + "target_device": self._source_nodes[0], + "persistent": "on", + } block_dirty_bitmap_add(self.main_vm, kargs) def check_image_bitmap_qemu_img(self): data_image_obj = self.source_disk_define_by_params( - self.params, self._source_images[0]) + self.params, self._source_images[0] + ) try: - out = json.loads(data_image_obj.info(True, 'json')) - bitmap = out['format-specific']['data']['bitmaps'][0] + out = json.loads(data_image_obj.info(True, "json")) + bitmap = out["format-specific"]["data"]["bitmaps"][0] except Exception as e: - self.test.fail('Failed to get bitmap: %s' % str(e)) + self.test.fail(f"Failed to get bitmap: {str(e)}") else: - if bitmap['name'] != self._max_len_name: - self.test.fail('Failed to get bitmap with qemu-img') + if bitmap["name"] != self._max_len_name: + 
self.test.fail("Failed to get bitmap with qemu-img") def check_image_bitmap_with_qmp_cmd(self): - bitmap = get_bitmap_by_name(self.main_vm, self._source_nodes[0], - self._max_len_name) + bitmap = get_bitmap_by_name( + self.main_vm, self._source_nodes[0], self._max_len_name + ) if bitmap is None: - self.test.fail('Failed to get bitmap with query-block') + self.test.fail("Failed to get bitmap with query-block") def check_qemu_aborted(self): """We used to hit core once, so add this check for future detection""" - with open(self.test.logfile, 'r') as f: + with open(self.test.logfile, "r") as f: out = f.read().strip() if re.search(self.error_msg, out, re.M): - self.test.fail('qemu aborted (core dumped)') + self.test.fail("qemu aborted (core dumped)") def post_test(self): - self.error_msg = '(core dumped)|%s Aborted' % self.main_vm.get_pid() - super(BlockdevIncbkAddBitmapMaxLenName, self).post_test() + self.error_msg = f"(core dumped)|{self.main_vm.get_pid()} Aborted" + super().post_test() self.check_qemu_aborted() def do_test(self): diff --git a/qemu/tests/blockdev_inc_backup_bitmap_mode_test.py b/qemu/tests/blockdev_inc_backup_bitmap_mode_test.py index d3b22ca711..604b7ec6af 100644 --- a/qemu/tests/blockdev_inc_backup_bitmap_mode_test.py +++ b/qemu/tests/blockdev_inc_backup_bitmap_mode_test.py @@ -1,17 +1,9 @@ -from provider import backup_utils -from provider import blockdev_base -from provider import block_dirty_bitmap +from provider import backup_utils, block_dirty_bitmap, blockdev_base class BlockdevIncreamentalBackupBitmapTest(blockdev_base.BlockdevBaseTest): - def __init__(self, test, params, env): - super( - BlockdevIncreamentalBackupBitmapTest, - self).__init__( - test, - params, - env) + super().__init__(test, params, env) self.source_images = [] self.full_backups = [] self.inc_backups = [] @@ -24,10 +16,10 @@ def __init__(self, test, params, env): def _init_arguments_by_params(self, tag): image_params = self.params.object_params(tag) image_chain = image_params.objects("image_backup_chain") - self.source_images.append("drive_%s" % tag) - self.full_backups.append("drive_%s" % image_chain[0]) - self.inc_backups.append("drive_%s" % image_chain[1]) - self.bitmaps.append("bitmap_%s" % tag) + self.source_images.append(f"drive_{tag}") + self.full_backups.append(f"drive_{image_chain[0]}") + self.inc_backups.append(f"drive_{image_chain[1]}") + self.bitmaps.append(f"bitmap_{tag}") def do_full_backup(self): extra_options = {"sync": "full", "auto_disable_bitmap": False} @@ -36,14 +28,14 @@ def do_full_backup(self): self.source_images, self.full_backups, self.bitmaps, - **extra_options) + **extra_options, + ) def generate_inc_files(self): return list(map(self.generate_data_file, self.src_img_tags)) def do_incremental_backup(self): - extra_options = {'sync': self.sync_mode, - 'auto_disable_bitmap': False} + extra_options = {"sync": self.sync_mode, "auto_disable_bitmap": False} if self.sync_mode != "top": extra_options["bitmap-mode"] = self.bitmap_mode backup_utils.blockdev_batch_backup( @@ -51,23 +43,21 @@ def do_incremental_backup(self): self.source_images, self.inc_backups, self.bitmaps, - **extra_options) + **extra_options, + ) def create_snapshot(self, source): snapshot_options = {} - source_node = "drive_%s" % source + source_node = f"drive_{source}" source_params = self.params.object_params(source) snapshot_tag = source_params["snapshot"] - snapshot_node = "drive_%s" % snapshot_tag - snapshot_img = self.target_disk_define_by_params( - self.params, snapshot_tag) + snapshot_node = 
f"drive_{snapshot_tag}" + snapshot_img = self.target_disk_define_by_params(self.params, snapshot_tag) snapshot_img.hotplug(self.main_vm) self.trash.append(snapshot_img) backup_utils.blockdev_snapshot( - self.main_vm, - source_node, - snapshot_node, - **snapshot_options) + self.main_vm, source_node, snapshot_node, **snapshot_options + ) def create_snapshots(self): return list(map(self.create_snapshot, self.src_img_tags)) @@ -76,8 +66,7 @@ def get_bitmaps_info(self): out = [] for idx, bitmap in enumerate(self.bitmaps): node = self.source_images[idx] - info = block_dirty_bitmap.get_bitmap_by_name( - self.main_vm, node, bitmap) + info = block_dirty_bitmap.get_bitmap_by_name(self.main_vm, node, bitmap) out.append(info) return out @@ -100,8 +89,12 @@ def check_bitmaps(self): else: keyword = "is not" condiction = info["count"] == 0 - assert condiction, "bitmap '%s' %s clear in '%s' mode: \n%s" % ( - info["name"], keyword, self.bitmap_mode, info) + assert condiction, "bitmap '{}' {} clear in '{}' mode: \n{}".format( + info["name"], + keyword, + self.bitmap_mode, + info, + ) def compare_images(self): self.main_vm.destroy() diff --git a/qemu/tests/blockdev_inc_backup_bitmap_not_exist.py b/qemu/tests/blockdev_inc_backup_bitmap_not_exist.py index e7fe2c5bdc..e2c9aa1a5d 100644 --- a/qemu/tests/blockdev_inc_backup_bitmap_not_exist.py +++ b/qemu/tests/blockdev_inc_backup_bitmap_not_exist.py @@ -13,17 +13,19 @@ def prepare_test(self): def do_incremental_backup(self): try: self.main_vm.monitor.cmd( - 'blockdev-backup', - {'device': self._source_nodes[0], - 'target': self._full_bk_nodes[0], - 'bitmap': self.params['non_existed_bitmap'], - 'sync': 'incremental'} + "blockdev-backup", + { + "device": self._source_nodes[0], + "target": self._full_bk_nodes[0], + "bitmap": self.params["non_existed_bitmap"], + "sync": "incremental", + }, ) except QMPCmdError as e: - if self.params['error_msg'] not in str(e): - self.test.fail('Unexpected error: %s' % str(e)) + if self.params["error_msg"] not in str(e): + self.test.fail(f"Unexpected error: {str(e)}") else: - self.test.fail('blockdev-backup succeeded unexpectedly') + self.test.fail("blockdev-backup succeeded unexpectedly") def do_test(self): self.do_incremental_backup() diff --git a/qemu/tests/blockdev_inc_backup_bitmap_size.py b/qemu/tests/blockdev_inc_backup_bitmap_size.py index 3166494bfa..7f2df62959 100644 --- a/qemu/tests/blockdev_inc_backup_bitmap_size.py +++ b/qemu/tests/blockdev_inc_backup_bitmap_size.py @@ -1,7 +1,6 @@ import json from avocado.utils import process - from virttest.utils_numeric import normalize_data_size from provider.block_dirty_bitmap import block_dirty_bitmap_add @@ -12,37 +11,36 @@ class BlockdevIncbkBitmapSizeTest(BlockdevLiveBackupBaseTest): """Estimate bitmaps size""" def __init__(self, test, params, env): - super(BlockdevIncbkBitmapSizeTest, self).__init__(test, params, env) - self._granularities = self.params.objects('granularity_list') + super().__init__(test, params, env) + self._granularities = self.params.objects("granularity_list") def add_bitmaps(self): - args = {'target_device': self._source_nodes[0], 'persistent': 'on'} + args = {"target_device": self._source_nodes[0], "persistent": "on"} if self._granularities: for granularity in self._granularities: g = int(normalize_data_size(granularity, "B")) - args.update({'bitmap_name': 'bitmap_%s' % g, - 'bitmap_granularity': g}) + args.update({"bitmap_name": f"bitmap_{g}", "bitmap_granularity": g}) block_dirty_bitmap_add(self.main_vm, args) else: - max_len = 
self.params.get_numeric('max_bitmap_name_len') - for i in range(self.params.get_numeric('bitmap_count')): + max_len = self.params.get_numeric("max_bitmap_name_len") + for i in range(self.params.get_numeric("bitmap_count")): l = max_len - len(str(i)) - args['bitmap_name'] = process.run( - self.params['create_bitmap_name_cmd'].format(length=l), - ignore_status=True, shell=True + args["bitmap_name"] = process.run( + self.params["create_bitmap_name_cmd"].format(length=l), + ignore_status=True, + shell=True, ).stdout.decode().strip() + str(i) block_dirty_bitmap_add(self.main_vm, args) def measure_bitmaps_size(self): - img = self.source_disk_define_by_params(self.params, - self._source_images[0]) - o = img.measure(self.params['target_fmt'], output='json').stdout_text + img = self.source_disk_define_by_params(self.params, self._source_images[0]) + o = img.measure(self.params["target_fmt"], output="json").stdout_text if o: info = json.loads(o) - if info.get(self.params['check_keyword'], 0) <= 0: - self.test.fail('Failed to get bitmap size') + if info.get(self.params["check_keyword"], 0) <= 0: + self.test.fail("Failed to get bitmap size") else: - self.test.error('Failed to measure a qcow2 image') + self.test.error("Failed to measure a qcow2 image") def prepare_test(self): self.prepare_main_vm() diff --git a/qemu/tests/blockdev_inc_backup_bitmap_vm_crash_reboot.py b/qemu/tests/blockdev_inc_backup_bitmap_vm_crash_reboot.py index 9757463582..8dc61a8c2a 100644 --- a/qemu/tests/blockdev_inc_backup_bitmap_vm_crash_reboot.py +++ b/qemu/tests/blockdev_inc_backup_bitmap_vm_crash_reboot.py @@ -14,34 +14,46 @@ def check_bitmap_existed(self): No need compare bitmap count with the original, for an active bitmap's count can be changed after reboot """ - bitmaps = list(map( - lambda n, b: get_bitmap_by_name(self.main_vm, n, b), - self._source_nodes, self._bitmaps)) - if not all(list(map(lambda b: b and (b['recording'] is True) - and b['count'] >= 0, bitmaps))): - self.test.fail('bitmap should still exist after vm crash.') + bitmaps = list( + map( + lambda n, b: get_bitmap_by_name(self.main_vm, n, b), + self._source_nodes, + self._bitmaps, + ) + ) + if not all( + list( + map( + lambda b: b and (b["recording"] is True) and b["count"] >= 0, + bitmaps, + ) + ) + ): + self.test.fail("bitmap should still exist after vm crash.") def trigger_vm_crash(self): session = self.main_vm.wait_for_login( - timeout=self.params.get_numeric('login_timeout', 300)) + timeout=self.params.get_numeric("login_timeout", 300) + ) try: - session.cmd(self.params['trigger_crash_cmd'], timeout=5) + session.cmd(self.params["trigger_crash_cmd"], timeout=5) except aexpect.ShellTimeoutError: pass else: - self.test.error('Error occurred when triggering vm crash') + self.test.error("Error occurred when triggering vm crash") finally: session.close() def wait_till_vm_reboot(self): session = self.main_vm.wait_for_login( - timeout=self.params.get_numeric('login_timeout', 360)) + timeout=self.params.get_numeric("login_timeout", 360) + ) session.close() def check_vm_reset_event(self): - tmo = self.params.get_numeric('vm_reset_timeout', 60) - if get_event_by_condition(self.main_vm, 'RESET', tmo) is None: - self.test.fail('Failed to reset VM after triggering crash') + tmo = self.params.get_numeric("vm_reset_timeout", 60) + if get_event_by_condition(self.main_vm, "RESET", tmo) is None: + self.test.fail("Failed to reset VM after triggering crash") def do_test(self): self.do_full_backup() diff --git a/qemu/tests/blockdev_inc_backup_bitmap_with_granularity.py 
b/qemu/tests/blockdev_inc_backup_bitmap_with_granularity.py index 8866c67cfb..a7fc593ce9 100644 --- a/qemu/tests/blockdev_inc_backup_bitmap_with_granularity.py +++ b/qemu/tests/blockdev_inc_backup_bitmap_with_granularity.py @@ -10,29 +10,34 @@ class BlockdevIncbkBitmapGranularityTest(BlockdevLiveBackupBaseTest): """bitmap with granularity testing""" def __init__(self, test, params, env): - super(BlockdevIncbkBitmapGranularityTest, self).__init__(test, - params, - env) + super().__init__(test, params, env) self._set_granularity() def _set_granularity(self): - granularities = self.params.objects('granularity_list') - granularity = random.choice( - granularities) if granularities else self.params['granularity'] - self._full_backup_options['granularity'] = int( - normalize_data_size(granularity, "B")) + granularities = self.params.objects("granularity_list") + granularity = ( + random.choice(granularities) + if granularities + else self.params["granularity"] + ) + self._full_backup_options["granularity"] = int( + normalize_data_size(granularity, "B") + ) def _get_bitmaps(self): - return list(map( - lambda n, b: get_bitmap_by_name(self.main_vm, n, b), - self._source_nodes, self._bitmaps)) + return list( + map( + lambda n, b: get_bitmap_by_name(self.main_vm, n, b), + self._source_nodes, + self._bitmaps, + ) + ) def check_bitmaps_granularity(self): bitmaps = self._get_bitmaps() - granularity = self._full_backup_options['granularity'] - if not all(list(map( - lambda b: b.get('granularity') == granularity, bitmaps))): - self.test.fail('Failed to set granularity') + granularity = self._full_backup_options["granularity"] + if not all(list(map(lambda b: b.get("granularity") == granularity, bitmaps))): + self.test.fail("Failed to set granularity") def do_test(self): self.do_full_backup() diff --git a/qemu/tests/blockdev_inc_backup_bitmap_with_hotplug.py b/qemu/tests/blockdev_inc_backup_bitmap_with_hotplug.py index b37813413b..8126be36d9 100644 --- a/qemu/tests/blockdev_inc_backup_bitmap_with_hotplug.py +++ b/qemu/tests/blockdev_inc_backup_bitmap_with_hotplug.py @@ -1,6 +1,6 @@ from virttest.utils_misc import wait_for -from provider.block_dirty_bitmap import get_bitmaps, get_bitmap_by_name +from provider.block_dirty_bitmap import get_bitmap_by_name, get_bitmaps from provider.blockdev_live_backup_base import BlockdevLiveBackupBaseTest @@ -9,25 +9,28 @@ class BlockdevIncbkAddBitmapToHotplugImg(BlockdevLiveBackupBaseTest): def check_bitmap_count_gt_zero(self): """count of bitmaps should be 0""" - bitmaps = list(map( - lambda n, b: get_bitmap_by_name(self.main_vm, n, b), - self._source_nodes, self._bitmaps)) - if not all(list(map(lambda b: b and b['count'] > 0, bitmaps))): - self.test.fail('bitmap count should be greater than 0.') + bitmaps = list( + map( + lambda n, b: get_bitmap_by_name(self.main_vm, n, b), + self._source_nodes, + self._bitmaps, + ) + ) + if not all(list(map(lambda b: b and b["count"] > 0, bitmaps))): + self.test.fail("bitmap count should be greater than 0.") def hotplug_data_disk(self): tag = self._source_images[0] self._devices = self.main_vm.devices.images_define_by_params( - tag, self.params.object_params(tag), 'disk') + tag, self.params.object_params(tag), "disk" + ) for dev in self._devices: - ret = self.main_vm.devices.simple_hotplug( - dev, self.main_vm.monitor) + ret = self.main_vm.devices.simple_hotplug(dev, self.main_vm.monitor) if not ret[1]: - self.test.fail("Failed to hotplug '%s': %s." 
- % (dev, ret[0])) + self.test.fail(f"Failed to hotplug '{dev}': {ret[0]}.") def prepare_main_vm(self): - super(BlockdevIncbkAddBitmapToHotplugImg, self).prepare_main_vm() + super().prepare_main_vm() self.hotplug_data_disk() def unplug_data_disk(self): @@ -35,9 +38,12 @@ def unplug_data_disk(self): for dev in self._devices[-1:-3:-1]: out = dev.unplug(self.main_vm.monitor) if not wait_for( - lambda: dev.verify_unplug(out, self.main_vm.monitor), - first=1, step=5, timeout=30): - self.test.fail('Failed to unplug device') + lambda: dev.verify_unplug(out, self.main_vm.monitor), + first=1, + step=5, + timeout=30, + ): + self.test.fail("Failed to unplug device") def check_bitmap_gone(self): out = self.main_vm.monitor.cmd("query-block") @@ -48,7 +54,7 @@ def check_bitmap_gone(self): def prepare_test(self): if self.params.get("not_preprocess") == "yes": self.preprocess_data_disks() - super(BlockdevIncbkAddBitmapToHotplugImg, self).prepare_test() + super().prepare_test() def do_test(self): self.do_full_backup() diff --git a/qemu/tests/blockdev_inc_backup_clear_bitmap.py b/qemu/tests/blockdev_inc_backup_clear_bitmap.py index 733e1dab22..7cf92ccce5 100644 --- a/qemu/tests/blockdev_inc_backup_clear_bitmap.py +++ b/qemu/tests/blockdev_inc_backup_clear_bitmap.py @@ -1,6 +1,8 @@ -from provider.block_dirty_bitmap import get_bitmap_by_name -from provider.block_dirty_bitmap import block_dirty_bitmap_clear -from provider.block_dirty_bitmap import block_dirty_bitmap_disable +from provider.block_dirty_bitmap import ( + block_dirty_bitmap_clear, + block_dirty_bitmap_disable, + get_bitmap_by_name, +) from provider.blockdev_live_backup_base import BlockdevLiveBackupBaseTest @@ -14,29 +16,41 @@ def clear_bitmaps(self): of the bitmap should be 0, so no more check is needed after clearing the bitmap. 
""" - list(map( - lambda n, b: block_dirty_bitmap_clear(self.main_vm, n, b), - self._source_nodes, self._bitmaps)) + list( + map( + lambda n, b: block_dirty_bitmap_clear(self.main_vm, n, b), + self._source_nodes, + self._bitmaps, + ) + ) def disable_bitmaps(self): - list(map( - lambda n, b: block_dirty_bitmap_disable(self.main_vm, n, b), - self._source_nodes, self._bitmaps)) + list( + map( + lambda n, b: block_dirty_bitmap_disable(self.main_vm, n, b), + self._source_nodes, + self._bitmaps, + ) + ) def check_bitmaps_count_gt_zero(self): """active bitmap's count should be greater than 0 after file writing""" - bitmaps_info = list(map( - lambda n, b: get_bitmap_by_name(self.main_vm, n, b), - self._source_nodes, self._bitmaps)) - if not all(list(map(lambda b: b and b['count'] > 0, bitmaps_info))): - self.test.fail('bitmaps count should be greater than 0') + bitmaps_info = list( + map( + lambda n, b: get_bitmap_by_name(self.main_vm, n, b), + self._source_nodes, + self._bitmaps, + ) + ) + if not all(list(map(lambda b: b and b["count"] > 0, bitmaps_info))): + self.test.fail("bitmaps count should be greater than 0") def do_test(self): self.do_full_backup() - self.generate_inc_files('inc1') + self.generate_inc_files("inc1") self.check_bitmaps_count_gt_zero() self.clear_bitmaps() - self.generate_inc_files('inc2') + self.generate_inc_files("inc2") self.check_bitmaps_count_gt_zero() self.disable_bitmaps() self.clear_bitmaps() diff --git a/qemu/tests/blockdev_inc_backup_convert_with_bitmap.py b/qemu/tests/blockdev_inc_backup_convert_with_bitmap.py index 941be60e31..0fde0c668c 100644 --- a/qemu/tests/blockdev_inc_backup_convert_with_bitmap.py +++ b/qemu/tests/blockdev_inc_backup_convert_with_bitmap.py @@ -1,5 +1,4 @@ from avocado.utils import process - from virttest.utils_misc import get_qemu_img_binary from provider.blockdev_live_backup_base import BlockdevLiveBackupBaseTest @@ -9,47 +8,47 @@ class BlockdevIncbkConvertWithBitmapsTest(BlockdevLiveBackupBaseTest): """Convert image with persistent bitmaps""" def __init__(self, test, params, env): - super(BlockdevIncbkConvertWithBitmapsTest, self).__init__( - test, params, env) - self._bitmaps = params.objects('bitmap_list') + super().__init__(test, params, env) + self._bitmaps = params.objects("bitmap_list") self._bitmap_states = [True, False] self._src_image = self.source_disk_define_by_params( - self.params, self._source_images[0]) + self.params, self._source_images[0] + ) self._target_image = self.source_disk_define_by_params( - self.params, self.params['convert_target']) + self.params, self.params["convert_target"] + ) def prepare_test(self): self.prepare_main_vm() def add_persistent_bitmaps(self): - bitmaps = [{'node': self._source_nodes[0], - 'name': b, - 'persistent': self._full_backup_options['persistent'], - 'disabled': s} - for b, s in zip(self._bitmaps, self._bitmap_states)] - job_list = [{'type': 'block-dirty-bitmap-add', 'data': data} - for data in bitmaps] + bitmaps = [ + { + "node": self._source_nodes[0], + "name": b, + "persistent": self._full_backup_options["persistent"], + "disabled": s, + } + for b, s in zip(self._bitmaps, self._bitmap_states) + ] + job_list = [ + {"type": "block-dirty-bitmap-add", "data": data} for data in bitmaps + ] self.main_vm.monitor.transaction(job_list) def convert_data_image_with_bitmaps(self): # TODO: bitmap option is not supported by qemu_storage.convert, # so run qemu-img command explictly to convert an qcow2 image to # the target local qcow2 image - cmd = '{qemu_img} convert -f {fmt} -O {ofmt} --bitmaps {s} 
{t}'.format( - qemu_img=get_qemu_img_binary(self.params), - fmt=self._src_image.image_format, - ofmt=self._target_image.image_format, - s=self._src_image.image_filename, - t=self._target_image.image_filename - ) + cmd = f"{get_qemu_img_binary(self.params)} convert -f {self._src_image.image_format} -O {self._target_image.image_format} --bitmaps {self._src_image.image_filename} {self._target_image.image_filename}" process.system(cmd, ignore_status=False, shell=True) self.trash.append(self._target_image) def check_image_bitmaps_existed(self): - check_list = ['name: %s' % b for b in self._bitmaps] + check_list = [f"name: {b}" for b in self._bitmaps] info = self._target_image.info() if not all([b in info for b in check_list]): - self.test.fail('Persistent bitmaps should exist in image') + self.test.fail("Persistent bitmaps should exist in image") def do_test(self): self.add_persistent_bitmaps() diff --git a/qemu/tests/blockdev_inc_backup_disable_bitmap.py b/qemu/tests/blockdev_inc_backup_disable_bitmap.py index f5eef6e855..74c48f7c26 100644 --- a/qemu/tests/blockdev_inc_backup_disable_bitmap.py +++ b/qemu/tests/blockdev_inc_backup_disable_bitmap.py @@ -1,5 +1,4 @@ -from provider.block_dirty_bitmap import get_bitmap_by_name -from provider.block_dirty_bitmap import block_dirty_bitmap_disable +from provider.block_dirty_bitmap import block_dirty_bitmap_disable, get_bitmap_by_name from provider.blockdev_live_backup_base import BlockdevLiveBackupBaseTest @@ -7,32 +6,49 @@ class BlockdevIncbkDisableBitmapTest(BlockdevLiveBackupBaseTest): """Disable bitmap test""" def _get_bitmaps(self): - return list(map( - lambda n, b: get_bitmap_by_name(self.main_vm, n, b), - self._source_nodes, self._bitmaps)) + return list( + map( + lambda n, b: get_bitmap_by_name(self.main_vm, n, b), + self._source_nodes, + self._bitmaps, + ) + ) def disable_bitmaps(self): - list(map( - lambda n, b: block_dirty_bitmap_disable(self.main_vm, n, b), - self._source_nodes, self._bitmaps)) + list( + map( + lambda n, b: block_dirty_bitmap_disable(self.main_vm, n, b), + self._source_nodes, + self._bitmaps, + ) + ) self._disabled_bitmaps_info = self._get_bitmaps() def check_disabled_bitmaps(self): """bitmaps should be disabled, count should not change""" bitmaps_info = self._get_bitmaps() - if not all(list(map( - lambda b1, b2: (b1 and b2 - and b1['count'] == b2['count'] # same count - and b2['count'] > 0 # count > 0 - and (b2['recording'] is False)), # disabled - self._disabled_bitmaps_info, bitmaps_info))): - self.test.fail('bitmaps count or status changed') + if not all( + list( + map( + lambda b1, b2: ( + b1 + and b2 + and b1["count"] == b2["count"] # same count + and b2["count"] > 0 # count > 0 + and (b2["recording"] is False) + ), # disabled + self._disabled_bitmaps_info, + bitmaps_info, + ) + ) + ): + self.test.fail("bitmaps count or status changed") def do_test(self): self.do_full_backup() - self.generate_inc_files('inc1') + self.generate_inc_files("inc1") self.disable_bitmaps() - self.generate_inc_files('inc2') + self.generate_inc_files("inc2") self.check_disabled_bitmaps() diff --git a/qemu/tests/blockdev_inc_backup_disabled_persistent_bitmap.py b/qemu/tests/blockdev_inc_backup_disabled_persistent_bitmap.py index 3393b95e59..d144c399fa 100644 --- a/qemu/tests/blockdev_inc_backup_disabled_persistent_bitmap.py +++ b/qemu/tests/blockdev_inc_backup_disabled_persistent_bitmap.py @@ -1,5 +1,4 @@ -from provider.block_dirty_bitmap import get_bitmap_by_name -from provider.block_dirty_bitmap import block_dirty_bitmap_disable +from 
provider.block_dirty_bitmap import block_dirty_bitmap_disable, get_bitmap_by_name from provider.blockdev_live_backup_base import BlockdevLiveBackupBaseTest @@ -9,42 +8,60 @@ class BlockdevIncbkDisPersistentBitmapTest(BlockdevLiveBackupBaseTest): def check_disabled_bitmaps_after_vm_reboot(self): """bitmaps still disabled, and counts should keep the same as before""" bitmaps_info = self._get_bitmaps() - if not all(list(map( - lambda b1, b2: (b1 and b2 - and b2['count'] > 0 - and b1['count'] == b2['count'] - and (b2['recording'] is False)), - self._disabled_bitmaps_info, bitmaps_info))): + if not all( + list( + map( + lambda b1, b2: ( + b1 + and b2 + and b2["count"] > 0 + and b1["count"] == b2["count"] + and (b2["recording"] is False) + ), + self._disabled_bitmaps_info, + bitmaps_info, + ) + ) + ): self.test.fail("bitmaps' count or status changed") def disable_bitmaps(self): - list(map( - lambda n, b: block_dirty_bitmap_disable(self.main_vm, n, b), - self._source_nodes, self._bitmaps)) + list( + map( + lambda n, b: block_dirty_bitmap_disable(self.main_vm, n, b), + self._source_nodes, + self._bitmaps, + ) + ) self._disabled_bitmaps_info = self._get_bitmaps() def _get_bitmaps(self): - return list(map( - lambda n, b: get_bitmap_by_name(self.main_vm, n, b), - self._source_nodes, self._bitmaps)) + return list( + map( + lambda n, b: get_bitmap_by_name(self.main_vm, n, b), + self._source_nodes, + self._bitmaps, + ) + ) def check_image_bitmaps_existed(self): """Persistent bitmaps should be saved""" + def _check(tag): out = self.source_disk_define_by_params(self.params, tag).info() if out: - if self.params['check_bitmaps'] not in out: - self.test.fail( - 'Persistent bitmaps should be saved in image') + if self.params["check_bitmaps"] not in out: + self.test.fail("Persistent bitmaps should be saved in image") else: - self.test.error('Error when querying image info with qemu-img') + self.test.error("Error when querying image info with qemu-img") list(map(_check, self._source_images)) def powerdown_vm(self): self.main_vm.monitor.system_powerdown() if not self.main_vm.wait_for_shutdown( - self.params.get_numeric("shutdown_timeout", 360)): + self.params.get_numeric("shutdown_timeout", 360) + ): self.test.fail("Failed to poweroff vm") def restart_vm(self): diff --git a/qemu/tests/blockdev_inc_backup_enable_bitmap.py b/qemu/tests/blockdev_inc_backup_enable_bitmap.py index 48c76c8520..dcf38bd162 100644 --- a/qemu/tests/blockdev_inc_backup_enable_bitmap.py +++ b/qemu/tests/blockdev_inc_backup_enable_bitmap.py @@ -1,5 +1,4 @@ -from provider.block_dirty_bitmap import get_bitmap_by_name -from provider.block_dirty_bitmap import block_dirty_bitmap_enable +from provider.block_dirty_bitmap import block_dirty_bitmap_enable, get_bitmap_by_name from provider.blockdev_live_backup_base import BlockdevLiveBackupBaseTest @@ -7,33 +6,39 @@ class BlockdevIncbkEnableBitmapTest(BlockdevLiveBackupBaseTest): """Enable disabled bitmaps""" def _get_bitmaps(self): - return list(map( - lambda n, b: get_bitmap_by_name(self.main_vm, n, b), - self._source_nodes, self._bitmaps)) + return list( + map( + lambda n, b: get_bitmap_by_name(self.main_vm, n, b), + self._source_nodes, + self._bitmaps, + ) + ) def enable_bitmaps(self): - list(map( - lambda n, b: block_dirty_bitmap_enable(self.main_vm, n, b), - self._source_nodes, self._bitmaps)) + list( + map( + lambda n, b: block_dirty_bitmap_enable(self.main_vm, n, b), + self._source_nodes, + self._bitmaps, + ) + ) def check_disabled_bitmaps_count(self): """count should always be 0""" - if not 
all(list(map(lambda b: b and b['count'] == 0, - self._get_bitmaps()))): - self.test.fail('disabled bitmap count should always be 0') + if not all(list(map(lambda b: b and b["count"] == 0, self._get_bitmaps()))): + self.test.fail("disabled bitmap count should always be 0") def check_enabled_bitmaps_count(self): """count should be greater than 0""" - if not all(list(map(lambda b: b and b['count'] > 0, - self._get_bitmaps()))): - self.test.fail('active bitmap count should be greater than 0') + if not all(list(map(lambda b: b and b["count"] > 0, self._get_bitmaps()))): + self.test.fail("active bitmap count should be greater than 0") def do_test(self): self.do_full_backup() - self.generate_inc_files('inc1') + self.generate_inc_files("inc1") self.check_disabled_bitmaps_count() self.enable_bitmaps() - self.generate_inc_files('inc2') + self.generate_inc_files("inc2") self.check_enabled_bitmaps_count() diff --git a/qemu/tests/blockdev_inc_backup_expose_active_bitmap.py b/qemu/tests/blockdev_inc_backup_expose_active_bitmap.py index bb4b29389b..c90e477229 100644 --- a/qemu/tests/blockdev_inc_backup_expose_active_bitmap.py +++ b/qemu/tests/blockdev_inc_backup_expose_active_bitmap.py @@ -13,41 +13,44 @@ class BlockdevIncbkExposeActiveBitmap(BlockdevLiveBackupBaseTest): """Expose an active bitmap""" def __init__(self, test, params, env): - super(BlockdevIncbkExposeActiveBitmap, self).__init__(test, params, - env) + super().__init__(test, params, env) - if self.params.get_boolean('enable_nbd'): - self.params['nbd_server_data1'] = self.params.get('nbd_server') - self.params['nbd_client_tls_creds_data1'] = self.params.get( - 'nbd_client_tls_creds') + if self.params.get_boolean("enable_nbd"): + self.params["nbd_server_data1"] = self.params.get("nbd_server") + self.params["nbd_client_tls_creds_data1"] = self.params.get( + "nbd_client_tls_creds" + ) self._nbd_export = None localhost = socket.gethostname() - self.params['nbd_server_full'] = localhost if localhost else 'localhost' - self.params['nbd_export_bitmaps_full'] = self._bitmaps[0] + self.params["nbd_server_full"] = localhost if localhost else "localhost" + self.params["nbd_export_bitmaps_full"] = self._bitmaps[0] self._fleecing_image_obj = self.source_disk_define_by_params( - self.params, self._full_bk_images[0]) + self.params, self._full_bk_images[0] + ) self.trash.append(self._fleecing_image_obj) def add_bitmap(self): - kargs = {'bitmap_name': self._bitmaps[0], - 'target_device': self._source_nodes[0], - 'persistent': 'off'} + kargs = { + "bitmap_name": self._bitmaps[0], + "target_device": self._source_nodes[0], + "persistent": "off", + } block_dirty_bitmap_add(self.main_vm, kargs) def add_target_data_disks(self): self._fleecing_image_obj.create(self.params) tag = self._fleecing_image_obj.tag devices = self.main_vm.devices.images_define_by_params( - tag, self.params.object_params(tag), 'disk') + tag, self.params.object_params(tag), "disk" + ) devices.pop() # ignore the front end device for dev in devices: if isinstance(dev, QBlockdevFormatNode): dev.params["backing"] = self._source_nodes[0] - ret = self.main_vm.devices.simple_hotplug( - dev, self.main_vm.monitor) + ret = self.main_vm.devices.simple_hotplug(dev, self.main_vm.monitor) if not ret[1]: - self.test.fail("Failed to hotplug '%s'" % dev) + self.test.fail(f"Failed to hotplug '{dev}'") def prepare_test(self): self.prepare_main_vm() @@ -55,23 +58,28 @@ def prepare_test(self): self.prepare_data_disks() self.add_target_data_disks() self._nbd_export = InternalNBDExportImage( - self.main_vm, 
self.params, self._full_bk_images[0]) + self.main_vm, self.params, self._full_bk_images[0] + ) self._nbd_export.start_nbd_server() def expose_active_bitmap(self): try: self._nbd_export.add_nbd_image(self._full_bk_nodes[0]) except QMPCmdError as e: - error_msg = self.params['error_msg'] % self._bitmaps[0] + error_msg = self.params["error_msg"] % self._bitmaps[0] if error_msg not in str(e): - self.test.fail('Unexpected error: %s' % str(e)) + self.test.fail(f"Unexpected error: {str(e)}") else: - self.test.fail('active bitmap export completed unexpectedly') + self.test.fail("active bitmap export completed unexpectedly") def do_full_backup(self): - blockdev_batch_backup(self.main_vm, self._source_nodes, - self._full_bk_nodes, None, - **self._full_backup_options) + blockdev_batch_backup( + self.main_vm, + self._source_nodes, + self._full_bk_nodes, + None, + **self._full_backup_options, + ) def do_test(self): self.do_full_backup() diff --git a/qemu/tests/blockdev_inc_backup_filternode.py b/qemu/tests/blockdev_inc_backup_filternode.py index 6331cb0e17..19ad52e410 100644 --- a/qemu/tests/blockdev_inc_backup_filternode.py +++ b/qemu/tests/blockdev_inc_backup_filternode.py @@ -1,41 +1,49 @@ from provider.blockdev_live_backup_base import BlockdevLiveBackupBaseTest -from provider.job_utils import check_block_jobs_started -from provider.job_utils import wait_until_block_job_completed +from provider.job_utils import check_block_jobs_started, wait_until_block_job_completed class BlockdevIncbkFilterNodeTest(BlockdevLiveBackupBaseTest): - """ live backup with filter-node-name test """ + """live backup with filter-node-name test""" def __init__(self, test, params, env): - super(BlockdevIncbkFilterNodeTest, self).__init__(test, params, env) - self._jobid = 'backup_%s_job' % self._source_nodes[0] - self._full_backup_options.update({ - 'device': self._source_nodes[0], 'target': self._full_bk_nodes[0], - 'filter-node-name': self.params['filter_node_name'], - 'job-id': self._jobid}) + super().__init__(test, params, env) + self._jobid = f"backup_{self._source_nodes[0]}_job" + self._full_backup_options.update( + { + "device": self._source_nodes[0], + "target": self._full_bk_nodes[0], + "filter-node-name": self.params["filter_node_name"], + "job-id": self._jobid, + } + ) def check_node_attached(self, node): """The filter node name should be set when doing backup""" - for item in self.main_vm.monitor.query('block'): - if (self._source_images[0] in item['qdev'] - and item['inserted'].get('node-name') == node): + for item in self.main_vm.monitor.query("block"): + if ( + self._source_images[0] in item["qdev"] + and item["inserted"].get("node-name") == node + ): break else: - self.test.fail('Filter node (%s) is not attached' % node) + self.test.fail(f"Filter node ({node}) is not attached") def do_full_backup(self): - self.main_vm.monitor.cmd('blockdev-backup', self._full_backup_options) + self.main_vm.monitor.cmd("blockdev-backup", self._full_backup_options) def set_max_job_speed(self): self.main_vm.monitor.cmd( - "block-job-set-speed", {'device': self._jobid, 'speed': 0}) + "block-job-set-speed", {"device": self._jobid, "speed": 0} + ) def do_test(self): self.do_full_backup() check_block_jobs_started( - self.main_vm, [self._jobid], - self.params.get_numeric('job_started_timeout', 30)) - self.check_node_attached(self.params['filter_node_name']) + self.main_vm, + [self._jobid], + self.params.get_numeric("job_started_timeout", 30), + ) + self.check_node_attached(self.params["filter_node_name"]) self.set_max_job_speed() 
wait_until_block_job_completed(self.main_vm, self._jobid) self.check_node_attached(self._source_nodes[0]) diff --git a/qemu/tests/blockdev_inc_backup_inc_success.py b/qemu/tests/blockdev_inc_backup_inc_success.py index f6c781c5fd..cf1316905a 100644 --- a/qemu/tests/blockdev_inc_backup_inc_success.py +++ b/qemu/tests/blockdev_inc_backup_inc_success.py @@ -1,15 +1,11 @@ -from provider import backup_utils -from provider import blockdev_base -from provider import block_dirty_bitmap - from virttest import utils_misc +from provider import backup_utils, block_dirty_bitmap, blockdev_base -class BlockdevIncbkIncSyncSuccBitmapTest(blockdev_base.BlockdevBaseTest): +class BlockdevIncbkIncSyncSuccBitmapTest(blockdev_base.BlockdevBaseTest): def __init__(self, test, params, env): - super(BlockdevIncbkIncSyncSuccBitmapTest, self).__init__( - test, params, env) + super().__init__(test, params, env) self.source_images = [] self.full_backups = [] self.inc_backups = [] @@ -23,11 +19,11 @@ def __init__(self, test, params, env): def _init_arguments_by_params(self, tag): image_params = self.params.object_params(tag) image_chain = image_params.objects("image_backup_chain") - self.source_images.append("drive_%s" % tag) - self.full_backups.append("drive_%s" % image_chain[0]) - self.inc_backups.append("drive_%s" % image_chain[1]) + self.source_images.append(f"drive_{tag}") + self.full_backups.append(f"drive_{image_chain[0]}") + self.inc_backups.append(f"drive_{image_chain[1]}") self.inc_backup_tags.append(image_chain[1]) - self.bitmaps.append("bitmap_%s" % tag) + self.bitmaps.append(f"bitmap_{tag}") def do_full_backup(self): extra_options = {"sync": "full", "auto_disable_bitmap": False} @@ -36,28 +32,31 @@ def do_full_backup(self): self.source_images, self.full_backups, self.bitmaps, - **extra_options) + **extra_options, + ) def generate_inc_files(self): return list(map(self.generate_data_file, self.src_img_tags)) def do_incremental_backup(self): - extra_options = {"sync": self.inc_sync_mode, - "bitmap-mode": self.inc_bitmap_mode, - "auto_disable_bitmap": False} + extra_options = { + "sync": self.inc_sync_mode, + "bitmap-mode": self.inc_bitmap_mode, + "auto_disable_bitmap": False, + } backup_utils.blockdev_batch_backup( self.main_vm, self.source_images, self.inc_backups, self.bitmaps, - **extra_options) + **extra_options, + ) def get_bitmaps_info(self): out = [] for idx, bitmap in enumerate(self.bitmaps): node = self.source_images[idx] - info = block_dirty_bitmap.get_bitmap_by_name( - self.main_vm, node, bitmap) + info = block_dirty_bitmap.get_bitmap_by_name(self.main_vm, node, bitmap) out.append(info) return out @@ -81,19 +80,17 @@ def _check_bitmaps(): else: return True - refresh_timeout = self.params.get_numeric('refresh_timeout', 10) - if not utils_misc.wait_for(lambda: _check_bitmaps(), - refresh_timeout, 0, 1): - self.test.fail('count of bitmap should be 0 ' - 'after incremental backup') + refresh_timeout = self.params.get_numeric("refresh_timeout", 10) + if not utils_misc.wait_for(lambda: _check_bitmaps(), refresh_timeout, 0, 1): + self.test.fail("count of bitmap should be 0 " "after incremental backup") def check_images(self): self.verify_data_files() def clone_main_vm(self): self.main_vm.destroy() - imgs = [self.params['images'].split()[0]] + self.inc_backup_tags - self.params['images'] = ' '.join(imgs) + imgs = [self.params["images"].split()[0]] + self.inc_backup_tags + self.params["images"] = " ".join(imgs) self.prepare_main_vm() self.clone_vm = self.main_vm diff --git 
a/qemu/tests/blockdev_inc_backup_inconsistent_bitmap.py b/qemu/tests/blockdev_inc_backup_inconsistent_bitmap.py index 4dd010e992..d851dd5dba 100644 --- a/qemu/tests/blockdev_inc_backup_inconsistent_bitmap.py +++ b/qemu/tests/blockdev_inc_backup_inconsistent_bitmap.py @@ -4,9 +4,11 @@ from virttest.qemu_monitor import QMPCmdError from virttest.utils_misc import kill_process_tree -from provider.block_dirty_bitmap import block_dirty_bitmap_add -from provider.block_dirty_bitmap import block_dirty_bitmap_remove -from provider.block_dirty_bitmap import get_bitmap_by_name +from provider.block_dirty_bitmap import ( + block_dirty_bitmap_add, + block_dirty_bitmap_remove, + get_bitmap_by_name, +) from provider.blockdev_live_backup_base import BlockdevLiveBackupBaseTest @@ -14,20 +16,22 @@ class BlockdevIncbkInconsistentBitmap(BlockdevLiveBackupBaseTest): """Inconsistent bitmap tests""" def __init__(self, test, params, env): - super(BlockdevIncbkInconsistentBitmap, self).__init__( - test, params, env) + super().__init__(test, params, env) self._data_image_obj = self.source_disk_define_by_params( - self.params, self._source_images[0]) - self.test_scenario = getattr(self, self.params['test_scenario']) + self.params, self._source_images[0] + ) + self.test_scenario = getattr(self, self.params["test_scenario"]) def prepare_test(self): self.prepare_main_vm() self.prepare_data_disks() def add_persistent_bitmap(self): - kargs = {'bitmap_name': self._bitmaps[0], - 'target_device': self._source_nodes[0], - 'persistent': 'on'} + kargs = { + "bitmap_name": self._bitmaps[0], + "target_device": self._source_nodes[0], + "persistent": "on", + } block_dirty_bitmap_add(self.main_vm, kargs) def is_image_bitmap_existed(self): @@ -35,16 +39,18 @@ def is_image_bitmap_existed(self): return out and self._bitmaps[0] in out def check_bitmap_field(self, **args): - bitmap = get_bitmap_by_name(self.main_vm, self._source_nodes[0], - self._bitmaps[0]) + bitmap = get_bitmap_by_name( + self.main_vm, self._source_nodes[0], self._bitmaps[0] + ) if bitmap is None: - self.test.fail('Failed to get bitmap %s' % self._bitmaps[0]) + self.test.fail(f"Failed to get bitmap {self._bitmaps[0]}") else: for key, value in args.items(): if value != bitmap[key]: - self.test.fail('bitmap field %s is not correct: ' - 'expected %s, got %s' % - (key, value, bitmap[key])) + self.test.fail( + f"bitmap field {key} is not correct: " + f"expected {value}, got {bitmap[key]}" + ) def kill_qemu_and_start_vm(self): """Forcely killing qemu-kvm can make bitmap inconsistent""" @@ -56,7 +62,8 @@ def kill_qemu_and_start_vm(self): def powerdown_and_start_vm(self): self.main_vm.monitor.system_powerdown() if not self.main_vm.wait_for_shutdown( - self.params.get_numeric("shutdown_timeout", 360)): + self.params.get_numeric("shutdown_timeout", 360) + ): self.test.fail("Failed to poweroff vm") self.main_vm.create() self.main_vm.verify_alive() @@ -64,45 +71,46 @@ def powerdown_and_start_vm(self): def handle_bitmap_with_qmp_cmd(self): """Failed to clear/enable/disable an inconsistent bitmap""" - forbidden_actions = ['block-dirty-bitmap-disable', - 'block-dirty-bitmap-enable', - 'block-dirty-bitmap-clear'] + forbidden_actions = [ + "block-dirty-bitmap-disable", + "block-dirty-bitmap-enable", + "block-dirty-bitmap-clear", + ] for action in forbidden_actions: try: - self.main_vm.monitor.cmd(action, - {'node': self._source_nodes[0], - 'name': self._bitmaps[0]}) + self.main_vm.monitor.cmd( + action, {"node": self._source_nodes[0], "name": self._bitmaps[0]} + ) except QMPCmdError as 
e: - error_msg = self.params['error_msg'] % self._bitmaps[0] + error_msg = self.params["error_msg"] % self._bitmaps[0] if error_msg not in str(e): - self.test.fail('Unexpected error: %s' % str(e)) + self.test.fail(f"Unexpected error: {str(e)}") else: - self.test.fail('%s completed unexpectedly' % action) + self.test.fail(f"{action} completed unexpectedly") def remove_bitmap_with_qmp_cmd(self): """Removing an inconsistent bitmap should succeed""" - block_dirty_bitmap_remove(self.main_vm, self._source_nodes[0], - self._bitmaps[0]) - bitmap = get_bitmap_by_name(self.main_vm, self._source_nodes[0], - self._bitmaps[0]) + block_dirty_bitmap_remove(self.main_vm, self._source_nodes[0], self._bitmaps[0]) + bitmap = get_bitmap_by_name( + self.main_vm, self._source_nodes[0], self._bitmaps[0] + ) if bitmap is not None: - self.test.fail('Failed to remove bitmap %s' % self._bitmaps[0]) + self.test.fail(f"Failed to remove bitmap {self._bitmaps[0]}") def repair_bitmap_with_qemu_img(self): """Repair an inconsistent bitmap with qemu-img should succeed""" self.main_vm.destroy() if not self.is_image_bitmap_existed(): - self.test.fail('Persistent bitmap should exist in image') - self._data_image_obj.check(self._data_image_obj.params, - get_data_dir()) + self.test.fail("Persistent bitmap should exist in image") + self._data_image_obj.check(self._data_image_obj.params, get_data_dir()) if self.is_image_bitmap_existed(): - self.test.fail('Persistent bitmap should be removed from image') + self.test.fail("Persistent bitmap should be removed from image") def do_test(self): self.add_persistent_bitmap() - self.generate_inc_files(filename='inc') + self.generate_inc_files(filename="inc") self.powerdown_and_start_vm() self.check_bitmap_field(recording=True) self.kill_qemu_and_start_vm() diff --git a/qemu/tests/blockdev_inc_backup_merge_bitmaps_with_diff_granularity.py b/qemu/tests/blockdev_inc_backup_merge_bitmaps_with_diff_granularity.py index 0d580ea7c6..f320050403 100644 --- a/qemu/tests/blockdev_inc_backup_merge_bitmaps_with_diff_granularity.py +++ b/qemu/tests/blockdev_inc_backup_merge_bitmaps_with_diff_granularity.py @@ -10,41 +10,49 @@ class BlockdevIncbkMergeBitmapsDiffGranularityTest(BlockdevLiveBackupBaseTest): """Merge two bitmaps with different granularities""" def __init__(self, test, params, env): - super(BlockdevIncbkMergeBitmapsDiffGranularityTest, self).__init__( - test, params, env) - self._merged_bitmaps = params.objects('bitmap_merge_list') - self._merged_target = params['bitmap_merge_target'] + super().__init__(test, params, env) + self._merged_bitmaps = params.objects("bitmap_merge_list") + self._merged_target = params["bitmap_merge_target"] self._granularities = random.sample( - params.objects('granularity_list'), len(self._merged_bitmaps)) + params.objects("granularity_list"), len(self._merged_bitmaps) + ) def _get_bitmaps(self): return get_bitmaps_in_device(self.main_vm, self._source_nodes[0]) def check_bitmaps_count(self): """count of both bitmaps should be greater than 0""" - if not all(list(map(lambda b: b and b['count'] > 0, - self._get_bitmaps()))): - self.test.fail('bitmaps count should be greater than 0') + if not all(list(map(lambda b: b and b["count"] > 0, self._get_bitmaps()))): + self.test.fail("bitmaps count should be greater than 0") def add_two_bitmaps(self): - bitmaps = [{'node': self._source_nodes[0], - 'name': b, - 'granularity': int(normalize_data_size(g, "B"))} - for b, g in zip(self._merged_bitmaps, self._granularities)] - job_list = [{'type': 'block-dirty-bitmap-add', 'data': 
data} - for data in bitmaps] + bitmaps = [ + { + "node": self._source_nodes[0], + "name": b, + "granularity": int(normalize_data_size(g, "B")), + } + for b, g in zip(self._merged_bitmaps, self._granularities) + ] + job_list = [ + {"type": "block-dirty-bitmap-add", "data": data} for data in bitmaps + ] self.main_vm.monitor.transaction(job_list) def merge_two_bitmaps(self): - target_bitmap = {'node': self._source_nodes[0], - 'name': self._merged_target, - 'disabled': True} - merged_bitmap = {'node': self._source_nodes[0], - 'bitmaps': self._merged_bitmaps, - 'target': self._merged_target} + target_bitmap = { + "node": self._source_nodes[0], + "name": self._merged_target, + "disabled": True, + } + merged_bitmap = { + "node": self._source_nodes[0], + "bitmaps": self._merged_bitmaps, + "target": self._merged_target, + } job_list = [ - {'type': 'block-dirty-bitmap-add', 'data': target_bitmap}, - {'type': 'block-dirty-bitmap-merge', 'data': merged_bitmap} + {"type": "block-dirty-bitmap-add", "data": target_bitmap}, + {"type": "block-dirty-bitmap-merge", "data": merged_bitmap}, ] self.main_vm.monitor.transaction(job_list) diff --git a/qemu/tests/blockdev_inc_backup_merge_external_bitmaps.py b/qemu/tests/blockdev_inc_backup_merge_external_bitmaps.py index f55e8f9f79..b3793aa694 100644 --- a/qemu/tests/blockdev_inc_backup_merge_external_bitmaps.py +++ b/qemu/tests/blockdev_inc_backup_merge_external_bitmaps.py @@ -4,10 +4,8 @@ class BlockdevIncbkMergeExternalBitmaps(BlockdevLiveBackupBaseTest): - def __init__(self, test, params, env): - super(BlockdevIncbkMergeExternalBitmaps, self).__init__( - test, params, env) + super().__init__(test, params, env) self._inc_bk_images = [] self._inc_bk_nodes = [] self._snapshot_images = [] @@ -19,11 +17,11 @@ def _init_inc_arguments_by_params(self, tag): image_params = self.params.object_params(tag) image_chain = image_params.objects("image_backup_chain") snapshot_img = image_params["snapshot_tag"] - self._inc_bk_nodes.append("drive_%s" % image_chain[1]) + self._inc_bk_nodes.append(f"drive_{image_chain[1]}") self._inc_bk_images.append(image_chain[1]) self._snapshot_images.append(snapshot_img) - self._snapshot_nodes.append("drive_%s" % snapshot_img) - self._merged_bitmaps.append("bitmap_%s" % snapshot_img) + self._snapshot_nodes.append(f"drive_{snapshot_img}") + self._merged_bitmaps.append(f"bitmap_{snapshot_img}") def do_incremental_backup(self): extra_options = {"sync": "incremental"} @@ -32,7 +30,8 @@ def do_incremental_backup(self): self._snapshot_nodes, self._inc_bk_nodes, self._merged_bitmaps, - **extra_options) + **extra_options, + ) def do_snapshot(self): snapshots = [] @@ -42,27 +41,40 @@ def do_snapshot(self): self.trash.append(disk) disk.hotplug(self.main_vm) - snapshots.append({"type": "blockdev-snapshot", - "data": {"node": self._source_nodes[i], - "overlay": self._snapshot_nodes[i]}}) - bitmaps.append({"type": "block-dirty-bitmap-add", - "data": {"node": self._snapshot_nodes[i], - "name": self._merged_bitmaps[i]}}) - self.main_vm.monitor.transaction(snapshots+bitmaps) + snapshots.append( + { + "type": "blockdev-snapshot", + "data": { + "node": self._source_nodes[i], + "overlay": self._snapshot_nodes[i], + }, + } + ) + bitmaps.append( + { + "type": "block-dirty-bitmap-add", + "data": { + "node": self._snapshot_nodes[i], + "name": self._merged_bitmaps[i], + }, + } + ) + self.main_vm.monitor.transaction(snapshots + bitmaps) def merge_external_bitmaps(self): for i, node in enumerate(self._snapshot_nodes): block_dirty_bitmap_merge( - self.main_vm, node, + 
self.main_vm, + node, [{"node": self._source_nodes[i], "name": self._bitmaps[i]}], - self._merged_bitmaps[i] + self._merged_bitmaps[i], ) def do_test(self): self.do_full_backup() - self.generate_inc_files('inc1') + self.generate_inc_files("inc1") self.do_snapshot() - self.generate_inc_files('inc2') + self.generate_inc_files("inc2") self.merge_external_bitmaps() self.main_vm.pause() self.do_incremental_backup() diff --git a/qemu/tests/blockdev_inc_backup_merge_to_nonexist_bitmap.py b/qemu/tests/blockdev_inc_backup_merge_to_nonexist_bitmap.py index 0575edfe34..ec65d10839 100644 --- a/qemu/tests/blockdev_inc_backup_merge_to_nonexist_bitmap.py +++ b/qemu/tests/blockdev_inc_backup_merge_to_nonexist_bitmap.py @@ -8,32 +8,35 @@ class BlkdevIncMergeToNonexistBitmap(BlockdevLiveBackupBaseTest): """Merge two bitmaps to a non-exist target""" def __init__(self, test, params, env): - super(BlkdevIncMergeToNonexistBitmap, self).__init__( - test, params, env) - self._merged_bitmaps = params.objects('bitmap_merge_list') - self._merged_target = params['bitmap_merge_target'] + super().__init__(test, params, env) + self._merged_bitmaps = params.objects("bitmap_merge_list") + self._merged_target = params["bitmap_merge_target"] def add_two_bitmaps(self): - bitmaps = [{'node': self._source_nodes[0], - 'name': bitmap} - for bitmap in self._merged_bitmaps] - job_list = [{'type': 'block-dirty-bitmap-add', 'data': data} - for data in bitmaps] + bitmaps = [ + {"node": self._source_nodes[0], "name": bitmap} + for bitmap in self._merged_bitmaps + ] + job_list = [ + {"type": "block-dirty-bitmap-add", "data": data} for data in bitmaps + ] self.main_vm.monitor.transaction(job_list) def merge_two_bitmaps(self): try: - block_dirty_bitmap_merge(self.main_vm, self._source_nodes[0], - self._merged_bitmaps, - self._merged_target) + block_dirty_bitmap_merge( + self.main_vm, + self._source_nodes[0], + self._merged_bitmaps, + self._merged_target, + ) except QMPCmdError as e: nonexist_target = self._merged_target qmp_error_msg = self.params.get("qmp_error_msg") % nonexist_target if qmp_error_msg not in str(e.data): self.test.fail(str(e)) else: - self.test.fail("Merge to a non-exist bitmap:%s" - % self._merged_target) + self.test.fail(f"Merge to a non-exist bitmap:{self._merged_target}") def do_test(self): self.add_two_bitmaps() diff --git a/qemu/tests/blockdev_inc_backup_merge_with_nonexist_bitmap.py b/qemu/tests/blockdev_inc_backup_merge_with_nonexist_bitmap.py index 8e4568d77d..d09c129c9e 100644 --- a/qemu/tests/blockdev_inc_backup_merge_with_nonexist_bitmap.py +++ b/qemu/tests/blockdev_inc_backup_merge_with_nonexist_bitmap.py @@ -8,26 +8,31 @@ class BlkdevIncMergeWithNonexistBitmap(BlockdevLiveBackupBaseTest): """Merge bitmaps with a non-exist bitmap to the target bitmap""" def __init__(self, test, params, env): - super(BlkdevIncMergeWithNonexistBitmap, self).__init__( - test, params, env) - self._merged_bitmaps = params.objects('bitmap_merge_list') - self._merged_target = params['bitmap_merge_target'] + super().__init__(test, params, env) + self._merged_bitmaps = params.objects("bitmap_merge_list") + self._merged_target = params["bitmap_merge_target"] def add_one_bitmap(self): - args = {'target_device': self._source_nodes[0], - 'bitmap_name': self._merged_bitmaps[0]} + args = { + "target_device": self._source_nodes[0], + "bitmap_name": self._merged_bitmaps[0], + } block_dirty_bitmap_add(self.main_vm, args) def merge_two_bitmaps(self): - target_bitmap = {'node': self._source_nodes[0], - 'name': self._merged_target, - 
'disabled': True} - merged_bitmap = {'node': self._source_nodes[0], - 'bitmaps': self._merged_bitmaps, - 'target': self._merged_target} + target_bitmap = { + "node": self._source_nodes[0], + "name": self._merged_target, + "disabled": True, + } + merged_bitmap = { + "node": self._source_nodes[0], + "bitmaps": self._merged_bitmaps, + "target": self._merged_target, + } job_list = [ - {'type': 'block-dirty-bitmap-add', 'data': target_bitmap}, - {'type': 'block-dirty-bitmap-merge', 'data': merged_bitmap} + {"type": "block-dirty-bitmap-add", "data": target_bitmap}, + {"type": "block-dirty-bitmap-merge", "data": merged_bitmap}, ] try: self.main_vm.monitor.transaction(job_list) @@ -37,8 +42,9 @@ def merge_two_bitmaps(self): if qmp_error_msg not in str(e.data): self.test.fail(str(e)) else: - self.test.fail("Can merge with a non-exist bitmap:%s" - % self._merged_bitmaps[1]) + self.test.fail( + f"Can merge with a non-exist bitmap:{self._merged_bitmaps[1]}" + ) def do_test(self): self.add_one_bitmap() diff --git a/qemu/tests/blockdev_inc_backup_migrate_without_bitmap.py b/qemu/tests/blockdev_inc_backup_migrate_without_bitmap.py index 22f9e2b816..6e8bf9d2e8 100644 --- a/qemu/tests/blockdev_inc_backup_migrate_without_bitmap.py +++ b/qemu/tests/blockdev_inc_backup_migrate_without_bitmap.py @@ -1,58 +1,61 @@ import ast -from provider.block_dirty_bitmap import block_dirty_bitmap_disable -from provider.block_dirty_bitmap import debug_block_dirty_bitmap_sha256 -from provider.block_dirty_bitmap import get_bitmap_by_name +from provider.block_dirty_bitmap import ( + block_dirty_bitmap_disable, + debug_block_dirty_bitmap_sha256, + get_bitmap_by_name, +) from provider.blockdev_live_backup_base import BlockdevLiveBackupBaseTest class BlockdevIncbkMigrateNoBitmap(BlockdevLiveBackupBaseTest): - def __init__(self, test, params, env): - super(BlockdevIncbkMigrateNoBitmap, self).__init__(test, params, env) - self._bitmap_debugged = self.params.get_boolean('bitmap_debugged') + super().__init__(test, params, env) + self._bitmap_debugged = self.params.get_boolean("bitmap_debugged") self._bitmap_sha256 = None def migrate_vm(self): capabilities = ast.literal_eval(self.params["migrate_capabilities"]) - self.main_vm.migrate(self.params.get_numeric('mig_timeout'), - self.params["migration_protocol"], - migrate_capabilities=capabilities, - env=self.env) + self.main_vm.migrate( + self.params.get_numeric("mig_timeout"), + self.params["migration_protocol"], + migrate_capabilities=capabilities, + env=self.env, + ) def check_bitmap_after_migration(self): - bitmap = get_bitmap_by_name(self.main_vm, self._source_nodes[0], - self._bitmaps[0]) + bitmap = get_bitmap_by_name( + self.main_vm, self._source_nodes[0], self._bitmaps[0] + ) if self._bitmap_debugged: if bitmap is None: - self.test.fail('No persistent bitmap was found ' - 'after migration') - if bitmap.get('recording') is not False: - self.test.fail('Persistent bitmap was not disabled ' - 'after migration') - v = debug_block_dirty_bitmap_sha256(self.main_vm, - self._source_nodes[0], - self._bitmaps[0]) + self.test.fail("No persistent bitmap was found " "after migration") + if bitmap.get("recording") is not False: + self.test.fail("Persistent bitmap was not disabled " "after migration") + v = debug_block_dirty_bitmap_sha256( + self.main_vm, self._source_nodes[0], self._bitmaps[0] + ) if self._bitmap_sha256 != v: - self.test.fail('Persistent bitmap sha256 changed ' - 'after migration') + self.test.fail("Persistent bitmap sha256 changed " "after migration") else: if bitmap is not 
None: - self.test.fail('Got non-persistent bitmap unexpectedly ' - 'after migration') + self.test.fail( + "Got non-persistent bitmap unexpectedly " "after migration" + ) def get_bitmap_sha256(self): if self._bitmap_debugged: - v = debug_block_dirty_bitmap_sha256(self.main_vm, - self._source_nodes[0], - self._bitmaps[0]) + v = debug_block_dirty_bitmap_sha256( + self.main_vm, self._source_nodes[0], self._bitmaps[0] + ) if v is None: - self.test.fail('Failed to get persistent bitmap sha256') + self.test.fail("Failed to get persistent bitmap sha256") self._bitmap_sha256 = v def disable_bitmap(self): - block_dirty_bitmap_disable(self.main_vm, self._source_nodes[0], - self._bitmaps[0]) + block_dirty_bitmap_disable( + self.main_vm, self._source_nodes[0], self._bitmaps[0] + ) def do_test(self): self.do_full_backup() diff --git a/qemu/tests/blockdev_inc_backup_mod_readonly_bitmap.py b/qemu/tests/blockdev_inc_backup_mod_readonly_bitmap.py index b8bf564634..5f3ecdfc10 100644 --- a/qemu/tests/blockdev_inc_backup_mod_readonly_bitmap.py +++ b/qemu/tests/blockdev_inc_backup_mod_readonly_bitmap.py @@ -1,8 +1,7 @@ import os import re -from virttest.qemu_monitor import get_monitor_function -from virttest.qemu_monitor import QMPCmdError +from virttest.qemu_monitor import QMPCmdError, get_monitor_function from provider.block_dirty_bitmap import block_dirty_bitmap_add from provider.blockdev_live_backup_base import BlockdevLiveBackupBaseTest @@ -13,49 +12,50 @@ class BlockdevIncbkModRdonlyBitmapTest(BlockdevLiveBackupBaseTest): def prepare_test(self): self.prepare_main_vm() - self._error_msg = '(core dumped)|{pid} Aborted'.format( - pid=self.main_vm.get_pid()) + self._error_msg = f"(core dumped)|{self.main_vm.get_pid()} Aborted" self.prepare_data_disks() def modify_readonly_bitmaps(self): - for act in ['block-dirty-bitmap-clear', 'block-dirty-bitmap-remove']: + for act in ["block-dirty-bitmap-clear", "block-dirty-bitmap-remove"]: f = get_monitor_function(self.main_vm, act) try: f(self._source_nodes[0], self._bitmaps[0]) except QMPCmdError as e: - error_msg = self.params['error_msg'].format( - bitmap=self._bitmaps[0]) + error_msg = self.params["error_msg"].format(bitmap=self._bitmaps[0]) if error_msg not in str(e): - self.test.fail('Unexpected error: %s' % str(e)) + self.test.fail(f"Unexpected error: {str(e)}") else: - self.test.fail('%s succeeded unexpectedly' % act) + self.test.fail(f"{act} succeeded unexpectedly") def add_persistent_bitmap(self): - kargs = {'bitmap_name': self._bitmaps[0], - 'target_device': self._source_nodes[0], - 'persistent': 'on'} + kargs = { + "bitmap_name": self._bitmaps[0], + "target_device": self._source_nodes[0], + "persistent": "on", + } block_dirty_bitmap_add(self.main_vm, kargs) def restart_vm_with_readonly_data_image(self): self.main_vm.monitor.system_powerdown() if not self.main_vm.wait_until_dead(10, 1, 1): self.test.fail("Failed to shutdowm vm and save bitmap") - self.params['image_readonly_%s' % self._source_images[0]] = 'on' + self.params[f"image_readonly_{self._source_images[0]}"] = "on" self.prepare_main_vm() - self._error_msg += '|{pid} Aborted'.format(pid=self.main_vm.get_pid()) + self._error_msg += f"|{self.main_vm.get_pid()} Aborted" def check_qemu_aborted(self): """We used to hit core once, so add this check for future detection""" - log_file = os.path.join(self.test.resultsdir, - self.params.get('debug_log_file', 'debug.log')) - with open(log_file, 'r') as f: + log_file = os.path.join( + self.test.resultsdir, self.params.get("debug_log_file", "debug.log") + ) + with 
open(log_file, "r") as f: out = f.read().strip() if re.search(self._error_msg, out, re.M): - self.test.fail('qemu aborted (core dumped)') + self.test.fail("qemu aborted (core dumped)") def do_test(self): self.add_persistent_bitmap() - self.generate_inc_files('inc1') + self.generate_inc_files("inc1") self.restart_vm_with_readonly_data_image() self.modify_readonly_bitmaps() self.check_qemu_aborted() diff --git a/qemu/tests/blockdev_inc_backup_modify_backing_bitmaps.py b/qemu/tests/blockdev_inc_backup_modify_backing_bitmaps.py index 5962e35b68..728c69da29 100644 --- a/qemu/tests/blockdev_inc_backup_modify_backing_bitmaps.py +++ b/qemu/tests/blockdev_inc_backup_modify_backing_bitmaps.py @@ -1,5 +1,4 @@ -from virttest import utils_qemu -from virttest import utils_misc +from virttest import utils_misc, utils_qemu from virttest.utils_version import VersionInterval from provider import block_dirty_bitmap as bitmap_handle @@ -7,14 +6,17 @@ class BlkIncModifyBackingBitmaps(BlockDevSnapshotTest): - def reopen_backing_image(self, node_name): opts = [] fmt_node = self.main_vm.devices.get_by_qid(node_name)[0] file_node = fmt_node.get_param("file") driver = fmt_node.get_param("driver") - item = {"driver": driver, "node-name": node_name, "file": file_node, - "read-only": False} + item = { + "driver": driver, + "node-name": node_name, + "file": file_node, + "read-only": False, + } qemu_binary = utils_misc.get_qemu_binary(self.params) qemu_version = utils_qemu.get_qemu_version(qemu_binary)[0] required_qemu_version = self.params["required_qemu_version"] @@ -27,19 +29,18 @@ def reopen_backing_image(self, node_name): self.main_vm.monitor.x_blockdev_reopen(args) def add_bitmap(self, node_name): - bitmap = "bitmap_%s" % node_name - kargs = {'bitmap_name': bitmap, - 'target_device': node_name} + bitmap = f"bitmap_{node_name}" + kargs = {"bitmap_name": bitmap, "target_device": node_name} bitmap_handle.block_dirty_bitmap_add(self.main_vm, kargs) self.bitmap_list.append(kargs) def remove_bitmaps(self): actions = [] - bitmap_rm_cmd = self.params.get('bitmap_remove_cmd', - 'block-dirty-bitmap-remove') + bitmap_rm_cmd = self.params.get( + "bitmap_remove_cmd", "block-dirty-bitmap-remove" + ) for item in self.bitmap_list: - bitmap_data = {"node": item["target_device"], - "name": item["bitmap_name"]} + bitmap_data = {"node": item["target_device"], "name": item["bitmap_name"]} actions.append({"type": bitmap_rm_cmd, "data": bitmap_data}) arguments = {"actions": actions} self.main_vm.monitor.cmd("transaction", arguments) @@ -83,7 +84,10 @@ def run(test, params, env): """ base_image = params.get("images", "image1").split()[0] params.update( - {"image_name_%s" % base_image: params["image_name"], - "image_format_%s" % base_image: params["image_format"]}) + { + f"image_name_{base_image}": params["image_name"], + f"image_format_{base_image}": params["image_format"], + } + ) snapshot_test = BlkIncModifyBackingBitmaps(test, params, env) snapshot_test.run_test() diff --git a/qemu/tests/blockdev_inc_backup_never_always.py b/qemu/tests/blockdev_inc_backup_never_always.py index 52024ff196..53fd9555bb 100644 --- a/qemu/tests/blockdev_inc_backup_never_always.py +++ b/qemu/tests/blockdev_inc_backup_never_always.py @@ -1,14 +1,11 @@ from virttest.qemu_monitor import QMPCmdError -from provider import backup_utils -from provider import blockdev_base +from provider import backup_utils, blockdev_base class BlkdevIncNA(blockdev_base.BlockdevBaseTest): - def __init__(self, test, params, env): - super(BlkdevIncNA, self).__init__( - test, params, 
env) + super().__init__(test, params, env) self.source_images = [] self.full_backups = [] self.inc_backups = [] @@ -21,10 +18,10 @@ def __init__(self, test, params, env): def _init_arguments_by_params(self, tag): image_params = self.params.object_params(tag) image_chain = image_params.objects("image_backup_chain") - self.source_images.append("drive_%s" % tag) - self.full_backups.append("drive_%s" % image_chain[0]) - self.inc_backups.append("drive_%s" % image_chain[1]) - self.bitmaps.append("bitmap_%s" % tag) + self.source_images.append(f"drive_{tag}") + self.full_backups.append(f"drive_{image_chain[0]}") + self.inc_backups.append(f"drive_{image_chain[1]}") + self.bitmaps.append(f"bitmap_{tag}") def do_full_backup(self): extra_options = {"sync": "full", "auto_disable_bitmap": False} @@ -33,19 +30,23 @@ def do_full_backup(self): self.source_images, self.full_backups, self.bitmaps, - **extra_options) + **extra_options, + ) def generate_inc_files(self): return list(map(self.generate_data_file, self.src_img_tags)) def do_incremental_backup(self): - extra_options = {"sync": self.inc_sync_mode, - "bitmap": self.bitmaps[0], - "bitmap-mode": self.inc_bitmap_mode, - "auto_disable_bitmap": False} + extra_options = { + "sync": self.inc_sync_mode, + "bitmap": self.bitmaps[0], + "bitmap-mode": self.inc_bitmap_mode, + "auto_disable_bitmap": False, + } inc_backup = backup_utils.blockdev_backup_qmp_cmd - cmd, arguments = inc_backup(self.source_images[0], self.inc_backups[0], - **extra_options) + cmd, arguments = inc_backup( + self.source_images[0], self.inc_backups[0], **extra_options + ) try: self.main_vm.monitor.cmd(cmd, arguments) except QMPCmdError as e: @@ -53,8 +54,9 @@ def do_incremental_backup(self): if qmp_error_msg not in str(e.data): self.test.fail(str(e)) else: - self.test.fail("Inc backup with invalid bitmap mode:%s" - % self.inc_bitmap_mode) + self.test.fail( + f"Inc backup with invalid bitmap mode:{self.inc_bitmap_mode}" + ) def do_test(self): self.do_full_backup() diff --git a/qemu/tests/blockdev_inc_backup_no_bitmap.py b/qemu/tests/blockdev_inc_backup_no_bitmap.py index 7ca1fb4b02..39b0701283 100644 --- a/qemu/tests/blockdev_inc_backup_no_bitmap.py +++ b/qemu/tests/blockdev_inc_backup_no_bitmap.py @@ -1,14 +1,11 @@ from virttest.qemu_monitor import QMPCmdError -from provider import backup_utils -from provider import blockdev_base +from provider import backup_utils, blockdev_base class BlkdevIncNobitmap(blockdev_base.BlockdevBaseTest): - def __init__(self, test, params, env): - super(BlkdevIncNobitmap, self).__init__( - test, params, env) + super().__init__(test, params, env) self.source_images = [] self.full_backups = [] self.inc_backups = [] @@ -20,10 +17,10 @@ def __init__(self, test, params, env): def _init_arguments_by_params(self, tag): image_params = self.params.object_params(tag) image_chain = image_params.objects("image_backup_chain") - self.source_images.append("drive_%s" % tag) - self.full_backups.append("drive_%s" % image_chain[0]) - self.inc_backups.append("drive_%s" % image_chain[1]) - self.bitmaps.append("bitmap_%s" % tag) + self.source_images.append(f"drive_{tag}") + self.full_backups.append(f"drive_{image_chain[0]}") + self.inc_backups.append(f"drive_{image_chain[1]}") + self.bitmaps.append(f"bitmap_{tag}") def do_full_backup(self): extra_options = {"sync": "full"} @@ -32,7 +29,8 @@ def do_full_backup(self): self.source_images, self.full_backups, self.bitmaps, - **extra_options) + **extra_options, + ) def generate_inc_files(self): return list(map(self.generate_data_file, 
self.src_img_tags)) @@ -40,8 +38,9 @@ def generate_inc_files(self): def do_incremental_backup(self): extra_options = {"sync": self.inc_sync_mode} inc_backup = backup_utils.blockdev_backup_qmp_cmd - cmd, arguments = inc_backup(self.source_images[0], self.inc_backups[0], - **extra_options) + cmd, arguments = inc_backup( + self.source_images[0], self.inc_backups[0], **extra_options + ) try: self.main_vm.monitor.cmd(cmd, arguments) except QMPCmdError as e: diff --git a/qemu/tests/blockdev_inc_backup_no_space.py b/qemu/tests/blockdev_inc_backup_no_space.py index 1bce9705ef..fd4559ea1a 100644 --- a/qemu/tests/blockdev_inc_backup_no_space.py +++ b/qemu/tests/blockdev_inc_backup_no_space.py @@ -2,40 +2,39 @@ import os from provider.blockdev_live_backup_base import BlockdevLiveBackupBaseTest -from provider.job_utils import BLOCK_JOB_COMPLETED_EVENT -from provider.job_utils import get_event_by_condition +from provider.job_utils import BLOCK_JOB_COMPLETED_EVENT, get_event_by_condition -LOG_JOB = logging.getLogger('avocado.test') +LOG_JOB = logging.getLogger("avocado.test") class BlockdevIncbkNoSpaceTest(BlockdevLiveBackupBaseTest): """Do full backup to an image without enough space""" def __init__(self, test, params, env): - super(BlockdevIncbkNoSpaceTest, self).__init__(test, params, env) + super().__init__(test, params, env) self._bitmaps = [] def release_target_space(self): - LOG_JOB.info('Release space to extend target image size') - os.unlink(self.params['dummy_image_file']) + LOG_JOB.info("Release space to extend target image size") + os.unlink(self.params["dummy_image_file"]) def check_no_space_error(self): # check 'error' message in BLOCK_JOB_COMPLETED event - tmo = self.params.get_numeric('job_complete_timeout', 900) - event = get_event_by_condition(self.main_vm, - BLOCK_JOB_COMPLETED_EVENT, tmo) + tmo = self.params.get_numeric("job_complete_timeout", 900) + event = get_event_by_condition(self.main_vm, BLOCK_JOB_COMPLETED_EVENT, tmo) if event: - if event['data'].get('error') != self.params['error_msg']: - self.test.fail('Unexpected error: %s' - % event['data'].get('error')) + if event["data"].get("error") != self.params["error_msg"]: + self.test.fail( + "Unexpected error: {}".format(event["data"].get("error")) + ) else: - self.test.fail('Failed to get BLOCK_JOB_COMPLETED event') + self.test.fail("Failed to get BLOCK_JOB_COMPLETED event") def do_test(self): self.do_full_backup() self.check_no_space_error() self.release_target_space() - self._full_backup_options['wait_job_complete'] = True + self._full_backup_options["wait_job_complete"] = True self.do_full_backup() self.prepare_clone_vm() self.verify_data_files() diff --git a/qemu/tests/blockdev_inc_backup_non_persistent_bitmap.py b/qemu/tests/blockdev_inc_backup_non_persistent_bitmap.py index a8b25eba60..b42598f758 100644 --- a/qemu/tests/blockdev_inc_backup_non_persistent_bitmap.py +++ b/qemu/tests/blockdev_inc_backup_non_persistent_bitmap.py @@ -1,14 +1,9 @@ -from provider import backup_utils -from provider import blockdev_base -from provider import block_dirty_bitmap +from provider import backup_utils, block_dirty_bitmap, blockdev_base class BlockdevIncBackupNonPersistentBitmapTest(blockdev_base.BlockdevBaseTest): - def __init__(self, test, params, env): - super(BlockdevIncBackupNonPersistentBitmapTest, self).__init__(test, - params, - env) + super().__init__(test, params, env) self.source_images = [] self.full_backups = [] self.bitmaps = [] @@ -19,9 +14,9 @@ def __init__(self, test, params, env): def _init_arguments_by_params(self, tag): 
image_params = self.params.object_params(tag) image_chain = image_params.objects("image_backup_chain") - self.source_images.append("drive_%s" % tag) - self.full_backups.append("drive_%s" % image_chain[0]) - self.bitmaps.append("bitmap_%s" % tag) + self.source_images.append(f"drive_{tag}") + self.full_backups.append(f"drive_{image_chain[0]}") + self.bitmaps.append(f"bitmap_{tag}") def do_full_backup(self): extra_options = {"sync": "full", "auto_disable_bitmap": False} @@ -30,14 +25,14 @@ def do_full_backup(self): self.source_images, self.full_backups, self.bitmaps, - **extra_options) + **extra_options, + ) def get_bitmaps_info(self): out = [] for idx, bitmap in enumerate(self.bitmaps): node = self.source_images[idx] - info = block_dirty_bitmap.get_bitmap_by_name( - self.main_vm, node, bitmap) + info = block_dirty_bitmap.get_bitmap_by_name(self.main_vm, node, bitmap) out.append(info) return out @@ -54,20 +49,22 @@ def write_files(self): def check_bitmaps(self, file_write=False): bitmaps = self.get_bitmaps_info() if not bitmaps: - self.test.fail('No bitmap was found.') + self.test.fail("No bitmap was found.") for info in bitmaps: # check if bitmap is non-persistent - if info['persistent']: - self.test.fail('It should be non-persistent') + if info["persistent"]: + self.test.fail("It should be non-persistent") # check if count is changed after file writing if file_write: if info["count"] <= self.bitmap_count: - self.test.fail('count of bitmap should be greater than ' - 'the original after writing a file') + self.test.fail( + "count of bitmap should be greater than " + "the original after writing a file" + ) else: - self.bitmap_count = info['count'] + self.bitmap_count = info["count"] def check_image_info(self): # make sure non-persistent bitmaps never exist after VM shutdown @@ -77,10 +74,10 @@ def check_image_info(self): out = disk.info() if out: - if self.params['check_bitmaps'] in out: - self.test.fail('bitmap should not be in image') + if self.params["check_bitmaps"] in out: + self.test.fail("bitmap should not be in image") else: - self.test.error('Error when querying image info by qemu-img') + self.test.error("Error when querying image info by qemu-img") def do_test(self): self.do_full_backup() diff --git a/qemu/tests/blockdev_inc_backup_nospace_with_bitmap_mode.py b/qemu/tests/blockdev_inc_backup_nospace_with_bitmap_mode.py index 9f8494dda7..086bae1674 100644 --- a/qemu/tests/blockdev_inc_backup_nospace_with_bitmap_mode.py +++ b/qemu/tests/blockdev_inc_backup_nospace_with_bitmap_mode.py @@ -2,8 +2,7 @@ from provider.block_dirty_bitmap import get_bitmap_by_name from provider.blockdev_live_backup_base import BlockdevLiveBackupBaseTest -from provider.job_utils import BLOCK_JOB_COMPLETED_EVENT -from provider.job_utils import get_event_by_condition +from provider.job_utils import BLOCK_JOB_COMPLETED_EVENT, get_event_by_condition class BlockdevIncbkIncNospaceWithSyncMode(BlockdevLiveBackupBaseTest): @@ -13,19 +12,16 @@ class BlockdevIncbkIncNospaceWithSyncMode(BlockdevLiveBackupBaseTest): """ def __init__(self, test, params, env): - super(BlockdevIncbkIncNospaceWithSyncMode, self).__init__(test, - params, - env) + super().__init__(test, params, env) self._inc_bitmap_mode = params["inc_bitmap_mode"] - self._inc_bk_nodes = ["drive_%s" % t for t in self._target_images] + self._inc_bk_nodes = [f"drive_{t}" for t in self._target_images] def _get_bitmap_count(self): # let's wait some time to sync bitmap count, for on ppc the count # failed to be synced immediately after creating a new file sleep(10) 
- bm = get_bitmap_by_name(self.main_vm, self._source_nodes[0], - self._bitmaps[0]) + bm = get_bitmap_by_name(self.main_vm, self._source_nodes[0], self._bitmaps[0]) if bm: if bm.get("count", 0) <= 0: self.test.fail("Count of bitmap should be greater than 0") @@ -35,27 +31,30 @@ def _get_bitmap_count(self): def do_incremental_backup(self): self.count_before_incbk = self._get_bitmap_count() - self.inc_job_id = "job_%s" % self._inc_bk_nodes[0] - args = {"device": self._source_nodes[0], - "target": self._inc_bk_nodes[0], - "sync": "bitmap", - "job-id": self.inc_job_id, - "bitmap": self._bitmaps[0], - "bitmap-mode": self._inc_bitmap_mode} + self.inc_job_id = f"job_{self._inc_bk_nodes[0]}" + args = { + "device": self._source_nodes[0], + "target": self._inc_bk_nodes[0], + "sync": "bitmap", + "job-id": self.inc_job_id, + "bitmap": self._bitmaps[0], + "bitmap-mode": self._inc_bitmap_mode, + } self.main_vm.monitor.cmd("blockdev-backup", args) def check_no_space_error(self): - tmo = self.params.get_numeric('job_completed_timeout', 360) - cond = {'device': self.inc_job_id} - event = get_event_by_condition(self.main_vm, - BLOCK_JOB_COMPLETED_EVENT, - tmo, **cond) + tmo = self.params.get_numeric("job_completed_timeout", 360) + cond = {"device": self.inc_job_id} + event = get_event_by_condition( + self.main_vm, BLOCK_JOB_COMPLETED_EVENT, tmo, **cond + ) if event: - if event['data'].get('error') != self.params['error_msg']: - self.test.fail('Unexpected error: %s' - % event['data'].get('error')) + if event["data"].get("error") != self.params["error_msg"]: + self.test.fail( + "Unexpected error: {}".format(event["data"].get("error")) + ) else: - self.test.fail('Failed to get BLOCK_JOB_COMPLETED event') + self.test.fail("Failed to get BLOCK_JOB_COMPLETED event") def check_bitmap_count(self): count_after_incbk = self._get_bitmap_count() diff --git a/qemu/tests/blockdev_inc_backup_pull_mode_diff.py b/qemu/tests/blockdev_inc_backup_pull_mode_diff.py index 465d77a106..6b3182ca1b 100644 --- a/qemu/tests/blockdev_inc_backup_pull_mode_diff.py +++ b/qemu/tests/blockdev_inc_backup_pull_mode_diff.py @@ -1,34 +1,25 @@ -import six import socket - from functools import partial -from provider import backup_utils -from provider import blockdev_base -from provider import job_utils -from provider import block_dirty_bitmap +import six +from virttest import utils_disk, utils_misc +from provider import backup_utils, block_dirty_bitmap, blockdev_base, job_utils from provider.nbd_image_export import InternalNBDExportImage -from virttest import utils_disk -from virttest import utils_misc - class BlockdevIncBackupPullModeDiff(blockdev_base.BlockdevBaseTest): - def __init__(self, test, params, env): - super(BlockdevIncBackupPullModeDiff, self).__init__(test, - params, - env) + super().__init__(test, params, env) self.source_images = [] self.fleecing_full_backups = [] self.fleecing_inc_backups = [] self.full_backup_tags = [] self.inc_backup_tags = [] - self.full_backup_bitmaps = [] # added along with full backup + self.full_backup_bitmaps = [] # added along with full backup self.before_2nd_inc_bitmaps = [] # added before 2nd inc files - self.merged_bitmaps = [] # merge above two into this one - self.inc_backup_bitmaps = [] # added along with inc backup + self.merged_bitmaps = [] # merge above two into this one + self.inc_backup_bitmaps = [] # added along with inc backup self.backup_jobs = [] self.full_backup_nbd_objs = [] self.inc_backup_nbd_objs = [] @@ -38,50 +29,51 @@ def __init__(self, test, params, env): self.inc_backup_nbd_images 
= [] self.src_img_tags = params.objects("source_images") localhost = socket.gethostname() - self.params['nbd_server'] = localhost if localhost else 'localhost' + self.params["nbd_server"] = localhost if localhost else "localhost" list(map(self._init_arguments_by_params, self.src_img_tags)) def _init_arguments_by_params(self, tag): image_params = self.params.object_params(tag) - self.source_images.append("drive_%s" % tag) + self.source_images.append(f"drive_{tag}") # fleecing images bk_tags = image_params.objects("image_backup_chain") - self.fleecing_full_backups.append("drive_%s" % bk_tags[0]) - self.fleecing_inc_backups.append("drive_%s" % bk_tags[1]) + self.fleecing_full_backups.append(f"drive_{bk_tags[0]}") + self.fleecing_inc_backups.append(f"drive_{bk_tags[1]}") # bitmaps - self.full_backup_bitmaps.append("full_bitmap_%s" % tag) - self.before_2nd_inc_bitmaps.append("before_2nd_inc_bitmap_%s" % tag) - self.merged_bitmaps.append("merged_bitmap_%s" % tag) - self.inc_backup_bitmaps.append("inc_bitmap_%s" % tag) - self.params['nbd_export_bitmaps_%s' % - bk_tags[1]] = self.merged_bitmaps[-1] + self.full_backup_bitmaps.append(f"full_bitmap_{tag}") + self.before_2nd_inc_bitmaps.append(f"before_2nd_inc_bitmap_{tag}") + self.merged_bitmaps.append(f"merged_bitmap_{tag}") + self.inc_backup_bitmaps.append(f"inc_bitmap_{tag}") + self.params[f"nbd_export_bitmaps_{bk_tags[1]}"] = self.merged_bitmaps[-1] # nbd images - nbd_image = self.params['nbd_image_%s' % bk_tags[0]] + nbd_image = self.params[f"nbd_image_{bk_tags[0]}"] self.full_backup_nbd_images.append( - self.source_disk_define_by_params(self.params, nbd_image)) - nbd_image = self.params['nbd_image_%s' % bk_tags[1]] + self.source_disk_define_by_params(self.params, nbd_image) + ) + nbd_image = self.params[f"nbd_image_{bk_tags[1]}"] self.inc_backup_nbd_images.append( - self.source_disk_define_by_params(self.params, nbd_image)) + self.source_disk_define_by_params(self.params, nbd_image) + ) # target 'fullbk' image, copy data from exported full bk image to it - fullbk = self.params['client_image_%s' % bk_tags[0]] + fullbk = self.params[f"client_image_{bk_tags[0]}"] disk = self.source_disk_define_by_params(self.params, fullbk) disk.create(disk.params) self.trash.append(disk) self.full_backup_client_images.append(disk) # target 'incbk' image, copy data from exported inc bk image to it - incbk = self.params['client_image_%s' % bk_tags[1]] + incbk = self.params[f"client_image_{bk_tags[1]}"] disk = self.source_disk_define_by_params(self.params, incbk) disk.create(disk.params) self.trash.append(disk) self.inc_backup_client_images.append(disk) # Only hotplug fleecing images for full backup before full-backup - self.params['image_backup_chain_%s' % tag] = bk_tags[0] + self.params[f"image_backup_chain_{tag}"] = bk_tags[0] self.full_backup_tags.append(bk_tags[0]) self.inc_backup_tags.append(bk_tags[1]) @@ -90,29 +82,36 @@ def init_nbd_exports(self): # nbd export objects, used for exporting local images for i, tag in enumerate(self.src_img_tags): self.full_backup_nbd_objs.append( - InternalNBDExportImage(self.main_vm, self.params, - self.full_backup_tags[i])) + InternalNBDExportImage( + self.main_vm, self.params, self.full_backup_tags[i] + ) + ) self.inc_backup_nbd_objs.append( - InternalNBDExportImage(self.main_vm, self.params, - self.inc_backup_tags[i])) + InternalNBDExportImage( + self.main_vm, self.params, self.inc_backup_tags[i] + ) + ) def _copy_data_from_export(self, nbd_imgs, target_imgs, bitmaps=None): for i, nbd_obj in enumerate(nbd_imgs): if bitmaps 
is None: - backup_utils.copyif(self.params, nbd_obj.tag, - target_imgs[i].tag) + backup_utils.copyif(self.params, nbd_obj.tag, target_imgs[i].tag) else: - backup_utils.copyif(self.params, nbd_obj.tag, - target_imgs[i].tag, bitmaps[i]) + backup_utils.copyif( + self.params, nbd_obj.tag, target_imgs[i].tag, bitmaps[i] + ) def copy_full_data_from_export(self): - self._copy_data_from_export(self.full_backup_nbd_images, - self.full_backup_client_images) + self._copy_data_from_export( + self.full_backup_nbd_images, self.full_backup_client_images + ) def copy_inc_data_from_export(self): - self._copy_data_from_export(self.inc_backup_nbd_images, - self.inc_backup_client_images, - self.merged_bitmaps) + self._copy_data_from_export( + self.inc_backup_nbd_images, + self.inc_backup_client_images, + self.merged_bitmaps, + ) def _export_fleecing_images(self, nbd_objs, nodes): for i, obj in enumerate(nbd_objs): @@ -124,43 +123,40 @@ def _stop_export_fleecing_images(self, nbd_objs): obj.stop_export() def export_full_bk_fleecing_imgs(self): - self._export_fleecing_images(self.full_backup_nbd_objs, - self.fleecing_full_backups) + self._export_fleecing_images( + self.full_backup_nbd_objs, self.fleecing_full_backups + ) def stop_export_full_bk_fleecing_imgs(self): self._stop_export_fleecing_images(self.full_backup_nbd_objs) def export_inc_bk_fleecing_imgs(self): - self._export_fleecing_images(self.inc_backup_nbd_objs, - self.fleecing_inc_backups) + self._export_fleecing_images( + self.inc_backup_nbd_objs, self.fleecing_inc_backups + ) def stop_export_inc_bk_fleecing_imgs(self): self._stop_export_fleecing_images(self.inc_backup_nbd_objs) def cancel_backup_jobs(self): for job_id in self.backup_jobs: - arguments = {'id': job_id} - self.main_vm.monitor.cmd('job-cancel', arguments) + arguments = {"id": job_id} + self.main_vm.monitor.cmd("job-cancel", arguments) def generate_inc_files(self): return list(map(self.generate_data_file, self.src_img_tags)) def hotplug_inc_backup_images(self): for idx, tag in enumerate(self.src_img_tags): - self.params['image_backup_chain_%s' % - tag] = self.inc_backup_tags[idx] + self.params[f"image_backup_chain_{tag}"] = self.inc_backup_tags[idx] self.add_target_data_disks() def _do_backup(self, backup_nodes, bitmaps): extra_options = {"sync": "none", "wait_job_complete": False} backup_utils.blockdev_batch_backup( - self.main_vm, - self.source_images, - backup_nodes, - bitmaps, - ** extra_options) - self.backup_jobs = [job['id'] - for job in job_utils.query_jobs(self.main_vm)] + self.main_vm, self.source_images, backup_nodes, bitmaps, **extra_options + ) + self.backup_jobs = [job["id"] for job in job_utils.query_jobs(self.main_vm)] def do_full_backup(self): self._do_backup(self.fleecing_full_backups, self.full_backup_bitmaps) @@ -171,21 +167,22 @@ def do_incremental_backup(self): def restart_vm_with_incbk_images(self): """restart vm with incbk as its data disk""" self.main_vm.destroy() - images = self.params['images'] - self.params['images'] = ' '.join( - [images.split()[0]] + [o.tag for o in self.inc_backup_client_images]) + images = self.params["images"] + self.params["images"] = " ".join( + [images.split()[0]] + [o.tag for o in self.inc_backup_client_images] + ) self.prepare_main_vm() self.clone_vm = self.main_vm - self.params['images'] = images + self.params["images"] = images def rebase_inc_onto_full(self): # rebase target 'incbk' onto target 'fullbk' rebase_funcs = [] for i, tag in enumerate(self.inc_backup_tags): - incbk = self.params['client_image_%s' % tag] - fullbk = 
self.params['client_image_%s' % self.full_backup_tags[i]] + incbk = self.params[f"client_image_{tag}"] + fullbk = self.params[f"client_image_{self.full_backup_tags[i]}"] image_params = self.params.object_params(incbk) - image_params['image_chain'] = '%s %s' % (fullbk, incbk) + image_params["image_chain"] = f"{fullbk} {incbk}" disk = self.source_disk_define_by_params(image_params, incbk) rebase_funcs.append(partial(disk.rebase, params=image_params)) utils_misc.parallel(rebase_funcs) @@ -200,65 +197,80 @@ def check_data_files(self): disks_info[data_img] = self.disks_info[data_img] # Check md5sum for the first three files - super(BlockdevIncBackupPullModeDiff, self).verify_data_files() + super().verify_data_files() # Check the files should not exist try: session = self.clone_vm.wait_for_login() for tag, info in six.iteritems(disks_info): utils_disk.mount(info[0], info[1], session=session) - file_path = "%s/%s" % (info[1], non_existed_files[tag]) - cat_cmd = "cat %s" % file_path + file_path = f"{info[1]}/{non_existed_files[tag]}" + cat_cmd = f"cat {file_path}" s, o = session.cmd_status_output(cat_cmd) if s == 0: - self.test.fail('File (%s) exists' % non_existed_files[tag]) - elif 'No such file' not in o.strip(): - self.test.fail('Unknown error: %s' % o) + self.test.fail(f"File ({non_existed_files[tag]}) exists") + elif "No such file" not in o.strip(): + self.test.fail(f"Unknown error: {o}") finally: session.close() def _handle_bitmaps(self, disabled_list, new_list, **extra): for idx, bitmap in enumerate(disabled_list): block_dirty_bitmap.block_dirty_bitmap_disable( - self.main_vm, self.source_images[idx], bitmap) + self.main_vm, self.source_images[idx], bitmap + ) for idx, bitmap in enumerate(new_list): bitmap_params = {} - bitmap_params['bitmap_name'] = bitmap - bitmap_params['target_device'] = self.source_images[idx] - bitmap_params['disabled'] = extra.pop('disabled', 'off') - block_dirty_bitmap.block_dirty_bitmap_add(self.main_vm, - bitmap_params) + bitmap_params["bitmap_name"] = bitmap + bitmap_params["target_device"] = self.source_images[idx] + bitmap_params["disabled"] = extra.pop("disabled", "off") + block_dirty_bitmap.block_dirty_bitmap_add(self.main_vm, bitmap_params) - merged_list = extra.pop('merged_list', []) + merged_list = extra.pop("merged_list", []) for idx, target in enumerate(merged_list): src_list = [v[idx] for v in extra.values()] block_dirty_bitmap.block_dirty_bitmap_merge( - self.main_vm, self.source_images[idx], src_list, target) + self.main_vm, self.source_images[idx], src_list, target + ) def add_bitmaps_transaction(self): for i, bitmap in enumerate(self.full_backup_bitmaps): - disabled_params = {'bitmap_device_node': self.source_images[i], - 'bitmap_name': bitmap} - added_params = {'bitmap_device_node': self.source_images[i], - 'bitmap_name': self.before_2nd_inc_bitmaps[i]} + disabled_params = { + "bitmap_device_node": self.source_images[i], + "bitmap_name": bitmap, + } + added_params = { + "bitmap_device_node": self.source_images[i], + "bitmap_name": self.before_2nd_inc_bitmaps[i], + } block_dirty_bitmap.handle_block_dirty_bitmap_transaction( - self.main_vm, disabled_params, added_params) + self.main_vm, disabled_params, added_params + ) def merge_bitmaps_transaction(self): for i, bitmap in enumerate(self.before_2nd_inc_bitmaps): - disabled_params = {'bitmap_device_node': self.source_images[i], - 'bitmap_name': bitmap} - added_params = {'bitmap_device_node': self.source_images[i], - 'bitmap_name': self.merged_bitmaps[i], - 'bitmap_disabled': 'on'} - merged_params = 
{'bitmap_device_node': self.source_images[i], - 'bitmap_target': self.merged_bitmaps[i], - 'bitmap_sources': [self.full_backup_bitmaps[i], - self.before_2nd_inc_bitmaps[i]]} + disabled_params = { + "bitmap_device_node": self.source_images[i], + "bitmap_name": bitmap, + } + added_params = { + "bitmap_device_node": self.source_images[i], + "bitmap_name": self.merged_bitmaps[i], + "bitmap_disabled": "on", + } + merged_params = { + "bitmap_device_node": self.source_images[i], + "bitmap_target": self.merged_bitmaps[i], + "bitmap_sources": [ + self.full_backup_bitmaps[i], + self.before_2nd_inc_bitmaps[i], + ], + } block_dirty_bitmap.handle_block_dirty_bitmap_transaction( - self.main_vm, disabled_params, added_params, merged_params) + self.main_vm, disabled_params, added_params, merged_params + ) def do_test(self): self.init_nbd_exports() diff --git a/qemu/tests/blockdev_inc_backup_pull_mode_test.py b/qemu/tests/blockdev_inc_backup_pull_mode_test.py index f72fe7745e..5fc33a65ee 100644 --- a/qemu/tests/blockdev_inc_backup_pull_mode_test.py +++ b/qemu/tests/blockdev_inc_backup_pull_mode_test.py @@ -1,26 +1,19 @@ -import six -import socket import logging +import socket -from provider import backup_utils -from provider import blockdev_base -from provider import job_utils +import six +from virttest import qemu_storage, utils_disk +from provider import backup_utils, blockdev_base, job_utils from provider.nbd_image_export import InternalNBDExportImage from provider.virt_storage.storage_admin import sp_admin -from virttest import qemu_storage -from virttest import utils_disk - -LOG_JOB = logging.getLogger('avocado.test') +LOG_JOB = logging.getLogger("avocado.test") class BlockdevIncBackupPullModeTest(blockdev_base.BlockdevBaseTest): - def __init__(self, test, params, env): - super(BlockdevIncBackupPullModeTest, self).__init__(test, - params, - env) + super().__init__(test, params, env) self.source_images = [] self.full_backups = [] self.inc_backups = [] @@ -36,77 +29,86 @@ def __init__(self, test, params, env): self.inc_backup_nbd_images = [] self.src_img_tags = params.objects("source_images") localhost = socket.gethostname() - self.params['nbd_server'] = localhost if localhost else 'localhost' + self.params["nbd_server"] = localhost if localhost else "localhost" list(map(self._init_arguments_by_params, self.src_img_tags)) def _init_arguments_by_params(self, tag): image_params = self.params.object_params(tag) bk_tags = image_params.objects("backup_images") - self.source_images.append("drive_%s" % tag) + self.source_images.append(f"drive_{tag}") # fleecing image used for full backup, to be exported by nbd - self.full_backups.append("drive_%s" % bk_tags[0]) - self.full_backup_bitmaps.append("full_bitmap_%s" % tag) + self.full_backups.append(f"drive_{bk_tags[0]}") + self.full_backup_bitmaps.append(f"full_bitmap_{tag}") # fleecing image used for inc backup, to be exported by nbd - self.inc_backups.append("drive_%s" % bk_tags[1]) - self.inc_backup_bitmaps.append("inc_bitmap_%s" % tag) + self.inc_backups.append(f"drive_{bk_tags[1]}") + self.inc_backup_bitmaps.append(f"inc_bitmap_{tag}") # nbd export image used full backup - nbd_image = self.params['nbd_image_%s' % bk_tags[0]] - disk = qemu_storage.QemuImg(self.params.object_params(nbd_image), - None, nbd_image) + nbd_image = self.params[f"nbd_image_{bk_tags[0]}"] + disk = qemu_storage.QemuImg( + self.params.object_params(nbd_image), None, nbd_image + ) self.full_backup_nbd_images.append(disk) # nbd export image used for inc backup - nbd_image = 
self.params['nbd_image_%s' % bk_tags[1]] - disk = qemu_storage.QemuImg(self.params.object_params(nbd_image), - None, nbd_image) + nbd_image = self.params[f"nbd_image_{bk_tags[1]}"] + disk = qemu_storage.QemuImg( + self.params.object_params(nbd_image), None, nbd_image + ) self.inc_backup_nbd_images.append(disk) # local image used for copying data from nbd export image(full backup) - client_image = self.params['client_image_%s' % bk_tags[0]] + client_image = self.params[f"client_image_{bk_tags[0]}"] disk = self.source_disk_define_by_params( - self.params.object_params(client_image), client_image) + self.params.object_params(client_image), client_image + ) disk.create(self.params) self.trash.append(disk) self.full_backup_client_images.append(disk) # local image used for copying data from nbd export images(inc backup) - client_image = self.params['client_image_%s' % bk_tags[1]] + client_image = self.params[f"client_image_{bk_tags[1]}"] disk = self.source_disk_define_by_params( - self.params.object_params(client_image), client_image) + self.params.object_params(client_image), client_image + ) disk.create(self.params) self.trash.append(disk) self.inc_backup_client_images.append(disk) # disable bitmap created in full backup when doing inc backup - self.disabled_bitmaps.append("full_bitmap_%s" % tag) + self.disabled_bitmaps.append(f"full_bitmap_{tag}") def init_nbd_exports(self): def _init_nbd_exports(tag): bk_tags = self.params.object_params(tag).objects("backup_images") self.full_backup_nbd_objs.append( - InternalNBDExportImage(self.main_vm, self.params, bk_tags[0])) + InternalNBDExportImage(self.main_vm, self.params, bk_tags[0]) + ) - self.params['nbd_export_bitmaps_%s' % - bk_tags[1]] = "full_bitmap_%s" % tag + self.params[f"nbd_export_bitmaps_{bk_tags[1]}"] = f"full_bitmap_{tag}" self.inc_backup_nbd_objs.append( - InternalNBDExportImage(self.main_vm, self.params, bk_tags[1])) + InternalNBDExportImage(self.main_vm, self.params, bk_tags[1]) + ) list(map(_init_nbd_exports, self.src_img_tags)) def full_copyif(self): for i, nbd_obj in enumerate(self.full_backup_nbd_images): - backup_utils.copyif(self.params, nbd_obj.tag, - self.full_backup_client_images[i].tag) + backup_utils.copyif( + self.params, nbd_obj.tag, self.full_backup_client_images[i].tag + ) def inc_copyif(self): for i, nbd_obj in enumerate(self.inc_backup_nbd_images): - backup_utils.copyif(self.params, nbd_obj.tag, - self.inc_backup_client_images[i].tag, - self.full_backup_bitmaps[i]) + backup_utils.copyif( + self.params, + nbd_obj.tag, + self.inc_backup_client_images[i].tag, + self.full_backup_bitmaps[i], + ) def export_full_backups(self): for i, obj in enumerate(self.full_backup_nbd_objs): @@ -128,8 +130,8 @@ def stop_export_inc_backups(self): def cancel_backup_jobs(self): for job_id in self.backup_jobs: - arguments = {'id': job_id} - self.main_vm.monitor.cmd('job-cancel', arguments) + arguments = {"id": job_id} + self.main_vm.monitor.cmd("job-cancel", arguments) def do_full_backup(self): extra_options = {"sync": "none", "wait_job_complete": False} @@ -138,54 +140,60 @@ def do_full_backup(self): self.source_images, self.full_backups, self.full_backup_bitmaps, - **extra_options) - self.backup_jobs = [job['id'] - for job in job_utils.query_jobs(self.main_vm)] + **extra_options, + ) + self.backup_jobs = [job["id"] for job in job_utils.query_jobs(self.main_vm)] def generate_inc_files(self): return list(map(self.generate_data_file, self.src_img_tags)) - def add_target_data_disks(self, bktype='full'): + def add_target_data_disks(self, 
bktype="full"): """Hot add target disk to VM with qmp monitor""" for tag in self.params.objects("source_images"): image_params = self.params.object_params(tag) - img = image_params['full_backup_image'] if bktype == 'full' else image_params['inc_backup_image'] + img = ( + image_params["full_backup_image"] + if bktype == "full" + else image_params["inc_backup_image"] + ) disk = self.target_disk_define_by_params(self.params, img) disk.hotplug(self.main_vm) self.trash.append(disk) def do_incremental_backup(self): - extra_options = {"sync": "none", - "disabled_bitmaps": self.disabled_bitmaps, - "wait_job_complete": False} + extra_options = { + "sync": "none", + "disabled_bitmaps": self.disabled_bitmaps, + "wait_job_complete": False, + } backup_utils.blockdev_batch_backup( self.main_vm, self.source_images, self.inc_backups, self.inc_backup_bitmaps, - **extra_options) - self.backup_jobs = [job['id'] - for job in job_utils.query_jobs(self.main_vm)] + **extra_options, + ) + self.backup_jobs = [job["id"] for job in job_utils.query_jobs(self.main_vm)] def restart_vm_with_backup_images(self): """restart vm with back2 as its data disk""" self.main_vm.destroy() images = self.params["images"].split()[0] for obj in self.inc_backup_client_images: - images += ' %s' % obj.tag - self.params['images'] = images + images += f" {obj.tag}" + self.params["images"] = images self.prepare_main_vm() self.clone_vm = self.main_vm def clean_images(self): for img in self.trash: try: - if hasattr(img, 'remove'): + if hasattr(img, "remove"): img.remove() else: sp_admin.remove_volume(img) except Exception as e: - LOG_JOB.warn(str(e)) + LOG_JOB.warning(str(e)) def rebase_backup_image(self): """rebase image back2 onto back1""" @@ -206,22 +214,22 @@ def verify_data_files(self): disks_info[data_img] = self.disks_info[data_img] # Check md5sum for the first two files - super(BlockdevIncBackupPullModeTest, self).verify_data_files() + super().verify_data_files() # Check the files should not exist on back2 session = self.clone_vm.wait_for_login() try: for tag, info in six.iteritems(disks_info): utils_disk.mount(info[0], info[1], session=session) - file_path = "%s/%s" % (info[1], non_existed_files[tag]) - cat_cmd = "cat %s" % file_path + file_path = f"{info[1]}/{non_existed_files[tag]}" + cat_cmd = f"cat {file_path}" - LOG_JOB.info('Check %s should not exist', file_path) + LOG_JOB.info("Check %s should not exist", file_path) s, o = session.cmd_status_output(cat_cmd) if s == 0: - self.test.fail('File (%s) exists' % non_existed_files[tag]) - elif 'No such file' not in o.strip(): - self.test.fail('Unknown error: %s' % o) + self.test.fail(f"File ({non_existed_files[tag]}) exists") + elif "No such file" not in o.strip(): + self.test.fail(f"Unknown error: {o}") finally: if session: session.close() @@ -234,7 +242,7 @@ def do_test(self): self.full_copyif() self.cancel_backup_jobs() self.stop_export_full_backups() - self.add_target_data_disks('inc') + self.add_target_data_disks("inc") self.do_incremental_backup() self.export_inc_backups() self.generate_inc_files() diff --git a/qemu/tests/blockdev_inc_backup_pull_mode_vm_down.py b/qemu/tests/blockdev_inc_backup_pull_mode_vm_down.py index 04bfd445f8..ec38e3cb5c 100644 --- a/qemu/tests/blockdev_inc_backup_pull_mode_vm_down.py +++ b/qemu/tests/blockdev_inc_backup_pull_mode_vm_down.py @@ -4,7 +4,6 @@ from multiprocessing import Process from avocado.utils import process - from virttest.qemu_devices.qdevices import QBlockdevFormatNode from virttest.utils_misc import wait_for @@ -13,34 +12,34 @@ from 
provider.job_utils import query_jobs from provider.nbd_image_export import InternalNBDExportImage -LOG_JOB = logging.getLogger('avocado.test') +LOG_JOB = logging.getLogger("avocado.test") class BlockdevIncBackupPullModePoweroffVMTest(BlockdevLiveBackupBaseTest): """Poweroff VM during pulling image from 4 clients""" def __init__(self, test, params, env): - super(BlockdevIncBackupPullModePoweroffVMTest, self).__init__( - test, params, env) + super().__init__(test, params, env) self._is_qemu_hang = False self._job = None self._nbd_export = None localhost = socket.gethostname() - self.params['nbd_server'] = localhost if localhost else 'localhost' + self.params["nbd_server"] = localhost if localhost else "localhost" # the fleecing image to be exported - self.params['image_name_image1'] = self.params['image_name'] - self.params['image_format_image1'] = self.params['image_format'] + self.params["image_name_image1"] = self.params["image_name"] + self.params["image_format_image1"] = self.params["image_format"] self._fleecing_image_obj = self.source_disk_define_by_params( - self.params, self._full_bk_images[0]) + self.params, self._full_bk_images[0] + ) self.trash.append(self._fleecing_image_obj) # local target images, where data is copied from nbd image self._clients = [] self._client_image_objs = [] - nbd_image = self.params['nbd_image_%s' % self._full_bk_images[0]] - for tag in self.params.objects('client_images'): + nbd_image = self.params[f"nbd_image_{self._full_bk_images[0]}"] + for tag in self.params.objects("client_images"): self._client_image_objs.append( self.source_disk_define_by_params(self.params, tag) ) @@ -53,16 +52,16 @@ def add_target_data_disks(self): tag = self._fleecing_image_obj.tag devices = self.main_vm.devices.images_define_by_params( - tag, self.params.object_params(tag), 'disk') + tag, self.params.object_params(tag), "disk" + ) devices.pop() # ignore the front end device for dev in devices: if isinstance(dev, QBlockdevFormatNode): dev.params["backing"] = self._source_nodes[0] - ret = self.main_vm.devices.simple_hotplug( - dev, self.main_vm.monitor) + ret = self.main_vm.devices.simple_hotplug(dev, self.main_vm.monitor) if not ret[1]: - self.test.fail("Failed to hotplug '%s'" % dev) + self.test.fail(f"Failed to hotplug '{dev}'") def generate_data_file(self, tag, filename=None): """ @@ -73,35 +72,41 @@ def generate_data_file(self, tag, filename=None): self.disks_info = {} def prepare_test(self): - super(BlockdevIncBackupPullModePoweroffVMTest, self).prepare_test() + super().prepare_test() self._nbd_export = InternalNBDExportImage( - self.main_vm, self.params, self._full_bk_images[0]) + self.main_vm, self.params, self._full_bk_images[0] + ) self._nbd_export.start_nbd_server() for obj in self._client_image_objs: obj.create(self.params) def _wait_till_all_qemu_io_active(self): def _wait_till_qemu_io_active(tag): - for i in range(self.params.get_numeric('cmd_timeout', 20)*10): - if process.system(self.params['grep_qemu_io_cmd'] % tag, - ignore_status=True, shell=True) == 0: + for i in range(self.params.get_numeric("cmd_timeout", 20) * 10): + if ( + process.system( + self.params["grep_qemu_io_cmd"] % tag, + ignore_status=True, + shell=True, + ) + == 0 + ): break time.sleep(0.1) else: - self.test.error('Failed to detect the active qemu-io process') + self.test.error("Failed to detect the active qemu-io process") - list(map(_wait_till_qemu_io_active, - [o.tag for o in self._client_image_objs])) + list(map(_wait_till_qemu_io_active, [o.tag for o in self._client_image_objs])) def 
_poweroff_vm_during_data_copy(self, session): self._wait_till_all_qemu_io_active() - session.cmd(cmd='poweroff', ignore_all_errors=True) - tmo = self.params.get_numeric('vm_down_timeout', 300) + session.cmd(cmd="poweroff", ignore_all_errors=True) + tmo = self.params.get_numeric("vm_down_timeout", 300) if not wait_for(self.main_vm.is_dead, timeout=tmo): # qemu should quit after vm poweroff, or we have to do some checks self._check_qemu_responsive() else: - LOG_JOB.info('qemu quit after vm poweroff') + LOG_JOB.info("qemu quit after vm poweroff") def destroy_vms(self): if self._is_qemu_hang: @@ -117,9 +122,9 @@ def _check_qemu_responsive(self): self.main_vm.monitor.cmd(cmd="query-status", timeout=10) except Exception as e: self._is_qemu_hang = True - self.test.fail('qemu hangs: %s' % str(e)) + self.test.fail(f"qemu hangs: {str(e)}") else: - self.test.error('qemu keeps alive unexpectedly after vm poweroff') + self.test.error("qemu keeps alive unexpectedly after vm poweroff") def pull_data_and_poweroff_vm_in_parallel(self): """pull data and poweroff vm in parallel""" @@ -136,8 +141,8 @@ def export_full_bk_fleecing_img(self): self._nbd_export.add_nbd_image(self._full_bk_nodes[0]) def do_full_backup(self): - super(BlockdevIncBackupPullModePoweroffVMTest, self).do_full_backup() - self._job = [job['id'] for job in query_jobs(self.main_vm)][0] + super().do_full_backup() + self._job = [job["id"] for job in query_jobs(self.main_vm)][0] def do_test(self): self.do_full_backup() diff --git a/qemu/tests/blockdev_inc_backup_pull_mode_vm_reboot.py b/qemu/tests/blockdev_inc_backup_pull_mode_vm_reboot.py index 973f9991d9..b8f1fabe9c 100644 --- a/qemu/tests/blockdev_inc_backup_pull_mode_vm_reboot.py +++ b/qemu/tests/blockdev_inc_backup_pull_mode_vm_reboot.py @@ -4,7 +4,6 @@ import time from avocado.utils import process - from virttest import utils_misc from virttest.qemu_devices.qdevices import QBlockdevFormatNode @@ -18,25 +17,24 @@ class BlockdevIncBackupPullModeRebootVMTest(BlockdevLiveBackupBaseTest): """Reboot VM during pulling image from client""" def __init__(self, test, params, env): - super(BlockdevIncBackupPullModeRebootVMTest, self).__init__( - test, params, env) + super().__init__(test, params, env) self._job = None self._nbd_export = None localhost = socket.gethostname() - self.params['nbd_server'] = localhost if localhost else 'localhost' + self.params["nbd_server"] = localhost if localhost else "localhost" # the fleecing image to be exported - self.params['image_name_image1'] = self.params['image_name'] - self.params['image_format_image1'] = self.params['image_format'] + self.params["image_name_image1"] = self.params["image_name"] + self.params["image_format_image1"] = self.params["image_format"] self._fleecing_image_obj = self.source_disk_define_by_params( - self.params, self._full_bk_images[0]) + self.params, self._full_bk_images[0] + ) self.trash.append(self._fleecing_image_obj) # local target image, where data is copied from nbd image self._client_image_obj = self.source_disk_define_by_params( - self.params, - self.params['client_image_%s' % self._full_bk_images[0]] + self.params, self.params[f"client_image_{self._full_bk_images[0]}"] ) self.trash.append(self._client_image_obj) self._target_images = [self._client_image_obj.tag] @@ -46,16 +44,16 @@ def add_target_data_disks(self): tag = self._fleecing_image_obj.tag devices = self.main_vm.devices.images_define_by_params( - tag, self.params.object_params(tag), 'disk') + tag, self.params.object_params(tag), "disk" + ) devices.pop() # ignore the 
front end device for dev in devices: if isinstance(dev, QBlockdevFormatNode): dev.params["backing"] = self._source_nodes[0] - ret = self.main_vm.devices.simple_hotplug( - dev, self.main_vm.monitor) + ret = self.main_vm.devices.simple_hotplug(dev, self.main_vm.monitor) if not ret[1]: - self.test.fail("Failed to hotplug '%s'" % dev) + self.test.fail(f"Failed to hotplug '{dev}'") def generate_data_file(self, tag, filename=None): """ @@ -70,63 +68,63 @@ def remove_files_from_system_image(self, tmo=60): pass def prepare_test(self): - super(BlockdevIncBackupPullModeRebootVMTest, self).prepare_test() + super().prepare_test() self._nbd_export = InternalNBDExportImage( - self.main_vm, self.params, self._full_bk_images[0]) + self.main_vm, self.params, self._full_bk_images[0] + ) self._nbd_export.start_nbd_server() self._client_image_obj.create(self.params) - self._error_msg = '{pid} Aborted|(core dumped)'.format( - pid=self.main_vm.get_pid()) + self._error_msg = f"{self.main_vm.get_pid()} Aborted|(core dumped)" def export_full_bk_fleecing_img(self): self._nbd_export.add_nbd_image(self._full_bk_nodes[0]) def do_full_backup(self): - super(BlockdevIncBackupPullModeRebootVMTest, self).do_full_backup() - self._job = [job['id'] for job in query_jobs(self.main_vm)][0] + super().do_full_backup() + self._job = [job["id"] for job in query_jobs(self.main_vm)][0] def _copy_full_data_from_export(self): - nbd_image = self.params['nbd_image_%s' % self._full_bk_images[0]] + nbd_image = self.params[f"nbd_image_{self._full_bk_images[0]}"] copyif(self.params, nbd_image, self._client_image_obj.tag) def _wait_till_qemu_io_active(self): - for i in range(self.params.get_numeric('cmd_timeout', 20)*10): - if process.system('ps -C qemu-io', - ignore_status=True, shell=True) == 0: + for i in range(self.params.get_numeric("cmd_timeout", 20) * 10): + if process.system("ps -C qemu-io", ignore_status=True, shell=True) == 0: break time.sleep(0.1) else: - self.test.error('Cannot detect the active qemu-io process') + self.test.error("Cannot detect the active qemu-io process") def _reboot_vm_during_data_copy(self): self._wait_till_qemu_io_active() self.main_vm.reboot(method="system_reset") def _is_qemu_aborted(self): - log_file = os.path.join(self.test.resultsdir, - self.params.get('debug_log_file', 'debug.log')) - with open(log_file, 'r') as f: + log_file = os.path.join( + self.test.resultsdir, self.params.get("debug_log_file", "debug.log") + ) + with open(log_file, "r") as f: out = f.read().strip() return re.search(self._error_msg, out, re.M) is not None def pull_data_and_reboot_vm_in_parallel(self): """run data copy and vm reboot in parallel""" - targets = [self._reboot_vm_during_data_copy, - self._copy_full_data_from_export] + targets = [self._reboot_vm_during_data_copy, self._copy_full_data_from_export] try: utils_misc.parallel(targets) - except Exception as e: + except Exception: if self._is_qemu_aborted(): - self.test.fail('qemu aborted(core dumped)') + self.test.fail("qemu aborted(core dumped)") else: raise def cancel_job(self): - self.main_vm.monitor.cmd('job-cancel', {'id': self._job}) + self.main_vm.monitor.cmd("job-cancel", {"id": self._job}) def check_clone_vm_login(self): session = self.clone_vm.wait_for_login( - timeout=self.params.get_numeric('login_timeout', 300)) + timeout=self.params.get_numeric("login_timeout", 300) + ) session.close() def do_test(self): diff --git a/qemu/tests/blockdev_inc_backup_remove_bitmap.py b/qemu/tests/blockdev_inc_backup_remove_bitmap.py index ec91d712a9..9e499500c2 100644 --- 
a/qemu/tests/blockdev_inc_backup_remove_bitmap.py +++ b/qemu/tests/blockdev_inc_backup_remove_bitmap.py @@ -1,5 +1,4 @@ -from provider.block_dirty_bitmap import get_bitmap_by_name -from provider.block_dirty_bitmap import block_dirty_bitmap_remove +from provider.block_dirty_bitmap import block_dirty_bitmap_remove, get_bitmap_by_name from provider.blockdev_live_backup_base import BlockdevLiveBackupBaseTest @@ -7,24 +6,31 @@ class BlockdevIncbkRemoveBitmapTest(BlockdevLiveBackupBaseTest): """Persistent bitmaps remove testing""" def _get_bitmaps(self): - return list(map( - lambda n, b: get_bitmap_by_name(self.main_vm, n, b), - self._source_nodes, self._bitmaps)) + return list( + map( + lambda n, b: get_bitmap_by_name(self.main_vm, n, b), + self._source_nodes, + self._bitmaps, + ) + ) def remove_bitmaps(self): - list(map( - lambda n, b: block_dirty_bitmap_remove(self.main_vm, n, b), - self._source_nodes, self._bitmaps)) + list( + map( + lambda n, b: block_dirty_bitmap_remove(self.main_vm, n, b), + self._source_nodes, + self._bitmaps, + ) + ) def check_bitmaps_gone_from_qmp(self): """bitmaps should be gone from output of query-block""" if any(list(map(lambda b: b is not None, self._get_bitmaps()))): - self.test.fail('bitmap was found unexpectedly.') + self.test.fail("bitmap was found unexpectedly.") def check_bitmaps_count_gt_zero(self): """count should be greater than 0""" - if not all(list(map(lambda b: b and b['count'] > 0, - self._get_bitmaps()))): + if not all(list(map(lambda b: b and b["count"] > 0, self._get_bitmaps()))): self.test.fail("bitmaps' count should be greater than 0") def do_test(self): diff --git a/qemu/tests/blockdev_inc_backup_resize.py b/qemu/tests/blockdev_inc_backup_resize.py index da50e34854..6717d573d5 100644 --- a/qemu/tests/blockdev_inc_backup_resize.py +++ b/qemu/tests/blockdev_inc_backup_resize.py @@ -1,14 +1,11 @@ from virttest import utils_numeric -from provider import backup_utils -from provider import blockdev_base -from provider import block_dirty_bitmap +from provider import backup_utils, block_dirty_bitmap, blockdev_base class BlockdevIncBackupResizeTest(blockdev_base.BlockdevBaseTest): - def __init__(self, test, params, env): - super(BlockdevIncBackupResizeTest, self).__init__(test, params, env) + super().__init__(test, params, env) self.source_images = [] self.full_backups = [] self.bitmaps = [] @@ -19,27 +16,34 @@ def __init__(self, test, params, env): def _init_arguments_by_params(self, tag): image_params = self.params.object_params(tag) image_chain = image_params.objects("image_backup_chain") - self.source_images.append("drive_%s" % tag) - self.full_backups.append("drive_%s" % image_chain[0]) - self.bitmaps.append("bitmap_%s" % tag) + self.source_images.append(f"drive_{tag}") + self.full_backups.append(f"drive_{image_chain[0]}") + self.bitmaps.append(f"bitmap_{tag}") # Extend or shrink image size based on its original size self.src_img_sizes.append( - int(float( - utils_numeric.normalize_data_size(image_params['image_size'], - order_magnitude="B"))) + int( + float( + utils_numeric.normalize_data_size( + image_params["image_size"], order_magnitude="B" + ) + ) + ) ) def do_full_backup(self): - extra_options = {"sync": "full", - "persistent": True, - "auto_disable_bitmap": False} + extra_options = { + "sync": "full", + "persistent": True, + "auto_disable_bitmap": False, + } backup_utils.blockdev_batch_backup( self.main_vm, self.source_images, self.full_backups, self.bitmaps, - **extra_options) + **extra_options, + ) def prepare_data_disk(self, tag): """ @@ 
-52,16 +56,16 @@ def gen_inc_files(self): return list(map(self.generate_data_file, self.src_img_tags)) def check_bitmaps(self, node_name, bitmap_name): - bitmap = block_dirty_bitmap.get_bitmap_by_name(self.main_vm, - node_name, - bitmap_name) + bitmap = block_dirty_bitmap.get_bitmap_by_name( + self.main_vm, node_name, bitmap_name + ) # check if bitmap exists if bitmap is None: - self.test.fail('Failed to get bitmap') + self.test.fail("Failed to get bitmap") # check if bitmap is persistent - if not bitmap['persistent']: - self.test.fail('Bitmap should be persistent') + if not bitmap["persistent"]: + self.test.fail("Bitmap should be persistent") def check_image_bitmaps_existed(self): # make sure persistent bitmaps always exist after VM shutdown @@ -70,27 +74,26 @@ def check_image_bitmaps_existed(self): out = disk.info() if out: - if self.params['check_bitmaps'] not in out: - self.test.fail('Persistent bitmaps should be in image') + if self.params["check_bitmaps"] not in out: + self.test.fail("Persistent bitmaps should be in image") else: - self.test.error('Error when querying image info with qemu-img') + self.test.error("Error when querying image info with qemu-img") def check_image_size(self, node_name, block_size): for d in self.main_vm.monitor.cmd("query-block"): - if d['inserted']['node-name'] == node_name: - node = d['inserted']['image'] + if d["inserted"]["node-name"] == node_name: + node = d["inserted"]["image"] break else: - self.test.error('Error when querying %s with query-block' - % node_name) + self.test.error(f"Error when querying {node_name} with query-block") - if int(node['virtual-size']) != block_size: - self.test.fail('image size %s != %s after block_resize') + if int(node["virtual-size"]) != block_size: + self.test.fail("image size %s != %s after block_resize") def block_resize_data_disks(self): - for ratio in self.params.objects('disk_change_ratio'): + for ratio in self.params.objects("disk_change_ratio"): for idx, tag in enumerate(self.src_img_tags): - image_params = self.params.object_params(tag) + self.params.object_params(tag) block_size = int(self.src_img_sizes[idx] * float(ratio)) args = (None, block_size, self.source_images[idx]) self.main_vm.monitor.block_resize(*args) diff --git a/qemu/tests/blockdev_inc_backup_rm_persistent_bitmap.py b/qemu/tests/blockdev_inc_backup_rm_persistent_bitmap.py index 4f457134e2..1e97e17fce 100644 --- a/qemu/tests/blockdev_inc_backup_rm_persistent_bitmap.py +++ b/qemu/tests/blockdev_inc_backup_rm_persistent_bitmap.py @@ -1,6 +1,8 @@ -from provider.block_dirty_bitmap import get_bitmap_by_name -from provider.block_dirty_bitmap import block_dirty_bitmap_disable -from provider.block_dirty_bitmap import block_dirty_bitmap_remove +from provider.block_dirty_bitmap import ( + block_dirty_bitmap_disable, + block_dirty_bitmap_remove, + get_bitmap_by_name, +) from provider.blockdev_live_backup_base import BlockdevLiveBackupBaseTest @@ -8,50 +10,72 @@ class BlockdevIncbkRmPersistentBitmapTest(BlockdevLiveBackupBaseTest): """Persistent bitmaps remove testing""" def disable_bitmaps(self): - list(map( - lambda n, b: block_dirty_bitmap_disable(self.main_vm, n, b), - self._source_nodes, self._bitmaps)) + list( + map( + lambda n, b: block_dirty_bitmap_disable(self.main_vm, n, b), + self._source_nodes, + self._bitmaps, + ) + ) def get_bitmaps(self): - return list(map( - lambda n, b: get_bitmap_by_name(self.main_vm, n, b), - self._source_nodes, self._bitmaps)) + return list( + map( + lambda n, b: get_bitmap_by_name(self.main_vm, n, b), + 
self._source_nodes, + self._bitmaps, + ) + ) def remove_bitmaps(self): - list(map( - lambda n, b: block_dirty_bitmap_remove(self.main_vm, n, b), - self._source_nodes, self._bitmaps)) + list( + map( + lambda n, b: block_dirty_bitmap_remove(self.main_vm, n, b), + self._source_nodes, + self._bitmaps, + ) + ) def powerdown_and_start_vm(self): self.main_vm.monitor.system_powerdown() if not self.main_vm.wait_for_shutdown( - self.params.get_numeric("shutdown_timeout", 360)): + self.params.get_numeric("shutdown_timeout", 360) + ): self.test.fail("Failed to poweroff vm") self.main_vm.create() self.main_vm.verify_alive() def check_image_bitmaps_gone(self): """bitmaps should be removed""" + def _check(tag): out = self.source_disk_define_by_params(self.params, tag).info() if out: - if self.params['check_bitmaps'] in out: - self.test.fail( - 'Persistent bitmaps should be gone in image') + if self.params["check_bitmaps"] in out: + self.test.fail("Persistent bitmaps should be gone in image") else: - self.test.error('Error when querying image info with qemu-img') + self.test.error("Error when querying image info with qemu-img") list(map(_check, self._source_images)) def check_bitmaps_not_changed(self): """bitmap's count should keep the same, status should be 'disabled'""" bitmaps_info = self.get_bitmaps() - if not all(list(map( - lambda b1, b2: (b1 and b2 - and b2['count'] > 0 - and b1['count'] == b2['count'] - and (b2['recording'] is False)), - self._bitmaps_info, bitmaps_info))): + if not all( + list( + map( + lambda b1, b2: ( + b1 + and b2 + and b2["count"] > 0 + and b1["count"] == b2["count"] + and (b2["recording"] is False) + ), + self._bitmaps_info, + bitmaps_info, + ) + ) + ): self.test.fail("bitmaps' count or status changed") def record_bitmaps_info(self): diff --git a/qemu/tests/blockdev_inc_backup_sync_bitmap_nobitmap.py b/qemu/tests/blockdev_inc_backup_sync_bitmap_nobitmap.py index 2914c9bba7..f57fff40c4 100644 --- a/qemu/tests/blockdev_inc_backup_sync_bitmap_nobitmap.py +++ b/qemu/tests/blockdev_inc_backup_sync_bitmap_nobitmap.py @@ -1,14 +1,11 @@ from virttest.qemu_monitor import QMPCmdError -from provider import backup_utils -from provider import blockdev_base +from provider import backup_utils, blockdev_base class BlkdevIncSyncbpNobp(blockdev_base.BlockdevBaseTest): - def __init__(self, test, params, env): - super(BlkdevIncSyncbpNobp, self).__init__( - test, params, env) + super().__init__(test, params, env) self.source_images = [] self.full_backups = [] self.inc_backups = [] @@ -20,10 +17,10 @@ def __init__(self, test, params, env): def _init_arguments_by_params(self, tag): image_params = self.params.object_params(tag) image_chain = image_params.objects("image_backup_chain") - self.source_images.append("drive_%s" % tag) - self.full_backups.append("drive_%s" % image_chain[0]) - self.inc_backups.append("drive_%s" % image_chain[1]) - self.bitmaps.append("bitmap_%s" % tag) + self.source_images.append(f"drive_{tag}") + self.full_backups.append(f"drive_{image_chain[0]}") + self.inc_backups.append(f"drive_{image_chain[1]}") + self.bitmaps.append(f"bitmap_{tag}") def do_full_backup(self): extra_options = {"sync": "full", "auto_disable_bitmap": False} @@ -32,17 +29,18 @@ def do_full_backup(self): self.source_images, self.full_backups, self.bitmaps, - **extra_options) + **extra_options, + ) def generate_inc_files(self): return list(map(self.generate_data_file, self.src_img_tags)) def do_incremental_backup(self): - extra_options = {"sync": self.inc_sync_mode, - "auto_disable_bitmap": False} + 
extra_options = {"sync": self.inc_sync_mode, "auto_disable_bitmap": False} inc_backup = backup_utils.blockdev_backup_qmp_cmd - cmd, arguments = inc_backup(self.source_images[0], self.inc_backups[0], - **extra_options) + cmd, arguments = inc_backup( + self.source_images[0], self.inc_backups[0], **extra_options + ) try: self.main_vm.monitor.cmd(cmd, arguments) except QMPCmdError as e: diff --git a/qemu/tests/blockdev_inc_backup_target_not_exist.py b/qemu/tests/blockdev_inc_backup_target_not_exist.py index 37d37d6453..27dd07d5cd 100644 --- a/qemu/tests/blockdev_inc_backup_target_not_exist.py +++ b/qemu/tests/blockdev_inc_backup_target_not_exist.py @@ -10,9 +10,12 @@ class BlockdevIncbkNonExistedTarget(BlockdevLiveBackupBaseTest): """Do incremental backup with a non-existed target""" def add_bitmap(self): - kargs = {'bitmap_name': self._bitmaps[0], - 'target_device': self._source_nodes[0], - 'persistent': 'off', 'disabled': 'off'} + kargs = { + "bitmap_name": self._bitmaps[0], + "target_device": self._source_nodes[0], + "persistent": "off", + "disabled": "off", + } block_dirty_bitmap_add(self.main_vm, kargs) def prepare_test(self): @@ -22,18 +25,20 @@ def prepare_test(self): def do_incremental_backup(self): try: self.main_vm.monitor.cmd( - 'blockdev-backup', - {'device': self._source_nodes[0], - 'target': self.params['non_existed_target'], - 'bitmap': self._bitmaps[0], - 'sync': 'incremental'} + "blockdev-backup", + { + "device": self._source_nodes[0], + "target": self.params["non_existed_target"], + "bitmap": self._bitmaps[0], + "sync": "incremental", + }, ) except QMPCmdError as e: - error_msg = self.params.get('error_msg') + error_msg = self.params.get("error_msg") if not re.search(error_msg, str(e)): - self.test.fail('Unexpected error: %s' % str(e)) + self.test.fail(f"Unexpected error: {str(e)}") else: - self.test.fail('blockdev-backup succeeded unexpectedly') + self.test.fail("blockdev-backup succeeded unexpectedly") def do_test(self): self.do_incremental_backup() diff --git a/qemu/tests/blockdev_inc_backup_test.py b/qemu/tests/blockdev_inc_backup_test.py index f7d4d49fd9..2436b3c7fc 100644 --- a/qemu/tests/blockdev_inc_backup_test.py +++ b/qemu/tests/blockdev_inc_backup_test.py @@ -1,78 +1,74 @@ import logging - from functools import partial from avocado.utils import memory +from virttest import qemu_monitor, utils_misc -from virttest import utils_misc -from virttest import qemu_monitor - -from provider import backup_utils -from provider import blockdev_base +from provider import backup_utils, blockdev_base -LOG_JOB = logging.getLogger('avocado.test') +LOG_JOB = logging.getLogger("avocado.test") class BlockdevIncreamentalBackupTest(blockdev_base.BlockdevBaseTest): - def __init__(self, test, params, env): - super(BlockdevIncreamentalBackupTest, self).__init__(test, params, env) + super().__init__(test, params, env) self.source_images = [] self.full_backups = [] self.inc_backups = [] self.bitmaps = [] self.rebase_targets = [] - for tag in params.objects('source_images'): + for tag in params.objects("source_images"): image_params = params.object_params(tag) image_chain = image_params.objects("image_backup_chain") - self.source_images.append("drive_%s" % tag) - self.full_backups.append("drive_%s" % image_chain[0]) - self.inc_backups.append("drive_%s" % image_chain[1]) - self.bitmaps.append("bitmap_%s" % tag) + self.source_images.append(f"drive_{tag}") + self.full_backups.append(f"drive_{image_chain[0]}") + self.inc_backups.append(f"drive_{image_chain[1]}") + 
self.bitmaps.append(f"bitmap_{tag}") inc_img_tag = image_chain[-1] inc_img_params = params.object_params(inc_img_tag) # rebase 'inc' image onto 'base' image, so inc's backing is base - inc_img_params['image_chain'] = image_params['image_backup_chain'] - inc_img = self.source_disk_define_by_params( - inc_img_params, inc_img_tag) + inc_img_params["image_chain"] = image_params["image_backup_chain"] + inc_img = self.source_disk_define_by_params(inc_img_params, inc_img_tag) target_func = partial(inc_img.rebase, params=inc_img_params) self.rebase_targets.append(target_func) def get_granularity(self): - granularity = self.params.get('granularity') - if granularity == 'random': - blacklist = self.params.objects('granularity_blacklist') + granularity = self.params.get("granularity") + if granularity == "random": + blacklist = self.params.objects("granularity_blacklist") granularity = backup_utils.generate_log2_value( - 512, 2147483648, 1, blacklist) + 512, 2147483648, 1, blacklist + ) return granularity def do_full_backup(self): extra_options = {"sync": "full", "auto_disable_bitmap": False} - if self.params.get('auto_dismiss') == 'no': - extra_options['auto_dismiss'] = False - extra_options['auto_finalize'] = False + if self.params.get("auto_dismiss") == "no": + extra_options["auto_dismiss"] = False + extra_options["auto_finalize"] = False granularity = self.get_granularity() if granularity is not None: - extra_options['granularity'] = granularity + extra_options["granularity"] = granularity LOG_JOB.info("bitmap granularity is '%s' ", granularity) backup_utils.blockdev_batch_backup( self.main_vm, self.source_images, self.full_backups, self.bitmaps, - **extra_options) + **extra_options, + ) def generate_inc_files(self): for tag in self.params.objects("source_images"): self.generate_data_file(tag) def do_incremental_backup(self): - extra_options = {'sync': 'incremental', 'auto_disable_bitmap': False} - if self.params.get("completion_mode") == 'grouped': - extra_options['completion_mode'] = 'grouped' - if self.params.get('negative_test') == 'yes': - extra_options['wait_job_complete'] = False + extra_options = {"sync": "incremental", "auto_disable_bitmap": False} + if self.params.get("completion_mode") == "grouped": + extra_options["completion_mode"] = "grouped" + if self.params.get("negative_test") == "yes": + extra_options["wait_job_complete"] = False # Unwrap blockdev_batch_backup to catch the exception backup_func = backup_utils.blockdev_batch_backup.__wrapped__ try: @@ -81,19 +77,21 @@ def do_incremental_backup(self): self.source_images, self.inc_backups, self.bitmaps, - **extra_options) + **extra_options, + ) except qemu_monitor.QMPCmdError as e: - if self.params['error_msg'] not in str(e): - self.test.fail('Unexpect error: %s' % str(e)) + if self.params["error_msg"] not in str(e): + self.test.fail(f"Unexpect error: {str(e)}") else: - self.test.fail('expect incremental backup job(s) failed') + self.test.fail("expect incremental backup job(s) failed") else: backup_utils.blockdev_batch_backup( self.main_vm, self.source_images, self.inc_backups, self.bitmaps, - **extra_options) + **extra_options, + ) def rebase_target_disk(self): return utils_misc.parallel(self.rebase_targets) @@ -104,7 +102,7 @@ def prepare_clone_vm(self): clone_params = self.main_vm.params.copy() for tag in self.params.objects("source_images"): img_params = self.params.object_params(tag) - image_chain = img_params.objects('image_backup_chain') + image_chain = img_params.objects("image_backup_chain") images = images.replace(tag, 
image_chain[-1]) clone_params["images"] = images clone_vm = self.main_vm.clone(params=clone_params) diff --git a/qemu/tests/blockdev_inc_backup_with_complete_mode.py b/qemu/tests/blockdev_inc_backup_with_complete_mode.py index 9ae81d2702..e427056493 100644 --- a/qemu/tests/blockdev_inc_backup_with_complete_mode.py +++ b/qemu/tests/blockdev_inc_backup_with_complete_mode.py @@ -9,20 +9,24 @@ class BlockdevIncbkIncWithCompleteModeNeg(BlockdevLiveBackupBaseTest): """ def __init__(self, test, params, env): - super(BlockdevIncbkIncWithCompleteModeNeg, self).__init__(test, - params, - env) - self._inc_bk_nodes = ["drive_%s" % t for t in self._target_images] - self._job_ids = ["job_%s" % n for n in self._inc_bk_nodes] + super().__init__(test, params, env) + self._inc_bk_nodes = [f"drive_{t}" for t in self._target_images] + self._job_ids = [f"job_{n}" for n in self._inc_bk_nodes] def do_incremental_backup(self): - job_list = [{'type': 'blockdev-backup', - 'data': {'device': self._source_nodes[i], - 'target': self._inc_bk_nodes[i], - 'sync': 'incremental', - 'job-id': self._job_ids[i], - 'bitmap': self._bitmaps[i]}} - for i, _ in enumerate(self._source_nodes)] + job_list = [ + { + "type": "blockdev-backup", + "data": { + "device": self._source_nodes[i], + "target": self._inc_bk_nodes[i], + "sync": "incremental", + "job-id": self._job_ids[i], + "bitmap": self._bitmaps[i], + }, + } + for i, _ in enumerate(self._source_nodes) + ] arguments = {"actions": job_list} if self.params.get("completion_mode"): arguments["properties"] = { @@ -36,33 +40,33 @@ def check_first_job_status(self): grouped: the 1st job should be cancelled default: the 1st job should complete without any error """ - job_event = 'BLOCK_JOB_CANCELLED' if self.params.get( - "completion_mode") == "grouped" else 'BLOCK_JOB_COMPLETED' - tmo = self.params.get_numeric('job_completed_timeout', 360) - cond = {'device': self._job_ids[0]} + job_event = ( + "BLOCK_JOB_CANCELLED" + if self.params.get("completion_mode") == "grouped" + else "BLOCK_JOB_COMPLETED" + ) + tmo = self.params.get_numeric("job_completed_timeout", 360) + cond = {"device": self._job_ids[0]} event = get_event_by_condition(self.main_vm, job_event, tmo, **cond) if event: - if event['data'].get('error'): - self.test.fail('Unexpected error: %s' % event['data']['error']) + if event["data"].get("error"): + self.test.fail("Unexpected error: {}".format(event["data"]["error"])) else: - self.test.fail('Failed to get %s for the first job' % job_event) + self.test.fail(f"Failed to get {job_event} for the first job") def check_second_job_no_space_error(self): """ We always get the 'no enough space' error for the 2nd job """ - tmo = self.params.get_numeric('job_completed_timeout', 360) - cond = {'device': self._job_ids[1]} - event = get_event_by_condition(self.main_vm, - 'BLOCK_JOB_COMPLETED', - tmo, **cond) + tmo = self.params.get_numeric("job_completed_timeout", 360) + cond = {"device": self._job_ids[1]} + event = get_event_by_condition(self.main_vm, "BLOCK_JOB_COMPLETED", tmo, **cond) if event: - if event['data'].get('error') != self.params['error_msg']: - self.test.fail('Unexpected error: %s' - % event['data']['error']) + if event["data"].get("error") != self.params["error_msg"]: + self.test.fail("Unexpected error: {}".format(event["data"]["error"])) else: - self.test.fail('Failed to get BLOCK_JOB_COMPLETED event') + self.test.fail("Failed to get BLOCK_JOB_COMPLETED event") def do_test(self): self.do_full_backup() diff --git a/qemu/tests/blockdev_inc_backup_with_guest_agent.py 
b/qemu/tests/blockdev_inc_backup_with_guest_agent.py index ce4768e7a1..2ba468807a 100644 --- a/qemu/tests/blockdev_inc_backup_with_guest_agent.py +++ b/qemu/tests/blockdev_inc_backup_with_guest_agent.py @@ -1,16 +1,13 @@ from functools import partial -from provider import backup_utils -from provider import blockdev_base +from virttest import guest_agent, utils_misc -from virttest import utils_misc -from virttest import guest_agent +from provider import backup_utils, blockdev_base class BlockdevIncbkFSFreezeTest(blockdev_base.BlockdevBaseTest): - def __init__(self, test, params, env): - super(BlockdevIncbkFSFreezeTest, self).__init__(test, params, env) + super().__init__(test, params, env) self.source_images = [] self.full_backups = [] self.inc_backups = [] @@ -23,19 +20,17 @@ def __init__(self, test, params, env): def _init_arguments_by_params(self, tag): image_params = self.params.object_params(tag) image_chain = image_params.objects("image_backup_chain") - self.source_images.append("drive_%s" % tag) - self.full_backups.append("drive_%s" % image_chain[0]) - self.inc_backups.append("drive_%s" % image_chain[1]) + self.source_images.append(f"drive_{tag}") + self.full_backups.append(f"drive_{image_chain[0]}") + self.inc_backups.append(f"drive_{image_chain[1]}") self.inc_backup_tags.append(image_chain[1]) - self.bitmaps.append("bitmap_%s" % tag) + self.bitmaps.append(f"bitmap_{tag}") # rebase 'inc' image onto 'base' image, so inc's backing is base inc_img_params = self.params.object_params(image_chain[1]) - inc_img_params['image_chain'] = image_params['image_backup_chain'] - inc_img = self.source_disk_define_by_params(inc_img_params, - image_chain[1]) - self.rebase_funcs.append(partial(inc_img.rebase, - params=inc_img_params)) + inc_img_params["image_chain"] = image_params["image_backup_chain"] + inc_img = self.source_disk_define_by_params(inc_img_params, image_chain[1]) + self.rebase_funcs.append(partial(inc_img.rebase, params=inc_img_params)) def do_full_backup(self): extra_options = {"sync": "full", "auto_disable_bitmap": False} @@ -44,7 +39,8 @@ def do_full_backup(self): self.source_images, self.full_backups, self.bitmaps, - **extra_options) + **extra_options, + ) def generate_inc_files(self): return list(map(self.generate_data_file, self.src_img_tags)) @@ -56,24 +52,27 @@ def do_incremental_backup(self): self.source_images, self.inc_backups, self.bitmaps, - **extra_options) + **extra_options, + ) def restart_vm_with_inc(self): - images = self.params['images'] - self.params['images'] = ' '.join( - [images.split()[0]] + self.inc_backup_tags) + images = self.params["images"] + self.params["images"] = " ".join([images.split()[0]] + self.inc_backup_tags) self.prepare_main_vm() self.clone_vm = self.main_vm - self.params['images'] = images + self.params["images"] = images def prepare_test(self): - super(BlockdevIncbkFSFreezeTest, self).prepare_test() - params = self.params.object_params(self.params['agent_name']) + super().prepare_test() + params = self.params.object_params(self.params["agent_name"]) params["monitor_filename"] = self.main_vm.get_serial_console_filename( - self.params['agent_name']) + self.params["agent_name"] + ) self.guest_agent = guest_agent.QemuAgent( - self.main_vm, self.params['agent_name'], - self.params['agent_serial_type'], params + self.main_vm, + self.params["agent_name"], + self.params["agent_serial_type"], + params, ) # bz1747960, enable virt_qemu_ga_read_nonsecurity_files before freeze, @@ -81,8 +80,8 @@ def prepare_test(self): # no need to restore the setting for a 
VM reboot can restore it s = self.main_vm.wait_for_login() try: - if s.cmd_status(self.params['enable_nonsecurity_files_cmd']) != 0: - s.cmd_status(self.params['enable_permissive_cmd']) + if s.cmd_status(self.params["enable_nonsecurity_files_cmd"]) != 0: + s.cmd_status(self.params["enable_permissive_cmd"]) finally: s.close() diff --git a/qemu/tests/blockdev_inc_backup_with_ignore.py b/qemu/tests/blockdev_inc_backup_with_ignore.py index 1462e3e3f3..61494fad01 100644 --- a/qemu/tests/blockdev_inc_backup_with_ignore.py +++ b/qemu/tests/blockdev_inc_backup_with_ignore.py @@ -1,18 +1,16 @@ from avocado.utils import process - -from virttest.lvm import EmulatedLVM from virttest.data_dir import get_data_dir +from virttest.lvm import EmulatedLVM -from provider import backup_utils -from provider import job_utils +from provider import backup_utils, job_utils from provider.blockdev_live_backup_base import BlockdevLiveBackupBaseTest class BlkdevIncWithIgnore(BlockdevLiveBackupBaseTest): - """ live backup with on-target-error:ignore """ + """live backup with on-target-error:ignore""" def __init__(self, test, params, env): - super(BlkdevIncWithIgnore, self).__init__(test, params, env) + super().__init__(test, params, env) # TODO: Workaound lvm setup till VT enhances emulated image creation self.lv_size = params["lv_size"] params["lv_size"] = params["emulated_image_size"] @@ -22,41 +20,49 @@ def _create_inc_dir(self): try: self._lvm.setup() self._lvm.lvs[-1].resize(self.lv_size) - process.system(self.params["storage_prepare_cmd"], - ignore_status=False, shell=True) + process.system( + self.params["storage_prepare_cmd"], ignore_status=False, shell=True + ) except: self._clean_inc_dir() raise def _clean_inc_dir(self): - process.system(self.params["storage_clean_cmd"], - ignore_status=False, shell=True) + process.system( + self.params["storage_clean_cmd"], ignore_status=False, shell=True + ) self._lvm.cleanup() - def generate_tempfile(self, root_dir, filename, size='10M', timeout=360): - super(BlkdevIncWithIgnore, self).generate_tempfile( - root_dir, filename, self.params['tempfile_size'], timeout) + def generate_tempfile(self, root_dir, filename, size="10M", timeout=360): + super().generate_tempfile( + root_dir, filename, self.params["tempfile_size"], timeout + ) def do_incremental_backup(self): - extra_options = {"sync": self.params["inc_sync_mode"], - "bitmap": self._bitmaps[0], - "on-target-error": self.params["on_target_error"], - "auto_disable_bitmap": False} + extra_options = { + "sync": self.params["inc_sync_mode"], + "bitmap": self._bitmaps[0], + "on-target-error": self.params["on_target_error"], + "auto_disable_bitmap": False, + } inc_backup = backup_utils.blockdev_backup_qmp_cmd - cmd, arguments = inc_backup(self._source_nodes[0], - self.params["inc_node"], - **extra_options) + cmd, arguments = inc_backup( + self._source_nodes[0], self.params["inc_node"], **extra_options + ) self.main_vm.monitor.cmd(cmd, arguments) timeout = self.params.get("job_timeout", 600) job_id = arguments.get("job-id", self._source_nodes[0]) get_event = job_utils.get_event_by_condition - event = get_event(self.main_vm, job_utils.BLOCK_JOB_ERROR_EVENT, - timeout, device=job_id, action='ignore') + event = get_event( + self.main_vm, + job_utils.BLOCK_JOB_ERROR_EVENT, + timeout, + device=job_id, + action="ignore", + ) if not event: - self.test.fail("Backup job can't reach error after %s seconds" - % timeout) - process.system(self.params['lv_extend_cmd'], - ignore_status=False, shell=True) + self.test.fail(f"Backup job can't 
reach error after {timeout} seconds") + process.system(self.params["lv_extend_cmd"], ignore_status=False, shell=True) job_utils.wait_until_block_job_completed(self.main_vm, job_id, timeout) def rebase_backup_images(self): @@ -93,7 +99,7 @@ def do_test(self): def post_test(self): if self.main_vm.is_alive(): self.main_vm.destroy() - super(BlkdevIncWithIgnore, self).post_test() + super().post_test() self._clean_inc_dir() diff --git a/qemu/tests/blockdev_inc_backup_with_migration.py b/qemu/tests/blockdev_inc_backup_with_migration.py index e871c24bbf..171cc46bd1 100644 --- a/qemu/tests/blockdev_inc_backup_with_migration.py +++ b/qemu/tests/blockdev_inc_backup_with_migration.py @@ -1,18 +1,14 @@ import ast - from functools import partial -from provider import backup_utils -from provider import blockdev_base -from provider import block_dirty_bitmap - from virttest import utils_misc +from provider import backup_utils, block_dirty_bitmap, blockdev_base -class BlockdevIncbkWithMigration(blockdev_base.BlockdevBaseTest): +class BlockdevIncbkWithMigration(blockdev_base.BlockdevBaseTest): def __init__(self, test, params, env): - super(BlockdevIncbkWithMigration, self).__init__(test, params, env) + super().__init__(test, params, env) self.source_images = [] self.full_backups = [] self.inc_backups = [] @@ -26,23 +22,21 @@ def __init__(self, test, params, env): def _init_arguments_by_params(self, tag): image_params = self.params.object_params(tag) image_chain = image_params.objects("image_backup_chain") - self.source_images.append("drive_%s" % tag) - self.full_backups.append("drive_%s" % image_chain[0]) - self.inc_backups.append("drive_%s" % image_chain[1]) + self.source_images.append(f"drive_{tag}") + self.full_backups.append(f"drive_{image_chain[0]}") + self.inc_backups.append(f"drive_{image_chain[1]}") self.inc_backup_tags.append(image_chain[1]) - self.bitmaps.append("bitmap_%s" % tag) - self.bitmap_counts["bitmap_%s" % tag] = None + self.bitmaps.append(f"bitmap_{tag}") + self.bitmap_counts[f"bitmap_{tag}"] = None # rebase 'inc' image onto 'base' image, so inc's backing is base inc_img_params = self.params.object_params(image_chain[1]) - inc_img_params['image_chain'] = image_params['image_backup_chain'] - inc_img = self.source_disk_define_by_params(inc_img_params, - image_chain[1]) - self.rebase_funcs.append(partial(inc_img.rebase, - params=inc_img_params)) + inc_img_params["image_chain"] = image_params["image_backup_chain"] + inc_img = self.source_disk_define_by_params(inc_img_params, image_chain[1]) + self.rebase_funcs.append(partial(inc_img.rebase, params=inc_img_params)) # Only hotplug full backup images before full-backup - self.params['image_backup_chain_%s' % tag] = image_chain[0] + self.params[f"image_backup_chain_{tag}"] = image_chain[0] def do_full_backup(self): extra_options = {"sync": "full", "auto_disable_bitmap": False} @@ -51,7 +45,8 @@ def do_full_backup(self): self.source_images, self.full_backups, self.bitmaps, - **extra_options) + **extra_options, + ) def generate_inc_files(self): return list(map(self.generate_data_file, self.src_img_tags)) @@ -63,56 +58,59 @@ def do_incremental_backup(self): self.source_images, self.inc_backups, self.bitmaps, - **extra_options) + **extra_options, + ) def restart_vm_with_inc(self): - images = self.params['images'] - self.params['images'] = ' '.join( - [images.split()[0]] + self.inc_backup_tags) + images = self.params["images"] + self.params["images"] = " ".join([images.split()[0]] + self.inc_backup_tags) self.prepare_main_vm() self.clone_vm = 
self.main_vm - self.params['images'] = images + self.params["images"] = images def hotplug_inc_backup_disks(self): for idx, tag in enumerate(self.src_img_tags): - self.params['image_backup_chain_%s' % - tag] = self.inc_backup_tags[idx] + self.params[f"image_backup_chain_{tag}"] = self.inc_backup_tags[idx] self.add_target_data_disks() def disable_bitmaps(self): for idx, bitmap in enumerate(self.bitmaps): # disable function has already checked if the bitmap was disabled block_dirty_bitmap.block_dirty_bitmap_disable( - self.main_vm, self.source_images[idx], bitmap) + self.main_vm, self.source_images[idx], bitmap + ) # record the count of the bitmap info = block_dirty_bitmap.get_bitmap_by_name( - self.main_vm, self.source_images[idx], bitmap) - self.bitmap_counts[info['name']] = info['count'] + self.main_vm, self.source_images[idx], bitmap + ) + self.bitmap_counts[info["name"]] = info["count"] def get_bitmaps_info(self): out = [] for idx, bitmap in enumerate(self.bitmaps): info = block_dirty_bitmap.get_bitmap_by_name( - self.main_vm, self.source_images[idx], bitmap) + self.main_vm, self.source_images[idx], bitmap + ) out.append(info) return out def check_bitmaps(self): for info in self.get_bitmaps_info(): if info is None: - self.test.fail('Failed to get bitmaps after migration') - if info['recording'] is not False: - self.test.fail('Bitmap was not disabled after migration') - if info['count'] != self.bitmap_counts[info['name']]: - self.test.fail('Count of bitmap was changed after migration') + self.test.fail("Failed to get bitmaps after migration") + if info["recording"] is not False: + self.test.fail("Bitmap was not disabled after migration") + if info["count"] != self.bitmap_counts[info["name"]]: + self.test.fail("Count of bitmap was changed after migration") def migrate_vm(self): mig_timeout = float(self.params["mig_timeout"]) mig_protocol = self.params["migration_protocol"] capabilities = ast.literal_eval(self.params["migrate_capabilities"]) - self.main_vm.migrate(mig_timeout, mig_protocol, - migrate_capabilities=capabilities, env=self.env) + self.main_vm.migrate( + mig_timeout, mig_protocol, migrate_capabilities=capabilities, env=self.env + ) def rebase_inc_onto_base(self): return utils_misc.parallel(self.rebase_funcs) diff --git a/qemu/tests/blockdev_inc_backup_without_bitmap.py b/qemu/tests/blockdev_inc_backup_without_bitmap.py index 146dd9bf90..f36aa4b55e 100644 --- a/qemu/tests/blockdev_inc_backup_without_bitmap.py +++ b/qemu/tests/blockdev_inc_backup_without_bitmap.py @@ -11,17 +11,23 @@ def prepare_test(self): self.add_target_data_disks() def do_incremental_backup(self): - job_list = [{'type': 'blockdev-backup', - 'data': {'device': self._source_nodes[0], - 'target': self._full_bk_nodes[0], - 'sync': 'incremental'}}] + job_list = [ + { + "type": "blockdev-backup", + "data": { + "device": self._source_nodes[0], + "target": self._full_bk_nodes[0], + "sync": "incremental", + }, + } + ] try: self.main_vm.monitor.transaction(job_list) except QMPCmdError as e: - if self.params['error_msg'] not in str(e): - self.test.fail('Unexpected error: %s' % str(e)) + if self.params["error_msg"] not in str(e): + self.test.fail(f"Unexpected error: {str(e)}") else: - self.test.fail('blockdev-backup succeeded unexpectedly') + self.test.fail("blockdev-backup succeeded unexpectedly") def do_test(self): self.do_incremental_backup() diff --git a/qemu/tests/blockdev_inc_backup_without_bitmapmode.py b/qemu/tests/blockdev_inc_backup_without_bitmapmode.py index 0fdc238a48..8281ea9784 100644 --- 
a/qemu/tests/blockdev_inc_backup_without_bitmapmode.py +++ b/qemu/tests/blockdev_inc_backup_without_bitmapmode.py @@ -1,14 +1,11 @@ from virttest.qemu_monitor import QMPCmdError -from provider import backup_utils -from provider import blockdev_base +from provider import backup_utils, blockdev_base class BlkdevIncNoBitmapmode(blockdev_base.BlockdevBaseTest): - def __init__(self, test, params, env): - super(BlkdevIncNoBitmapmode, self).__init__( - test, params, env) + super().__init__(test, params, env) self.source_images = [] self.full_backups = [] self.inc_backups = [] @@ -20,10 +17,10 @@ def __init__(self, test, params, env): def _init_arguments_by_params(self, tag): image_params = self.params.object_params(tag) image_chain = image_params.objects("image_backup_chain") - self.source_images.append("drive_%s" % tag) - self.full_backups.append("drive_%s" % image_chain[0]) - self.inc_backups.append("drive_%s" % image_chain[1]) - self.bitmaps.append("bitmap_%s" % tag) + self.source_images.append(f"drive_{tag}") + self.full_backups.append(f"drive_{image_chain[0]}") + self.inc_backups.append(f"drive_{image_chain[1]}") + self.bitmaps.append(f"bitmap_{tag}") def do_full_backup(self): extra_options = {"sync": "full", "auto_disable_bitmap": False} @@ -32,18 +29,22 @@ def do_full_backup(self): self.source_images, self.full_backups, self.bitmaps, - **extra_options) + **extra_options, + ) def generate_inc_files(self): return list(map(self.generate_data_file, self.src_img_tags)) def do_incremental_backup(self): - extra_options = {"sync": self.inc_sync_mode, - "bitmap": self.bitmaps[0], - "auto_disable_bitmap": False} + extra_options = { + "sync": self.inc_sync_mode, + "bitmap": self.bitmaps[0], + "auto_disable_bitmap": False, + } inc_backup = backup_utils.blockdev_backup_qmp_cmd - cmd, arguments = inc_backup(self.source_images[0], self.inc_backups[0], - **extra_options) + cmd, arguments = inc_backup( + self.source_images[0], self.inc_backups[0], **extra_options + ) try: self.main_vm.monitor.cmd(cmd, arguments) except QMPCmdError as e: diff --git a/qemu/tests/blockdev_inc_backup_xpt_allocation_depth.py b/qemu/tests/blockdev_inc_backup_xpt_allocation_depth.py index 78a60a7da6..ec02bfc157 100644 --- a/qemu/tests/blockdev_inc_backup_xpt_allocation_depth.py +++ b/qemu/tests/blockdev_inc_backup_xpt_allocation_depth.py @@ -2,42 +2,42 @@ import socket from avocado.utils import process - from virttest.qemu_storage import filename_to_file_opts from virttest.utils_misc import get_qemu_img_binary from provider.blockdev_live_backup_base import BlockdevLiveBackupBaseTest -from provider.nbd_image_export import InternalNBDExportImage -from provider.nbd_image_export import QemuNBDExportImage +from provider.nbd_image_export import InternalNBDExportImage, QemuNBDExportImage class BlockdevIncbkXptAllocDepth(BlockdevLiveBackupBaseTest): """Allocation depth export test""" def __init__(self, test, params, env): - super(BlockdevIncbkXptAllocDepth, self).__init__(test, params, env) + super().__init__(test, params, env) self._base_image, self._snapshot_image = self.params.objects( - 'image_backup_chain') + "image_backup_chain" + ) localhost = socket.gethostname() - self.params['nbd_server'] = localhost if localhost else 'localhost' + self.params["nbd_server"] = localhost if localhost else "localhost" self._nbd_image_obj = self.source_disk_define_by_params( - self.params, self.params['nbd_image_tag']) - self._block_export_uid = self.params.get('block_export_uid') + self.params, self.params["nbd_image_tag"] + ) + 
self._block_export_uid = self.params.get("block_export_uid") self._nbd_export = None self._is_exported = False def _init_nbd_export(self, tag): - self._nbd_export = InternalNBDExportImage( - self.main_vm, self.params, tag - ) if self._block_export_uid else QemuNBDExportImage( - self.params, tag + self._nbd_export = ( + InternalNBDExportImage(self.main_vm, self.params, tag) + if self._block_export_uid + else QemuNBDExportImage(self.params, tag) ) def _start_nbd_export(self, tag): if self._block_export_uid is not None: # export local image with block-export-add self._nbd_export.start_nbd_server() - self._nbd_export.add_nbd_image('drive_%s' % tag) + self._nbd_export.add_nbd_image(f"drive_{tag}") else: # export local image with qemu-nbd # we should stop vm and rebase sn onto base @@ -48,13 +48,12 @@ def _start_nbd_export(self, tag): self._is_exported = True def _rebase_sn_onto_base(self): - disk = self.source_disk_define_by_params(self.params, - self._snapshot_image) + disk = self.source_disk_define_by_params(self.params, self._snapshot_image) disk.rebase(params=self.params) def post_test(self): self.stop_export() - super(BlockdevIncbkXptAllocDepth, self).post_test() + super().post_test() def stop_export(self): """stop nbd export""" @@ -74,19 +73,18 @@ def check_allocation_depth_from_export(self, zero, data): backing(snapshot): zero: true, data: true """ opts = filename_to_file_opts(self._nbd_image_obj.image_filename) - opts[self.params['dirty_bitmap_opt']] = 'qemu:allocation-depth' - map_cmd = '{qemu_img} map --output=json {args}'.format( + opts[self.params["dirty_bitmap_opt"]] = "qemu:allocation-depth" + map_cmd = "{qemu_img} map --output=json {args}".format( qemu_img=get_qemu_img_binary(self.params), - args="'json:%s'" % json.dumps(opts) + args=f"'json:{json.dumps(opts)}'", ) result = process.run(map_cmd, ignore_status=False, shell=True) for item in json.loads(result.stdout.decode().strip()): - if item['zero'] is zero and item['data'] is data: + if item["zero"] is zero and item["data"] is data: break else: - self.test.fail( - 'Failed to get "zero": %s, "data": %s' % (zero, data)) + self.test.fail(f'Failed to get "zero": {zero}, "data": {data}') def do_test(self): self.do_full_backup() diff --git a/qemu/tests/blockdev_inc_backup_xpt_bitmap.py b/qemu/tests/blockdev_inc_backup_xpt_bitmap.py index 531fd05128..c6f49ea68f 100644 --- a/qemu/tests/blockdev_inc_backup_xpt_bitmap.py +++ b/qemu/tests/blockdev_inc_backup_xpt_bitmap.py @@ -1,21 +1,17 @@ import json import socket -from provider import backup_utils +from avocado.utils import process +from virttest import qemu_storage, utils_misc +from provider import backup_utils from provider.blockdev_base import BlockdevBaseTest from provider.nbd_image_export import QemuNBDExportImage -from virttest import utils_misc -from virttest import qemu_storage - -from avocado.utils import process - class BlockdevIncBackupXptBitmapTest(BlockdevBaseTest): - def __init__(self, test, params, env): - super(BlockdevIncBackupXptBitmapTest, self).__init__(test, params, env) + super().__init__(test, params, env) self.source_images = [] self.full_backups = [] self.bitmaps = [] @@ -23,34 +19,38 @@ def __init__(self, test, params, env): self.nbd_images = [] self.src_img_tags = params.objects("source_images") localhost = socket.gethostname() - self.params['nbd_server'] = localhost if localhost else 'localhost' + self.params["nbd_server"] = localhost if localhost else "localhost" list(map(self._init_arguments_by_params, self.src_img_tags)) def _init_arguments_by_params(self, 
tag): image_params = self.params.object_params(tag) image_chain = image_params.objects("image_backup_chain") - self.source_images.append("drive_%s" % tag) - self.full_backups.append("drive_%s" % image_chain[0]) - self.bitmaps.append("bitmap_%s" % tag) - image_params["nbd_export_bitmaps"] = "bitmap_%s" % tag + self.source_images.append(f"drive_{tag}") + self.full_backups.append(f"drive_{image_chain[0]}") + self.bitmaps.append(f"bitmap_{tag}") + image_params["nbd_export_bitmaps"] = f"bitmap_{tag}" self.nbd_exports.append(QemuNBDExportImage(image_params, tag)) self.nbd_images.append( qemu_storage.QemuImg( - self.params.object_params(image_params['nbd_image_tag']), - None, image_params['nbd_image_tag'] + self.params.object_params(image_params["nbd_image_tag"]), + None, + image_params["nbd_image_tag"], ) ) def do_full_backup(self): - extra_options = {"sync": "full", - "persistent": True, - "auto_disable_bitmap": False} + extra_options = { + "sync": "full", + "persistent": True, + "auto_disable_bitmap": False, + } backup_utils.blockdev_batch_backup( self.main_vm, self.source_images, self.full_backups, list(self.bitmaps), - **extra_options) + **extra_options, + ) def prepare_data_disk(self, tag): """ @@ -71,23 +71,22 @@ def check_info_from_export_bitmaps(self): for i, nbd_img in enumerate(self.nbd_images): opts = qemu_storage.filename_to_file_opts(nbd_img.image_filename) - opts[self.params['dirty_bitmap_opt'] - ] = 'qemu:dirty-bitmap:%s' % self.bitmaps[i] - args = "'json:%s'" % json.dumps(opts) + opts[self.params["dirty_bitmap_opt"]] = ( + f"qemu:dirty-bitmap:{self.bitmaps[i]}" + ) + args = f"'json:{json.dumps(opts)}'" - map_cmd = '{qemu_img} map --output=human {args}'.format( - qemu_img=qemu_img, args=args) + map_cmd = f"{qemu_img} map --output=human {args}" result = process.run(map_cmd, ignore_status=True, shell=True) if result.exit_status != 0: - self.test.fail('Failed to run map command: %s' - % result.stderr.decode()) + self.test.fail(f"Failed to run map command: {result.stderr.decode()}") if nbd_img.image_filename not in result.stdout_text: - self.test.fail('Failed to get bitmap info.') + self.test.fail("Failed to get bitmap info.") def clean_images(self): for obj in self.nbd_exports: obj.stop_export() - super(BlockdevIncBackupXptBitmapTest, self).clean_images() + super().clean_images() def do_test(self): self.do_full_backup() diff --git a/qemu/tests/blockdev_inc_backup_xpt_incon_bitmap.py b/qemu/tests/blockdev_inc_backup_xpt_incon_bitmap.py index cc4d8511a4..119bbf8872 100644 --- a/qemu/tests/blockdev_inc_backup_xpt_incon_bitmap.py +++ b/qemu/tests/blockdev_inc_backup_xpt_incon_bitmap.py @@ -2,20 +2,17 @@ import logging from avocado.utils import process - -from virttest import utils_misc -from virttest import data_dir +from virttest import data_dir, utils_misc from provider import backup_utils from provider.blockdev_base import BlockdevBaseTest -LOG_JOB = logging.getLogger('avocado.test') +LOG_JOB = logging.getLogger("avocado.test") class BlkdevIncXptInconBitmap(BlockdevBaseTest): - def __init__(self, test, params, env): - super(BlkdevIncXptInconBitmap, self).__init__(test, params, env) + super().__init__(test, params, env) self.source_images = [] self.full_backups = [] self.bitmaps = [] @@ -25,21 +22,24 @@ def __init__(self, test, params, env): def _init_arguments_by_params(self, tag): image_params = self.params.object_params(tag) image_chain = image_params.objects("image_backup_chain") - self.source_images.append("drive_%s" % tag) - self.full_backups.append("drive_%s" % image_chain[0]) - 
self.bitmaps.append("bitmap_%s" % tag) - image_params["nbd_export_bitmaps"] = "bitmap_%s" % tag + self.source_images.append(f"drive_{tag}") + self.full_backups.append(f"drive_{image_chain[0]}") + self.bitmaps.append(f"bitmap_{tag}") + image_params["nbd_export_bitmaps"] = f"bitmap_{tag}" def do_full_backup(self): - extra_options = {"sync": "full", - "persistent": True, - "auto_disable_bitmap": False} + extra_options = { + "sync": "full", + "persistent": True, + "auto_disable_bitmap": False, + } backup_utils.blockdev_batch_backup( self.main_vm, self.source_images, self.full_backups, list(self.bitmaps), - **extra_options) + **extra_options, + ) def prepare_data_disk(self, tag): """ @@ -54,15 +54,16 @@ def gen_inc_files(self): def kill_vm_after_restart(self): LOG_JOB.info("Re-start vm again") self.main_vm.create() - session = self.main_vm.wait_for_login() + self.main_vm.wait_for_login() LOG_JOB.info("Kill vm after its start") self.main_vm.monitors = [] self.main_vm.destroy(gracefully=False) def check_bitmap_status(self, inconsistent=False): def _get_bitmap_info(bitmap_name): - src_img = self.source_disk_define_by_params(self.params, - self.src_img_tags[0]) + src_img = self.source_disk_define_by_params( + self.params, self.src_img_tags[0] + ) output = json.loads(src_img.info(output="json")) bitmaps = output["format-specific"]["data"].get("bitmaps") if bitmaps: @@ -85,17 +86,15 @@ def expose_inconsistent_bitmap(self): LOG_JOB.info("Export inconsistent bitmap with qemu-nbd") img_path = data_dir.get_data_dir() qemu_nbd_cmd = utils_misc.get_qemu_nbd_binary(self.params) - cmd = self.params.get("export_cmd") % (qemu_nbd_cmd, - self.bitmaps[0], - img_path) - result = process.run(cmd, ignore_status=True, shell=True, - ignore_bg_processes=True) + cmd = self.params.get("export_cmd") % (qemu_nbd_cmd, self.bitmaps[0], img_path) + result = process.run( + cmd, ignore_status=True, shell=True, ignore_bg_processes=True + ) if result.exit_status == 0: ck_qemunbd_pid = self.params.get("ck_qemunbd_pid") - qemu_nbd_ck = process.run(ck_qemunbd_pid, - ignore_status=True, - shell=True, - ignore_bg_processes=True) + qemu_nbd_ck = process.run( + ck_qemunbd_pid, ignore_status=True, shell=True, ignore_bg_processes=True + ) qemu_nbd_pid = qemu_nbd_ck.stdout_text.strip() utils_misc.kill_process_tree(qemu_nbd_pid, 9, timeout=60) self.test.fail("Can expose image with a non-exist bitmap") diff --git a/qemu/tests/blockdev_inc_backup_xpt_multiple_bitmaps.py b/qemu/tests/blockdev_inc_backup_xpt_multiple_bitmaps.py index 19fb0f7f23..1648dd085f 100644 --- a/qemu/tests/blockdev_inc_backup_xpt_multiple_bitmaps.py +++ b/qemu/tests/blockdev_inc_backup_xpt_multiple_bitmaps.py @@ -2,49 +2,50 @@ import socket from avocado.utils import process - from virttest.qemu_storage import filename_to_file_opts from virttest.utils_misc import get_qemu_img_binary from provider.blockdev_live_backup_base import BlockdevLiveBackupBaseTest -from provider.nbd_image_export import InternalNBDExportImage -from provider.nbd_image_export import QemuNBDExportImage +from provider.nbd_image_export import InternalNBDExportImage, QemuNBDExportImage class BlockdevIncbkXptMutBitmaps(BlockdevLiveBackupBaseTest): """Multiple bitmaps export test""" def __init__(self, test, params, env): - super(BlockdevIncbkXptMutBitmaps, self).__init__(test, params, env) - self._bitmaps = params.objects('bitmap_list') + super().__init__(test, params, env) + self._bitmaps = params.objects("bitmap_list") self._bitmap_states = [True, False] localhost = socket.gethostname() - 
self.params['nbd_server'] = localhost if localhost else 'localhost' + self.params["nbd_server"] = localhost if localhost else "localhost" self._nbd_image_obj = self.source_disk_define_by_params( - self.params, self.params['nbd_image_tag']) - self._block_export_uid = self.params.get('block_export_uid') + self.params, self.params["nbd_image_tag"] + ) + self._block_export_uid = self.params.get("block_export_uid") self._nbd_export = None self._is_exported = False def _init_nbd_export(self): - self._nbd_export = InternalNBDExportImage( - self.main_vm, self.params, self._full_bk_images[0] - ) if self._block_export_uid else QemuNBDExportImage( - self.params, self._full_bk_images[0] + self._nbd_export = ( + InternalNBDExportImage(self.main_vm, self.params, self._full_bk_images[0]) + if self._block_export_uid + else QemuNBDExportImage(self.params, self._full_bk_images[0]) ) def check_nbd_export_info(self): if self._block_export_uid is not None: info = self._nbd_export.query_nbd_export() if info is None: - self.test.fail('Failed to get the nbd block export') + self.test.fail("Failed to get the nbd block export") - if (not info or info['shutting-down'] or - info['id'] != self._block_export_uid or - info['type'] != 'nbd' or - info['node-name'] != self._full_bk_nodes[0]): - self.test.fail( - 'Failed to get the correct export information: %s' % info) + if ( + not info + or info["shutting-down"] + or info["id"] != self._block_export_uid + or info["type"] != "nbd" + or info["node-name"] != self._full_bk_nodes[0] + ): + self.test.fail(f"Failed to get the correct export information: {info}") def do_nbd_export(self): if self._block_export_uid is not None: @@ -63,15 +64,22 @@ def prepare_test(self): def post_test(self): if self._is_exported: self._nbd_export.stop_export() - super(BlockdevIncbkXptMutBitmaps, self).post_test() + super().post_test() def add_persistent_bitmaps(self): """Add two bitmaps, one is enabled while the other is disabled""" - bitmaps = [{'node': self._full_bk_nodes[0], - 'name': b, 'persistent': True, 'disabled': s} - for b, s in zip(self._bitmaps, self._bitmap_states)] - job_list = [{'type': 'block-dirty-bitmap-add', 'data': data} - for data in bitmaps] + bitmaps = [ + { + "node": self._full_bk_nodes[0], + "name": b, + "persistent": True, + "disabled": s, + } + for b, s in zip(self._bitmaps, self._bitmap_states) + ] + job_list = [ + {"type": "block-dirty-bitmap-add", "data": data} for data in bitmaps + ] self.main_vm.monitor.transaction(job_list) def check_bitmaps_from_export(self): @@ -79,13 +87,12 @@ def check_bitmaps_from_export(self): opts = filename_to_file_opts(self._nbd_image_obj.image_filename) for bm in self._bitmaps: - opts[self.params['dirty_bitmap_opt']] = 'qemu:dirty-bitmap:%s' % bm - args = "'json:%s'" % json.dumps(opts) - map_cmd = '{qemu_img} map --output=human {args}'.format( - qemu_img=qemu_img, args=args) + opts[self.params["dirty_bitmap_opt"]] = f"qemu:dirty-bitmap:{bm}" + args = f"'json:{json.dumps(opts)}'" + map_cmd = f"{qemu_img} map --output=human {args}" result = process.run(map_cmd, ignore_status=False, shell=True) if self._nbd_image_obj.image_filename not in result.stdout_text: - self.test.fail('Failed to get bitmap info.') + self.test.fail("Failed to get bitmap info.") def do_test(self): self.add_persistent_bitmaps() diff --git a/qemu/tests/blockdev_inc_backup_xpt_nonexist_bitmap.py b/qemu/tests/blockdev_inc_backup_xpt_nonexist_bitmap.py index 0dbeb79502..cdc6de8c17 100644 --- a/qemu/tests/blockdev_inc_backup_xpt_nonexist_bitmap.py +++ 
b/qemu/tests/blockdev_inc_backup_xpt_nonexist_bitmap.py @@ -1,21 +1,19 @@ -import os import logging +import os import string from avocado.utils import process - from virttest import utils_misc from provider.blockdev_base import BlockdevBaseTest from provider.nbd_image_export import QemuNBDExportImage -LOG_JOB = logging.getLogger('avocado.test') +LOG_JOB = logging.getLogger("avocado.test") class BlkdevIncXptNonexistBitmap(BlockdevBaseTest): - def __init__(self, test, params, env): - super(BlkdevIncXptNonexistBitmap, self).__init__(test, params, env) + super().__init__(test, params, env) self.source_images = [] self.nbd_exports = [] self.bitmaps = [] @@ -24,9 +22,9 @@ def __init__(self, test, params, env): def _init_arguments_by_params(self, tag): image_params = self.params.object_params(tag) - self.source_images.append("drive_%s" % tag) - self.bitmaps.append("bitmap_%s" % tag) - image_params["nbd_export_bitmaps"] = "bitmap_%s" % tag + self.source_images.append(f"drive_{tag}") + self.bitmaps.append(f"bitmap_{tag}") + image_params["nbd_export_bitmaps"] = f"bitmap_{tag}" self.nbd_exports.append(QemuNBDExportImage(image_params, tag)) def expose_nonexist_bitmap(self): @@ -39,32 +37,38 @@ def _nbd_expose_cmd(qemu_nbd, filename, local_image, params): "fork": "--fork", "pid_file": "", "bitmap": "", - } - export_cmd = ('{export_format} {persistent} {port} {bitmap} ' - '{fork} {pid_file} {filename}') - pid_file = utils_misc.generate_tmp_file_name('%s_nbd_server' - % local_image, 'pid') - cmd_dict['pid_file'] = '--pid-file %s' % pid_file - cmd_dict['filename'] = filename - if params.get('nbd_export_format'): - cmd_dict['export_format'] = '-f %s' % params['nbd_export_format'] + } + export_cmd = ( + "{export_format} {persistent} {port} {bitmap} " + "{fork} {pid_file} {filename}" + ) + pid_file = utils_misc.generate_tmp_file_name( + f"{local_image}_nbd_server", "pid" + ) + cmd_dict["pid_file"] = f"--pid-file {pid_file}" + cmd_dict["filename"] = filename + if params.get("nbd_export_format"): + cmd_dict["export_format"] = "-f {}".format(params["nbd_export_format"]) else: - if params.get('nbd_port'): - cmd_dict['port'] = '-p %s' % params['nbd_port'] - if params.get('nbd_export_bitmaps'): - cmd_dict['bitmap'] = "".join( - [" -B %s" % _ for _ in params['nbd_export_bitmaps'].split()]) - cmdline = qemu_nbd + ' ' + string.Formatter().format(export_cmd, - **cmd_dict) + if params.get("nbd_port"): + cmd_dict["port"] = "-p {}".format(params["nbd_port"]) + if params.get("nbd_export_bitmaps"): + cmd_dict["bitmap"] = "".join( + [f" -B {_}" for _ in params["nbd_export_bitmaps"].split()] + ) + cmdline = qemu_nbd + " " + string.Formatter().format(export_cmd, **cmd_dict) return pid_file, cmdline LOG_JOB.info("Export inconsistent bitmap with qemu-nbd") - pid_file, cmd = _nbd_expose_cmd(self.nbd_exports[0]._qemu_nbd, - self.nbd_exports[0]._local_filename, - self.nbd_exports[0]._tag, - self.nbd_exports[0]._image_params) - result = process.run(cmd, ignore_status=True, shell=True, - ignore_bg_processes=True) + pid_file, cmd = _nbd_expose_cmd( + self.nbd_exports[0]._qemu_nbd, + self.nbd_exports[0]._local_filename, + self.nbd_exports[0]._tag, + self.nbd_exports[0]._image_params, + ) + result = process.run( + cmd, ignore_status=True, shell=True, ignore_bg_processes=True + ) if result.exit_status == 0: with open(pid_file, "r") as pid_file_fd: qemu_nbd_pid = int(pid_file_fd.read().strip()) diff --git a/qemu/tests/blockdev_mirror_after_block_error.py b/qemu/tests/blockdev_mirror_after_block_error.py index 3ad257de6a..6004ebf9bc 
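The _nbd_expose_cmd() helper above assembles a qemu-nbd command line from a dict of optional flag fragments and then deliberately points -B at a bitmap that was never created. A trimmed standalone sketch of the same assembly logic, with the paths and bitmap name as placeholders (the real test pulls them from the QemuNBDExportImage object and its params):

import string

def build_expose_cmd(qemu_nbd, filename, pid_file, bitmaps=()):
    # Each optional flag is a ready-made fragment; unused ones stay "".
    cmd_dict = {
        "persistent": "--persistent",
        "fork": "--fork",
        "pid_file": f"--pid-file {pid_file}",
        "bitmap": "".join(f" -B {b}" for b in bitmaps),
        "filename": filename,
    }
    template = "{persistent} {bitmap} {fork} {pid_file} {filename}"
    return qemu_nbd + " " + string.Formatter().format(template, **cmd_dict)

# Exposing a bitmap that was never added should make qemu-nbd exit non-zero,
# which is the outcome expose_nonexist_bitmap() checks via result.exit_status.
print(build_expose_cmd("qemu-nbd", "/tmp/disk.qcow2", "/tmp/nbd.pid",
                       bitmaps=["bitmap_nonexist"]))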
100644 --- a/qemu/tests/blockdev_mirror_after_block_error.py +++ b/qemu/tests/blockdev_mirror_after_block_error.py @@ -10,16 +10,16 @@ class BlockdevMirrorAfterBlockErrorTest(BlockdevMirrorWaitTest): def overflow_source_image(self): session = self.main_vm.wait_for_login() tag = self._source_images[0] - dd_cmd = self.params['write_file_cmd'] % self.disks_info[tag][1] + dd_cmd = self.params["write_file_cmd"] % self.disks_info[tag][1] session.cmd(dd_cmd, ignore_all_errors=True) session.close() def wait_block_io_error(self): event = get_event_by_condition( - self.main_vm, 'BLOCK_IO_ERROR', - self.params.get_numeric('event_timeout', 30)) + self.main_vm, "BLOCK_IO_ERROR", self.params.get_numeric("event_timeout", 30) + ) if event is None: - self.test.fail('Failed to get BLOCK_IO_ERROR event') + self.test.fail("Failed to get BLOCK_IO_ERROR event") def do_test(self): self.overflow_source_image() diff --git a/qemu/tests/blockdev_mirror_cancel_ready_job_with_io.py b/qemu/tests/blockdev_mirror_cancel_ready_job_with_io.py index cf31e404f3..42a62cb055 100644 --- a/qemu/tests/blockdev_mirror_cancel_ready_job_with_io.py +++ b/qemu/tests/blockdev_mirror_cancel_ready_job_with_io.py @@ -1,8 +1,8 @@ +from virttest.utils_misc import wait_for + from provider.blockdev_mirror_nowait import BlockdevMirrorNowaitTest from provider.job_utils import get_event_by_condition -from virttest.utils_misc import wait_for - class BlockdevMirrorCancelReadyIOJobTest(BlockdevMirrorNowaitTest): """ @@ -15,31 +15,34 @@ def _is_dd_running(): session = self.main_vm.wait_for_login() try: - session.sendline(self.params['write_file_cmd']) - if not wait_for(lambda: _is_dd_running(), 30, 0, 1, - "Waiting dd start..."): + session.sendline(self.params["write_file_cmd"]) + if not wait_for(lambda: _is_dd_running(), 30, 0, 1, "Waiting dd start..."): self.test.error("Failed to start dd in vm") finally: session.close() def cancel_job(self): - self.main_vm.monitor.cmd("block-job-cancel", {'device': self._jobs[0], 'force': True}) + self.main_vm.monitor.cmd( + "block-job-cancel", {"device": self._jobs[0], "force": True} + ) event = get_event_by_condition( - self.main_vm, 'BLOCK_JOB_CANCELLED', - self.params.get_numeric('job_cancelled_timeout', 60), - device=self._jobs[0] + self.main_vm, + "BLOCK_JOB_CANCELLED", + self.params.get_numeric("job_cancelled_timeout", 60), + device=self._jobs[0], ) if event is None: - self.test.fail('Job failed to cancel') + self.test.fail("Job failed to cancel") def wait_till_job_ready(self): event = get_event_by_condition( - self.main_vm, 'BLOCK_JOB_READY', - self.params.get_numeric('job_ready_timeout', 120), - device=self._jobs[0] + self.main_vm, + "BLOCK_JOB_READY", + self.params.get_numeric("job_ready_timeout", 120), + device=self._jobs[0], ) if event is None: - self.test.fail('Job failed to reach ready state') + self.test.fail("Job failed to reach ready state") def do_test(self): self.blockdev_mirror() diff --git a/qemu/tests/blockdev_mirror_cancel_ready_job_with_ioerror.py b/qemu/tests/blockdev_mirror_cancel_ready_job_with_ioerror.py index be0c357778..61eb4ba430 100644 --- a/qemu/tests/blockdev_mirror_cancel_ready_job_with_ioerror.py +++ b/qemu/tests/blockdev_mirror_cancel_ready_job_with_ioerror.py @@ -1,4 +1,4 @@ -from virttest import storage, data_dir +from virttest import data_dir, storage from provider.blockdev_mirror_nowait import BlockdevMirrorNowaitTest from provider.job_utils import get_event_by_condition @@ -10,23 +10,19 @@ class BlockdevMirrorCancelReadyIOError(BlockdevMirrorNowaitTest): """ def 
__init__(self, test, params, env): - params['filter-node-name'] = params['filter_node_name'] - super(BlockdevMirrorCancelReadyIOError, self).__init__(test, params, env) + params["filter-node-name"] = params["filter_node_name"] + super().__init__(test, params, env) def _blockdev_add_image(self, tag): params = self.params.object_params(tag) - devices = self.main_vm.devices.images_define_by_params(tag, - params, - 'disk') + devices = self.main_vm.devices.images_define_by_params(tag, params, "disk") devices.pop() for dev in devices: if self.main_vm.devices.get_by_qid(dev.get_qid()): continue - ret = self.main_vm.devices.simple_hotplug(dev, - self.main_vm.monitor) + ret = self.main_vm.devices.simple_hotplug(dev, self.main_vm.monitor) if not ret[1]: - self.test.fail("Failed to hotplug '%s': %s." - % (dev, ret[0])) + self.test.fail(f"Failed to hotplug '{dev}': {ret[0]}.") def _create_image(self, tag): disk = self.disk_define_by_params(self.params, tag) @@ -52,40 +48,56 @@ def add_target_image(self): # Fixme if blkdebug driver is supported completely in avocado-vt target = self.params["target_images"] target_params = self.params.object_params(target) - target_filename = storage.get_image_filename(target_params, - data_dir.get_data_dir()) - args = {'node-name': 'drive_target', 'driver': 'qcow2', 'file': {'driver': 'blkdebug', - 'image': {'driver': 'file', 'filename': target_filename}, - 'set-state': [{'event': 'flush_to_disk', 'state': 1, 'new_state': 2}], - 'inject-error': [{'event': 'flush_to_disk', 'once': True, - 'immediately': True, 'state': 2}]}} + target_filename = storage.get_image_filename( + target_params, data_dir.get_data_dir() + ) + args = { + "node-name": "drive_target", + "driver": "qcow2", + "file": { + "driver": "blkdebug", + "image": {"driver": "file", "filename": target_filename}, + "set-state": [{"event": "flush_to_disk", "state": 1, "new_state": 2}], + "inject-error": [ + { + "event": "flush_to_disk", + "once": True, + "immediately": True, + "state": 2, + } + ], + }, + } self.main_vm.monitor.cmd("blockdev-add", args) def qemu_io_source(self): qmp_cmd = "human-monitor-command" - filter_node = self.params['filter_node_name'] - qemu_io_cmd = 'qemu-io %s "write 0 64k"' % filter_node - args = {'command-line': qemu_io_cmd} + filter_node = self.params["filter_node_name"] + qemu_io_cmd = f'qemu-io {filter_node} "write 0 64k"' + args = {"command-line": qemu_io_cmd} self.main_vm.monitor.cmd(qmp_cmd, args) def cancel_job(self): - self.main_vm.monitor.cmd("block-job-cancel", {'device': self._jobs[0]}) + self.main_vm.monitor.cmd("block-job-cancel", {"device": self._jobs[0]}) event = get_event_by_condition( - self.main_vm, 'BLOCK_JOB_ERROR', - self.params.get_numeric('job_cancelled_timeout', 60), - device=self._jobs[0], action="stop" + self.main_vm, + "BLOCK_JOB_ERROR", + self.params.get_numeric("job_cancelled_timeout", 60), + device=self._jobs[0], + action="stop", ) if event is None: - self.test.fail('Job failed to cancel') + self.test.fail("Job failed to cancel") def wait_till_job_ready(self): event = get_event_by_condition( - self.main_vm, 'BLOCK_JOB_READY', - self.params.get_numeric('job_ready_timeout', 120), - device=self._jobs[0] + self.main_vm, + "BLOCK_JOB_READY", + self.params.get_numeric("job_ready_timeout", 120), + device=self._jobs[0], ) if event is None: - self.test.fail('Job failed to reach ready state') + self.test.fail("Job failed to reach ready state") def prepare_test(self): self.prepare_main_vm() diff --git a/qemu/tests/blockdev_mirror_cancel_running_job.py 
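The add_target_image() hunk above wraps the mirror target in a blkdebug node: the set-state rule bumps the blkdebug state on the first flush_to_disk, and the inject-error rule fails the next flush once the state is 2, so cancel_job() in this test waits for BLOCK_JOB_ERROR rather than BLOCK_JOB_CANCELLED. A standalone sketch that just builds and prints those blockdev-add arguments; the node name and filename are placeholders:

import json

def blkdebug_target_args(node_name, filename):
    # set-state: move the rule state from 1 to 2 on the first flush_to_disk.
    # inject-error: fail the next flush_to_disk once, while in state 2.
    return {
        "node-name": node_name,
        "driver": "qcow2",
        "file": {
            "driver": "blkdebug",
            "image": {"driver": "file", "filename": filename},
            "set-state": [
                {"event": "flush_to_disk", "state": 1, "new_state": 2}
            ],
            "inject-error": [
                {
                    "event": "flush_to_disk",
                    "once": True,
                    "immediately": True,
                    "state": 2,
                }
            ],
        },
    }

print(json.dumps(blkdebug_target_args("drive_target", "/tmp/target.qcow2"),
                 indent=2))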
b/qemu/tests/blockdev_mirror_cancel_running_job.py index c4ed539c3c..6561b3c1df 100644 --- a/qemu/tests/blockdev_mirror_cancel_running_job.py +++ b/qemu/tests/blockdev_mirror_cancel_running_job.py @@ -8,19 +8,21 @@ class BlockdevMirrorCancelRunningJob(BlockdevMirrorNowaitTest): """ def cancel_job(self): - self.main_vm.monitor.cmd("block-job-cancel", {'device': self._jobs[0]}) + self.main_vm.monitor.cmd("block-job-cancel", {"device": self._jobs[0]}) event = get_event_by_condition( - self.main_vm, 'BLOCK_JOB_CANCELLED', - self.params.get_numeric('job_cancelled_timeout', 60), - device=self._jobs[0] + self.main_vm, + "BLOCK_JOB_CANCELLED", + self.params.get_numeric("job_cancelled_timeout", 60), + device=self._jobs[0], ) if event is None: - self.test.fail('Job failed to cancel') + self.test.fail("Job failed to cancel") def do_test(self): self.blockdev_mirror() self.check_block_jobs_started( - self._jobs, self.params.get_numeric('job_started_timeout', 10)) + self._jobs, self.params.get_numeric("job_started_timeout", 10) + ) self.cancel_job() diff --git a/qemu/tests/blockdev_mirror_complete_running_job.py b/qemu/tests/blockdev_mirror_complete_running_job.py index 3d98117549..35fface16c 100644 --- a/qemu/tests/blockdev_mirror_complete_running_job.py +++ b/qemu/tests/blockdev_mirror_complete_running_job.py @@ -12,19 +12,21 @@ def complete_running_mirror_job(self): try: self.main_vm.monitor.cmd("job-complete", {"id": self._jobs[0]}) except QMPCmdError as e: - error_msg = self.params['error_msg'].format(job_id=self._jobs[0]) + error_msg = self.params["error_msg"].format(job_id=self._jobs[0]) if error_msg not in str(e): - self.test.fail('Unexpected error: %s' % str(e)) + self.test.fail(f"Unexpected error: {str(e)}") else: - self.test.fail('job-complete completed unexpectedly') + self.test.fail("job-complete completed unexpectedly") def do_test(self): self.blockdev_mirror() self.check_block_jobs_started( - self._jobs, self.params.get_numeric('job_started_timeout', 10)) + self._jobs, self.params.get_numeric("job_started_timeout", 10) + ) self.complete_running_mirror_job() - self.main_vm.monitor.cmd("block-job-set-speed", - {'device': self._jobs[0], 'speed': 0}) + self.main_vm.monitor.cmd( + "block-job-set-speed", {"device": self._jobs[0], "speed": 0} + ) self.wait_mirror_jobs_completed() diff --git a/qemu/tests/blockdev_mirror_error.py b/qemu/tests/blockdev_mirror_error.py index ec56c555ea..faef6262f9 100644 --- a/qemu/tests/blockdev_mirror_error.py +++ b/qemu/tests/blockdev_mirror_error.py @@ -9,8 +9,7 @@ def check_mirror_job_stopped(self): tmo = int(self.params.get("mirror_error_stop_timeout", "300")) status = self.params.get("mirror_error_stop_status", "paused") for job_id in self._jobs: - wait_until_job_status_match(self.main_vm, status, - job_id, timeout=tmo) + wait_until_job_status_match(self.main_vm, status, job_id, timeout=tmo) def do_test(self): self.blockdev_mirror() diff --git a/qemu/tests/blockdev_mirror_filternode.py b/qemu/tests/blockdev_mirror_filternode.py index 31dfd82b56..de45ca7a19 100644 --- a/qemu/tests/blockdev_mirror_filternode.py +++ b/qemu/tests/blockdev_mirror_filternode.py @@ -7,23 +7,29 @@ class BlockdevMirrorFilterNodeTest(BlockdevMirrorNowaitTest): """ def __init__(self, test, params, env): - params['filter-node-name'] = params['filter_node_name'] - super(BlockdevMirrorFilterNodeTest, self).__init__(test, params, env) + params["filter-node-name"] = params["filter_node_name"] + super().__init__(test, params, env) def check_filter_node_name(self): """The filter node name 
should be set when doing mirror""" for item in self.main_vm.monitor.query("block"): - if (self._source_images[0] in item["qdev"] - and item["inserted"].get("node-name") == self.params['filter-node-name']): + if ( + self._source_images[0] in item["qdev"] + and item["inserted"].get("node-name") == self.params["filter-node-name"] + ): break else: - self.test.fail("Filter node name(%s) is not set when doing mirror" - % self.params['filter-node-name']) + self.test.fail( + "Filter node name({}) is not set when doing mirror".format( + self.params["filter-node-name"] + ) + ) def do_test(self): self.blockdev_mirror() self.check_block_jobs_started( - self._jobs, self.params.get_numeric('mirror_started_timeout', 5)) + self._jobs, self.params.get_numeric("mirror_started_timeout", 5) + ) self.check_filter_node_name() self.wait_mirror_jobs_completed() self.check_mirrored_block_nodes_attached() diff --git a/qemu/tests/blockdev_mirror_firewall.py b/qemu/tests/blockdev_mirror_firewall.py index bfb016c1b1..10ae47f870 100644 --- a/qemu/tests/blockdev_mirror_firewall.py +++ b/qemu/tests/blockdev_mirror_firewall.py @@ -13,31 +13,33 @@ class BlockdevMirrorFirewallTest(BlockdevMirrorNowaitTest): def __init__(self, test, params, env): localhost = socket.gethostname() - params['nbd_server_%s' % params['nbd_image_tag']] = localhost \ - if localhost else 'localhost' + params["nbd_server_{}".format(params["nbd_image_tag"])] = ( + localhost if localhost else "localhost" + ) self._offset = None self._net_down = False - super(BlockdevMirrorFirewallTest, self).__init__(test, params, env) + super().__init__(test, params, env) def _create_local_image(self): - image_params = self.params.object_params( - self.params['local_image_tag']) + image_params = self.params.object_params(self.params["local_image_tag"]) local_image = self.source_disk_define_by_params( - image_params, self.params['local_image_tag']) + image_params, self.params["local_image_tag"] + ) local_image.create(image_params) self.trash.append(local_image) def _export_local_image_with_nbd(self): - self._nbd_export = QemuNBDExportImage(self.params, - self.params["local_image_tag"]) + self._nbd_export = QemuNBDExportImage( + self.params, self.params["local_image_tag"] + ) self._nbd_export.export_image() def prepare_test(self): try: self._create_local_image() self._export_local_image_with_nbd() - super(BlockdevMirrorFirewallTest, self).prepare_test() + super().prepare_test() except Exception: self.clean_images() raise @@ -45,29 +47,29 @@ def prepare_test(self): def add_target_data_disks(self): tag = self._target_images[0] devices = self.main_vm.devices.images_define_by_params( - tag, self.params.object_params(tag), 'disk') + tag, self.params.object_params(tag), "disk" + ) devices.pop() # ignore the front end device for dev in devices: - ret = self.main_vm.devices.simple_hotplug(dev, - self.main_vm.monitor) + ret = self.main_vm.devices.simple_hotplug(dev, self.main_vm.monitor) if not ret[1]: - self.test.fail("Failed to hotplug '%s': %s." 
- % (dev, ret[0])) + self.test.fail(f"Failed to hotplug '{dev}': {ret[0]}.") def _run_iptables(self, cmd): cmd = cmd.format( - s=self.params['nbd_server_%s' % self.params['nbd_image_tag']]) + s=self.params["nbd_server_{}".format(self.params["nbd_image_tag"])] + ) result = process.run(cmd, ignore_status=True, shell=True) if result.exit_status != 0: - self.test.error('command error: %s' % result.stderr.decode()) + self.test.error(f"command error: {result.stderr.decode()}") def break_net_with_iptables(self): - self._run_iptables(self.params['net_break_cmd']) + self._run_iptables(self.params["net_break_cmd"]) self._net_down = True def resume_net_with_iptables(self): - self._run_iptables(self.params['net_resume_cmd']) + self._run_iptables(self.params["net_resume_cmd"]) self._net_down = False def clean_images(self): @@ -78,18 +80,21 @@ def clean_images(self): # stop nbd image export self._nbd_export.stop_export() - super(BlockdevMirrorFirewallTest, self).clean_images() + super().clean_images() def do_test(self): self.blockdev_mirror() self.check_block_jobs_started( - self._jobs, self.params.get_numeric('mirror_started_timeout', 10)) + self._jobs, self.params.get_numeric("mirror_started_timeout", 10) + ) self.break_net_with_iptables() self.check_block_jobs_paused( - self._jobs, self.params.get_numeric('mirror_paused_interval', 50)) + self._jobs, self.params.get_numeric("mirror_paused_interval", 50) + ) self.resume_net_with_iptables() self.check_block_jobs_running( - self._jobs, self.params.get_numeric('mirror_resmued_timeout', 200)) + self._jobs, self.params.get_numeric("mirror_resmued_timeout", 200) + ) self.wait_mirror_jobs_completed() self.check_mirrored_block_nodes_attached() self.clone_vm_with_mirrored_images() diff --git a/qemu/tests/blockdev_mirror_forbidden_actions.py b/qemu/tests/blockdev_mirror_forbidden_actions.py index 892c30ca01..48c2d512ce 100644 --- a/qemu/tests/blockdev_mirror_forbidden_actions.py +++ b/qemu/tests/blockdev_mirror_forbidden_actions.py @@ -7,52 +7,53 @@ class BlockdevMirrorForbiddenActionsTest(BlockdevMirrorNowaitTest): """Do qmp commands during blockdev-mirror""" def commit(self): - self.main_vm.monitor.cmd( - "block-commit", {'device': self._source_nodes[0]} - ) + self.main_vm.monitor.cmd("block-commit", {"device": self._source_nodes[0]}) def resize(self): self.main_vm.monitor.cmd( "block_resize", - {'node-name': self._target_nodes[0], 'size': 1024*1024*1024} + {"node-name": self._target_nodes[0], "size": 1024 * 1024 * 1024}, ) def mirror(self): self.main_vm.monitor.cmd( "blockdev-mirror", - {'device': self._source_nodes[0], - 'target': self._target_nodes[0], 'sync': 'full'} + { + "device": self._source_nodes[0], + "target": self._target_nodes[0], + "sync": "full", + }, ) def snapshot(self): self.main_vm.monitor.cmd( "blockdev-snapshot", - {'node': self._source_nodes[0], 'overlay': self._target_nodes[0]} + {"node": self._source_nodes[0], "overlay": self._target_nodes[0]}, ) def stream(self): - self.main_vm.monitor.cmd("block-stream", - {'device': self._source_nodes[0]}) + self.main_vm.monitor.cmd("block-stream", {"device": self._source_nodes[0]}) def do_forbidden_actions(self): """Run the qmp commands one by one, all should fail""" - for action in self.params.objects('forbidden_actions'): - error_msg = self.params['error_msg_%s' % action] + for action in self.params.objects("forbidden_actions"): + error_msg = self.params[f"error_msg_{action}"] f = getattr(self, action) try: f() except QMPCmdError as e: if error_msg not in str(e): - self.test.fail('Unexpected 
error: %s' % str(e)) + self.test.fail(f"Unexpected error: {str(e)}") else: - self.test.fail('Unexpected qmp command success') + self.test.fail("Unexpected qmp command success") def do_test(self): self.blockdev_mirror() self.do_forbidden_actions() - self.main_vm.monitor.cmd("block-job-set-speed", - {'device': self._jobs[0], 'speed': 0}) + self.main_vm.monitor.cmd( + "block-job-set-speed", {"device": self._jobs[0], "speed": 0} + ) self.wait_mirror_jobs_completed() diff --git a/qemu/tests/blockdev_mirror_hotunplug.py b/qemu/tests/blockdev_mirror_hotunplug.py index 430bbead61..853c1ff0c8 100644 --- a/qemu/tests/blockdev_mirror_hotunplug.py +++ b/qemu/tests/blockdev_mirror_hotunplug.py @@ -15,13 +15,15 @@ def hotunplug_frontend_devices(self): device_del the frontend devices during mirroring, the devices CAN be removed without any issue """ + def _device_del(device): - self.main_vm.monitor.cmd('device_del', {'id': device}) + self.main_vm.monitor.cmd("device_del", {"id": device}) list(map(_device_del, self._source_images)) def wait_till_frontend_devices_deleted(self): """Wait till devices removed from output of query-block""" + def _is_device_deleted(device): for item in self.main_vm.monitor.query("block"): """ @@ -35,13 +37,13 @@ def _is_device_deleted(device): return True def _wait_till_device_deleted(device): - tmo = self.params.get_numeric('device_del_timeout', 60) + tmo = self.params.get_numeric("device_del_timeout", 60) for i in range(tmo): if _is_device_deleted(device): break time.sleep(1) else: - self.test.fail('Failed to hotunplug the frontend device') + self.test.fail("Failed to hotunplug the frontend device") list(map(_wait_till_device_deleted, self._source_images)) @@ -50,15 +52,16 @@ def hotunplug_format_nodes(self): blockdev-del the format nodes during mirroring, the nodes CANNOT be removed for they are busy """ + def _blockdev_del(node): try: - self.main_vm.monitor.cmd('blockdev-del', {'node-name': node}) + self.main_vm.monitor.cmd("blockdev-del", {"node-name": node}) except QMPCmdError as e: - err = self.params['block_node_busy_error'] % node + err = self.params["block_node_busy_error"] % node if err not in str(e): - self.test.fail('Unexpected error: %s' % str(e)) + self.test.fail(f"Unexpected error: {str(e)}") else: - self.test.fail('blockdev-del succeeded unexpectedly') + self.test.fail("blockdev-del succeeded unexpectedly") list(map(_blockdev_del, self._source_nodes)) diff --git a/qemu/tests/blockdev_mirror_install.py b/qemu/tests/blockdev_mirror_install.py index 6ffd0eb82e..38d79bc737 100644 --- a/qemu/tests/blockdev_mirror_install.py +++ b/qemu/tests/blockdev_mirror_install.py @@ -3,13 +3,12 @@ import re import time -from virttest import utils_test -from virttest import utils_misc +from virttest import utils_misc, utils_test from virttest.tests import unattended_install from provider.blockdev_mirror_nowait import BlockdevMirrorNowaitTest -LOG_JOB = logging.getLogger('avocado.test') +LOG_JOB = logging.getLogger("avocado.test") class BlockdevMirrorVMInstallTest(BlockdevMirrorNowaitTest): @@ -19,9 +18,12 @@ class BlockdevMirrorVMInstallTest(BlockdevMirrorNowaitTest): def _is_install_started(self, start_msg): # get_output can return None - out = self.main_vm.serial_console.get_output() \ - if self.main_vm.serial_console else None - out = '' if out is None else out + out = ( + self.main_vm.serial_console.get_output() + if self.main_vm.serial_console + else None + ) + out = "" if out is None else out return bool(re.search(start_msg, out, re.M)) def _install_vm_in_background(self): 
@@ -33,9 +35,11 @@ def _install_vm_in_background(self): LOG_JOB.info("Wait till '%s'", self.params["tag_for_install_start"]) if utils_misc.wait_for( - lambda: self._is_install_started( - self.params["tag_for_install_start"]), - int(self.params.get("timeout_for_install_start", 360)), 10, 5): + lambda: self._is_install_started(self.params["tag_for_install_start"]), + int(self.params.get("timeout_for_install_start", 360)), + 10, + 5, + ): LOG_JOB.info("Sleep some time before block-mirror") time.sleep(random.randint(10, 120)) else: @@ -44,8 +48,7 @@ def _install_vm_in_background(self): def _wait_installation_done(self): # Installation on remote storage may take too much time, # we keep the same timeout with the default used in VT - self._bg.join( - timeout=int(self.params.get("install_timeout", 4800))) + self._bg.join(timeout=int(self.params.get("install_timeout", 4800))) if self._bg.is_alive(): self.test.fail("VM installation timed out") @@ -63,12 +66,16 @@ def clone_vm_with_mirrored_images(self): cdrom = self.main_vm.params.objects("cdroms")[0] self.main_vm.params["cdroms"] = cdrom self.main_vm.params["boot_once"] = "c" - for opt in ["cdrom_%s" % cdrom, "boot_path", - "kernel_params", "kernel", "initrd"]: + for opt in [ + f"cdrom_{cdrom}", + "boot_path", + "kernel_params", + "kernel", + "initrd", + ]: self.main_vm.params[opt] = "" - super(BlockdevMirrorVMInstallTest, - self).clone_vm_with_mirrored_images() + super().clone_vm_with_mirrored_images() def do_test(self): self.blockdev_mirror() diff --git a/qemu/tests/blockdev_mirror_multiple_blocks.py b/qemu/tests/blockdev_mirror_multiple_blocks.py index 3114599210..04ad2589b2 100644 --- a/qemu/tests/blockdev_mirror_multiple_blocks.py +++ b/qemu/tests/blockdev_mirror_multiple_blocks.py @@ -3,6 +3,7 @@ class BlockdevMirrorMultipleBlocksTest(BlockdevMirrorParallelTest): """do block-mirror for multiple disks in parallel""" + pass diff --git a/qemu/tests/blockdev_mirror_no_space.py b/qemu/tests/blockdev_mirror_no_space.py index 99abda4f1b..d923eab76f 100644 --- a/qemu/tests/blockdev_mirror_no_space.py +++ b/qemu/tests/blockdev_mirror_no_space.py @@ -1,5 +1,4 @@ from provider import job_utils - from provider.blockdev_mirror_nowait import BlockdevMirrorNowaitTest @@ -11,19 +10,22 @@ def post_test(self): pass def check_no_space_error(self): - tmo = self.params.get_numeric('block_io_error_timeout', 60) + tmo = self.params.get_numeric("block_io_error_timeout", 60) # check 'error' message in BLOCK_JOB_COMPLETED event - cond = {'device': self._jobs[0]} + cond = {"device": self._jobs[0]} event = job_utils.get_event_by_condition( - self.main_vm, job_utils.BLOCK_JOB_COMPLETED_EVENT, tmo, **cond) + self.main_vm, job_utils.BLOCK_JOB_COMPLETED_EVENT, tmo, **cond + ) if event: - if event['data'].get('error') != self.params['error_msg']: - self.test.fail('Unexpected error: %s' - % event['data'].get('error')) + if event["data"].get("error") != self.params["error_msg"]: + self.test.fail( + "Unexpected error: {}".format(event["data"].get("error")) + ) else: - self.test.fail('Failed to get BLOCK_JOB_COMPLETED event for %s' - % self._jobs[0]) + self.test.fail( + f"Failed to get BLOCK_JOB_COMPLETED event for {self._jobs[0]}" + ) def do_test(self): self.blockdev_mirror() diff --git a/qemu/tests/blockdev_mirror_qemuio_ready_job.py b/qemu/tests/blockdev_mirror_qemuio_ready_job.py index cdfa97dd4e..6e6e9949e0 100644 --- a/qemu/tests/blockdev_mirror_qemuio_ready_job.py +++ b/qemu/tests/blockdev_mirror_qemuio_ready_job.py @@ -1,5 +1,4 @@ from avocado.utils import process - 
from virttest import utils_misc from provider.blockdev_mirror_nowait import BlockdevMirrorNowaitTest @@ -29,12 +28,13 @@ def qemuio_source_image(self): def wait_till_job_ready(self): event = get_event_by_condition( - self.main_vm, 'BLOCK_JOB_READY', - self.params.get_numeric('job_ready_timeout', 120), - device=self._jobs[0] + self.main_vm, + "BLOCK_JOB_READY", + self.params.get_numeric("job_ready_timeout", 120), + device=self._jobs[0], ) if event is None: - self.test.fail('Job failed to reach ready state') + self.test.fail("Job failed to reach ready state") def do_test(self): self.blockdev_mirror() diff --git a/qemu/tests/blockdev_mirror_qemuio_target.py b/qemu/tests/blockdev_mirror_qemuio_target.py index 5f6a643c46..e4b0abcf11 100644 --- a/qemu/tests/blockdev_mirror_qemuio_target.py +++ b/qemu/tests/blockdev_mirror_qemuio_target.py @@ -1,5 +1,4 @@ from avocado.utils import process - from virttest import utils_misc from provider.blockdev_mirror_nowait import BlockdevMirrorNowaitTest @@ -22,8 +21,9 @@ def qemuio_target_image(self): process.run(qemuio_cmd, shell=True) except process.CmdError as e: if self.params["error_msg"] not in e.result.stderr.decode(): - self.test.fail("Write to used image failed with error: %s" - % e.result.stderr) + self.test.fail( + f"Write to used image failed with error: {e.result.stderr}" + ) else: self.test.fail("Can qemu-io a using image") diff --git a/qemu/tests/blockdev_mirror_readonly.py b/qemu/tests/blockdev_mirror_readonly.py index 5b1595eb90..b789dcaf11 100644 --- a/qemu/tests/blockdev_mirror_readonly.py +++ b/qemu/tests/blockdev_mirror_readonly.py @@ -19,8 +19,8 @@ def check_mirrored_block_nodes_readonly(self): try: self.format_data_disk(tag) except ShellCmdError as e: - if not re.search(self.params['error_msg'], str(e), re.M): - self.test.fail("Unexpected disk format error: %s" % str(e)) + if not re.search(self.params["error_msg"], str(e), re.M): + self.test.fail(f"Unexpected disk format error: {str(e)}") else: self.test.fail("Unexpected disk format success") diff --git a/qemu/tests/blockdev_mirror_ready_vm_down.py b/qemu/tests/blockdev_mirror_ready_vm_down.py index 22c345aef2..a9039d8f88 100644 --- a/qemu/tests/blockdev_mirror_ready_vm_down.py +++ b/qemu/tests/blockdev_mirror_ready_vm_down.py @@ -1,7 +1,6 @@ import time -from virttest import utils_qemu -from virttest import utils_misc +from virttest import utils_misc, utils_qemu from virttest.utils_version import VersionInterval from provider import job_utils @@ -18,46 +17,60 @@ def poweroff_vm(self): def wait_mirror_jobs_ready(self): def _wait_mirror_job_ready(jobid): - tmo = self.params.get_numeric('job_ready_timeout', 600) - job_utils.wait_until_job_status_match(self.main_vm, 'ready', - jobid, tmo) + tmo = self.params.get_numeric("job_ready_timeout", 600) + job_utils.wait_until_job_status_match(self.main_vm, "ready", jobid, tmo) + list(map(_wait_mirror_job_ready, self._jobs)) def wait_mirror_jobs_auto_completed(self): """job completed automatically after vm poweroff""" + def _wait_mirror_job_completed(jobid): - tmo = self.params.get_numeric('job_completed_timeout', 200) + tmo = self.params.get_numeric("job_completed_timeout", 200) for i in range(tmo): events = self.main_vm.monitor.get_events() - completed_events = [e for e in events if e.get( - 'event') == job_utils.BLOCK_JOB_COMPLETED_EVENT] - job_events = [e for e in completed_events if e.get( - 'data') and jobid in (e['data'].get('id'), - e['data'].get('device'))] + completed_events = [ + e + for e in events + if e.get("event") == 
job_utils.BLOCK_JOB_COMPLETED_EVENT + ] + job_events = [ + e + for e in completed_events + if e.get("data") + and jobid in (e["data"].get("id"), e["data"].get("device")) + ] if job_events: break time.sleep(1) else: - self.test.fail('job complete event never received in %s' % tmo) + self.test.fail(f"job complete event never received in {tmo}") list(map(_wait_mirror_job_completed, self._jobs)) def wait_mirror_jobs_cancelled(self): """job cancelled after vm poweroff since qemu6.2""" + def _wait_mirror_job_cancelled(jobid): - tmo = self.params.get_numeric('job_cancelled_timeout', 200) + tmo = self.params.get_numeric("job_cancelled_timeout", 200) for i in range(tmo): events = self.main_vm.monitor.get_events() - cancelled_events = [e for e in events if e.get( - 'event') == job_utils.BLOCK_JOB_CANCELLED_EVENT] - job_events = [e for e in cancelled_events if e.get( - 'data') and jobid in (e['data'].get('id'), - e['data'].get('device'))] + cancelled_events = [ + e + for e in events + if e.get("event") == job_utils.BLOCK_JOB_CANCELLED_EVENT + ] + job_events = [ + e + for e in cancelled_events + if e.get("data") + and jobid in (e["data"].get("id"), e["data"].get("device")) + ] if job_events: break time.sleep(1) else: - self.test.fail('job cancelled event not received in %s' % tmo) + self.test.fail(f"job cancelled event not received in {tmo}") list(map(_wait_mirror_job_cancelled, self._jobs)) diff --git a/qemu/tests/blockdev_mirror_remote_server_down.py b/qemu/tests/blockdev_mirror_remote_server_down.py index 4b9d846c0f..2d3542ebad 100644 --- a/qemu/tests/blockdev_mirror_remote_server_down.py +++ b/qemu/tests/blockdev_mirror_remote_server_down.py @@ -11,19 +11,17 @@ class BlockdevMirrorRemoteServerDownTest(BlockdevMirrorNowaitTest): def __init__(self, test, params, env): localhost = socket.gethostname() - params['nbd_server_%s' % params['nbd_image_tag']] = localhost \ - if localhost else 'localhost' - self.nbd_export = QemuNBDExportImage(params, - params["local_image_tag"]) - super(BlockdevMirrorRemoteServerDownTest, self).__init__(test, - params, - env) + params["nbd_server_{}".format(params["nbd_image_tag"])] = ( + localhost if localhost else "localhost" + ) + self.nbd_export = QemuNBDExportImage(params, params["local_image_tag"]) + super().__init__(test, params, env) def _create_local_image(self): - image_params = self.params.object_params( - self.params['local_image_tag']) + image_params = self.params.object_params(self.params["local_image_tag"]) local_image = self.source_disk_define_by_params( - image_params, self.params['local_image_tag']) + image_params, self.params["local_image_tag"] + ) local_image.create(image_params) self.trash.append(local_image) @@ -31,7 +29,7 @@ def prepare_test(self): try: self._create_local_image() self.nbd_export.export_image() - super(BlockdevMirrorRemoteServerDownTest, self).prepare_test() + super().prepare_test() except Exception: self.clean_images() raise @@ -41,34 +39,34 @@ def add_target_data_disks(self): tag = self._target_images[0] devices = self.main_vm.devices.images_define_by_params( - tag, self.params.object_params(tag), 'disk') + tag, self.params.object_params(tag), "disk" + ) devices.pop() for dev in devices: - ret = self.main_vm.devices.simple_hotplug(dev, - self.main_vm.monitor) + ret = self.main_vm.devices.simple_hotplug(dev, self.main_vm.monitor) if not ret[1]: - self.test.fail("Failed to hotplug '%s': %s." 
- % (dev, ret[0])) + self.test.fail(f"Failed to hotplug '{dev}': {ret[0]}.") def clean_images(self): self.nbd_export.stop_export() - super(BlockdevMirrorRemoteServerDownTest, self).clean_images() + super().clean_images() def do_test(self): self.blockdev_mirror() self.check_block_jobs_started( - self._jobs, self.params.get_numeric('job_started_timeout', 10)) + self._jobs, self.params.get_numeric("job_started_timeout", 10) + ) self.nbd_export.suspend_export() try: self.check_block_jobs_paused( - self._jobs, - self.params.get_numeric('job_paused_interval', 30) + self._jobs, self.params.get_numeric("job_paused_interval", 30) ) finally: self.nbd_export.resume_export() - self.main_vm.monitor.cmd("block-job-set-speed", - {'device': self._jobs[0], 'speed': 0}) + self.main_vm.monitor.cmd( + "block-job-set-speed", {"device": self._jobs[0], "speed": 0} + ) self.wait_mirror_jobs_completed() self.check_mirrored_block_nodes_attached() self.clone_vm_with_mirrored_images() diff --git a/qemu/tests/blockdev_mirror_same_src_tgt.py b/qemu/tests/blockdev_mirror_same_src_tgt.py index 312704181e..c3fb49ae4e 100644 --- a/qemu/tests/blockdev_mirror_same_src_tgt.py +++ b/qemu/tests/blockdev_mirror_same_src_tgt.py @@ -18,15 +18,15 @@ def post_test(self): def blockdev_mirror(self): try: - cmd, args = blockdev_mirror_qmp_cmd(self._source_nodes[0], - self._source_nodes[0], - **self._backup_options[0]) + cmd, args = blockdev_mirror_qmp_cmd( + self._source_nodes[0], self._source_nodes[0], **self._backup_options[0] + ) self.main_vm.monitor.cmd(cmd, args) except QMPCmdError as e: - if self.params['error_msg'] not in str(e): - self.test.fail('Unexpected error: %s' % str(e)) + if self.params["error_msg"] not in str(e): + self.test.fail(f"Unexpected error: {str(e)}") else: - self.test.fail('Unexpectedly succeeded') + self.test.fail("Unexpectedly succeeded") def do_test(self): self.blockdev_mirror() diff --git a/qemu/tests/blockdev_mirror_simple.py b/qemu/tests/blockdev_mirror_simple.py index 46e9ecdce6..7d0ed965a1 100644 --- a/qemu/tests/blockdev_mirror_simple.py +++ b/qemu/tests/blockdev_mirror_simple.py @@ -16,22 +16,23 @@ def __init__(self, test, params, env): self._set_bufsize(params) self._set_auto_finalize(params) self._set_auto_dismiss(params) - super(BlockdevMirrorSimpleTest, self).__init__(test, params, env) + super().__init__(test, params, env) def _set_auto_finalize(self, params): - auto_finalize = params.get('auto_finalize') + auto_finalize = params.get("auto_finalize") if auto_finalize: - params['auto-finalize'] = auto_finalize + params["auto-finalize"] = auto_finalize def _set_auto_dismiss(self, params): - auto_dismiss = params.get('auto_dismiss') + auto_dismiss = params.get("auto_dismiss") if auto_dismiss: - params['auto-dismiss'] = auto_dismiss + params["auto-dismiss"] = auto_dismiss def _set_granularity(self, params): granularities = params.objects("granularity_list") - granularity = random.choice( - granularities) if granularities else params.get("granularity") + granularity = ( + random.choice(granularities) if granularities else params.get("granularity") + ) if granularity: params["granularity"] = int( @@ -41,9 +42,7 @@ def _set_granularity(self, params): def _set_bufsize(self, params): factors = params.objects("buf_size_factor_list") if factors: - params["buf-size"] = int( - random.choice(factors) - ) * params["granularity"] + params["buf-size"] = int(random.choice(factors)) * params["granularity"] def run(test, params, env): diff --git a/qemu/tests/blockdev_mirror_speed.py 
b/qemu/tests/blockdev_mirror_speed.py index 6f2dc5defc..0e7333ba2e 100644 --- a/qemu/tests/blockdev_mirror_speed.py +++ b/qemu/tests/blockdev_mirror_speed.py @@ -2,8 +2,8 @@ from virttest.qemu_monitor import QMPCmdError -from provider.blockdev_mirror_nowait import BlockdevMirrorNowaitTest from provider import job_utils +from provider.blockdev_mirror_nowait import BlockdevMirrorNowaitTest class BlockdevMirrorSpeedTest(BlockdevMirrorNowaitTest): @@ -15,26 +15,27 @@ def test_invalid_speeds(self): """ Set an invalid speed, make sure we can get the proper error message """ + def _set_invalid_speed(jobid, speed, error_msg): try: self.main_vm.monitor.cmd( - "block-job-set-speed", {'device': jobid, 'speed': speed}) + "block-job-set-speed", {"device": jobid, "speed": speed} + ) except QMPCmdError as e: if error_msg not in str(e): - self.test.fail('Unexpected error: %s' % str(e)) + self.test.fail(f"Unexpected error: {str(e)}") else: - self.test.fail('block-job-set-speed %s succeeded unexpectedly' - % speed) + self.test.fail(f"block-job-set-speed {speed} succeeded unexpectedly") def _invalid_speed_error_tuple(speed): - if '-' in speed: # a negative int - return int(speed), self.params['error_msg_negative'] - elif '.' in speed: # a float number - return float(speed), self.params['error_msg'] - else: # a string - return speed, self.params['error_msg'] - - for speed in self.params.objects('invalid_speeds'): + if "-" in speed: # a negative int + return int(speed), self.params["error_msg_negative"] + elif "." in speed: # a float number + return float(speed), self.params["error_msg"] + else: # a string + return speed, self.params["error_msg"] + + for speed in self.params.objects("invalid_speeds"): s, m = _invalid_speed_error_tuple(speed) func = partial(_set_invalid_speed, speed=s, error_msg=m) list(map(func, self._jobs)) @@ -43,24 +44,26 @@ def test_valid_speeds(self): """ Set a valid speed, make sure mirror job can go on without any issue """ + def _set_valid_speed(jobid, speed): self.main_vm.monitor.cmd( - "block-job-set-speed", {'device': jobid, 'speed': speed}) + "block-job-set-speed", {"device": jobid, "speed": speed} + ) def _check_valid_speed(jobid, speed): job = job_utils.get_block_job_by_id(self.main_vm, jobid) if job.get("speed") != speed: - self.test.fail("Speed:%s is not set as expected:%s" - % (job.get("speed"), speed)) + self.test.fail( + "Speed:{} is not set as expected:{}".format(job.get("speed"), speed) + ) ck_speed = self.params.get_numeric("check_speed") uspeed = self.params.get_numeric("ulimit_speed") if speed > ck_speed or speed == uspeed: self.check_block_jobs_running( - self._jobs, - self.params.get_numeric('mirror_running_timeout', 60) - ) + self._jobs, self.params.get_numeric("mirror_running_timeout", 60) + ) - for speed in self.params.objects('valid_speeds'): + for speed in self.params.objects("valid_speeds"): func = partial(_set_valid_speed, speed=int(speed)) list(map(func, self._jobs)) func_ck = partial(_set_valid_speed, speed=int(speed)) @@ -69,7 +72,8 @@ def _check_valid_speed(jobid, speed): def do_test(self): self.blockdev_mirror() self.check_block_jobs_started( - self._jobs, self.params.get_numeric('mirror_started_timeout', 10)) + self._jobs, self.params.get_numeric("mirror_started_timeout", 10) + ) self.test_invalid_speeds() self.test_valid_speeds() self.wait_mirror_jobs_completed() diff --git a/qemu/tests/blockdev_mirror_src_no_space.py b/qemu/tests/blockdev_mirror_src_no_space.py index 130bb48809..5455c1b191 100644 --- a/qemu/tests/blockdev_mirror_src_no_space.py +++ 
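test_invalid_speeds() above classifies each configured speed string before issuing block-job-set-speed, because a negative integer and a non-numeric value are rejected with different error messages, and it uses functools.partial to apply one setter to every job ID. A standalone sketch of that pattern; the error texts and the monitor call are stand-ins for the real test params and QMP interface:

from functools import partial

ERROR_MSGS = {  # placeholders for params["error_msg_negative"] / params["error_msg"]
    "negative": "<error_msg_negative from the test config>",
    "generic": "<error_msg from the test config>",
}

def classify_speed(speed):
    # Mirrors _invalid_speed_error_tuple(): negative int, float, or raw string.
    if "-" in speed:
        return int(speed), ERROR_MSGS["negative"]
    if "." in speed:
        return float(speed), ERROR_MSGS["generic"]
    return speed, ERROR_MSGS["generic"]

def set_speed(job_id, speed):
    # Placeholder for main_vm.monitor.cmd("block-job-set-speed", ...).
    print(f"block-job-set-speed device={job_id} speed={speed!r}")

for raw in ["-1", "1.5", "abc"]:
    value, expected_error = classify_speed(raw)
    list(map(partial(set_speed, speed=value), ["job0", "job1"]))
    print("would expect error:", expected_error)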
b/qemu/tests/blockdev_mirror_src_no_space.py @@ -9,20 +9,20 @@ class BlockdevMirrorSrcNoSpaceTest(BlockdevMirrorParallelTest): def overflow_source(self): tag = self._source_images[0] - dd_cmd = self.params['write_file_cmd'] % self.disks_info[tag][1] + dd_cmd = self.params["write_file_cmd"] % self.disks_info[tag][1] self._session.cmd(dd_cmd, ignore_all_errors=True) self._session.close() def check_io_error_event(self): event = get_event_by_condition( - self.main_vm, 'BLOCK_IO_ERROR', - self.params.get_numeric('event_timeout', 30)) + self.main_vm, "BLOCK_IO_ERROR", self.params.get_numeric("event_timeout", 30) + ) if event: - if event['data'].get('reason') != self.params['error_msg']: - self.test.fail('Unexpected error') + if event["data"].get("reason") != self.params["error_msg"]: + self.test.fail("Unexpected error") else: - self.test.fail('Failed to get BLOCK_IO_ERROR event') + self.test.fail("Failed to get BLOCK_IO_ERROR event") def do_test(self): self._session = self.main_vm.wait_for_login() diff --git a/qemu/tests/blockdev_mirror_stress.py b/qemu/tests/blockdev_mirror_stress.py index d715291463..5b123939cb 100644 --- a/qemu/tests/blockdev_mirror_stress.py +++ b/qemu/tests/blockdev_mirror_stress.py @@ -1,9 +1,10 @@ -import six -import time import random +import time + +import six -from provider.storage_benchmark import generate_instance from provider.blockdev_mirror_wait import BlockdevMirrorWaitTest +from provider.storage_benchmark import generate_instance class BlockdevMirrorStressTest(BlockdevMirrorWaitTest): @@ -13,18 +14,22 @@ def fio_run_bg(self): fio_options = self.params.get("fio_options") if fio_options: self.test.log.info("Start to run fio") - self.fio = generate_instance(self.params, self.main_vm, 'fio') + self.fio = generate_instance(self.params, self.main_vm, "fio") fio_run_timeout = self.params.get_numeric("fio_timeout", 2400) self.fio.run(fio_options, fio_run_timeout) def remove_files_from_system_image(self, tmo=60): """Remove testing files from system image""" - tag_dir_list = [(t, d[1]) for t, d in six.iteritems(self.disks_info) if d[0] == "system"] + tag_dir_list = [ + (t, d[1]) for t, d in six.iteritems(self.disks_info) if d[0] == "system" + ] if tag_dir_list: tag, root_dir = tag_dir_list[0] - files = ["%s/%s" % (root_dir, f) for f in self.files_info[tag]] - files.append("%s/%s" % (self.params["mnt_on_sys_dsk"], self.params["file_fio"])) - rm_cmd = "rm -f %s" % " ".join(files) + files = [f"{root_dir}/{f}" for f in self.files_info[tag]] + files.append( + "{}/{}".format(self.params["mnt_on_sys_dsk"], self.params["file_fio"]) + ) + rm_cmd = "rm -f {}".format(" ".join(files)) # restart main vm for the original system image is offlined # and the mirror image is attached after block-mirror diff --git a/qemu/tests/blockdev_mirror_sync_none.py b/qemu/tests/blockdev_mirror_sync_none.py index 7fcbd98852..5406761b4e 100644 --- a/qemu/tests/blockdev_mirror_sync_none.py +++ b/qemu/tests/blockdev_mirror_sync_none.py @@ -10,24 +10,23 @@ def _verify_file_not_exist(self, dir_list, none_existed_files): session = self.clone_vm.wait_for_login() try: for idx, f in enumerate(none_existed_files): - file_path = "%s/%s" % (dir_list[idx], f) - cat_cmd = "ls %s" % file_path + file_path = f"{dir_list[idx]}/{f}" + cat_cmd = f"ls {file_path}" s, o = session.cmd_status_output(cat_cmd) if s == 0: - self.test.fail('File (%s) exists' % f) - elif 'No such file' not in o.strip(): - self.test.fail('Unknown error: %s' % o) + self.test.fail(f"File ({f}) exists") + elif "No such file" not in o.strip(): + 
self.test.fail(f"Unknown error: {o}") finally: session.close() def verify_data_files(self): dir_list = [self.disks_info[t][1] for t in self._source_images] - none_existed_files = [self.files_info[t].pop( - 0) for t in self._source_images] + none_existed_files = [self.files_info[t].pop(0) for t in self._source_images] # the second file should exist - super(BlockdevMirrorSyncModeNoneTest, self).verify_data_files() + super().verify_data_files() # the first file should not exist self._verify_file_not_exist(dir_list, none_existed_files) @@ -43,8 +42,7 @@ def wait_mirror_jobs_completed(self): # total-progress are same, but in fact, the mirror is still running. # This is expected. time.sleep(int(self.params.get("sync_none_mirror_timeout", "20"))) - super(BlockdevMirrorSyncModeNoneTest, - self).wait_mirror_jobs_completed() + super().wait_mirror_jobs_completed() def reboot_vm(self): """ diff --git a/qemu/tests/blockdev_mirror_sync_top.py b/qemu/tests/blockdev_mirror_sync_top.py index 16061ba547..dc836ac9db 100644 --- a/qemu/tests/blockdev_mirror_sync_top.py +++ b/qemu/tests/blockdev_mirror_sync_top.py @@ -1,7 +1,6 @@ from virttest.qemu_devices.qdevices import QBlockdevFormatNode from provider import backup_utils - from provider.blockdev_mirror_nowait import BlockdevMirrorNowaitTest @@ -11,16 +10,15 @@ class BlockdevMirrorSyncTopTest(BlockdevMirrorNowaitTest): """ def __init__(self, test, params, env): - super(BlockdevMirrorSyncTopTest, self).__init__(test, params, env) + super().__init__(test, params, env) # convert source images to convert images self._convert_images = params.objects("convert_images") - self._convert_nodes = ["drive_%s" % - src for src in self._convert_images] + self._convert_nodes = [f"drive_{src}" for src in self._convert_images] # mirror snapshot images of source images to target images self._snap_images = params.objects("snap_images") - self._snap_nodes = ["drive_%s" % src for src in self._snap_images] + self._snap_nodes = [f"drive_{src}" for src in self._snap_images] def _create_images(self, images): for tag in images: @@ -39,20 +37,16 @@ def create_snapshot_images(self): def _blockdev_add_images(self, images, is_backing_null=False): for tag in images: params = self.params.object_params(tag) - devices = self.main_vm.devices.images_define_by_params(tag, - params, - 'disk') + devices = self.main_vm.devices.images_define_by_params(tag, params, "disk") devices.pop() for dev in devices: if self.main_vm.devices.get_by_qid(dev.get_qid()): continue if isinstance(dev, QBlockdevFormatNode) and is_backing_null: dev.params["backing"] = None - ret = self.main_vm.devices.simple_hotplug(dev, - self.main_vm.monitor) + ret = self.main_vm.devices.simple_hotplug(dev, self.main_vm.monitor) if not ret[1]: - self.test.fail("Failed to hotplug '%s': %s." 
- % (dev, ret[0])) + self.test.fail(f"Failed to hotplug '{dev}': {ret[0]}.") def add_convert_images(self): """blockdev-add convert images: protocol and format nodes only""" @@ -66,7 +60,8 @@ def add_mirror_images(self): """add mirror images where the snapshot images are mirrored""" for tag in self._target_images: disk = self.target_disk_define_by_params( - self.params.object_params(tag), tag) + self.params.object_params(tag), tag + ) # overlay must not have a current backing file, # achieved by passing "backing": null to blockdev-add @@ -81,17 +76,16 @@ def mirror_data_snapshots_to_mirror_images(self): for idx, source_node in enumerate(self._snap_nodes): self._jobs.append( backup_utils.blockdev_mirror_nowait( - self.main_vm, source_node, - self._target_nodes[idx], - **args + self.main_vm, source_node, self._target_nodes[idx], **args ) ) def _blockdev_snapshot(self, nodes, overlays): snapshot_options = {} for idx, source_node in enumerate(nodes): - backup_utils.blockdev_snapshot(self.main_vm, source_node, - overlays[idx], **snapshot_options) + backup_utils.blockdev_snapshot( + self.main_vm, source_node, overlays[idx], **snapshot_options + ) def take_snapshot_on_data_images(self): """snapshot, node: data image node, overlay: snapshot nodes""" diff --git a/qemu/tests/blockdev_mirror_to_rbd.py b/qemu/tests/blockdev_mirror_to_rbd.py index 28ee3bb7bc..945c93e7d9 100644 --- a/qemu/tests/blockdev_mirror_to_rbd.py +++ b/qemu/tests/blockdev_mirror_to_rbd.py @@ -5,6 +5,7 @@ class BlockdevMirrorRBDNode(BlockdevMirrorWaitTest): """ Block mirror to rbd target """ + pass diff --git a/qemu/tests/blockdev_mirror_vm_stop_cont.py b/qemu/tests/blockdev_mirror_vm_stop_cont.py index 9bf93a733f..c88478b2cf 100644 --- a/qemu/tests/blockdev_mirror_vm_stop_cont.py +++ b/qemu/tests/blockdev_mirror_vm_stop_cont.py @@ -1,5 +1,5 @@ -import time import random +import time from provider.blockdev_mirror_parallel import BlockdevMirrorParallelTest @@ -10,7 +10,7 @@ class BlockdevMirrorVMStopContTest(BlockdevMirrorParallelTest): def stop_cont_vm(self): """Stop VM for a while, then resume it""" self.main_vm.pause() - t = int(random.choice(self.params.objects('vm_stop_time_list'))) + t = int(random.choice(self.params.objects("vm_stop_time_list"))) time.sleep(t) self.main_vm.resume() diff --git a/qemu/tests/blockdev_mirror_with_ignore.py b/qemu/tests/blockdev_mirror_with_ignore.py index 28da7dda6c..e2eb68aab6 100644 --- a/qemu/tests/blockdev_mirror_with_ignore.py +++ b/qemu/tests/blockdev_mirror_with_ignore.py @@ -8,17 +8,20 @@ class BlockdevMirrorWithIgnore(BlockdevMirrorNowaitTest): """Block mirror with error ignore on target""" def blockdev_mirror(self): - super(BlockdevMirrorWithIgnore, self).blockdev_mirror() + super().blockdev_mirror() timeout = self.params.get("job_timeout", 600) for job_id in self._jobs: get_event = job_utils.get_event_by_condition - event = get_event(self.main_vm, job_utils.BLOCK_JOB_ERROR_EVENT, - timeout, device=job_id, action='ignore') + event = get_event( + self.main_vm, + job_utils.BLOCK_JOB_ERROR_EVENT, + timeout, + device=job_id, + action="ignore", + ) if not event: - self.test.fail("Mirror job can't reach error after %s seconds" - % timeout) - process.system(self.params['lv_extend_cmd'], - ignore_status=False, shell=True) + self.test.fail(f"Mirror job can't reach error after {timeout} seconds") + process.system(self.params["lv_extend_cmd"], ignore_status=False, shell=True) self.wait_mirror_jobs_completed() diff --git a/qemu/tests/blockdev_snapshot_chains.py 
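blockdev_mirror_with_ignore above waits for a BLOCK_JOB_ERROR event whose data carries both the job's device and action "ignore" before extending the LV and letting the mirror finish. The matching itself is plain dict filtering over the QMP event stream; here is a small standalone sketch of that filter, a simplified stand-in for provider.job_utils.get_event_by_condition (which additionally takes the VM and polls with a timeout), with made-up sample events shaped like QMP's BLOCK_JOB_ERROR payload:

def match_event(events, name, **conditions):
    """Return the first event with the given name whose data matches
    every key/value condition, or None if nothing matches."""
    for event in events:
        data = event.get("data", {})
        if event.get("event") == name and all(
            data.get(k) == v for k, v in conditions.items()
        ):
            return event
    return None

# Made-up sample events for illustration only.
events = [
    {"event": "JOB_STATUS_CHANGE",
     "data": {"id": "mirror_0", "status": "running"}},
    {"event": "BLOCK_JOB_ERROR",
     "data": {"device": "mirror_0", "operation": "write", "action": "ignore"}},
]
print(match_event(events, "BLOCK_JOB_ERROR", device="mirror_0", action="ignore"))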
b/qemu/tests/blockdev_snapshot_chains.py index bce28a7104..4bc8efe5c4 100644 --- a/qemu/tests/blockdev_snapshot_chains.py +++ b/qemu/tests/blockdev_snapshot_chains.py @@ -1,41 +1,39 @@ import logging -from virttest import data_dir -from virttest import error_context +from virttest import data_dir, error_context from provider.blockdev_snapshot_base import BlockDevSnapshotTest from provider.virt_storage.storage_admin import sp_admin -LOG_JOB = logging.getLogger('avocado.test') +LOG_JOB = logging.getLogger("avocado.test") class BlockdevSnapshotChainsTest(BlockDevSnapshotTest): - def __init__(self, test, params, env): self.snapshot_num = int(params.get("snapshot_num", 1)) self.snapshot_chains = [] - super(BlockdevSnapshotChainsTest, self).__init__(test, params, env) + super().__init__(test, params, env) def prepare_snapshot_file(self): for index in range(self.snapshot_num + 1): - snapshot_tag = "sn%s" % index + snapshot_tag = f"sn{index}" if snapshot_tag not in self.snapshot_chains: self.snapshot_chains.append(snapshot_tag) params = self.params.copy() params.setdefault("target_path", data_dir.get_data_dir()) - params["image_size_%s" % snapshot_tag] = self.base_image.size - params["image_name_%s" % snapshot_tag] = snapshot_tag - self.params["image_name_%s" % snapshot_tag] = snapshot_tag + params[f"image_size_{snapshot_tag}"] = self.base_image.size + params[f"image_name_{snapshot_tag}"] = snapshot_tag + self.params[f"image_name_{snapshot_tag}"] = snapshot_tag snapshot_format = params.get("snapshot_format", "qcow2") - params["image_format_%s" % snapshot_tag] = snapshot_format - self.params["image_format_%s" % snapshot_tag] = snapshot_format + params[f"image_format_{snapshot_tag}"] = snapshot_format + self.params[f"image_format_{snapshot_tag}"] = snapshot_format if self.params["image_backend"] == "iscsi_direct": - self.params.update({"enable_iscsi_%s" % snapshot_tag: "no"}) - self.params.update({"image_raw_device_%s" % snapshot_tag: "no"}) + self.params.update({f"enable_iscsi_{snapshot_tag}": "no"}) + self.params.update({f"image_raw_device_{snapshot_tag}": "no"}) elif self.params["image_backend"] == "ceph": - self.params.update({"enable_ceph_%s" % snapshot_tag: "no"}) + self.params.update({f"enable_ceph_{snapshot_tag}": "no"}) elif self.params["image_backend"] == "nbd": - self.params.update({"enable_nbd_%s" % snapshot_tag: "no"}) + self.params.update({f"enable_nbd_{snapshot_tag}": "no"}) image = sp_admin.volume_define_by_params(snapshot_tag, params) image.hotplug(self.main_vm) @@ -45,7 +43,7 @@ def create_snapshot(self): cmd = "blockdev-snapshot" arguments = self.params.copy_from_keys(options) for snapshot_tag in self.snapshot_chains: - overlay = "drive_%s" % snapshot_tag + overlay = f"drive_{snapshot_tag}" arguments.update({"overlay": overlay}) self.main_vm.monitor.cmd(cmd, dict(arguments)) arguments["node"] = arguments["overlay"] @@ -56,8 +54,7 @@ def prepare_clone_vm(self): snapshot_tag = self.snapshot_tag else: snapshot_tag = self.snapshot_chains[-1] - images = self.params["images"].replace( - self.base_tag, snapshot_tag) + images = self.params["images"].replace(self.base_tag, snapshot_tag) vm_params["images"] = images return self.main_vm.clone(params=vm_params) @@ -66,7 +63,7 @@ def verify_snapshot(self): self.main_vm.destroy() base_tag = self.base_tag base_format = self.base_image.get_format() - self.params["image_format_%s" % base_tag] = base_format + self.params[f"image_format_{base_tag}"] = base_format for snapshot_tag in self.snapshot_chains: snapshot_image = 
self.get_image_by_tag(snapshot_tag) base_image = self.get_image_by_tag(base_tag) diff --git a/qemu/tests/blockdev_snapshot_data_file.py b/qemu/tests/blockdev_snapshot_data_file.py index bb8001979f..b59425eec2 100644 --- a/qemu/tests/blockdev_snapshot_data_file.py +++ b/qemu/tests/blockdev_snapshot_data_file.py @@ -1,19 +1,17 @@ import logging -from virttest import error_context -from virttest import data_dir +from virttest import data_dir, error_context from provider import backup_utils from provider.blockdev_snapshot_base import BlockDevSnapshotTest from provider.virt_storage.storage_admin import sp_admin -LOG_JOB = logging.getLogger('avocado.test') +LOG_JOB = logging.getLogger("avocado.test") class BlkSnapshotWithDatafile(BlockDevSnapshotTest): - def __init__(self, test, params, env): - super(BlkSnapshotWithDatafile, self).__init__(test, params, env) + super().__init__(test, params, env) self.trash = [] def prepare_snapshot_file(self): @@ -29,16 +27,18 @@ def check_data_file_in_block_info(self): filename = item["inserted"]["image"]["filename"] if self.snapshot_tag in filename: if "data-file" in filename: - data_file_tag = self.params["image_data_file_%s" % self.snapshot_tag] + data_file_tag = self.params[f"image_data_file_{self.snapshot_tag}"] data_file_image = self.get_image_by_tag(data_file_tag) data_file = eval(filename.lstrip("json:"))["data-file"] if data_file["file"]["filename"] != data_file_image.image_filename: - self.test.fail("data-file info is not as expected: %s" % data_file_image) + self.test.fail( + f"data-file info is not as expected: {data_file_image}" + ) break else: self.test.fail("Data-file option not included in block info") else: - self.test.fail("Device: %s not found in block info" % self.snapshot_tag) + self.test.fail(f"Device: {self.snapshot_tag} not found in block info") def snapshot_test(self): self.create_snapshot() diff --git a/qemu/tests/blockdev_snapshot_guest_agent.py b/qemu/tests/blockdev_snapshot_guest_agent.py index af13a927bf..0c01c70c10 100644 --- a/qemu/tests/blockdev_snapshot_guest_agent.py +++ b/qemu/tests/blockdev_snapshot_guest_agent.py @@ -1,30 +1,30 @@ import logging import time -from virttest import utils_test -from virttest import error_context -from virttest import guest_agent +from virttest import error_context, guest_agent, utils_test from provider.blockdev_snapshot_base import BlockDevSnapshotTest -LOG_JOB = logging.getLogger('avocado.test') +LOG_JOB = logging.getLogger("avocado.test") class BlockdevSnapshotGuestAgentTest(BlockDevSnapshotTest): - def pre_test(self): - super(BlockdevSnapshotGuestAgentTest, self).pre_test() - params = self.params.object_params(self.params['agent_name']) + super().pre_test() + params = self.params.object_params(self.params["agent_name"]) params["monitor_filename"] = self.main_vm.get_serial_console_filename( - self.params['agent_name']) + self.params["agent_name"] + ) self.guest_agent = guest_agent.QemuAgent( - self.main_vm, self.params['agent_name'], - self.params['agent_serial_type'], params + self.main_vm, + self.params["agent_name"], + self.params["agent_serial_type"], + params, ) session = self.main_vm.wait_for_login() try: - if session.cmd_status(self.params['enable_nonsecurity_files_cmd']) != 0: - session.cmd_status(self.params['enable_permissive_cmd']) + if session.cmd_status(self.params["enable_nonsecurity_files_cmd"]) != 0: + session.cmd_status(self.params["enable_permissive_cmd"]) finally: session.close() @@ -37,12 +37,10 @@ def create_snapshot(self): bg_test.start() LOG_JOB.info("Sleep some time to 
wait for scp's preparation done") time.sleep(30) - error_context.context("freeze guest before snapshot", - LOG_JOB.info) + error_context.context("freeze guest before snapshot", LOG_JOB.info) self.guest_agent.fsfreeze() - super(BlockdevSnapshotGuestAgentTest, self).create_snapshot() - error_context.context("thaw guest after snapshot", - LOG_JOB.info) + super().create_snapshot() + error_context.context("thaw guest after snapshot", LOG_JOB.info) self.guest_agent.fsthaw() bg_test.join() @@ -63,7 +61,7 @@ def run(test, params, env): :param env: Dictionary with test environment. """ base_image = params.get("images", "image1").split()[0] - params.setdefault("image_name_%s" % base_image, params["image_name"]) - params.setdefault("image_format_%s" % base_image, params["image_format"]) + params.setdefault(f"image_name_{base_image}", params["image_name"]) + params.setdefault(f"image_format_{base_image}", params["image_format"]) snapshot_guest_agent = BlockdevSnapshotGuestAgentTest(test, params, env) snapshot_guest_agent.run_test() diff --git a/qemu/tests/blockdev_snapshot_install.py b/qemu/tests/blockdev_snapshot_install.py index d8a70abf6d..154bf21a10 100644 --- a/qemu/tests/blockdev_snapshot_install.py +++ b/qemu/tests/blockdev_snapshot_install.py @@ -1,9 +1,8 @@ -import time import random import re +import time -from virttest import utils_test -from virttest import utils_misc +from virttest import utils_misc, utils_test from virttest.tests import unattended_install from provider.blockdev_snapshot_base import BlockDevSnapshotTest @@ -21,6 +20,7 @@ def run(test, params, env): :param params: Dictionary with the test parameters :param env: Dictionary with test environment. """ + def tag_for_install(vm, tag): if vm.serial_console: serial_output = vm.serial_console.get_output() @@ -30,15 +30,16 @@ def tag_for_install(vm, tag): return False base_image = params.get("images", "image1").split()[0] - params.update( - {"image_format_%s" % base_image: params["image_format"]}) + params.update({f"image_format_{base_image}": params["image_format"]}) snapshot_test = BlockDevSnapshotTest(test, params, env) args = (test, params, env) bg = utils_test.BackgroundTest(unattended_install.run, args) bg.start() if bg.is_alive(): tag = params["tag_for_install_start"] - if utils_misc.wait_for(lambda: tag_for_install(snapshot_test.main_vm, tag), 120, 10, 5): + if utils_misc.wait_for( + lambda: tag_for_install(snapshot_test.main_vm, tag), 120, 10, 5 + ): test.log.info("sleep random time before do snapshots") time.sleep(random.randint(120, 600)) snapshot_test.pre_test() diff --git a/qemu/tests/blockdev_snapshot_merge.py b/qemu/tests/blockdev_snapshot_merge.py index fef02a8c56..83d5e89549 100644 --- a/qemu/tests/blockdev_snapshot_merge.py +++ b/qemu/tests/blockdev_snapshot_merge.py @@ -5,7 +5,7 @@ from provider.blockdev_snapshot_base import BlockDevSnapshotTest from provider.virt_storage.storage_admin import sp_admin -LOG_JOB = logging.getLogger('avocado.test') +LOG_JOB = logging.getLogger("avocado.test") class BlockdevSnapshotMergeTest(BlockDevSnapshotTest): @@ -17,17 +17,17 @@ def pre_test(self): self.configure_data_disk() def prepare_snapshot_file(self): - self.params["image_size_%s" % self.snapshot_tag] = self.base_image.size - self.params["image_name_%s" % self.snapshot_tag] = 'images/' + self.snapshot_tag + self.params[f"image_size_{self.snapshot_tag}"] = self.base_image.size + self.params[f"image_name_{self.snapshot_tag}"] = "images/" + self.snapshot_tag snapshot_format = self.params.get("snapshot_format", "qcow2") - 
self.params["image_format_%s" % self.snapshot_tag] = snapshot_format + self.params[f"image_format_{self.snapshot_tag}"] = snapshot_format if self.params["image_backend"] == "iscsi_direct": - self.params.update({"enable_iscsi_%s" % self.snapshot_tag: "no"}) - self.params.update({"image_raw_device_%s" % self.snapshot_tag: "no"}) + self.params.update({f"enable_iscsi_{self.snapshot_tag}": "no"}) + self.params.update({f"image_raw_device_{self.snapshot_tag}": "no"}) elif self.params["image_backend"] == "ceph": - self.params.update({"enable_ceph_%s" % self.snapshot_tag: "no"}) + self.params.update({f"enable_ceph_{self.snapshot_tag}": "no"}) elif self.params["image_backend"] == "nbd": - self.params.update({"enable_nbd_%s" % self.snapshot_tag: "no"}) + self.params.update({f"enable_nbd_{self.snapshot_tag}": "no"}) params = self.params.copy() params.setdefault("target_path", data_dir.get_data_dir()) image = sp_admin.volume_define_by_params(self.snapshot_tag, params) @@ -60,13 +60,13 @@ def snapshot_test(self): self.snapshot_images = [] self.snapshot_tags = [self.base_tag] for index in range(1, snapshot_nums + 1): - self.snapshot_tag = "sn%s" % index + self.snapshot_tag = f"sn{index}" if self.snapshot_tag not in self.snapshot_tags: self.snapshot_tags.append(self.snapshot_tag) snapshot_image = self.prepare_snapshot_file() self.snapshot_images.append(snapshot_image) - self.params["overlay"] = "drive_%s" % self.snapshot_tag + self.params["overlay"] = f"drive_{self.snapshot_tag}" self.create_snapshot() self.params["node"] = self.params["overlay"] dd_filename = self.params.get("dd_filename") % index diff --git a/qemu/tests/blockdev_snapshot_multi_disks.py b/qemu/tests/blockdev_snapshot_multi_disks.py index d83d18dfc8..6f6e280bd8 100644 --- a/qemu/tests/blockdev_snapshot_multi_disks.py +++ b/qemu/tests/blockdev_snapshot_multi_disks.py @@ -1,47 +1,47 @@ import logging -from virttest import data_dir -from virttest import utils_disk -from virttest import error_context +from virttest import data_dir, error_context, utils_disk from provider import backup_utils -from provider.virt_storage.storage_admin import sp_admin from provider.blockdev_snapshot_base import BlockDevSnapshotTest +from provider.virt_storage.storage_admin import sp_admin -LOG_JOB = logging.getLogger('avocado.test') +LOG_JOB = logging.getLogger("avocado.test") class BlockdevSnapshotMultiDisksTest(BlockDevSnapshotTest): - def __init__(self, test, params, env): self.source_disks = params["source_disks"].split() self.target_disks = params["target_disks"].split() self.snapshot_tag_list = params["snapshot_tag"].split() self.base_tag_list = params["base_tag"].split() - super(BlockdevSnapshotMultiDisksTest, self).__init__(test, params, env) + super().__init__(test, params, env) def prepare_clone_vm(self): vm_params = self.main_vm.params.copy() - for snapshot_tag, base_tag in zip(self.snapshot_tag_list, - self.base_tag_list): + for snapshot_tag, base_tag in zip(self.snapshot_tag_list, self.base_tag_list): images = self.main_vm.params["images"].replace( - self.base_tag, self.snapshot_tag) + self.base_tag, self.snapshot_tag + ) vm_params["images"] = images return self.main_vm.clone(params=vm_params) def configure_data_disk(self): - os_type = self.params["os_type"] + self.params["os_type"] for snapshot_tag in self.snapshot_tag_list: session = self.main_vm.wait_for_login() try: - info = backup_utils.get_disk_info_by_param(snapshot_tag, - self.params, - session) + info = backup_utils.get_disk_info_by_param( + snapshot_tag, self.params, session + ) assert 
info, "Disk not found in guest!" mount_point = utils_disk.configure_empty_linux_disk( - session, info["kname"], info["size"])[0] - self.disks_info[snapshot_tag] = [r"/dev/%s1" % info["kname"], - mount_point] + session, info["kname"], info["size"] + )[0] + self.disks_info[snapshot_tag] = [ + r"/dev/{}1".format(info["kname"]), + mount_point, + ] finally: session.close() @@ -59,33 +59,26 @@ def prepare_snapshot_file(self): @error_context.context_aware def create_snapshot(self): - error_context.context("do snaoshot on multi_disks", - LOG_JOB.info) - assert len( - self.target_disks) == len( - self.source_disks), "No enough target disks define in cfg!" - source_lst = list(map(lambda x: "drive_%s" % x, self.source_disks)) - target_lst = list(map(lambda x: "drive_%s" % x, self.target_disks)) + error_context.context("do snaoshot on multi_disks", LOG_JOB.info) + assert len(self.target_disks) == len( + self.source_disks + ), "No enough target disks define in cfg!" + source_lst = list(map(lambda x: f"drive_{x}", self.source_disks)) + target_lst = list(map(lambda x: f"drive_{x}", self.target_disks)) arguments = {} if len(source_lst) > 1: - error_context.context( - "snapshot %s to %s " % (source_lst, target_lst)) + error_context.context(f"snapshot {source_lst} to {target_lst} ") backup_utils.blockdev_batch_snapshot( - self.main_vm, source_lst, target_lst, **arguments) + self.main_vm, source_lst, target_lst, **arguments + ) else: - error_context.context( - "snapshot %s to %s" % - (source_lst[0], target_lst[0])) - backup_utils.blockdev_snapshot( - self.main_vm, - source_lst[0], - target_lst[0]) + error_context.context(f"snapshot {source_lst[0]} to {target_lst[0]}") + backup_utils.blockdev_snapshot(self.main_vm, source_lst[0], target_lst[0]) def verify_snapshot(self): if self.main_vm.is_alive(): self.main_vm.destroy() - for snapshot_tag, base_tag in zip(self.snapshot_tag_list, - self.base_tag_list): + for snapshot_tag, base_tag in zip(self.snapshot_tag_list, self.base_tag_list): if self.is_blockdev_mode(): snapshot_image = self.get_image_by_tag(snapshot_tag) base_image = self.get_image_by_tag(base_tag) @@ -127,7 +120,10 @@ def run(test, params, env): """ base_image = params.get("images", "image1").split()[0] params.update( - {"image_name_%s" % base_image: params["image_name"], - "image_format_%s" % base_image: params["image_format"]}) + { + f"image_name_{base_image}": params["image_name"], + f"image_format_{base_image}": params["image_format"], + } + ) snapshot_multi_disks = BlockdevSnapshotMultiDisksTest(test, params, env) snapshot_multi_disks.run_test() diff --git a/qemu/tests/blockdev_snapshot_readonly.py b/qemu/tests/blockdev_snapshot_readonly.py index 66ff61fd3f..a38122e35c 100644 --- a/qemu/tests/blockdev_snapshot_readonly.py +++ b/qemu/tests/blockdev_snapshot_readonly.py @@ -4,27 +4,25 @@ from virttest import utils_disk from provider import backup_utils - from provider.blockdev_snapshot_base import BlockDevSnapshotTest class BlockdevSnapshotReadonly(BlockDevSnapshotTest): def configure_data_disk(self): - os_type = self.params["os_type"] + self.params["os_type"] disk_params = self.params.object_params(self.base_tag) disk_size = disk_params["image_size"] session = self.main_vm.wait_for_login() disk_id = self.get_linux_disk_path(session, disk_size) assert disk_id, "Disk not found in guest!" 
try: - mount_point = utils_disk.configure_empty_linux_disk( - session, disk_id, disk_size)[0] + utils_disk.configure_empty_linux_disk(session, disk_id, disk_size)[0] except ShellCmdError as e: - disk_tag = r"/dev/%s" % disk_id - error_msg = self.params['error_msg'] % disk_tag + disk_tag = rf"/dev/{disk_id}" + error_msg = self.params["error_msg"] % disk_tag if not re.search(error_msg, str(e)): - self.test.fail("Unexpected disk format error: %s" % str(e)) - self.disks_info[self.base_tag] = [disk_tag, '/mnt'] + self.test.fail(f"Unexpected disk format error: {str(e)}") + self.disks_info[self.base_tag] = [disk_tag, "/mnt"] else: self.test.fail("Read-only disk is formated") finally: diff --git a/qemu/tests/blockdev_snapshot_reboot.py b/qemu/tests/blockdev_snapshot_reboot.py index 65c5e4fa28..5dd398555b 100644 --- a/qemu/tests/blockdev_snapshot_reboot.py +++ b/qemu/tests/blockdev_snapshot_reboot.py @@ -1,26 +1,23 @@ import logging -import time import random +import time -from virttest import utils_test -from virttest import error_context +from virttest import error_context, utils_test from provider.blockdev_snapshot_base import BlockDevSnapshotTest -LOG_JOB = logging.getLogger('avocado.test') +LOG_JOB = logging.getLogger("avocado.test") class BlockdevSnapshotRebootTest(BlockDevSnapshotTest): - @error_context.context_aware def create_snapshot(self): - error_context.context("do snaoshot during guest rebooting", - LOG_JOB.info) + error_context.context("do snaoshot during guest rebooting", LOG_JOB.info) bg_test = utils_test.BackgroundTest(self.vm_reset, "") bg_test.start() LOG_JOB.info("sleep random time to perform before snapshot") time.sleep(random.randint(0, 10)) - super(BlockdevSnapshotRebootTest, self).create_snapshot() + super().create_snapshot() if bg_test.is_alive(): bg_test.join() @@ -43,7 +40,7 @@ def run(test, params, env): :param env: Dictionary with test environment. """ base_image = params.get("images", "image1").split()[0] - params.setdefault("image_name_%s" % base_image, params["image_name"]) - params.setdefault("image_format_%s" % base_image, params["image_format"]) + params.setdefault(f"image_name_{base_image}", params["image_name"]) + params.setdefault(f"image_format_{base_image}", params["image_format"]) snapshot_reboot = BlockdevSnapshotRebootTest(test, params, env) snapshot_reboot.run_test() diff --git a/qemu/tests/blockdev_snapshot_stop_cont.py b/qemu/tests/blockdev_snapshot_stop_cont.py index 096bb4e63e..af09f11e41 100644 --- a/qemu/tests/blockdev_snapshot_stop_cont.py +++ b/qemu/tests/blockdev_snapshot_stop_cont.py @@ -4,17 +4,17 @@ from provider.blockdev_snapshot_base import BlockDevSnapshotTest -LOG_JOB = logging.getLogger('avocado.test') +LOG_JOB = logging.getLogger("avocado.test") class BlockdevSnapshotStopContTest(BlockDevSnapshotTest): - @error_context.context_aware def create_snapshot(self): - error_context.context("do snaoshot during running guest stop_cont", - LOG_JOB.info) + error_context.context( + "do snaoshot during running guest stop_cont", LOG_JOB.info + ) self.main_vm.pause() - super(BlockdevSnapshotStopContTest, self).create_snapshot() + super().create_snapshot() self.main_vm.resume() @@ -33,7 +33,7 @@ def run(test, params, env): :param env: Dictionary with test environment. 
""" base_image = params.get("images", "image1").split()[0] - params.setdefault("image_name_%s" % base_image, params["image_name"]) - params.setdefault("image_format_%s" % base_image, params["image_format"]) + params.setdefault(f"image_name_{base_image}", params["image_name"]) + params.setdefault(f"image_format_{base_image}", params["image_format"]) snapshot_stop_cont = BlockdevSnapshotStopContTest(test, params, env) snapshot_stop_cont.run_test() diff --git a/qemu/tests/blockdev_snapshot_stress.py b/qemu/tests/blockdev_snapshot_stress.py index f19b7f620a..c15a16220c 100644 --- a/qemu/tests/blockdev_snapshot_stress.py +++ b/qemu/tests/blockdev_snapshot_stress.py @@ -1,22 +1,21 @@ import logging -from virttest import utils_test -from virttest import error_context +from virttest import error_context, utils_test from provider.blockdev_snapshot_base import BlockDevSnapshotTest -LOG_JOB = logging.getLogger('avocado.test') +LOG_JOB = logging.getLogger("avocado.test") class BlockdevSnapshotStressTest(BlockDevSnapshotTest): - @error_context.context_aware def create_snapshot(self): - error_context.context("do snaoshot during running stress in guest", - LOG_JOB.info) + error_context.context( + "do snaoshot during running stress in guest", LOG_JOB.info + ) stress_test = utils_test.VMStress(self.main_vm, "stress", self.params) stress_test.load_stress_tool() - super(BlockdevSnapshotStressTest, self).create_snapshot() + super().create_snapshot() def run(test, params, env): @@ -34,7 +33,7 @@ def run(test, params, env): :param env: Dictionary with test environment. """ base_image = params.get("images", "image1").split()[0] - params.setdefault("image_name_%s" % base_image, params["image_name"]) - params.setdefault("image_format_%s" % base_image, params["image_format"]) + params.setdefault(f"image_name_{base_image}", params["image_name"]) + params.setdefault(f"image_format_{base_image}", params["image_format"]) snapshot_stress = BlockdevSnapshotStressTest(test, params, env) snapshot_stress.run_test() diff --git a/qemu/tests/blockdev_stream_backing_file.py b/qemu/tests/blockdev_stream_backing_file.py index b67ddd172d..7201f39384 100644 --- a/qemu/tests/blockdev_stream_backing_file.py +++ b/qemu/tests/blockdev_stream_backing_file.py @@ -7,28 +7,30 @@ class BlockdevStreamBackingFileTest(BlockDevStreamTest): """Do block-stream with backing-file set""" def __init__(self, test, params, env): - super(BlockdevStreamBackingFileTest, self).__init__(test, params, env) + super().__init__(test, params, env) image = self.base_image.image_filename - self._stream_options['base'] = image - if self.params.get_boolean('with_abspath'): - self._stream_options['backing-file'] = image + self._stream_options["base"] = image + if self.params.get_boolean("with_abspath"): + self._stream_options["backing-file"] = image else: - self._stream_options['backing-file'] = os.path.relpath(image) + self._stream_options["backing-file"] = os.path.relpath(image) def _compare_backing(self, block): - bk = block.get('image', {}) - if bk.get('backing-filename') != self._stream_options['backing-file']: - self.test.fail('backing filename changed: %s vs %s' - % (bk.get('backing-filename'), - self._stream_options['backing-file'])) + bk = block.get("image", {}) + if bk.get("backing-filename") != self._stream_options["backing-file"]: + self.test.fail( + "backing filename changed: {} vs {}".format( + bk.get("backing-filename"), self._stream_options["backing-file"] + ) + ) def check_backing_file(self): for item in self.main_vm.monitor.query("block"): if 
self.base_tag in item["qdev"]: - self._compare_backing(item.get('inserted', {})) + self._compare_backing(item.get("inserted", {})) break else: - self.test.fail('Failed to get device: %s' % self.base_tag) + self.test.fail(f"Failed to get device: {self.base_tag}") def do_test(self): self.create_snapshot() diff --git a/qemu/tests/blockdev_stream_base_itself.py b/qemu/tests/blockdev_stream_base_itself.py index e95220578d..9f597ec5d3 100644 --- a/qemu/tests/blockdev_stream_base_itself.py +++ b/qemu/tests/blockdev_stream_base_itself.py @@ -7,14 +7,14 @@ from provider.blockdev_stream_base import BlockDevStreamTest from provider.virt_storage.storage_admin import sp_admin -LOG_JOB = logging.getLogger('avocado.test') +LOG_JOB = logging.getLogger("avocado.test") class BlockdevStreamBaseitself(BlockDevStreamTest): """Do block-stream based on itself""" def __init__(self, test, params, env): - super(BlockdevStreamBaseitself, self).__init__(test, params, env) + super().__init__(test, params, env) self._snapshot_images = self.params.objects("snapshot_images") self._trash = [] @@ -32,21 +32,25 @@ def prepare_snapshot_file(self): def snapshot_test(self): """create one snapshot, create one new file""" - self.generate_tempfile(self.disks_info[self.base_tag][1], - filename="base", - size=self.params["tempfile_size"]) + self.generate_tempfile( + self.disks_info[self.base_tag][1], + filename="base", + size=self.params["tempfile_size"], + ) self.snapshot_chain = [self.base_tag] + self._snapshot_images for idx in range(1, len(self.snapshot_chain)): backup_utils.blockdev_snapshot( self.main_vm, - "drive_%s" % self.snapshot_chain[idx-1], - "drive_%s" % self.snapshot_chain[idx] + f"drive_{self.snapshot_chain[idx - 1]}", + f"drive_{self.snapshot_chain[idx]}", ) - self.generate_tempfile(self.disks_info[self.base_tag][1], - filename=self.snapshot_chain[idx], - size=self.params["tempfile_size"]) + self.generate_tempfile( + self.disks_info[self.base_tag][1], + filename=self.snapshot_chain[idx], + size=self.params["tempfile_size"], + ) def _remove_images(self): for img in self._trash: diff --git a/qemu/tests/blockdev_stream_cor_base.py b/qemu/tests/blockdev_stream_cor_base.py index dd73d69bc0..908a3162f3 100644 --- a/qemu/tests/blockdev_stream_cor_base.py +++ b/qemu/tests/blockdev_stream_cor_base.py @@ -1,20 +1,19 @@ import json import time -from virttest import data_dir -from virttest import qemu_storage +from virttest import data_dir, qemu_storage from virttest.qemu_capabilities import Flags +from provider import backup_utils from provider.blockdev_stream_base import BlockDevStreamTest from provider.virt_storage.storage_admin import sp_admin -from provider import backup_utils class BlockdevStreamCORBase(BlockDevStreamTest): - """Do block-stream with copy-on-read filter as base """ + """Do block-stream with copy-on-read filter as base""" def __init__(self, test, params, env): - super(BlockdevStreamCORBase, self).__init__(test, params, env) + super().__init__(test, params, env) self.snapshot_chain = params["snapshot_chain"].split() def prepare_snapshot_file(self): @@ -27,8 +26,12 @@ def prepare_snapshot_file(self): self.trash.append(image) def _is_same_file(self, file_params, file_opts): - mapping = {"gluster": "path", "iscsi": "lun", - "nbd": "server.port", "rbd": "image"} + mapping = { + "gluster": "path", + "iscsi": "lun", + "nbd": "server.port", + "rbd": "image", + } option = mapping.get(file_params["driver"], "filename") return file_params[option] == file_opts[option] @@ -45,14 +48,16 @@ def _is_backing_exit(backing, 
depth): filename = image.image_filename is_cor = backing["driver"] == "copy-on-read" backing_mask = self.main_vm.check_capability( - Flags.BLOCKJOB_BACKING_MASK_PROTOCOL) + Flags.BLOCKJOB_BACKING_MASK_PROTOCOL + ) raw_format = image.image_format == "raw" raw_elimi = backing_mask and raw_format - opts = backing["file"]["file"] if ( - is_cor and not raw_elimi) else backing["file"] + opts = ( + backing["file"]["file"] if (is_cor and not raw_elimi) else backing["file"] + ) file_opts = qemu_storage.filename_to_file_opts(filename) if not self._is_same_file(opts, file_opts): - self.test.fail("file %s not in backing" % filename) + self.test.fail(f"file {filename} not in backing") def check_backing_chain(self): out = self.main_vm.monitor.query("block") @@ -60,8 +65,9 @@ def check_backing_chain(self): if self.base_tag in item["qdev"]: backing = item["inserted"].get("backing_file") if not backing: - self.test.fail("Failed to get backing_file for qdev %s" - % self.base_tag) + self.test.fail( + f"Failed to get backing_file for qdev {self.base_tag}" + ) backing_dict = json.loads(backing[5:]) backing_depth = len(self.backing_chain) for image_tag in self.backing_chain: @@ -70,7 +76,7 @@ def check_backing_chain(self): backing_depth -= 1 break else: - self.test.fail("Failed to find %s" % self.base_tag) + self.test.fail(f"Failed to find {self.base_tag}") def create_snapshot(self): self.backing_chain = [self.base_tag] @@ -78,20 +84,21 @@ def create_snapshot(self): cmd = "blockdev-snapshot" arguments = self.params.copy_from_keys(options) for snapshot_tag in self.snapshot_chain: - arguments["overlay"] = "drive_%s" % snapshot_tag + arguments["overlay"] = f"drive_{snapshot_tag}" self.main_vm.monitor.cmd(cmd, dict(arguments)) - arguments["node"] = "drive_%s" % snapshot_tag + arguments["node"] = f"drive_{snapshot_tag}" if snapshot_tag != self.snapshot_chain[-1]: self.backing_chain.append(snapshot_tag) - self._top_device = "drive_%s" % self.snapshot_chain[-1] + self._top_device = f"drive_{self.snapshot_chain[-1]}" self.check_backing_chain() def blockdev_stream(self): - backup_utils.blockdev_stream(self.main_vm, self._top_device, - **self._stream_options) + backup_utils.blockdev_stream( + self.main_vm, self._top_device, **self._stream_options + ) time.sleep(0.5) index = self.backing_chain.index(self.params["base_tag"]) - del self.backing_chain[index+1:] + del self.backing_chain[index + 1 :] self.check_backing_chain() def do_test(self): diff --git a/qemu/tests/blockdev_stream_dirty_bitmap.py b/qemu/tests/blockdev_stream_dirty_bitmap.py index 4154185568..967bfc2293 100644 --- a/qemu/tests/blockdev_stream_dirty_bitmap.py +++ b/qemu/tests/blockdev_stream_dirty_bitmap.py @@ -8,17 +8,16 @@ class BlkStreamWithDirtybitmap(BlockDevStreamTest): """Do block-stream with active layer attached a bitmap""" def check_bitmap_info(self): - bitmap = block_bitmap.get_bitmap_by_name(self.main_vm, - self._top_device, - self.bitmap_name) + bitmap = block_bitmap.get_bitmap_by_name( + self.main_vm, self._top_device, self.bitmap_name + ) if bitmap: count = bitmap["count"] return count def add_bitmap(self): - self.bitmap_name = "bitmap_%s" % self.snapshot_tag - kargs = {'bitmap_name': self.bitmap_name, - 'target_device': self._top_device} + self.bitmap_name = f"bitmap_{self.snapshot_tag}" + kargs = {"bitmap_name": self.bitmap_name, "target_device": self._top_device} block_bitmap.block_dirty_bitmap_add(self.main_vm, kargs) def umount_data_disk(self): @@ -46,8 +45,10 @@ def do_test(self): self.blockdev_stream() bcount_astream = 
self.check_bitmap_info() if bcount_bstream != bcount_astream: - self.test.fail("bitmap count changed after stream with actual:%d " - "expected:%d" % (bcount_astream, bcount_bstream)) + self.test.fail( + "bitmap count changed after stream with actual:%d " + "expected:%d" % (bcount_astream, bcount_bstream) + ) def run(test, params, env): diff --git a/qemu/tests/blockdev_stream_filter_nodename.py b/qemu/tests/blockdev_stream_filter_nodename.py index 89d23c2fdb..a57fdab148 100644 --- a/qemu/tests/blockdev_stream_filter_nodename.py +++ b/qemu/tests/blockdev_stream_filter_nodename.py @@ -1,5 +1,4 @@ -from provider import blockdev_stream_nowait -from provider import job_utils +from provider import blockdev_stream_nowait, job_utils class BlkdevStreamFilterNode(blockdev_stream_nowait.BlockdevStreamNowaitTest): @@ -8,7 +7,7 @@ class BlkdevStreamFilterNode(blockdev_stream_nowait.BlockdevStreamNowaitTest): """ def _init_stream_options(self): - super(BlkdevStreamFilterNode, self)._init_stream_options() + super()._init_stream_options() filter_node_name = self.params["filter_node_name"] if filter_node_name: self._stream_options["filter-node-name"] = filter_node_name @@ -21,28 +20,38 @@ def check_filter_nodes_name(self, during_stream=True): if during_stream: for block in blocks_info: block_node_name = block["inserted"].get("node-name") - if (self.params.get("source_images") in block["qdev"] and - block_node_name == self.params['filter_node_name']): + if ( + self.params.get("source_images") in block["qdev"] + and block_node_name == self.params["filter_node_name"] + ): break else: - self.test.fail("Filter node name '%s' is not set as expected" - "during stream" % self.params['filter_node_name']) + self.test.fail( + "Filter node name '{}' is not set as expected" + "during stream".format(self.params["filter_node_name"]) + ) else: for block in blocks_info: block_node_name = block["inserted"].get("node-name") - if (self.params.get("source_images") in block["qdev"] and - block_node_name != self.params['filter_node_name']): + if ( + self.params.get("source_images") in block["qdev"] + and block_node_name != self.params["filter_node_name"] + ): break else: - self.test.fail("Filter node name '%s' set after stream" - % self.params['filter_node_name']) + self.test.fail( + "Filter node name '{}' set after stream".format( + self.params["filter_node_name"] + ) + ) def do_test(self): self.snapshot_test() self.blockdev_stream() job_utils.check_block_jobs_started( - self.main_vm, [self._job], - self.params.get_numeric('job_started_timeout', 60) + self.main_vm, + [self._job], + self.params.get_numeric("job_started_timeout", 60), ) self.check_filter_nodes_name() self.wait_stream_job_completed() diff --git a/qemu/tests/blockdev_stream_forbidden_actions.py b/qemu/tests/blockdev_stream_forbidden_actions.py index 2d016192dd..68a711a575 100644 --- a/qemu/tests/blockdev_stream_forbidden_actions.py +++ b/qemu/tests/blockdev_stream_forbidden_actions.py @@ -9,14 +9,13 @@ class BlockdevStreamForbiddenActionsTest(BlockdevStreamNowaitTest): """Do qmp commands during block-stream""" def __init__(self, test, params, env): - super(BlockdevStreamForbiddenActionsTest, self).__init__(test, - params, - env) + super().__init__(test, params, env) self._snapshot_images = self.params.objects("snapshot_images") self._trash = [] def prepare_snapshot_file(self): """hotplug all snapshot images""" + def _disk_define_by_params(tag): params = self.params.copy() params.setdefault("target_path", data_dir.get_data_dir()) @@ -31,51 +30,52 @@ def 
post_test(self): list(map(sp_admin.remove_volume, self._trash)) def commit(self): - self.main_vm.monitor.cmd( - "block-commit", {'device': self._top_device} - ) + self.main_vm.monitor.cmd("block-commit", {"device": self._top_device}) def resize(self): self.main_vm.monitor.cmd( - "block_resize", - {'node-name': self._top_device, 'size': 1024*1024*1024} + "block_resize", {"node-name": self._top_device, "size": 1024 * 1024 * 1024} ) def mirror(self): self.main_vm.monitor.cmd( "blockdev-mirror", - {'device': self._top_device, - 'target': self.params['overlay_node'], 'sync': 'full'} + { + "device": self._top_device, + "target": self.params["overlay_node"], + "sync": "full", + }, ) def snapshot(self): self.main_vm.monitor.cmd( "blockdev-snapshot", - {'node': self._top_device, 'overlay': self.params['overlay_node']} + {"node": self._top_device, "overlay": self.params["overlay_node"]}, ) def stream(self): - self.main_vm.monitor.cmd("block-stream", {'device': self._top_device}) + self.main_vm.monitor.cmd("block-stream", {"device": self._top_device}) def do_forbidden_actions(self): """Run the qmp commands one by one, all should fail""" - for action in self.params.objects('forbidden_actions'): - error_msg = self.params['error_msg_%s' % action] + for action in self.params.objects("forbidden_actions"): + error_msg = self.params[f"error_msg_{action}"] f = getattr(self, action) try: f() except QMPCmdError as e: if error_msg not in str(e): - self.test.fail('Unexpected error: %s' % str(e)) + self.test.fail(f"Unexpected error: {str(e)}") else: - self.test.fail('Unexpected qmp command success') + self.test.fail("Unexpected qmp command success") def do_test(self): self.create_snapshot() self.blockdev_stream() self.do_forbidden_actions() - self.main_vm.monitor.cmd("block-job-set-speed", - {'device': self._job, 'speed': 0}) + self.main_vm.monitor.cmd( + "block-job-set-speed", {"device": self._job, "speed": 0} + ) self.wait_stream_job_completed() diff --git a/qemu/tests/blockdev_stream_general.py b/qemu/tests/blockdev_stream_general.py index dddc6e942e..731de13385 100644 --- a/qemu/tests/blockdev_stream_general.py +++ b/qemu/tests/blockdev_stream_general.py @@ -1,5 +1,4 @@ from provider import job_utils - from provider.blockdev_stream_nowait import BlockdevStreamNowaitTest @@ -10,35 +9,39 @@ def do_test(self): self.snapshot_test() self.blockdev_stream() job_utils.check_block_jobs_started( - self.main_vm, [self._job], - self.params.get_numeric('job_started_timeout', 30) + self.main_vm, + [self._job], + self.params.get_numeric("job_started_timeout", 30), ) - self.main_vm.monitor.cmd("job-pause", {'id': self._job}) + self.main_vm.monitor.cmd("job-pause", {"id": self._job}) job_utils.wait_until_job_status_match( - self.main_vm, 'paused', self._job, - self.params.get_numeric('job_paused_interval', 30) + self.main_vm, + "paused", + self._job, + self.params.get_numeric("job_paused_interval", 30), ) self.main_vm.monitor.cmd( "block-job-set-speed", - {'device': self._job, - 'speed': self.params.get_numeric('resume_speed')} + {"device": self._job, "speed": self.params.get_numeric("resume_speed")}, ) - self.main_vm.monitor.cmd("job-resume", {'id': self._job}) + self.main_vm.monitor.cmd("job-resume", {"id": self._job}) job_utils.wait_until_job_status_match( - self.main_vm, 'running', self._job, - self.params.get_numeric('job_running_timeout', 300) + self.main_vm, + "running", + self._job, + self.params.get_numeric("job_running_timeout", 300), ) - self.main_vm.monitor.cmd("job-cancel", {'id': self._job}) + 
self.main_vm.monitor.cmd("job-cancel", {"id": self._job}) event = job_utils.get_event_by_condition( - self.main_vm, 'BLOCK_JOB_CANCELLED', - self.params.get_numeric('job_cancelled_timeout', 30), - device=self._job + self.main_vm, + "BLOCK_JOB_CANCELLED", + self.params.get_numeric("job_cancelled_timeout", 30), + device=self._job, ) if not event: - self.test.fail('Failed to get BLOCK_JOB_CANCELLED event for %s' - % self._job) + self.test.fail(f"Failed to get BLOCK_JOB_CANCELLED event for {self._job}") job_utils.block_job_dismiss(self.main_vm, self._job) - self._stream_options['speed'] = 0 + self._stream_options["speed"] = 0 self.blockdev_stream() self.wait_stream_job_completed() self.check_backing_file() diff --git a/qemu/tests/blockdev_stream_hotunplug.py b/qemu/tests/blockdev_stream_hotunplug.py index 10af103aec..e12e1932ae 100644 --- a/qemu/tests/blockdev_stream_hotunplug.py +++ b/qemu/tests/blockdev_stream_hotunplug.py @@ -16,23 +16,24 @@ def hotunplug_frontend_device(self): device_del the frontend device during stream, the device CAN be removed without any issue """ - self.main_vm.monitor.cmd('device_del', {'id': self.base_tag}) + self.main_vm.monitor.cmd("device_del", {"id": self.base_tag}) def wait_till_frontend_device_deleted(self): """Wait till devices removed from output of query-block""" + def _is_device_deleted(device): for item in self.main_vm.monitor.query("block"): if device in item["qdev"]: return False return True - tmo = self.params.get_numeric('device_del_timeout', 60) + tmo = self.params.get_numeric("device_del_timeout", 60) for i in range(tmo): if _is_device_deleted(self.base_tag): break time.sleep(1) else: - self.test.fail('Failed to hotunplug the frontend device') + self.test.fail("Failed to hotunplug the frontend device") def hotunplug_format_node(self): """ @@ -40,30 +41,32 @@ def hotunplug_format_node(self): the nodes CANNOT be removed for they are busy """ try: - self.main_vm.monitor.cmd('blockdev-del', - {'node-name': self.params['node']}) + self.main_vm.monitor.cmd("blockdev-del", {"node-name": self.params["node"]}) except QMPCmdError as e: - if self.params['block_node_busy_error'] not in str(e): - self.test.fail('Unexpected error: %s' % str(e)) + if self.params["block_node_busy_error"] not in str(e): + self.test.fail(f"Unexpected error: {str(e)}") else: - self.test.fail('blockdev-del succeeded unexpectedly') + self.test.fail("blockdev-del succeeded unexpectedly") def do_test(self): self.snapshot_test() self.blockdev_stream() job_utils.check_block_jobs_started( - self.main_vm, [self._job], - self.params.get_numeric('job_started_timeout', 60) + self.main_vm, + [self._job], + self.params.get_numeric("job_started_timeout", 60), ) self.hotunplug_frontend_device() self.wait_till_frontend_device_deleted() self.hotunplug_format_node() job_utils.check_block_jobs_running( - self.main_vm, [self._job], - self.params.get_numeric('job_running_timeout', 300) + self.main_vm, + [self._job], + self.params.get_numeric("job_running_timeout", 300), ) self.main_vm.monitor.cmd( - "block-job-set-speed", {'device': self._job, 'speed': 0}) + "block-job-set-speed", {"device": self._job, "speed": 0} + ) self.wait_stream_job_completed() self.check_backing_file() self.clone_vm.create() diff --git a/qemu/tests/blockdev_stream_install.py b/qemu/tests/blockdev_stream_install.py index d95f3e4ae6..e306cd94b5 100644 --- a/qemu/tests/blockdev_stream_install.py +++ b/qemu/tests/blockdev_stream_install.py @@ -3,13 +3,12 @@ import re import time -from virttest import utils_test -from virttest import 
utils_misc +from virttest import utils_misc, utils_test from virttest.tests import unattended_install from provider.blockdev_stream_nowait import BlockdevStreamNowaitTest -LOG_JOB = logging.getLogger('avocado.test') +LOG_JOB = logging.getLogger("avocado.test") class BlockdevStreamVMInstallTest(BlockdevStreamNowaitTest): @@ -18,9 +17,12 @@ class BlockdevStreamVMInstallTest(BlockdevStreamNowaitTest): """ def _is_install_started(self, start_msg): - out = self.main_vm.serial_console.get_output() \ - if self.main_vm.serial_console else None - out = '' if out is None else out + out = ( + self.main_vm.serial_console.get_output() + if self.main_vm.serial_console + else None + ) + out = "" if out is None else out return bool(re.search(start_msg, out, re.M)) def _install_vm_in_background(self): @@ -32,9 +34,11 @@ def _install_vm_in_background(self): LOG_JOB.info("Wait till '%s'", self.params["tag_for_install_start"]) if utils_misc.wait_for( - lambda: self._is_install_started( - self.params["tag_for_install_start"]), - int(self.params.get("timeout_for_install_start", 360)), 10, 5): + lambda: self._is_install_started(self.params["tag_for_install_start"]), + int(self.params.get("timeout_for_install_start", 360)), + 10, + 5, + ): LOG_JOB.info("Sleep some time before block-stream") time.sleep(random.randint(10, 120)) else: @@ -42,8 +46,7 @@ def _install_vm_in_background(self): def _wait_installation_done(self): # Keep the same timeout with the default used in VT - self._bg.join( - timeout=int(self.params.get("install_timeout", 4800))) + self._bg.join(timeout=int(self.params.get("install_timeout", 4800))) if self._bg.is_alive(): self.test.fail("VM installation timed out") @@ -64,8 +67,13 @@ def _clone_vm_with_snapshot_image(self): cdrom = self.main_vm.params.objects("cdroms")[0] self.clone_vm.params["cdroms"] = cdrom self.clone_vm.params["boot_once"] = "c" - for opt in ["cdrom_%s" % cdrom, "boot_path", - "kernel_params", "kernel", "initrd"]: + for opt in [ + f"cdrom_{cdrom}", + "boot_path", + "kernel_params", + "kernel", + "initrd", + ]: self.clone_vm.params[opt] = "" self.clone_vm.create() diff --git a/qemu/tests/blockdev_stream_multiple_blocks.py b/qemu/tests/blockdev_stream_multiple_blocks.py index c8510b2713..77046e31fe 100644 --- a/qemu/tests/blockdev_stream_multiple_blocks.py +++ b/qemu/tests/blockdev_stream_multiple_blocks.py @@ -1,21 +1,17 @@ import logging from provider import backup_utils - from provider.blockdev_base import BlockdevBaseTest from provider.blockdev_stream_parallel import BlockdevStreamParallelTest -LOG_JOB = logging.getLogger('avocado.test') +LOG_JOB = logging.getLogger("avocado.test") -class BlockdevStreamMultipleBlocksTest(BlockdevStreamParallelTest, - BlockdevBaseTest): +class BlockdevStreamMultipleBlocksTest(BlockdevStreamParallelTest, BlockdevBaseTest): """Do block-stream for multiple disks in parallel""" def __init__(self, test, params, env): - super(BlockdevStreamMultipleBlocksTest, self).__init__(test, - params, - env) + super().__init__(test, params, env) self._source_images = self.params.objects("source_images") self._snapshot_images = self.params.objects("snapshot_images") self.disks_info = {} # tag, [dev, mount_point] @@ -30,7 +26,7 @@ def generate_inc_files(self): def do_block_stream_on_another_image(self): """block-stream on another image""" arguments = {} - device = "drive_%s" % self.params.objects("snapshot_images")[-1] + device = "drive_{}".format(self.params.objects("snapshot_images")[-1]) backup_utils.blockdev_stream(self.main_vm, device, **arguments) def 
pre_test(self): @@ -51,8 +47,8 @@ def create_snapshots(self): for idx, source in enumerate(self._source_images): backup_utils.blockdev_snapshot( self.main_vm, - "drive_%s" % source, - "drive_%s" % self._snapshot_images[idx] + f"drive_{source}", + f"drive_{self._snapshot_images[idx]}", ) def post_test(self): diff --git a/qemu/tests/blockdev_stream_no_backing.py b/qemu/tests/blockdev_stream_no_backing.py index e3a10e78f9..0eeef31f1a 100644 --- a/qemu/tests/blockdev_stream_no_backing.py +++ b/qemu/tests/blockdev_stream_no_backing.py @@ -1,5 +1,4 @@ from provider import job_utils - from provider.blockdev_stream_nowait import BlockdevStreamNowaitTest @@ -15,18 +14,18 @@ def post_test(self): pass def verify_job_status(self): - tmo = self.params.get_numeric('job_completed_timeout', 30) + tmo = self.params.get_numeric("job_completed_timeout", 30) # check offset/len in BLOCK_JOB_COMPLETED event - cond = {'device': self._job} + cond = {"device": self._job} event = job_utils.get_event_by_condition( - self.main_vm, job_utils.BLOCK_JOB_COMPLETED_EVENT, tmo, **cond) + self.main_vm, job_utils.BLOCK_JOB_COMPLETED_EVENT, tmo, **cond + ) if event: - if event['data'].get('offset') != 0 or event['data'].get('len') != 0: - self.test.fail('offset and len should always be 0') + if event["data"].get("offset") != 0 or event["data"].get("len") != 0: + self.test.fail("offset and len should always be 0") else: - self.test.fail('Failed to get BLOCK_JOB_COMPLETED event for %s' - % self._job) + self.test.fail(f"Failed to get BLOCK_JOB_COMPLETED event for {self._job}") def do_test(self): self.blockdev_stream() diff --git a/qemu/tests/blockdev_stream_no_space.py b/qemu/tests/blockdev_stream_no_space.py index 06cfd2b7a6..0fba0c8824 100644 --- a/qemu/tests/blockdev_stream_no_space.py +++ b/qemu/tests/blockdev_stream_no_space.py @@ -1,5 +1,4 @@ from provider import job_utils - from provider.blockdev_stream_nowait import BlockdevStreamNowaitTest @@ -13,19 +12,20 @@ def post_test(self): pass def check_no_space_error(self): - tmo = self.params.get_numeric('block_io_error_timeout', 60) + tmo = self.params.get_numeric("block_io_error_timeout", 60) # check 'error' message in BLOCK_JOB_COMPLETED event - cond = {'device': self._job} + cond = {"device": self._job} event = job_utils.get_event_by_condition( - self.main_vm, job_utils.BLOCK_JOB_COMPLETED_EVENT, tmo, **cond) + self.main_vm, job_utils.BLOCK_JOB_COMPLETED_EVENT, tmo, **cond + ) if event: - if event['data'].get('error') != self.params['error_msg']: - self.test.fail('Unexpected error: %s' - % event['data'].get('error')) + if event["data"].get("error") != self.params["error_msg"]: + self.test.fail( + "Unexpected error: {}".format(event["data"].get("error")) + ) else: - self.test.fail('Failed to get BLOCK_JOB_COMPLETED event for %s' - % self._job) + self.test.fail(f"Failed to get BLOCK_JOB_COMPLETED event for {self._job}") def do_test(self): self.create_snapshot() diff --git a/qemu/tests/blockdev_stream_none_existed_overlay.py b/qemu/tests/blockdev_stream_none_existed_overlay.py index 0ae69f845e..515991b327 100644 --- a/qemu/tests/blockdev_stream_none_existed_overlay.py +++ b/qemu/tests/blockdev_stream_none_existed_overlay.py @@ -19,15 +19,14 @@ def post_test(self): def do_test(self): try: self.main_vm.monitor.cmd( - "block-stream", - {'device': self.params['none_existed_overlay_node']} + "block-stream", {"device": self.params["none_existed_overlay_node"]} ) except QMPCmdError as e: error_msg = self.params.get("error_msg") if not re.search(error_msg, str(e)): - 
self.test.fail('Unexpected error: %s' % str(e)) + self.test.fail(f"Unexpected error: {str(e)}") else: - self.test.fail('block-stream succeeded unexpectedly') + self.test.fail("block-stream succeeded unexpectedly") def run(test, params, env): diff --git a/qemu/tests/blockdev_stream_on_error_ignore.py b/qemu/tests/blockdev_stream_on_error_ignore.py index bc0ca0566e..629139b409 100644 --- a/qemu/tests/blockdev_stream_on_error_ignore.py +++ b/qemu/tests/blockdev_stream_on_error_ignore.py @@ -1,7 +1,6 @@ import time from avocado.utils import process - from virttest.data_dir import get_data_dir from virttest.lvm import EmulatedLVM @@ -13,36 +12,37 @@ class BlockdevStreamOnErrorIgnoreTest(BlockdevStreamNowaitTest): """Do block-stream with on-error:ignore""" def __init__(self, test, params, env): - super(BlockdevStreamOnErrorIgnoreTest, self).__init__(test, - params, - env) + super().__init__(test, params, env) # TODO: Workaound lvm setup till VT enhances emulated image creation - self.lv_size = params['lv_size'] - params['lv_size'] = params['emulated_image_size'] + self.lv_size = params["lv_size"] + params["lv_size"] = params["emulated_image_size"] self._lvm = EmulatedLVM(params, get_data_dir()) def _create_snapshot_dir(self): self._lvm.setup() self._lvm.lvs[-1].resize(self.lv_size) - process.system(self.params['storage_prepare_cmd'], - ignore_status=False, shell=True) + process.system( + self.params["storage_prepare_cmd"], ignore_status=False, shell=True + ) def _clean_snapshot_dir(self): - process.system(self.params['storage_clean_cmd'], - ignore_status=False, shell=True) + process.system( + self.params["storage_clean_cmd"], ignore_status=False, shell=True + ) self._lvm.cleanup() - def generate_tempfile(self, root_dir, filename, size='10M', timeout=360): - super(BlockdevStreamOnErrorIgnoreTest, self).generate_tempfile( - root_dir, filename, self.params['tempfile_size'], timeout) + def generate_tempfile(self, root_dir, filename, size="10M", timeout=360): + super().generate_tempfile( + root_dir, filename, self.params["tempfile_size"], timeout + ) def pre_test(self): try: self._create_snapshot_dir() except Exception: self._clean_snapshot_dir() - self.test.error('Failed to setup lvm env') - super(BlockdevStreamOnErrorIgnoreTest, self).pre_test() + self.test.error("Failed to setup lvm env") + super().pre_test() def post_test(self): if self.main_vm.is_alive(): @@ -54,20 +54,23 @@ def check_job_error_event(self): """ Check if BLOCK_JOB_ERROR can be received, then clear all """ - tmo = self.params.get_numeric('job_error_timeout', 120) + tmo = self.params.get_numeric("job_error_timeout", 120) event = job_utils.get_event_by_condition( - self.main_vm, job_utils.BLOCK_JOB_ERROR_EVENT, - tmo, device=self._job, action='ignore' + self.main_vm, + job_utils.BLOCK_JOB_ERROR_EVENT, + tmo, + device=self._job, + action="ignore", ) if not event: - self.test.fail('Failed to get BLOCK_JOB_ERROR event for %s in %s' - % (self._job, tmo)) + self.test.fail( + f"Failed to get BLOCK_JOB_ERROR event for {self._job} in {tmo}" + ) self.main_vm.monitor.clear_event(job_utils.BLOCK_JOB_ERROR_EVENT) def extend_lv_size(self): - process.system(self.params['lv_extend_cmd'], - ignore_status=False, shell=True) + process.system(self.params["lv_extend_cmd"], ignore_status=False, shell=True) time.sleep(5) def wait_until_job_complete_with_error(self): diff --git a/qemu/tests/blockdev_stream_powerdown.py b/qemu/tests/blockdev_stream_powerdown.py index a858aa70e2..46aafd8721 100644 --- a/qemu/tests/blockdev_stream_powerdown.py +++ 
b/qemu/tests/blockdev_stream_powerdown.py @@ -1,6 +1,4 @@ -from provider import job_utils -from provider import backup_utils - +from provider import backup_utils, job_utils from provider.blockdev_stream_nowait import BlockdevStreamNowaitTest @@ -17,17 +15,18 @@ def start_vm_with_snapshot(self): self.clone_vm.create() def stream_with_clone_vm(self): - job_id = backup_utils.blockdev_stream_nowait(self.clone_vm, - self._top_device, - **self._stream_options) + job_id = backup_utils.blockdev_stream_nowait( + self.clone_vm, self._top_device, **self._stream_options + ) job_utils.wait_until_block_job_completed(self.clone_vm, job_id) def do_test(self): self.snapshot_test() self.blockdev_stream() job_utils.check_block_jobs_started( - self.main_vm, [self._job], - self.params.get_numeric('job_started_timeout', 30) + self.main_vm, + [self._job], + self.params.get_numeric("job_started_timeout", 30), ) self.main_vm.monitor.cmd("quit") self.start_vm_with_snapshot() diff --git a/qemu/tests/blockdev_stream_remote_server_down.py b/qemu/tests/blockdev_stream_remote_server_down.py index c6d78b697d..1dfaea6c30 100644 --- a/qemu/tests/blockdev_stream_remote_server_down.py +++ b/qemu/tests/blockdev_stream_remote_server_down.py @@ -1,8 +1,7 @@ import socket from provider.blockdev_stream_nowait import BlockdevStreamNowaitTest -from provider.job_utils import check_block_jobs_paused -from provider.job_utils import check_block_jobs_started +from provider.job_utils import check_block_jobs_paused, check_block_jobs_started from provider.nbd_image_export import QemuNBDExportImage @@ -13,50 +12,52 @@ class BlockdevStreamRemoteServerDownTest(BlockdevStreamNowaitTest): def __init__(self, test, params, env): localhost = socket.gethostname() - params['nbd_server_%s' % params['nbd_image_tag']] = localhost \ - if localhost else 'localhost' - self.nbd_export = QemuNBDExportImage(params, - params['local_image_tag']) - super(BlockdevStreamRemoteServerDownTest, self).__init__(test, - params, - env) + params["nbd_server_{}".format(params["nbd_image_tag"])] = ( + localhost if localhost else "localhost" + ) + self.nbd_export = QemuNBDExportImage(params, params["local_image_tag"]) + super().__init__(test, params, env) def pre_test(self): self.nbd_export.create_image() try: self.nbd_export.export_image() - super(BlockdevStreamRemoteServerDownTest, self).pre_test() + super().pre_test() except Exception: self.nbd_export.stop_export() raise def post_test(self): - super(BlockdevStreamRemoteServerDownTest, self).post_test() + super().post_test() self.nbd_export.stop_export() - self.params['images'] += ' %s' % self.params['local_image_tag'] + self.params["images"] += " {}".format(self.params["local_image_tag"]) - def generate_tempfile(self, root_dir, filename, size='10M', timeout=360): + def generate_tempfile(self, root_dir, filename, size="10M", timeout=360): """Create a large file to enlarge stream time""" - super(BlockdevStreamRemoteServerDownTest, self).generate_tempfile( - root_dir, filename, self.params['tempfile_size'], timeout) + super().generate_tempfile( + root_dir, filename, self.params["tempfile_size"], timeout + ) def do_test(self): self.snapshot_test() self.blockdev_stream() check_block_jobs_started( - self.main_vm, [self._job], - self.params.get_numeric('job_started_timeout', 60) + self.main_vm, + [self._job], + self.params.get_numeric("job_started_timeout", 60), ) self.nbd_export.suspend_export() try: check_block_jobs_paused( - self.main_vm, [self._job], - self.params.get_numeric('job_paused_interval', 50) + self.main_vm, + 
[self._job], + self.params.get_numeric("job_paused_interval", 50), ) finally: self.nbd_export.resume_export() - self.main_vm.monitor.cmd("block-job-set-speed", - {'device': self._job, 'speed': 0}) + self.main_vm.monitor.cmd( + "block-job-set-speed", {"device": self._job, "speed": 0} + ) self.wait_stream_job_completed() self.main_vm.destroy() self.clone_vm.create() diff --git a/qemu/tests/blockdev_stream_speed.py b/qemu/tests/blockdev_stream_speed.py index 8cd8354a46..a3b3514692 100644 --- a/qemu/tests/blockdev_stream_speed.py +++ b/qemu/tests/blockdev_stream_speed.py @@ -1,7 +1,6 @@ from virttest.qemu_monitor import QMPCmdError -from provider import blockdev_stream_nowait -from provider import job_utils +from provider import blockdev_stream_nowait, job_utils class BlockdevStreamSpeedTest(blockdev_stream_nowait.BlockdevStreamNowaitTest): @@ -13,26 +12,27 @@ def test_invalid_speeds(self): """ Set an invalid speed, make sure we can get the proper error message """ + def _set_invalid_speed(jobid, speed, error_msg): try: self.main_vm.monitor.cmd( - "block-job-set-speed", {'device': jobid, 'speed': speed}) + "block-job-set-speed", {"device": jobid, "speed": speed} + ) except QMPCmdError as e: if error_msg not in str(e): - self.test.fail('Unexpected error: %s' % str(e)) + self.test.fail(f"Unexpected error: {str(e)}") else: - self.test.fail('block-job-set-speed %s succeeded unexpectedly' - % speed) + self.test.fail(f"block-job-set-speed {speed} succeeded unexpectedly") def _invalid_speed_error_tuple(speed): - if '-' in speed: # a negative int - return int(speed), self.params['error_msg_negative'] - elif '.' in speed: # a float number - return float(speed), self.params['error_msg'] - else: # a string - return speed, self.params['error_msg'] - - for speed in self.params.objects('invalid_speeds'): + if "-" in speed: # a negative int + return int(speed), self.params["error_msg_negative"] + elif "." 
in speed: # a float number + return float(speed), self.params["error_msg"] + else: # a string + return speed, self.params["error_msg"] + + for speed in self.params.objects("invalid_speeds"): s, m = _invalid_speed_error_tuple(speed) _set_invalid_speed(self._job, s, m) @@ -40,38 +40,44 @@ def test_valid_speeds(self): """ Set a valid speed, make sure stream job can go on without any issue """ + def _set_valid_speed(jobid, speed): self.main_vm.monitor.cmd( - "block-job-set-speed", {'device': jobid, 'speed': speed}) + "block-job-set-speed", {"device": jobid, "speed": speed} + ) def _check_valid_speed(jobid, speed): job = job_utils.get_block_job_by_id(self.main_vm, jobid) if job.get("speed") != speed: - self.test.fail("Speed:%s is not set as expected:%s" - % (job.get("speed"), speed)) + self.test.fail( + "Speed:{} is not set as expected:{}".format(job.get("speed"), speed) + ) ck_speed = self.params.get_numeric("check_speed") uspeed = self.params.get_numeric("ulimit_speed") if speed > ck_speed or speed == uspeed: job_utils.check_block_jobs_running( - self.main_vm, [self._job], - self.params.get_numeric('job_running_timeout', 60) - ) + self.main_vm, + [self._job], + self.params.get_numeric("job_running_timeout", 60), + ) - for speed in self.params.objects('valid_speeds'): + for speed in self.params.objects("valid_speeds"): _set_valid_speed(self._job, int(speed)) _check_valid_speed(self._job, int(speed)) - def generate_tempfile(self, root_dir, filename, size='10M', timeout=360): + def generate_tempfile(self, root_dir, filename, size="10M", timeout=360): """Create a large file to enlarge stream time""" - super(BlockdevStreamSpeedTest, self).generate_tempfile( - root_dir, filename, self.params['tempfile_size'], timeout) + super().generate_tempfile( + root_dir, filename, self.params["tempfile_size"], timeout + ) def do_test(self): self.snapshot_test() self.blockdev_stream() job_utils.check_block_jobs_started( - self.main_vm, [self._job], - self.params.get_numeric('job_started_timeout', 60) + self.main_vm, + [self._job], + self.params.get_numeric("job_started_timeout", 60), ) self.test_invalid_speeds() self.test_valid_speeds() diff --git a/qemu/tests/blockdev_stream_stress.py b/qemu/tests/blockdev_stream_stress.py index a2bd908142..d0efa4a927 100644 --- a/qemu/tests/blockdev_stream_stress.py +++ b/qemu/tests/blockdev_stream_stress.py @@ -17,7 +17,7 @@ def check_stress_running(self): self.test.fail("stress stopped unexpectedly") def pre_test(self): - super(BlockdevStreamStressTest, self).pre_test() + super().pre_test() self._run_stress_test() def do_test(self): diff --git a/qemu/tests/blockdev_stream_subchain.py b/qemu/tests/blockdev_stream_subchain.py index d86d8293f5..89e5c17442 100644 --- a/qemu/tests/blockdev_stream_subchain.py +++ b/qemu/tests/blockdev_stream_subchain.py @@ -1,45 +1,45 @@ import json import logging -from virttest import data_dir -from virttest import qemu_storage +from virttest import data_dir, qemu_storage from virttest.qemu_capabilities import Flags from provider import backup_utils - from provider.blockdev_stream_base import BlockDevStreamTest from provider.virt_storage.storage_admin import sp_admin -LOG_JOB = logging.getLogger('avocado.test') +LOG_JOB = logging.getLogger("avocado.test") class BlockdevStreamSubChainTest(BlockDevStreamTest): """Do block-stream based on an existed snapshot in snapshot chain""" def __init__(self, test, params, env): - super(BlockdevStreamSubChainTest, self).__init__(test, params, env) + super().__init__(test, params, env) self._snapshot_images = 
self.params.objects("snapshot_images") self._base_node_tag = self.params["base_node_tag"] self._trash = [] def snapshot_test(self): """create one snapshot, create one new file""" - self.generate_tempfile(self.disks_info[self.base_tag][1], - filename="base", - size=self.params["tempfile_size"]) + self.generate_tempfile( + self.disks_info[self.base_tag][1], + filename="base", + size=self.params["tempfile_size"], + ) # data->sn1->sn2->sn3 chain = [self.base_tag] + self._snapshot_images for idx in range(1, len(chain)): backup_utils.blockdev_snapshot( - self.main_vm, - "drive_%s" % chain[idx-1], - "drive_%s" % chain[idx] + self.main_vm, f"drive_{chain[idx - 1]}", f"drive_{chain[idx]}" ) - self.generate_tempfile(self.disks_info[self.base_tag][1], - filename=chain[idx], - size=self.params["tempfile_size"]) + self.generate_tempfile( + self.disks_info[self.base_tag][1], + filename=chain[idx], + size=self.params["tempfile_size"], + ) def _disk_define_by_params(self, tag): params = self.params.copy() @@ -67,44 +67,49 @@ def post_test(self): def _is_same_file(self, file_params, file_opts): # FIXME: this should be supported in VT - mapping = {'gluster': 'path', 'iscsi': 'lun', - 'nbd': 'server.port', 'rbd': 'image'} - option = mapping.get(file_opts["driver"], 'filename') + mapping = { + "gluster": "path", + "iscsi": "lun", + "nbd": "server.port", + "rbd": "image", + } + option = mapping.get(file_opts["driver"], "filename") return file_params[option] == file_opts[option] def _check_backing(self, backing): data_image_opts = qemu_storage.filename_to_file_opts( qemu_storage.QemuImg( self.params.object_params(self.base_tag), - data_dir.get_data_dir(), self.base_tag + data_dir.get_data_dir(), + self.base_tag, ).image_filename ) base_image_opts = qemu_storage.filename_to_file_opts( qemu_storage.QemuImg( self.params.object_params(self._base_node_tag), - data_dir.get_data_dir(), self._base_node_tag + data_dir.get_data_dir(), + self._base_node_tag, ).image_filename ) try: # datasn1->datasn3: check datasn1 is datasn3's backing file if not self._is_same_file(backing["file"], base_image_opts): - self.test.fail("Failed to get backing file for %s" - % self.snapshot_tag) + self.test.fail(f"Failed to get backing file for {self.snapshot_tag}") # data->datasn1: check data is datasn1's backing file backing_mask = self.main_vm.check_capability( - Flags.BLOCKJOB_BACKING_MASK_PROTOCOL) - raw_format = self.get_image_by_tag( - self.base_tag).image_format == "raw" - backing_opts = backing["backing"] if ( - backing_mask and raw_format - ) else backing["backing"]["file"] - if not self._is_same_file(backing_opts, - data_image_opts): - self.test.fail("Failed to get backing file for %s" - % self._base_node_tag) + Flags.BLOCKJOB_BACKING_MASK_PROTOCOL + ) + raw_format = self.get_image_by_tag(self.base_tag).image_format == "raw" + backing_opts = ( + backing["backing"] + if (backing_mask and raw_format) + else backing["backing"]["file"] + ) + if not self._is_same_file(backing_opts, data_image_opts): + self.test.fail(f"Failed to get backing file for {self._base_node_tag}") except Exception as e: - self.test.fail("Failed to get backing chain: %s" % str(e)) + self.test.fail(f"Failed to get backing chain: {str(e)}") def check_backing_chain(self): """after block-stream, the backing chain: data->datasn1->dtasn3""" @@ -113,13 +118,14 @@ def check_backing_chain(self): if self.base_tag in item["qdev"]: backing = item["inserted"].get("backing_file") if not backing: - self.test.fail("Failed to get backing_file for qdev %s" - % self.base_tag) + 
self.test.fail( + f"Failed to get backing_file for qdev {self.base_tag}" + ) backing_dict = json.loads(backing[5:]) self._check_backing(backing_dict) break else: - self.test.fail("Failed to find %s" % self.base_tag) + self.test.fail(f"Failed to find {self.base_tag}") def clone_vm_with_snapshot(self): if self.main_vm.is_alive(): @@ -127,8 +133,9 @@ def clone_vm_with_snapshot(self): # Add image_chain , then VT can add access secret objects # in qemu-kvm command, qemu-kvm can access the backing files - self.clone_vm.params["image_chain_%s" % self.snapshot_tag] = "%s %s %s" % ( - self.base_tag, self._base_node_tag, self.snapshot_tag) + self.clone_vm.params[f"image_chain_{self.snapshot_tag}"] = ( + f"{self.base_tag} {self._base_node_tag} {self.snapshot_tag}" + ) self.clone_vm.create() def do_test(self): diff --git a/qemu/tests/blockdev_stream_to_invalid_node.py b/qemu/tests/blockdev_stream_to_invalid_node.py index 1fdb9b093e..0fbaff7b41 100644 --- a/qemu/tests/blockdev_stream_to_invalid_node.py +++ b/qemu/tests/blockdev_stream_to_invalid_node.py @@ -1,7 +1,6 @@ import random -from virttest import utils_misc -from virttest import utils_qemu +from virttest import utils_misc, utils_qemu from virttest.qemu_monitor import QMPCmdError from virttest.utils_version import VersionInterval @@ -37,7 +36,7 @@ def stream_to_invalid_node(self): if qmp_error_msg not in str(e.data): self.test.fail(str(e)) else: - self.test.fail("Can stream to an invalid node:%s" % device_node) + self.test.fail(f"Can stream to an invalid node:{device_node}") def run_test(self): self.pre_test() @@ -50,11 +49,11 @@ def run_test(self): def run(test, params, env): """ - Block stream to an invalid node + Block stream to an invalid node - 1. boot guest and create 4 snapshots and save file in each snapshot - 2. do block commit and wait for block job completed - 3. Random choice a node name in the snapshot chain, stream to it. + 1. boot guest and create 4 snapshots and save file in each snapshot + 2. do block commit and wait for block job completed + 3. Random choice a node name in the snapshot chain, stream to it. 
""" block_test = BlkdevStreamtoInvalidnode(test, params, env) diff --git a/qemu/tests/blockdev_stream_vm_reboot.py b/qemu/tests/blockdev_stream_vm_reboot.py index f75fa4e499..84adfd73d8 100644 --- a/qemu/tests/blockdev_stream_vm_reboot.py +++ b/qemu/tests/blockdev_stream_vm_reboot.py @@ -9,7 +9,7 @@ def reboot_vm(self): self.main_vm.reboot(method=reboot_method) def do_test(self): - super(BlockdevStreamVMRebootTest, self).do_test() + super().do_test() self.clone_vm.destroy() self.remove_files_from_system_image() diff --git a/qemu/tests/blockdev_stream_vm_stop_cont.py b/qemu/tests/blockdev_stream_vm_stop_cont.py index 0de05d5e96..b56611e7f6 100644 --- a/qemu/tests/blockdev_stream_vm_stop_cont.py +++ b/qemu/tests/blockdev_stream_vm_stop_cont.py @@ -1,5 +1,5 @@ -import time import random +import time from provider.blockdev_stream_parallel import BlockdevStreamParallelTest @@ -10,12 +10,12 @@ class BlockdevStreamVMStopContTest(BlockdevStreamParallelTest): def stop_cont_vm(self): """Stop VM for a while, then resume it""" self.main_vm.pause() - t = int(random.choice(self.params.objects('vm_stop_time_list'))) + t = int(random.choice(self.params.objects("vm_stop_time_list"))) time.sleep(t) self.main_vm.resume() def do_test(self): - super(BlockdevStreamVMStopContTest, self).do_test() + super().do_test() self.clone_vm.destroy() self.remove_files_from_system_image() diff --git a/qemu/tests/blockdev_stream_with_ioerror.py b/qemu/tests/blockdev_stream_with_ioerror.py index c675cee33e..e28748f8f5 100644 --- a/qemu/tests/blockdev_stream_with_ioerror.py +++ b/qemu/tests/blockdev_stream_with_ioerror.py @@ -1,5 +1,4 @@ from aexpect import ShellTimeoutError - from virttest import error_context from provider.blockdev_stream_base import BlockDevStreamTest @@ -12,14 +11,15 @@ def dd_io_error(self, root_dir, ori_filename, tar_filename, timeout=20): """Generate temp data file in guest""" self.session = self.main_vm.wait_for_login() self.file_info = self.files_info[0] - ori_file_path = "%s/%s" % (root_dir, ori_filename) - tar_file_path = "%s/%s" % (root_dir, tar_filename) + ori_file_path = f"{root_dir}/{ori_filename}" + tar_file_path = f"{root_dir}/{tar_filename}" dd_cmd = self.main_vm.params.get( - "dd_cmd", "dd if=%s of=%s bs=1M count=60 oflag=direct") + "dd_cmd", "dd if=%s of=%s bs=1M count=60 oflag=direct" + ) mk_file_cmd = dd_cmd % (ori_file_path, tar_file_path) try: self.session.cmd(mk_file_cmd, timeout=timeout) - except ShellTimeoutError as e: + except ShellTimeoutError: self.main_vm.verify_status("io-error") self.file_info.append(tar_filename) else: @@ -34,11 +34,11 @@ def snapshot_test(self): def md5_io_error_file(self): if not self.session: self.session = self.main_vm.wait_for_login() - output = self.session.cmd_output('\n', timeout=120) + output = self.session.cmd_output("\n", timeout=120) if self.params["dd_done"] not in output: self.test.fail("dd not continue to run after vm resume") - tar_file_path = "%s/%s" % (self.file_info[0], self.file_info[2]) - md5_cmd = "md5sum %s > %s.md5 && sync" % (tar_file_path, tar_file_path) + tar_file_path = f"{self.file_info[0]}/{self.file_info[2]}" + md5_cmd = f"md5sum {tar_file_path} > {tar_file_path}.md5 && sync" self.session.cmd(md5_cmd, timeout=120) def verify_data_file(self): @@ -46,14 +46,14 @@ def verify_data_file(self): self.session = self.main_vm.wait_for_login() ori_file_md5 = "" for info in [self.file_info[1], self.file_info[2]]: - file_path = "%s/%s" % (self.file_info[0], info) - cat_cmd = "cat %s.md5" % file_path + file_path = f"{self.file_info[0]}/{info}" 
+ cat_cmd = f"cat {file_path}.md5" output = self.session.cmd_output(cat_cmd, timeout=120).split()[0] if not ori_file_md5: ori_file_md5 = output if ori_file_md5 != output: - msg = "file ('%s' '%s') md5 mismatch" % (ori_file_md5, output) - msg += "with value ('%s', '%s')" % (ori_file_md5, output) + msg = f"file ('{ori_file_md5}' '{output}') md5 mismatch" + msg += f"with value ('{ori_file_md5}', '{output}')" self.test.fail(msg) def op_after_stream(self): diff --git a/qemu/tests/boot_N_M_virtserialports.py b/qemu/tests/boot_N_M_virtserialports.py index a14e6207b3..f017e5e713 100644 --- a/qemu/tests/boot_N_M_virtserialports.py +++ b/qemu/tests/boot_N_M_virtserialports.py @@ -1,8 +1,7 @@ -from virttest import error_context -from virttest import utils_test -from virttest import env_process -from qemu.tests.virtio_serial_file_transfer import transfer_data +from virttest import env_process, error_context, utils_test + from provider import win_driver_utils +from qemu.tests.virtio_serial_file_transfer import transfer_data @error_context.context_aware @@ -21,32 +20,33 @@ def run(test, params, env): :param params: Dictionary with the test parameters :param env: Dictionary with test environment """ - if params.get("start_vm") == 'no': + if params.get("start_vm") == "no": num_bus = params.get_numeric("numberic_bus") for i in range(2, num_bus + 1): - serial_name = 'vs%d' % i - params['serials'] = '%s %s' % (params.get('serials', ''), serial_name) - params['serial_type_%s' % serial_name] = "virtserialport" - params['serial_bus_%s' % serial_name] = "" - params['start_vm'] = "yes" + serial_name = "vs%d" % i + params["serials"] = "{} {}".format(params.get("serials", ""), serial_name) + params[f"serial_type_{serial_name}"] = "virtserialport" + params[f"serial_bus_{serial_name}"] = "" + params["start_vm"] = "yes" env_process.preprocess(test, params, env) - vm = env.get_vm(params['main_vm']) + vm = env.get_vm(params["main_vm"]) os_type = params["os_type"] if os_type == "windows": driver_name = params["driver_name"] session = vm.wait_for_login() session = utils_test.qemu.windrv_check_running_verifier( - session, vm, test, driver_name) + session, vm, test, driver_name + ) session.close() for port in params.objects("serials"): port_params = params.object_params(port) - if not port_params['serial_type'].startswith('virtserial'): + if not port_params["serial_type"].startswith("virtserial"): continue - params['file_transfer_serial_port'] = port - error_context.context("Transfer data with %s" % port, test.log.info) - transfer_data(params, vm, sender='both') + params["file_transfer_serial_port"] = port + error_context.context(f"Transfer data with {port}", test.log.info) + transfer_data(params, vm, sender="both") vm.verify_alive() vm.verify_kernel_crash() # for windows guest, disable/uninstall driver to get memory leak based on diff --git a/qemu/tests/boot_cpu_model.py b/qemu/tests/boot_cpu_model.py index 5c8ea60f89..553b6c44f2 100644 --- a/qemu/tests/boot_cpu_model.py +++ b/qemu/tests/boot_cpu_model.py @@ -1,7 +1,4 @@ -from virttest import env_process -from virttest import error_context -from virttest import cpu -from virttest import utils_test +from virttest import cpu, env_process, error_context, utils_test @error_context.context_aware @@ -24,10 +21,9 @@ def run(test, params, env): if cpu_vendor == "unknow": test.error("unknow cpu vendor") else: - model_list = params.get("cpu_model_%s" % cpu_vendor, - host_model[-1]) + model_list = params.get(f"cpu_model_{cpu_vendor}", host_model[-1]) - extra_flags = 
params.get("cpu_model_flags_%s" % cpu_vendor, "") + extra_flags = params.get(f"cpu_model_flags_{cpu_vendor}", "") if extra_flags: cpu_flags = params.get("cpu_model_flags", "") + extra_flags params["cpu_model_flags"] = cpu_flags @@ -41,14 +37,16 @@ def run(test, params, env): env_process.preprocess_vm(test, params, env, params["main_vm"]) # check guest flags if params.get("enable_check", "no") == "yes": - utils_test.run_virt_sub_test(test, params, - env, sub_type="flag_check") + utils_test.run_virt_sub_test( + test, params, env, sub_type="flag_check" + ) else: # log in and shutdown guest - utils_test.run_virt_sub_test(test, params, - env, sub_type="shutdown") + utils_test.run_virt_sub_test(test, params, env, sub_type="shutdown") test.log.info("shutdown guest successfully") else: if params.get("enable_check", "no") == "yes": - test.cancel("Can not test %s model on %s host, pls use " - "%s host" % (model, host_model[0], model)) + test.cancel( + f"Can not test {model} model on {host_model[0]} host, pls use " + f"{model} host" + ) diff --git a/qemu/tests/boot_e1000e_with_cpu_flag.py b/qemu/tests/boot_e1000e_with_cpu_flag.py index 6c39043978..df5fc7402e 100644 --- a/qemu/tests/boot_e1000e_with_cpu_flag.py +++ b/qemu/tests/boot_e1000e_with_cpu_flag.py @@ -1,5 +1,4 @@ -from virttest import error_context -from virttest import utils_net +from virttest import error_context, utils_net @error_context.context_aware @@ -28,9 +27,10 @@ def run(test, params, env): session_serial.cmd(bcdedit_debug) session_serial.cmd(bcdedit_cmd) vm.reboot(timeout=login_timeout) - status, output = utils_net.ping(dest=ext_host, count=10, - session=session_serial, timeout=30) + status, output = utils_net.ping( + dest=ext_host, count=10, session=session_serial, timeout=30 + ) if status: - test.fail("ping is failed, output %s" % output) + test.fail(f"ping is failed, output {output}") finally: session_serial.close() diff --git a/qemu/tests/boot_from_device.py b/qemu/tests/boot_from_device.py index de413532ed..e2ce46e012 100644 --- a/qemu/tests/boot_from_device.py +++ b/qemu/tests/boot_from_device.py @@ -2,10 +2,7 @@ import re from avocado.utils import process - -from virttest import error_context -from virttest import utils_misc -from virttest import env_process +from virttest import env_process, error_context, utils_misc @error_context.context_aware @@ -28,7 +25,7 @@ def create_cdroms(cdrom_test): """ test.log.info("creating test cdrom") process.run("dd if=/dev/urandom of=test bs=10M count=1") - process.run("mkisofs -o %s test" % cdrom_test) + process.run(f"mkisofs -o {cdrom_test} test") process.run("rm -f test") def cleanup_cdroms(cdrom_test): @@ -55,7 +52,7 @@ def get_serial_console_output(): return output timeout = int(params.get("login_timeout", 360)) - boot_menu_key = params.get("boot_menu_key", 'esc') + boot_menu_key = params.get("boot_menu_key", "esc") boot_menu_hint = params["boot_menu_hint"] boot_entry_info = params["boot_entry_info"] boot_dev = params.get("boot_dev") @@ -72,8 +69,7 @@ def get_serial_console_output(): try: if boot_dev: - if not utils_misc.wait_for(lambda: boot_check(boot_menu_hint), - timeout, 1): + if not utils_misc.wait_for(lambda: boot_check(boot_menu_hint), timeout, 1): test.fail("Could not get boot menu message") # Send boot menu key in monitor. 
@@ -87,18 +83,17 @@ def get_serial_console_output(): for i, v in enumerate(boot_list, start=1): if re.search(boot_dev, v, re.I): - msg = "Start guest from boot entry '%s'" % boot_dev + msg = f"Start guest from boot entry '{boot_dev}'" error_context.context(msg, test.log.info) vm.send_key(str(i)) break else: - msg = "Could not get boot entry match pattern '%s'" % boot_dev + msg = f"Could not get boot entry match pattern '{boot_dev}'" test.fail(msg) error_context.context("Check boot result", test.log.info) - if not utils_misc.wait_for(lambda: boot_check(boot_entry_info), - timeout, 1): - test.fail("Could not boot from '%s'" % dev_name) + if not utils_misc.wait_for(lambda: boot_check(boot_entry_info), timeout, 1): + test.fail(f"Could not boot from '{dev_name}'") finally: if dev_name == "cdrom": cleanup_cdroms(cdrom_test) diff --git a/qemu/tests/boot_from_nbd_image.py b/qemu/tests/boot_from_nbd_image.py index f221970107..8e5faa09ad 100644 --- a/qemu/tests/boot_from_nbd_image.py +++ b/qemu/tests/boot_from_nbd_image.py @@ -1,13 +1,10 @@ import socket -from virttest import qemu_storage - from avocado import fail_on - from avocado.utils import process +from virttest import qemu_storage from provider import qemu_img_utils as img_utils - from provider.nbd_image_export import QemuNBDExportImage @@ -19,16 +16,18 @@ def run(test, params, env): 4) Start VM from the exported image 5) Log into VM """ + def _convert_image(): - source = params['images'].split()[0] - target = params['convert_target'] + source = params["images"].split()[0] + target = params["convert_target"] source_params = params.object_params(source) target_params = params.object_params(target) source_image = qemu_storage.QemuImg(source_params, None, source) # Convert source to target fail_on((process.CmdError,))(source_image.convert)( - target_params, None, skip_target_creation=True) + target_params, None, skip_target_creation=True + ) nbd_export = QemuNBDExportImage(params, params["local_image_tag"]) nbd_export.create_image() @@ -36,16 +35,17 @@ def _convert_image(): # we only export image with local nbd server localhost = socket.gethostname() - params['nbd_server_%s' % params['convert_target'] - ] = localhost if localhost else 'localhost' + params["nbd_server_{}".format(params["convert_target"])] = ( + localhost if localhost else "localhost" + ) vm = None try: _convert_image() - vm = img_utils.boot_vm_with_images(test, params, env, - (params['convert_target'],)) - session = vm.wait_for_login( - timeout=params.get_numeric("login_timeout", 480)) + vm = img_utils.boot_vm_with_images( + test, params, env, (params["convert_target"],) + ) + session = vm.wait_for_login(timeout=params.get_numeric("login_timeout", 480)) session.close() finally: if vm: diff --git a/qemu/tests/boot_from_remote.py b/qemu/tests/boot_from_remote.py index fac0e0b04f..b82ca19104 100644 --- a/qemu/tests/boot_from_remote.py +++ b/qemu/tests/boot_from_remote.py @@ -1,14 +1,10 @@ +import os import random import re -import os from avocado.core import exceptions from avocado.utils import process - -from virttest import error_context -from virttest import utils_misc -from virttest import env_process -from virttest import utils_numeric +from virttest import env_process, error_context, utils_misc, utils_numeric @error_context.context_aware @@ -24,22 +20,23 @@ def run(test, params, env): :param params: Dictionary with the test parameters. :param env: Dictionary with test environment. """ + def _get_data_disk(session): - """ Get the data disk. 
""" - extra_params = params["blk_extra_params_%s" % - params['images'].split()[-1]] - drive_id = re.search(r"(serial|wwn)=(\w+)", - extra_params, re.M).group(2) + """Get the data disk.""" + extra_params = params[ + "blk_extra_params_{}".format(params["images"].split()[-1]) + ] + drive_id = re.search(r"(serial|wwn)=(\w+)", extra_params, re.M).group(2) return utils_misc.get_linux_drive_path(session, drive_id) def _write_disk(session): - disk_op_cmd = params['disk_op_cmd'] + disk_op_cmd = params["disk_op_cmd"] if disk_op_cmd: disk = _get_data_disk(session) session.cmd(disk_op_cmd.format(disk=disk)) def _get_memory(pid): - cmd = "ps -o vsz,rss -p %s | tail -n1" % pid + cmd = f"ps -o vsz,rss -p {pid} | tail -n1" out = process.system_output(cmd, shell=True).split() return [int(i) for i in out] @@ -54,14 +51,18 @@ def boot_with_debug(): # valid debug levels low = int(params["debug_level_low"]) high = int(params["debug_level_high"]) - levels = [i for i in range(low, high+1)] + levels = [i for i in range(low, high + 1)] # invalid debug levels: [low-100, low) and [high+1, high+100) - levels.extend([random.choice(range(low-100, low)), - random.choice(range(high+1, high+100))]) + levels.extend( + [ + random.choice(range(low - 100, low)), + random.choice(range(high + 1, high + 100)), + ] + ) for level in levels: - logfile = utils_misc.get_log_filename("debug.level%s" % level) + logfile = utils_misc.get_log_filename(f"debug.level{level}") params["gluster_debug"] = level params["gluster_logfile"] = logfile test.log.info("debug level: %d, log: %s", level, logfile) @@ -71,8 +72,7 @@ def boot_with_debug(): vm = env.get_vm(params["main_vm"]) vm.verify_alive() if not os.path.exists(logfile): - raise exceptions.TestFail("Failed to generate log file %s" - % logfile) + raise exceptions.TestFail(f"Failed to generate log file {logfile}") os.remove(logfile) finally: vm.destroy() @@ -111,15 +111,17 @@ def boot_with_remote_images(): # get vsz, rss when booting with one remote image single_img_memory = _get_memory(vm.get_pid()) if not single_img_memory: - raise exceptions.TestError("Failed to get memory when " - "booting with one remote image.") - test.log.debug("memory consumption(only one remote image): %s", - single_img_memory) + raise exceptions.TestError( + "Failed to get memory when " "booting with one remote image." + ) + test.log.debug( + "memory consumption(only one remote image): %s", single_img_memory + ) vm.destroy() - for img in params['images'].split()[1:]: - params['boot_drive_%s' % img] = 'yes' + for img in params["images"].split()[1:]: + params[f"boot_drive_{img}"] = "yes" env_process.preprocess_vm(test, params, env, params["main_vm"]) vm = env.get_vm(params["main_vm"]) vm.verify_alive() @@ -127,23 +129,29 @@ def boot_with_remote_images(): # get vsz, rss when booting with 4 remote image multi_img_memory = _get_memory(vm.get_pid()) if not multi_img_memory: - raise exceptions.TestError("Failed to get memory when booting" - " with several remote images.") - test.log.debug("memory consumption(total 4 remote images): %s", - multi_img_memory) - - diff = int(float(utils_numeric.normalize_data_size( - params['memory_diff'], order_magnitude="K"))) - mem_diffs = [i-j for i, j in zip(multi_img_memory, - single_img_memory)] + raise exceptions.TestError( + "Failed to get memory when booting" " with several remote images." 
+ ) + test.log.debug( + "memory consumption(total 4 remote images): %s", multi_img_memory + ) + + diff = int( + float( + utils_numeric.normalize_data_size( + params["memory_diff"], order_magnitude="K" + ) + ) + ) + mem_diffs = [i - j for i, j in zip(multi_img_memory, single_img_memory)] if mem_diffs[0] > diff: raise exceptions.TestFail( - "vsz increased '%s', which was more than '%s'" - % (mem_diffs[0], diff)) + f"vsz increased '{mem_diffs[0]}', which was more than '{diff}'" + ) if mem_diffs[1] > diff: raise exceptions.TestFail( - "rss increased '%s', which was more than '%s'" - % (mem_diffs[1], diff)) + f"rss increased '{mem_diffs[1]}', which was more than '{diff}'" + ) finally: vm.destroy() diff --git a/qemu/tests/boot_from_virtiofs.py b/qemu/tests/boot_from_virtiofs.py index a7c72f9509..ed440dfe11 100644 --- a/qemu/tests/boot_from_virtiofs.py +++ b/qemu/tests/boot_from_virtiofs.py @@ -1,8 +1,7 @@ import os from avocado.utils import process -from virttest import data_dir -from virttest import env_process +from virttest import data_dir, env_process def create_kernel_initrd(test, params): @@ -15,16 +14,16 @@ def create_kernel_initrd(test, params): test.log.info("Creating initramfs and kernel file") install_path = data_dir.get_data_dir() kernel_version = process.getoutput(params.get("guest_ver_cmd", "uname -r")) - create_initramfs_cmd = (params["create_initramfs_cmd"] % install_path) + create_initramfs_cmd = params["create_initramfs_cmd"] % install_path status, output = process.getstatusoutput(create_initramfs_cmd) if status: test.fail("Failed to create initramfs.") - test.log.info("initramfs is created in %s" % install_path) + test.log.info("initramfs is created in %s", install_path) initrd_path = install_path + "/initramfs-virtiofs.img" # copy vmlinuz to virtiofs_root - process.system("cp /boot/vmlinuz-%s %s" % (kernel_version, install_path)) - kernel_path = install_path + ("/vmlinuz-%s" % kernel_version) + process.system(f"cp /boot/vmlinuz-{kernel_version} {install_path}") + kernel_path = install_path + (f"/vmlinuz-{kernel_version}") params["kernel"] = kernel_path params["initrd"] = initrd_path return [kernel_path, initrd_path] @@ -39,12 +38,12 @@ def setup_basic_root_fs(test, params): """ test.log.info("Setting basic root file system") virtiofs_root_path = data_dir.get_data_dir() + "/virtiofs_root" - install_file_system_cmd = (params["install_file_system_cmd"] - % virtiofs_root_path) + install_file_system_cmd = params["install_file_system_cmd"] % virtiofs_root_path status, output = process.getstatusoutput(install_file_system_cmd) if status: - test.fail("Failed to install basic root file system." - "Error message: %s" % output) + test.fail( + "Failed to install basic root file system." 
f"Error message: {output}" + ) return virtiofs_root_path @@ -63,13 +62,13 @@ def change_fs_passwd(test, params, virtiofs_root_path): process.getoutput("setenforce 0") set_passwd_cmd = params["set_passwd_cmd"] - fd = os.open('/', os.R_OK, os.X_OK) + fd = os.open("/", os.R_OK, os.X_OK) # change root path os.chroot(virtiofs_root_path) # change password os.system(set_passwd_cmd) os.fchdir(fd) - os.chroot('.') + os.chroot(".") # restore the value after change password if original_selinux_value != "Disabled": @@ -103,10 +102,10 @@ def clean_env(test, trash_files): """ if trash_files: for file_path in trash_files: - test.log.info("Removing file %s" % file_path) - s, o = process.getstatusoutput("rm -rf %s" % file_path) + test.log.info("Removing file %s", file_path) + s, o = process.getstatusoutput(f"rm -rf {file_path}") if s: - test.fail("Failed to remove file %s" % file_path) + test.fail(f"Failed to remove file {file_path}") def run(test, params, env): diff --git a/qemu/tests/boot_nbdimage_with_qsd.py b/qemu/tests/boot_nbdimage_with_qsd.py index 91c5e8eb5d..5ff8d09383 100644 --- a/qemu/tests/boot_nbdimage_with_qsd.py +++ b/qemu/tests/boot_nbdimage_with_qsd.py @@ -1,9 +1,8 @@ import os -from virttest import data_dir -from virttest import qemu_storage - from avocado.utils import process +from virttest import data_dir, qemu_storage + from provider import qemu_img_utils as img_utils from provider.qsd import QsdDaemonDev @@ -16,11 +15,13 @@ def run(test, params, env): 2) Export the OS image via QSD plus unix 3) Start VM from the exported image """ + def pre_test(): # prepare an installed OS image test.log.info( "Prepare an installed OS image file via converting, " - "and exporting it via QSD") + "and exporting it via QSD" + ) source = params["convert_source"] target = params["convert_target"] source_params = params.object_params(source) @@ -34,8 +35,9 @@ def pre_test(): source_image.convert(params, root_dir) except process.CmdError as detail: target_image.remove() - test.fail("Convert %s to %s failed for '%s', check please." % - (source, target, detail)) + test.fail( + f"Convert {source} to {target} failed for '{detail}', check please." 
+ ) # export the OS image over QSD qsd.start_daemon() @@ -46,8 +48,9 @@ def run_test(): login_timeout = params.get_numeric("login_timeout", 360) vm = None try: - vm = img_utils.boot_vm_with_images(test, params, env, - (params['nbd_image_tag'],)) + vm = img_utils.boot_vm_with_images( + test, params, env, (params["nbd_image_tag"],) + ) vm.wait_for_login(timeout=login_timeout) finally: if vm: @@ -59,10 +62,9 @@ def post_test(): # set socket path of the exporting image over nbd socket_path = os.path.join(data_dir.get_tmp_dir(), "nbd_stg1.sock") params["nbd_unix_socket_nbd1"] = socket_path - params["qsd_image_export_nbd_stg1"] = '{"type":"unix","path":"%s"}' % \ - socket_path + params["qsd_image_export_nbd_stg1"] = f'{{"type":"unix","path":"{socket_path}"}}' - qsd = QsdDaemonDev(params.objects('qsd_namespaces')[0], params) + qsd = QsdDaemonDev(params.objects("qsd_namespaces")[0], params) pre_test() try: run_test() diff --git a/qemu/tests/boot_nic_with_iommu.py b/qemu/tests/boot_nic_with_iommu.py index 696c5117bf..8494a29069 100644 --- a/qemu/tests/boot_nic_with_iommu.py +++ b/qemu/tests/boot_nic_with_iommu.py @@ -1,5 +1,4 @@ -from virttest import error_context -from virttest import utils_test +from virttest import error_context, utils_test @error_context.context_aware @@ -25,13 +24,13 @@ def run(test, params, env): guest_ip = vm.get_address() try: - status, output = utils_test.ping(guest_ip, ping_count, - timeout=float(ping_count) * 1.5) + status, output = utils_test.ping( + guest_ip, ping_count, timeout=float(ping_count) * 1.5 + ) if status != 0: - test.fail("Ping returns non-zero value %s" % output) + test.fail(f"Ping returns non-zero value {output}") package_lost = utils_test.get_loss_ratio(output) if package_lost != 0: - test.fail("%s package lost when ping guest ip %s " % - (package_lost, guest_ip)) + test.fail(f"{package_lost} package lost when ping guest ip {guest_ip} ") finally: session.close() diff --git a/qemu/tests/boot_order_check.py b/qemu/tests/boot_order_check.py index df5b1dd464..725bee91fb 100644 --- a/qemu/tests/boot_order_check.py +++ b/qemu/tests/boot_order_check.py @@ -2,9 +2,7 @@ import time from avocado.utils import process - -from virttest import error_context -from virttest import utils_misc +from virttest import error_context, utils_misc @error_context.context_aware @@ -25,29 +23,30 @@ def run(test, params, env): def _get_device(devices, dev_id): device_found = {} for dev in devices: - if dev['qdev_id'] == dev_id: + if dev["qdev_id"] == dev_id: device_found = dev break - elif dev['class_info'].get('desc') == 'PCI bridge': - pci_bridge_devices = dev['pci_bridge'].get('devices') + elif dev["class_info"].get("desc") == "PCI bridge": + pci_bridge_devices = dev["pci_bridge"].get("devices") if not pci_bridge_devices: continue - device_found = _get_device(pci_bridge_devices, - dev_id) + device_found = _get_device(pci_bridge_devices, dev_id) if device_found: break return device_found def _get_pci_addr_by_devid(dev_id): - dev_addr = '' - dev_addr_fmt = '%02d:%02d.%d' - pci_info = vm.monitor.info('pci', debug=False) + dev_addr = "" + dev_addr_fmt = "%02d:%02d.%d" + pci_info = vm.monitor.info("pci", debug=False) if isinstance(pci_info, list): - device = _get_device(pci_info[0]['devices'], dev_id) + device = _get_device(pci_info[0]["devices"], dev_id) if device: - dev_addr = dev_addr_fmt % (device['bus'], - device['slot'], - device['function']) + dev_addr = dev_addr_fmt % ( + device["bus"], + device["slot"], + device["function"], + ) else: # As device id in the last line of info 
pci output # We need reverse the pci information to get the pci addr which is in the @@ -61,23 +60,22 @@ def _get_pci_addr_by_devid(dev_id): dev_addr = dev_addr_fmt % tuple(bus_slot_func) return dev_addr - error_context.context("Boot vm by passing boot order decided", - test.log.info) + error_context.context("Boot vm by passing boot order decided", test.log.info) vm = env.get_vm(params["main_vm"]) vm.verify_alive() vm.pause() # Disable nic device, boot fail from nic device except user model - if params['nettype'] != 'user': + if params["nettype"] != "user": for nic in vm.virtnet: - process.system("ifconfig %s down" % nic.ifname) + process.system(f"ifconfig {nic.ifname} down") vm.resume() devices_load_timeout = int(params.get("devices_load_timeout", 10)) timeout = int(params.get("login_timeout", 240)) - bootorder_type = params.get("bootorder_type") + params.get("bootorder_type") boot_fail_infos = params.get("boot_fail_infos") bootorder = params.get("bootorder") nic_addr_filter = params.get("nic_addr_filter") @@ -86,18 +84,21 @@ def _get_pci_addr_by_devid(dev_id): list_nic_addr = [] for nic in vm.virtnet: - boot_index = params['bootindex_%s' % nic.nic_name] - pci_addr = utils_misc.wait_for(lambda: _get_pci_addr_by_devid(nic.device_id), - timeout=devices_load_timeout) + boot_index = params[f"bootindex_{nic.nic_name}"] + pci_addr = utils_misc.wait_for( + lambda: _get_pci_addr_by_devid(nic.device_id), timeout=devices_load_timeout + ) if not pci_addr: - test.fail("Cannot get the pci address of %s." % nic.nic_name) + test.fail(f"Cannot get the pci address of {nic.nic_name}.") list_nic_addr.append((pci_addr, boot_index)) list_nic_addr.sort(key=lambda x: x[1]) - boot_fail_infos = boot_fail_infos % (list_nic_addr[0][0], - list_nic_addr[1][0], - list_nic_addr[2][0]) + boot_fail_infos = boot_fail_infos % ( + list_nic_addr[0][0], + list_nic_addr[1][0], + list_nic_addr[2][0], + ) error_context.context("Check the guest boot result", test.log.info) start = time.time() @@ -111,5 +112,7 @@ def _get_pci_addr_by_devid(dev_id): break time.sleep(1) if not result: - test.fail("Timeout when try to get expected boot order: " - "'%s', actual result: '%s'" % (bootorder, output)) + test.fail( + "Timeout when try to get expected boot order: " + f"'{bootorder}', actual result: '{output}'" + ) diff --git a/qemu/tests/boot_time.py b/qemu/tests/boot_time.py index e72149e248..1a9e0b4b2b 100644 --- a/qemu/tests/boot_time.py +++ b/qemu/tests/boot_time.py @@ -1,6 +1,4 @@ -from virttest import error_context -from virttest import utils_misc -from virttest import env_process +from virttest import env_process, error_context, utils_misc from virttest.staging import utils_memory @@ -25,41 +23,41 @@ def run(test, params, env): session = vm.wait_for_login(timeout=timeout) error_context.context("Set guest run level to 1", test.log.info) - single_user_cmd = params['single_user_cmd'] + single_user_cmd = params["single_user_cmd"] session.cmd(single_user_cmd) try: error_context.context("Shut down guest", test.log.info) - session.cmd('sync') + session.cmd("sync") vm.destroy() - error_context.context("Boot up guest and measure the boot time", - test.log.info) + error_context.context("Boot up guest and measure the boot time", test.log.info) utils_memory.drop_caches() vm.create() vm.verify_alive() session = vm.wait_for_serial_login(timeout=timeout) boot_time = utils_misc.monotonic_time() - vm.start_monotonic_time - test.write_test_keyval({'result': "%ss" % boot_time}) + test.write_test_keyval({"result": f"{boot_time}s"}) expect_time = 
int(params.get("expect_bootup_time", "17")) test.log.info("Boot up time: %ss", boot_time) finally: try: error_context.context("Restore guest run level", test.log.info) - restore_level_cmd = params['restore_level_cmd'] + restore_level_cmd = params["restore_level_cmd"] session.cmd(restore_level_cmd) - session.cmd('sync') + session.cmd("sync") vm.destroy(gracefully=False) env_process.preprocess_vm(test, params, env, vm.name) vm.verify_alive() vm.wait_for_login(timeout=timeout) except Exception: - test.log.warning("Can not restore guest run level, " - "need restore the image") + test.log.warning( + "Can not restore guest run level, " "need restore the image" + ) params["restore_image_after_testing"] = "yes" if boot_time > expect_time: - test.fail("Guest boot up is taking too long: %ss" % boot_time) + test.fail(f"Guest boot up is taking too long: {boot_time}s") session.close() diff --git a/qemu/tests/boot_with_different_vectors.py b/qemu/tests/boot_with_different_vectors.py index 99d6dbf561..2891fc16c6 100644 --- a/qemu/tests/boot_with_different_vectors.py +++ b/qemu/tests/boot_with_different_vectors.py @@ -1,10 +1,6 @@ import re -from virttest import error_context -from virttest import utils_test -from virttest import env_process -from virttest import virt_vm -from virttest import utils_net +from virttest import env_process, error_context, utils_net, utils_test, virt_vm from provider import netperf_test @@ -27,8 +23,7 @@ def run(test, params, env): """ def boot_guest_with_vectors(vectors): - error_context.context("Boot guest with vectors = %s" % vectors, - test.log.info) + error_context.context(f"Boot guest with vectors = {vectors}", test.log.info) params["vectors"] = vectors params["start_vm"] = "yes" try: @@ -40,7 +35,7 @@ def boot_guest_with_vectors(vectors): return if int(vectors) < 0: msg = "Qemu did not raise correct error" - msg += " when vectors = %s" % vectors + msg += f" when vectors = {vectors}" test.fail(msg) vm = env.get_vm(params["main_vm"]) @@ -51,8 +46,9 @@ def check_msi_support(session): vectors = int(params["vectors"]) if params["os_type"] == "linux": devices = session.cmd_output("lspci | grep Eth").strip() - error_context.context("Check if vnic inside guest support msi.", - test.log.info) + error_context.context( + "Check if vnic inside guest support msi.", test.log.info + ) for device in devices.split("\n"): if not device: continue @@ -60,7 +56,7 @@ def check_msi_support(session): msi_check_cmd = params["msi_check_cmd"] % d_id status, output = session.cmd_status_output(msi_check_cmd) if vectors == 0 and output: - if (re.findall("MSI-X: Enable+", output)): + if re.findall("MSI-X: Enable+", output): test.fail("Guest don't support msi when vectors=0") test.log.info("Guest works well when vectors=0") elif vectors != 0 and status: @@ -69,43 +65,45 @@ def check_msi_support(session): test.fail("msg") elif vectors == 1 and output: if not (re.findall("MSI-X: Enable-", output)): - msg = "Command %s get wrong output." % msi_check_cmd + msg = f"Command {msi_check_cmd} get wrong output." msg += " when vectors = 1" test.fail(msg) test.log.info("MSI-X is disabled") elif 2 <= vectors and output: if not (re.findall("MSI-X: Enable+", output)): - msg = "Command %s get wrong output." % msi_check_cmd + msg = f"Command {msi_check_cmd} get wrong output." 
msg += " when vectors = %d" % vectors test.fail(msg) test.log.info("MSI-X is enabled") else: - error_context.context("Check if the driver is installed and " - "verified", test.log.info) + error_context.context( + "Check if the driver is installed and " "verified", test.log.info + ) driver_verifier = params["driver_verifier"] - utils_test.qemu.windrv_check_running_verifier(session, vm, - test, - driver_verifier, - cmd_timeout) + utils_test.qemu.windrv_check_running_verifier( + session, vm, test, driver_verifier, cmd_timeout + ) msis, queues = utils_net.get_msis_and_queues_windows(params, vm) if msis == 0 and vectors == 0: test.log.info("Guest works well when vectors=0") elif vectors == 0 and msis != 0: test.fail("Can't get msi status when vectors=0") if 1 <= vectors and vectors != msis: - test.fail("Msis should equal to vectors(%s), " - "but guest is %s" % (vectors, msis)) + test.fail( + f"Msis should equal to vectors({vectors}), " f"but guest is {msis}" + ) def check_interrupt(session, vectors): - error_context.context("Check the cpu interrupt of virito", - test.log.info) + error_context.context("Check the cpu interrupt of virito", test.log.info) vectors = int(vectors) irq_check_cmd = params["irq_check_cmd"] output = session.cmd_output(irq_check_cmd).strip() if vectors == 0 or vectors == 1: - if not (re.findall("IO-APIC.*fasteoi|XICS.*Level|XIVE.*Level|" - "GIC.*Level", - output)): + if not ( + re.findall( + "IO-APIC.*fasteoi|XICS.*Level|XIVE.*Level|" "GIC.*Level", output + ) + ): msg = "Could not find interrupt controller for virito device" msg += " when vectors = %d" % vectors test.fail(msg) @@ -113,14 +111,16 @@ def check_interrupt(session, vectors): if not re.findall("virtio[0-9]-virtqueues", output): msg = "Could not find the virtio device for MSI-X interrupt" msg += " when vectors = %d " % vectors - msg += "Command %s got output %s" % (irq_check_cmd, output) + msg += f"Command {irq_check_cmd} got output {output}" test.fail(msg) elif vectors == 9 or vectors == 10: - if not (re.findall("virtio[0-9]-input", output) and - re.findall("virtio[0-9]-output", output)): + if not ( + re.findall("virtio[0-9]-input", output) + and re.findall("virtio[0-9]-output", output) + ): msg = "Could not find the virtio device for MSI-X interrupt" msg += " when vectors = %d " % vectors - msg += "Command %s got output %s" % (irq_check_cmd, output) + msg += f"Command {irq_check_cmd} got output {output}" test.fail(msg) vectors_list = params["vectors_list"] diff --git a/qemu/tests/boot_with_disable_ept.py b/qemu/tests/boot_with_disable_ept.py index 722bceab93..6243933066 100644 --- a/qemu/tests/boot_with_disable_ept.py +++ b/qemu/tests/boot_with_disable_ept.py @@ -1,5 +1,4 @@ from avocado.utils import process - from virttest import env_process @@ -17,9 +16,9 @@ def run(test, params, env): timeout = float(params.get("login_timeout", 2400)) output = process.getoutput(params["check_status_cmd"]) if output != params["expected_status"]: - test.fail("Disable %s failed" % params["parameter_name"]) - params["start_vm"] = 'yes' + test.fail("Disable {} failed".format(params["parameter_name"])) + params["start_vm"] = "yes" env_process.preprocess_vm(test, params, env, params["main_vm"]) - vm = env.get_vm(params['main_vm']) + vm = env.get_vm(params["main_vm"]) session = vm.wait_for_login(timeout=timeout) session.close() diff --git a/qemu/tests/boot_with_machine_types.py b/qemu/tests/boot_with_machine_types.py index 978a3f1bf2..14f1b67d43 100644 --- a/qemu/tests/boot_with_machine_types.py +++ 
b/qemu/tests/boot_with_machine_types.py @@ -1,8 +1,6 @@ import re -from virttest import error_context -from virttest import utils_misc -from virttest import env_process +from virttest import env_process, error_context, utils_misc @error_context.context_aware @@ -19,25 +17,34 @@ def run(test, params, env): error_context.context("Get supported machine type", test.log.info) qemu_binary = utils_misc.get_qemu_binary(params) machine_types = [] - machine_type_mapping = {"pc": ["i440FX", "RHEL 6"], "q35": ["Q35"], "pseries": ["pSeries"], - "arm64-pci:virt": ["ARM"], "arm64-mmio:virt": ["ARM"], "s390-ccw-virtio": ["S390"]} + machine_type_mapping = { + "pc": ["i440FX", "RHEL 6"], + "q35": ["Q35"], + "pseries": ["pSeries"], + "arm64-pci:virt": ["ARM"], + "arm64-mmio:virt": ["ARM"], + "s390-ccw-virtio": ["S390"], + } for m_type, s_name in zip(*utils_misc.get_support_machine_type(qemu_binary)[:2]): for item in machine_type_mapping[params["machine_type"]]: if item in s_name: if "arm64" in params["machine_type"]: - m_type = re.sub(r'(?<=:)\w+', m_type, params["machine_type"]) + m_type = re.sub(r"(?<=:)\w+", m_type, params["machine_type"]) machine_types.append(m_type) if not machine_types: test.fail("Failed to get machine types") else: - test.log.info("Actual supported machine types are: %s", ', '.join(map(str, machine_types))) + test.log.info( + "Actual supported machine types are: %s", ", ".join(map(str, machine_types)) + ) for m_type in machine_types: params["machine_type"] = m_type params["start_vm"] = "yes" - vm_name = params['main_vm'] - error_context.context("Start vm with machine type '%s'" - % m_type, test.log.info) + vm_name = params["main_vm"] + error_context.context( + f"Start vm with machine type '{m_type}'", test.log.info + ) env_process.preprocess(test, params, env) vm = env.get_vm(vm_name) vm.verify_alive() @@ -45,9 +52,10 @@ def run(test, params, env): session = vm.wait_for_login(timeout=timeout) if not session.is_responsive(): session.close() - test.fail("Start vm with machine type:%s fail" % m_type) + test.fail(f"Start vm with machine type:{m_type} fail") session.close() - error_context.context("Quit guest and check the process quit normally", - test.log.info) + error_context.context( + "Quit guest and check the process quit normally", test.log.info + ) vm.destroy(gracefully=False) diff --git a/qemu/tests/boot_with_multiqueue.py b/qemu/tests/boot_with_multiqueue.py index b697d7bec4..9fe35be9d6 100644 --- a/qemu/tests/boot_with_multiqueue.py +++ b/qemu/tests/boot_with_multiqueue.py @@ -1,8 +1,6 @@ import re -from virttest import error_context -from virttest import utils_test -from virttest import utils_net +from virttest import error_context, utils_net, utils_test @error_context.context_aware @@ -24,8 +22,7 @@ def run(test, params, env): # boot the vm with the queues queues = int(params["queues"]) - error_context.context("Boot the guest with queues = %s" % queues, - test.log.info) + error_context.context(f"Boot the guest with queues = {queues}", test.log.info) vm = env.get_vm(params["main_vm"]) vm.verify_alive() @@ -35,54 +32,52 @@ def run(test, params, env): if params["os_type"] == "linux": nic = vm.virtnet[0] ifname = utils_net.get_linux_ifname(session, nic.mac) - set_queue_cmd = "ethtool -L %s combined %s" % (ifname, queues) - status, output = session.cmd_status_output(set_queue_cmd, - timeout=cmd_timeout, - safe=True) + set_queue_cmd = f"ethtool -L {ifname} combined {queues}" + status, output = session.cmd_status_output( + set_queue_cmd, timeout=cmd_timeout, safe=True + ) if status: 
err = "Failed to set queues to %s with status = %s and output= %s" err %= (queues, status, output) test.fail(err) - check_queue_cmd = "ethtool -l %s" % ifname + check_queue_cmd = f"ethtool -l {ifname}" output = session.cmd_output(check_queue_cmd, timeout=cmd_timeout) if len(re.findall(r"Combined:\s+%d\s" % queues, output)) != 2: - test.fail("Fail to set queues to %s on %s" % - (queues, nic.nic_name)) + test.fail(f"Fail to set queues to {queues} on {nic.nic_name}") # check the msi for linux guest error_context.context("Check the msi number in guest", test.log.info) - devices = session.cmd_output("lspci | grep Ethernet", - timeout=cmd_timeout, safe=True).strip() + devices = session.cmd_output( + "lspci | grep Ethernet", timeout=cmd_timeout, safe=True + ).strip() for device in devices.split("\n"): if not device: continue d_id = device.split()[0] msi_check_cmd = params["msi_check_cmd"] % d_id - status, output = session.cmd_status_output(msi_check_cmd, - timeout=cmd_timeout, - safe=True) + status, output = session.cmd_status_output( + msi_check_cmd, timeout=cmd_timeout, safe=True + ) find_result = re.search(r"MSI-X: Enable\+\s+Count=(\d+)", output) if not find_result: - test.fail("No MSI info in output: %s" % output) + test.fail(f"No MSI info in output: {output}") msis = int(find_result.group(1)) if msis != 2 * queues + 2: - test.fail("MSI not correct with output: %s" % output) + test.fail(f"MSI not correct with output: {output}") else: # verify driver - error_context.context("Check if the driver is installed and " - "verified", test.log.info) + error_context.context( + "Check if the driver is installed and " "verified", test.log.info + ) driver_name = params.get("driver_name", "netkvm") - session = utils_test.qemu.windrv_check_running_verifier(session, vm, - test, - driver_name, - cmd_timeout) + session = utils_test.qemu.windrv_check_running_verifier( + session, vm, test, driver_name, cmd_timeout + ) # check the msi for windows guest with trace view error_context.context("Check the msi number in guest", test.log.info) - msis, cur_queues = utils_net.get_msis_and_queues_windows(params, - vm) + msis, cur_queues = utils_net.get_msis_and_queues_windows(params, vm) if cur_queues != queues or msis != 2 * queues + 2: - test.fail("queues not correct with %s, expect %s" % - (cur_queues, queues)) + test.fail(f"queues not correct with {cur_queues}, expect {queues}") # start scp test error_context.context("Start scp file transfer test", test.log.info) diff --git a/qemu/tests/boot_with_remote_readonly_image.py b/qemu/tests/boot_with_remote_readonly_image.py index 1e12c5ef81..99c33a2118 100644 --- a/qemu/tests/boot_with_remote_readonly_image.py +++ b/qemu/tests/boot_with_remote_readonly_image.py @@ -1,11 +1,6 @@ -from virttest import qemu_storage -from virttest import data_dir -from virttest import utils_misc -from virttest import utils_test - from avocado import fail_on - from avocado.utils import process +from virttest import data_dir, qemu_storage, utils_misc, utils_test from provider import qemu_img_utils as img_utils @@ -18,25 +13,25 @@ def run(test, params, env): 3) Log into VM 4) Check readable cdrom """ + def _convert_image(): - source = params['images'].split()[0] - target = params['convert_target'] + source = params["images"].split()[0] + target = params["convert_target"] source_params = params.object_params(source) target_params = params.object_params(target) source_image = qemu_storage.QemuImg(source_params, None, source) # Convert source to target 
fail_on((process.CmdError,))(source_image.convert)( - target_params, data_dir.get_data_dir()) + target_params, data_dir.get_data_dir() + ) _convert_image() - vm = img_utils.boot_vm_with_images(test, params, env, - (params['convert_target'],)) - session = vm.wait_for_login( - timeout=params.get_numeric("login_timeout", 360)) + vm = img_utils.boot_vm_with_images(test, params, env, (params["convert_target"],)) + session = vm.wait_for_login(timeout=params.get_numeric("login_timeout", 360)) cdroms = utils_misc.wait_for( lambda: (utils_test.get_readable_cdroms(params, session)), - timeout=params.get_numeric("timeout", 10) + timeout=params.get_numeric("timeout", 10), ) session.close() if not cdroms: diff --git a/qemu/tests/boot_without_vectors.py b/qemu/tests/boot_without_vectors.py index eaa3745dec..0b90e9045c 100644 --- a/qemu/tests/boot_without_vectors.py +++ b/qemu/tests/boot_without_vectors.py @@ -1,6 +1,4 @@ -from virttest import error_context -from virttest import utils_test -from virttest import env_process +from virttest import env_process, error_context, utils_test @error_context.context_aware @@ -28,8 +26,9 @@ def check_msi_support(session): """ if params["os_type"] == "linux": devices = session.cmd_output("lspci | grep Eth").strip() - error_context.context("Check if vnic inside guest support msi.", - test.log.info) + error_context.context( + "Check if vnic inside guest support msi.", test.log.info + ) for device in devices.splitlines(): if not device: continue @@ -37,13 +36,14 @@ def check_msi_support(session): msi_check_cmd = params["msi_check_cmd"] % d_id output = session.cmd_output(msi_check_cmd) if output: - req_args = utils_test.check_kernel_cmdline(session, - args="pci=nomsi") + req_args = utils_test.check_kernel_cmdline( + session, args="pci=nomsi" + ) if not req_args: if "MSI-X: Enable-" in output: test.log.info("MSI-X is disabled") else: - msg = "Command %s get wrong" % msi_check_cmd + msg = f"Command {msi_check_cmd} get wrong" msg += " output when no vectors in qemu cmd" msg += " line and nomsi in /proc/cmdline" test.fail(msg) @@ -63,14 +63,15 @@ def do_test(test, params, env): guest_ip = vm.get_address() ping_count = int(params.get("ping_count", 0)) if not ping_count == 0: - status, output = utils_test.ping(guest_ip, ping_count, - timeout=float(ping_count) * 1.5) + status, output = utils_test.ping( + guest_ip, ping_count, timeout=float(ping_count) * 1.5 + ) if status != 0: - test.fail("Ping returns non-zero value %s" % output) + test.fail(f"Ping returns non-zero value {output}") package_lost = utils_test.get_loss_ratio(output) if package_lost != 0: - test.fail("%s packeage lost when ping server" % package_lost) + test.fail(f"{package_lost} packeage lost when ping server") check_msi_support(session) diff --git a/qemu/tests/bridge_mirror.py b/qemu/tests/bridge_mirror.py index 9c00033e51..b919c965c8 100644 --- a/qemu/tests/bridge_mirror.py +++ b/qemu/tests/bridge_mirror.py @@ -2,9 +2,7 @@ import time from avocado.utils import process -from virttest import utils_net -from virttest import env_process -from virttest import error_context +from virttest import env_process, error_context, utils_net @error_context.context_aware @@ -25,6 +23,7 @@ def run(test, params, env): :param params: Dictionary with the test parameters :param env: Dictionary with test environment. 
""" + def mirror_bridge_to_tap(tap_name): """ Mirror all packages on bridge to tap device connected to vm vNIC @@ -46,8 +45,10 @@ def mirror_bridge_to_tap(tap_name): port = re.findall("qdisc prio (.*):", output)[0] tc_filter_show_dev_port = params.get("tc_filter_show_dev_port") % port - tc_filter_replace_dev_port = params.get( - "tc_filter_replace_dev_port") % (port, tap_name) + tc_filter_replace_dev_port = params.get("tc_filter_replace_dev_port") % ( + port, + tap_name, + ) process.system_output(tc_filter_show_dev_port) process.system_output(tc_filter_replace_dev_port) @@ -62,10 +63,8 @@ def check_tcpdump(output, src_ip, des_ip, ping_count): :return: bool type result. """ - rex_request = r".*IP %s > %s.*ICMP echo request.*" % ( - src_ip, des_ip) - rex_reply = r".*IP %s > %s.*ICMP echo reply.*" % ( - des_ip, src_ip) + rex_request = rf".*IP {src_ip} > {des_ip}.*ICMP echo request.*" + rex_reply = rf".*IP {des_ip} > {src_ip}.*ICMP echo reply.*" request_num = 0 reply_num = 0 for idx, _ in enumerate(output.splitlines()): @@ -75,11 +74,15 @@ def check_tcpdump(output, src_ip, des_ip, ping_count): reply_num += 1 if request_num != ping_count or reply_num != ping_count: - test.log.debug("Unexpected request or reply number. " - "current request number is: %d, " - "current reply number is: %d, " - "expected request and reply number is: %d. ", - request_num, reply_num, ping_count) + test.log.debug( + "Unexpected request or reply number. " + "current request number is: %d, " + "current reply number is: %d, " + "expected request and reply number is: %d. ", + request_num, + reply_num, + ping_count, + ) return False return True @@ -104,7 +107,7 @@ def check_tcpdump(output, src_ip, des_ip, ping_count): br_iface.up() br_iface.promisc_on() - params['netdst'] = brname + params["netdst"] = brname params["start_vm"] = "yes" vm_names = params.get("vms").split() vms_info = {} @@ -113,15 +116,15 @@ def check_tcpdump(output, src_ip, des_ip, ping_count): env_process.preprocess_vm(test, params, env, vm_name) vm = env.get_vm(vm_name) vm.verify_alive() - ip = params["ip_%s" % vm_name] + ip = params[f"ip_{vm_name}"] mac = vm.get_mac_address() serial_session = vm.wait_for_serial_login(timeout=login_timeout) serial_session.cmd_output_safe(stop_NM_cmd) serial_session.cmd_output_safe(stop_firewall_cmd) nic_name = utils_net.get_linux_ifname(serial_session, mac) ifname = vm.get_ifname() - ifset_cmd = "ip addr add %s/%s dev %s" % (ip, net_mask, nic_name) - ifup_cmd = "ip link set dev %s up" % nic_name + ifset_cmd = f"ip addr add {ip}/{net_mask} dev {nic_name}" + ifup_cmd = f"ip link set dev {nic_name} up" serial_session.cmd_output_safe(ifset_cmd) serial_session.cmd_output_safe(ifup_cmd) vms_info[vm_name] = [vm, ifname, ip, serial_session, nic_name] @@ -131,30 +134,28 @@ def check_tcpdump(output, src_ip, des_ip, ping_count): vm_des = vm_names[2] error_context.context( - "Mirror all packets on bridge to tap device conncted to %s" - % vm_mirror) + f"Mirror all packets on bridge to tap device conncted to {vm_mirror}" + ) tap_ifname = vms_info[vm_mirror][0].virtnet[0].ifname mirror_bridge_to_tap(tap_ifname) - error_context.context("Start tcpdump in %s" - % vm_mirror, test.log.info) + error_context.context(f"Start tcpdump in {vm_mirror}", test.log.info) tcpdump_cmd = tcpdump_cmd % (vms_info[vm_des][2], tcpdump_log) test.log.info("tcpdump command: %s", tcpdump_cmd) vms_info[vm_mirror][3].sendline(tcpdump_cmd) time.sleep(5) - error_context.context("Start ping from %s to %s" % - (vm_src, vm_des), test.log.info) + 
error_context.context(f"Start ping from {vm_src} to {vm_des}", test.log.info) ping_cmd = params.get("ping_cmd") % vms_info[vm_des][2] vms_info[vm_src][3].cmd(ping_cmd, timeout=150) error_context.context("Check tcpdump results", test.log.info) time.sleep(5) vms_info[vm_mirror][3].cmd_output_safe("pkill tcpdump") - tcpdump_content = vms_info[vm_mirror][3].cmd_output( - get_tcpdump_log_cmd).strip() - if not check_tcpdump(tcpdump_content, vms_info[vm_src][2], - vms_info[vm_des][2], ping_count): + tcpdump_content = vms_info[vm_mirror][3].cmd_output(get_tcpdump_log_cmd).strip() + if not check_tcpdump( + tcpdump_content, vms_info[vm_src][2], vms_info[vm_des][2], ping_count + ): test.fail("tcpdump results are not expected, mirror fail.") finally: for vm in vms_info: diff --git a/qemu/tests/bridge_qinq.py b/qemu/tests/bridge_qinq.py index 95748686ef..8962a8d3aa 100644 --- a/qemu/tests/bridge_qinq.py +++ b/qemu/tests/bridge_qinq.py @@ -1,16 +1,9 @@ -import re import os +import re import time -from avocado.utils import process -from avocado.utils import crypto -from virttest import utils_net -from virttest import env_process -from virttest import error_context -from virttest import data_dir -from virttest import utils_test -from virttest import remote - +from avocado.utils import crypto, process +from virttest import data_dir, env_process, error_context, remote, utils_net, utils_test from virttest.utils_version import VersionInterval @@ -48,12 +41,20 @@ def copy_qinq_file(vm, guest_qinq_dir): """ error_context.context("Copy qinq script to guest", test.log.info) - host_qinq_dir = os.path.join(data_dir.get_deps_dir(), - params.get("copy_qinq_script")) + host_qinq_dir = os.path.join( + data_dir.get_deps_dir(), params.get("copy_qinq_script") + ) vm.copy_files_to(host_qinq_dir, guest_qinq_dir) - def check_tcpdump_result(session, iface_name, ethertype, ethertype2=None, - vlan_tag=None, vlan_tag2=None, enable_logging=False): + def check_tcpdump_result( + session, + iface_name, + ethertype, + ethertype2=None, + vlan_tag=None, + vlan_tag2=None, + enable_logging=False, + ): """ Check tcpdump result. 
@@ -66,41 +67,48 @@ def check_tcpdump_result(session, iface_name, ethertype, ethertype2=None, :param enable_logging: whether to dump tcpdump results during test """ get_tcpdump_log_cmd = params["get_tcpdump_log_cmd"] % iface_name - tcpdump_content = session.cmd_output(get_tcpdump_log_cmd, timeout=300, safe=True).strip() + tcpdump_content = session.cmd_output( + get_tcpdump_log_cmd, timeout=300, safe=True + ).strip() lines = tcpdump_content.splitlines() sum = 0 for i in range(len(lines)): if enable_logging: test.log.info("line %s: %s", i, lines[i]) if not ethertype2: - if "ICMP echo re" in lines[i] and \ - ethertype in lines[i-1]: + if "ICMP echo re" in lines[i] and ethertype in lines[i - 1]: sum += 1 - if vlan_tag and vlan_tag not in lines[i-1]: - if "too much work for irq" in lines[i-1]: + if vlan_tag and vlan_tag not in lines[i - 1]: + if "too much work for irq" in lines[i - 1]: continue else: - test.fail("in %s tcpdump log, there should be vlan " - "tag %s" % (iface_name, vlan_tag)) + test.fail( + f"in {iface_name} tcpdump log, there should be vlan " + f"tag {vlan_tag}" + ) elif not vlan_tag: - if "vlan" in lines[i-1]: - test.fail("in %s tcpdump log, there should not be " - "vlan tag" % iface_name) + if "vlan" in lines[i - 1]: + test.fail( + f"in {iface_name} tcpdump log, there should not be " + "vlan tag" + ) else: - if "ICMP echo re" in lines[i] and \ - ethertype in lines[i-1] and \ - ethertype2 in lines[i-1]: + if ( + "ICMP echo re" in lines[i] + and ethertype in lines[i - 1] + and ethertype2 in lines[i - 1] + ): sum += 1 - if vlan_tag not in lines[i-1] or \ - vlan_tag2 not in lines[i-1]: - if "too much work for irq" in lines[i-1]: + if vlan_tag not in lines[i - 1] or vlan_tag2 not in lines[i - 1]: + if "too much work for irq" in lines[i - 1]: continue else: - test.fail("in %s tcpdump log, there should be vlan " - "tag %s" % (iface_name, vlan_tag)) + test.fail( + f"in {iface_name} tcpdump log, there should be vlan " + f"tag {vlan_tag}" + ) if sum == 0: - test.fail("in %s tcpdump log, ethertype is not %s" % (iface_name, - ethertype)) + test.fail(f"in {iface_name} tcpdump log, ethertype is not {ethertype}") def compare_host_guest_md5sum(): """ @@ -112,7 +120,7 @@ def compare_host_guest_md5sum(): test.log.info("Comparing md5sum on guest and host") host_result = crypto.hash_file(host_path, algorithm="md5") try: - output = session.cmd_output("md5sum %s" % guest_path, 120).split()[0] + output = session.cmd_output(f"md5sum {guest_path}", 120).split()[0] guest_result = re.findall(r"\w+", output)[0] except IndexError: test.log.error("Could not get file md5sum in guest") @@ -153,7 +161,7 @@ def compare_host_guest_md5sum(): try: login_timeout = int(params.get("login_timeout", "600")) - params['netdst'] = brname + params["netdst"] = brname params["start_vm"] = "yes" params["image_snapshot"] = "yes" env_process.preprocess_vm(test, params, env, params["main_vm"]) @@ -171,36 +179,37 @@ def compare_host_guest_md5sum(): session.cmd_output(set_ip_cmd % (ip, nic_name)) # Create vlans via script qinq.sh - output = session.cmd_output("sh %sqinq.sh %s" % (guest_qinq_dir, - nic_name), timeout=300) + output = session.cmd_output( + f"sh {guest_qinq_dir}qinq.sh {nic_name}", timeout=300 + ) test.log.info("%s", output) # Set interface v1v10 IP in guest L1tag_iface = params["L1tag_iface"] L1tag_iface_ip = params["L1tag_iface_ip"] session.cmd_output(set_ip_cmd % (L1tag_iface_ip, L1tag_iface)) - session.cmd("ip link set %s up" % L1tag_iface) - output = session.cmd_output("ip addr show %s" % L1tag_iface, - 
timeout=120) + session.cmd(f"ip link set {L1tag_iface} up") + output = session.cmd_output(f"ip addr show {L1tag_iface}", timeout=120) test.log.info(output) # Start tcpdump on L1tag interface and first_nic in guest - error_context.context("Start tcpdump in %s" % params["main_vm"], - test.log.info) + error_context.context( + "Start tcpdump in {}".format(params["main_vm"]), test.log.info + ) L1tag_tcpdump_log = params.get("tcpdump_log") % L1tag_iface - L1tag_tcpdump_cmd = params.get("tcpdump_cmd") % (L1tag_iface, - L1tag_tcpdump_log) + L1tag_tcpdump_cmd = params.get("tcpdump_cmd") % (L1tag_iface, L1tag_tcpdump_log) first_nic_tcpdump_log = params.get("tcpdump_log") % nic_name - first_nic_tcpdump_cmd = params.get("tcpdump_cmd") % (nic_name, - first_nic_tcpdump_log) + first_nic_tcpdump_cmd = params.get("tcpdump_cmd") % ( + nic_name, + first_nic_tcpdump_log, + ) session.sendline(L1tag_tcpdump_cmd) time.sleep(2) session.sendline(first_nic_tcpdump_cmd) time.sleep(5) # Create 802.1ad vlan via bridge in host - error_context.context("Create 802.1ad vlan via bridge %s" % brname, - test.log.info) + error_context.context(f"Create 802.1ad vlan via bridge {brname}", test.log.info) advlan_ifname = params["advlan_name"] add_advlan_cmd = params["add_advlan_cmd"] process.system_output(add_advlan_cmd) @@ -208,45 +217,50 @@ def compare_host_guest_md5sum(): advlan_iface.set_mac(params["advlan_mac"]) process.system(set_ip_cmd % (params["advlan_ip"], advlan_ifname)) advlan_iface.up() - output = process.getoutput("ip addr show %s" % advlan_ifname) + output = process.getoutput(f"ip addr show {advlan_ifname}") test.log.info(output) # Ping guest from host via 802.1ad vlan interface - error_context.context("Start ping test from host to %s via %s" % - (L1tag_iface_ip, advlan_ifname), test.log.info) + error_context.context( + f"Start ping test from host to {L1tag_iface_ip} via {advlan_ifname}", + test.log.info, + ) ping_count = int(params.get("ping_count")) - status, output = utils_net.ping(L1tag_iface_ip, ping_count, - interface=advlan_ifname, - timeout=float(ping_count)*1.5) + status, output = utils_net.ping( + L1tag_iface_ip, + ping_count, + interface=advlan_ifname, + timeout=float(ping_count) * 1.5, + ) if status != 0: - test.fail("Ping returns non-zero value %s" % output) + test.fail(f"Ping returns non-zero value {output}") package_lost = utils_test.get_loss_ratio(output) if package_lost != 0: - test.fail("%s packeage lost when ping guest ip %s " % (package_lost, - L1tag_iface_ip)) + test.fail( + f"{package_lost} packeage lost when ping guest ip {L1tag_iface_ip} " + ) # Stop tcpdump and check result session.cmd_output_safe("pkill tcpdump") - check_tcpdump_result(session, L1tag_iface, - "ethertype IPv4 (0x0800)") - check_tcpdump_result(session, nic_name, - "ethertype 802.1Q-QinQ (0x88a8)", vlan_tag="vlan 10,") + check_tcpdump_result(session, L1tag_iface, "ethertype IPv4 (0x0800)") + check_tcpdump_result( + session, nic_name, "ethertype 802.1Q-QinQ (0x88a8)", vlan_tag="vlan 10," + ) # Set IP on L2 tag on the guest interface with vid 20 L2tag_iface = params["L2tag_iface"] L2tag_iface_ip = params["L2tag_iface_ip"] session.cmd_output(set_ip_cmd % (L2tag_iface_ip, L2tag_iface)) - session.cmd("ip link set %s up" % L2tag_iface) - output = session.cmd_output("ip addr show %s" % L2tag_iface, - timeout=120) + session.cmd(f"ip link set {L2tag_iface} up") + output = session.cmd_output(f"ip addr show {L2tag_iface}", timeout=120) test.log.info(output) # Start tcpdump on L1tag and L2tag interfaces and first_nic in guest - 
error_context.context("Start tcpdump in %s" % params["main_vm"], - test.log.info) + error_context.context( + "Start tcpdump in {}".format(params["main_vm"]), test.log.info + ) L2tag_tcpdump_log = params.get("tcpdump_log") % L2tag_iface - L2tag_tcpdump_cmd = params.get("tcpdump_cmd") % (L2tag_iface, - L2tag_tcpdump_log) + L2tag_tcpdump_cmd = params.get("tcpdump_cmd") % (L2tag_iface, L2tag_tcpdump_log) session.sendline(L1tag_tcpdump_cmd) time.sleep(2) session.sendline(L2tag_tcpdump_cmd) @@ -255,63 +269,77 @@ def compare_host_guest_md5sum(): time.sleep(5) # Create 802.1q vlan via 802.1ad vlan in host - error_context.context("Create 802.1q vlan via 802.1ad vlan %s" % - advlan_ifname, test.log.info) + error_context.context( + f"Create 802.1q vlan via 802.1ad vlan {advlan_ifname}", test.log.info + ) qvlan_ifname = params["qvlan_name"] add_qvlan_cmd = params["add_qvlan_cmd"] process.system_output(add_qvlan_cmd) qvlan_iface = utils_net.Interface(qvlan_ifname) process.system(set_ip_cmd % (params["qvlan_ip"], qvlan_ifname)) qvlan_iface.up() - output = process.getoutput("ip addr show %s" % qvlan_ifname) + output = process.getoutput(f"ip addr show {qvlan_ifname}") test.log.info(output) # Ping guest from host via 802.1q vlan interface - error_context.context("Start ping test from host to %s via %s" % - (L2tag_iface_ip, qvlan_ifname), test.log.info) - status, output = utils_net.ping(L2tag_iface_ip, ping_count, - interface=qvlan_ifname, - timeout=float(ping_count)*1.5) + error_context.context( + f"Start ping test from host to {L2tag_iface_ip} via {qvlan_ifname}", + test.log.info, + ) + status, output = utils_net.ping( + L2tag_iface_ip, + ping_count, + interface=qvlan_ifname, + timeout=float(ping_count) * 1.5, + ) if status != 0: - test.fail("Ping returns non-zero value %s" % output) + test.fail(f"Ping returns non-zero value {output}") package_lost = utils_test.get_loss_ratio(output) if package_lost >= 5: - test.fail("%s packeage lost when ping guest ip %s " % (package_lost, - L2tag_iface_ip)) + test.fail( + f"{package_lost} packeage lost when ping guest ip {L2tag_iface_ip} " + ) # Stop tcpdump and check result session.cmd_output_safe("pkill tcpdump") - check_tcpdump_result(session, L1tag_iface, - "ethertype 802.1Q (0x8100)", vlan_tag="vlan 20,") - check_tcpdump_result(session, L2tag_iface, - "ethertype IPv4 (0x0800)") - check_tcpdump_result(session, nic_name, - ethertype="ethertype 802.1Q-QinQ (0x88a8)", - ethertype2="ethertype 802.1Q", - vlan_tag="vlan 10,", - vlan_tag2="vlan 20,") + check_tcpdump_result( + session, L1tag_iface, "ethertype 802.1Q (0x8100)", vlan_tag="vlan 20," + ) + check_tcpdump_result(session, L2tag_iface, "ethertype IPv4 (0x0800)") + check_tcpdump_result( + session, + nic_name, + ethertype="ethertype 802.1Q-QinQ (0x88a8)", + ethertype2="ethertype 802.1Q", + vlan_tag="vlan 10,", + vlan_tag2="vlan 20,", + ) # configure the outer VLAN MTU to 1504 on qemu-8.1 - if vm.devices.qemu_version in VersionInterval('[8.1.0,)') and \ - params.get("nic_model") == "e1000e": - session.cmd("ip link set %s mtu 1504" % nic_name) + if ( + vm.devices.qemu_version in VersionInterval("[8.1.0,)") + and params.get("nic_model") == "e1000e" + ): + session.cmd(f"ip link set {nic_name} mtu 1504") # scp file to guest with L2 vlan tag cmd = "dd if=/dev/zero of=%s bs=1M count=%d" % (host_path, file_size) - error_context.context( - "Creating %dMB file on host" % file_size, test.log.info) + error_context.context("Creating %dMB file on host" % file_size, test.log.info) process.run(cmd) - 
error_context.context("Transferring file host -> guest, " - "timeout: %ss" % transfer_timeout, test.log.info) + error_context.context( + "Transferring file host -> guest, " f"timeout: {transfer_timeout}s", + test.log.info, + ) shell_port = int(params.get("shell_port", 22)) password = params["password"] username = params["username"] - remote.scp_to_remote(L2tag_iface_ip, shell_port, username, password, - host_path, guest_path) + remote.scp_to_remote( + L2tag_iface_ip, shell_port, username, password, host_path, guest_path + ) if not compare_host_guest_md5sum(): test.fail("md5sum mismatch on guest and host") finally: - session.cmd("rm -rf %s" % guest_path) + session.cmd(f"rm -rf {guest_path}") session.close() vm.destroy(gracefully=False) host_bridge_iface.down() diff --git a/qemu/tests/bridge_vlan.py b/qemu/tests/bridge_vlan.py index e7966a7cdf..110d1b3cdb 100644 --- a/qemu/tests/bridge_vlan.py +++ b/qemu/tests/bridge_vlan.py @@ -1,24 +1,18 @@ import time -from virttest import utils_test -from virttest import utils_net -from virttest import funcatexit -from virttest import error_context - -from avocado.utils import process -from avocado.utils import linux_modules +from avocado.utils import linux_modules, process +from virttest import error_context, funcatexit, utils_net, utils_test class NetPingError(utils_net.NetError): - def __init__(self, src, dst, details): utils_net.NetError.__init__(self, src, details) self.dst = dst def __str__(self): - e_msg = "Can't ping from %s to %s" % (self.src, self.dst) + e_msg = f"Can't ping from {self.src} to {self.dst}" if self.details is not None: - e_msg += " : %s" % self.details + e_msg += f" : {self.details}" return e_msg @@ -57,10 +51,11 @@ def add_vlan(interface, v_id, session=None): :params v_id: Vlan id. :params session: VM session or none. """ - vlan_if = '%s.%s' % (interface, v_id) + vlan_if = f"{interface}.{v_id}" add_cmd = params["add_vlan_cmd"] % (interface, vlan_if, v_id) - error_context.context("Create vlan interface '%s' on %s" % - (vlan_if, interface), test.log.info) + error_context.context( + f"Create vlan interface '{vlan_if}' on {interface}", test.log.info + ) if session: session.cmd_output_safe(add_cmd) else: @@ -74,18 +69,19 @@ def set_ip_vlan(vlan_if, vlan_ip, session=None): :params vlan_ip: Vlan internal ip. :params session: VM session or none. """ - error_context.context("Assign IP '%s' to vlan interface '%s'" % - (vlan_ip, vlan_if), test.log.info) + error_context.context( + f"Assign IP '{vlan_ip}' to vlan interface '{vlan_if}'", test.log.info + ) if session: disable_firewall = params.get("disable_firewall", "") session.cmd_output_safe(disable_firewall) disable_nm = params.get("disable_nm", "") session.cmd_output_safe(disable_nm) - session.cmd_output_safe("ifconfig %s 0.0.0.0" % vlan_if) - session.cmd_output_safe("ifconfig %s down" % vlan_if) - session.cmd_output_safe("ifconfig %s %s up" % (vlan_if, vlan_ip)) + session.cmd_output_safe(f"ifconfig {vlan_if} 0.0.0.0") + session.cmd_output_safe(f"ifconfig {vlan_if} down") + session.cmd_output_safe(f"ifconfig {vlan_if} {vlan_ip} up") else: - process.system("ifconfig %s %s up" % (vlan_if, vlan_ip)) + process.system(f"ifconfig {vlan_if} {vlan_ip} up") def set_mac_vlan(vlan_if, mac_str, session): """ @@ -94,17 +90,20 @@ def set_mac_vlan(vlan_if, mac_str, session): :params: mac_str: New mac address for vlan. :params: session: VM session. 
""" - mac_cmd = "ip link set %s add %s up" % (vlan_if, mac_str) - error_context.context("Give a new mac address '%s' for vlan interface " - "'%s'" % (mac_str, vlan_if), test.log.info) + mac_cmd = f"ip link set {vlan_if} add {mac_str} up" + error_context.context( + f"Give a new mac address '{mac_str}' for vlan interface " f"'{vlan_if}'", + test.log.info, + ) session.cmd_output_safe(mac_cmd) def set_arp_ignore(session): """ Enable arp_ignore for all ipv4 device in guest """ - error_context.context("Enable arp_ignore for all ipv4 device in guest", - test.log.info) + error_context.context( + "Enable arp_ignore for all ipv4 device in guest", test.log.info + ) ignore_cmd = "echo 1 > /proc/sys/net/ipv4/conf/all/arp_ignore" session.cmd_output_safe(ignore_cmd) @@ -116,17 +115,16 @@ def ping_vlan(vm, dest, vlan_if, session): :params vlan_if: Vlan interface. :params session: VM session. """ - error_context.context("Test ping from '%s' to '%s' on guest '%s'" % - (vlan_if, dest, vm.name)) - status, output = utils_test.ping(dest=dest, count=10, - interface=vlan_if, - session=session, - timeout=30) + error_context.context( + f"Test ping from '{vlan_if}' to '{dest}' on guest '{vm.name}'" + ) + status, output = utils_test.ping( + dest=dest, count=10, interface=vlan_if, session=session, timeout=30 + ) if status: raise NetPingError(vlan_if, dest, output) - def netperf_vlan(client='main_vm', server='localhost', - sub_type='netperf_stress'): + def netperf_vlan(client="main_vm", server="localhost", sub_type="netperf_stress"): """ Test netperf stress among guests and host. :params client: Netperf client. @@ -135,9 +133,11 @@ def netperf_vlan(client='main_vm', server='localhost', """ params["netperf_client"] = client params["netperf_server"] = server - error_context.context("Run netperf stress test among guests and host, " - "server: %s, client: %s" % (server, client), - test.log.info) + error_context.context( + "Run netperf stress test among guests and host, " + f"server: {server}, client: {client}", + test.log.info, + ) session.cmd_output_safe("systemctl restart NetworkManager") utils_test.run_virt_sub_test(test, params, env, sub_type) @@ -152,7 +152,7 @@ def netperf_vlan(client='main_vm', server='localhost', host_vlan_id = params.get("host_vlan_id", "10") host_vlan_ip = params.get("host_vlan_ip", "192.168.10.10") subnet = params.get("subnet", "192.168") - mac_str = params.get("mac_str").split(',') + mac_str = params.get("mac_str").split(",") br_backend = utils_net.find_bridge_manager(host_br) if not isinstance(br_backend, utils_net.Bridge): @@ -160,14 +160,13 @@ def netperf_vlan(client='main_vm', server='localhost', linux_modules.load_module("8021q") - host_vlan_if = "%s.%s" % (host_br, host_vlan_id) + host_vlan_if = f"{host_br}.{host_vlan_id}" if host_vlan_if not in utils_net.get_net_if(): host_vlan_if = add_vlan(interface=host_br, v_id=host_vlan_id) if host_vlan_if in utils_net.get_net_if(): set_ip_vlan(vlan_if=host_vlan_if, vlan_ip=host_vlan_ip) rm_host_vlan_cmd = params["rm_host_vlan_cmd"] % host_vlan_if - funcatexit.register(env, params["type"], _system, - rm_host_vlan_cmd) + funcatexit.register(env, params["type"], _system, rm_host_vlan_cmd) else: test.cancel("Fail to set up vlan over bridge interface in host!") @@ -180,87 +179,93 @@ def netperf_vlan(client='main_vm', server='localhost', vms.append(vm2) else: vms.append(env.get_vm([params["main_vm"]])) - vms.append(env.get_vm('vm2')) + vms.append(env.get_vm("vm2")) for vm_ in vms: vm_.verify_alive() for vm_index, vm in enumerate(vms): - 
error_context.context("Prepare test env on %s" % vm.name) + error_context.context(f"Prepare test env on {vm.name}") session = vm.wait_for_serial_login() if not session: - err_msg = "Could not log into guest %s" % vm.name + err_msg = f"Could not log into guest {vm.name}" test.error(err_msg) interface = utils_net.get_linux_ifname(session, vm.get_mac_address()) - error_context.context("Load 8021q module in guest %s" % vm.name, - test.log.info) + error_context.context(f"Load 8021q module in guest {vm.name}", test.log.info) session.cmd_output_safe("modprobe 8021q") - error_context.context("Setup vlan environment in guest %s" % vm.name, - test.log.info) + error_context.context( + f"Setup vlan environment in guest {vm.name}", test.log.info + ) inter_ip = "%s.%s.%d" % (subnet, host_vlan_id, vm_index + 1) set_ip_vlan(interface, inter_ip, session=session) set_arp_ignore(session) - params["vlan_nic"] = "%s.%s" % (interface, host_vlan_id) - error_context.context("Test ping from guest '%s' to host with " - "interface '%s'" % - (vm.name, interface), test.log.info) + params["vlan_nic"] = f"{interface}.{host_vlan_id}" + error_context.context( + f"Test ping from guest '{vm.name}' to host with " + f"interface '{interface}'", + test.log.info, + ) try: - ping_vlan(vm, dest=host_vlan_ip, vlan_if=interface, - session=session) + ping_vlan(vm, dest=host_vlan_ip, vlan_if=interface, session=session) except NetPingError: - test.log.info("Guest ping fail to host as expected with " - "interface '%s'", interface) + test.log.info( + "Guest ping fail to host as expected with " "interface '%s'", interface + ) else: - test.fail("Guest ping to host should fail with interface" - " '%s'" % interface) + test.fail("Guest ping to host should fail with interface" f" '{interface}'") ifname.append(interface) vm_ip.append(inter_ip) sessions.append(session) # Ping succeed between guests - error_context.context("Test ping between guests with interface %s" - % ifname[0], test.log.info) + error_context.context( + f"Test ping between guests with interface {ifname[0]}", test.log.info + ) ping_vlan(vms[0], dest=vm_ip[1], vlan_if=ifname[0], session=sessions[0]) # set vlan tag for guest for vm_index, vm in enumerate(vms): session = sessions[vm_index] - error_context.context("Add vlan interface on guest '%s'" % vm.name) - session.cmd_output("ifconfig %s 0.0.0.0" % ifname[vm_index], safe=True) - vlan_if = add_vlan(interface=ifname[vm_index], v_id=host_vlan_id, - session=session) + error_context.context(f"Add vlan interface on guest '{vm.name}'") + session.cmd_output(f"ifconfig {ifname[vm_index]} 0.0.0.0", safe=True) + vlan_if = add_vlan( + interface=ifname[vm_index], v_id=host_vlan_id, session=session + ) vm_vlan_if.append(vlan_if) set_mac_vlan(vlan_if, mac_str[vm_index], session=session) vlan_ip = "%s.%s.%d" % (subnet, host_vlan_id, vm_index + 11) set_ip_vlan(vlan_if, vlan_ip, session=session) vm_vlan_ip.append(vlan_ip) - error_context.context("Test ping from interface '%s' on guest " - "'%s' to host." 
% - (vm_vlan_if[vm_index], vm.name), test.log.info) + error_context.context( + f"Test ping from interface '{vm_vlan_if[vm_index]}' on guest " + f"'{vm.name}' to host.", + test.log.info, + ) utils_net.restart_guest_network(session) - ping_vlan(vm, dest=host_vlan_ip, vlan_if=vm_vlan_if[vm_index], - session=session) + ping_vlan(vm, dest=host_vlan_ip, vlan_if=vm_vlan_if[vm_index], session=session) netperf_vlan(client=vm.name, server="localhost") - error_context.context("Test ping and netperf between guests with " - "interface '%s'" % - vm_vlan_if[vm_index], test.log.info) - ping_vlan(vms[0], dest=vm_vlan_ip[1], vlan_if=vm_vlan_if[0], - session=sessions[0]) - netperf_vlan(client=params["main_vm"], server='vm2') + error_context.context( + "Test ping and netperf between guests with " + f"interface '{vm_vlan_if[vm_index]}'", + test.log.info, + ) + ping_vlan(vms[0], dest=vm_vlan_ip[1], vlan_if=vm_vlan_if[0], session=sessions[0]) + netperf_vlan(client=params["main_vm"], server="vm2") - exithandlers = "exithandlers__%s" % sub_type + exithandlers = f"exithandlers__{sub_type}" sub_exit_timeout = int(params.get("sub_exit_timeout", 10)) start_time = time.time() end_time = start_time + float(sub_exit_timeout) while time.time() < end_time: - test.log.debug("%s (%f secs)", sub_type + " is running", - (time.time() - start_time)) + test.log.debug( + "%s (%f secs)", sub_type + " is running", (time.time() - start_time) + ) if env.data.get(exithandlers): break time.sleep(1) diff --git a/qemu/tests/cache_sizes_test.py b/qemu/tests/cache_sizes_test.py index f05a9fb96a..9b3f8df0c7 100644 --- a/qemu/tests/cache_sizes_test.py +++ b/qemu/tests/cache_sizes_test.py @@ -17,18 +17,20 @@ def run(test, params, env): initial_tag = params["images"] cache_sizes = params["cache_sizes"].split() - test.log.info("Boot a guest up from initial image: %s, and create a" - " file %s on the disk.", initial_tag, file) + test.log.info( + "Boot a guest up from initial image: %s, and create a" " file %s on the disk.", + initial_tag, + file, + ) for cache_size in cache_sizes: - params["drv_extra_params_image1"] = "cache-size=%s" % cache_size + params["drv_extra_params_image1"] = f"cache-size={cache_size}" vm = img_utils.boot_vm_with_images(test, params, env) session = vm.wait_for_login() guest_temp_file = params["guest_file_name"] sync_bin = params.get("sync_bin", "sync") test.log.debug("Create temporary file on guest: %s", guest_temp_file) - img_utils.save_random_file_to_vm(vm, guest_temp_file, 2048 * 512, - sync_bin) + img_utils.save_random_file_to_vm(vm, guest_temp_file, 2048 * 512, sync_bin) session.close() vm.destroy() diff --git a/qemu/tests/cdrom.py b/qemu/tests/cdrom.py index 28f952ff8a..1b573aa95b 100644 --- a/qemu/tests/cdrom.py +++ b/qemu/tests/cdrom.py @@ -6,27 +6,28 @@ :author: Jiri Zupka :copyright: 2011 Red Hat, Inc. 
""" -import re -import time + import os +import random +import re import sys import tempfile -import random -import six +import time import aexpect - +import six from avocado.utils import process - -from virttest import error_context -from virttest import utils_misc -from virttest import gluster -from virttest import env_process -from virttest import data_dir -from virttest import utils_test -from virttest import qemu_migration -from virttest.utils_test.qemu import migration +from virttest import ( + data_dir, + env_process, + error_context, + gluster, + qemu_migration, + utils_misc, + utils_test, +) from virttest.qemu_capabilities import Flags +from virttest.utils_test.qemu import migration @error_context.context_aware @@ -71,7 +72,7 @@ def run(test, params, env): @warning: Check dmesg for block device failures """ # Some versions of qemu are unable to eject CDROM directly after insert - workaround_eject_time = float(params.get('workaround_eject_time', 0)) + workaround_eject_time = float(params.get("workaround_eject_time", 0)) login_timeout = int(params.get("login_timeout", 360)) cdrom_prepare_timeout = int(params.get("cdrom_preapre_timeout", 360)) @@ -79,7 +80,7 @@ def run(test, params, env): def generate_serial_num(): length = int(params.get("length", "10")) id_leng = random.randint(6, length) - ignore_str = ",!\"#$%&\'()*+./:;<=>?@[\\]^`{|}~" + ignore_str = ",!\"#$%&'()*+./:;<=>?@[\\]^`{|}~" return utils_misc.generate_random_string(id_leng, ignore_str) def list_guest_cdroms(session): @@ -107,7 +108,7 @@ def get_cdrom_mount_point(session, drive_letter, params): """ mount_point = "/mnt" if params["os_type"] == "windows": - cmd = "wmic volume where DriveLetter='%s' " % drive_letter + cmd = f"wmic volume where DriveLetter='{drive_letter}' " cmd += "get DeviceID | more +1" mount_point = session.cmd_output(cmd).strip() return mount_point @@ -124,8 +125,7 @@ def create_iso_image(params, name, prepare=True, file_size=None): :return: path to new iso image file. 
""" - error_context.context("Creating test iso image '%s'" % name, - test.log.info) + error_context.context(f"Creating test iso image '{name}'", test.log.info) cdrom_cd = params["target_cdrom"] cdrom_cd = params[cdrom_cd] if not os.path.isabs(cdrom_cd): @@ -141,59 +141,57 @@ def create_iso_image(params, name, prepare=True, file_size=None): else: gluster_server = "localhost" volume_name = params["gluster_volume_name"] - g_mount_link = "%s:/%s" % (gluster_server, volume_name) - mount_cmd = "mount -t glusterfs %s %s" % (g_mount_link, g_mount_point) + g_mount_link = f"{gluster_server}:/{volume_name}" + mount_cmd = f"mount -t glusterfs {g_mount_link} {g_mount_point}" process.system(mount_cmd, timeout=60) - file_name = os.path.join(g_mount_point, "%s.iso" % name) + file_name = os.path.join(g_mount_point, f"{name}.iso") else: - file_name = utils_misc.get_path(iso_image_dir, "%s.iso" % name) + file_name = utils_misc.get_path(iso_image_dir, f"{name}.iso") if prepare: cmd = "dd if=/dev/urandom of=%s bs=1M count=%d" process.run(cmd % (name, file_size)) - process.run("mkisofs -o %s %s" % (file_name, name)) - process.run("rm -rf %s" % (name)) + process.run(f"mkisofs -o {file_name} {name}") + process.run(f"rm -rf {name}") if image_params.get("enable_gluster") == "yes": gluster_uri = gluster.create_gluster_uri(image_params) - file_name = "%s%s.iso" % (gluster_uri, name) + file_name = f"{gluster_uri}{name}.iso" try: - umount_cmd = "umount %s" % g_mount_point + umount_cmd = f"umount {g_mount_point}" process.system(umount_cmd, timeout=60) os.rmdir(g_mount_point) except Exception as err: - msg = "Fail to clean up %s" % g_mount_point - msg += "Error message %s" % err - test.log.warn(msg) + msg = f"Fail to clean up {g_mount_point}" + msg += f"Error message {err}" + test.log.warning(msg) return file_name def cleanup_cdrom(path): - """ Removes created iso image """ + """Removes created iso image""" if path: - error_context.context("Cleaning up temp iso image '%s'" % path, - test.log.info) + error_context.context(f"Cleaning up temp iso image '{path}'", test.log.info) if "gluster" in path: g_mount_point = tempfile.mkdtemp("gluster") g_server, v_name, f_name = path.split("/")[-3:] if ":" in g_server: g_server = g_server.split(":")[0] - g_mount_link = "%s:/%s" % (g_server, v_name) - mount_cmd = "mount -t glusterfs %s %s" % (g_mount_link, - g_mount_point) + g_mount_link = f"{g_server}:/{v_name}" + mount_cmd = f"mount -t glusterfs {g_mount_link} {g_mount_point}" process.system(mount_cmd, timeout=60) path = os.path.join(g_mount_point, f_name) try: test.log.debug("Remove the file with os.remove().") - os.remove("%s" % path) - except OSError as err: - test.log.warn("Fail to delete %s", path) + os.remove(f"{path}") + except OSError: + test.log.warning("Fail to delete %s", path) if "gluster" in path: try: - umount_cmd = "umount %s" % g_mount_point + umount_cmd = f"umount {g_mount_point}" process.system(umount_cmd, timeout=60) os.rmdir(g_mount_point) except Exception as err: - msg = "Fail to clean up %s" % g_mount_point - msg += "Error message %s" % err - test.log.warn(msg) + msg = f"Fail to clean up {g_mount_point}" + msg += f"Error message {err}" + test.log.warning(msg) def get_cdrom_file(vm, qemu_cdrom_device): """ @@ -205,23 +203,25 @@ def get_cdrom_file(vm, qemu_cdrom_device): enable_blockdev = vm.check_capability(Flags.BLOCKDEV) cdfile = None if isinstance(blocks, six.string_types): - tmp_re_str = r'%s: .*file=(\S*) ' % qemu_cdrom_device + tmp_re_str = rf"{qemu_cdrom_device}: .*file=(\S*) " file_list = 
re.findall(tmp_re_str, blocks) if file_list: cdfile = file_list[0] else: # try to deal with new qemu - tmp_re_str = r'%s: (\S*) \(.*\)' % qemu_cdrom_device + tmp_re_str = rf"{qemu_cdrom_device}: (\S*) \(.*\)" file_list = re.findall(tmp_re_str, blocks) if file_list: cdfile = file_list[0] else: for block in blocks: - if (enable_blockdev and block['qdev'] == vm.devices.get_qdev_by_drive( - qemu_cdrom_device) or ( - not enable_blockdev and block['device'] == qemu_cdrom_device)): + if ( + enable_blockdev + and block["qdev"] == vm.devices.get_qdev_by_drive(qemu_cdrom_device) + or (not enable_blockdev and block["device"] == qemu_cdrom_device) + ): try: - cdfile = block['inserted']['file'] + cdfile = block["inserted"]["file"] break except KeyError: continue @@ -256,19 +256,21 @@ def _get_tray_stat_via_monitor(vm, qemu_cdrom_device): tmp_block = "" else: for block in blocks: - if (enable_blockdev and block['qdev'] == vm.devices.get_qdev_by_drive( - qemu_cdrom_device) or ( - not enable_blockdev and block['device'] == qemu_cdrom_device)): - key = list(filter(lambda x: re.match(r"tray.*open", x), - block.keys())) + if ( + enable_blockdev + and block["qdev"] == vm.devices.get_qdev_by_drive(qemu_cdrom_device) + or (not enable_blockdev and block["device"] == qemu_cdrom_device) + ): + key = list( + filter(lambda x: re.match(r"tray.*open", x), block.keys()) + ) # compatible rhel6 and rhel7 diff qmp output if not key: break is_open, checked = (block[key[0]], True) return (is_open, checked) - def is_tray_opened(vm, qemu_cdrom_device, mode='monitor', - dev_name="/dev/sr0"): + def is_tray_opened(vm, qemu_cdrom_device, mode="monitor", dev_name="/dev/sr0"): """ Checks whether the tray is opend @@ -286,11 +288,10 @@ def is_tray_opened(vm, qemu_cdrom_device, mode='monitor', """ is_open, checked = (None, False) - if mode in ['monitor', 'mixed']: - is_open, checked = _get_tray_stat_via_monitor( - vm, qemu_cdrom_device) + if mode in ["monitor", "mixed"]: + is_open, checked = _get_tray_stat_via_monitor(vm, qemu_cdrom_device) - if (mode in ['session', 'mixed']) and not checked: + if (mode in ["session", "mixed"]) and not checked: session = vm.wait_for_login(timeout=login_timeout) tray_cmd = params["tray_check_cmd"] % dev_name o = session.cmd_output(tray_cmd) @@ -337,8 +338,8 @@ def check_cdrom_lock(vm, cdrom): tmp_block = "" else: for block in blocks: - if block['device'] == cdrom and 'locked' in block.keys(): - return block['locked'] + if block["device"] == cdrom and "locked" in block.keys(): + return block["locked"] return None @error_context.context_aware @@ -351,9 +352,9 @@ def get_device(vm, dev_file_path): :return: device object """ error_context.context("Get cdrom device object") - device = vm.get_block({'file': dev_file_path}) + device = vm.get_block({"file": dev_file_path}) if not device: - device = vm.get_block({'backing_file': dev_file_path}) + device = vm.get_block({"backing_file": dev_file_path}) if not device: test.fail("Could not find a valid cdrom device") return device @@ -379,10 +380,11 @@ def get_match_cdrom(vm, session, serial_num): break if not serial_cdrom: qtree_info = vm.monitor.info("qtree") - test.fail("Could not find the device whose " - "serial number %s is same in Qemu" - " CML.\n Qtree info: %s" % - (serial_num, qtree_info)) + test.fail( + "Could not find the device whose " + f"serial number {serial_num} is same in Qemu" + f" CML.\n Qtree info: {qtree_info}" + ) show_cdrom_cmd = "ls -l /dev/cdrom*" dev_cdrom_output = session.cmd_output(show_cdrom_cmd) @@ -391,8 +393,10 @@ def 
get_match_cdrom(vm, session, serial_num): if utils_misc.find_substring(str(line), str(serial_cdrom)): match_cdrom = line.split(" ")[-3] return match_cdrom - test.fail("Could not find the corresponding cdrom" - "in guest which is same in Qemu CML.") + test.fail( + "Could not find the corresponding cdrom" + "in guest which is same in Qemu CML." + ) def get_testing_cdrom_device(vm, session, cdrom_dev_list, serial_num=None): """ @@ -403,7 +407,7 @@ def get_testing_cdrom_device(vm, session, cdrom_dev_list, serial_num=None): try: if params["os_type"] == "windows": winutil_drive = utils_misc.get_winutils_vol(session) - winutil_drive = "%s:" % winutil_drive + winutil_drive = f"{winutil_drive}:" cdrom_dev_list.remove(winutil_drive) testing_cdrom_device = cdrom_dev_list[-1] else: @@ -426,11 +430,10 @@ def disk_copy(vm, src_path, dst_path, copy_timeout=None, dsize=None): if copy_timeout is None: copy_timeout = 120 session = vm.wait_for_login(timeout=login_timeout) - copy_file_cmd = ( - "nohup cp %s %s 2> /dev/null &" % (src_path, dst_path)) + copy_file_cmd = f"nohup cp {src_path} {dst_path} 2> /dev/null &" get_pid_cmd = "echo $!" if params["os_type"] == "windows": - copy_file_cmd = "start cmd /c copy /y %s %s" % (src_path, dst_path) + copy_file_cmd = f"start cmd /c copy /y {src_path} {dst_path}" get_pid_cmd = "wmic process where name='cmd.exe' get ProcessID" session.cmd(copy_file_cmd, timeout=copy_timeout) pid = re.findall(r"\d+", session.cmd_output(get_pid_cmd))[-1] @@ -443,27 +446,34 @@ def get_empty_cdrom_device(vm): device = None blocks = vm.monitor.info("block") if isinstance(blocks, six.string_types): - for block in blocks.strip().split('\n'): - if 'not inserted' in block: - device = block.split(':')[0] + for block in blocks.strip().split("\n"): + if "not inserted" in block: + device = block.split(":")[0] else: for block in blocks: if vm.check_capability(Flags.BLOCKDEV): - if 'inserted' in block.keys(): - if block['inserted']['file'] == 'null-co://': - device = block['inserted']['node-name'] + if "inserted" in block.keys(): + if block["inserted"]["file"] == "null-co://": + device = block["inserted"]["node-name"] else: - if 'inserted' not in block.keys(): - device = block['device'] + if "inserted" not in block.keys(): + device = block["device"] return device - def eject_test_via_monitor(vm, qemu_cdrom_device, guest_cdrom_device, - iso_image_orig, iso_image_new, max_times): + def eject_test_via_monitor( + vm, + qemu_cdrom_device, + guest_cdrom_device, + iso_image_orig, + iso_image_new, + max_times, + ): """ Test cdrom eject function via qemu monitor. 
""" - error_context.context("Eject the iso image in monitor %s times" - % max_times, test.log.info) + error_context.context( + f"Eject the iso image in monitor {max_times} times", test.log.info + ) session = vm.wait_for_login(timeout=login_timeout) iso_image = iso_image_orig for i in range(1, max_times): @@ -471,8 +481,7 @@ def eject_test_via_monitor(vm, qemu_cdrom_device, guest_cdrom_device, vm.eject_cdrom(qemu_cdrom_device) time.sleep(2) if get_cdrom_file(vm, qemu_cdrom_device) is not None: - test.fail("Device %s was not ejected" - " (round %s)" % (iso_image, i)) + test.fail(f"Device {iso_image} was not ejected" f" (round {i})") iso_image = iso_image_new # On even attempts, try to change the iso image @@ -480,12 +489,12 @@ def eject_test_via_monitor(vm, qemu_cdrom_device, guest_cdrom_device, iso_image = iso_image_orig vm.change_media(qemu_cdrom_device, iso_image) if get_cdrom_file(vm, qemu_cdrom_device) != iso_image: - test.fail("Could not change iso image %s" - " (round %s)" % (iso_image, i)) + test.fail(f"Could not change iso image {iso_image}" f" (round {i})") time.sleep(workaround_eject_time) - def check_tray_status_test(vm, qemu_cdrom_device, guest_cdrom_device, - max_times, iso_image_new): + def check_tray_status_test( + vm, qemu_cdrom_device, guest_cdrom_device, max_times, iso_image_new + ): """ Test cdrom tray status reporting function. """ @@ -501,29 +510,34 @@ def check_tray_status_test(vm, qemu_cdrom_device, guest_cdrom_device, error_context.context("Copy test script to guest") tray_check_src = params.get("tray_check_src") if tray_check_src: - tray_check_src = os.path.join(data_dir.get_deps_dir(), "cdrom", - tray_check_src) + tray_check_src = os.path.join( + data_dir.get_deps_dir(), "cdrom", tray_check_src + ) vm.copy_files_to(tray_check_src, params["tmp_dir"]) if is_tray_opened(vm, qemu_cdrom_device) is None: - test.log.warn("Tray status reporting is not supported by qemu!") - test.log.warn("cdrom_test_tray_status test is skipped...") + test.log.warning("Tray status reporting is not supported by qemu!") + test.log.warning("cdrom_test_tray_status test is skipped...") return - error_context.context("Eject the cdrom in guest %s times" - % max_times, test.log.info) + error_context.context( + f"Eject the cdrom in guest {max_times} times", test.log.info + ) session = vm.wait_for_login(timeout=login_timeout) for i in range(1, max_times): session.cmd(params["eject_cdrom_cmd"] % guest_cdrom_device) if not is_tray_opened(vm, qemu_cdrom_device): - test.fail("Monitor reports tray closed" - " when ejecting (round %s)" % i) + test.fail( + "Monitor reports tray closed" f" when ejecting (round {i})" + ) cmd = params["close_cdrom_cmd"] % guest_cdrom_device session.cmd(cmd) if is_tray_opened(vm, qemu_cdrom_device): - test.fail("Monitor reports tray opened when close" - " cdrom in guest (round %s)" % i) + test.fail( + "Monitor reports tray opened when close" + f" cdrom in guest (round {i})" + ) time.sleep(workaround_eject_time) finally: vm.change_media(qemu_cdrom_device, iso_image_orig) @@ -532,21 +546,24 @@ def check_tray_locked_test(vm, qemu_cdrom_device, guest_cdrom_device): """ Test cdrom tray locked function. 
""" - error_context.context("Check cdrom tray status after cdrom is locked", - test.log.info) + error_context.context( + "Check cdrom tray status after cdrom is locked", test.log.info + ) session = vm.wait_for_login(timeout=login_timeout) - tmp_is_trap_open = is_tray_opened(vm, qemu_cdrom_device, mode='mixed', - dev_name=guest_cdrom_device) + tmp_is_trap_open = is_tray_opened( + vm, qemu_cdrom_device, mode="mixed", dev_name=guest_cdrom_device + ) if tmp_is_trap_open is None: - test.log.warn("Tray status reporting is not supported by qemu!") - test.log.warn("cdrom_test_locked test is skipped...") + test.log.warning("Tray status reporting is not supported by qemu!") + test.log.warning("cdrom_test_locked test is skipped...") return eject_failed = False eject_failed_msg = "Tray should be closed even in locked status" session.cmd(params["eject_cdrom_cmd"] % guest_cdrom_device) - tmp_is_trap_open = is_tray_opened(vm, qemu_cdrom_device, mode='mixed', - dev_name=guest_cdrom_device) + tmp_is_trap_open = is_tray_opened( + vm, qemu_cdrom_device, mode="mixed", dev_name=guest_cdrom_device + ) if not tmp_is_trap_open: test.fail("Tray should not in closed status") session.cmd(params["lock_cdrom_cmd"] % guest_cdrom_device) @@ -554,11 +571,12 @@ def check_tray_locked_test(vm, qemu_cdrom_device, guest_cdrom_device): session.cmd(params["close_cdrom_cmd"] % guest_cdrom_device) except aexpect.ShellCmdError as e: eject_failed = True - eject_failed_msg += ", eject command failed: %s" % str(e) + eject_failed_msg += f", eject command failed: {str(e)}" - tmp_is_trap_open = is_tray_opened(vm, qemu_cdrom_device, mode='mixed', - dev_name=guest_cdrom_device) - if (eject_failed or tmp_is_trap_open): + tmp_is_trap_open = is_tray_opened( + vm, qemu_cdrom_device, mode="mixed", dev_name=guest_cdrom_device + ) + if eject_failed or tmp_is_trap_open: test.fail(eject_failed_msg) session.cmd(params["unlock_cdrom_cmd"] % guest_cdrom_device) session.cmd(params["close_cdrom_cmd"] % guest_cdrom_device) @@ -568,10 +586,8 @@ def file_operation_test(session, guest_cdrom_device, max_times): Cdrom file operation test. 
""" filename = "new" - mount_point = get_cdrom_mount_point(session, - guest_cdrom_device, params) - mount_cmd = params["mount_cdrom_cmd"] % (guest_cdrom_device, - mount_point) + mount_point = get_cdrom_mount_point(session, guest_cdrom_device, params) + mount_cmd = params["mount_cdrom_cmd"] % (guest_cdrom_device, mount_point) umount_cmd = params["umount_cdrom_cmd"] % guest_cdrom_device src_file = params["src_file"] % (mount_point, filename) dst_file = params["dst_file"] % filename @@ -581,19 +597,20 @@ def file_operation_test(session, guest_cdrom_device, max_times): md5sum_cmd = params["md5sum_cmd"] if params["os_type"] != "windows": - error_context.context("Mounting the cdrom under %s" % mount_point, - test.log.info) + error_context.context( + f"Mounting the cdrom under {mount_point}", test.log.info + ) session.cmd(mount_cmd, timeout=30) error_context.context("File copying test", test.log.info) session.cmd(copy_file_cmd) f1_hash = session.cmd(md5sum_cmd % dst_file).split()[0].strip() f2_hash = session.cmd(md5sum_cmd % src_file).split()[0].strip() if f1_hash != f2_hash: - test.fail("On disk and on cdrom files are different, " - "md5 mismatch") + test.fail("On disk and on cdrom files are different, " "md5 mismatch") session.cmd(remove_file_cmd) - error_context.context("Mount/Unmount cdrom for %s times" % max_times, - test.log.info) + error_context.context( + f"Mount/Unmount cdrom for {max_times} times", test.log.info + ) for _ in range(1, max_times): try: session.cmd(umount_cmd) @@ -603,13 +620,12 @@ def file_operation_test(session, guest_cdrom_device, max_times): test.log.debug(session.cmd(show_mount_cmd)) raise if params["os_type"] != "windows": - session.cmd("umount %s" % guest_cdrom_device) + session.cmd(f"umount {guest_cdrom_device}") # Test main body start. - class MiniSubtest(object): - + class MiniSubtest: def __new__(cls, *args, **kargs): - self = super(MiniSubtest, cls).__new__(cls) + self = super().__new__(cls) ret = None exc_info = None if args is None: @@ -631,7 +647,6 @@ def __new__(cls, *args, **kargs): return ret class test_singlehost(MiniSubtest): - def test(self): self.iso_image_orig = create_iso_image(params, "orig") self.iso_image_new = create_iso_image(params, "new") @@ -642,7 +657,7 @@ def test(self): params["start_vm"] = "yes" serial_num = generate_serial_num() cdrom = params.get("cdroms", "").split()[-1] - params["drive_serial_%s" % cdrom] = serial_num + params[f"drive_serial_{cdrom}"] = serial_num env_process.preprocess_vm(test, params, env, params["main_vm"]) vm = env.get_vm(params["main_vm"]) @@ -660,97 +675,100 @@ def test(self): cdrom_dev_list = list_guest_cdroms(self.session) test.log.debug("cdrom_dev_list: '%s'", cdrom_dev_list) - if params.get('not_insert_at_start') == "yes": - error_context.context("Locked without media present", - test.log.info) + if params.get("not_insert_at_start") == "yes": + error_context.context("Locked without media present", test.log.info) # XXX: The device got from monitor might not match with the guest # defice if there are multiple cdrom devices. 
qemu_cdrom_device = get_empty_cdrom_device(vm) - guest_cdrom_device = get_testing_cdrom_device(vm, - self.session, - cdrom_dev_list, - serial_num) + guest_cdrom_device = get_testing_cdrom_device( + vm, self.session, cdrom_dev_list, serial_num + ) if vm.check_block_locked(qemu_cdrom_device): - test.fail("Device should not be locked just" - " after booting up") + test.fail("Device should not be locked just" " after booting up") cmd = params["lock_cdrom_cmd"] % guest_cdrom_device self.session.cmd(cmd) if not vm.check_block_locked(qemu_cdrom_device): test.fail("Device is not locked as expect.") return - error_context.context("Detecting the existence of a cdrom " - "(guest OS side)", test.log.info) + error_context.context( + "Detecting the existence of a cdrom " "(guest OS side)", test.log.info + ) cdrom_dev_list = list_guest_cdroms(self.session) - guest_cdrom_device = get_testing_cdrom_device(vm, - self.session, - cdrom_dev_list, - serial_num) - error_context.context("Detecting the existence of a cdrom " - "(qemu side)", test.log.info) + guest_cdrom_device = get_testing_cdrom_device( + vm, self.session, cdrom_dev_list, serial_num + ) + error_context.context( + "Detecting the existence of a cdrom " "(qemu side)", test.log.info + ) qemu_cdrom_device = get_device(vm, iso_image) if params["os_type"] != "windows": - self.session.cmd_output("umount %s" % guest_cdrom_device) - if params.get('cdrom_test_autounlock') == 'yes': - error_context.context("Trying to unlock the cdrom", - test.log.info) - if not utils_misc.wait_for(lambda: not - vm.check_block_locked(qemu_cdrom_device), - 300): - test.fail("Device %s could not be" - " unlocked" % (qemu_cdrom_device)) + self.session.cmd_output(f"umount {guest_cdrom_device}") + if params.get("cdrom_test_autounlock") == "yes": + error_context.context("Trying to unlock the cdrom", test.log.info) + if not utils_misc.wait_for( + lambda: not vm.check_block_locked(qemu_cdrom_device), 300 + ): + test.fail(f"Device {qemu_cdrom_device} could not be" " unlocked") max_test_times = int(params.get("cdrom_max_test_times", 100)) if params.get("cdrom_test_eject") == "yes": - eject_test_via_monitor(vm, qemu_cdrom_device, - guest_cdrom_device, self.iso_image_orig, - self.iso_image_new, max_test_times) - - if params.get('cdrom_test_tray_status') == 'yes': - check_tray_status_test(vm, qemu_cdrom_device, - guest_cdrom_device, max_test_times, - self.iso_image_new) - - if params.get('cdrom_test_locked') == 'yes': - check_tray_locked_test(vm, qemu_cdrom_device, - guest_cdrom_device) - - error_context.context("Check whether the cdrom is read-only", - test.log.info) + eject_test_via_monitor( + vm, + qemu_cdrom_device, + guest_cdrom_device, + self.iso_image_orig, + self.iso_image_new, + max_test_times, + ) + + if params.get("cdrom_test_tray_status") == "yes": + check_tray_status_test( + vm, + qemu_cdrom_device, + guest_cdrom_device, + max_test_times, + self.iso_image_new, + ) + + if params.get("cdrom_test_locked") == "yes": + check_tray_locked_test(vm, qemu_cdrom_device, guest_cdrom_device) + + error_context.context("Check whether the cdrom is read-only", test.log.info) cmd = params["readonly_test_cmd"] % guest_cdrom_device try: self.session.cmd(cmd) - test.fail("Attempt to format cdrom %s succeeded" % - (guest_cdrom_device)) + test.fail(f"Attempt to format cdrom {guest_cdrom_device} succeeded") except aexpect.ShellError: pass sub_test = params.get("sub_test") if sub_test: - error_context.context("Run sub test '%s' before doing file" - " operation" % sub_test, test.log.info) + 
error_context.context( + f"Run sub test '{sub_test}' before doing file" " operation", + test.log.info, + ) utils_test.run_virt_sub_test(test, params, env, sub_test) if params.get("cdrom_test_file_operation") == "yes": - file_operation_test(self.session, guest_cdrom_device, - max_test_times) + file_operation_test(self.session, guest_cdrom_device, max_test_times) error_context.context("Cleanup") # Return the self.iso_image_orig cdfile = get_cdrom_file(vm, qemu_cdrom_device) if cdfile != self.iso_image_orig: time.sleep(workaround_eject_time) - self.session.cmd(params["eject_cdrom_cmd"] % - guest_cdrom_device) + self.session.cmd(params["eject_cdrom_cmd"] % guest_cdrom_device) vm.eject_cdrom(qemu_cdrom_device) if get_cdrom_file(vm, qemu_cdrom_device) is not None: - test.fail("Device %s was not ejected" - " in clearup stage" % qemu_cdrom_device) + test.fail( + f"Device {qemu_cdrom_device} was not ejected" + " in clearup stage" + ) vm.change_media(qemu_cdrom_device, self.iso_image_orig) if get_cdrom_file(vm, qemu_cdrom_device) != self.iso_image_orig: - test.fail("It wasn't possible to change" - " cdrom %s" % iso_image) + test.fail("It wasn't possible to change" f" cdrom {iso_image}") post_cmd = params.get("post_cmd") if post_cmd: self.session.cmd(post_cmd) @@ -763,10 +781,8 @@ def clean(self): cleanup_cdrom(self.iso_image_new) class Multihost(MiniSubtest): - def test(self): - error_context.context("Preparing migration env and cdroms.", - test.log.info) + error_context.context("Preparing migration env and cdroms.", test.log.info) mig_protocol = params.get("mig_protocol", "tcp") self.mig_type = migration.MultihostMigration if mig_protocol == "fd": @@ -780,21 +796,31 @@ def test(self): self.srchost = params.get("hosts")[0] self.dsthost = params.get("hosts")[1] self.is_src = params.get("hostid") == self.srchost - self.mig = self.mig_type(test, params, env, False, ) + self.mig = self.mig_type( + test, + params, + env, + False, + ) self.cdrom_size = int(params.get("cdrom_size", 10)) cdrom = params.objects("cdroms")[-1] - self.serial_num = params.get("drive_serial_%s" % cdrom) + self.serial_num = params.get(f"drive_serial_{cdrom}") if self.is_src: - self.cdrom_orig = create_iso_image(params, "orig", - file_size=self.cdrom_size) + self.cdrom_orig = create_iso_image( + params, "orig", file_size=self.cdrom_size + ) self.cdrom_dir = os.path.dirname(self.cdrom_orig) vm = env.get_vm(self.vms[0]) vm.destroy() params["start_vm"] = "yes" - env_process.process(test, params, env, - env_process.preprocess_image, - env_process.preprocess_vm) + env_process.process( + test, + params, + env, + env_process.preprocess_image, + env_process.preprocess_vm, + ) vm = env.get_vm(self.vms[0]) vm.wait_for_login(timeout=login_timeout) else: @@ -807,9 +833,8 @@ def clean(self): cleanup_cdrom(self.cdrom_orig) class test_multihost_locking(Multihost): - def test(self): - super(test_multihost_locking, self).test() + super().test() error_context.context("Lock cdrom in VM.", test.log.info) # Starts in source @@ -817,10 +842,9 @@ def test(self): vm = env.get_vm(params["main_vm"]) session = vm.wait_for_login(timeout=login_timeout) cdrom_dev_list = list_guest_cdroms(session) - guest_cdrom_device = get_testing_cdrom_device(vm, - session, - cdrom_dev_list, - self.serial_num) + guest_cdrom_device = get_testing_cdrom_device( + vm, session, cdrom_dev_list, self.serial_num + ) test.log.debug("cdrom_dev_list: %s", cdrom_dev_list) device = get_device(vm, self.cdrom_orig) @@ -831,8 +855,9 @@ def test(self): else: test.fail("Cdrom device should be 
locked in VM.") - self.mig._hosts_barrier(self.mig.hosts, self.mig.hosts, - 'cdrom_dev', cdrom_prepare_timeout) + self.mig._hosts_barrier( + self.mig.hosts, self.mig.hosts, "cdrom_dev", cdrom_prepare_timeout + ) self.mig.migrate_wait([self.vms[0]], self.srchost, self.dsthost) @@ -846,23 +871,23 @@ def test(self): locked = check_cdrom_lock(vm, device) if locked: - test.log.debug("Cdrom device stayed locked after " - "migration in VM.") + test.log.debug( + "Cdrom device stayed locked after " "migration in VM." + ) else: - test.fail("Cdrom device should stayed locked" - " after migration in VM.") + test.fail( + "Cdrom device should stayed locked" " after migration in VM." + ) error_context.context("Unlock cdrom from VM.", test.log.info) cdrom_dev_list = list_guest_cdroms(session) - guest_cdrom_device = get_testing_cdrom_device(vm, - session, - cdrom_dev_list, - self.serial_num) + guest_cdrom_device = get_testing_cdrom_device( + vm, session, cdrom_dev_list, self.serial_num + ) session.cmd(params["unlock_cdrom_cmd"] % guest_cdrom_device) locked = check_cdrom_lock(vm, device) if not locked: - test.log.debug("Cdrom device is successfully unlocked" - " from VM.") + test.log.debug("Cdrom device is successfully unlocked" " from VM.") else: test.fail("Cdrom device should be unlocked in VM.") @@ -872,22 +897,24 @@ def test(self): vm = env.get_vm(params["main_vm"]) locked = check_cdrom_lock(vm, device) if not locked: - test.log.debug("Cdrom device stayed unlocked after " - "migration in VM.") + test.log.debug( + "Cdrom device stayed unlocked after " "migration in VM." + ) else: - test.fail("Cdrom device should stayed unlocked" - " after migration in VM.") + test.fail( + "Cdrom device should stayed unlocked" " after migration in VM." + ) - self.mig._hosts_barrier(self.mig.hosts, self.mig.hosts, - 'Finish_cdrom_test', login_timeout) + self.mig._hosts_barrier( + self.mig.hosts, self.mig.hosts, "Finish_cdrom_test", login_timeout + ) def clean(self): - super(test_multihost_locking, self).clean() + super().clean() class test_multihost_ejecting(Multihost): - def test(self): - super(test_multihost_ejecting, self).test() + super().test() self.cdrom_new = create_iso_image(params, "new") @@ -902,29 +929,28 @@ def test(self): cdrom_dev_list = list_guest_cdroms(session) test.log.debug("cdrom_dev_list: %s", cdrom_dev_list) device = get_device(vm, self.cdrom_orig) - cdrom = get_testing_cdrom_device(vm, - session, - cdrom_dev_list, - self.serial_num) + cdrom = get_testing_cdrom_device( + vm, session, cdrom_dev_list, self.serial_num + ) error_context.context("Eject cdrom.", test.log.info) session.cmd(params["eject_cdrom_cmd"] % cdrom) vm.eject_cdrom(device) time.sleep(2) if get_cdrom_file(vm, device) is not None: - test.fail("Device %s was not ejected" % (cdrom)) + test.fail(f"Device {cdrom} was not ejected") cdrom = self.cdrom_new error_context.context("Change cdrom.", test.log.info) vm.change_media(device, cdrom) if get_cdrom_file(vm, device) != cdrom: - test.fail("It wasn't possible to change " - "cdrom %s" % (cdrom)) + test.fail("It wasn't possible to change " f"cdrom {cdrom}") time.sleep(workaround_eject_time) - self.mig._hosts_barrier(self.mig.hosts, self.mig.hosts, - 'cdrom_dev', cdrom_prepare_timeout) + self.mig._hosts_barrier( + self.mig.hosts, self.mig.hosts, "cdrom_dev", cdrom_prepare_timeout + ) self.mig.migrate_wait([self.vms[0]], self.srchost, self.dsthost) @@ -935,21 +961,18 @@ def test(self): def clean(self): if self.is_src: cleanup_cdrom(self.cdrom_new) - super(test_multihost_ejecting, self).clean() + 
super().clean() class test_multihost_copy(Multihost): - def test(self): from autotest.client.shared.syncdata import SyncData - super(test_multihost_copy, self).test() + super().test() copy_timeout = int(params.get("copy_timeout", 480)) checksum_timeout = int(params.get("checksum_timeout", 180)) pid = None - sync_id = {'src': self.srchost, - 'dst': self.dsthost, - "type": "file_trasfer"} + sync_id = {"src": self.srchost, "dst": self.dsthost, "type": "file_trasfer"} filename = "orig" remove_file_cmd = params["remove_file_cmd"] % filename dst_file = params["dst_file"] % filename @@ -960,15 +983,13 @@ def test(self): session = vm.wait_for_login(timeout=login_timeout) cdrom_dev_list = list_guest_cdroms(session) test.log.debug("cdrom_dev_list: %s", cdrom_dev_list) - cdrom = get_testing_cdrom_device(vm, - session, - cdrom_dev_list, - self.serial_num) + cdrom = get_testing_cdrom_device( + vm, session, cdrom_dev_list, self.serial_num + ) mount_point = get_cdrom_mount_point(session, cdrom, params) mount_cmd = params["mount_cdrom_cmd"] % (cdrom, mount_point) src_file = params["src_file"] % (mount_point, filename) - copy_file_cmd = params[ - "copy_file_cmd"] % (mount_point, filename) + copy_file_cmd = params["copy_file_cmd"] % (mount_point, filename) if params["os_type"] != "windows": error_context.context("Mount and copy data", test.log.info) session.cmd(mount_cmd, timeout=30) @@ -979,8 +1000,13 @@ def test(self): pid = disk_copy(vm, src_file, dst_file, copy_timeout) - sync = SyncData(self.mig.master_id(), self.mig.hostid, - self.mig.hosts, sync_id, self.mig.sync_server) + sync = SyncData( + self.mig.master_id(), + self.mig.hostid, + self.mig.hosts, + sync_id, + self.mig.sync_server, + ) pid = sync.sync(pid, timeout=cdrom_prepare_timeout)[self.srchost] @@ -991,10 +1017,9 @@ def test(self): session = vm.wait_for_login(timeout=login_timeout) error_context.context("Wait for copy finishing.", test.log.info) cdrom_dev_list = list_guest_cdroms(session) - cdrom = get_testing_cdrom_device(vm, - session, - cdrom_dev_list, - self.serial_num) + cdrom = get_testing_cdrom_device( + vm, session, cdrom_dev_list, self.serial_num + ) mount_point = get_cdrom_mount_point(session, cdrom, params) mount_cmd = params["mount_cdrom_cmd"] % (cdrom, mount_point) src_file = params["src_file"] % (mount_point, filename) @@ -1002,35 +1027,42 @@ def test(self): def is_copy_done(): if params["os_type"] == "windows": - cmd = "tasklist /FI \"PID eq %s\"" % pid + cmd = f'tasklist /FI "PID eq {pid}"' else: - cmd = "ps -p %s" % pid + cmd = f"ps -p {pid}" return session.cmd_status(cmd) != 0 if not utils_misc.wait_for(is_copy_done, timeout=copy_timeout): test.fail("Wait for file copy finish timeout") - error_context.context("Compare file on disk and on cdrom", - test.log.info) - f1_hash = session.cmd(md5sum_cmd % dst_file, - timeout=checksum_timeout).split()[0] - f2_hash = session.cmd(md5sum_cmd % src_file, - timeout=checksum_timeout).split()[0] + error_context.context( + "Compare file on disk and on cdrom", test.log.info + ) + f1_hash = session.cmd( + md5sum_cmd % dst_file, timeout=checksum_timeout + ).split()[0] + f2_hash = session.cmd( + md5sum_cmd % src_file, timeout=checksum_timeout + ).split()[0] if f1_hash.strip() != f2_hash.strip(): - test.fail("On disk and on cdrom files are" - " different, md5 mismatch") + test.fail( + "On disk and on cdrom files are" " different, md5 mismatch" + ) session.cmd(remove_file_cmd) - self.mig._hosts_barrier(self.mig.hosts, self.mig.hosts, - 'Finish_cdrom_test', login_timeout) + 
self.mig._hosts_barrier( + self.mig.hosts, self.mig.hosts, "Finish_cdrom_test", login_timeout + ) def clean(self): - super(test_multihost_copy, self).clean() + super().clean() test_type = params.get("test_type", "test_singlehost") - if (test_type in locals()): + if test_type in locals(): tests_group = locals()[test_type] tests_group() else: - test.fail("Test group '%s' is not defined in" - " migration_with_dst_problem test" % test_type) + test.fail( + f"Test group '{test_type}' is not defined in" + " migration_with_dst_problem test" + ) diff --git a/qemu/tests/cdrom_block_size_check.py b/qemu/tests/cdrom_block_size_check.py index b123529ad0..c307bb53f4 100644 --- a/qemu/tests/cdrom_block_size_check.py +++ b/qemu/tests/cdrom_block_size_check.py @@ -1,24 +1,19 @@ -import re import os -import six +import re import time +import six from avocado.utils import process - -from virttest import env_process -from virttest import error_context -from virttest import utils_misc -from virttest import data_dir +from virttest import data_dir, env_process, error_context, utils_misc from virttest.qemu_capabilities import Flags from virttest.qemu_monitor import QMPCmdError -from provider.cdrom import QMPEventCheckCDEject, QMPEventCheckCDChange -from provider import job_utils - - # This decorator makes the test function aware of context strings from virttest.utils_version import VersionInterval +from provider import job_utils +from provider.cdrom import QMPEventCheckCDChange, QMPEventCheckCDEject + @error_context.context_aware def run(test, params, env): @@ -61,7 +56,7 @@ def get_cdrom_mount_point(session, os_type="linux", drive_letter=None): """ mount_point = "/mnt" if os_type == "windows": - cmd = "wmic volume where DriveLetter='%s' " % drive_letter + cmd = f"wmic volume where DriveLetter='{drive_letter}' " cmd += "get DeviceID | more +1" mount_point = session.cmd_output(cmd).strip() return mount_point @@ -73,17 +68,19 @@ def get_cdrom_device(vm): device = None blocks = vm.monitor.info("block") if isinstance(blocks, six.string_types): - for block in blocks.strip().split('\n'): - if 'not inserted' in block: - device = block.split(':')[0] + for block in blocks.strip().split("\n"): + if "not inserted" in block: + device = block.split(":")[0] else: for block in blocks: - if 'inserted' not in block.keys(): - device = block['device'] + if "inserted" not in block.keys(): + device = block["device"] else: - if (vm.check_capability(Flags.BLOCKDEV) and - block['inserted']['file'] == 'null-co://'): - device = block['inserted']['node-name'] + if ( + vm.check_capability(Flags.BLOCKDEV) + and block["inserted"]["file"] == "null-co://" + ): + device = block["inserted"]["node-name"] return device def create_iso_image(params, name, prepare=True, file_size=None): @@ -98,14 +95,13 @@ def create_iso_image(params, name, prepare=True, file_size=None): :return: path to new iso image file. 
""" - error_context.context("Creating test iso image '%s'" % name, - test.log.info) + error_context.context(f"Creating test iso image '{name}'", test.log.info) if not os.path.isabs(name): cdrom_path = utils_misc.get_path(data_dir.get_data_dir(), name) else: cdrom_path = name if not cdrom_path.endswith(".iso"): - cdrom_path = "%s.iso" % cdrom_path + cdrom_path = f"{cdrom_path}.iso" name = os.path.basename(cdrom_path) if file_size is None: @@ -114,8 +110,8 @@ def create_iso_image(params, name, prepare=True, file_size=None): if prepare: cmd = "dd if=/dev/urandom of=%s bs=1M count=%d" process.run(cmd % (name, file_size)) - process.run("mkisofs -o %s %s" % (cdrom_path, name)) - process.run("rm -rf %s" % (name)) + process.run(f"mkisofs -o {cdrom_path} {name}") + process.run(f"rm -rf {name}") return cdrom_path def check_cdrom_size(session): @@ -124,7 +120,7 @@ def check_cdrom_size(session): output = session.cmd(check_cdrom_size_cmd, timeout=60) if not output: msg = "Unable to get the cdrom's size in guest." - msg += " Command: %s\nOutput: %s" % (check_cdrom_size_cmd, output) + msg += f" Command: {check_cdrom_size_cmd}\nOutput: {output}" test.error(msg) size = output.strip().splitlines()[-1] try: @@ -134,24 +130,22 @@ def check_cdrom_size(session): test.log.info("Cdrom's size in guest %s", cdrom_size) return cdrom_size - def mount_cdrom(session, guest_cdrom, mount_point, - show_mount_cmd, mount_cmd): + def mount_cdrom(session, guest_cdrom, mount_point, show_mount_cmd, mount_cmd): txt = "Mount the cdrom in guest and check its block size." error_context.context(txt, test.log.info) mounted = session.cmd(show_mount_cmd) if mount_point not in mounted: - mount_cmd = params.get("mount_cdrom_cmd") % (guest_cdrom, - mount_point) + mount_cmd = params.get("mount_cdrom_cmd") % (guest_cdrom, mount_point) status, output = session.cmd_status_output(mount_cmd, timeout=360) if status: - msg = "Unable to mount cdrom. command: %s\n" % mount_cmd - msg += " Output: %s" % output + msg = f"Unable to mount cdrom. 
command: {mount_cmd}\n" + msg += f" Output: {output}" test.error(msg) def is_tray_open(qdev): for block in vm.monitor.info("block"): - if qdev == block.get('qdev'): - return block.get('tray_open') + if qdev == block.get("qdev"): + return block.get("tray_open") def wait_for_tray_open(qdev): if not utils_misc.wait_for(lambda: is_tray_open(qdev), 30, 1, 3): @@ -160,17 +154,16 @@ def wait_for_tray_open(qdev): def change_media(device, target): try: with change_check: - if vm.devices.qemu_version in VersionInterval( - force_parameter_version): + if vm.devices.qemu_version in VersionInterval(force_parameter_version): vm.change_media(device, target, True) else: vm.change_media(device, target) except QMPCmdError as e: if excepted_qmp_err not in str(e): test.error(str(e)) - test.log.warn(str(e)) - cond = {'tray-open': True, 'id': cdroms} - if job_utils.get_event_by_condition(vm, 'DEVICE_TRAY_MOVED', 10, **cond): + test.log.warning(str(e)) + cond = {"tray-open": True, "id": cdroms} + if job_utils.get_event_by_condition(vm, "DEVICE_TRAY_MOVED", 10, **cond): with change_check: vm.change_media(device, target) else: @@ -189,8 +182,8 @@ def eject_cdrom(device): show_mount_cmd = params.get("show_mount_cmd") mount_cmd = params.get("mount_cdrom_cmd") umount_cmd = params.get("umount_cdrom_cmd") - excepted_qmp_err = params.get('excepted_qmp_err') - sleep_time_after_change = params.get_numeric('sleep_time_after_change', 30) + excepted_qmp_err = params.get("excepted_qmp_err") + sleep_time_after_change = params.get_numeric("sleep_time_after_change", 30) os_type = params["os_type"] error_context.context("Get the main VM", test.log.info) main_vm = params["main_vm"] @@ -206,23 +199,20 @@ def eject_cdrom(device): cdrom_name = params.get("orig_cdrom", "images/orig.iso") file_size = params.get("orig_cdrom_size", 100) - orig_cdrom = create_iso_image(params, cdrom_name, prepare=True, - file_size=file_size) + orig_cdrom = create_iso_image(params, cdrom_name, prepare=True, file_size=file_size) cdrom_device = get_cdrom_device(vm) eject_check = QMPEventCheckCDEject(vm, cdrom_device) change_check = QMPEventCheckCDChange(vm, cdrom_device) - error_context.context("Attach a small cd iso file to the cdrom.", - test.log.info) + error_context.context("Attach a small cd iso file to the cdrom.", test.log.info) change_media(cdrom_device, orig_cdrom) if mount_cmd: - mount_cdrom(session, guest_cdrom, mount_point, - show_mount_cmd, mount_cmd) + mount_cdrom(session, guest_cdrom, mount_point, show_mount_cmd, mount_cmd) orig_size = utils_misc.wait_for(lambda: check_cdrom_size(session), 60, 5, 3) if orig_size == empty_size: - err = "Get same block size '%s' after new cdrom attached" % orig_size + err = f"Get same block size '{orig_size}' after new cdrom attached" test.fail(err) if umount_cmd: @@ -230,8 +220,8 @@ def eject_cdrom(device): umount_cmd = umount_cmd % mount_point status, output = session.cmd_status_output(umount_cmd, timeout=360) if status: - msg = "Unable to umount cdrom. command: %s\n" % umount_cmd - msg += "Output: %s" % output + msg = f"Unable to umount cdrom. 
command: {umount_cmd}\n" + msg += f"Output: {output}" test.error(msg) error_context.context("eject the cdrom from monitor.", test.log.info) @@ -239,19 +229,17 @@ def eject_cdrom(device): cdrom_name = params.get("final_cdrom", "images/final.iso") file_size = params.get("final_cdrom_size", 1000) - final_cdrom = create_iso_image(params, cdrom_name, prepare=True, - file_size=file_size) - error_context.context("Attach a bigger cd iso file to the cdrom.", - test.log.info) + final_cdrom = create_iso_image( + params, cdrom_name, prepare=True, file_size=file_size + ) + error_context.context("Attach a bigger cd iso file to the cdrom.", test.log.info) change_media(cdrom_device, final_cdrom) if mount_cmd: - mount_cdrom(session, guest_cdrom, mount_point, - show_mount_cmd, mount_cmd) - final_size = utils_misc.wait_for(lambda: check_cdrom_size(session), - 60, 5, 3) + mount_cdrom(session, guest_cdrom, mount_point, show_mount_cmd, mount_cmd) + final_size = utils_misc.wait_for(lambda: check_cdrom_size(session), 60, 5, 3) if final_size == empty_size or final_size == orig_size: - err = "Get same block size '%s' after new cdrom attached" % final_size + err = f"Get same block size '{final_size}' after new cdrom attached" test.fail(err) # Check guest's network. diff --git a/qemu/tests/ceph_image_mem_leak.py b/qemu/tests/ceph_image_mem_leak.py index 34c32d875f..89802d1344 100644 --- a/qemu/tests/ceph_image_mem_leak.py +++ b/qemu/tests/ceph_image_mem_leak.py @@ -1,7 +1,6 @@ import time from avocado.utils import process - from virttest import error_context from virttest.utils_numeric import normalize_data_size @@ -21,11 +20,13 @@ def run(test, params, env): def _get_qemu_vmrss(): pid = vm.process.get_pid() qemu_stat_file = "/proc/" + str(pid) + "/status" - used_mem_cmd = "cat %s | grep VmRSS | awk -F ':\t* *' '{print $2}'" % \ - qemu_stat_file + used_mem_cmd = ( + f"cat {qemu_stat_file} | grep VmRSS | awk -F ':\t* *' '{{print $2}}'" + ) used_mem_size = process.system_output(used_mem_cmd, shell=True) - used_mem_size_byte = normalize_data_size(str(used_mem_size), - order_magnitude='B') + used_mem_size_byte = normalize_data_size( + str(used_mem_size), order_magnitude="B" + ) return used_mem_size_byte vm = env.get_vm(params["main_vm"]) @@ -37,8 +38,9 @@ def _get_qemu_vmrss(): used_mem_size_before = _get_qemu_vmrss() test.log.info( - "The qemu-kvm process used mem size before querying blocks is %s" % - used_mem_size_before) + "The qemu-kvm process used mem size before querying blocks is %s", + used_mem_size_before, + ) test.log.info("Begin to query blocks for 1 hour.") timeout = time.time() + 60 * 60 * 1 # 1 hour from now @@ -50,12 +52,13 @@ def _get_qemu_vmrss(): used_mem_size_after = _get_qemu_vmrss() test.log.info( - "The qemu-kvm process used mem size after querying blocks is %s" % - used_mem_size_after) + "The qemu-kvm process used mem size after querying blocks is %s", + used_mem_size_after, + ) test.log.info("Check whether the used memory size is increased.") if int(used_mem_size_after) > int(used_mem_size_before) * 1.2: test.fail( - "The used memory size before is %s, but after checking the blocks " - "for 1 hour, it increased to %s. There should be memory leaks, " - "check please." % ( - used_mem_size_before, used_mem_size_after)) + f"The used memory size before is {used_mem_size_before}, but after checking the blocks " + f"for 1 hour, it increased to {used_mem_size_after}. There should be memory leaks, " + "check please." 
+ ) diff --git a/qemu/tests/cgroup.py b/qemu/tests/cgroup.py index 49ede5abc4..1f8d49960e 100644 --- a/qemu/tests/cgroup.py +++ b/qemu/tests/cgroup.py @@ -3,38 +3,26 @@ :author: Lukas Doktor :copyright: 2011 Red Hat, Inc. """ + import os import re -import time import threading +import time -from avocado.utils import astring -from avocado.utils import process +from aexpect import ExpectProcessTerminatedError, ExpectTimeoutError, ShellTimeoutError from avocado.core import exceptions - -from aexpect import ExpectTimeoutError -from aexpect import ExpectProcessTerminatedError -from aexpect import ShellTimeoutError - +from avocado.utils import astring, process +from virttest import env_process, error_context, qemu_monitor, utils_misc from virttest.env_process import preprocess -from virttest import qemu_monitor -from virttest import error_context -from virttest import utils_misc -from virttest import env_process - from virttest.staging import utils_memory -from virttest.staging.utils_cgroup import Cgroup -from virttest.staging.utils_cgroup import CgroupModules -from virttest.staging.utils_cgroup import get_load_per_cpu +from virttest.staging.utils_cgroup import Cgroup, CgroupModules, get_load_per_cpu from virttest.utils_test import VMStress - # Serial ID of the attached disk RANDOM_DISK_NAME = "RANDOM46464634164145" class SparseRange(list): - """ Turns kernel-like sparse array into list of values """ @@ -43,17 +31,16 @@ def __init__(self, value): """ :param value: sparse array string (eg.: "0-1,16-17") """ - super(SparseRange, self).__init__() + super().__init__() try: - for section in value.split(','): - vals = section.split('-') + for section in value.split(","): + vals = section.split("-") if len(vals) == 1: # single value self.append(int(vals[0])) - else: # range + else: # range self.extend(range(int(vals[0]), int(vals[1]) + 1)) except ValueError as details: - raise ValueError("Can't parse SparseRange from %s: %s" - % (value, details)) + raise ValueError(f"Can't parse SparseRange from {value}: {details}") def str_slice(self, start=0, stop=None, step=1): """ @@ -62,11 +49,13 @@ def str_slice(self, start=0, stop=None, step=1): :param step: slice step :return: kernel-like sparce array string from existing values """ + def append_section(out, sect_start, sect_stop): if sect_start == sect_stop: out.append(str(sect_start)) else: out.append("%d-%d" % (sect_start, sect_stop)) + if stop is None: stop = len(self) out = [] @@ -92,6 +81,7 @@ def run(test, params, env): """ Tests the cgroup functions on KVM guests. 
""" + # Func def assign_vm_into_cgroup(vm, cgroup, pwd=None): """ @@ -105,16 +95,17 @@ def assign_vm_into_cgroup(vm, cgroup, pwd=None): for pid in vm.get_qemu_threads(): try: cgroup.set_cgroup(int(pid), pwd) - except Exception as detail: # Process might not already exist - if os.path.exists("/proc/%s/" % pid): + except Exception as detail: # Process might not already exist + if os.path.exists(f"/proc/{pid}/"): raise detail - else: # Thread doesn't exist, try it again + else: # Thread doesn't exist, try it again break - else: # All PIDs moved + else: # All PIDs moved break else: - raise exceptions.TestFail("Failed to move all VM threads to new cgroup" - " in %d trials" % i) + raise exceptions.TestFail( + "Failed to move all VM threads to new cgroup" " in %d trials" % i + ) def distance(actual, reference): """ @@ -141,19 +132,20 @@ def get_dd_cmd(direction, count=None, blocksize=None): else: params = "if=$FILE of=$FILE iflag=direct oflag=direct" if blocksize: - params += " bs=%s" % blocksize + params += f" bs={blocksize}" if count: - params += " count=%s" % count - return ("export FILE=$(ls /dev/disk/by-id/*%s | tail -n 1); touch /tmp/cgroup_lock" - " ; while [ -e /tmp/cgroup_lock ]; do dd %s ; done" - % (RANDOM_DISK_NAME, params)) + params += f" count={count}" + return ( + f"export FILE=$(ls /dev/disk/by-id/*{RANDOM_DISK_NAME} | tail -n 1); touch /tmp/cgroup_lock" + f" ; while [ -e /tmp/cgroup_lock ]; do dd {params} ; done" + ) def get_device_driver(): """ Discovers the used block device driver {ide, scsi, virtio_blk} :return: Used block device driver {ide, scsi, virtio} """ - return params.get('drive_format', 'virtio') + return params.get("drive_format", "virtio") def get_maj_min(dev): """ @@ -164,8 +156,7 @@ def get_maj_min(dev): rdev = os.stat(dev).st_rdev ret = (os.major(rdev), os.minor(rdev)) except Exception as details: - raise exceptions.TestFail("get_maj_min(%s) failed: %s" % - (dev, details)) + raise exceptions.TestFail(f"get_maj_min({dev}) failed: {details}") return ret def rm_scsi_disks(no_disks): @@ -174,10 +165,12 @@ def rm_scsi_disks(no_disks): :param no_disks: How many disks to remove :note: params['cgroup_rmmod_scsi_debug'] == "yes" => rmmod scsi_debug """ - process.system("echo -%d > /sys/bus/pseudo/drivers/scsi_debug/add_host" - % no_disks, shell=True) + process.system( + "echo -%d > /sys/bus/pseudo/drivers/scsi_debug/add_host" % no_disks, + shell=True, + ) - if params.get('cgroup_rmmod_scsi_debug', "no") == "yes": + if params.get("cgroup_rmmod_scsi_debug", "no") == "yes": process.system("rmmod scsi_debug") def param_add_scsi_disks(): @@ -185,29 +178,29 @@ def param_add_scsi_disks(): Adds scsi_debug disk to every VM in params['vms'] :param prefix: adds prefix to drive name """ - if process.system("lsmod | grep scsi_debug", ignore_status=True, - shell=True): + if process.system("lsmod | grep scsi_debug", ignore_status=True, shell=True): process.system("modprobe scsi_debug dev_size_mb=8 add_host=0") - for name in params['vms'].split(' '): + for name in params["vms"].split(" "): disk_name = "scsi-debug-" + name - process.system("echo 1 > /sys/bus/pseudo/drivers/scsi_debug/" - "add_host", shell=True) - time.sleep(1) # Wait for device init + process.system( + "echo 1 > /sys/bus/pseudo/drivers/scsi_debug/" "add_host", shell=True + ) + time.sleep(1) # Wait for device init dev = process.getoutput("ls /dev/sd* | tail -n 1", shell=True) # Enable idling in scsi_debug drive - process.system("echo 1 > /sys/block/%s/queue/rotational" - % (dev.split('/')[-1]), shell=True) - vm_disks = 
params.get('images_%s' % name, - params.get('images', 'image1')) - params['images_%s' % name] = "%s %s" % (vm_disks, disk_name) - params['image_name_%s' % disk_name] = dev - params['image_snapshot_%s' % disk_name] = "no" - params['image_format_%s' % disk_name] = "raw" - params['remove_image_%s' % disk_name] = "no" - params['image_raw_device_%s' % disk_name] = "yes" - params['drive_cache_%s' % disk_name] = params.get('drive_cache', - 'none') - params['drive_serial_%s' % disk_name] = RANDOM_DISK_NAME + process.system( + "echo 1 > /sys/block/{}/queue/rotational".format(dev.split("/")[-1]), + shell=True, + ) + vm_disks = params.get(f"images_{name}", params.get("images", "image1")) + params[f"images_{name}"] = f"{vm_disks} {disk_name}" + params[f"image_name_{disk_name}"] = dev + params[f"image_snapshot_{disk_name}"] = "no" + params[f"image_format_{disk_name}"] = "raw" + params[f"remove_image_{disk_name}"] = "no" + params[f"image_raw_device_{disk_name}"] = "yes" + params[f"drive_cache_{disk_name}"] = params.get("drive_cache", "none") + params[f"drive_serial_{disk_name}"] = RANDOM_DISK_NAME def param_add_file_disks(size, prefix="hd2-"): """ @@ -215,21 +208,19 @@ def param_add_file_disks(size, prefix="hd2-"): :param size: Disk size (1M) :param prefix: adds prefix to drive name """ - for name in params['vms'].split(' '): - vm_disks = params.get('images_%s' % name, - params.get('images', 'image1')) + for name in params["vms"].split(" "): + vm_disks = params.get(f"images_{name}", params.get("images", "image1")) disk_name = prefix + name - params['images_%s' % name] = "%s %s" % (vm_disks, disk_name) - params['image_size_%s' % disk_name] = size - params['image_name_%s' % disk_name] = disk_name - params['image_snapshot_%s' % disk_name] = "no" - params['force_create_image_%s' % disk_name] = "yes" - params['image_format_%s' % disk_name] = "raw" - params['create_with_dd_%s' % disk_name] = "yes" - params['remove_image_%s' % disk_name] = "yes" - params['drive_cache_%s' % disk_name] = params.get('drive_cache', - 'none') - params['drive_serial_%s' % disk_name] = RANDOM_DISK_NAME + params[f"images_{name}"] = f"{vm_disks} {disk_name}" + params[f"image_size_{disk_name}"] = size + params[f"image_name_{disk_name}"] = disk_name + params[f"image_snapshot_{disk_name}"] = "no" + params[f"force_create_image_{disk_name}"] = "yes" + params[f"image_format_{disk_name}"] = "raw" + params[f"create_with_dd_{disk_name}"] = "yes" + params[f"remove_image_{disk_name}"] = "yes" + params[f"drive_cache_{disk_name}"] = params.get("drive_cache", "none") + params[f"drive_serial_{disk_name}"] = RANDOM_DISK_NAME def param_add_vms(no_vms): """ @@ -237,10 +228,10 @@ def param_add_vms(no_vms): :param no_vms: Desired number of VMs :note: All defined VMs are overwritten. 
""" - params['vms'] = "" + params["vms"] = "" for i in range(no_vms): - params['vms'] += "vm%s " % i - params['vms'] = params['vms'][:-1] + params["vms"] += f"vm{i} " + params["vms"] = params["vms"][:-1] # Tests @error_context.context_aware @@ -253,6 +244,7 @@ def blkio_bandwidth(): :param cfg: cgroup_weights - list of R/W weights '[100, 1000]' :param cfg: cgroup_limit{ ,_read,_write} - allowed R/W threshold '0.1' """ + def _test(direction): """ Executes loop of dd commands, kills it after $test_time and @@ -271,15 +263,15 @@ def _test(direction): # Force stats in case no dd cmd finished sessions[i * 2 + 1].sendline(stat_cmd) for i in range(no_vms): - out.append(sessions[i * 2].read_until_output_matches( - [re_dd])[1]) + out.append(sessions[i * 2].read_until_output_matches([re_dd])[1]) # Stop all transfers (on 2nd sessions) for i in range(no_vms): sessions[i * 2 + 1].sendline(kill_cmd) # Read the rest of the stats for i in range(no_vms): out[-1] = out[-1] + sessions[i * 2].read_up_to_prompt( - timeout=120 + test_time) + timeout=120 + test_time + ) for i in range(no_vms): # Get all dd loops' statistics @@ -299,38 +291,42 @@ def _test(direction): sum_weights = float(sum(weights)) for i in range(len(weights)): # [status, norm_weights, norm_out, actual] - out[i] = ['PASS', weights[i] / sum_weights, out[i] / sum_out, - out[i]] + out[i] = ["PASS", weights[i] / sum_weights, out[i] / sum_out, out[i]] err = "" - limit = float(params.get('cgroup_limit_%s' % direction, - params.get('cgroup_limit', 0.1))) + limit = float( + params.get(f"cgroup_limit_{direction}", params.get("cgroup_limit", 0.1)) + ) # if any of norm_output doesn't ~ match norm_weights, log it. for i in range(len(out)): - if (out[i][2] > (out[i][1] + limit) or - out[i][2] < (out[i][1] - limit)): - out[i][0] = 'FAIL' + if out[i][2] > (out[i][1] + limit) or out[i][2] < (out[i][1] - limit): + out[i][0] = "FAIL" err += "%d, " % i - test.log.info("blkio_bandwidth_%s: dd statistics\n%s", direction, - astring.tabular_output(out, ['status', 'norm_weights', - 'norm_out', 'actual'])) + test.log.info( + "blkio_bandwidth_%s: dd statistics\n%s", + direction, + astring.tabular_output( + out, ["status", "norm_weights", "norm_out", "actual"] + ), + ) if err: - err = ("blkio_bandwidth_%s: limits [%s] were broken" - % (direction, err[:-2])) + err = f"blkio_bandwidth_{direction}: limits [{err[:-2]}] were broken" test.log.debug(err) - return err + '\n' + return err + "\n" return "" test.log.info("Init") try: - weights = eval(params.get('cgroup_weights', "[100, 1000]")) + weights = eval(params.get("cgroup_weights", "[100, 1000]")) if type(weights) is not list: raise TypeError except TypeError: - raise exceptions.TestError("Incorrect configuration: param " - "cgroup_weights have to be list-like string '[1, 2]'") + raise exceptions.TestError( + "Incorrect configuration: param " + "cgroup_weights have to be list-like string '[1, 2]'" + ) test_time = int(params.get("cgroup_test_time", 60)) test.log.info("Prepare VMs") # Prepare enough VMs each with 1 disk for testing @@ -340,18 +336,18 @@ def _test(direction): preprocess(test, params, env) vms = [] - sessions = [] # 2 sessions per VM + sessions = [] # 2 sessions per VM timeout = int(params.get("login_timeout", 360)) - for name in params['vms'].split(): + for name in params["vms"].split(): vms.append(env.get_vm(name)) sessions.append(vms[-1].wait_for_login(timeout=timeout)) sessions.append(vms[-1].wait_for_login(timeout=30)) test.log.info("Setup test") modules = CgroupModules() - if modules.init(['blkio']) != 
1: + if modules.init(["blkio"]) != 1: raise exceptions.TestFail("Can't mount blkio cgroup modules") - blkio = Cgroup('blkio', '') + blkio = Cgroup("blkio", "") blkio.initialize(modules) for i in range(no_vms): blkio.mk_cgroup() @@ -363,8 +359,10 @@ def _test(direction): # ; true is necessarily when there is no dd present at the time kill_cmd = "rm -f /tmp/cgroup_lock; killall -9 dd; true" stat_cmd = "killall -SIGUSR1 dd; true" - re_dd = (r'(\d+) bytes \(\d+\.*\d* \w*(, \d+\.*\d* \w*)?\) copied, ' - r'(\d+\.*\d*) s, \d+\.*\d* \w./s') + re_dd = ( + r"(\d+) bytes \(\d+\.*\d* \w*(, \d+\.*\d* \w*)?\) copied, " + r"(\d+\.*\d*) s, \d+\.*\d* \w./s" + ) err = "" try: test.log.info("Read test") @@ -416,13 +414,15 @@ def blkio_throttle(): """ test.log.info("Init") try: - speeds = eval(params.get('cgroup_speeds', "[1024]")) + speeds = eval(params.get("cgroup_speeds", "[1024]")) if type(speeds) is not list: raise TypeError except TypeError: - raise exceptions.TestError("Incorrect configuration: param " - "cgroup_speeds have to be list of strings" - "eg. [1024] or [1024,2048,8192].") + raise exceptions.TestError( + "Incorrect configuration: param " + "cgroup_speeds have to be list of strings" + "eg. [1024] or [1024,2048,8192]." + ) # Make param suitable for multitest and execute it. return blkio_throttle_multi([[_] for _ in speeds]) @@ -442,6 +442,7 @@ def blkio_throttle_multi(speeds=None): and speeds [[speed1],[speed2],..],..]. '[[1024,0,2048,0,8192]]' """ + def _test(direction, blkio): """ Executes loop of small dd transfers changes cgroups and measures @@ -452,8 +453,9 @@ def _test(direction, blkio): # Test # can't set bs for scsi_debug, default is 512b dd_cmd = get_dd_cmd(direction, count=3) - limit = float(params.get('cgroup_limit_%s' % direction, - params.get('cgroup_limit', 0.1))) + float( + params.get(f"cgroup_limit_{direction}", params.get("cgroup_limit", 0.1)) + ) # every scenario have list of results [[][][]] out = [] # every VM have one output [] @@ -465,8 +467,9 @@ def _test(direction, blkio): # assign all VMs to current scenario cgroup assign_vm_into_cgroup(vms[i], blkio, i * no_speeds + j) _ += "vm%d:%d, " % (i, speeds[i][j]) - test.log.debug("blkio_throttle_%s: Current speeds: %s", - direction, _[:-2]) + test.log.debug( + "blkio_throttle_%s: Current speeds: %s", direction, _[:-2] + ) # Restart all transfers (on 1st sessions) for i in range(no_vms): sessions[i * 2].sendline(dd_cmd) @@ -476,16 +479,15 @@ def _test(direction, blkio): # Force stats in case no dd cmd finished sessions[i * 2 + 1].sendline(stat_cmd) for i in range(no_vms): - out[i].append(sessions[i * 2].read_until_output_matches( - [re_dd])[1]) + out[i].append(sessions[i * 2].read_until_output_matches([re_dd])[1]) # Stop all transfers (on 2nd sessions) for i in range(no_vms): sessions[i * 2 + 1].sendline(kill_cmd) # Read the rest of the stats for i in range(no_vms): - out[i][-1] = (out[i][-1] + - sessions[i * 2].read_up_to_prompt( - timeout=120 + test_time)) + out[i][-1] = out[i][-1] + sessions[i * 2].read_up_to_prompt( + timeout=120 + test_time + ) # bash needs some time... 
time.sleep(1) @@ -506,8 +508,9 @@ def _test(direction, blkio): for _ in re.findall(re_dd, out[i][j]): data += int(_[0]) duration += float(_[2]) - output.append(['PASS', j, 'vm%d' % i, speeds[i][j], - int(data / duration)]) + output.append( + ["PASS", j, "vm%d" % i, speeds[i][j], int(data / duration)] + ) # Don't measure unlimited speeds if speeds[i][j] == 0: output[-1][0] = "INF" @@ -517,47 +520,50 @@ def _test(direction, blkio): output[-1][0] = "FAIL" # TODO: Unlimited speed fluctuates during test - test.log.info("blkio_throttle_%s: dd statistics\n%s", direction, - astring.tabular_output(output, ['result', 'it', - 'vm', 'speed', 'actual'])) + test.log.info( + "blkio_throttle_%s: dd statistics\n%s", + direction, + astring.tabular_output( + output, ["result", "it", "vm", "speed", "actual"] + ), + ) if err: - err = ("blkio_throttle_%s: limits [%s] were broken" - % (direction, err[:-2])) + err = f"blkio_throttle_{direction}: limits [{err[:-2]}] were broken" test.log.debug(err) - return err + '\n' + return err + "\n" return "" test.log.info("Init") no_speeds = 0 if speeds: # blkio_throttle no_speeds = len(speeds[0]) - else: # blkio_throttle_multi + else: # blkio_throttle_multi try: - speeds = eval(params.get('cgroup_speeds', - "[[1024,0,2048,0,8192]]")) + speeds = eval(params.get("cgroup_speeds", "[[1024,0,2048,0,8192]]")) if type(speeds) is not list: raise TypeError if type(speeds[0]) is not list: - test.log.warn("cgroup_speeds have to be listOfLists") + test.log.warning("cgroup_speeds have to be listOfLists") speeds = [speeds] no_speeds = len(speeds[0]) for speed in speeds: if type(speed) is not list: - test.log.error("One of cgroup_speeds sublists is not " - "list") + test.log.error("One of cgroup_speeds sublists is not " "list") raise TypeError if len(speed) != no_speeds: - test.log.error("cgroup_speeds sublists have different " - "lengths") + test.log.error( + "cgroup_speeds sublists have different " "lengths" + ) raise TypeError except TypeError: - raise exceptions.TestError("Incorrect configuration: param " - "cgroup_speeds have to be listOfList-" - "like string with same lengths. " - "([[1024]] or [[0,1024],[1024,2048]])") + raise exceptions.TestError( + "Incorrect configuration: param " + "cgroup_speeds have to be listOfList-" + "like string with same lengths. 
" + "([[1024]] or [[0,1024],[1024,2048]])" + ) # Minimum testing time is 30s (dd must copy few blocks) - test_time = max(int(params.get("cgroup_test_time", 60)) / no_speeds, - 30) + test_time = max(int(params.get("cgroup_test_time", 60)) / no_speeds, 30) test.log.info("Prepare VMs") # create enough of VMs with scsi_debug attached disks @@ -570,46 +576,58 @@ def _test(direction, blkio): sessions = [] timeout = int(params.get("login_timeout", 360)) # 2 sessions per VM - for name in params['vms'].split(): + for name in params["vms"].split(): vms.append(env.get_vm(name)) sessions.append(vms[-1].wait_for_login(timeout=timeout)) sessions.append(vms[-1].wait_for_login(timeout=30)) test.log.info("Setup test") modules = CgroupModules() - if modules.init(['blkio']) != 1: + if modules.init(["blkio"]) != 1: raise exceptions.TestFail("Can't mount blkio cgroup modules") - blkio = Cgroup('blkio', '') + blkio = Cgroup("blkio", "") blkio.initialize(modules) for i in range(no_vms): # Set speeds for each scsi_debug device for each VM - dev = get_maj_min(params['image_name_scsi-debug-%s' % vms[i].name]) + dev = get_maj_min(params[f"image_name_scsi-debug-{vms[i].name}"]) for j in range(no_speeds): speed = speeds[i][j] blkio.mk_cgroup() if speed == 0: # Disable limit (removes the limit) - blkio.set_property("blkio.throttle.write_bps_device", - "%s:%s %s" % (dev[0], dev[1], speed), - i * no_speeds + j, check="") - blkio.set_property("blkio.throttle.read_bps_device", - "%s:%s %s" % (dev[0], dev[1], speed), - i * no_speeds + j, check="") - else: # Enable limit (input separator ' ', output '\t') - blkio.set_property("blkio.throttle.write_bps_device", - "%s:%s %s" % (dev[0], dev[1], speed), - i * no_speeds + j, check="%s:%s\t%s" - % (dev[0], dev[1], speed)) - blkio.set_property("blkio.throttle.read_bps_device", - "%s:%s %s" % (dev[0], dev[1], speed), - i * no_speeds + j, check="%s:%s\t%s" - % (dev[0], dev[1], speed)) - blkio.mk_cgroup() # last one is unlimited + blkio.set_property( + "blkio.throttle.write_bps_device", + f"{dev[0]}:{dev[1]} {speed}", + i * no_speeds + j, + check="", + ) + blkio.set_property( + "blkio.throttle.read_bps_device", + f"{dev[0]}:{dev[1]} {speed}", + i * no_speeds + j, + check="", + ) + else: # Enable limit (input separator ' ', output '\t') + blkio.set_property( + "blkio.throttle.write_bps_device", + f"{dev[0]}:{dev[1]} {speed}", + i * no_speeds + j, + check=f"{dev[0]}:{dev[1]}\t{speed}", + ) + blkio.set_property( + "blkio.throttle.read_bps_device", + f"{dev[0]}:{dev[1]} {speed}", + i * no_speeds + j, + check=f"{dev[0]}:{dev[1]}\t{speed}", + ) + blkio.mk_cgroup() # last one is unlimited # ; true is necessarily when there is no dd present at the time kill_cmd = "rm -f /tmp/cgroup_lock; killall -9 dd; true" stat_cmd = "killall -SIGUSR1 dd; true" - re_dd = (r'(\d+) bytes \(\d+\.*\d* \w*(, \d+\.*\d* \w*)?\) copied, ' - r'(\d+\.*\d*) s, \d+\.*\d* \w./s') + re_dd = ( + r"(\d+) bytes \(\d+\.*\d* \w*(, \d+\.*\d* \w*)?\) copied, " + r"(\d+\.*\d*) s, \d+\.*\d* \w./s" + ) err = "" try: test.log.info("Read test") @@ -662,17 +680,17 @@ def cpu_cfs_util(): """ test.log.info("Setup test") modules = CgroupModules() - if modules.init(['cpu']) != 1: + if modules.init(["cpu"]) != 1: raise exceptions.TestFail("Can't mount cpu cgroup modules") - cgroup = Cgroup('cpu', '') + cgroup = Cgroup("cpu", "") cgroup.initialize(modules) - host_cpus = open('/proc/cpuinfo').read().count('processor') + host_cpus = open("/proc/cpuinfo").read().count("processor") # Create first VM - params['smp'] = 1 + params["smp"] = 1 
params["vcpu_sockets"] = 1 params["vcpu_maxcpus"] = host_cpus - params['vms'] = "vm0" + params["vms"] = "vm0" preprocess(test, params, env) test.log.info("Prepare VMs") @@ -705,7 +723,7 @@ def cpu_cfs_util(): while vm_cpus < 2 * host_cpus: vm_name = "clone%d" % i smp = min(2 * smp, 2 * host_cpus - vm_cpus) - _params['smp'] = smp + _params["smp"] = smp vms.append(vms[0].clone(vm_name, _params)) env.register_vm(vm_name, vms[-1]) vms[-1].create() @@ -730,12 +748,12 @@ def cpu_cfs_util(): cmd = "renice -n 10 $$; " cmd += "while [ -e /tmp/cgroup-cpu-lock ] ; do :; done" - kill_cmd = 'rm -f /tmp/cgroup-cpu-lock' + kill_cmd = "rm -f /tmp/cgroup-cpu-lock" stats = [] # test_time is 1s stabilization, 1s first meass., 9s second and the # rest of cgroup_test_time as 3rd meassurement. - test_time = max(1, int(params.get('cgroup_test_time', 60)) - 11) + test_time = max(1, int(params.get("cgroup_test_time", 60)) - 11) err = [] try: test.log.info("Test") @@ -743,15 +761,15 @@ def cpu_cfs_util(): session.sendline(cmd) time.sleep(1) - stats.append(open('/proc/stat', 'r').readline()) + stats.append(open("/proc/stat", "r").readline()) time.sleep(1) - stats.append(open('/proc/stat', 'r').readline()) + stats.append(open("/proc/stat", "r").readline()) time.sleep(9) - stats.append(open('/proc/stat', 'r').readline()) + stats.append(open("/proc/stat", "r").readline()) time.sleep(test_time) - stats.append(open('/proc/stat', 'r').readline()) + stats.append(open("/proc/stat", "r").readline()) for session in serials: - session.sendline('rm -f /tmp/cgroup-cpu-lock') + session.sendline("rm -f /tmp/cgroup-cpu-lock") # /proc/stat first line is cumulative CPU usage # 1-8 are host times, 8-9 are guest times (on older kernels only 8) @@ -763,8 +781,9 @@ def cpu_cfs_util(): for i in range(1, len(stats)): stats[i] = [int(_) for _ in stats[i].split()[1:]] try: - stats[i] = (float(sum(stats[i][8:]) - stats[0][1]) / - (sum(stats[i][0:8]) - stats[0][0])) + stats[i] = float(sum(stats[i][8:]) - stats[0][1]) / ( + sum(stats[i][0:8]) - stats[0][0] + ) except ZeroDivisionError: test.log.error("ZeroDivisionError in stats calculation") stats[i] = False @@ -773,8 +792,13 @@ def cpu_cfs_util(): for i in range(1, len(stats)): # Utilisation should be 100% - allowed treshold (limit) if stats[i] < limit: - test.log.debug("%d: the utilisation of guest time is %s, " - "smaller than limit %s", i, stats[i], limit) + test.log.debug( + "%d: the utilisation of guest time is %s, " + "smaller than limit %s", + i, + stats[i], + limit, + ) err.append(i) finally: @@ -795,11 +819,10 @@ def cpu_cfs_util(): test.log.info("Results") if err: - err = ("The host vs. guest CPU time ratio is over %s in %s cases" - % (limit, err)) + err = f"The host vs. guest CPU time ratio is over {limit} in {err} cases" raise exceptions.TestFail(err) else: - return "Guest times are over %s%%: %s" % (limit, stats[1:]) + return f"Guest times are over {limit}%: {stats[1:]}" @error_context.context_aware def cpu_share(): @@ -812,8 +835,9 @@ def cpu_share(): :param cfg: cgroup_speeds - list of speeds of each vms [vm0, vm1,..]. List is sorted in test! '[10000, 100000]' """ + def _get_stat(f_stats, _stats=None): - """ Reads CPU times from f_stats[] files and sumarize them. 
""" + """Reads CPU times from f_stats[] files and sumarize them.""" if _stats is None: _stats = [] for i in range(len(f_stats)): @@ -827,20 +851,22 @@ def _get_stat(f_stats, _stats=None): test.log.info("Init") try: - speeds = eval(params.get('cgroup_speeds', '[10000, 100000]')) + speeds = eval(params.get("cgroup_speeds", "[10000, 100000]")) if type(speeds) is not list: raise TypeError except TypeError: - raise exceptions.TestError("Incorrect configuration: param " - "cgroup_speeds have to be list-like string '[1, 2]'") + raise exceptions.TestError( + "Incorrect configuration: param " + "cgroup_speeds have to be list-like string '[1, 2]'" + ) - host_cpus = open('/proc/cpuinfo').read().count('processor') + host_cpus = open("/proc/cpuinfo").read().count("processor") # when smp <= 0 use smp = no_host_cpus - vm_cpus = int(params.get('smp', 0)) # cpus per VM + vm_cpus = int(params.get("smp", 0)) # cpus per VM params["vcpu_maxcpus"] = host_cpus # Use smp = no_host_cpu - if vm_cpus <= 0 or params.get('cgroup_use_max_smp') == "yes": - params['smp'] = host_cpus + if vm_cpus <= 0 or params.get("cgroup_use_max_smp") == "yes": + params["smp"] = host_cpus vm_cpus = host_cpus no_speeds = len(speeds) # All host_cpus have to be used with no_speeds overcommit @@ -849,9 +875,9 @@ def _get_stat(f_stats, _stats=None): sessions = [] serials = [] modules = CgroupModules() - if modules.init(['cpu']) != 1: + if modules.init(["cpu"]) != 1: raise exceptions.TestFail("Can't mount cpu cgroup modules") - cgroup = Cgroup('cpu', '') + cgroup = Cgroup("cpu", "") cgroup.initialize(modules) test.log.info("Prepare VMs") @@ -869,7 +895,7 @@ def _get_stat(f_stats, _stats=None): for i in range(no_speeds): cgroup.mk_cgroup() - cgroup.set_property('cpu.shares', speeds[i], i) + cgroup.set_property("cpu.shares", speeds[i], i) for i in range(no_vms): assign_vm_into_cgroup(vms[i], cgroup, i % no_speeds) sessions[i].cmd("touch /tmp/cgroup-cpu-lock") @@ -881,14 +907,14 @@ def _get_stat(f_stats, _stats=None): err = [] # Time 0 for vm in vms: - f_stats.append(open("/proc/%d/stat" % vm.get_pid(), 'r')) + f_stats.append(open("/proc/%d/stat" % vm.get_pid(), "r")) time_init = 2 # there are 6 tests time_test = max(int(params.get("cgroup_test_time", 60)) / 6, 5) - thread_count = 0 # actual thread number + thread_count = 0 # actual thread number stats = [] - cmd = "renice -n 10 $$; " # new ssh login should pass + cmd = "renice -n 10 $$; " # new ssh login should pass cmd += "while [ -e /tmp/cgroup-cpu-lock ]; do :; done" # Occupy all host_cpus with 1 task (no overcommit) for thread_count in range(0, host_cpus): @@ -927,14 +953,16 @@ def _get_stat(f_stats, _stats=None): # I. i = 0 # only first #host_cpus guests were running - dist = distance(min(stats[i][:host_cpus]), - max(stats[i][:host_cpus])) + dist = distance(min(stats[i][:host_cpus]), max(stats[i][:host_cpus])) # less vms, lower limit. Maximal limit is 0.2 if dist > min(0.15 + 0.01 * len(vms), 0.2): err += "1, " - test.log.error("1st part's limits broken. Utilisation should be" - " equal. stats = %s, distance = %s", stats[i], - dist) + test.log.error( + "1st part's limits broken. Utilisation should be" + " equal. stats = %s, distance = %s", + stats[i], + dist, + ) else: test.log.info("1st part's distance = %s", dist) # II. @@ -943,34 +971,48 @@ def _get_stat(f_stats, _stats=None): if host_cpus % no_speeds == 0 and no_speeds <= host_cpus: if dist > min(0.15 + 0.01 * len(vms), 0.2): err += "2, " - test.log.error("2nd part's limits broken, Utilisation " - "should be equal. 
stats = %s, distance = %s", - stats[i], dist) + test.log.error( + "2nd part's limits broken, Utilisation " + "should be equal. stats = %s, distance = %s", + stats[i], + dist, + ) else: test.log.info("2nd part's distance = %s", dist) else: - test.log.warn("2nd part's verification skipped (#cgroup,#cpu)," - " stats = %s,distance = %s", stats[i], dist) + test.log.warning( + "2nd part's verification skipped (#cgroup,#cpu)," + " stats = %s,distance = %s", + stats[i], + dist, + ) # III. # normalize stats, then they should have equal values i += 1 for i in range(i, len(stats)): - norm_stats = [float(stats[i][_]) / speeds[_] - for _ in range(len(stats[i]))] + norm_stats = [ + float(stats[i][_]) / speeds[_] for _ in range(len(stats[i])) + ] dist = distance(min(norm_stats), max(norm_stats)) if dist > min(0.15 + 0.02 * len(vms), 0.25): err += "3, " - test.log.error("3rd part's limits broken; utilisation " - "should be in accordance to self.speeds. " - "stats=%s, norm_stats=%s, distance=%s, " - "speeds=%s,it=%d", stats[i], norm_stats, - dist, speeds, i - 1) + test.log.error( + "3rd part's limits broken; utilisation " + "should be in accordance to self.speeds. " + "stats=%s, norm_stats=%s, distance=%s, " + "speeds=%s,it=%d", + stats[i], + norm_stats, + dist, + speeds, + i - 1, + ) else: test.log.info("3rd part's norm_dist = %s", dist) if err: - err = "[%s] parts broke their limits" % err[:-2] + err = f"[{err[:-2]}] parts broke their limits" test.log.error(err) else: test.log.info("Cpu utilisation enforced successfully") @@ -1024,6 +1066,7 @@ def cpuset_cpus(): 'by default it assumes each used CPU will be 100% utilised' """ + def _generate_cpusets(vm_cpus, cpus): """ Generates 5 cpusets scenerios @@ -1077,19 +1120,23 @@ def _generate_verification(cpusets, cpus): if not ((type(cpusets) is list) or (cpusets is None)): raise Exception except Exception: - raise exceptions.TestError("Incorrect configuration: param cgroup_" - "cpuset have to be list of lists, where " - "all sublist have the same length and " - "the length is ('smp' + 1). Or 'None' for " - "default.\n%s" % cpusets) + raise exceptions.TestError( + "Incorrect configuration: param cgroup_" + "cpuset have to be list of lists, where " + "all sublist have the same length and " + "the length is ('smp' + 1). 
Or 'None' for " + f"default.\n{cpusets}" + ) try: verify = eval(params.get("cgroup_verify", "None")) if not ((type(cpusets) is list) or (cpusets is None)): raise Exception except Exception: - raise exceptions.TestError("Incorrect configuration: param cgroup_" - "verify have to be list of lists or 'None' " - "for default/automatic.\n%s" % verify) + raise exceptions.TestError( + "Incorrect configuration: param cgroup_" + "verify have to be list of lists or 'None' " + f"for default/automatic.\n{verify}" + ) limit = float(params.get("cgroup_limit", 0.05)) * 100 @@ -1097,9 +1144,9 @@ def _generate_verification(cpusets, cpus): vm = env.get_all_vms()[0] modules = CgroupModules() - if modules.init(['cpuset']) != 1: + if modules.init(["cpuset"]) != 1: raise exceptions.TestFail("Can't mount cpu cgroup modules") - cgroup = Cgroup('cpuset', '') + cgroup = Cgroup("cpuset", "") cgroup.initialize(modules) cpus = SparseRange(cgroup.get_property("cpuset.cpus")[0]) @@ -1109,23 +1156,27 @@ def _generate_verification(cpusets, cpus): # If cpuset specified, set smp accordingly if cpusets: if len(cpus) < (len(cpusets[0]) - 1): - err = ("Not enough host CPUs to run this test with selected " - "cpusets (cpus=%s, cpusets=%s)" % (len(cpus), cpusets)) + err = ( + "Not enough host CPUs to run this test with selected " + f"cpusets (cpus={len(cpus)}, cpusets={cpusets})" + ) test.log.error(err) raise exceptions.TestSkipError(err) - vm_cpus = len(cpusets[0]) - 1 # Don't count main_thread to vcpus + vm_cpus = len(cpusets[0]) - 1 # Don't count main_thread to vcpus for i in range(len(cpusets)): # length of each list have to be 'smp' + 1 if len(cpusets[i]) != (vm_cpus + 1): - err = ("cpusets inconsistent. %d sublist have different " - " length. (param cgroup_cpusets in cfg)." % i) + err = ( + "cpusets inconsistent. %d sublist have different " + " length. (param cgroup_cpusets in cfg)." % i + ) test.log.error(err) raise exceptions.TestError(err) # if cgroup_use_half_smp, set smp accordingly elif params.get("cgroup_use_half_smp") == "yes": vm_cpus = len(cpus) // 2 if len(cpus) == 2: - test.log.warn("Host have only 2 CPUs, using 'smp = all cpus'") + test.log.warning("Host have only 2 CPUs, using 'smp = all cpus'") vm_cpus = 2 if vm_cpus <= 1: @@ -1134,14 +1185,15 @@ def _generate_verification(cpusets, cpus): # Check whether smp changed and recreate VM if so if vm_cpus != (params.get_numeric("smp") or params.get_numeric("vcpu_maxcpus")): test.log.info("Expected VM reload.") - params['vcpu_maxcpus'] = params['smp'] = vm_cpus + params["vcpu_maxcpus"] = params["smp"] = vm_cpus vm.create(params=params) # Verify vcpus matches prescription vcpus = vm.get_vcpu_pids(debug=False) if len(vcpus) != vm_cpus: - raise exceptions.TestFail("Incorrect number of vcpu PIDs; smp=%s vcpus=" - "%s" % (vm_cpus, vcpus)) + raise exceptions.TestFail( + f"Incorrect number of vcpu PIDs; smp={vm_cpus} vcpus=" f"{vcpus}" + ) if not cpusets: test.log.info("Generating cpusets scenerios") @@ -1150,24 +1202,28 @@ def _generate_verification(cpusets, cpus): if verify: # Verify exists, check if it's correct for _ in verify: if len(_) != len(cpus): - err = ("Incorrect cgroup_verify. Each verify sublist have " - "to have length = no_host_cpus") + err = ( + "Incorrect cgroup_verify. 
Each verify sublist have " + "to have length = no_host_cpus" + ) test.log.error(err) raise exceptions.TestError(err) - else: # Generate one + else: # Generate one test.log.info("Generating cpusets expected results") try: verify = _generate_verification(cpusets, cpus) except IndexError: - raise exceptions.TestError("IndexError occurred while generating " - "verification data. Probably mismatched" - " no_host_cpus and cgroup_cpuset cpus") + raise exceptions.TestError( + "IndexError occurred while generating " + "verification data. Probably mismatched" + " no_host_cpus and cgroup_cpuset cpus" + ) test.log.info("Prepare") for i in range(len(cpus) + 1): cgroup.mk_cgroup() - cgroup.set_property('cpuset.cpus', cpus.str_slice(), i) - cgroup.set_property('cpuset.mems', mems.str_slice(), i) + cgroup.set_property("cpuset.cpus", cpus.str_slice(), i) + cgroup.set_property("cpuset.mems", mems.str_slice(), i) if i == 0: assign_vm_into_cgroup(vm, cgroup, 0) elif i <= vm_cpus: @@ -1177,14 +1233,14 @@ def _generate_verification(cpusets, cpus): sessions = [] stats = [] serial = vm.wait_for_serial_login(timeout=timeout) - cmd = "renice -n 10 $$; " # new ssh login should pass + cmd = "renice -n 10 $$; " # new ssh login should pass cmd += "while [ -e /tmp/cgroup-cpu-lock ]; do :; done" for i in range(vm_cpus * 2): sessions.append(vm.wait_for_login(timeout=timeout)) sessions[-1].cmd("touch /tmp/cgroup-cpu-lock") sessions[-1].sendline(cmd) - cpu_time_type = params.get_numeric('cpu_time_type') + cpu_time_type = params.get_numeric("cpu_time_type") try: test.log.info("Test") for i in range(len(cpusets)): @@ -1192,7 +1248,7 @@ def _generate_verification(cpusets, cpus): test.log.debug("testing: %s", cpuset) # setup scenario for i in range(len(cpuset)): - cgroup.set_property('cpuset.cpus', cpuset[i], i) + cgroup.set_property("cpuset.cpus", cpuset[i], i) # Time 0 _load = get_load_per_cpu() time.sleep(test_time) @@ -1209,24 +1265,25 @@ def _generate_verification(cpusets, cpus): stats[i] = [(_ / test_time) for _ in stats[i]] # Check # header and matrix variables are only for "beautiful" log - header = ['scen'] - header.extend([' cpu%d' % i for i in cpus]) + header = ["scen"] + header.extend([" cpu%d" % i for i in cpus]) matrix = [] for i in range(len(stats)): - matrix.append(['%d' % i]) + matrix.append(["%d" % i]) for j in range(len(stats[i])): - if ((stats[i][j] < (verify[i][j] - limit)) or - (stats[i][j] > (verify[i][j] + limit))): + if (stats[i][j] < (verify[i][j] - limit)) or ( + stats[i][j] > (verify[i][j] + limit) + ): err += "%d(%d), " % (i, j) - matrix[-1].append("%3d ! %d" % (verify[i][j], - stats[i][j])) + matrix[-1].append("%3d ! 
%d" % (verify[i][j], stats[i][j])) else: - matrix[-1].append("%3d ~ %d" % (verify[i][j], - stats[i][j])) - test.log.info("Results (theoretical ~ actual):\n%s", - astring.tabular_output(matrix, header)) + matrix[-1].append("%3d ~ %d" % (verify[i][j], stats[i][j])) + test.log.info( + "Results (theoretical ~ actual):\n%s", + astring.tabular_output(matrix, header), + ) if err: - err = "Scenerios %s FAILED" % err + err = f"Scenerios {err} FAILED" test.log.error(err) else: test.log.info("All utilisations match prescriptions.") @@ -1254,40 +1311,43 @@ def cpuset_cpus_switching(): try: test_time = int(params.get("cgroup_test_time", 60)) except ValueError: - raise exceptions.TestError("Incorrect configuration: param " - "cgroup_test_time have to be an integer") + raise exceptions.TestError( + "Incorrect configuration: param " + "cgroup_test_time have to be an integer" + ) test.log.info("Prepare") modules = CgroupModules() - if modules.init(['cpuset']) != 1: + if modules.init(["cpuset"]) != 1: raise exceptions.TestFail("Can't mount cpuset cgroup modules") - cgroup = Cgroup('cpuset', '') + cgroup = Cgroup("cpuset", "") cgroup.initialize(modules) timeout = int(params.get("login_timeout", 360)) vm = env.get_all_vms()[0] serial = vm.wait_for_serial_login(timeout=timeout) - vm_cpus = int(params.get('smp', 1)) + vm_cpus = int(params.get("smp", 1)) cpus = SparseRange(cgroup.get_property("cpuset.cpus")[0]) if len(cpus) < 2: - raise exceptions.TestFail("This test needs at least 2 CPUs on " - "host, cpuset=%s" % cpus) + raise exceptions.TestFail( + "This test needs at least 2 CPUs on " f"host, cpuset={cpus}" + ) # Comments are for vm_cpus=2, no_cpus=4, _SC_CLK_TCK=100 cgroup.mk_cgroup() # oooo - cgroup.set_property('cpuset.cpus', cpus.str_slice(), 0) - cgroup.set_property('cpuset.mems', 0, 0) + cgroup.set_property("cpuset.cpus", cpus.str_slice(), 0) + cgroup.set_property("cpuset.mems", 0, 0) cgroup.mk_cgroup() # O___ - cgroup.set_property('cpuset.cpus', cpus[0], 1) - cgroup.set_property('cpuset.mems', 0, 1) + cgroup.set_property("cpuset.cpus", cpus[0], 1) + cgroup.set_property("cpuset.mems", 0, 1) cgroup.mk_cgroup() # _OO_ - cgroup.set_property('cpuset.cpus', cpus.str_slice(1, 3), 2) - cgroup.set_property('cpuset.mems', 0, 2) + cgroup.set_property("cpuset.cpus", cpus.str_slice(1, 3), 2) + cgroup.set_property("cpuset.mems", 0, 2) assign_vm_into_cgroup(vm, cgroup, 0) test.log.info("Test") err = "" try: - cmd = "renice -n 10 $$; " # new ssh login should pass + cmd = "renice -n 10 $$; " # new ssh login should pass cmd += "while [ -e /tmp/cgroup-cpu-lock ]; do :; done" sessions = [] # start stressers @@ -1296,8 +1356,10 @@ def cpuset_cpus_switching(): sessions[i].cmd("touch /tmp/cgroup-cpu-lock") sessions[i].sendline(cmd) - test.log.info("Some harmless IOError messages of non-existing " - "processes might occur.") + test.log.info( + "Some harmless IOError messages of non-existing " + "processes might occur." 
+ ) i = 0 t_stop = time.time() + test_time # run for $test_time seconds while time.time() < t_stop: @@ -1310,7 +1372,7 @@ def cpuset_cpus_switching(): try: vm.verify_alive() except Exception as exc_details: - err += "VM died (no_switches=%s): %s\n" % (i, exc_details) + err += f"VM died (no_switches={i}): {exc_details}\n" if err: err = err[:-1] @@ -1346,27 +1408,28 @@ def cpuset_mems_switching(): 'by default 1/2 of VM memory' """ test.log.info("Init") - test_time = int(params.get('cgroup_test_time', 10)) + test_time = int(params.get("cgroup_test_time", 10)) vm = env.get_all_vms()[0] test.log.info("Prepare") modules = CgroupModules() - if modules.init(['cpuset']) != 1: + if modules.init(["cpuset"]) != 1: raise exceptions.TestFail("Can't mount cpuset cgroup modules") - cgroup = Cgroup('cpuset', '') + cgroup = Cgroup("cpuset", "") cgroup.initialize(modules) mems = SparseRange(cgroup.get_property("cpuset.mems")[0]) if len(mems) < 2: - raise exceptions.TestSkipError("This test needs at least 2 memory nodes, " - "detected mems %s" % mems) + raise exceptions.TestSkipError( + "This test needs at least 2 memory nodes, " f"detected mems {mems}" + ) # Create cgroups all_cpus = cgroup.get_property("cpuset.cpus")[0] for mem in mems: cgroup.mk_cgroup() - cgroup.set_property('cpuset.mems', mem, -1) - cgroup.set_property('cpuset.cpus', all_cpus, -1) - cgroup.set_property('cpuset.memory_migrate', 1) + cgroup.set_property("cpuset.mems", mem, -1) + cgroup.set_property("cpuset.cpus", all_cpus, -1) + cgroup.set_property("cpuset.memory_migrate", 1) timeout = int(params.get("login_timeout", 360)) sessions = [] @@ -1374,47 +1437,53 @@ def cpuset_mems_switching(): sessions.append(vm.wait_for_login(timeout=30)) # Don't allow to specify more than 1/2 of the VM's memory - size = int(params.get('mem', 1024)) / 2 - if params.get('cgroup_cpuset_mems_mb') is not None: - size = min(size, int(params.get('cgroup_cpuset_mems_mb'))) + size = int(params.get("mem", 1024)) / 2 + if params.get("cgroup_cpuset_mems_mb") is not None: + size = min(size, int(params.get("cgroup_cpuset_mems_mb"))) test.log.info("Test") err = "" try: - test.log.info("Some harmless IOError messages of non-existing " - "processes might occur.") - sessions[0].sendline('dd if=/dev/zero of=/dev/null bs=%dM ' - 'iflag=fullblock' % size) + test.log.info( + "Some harmless IOError messages of non-existing " + "processes might occur." 
+ ) + sessions[0].sendline( + "dd if=/dev/zero of=/dev/null bs=%dM " "iflag=fullblock" % size + ) i = 0 - sessions[1].cmd('killall -SIGUSR1 dd') + sessions[1].cmd("killall -SIGUSR1 dd") t_stop = time.time() + test_time while time.time() < t_stop: i += 1 assign_vm_into_cgroup(vm, cgroup, i % len(mems)) - sessions[1].cmd('killall -SIGUSR1 dd; true') + sessions[1].cmd("killall -SIGUSR1 dd; true") try: - out = sessions[0].read_until_output_matches( - [r'(\d+)\+\d records out'])[1] - if len(re.findall(r'(\d+)\+\d records out', out)) < 2: + out = sessions[0].read_until_output_matches([r"(\d+)\+\d records out"])[ + 1 + ] + if len(re.findall(r"(\d+)\+\d records out", out)) < 2: out += sessions[0].read_until_output_matches( - [r'(\d+)\+\d records out'])[1] + [r"(\d+)\+\d records out"] + )[1] except ExpectTimeoutError: - err = ("dd didn't produce expected output: %s" % out) + err = f"dd didn't produce expected output: {out}" if not err: - sessions[1].cmd('killall dd; true') - dd_res = re.findall(r'(\d+)\+(\d+) records in', out) - dd_res += re.findall(r'(\d+)\+(\d+) records out', out) + sessions[1].cmd("killall dd; true") + dd_res = re.findall(r"(\d+)\+(\d+) records in", out) + dd_res += re.findall(r"(\d+)\+(\d+) records out", out) dd_res = [int(_[0]) + int(_[1]) for _ in dd_res] if dd_res[1] <= dd_res[0] or dd_res[3] <= dd_res[2]: - err = ("dd stoped sending bytes: %s..%s, %s..%s" % - (dd_res[0], dd_res[1], dd_res[2], dd_res[3])) + err = f"dd stoped sending bytes: {dd_res[0]}..{dd_res[1]}, {dd_res[2]}..{dd_res[3]}" if err: test.log.error(err) else: - out = ("Guest moved %stimes in %s seconds while moving %d " - "blocks of %dMB each" % (i, test_time, dd_res[3], size)) + out = ( + "Guest moved %stimes in %s seconds while moving %d " + "blocks of %dMB each" % (i, test_time, dd_res[3], size) + ) test.log.info(out) finally: test.log.info("Cleanup") @@ -1443,6 +1512,7 @@ def devices_access(): :note: supported monitor CMDs are pci_add, drive_add and RH-drive_add RH-QMP-drive_add """ + def _set_permissions(cgroup, permissions): """ Wrapper for setting permissions to first cgroup @@ -1452,11 +1522,13 @@ def _set_permissions(cgroup, permissions): 'read_results': excepced read results T/F, 'write_results': expected write results T/F} """ - cgroup.set_property('devices.' + permissions['property'], - permissions['value'], - cgroup.cgroups[0], - check=permissions['check_value'], - checkprop='devices.list') + cgroup.set_property( + "devices." 
+ permissions["property"], + permissions["value"], + cgroup.cgroups[0], + check=permissions["check_value"], + checkprop="devices.list", + ) def _add_drive(monitor, monitor_type, disk, name, readonly=False): """ @@ -1472,31 +1544,37 @@ def _add_drive(monitor, monitor_type, disk, name, readonly=False): else: readonly_str = "off" if monitor_type == "HUMAN PCI_ADD": - out = monitor.cmd("pci_add auto storage file=%s,readonly=%s," - "if=virtio,id=%s" % - (disk, readonly_str, name)) - if "all in use" in out: # All PCIs used - return -1 # restart machine and try again + out = monitor.cmd( + f"pci_add auto storage file={disk},readonly={readonly_str}," + f"if=virtio,id={name}" + ) + if "all in use" in out: # All PCIs used + return -1 # restart machine and try again if name not in monitor.cmd("info block"): return False elif monitor_type == "HUMAN DRIVE_ADD": - monitor.cmd("drive_add auto file=%s,readonly=%s,if=none,id=%s" - % (disk, readonly_str, name)) + monitor.cmd( + f"drive_add auto file={disk},readonly={readonly_str},if=none,id={name}" + ) if name not in monitor.cmd("info block"): return False elif monitor_type == "HUMAN RH": - monitor.cmd("__com.redhat_drive_add id=%s,file=%s,readonly=%s" - % (name, disk, readonly_str)) + monitor.cmd( + f"__com.redhat_drive_add id={name},file={disk},readonly={readonly_str}" + ) if name not in monitor.cmd("info block"): return False elif monitor_type == "QMP RH": - monitor.cmd_obj({"execute": "__com.redhat_drive_add", - "arguments": {"file": disk, "id": name, - "readonly": readonly}}) + monitor.cmd_obj( + { + "execute": "__com.redhat_drive_add", + "arguments": {"file": disk, "id": name, "readonly": readonly}, + } + ) output = monitor.cmd_obj({"execute": "query-block"}) - for out in output['return']: + for out in output["return"]: try: - if out['device'] == name: + if out["device"] == name: return True except KeyError: pass @@ -1507,13 +1585,16 @@ def _add_drive(monitor, monitor_type, disk, name, readonly=False): return True def _get_scsi_debug_disk(): - cmd = ("ls -1 /sys/bus/pseudo/drivers/scsi_debug/adapter*/" - "host*/target*/*/block | grep ^sd | head -1") - name = process.system_output(cmd, shell=True, verbose=False, - ignore_status=True).decode() + cmd = ( + "ls -1 /sys/bus/pseudo/drivers/scsi_debug/adapter*/" + "host*/target*/*/block | grep ^sd | head -1" + ) + name = process.system_output( + cmd, shell=True, verbose=False, ignore_status=True + ).decode() if not name: return None - node_path = "/dev/%s" % name + node_path = f"/dev/{name}" if not os.path.exists(node_path): return None return node_path @@ -1527,7 +1608,7 @@ def _get_scsi_debug_disk(): if isinstance(monitor, qemu_monitor.QMPMonitor): out = monitor.cmd_obj({"execute": "query-commands"}) try: - if {'name': '__com.redhat_drive_add'} in out['return']: + if {"name": "__com.redhat_drive_add"} in out["return"]: monitor_type = "QMP RH" break except KeyError: @@ -1545,60 +1626,74 @@ def _get_scsi_debug_disk(): monitor_type = "HUMAN PCI_ADD" break if monitor_type is None: - raise exceptions.TestSkipError("Not detected any suitable monitor cmd. " - "Supported methods:\nQMP: __com.redhat_" - "drive_add\nHuman: drive_add, pci_add, " - "__com.redhat_drive_add") + raise exceptions.TestSkipError( + "Not detected any suitable monitor cmd. 
" + "Supported methods:\nQMP: __com.redhat_" + "drive_add\nHuman: drive_add, pci_add, " + "__com.redhat_drive_add" + ) test.log.debug("Using monitor type: %s", monitor_type) modules = CgroupModules() - if modules.init(['devices']) != 1: + if modules.init(["devices"]) != 1: raise exceptions.TestFail("Can't mount blkio cgroup modules") - devices = Cgroup('devices', '') + devices = Cgroup("devices", "") devices.initialize(modules) devices.mk_cgroup() # Add one scsi_debug disk which will be used in testing - if process.system("lsmod | grep scsi_debug", ignore_status=True, - shell=True): + if process.system("lsmod | grep scsi_debug", ignore_status=True, shell=True): process.system("modprobe scsi_debug dev_size_mb=8 add_host=0") - process.system("echo 1 > /sys/bus/pseudo/drivers/scsi_debug/add_host", - shell=True) + process.system( + "echo 1 > /sys/bus/pseudo/drivers/scsi_debug/add_host", shell=True + ) disk = utils_misc.wait_for(_get_scsi_debug_disk, 5) if not disk: test.error("Could not get the device node generated by scsi_debug") - dev = "%s:%s" % get_maj_min(disk) + dev = "{}:{}".format(*get_maj_min(disk)) permissions = [ - {'property': 'deny', - 'value': 'a', - 'check_value': '', - 'result': False, - 'result_read': False}, - {'property': 'allow', - 'value': 'b %s r' % dev, - 'check_value': True, - 'result': False, - 'result_read': True}, - {'property': 'allow', - 'value': 'b %s w' % dev, - 'check_value': 'b %s rw' % dev, - 'result': True, - 'result_read': True}, - {'property': 'deny', - 'value': 'b %s r' % dev, - 'check_value': 'b %s w' % dev, - 'result': False, - 'result_read': False}, - {'property': 'deny', - 'value': 'b %s w' % dev, - 'check_value': '', - 'result': False, - 'result_read': False}, - {'property': 'allow', - 'value': 'a', - 'check_value': 'a *:* rwm', - 'result': True, - 'result_read': True}, + { + "property": "deny", + "value": "a", + "check_value": "", + "result": False, + "result_read": False, + }, + { + "property": "allow", + "value": f"b {dev} r", + "check_value": True, + "result": False, + "result_read": True, + }, + { + "property": "allow", + "value": f"b {dev} w", + "check_value": f"b {dev} rw", + "result": True, + "result_read": True, + }, + { + "property": "deny", + "value": f"b {dev} r", + "check_value": f"b {dev} w", + "result": False, + "result_read": False, + }, + { + "property": "deny", + "value": f"b {dev} w", + "check_value": "", + "result": False, + "result_read": False, + }, + { + "property": "allow", + "value": "a", + "check_value": "a *:* rwm", + "result": True, + "result_read": True, + }, ] assign_vm_into_cgroup(vm, devices, 0) @@ -1611,34 +1706,35 @@ def _get_scsi_debug_disk(): while i < len(permissions): perm = permissions[i] _set_permissions(devices, perm) - test.log.debug("Setting permissions: {%s: %s}, value: %s", - perm['property'], perm['value'], - devices.get_property('devices.list', 0)) + test.log.debug( + "Setting permissions: {%s: %s}, value: %s", + perm["property"], + perm["value"], + devices.get_property("devices.list", 0), + ) results = "" - out = _add_drive(monitor, monitor_type, disk, name % ("R", i), - True) + out = _add_drive(monitor, monitor_type, disk, name % ("R", i), True) if out == -1: - test.log.warn("All PCIs full, recreating VM") + test.log.warning("All PCIs full, recreating VM") vm.create() monitor = vm.monitors[i_monitor] assign_vm_into_cgroup(vm, devices, 0) continue - if perm['result_read'] and not out: + if perm["result_read"] and not out: results += "ReadNotAttached, " - elif not perm['result_read'] and out: + elif not 
perm["result_read"] and out: results += "ReadAttached, " - out = _add_drive(monitor, monitor_type, disk, name % ("RW", i), - False) + out = _add_drive(monitor, monitor_type, disk, name % ("RW", i), False) if out == -1: - test.log.warn("All PCIs full, recreating VM") + test.log.warning("All PCIs full, recreating VM") vm.create() monitor = vm.monitors[i_monitor] assign_vm_into_cgroup(vm, devices, 0) continue - if perm['result'] and not out: + if perm["result"] and not out: results += "RWNotAttached, " - elif not perm['result'] and out: + elif not perm["result"] and out: results += "RWAttached, " if results: @@ -1649,14 +1745,14 @@ def _get_scsi_debug_disk(): i += 1 if err: - err = "Some restrictions weren't enforced:\n%s" % err[:-2] + err = f"Some restrictions weren't enforced:\n{err[:-2]}" test.log.error(err) else: test.log.info("All restrictions enforced.") finally: test.log.info("Cleanup") - vm.destroy() # "Safely" remove devices :-) + vm.destroy() # "Safely" remove devices :-) rm_scsi_disks(1) del devices del modules @@ -1674,6 +1770,7 @@ def freezer(): and unfreeze it again) :param cfg: cgroup_test_time - test duration '60' """ + def _get_stat(pid): """ Gather statistics of pid+1st level subprocesses cpu usage @@ -1683,24 +1780,25 @@ def _get_stat(pid): out = None for i in range(10): try: - out = process.getoutput("cat /proc/%s/task/*/stat" % - pid, shell=True) + out = process.getoutput(f"cat /proc/{pid}/task/*/stat", shell=True) except process.CmdError: out = None else: break - out = out.split('\n') + out = out.split("\n") ret = 0 for i in out: - ret += sum([int(_) for _ in i.split(' ')[13:17]]) + ret += sum([int(_) for _ in i.split(" ")[13:17]]) return ret test.log.info("Init") try: test_time = int(params.get("cgroup_test_time", 60)) except ValueError: - raise exceptions.TestError("Incorrect configuration: param " - "cgroup_test_time have to be an integer") + raise exceptions.TestError( + "Incorrect configuration: param " + "cgroup_test_time have to be an integer" + ) timeout = int(params.get("login_timeout", 360)) vm = env.get_all_vms()[0] @@ -1712,9 +1810,9 @@ def _get_stat(pid): test.log.info("Prepare") modules = CgroupModules() - if modules.init(['freezer']) != 1: + if modules.init(["freezer"]) != 1: raise exceptions.TestFail("Can't mount freezer cgroup modules") - cgroup = Cgroup('freezer', '') + cgroup = Cgroup("freezer", "") cgroup.initialize(modules) cgroup.mk_cgroup() assign_vm_into_cgroup(vm, cgroup, 0) @@ -1723,8 +1821,8 @@ def _get_stat(pid): err = "" try: for session in sessions: - session.cmd('touch /tmp/freeze-lock') - session.sendline('while [ -e /tmp/freeze-lock ]; do :; done') + session.cmd("touch /tmp/freeze-lock") + session.sendline("while [ -e /tmp/freeze-lock ]; do :; done") cgroup = cgroup pid = vm.get_pid() @@ -1732,28 +1830,33 @@ def _get_stat(pid): for tsttime in [0.5, 3, test_time]: test.log.debug("FREEZING (%ss)", tsttime) # Freezing takes some time, DL is 1s - cgroup.set_property('freezer.state', 'FROZEN', - cgroup.cgroups[0], check=False) + cgroup.set_property( + "freezer.state", "FROZEN", cgroup.cgroups[0], check=False + ) time.sleep(1) - _ = cgroup.get_property('freezer.state', 0) - if 'FROZEN' not in _: - err = "Coundn't freze the VM: state %s" % _ + _ = cgroup.get_property("freezer.state", 0) + if "FROZEN" not in _: + err = f"Coundn't freze the VM: state {_}" break stat_ = _get_stat(pid) time.sleep(tsttime) stat = _get_stat(pid) if stat != stat_: - err = ('Process was running in FROZEN state; stat=%s, ' - 'stat_=%s, diff=%s' % (stat, stat_, stat - 
stat_)) + err = ( + f"Process was running in FROZEN state; stat={stat}, " + f"stat_={stat_}, diff={stat - stat_}" + ) break test.log.debug("THAWING (%ss)", tsttime) - cgroup.set_property('freezer.state', 'THAWED', 0) + cgroup.set_property("freezer.state", "THAWED", 0) stat_ = _get_stat(pid) time.sleep(tsttime) stat = _get_stat(pid) if (stat - stat_) < (90 * tsttime): - err = ('Process was not active in FROZEN state; stat=%s, ' - 'stat_=%s, diff=%s' % (stat, stat_, stat - stat_)) + err = ( + f"Process was not active in FROZEN state; stat={stat}, " + f"stat_={stat_}, diff={stat - stat_}" + ) break if err: @@ -1784,10 +1887,8 @@ def _get_rss(status): The RssShmem can be accounted to different process making the overall sum of VmRSS greater than set in limit_in_bytes """ - rss = int(re.search(r'VmRSS:[\t ]*(\d+) kB', status) - .group(1)) - shmem = int(re.search(r'RssShmem:[\t ]*(\d+) kB', status) - .group(1)) + rss = int(re.search(r"VmRSS:[\t ]*(\d+) kB", status).group(1)) + shmem = int(re.search(r"RssShmem:[\t ]*(\d+) kB", status).group(1)) return rss - shmem @error_context.context_aware @@ -1805,70 +1906,79 @@ def memory_limit(memsw=False): """ test.log.info("Init") try: - mem_limit = params.get('cgroup_memory_limit_kb', None) + mem_limit = params.get("cgroup_memory_limit_kb", None) if mem_limit is not None: mem_limit = int(mem_limit) except ValueError: - raise exceptions.TestError("Incorrect configuration: param cgroup_" - "memory_limit_kb have to be an integer") + raise exceptions.TestError( + "Incorrect configuration: param cgroup_" + "memory_limit_kb have to be an integer" + ) vm = env.get_all_vms()[0] test.log.info("Prepare") # Don't allow to specify more than 1/2 of the VM's memory - mem = int(params.get('mem', 1024)) * 512 + mem = int(params.get("mem", 1024)) * 512 if mem_limit: mem = min(mem, mem_limit) else: mem_limit = mem # There have to be enough free swap space and hugepages can't be used if not memsw: - if params.get('setup_hugepages') == 'yes': + if params.get("setup_hugepages") == "yes": err = "Hugepages can't be used in this test." test.log.error(err) raise exceptions.TestSkipError(err) - if utils_memory.read_from_meminfo('SwapFree') < (mem * 0.1): + if utils_memory.read_from_meminfo("SwapFree") < (mem * 0.1): err = "Not enough free swap space" test.log.error(err) raise exceptions.TestSkipError(err) # We want to copy slightely over "mem" limit mem *= 1.1 modules = CgroupModules() - if modules.init(['memory']) != 1: + if modules.init(["memory"]) != 1: raise exceptions.TestFail("Can't mount memory cgroup modules") - cgroup = Cgroup('memory', '') + cgroup = Cgroup("memory", "") cgroup.initialize(modules) cgroup.mk_cgroup() - cgroup.set_property('memory.move_charge_at_immigrate', '3', 0) - cgroup.set_property_h('memory.limit_in_bytes', "%dK" % mem_limit, 0) + cgroup.set_property("memory.move_charge_at_immigrate", "3", 0) + cgroup.set_property_h("memory.limit_in_bytes", "%dK" % mem_limit, 0) if memsw: try: cgroup.get_property("memory.memsw.limit_in_bytes", 0) except exceptions.TestError as details: - test.log.error("Can't get memory.memsw.limit_in_bytes info." - "Do you have support for memsw? (try passing" - "swapaccount=1 parameter to kernel):%s", details) - raise exceptions.TestSkipError("System doesn't support memory.memsw.*" - " or swapaccount is disabled.") - cgroup.set_property_h('memory.memsw.limit_in_bytes', - "%dK" % mem_limit, 0) + test.log.error( + "Can't get memory.memsw.limit_in_bytes info." + "Do you have support for memsw? 
(try passing" + "swapaccount=1 parameter to kernel):%s", + details, + ) + raise exceptions.TestSkipError( + "System doesn't support memory.memsw.*" + " or swapaccount is disabled." + ) + cgroup.set_property_h("memory.memsw.limit_in_bytes", "%dK" % mem_limit, 0) test.log.info("Expected VM reload") try: vm.create() except Exception as failure_detail: - raise exceptions.TestFail("init: Failed to recreate the VM: %s" % - failure_detail) + raise exceptions.TestFail( + f"init: Failed to recreate the VM: {failure_detail}" + ) assign_vm_into_cgroup(vm, cgroup, 0) timeout = int(params.get("login_timeout", 360)) session = vm.wait_for_login(timeout=timeout) # VM already eat-up more than allowed by this cgroup - fstats = open('/proc/%s/status' % vm.get_pid(), 'r') + fstats = open(f"/proc/{vm.get_pid()}/status", "r") rss = _get_rss(fstats.read()) if rss > mem_limit: - raise exceptions.TestFail("Init failed to move VM into cgroup, VmRss" - "=%s, expected=%s" % (rss, mem_limit)) + raise exceptions.TestFail( + "Init failed to move VM into cgroup, VmRss" + f"={rss}, expected={mem_limit}" + ) try: test.log.info("Test") @@ -1881,8 +1991,9 @@ def memory_limit(memsw=False): * Max execution time is limited to mem / 10 * Checking every 0.1s """ - session.sendline('dd if=/dev/zero of=/dev/null bs=%dK count=1 ' - 'iflag=fullblock' % mem) + session.sendline( + "dd if=/dev/zero of=/dev/null bs=%dK count=1 " "iflag=fullblock" % mem + ) max_rss = 0 max_rssswap = 0 @@ -1894,8 +2005,7 @@ def memory_limit(memsw=False): status = fstats.read() rss = _get_rss(status) max_rss = max(rss, max_rss) - swap = int(re.search(r'VmSwap:[\t ]*(\d+) kB', status) - .group(1)) + swap = int(re.search(r"VmSwap:[\t ]*(\d+) kB", status).group(1)) max_rssswap = max(rss + swap, max_rssswap) except Exception as details: if memsw and not vm.is_alive(): @@ -1918,13 +2028,17 @@ def memory_limit(memsw=False): break except ExpectProcessTerminatedError as detail: if memsw: - err = ("dd command died (VM should die instead): %s\n" - "Output:%s\n" % (detail, out)) + err = ( + f"dd command died (VM should die instead): {detail}\n" + f"Output:{out}\n" + ) else: - err = ("dd command died (should pass): %s\nOutput:" - "\n%s" % (detail, out)) + err = ( + f"dd command died (should pass): {detail}\nOutput:" + f"\n{out}" + ) break - else: # dd command finished + else: # dd command finished break test.log.info("Verification") @@ -1932,30 +2046,32 @@ def memory_limit(memsw=False): test.log.error(err) elif memsw: if max_rssswap > mem_limit: - err = ("The limit was broken: max_rssswap=%s, limit=%s" % - (max_rssswap, mem_limit)) + err = f"The limit was broken: max_rssswap={max_rssswap}, limit={mem_limit}" elif vm.process.get_status() != 137: # err: Limit exceeded - err = ("VM exit code is %s (should be %s)" % - (vm.process.get_status(), 137)) + err = f"VM exit code is {vm.process.get_status()} (should be {137})" else: - out = ("VM terminated as expected. Used rss+swap: %d, " - "limit %s" % (max_rssswap, mem_limit)) + out = ( + "VM terminated as expected. 
Used rss+swap: %d, " + "limit %s" % (max_rssswap, mem_limit) + ) test.log.info(out) - else: # only RSS limit + else: # only RSS limit exit_nr = session.cmd_output("echo $?")[:-1] if max_rss > mem_limit * 1.05: # Allow 5% pages to be in-progress of swapping out - err = ("The limit was broken: max_rss=%s, limit=%s (+5%%)" - % (max_rss, mem_limit)) - elif exit_nr != '0': - err = ("dd command failed(%s) output: %s" % (exit_nr, out)) + err = f"The limit was broken: max_rss={max_rss}, limit={mem_limit} (+5%)" + elif exit_nr != "0": + err = f"dd command failed({exit_nr}) output: {out}" elif max_rssswap < mem_limit: - err = ("VM didn't consume expected amount of memory. %d:%d" - " Output of dd cmd: %s" % (max_rssswap, mem_limit, - out)) + err = ( + "VM didn't consume expected amount of memory. %d:%d" + " Output of dd cmd: %s" % (max_rssswap, mem_limit, out) + ) else: - out = ("Created %dMB block with %.2f memory overcommit" % - (mem / 1024, float(max_rssswap) / mem_limit)) + out = "Created %dMB block with %.2f memory overcommit" % ( + mem / 1024, + float(max_rssswap) / mem_limit, + ) test.log.info(out) finally: @@ -1994,20 +2110,20 @@ def memory_move(): 'by default 1/2 of VM memory' """ test.log.info("Init") - test_time = int(params.get('cgroup_test_time', 10)) + test_time = int(params.get("cgroup_test_time", 10)) vm = env.get_all_vms()[0] test.log.info("Prepare") modules = CgroupModules() - if modules.init(['memory']) != 1: + if modules.init(["memory"]) != 1: raise exceptions.TestFail("Can't mount memory cgroup modules") - cgroup = Cgroup('memory', '') + cgroup = Cgroup("memory", "") cgroup.initialize(modules) # Two cgroups cgroup.mk_cgroup() cgroup.mk_cgroup() - cgroup.set_property('memory.move_charge_at_immigrate', '3', 0) - cgroup.set_property('memory.move_charge_at_immigrate', '3', 1) + cgroup.set_property("memory.move_charge_at_immigrate", "3", 0) + cgroup.set_property("memory.move_charge_at_immigrate", "3", 1) timeout = int(params.get("login_timeout", 360)) sessions = [] @@ -2015,53 +2131,59 @@ def memory_move(): sessions.append(vm.wait_for_login(timeout=30)) # Don't allow to specify more than 1/2 of the VM's memory - size = int(params.get('mem', 1024)) / 2 - if params.get('cgroup_memory_move_mb') is not None: - size = min(size, int(params.get('cgroup_memory_move_mb'))) + size = int(params.get("mem", 1024)) / 2 + if params.get("cgroup_memory_move_mb") is not None: + size = min(size, int(params.get("cgroup_memory_move_mb"))) err = "" try: test.log.info("Test") - test.log.info("Some harmless IOError messages of non-existing " - "processes might occur.") - sessions[0].sendline('dd if=/dev/zero of=/dev/null bs=%dM ' - 'iflag=fullblock' % size) + test.log.info( + "Some harmless IOError messages of non-existing " + "processes might occur." 
+ ) + sessions[0].sendline( + "dd if=/dev/zero of=/dev/null bs=%dM " "iflag=fullblock" % size + ) i = 0 - sessions[1].cmd('killall -SIGUSR1 dd ; true') + sessions[1].cmd("killall -SIGUSR1 dd ; true") t_stop = time.time() + test_time while time.time() < t_stop: i += 1 assign_vm_into_cgroup(vm, cgroup, i % 2) - sessions[1].cmd('killall -SIGUSR1 dd; true') + sessions[1].cmd("killall -SIGUSR1 dd; true") try: - out = sessions[0].read_until_output_matches( - [r'(\d+)\+\d records out'])[1] - if len(re.findall(r'(\d+)\+\d records out', out)) < 2: + out = sessions[0].read_until_output_matches([r"(\d+)\+\d records out"])[ + 1 + ] + if len(re.findall(r"(\d+)\+\d records out", out)) < 2: out += sessions[0].read_until_output_matches( - [r'(\d+)\+\d records out'])[1] + [r"(\d+)\+\d records out"] + )[1] except ExpectTimeoutError: - err = ("dd didn't produce expected output: %s" % out) + err = f"dd didn't produce expected output: {out}" if not err: - sessions[1].cmd('killall dd; true') - dd_res = re.findall(r'(\d+)\+(\d+) records in', out) - dd_res += re.findall(r'(\d+)\+(\d+) records out', out) + sessions[1].cmd("killall dd; true") + dd_res = re.findall(r"(\d+)\+(\d+) records in", out) + dd_res += re.findall(r"(\d+)\+(\d+) records out", out) dd_res = [int(_[0]) + int(_[1]) for _ in dd_res] if dd_res[1] <= dd_res[0] or dd_res[3] <= dd_res[2]: - err = ("dd stoped sending bytes: %s..%s, %s..%s" % - (dd_res[0], dd_res[1], dd_res[2], dd_res[3])) + err = f"dd stoped sending bytes: {dd_res[0]}..{dd_res[1]}, {dd_res[2]}..{dd_res[3]}" if err: test.log.error(err) else: - out = ("Guest moved %stimes in %s seconds while moving %d " - "blocks of %dMB each" % (i, test_time, dd_res[3], size)) + out = ( + "Guest moved %stimes in %s seconds while moving %d " + "blocks of %dMB each" % (i, test_time, dd_res[3], size) + ) test.log.info(out) finally: test.log.info("Cleanup") - sessions[1].cmd('killall dd; true') + sessions[1].cmd("killall dd; true") for session in sessions: session.cmd("true") session.close() @@ -2078,18 +2200,19 @@ def cfs_bandwidth(): """ CFS bandwidth limit """ + def stress_thread(): session.cmd(stress_cmd, 120) test.log.info("Setup cgroup subsystem: cpu") modules = CgroupModules() - if modules.init(['cpu']) != 1: + if modules.init(["cpu"]) != 1: test.fail("Can't mount cpu cgroup modules") - cgroup = Cgroup('cpu', '') + cgroup = Cgroup("cpu", "") cgroup.initialize(modules) # Create VM with smp=1 - params['smp'] = 1 + params["smp"] = 1 params["vcpu_sockets"] = 1 vm_name = params["main_vm"] params["start_vm"] = "yes" @@ -2114,26 +2237,31 @@ def stress_thread(): count = 0 while count < 6: time.sleep(10) - o = process.system_output("top -b -p %s -n 3 | tail -1" % vm.get_pid(), - shell=True).decode() + o = process.system_output( + f"top -b -p {vm.get_pid()} -n 3 | tail -1", shell=True + ).decode() if float(o.split()[-4]) <= 51: count = count + 1 - test.log.debug("CPU utilization of guest is: %.2f%%", - float(o.split()[-4])) + test.log.debug( + "CPU utilization of guest is: %.2f%%", float(o.split()[-4]) + ) else: - test.fail("CPU utilization of guest is: %.2f%%, it should be about " - "50" % float(o.split()[-4])) + test.fail( + f"CPU utilization of guest is: {float(o.split()[-4]):.2f}%, it should be about " + "50" + ) stress_t.join() # Main # Executes test specified by cgroup_test variable in cfg fce = None - _fce = params.get('cgroup_test') + _fce = params.get("cgroup_test") test.log.info("Executing test: %s", _fce) try: fce = locals()[_fce] except KeyError: - raise exceptions.TestSkipError("Test %s doesn't 
exist. Check 'cgroup_test' " - "variable in subtest.cfg" % _fce) + raise exceptions.TestSkipError( + f"Test {_fce} doesn't exist. Check 'cgroup_test' " "variable in subtest.cfg" + ) else: return fce() diff --git a/qemu/tests/change_media.py b/qemu/tests/change_media.py index 35464f7596..13463ca3a5 100644 --- a/qemu/tests/change_media.py +++ b/qemu/tests/change_media.py @@ -1,10 +1,7 @@ import re -import six -from virttest import error_context -from virttest import utils_misc -from virttest import utils_test -from virttest import data_dir +import six +from virttest import data_dir, error_context, utils_misc, utils_test from virttest.qemu_capabilities import Flags from virttest.qemu_storage import QemuImg @@ -39,10 +36,12 @@ def check_block_locked(block_name): else: for block in blocks_info: if vm.check_capability(Flags.BLOCKDEV): - condition = block['qdev'] == vm.devices.get_qdev_by_drive(block_name) + condition = block["qdev"] == vm.devices.get_qdev_by_drive( + block_name + ) else: - condition = block['device'] == block_name - if condition and block['locked']: + condition = block["device"] == block_name + if condition and block["locked"]: return True return False @@ -56,49 +55,46 @@ def change_block(cmd=None): def get_qdev_by_filename(filename): for info_dict in vm.monitor.info("block"): if filename in str(info_dict): - return info_dict['qdev'] + return info_dict["qdev"] vm = env.get_vm(params["main_vm"]) vm.verify_alive() - monitor = vm.get_monitors_by_type('qmp') + monitor = vm.get_monitors_by_type("qmp") if monitor: monitor = monitor[0] else: - test.log.warn("qemu does not support qmp. Human monitor will be used.") + test.log.warning("qemu does not support qmp. Human monitor will be used.") monitor = vm.monitor session = vm.wait_for_login(timeout=int(params.get("login_timeout", 360))) cdrom = params.get("cdrom_cd1") cdrom = utils_misc.get_path(data_dir.get_data_dir(), cdrom) device_name = vm.get_block({"file": cdrom}) if device_name is None: - msg = "Unable to detect qemu block device for cdrom %s" % cdrom + msg = f"Unable to detect qemu block device for cdrom {cdrom}" test.error(msg) orig_img_name = params.get("orig_img_name") - change_insert_cmd = "change device=%s,target=%s" % (device_name, - orig_img_name) + change_insert_cmd = f"change device={device_name},target={orig_img_name}" if vm.check_capability(Flags.BLOCKDEV): qdev = vm.devices.get_qdev_by_drive(device_name) monitor.blockdev_open_tray(qdev, force=True) - change_insert_cmd = ("blockdev-change-medium id=%s,filename=%s" % - (qdev, orig_img_name)) + change_insert_cmd = f"blockdev-change-medium id={qdev},filename={orig_img_name}" monitor.send_args_cmd(change_insert_cmd) test.log.info("Wait until device is ready") - exists = utils_misc.wait_for(lambda: (orig_img_name in - str(monitor.info("block")) - ), timeout=10, first=3) + exists = utils_misc.wait_for( + lambda: (orig_img_name in str(monitor.info("block"))), timeout=10, first=3 + ) if not exists: - msg = "Fail to insert device %s to guest" % orig_img_name + msg = f"Fail to insert device {orig_img_name} to guest" test.fail(msg) if params.get("os_type") != "windows": error_context.context("lock cdrom in guest", test.log.info) lock_cmd = "eject -i on /dev/cdrom" session.cmd(lock_cmd) - error_context.context("mount cdrom to make status to locked", - test.log.info) - cdroms = utils_misc.wait_for(lambda: (utils_test.get_readable_cdroms( - params, session)), - timeout=10) + error_context.context("mount cdrom to make status to locked", test.log.info) + cdroms = utils_misc.wait_for( + 
lambda: (utils_test.get_readable_cdroms(params, session)), timeout=10 + ) if not cdroms: test.fail("Not readable cdrom found in your guest") cdrom = cdroms[0] @@ -106,22 +102,21 @@ def get_qdev_by_filename(filename): (status, output) = session.cmd_status_output(mount_cmd, timeout=360) if status: msg = "Unable to mount cdrom. " - msg += "command: %s\nOutput: %s" % (mount_cmd, output) + msg += f"command: {mount_cmd}\nOutput: {output}" test.error(msg) else: error_context.context("lock cdrom in guest", test.log.info) tmp_dir = params.get("tmp_dir", "c:\\") - eject_tool = utils_misc.get_path(data_dir.get_deps_dir(), - "cdrom/eject.exe") + eject_tool = utils_misc.get_path(data_dir.get_deps_dir(), "cdrom/eject.exe") vm.copy_files_to(eject_tool, tmp_dir) output = session.cmd("wmic cdrom get Drive", timeout=120) cd_vol = re.findall("[d-z]:", output, re.I)[0] - lock_cmd = "%s\\eject.exe -i on %s" % (tmp_dir, cd_vol) + lock_cmd = f"{tmp_dir}\\eject.exe -i on {cd_vol}" (status, output) = session.cmd_status_output(lock_cmd) if status: - msg = "Unable to lock cdrom. command: %s\n" % lock_cmd - msg += "Output: %s" % output + msg = f"Unable to lock cdrom. command: {lock_cmd}\n" + msg += f"Output: {output}" test.error(msg) if not check_block_locked(device_name): @@ -129,33 +124,32 @@ def get_qdev_by_filename(filename): error_context.context("Change media of cdrom", test.log.info) new_img_name = params.get("new_img_name") - change_insert_cmd = "change device=%s,target=%s" % (device_name, - new_img_name) + change_insert_cmd = f"change device={device_name},target={new_img_name}" if vm.check_capability(Flags.BLOCKDEV): - change_insert_cmd = ("blockdev-change-medium id=%s,filename=%s" % ( - vm.devices.get_qdev_by_drive(device_name), new_img_name)) + change_insert_cmd = f"blockdev-change-medium id={vm.devices.get_qdev_by_drive(device_name)},filename={new_img_name}" output = change_block(change_insert_cmd) if not ("is locked" in output or "is not open" in output): - msg = ("%s is not locked or is open " - "after execute command %s " - "command output: %s " % ( - device_name, change_insert_cmd, output)) + msg = ( + f"{device_name} is not locked or is open " + f"after execute command {change_insert_cmd} " + f"command output: {output} " + ) test.fail(msg) blocks_info = monitor.info("block") if orig_img_name not in str(blocks_info): - test.fail("Locked device %s is changed!" 
% orig_img_name) + test.fail(f"Locked device {orig_img_name} is changed!") error_context.context("Change no-removable device", test.log.info) device_name = vm.get_block({"removable": False}) if device_name is None: test.error("VM doesn't have any non-removable devices.") - change_insert_cmd = "change device=%s,target=%s" % (device_name, - new_img_name) + change_insert_cmd = f"change device={device_name},target={new_img_name}" if vm.check_capability(Flags.BLOCKDEV): - sys_image = QemuImg(params, data_dir.get_data_dir(), params['images'].split()[0]) - change_insert_cmd = ("blockdev-change-medium id=%s,filename=%s" % ( - get_qdev_by_filename(sys_image.image_filename), new_img_name)) + sys_image = QemuImg( + params, data_dir.get_data_dir(), params["images"].split()[0] + ) + change_insert_cmd = f"blockdev-change-medium id={get_qdev_by_filename(sys_image.image_filename)},filename={new_img_name}" output = change_block(change_insert_cmd) if "is not removable" not in output: test.fail("Could remove non-removable device!") diff --git a/qemu/tests/chardev_acpi.py b/qemu/tests/chardev_acpi.py index 1fde97d404..1192b337b3 100644 --- a/qemu/tests/chardev_acpi.py +++ b/qemu/tests/chardev_acpi.py @@ -1,7 +1,5 @@ -from virttest import error_context -from virttest import env_process - from avocado.utils import process +from virttest import env_process, error_context @error_context.context_aware @@ -20,19 +18,21 @@ def run(test, params, env): :param env: Dictionary with test environment. """ outputs = [] - check_cmd = params['check_cmd'] + check_cmd = params["check_cmd"] host_output = process.getoutput(check_cmd)[:35] outputs.append(host_output) for x in range(2): if x >= 1: - params['serials'] = " ".join(params['serials'].split()[:-1]) - params['extra_params'] = params.get('extra_params', '') + ' -serial /dev/ttyS0' + params["serials"] = " ".join(params["serials"].split()[:-1]) + params["extra_params"] = ( + params.get("extra_params", "") + " -serial /dev/ttyS0" + ) env_process.preprocess(test, params, env) vm = env.get_vm(params["main_vm"]) session = vm.wait_for_login() vm_output = session.cmd_status_output(check_cmd)[1][:35] outputs.append(vm_output) vm.destroy() - assert outputs.count(outputs[0]) == len(outputs), \ - "Host: {} and VM 1: {} and VM 2: {} are not the same".\ - format(outputs[0], outputs[1], outputs[2]) + assert ( + outputs.count(outputs[0]) == len(outputs) + ), f"Host: {outputs[0]} and VM 1: {outputs[1]} and VM 2: {outputs[2]} are not the same" diff --git a/qemu/tests/chardev_free_port.py b/qemu/tests/chardev_free_port.py index fb289e3a0b..115e16a243 100644 --- a/qemu/tests/chardev_free_port.py +++ b/qemu/tests/chardev_free_port.py @@ -1,5 +1,4 @@ -from virttest import error_context, env_process -from virttest import utils_misc +from virttest import env_process, error_context, utils_misc from qemu.tests.virtio_console import add_chardev @@ -24,31 +23,43 @@ def run(test, params, env): chardev_params = char_device.params for vm_ind, vm in enumerate(vms): if vm_ind == 1: - host = chardev_params['host'] - chardev_to = utils_misc.find_free_ports(int(chardev_params['port']) + 1, 6000, 1, host) - chardev_params['to'] = str(chardev_to[0]) + host = chardev_params["host"] + chardev_to = utils_misc.find_free_ports( + int(chardev_params["port"]) + 1, 6000, 1, host + ) + chardev_params["to"] = str(chardev_to[0]) - extra_params = ' ' + char_device.cmdline() - params['extra_params_%s' % vm] = params.get('extra_params', '') + extra_params - params['start_vm_%s' % vm] = "yes" + extra_params = " " + 
char_device.cmdline() + params[f"extra_params_{vm}"] = params.get("extra_params", "") + extra_params + params[f"start_vm_{vm}"] = "yes" env_process.preprocess(test, params, env) for vm in vms: _vm = env.get_vm(vm) chardev_infos.append(_vm.monitor.info("chardev")) - _port, _to = int(chardev_params['port']), int(chardev_params['to']) + _port, _to = int(chardev_params["port"]), int(chardev_params["to"]) for char_ind, chardevs in enumerate(chardev_infos): in_chardev = False for chardev in chardevs: - if chardev['label'] == chardev_params['id']: - tmp_pnum = int(chardev['filename'].split(':')[-1].split(',')[0]) - error_context.context("Get port %d for vm%d from monitor" % (tmp_pnum, char_ind), test.log.info) + if chardev["label"] == chardev_params["id"]: + tmp_pnum = int(chardev["filename"].split(":")[-1].split(",")[0]) + error_context.context( + "Get port %d for vm%d from monitor" % (tmp_pnum, char_ind), + test.log.info, + ) break if char_ind == 0: - error_context.context("The expect port for vm%d is %d" % (char_ind, _port), test.log.info) + error_context.context( + "The expect port for vm%d is %d" % (char_ind, _port), test.log.info + ) if tmp_pnum == _port: in_chardev = True else: - error_context.context("The expect port for vm%d is in [%d, %d]" % (char_ind, _port + 1, _to), test.log.info) + error_context.context( + "The expect port for vm%d is in [%d, %d]" % (char_ind, _port + 1, _to), + test.log.info, + ) if tmp_pnum > _port and tmp_pnum <= _to: in_chardev = True - assert in_chardev is True, 'The actual port does not match with the expect port in VM %d' % char_ind + assert in_chardev is True, ( + "The actual port does not match with the expect port in VM %d" % char_ind + ) diff --git a/qemu/tests/chardev_hotplug.py b/qemu/tests/chardev_hotplug.py index 7c362ca10d..83ad1b8765 100644 --- a/qemu/tests/chardev_hotplug.py +++ b/qemu/tests/chardev_hotplug.py @@ -1,9 +1,7 @@ import os from avocado.utils import process - -from virttest import arch -from virttest import error_context +from virttest import arch, error_context @error_context.context_aware @@ -32,30 +30,30 @@ def cmd_qmp_log(vm, cmd, args): test.log.debug("[qmp reply] %s", reply) if "error" in reply: if reply["error"]["class"] == "CommandNotFound": - test.cancel("qmp command %s not supported" % cmd) + test.cancel(f"qmp command {cmd} not supported") else: - test.fail("qmp error: %s" % reply["error"]["desc"]) + test.fail("qmp error: {}".format(reply["error"]["desc"])) return reply def pci_serial_add(vm, name, addr, chardev): - reply = cmd_qmp_log(vm, 'device_add', {'driver': 'pci-serial', - 'id': name, - 'addr': addr, - 'chardev': chardev}) + reply = cmd_qmp_log( + vm, + "device_add", + {"driver": "pci-serial", "id": name, "addr": addr, "chardev": chardev}, + ) return reply def device_del(vm, name): - reply = cmd_qmp_log(vm, 'device_del', {'id': name}) + reply = cmd_qmp_log(vm, "device_del", {"id": name}) return reply def chardev_add(vm, name, kind, args): - backend = {'type': kind, 'data': args} - reply = cmd_qmp_log(vm, 'chardev-add', {'id': name, - 'backend': backend}) + backend = {"type": kind, "data": args} + reply = cmd_qmp_log(vm, "chardev-add", {"id": name, "backend": backend}) return reply def chardev_del(vm, name): - reply = cmd_qmp_log(vm, 'chardev-remove', {'id': name}) + reply = cmd_qmp_log(vm, "chardev-remove", {"id": name}) return reply def chardev_use(vm, name): @@ -65,19 +63,19 @@ def chardev_use(vm, name): pci_serial_add(vm, "test-serial", addr, name) session.cmd_status("sleep 1") session.cmd_status("udevadm settle") - 
msg_add = session.cmd("dmesg -c | grep %s" % addr) + msg_add = session.cmd(f"dmesg -c | grep {addr}") for line in msg_add.splitlines(): test.log.debug("[dmesg add] %s", line) - lspci = session.cmd("lspci -vs %s" % addr) + lspci = session.cmd(f"lspci -vs {addr}") for line in lspci.splitlines(): test.log.debug("[lspci] %s", line) # send message - device = session.cmd("ls /sys/bus/pci/devices/*%s/tty" % addr) + device = session.cmd(f"ls /sys/bus/pci/devices/*{addr}/tty") device = device.strip() test.log.info("guest tty device is '%s'", device) - session.cmd("test -c /dev/%s" % device) - session.cmd("echo 'Hello virttest world' > /dev/%s" % device) + session.cmd(f"test -c /dev/{device}") + session.cmd(f"echo 'Hello virttest world' > /dev/{device}") # unplug serial adapter device_del(vm, "test-serial") @@ -91,7 +89,7 @@ def chardev_use(vm, name): vm.verify_alive() session = vm.wait_for_login() session.cmd_status("dmesg -c") - ppc_host = 'ppc' in params.get('vm_arch_name', arch.ARCH) + ppc_host = "ppc" in params.get("vm_arch_name", arch.ARCH) error_context.context("Test null chardev", test.log.info) chardev_add(vm, "chardev-null", "null", {}) @@ -100,16 +98,16 @@ def chardev_use(vm, name): chardev_del(vm, "chardev-null") error_context.context("Test file chardev", test.log.info) - filename = "/tmp/chardev-file-%s" % vm.instance - args = {'out': filename} + filename = f"/tmp/chardev-file-{vm.instance}" + args = {"out": filename} chardev_add(vm, "chardev-file", "file", args) if not ppc_host: chardev_use(vm, "chardev-file") chardev_del(vm, "chardev-file") if not ppc_host: - output = process.system_output("cat %s" % filename).decode() + output = process.system_output(f"cat {filename}").decode() if output.find("Hello virttest world") == -1: - test.fail("Guest message not found [%s]" % output) + test.fail(f"Guest message not found [{output}]") error_context.context("Test pty chardev", test.log.info) reply = chardev_add(vm, "chardev-pty", "pty", {}) @@ -121,7 +119,7 @@ def chardev_use(vm, name): output = os.read(fd_dst, 256).decode() os.close(fd_dst) if output.find("Hello virttest world") == -1: - test.fail("Guest message not found [%s]" % output) + test.fail(f"Guest message not found [{output}]") chardev_del(vm, "chardev-pty") error_context.context("Cleanup", test.log.info) diff --git a/qemu/tests/chardev_legacy_unplug.py b/qemu/tests/chardev_legacy_unplug.py index 9fa3be9bb1..e16061c1ac 100644 --- a/qemu/tests/chardev_legacy_unplug.py +++ b/qemu/tests/chardev_legacy_unplug.py @@ -1,4 +1,4 @@ -from virttest import error_context, env_process +from virttest import env_process, error_context from virttest.qemu_monitor import QMPCmdError @@ -16,11 +16,11 @@ def run(test, params, env): :param env: Dictionary with test environment. 
""" - serial_id = params.objects('serials')[-1] - params['start_vm'] = 'yes' - for backend in ['unix_socket', 'tcp_socket', 'pty']: - params['chardev_backend_%s' % serial_id] = backend - vm = params['main_vm'] + serial_id = params.objects("serials")[-1] + params["start_vm"] = "yes" + for backend in ["unix_socket", "tcp_socket", "pty"]: + params[f"chardev_backend_{serial_id}"] = backend + vm = params["main_vm"] env_process.preprocess_vm(test, params, env, vm) vm = env.get_vm(params["main_vm"]) vm.verify_alive() @@ -30,8 +30,8 @@ def run(test, params, env): try: chardev_device.unplug(vm.monitor) except QMPCmdError as e: - if e.data["desc"] != "Chardev '%s' is busy" % chardev_qid: - test.fail('It is not the expected error') + if e.data["desc"] != f"Chardev '{chardev_qid}' is busy": + test.fail("It is not the expected error") else: test.fail("Should not be unplug successfully") vm.verify_kernel_crash() diff --git a/qemu/tests/chardev_remove_pending_watches.py b/qemu/tests/chardev_remove_pending_watches.py index 97bcc2127f..4a23cc837c 100644 --- a/qemu/tests/chardev_remove_pending_watches.py +++ b/qemu/tests/chardev_remove_pending_watches.py @@ -1,8 +1,7 @@ import aexpect - -from virttest import error_context -from virttest import utils_test +from virttest import error_context, utils_test from virttest.utils_virtio_port import VirtioPortTest + from qemu.tests.virtio_serial_file_transfer import generate_data_file @@ -24,7 +23,7 @@ def run(test, params, env): """ os_type = params["os_type"] file_size = params.get_numeric("filesize") - guest_dir = params.get("guest_script_folder", '/var/tmp/') + guest_dir = params.get("guest_script_folder", "/var/tmp/") port_name = params["file_transfer_serial_port"] virtio_test = VirtioPortTest(test, env, params) @@ -33,15 +32,16 @@ def run(test, params, env): session = vm.wait_for_login() guest_file_name = generate_data_file(guest_dir, file_size, session) if os_type == "windows": - vport_name = '\\\\.\\' + port_name + vport_name = "\\\\.\\" + port_name guest_file_name = guest_file_name.replace("/", "") - guest_send_cmd = "copy %s > con %s" % (guest_file_name, vport_name) + guest_send_cmd = f"copy {guest_file_name} > con {vport_name}" driver_name = params["driver_name"] session = utils_test.qemu.windrv_check_running_verifier( - session, vm, test, driver_name) + session, vm, test, driver_name + ) else: - vport_name = '/dev/virtio-ports/%s' % port_name - guest_send_cmd = "cat %s > %s" % (guest_file_name, vport_name) + vport_name = f"/dev/virtio-ports/{port_name}" + guest_send_cmd = f"cat {guest_file_name} > {vport_name}" try: session.cmd(guest_send_cmd) @@ -55,9 +55,9 @@ def run(test, params, env): if port.sock.recv(4096) is None: test.fail("Host can't receive data !") finally: - clean_cmd = params['clean_cmd'] + clean_cmd = params["clean_cmd"] port.close() - session.cmd('%s %s' % (clean_cmd, guest_file_name)) + session.cmd(f"{clean_cmd} {guest_file_name}") session.close() vm.verify_alive() vm.verify_kernel_crash() diff --git a/qemu/tests/chardev_serial_login.py b/qemu/tests/chardev_serial_login.py index 3d882eb8b8..ed41d0012b 100644 --- a/qemu/tests/chardev_serial_login.py +++ b/qemu/tests/chardev_serial_login.py @@ -1,10 +1,7 @@ -import re import os +import re -from virttest import error_context -from virttest import remote -from virttest import qemu_monitor -from virttest import env_process +from virttest import env_process, error_context, qemu_monitor, remote @error_context.context_aware @@ -22,57 +19,60 @@ def run(test, params, env): :param params: Dictionary 
with the test parameters :param env: Dictionary with test environment. """ + def check_guest(): - session.cmd('touch file.txt') # pylint: disable=E0606 - session.cmd('mkdir -p tmp') - session.cmd('command cp file.txt ./tmp/test.txt') + session.cmd("touch file.txt") # pylint: disable=E0606 + session.cmd("mkdir -p tmp") + session.cmd("command cp file.txt ./tmp/test.txt") - serial_id = params.objects('serials')[-1] + serial_id = params.objects("serials")[-1] prompt = params.get("shell_prompt") - if params['serial_type'] == 'spapr-vty' \ - and params['inactivity_watcher'] == 'none': - params['vga'] = 'none' - params['start_vm'] = 'yes' - for backend in ['tcp_socket', 'unix_socket', 'pty', 'file']: - params['chardev_backend_%s' % serial_id] = backend + if params["serial_type"] == "spapr-vty" and params["inactivity_watcher"] == "none": + params["vga"] = "none" + params["start_vm"] = "yes" + for backend in ["tcp_socket", "unix_socket", "pty", "file"]: + params[f"chardev_backend_{serial_id}"] = backend env_process.preprocess(test, params, env) vm = env.get_vm(params["main_vm"]) vm.wait_for_login() serial_device = vm.devices.get(serial_id) chardev_qid = serial_device.get_param("chardev") chardev_device = vm.devices.get_by_qid(chardev_qid)[0] - if backend == 'tcp_socket': - session = remote.remote_login(client='nc', - host=chardev_device.params['host'], - port=chardev_device.params['port'], - username='root', - password='kvmautotest', - prompt=prompt, - timeout=240) + if backend == "tcp_socket": + session = remote.remote_login( + client="nc", + host=chardev_device.params["host"], + port=chardev_device.params["port"], + username="root", + password="kvmautotest", + prompt=prompt, + timeout=240, + ) check_guest() - elif backend == 'unix_socket': + elif backend == "unix_socket": session = vm.wait_for_serial_login() check_guest() - elif backend == 'pty': - chardev_info = vm.monitor.human_monitor_cmd('info chardev') - hostfile = re.findall('%s: filename=pty:(/dev/pts/\\d)?' 
% - "serial0", chardev_info)[0] + elif backend == "pty": + chardev_info = vm.monitor.human_monitor_cmd("info chardev") + hostfile = re.findall( + "{}: filename=pty:(/dev/pts/\\d)?".format("serial0"), chardev_info + )[0] if not hostfile: test.fail("Guest boot fail with pty backend.") fd_pty = os.open(hostfile, os.O_RDWR | os.O_NONBLOCK) os.close(fd_pty) - elif backend == 'file': - filename = chardev_device.params['path'] - f = open(filename, errors='ignore') - if 'Linux' not in f.read(): + elif backend == "file": + filename = chardev_device.params["path"] + f = open(filename, errors="ignore") + if "Linux" not in f.read(): f.close() test.fail("Guest boot fail with file backend.") f.close() try: vm.devices.simple_unplug(chardev_device, vm.monitor) except qemu_monitor.QMPCmdError as e: - if 'is busy' not in e.data['desc']: - test.fail(e.data['desc']) + if "is busy" not in e.data["desc"]: + test.fail(e.data["desc"]) else: test.fail("Hot-unplug should fail.") vm.destroy() diff --git a/qemu/tests/chardev_tls_encryption.py b/qemu/tests/chardev_tls_encryption.py index f5ea2ec008..f5129b17b7 100644 --- a/qemu/tests/chardev_tls_encryption.py +++ b/qemu/tests/chardev_tls_encryption.py @@ -1,11 +1,6 @@ import aexpect - from avocado.utils import process - -from virttest import utils_misc -from virttest import env_process -from virttest import error_context -from virttest import utils_package +from virttest import env_process, error_context, utils_misc, utils_package from provider.chardev_utils import setup_certs @@ -38,8 +33,9 @@ def run(test, params, env): test.error("Install dependency packages failed") setup_certs(params) expected_msg = params["expected_msg"] - hostname = process.run('hostname', ignore_status=False, shell=True, - verbose=True).stdout_text.strip() + hostname = process.run( + "hostname", ignore_status=False, shell=True, verbose=True + ).stdout_text.strip() port = str(utils_misc.find_free_ports(5000, 9999, 1, hostname)[0]) # Scenario 1 @@ -49,14 +45,15 @@ def run(test, params, env): params["extra_params"] = params["extra_params"] % (hostname, port) error_context.context("Run gnutls server ...", test.log.info) tls_server = aexpect.run_bg(gnutls_cmd_server) - params['start_vm'] = 'yes' - vm_name = params['main_vm'] + params["start_vm"] = "yes" + vm_name = params["main_vm"] error_context.context( - "Launch QEMU with a serial port as TLS client", test.log.info) + "Launch QEMU with a serial port as TLS client", test.log.info + ) env_process.preprocess_vm(test, params, env, vm_name) if not utils_misc.wait_for( - lambda: expected_msg in tls_server.get_output(), - first=5, timeout=15): + lambda: expected_msg in tls_server.get_output(), first=5, timeout=15 + ): test.fail("TLS server can't connect client succssfully.") # Scenario 2 @@ -64,17 +61,19 @@ def run(test, params, env): if gnutls_cmd_client: gnutls_cmd_client = gnutls_cmd_client % (port, hostname) params["extra_params"] = params["extra_params"] % (hostname, port) - params['start_vm'] = 'yes' - vm_name = params['main_vm'] + params["start_vm"] = "yes" + vm_name = params["main_vm"] error_context.context( - "Launch QEMU with a serial port as TLS server", test.log.info) + "Launch QEMU with a serial port as TLS server", test.log.info + ) env_process.preprocess_vm(test, params, env, vm_name) error_context.context( - "Run gnutls client to connect TLS server", test.log.info) + "Run gnutls client to connect TLS server", test.log.info + ) tls_client = aexpect.run_bg(gnutls_cmd_client) if not utils_misc.wait_for( - lambda: expected_msg in 
tls_client.get_output(), - first=5, timeout=15): + lambda: expected_msg in tls_client.get_output(), first=5, timeout=15 + ): test.fail("TLS client can't connect server succssfully.") # Scenario 3: @@ -82,22 +81,27 @@ def run(test, params, env): if guest_cmd: params["start_vm"] = "yes" vms = params.get("vms").split() - params["extra_params"] = params["extra_params_%s" - % vms[0]] % (hostname, port) + params["extra_params"] = params[f"extra_params_{vms[0]}"] % ( + hostname, + port, + ) error_context.context( - "Launch QEMU with a serial port as TLS server", test.log.info) + "Launch QEMU with a serial port as TLS server", test.log.info + ) env_process.preprocess_vm(test, params, env, vms[0]) vm1 = env.get_vm(vms[0]) session_vm1 = vm1.wait_for_login() session_vm1.cmd(guest_cmd) - params["extra_params"] = params["extra_params_%s" - % vms[1]] % (hostname, port) + params["extra_params"] = params[f"extra_params_{vms[1]}"] % ( + hostname, + port, + ) error_context.context( - "Launch QEMU with a serial port as TLS client", test.log.info) + "Launch QEMU with a serial port as TLS client", test.log.info + ) env_process.preprocess_vm(test, params, env, vms[1]) try: - session_vm1.read_until_output_matches( - [expected_msg], timeout=15) + session_vm1.read_until_output_matches([expected_msg], timeout=15) except aexpect.ExpectError: test.fail("Can't connect TLS client inside TLS server guest.") vm2 = env.get_vm(vms[1]) diff --git a/qemu/tests/check_basepage_size.py b/qemu/tests/check_basepage_size.py index b425829f87..7e99f5b42d 100644 --- a/qemu/tests/check_basepage_size.py +++ b/qemu/tests/check_basepage_size.py @@ -1,10 +1,7 @@ import os from avocado.utils import process - -from virttest import env_process -from virttest import error_context - +from virttest import env_process, error_context from virttest.utils_numeric import normalize_data_size @@ -23,21 +20,21 @@ def run(test, params, env): :params params: Dictionary with the test parameters. :params env: Dictionary with test environment. """ - get_basic_page = params.get('get_basic_page') - local_pglist = params.get('local_pglist') - basic_page_list = params.objects('basic_page_list') + get_basic_page = params.get("get_basic_page") + local_pglist = params.get("local_pglist") + basic_page_list = params.objects("basic_page_list") test.log.info("Check system configuration basic page size on host.") host_basic_page = process.system_output(get_basic_page).decode() if host_basic_page not in basic_page_list: - test.fail("Host basic page size is %s not as expected." % host_basic_page) + test.fail(f"Host basic page size is {host_basic_page} not as expected.") test.log.info("Check the basic page size mapping to the hugepage size on host.") - host_basic_page = normalize_data_size("%sB" % host_basic_page, "K") - hugepage_list = params.objects("mapping_pgsize_%sk" % host_basic_page) + host_basic_page = normalize_data_size(f"{host_basic_page}B", "K") + hugepage_list = params.objects(f"mapping_pgsize_{host_basic_page}k") host_local_pglist = os.listdir(local_pglist) if sorted(host_local_pglist) != sorted(hugepage_list): - test.fail("Host huge page size is %s not as expected." 
% host_local_pglist) + test.fail(f"Host huge page size is {host_local_pglist} not as expected.") params["start_vm"] = "yes" env_process.preprocess_vm(test, params, env, params["main_vm"]) @@ -47,11 +44,11 @@ def run(test, params, env): test.log.info("Check system configuration basic page size on guest.") guest_basic_page = session.cmd(get_basic_page).strip() if guest_basic_page not in basic_page_list: - test.fail("Guest page size is %s not as expected." % guest_basic_page) + test.fail(f"Guest page size is {guest_basic_page} not as expected.") test.log.info("Check the basic page size mapping to the hugepage size on guest.") - guest_basic_page = normalize_data_size("%sB" % guest_basic_page, "K") - hugepage_list = params.objects("mapping_pgsize_%sk" % guest_basic_page) - guest_local_pglist = session.cmd("ls %s" % local_pglist).strip().split() + guest_basic_page = normalize_data_size(f"{guest_basic_page}B", "K") + hugepage_list = params.objects(f"mapping_pgsize_{guest_basic_page}k") + guest_local_pglist = session.cmd(f"ls {local_pglist}").strip().split() if sorted(guest_local_pglist) != sorted(hugepage_list): - test.fail("Guest huge page size is %s not as expected." % guest_local_pglist) + test.fail(f"Guest huge page size is {guest_local_pglist} not as expected.") diff --git a/qemu/tests/check_block_size.py b/qemu/tests/check_block_size.py index f412a484dd..3ef690b7eb 100644 --- a/qemu/tests/check_block_size.py +++ b/qemu/tests/check_block_size.py @@ -1,8 +1,6 @@ import re -from virttest import error_context -from virttest import utils_test -from virttest import utils_misc +from virttest import error_context, utils_misc, utils_test @error_context.context_aware @@ -22,8 +20,7 @@ def run(test, params, env): name = params["main_vm"] if params.get("need_install") == "yes": error_context.context("Install guest with a new image", test.log.info) - utils_test.run_virt_sub_test(test, params, env, - sub_type='unattended_install') + utils_test.run_virt_sub_test(test, params, env, sub_type="unattended_install") params["cdroms"] = "" params["unattended_file"] = "" params["cdrom_unattended"] = "" @@ -44,8 +41,7 @@ def run(test, params, env): drive_serial = str(params["drive_serial_stg"]) expect_physical = int(params.get("physical_block_size_stg", 512)) expect_logical = int(params.get("logical_block_size_stg", 512)) - error_context.context("Verify physical/Logical block size", - test.log.info) + error_context.context("Verify physical/Logical block size", test.log.info) if params["os_type"] == "linux": drive_path = utils_misc.get_linux_drive_path(session, drive_serial) if not drive_path: @@ -68,15 +64,18 @@ def run(test, params, env): break else: test.error("Could not find the specified device") - out_physical = int(re.search(r'PhysicalSectorSize\s*:\s*(\d+)', target_blk).group(1)) - out_logical = int(re.search(r'LogicalSectorSize\s*:\s(\d+)', target_blk).group(1)) - if ((out_physical != expect_physical) or - (out_logical != expect_logical)): + out_physical = int( + re.search(r"PhysicalSectorSize\s*:\s*(\d+)", target_blk).group(1) + ) + out_logical = int( + re.search(r"LogicalSectorSize\s*:\s(\d+)", target_blk).group(1) + ) + if (out_physical != expect_physical) or (out_logical != expect_logical): msg = "Block size in guest doesn't match with qemu parameter\n" - msg += "Physical block size in guest: %s, " % out_physical - msg += "expect: %s" % expect_physical - msg += "\nLogical block size in guest: %s, " % out_logical - msg += "expect: %s" % expect_logical + msg += f"Physical block size in guest: 
{out_physical}, " + msg += f"expect: {expect_physical}" + msg += f"\nLogical block size in guest: {out_logical}, " + msg += f"expect: {expect_logical}" test.fail(msg) finally: if session: diff --git a/qemu/tests/check_coredump.py b/qemu/tests/check_coredump.py index 48f5dc9d93..95222f0d72 100644 --- a/qemu/tests/check_coredump.py +++ b/qemu/tests/check_coredump.py @@ -2,19 +2,17 @@ Check_coredump This is a kind of post check case in a test loop. """ -import os + import glob import logging +import os import time -from avocado.utils import path as utils_path - -from virttest import data_dir -from virttest import error_context -from virttest import utils_misc import virttest.utils_libguestfs as lgf +from avocado.utils import path as utils_path +from virttest import data_dir, error_context, utils_misc -LOG_JOB = logging.getLogger('avocado.test') +LOG_JOB = logging.getLogger("avocado.test") def get_images(): @@ -23,8 +21,7 @@ def get_images(): :return: image names """ - return glob.glob(utils_misc.get_path(data_dir.get_data_dir(), - "images/*.*")) + return glob.glob(utils_misc.get_path(data_dir.get_data_dir(), "images/*.*")) def coredump_exists(mntpnt, files, out_dir): @@ -52,11 +49,9 @@ def coredump_exists(mntpnt, files, out_dir): file_exists = True for item in files_glob: file_ctime = time.ctime(os.path.getctime(item)) - msgs_return.append((os.path.basename(item), - file_ctime)) - error_context.context("copy files %s %s" % - (item, out_dir), LOG_JOB.info) - os.system("cp -rf %s %s" % (item, out_dir)) + msgs_return.append((os.path.basename(item), file_ctime)) + error_context.context(f"copy files {item} {out_dir}", LOG_JOB.info) + os.system(f"cp -rf {item} {out_dir}") return file_exists, msgs_return @@ -75,22 +70,19 @@ def check_images_coredump(image, mntpnt, check_files, debugdir): msgs_return = [] try: - error_context.context("Mount the guest image %s to host mount point" % - image, - LOG_JOB.info) - status = lgf.guestmount(image, mntpnt, - True, True, debug=True, is_disk=True) + error_context.context( + f"Mount the guest image {image} to host mount point", LOG_JOB.info + ) + status = lgf.guestmount(image, mntpnt, True, True, debug=True, is_disk=True) if status.exit_status: - msgs_return.append("Could not mount guest image %s." 
% image) + msgs_return.append(f"Could not mount guest image {image}.") error_context.context(msgs_return[0], LOG_JOB.error) else: - found_coredump, msgs_return = coredump_exists(mntpnt, - check_files, - debugdir) + found_coredump, msgs_return = coredump_exists(mntpnt, check_files, debugdir) finally: if os.path.ismount(mntpnt): error_context.context("guestunmount host mount point") - lgf.lgf_command("guestunmount %s" % mntpnt) + lgf.lgf_command(f"guestunmount {mntpnt}") return found_coredump, msgs_return @@ -157,8 +149,9 @@ def run(test, params, env): try: utils_path.find_command("guestmount") except: - warn_msg = "Need packages: libguestfs libguestfs-tools" + \ - " libguestfs-winsupport" + warn_msg = ( + "Need packages: libguestfs libguestfs-tools" + " libguestfs-winsupport" + ) test.cancel(warn_msg) # define the file name need to be checked @@ -169,12 +162,10 @@ def run(test, params, env): host_mountpoint = params.get("host_mountpoint", host_mountpoint_default) host_mountpoint = utils_misc.get_path(test.debugdir, host_mountpoint) file_chk_for_win = params.get("coredump_check_win", file_check_win_default) - file_chk_for_linux = params.get("coredump_check_linux", - file_check_linux_default) + file_chk_for_linux = params.get("coredump_check_linux", file_check_linux_default) # check if the host_mountpoint exists. - if not (os.path.isdir(host_mountpoint) and - os.path.exists(host_mountpoint)): + if not (os.path.isdir(host_mountpoint) and os.path.exists(host_mountpoint)): os.makedirs(host_mountpoint) coredump_file_exists = False @@ -183,15 +174,15 @@ def run(test, params, env): error_context.context("Get all the images name", test.log.info) images = get_images() - error_context.context("images: %s" % images, test.log.info) + error_context.context(f"images: {images}", test.log.info) # find all the images # mount per-image to check if the dump file exists error_context.context("Check coredump file per-image", test.log.info) for image in images: - status, chk_msgs = check_images_coredump(image, - host_mountpoint, - check_files, test.debugdir) + status, chk_msgs = check_images_coredump( + image, host_mountpoint, check_files, test.debugdir + ) coredump_file_exists = coredump_file_exists or status if status: check_results.append((image, chk_msgs)) diff --git a/qemu/tests/check_cvq_event.py b/qemu/tests/check_cvq_event.py index f183296f06..18c23c098f 100644 --- a/qemu/tests/check_cvq_event.py +++ b/qemu/tests/check_cvq_event.py @@ -1,5 +1,4 @@ -from virttest import error_context -from virttest import utils_net +from virttest import error_context, utils_net @error_context.context_aware @@ -30,20 +29,20 @@ def run(test, params, env): guest_nic = vm.virtnet device_id = guest_nic[0].device_id test.log.info("Consumed the rx-filter of the nic") - vm.monitor.cmd("query-rx-filter", args={'name': device_id}) + vm.monitor.cmd("query-rx-filter", args={"name": device_id}) test.log.info("Changed the mac address inside guest") session_serial.cmd_output_safe(change_cmd % (new_mac, interface)) test.log.info("Check qemu if sent a NIC_RX_FILTER_CHANGED event") event_name = params.get("event_name") if vm.monitor.get_event(event_name): - test.log.info("Received qmp %s event notification" % event_name) + test.log.info("Received qmp %s event notification", event_name) else: - test.fail("Can not got %s event notification" % event_name) + test.fail(f"Can not got {event_name} event notification") vm.monitor.clear_event(event_name) test.log.info("Changed the mac address again inside guest") 
session_serial.cmd_output_safe(change_cmd % (old_mac, interface)) if vm.monitor.get_event(event_name): - test.fail("Oops, Received qmp %s event notification again" % event_name) + test.fail(f"Oops, Received qmp {event_name} event notification again") else: test.log.info("Test pass, there is no any event notification") session_serial.close() diff --git a/qemu/tests/check_link_speed_duplex.py b/qemu/tests/check_link_speed_duplex.py index 1b38baf22b..d5496c4816 100644 --- a/qemu/tests/check_link_speed_duplex.py +++ b/qemu/tests/check_link_speed_duplex.py @@ -1,9 +1,6 @@ import re -from virttest import error_context -from virttest import utils_net -from virttest import utils_test -from virttest import env_process +from virttest import env_process, error_context, utils_net, utils_test @error_context.context_aware @@ -31,9 +28,10 @@ def get_speed_duplex_traceview(): and duplex is a 'full' or 'half'. """ error_context.context( - "Check speed and duplex info from the traceview", test.log.info) + "Check speed and duplex info from the traceview", test.log.info + ) log = utils_net.dump_traceview_log_windows(params, vm) - check_pattern = 'Speed=(\\d+).+Duplex=(\\w+)$' + check_pattern = "Speed=(\\d+).+Duplex=(\\w+)$" result = re.search(check_pattern, log, re.MULTILINE) if result: return (int(result.group(1)), result.group(2).lower()) @@ -46,19 +44,24 @@ def get_speed_duplex_powershell(session): return: a tuple of (speed, duplex), whitch speed is measured by bps, and duplex is a 'full' or 'half'. """ - error_context.context("Check speed and duplex info from powershell", - test.log.info) + error_context.context( + "Check speed and duplex info from powershell", test.log.info + ) check_speed_cmd = params["check_speed_powershell_cmd"] status, output = session.cmd_status_output(check_speed_cmd) if status: - test.fail("Failed to get speed info from powershell, " - "status=%s, output=%s" % (status, output)) - lines = output.strip().split('\n') + test.fail( + "Failed to get speed info from powershell, " + f"status={status}, output={output}" + ) + lines = output.strip().split("\n") if len(lines) > 2: result = lines[2].strip().split() if len(result) > 1: - return (int(result[0]), - 'full' if result[1].lower() == 'true' else 'half') + return ( + int(result[0]), + "full" if result[1].lower() == "true" else "half", + ) test.fail("Can't get speed or duplex info from powershell") def run_test_windows(session, tar_speed, tar_duplex): @@ -71,20 +74,22 @@ def run_test_windows(session, tar_speed, tar_duplex): """ # convert to bps unit tar_speed = tar_speed * 1000000 - error_context.context("Check if the driver is installed and verified", - test.log.info) + error_context.context( + "Check if the driver is installed and verified", test.log.info + ) driver_name = params.get("driver_name", "netkvm") - run_powershell = (params.get("run_powershell", "yes") == "yes") + run_powershell = params.get("run_powershell", "yes") == "yes" session = utils_test.qemu.windrv_check_running_verifier( - session, vm, test, driver_name, timeout) + session, vm, test, driver_name, timeout + ) if run_powershell: - check_speed_duplex_windows(session, tar_speed, tar_duplex, - method='powershell') + check_speed_duplex_windows( + session, tar_speed, tar_duplex, method="powershell" + ) # run traceview after powershell, for it will invalid session check_speed_duplex_windows(session, tar_speed, tar_duplex) - def check_speed_duplex_windows(session, tar_speed, tar_duplex, - method='traceview'): + def check_speed_duplex_windows(session, tar_speed, tar_duplex, 
method="traceview"): """ Check the speed and duplex with certain method. @@ -92,15 +97,17 @@ def check_speed_duplex_windows(session, tar_speed, tar_duplex, param tar_duplex: target duplex expected param method: the method to check, one of 'traceview' and 'powershell' """ - if method == 'traceview': + if method == "traceview": speed, duplex = get_speed_duplex_traceview() - elif method == 'powershell': + elif method == "powershell": speed, duplex = get_speed_duplex_powershell(session) else: test.error("Method %s not supported", method) if speed != tar_speed or duplex != tar_duplex: # pylint: disable=E0606 - test.fail("The speed and duplex is incorrect in %s, " - "with speed=%s, duplex=%s" % (method, speed, duplex)) + test.fail( + f"The speed and duplex is incorrect in {method}, " + f"with speed={speed}, duplex={duplex}" + ) def get_speed_duplex_linux(session): """ @@ -115,11 +122,9 @@ def get_speed_duplex_linux(session): check_speed_cmd = params["check_speed_cmd"] % ethname status, output = session.cmd_status_output(check_speed_cmd) if status: - test.fail("Failed to get speed info," - "status=%s, ouput=%s" % (status, output)) + test.fail("Failed to get speed info," f"status={status}, ouput={output}") test.log.info(output) - result = re.findall(r"(?:Speed:\s+(\d+)Mb/s)|(?:Duplex:\s+(\w+))", - output) + result = re.findall(r"(?:Speed:\s+(\d+)Mb/s)|(?:Duplex:\s+(\w+))", output) if len(result) < 2: test.error("Can't get speed or duplex info") speed = int(result[0][0]) @@ -136,8 +141,10 @@ def run_test_linux(session, tar_speed, tar_duplex): """ speed, duplex = get_speed_duplex_linux(session) if speed != tar_speed or duplex != tar_duplex: - test.fail("The speed and duplex is incorrect, " - "with speed=%s, duplex=%s" % (speed, duplex)) + test.fail( + "The speed and duplex is incorrect, " + f"with speed={speed}, duplex={duplex}" + ) def run_test(session, tar_speed, tar_duplex): """ @@ -179,8 +186,7 @@ def run_test(session, tar_speed, tar_duplex): guest_ip = vm.get_address() status, output = utils_test.ping(guest_ip, 10, timeout=15) if status: - test.fail("Fail to perfrom ping test, status=%s, output=%s" % - (status, output)) + test.fail(f"Fail to perfrom ping test, status={status}, output={output}") lost_ratio = utils_test.get_loss_ratio(output) if lost_ratio > 0: - test.fail("Ping loss ratio is %s" % lost_ratio) + test.fail(f"Ping loss ratio is {lost_ratio}") diff --git a/qemu/tests/check_nic_link_status.py b/qemu/tests/check_nic_link_status.py index 7f84518f1b..c5f1958412 100644 --- a/qemu/tests/check_nic_link_status.py +++ b/qemu/tests/check_nic_link_status.py @@ -1,8 +1,6 @@ import time -from virttest import error_context -from virttest import utils_test -from virttest import utils_net +from virttest import error_context, utils_net, utils_test @error_context.context_aware @@ -27,21 +25,22 @@ def guest_interface_operstate_check(session, expect_status): """ if params.get("os_type") == "linux": guest_ifname = utils_net.get_linux_ifname(session, vm.get_mac_address()) - if_operstate = utils_net.get_net_if_operstate(guest_ifname, - session.cmd_output_safe) + if_operstate = utils_net.get_net_if_operstate( + guest_ifname, session.cmd_output_safe + ) else: - if_operstate = utils_net.get_windows_nic_attribute(session, - "macaddress", - vm.get_mac_address(), - "netconnectionstatus") + if_operstate = utils_net.get_windows_nic_attribute( + session, "macaddress", vm.get_mac_address(), "netconnectionstatus" + ) if if_operstate != expect_status: - err_msg = "Guest interface %s status error, " % guest_ifname - 
err_msg += "currently interface status is '%s', " % if_operstate - err_msg += "but expect status is '%s'" % expect_status + err_msg = f"Guest interface {guest_ifname} status error, " + err_msg += f"currently interface status is '{if_operstate}', " + err_msg += f"but expect status is '{expect_status}'" test.fail(err_msg) - test.log.info("Guest interface operstate '%s' is exactly as expected", - if_operstate) + test.log.info( + "Guest interface operstate '%s' is exactly as expected", if_operstate + ) def set_link_test(linkid, link_up): """ @@ -72,23 +71,28 @@ def set_link_test(linkid, link_up): error_context.context("Check guest interface operstate", test.log.info) guest_interface_operstate_check(session, expect_down_status) - error_context.context("Reboot guest by '%s' and recheck interface " - "operstate" % reboot_method, test.log.info) + error_context.context( + f"Reboot guest by '{reboot_method}' and recheck interface " "operstate", + test.log.info, + ) session = vm.reboot(method=reboot_method, serial=True, timeout=360, session=session) guest_interface_operstate_check(session, expect_down_status) - error_context.context("Re-enable guest nic device '%s' by set_link" - % device_id, test.log.info) + error_context.context( + f"Re-enable guest nic device '{device_id}' by set_link", test.log.info + ) set_link_test(device_id, True) guest_interface_operstate_check(session, expect_up_status) - error_context.context("Check guest network connecting by set_link to '%s'" - % expect_up_status, test.log.info) + error_context.context( + f"Check guest network connecting by set_link to '{expect_up_status}'", + test.log.info, + ) # Windows guest need about 60s to get the ip address guest_ip = utils_net.get_guest_ip_addr(session, device_mac, os_type, timeout=60) if guest_ip is None: utils_net.restart_guest_network(session, device_mac, os_type) status, output = utils_test.ping(guest_ip, 10, timeout=30, session=session) if status: - test.fail("%s ping host unexpected, output %s" % (vm.name, output)) + test.fail(f"{vm.name} ping host unexpected, output {output}") session.close() diff --git a/qemu/tests/check_reports_end_offset.py b/qemu/tests/check_reports_end_offset.py index e8dcd20e8a..7fc4e00f02 100755 --- a/qemu/tests/check_reports_end_offset.py +++ b/qemu/tests/check_reports_end_offset.py @@ -1,5 +1,5 @@ -import re import json +import re from virttest import data_dir from virttest.qemu_storage import QemuImg @@ -32,16 +32,14 @@ def _check_result(key, offset, output): report.create(report.params) # 'qemu-img check' the image and check the output info. 
- check_result = report.check(report.params, root_dir, - output="human").stdout.decode() + check_result = report.check(report.params, root_dir, output="human").stdout.decode() if not check_result: test.error("There is no output of check command, check please.") test.log.debug("The check output with human output format: %s", check_result) - result_dict = dict(re.findall(r'(.+):\s(.+)', check_result)) + result_dict = dict(re.findall(r"(.+):\s(.+)", check_result)) _check_result(human_key, offset, result_dict) - check_result = report.check(report.params, root_dir, - output="json").stdout.decode() + check_result = report.check(report.params, root_dir, output="json").stdout.decode() if not check_result: test.error("There is no output of check command, check please.") test.log.debug("The check output with json output format: %s", check_result) diff --git a/qemu/tests/check_roms.py b/qemu/tests/check_roms.py index 46def311a1..c294ae8090 100644 --- a/qemu/tests/check_roms.py +++ b/qemu/tests/check_roms.py @@ -31,10 +31,10 @@ def run(test, params, env): list_fw = [] list_addr = [] - patt = re.compile(r'%s' % fw_filter, re.M) + patt = re.compile(rf"{fw_filter}", re.M) list_fw = patt.findall(str(o)) - patt = re.compile(r'%s' % addr_filter, re.M) + patt = re.compile(rf"{addr_filter}", re.M) list_addr = patt.findall(str(o)) test.log.info("ROMS reported by firmware: '%s'", list_fw) @@ -43,5 +43,7 @@ def run(test, params, env): error_context.context("check result for the roms", test.log.info) ret = set(list_fw).intersection(list_addr) if ret: - test.fail("ROM '%s' is intended to be loaded by the firmware, " - "but is was also loaded by QEMU itself." % ret) + test.fail( + f"ROM '{ret}' is intended to be loaded by the firmware, " + "but it was also loaded by QEMU itself." 
+ ) diff --git a/qemu/tests/check_unhalt_vcpu.py b/qemu/tests/check_unhalt_vcpu.py index 5487d21aa9..b47934a8c8 100644 --- a/qemu/tests/check_unhalt_vcpu.py +++ b/qemu/tests/check_unhalt_vcpu.py @@ -27,8 +27,10 @@ def run(test, params, env): try: cpu_usage = float(cpu_usage) except ValueError as detail: - test.error("Could not get correct cpu usage value with cmd" - " '%s', detail: '%s'" % (cpu_get_usage_cmd, detail)) + test.error( + "Could not get correct cpu usage value with cmd" + f" '{cpu_get_usage_cmd}', detail: '{detail}'" + ) test.log.info("Guest's reported CPU usage: %s", cpu_usage) if cpu_usage >= 90: diff --git a/qemu/tests/client_guest_shutdown.py b/qemu/tests/client_guest_shutdown.py index 1ac649e4db..bca4f17a55 100644 --- a/qemu/tests/client_guest_shutdown.py +++ b/qemu/tests/client_guest_shutdown.py @@ -1,7 +1,6 @@ import time -from virttest import error_context -from virttest import utils_misc +from virttest import error_context, utils_misc @error_context.context_aware @@ -27,23 +26,24 @@ def run(test, params, env): # shutdown both of the sessions for vm in [client_vm, guest_vm]: - vm_session = vm.wait_for_login(timeout=timeout, username="root", - password="123456") + vm_session = vm.wait_for_login( + timeout=timeout, username="root", password="123456" + ) try: error_context.base_context("shutting down the VM") if params.get("shutdown_method") == "shell": # Send a shutdown command to the guest's shell vm_session.sendline(vm.get_params().get("shutdown_command")) - error_context.context("waiting VM to go down " - "(shutdown shell cmd)") + error_context.context("waiting VM to go down " "(shutdown shell cmd)") elif params.get("shutdown_method") == "system_powerdown": # Sleep for a while -- give the guest a chance to finish # booting time.sleep(float(params.get("sleep_before_powerdown", 10))) # Send a system_powerdown monitor command vm.monitor.system_powerdown() - error_context.context("waiting VM to go down " - "(system_powerdown monitor cmd)") + error_context.context( + "waiting VM to go down " "(system_powerdown monitor cmd)" + ) if not utils_misc.wait_for(vm.is_dead, 240, 0, 1): vm.destroy(gracefully=False, free_mac_addresses=True) diff --git a/qemu/tests/cluster_size_check.py b/qemu/tests/cluster_size_check.py index bbcae419cd..cdd120cdbb 100644 --- a/qemu/tests/cluster_size_check.py +++ b/qemu/tests/cluster_size_check.py @@ -45,35 +45,34 @@ def check_cluster_size(parttern, expect, csize_set): image_name = params.get("images") status_error = "yes" == params.get("status_error", "no") image_params = params.object_params(image_name) - image = qemu_disk_img.QemuImgTest(test, image_params, env, - image_name) + image = qemu_disk_img.QemuImgTest(test, image_params, env, image_name) filename, result = image.create(image_params, ignore_errors=True) if status_error: if result.exit_status == 0: - test.log.error("Create image sucessfully with invalid size: %s", - csize_set) + test.log.error( + "Create image sucessfully with invalid size: %s", csize_set + ) cfail += 1 fail_log += "Succeed in creating image unexpectedly.\n" else: output = image.info() - error_context.context("Check the cluster size from output", - test.log.info) + error_context.context("Check the cluster size from output", test.log.info) cluster_size = re.findall(parttern, output) if cluster_size: if cluster_size[0] != expect: - test.log.error("Cluster size %s is not expected value %s", - cluster_size, expect) + test.log.error( + "Cluster size %s is not expected value %s", cluster_size, expect + ) cfail += 1 fail_log += 
"Cluster size mismatch the specified value " - fail_log += "%s.\n" % csize_set + fail_log += f"{csize_set}.\n" else: - test.log.error("Can not get the cluster size from command: %s", - output) + test.log.error("Can not get the cluster size from command: %s", output) cfail += 1 fail_log += "Can not get the cluster size from command:" - fail_log += " %s\n" % output + fail_log += f" {output}\n" return cfail, fail_log @@ -91,14 +90,14 @@ def check_cluster_size(parttern, expect, csize_set): params["image_cluster_size"] = cluster_size csize_expect = str(memory_size(cluster_size)) csize_set = cluster_size - error_context.context("Check cluster size as cluster size set to %s" - % cluster_size) + error_context.context( + f"Check cluster size as cluster size set to {cluster_size}" + ) - c_fail, log = check_cluster_size(csize_parttern, csize_expect, - csize_set) + c_fail, log = check_cluster_size(csize_parttern, csize_expect, csize_set) fail += c_fail fail_log += log error_context.context("Finally result check") if fail > 0: - test.fail("Cluster size check failed %s times:\n%s" % (fail, fail_log)) + test.fail(f"Cluster size check failed {fail} times:\n{fail_log}") diff --git a/qemu/tests/commit_snapshot_to_backing_image.py b/qemu/tests/commit_snapshot_to_backing_image.py index 12ee270ae0..fb7ac80c8e 100644 --- a/qemu/tests/commit_snapshot_to_backing_image.py +++ b/qemu/tests/commit_snapshot_to_backing_image.py @@ -1,10 +1,9 @@ import json from avocado.utils import process +from virttest import data_dir, error_context, qemu_storage + from provider import qemu_img_utils as img_utils -from virttest import data_dir -from virttest import error_context -from virttest import qemu_storage @error_context.context_aware @@ -21,25 +20,27 @@ def run(test, params, env): """ # add missing params for image1 images = params["images"].split() - params["image_name_%s" % images[0]] = params["image_name"] - params["image_format_%s" % images[0]] = params["image_format"] + params[f"image_name_{images[0]}"] = params["image_name"] + params[f"image_format_{images[0]}"] = params["image_format"] root_dir = data_dir.get_data_dir() image_chain = params["image_chain"].split() - base, sn = (qemu_storage.QemuImg(params.object_params(tag), root_dir, tag) - for tag in image_chain) + base, sn = ( + qemu_storage.QemuImg(params.object_params(tag), root_dir, tag) + for tag in image_chain + ) - error_context.context("create snapshot %s" % sn.tag, test.log.info) + error_context.context(f"create snapshot {sn.tag}", test.log.info) sn.create(sn.params) - error_context.context("boot vm from snapshot %s" % sn.tag, test.log.info) + error_context.context(f"boot vm from snapshot {sn.tag}", test.log.info) vm = img_utils.boot_vm_with_images(test, params, env, (sn.tag,)) md5sum_bin = params.get("md5sum_bin", "md5sum") sync_bin = params.get("sync_bin", "sync") guest_file = params["guest_tmp_filename"] dd_blkcnt = int(params["dd_blkcnt"]) - error_context.context("save random file %s" % guest_file, test.log.info) + error_context.context(f"save random file {guest_file}", test.log.info) img_utils.save_random_file_to_vm(vm, guest_file, dd_blkcnt, sync_bin) session = vm.wait_for_login() md5val = img_utils.check_md5sum(guest_file, md5sum_bin, session) @@ -47,7 +48,7 @@ def run(test, params, env): session.close() vm.destroy() - error_context.context("commit snapshot %s" % sn.tag, test.log.info) + error_context.context(f"commit snapshot {sn.tag}", test.log.info) size_before_commit = json.loads(sn.info(output="json"))["actual-size"] test.log.debug("%s size 
before commit: %s", sn.tag, size_before_commit) cache_mode = params.get("cache_mode") @@ -55,21 +56,19 @@ def run(test, params, env): test.log.debug("sync host cache after commit") process.system("sync") - error_context.context("verify snapshot is emptied after commit", - test.log.info) + error_context.context("verify snapshot is emptied after commit", test.log.info) size_after_commit = json.loads(sn.info(output="json"))["actual-size"] test.log.debug("%s size after commit: %s", sn.tag, size_after_commit) - guest_file_size = dd_blkcnt * 512 # tmp file size in bytes + guest_file_size = dd_blkcnt * 512 # tmp file size in bytes if size_before_commit - size_after_commit >= guest_file_size: test.log.debug("the snapshot file was emptied.") else: test.fail("snapshot was not emptied") - error_context.context("boot vm from base %s" % base.tag, test.log.info) + error_context.context(f"boot vm from base {base.tag}", test.log.info) vm = img_utils.boot_vm_with_images(test, params, env, (base.tag,)) session = vm.wait_for_login() - img_utils.check_md5sum(guest_file, md5sum_bin, session, - md5_value_to_check=md5val) + img_utils.check_md5sum(guest_file, md5sum_bin, session, md5_value_to_check=md5val) vm.destroy() # remove snapshot - params["remove_image_%s" % sn.tag] = "yes" + params[f"remove_image_{sn.tag}"] = "yes" diff --git a/qemu/tests/commit_snapshot_to_raw_backing.py b/qemu/tests/commit_snapshot_to_raw_backing.py index a3ac9bc57f..bb12d86c34 100644 --- a/qemu/tests/commit_snapshot_to_raw_backing.py +++ b/qemu/tests/commit_snapshot_to_raw_backing.py @@ -3,8 +3,7 @@ from virttest import data_dir from virttest.qemu_storage import QemuImg -from qemu.tests.qemu_disk_img import QemuImgTest -from qemu.tests.qemu_disk_img import generate_base_snapshot_pair +from qemu.tests.qemu_disk_img import QemuImgTest, generate_base_snapshot_pair def run(test, params, env): @@ -26,6 +25,7 @@ def run(test, params, env): :param params: Dictionary with the test parameters :param env: Dictionary with test environment """ + def _get_img_obj_and_params(tag): """Get an QemuImg object and its params based on the tag.""" img_param = params.object_params(tag) @@ -43,15 +43,15 @@ def _verify_qemu_img_info(output, b_fmt, b_name): """Verify qemu-img info output for this case.""" test.log.info("Verify snapshot's backing file information.") res = json.loads(output) - if (res["backing-filename-format"] != b_fmt or - res["backing-filename"] != b_name): - test.fail("Backing file information is not correct," - " got %s." % b_name) + if res["backing-filename-format"] != b_fmt or res["backing-filename"] != b_name: + test.fail("Backing file information is not correct," f" got {b_name}.") compat = res["format-specific"]["data"]["compat"] expected = _get_compat_version() - if (compat != expected): - test.fail("Snapshot's compat mode is not correct," - " got %s, expected %s." % (compat, expected)) + if compat != expected: + test.fail( + "Snapshot's compat mode is not correct," + f" got {compat}, expected {expected}." 
+ ) file = params["guest_file_name"] gen = generate_base_snapshot_pair(params["image_chain"]) @@ -64,11 +64,15 @@ def _verify_qemu_img_info(output, b_fmt, b_name): params["image_name_image1"] = params["image_name"] sn_qit = QemuImgTest(test, params, env, snapshot) sn_qit.create_snapshot() - _verify_qemu_img_info(sn_img.info(output="json"), - base_img.image_format, base_img.image_filename) - - test.log.info("Boot a guest up from snapshot image: %s, and create a" - " file %s on the disk.", snapshot, file) + _verify_qemu_img_info( + sn_img.info(output="json"), base_img.image_format, base_img.image_filename + ) + + test.log.info( + "Boot a guest up from snapshot image: %s, and create a" " file %s on the disk.", + snapshot, + file, + ) sn_qit.start_vm() md5 = sn_qit.save_file(file) test.log.info("Got %s's md5 %s from the snapshot image disk.", file, md5) @@ -76,8 +80,12 @@ def _verify_qemu_img_info(output, b_fmt, b_name): cache_mode = params.get("cache_mode") if cache_mode: - test.log.info("Commit snapshot image %s back to %s with cache mode %s.", - snapshot, base, cache_mode) + test.log.info( + "Commit snapshot image %s back to %s with cache mode %s.", + snapshot, + base, + cache_mode, + ) else: test.log.info("Commit snapshot image %s back to %s.", snapshot, base) @@ -101,8 +109,9 @@ def _verify_qemu_img_info(output, b_fmt, b_name): base_qit = QemuImgTest(test, params, env, base) base_qit.start_vm() if not base_qit.check_file(file, md5): - test.fail("The file %s's md5 on base image and" - " snapshot file are different." % file) + test.fail( + f"The file {file}'s md5 on base image and" " snapshot file are different." + ) base_qit.destroy_vm() test.log.info("Check image %s.", snapshot) diff --git a/qemu/tests/commit_with_backing.py b/qemu/tests/commit_with_backing.py index 1757e42f21..5906007f26 100644 --- a/qemu/tests/commit_with_backing.py +++ b/qemu/tests/commit_with_backing.py @@ -1,8 +1,8 @@ import json +from virttest import data_dir, qemu_storage + from provider import qemu_img_utils as img_utils -from virttest import qemu_storage -from virttest import data_dir def run(test, params, env): @@ -17,8 +17,10 @@ def run(test, params, env): def prepare_images_from_params(images, params): """Parse params to initialize a QImage list.""" - return [qemu_storage.QemuImg(params.object_params(tag), root_dir, tag) - for tag in images] + return [ + qemu_storage.QemuImg(params.object_params(tag), root_dir, tag) + for tag in images + ] def verify_backing_chain(info): """Verify image's backing chain.""" @@ -26,20 +28,21 @@ def verify_backing_chain(info): base_image = None if image.base_tag: base_params = params.object_params(image.base_tag) - base_image = qemu_storage.get_image_repr(image.base_tag, - base_params, root_dir) + base_image = qemu_storage.get_image_repr( + image.base_tag, base_params, root_dir + ) base_image_from_info = img_info.get("full-backing-filename") if base_image != base_image_from_info: - test.fail(("backing chain check for image %s failed, backing" - " file from info is %s, which should be %s.") % - (image.image_filename, base_image_from_info, - base_image)) + test.fail( + f"backing chain check for image {image.image_filename} failed, backing" + f" file from info is {base_image_from_info}, which should be {base_image}." 
+ ) images = params.get("image_chain", "").split() if len(images) < 3: test.cancel("Snapshot chain must at least contains three images") - params["image_name_%s" % images[0]] = params["image_name"] - params["image_format_%s" % images[0]] = params["image_format"] + params[f"image_name_{images[0]}"] = params["image_name"] + params[f"image_format_{images[0]}"] = params["image_format"] root_dir = data_dir.get_data_dir() images = prepare_images_from_params(images, params) base, active_layer = images[0], images[-1] @@ -49,19 +52,22 @@ def verify_backing_chain(info): hashes = {} for image in images: if image is not base: - test.log.debug("Create snapshot %s based on %s", - image.image_filename, image.base_image_filename) + test.log.debug( + "Create snapshot %s based on %s", + image.image_filename, + image.base_image_filename, + ) image.create(image.params) vm = img_utils.boot_vm_with_images(test, params, env, (image.tag,)) guest_file = params["guest_tmp_filename"] % image.tag - test.log.debug("Create tmp file %s in image %s", guest_file, - image.image_filename) + test.log.debug( + "Create tmp file %s in image %s", guest_file, image.image_filename + ) img_utils.save_random_file_to_vm(vm, guest_file, 2048 * 100, sync_bin) session = vm.wait_for_login() test.log.debug("Get md5 value fo the temporary file") - hashes[guest_file] = img_utils.check_md5sum(guest_file, - md5sum_bin, session) + hashes[guest_file] = img_utils.check_md5sum(guest_file, md5sum_bin, session) session.close() vm.destroy() @@ -78,20 +84,23 @@ def verify_backing_chain(info): test.log.debug("Verify the snapshot chain after commit") info = json.loads(active_layer.info(output="json")) active_layer_size_after = info[0]["actual-size"] - test.log.debug("%s file size before commit: %s, after commit: %s", - active_layer.image_filename, active_layer_size_before, - active_layer_size_after) + test.log.debug( + "%s file size before commit: %s, after commit: %s", + active_layer.image_filename, + active_layer_size_before, + active_layer_size_after, + ) if active_layer_size_after < active_layer_size_before: - test.fail("image %s is emptied after commit with explicit base" % - active_layer.image_filename) + test.fail( + f"image {active_layer.image_filename} is emptied after commit with explicit base" + ) verify_backing_chain(info) test.log.debug("Verify hashes of temporary files") vm = img_utils.boot_vm_with_images(test, params, env, (base.tag,)) session = vm.wait_for_login() for tmpfile, hashval in hashes.items(): - img_utils.check_md5sum(tmpfile, md5sum_bin, session, - md5_value_to_check=hashval) + img_utils.check_md5sum(tmpfile, md5sum_bin, session, md5_value_to_check=hashval) for image in images: if image is not base: diff --git a/qemu/tests/commit_with_bitmaps_nospace.py b/qemu/tests/commit_with_bitmaps_nospace.py index 3f119c2986..cbd90ec606 100644 --- a/qemu/tests/commit_with_bitmaps_nospace.py +++ b/qemu/tests/commit_with_bitmaps_nospace.py @@ -1,9 +1,8 @@ from avocado.utils import process - from virttest.data_dir import get_data_dir from virttest.lvm import EmulatedLVM -from virttest.qemu_storage import QemuImg from virttest.qemu_io import QemuIOSystem +from virttest.qemu_storage import QemuImg def run(test, params, env): @@ -36,8 +35,7 @@ def _qemu_io(img, cmd): try: QemuIOSystem(test, params, img.image_filename).cmd_output(cmd, 120) except process.CmdError as err: - test.fail( - "qemu-io to '%s' failed: %s." 
% (img.image_filename, str(err))) + test.fail(f"qemu-io to '{img.image_filename}' failed: {str(err)}.") def _clean_images(img_list): """Remove images from image_list.""" diff --git a/qemu/tests/convert_after_resize_snapshot.py b/qemu/tests/convert_after_resize_snapshot.py index e5dda02e70..3de6929887 100644 --- a/qemu/tests/convert_after_resize_snapshot.py +++ b/qemu/tests/convert_after_resize_snapshot.py @@ -1,12 +1,10 @@ import json from avocado.utils import process -from virttest import data_dir -from virttest import utils_numeric +from virttest import data_dir, utils_numeric from virttest.qemu_storage import QemuImg -from qemu.tests.qemu_disk_img import QemuImgTest -from qemu.tests.qemu_disk_img import generate_base_snapshot_pair +from qemu.tests.qemu_disk_img import QemuImgTest, generate_base_snapshot_pair def run(test, params, env): @@ -17,16 +15,25 @@ def run(test, params, env): :param params: Dictionary with the test parameters :param env: Dictionary with test environment. """ + def _compare_images(img1, img2): """Compare two qemu images are identical or not.""" test.log.info("Compare two images are identical.") - cmd = [img1.image_cmd, "compare", "-f", img1.image_format, - "-F", img2.image_format, - img1.image_filename, img2.image_filename] + cmd = [ + img1.image_cmd, + "compare", + "-f", + img1.image_format, + "-F", + img2.image_format, + img1.image_filename, + img2.image_filename, + ] output = process.system_output(" ".join(cmd)).decode() if "Images are identical" not in output: - test.fail("%s and %s are not identical." % - (img1.image_filename, img2.image_filename)) + test.fail( + f"{img1.image_filename} and {img2.image_filename} are not identical." + ) def _create_external_snapshot(tag): """Create an external snapshot based on tag.""" @@ -40,8 +47,7 @@ def _verify_backing_file(output, backing_tag): return backing_param = params.object_params(backing_tag) backing = QemuImg(backing_param, img_root_dir, backing_tag) - if backing.image_filename not in json.loads( - output)["backing-filename"]: + if backing.image_filename not in json.loads(output)["backing-filename"]: test.fail("Backing file is not correct.") def _qemu_img_info(tag, backing_tag=None): @@ -55,16 +61,18 @@ def _qemu_img_info(tag, backing_tag=None): def _verify_resize(img): """Verify the image size is as expected after resize.""" img_size = json.loads(img.info(output="json"))["virtual-size"] - sign = (-1 if '-' in params["sn1_size_change"] else 1) - expected_size = (int(utils_numeric.normalize_data_size( - params["image_size"], "B")) + - int(utils_numeric.normalize_data_size( - params["sn1_size_change"], "B"))) * sign - test.log.info("Verify the size of %s is %s.", - img.image_filename, expected_size) + sign = -1 if "-" in params["sn1_size_change"] else 1 + expected_size = ( + int(utils_numeric.normalize_data_size(params["image_size"], "B")) + + int(utils_numeric.normalize_data_size(params["sn1_size_change"], "B")) + ) * sign + test.log.info( + "Verify the size of %s is %s.", img.image_filename, expected_size + ) if img_size != expected_size: - test.fail("Got image virtual size: %s, should be: %s." % - (img_size, expected_size)) + test.fail( + f"Got image virtual size: {img_size}, should be: {expected_size}." 
+ ) gen = generate_base_snapshot_pair(params["image_chain"]) img_root_dir = data_dir.get_data_dir() diff --git a/qemu/tests/convert_image_from_raw.py b/qemu/tests/convert_image_from_raw.py index 1f14d104d7..2397997e2f 100644 --- a/qemu/tests/convert_image_from_raw.py +++ b/qemu/tests/convert_image_from_raw.py @@ -23,6 +23,7 @@ def run(test, params, env): :param params: Dictionary with the test parameters :param env: Dictionary with test environment """ + def _get_img_obj_and_params(tag): """Get an QemuImg object and its params based on the tag.""" img_param = params.object_params(tag) @@ -32,29 +33,39 @@ def _get_img_obj_and_params(tag): def _compare_images(img1, img2, strict=False): """Compare two qemu images are identical or not.""" test.log.info("Compare two images, strict mode: %s.", strict) - cmd = [img1.image_cmd, "compare", "-f", img1.image_format, - "-F", img2.image_format, - img1.image_filename, img2.image_filename] + cmd = [ + img1.image_cmd, + "compare", + "-f", + img1.image_format, + "-F", + img2.image_format, + img1.image_filename, + img2.image_filename, + ] if strict: cmd.insert(2, "-s") res = process.run(" ".join(cmd), ignore_status=True) if strict: - if (res.exit_status != 1 and - "block status mismatch" not in res.stdout_text): + if res.exit_status != 1 and "block status mismatch" not in res.stdout_text: test.fail("qemu-img compare strict mode error.") else: if res.exit_status != 0: - test.fail("qemu-img compare error: %s." % res.stderr_text) + test.fail(f"qemu-img compare error: {res.stderr_text}.") if "Images are identical" not in res.stdout_text: - test.fail("%s and %s are not identical." % ( - img1.image_filename, img2.image_filename)) + test.fail( + f"{img1.image_filename} and {img2.image_filename} are not identical." + ) file = params["guest_file_name"] initial_tag = params["images"].split()[0] c_tag = params["convert_target"] - test.log.info("Boot a guest up from initial image: %s, and create a" - " file %s on the disk.", initial_tag, file) + test.log.info( + "Boot a guest up from initial image: %s, and create a" " file %s on the disk.", + initial_tag, + file, + ) base_qit = QemuImgTest(test, params, env, initial_tag) base_qit.start_vm() md5 = base_qit.save_file(file) @@ -63,8 +74,12 @@ def _compare_images(img1, img2, strict=False): cache_mode = params.get("cache_mode") if cache_mode: - test.log.info("Convert initial image %s to %s with cache mode %s.", - initial_tag, c_tag, cache_mode) + test.log.info( + "Convert initial image %s to %s with cache mode %s.", + initial_tag, + c_tag, + cache_mode, + ) else: test.log.info("Convert initial image %s to %s", initial_tag, c_tag) img, img_param = _get_img_obj_and_params(initial_tag) @@ -79,8 +94,9 @@ def _compare_images(img1, img2, strict=False): c_qit = QemuImgTest(test, params, env, c_tag) c_qit.start_vm() if not c_qit.check_file(file, md5): - test.fail("The file %s's md5 on initial image and" - " target file are different." % file) + test.fail( + f"The file {file}'s md5 on initial image and" " target file are different." 
+ ) c_qit.destroy_vm() test.log.info("Check image %s.", c_tag) diff --git a/qemu/tests/convert_to_virtual_disk.py b/qemu/tests/convert_to_virtual_disk.py index 7134610060..91bb7bc20a 100755 --- a/qemu/tests/convert_to_virtual_disk.py +++ b/qemu/tests/convert_to_virtual_disk.py @@ -1,6 +1,5 @@ from avocado import fail_on from avocado.utils import process - from virttest import data_dir from virttest.qemu_storage import QemuImg @@ -15,13 +14,14 @@ def run(test, params, env): :param params: Dictionary with the test parameters. :param env: Dictionary with test environment. """ + def setup_loop_dev(image_path): """ Setup a loop device with a file image. :param image_path: The path to the image used to setup loop device :return: The loop device under /dev """ - cmd_result = process.run("losetup -f %s --show" % image_path) + cmd_result = process.run(f"losetup -f {image_path} --show") return cmd_result.stdout_text.strip() def free_loop_dev(loop_dev): @@ -29,7 +29,7 @@ def free_loop_dev(loop_dev): Free a loop device. :param loop_dev: The loop device will be free """ - process.run("losetup -d %s" % loop_dev) + process.run(f"losetup -d {loop_dev}") src_img = params["images"].split()[0] mid_img = params["images"].split()[-1] @@ -46,11 +46,15 @@ def free_loop_dev(loop_dev): target = setup_loop_dev(mid_filename) params["image_name_target"] = target - test.log.debug("Convert from %s to %s with cache mode none.", - source.image_filename, mid_filename) + test.log.debug( + "Convert from %s to %s with cache mode none.", + source.image_filename, + mid_filename, + ) try: fail_on((process.CmdError,))(source.convert)( - params.object_params(src_img), root_dir, cache_mode="none") + params.object_params(src_img), root_dir, cache_mode="none" + ) finally: test.log.info("Clean the loop device.") free_loop_dev(target) diff --git a/qemu/tests/cpu_add.py b/qemu/tests/cpu_add.py index 2a7fb953a3..aab19801e3 100644 --- a/qemu/tests/cpu_add.py +++ b/qemu/tests/cpu_add.py @@ -1,9 +1,7 @@ import re import time -from virttest import error_context -from virttest import utils_test -from virttest import utils_misc +from virttest import error_context, utils_misc, utils_test @error_context.context_aware @@ -28,6 +26,7 @@ def run(test, params, env): :param params: Dictionary with test parameters. :param env: Dictionary with the test environment. """ + def get_clock_offset(session, ntp_query_cmd): """ Get guest clock offset between ntp service; @@ -44,18 +43,22 @@ def qemu_guest_cpu_match(vm, vcpu_been_pluged=0, wait_time=300): Check Whether the vcpus are matche """ total_cpus_expected = int(vm.cpuinfo.smp) + int(vcpu_been_pluged) - if utils_misc.wait_for(lambda: ((total_cpus_expected == - vm.get_cpu_count()) and - (vm.get_cpu_count() == - len(vm.vcpu_threads))), - wait_time, first=10, step=5.0): + if utils_misc.wait_for( + lambda: ( + (total_cpus_expected == vm.get_cpu_count()) + and (vm.get_cpu_count() == len(vm.vcpu_threads)) + ), + wait_time, + first=10, + step=5.0, + ): test.log.info("Cpu number in cmd_line, qemu and guest are match") return True err_msg = "Cpu mismatch! " - err_msg += "after hotplug %s vcpus, " % vcpu_been_pluged - err_msg += "there shoule be %s vcpus exist, " % total_cpus_expected - err_msg += "in qemu %s vcpus threads works, " % len(vm.vcpu_threads) - err_msg += "in guest %s cpus works." 
% vm.get_cpu_count() + err_msg += f"after hotplug {vcpu_been_pluged} vcpus, " + err_msg += f"there shoule be {total_cpus_expected} vcpus exist, " + err_msg += f"in qemu {len(vm.vcpu_threads)} vcpus threads works, " + err_msg += f"in guest {vm.get_cpu_count()} cpus works." test.fail(err_msg) def cpu_online_offline(session, cpu_id, online=""): @@ -66,12 +69,13 @@ def cpu_online_offline(session, cpu_id, online=""): online = 1 else: online = 0 - online_file = "/sys/devices/system/cpu/cpu%s/online" % cpu_id - if session.cmd_status("test -f %s" % online_file): - test.log.info("online file %s not exist, just pass the cpu%s", - online_file, cpu_id) + online_file = f"/sys/devices/system/cpu/cpu{cpu_id}/online" + if session.cmd_status(f"test -f {online_file}"): + test.log.info( + "online file %s not exist, just pass the cpu%s", online_file, cpu_id + ) return - session.cmd("echo %s > %s " % (online, online_file)) + session.cmd(f"echo {online} > {online_file} ") def onoff_para_opt(onoff_params): """ @@ -82,8 +86,7 @@ def onoff_para_opt(onoff_params): offline = onoff_params.split(",") for item in offline: if "-" in item: - onoff_list += range(int(item.split("-")[0]), - int(item.split("-")[1])) + onoff_list += range(int(item.split("-")[0]), int(item.split("-")[1])) else: onoff_list.append(item) return [str(i) for i in onoff_list] @@ -95,8 +98,7 @@ def onoff_para_opt(onoff_params): ntp_query_cmd = params.get("ntp_query_cmd", "") ntp_sync_cmd = params.get("ntp_sync_cmd", "") - error_context.context("Boot the vm, with '-smp X,maxcpus=Y' option", - test.log.info) + error_context.context("Boot the vm, with '-smp X,maxcpus=Y' option", test.log.info) vm = env.get_vm(params["main_vm"]) vm.verify_alive() session = vm.wait_for_login(timeout=timeout) @@ -108,23 +110,23 @@ def onoff_para_opt(onoff_params): error_context.context("sync guest time via ntp server", test.log.info) session.cmd(ntp_sync_cmd) - error_context.context("Check if cpus in guest match qemu " - "cmd before hotplug", test.log.info) + error_context.context( + "Check if cpus in guest match qemu " "cmd before hotplug", test.log.info + ) qemu_guest_cpu_match(vm) # do pre_operation like stop, before vcpu Hotplug stop_before_hotplug = params.get("stop_before_hotplug", "no") - if stop_before_hotplug == 'yes': - error_context.context("Stop the guest before hotplug vcpu", - test.log.info) + if stop_before_hotplug == "yes": + error_context.context("Stop the guest before hotplug vcpu", test.log.info) vm.pause() error_context.context("Do cpu hotplug", test.log.info) - if vm.monitor.protocol == 'human': + if vm.monitor.protocol == "human": human_check_info = params.get("human_error_recheck", None) qmp_check_info = None hotplug_add_cmd = "" - elif vm.monitor.protocol == 'qmp': + elif vm.monitor.protocol == "qmp": qmp_check_info = params.get("qmp_error_recheck", None) hotplug_add_cmd = params.get("vcpu_add_cmd", "") if hotplug_add_cmd: @@ -136,7 +138,7 @@ def onoff_para_opt(onoff_params): vcpu_been_pluged = 0 for i in range(vcpu_need_hotplug): - hotplug_vcpu_params = params.object_params("hotplug_vcpu%s" % i) + hotplug_vcpu_params = params.object_params(f"hotplug_vcpu{i}") plug_cpu_id = len(vm.vcpu_threads) plug_cpu_id = hotplug_vcpu_params.get("cpuid", plug_cpu_id) @@ -146,8 +148,7 @@ def onoff_para_opt(onoff_params): if not qmp_check_info and not human_check_info: vcpu_been_pluged += 1 test.log.info("Cpu%s hotplug successfully", plug_cpu_id) - test.log.info("Now '%s' cpus have been hotpluged", - vcpu_been_pluged) + test.log.info("Now '%s' cpus have been 
hotpluged", vcpu_been_pluged) continue else: err_msg = "Qemu should report error, but hotplug successfully" @@ -161,23 +162,21 @@ def onoff_para_opt(onoff_params): test.log.info(msg, plug_cpu_id) test.log.debug("QMP error info: '%s'", output) continue - elif (human_check_info and - re.findall(human_check_info, output, re.I)): + elif human_check_info and re.findall(human_check_info, output, re.I): msg = "Hotplug vcpu(id:'%s') error, qemu report the error" test.log.info(msg, plug_cpu_id) test.log.debug("Error info: '%s'", output) continue else: err_msg = "Hotplug error! " - err_msg += "the hotplug cpu_id is: '%s', " % plug_cpu_id - err_msg += "the maxcpus allowed is: '%s', " % maxcpus - err_msg += "qemu cpu list is:'%s'" % vm.monitor.info("cpus") + err_msg += f"the hotplug cpu_id is: '{plug_cpu_id}', " + err_msg += f"the maxcpus allowed is: '{maxcpus}', " + err_msg += "qemu cpu list is:'{}'".format(vm.monitor.info("cpus")) test.log.debug("The error info is:\n '%s'", output) test.fail(err_msg) if stop_before_hotplug == "yes": - error_context.context("Resume the guest after cpu hotplug", - test.log.info) + error_context.context("Resume the guest after cpu hotplug", test.log.info) vm.resume() if params.get("reboot_after_hotplug", False): @@ -185,13 +184,14 @@ def onoff_para_opt(onoff_params): vm.reboot() if vcpu_been_pluged != 0: - error_context.context("Check whether cpus are match after hotplug", - test.log.info) + error_context.context( + "Check whether cpus are match after hotplug", test.log.info + ) qemu_guest_cpu_match(vm, vcpu_been_pluged) error_context.context("Do cpu online/offline in guest", test.log.info) # Window guest doesn't support online/offline test - if params['os_type'] == "windows": + if params["os_type"] == "windows": test.log.info("For windows guest not do online/offline test") return @@ -214,32 +214,37 @@ def onoff_para_opt(onoff_params): test.log.info("sleep %s seconds", onoff_iterations) time.sleep(onoff_iterations) if ntp_query_cmd: - error_context.context("Check guest clock after online cpu", - test.log.info) + error_context.context( + "Check guest clock after online cpu", test.log.info + ) current_offset = get_clock_offset(session, ntp_query_cmd) if current_offset > acceptable_offset: - test.fail("time drift(%ss)" % current_offset + - "after online cpu(%s)" % offline_cpu) + test.fail( + f"time drift({current_offset}s)" + + f"after online cpu({offline_cpu})" + ) for online_cpu in online_list: cpu_online_offline(session, online_cpu, "online") test.log.info("sleep %s seconds", onoff_iterations) time.sleep(onoff_iterations) if ntp_query_cmd: - error_context.context("Check guest clock after offline cpu", - test.log.info) + error_context.context( + "Check guest clock after offline cpu", test.log.info + ) current_offset = get_clock_offset(session, ntp_query_cmd) if current_offset > acceptable_offset: - test.fail("time drift(%s)" % current_offset + - "after offline cpu(%s)" % online_cpu) + test.fail( + f"time drift({current_offset})" + + f"after offline cpu({online_cpu})" + ) # do sub test after cpu hotplug - if (params.get("run_sub_test", "no") == "yes" and - 'sub_test_name' in params): - sub_test = params['sub_test_name'] - error_context.context("Run subtest %s after cpu hotplug" % sub_test, - test.log.info) - if (sub_test == "guest_suspend" and - params["guest_suspend_type"] == "disk"): + if params.get("run_sub_test", "no") == "yes" and "sub_test_name" in params: + sub_test = params["sub_test_name"] + error_context.context( + f"Run subtest {sub_test} after cpu hotplug", 
test.log.info + ) + if sub_test == "guest_suspend" and params["guest_suspend_type"] == "disk": vm.params["smp"] = int(vm.cpuinfo.smp) + vcpu_been_pluged vcpu_been_pluged = 0 utils_test.run_virt_sub_test(test, params, env, sub_type=sub_test) @@ -250,8 +255,7 @@ def onoff_para_opt(onoff_params): session = vm.wait_for_login(timeout=timeout) if params.get("vcpu_num_rechek", "yes") == "yes": - error_context.context("Recheck cpu numbers after operation", - test.log.info) + error_context.context("Recheck cpu numbers after operation", test.log.info) qemu_guest_cpu_match(vm, vcpu_been_pluged) if session: diff --git a/qemu/tests/cpu_device_hotplug.py b/qemu/tests/cpu_device_hotplug.py index 4dc79727c6..98d67487c4 100644 --- a/qemu/tests/cpu_device_hotplug.py +++ b/qemu/tests/cpu_device_hotplug.py @@ -1,9 +1,7 @@ import re import time -from virttest import error_context -from virttest import utils_misc -from virttest import cpu +from virttest import cpu, error_context, utils_misc @error_context.context_aware @@ -13,16 +11,16 @@ def run(test, params, env): """ def hotplug(vm, current_cpus, total_cpus, vcpu_threads): - for cpu in range(current_cpus, total_cpus): - error_context.context("hot-pluging vCPU %s" % cpu, test.log.info) - vm.hotplug_vcpu(cpu_id=cpu, plug_command=hotplug_cmd) + for vcpu in range(current_cpus, total_cpus): + error_context.context(f"hot-pluging vCPU {vcpu}", test.log.info) + vm.hotplug_vcpu(cpu_id=vcpu, plug_command=hotplug_cmd) time.sleep(0.1) time.sleep(5) def hotunplug(vm, current_cpus, total_cpus, vcpu_threads): - for cpu in range(current_cpus, total_cpus): - error_context.context("hot-unpluging vCPU %s" % cpu, test.log.info) - vm.hotplug_vcpu(cpu_id=cpu, plug_command=unplug_cmd, unplug="yes") + for vcpu in range(current_cpus, total_cpus): + error_context.context(f"hot-unpluging vCPU {vcpu}", test.log.info) + vm.hotplug_vcpu(cpu_id=vcpu, plug_command=unplug_cmd, unplug="yes") time.sleep(0.1) # Need more time to unplug, so sleeping more than hotplug. 
time.sleep(10) @@ -34,38 +32,49 @@ def verify(vm, total_cpus): cpu_regexp = re.compile(r"CPU #(\d+)") total_cpus_monitor = len(cpu_regexp.findall(output)) if total_cpus_monitor != total_cpus: - test.fail("Monitor reports %s CPUs, when VM should have" - " %s" % (total_cpus_monitor, total_cpus)) - error_context.context("hotplugging finished, let's wait a few sec and" - " check CPUs quantity in guest.", test.log.info) - if not utils_misc.wait_for(lambda: cpu.check_if_vm_vcpu_match( - total_cpus, vm), - 60 + total_cpus, first=10, - step=5.0, text="retry later"): + test.fail( + f"Monitor reports {total_cpus_monitor} CPUs, when VM should have" + f" {total_cpus}" + ) + error_context.context( + "hotplugging finished, let's wait a few sec and" + " check CPUs quantity in guest.", + test.log.info, + ) + if not utils_misc.wait_for( + lambda: cpu.check_if_vm_vcpu_match(total_cpus, vm), + 60 + total_cpus, + first=10, + step=5.0, + text="retry later", + ): test.fail("CPU quantity mismatch cmd after hotplug !") - error_context.context("rebooting the vm and check CPU quantity !", - test.log.info) - session = vm.reboot() + error_context.context( + "rebooting the vm and check CPU quantity !", test.log.info + ) + vm.reboot() if not cpu.check_if_vm_vcpu_match(total_cpus, vm): test.fail("CPU quantity mismatch cmd after hotplug and reboot !") - error_context.context("boot the vm, with '-smp X,maxcpus=Y' option," - "thus allow hotplug vcpu", test.log.info) + error_context.context( + "boot the vm, with '-smp X,maxcpus=Y' option," "thus allow hotplug vcpu", + test.log.info, + ) vm = env.get_vm(params["main_vm"]) vm.verify_alive() timeout = int(params.get("login_timeout", 360)) - session = vm.wait_for_login(timeout=timeout) + vm.wait_for_login(timeout=timeout) n_cpus_add = int(params.get("n_cpus_add", 1)) n_cpus_remove = int(params.get("n_cpus_remove", 1)) maxcpus = int(params.get("maxcpus", 240)) current_cpus = int(params.get("smp", 2)) - onoff_iterations = int(params.get("onoff_iterations", 20)) + int(params.get("onoff_iterations", 20)) hotplug_cmd = params.get("cpu_hotplug_cmd", "") unplug_cmd = params.get("cpu_hotunplug_cmd", "") - vcpu_cores = int(params.get("vcpu_cores", 1)) + int(params.get("vcpu_cores", 1)) vcpu_threads = int(params.get("vcpu_threads", 1)) cpu_model = params.get("cpu_model", "host") unplug = params.get("unplug", "no") @@ -77,14 +86,15 @@ def verify(vm, total_cpus): hotplug_cmd = hotplug_cmd.replace("CPU_MODEL", cpu_model) if (n_cpus_add * vcpu_threads) + current_cpus > maxcpus: - test.log.warn("CPU quantity more than maxcpus, set it to %s", maxcpus) + test.log.warning("CPU quantity more than maxcpus, set it to %s", maxcpus) total_cpus = maxcpus else: total_cpus = current_cpus + (n_cpus_add * vcpu_threads) test.log.info("current_cpus=%s, total_cpus=%s", current_cpus, total_cpus) - error_context.context("check if CPUs in guest matches qemu cmd " - "before hot-plug", test.log.info) + error_context.context( + "check if CPUs in guest matches qemu cmd " "before hot-plug", test.log.info + ) if not cpu.check_if_vm_vcpu_match(current_cpus, vm): test.error("CPU quantity mismatch cmd before hotplug !") hotplug(vm, current_cpus, total_cpus, vcpu_threads) diff --git a/qemu/tests/cpu_device_hotplug_during_boot.py b/qemu/tests/cpu_device_hotplug_during_boot.py index 993f99f0e7..ec721c4952 100644 --- a/qemu/tests/cpu_device_hotplug_during_boot.py +++ b/qemu/tests/cpu_device_hotplug_during_boot.py @@ -21,21 +21,26 @@ def run(test, params, env): """ vcpu_devices = params.objects("vcpu_devices") 
unplug_during_boot = params.get_boolean("unplug_during_boot") - boot_patterns = [r".*Started udev Wait for Complete Device Initialization.*", - r".*Finished .*Wait for udev To Complete Device Initialization.*"] - reboot_patterns = [r".*[Rr]ebooting.*", r".*[Rr]estarting system.*", - r".*[Mm]achine restart.*"] + boot_patterns = [ + r".*Started udev Wait for Complete Device Initialization.*", + r".*Finished .*Wait for udev To Complete Device Initialization.*", + ] + reboot_patterns = [ + r".*[Rr]ebooting.*", + r".*[Rr]estarting system.*", + r".*[Mm]achine restart.*", + ] vm = env.get_vm(params["main_vm"]) vm.verify_alive() - error_context.base_context("Hotplug vCPU devices during boot stage.", - test.log.info) + error_context.base_context("Hotplug vCPU devices during boot stage.", test.log.info) error_context.context("Verify guest is in the boot stage.", test.log.info) vm.serial_console.read_until_any_line_matches(boot_patterns) - error_context.context("Hotplug vCPU devices, waiting for guest alive.", - test.log.info) + error_context.context( + "Hotplug vCPU devices, waiting for guest alive.", test.log.info + ) for vcpu_device in vcpu_devices: vm.hotplug_vcpu_device(vcpu_device) vm.wait_for_login().close() @@ -48,23 +53,29 @@ def run(test, params, env): # 1) vm.reboot() will return a new session, which is not what we want. # 2) Send reboot command directly because it will close the ssh client # so we can not get the command status. - error_context.base_context("Reboot guest to boot stage, hotunplug the " - "vCPU device.", test.log.info) + error_context.base_context( + "Reboot guest to boot stage, hotunplug the " "vCPU device.", test.log.info + ) vm.wait_for_login().sendline(params["reboot_command"]) - error_context.context("Verify guest is in boot stage after reboot.", - test.log.info) + error_context.context( + "Verify guest is in boot stage after reboot.", test.log.info + ) vm.serial_console.read_until_any_line_matches(reboot_patterns) vm.serial_console.read_until_any_line_matches(boot_patterns) - error_context.context("Hotunplug vCPU devices, waiting for guest " - "alive.", test.log.info) + error_context.context( + "Hotunplug vCPU devices, waiting for guest " "alive.", test.log.info + ) for vcpu_device in reversed(vcpu_devices): vm.hotunplug_vcpu_device(vcpu_device) vm.wait_for_login().close() - error_context.context("Check number of CPU inside guest after unplug.", - test.log.info) + error_context.context( + "Check number of CPU inside guest after unplug.", test.log.info + ) if not cpu_utils.check_if_vm_vcpus_match_qemu(vm): - test.fail("Actual number of guest CPUs is not equal to expected " - "after hotunplug.") + test.fail( + "Actual number of guest CPUs is not equal to expected " + "after hotunplug." 
+ ) diff --git a/qemu/tests/cpu_device_hotplug_maximum.py b/qemu/tests/cpu_device_hotplug_maximum.py index e8b52ad0aa..255ac54ffb 100644 --- a/qemu/tests/cpu_device_hotplug_maximum.py +++ b/qemu/tests/cpu_device_hotplug_maximum.py @@ -2,14 +2,10 @@ from os import uname from avocado.utils import cpu - -from virttest import error_context -from virttest import utils_misc -from virttest import utils_qemu +from virttest import error_context, utils_misc, utils_qemu from virttest.utils_version import VersionInterval -from provider import cpu_utils -from provider import win_wora +from provider import cpu_utils, win_wora @error_context.context_aware @@ -36,32 +32,32 @@ def run(test, params, env): verify_wait_timeout = params.get_numeric("verify_wait_timeout", 300) qemu_binary = utils_misc.get_qemu_binary(params) machine_info = utils_qemu.get_machines_info(qemu_binary)[machine_type] - machine_info = re.search(r'\(alias of (\S+)\)', machine_info) + machine_info = re.search(r"\(alias of (\S+)\)", machine_info) current_machine = machine_info.group(1) if machine_info else machine_type - supported_maxcpus = (params.get_numeric("vcpu_maxcpus") or - utils_qemu.get_maxcpus_hard_limit(qemu_binary, - current_machine)) + supported_maxcpus = params.get_numeric( + "vcpu_maxcpus" + ) or utils_qemu.get_maxcpus_hard_limit(qemu_binary, current_machine) if not params.get_boolean("allow_pcpu_overcommit"): supported_maxcpus = min(supported_maxcpus, cpu.online_count()) test.log.info("Define the CPU topology of guest") vcpu_devices = [] - if (cpu.get_vendor() == "amd" and - params.get_numeric("vcpu_threads") != 1): + if cpu.get_vendor() == "amd" and params.get_numeric("vcpu_threads") != 1: test.cancel("AMD cpu does not support multi threads") elif machine_type.startswith("pseries"): host_kernel_ver = uname()[2].split("-")[0] if params.get_numeric("vcpu_threads") == 8: supported_maxcpus -= divmod(supported_maxcpus, 8)[1] - vcpu_devices = ["vcpu%d" % c for c in - range(1, supported_maxcpus // 8)] + vcpu_devices = ["vcpu%d" % c for c in range(1, supported_maxcpus // 8)] # The maximum value of vcpu_id in 'linux-3.x' is 2048, so # (vcpu_id * ms->smp.threads / spapr->vsmt) <= 256, need to adjust it - elif (supported_maxcpus > 256 and - host_kernel_ver not in VersionInterval("[4, )")): + elif supported_maxcpus > 256 and host_kernel_ver not in VersionInterval( + "[4, )" + ): supported_maxcpus = 256 - vcpu_devices = vcpu_devices or ["vcpu%d" % vcpu for vcpu in - range(1, supported_maxcpus)] + vcpu_devices = vcpu_devices or [ + "vcpu%d" % vcpu for vcpu in range(1, supported_maxcpus) + ] params["vcpu_maxcpus"] = str(supported_maxcpus) params["vcpu_devices"] = " ".join(vcpu_devices) params["start_vm"] = "yes" @@ -77,11 +73,12 @@ def run(test, params, env): if params.get_boolean("workaround_need"): win_wora.modify_driver(params, session) - error_context.context("Check the number of guest CPUs after startup", - test.log.info) + error_context.context("Check the number of guest CPUs after startup", test.log.info) if not cpu_utils.check_if_vm_vcpus_match_qemu(vm): - test.error("The number of guest CPUs is not equal to the qemu command " - "line configuration") + test.error( + "The number of guest CPUs is not equal to the qemu command " + "line configuration" + ) error_context.context("Hotplug all vCPU devices", test.log.info) for vcpu_device in vcpu_devices: @@ -89,20 +86,23 @@ def run(test, params, env): error_context.context("Check Number of vCPU in guest", test.log.info) if not utils_misc.wait_for( - lambda: 
cpu_utils.check_if_vm_vcpus_match_qemu(vm), - verify_wait_timeout, first=5, step=10): + lambda: cpu_utils.check_if_vm_vcpus_match_qemu(vm), + verify_wait_timeout, + first=5, + step=10, + ): test.fail(mismatch_text) if params.get_boolean("check_cpu_topology", True): error_context.context("Check CPU topology of guest", test.log.info) - if not cpu_utils.check_if_vm_vcpu_topology_match(session, os_type, - cpuinfo, test, - vm.devices): + if not cpu_utils.check_if_vm_vcpu_topology_match( + session, os_type, cpuinfo, test, vm.devices + ): test.fail("CPU topology of guest is not as expected.") session = vm.reboot(session, timeout=reboot_timeout) - if not cpu_utils.check_if_vm_vcpu_topology_match(session, os_type, - cpuinfo, test, - vm.devices): + if not cpu_utils.check_if_vm_vcpu_topology_match( + session, os_type, cpuinfo, test, vm.devices + ): test.fail("CPU topology of guest is not as expected after reboot.") if os_type == "linux": @@ -111,13 +111,16 @@ def run(test, params, env): hotplugged_vcpu = range(smp, supported_maxcpus) vcpu_list = "%d-%d" % (hotplugged_vcpu[0], hotplugged_vcpu[-1]) test.log.info("Offline vCPU: %s.", vcpu_list) - session.cmd("chcpu -d %s" % vcpu_list, timeout=len(hotplugged_vcpu)) + session.cmd(f"chcpu -d {vcpu_list}", timeout=len(hotplugged_vcpu)) if vm.get_cpu_count() != smp: test.error("Failed to offline all hotplugged vCPU.") for vcpu_device in reversed(vcpu_devices): vm.hotunplug_vcpu_device(vcpu_device, 10 * vcpus_count) if not utils_misc.wait_for( - lambda: cpu_utils.check_if_vm_vcpus_match_qemu(vm), - verify_wait_timeout, first=5, step=10): + lambda: cpu_utils.check_if_vm_vcpus_match_qemu(vm), + verify_wait_timeout, + first=5, + step=10, + ): test.fail(mismatch_text) session.close() diff --git a/qemu/tests/cpu_device_hotplug_time_jump.py b/qemu/tests/cpu_device_hotplug_time_jump.py index 8b16134ef8..4705b17426 100644 --- a/qemu/tests/cpu_device_hotplug_time_jump.py +++ b/qemu/tests/cpu_device_hotplug_time_jump.py @@ -1,5 +1,5 @@ -import time import re +import time from virttest import error_context @@ -24,7 +24,7 @@ def run(test, params, env): session = vm.wait_for_login() wait_time = params.get_numeric("wait_time") - error_context.context("Let guest run %s" % wait_time, test.log.info) + error_context.context(f"Let guest run {wait_time}", test.log.info) time.sleep(wait_time) error_context.context("Hotplug vCPU devices", test.log.info) @@ -39,10 +39,14 @@ def run(test, params, env): error_context.context("Check if guest has time jump", test.log.info) output = session.cmd_output("dmesg") session.close() - time1 = float(re.findall(r"^\[\s*(\d+\.?\d+)\].*CPU.*has been hot-added$", - output, re.M)[0]) - time2 = float(re.findall(r"^\[\s*(\d+\.?\d+)\].*Will online and init " - "hotplugged CPU", output, re.M)[0]) + time1 = float( + re.findall(r"^\[\s*(\d+\.?\d+)\].*CPU.*has been hot-added$", output, re.M)[0] + ) + time2 = float( + re.findall( + r"^\[\s*(\d+\.?\d+)\].*Will online and init " "hotplugged CPU", output, re.M + )[0] + ) time_gap = time2 - time1 test.log.info("The time gap is %.6fs", time_gap) expected_gap = params.get_numeric("expected_gap", target_type=float) diff --git a/qemu/tests/cpu_device_hotpluggable.py b/qemu/tests/cpu_device_hotpluggable.py index 86bcb81153..56e7403749 100644 --- a/qemu/tests/cpu_device_hotpluggable.py +++ b/qemu/tests/cpu_device_hotpluggable.py @@ -1,11 +1,8 @@ from aexpect import ShellCmdError - -from virttest import error_context -from virttest import utils_misc +from virttest import error_context, utils_misc from virttest.virt_vm 
import VMDeviceCheckError -from provider import cpu_utils -from provider import win_wora +from provider import cpu_utils, win_wora @error_context.context_aware @@ -24,65 +21,77 @@ def run(test, params, env): :param params: Dictionary with the test parameters. :param env: Dictionary with test environment. """ + def check_guest_cpu_count(): if not utils_misc.wait_for( - lambda: cpu_utils.check_if_vm_vcpus_match_qemu(vm), - verify_wait_timeout, first=sleep_after_change): + lambda: cpu_utils.check_if_vm_vcpus_match_qemu(vm), + verify_wait_timeout, + first=sleep_after_change, + ): test.fail("Actual number of guest CPUs is not equal to expected") def sub_hotunplug(): - error_context.context("Hotunplug vcpu devices after vcpu %s" - % hotpluggable_test, test.log.info) + error_context.context( + f"Hotunplug vcpu devices after vcpu {hotpluggable_test}", test.log.info + ) for plugged_dev in pluggable_vcpu_dev[::-1]: try: vm.hotunplug_vcpu_device(plugged_dev) except VMDeviceCheckError: if not vm.is_paused(): raise - test.log.warning("%s can not be unplugged directly because " - "guest is paused, will check again after " - "resume", plugged_dev) - vm.params["vcpu_enable_%s" % plugged_dev] = "no" + test.log.warning( + "%s can not be unplugged directly because " + "guest is paused, will check again after " + "resume", + plugged_dev, + ) + vm.params[f"vcpu_enable_{plugged_dev}"] = "no" def sub_reboot(): - error_context.context("Reboot guest after vcpu %s" - % hotpluggable_test, test.log.info) - vm.reboot(session=session, method=params["reboot_method"], - timeout=login_timeout) + error_context.context( + f"Reboot guest after vcpu {hotpluggable_test}", test.log.info + ) + vm.reboot( + session=session, method=params["reboot_method"], timeout=login_timeout + ) def sub_shutdown(): - error_context.context("Shutdown guest after vcpu %s" - % hotpluggable_test, test.log.info) + error_context.context( + f"Shutdown guest after vcpu {hotpluggable_test}", test.log.info + ) shutdown_method = params["shutdown_method"] if shutdown_method == "shell": session.sendline(params["shutdown_command"]) - error_context.context("waiting VM to go down (guest shell cmd)", - test.log.info) + error_context.context( + "waiting VM to go down (guest shell cmd)", test.log.info + ) elif shutdown_method == "system_powerdown": vm.monitor.system_powerdown() - error_context.context("waiting VM to go down (qemu monitor cmd)", - test.log.info) + error_context.context( + "waiting VM to go down (qemu monitor cmd)", test.log.info + ) if not vm.wait_for_shutdown(360): - test.fail("Guest refuses to go down after vcpu %s" - % hotpluggable_test) + test.fail(f"Guest refuses to go down after vcpu {hotpluggable_test}") def sub_migrate(): - sub_migrate_reboot = sub_reboot - sub_migrate_hotunplug = sub_hotunplug - error_context.context("Migrate guest after vcpu %s" - % hotpluggable_test, test.log.info) + error_context.context( + f"Migrate guest after vcpu {hotpluggable_test}", test.log.info + ) vm.migrate() vm.verify_alive() sub_test_after_migrate = params.objects("sub_test_after_migrate") while sub_test_after_migrate: check_guest_cpu_count() sub_test = sub_test_after_migrate.pop(0) - error_context.context("%s after migration completed" % sub_test) - eval("sub_migrate_%s" % sub_test)() + error_context.context(f"{sub_test} after migration completed") + {"reboot": sub_reboot, "hotunplug": sub_hotunplug}[sub_test]() def sub_online_offline(): - error_context.context("Offline then online guest CPUs after vcpu %s" - % hotpluggable_test, test.log.info) + error_context.context( + f"Offline then 
online guest CPUs after vcpu {hotpluggable_test}", + test.log.info, + ) cpu_ids = list(current_guest_cpu_ids - guest_cpu_ids) cpu_ids.sort() cmd = "echo %d > /sys/devices/system/cpu/cpu%d/online" @@ -90,8 +99,7 @@ def sub_online_offline(): for cpu_id in cpu_ids[::-1]: session.cmd(cmd % (0, cpu_id)) if not cpu_utils.check_if_vm_vcpu_match(cpu_count_before_test, vm): - test.fail( - "Actual number of guest CPUs is not equal to expected") + test.fail("Actual number of guest CPUs is not equal to expected") for cpu_id in cpu_ids: session.cmd(cmd % (1, cpu_id)) except ShellCmdError as err: @@ -99,8 +107,9 @@ def sub_online_offline(): test.error("Failed to change the CPU state on guest.") def sub_pause_resume(): - error_context.context("Pause guest to hotunplug all vcpu devices", - test.log.info) + error_context.context( + "Pause guest to hotunplug all vcpu devices", test.log.info + ) vm.pause() sub_hotunplug() error_context.context("Resume guest after hotunplug") @@ -117,11 +126,13 @@ def sub_pause_resume(): vm = env.get_vm(params["main_vm"]) maxcpus = vm.cpuinfo.maxcpus if not params.objects("vcpu_devices"): - vcpus_count = (vm.cpuinfo.threads if - params["machine_type"].startswith("pseries") else 1) + vcpus_count = ( + vm.cpuinfo.threads if params["machine_type"].startswith("pseries") else 1 + ) pluggable_cpus = vm.cpuinfo.maxcpus // vcpus_count // 2 - params["vcpu_devices"] = " ".join(["vcpu%d" % (count + 1) for count in - range(pluggable_cpus)]) + params["vcpu_devices"] = " ".join( + ["vcpu%d" % (count + 1) for count in range(pluggable_cpus)] + ) vm.destroy() if len(params.objects("vcpu_devices")) < 2: test.cancel("Insufficient maxcpus for multi-CPU hotplug") @@ -136,11 +147,12 @@ def sub_pause_resume(): cpu_count_before_test = vm.get_cpu_count() guest_cpu_ids = cpu_utils.get_guest_cpu_ids(session, os_type) - error_context.context("Check the number of guest CPUs after startup", - test.log.info) + error_context.context("Check the number of guest CPUs after startup", test.log.info) if not cpu_utils.check_if_vm_vcpus_match_qemu(vm): - test.error("The number of guest CPUs is not equal to the qemu command " - "line configuration") + test.error( + "The number of guest CPUs is not equal to the qemu command " + "line configuration" + ) if hotpluggable_test == "hotplug": pluggable_vcpu_dev = vcpu_devices @@ -151,24 +163,21 @@ def sub_pause_resume(): win_wora.modify_driver(params, session) if params.get("pause_vm_before_hotplug", "no") == "yes": - error_context.context("Pause guest before %s" % hotpluggable_test, - test.log.info) + error_context.context(f"Pause guest before {hotpluggable_test}", test.log.info) vm.pause() - error_context.context("%s all vcpu devices" % hotpluggable_test, - test.log.info) + error_context.context(f"{hotpluggable_test} all vcpu devices", test.log.info) for vcpu_dev in pluggable_vcpu_dev: - getattr(vm, "%s_vcpu_device" % hotpluggable_test)(vcpu_dev) + getattr(vm, f"{hotpluggable_test}_vcpu_device")(vcpu_dev) if vm.is_paused(): - error_context.context("Resume guest after %s" % hotpluggable_test, - test.log.info) + error_context.context(f"Resume guest after {hotpluggable_test}", test.log.info) vm.resume() check_guest_cpu_count() current_guest_cpu_ids = cpu_utils.get_guest_cpu_ids(session, os_type) if sub_test_type: - eval("sub_%s" % sub_test_type)() + eval(f"sub_{sub_test_type}")() # Close old session since guest maybe dead/reboot if session: session.close() @@ -177,10 +186,8 @@ def sub_pause_resume(): session = vm.wait_for_login(timeout=login_timeout) check_guest_cpu_count() 
if vm.get_cpu_count() == maxcpus and check_cpu_topology: - if not cpu_utils.check_if_vm_vcpu_topology_match(session, os_type, - vm.cpuinfo, - test, - vm.devices): + if not cpu_utils.check_if_vm_vcpu_topology_match( + session, os_type, vm.cpuinfo, test, vm.devices + ): session.close() - test.fail("CPU topology of guest is inconsistent with " - "expectations.") + test.fail("CPU topology of guest is inconsistent with " "expectations.") diff --git a/qemu/tests/cpu_device_hotpluggable_with_numa.py b/qemu/tests/cpu_device_hotpluggable_with_numa.py index fd32328c42..25c4e8c225 100644 --- a/qemu/tests/cpu_device_hotpluggable_with_numa.py +++ b/qemu/tests/cpu_device_hotpluggable_with_numa.py @@ -1,11 +1,8 @@ import re -from virttest import utils_misc -from virttest import error_context -from virttest import utils_package +from virttest import error_context, utils_misc, utils_package -from provider import cpu_utils -from provider import win_wora +from provider import cpu_utils, win_wora @error_context.context_aware @@ -35,7 +32,7 @@ def assign_numa_cpus(nodes, count): last = 0.0 while last < maxcpus: - numa_cpus_list.append(cpus[int(last):int(last + avg_count)]) + numa_cpus_list.append(cpus[int(last) : int(last + avg_count)]) last += avg_count return dict(zip(nodes, numa_cpus_list)) @@ -45,8 +42,7 @@ def get_guest_numa_cpus_info(): if os_type == "windows": return numa_out = session.cmd_output("numactl -H | grep cpus") - numa_cpus_info = re.findall(r"^node (\d+) cpus:([\d| ]*)$", - numa_out, re.M) + numa_cpus_info = re.findall(r"^node (\d+) cpus:([\d| ]*)$", numa_out, re.M) return dict(map(lambda x: (x[0], x[1].split()), numa_cpus_info)) os_type = params["os_type"] @@ -56,26 +52,26 @@ def get_guest_numa_cpus_info(): maxcpus = vm.cpuinfo.maxcpus alignment = vm.cpuinfo.threads if machine.startswith("pseries") else 1 if not params.objects("vcpu_devices"): - vcpus_count = (vm.cpuinfo.threads if machine.startswith("pseries") else 1) + vcpus_count = vm.cpuinfo.threads if machine.startswith("pseries") else 1 pluggable_cpus = vm.cpuinfo.maxcpus // vcpus_count // 2 - params["vcpu_devices"] = " ".join(["vcpu%d" % (count + 1) for count in - range(pluggable_cpus)]) + params["vcpu_devices"] = " ".join( + ["vcpu%d" % (count + 1) for count in range(pluggable_cpus)] + ) vm.destroy() if len(params.objects("vcpu_devices")) < 2: test.cancel("Insufficient maxcpus for multi-CPU hotplug") params["paused_after_start_vm"] = "no" - error_context.base_context("Define the cpu list for each numa node", - test.log.info) + error_context.base_context("Define the cpu list for each numa node", test.log.info) numa_nodes = params.objects("guest_numa_nodes") - node_ids = [params["numa_nodeid_%s" % node] for node in numa_nodes] + node_ids = [params[f"numa_nodeid_{node}"] for node in numa_nodes] node_cpus_mapping = assign_numa_cpus(node_ids, alignment) for node in numa_nodes: - params["numa_cpus_%s" % node] = ",".join( - node_cpus_mapping[params["numa_nodeid_%s" % node]]) + params[f"numa_cpus_{node}"] = ",".join( + node_cpus_mapping[params[f"numa_nodeid_{node}"]] + ) - error_context.context("Launch the guest with our assigned numa node", - test.log.info) + error_context.context("Launch the guest with our assigned numa node", test.log.info) vcpu_devices = params.objects("vcpu_devices") vm.create(params=params) if vm.is_paused(): @@ -85,47 +81,49 @@ def get_guest_numa_cpus_info(): if params.get_boolean("workaround_need"): win_wora.modify_driver(params, session) - error_context.context("Check the number of guest CPUs after startup", - 
test.log.info) + error_context.context("Check the number of guest CPUs after startup", test.log.info) if not cpu_utils.check_if_vm_vcpus_match_qemu(vm): - test.error("The number of guest CPUs is not equal to the qemu command " - "line configuration") + test.error( + "The number of guest CPUs is not equal to the qemu command " + "line configuration" + ) - if os_type == "linux" and not utils_package.package_install("numactl", - session): + if os_type == "linux" and not utils_package.package_install("numactl", session): test.cancel("Please install numactl to proceed") numa_before_plug = get_guest_numa_cpus_info() for vcpu_dev in vcpu_devices: - error_context.context("hotplug vcpu device: %s" % vcpu_dev, - test.log.info) + error_context.context(f"hotplug vcpu device: {vcpu_dev}", test.log.info) vm.hotplug_vcpu_device(vcpu_dev) - if not utils_misc.wait_for( - lambda: cpu_utils.check_if_vm_vcpus_match_qemu(vm), 10): + if not utils_misc.wait_for(lambda: cpu_utils.check_if_vm_vcpus_match_qemu(vm), 10): test.fail("Actual number of guest CPUs is not equal to expected") if os_type == "linux": - error_context.context("Check the CPU information of each numa node", - test.log.info) + error_context.context( + "Check the CPU information of each numa node", test.log.info + ) guest_numa_cpus = get_guest_numa_cpus_info() for node_id, node_cpus in node_cpus_mapping.items(): try: if guest_numa_cpus[node_id] != node_cpus: - test.log.debug("Current guest numa info:\n%s", - session.cmd_output("numactl -H")) - test.fail("The cpu obtained by guest is inconsistent with " - "we assigned.") + test.log.debug( + "Current guest numa info:\n%s", session.cmd_output("numactl -H") + ) + test.fail( + "The cpu obtained by guest is inconsistent with " "we assigned." + ) except KeyError: - test.error("Could not find node %s in guest." 
% node_id) + test.error(f"Could not find node {node_id} in guest.") test.log.info("Number of each CPU in guest matches what we assign.") for vcpu_dev in vcpu_devices[::-1]: - error_context.context("hotunplug vcpu device: %s" % vcpu_dev, - test.log.info) + error_context.context(f"hotunplug vcpu device: {vcpu_dev}", test.log.info) vm.hotunplug_vcpu_device(vcpu_dev) if not utils_misc.wait_for( - lambda: cpu_utils.check_if_vm_vcpus_match_qemu(vm), 10): + lambda: cpu_utils.check_if_vm_vcpus_match_qemu(vm), 10 + ): test.fail("Actual number of guest CPUs is not equal to expected") if get_guest_numa_cpus_info() != numa_before_plug: - test.log.debug("Current guest numa info:\n%s", - session.cmd_output("numactl -H")) + test.log.debug( + "Current guest numa info:\n%s", session.cmd_output("numactl -H") + ) test.fail("Numa info of guest is incorrect after vcpu hotunplug.") diff --git a/qemu/tests/cpu_device_hotpluggable_with_stress.py b/qemu/tests/cpu_device_hotpluggable_with_stress.py index 4aa2b982e1..6ec77e1e65 100644 --- a/qemu/tests/cpu_device_hotpluggable_with_stress.py +++ b/qemu/tests/cpu_device_hotpluggable_with_stress.py @@ -1,16 +1,12 @@ +import random import re import time -import random -from provider import cpu_utils -from provider import win_wora - -from virttest import arch -from virttest import error_context -from virttest import utils_misc -from virttest import utils_package +from virttest import arch, error_context, utils_misc, utils_package from virttest.utils_test import BackgroundTest +from provider import cpu_utils, win_wora + @error_context.context_aware def run(test, params, env): @@ -31,8 +27,10 @@ def run(test, params, env): def heavyload_install(): if session.cmd_status(test_installed_cmd) != 0: # pylint: disable=E0606 - test.log.warning("Could not find installed heavyload in guest, will" - " install it via winutils.iso ") + test.log.warning( + "Could not find installed heavyload in guest, will" + " install it via winutils.iso " + ) winutil_drive = utils_misc.get_winutils_vol(session) if not winutil_drive: test.cancel("WIN_UTILS CDROM not found.") @@ -40,15 +38,16 @@ def heavyload_install(): session.cmd(install_cmd) os_type = params["os_type"] - vm_arch_name = params.get('vm_arch_name', arch.ARCH) + vm_arch_name = params.get("vm_arch_name", arch.ARCH) login_timeout = params.get_numeric("login_timeout", 360) stress_duration = params.get_numeric("stress_duration", 180) verify_wait_timeout = params.get_numeric("verify_wait_timeout", 60) vm = env.get_vm(params["main_vm"]) if not params.objects("vcpu_devices"): - vcpus_count = (vm.cpuinfo.threads if - params["machine_type"].startswith("pseries") else 1) + vcpus_count = ( + vm.cpuinfo.threads if params["machine_type"].startswith("pseries") else 1 + ) pluggable_cpus = vm.cpuinfo.maxcpus // vcpus_count // 2 vcpu_devices = ["vcpu%d" % (count + 1) for count in range(pluggable_cpus)] params["vcpu_devices"] = " ".join(vcpu_devices) @@ -67,79 +66,88 @@ def heavyload_install(): if params.get_boolean("workaround_need"): win_wora.modify_driver(params, session) - error_context.context("Check the number of guest CPUs after startup", - test.log.info) + error_context.context("Check the number of guest CPUs after startup", test.log.info) if not cpu_utils.check_if_vm_vcpus_match_qemu(vm): - test.error("The number of guest CPUs is not equal to the qemu command " - "line configuration") + test.error( + "The number of guest CPUs is not equal to the qemu command " + "line configuration" + ) guest_cpu_ids = cpu_utils.get_guest_cpu_ids(session, 
os_type) for vcpu_dev in vcpu_devices: - error_context.context("Hotplug vcpu device: %s" % vcpu_dev, - test.log.info) + error_context.context(f"Hotplug vcpu device: {vcpu_dev}", test.log.info) vm.hotplug_vcpu_device(vcpu_dev) if not utils_misc.wait_for( - lambda: cpu_utils.check_if_vm_vcpus_match_qemu(vm), - verify_wait_timeout): + lambda: cpu_utils.check_if_vm_vcpus_match_qemu(vm), verify_wait_timeout + ): test.fail("Actual number of guest CPUs is not equal to expected") if os_type == "linux": stress_args = params["stress_args"] - stress_tool = cpu_utils.VMStressBinding(vm, params, - stress_args=stress_args) + stress_tool = cpu_utils.VMStressBinding(vm, params, stress_args=stress_args) current_guest_cpu_ids = cpu_utils.get_guest_cpu_ids(session, os_type) plugged_cpu_ids = list(current_guest_cpu_ids - guest_cpu_ids) plugged_cpu_ids.sort() for cpu_id in plugged_cpu_ids: - error_context.context("Run stress on vCPU(%d) inside guest." - % cpu_id, test.log.info) + error_context.context( + "Run stress on vCPU(%d) inside guest." % cpu_id, test.log.info + ) stress_tool.load_stress_tool(cpu_id) - error_context.context("Successfully launched stress sessions, execute " - "stress test for %d seconds" % stress_duration, - test.log.info) + error_context.context( + "Successfully launched stress sessions, execute " + "stress test for %d seconds" % stress_duration, + test.log.info, + ) time.sleep(stress_duration) if utils_package.package_install("sysstat", session): error_context.context("Check usage of guest CPUs", test.log.info) - mpstat_cmd = "mpstat 1 5 -P %s | cat" % ",".join( - map(str, plugged_cpu_ids)) + mpstat_cmd = "mpstat 1 5 -P {} | cat".format( + ",".join(map(str, plugged_cpu_ids)) + ) mpstat_out = session.cmd_output(mpstat_cmd) - cpu_stat = dict(re.findall(r"Average:\s+(\d+)\s+(\d+\.\d+)", - mpstat_out, re.M)) + cpu_stat = dict( + re.findall(r"Average:\s+(\d+)\s+(\d+\.\d+)", mpstat_out, re.M) + ) for cpu_id in plugged_cpu_ids: cpu_usage_rate = float(cpu_stat[str(cpu_id)]) if cpu_usage_rate < 50: - test.error("Stress test on vCPU(%s) failed, usage rate: " - "%.2f%%" % (cpu_id, cpu_usage_rate)) - test.log.info("Usage rate of vCPU(%s) is: %.2f%%", cpu_id, - cpu_usage_rate) + test.error( + f"Stress test on vCPU({cpu_id}) failed, usage rate: " + f"{cpu_usage_rate:.2f}%" + ) + test.log.info( + "Usage rate of vCPU(%s) is: %.2f%%", cpu_id, cpu_usage_rate + ) if not vm_arch_name.startswith("s390"): for vcpu_dev in vcpu_devices: - error_context.context("Hotunplug vcpu device: %s" % vcpu_dev, - test.log.info) + error_context.context( + f"Hotunplug vcpu device: {vcpu_dev}", test.log.info + ) vm.hotunplug_vcpu_device(vcpu_dev) # Drift the running stress task to other vCPUs time.sleep(random.randint(5, 10)) if not cpu_utils.check_if_vm_vcpus_match_qemu(vm): - test.fail("Actual number of guest CPUs is not equal to " - "expected") + test.fail("Actual number of guest CPUs is not equal to " "expected") stress_tool.unload_stress() stress_tool.clean() else: install_path = params["install_path"] - test_installed_cmd = 'dir "%s" | findstr /I heavyload' % install_path + test_installed_cmd = f'dir "{install_path}" | findstr /I heavyload' heavyload_install() error_context.context("Run heavyload inside guest.", test.log.info) - heavyload_bin = r'"%s\heavyload.exe" ' % install_path - heavyload_options = ["/CPU %d" % vm.get_cpu_count(), - "/DURATION %d" % (stress_duration // 60), - "/AUTOEXIT", - "/START"] + heavyload_bin = rf'"{install_path}\heavyload.exe" ' + heavyload_options = [ + "/CPU %d" % vm.get_cpu_count(), + 
"/DURATION %d" % (stress_duration // 60), + "/AUTOEXIT", + "/START", + ] start_cmd = heavyload_bin + " ".join(heavyload_options) - stress_tool = BackgroundTest(session.cmd, (start_cmd, stress_duration, - stress_duration)) + stress_tool = BackgroundTest( + session.cmd, (start_cmd, stress_duration, stress_duration) + ) stress_tool.start() - if not utils_misc.wait_for(stress_tool.is_alive, verify_wait_timeout, - first=5): + if not utils_misc.wait_for(stress_tool.is_alive, verify_wait_timeout, first=5): test.error("Failed to start heavyload process.") stress_tool.join(stress_duration) diff --git a/qemu/tests/cpu_hotplug.py b/qemu/tests/cpu_hotplug.py index 9a4eab3a9f..eb98d7afec 100644 --- a/qemu/tests/cpu_hotplug.py +++ b/qemu/tests/cpu_hotplug.py @@ -1,10 +1,7 @@ import os import re -from virttest import error_context -from virttest import utils_test -from virttest import utils_misc -from virttest import cpu +from virttest import cpu, error_context, utils_misc, utils_test @error_context.context_aware @@ -27,8 +24,10 @@ def run(test, params, env): :param params: Dictionary with test parameters. :param env: Dictionary with the test environment. """ - error_context.context("boot the vm, with '-smp X,maxcpus=Y' option," - "thus allow hotplug vcpu", test.log.info) + error_context.context( + "boot the vm, with '-smp X,maxcpus=Y' option," "thus allow hotplug vcpu", + test.log.info, + ) vm = env.get_vm(params["main_vm"]) vm.verify_alive() @@ -42,18 +41,19 @@ def run(test, params, env): cpu_hotplug_cmd = params.get("cpu_hotplug_cmd", "") if n_cpus_add + current_cpus > maxcpus: - test.log.warn("CPU quantity more than maxcpus, set it to %s", maxcpus) + test.log.warning("CPU quantity more than maxcpus, set it to %s", maxcpus) total_cpus = maxcpus else: total_cpus = current_cpus + n_cpus_add - error_context.context("check if CPUs in guest matches qemu cmd " - "before hot-plug", test.log.info) + error_context.context( + "check if CPUs in guest matches qemu cmd " "before hot-plug", test.log.info + ) if not cpu.check_if_vm_vcpu_match(current_cpus, vm): test.error("CPU quantity mismatch cmd before hotplug !") for cpuid in range(current_cpus, total_cpus): - error_context.context("hot-pluging vCPU %s" % cpuid, test.log.info) + error_context.context(f"hot-pluging vCPU {cpuid}", test.log.info) vm.hotplug_vcpu(cpu_id=cpuid, plug_command=cpu_hotplug_cmd) output = vm.monitor.send_args_cmd("info cpus") @@ -62,28 +62,35 @@ def run(test, params, env): cpu_regexp = re.compile(r"CPU #(\d+)") total_cpus_monitor = len(cpu_regexp.findall(output)) if total_cpus_monitor != total_cpus: - test.fail("Monitor reports %s CPUs, when VM should have" - " %s" % (total_cpus_monitor, total_cpus)) + test.fail( + f"Monitor reports {total_cpus_monitor} CPUs, when VM should have" + f" {total_cpus}" + ) # Windows is a little bit lazy that needs more secs to recognize. 
- error_context.context("hotplugging finished, let's wait a few sec and" - " check CPUs quantity in guest.", test.log.info) - if not utils_misc.wait_for(lambda: cpu.check_if_vm_vcpu_match( - total_cpus, vm), - 60 + total_cpus, first=10, - step=5.0, text="retry later"): + error_context.context( + "hotplugging finished, let's wait a few sec and" + " check CPUs quantity in guest.", + test.log.info, + ) + if not utils_misc.wait_for( + lambda: cpu.check_if_vm_vcpu_match(total_cpus, vm), + 60 + total_cpus, + first=10, + step=5.0, + text="retry later", + ): test.fail("CPU quantity mismatch cmd after hotplug !") - error_context.context("rebooting the vm and check CPU quantity !", - test.log.info) + error_context.context("rebooting the vm and check CPU quantity !", test.log.info) session = vm.reboot() if not cpu.check_if_vm_vcpu_match(total_cpus, vm): test.fail("CPU quantity mismatch cmd after hotplug and reboot !") # Window guest doesn't support online/offline test - if params['os_type'] == "windows": + if params["os_type"] == "windows": return error_context.context("locating online files for guest's new CPUs") - r_cmd = 'find /sys/devices/system/cpu/cpu*/online -maxdepth 0 -type f' + r_cmd = "find /sys/devices/system/cpu/cpu*/online -maxdepth 0 -type f" online_files = session.cmd(r_cmd) # Sometimes the return value include command line itself if "find" in online_files: @@ -95,21 +102,19 @@ def run(test, params, env): if not online_files: test.fail("Could not find CPUs that can be enabled/disabled on guest") - control_path = os.path.join(test.virtdir, "control", - "cpu_hotplug.control") + control_path = os.path.join(test.virtdir, "control", "cpu_hotplug.control") timeout = int(params.get("cpu_hotplug_timeout", 300)) error_context.context("running cpu_hotplug autotest after cpu addition") - utils_test.run_autotest(vm, session, control_path, timeout, - test.outputdir, params) + utils_test.run_autotest(vm, session, control_path, timeout, test.outputdir, params) # Last, but not least, let's offline/online the CPUs in the guest # several times irq = 15 irq_mask = "f0" for i in range(onoff_iterations): - session.cmd("echo %s > /proc/irq/%s/smp_affinity" % (irq_mask, irq)) + session.cmd(f"echo {irq_mask} > /proc/irq/{irq}/smp_affinity") for online_file in online_files: - session.cmd("echo 0 > %s" % online_file) + session.cmd(f"echo 0 > {online_file}") for online_file in online_files: - session.cmd("echo 1 > %s" % online_file) + session.cmd(f"echo 1 > {online_file}") diff --git a/qemu/tests/cpu_info_check.py b/qemu/tests/cpu_info_check.py index 5a47150375..1b4cd4985e 100644 --- a/qemu/tests/cpu_info_check.py +++ b/qemu/tests/cpu_info_check.py @@ -1,12 +1,7 @@ import re from avocado.utils import process - -from virttest import cpu -from virttest import env_process -from virttest import error_context -from virttest import utils_misc - +from virttest import cpu, env_process, error_context, utils_misc from virttest.utils_version import VersionInterval @@ -30,8 +25,9 @@ def remove_models(model_list): try: cpu_types.remove(model) except ValueError: - test.log.warning('The model to be removed is not' - ' in the list: %s', model) + test.log.warning( + "The model to be removed is not" " in the list: %s", model + ) continue def get_patterns(p_list): @@ -40,8 +36,7 @@ def get_patterns(p_list): :param p_list: The list of flags """ r_list = [] - replace_char = [('_', ''), ('_', '-'), ('.', '-'), - ('.', ''), ('.', '_')] + replace_char = [("_", ""), ("_", "-"), (".", "-"), (".", ""), (".", "_")] for p in p_list: 
r_list.extend(list(map(lambda x: p.replace(*x), replace_char))) return set(r_list) @@ -51,89 +46,100 @@ def get_patterns(p_list): qemu_path = utils_misc.get_qemu_binary(params) qemu_version = env_process._get_qemu_version(qemu_path) - match = re.search(r'[0-9]+\.[0-9]+\.[0-9]+(\-[0-9]+)?', - qemu_version) + match = re.search(r"[0-9]+\.[0-9]+\.[0-9]+(\-[0-9]+)?", qemu_version) host_qemu = match.group(0) - remove_list_deprecated = params.get('remove_list_deprecated', '') - if host_qemu in VersionInterval('[7.0.0-8, )') and remove_list_deprecated: - params['remove_list'] = remove_list_deprecated - remove_models(params.objects('remove_list')) - if host_qemu in VersionInterval('[,4.2.0)'): - remove_models(params.objects('cpu_model_8')) - if host_qemu in VersionInterval('[,3.1.0)'): - remove_models(params.objects('cpu_model_3_1_0')) - if host_qemu in VersionInterval('[,2.12.0)'): - remove_models(params.objects('cpu_model_2_12_0')) + remove_list_deprecated = params.get("remove_list_deprecated", "") + if host_qemu in VersionInterval("[7.0.0-8, )") and remove_list_deprecated: + params["remove_list"] = remove_list_deprecated + remove_models(params.objects("remove_list")) + if host_qemu in VersionInterval("[,4.2.0)"): + remove_models(params.objects("cpu_model_8")) + if host_qemu in VersionInterval("[,3.1.0)"): + remove_models(params.objects("cpu_model_3_1_0")) + if host_qemu in VersionInterval("[,2.12.0)"): + remove_models(params.objects("cpu_model_2_12_0")) qemu_binary = utils_misc.get_qemu_binary(params) - test.log.info('Query cpu models by qemu command') - query_cmd = "%s -cpu ? | awk '{print $2}'" % qemu_binary - qemu_binary_output = process.system_output( - query_cmd, shell=True).decode().splitlines() - cpuid_index = qemu_binary_output.index('CPUID') - cpu_models_binary = qemu_binary_output[1: cpuid_index - 1] - cpu_flags_binary = qemu_binary_output[cpuid_index + 1:] - params['start_vm'] = 'yes' - vm_name = params['main_vm'] + test.log.info("Query cpu models by qemu command") + query_cmd = f"{qemu_binary} -cpu ? 
| awk '{{print $2}}'" + qemu_binary_output = ( + process.system_output(query_cmd, shell=True).decode().splitlines() + ) + cpuid_index = qemu_binary_output.index("CPUID") + cpu_models_binary = qemu_binary_output[1 : cpuid_index - 1] + cpu_flags_binary = qemu_binary_output[cpuid_index + 1 :] + params["start_vm"] = "yes" + vm_name = params["main_vm"] env_process.preprocess_vm(test, params, env, vm_name) vm = env.get_vm(vm_name) # query cpu model supported by qemu - test.log.info('Query cpu model supported by qemu by qemu monitor') - qmp_model_output = str(vm.monitor.cmd('qom-list-types')) - qmp_def_output = str(vm.monitor.cmd('query-cpu-definitions')) + test.log.info("Query cpu model supported by qemu by qemu monitor") + qmp_model_output = str(vm.monitor.cmd("qom-list-types")) + qmp_def_output = str(vm.monitor.cmd("query-cpu-definitions")) # Check if all the output contain expected cpu models - output_list = {'qemu-kvm': cpu_models_binary, - 'qom-list-types': qmp_model_output, - 'query-cpu-definitions': qmp_def_output} + output_list = { + "qemu-kvm": cpu_models_binary, + "qom-list-types": qmp_model_output, + "query-cpu-definitions": qmp_def_output, + } missing = dict.fromkeys(output_list.keys(), []) for cpu_model in cpu_types: - test.log.info('Check cpu model %s from qemu command output and' - ' qemu monitor output', cpu_model) + test.log.info( + "Check cpu model %s from qemu command output and" " qemu monitor output", + cpu_model, + ) for key, value in output_list.items(): if cpu_model not in value: missing[key].append(cpu_model) for key, value in missing.items(): if value: - test.fail('%s is missing in the %s output: %s\n' % - (', '.join(value), key, output_list[key])) + test.fail( + "{} is missing in the {} output: {}\n".format( + ", ".join(value), key, output_list[key] + ) + ) # Check if qemu command output matches qmp output missing = [] - test.log.info('Check if qemu command output matches qemu monitor output') + test.log.info("Check if qemu command output matches qemu monitor output") for cpu_model in cpu_models_binary: if cpu_model not in qmp_model_output: missing.append(cpu_model) if missing: - test.fail('The qemu monitor output does not included all the cpu' - ' model in qemu command output, missing: \n %s' % - ', '.join(missing)) + test.fail( + "The qemu monitor output does not included all the cpu" + " model in qemu command output, missing: \n {}".format(", ".join(missing)) + ) # Check if the flags in qmp output matches expectation - args = {'type': 'full', 'model': {'name': vm.cpuinfo.model}} - output = vm.monitor.cmd('query-cpu-model-expansion', args) - model = output.get('model') - model_name = model.get('name') + args = {"type": "full", "model": {"name": vm.cpuinfo.model}} + output = vm.monitor.cmd("query-cpu-model-expansion", args) + model = output.get("model") + model_name = model.get("name") if model_name != vm.cpuinfo.model: - test.fail('Command query-cpu-model-expansion return' - ' wrong model: %s' % model_name) - model_prop = model.get('props') - for flag in cpu.CPU_TYPES_RE.get(model_name).split(','): - test.log.info('Check flag %s from qemu monitor output', flag) - flags = get_patterns(flag.split('|')) + test.fail( + "Command query-cpu-model-expansion return" f" wrong model: {model_name}" + ) + model_prop = model.get("props") + for flag in cpu.CPU_TYPES_RE.get(model_name).split(","): + test.log.info("Check flag %s from qemu monitor output", flag) + flags = get_patterns(flag.split("|")) for f in flags: if model_prop.get(f) is True: break else: - test.fail('Check cpu 
model props failed, %s is not True' % flag) + test.fail(f"Check cpu model props failed, {flag} is not True") # Check if the flags in qmp output matches qemu command output missing = [] - test.log.info('Check if the flags in qemu monitor output matches' - ' qemu command output') + test.log.info( + "Check if the flags in qemu monitor output matches" " qemu command output" + ) for flag in cpu_flags_binary: if flag not in str(output): missing.append(flag) if missing: - test.fail('The monitor output does not included all the cpu flags' - ' in qemu command output, missing: \n %s' % - ', '.join(missing)) + test.fail( + "The monitor output does not included all the cpu flags" + " in qemu command output, missing: \n {}".format(", ".join(missing)) + ) diff --git a/qemu/tests/cpu_model_inter_generation.py b/qemu/tests/cpu_model_inter_generation.py index 8b0071c677..a9fdced0a1 100644 --- a/qemu/tests/cpu_model_inter_generation.py +++ b/qemu/tests/cpu_model_inter_generation.py @@ -1,7 +1,4 @@ -from virttest import cpu -from virttest import utils_misc -from virttest import env_process -from virttest import error_context +from virttest import cpu, env_process, error_context, utils_misc @error_context.context_aware @@ -20,26 +17,28 @@ def start_with_model(test_model): :param test_model: The model been tested """ vm = None - params['cpu_model'] = test_model - test.log.info('Start vm with cpu model %s', test_model) + params["cpu_model"] = test_model + test.log.info("Start vm with cpu model %s", test_model) try: env_process.preprocess_vm(test, params, env, vm_name) vm = env.get_vm(vm_name) output = vm.process.get_output() if warning_text not in output: - test.fail("Qemu should output warning for lack flags" - " while it does not.") + test.fail( + "Qemu should output warning for lack flags" " while it does not." 
+ ) except Exception as e: - if boot_expected == 'no': - test.log.info('Expect vm boot up failed when enforce is set.') + if boot_expected == "no": + test.log.info("Expect vm boot up failed when enforce is set.") if warning_text not in str(e): raise else: raise else: - if boot_expected == 'no': - test.fail('The vm should not boot successfully' - ' when cpu enforce mode is on') + if boot_expected == "no": + test.fail( + "The vm should not boot successfully" " when cpu enforce mode is on" + ) finally: if vm and vm.is_alive(): vm.verify_kernel_crash() @@ -59,10 +58,10 @@ def start_with_model(test_model): host_cpu_model = cpu.get_qemu_best_cpu_model(params) if host_cpu_model.startswith(latest_cpu_model): - test.cancel('The host cpu is not old enough for this test.') + test.cancel("The host cpu is not old enough for this test.") - vm_name = params['main_vm'] - warning_text = params.get('warning_text') - boot_expected = params.get('boot_expected', 'yes') - params['start_vm'] = 'yes' + vm_name = params["main_vm"] + warning_text = params.get("warning_text") + boot_expected = params.get("boot_expected", "yes") + params["start_vm"] = "yes" start_with_model(latest_cpu_model) diff --git a/qemu/tests/cpu_model_negative.py b/qemu/tests/cpu_model_negative.py index 890b7c392e..4226a4f6ab 100644 --- a/qemu/tests/cpu_model_negative.py +++ b/qemu/tests/cpu_model_negative.py @@ -1,11 +1,7 @@ import re from avocado.utils import process - -from virttest import cpu -from virttest import error_context -from virttest import utils_qemu -from virttest import utils_misc +from virttest import cpu, error_context, utils_misc, utils_qemu from virttest.utils_version import VersionInterval @@ -22,46 +18,53 @@ def run(test, params, env): :param env: Dictionary with the test environment. """ - enforce_flag = params.get('enforce_flag') - if enforce_flag and 'CPU_MODEL' in params['wrong_cmd']: + enforce_flag = params.get("enforce_flag") + if enforce_flag and "CPU_MODEL" in params["wrong_cmd"]: if enforce_flag in cpu.get_host_cpu_models(): - test.cancel('This case only test on the host without the flag' - ' %s.' % enforce_flag) + test.cancel( + "This case only test on the host without the flag" f" {enforce_flag}." 
+ ) cpu_model = cpu.get_qemu_best_cpu_model(params) - params['wrong_cmd'] = params['wrong_cmd'].replace('CPU_MODEL', - cpu_model) + params["wrong_cmd"] = params["wrong_cmd"].replace("CPU_MODEL", cpu_model) qemu_bin = utils_misc.get_qemu_binary(params) - if 'OUT_OF_RANGE' in params['wrong_cmd']: - machine_type = params['machine_type'].split(':')[-1] + if "OUT_OF_RANGE" in params["wrong_cmd"]: + machine_type = params["machine_type"].split(":")[-1] m_types = utils_qemu.get_machines_info(qemu_bin)[machine_type] - m_type = re.search(r'\(alias of (\S+)\)', m_types)[1] + m_type = re.search(r"\(alias of (\S+)\)", m_types)[1] max_value = utils_qemu.get_maxcpus_hard_limit(qemu_bin, m_type) smp = str(max_value + 1) - params['wrong_cmd'] = params['wrong_cmd'].replace( - 'MACHINE_TYPE', machine_type).replace('OUT_OF_RANGE', smp) - msg = params['warning_msg'].replace('SMP_VALUE', smp).replace( - 'MAX_VALUE', str(max_value)).replace('MACHINE_TYPE', m_type) - params['warning_msg'] = msg + params["wrong_cmd"] = ( + params["wrong_cmd"] + .replace("MACHINE_TYPE", machine_type) + .replace("OUT_OF_RANGE", smp) + ) + msg = ( + params["warning_msg"] + .replace("SMP_VALUE", smp) + .replace("MAX_VALUE", str(max_value)) + .replace("MACHINE_TYPE", m_type) + ) + params["warning_msg"] = msg - if 'maxcpus' in params['wrong_cmd']: + if "maxcpus" in params["wrong_cmd"]: qemu_version = utils_qemu.get_qemu_version(qemu_bin)[0] - if qemu_version in VersionInterval('[, 6.2.0)'): - params['warning_msg'] = params['old_warning_msg'] + if qemu_version in VersionInterval("[, 6.2.0)"): + params["warning_msg"] = params["old_warning_msg"] else: - params['warning_msg'] = params['new_warning_msg'] - warning_msg = params['warning_msg'] - wrong_cmd = '%s %s' % (qemu_bin, params['wrong_cmd']) - test.log.info('Start qemu with command: %s', wrong_cmd) - ret_cmd = process.run(cmd=wrong_cmd, verbose=False, - ignore_status=True, shell=True) + params["warning_msg"] = params["new_warning_msg"] + warning_msg = params["warning_msg"] + wrong_cmd = "{} {}".format(qemu_bin, params["wrong_cmd"]) + test.log.info("Start qemu with command: %s", wrong_cmd) + ret_cmd = process.run(cmd=wrong_cmd, verbose=False, ignore_status=True, shell=True) output = ret_cmd.stderr_text status = ret_cmd.exit_status - test.log.info('Qemu prompt output:\n%s', output) + test.log.info("Qemu prompt output:\n%s", output) if status == 0: - test.fail('Qemu guest boots up while it should not.') + test.fail("Qemu guest boots up while it should not.") if warning_msg not in output: - test.fail('Does not get expected warning message.') + test.fail("Does not get expected warning message.") else: - test.log.info('Test passed as qemu does not boot up and' - ' prompts expected message.') + test.log.info( + "Test passed as qemu does not boot up and" " prompts expected message." 
+ ) diff --git a/qemu/tests/cpu_offline_online.py b/qemu/tests/cpu_offline_online.py index 1708168e42..782b9dc3bd 100644 --- a/qemu/tests/cpu_offline_online.py +++ b/qemu/tests/cpu_offline_online.py @@ -1,5 +1,4 @@ from avocado.utils import cpu - from virttest import error_context from virttest.cpu import check_if_vm_vcpu_match @@ -14,23 +13,22 @@ def run(test, params, env): """ host_cpu = cpu.online_count() cpu_range = range(host_cpu) - cpu_list = "{}-{}".format(cpu_range[1], cpu_range[-1]) + cpu_list = f"{cpu_range[1]}-{cpu_range[-1]}" params["smp"] = params["vcpu_maxcpus"] = host_cpu params["start_vm"] = "yes" - vm = env.get_vm(params['main_vm']) + vm = env.get_vm(params["main_vm"]) vm.create(params=params) vm.verify_alive() session = vm.wait_for_login() - error_context.base_context("Offline CPUs: {}".format(cpu_list), - test.log.info) - session.cmd("chcpu -d {}".format(cpu_list), timeout=len(cpu_range)) + error_context.base_context(f"Offline CPUs: {cpu_list}", test.log.info) + session.cmd(f"chcpu -d {cpu_list}", timeout=len(cpu_range)) if not check_if_vm_vcpu_match(1, vm): test.fail("CPU quantity on guest mismatch after offline") - test.log.info("{} have been offline.".format(cpu_list)) + test.log.info("%s have been offline.", cpu_list) - error_context.context("Online CPUs: {}".format(cpu_list), test.log.info) - session.cmd("chcpu -e {}".format(cpu_list), timeout=len(cpu_range)) + error_context.context(f"Online CPUs: {cpu_list}", test.log.info) + session.cmd(f"chcpu -e {cpu_list}", timeout=len(cpu_range)) if not check_if_vm_vcpu_match(host_cpu, vm): test.fail("CPU quantity on guest mismatch after online again") - test.log.info("{} have been online.".format(cpu_list)) + test.log.info("%s have been online.", cpu_list) diff --git a/qemu/tests/cpu_rdrand.py b/qemu/tests/cpu_rdrand.py index 1cb6c3bd69..dd9f6437da 100644 --- a/qemu/tests/cpu_rdrand.py +++ b/qemu/tests/cpu_rdrand.py @@ -1,6 +1,4 @@ -from virttest import error_context -from virttest import data_dir -from virttest import utils_misc +from virttest import data_dir, error_context, utils_misc @error_context.context_aware @@ -15,22 +13,22 @@ def run(test, params, env): vm = env.get_vm(params["main_vm"]) session = vm.wait_for_login() - test_bin = params['test_bin'] - source_file = params['source_file'] - guest_path = params['guest_path'] - host_path = utils_misc.get_path(data_dir.get_deps_dir('rdrand'), source_file) - vm.copy_files_to(host_path, '%s%s' % (guest_path, source_file)) - if params['os_type'] == 'linux': - build_cmd = params.get('build_cmd', 'cd %s; gcc -lrt %s -o %s') + test_bin = params["test_bin"] + source_file = params["source_file"] + guest_path = params["guest_path"] + host_path = utils_misc.get_path(data_dir.get_deps_dir("rdrand"), source_file) + vm.copy_files_to(host_path, f"{guest_path}{source_file}") + if params["os_type"] == "linux": + build_cmd = params.get("build_cmd", "cd %s; gcc -lrt %s -o %s") error_context.context("build binary file 'rdrand'", test.log.info) session.cmd(build_cmd % (guest_path, source_file, test_bin)) - s, o = session.cmd_status_output('%s%s' % (guest_path, test_bin)) - session.cmd(params['delete_cmd']) + s, o = session.cmd_status_output(f"{guest_path}{test_bin}") + session.cmd(params["delete_cmd"]) if s != 0: - test.fail('rdrand failed with status %s' % s) - if params['os_type'] == 'linux': + test.fail(f"rdrand failed with status {s}") + if params["os_type"] == "linux": try: - if not int(float(o)) in range(-101, 101): - test.fail('rdrand output is %s, which is not expected' % o) + if 
int(float(o)) not in range(-101, 101): + test.fail(f"rdrand output is {o}, which is not expected") except ValueError as e: - test.fail('rdrand should output a float: %s' % str(e)) + test.fail(f"rdrand should output a float: {str(e)}") diff --git a/qemu/tests/cpu_topology_details_test.py b/qemu/tests/cpu_topology_details_test.py index 727fd8b9c0..e69f6e27d1 100644 --- a/qemu/tests/cpu_topology_details_test.py +++ b/qemu/tests/cpu_topology_details_test.py @@ -13,8 +13,10 @@ def check(session, p_name, exp, check_cmd, test): """ res = session.cmd_output(check_cmd).strip() if int(res) != int(exp): - test.fail('The vcpu %s number inside guest is %s,' - ' while it is set to %s' % (p_name, res, exp)) + test.fail( + f"The vcpu {p_name} number inside guest is {res}," + f" while it is set to {exp}" + ) def check_details(vm_session, vm_params, vm_cpuinfo, test): @@ -24,27 +26,40 @@ def check_details(vm_session, vm_params, vm_cpuinfo, test): several scenarios """ - if vm_params.get('check_sockets_cmd'): - check(vm_session, 'sockets', vm_cpuinfo.sockets, vm_params[ - 'check_sockets_cmd'], test) - if vm_params.get('check_core_id_cmd'): + if vm_params.get("check_sockets_cmd"): + check( + vm_session, + "sockets", + vm_cpuinfo.sockets, + vm_params["check_sockets_cmd"], + test, + ) + if vm_params.get("check_core_id_cmd"): for cpu_id in list(range(vm_cpuinfo.maxcpus)): - check(vm_session, 'core_id', cpu_id, vm_params[ - 'check_core_id_cmd'] % cpu_id, test) - if vm_params.get('check_core_per_socket_cmd'): + check( + vm_session, + "core_id", + cpu_id, + vm_params["check_core_id_cmd"] % cpu_id, + test, + ) + if vm_params.get("check_core_per_socket_cmd"): vm_cores = vm_cpuinfo.cores - socket_list = vm_session.cmd_output(vm_params.get( - 'check_core_per_socket_cmd')).splitlines() + socket_list = vm_session.cmd_output( + vm_params.get("check_core_per_socket_cmd") + ).splitlines() uni_socket = set(socket_list) if len(uni_socket) != vm_cpuinfo.sockets: - test.fail('The number of socket is not expected, expect:%s, ' - 'actual:%s' % (vm_cpuinfo.sockets, len(uni_socket))) + test.fail( + f"The number of socket is not expected, expect:{vm_cpuinfo.sockets}, " + f"actual:{len(uni_socket)}" + ) for value in uni_socket: if socket_list.count(value) != vm_cores: - test.fail('The number of cores per socket is not expected, ' - 'expect:%s, actual:%s' % (vm_cores, - len(socket_list.count( - value)))) + test.fail( + "The number of cores per socket is not expected, " + f"expect:{vm_cores}, actual:{len(socket_list.count(value))}" + ) @error_context.context_aware @@ -61,12 +76,13 @@ def run(test, params, env): :param params: Dictionary with test parameters. :param env: Dictionary with the test environment. 
""" - vm_name = params['main_vm'] - os_type = params['os_type'] + vm_name = params["main_vm"] + os_type = params["os_type"] vm = env.get_vm(vm_name) session = vm.wait_for_login() - if not check_if_vm_vcpu_topology_match(session, os_type, vm.cpuinfo, - test, vm.devices): - test.fail('CPU topology of guest is incorrect.') + if not check_if_vm_vcpu_topology_match( + session, os_type, vm.cpuinfo, test, vm.devices + ): + test.fail("CPU topology of guest is incorrect.") check_details(session, params, vm.cpuinfo, test) vm.destroy() diff --git a/qemu/tests/cpu_topology_test.py b/qemu/tests/cpu_topology_test.py index 56c06222bc..7199f3cc6a 100644 --- a/qemu/tests/cpu_topology_test.py +++ b/qemu/tests/cpu_topology_test.py @@ -1,10 +1,8 @@ -import re import random +import re from avocado.utils import cpu - -from virttest import error_context -from virttest import env_process +from virttest import env_process, error_context from provider.cpu_utils import check_if_vm_vcpu_topology_match @@ -31,28 +29,33 @@ def check(p_name, exp, check_cmd): """ res = session.cmd_output(check_cmd).strip() if int(res) != int(exp): - test.fail('The vcpu %s number inside guest is %s,' - ' while it is set to %s' % (p_name, res, exp)) + test.fail( + f"The vcpu {p_name} number inside guest is {res}," + f" while it is set to {exp}" + ) - vm_name = params['main_vm'] - os_type = params['os_type'] + vm_name = params["main_vm"] + os_type = params["os_type"] vcpu_threads_list = [1, 2] - if params['machine_type'] == 'pseries': + if params["machine_type"] == "pseries": vcpu_threads_list = [1, 2, 4, 8] - if 'arm64' in params['machine_type'] or 's390' in params['machine_type']: + if "arm64" in params["machine_type"] or "s390" in params["machine_type"]: vcpu_threads_list = [1] host_cpu = cpu.online_count() - params['vcpu_cores'] = vcpu_cores = random.randint(1, min(6, host_cpu//2)) + params["vcpu_cores"] = vcpu_cores = random.randint(1, min(6, host_cpu // 2)) for vcpu_threads in vcpu_threads_list: - vcpu_sockets = min(max(host_cpu // (vcpu_cores * vcpu_threads), 1), - random.randint(1, 6)) - vcpu_sockets = 2 if (os_type == 'windows' and - vcpu_sockets > 2) else vcpu_sockets - params['vcpu_sockets'] = vcpu_sockets - params['vcpu_threads'] = vcpu_threads - params['smp'] = params['vcpu_maxcpus'] = (vcpu_cores * - vcpu_threads * vcpu_sockets) - params['start_vm'] = 'yes' + vcpu_sockets = min( + max(host_cpu // (vcpu_cores * vcpu_threads), 1), random.randint(1, 6) + ) + vcpu_sockets = ( + 2 if (os_type == "windows" and vcpu_sockets > 2) else vcpu_sockets + ) + params["vcpu_sockets"] = vcpu_sockets + params["vcpu_threads"] = vcpu_threads + params["smp"] = params["vcpu_maxcpus"] = ( + vcpu_cores * vcpu_threads * vcpu_sockets + ) + params["start_vm"] = "yes" try: env_process.preprocess_vm(test, params, env, vm_name) except Exception as e: @@ -60,23 +63,23 @@ def check(p_name, exp, check_cmd): # qemu_vm.VM.make_create_command, and thus cause qemu vm fail to # start, which is expected; Modify the value and restart vm in # this case, and verify cpu topology inside guest after that - if 'qemu-kvm: cpu topology' in str(e): - sockets = int(re.findall(r'sockets\s+\((\d)\)', str(e))[0]) - threads = int(re.findall(r'threads\s+\((\d)\)', str(e))[0]) - cores = int(re.findall(r'cores\s+\((\d)\)', str(e))[0]) - params['smp'] = params['vcpu_maxcpus'] = (sockets * - threads * cores) + if "qemu-kvm: cpu topology" in str(e): + sockets = int(re.findall(r"sockets\s+\((\d)\)", str(e))[0]) + threads = int(re.findall(r"threads\s+\((\d)\)", str(e))[0]) + cores = 
int(re.findall(r"cores\s+\((\d)\)", str(e))[0]) + params["smp"] = params["vcpu_maxcpus"] = sockets * threads * cores env_process.preprocess_vm(test, params, env, vm_name) else: raise vm = env.get_vm(vm_name) session = vm.wait_for_login() - if not check_if_vm_vcpu_topology_match(session, os_type, vm.cpuinfo, - test, vm.devices): - test.fail('CPU topology of guest is incorrect.') - if params.get('check_siblings_cmd'): - check('sibling', vcpu_threads * vcpu_cores, params['check_siblings_cmd']) - if params.get('check_core_id_cmd'): - for cpu_id in list(range(params['smp'])): - check('core_id', cpu_id, params['check_core_id_cmd'] % cpu_id) + if not check_if_vm_vcpu_topology_match( + session, os_type, vm.cpuinfo, test, vm.devices + ): + test.fail("CPU topology of guest is incorrect.") + if params.get("check_siblings_cmd"): + check("sibling", vcpu_threads * vcpu_cores, params["check_siblings_cmd"]) + if params.get("check_core_id_cmd"): + for cpu_id in list(range(params["smp"])): + check("core_id", cpu_id, params["check_core_id_cmd"] % cpu_id) vm.destroy() diff --git a/qemu/tests/cpuflags.py b/qemu/tests/cpuflags.py index b75a44e3d3..1215ec0c85 100644 --- a/qemu/tests/cpuflags.py +++ b/qemu/tests/cpuflags.py @@ -1,22 +1,15 @@ -import re -import random import os -import time import pickle +import random +import re import sys +import time import traceback from xml.parsers import expat import aexpect - from avocado.utils import process - -from virttest import qemu_vm -from virttest import qemu_migration -from virttest import virt_vm -from virttest import data_dir -from virttest import utils_misc -from virttest import cpu +from virttest import cpu, data_dir, qemu_migration, qemu_vm, utils_misc, virt_vm from virttest.utils_test.qemu import migration @@ -32,8 +25,7 @@ def run(test, params, env): qemu_binary = utils_misc.get_qemu_binary(params) cpuflags_src = os.path.join(data_dir.get_deps_dir("cpu_flags"), "src") - cpuflags_def = os.path.join(data_dir.get_deps_dir("cpu_flags"), - "cpu_map.xml") + cpuflags_def = os.path.join(data_dir.get_deps_dir("cpu_flags"), "cpu_map.xml") smp = int(params.get("smp", 1)) all_host_supported_flags = params.get("all_host_supported_flags", "no") @@ -46,42 +38,42 @@ def run(test, params, env): multi_host_migration = params.get("multi_host_migration", "no") - class HgFlags(object): - + class HgFlags: def __init__(self, cpu_model, extra_flags=set([])): - virtual_flags = set(map(cpu.Flag, - params.get("guest_spec_flags", "").split())) - self.hw_flags = set(map(utils_misc.Flag, - params.get("host_spec_flags", "").split())) + virtual_flags = set( + map(cpu.Flag, params.get("guest_spec_flags", "").split()) + ) + self.hw_flags = set( + map(utils_misc.Flag, params.get("host_spec_flags", "").split()) + ) self.qemu_support_flags = get_all_qemu_flags() - self.host_support_flags = set(map(cpu.Flag, - cpu.get_cpu_flags())) - self.quest_cpu_model_flags = (get_guest_host_cpuflags(cpu_model) - - virtual_flags) - - self.supported_flags = (self.qemu_support_flags & - self.host_support_flags) - self.cpumodel_unsupport_flags = (self.supported_flags - - self.quest_cpu_model_flags) - - self.host_unsupported_flags = (self.quest_cpu_model_flags - - self.host_support_flags) - - self.all_possible_guest_flags = (self.quest_cpu_model_flags - - self.host_unsupported_flags) + self.host_support_flags = set(map(cpu.Flag, cpu.get_cpu_flags())) + self.quest_cpu_model_flags = ( + get_guest_host_cpuflags(cpu_model) - virtual_flags + ) + + self.supported_flags = self.qemu_support_flags & 
self.host_support_flags + self.cpumodel_unsupport_flags = ( + self.supported_flags - self.quest_cpu_model_flags + ) + + self.host_unsupported_flags = ( + self.quest_cpu_model_flags - self.host_support_flags + ) + + self.all_possible_guest_flags = ( + self.quest_cpu_model_flags - self.host_unsupported_flags + ) self.all_possible_guest_flags |= self.cpumodel_unsupport_flags - self.guest_flags = (self.quest_cpu_model_flags - - self.host_unsupported_flags) + self.guest_flags = self.quest_cpu_model_flags - self.host_unsupported_flags self.guest_flags |= extra_flags self.host_all_unsupported_flags = set([]) self.host_all_unsupported_flags |= self.qemu_support_flags - self.host_all_unsupported_flags -= (self.host_support_flags | - virtual_flags) + self.host_all_unsupported_flags -= self.host_support_flags | virtual_flags - def start_guest_with_cpuflags(cpuflags, smp=None, migration=False, - wait=True): + def start_guest_with_cpuflags(cpuflags, smp=None, migration=False, wait=True): """ Try to boot guest with special cpu flags and try login in to them. """ @@ -91,9 +83,9 @@ def start_guest_with_cpuflags(cpuflags, smp=None, migration=False, params_b["smp"] = smp vm_name = "vm1-cpuflags" - vm = qemu_vm.VM(vm_name, params_b, test.bindir, env['address_cache']) + vm = qemu_vm.VM(vm_name, params_b, test.bindir, env["address_cache"]) env.register_vm(vm_name, vm) - if (migration is True): + if migration is True: vm.create(migration_mode=mig_protocol) else: vm.create() @@ -117,7 +109,7 @@ def get_guest_system_cpuflags(vm_session): :param vm_session: session to checked vm. :return: [corespond flags] """ - flags_re = re.compile(r'^flags\s*:(.*)$', re.MULTILINE) + flags_re = re.compile(r"^flags\s*:(.*)$", re.MULTILINE) out = vm_session.cmd_output("cat /proc/cpuinfo") flags = flags_re.search(out).groups()[0].split() @@ -133,19 +125,20 @@ def get_guest_host_cpuflags_legacy(cpumodel): cmd = qemu_binary + " -cpu ?dump" output = process.run(cmd).stdout re.escape(cpumodel) - pattern = (r".+%s.*\n.*\n +feature_edx .+ \((.*)\)\n +feature_" - r"ecx .+ \((.*)\)\n +extfeature_edx .+ \((.*)\)\n +" - r"extfeature_ecx .+ \((.*)\)\n" % (cpumodel)) + pattern = ( + rf".+{cpumodel}.*\n.*\n +feature_edx .+ \((.*)\)\n +feature_" + r"ecx .+ \((.*)\)\n +extfeature_edx .+ \((.*)\)\n +" + r"extfeature_ecx .+ \((.*)\)\n" + ) flags = [] model = re.search(pattern, output) if model is None: - test.fail("Cannot find %s cpu model." 
% (cpumodel)) + test.fail(f"Cannot find {cpumodel} cpu model.") for flag_group in model.groups(): flags += flag_group.split() return set(map(cpu.Flag, flags)) - class ParseCpuFlags(object): - + class ParseCpuFlags: def __init__(self, encoding=None): self.cpus = {} self.parser = expat.ParserCreate(encoding) @@ -160,18 +153,18 @@ def start_element(self, name, attrs): if name == "cpus": self.cpus = {} elif name == "arch": - self.last_arch = self.cpus[attrs['name']] = {} + self.last_arch = self.cpus[attrs["name"]] = {} elif name == "model": if self.last_model is None: - self.last_model = self.last_arch[attrs['name']] = [] + self.last_model = self.last_arch[attrs["name"]] = [] else: - self.last_model += self.last_arch[attrs['name']] + self.last_model += self.last_arch[attrs["name"]] self.sub_model = True elif name == "feature": if self.last_model is not None: - self.last_model.append(attrs['name']) + self.last_model.append(attrs["name"]) else: - self.all_flags.append(attrs['name']) + self.all_flags.append(attrs["name"]) def end_element(self, name): if name == "arch": @@ -183,7 +176,7 @@ def end_element(self, name): self.sub_model = False def parse_file(self, file_path): - self.parser.ParseFile(open(file_path, 'r')) + self.parser.ParseFile(open(file_path, "r")) return self.cpus def get_guest_host_cpuflags_1350(cpumodel): @@ -207,8 +200,9 @@ def get_all_qemu_flags_legacy(): cmd = qemu_binary + " -cpu ?cpuid" output = process.run(cmd).stdout - flags_re = re.compile(r".*\n.*f_edx:(.*)\n.*f_ecx:(.*)\n" - ".*extf_edx:(.*)\n.*extf_ecx:(.*)") + flags_re = re.compile( + r".*\n.*f_edx:(.*)\n.*f_ecx:(.*)\n" ".*extf_edx:(.*)\n.*extf_ecx:(.*)" + ) m = flags_re.search(output) flags = [] for a in m.groups(): @@ -280,9 +274,9 @@ def get_qemu_cpu_cmd_version(): qcver = get_qemu_cpu_cmd_version() - get_guest_host_cpuflags = locals()["get_guest_host_cpuflags_%s" % qcver] - get_all_qemu_flags = locals()["get_all_qemu_flags_%s" % qcver] - get_cpu_models = locals()["get_cpu_models_%s" % qcver] + get_guest_host_cpuflags = locals()[f"get_guest_host_cpuflags_{qcver}"] + get_all_qemu_flags = locals()[f"get_all_qemu_flags_{qcver}"] + get_cpu_models = locals()[f"get_cpu_models_{qcver}"] def get_flags_full_name(cpu_flag): """ @@ -308,8 +302,7 @@ def parse_qemu_cpucommand(cpumodel): cpumodel = flags[0] qemu_model_flag = get_guest_host_cpuflags(cpumodel) - host_support_flag = set(map(cpu.Flag, - cpu.get_cpu_flags())) + host_support_flag = set(map(cpu.Flag, cpu.get_cpu_flags())) real_flags = qemu_model_flag & host_support_flag for f in flags[1:]: @@ -359,12 +352,12 @@ def disable_cpu(vm_session, cpu, disable=True): """ system_cpu_dir = "/sys/devices/system/cpu/" cpu_online = system_cpu_dir + "cpu%d/online" % (cpu) - cpu_state = vm_session.cmd_output("cat %s" % cpu_online).strip() + cpu_state = vm_session.cmd_output(f"cat {cpu_online}").strip() if disable and cpu_state == "1": - vm_session.cmd("echo 0 > %s" % cpu_online) + vm_session.cmd(f"echo 0 > {cpu_online}") test.log.debug("Guest cpu %d is disabled.", cpu) elif cpu_state == "0": - vm_session.cmd("echo 1 > %s" % cpu_online) + vm_session.cmd(f"echo 1 > {cpu_online}") test.log.debug("Guest cpu %d is enabled.", cpu) def check_online_cpus(vm_session, smp, disabled_cpu): @@ -380,16 +373,19 @@ def check_online_cpus(vm_session, smp, disabled_cpu): for cpuid in range(1, smp): system_cpu_dir = "/sys/devices/system/cpu/" cpu_online = system_cpu_dir + "cpu%d/online" % (cpuid) - cpu_state = vm_session.cmd_output("cat %s" % cpu_online).strip() + cpu_state = vm_session.cmd_output(f"cat 
{cpu_online}").strip() if cpu_state == "1": online.append(cpuid) cpu_proc = vm_session.cmd_output("cat /proc/cpuinfo") - cpu_state_proc = map(lambda x: int(x), - re.findall(r"processor\s+:\s*(\d+)\n", cpu_proc)) + cpu_state_proc = map( + lambda x: int(x), re.findall(r"processor\s+:\s*(\d+)\n", cpu_proc) + ) if set(online) != set(cpu_state_proc): - test.error("Some cpus are disabled but %s are still " - "visible like online in /proc/cpuinfo." % - (set(cpu_state_proc) - set(online))) + test.error( + "Some cpus are disabled but %s are still " + "visible like online in /proc/cpuinfo." + % (set(cpu_state_proc) - set(online)) + ) return set(online) - set(disabled_cpu) @@ -403,8 +399,7 @@ def install_cpuflags_test_on_vm(vm, dst_dir): session = vm.wait_for_login() vm.copy_files_to(cpuflags_src, dst_dir) session.cmd("sync") - session.cmd("cd %s; make EXTRA_FLAGS='';" % - os.path.join(dst_dir, "src")) + session.cmd("cd {}; make EXTRA_FLAGS='';".format(os.path.join(dst_dir, "src"))) session.cmd("sync") session.close() @@ -424,16 +419,19 @@ def check_cpuflags_work(vm, path, flags): for f in flags: try: for tc in cpu.kvm_map_flags_to_test[f]: - session.cmd("%s/cpuflags-test --%s" % - (os.path.join(path, "src"), tc)) + session.cmd( + "{}/cpuflags-test --{}".format(os.path.join(path, "src"), tc) + ) pass_Flags.append(f) except aexpect.ShellCmdError: not_working.append(f) except KeyError: not_tested.append(f) - return (set(map(cpu.Flag, pass_Flags)), - set(map(cpu.Flag, not_working)), - set(map(cpu.Flag, not_tested))) + return ( + set(map(cpu.Flag, pass_Flags)), + set(map(cpu.Flag, not_working)), + set(map(cpu.Flag, not_tested)), + ) def run_stress(vm, timeout, guest_flags): """ @@ -445,13 +443,18 @@ def run_stress(vm, timeout, guest_flags): flags = check_cpuflags_work(vm, install_path, guest_flags) dd_session = vm.wait_for_login() stress_session = vm.wait_for_login() - dd_session.sendline("dd if=/dev/[svh]da of=/tmp/stressblock" - " bs=10MB count=100 &") + dd_session.sendline( + "dd if=/dev/[svh]da of=/tmp/stressblock" " bs=10MB count=100 &" + ) try: - stress_session.cmd("%s/cpuflags-test --stress %s%s" % - (os.path.join(install_path, "src"), smp, - cpu.kvm_flags_to_stresstests(flags[0])), - timeout=timeout) + stress_session.cmd( + "{}/cpuflags-test --stress {}{}".format( + os.path.join(install_path, "src"), + smp, + cpu.kvm_flags_to_stresstests(flags[0]), + ), + timeout=timeout, + ) except aexpect.ShellTimeoutError: ret = True stress_session.close() @@ -482,10 +485,9 @@ def parse_cpu_model(): extra_flags = set([]) return (cpu_model, extra_flags) - class MiniSubtest(object): - + class MiniSubtest: def __new__(cls, *args, **kargs): - self = super(MiniSubtest, cls).__new__(cls) + self = super().__new__(cls) ret = None if args is None: args = [] @@ -499,15 +501,15 @@ def __new__(cls, *args, **kargs): def print_exception(called_object): exc_type, exc_value, exc_traceback = sys.exc_info() test.log.error("In function (%s):", called_object.__name__) - test.log.error("Call from:\n%s", - traceback.format_stack()[-2][:-1]) - test.log.error("Exception from:\n%s", - "".join(traceback.format_exception( - exc_type, exc_value, - exc_traceback.tb_next))) + test.log.error("Call from:\n%s", traceback.format_stack()[-2][:-1]) + test.log.error( + "Exception from:\n%s", + "".join( + traceback.format_exception(exc_type, exc_value, exc_traceback.tb_next) + ), + ) class Test_temp(MiniSubtest): - def clean(self): test.log.info("cleanup") vm = getattr(self, "vm", None) @@ -519,7 +521,6 @@ def clean(self): # 1) -cpu ?model class 
test_qemu_cpu_model(MiniSubtest): - def test(self): if qcver == "legacy": cpu_models = params.get("cpu_models", "core2duo").split() @@ -531,16 +532,15 @@ def test(self): if cpu_model not in result.stdout: missing.append(cpu_model) if missing: - test.fail("CPU models %s are not in output " - "'%s' of command \n%s" % - (missing, cmd, result.stdout)) + test.fail( + f"CPU models {missing} are not in output " + f"'{cmd}' of command \n{result.stdout}" + ) else: - test.cancel("New qemu does not support -cpu " - "?model. (%s)" % qcver) + test.cancel("New qemu does not support -cpu " f"?model. ({qcver})") # 2) -cpu ?dump class test_qemu_dump(MiniSubtest): - def test(self): if qcver == "legacy": cpu_models = params.get("cpu_models", "core2duo").split() @@ -552,51 +552,49 @@ def test(self): if cpu_model not in result.stdout: missing.append(cpu_model) if missing: - test.fail("CPU models %s are not in output " - "'%s' of command \n%s" % - (missing, cmd, result.stdout)) + test.fail( + f"CPU models {missing} are not in output " + f"'{cmd}' of command \n{result.stdout}" + ) else: - test.cancel("New qemu does not support -cpu " - "?dump. (%s)" % qcver) + test.cancel("New qemu does not support -cpu " f"?dump. ({qcver})") # 3) -cpu ?cpuid class test_qemu_cpuid(MiniSubtest): - def test(self): if qcver == "legacy": cmd = qemu_binary + " -cpu ?cpuid" result = process.run(cmd) - if result.stdout is "": - test.fail("There aren't any cpu Flag in output" - " '%s' of command \n%s" % (cmd, result.stdout)) + if result.stdout == "": + test.fail( + "There aren't any cpu Flag in output" + f" '{cmd}' of command \n{result.stdout}" + ) else: - test.cancel("New qemu does not support -cpu " - "?cpuid. (%s)" % qcver) + test.cancel("New qemu does not support -cpu " f"?cpuid. ({qcver})") # 1) boot with cpu_model class test_boot_cpu_model(Test_temp): - def test(self): cpu_model, _ = parse_cpu_model() test.log.debug("Run tests with cpu model %s", cpu_model) flags = HgFlags(cpu_model) (self.vm, session) = start_guest_with_cpuflags(cpu_model) - not_enable_flags = (check_cpuflags(cpu_model, session) - - flags.hw_flags) + not_enable_flags = check_cpuflags(cpu_model, session) - flags.hw_flags if not_enable_flags != set([]): - test.fail("Flags defined on host but not found " - "on guest: %s" % (not_enable_flags)) + test.fail( + "Flags defined on host but not found " + f"on guest: {not_enable_flags}" + ) # 2) success boot with supported flags class test_boot_cpu_model_and_additional_flags(Test_temp): - def test(self): cpu_model, extra_flags = parse_cpu_model() flags = HgFlags(cpu_model, extra_flags) - test.log.debug("Cpu mode flags %s.", - str(flags.quest_cpu_model_flags)) + test.log.debug("Cpu mode flags %s.", str(flags.quest_cpu_model_flags)) cpuf_model = cpu_model if all_host_supported_flags == "yes": @@ -616,32 +614,36 @@ def test(self): (self.vm, session) = start_guest_with_cpuflags(cpuf_model) - not_enable_flags = (check_cpuflags(cpuf_model, session) - - flags.hw_flags) + not_enable_flags = check_cpuflags(cpuf_model, session) - flags.hw_flags if not_enable_flags != set([]): - test.log.info("Model unsupported flags: %s", - str(flags.cpumodel_unsupport_flags)) - test.log.error("Flags defined on host but not on found " - "on guest: %s", str(not_enable_flags)) + test.log.info( + "Model unsupported flags: %s", str(flags.cpumodel_unsupport_flags) + ) + test.log.error( + "Flags defined on host but not on found " "on guest: %s", + str(not_enable_flags), + ) test.log.info("Check main instruction sets.") install_path = "/tmp" 
install_cpuflags_test_on_vm(self.vm, install_path) - Flags = check_cpuflags_work(self.vm, install_path, - flags.all_possible_guest_flags) + Flags = check_cpuflags_work( + self.vm, install_path, flags.all_possible_guest_flags + ) test.log.info("Woking CPU flags: %s", str(Flags[0])) test.log.info("Not working CPU flags: %s", str(Flags[1])) - test.log.warning("Flags works even if not defined on guest cpu " - "flags: %s", str(Flags[0] - guest_flags)) + test.log.warning( + "Flags works even if not defined on guest cpu " "flags: %s", + str(Flags[0] - guest_flags), + ) test.log.warning("Not tested CPU flags: %s", str(Flags[2])) if Flags[1] & guest_flags: - test.fail("Some flags do not work: %s" % (str(Flags[1]))) + test.fail(f"Some flags do not work: {str(Flags[1])}") # 3) fail boot unsupported flags class test_boot_warn_with_host_unsupported_flags(MiniSubtest): - def test(self): # This is virtual cpu flags which are supported by # qemu but no with host cpu. @@ -649,8 +651,9 @@ def test(self): flags = HgFlags(cpu_model, extra_flags) - test.log.debug("Unsupported flags %s.", - str(flags.host_all_unsupported_flags)) + test.log.debug( + "Unsupported flags %s.", str(flags.host_all_unsupported_flags) + ) cpuf_model = cpu_model + ",check" # Add unsupported flags. @@ -658,9 +661,11 @@ def test(self): cpuf_model += ",+" + str(fadd) vnc_port = utils_misc.find_free_port(5900, 6100) - 5900 - cmd = "%s -cpu %s -vnc :%d -enable-kvm" % (qemu_binary, - cpuf_model, - vnc_port) + cmd = "%s -cpu %s -vnc :%d -enable-kvm" % ( + qemu_binary, + cpuf_model, + vnc_port, + ) out = None try: @@ -671,21 +676,18 @@ def test(self): out = e.result.stderr finally: uns_re = re.compile(r"^warning:.*flag '(.+)'", re.MULTILINE) - nf_re = re.compile( - r"^CPU feature (.+) not found", re.MULTILINE) - warn_flags = set([cpu.Flag(x) - for x in uns_re.findall(out)]) - not_found = set([cpu.Flag(x) - for x in nf_re.findall(out)]) + nf_re = re.compile(r"^CPU feature (.+) not found", re.MULTILINE) + warn_flags = set([cpu.Flag(x) for x in uns_re.findall(out)]) + not_found = set([cpu.Flag(x) for x in nf_re.findall(out)]) fwarn_flags = flags.host_all_unsupported_flags - warn_flags fwarn_flags -= not_found if fwarn_flags: - test.fail("Qemu did not warn the use of " - "flags %s" % str(fwarn_flags)) + test.fail( + "Qemu did not warn the use of " f"flags {str(fwarn_flags)}" + ) # 3) fail boot unsupported flags class test_fail_boot_with_host_unsupported_flags(MiniSubtest): - def test(self): # This is virtual cpu flags which are supported by # qemu but no with host cpu. @@ -694,17 +696,20 @@ def test(self): flags = HgFlags(cpu_model, extra_flags) cpuf_model = cpu_model + ",enforce" - test.log.debug("Unsupported flags %s.", - str(flags.host_all_unsupported_flags)) + test.log.debug( + "Unsupported flags %s.", str(flags.host_all_unsupported_flags) + ) # Add unsupported flags. 
for fadd in flags.host_all_unsupported_flags: cpuf_model += ",+" + str(fadd) vnc_port = utils_misc.find_free_port(5900, 6100) - 5900 - cmd = "%s -cpu %s -vnc :%d -enable-kvm" % (qemu_binary, - cpuf_model, - vnc_port) + cmd = "%s -cpu %s -vnc :%d -enable-kvm" % ( + qemu_binary, + cpuf_model, + vnc_port, + ) out = None try: try: @@ -713,36 +718,33 @@ def test(self): test.log.error("Host boot with unsupported flag") finally: uns_re = re.compile(r"^warning:.*flag '(.+)'", re.MULTILINE) - nf_re = re.compile( - r"^CPU feature (.+) not found", re.MULTILINE) - warn_flags = set([cpu.Flag(x) - for x in uns_re.findall(out)]) - not_found = set([cpu.Flag(x) - for x in nf_re.findall(out)]) + nf_re = re.compile(r"^CPU feature (.+) not found", re.MULTILINE) + warn_flags = set([cpu.Flag(x) for x in uns_re.findall(out)]) + not_found = set([cpu.Flag(x) for x in nf_re.findall(out)]) fwarn_flags = flags.host_all_unsupported_flags - warn_flags fwarn_flags -= not_found if fwarn_flags: - test.fail("Qemu did not warn the use of " - "flags %s" % str(fwarn_flags)) + test.fail( + "Qemu did not warn the use of " f"flags {str(fwarn_flags)}" + ) # 4) check guest flags under load cpu, stress and system (dd) class test_boot_guest_and_try_flags_under_load(Test_temp): - def test(self): - test.log.info("Check guest working cpuflags under load " - "cpu and stress and system (dd)") + test.log.info( + "Check guest working cpuflags under load " + "cpu and stress and system (dd)" + ) cpu_model, extra_flags = parse_cpu_model() flags = HgFlags(cpu_model, extra_flags) cpuf_model = cpu_model - test.log.debug("Cpu mode flags %s.", - str(flags.quest_cpu_model_flags)) + test.log.debug("Cpu mode flags %s.", str(flags.quest_cpu_model_flags)) if all_host_supported_flags == "yes": - test.log.debug("Added flags %s.", - str(flags.cpumodel_unsupport_flags)) + test.log.debug("Added flags %s.", str(flags.cpumodel_unsupport_flags)) # Add unsupported flags. for fadd in flags.cpumodel_unsupport_flags: @@ -753,12 +755,11 @@ def test(self): (self.vm, _) = start_guest_with_cpuflags(cpuf_model, smp) - if (not run_stress(self.vm, 60, flags.guest_flags)): + if not run_stress(self.vm, 60, flags.guest_flags): test.fail("Stress test ended before end of test.") # 5) Online/offline CPU class test_online_offline_guest_CPUs(Test_temp): - def test(self): cpu_model, extra_flags = parse_cpu_model() @@ -783,30 +784,28 @@ def encap(timeout): else: test.log.warning("For this test is necessary smp > 1.") return False + timeout = 60 test_flags = flags.guest_flags if all_host_supported_flags == "yes": test_flags = flags.all_possible_guest_flags - result = utils_misc.parallel([(encap, [timeout]), - (run_stress, [self.vm, timeout, - test_flags])]) + result = utils_misc.parallel( + [(encap, [timeout]), (run_stress, [self.vm, timeout, test_flags])] + ) if not (result[0] and result[1]): test.fail("Stress tests failed before end of testing.") # 6) migration test class test_migration_with_additional_flags(Test_temp): - def test(self): cpu_model, extra_flags = parse_cpu_model() flags = HgFlags(cpu_model, extra_flags) - test.log.debug("Cpu mode flags %s.", - str(flags.quest_cpu_model_flags)) - test.log.debug("Added flags %s.", - str(flags.cpumodel_unsupport_flags)) + test.log.debug("Cpu mode flags %s.", str(flags.quest_cpu_model_flags)) + test.log.debug("Added flags %s.", str(flags.cpumodel_unsupport_flags)) cpuf_model = cpu_model # Add unsupported flags. 
@@ -820,28 +819,29 @@ def test(self): install_path = "/tmp" install_cpuflags_test_on_vm(self.vm, install_path) - flags = check_cpuflags_work(self.vm, install_path, - flags.guest_flags) - test.assertTrue(flags[0], "No cpuflags passed the check: %s" - % str(flags)) - test.assertFalse(flags[1], "Some cpuflags failed the check: %s" - % str(flags)) + flags = check_cpuflags_work(self.vm, install_path, flags.guest_flags) + test.assertTrue(flags[0], f"No cpuflags passed the check: {str(flags)}") + test.assertFalse(flags[1], f"Some cpuflags failed the check: {str(flags)}") dd_session = self.vm.wait_for_login() stress_session = self.vm.wait_for_login() - dd_session.sendline("nohup dd if=$(echo /dev/[svh]da) of=/tmp/" - "stressblock bs=10MB count=100 &") - cmd = ("nohup %s/cpuflags-test --stress %s%s &" % - (os.path.join(install_path, "src"), smp, - cpu.kvm_flags_to_stresstests(flags[0]))) + dd_session.sendline( + "nohup dd if=$(echo /dev/[svh]da) of=/tmp/" + "stressblock bs=10MB count=100 &" + ) + cmd = "nohup {}/cpuflags-test --stress {}{} &".format( + os.path.join(install_path, "src"), + smp, + cpu.kvm_flags_to_stresstests(flags[0]), + ) stress_session.sendline(cmd) time.sleep(5) qemu_migration.set_speed(self.vm, mig_speed) self.clone = self.vm.migrate( - mig_timeout, mig_protocol, offline=False, - not_wait_for_migration=True) + mig_timeout, mig_protocol, offline=False, not_wait_for_migration=True + ) time.sleep(5) @@ -858,16 +858,15 @@ def test(self): # If cpuflags-test hang up during migration test raise exception try: - stress_session.cmd('killall cpuflags-test') + stress_session.cmd("killall cpuflags-test") except aexpect.ShellCmdError: - test.fail("Stress cpuflags-test should be still " - "running after migration.") + test.fail( + "Stress cpuflags-test should be still " "running after migration." + ) try: - stress_session.cmd("ls /tmp/stressblock && " - "rm -f /tmp/stressblock") + stress_session.cmd("ls /tmp/stressblock && " "rm -f /tmp/stressblock") except aexpect.ShellCmdError: - test.fail("Background 'dd' command failed to " - "produce output file.") + test.fail("Background 'dd' command failed to " "produce output file.") def net_send_object(socket, obj): """ @@ -893,7 +892,7 @@ def net_recv_object(socket, timeout=60): data = "" d_len = int(socket.recv(6)) - while (len(data) < d_len and (time.time() - time_start) < timeout): + while len(data) < d_len and (time.time() - time_start) < timeout: data += socket.recv(d_len - len(data)) data = pickle.loads(data) @@ -902,7 +901,6 @@ def net_recv_object(socket, timeout=60): test.fail("Failed to receive python object over the network") class test_multi_host_migration(Test_temp): - def test(self): """ Test migration between multiple hosts. 
@@ -911,10 +909,8 @@ def test(self): flags = HgFlags(cpu_model, extra_flags) - test.log.debug("Cpu mode flags %s.", - str(flags.quest_cpu_model_flags)) - test.log.debug("Added flags %s.", - str(flags.cpumodel_unsupport_flags)) + test.log.debug("Cpu mode flags %s.", str(flags.quest_cpu_model_flags)) + test.log.debug("Added flags %s.", str(flags.cpumodel_unsupport_flags)) cpuf_model = cpu_model for fadd in extra_flags: @@ -926,10 +922,8 @@ def test(self): install_path = "/tmp" class testMultihostMigration(migration.MultihostMigration): - def __init__(self, test, params, env): - migration.MultihostMigration.__init__(self, test, params, - env) + migration.MultihostMigration.__init__(self, test, params, env) def migration_scenario(self): srchost = self.params.get("hosts")[0] @@ -941,26 +935,27 @@ def worker(mig_data): install_cpuflags_test_on_vm(vm, install_path) - Flags = check_cpuflags_work(vm, install_path, - flags.all_possible_guest_flags) + Flags = check_cpuflags_work( + vm, install_path, flags.all_possible_guest_flags + ) test.log.info("Woking CPU flags: %s", str(Flags[0])) - test.log.info("Not working CPU flags: %s", - str(Flags[1])) - test.log.warning("Flags works even if not defined on" - " guest cpu flags: %s", - str(Flags[0] - flags.guest_flags)) - test.log.warning("Not tested CPU flags: %s", - str(Flags[2])) - session.sendline("nohup dd if=/dev/[svh]da of=/tmp/" - "stressblock bs=10MB count=100 &") - - cmd = ("nohup %s/cpuflags-test --stress %s%s &" % - (os.path.join(install_path, "src"), - smp, - cpu.kvm_flags_to_stresstests(Flags[0] & - flags.guest_flags))) - test.log.debug("Guest_flags: %s", - str(flags.guest_flags)) + test.log.info("Not working CPU flags: %s", str(Flags[1])) + test.log.warning( + "Flags works even if not defined on" " guest cpu flags: %s", + str(Flags[0] - flags.guest_flags), + ) + test.log.warning("Not tested CPU flags: %s", str(Flags[2])) + session.sendline( + "nohup dd if=/dev/[svh]da of=/tmp/" + "stressblock bs=10MB count=100 &" + ) + + cmd = "nohup {}/cpuflags-test --stress {}{} &".format( + os.path.join(install_path, "src"), + smp, + cpu.kvm_flags_to_stresstests(Flags[0] & flags.guest_flags), + ) + test.log.debug("Guest_flags: %s", str(flags.guest_flags)) test.log.debug("Working_flags: %s", str(Flags[0])) test.log.debug("Start stress on guest: %s", cmd) session.sendline(cmd) @@ -973,26 +968,26 @@ def check_worker(mig_data): session = vm.wait_for_login(timeout=self.login_timeout) try: - session.cmd('killall cpuflags-test') + session.cmd("killall cpuflags-test") except aexpect.ShellCmdError: - test.fail("The cpuflags-test program" - " should be active after" - " migration and it's not.") - - Flags = check_cpuflags_work(vm, install_path, - flags.all_possible_guest_flags) - test.log.info("Woking CPU flags: %s", - str(Flags[0])) - test.log.info("Not working CPU flags: %s", - str(Flags[1])) - test.log.warning("Flags works even if not defined on" - " guest cpu flags: %s", - str(Flags[0] - flags.guest_flags)) - test.log.warning("Not tested CPU flags: %s", - str(Flags[2])) - - self.migrate_wait(["vm1"], srchost, dsthost, - worker, check_worker) + test.fail( + "The cpuflags-test program" + " should be active after" + " migration and it's not." 
+ ) + + Flags = check_cpuflags_work( + vm, install_path, flags.all_possible_guest_flags + ) + test.log.info("Woking CPU flags: %s", str(Flags[0])) + test.log.info("Not working CPU flags: %s", str(Flags[1])) + test.log.warning( + "Flags works even if not defined on" " guest cpu flags: %s", + str(Flags[0] - flags.guest_flags), + ) + test.log.warning("Not tested CPU flags: %s", str(Flags[2])) + + self.migrate_wait(["vm1"], srchost, dsthost, worker, check_worker) params_b = params.copy() params_b["cpu_model"] = cpu_model @@ -1000,7 +995,6 @@ def check_worker(mig_data): mig.run() class test_multi_host_migration_onoff_cpu(Test_temp): - def test(self): """ Test migration between multiple hosts. @@ -1009,10 +1003,8 @@ def test(self): flags = HgFlags(cpu_model, extra_flags) - test.log.debug("Cpu mode flags %s.", - str(flags.quest_cpu_model_flags)) - test.log.debug("Added flags %s.", - str(flags.cpumodel_unsupport_flags)) + test.log.debug("Cpu mode flags %s.", str(flags.quest_cpu_model_flags)) + test.log.debug("Added flags %s.", str(flags.cpumodel_unsupport_flags)) cpuf_model = cpu_model for fadd in extra_flags: @@ -1022,39 +1014,42 @@ def test(self): cpuf_model += ",-" + str(fdel) smp = int(params["smp"]) - disable_cpus = list(map(lambda cpu: int(cpu), - params.get("disable_cpus", "").split())) + disable_cpus = list( + map(lambda cpu: int(cpu), params.get("disable_cpus", "").split()) + ) install_path = "/tmp" class testMultihostMigration(migration.MultihostMigration): - def __init__(self, test, params, env): - migration.MultihostMigration.__init__(self, test, params, - env) + migration.MultihostMigration.__init__(self, test, params, env) self.srchost = self.params.get("hosts")[0] self.dsthost = self.params.get("hosts")[1] - self.id = {'src': self.srchost, - 'dst': self.dsthost, - "type": "disable_cpu"} - self.migrate_count = int(self.params.get('migrate_count', - '2')) + self.id = { + "src": self.srchost, + "dst": self.dsthost, + "type": "disable_cpu", + } + self.migrate_count = int(self.params.get("migrate_count", "2")) def ping_pong_migrate(self, sync, worker, check_worker): for _ in range(self.migrate_count): - test.log.info("File transfer not ended, starting" - " a round of migration...") + test.log.info( + "File transfer not ended, starting" + " a round of migration..." 
+ ) sync.sync(True, timeout=mig_timeout) if self.hostid == self.srchost: - self.migrate_wait(["vm1"], - self.srchost, - self.dsthost, - start_work=worker) + self.migrate_wait( + ["vm1"], self.srchost, self.dsthost, start_work=worker + ) elif self.hostid == self.dsthost: - self.migrate_wait(["vm1"], - self.srchost, - self.dsthost, - check_work=check_worker) + self.migrate_wait( + ["vm1"], + self.srchost, + self.dsthost, + check_work=check_worker, + ) tmp = self.dsthost self.dsthost = self.srchost self.srchost = tmp @@ -1062,8 +1057,13 @@ def ping_pong_migrate(self, sync, worker, check_worker): def migration_scenario(self): from autotest.client.shared.syncdata import SyncData - sync = SyncData(self.master_id(), self.hostid, self.hosts, - self.id, self.sync_server) + sync = SyncData( + self.master_id(), + self.hostid, + self.hosts, + self.id, + self.sync_server, + ) def worker(mig_data): vm = env.get_vm("vm1") @@ -1071,26 +1071,29 @@ def worker(mig_data): install_cpuflags_test_on_vm(vm, install_path) - Flags = check_cpuflags_work(vm, install_path, - flags.all_possible_guest_flags) + Flags = check_cpuflags_work( + vm, install_path, flags.all_possible_guest_flags + ) test.log.info("Woking CPU flags: %s", str(Flags[0])) - test.log.info("Not working CPU flags: %s", - str(Flags[1])) - test.log.warning("Flags works even if not defined on" - " guest cpu flags: %s", - str(Flags[0] - flags.guest_flags)) - test.log.warning("Not tested CPU flags: %s", - str(Flags[2])) - for cpu in disable_cpus: - if cpu < smp: - disable_cpu(session, cpu, True) + test.log.info("Not working CPU flags: %s", str(Flags[1])) + test.log.warning( + "Flags works even if not defined on" " guest cpu flags: %s", + str(Flags[0] - flags.guest_flags), + ) + test.log.warning("Not tested CPU flags: %s", str(Flags[2])) + for vcpu in disable_cpus: + if vcpu < smp: + disable_cpu(session, vcpu, True) else: - test.log.warning("There is no enouth cpu" - " in Guest. It is trying to" - "remove cpu:%s from guest with" - " smp:%s.", cpu, smp) - test.log.debug("Guest_flags: %s", - str(flags.guest_flags)) + test.log.warning( + "There is no enouth cpu" + " in Guest. It is trying to" + "remove cpu:%s from guest with" + " smp:%s.", + vcpu, + smp, + ) + test.log.debug("Guest_flags: %s", str(flags.guest_flags)) test.log.debug("Working_flags: %s", str(Flags[0])) def check_worker(mig_data): @@ -1100,29 +1103,28 @@ def check_worker(mig_data): session = vm.wait_for_login(timeout=self.login_timeout) - really_disabled = check_online_cpus(session, smp, - disable_cpus) + really_disabled = check_online_cpus(session, smp, disable_cpus) not_disabled = set(really_disabled) & set(disable_cpus) if not_disabled: - test.fail("Some of disabled cpus are " - "online. This shouldn't " - "happen. Cpus disabled on " - "srchost:%s, Cpus not " - "disabled on dsthost:%s" % - (disable_cpus, not_disabled)) - - Flags = check_cpuflags_work(vm, install_path, - flags.all_possible_guest_flags) - test.log.info("Woking CPU flags: %s", - str(Flags[0])) - test.log.info("Not working CPU flags: %s", - str(Flags[1])) - test.log.warning("Flags works even if not defined on" - " guest cpu flags: %s", - str(Flags[0] - flags.guest_flags)) - test.log.warning("Not tested CPU flags: %s", - str(Flags[2])) + test.fail( + "Some of disabled cpus are " + "online. This shouldn't " + "happen. 
Cpus disabled on " + f"srchost:{disable_cpus}, Cpus not " + f"disabled on dsthost:{not_disabled}" + ) + + Flags = check_cpuflags_work( + vm, install_path, flags.all_possible_guest_flags + ) + test.log.info("Woking CPU flags: %s", str(Flags[0])) + test.log.info("Not working CPU flags: %s", str(Flags[1])) + test.log.warning( + "Flags works even if not defined on" " guest cpu flags: %s", + str(Flags[0] - flags.guest_flags), + ) + test.log.warning("Not tested CPU flags: %s", str(Flags[2])) self.ping_pong_migrate(sync, worker, check_worker) @@ -1132,13 +1134,14 @@ def check_worker(mig_data): mig.run() test_type = params.get("test_type") - if (test_type in locals()): + if test_type in locals(): tests_group = locals()[test_type] if params.get("cpu_model"): tests_group() else: - cpu_models = (set(get_cpu_models_supported_by_host()) - - set(cpu_model_black_list)) + cpu_models = set(get_cpu_models_supported_by_host()) - set( + cpu_model_black_list + ) if not cpu_models: test.cancel("No cpu_models detected, nothing to test.") test.log.info("Start test with cpu models %s", str(cpu_models)) @@ -1151,7 +1154,6 @@ def check_worker(mig_data): print_exception(tests_group) failed.append(cpumodel) if failed != []: - test.fail("Test of cpu models %s failed." % (str(failed))) + test.fail(f"Test of cpu models {str(failed)} failed.") else: - test.fail("Test group '%s' is not defined in" - " cpuflags test" % test_type) + test.fail(f"Test group '{test_type}' is not defined in" " cpuflags test") diff --git a/qemu/tests/cpuid.py b/qemu/tests/cpuid.py index e3cb04f07f..bddaedb5bb 100644 --- a/qemu/tests/cpuid.py +++ b/qemu/tests/cpuid.py @@ -1,18 +1,13 @@ """ Group of cpuid tests for X86 CPU """ -import re -import os -import logging -from avocado.utils import build -from avocado.utils import process +import logging +import os +import re -from virttest import utils_misc -from virttest import cpu -from virttest import env_process -from virttest import virt_vm -from virttest import data_dir +from avocado.utils import build, process +from virttest import cpu, data_dir, env_process, utils_misc, virt_vm logger = logging.getLogger(__name__) dbg = logger.debug @@ -22,6 +17,7 @@ def isprintable(c): try: import string + if c in string.printable: return True return False @@ -58,12 +54,12 @@ def cpu_models_to_test(): models_opt = params.get("cpu_models") model_opt = params.get("cpu_model") - if (models_opt is None and model_opt is None): + if models_opt is None and model_opt is None: test.error("No cpu_models or cpu_model option is set") cpu_models = set() - if models_opt == '*': + if models_opt == "*": cpu_models.update(cpu.get_qemu_cpu_models(qemu_binary)) elif models_opt: cpu_models.update(models_opt.split()) @@ -84,8 +80,7 @@ def test_qemu_cpu_models_list(self): qemu_models = cpu.get_qemu_cpu_models(qemu_binary) missing = set(cpu_models) - set(qemu_models) if missing: - test.fail( - "Some CPU models not in QEMU CPU model list: %r" % (missing)) + test.fail(f"Some CPU models not in QEMU CPU model list: {missing!r}") added = set(qemu_models) - set(cpu_models) if added: test.log.info("Extra CPU models in QEMU CPU listing: %s", added) @@ -107,17 +102,20 @@ def compare_cpuid_output(a, b): def parse_cpuid_dump(output): dbg("parsing cpuid dump: %r", output) cpuid_re = re.compile( - "^ *(0x[0-9a-f]+) +0x([0-9a-f]+): +eax=0x([0-9a-f]+) ebx=0x([0-9a-f]+) ecx=0x([0-9a-f]+) edx=0x([0-9a-f]+)$") - output_match = re.search('(==START TEST==.*==END TEST==)', output, re.M | re.DOTALL) + "^ *(0x[0-9a-f]+) +0x([0-9a-f]+): +eax=0x([0-9a-f]+) 
ebx=0x([0-9a-f]+) ecx=0x([0-9a-f]+) edx=0x([0-9a-f]+)$" + ) + output_match = re.search( + "(==START TEST==.*==END TEST==)", output, re.M | re.DOTALL + ) if output_match is None: dbg("cpuid dump doesn't follow expected pattern") return None output = output_match.group(1) out_lines = output.splitlines() - if out_lines[0] != '==START TEST==' or out_lines[-1] != '==END TEST==': + if out_lines[0] != "==START TEST==" or out_lines[-1] != "==END TEST==": dbg("cpuid dump doesn't have expected delimiters") return None - if out_lines[1] != 'CPU:': + if out_lines[1] != "CPU:": dbg("cpuid dump doesn't start with 'CPU:' line") return None result = {} @@ -128,66 +126,69 @@ def parse_cpuid_dump(output): return None in_eax = int(m.group(1), 16) in_ecx = int(m.group(2), 16) - result[in_eax, in_ecx, 'eax'] = int(m.group(3), 16) - result[in_eax, in_ecx, 'ebx'] = int(m.group(4), 16) - result[in_eax, in_ecx, 'ecx'] = int(m.group(5), 16) - result[in_eax, in_ecx, 'edx'] = int(m.group(6), 16) + result[in_eax, in_ecx, "eax"] = int(m.group(3), 16) + result[in_eax, in_ecx, "ebx"] = int(m.group(4), 16) + result[in_eax, in_ecx, "ecx"] = int(m.group(5), 16) + result[in_eax, in_ecx, "edx"] = int(m.group(6), 16) return result def get_test_kernel_cpuid(self, vm): vm.resume() timeout = float(params.get("login_timeout", 240)) - test.log.debug("Will wait for CPUID serial output at %r", - vm.serial_console) - if not utils_misc.wait_for(lambda: - re.search("==END TEST==", - vm.serial_console.get_output()), - timeout, 1): + test.log.debug("Will wait for CPUID serial output at %r", vm.serial_console) + if not utils_misc.wait_for( + lambda: re.search("==END TEST==", vm.serial_console.get_output()), + timeout, + 1, + ): test.fail("Could not get test complete message.") test_output = parse_cpuid_dump(vm.serial_console.get_output()) test.log.debug("Got CPUID serial output: %r", test_output) if test_output is None: - test.fail("Test output signature not found in " - "output:\n %s", vm.serial_console.get_output()) + test.fail( + "Test output signature not found in " "output:\n %s", + vm.serial_console.get_output(), + ) vm.destroy(gracefully=False) return test_output def find_cpu_obj(vm): """Find path of a valid VCPU object""" - roots = ['/machine/icc-bridge/icc', '/machine/unattached/device'] + roots = ["/machine/icc-bridge/icc", "/machine/unattached/device"] for root in roots: - for child in vm.monitor.cmd('qom-list', dict(path=root)): - test.log.debug('child: %r', child) - if child['type'].rstrip('>').endswith('-cpu'): - return root + '/' + child['name'] + for child in vm.monitor.cmd("qom-list", dict(path=root)): + test.log.debug("child: %r", child) + if child["type"].rstrip(">").endswith("-cpu"): + return root + "/" + child["name"] def get_qom_cpuid(self, vm): assert vm.monitor.protocol == "qmp" cpu_path = find_cpu_obj(vm) - test.log.debug('cpu path: %r', cpu_path) + test.log.debug("cpu path: %r", cpu_path) r = {} - for prop in 'feature-words', 'filtered-features': - words = vm.monitor.cmd('qom-get', dict(path=cpu_path, property=prop)) - test.log.debug('%s property: %r', prop, words) + for prop in "feature-words", "filtered-features": + words = vm.monitor.cmd("qom-get", dict(path=cpu_path, property=prop)) + test.log.debug("%s property: %r", prop, words) for w in words: - reg = w['cpuid-register'].lower() - key = (w['cpuid-input-eax'], w.get('cpuid-input-ecx', 0), reg) + reg = w["cpuid-register"].lower() + key = (w["cpuid-input-eax"], w.get("cpuid-input-ecx", 0), reg) r.setdefault(key, 0) - r[key] |= w['features'] + r[key] |= 
w["features"] return r - def get_guest_cpuid(self, cpu_model, feature=None, extra_params=None, qom_mode=False): + def get_guest_cpuid( + self, cpu_model, feature=None, extra_params=None, qom_mode=False + ): if not qom_mode: test_kernel_dir = os.path.join(data_dir.get_deps_dir(), "cpuid", "src") build.make(test_kernel_dir, extra_args="cpuid_dump_kernel.bin") - vm_name = params['main_vm'] + vm_name = params["main_vm"] params_b = params.copy() if not qom_mode: - params_b["kernel"] = os.path.join( - test_kernel_dir, "cpuid_dump_kernel.bin") + params_b["kernel"] = os.path.join(test_kernel_dir, "cpuid_dump_kernel.bin") params_b["cpu_model"] = cpu_model params_b["cpu_model_flags"] = feature del params_b["images"] @@ -196,7 +197,7 @@ def get_guest_cpuid(self, cpu_model, feature=None, extra_params=None, qom_mode=F params_b.update(extra_params) env_process.preprocess_vm(self, params_b, env, vm_name) vm = env.get_vm(vm_name) - dbg('is dead: %r', vm.is_dead()) + dbg("is dead: %r", vm.is_dead()) vm.create() self.vm = vm if qom_mode: @@ -207,12 +208,12 @@ def get_guest_cpuid(self, cpu_model, feature=None, extra_params=None, qom_mode=F def cpuid_to_vendor(cpuid_dump, idx): dst = [] for i in range(0, 4): - dst.append((chr(cpuid_dump[idx, 0, 'ebx'] >> (8 * i) & 0xff))) + dst.append(chr(cpuid_dump[idx, 0, "ebx"] >> (8 * i) & 0xFF)) for i in range(0, 4): - dst.append((chr(cpuid_dump[idx, 0, 'edx'] >> (8 * i) & 0xff))) + dst.append(chr(cpuid_dump[idx, 0, "edx"] >> (8 * i) & 0xFF)) for i in range(0, 4): - dst.append((chr(cpuid_dump[idx, 0, 'ecx'] >> (8 * i) & 0xff))) - return ''.join(dst) + dst.append(chr(cpuid_dump[idx, 0, "ecx"] >> (8 * i) & 0xFF)) + return "".join(dst) def default_vendor(self): """ @@ -227,7 +228,7 @@ def default_vendor(self): cmd_result = process.run(cmd, ignore_status=True, shell=True) vendor = cmd_result.stdout.strip() - ignore_cpus = set(params.get("ignore_cpu_models", "").split(' ')) + ignore_cpus = set(params.get("ignore_cpu_models", "").split(" ")) cpu_models = cpu_models - ignore_cpus for cpu_model in cpu_models: @@ -235,9 +236,10 @@ def default_vendor(self): guest_vendor = cpuid_to_vendor(out, 0x00000000) test.log.debug("Guest's vendor: %s", guest_vendor) if guest_vendor != vendor: - test.fail("Guest vendor [%s], doesn't match " - "required vendor [%s] for CPU [%s]" % - (guest_vendor, vendor, cpu_model)) + test.fail( + f"Guest vendor [{guest_vendor}], doesn't match " + f"required vendor [{vendor}] for CPU [{cpu_model}]" + ) def custom_vendor(self): """ @@ -251,18 +253,18 @@ def custom_vendor(self): guest_vendor0 = cpuid_to_vendor(out, 0x00000000) guest_vendor80000000 = cpuid_to_vendor(out, 0x80000000) test.log.debug("Guest's vendor[0]: %s", guest_vendor0) - test.log.debug("Guest's vendor[0x80000000]: %s", - guest_vendor80000000) + test.log.debug("Guest's vendor[0x80000000]: %s", guest_vendor80000000) if guest_vendor0 != vendor: - test.fail("Guest vendor[0] [%s], doesn't match " - "required vendor [%s] for CPU [%s]" % - (guest_vendor0, vendor, cpu_model)) + test.fail( + f"Guest vendor[0] [{guest_vendor0}], doesn't match " + f"required vendor [{vendor}] for CPU [{cpu_model}]" + ) if guest_vendor80000000 != vendor: - test.fail("Guest vendor[0x80000000] [%s], " - "doesn't match required vendor " - "[%s] for CPU [%s]" % - (guest_vendor80000000, vendor, - cpu_model)) + test.fail( + f"Guest vendor[0x80000000] [{guest_vendor80000000}], " + "doesn't match required vendor " + f"[{vendor}] for CPU [{cpu_model}]" + ) except: has_error = True if xfail is False: @@ -272,7 +274,7 @@ def 
custom_vendor(self): def cpuid_to_level(cpuid_dump): r = cpuid_dump[0, 0] - return r['eax'] + return r["eax"] def custom_level(self): """ @@ -284,8 +286,10 @@ def custom_level(self): out = get_guest_cpuid(self, cpu_model, "level=" + level) guest_level = str(cpuid_to_level(out)) if guest_level != level: - test.fail("Guest's level [%s], doesn't match " - "required level [%s]" % (guest_level, level)) + test.fail( + f"Guest's level [{guest_level}], doesn't match " + f"required level [{level}]" + ) except: has_error = True if xfail is False: @@ -297,11 +301,11 @@ def cpuid_to_family(cpuid_dump): # Intel Processor Identification and the CPUID Instruction # http://www.intel.com/Assets/PDF/appnote/241618.pdf # 5.1.2 Feature Information (Function 01h) - eax = cpuid_dump[1, 0]['eax'] - family = (eax >> 8) & 0xf - if family == 0xf: + eax = cpuid_dump[1, 0]["eax"] + family = (eax >> 8) & 0xF + if family == 0xF: # extract extendend family - return family + ((eax >> 20) & 0xff) + return family + ((eax >> 20) & 0xFF) return family def custom_family(self): @@ -314,8 +318,10 @@ def custom_family(self): out = get_guest_cpuid(self, cpu_model, "family=" + family) guest_family = str(cpuid_to_family(out)) if guest_family != family: - test.fail("Guest's family [%s], doesn't match " - "required family [%s]" % (guest_family, family)) + test.fail( + f"Guest's family [{guest_family}], doesn't match " + f"required family [{family}]" + ) except: has_error = True if xfail is False: @@ -327,10 +333,10 @@ def cpuid_to_model(cpuid_dump): # Intel Processor Identification and the CPUID Instruction # http://www.intel.com/Assets/PDF/appnote/241618.pdf # 5.1.2 Feature Information (Function 01h) - eax = cpuid_dump[1, 0]['eax'] - model = (eax >> 4) & 0xf + eax = cpuid_dump[1, 0]["eax"] + model = (eax >> 4) & 0xF # extended model - model |= (eax >> 12) & 0xf0 + model |= (eax >> 12) & 0xF0 return model def custom_model(self): @@ -343,8 +349,10 @@ def custom_model(self): out = get_guest_cpuid(self, cpu_model, "model=" + model) guest_model = str(cpuid_to_model(out)) if guest_model != model: - test.fail("Guest's model [%s], doesn't match " - "required model [%s]" % (guest_model, model)) + test.fail( + f"Guest's model [{guest_model}], doesn't match " + f"required model [{model}]" + ) except: has_error = True if xfail is False: @@ -356,8 +364,8 @@ def cpuid_to_stepping(cpuid_dump): # Intel Processor Identification and the CPUID Instruction # http://www.intel.com/Assets/PDF/appnote/241618.pdf # 5.1.2 Feature Information (Function 01h) - eax = cpuid_dump[1, 0]['eax'] - stepping = eax & 0xf + eax = cpuid_dump[1, 0]["eax"] + stepping = eax & 0xF return stepping def custom_stepping(self): @@ -370,9 +378,10 @@ def custom_stepping(self): out = get_guest_cpuid(self, cpu_model, "stepping=" + stepping) guest_stepping = str(cpuid_to_stepping(out)) if guest_stepping != stepping: - test.fail("Guest's stepping [%s], doesn't match " - "required stepping [%s]" % - (guest_stepping, stepping)) + test.fail( + f"Guest's stepping [{guest_stepping}], doesn't match " + f"required stepping [{stepping}]" + ) except: has_error = True if xfail is False: @@ -384,7 +393,7 @@ def cpuid_to_xlevel(cpuid_dump): # Intel Processor Identification and the CPUID Instruction # http://www.intel.com/Assets/PDF/appnote/241618.pdf # 5.2.1 Largest Extendend Function # (Function 80000000h) - return cpuid_dump[0x80000000, 0x00]['eax'] + return cpuid_dump[0x80000000, 0x00]["eax"] def custom_xlevel(self): """ @@ -396,12 +405,13 @@ def custom_xlevel(self): xlevel = 
params.get("expect_xlevel") try: - out = get_guest_cpuid(self, cpu_model, "xlevel=" + - params.get("xlevel")) + out = get_guest_cpuid(self, cpu_model, "xlevel=" + params.get("xlevel")) guest_xlevel = str(cpuid_to_xlevel(out)) if guest_xlevel != xlevel: - test.fail("Guest's xlevel [%s], doesn't match " - "required xlevel [%s]" % (guest_xlevel, xlevel)) + test.fail( + f"Guest's xlevel [{guest_xlevel}], doesn't match " + f"required xlevel [{xlevel}]" + ) except: has_error = True if xfail is False: @@ -417,9 +427,9 @@ def cpuid_to_model_id(cpuid_dump): m_id = "" for idx in (0x80000002, 0x80000003, 0x80000004): regs = cpuid_dump[idx, 0] - for name in ('eax', 'ebx', 'ecx', 'edx'): + for name in ("eax", "ebx", "ecx", "edx"): for shift in range(4): - c = ((regs[name] >> (shift * 8)) & 0xff) + c = (regs[name] >> (shift * 8)) & 0xFF if c == 0: # drop trailing \0-s break m_id += chr(c) @@ -433,13 +443,13 @@ def custom_model_id(self): model_id = params["model_id"] try: - out = get_guest_cpuid(self, cpu_model, "model_id='%s'" % - model_id) + out = get_guest_cpuid(self, cpu_model, f"model_id='{model_id}'") guest_model_id = cpuid_to_model_id(out) if guest_model_id != model_id: - test.fail("Guest's model_id [%s], doesn't match " - "required model_id [%s]" % - (guest_model_id, model_id)) + test.fail( + f"Guest's model_id [{guest_model_id}], doesn't match " + f"required model_id [{model_id}]" + ) except: has_error = True if xfail is False: @@ -456,9 +466,8 @@ def cpuid_regs_to_string(cpuid_dump, leaf, idx, regs): if isprintable(c): signature = signature + c else: - signature = "%s\\x%02x" % (signature, ord(c)) - test.log.debug("(%s.%s:%s: signature: %s", leaf, idx, str(regs), - signature) + signature = f"{signature}\\x{ord(c):02x}" + test.log.debug("(%s.%s:%s: signature: %s", leaf, idx, str(regs), signature) return signature def cpuid_signature(self): @@ -475,9 +484,10 @@ def cpuid_signature(self): out = get_guest_cpuid(self, cpu_model, flags) _signature = cpuid_regs_to_string(out, leaf, idx, regs) if _signature != signature: - test.fail("Guest's signature [%s], doesn't" - "match required signature [%s]" % - (_signature, signature)) + test.fail( + f"Guest's signature [{_signature}], doesn't" + f"match required signature [{signature}]" + ) except: has_error = True if xfail is False: @@ -501,8 +511,7 @@ def cpuid_bit_test(self): test.log.debug("CPUID(%s.%s).%s=0x%08x", leaf, idx, reg, r) for i in bits: if (r & (1 << int(i))) == 0: - test.fail("CPUID(%s.%s).%s[%s] is not set" % - (leaf, idx, reg, i)) + test.fail(f"CPUID({leaf}.{idx}).{reg}[{i}] is not set") except: has_error = True if xfail is False: @@ -525,8 +534,7 @@ def cpuid_reg_test(self): r = out[leaf, idx][reg] test.log.debug("CPUID(%s.%s).%s=0x%08x", leaf, idx, reg, r) if r != val: - test.fail("CPUID(%s.%s).%s is not 0x%08x" % - (leaf, idx, reg, val)) + test.fail(f"CPUID({leaf}.{idx}).{reg} is not 0x{val:08x}") except: has_error = True if xfail is False: @@ -545,7 +553,7 @@ def check_cpuid_dump(self): ignore_cpuid_leaves = ignore_cpuid_leaves.split() whitelist = [] for leaf in ignore_cpuid_leaves: - leaf = leaf.split(',') + leaf = leaf.split(",") # syntax of ignore_cpuid_leaves: # [,[,[ ,]]] ... 
for i in 0, 1, 3: # integer fields: @@ -555,67 +563,85 @@ def check_cpuid_dump(self): if not machine_type: test.cancel("No machine_type_to_check defined") - cpu_model_flags = params.get('cpu_model_flags', '') + cpu_model_flags = params.get("cpu_model_flags", "") full_cpu_model_name = cpu_model if cpu_model_flags: - full_cpu_model_name += ',' - full_cpu_model_name += cpu_model_flags.lstrip(',') - ref_file = os.path.join(data_dir.get_deps_dir(), 'cpuid', - "cpuid_dumps", - kvm_enabled and "kvm" or "nokvm", - machine_type, '%s-dump.txt' % (full_cpu_model_name)) + full_cpu_model_name += "," + full_cpu_model_name += cpu_model_flags.lstrip(",") + ref_file = os.path.join( + data_dir.get_deps_dir(), + "cpuid", + "cpuid_dumps", + kvm_enabled and "kvm" or "nokvm", + machine_type, + f"{full_cpu_model_name}-dump.txt", + ) if not os.path.exists(ref_file): - test.cancel("no cpuid dump file: %s" % (ref_file)) - reference = open(ref_file, 'r').read() + test.cancel(f"no cpuid dump file: {ref_file}") + reference = open(ref_file, "r").read() if not reference: - test.cancel("no cpuid dump data on file: %s" % (ref_file)) + test.cancel(f"no cpuid dump data on file: {ref_file}") reference = parse_cpuid_dump(reference) if reference is None: - test.cancel( - "couldn't parse reference cpuid dump from file; %s" % (ref_file)) - qom_mode = params.get('qom_mode', "no").lower() == 'yes' + test.cancel(f"couldn't parse reference cpuid dump from file; {ref_file}") + qom_mode = params.get("qom_mode", "no").lower() == "yes" if not qom_mode: - cpu_model_flags += ',enforce' + cpu_model_flags += ",enforce" try: - out = get_guest_cpuid( - self, cpu_model, cpu_model_flags, + self, + cpu_model, + cpu_model_flags, extra_params=dict(machine_type=machine_type, smp=1), - qom_mode=qom_mode) + qom_mode=qom_mode, + ) except (virt_vm.VMStartError, virt_vm.VMCreateError) as output: - if "host doesn't support requested feature:" in output \ - or ("host cpuid" in output and - ("lacks requested flag" in output or - "flag restricted to guest" in output)) \ - or ("Unable to find CPU definition:" in output): - test.cancel( - "Can't run CPU model %s on this host" % (full_cpu_model_name)) + if ( + "host doesn't support requested feature:" in output + or ( + "host cpuid" in output + and ( + "lacks requested flag" in output + or "flag restricted to guest" in output + ) + ) + or ("Unable to find CPU definition:" in output) + ): + test.cancel(f"Can't run CPU model {full_cpu_model_name} on this host") else: raise - dbg('ref_file: %r', ref_file) - dbg('ref: %r', reference) - dbg('out: %r', out) + dbg("ref_file: %r", ref_file) + dbg("ref: %r", reference) + dbg("out: %r", out) ok = True for k in reference.keys(): in_eax, in_ecx, reg = k diffs = compare_cpuid_output(reference[k], out.get(k)) for d in diffs: bit, vreference, vout = d - whitelisted = (in_eax,) in whitelist \ - or (in_eax, in_ecx) in whitelist \ - or (in_eax, in_ecx, reg) in whitelist \ + whitelisted = ( + (in_eax,) in whitelist + or (in_eax, in_ecx) in whitelist + or (in_eax, in_ecx, reg) in whitelist or (in_eax, in_ecx, reg, bit) in whitelist + ) silent = False - if vout is None and params.get('ok_missing', 'no') == 'yes': + if vout is None and params.get("ok_missing", "no") == "yes": whitelisted = True silent = True if not silent: info( "Non-matching bit: CPUID[0x%x,0x%x].%s[%d]: found %s instead of %s%s", - in_eax, in_ecx, reg, bit, vout, vreference, - whitelisted and " (whitelisted)" or "") + in_eax, + in_ecx, + reg, + bit, + vout, + vreference, + whitelisted and " (whitelisted)" or "", 
+ ) if not whitelisted: ok = False @@ -625,7 +651,7 @@ def check_cpuid_dump(self): # subtests runner test_type = params["test_type"] if test_type not in locals(): - test.error("Test function '%s' is not defined in test" % test_type) + test.error(f"Test function '{test_type}' is not defined in test") test_func = locals()[test_type] return test_func(test) diff --git a/qemu/tests/cpuinfo_query.py b/qemu/tests/cpuinfo_query.py index 2ad1317acb..a6f0ec7e7c 100644 --- a/qemu/tests/cpuinfo_query.py +++ b/qemu/tests/cpuinfo_query.py @@ -1,7 +1,5 @@ from avocado.utils import process - -from virttest import error_context -from virttest import utils_misc +from virttest import error_context, utils_misc @error_context.context_aware @@ -21,13 +19,11 @@ def run(test, params, env): cmd = qemu_binary + qcmd output = process.system_output(cmd, shell=True) - error_context.context("check if expected info is included in output of %s" - % cmd) + error_context.context(f"check if expected info is included in output of {cmd}") cpuinfos = params.get("cpu_info", "Conroe").split(",") missing = [] for cpuinfo in cpuinfos: if cpuinfo not in output: missing.append(cpuinfo) if missing: - test.fail("%s is missing in the output\n %s" % - (", ".join(missing), output)) + test.fail("{} is missing in the output\n {}".format(", ".join(missing), output)) diff --git a/qemu/tests/create_large_img.py b/qemu/tests/create_large_img.py index 02908231d5..93d2df50b5 100755 --- a/qemu/tests/create_large_img.py +++ b/qemu/tests/create_large_img.py @@ -1,9 +1,8 @@ import os -import six +import six from avocado import TestError from avocado.utils import partition as p - from virttest import data_dir from virttest.qemu_storage import QemuImg @@ -43,7 +42,7 @@ def run(test, params, env): if info in six.text_type(err): break else: - test.fail("CML failed with unexpected output: %s" % err) + test.fail(f"CML failed with unexpected output: {err}") else: test.fail("There is no error when creating an image with large size.") finally: diff --git a/qemu/tests/create_macvtap_device.py b/qemu/tests/create_macvtap_device.py index f213222f5b..2e70a18344 100644 --- a/qemu/tests/create_macvtap_device.py +++ b/qemu/tests/create_macvtap_device.py @@ -1,19 +1,16 @@ import re from avocado.utils import process - -from virttest import error_context -from virttest import utils_net -from virttest import utils_test +from virttest import error_context, utils_net, utils_test def get_macvtap_device_on_ifname(ifname): macvtaps = [] ip_link_out = process.system_output("ip -d link show") - re_str = r"(\S*)@%s" % ifname + re_str = rf"(\S*)@{ifname}" devices = re.findall(re_str, ip_link_out) for device in devices: - out = process.system_output("ip -d link show %s" % device) + out = process.system_output(f"ip -d link show {device}") if "macvtap mode" in out: macvtaps.append(device) return macvtaps @@ -46,55 +43,49 @@ def run(test, params, env): ifname = params.get("netdst") ifname = utils_net.get_macvtap_base_iface(ifname) - error_context.context("Verify no other macvtap share the physical " - "network device.", test.log.info) + error_context.context( + "Verify no other macvtap share the physical " "network device.", test.log.info + ) macvtap_devices = get_macvtap_device_on_ifname(ifname) for device in macvtap_devices: - process.system_output("ip link delete %s" % device) + process.system_output(f"ip link delete {device}") for mode in macvtap_mode.split(): - macvtap_name = "%s_01" % mode - txt = "Create %s mode macvtap device %s on %s." 
% (mode, - macvtap_name, - ifname) + macvtap_name = f"{mode}_01" + txt = f"Create {mode} mode macvtap device {macvtap_name} on {ifname}." error_context.context(txt, test.log.info) - cmd = " ip link add link %s name %s type macvtap mode %s" % (ifname, - macvtap_name, - mode) + cmd = f" ip link add link {ifname} name {macvtap_name} type macvtap mode {mode}" process.system(cmd, timeout=240) if set_mac: - txt = "Determine and configure mac address of %s, " % macvtap_name + txt = f"Determine and configure mac address of {macvtap_name}, " txt += "Then link up it." error_context.context(txt, test.log.info) mac = utils_net.generate_mac_address_simple() - cmd = " ip link set %s address %s up" % (macvtap_name, mac) + cmd = f" ip link set {macvtap_name} address {mac} up" process.system(cmd, timeout=240) - error_context.context("Check configuraton of macvtap device", - test.log.info) - check_cmd = " ip -d link show %s" % macvtap_name + error_context.context("Check configuraton of macvtap device", test.log.info) + check_cmd = f" ip -d link show {macvtap_name}" try: tap_info = process.system_output(check_cmd, timeout=240) except process.CmdError: - err = "Fail to create %s mode macvtap on %s" % (mode, ifname) + err = f"Fail to create {mode} mode macvtap on {ifname}" test.fail(err) if set_mac: if mac not in tap_info: - err = "Fail to set mac for %s" % macvtap_name + err = f"Fail to set mac for {macvtap_name}" test.fail(err) macvtaps.append(macvtap_name) if not dest_host: dest_host_get_cmd = "ip route | awk '/default/ { print $3 }'" dest_host_get_cmd = params.get("dest_host_get_cmd", dest_host_get_cmd) - dest_host = process.system_output( - dest_host_get_cmd, shell=True).split()[-1] + dest_host = process.system_output(dest_host_get_cmd, shell=True).split()[-1] - txt = "Ping dest host %s from " % dest_host - txt += "localhost with the interface %s" % ifname + txt = f"Ping dest host {dest_host} from " + txt += f"localhost with the interface {ifname}" error_context.context(txt, test.log.info) - status, output = utils_test.ping(dest_host, 10, - interface=ifname, timeout=20) + status, output = utils_test.ping(dest_host, 10, interface=ifname, timeout=20) ratio = utils_test.get_loss_ratio(output) if "passthru" in macvtap_mode: ifnames = utils_net.get_host_iface() @@ -111,37 +102,36 @@ def run(test, params, env): test.log.info("ips = %s", ips) if not ips: if ratio != 100: - err = "%s did not lost network connection after " % ifname - err += " creating %s mode macvtap on it." % macvtap_mode + err = f"{ifname} did not lost network connection after " + err += f" creating {macvtap_mode} mode macvtap on it." test.fail(err) else: - err = "%s is not the only network device in host" % ifname + err = f"{ifname} is not the only network device in host" test.log.debug(err) else: if ratio != 0: - err = "Package lost during ping %s from %s " % (dest_host, ifname) - err += "after creating %s mode macvtap on it." % macvtap_mode + err = f"Package lost during ping {dest_host} from {ifname} " + err += f"after creating {macvtap_mode} mode macvtap on it." test.fail(err) for name in macvtaps: - txt = "Delete macvtap device %s on %s." % (name, ifname) + txt = f"Delete macvtap device {name} on {ifname}." 
error_context.context(txt, test.log.info) - del_cmd = "ip link delete %s" % name + del_cmd = f"ip link delete {name}" process.system(del_cmd) devices = get_macvtap_device_on_ifname(ifname) if name in devices: - err = "Fail to delete macvtap %s on %s" % (name, ifname) + err = f"Fail to delete macvtap {name} on {ifname}" test.fail(err) test.log.info("dest_host = %s", dest_host) - txt = "Ping dest host %s from " % dest_host - txt += "localhost with the interface %s" % ifname + txt = f"Ping dest host {dest_host} from " + txt += f"localhost with the interface {ifname}" error_context.context(txt, test.log.info) - status, output = utils_test.ping(dest_host, 10, - interface=ifname, timeout=20) + status, output = utils_test.ping(dest_host, 10, interface=ifname, timeout=20) if status != 0: - test.fail("Ping failed, status: %s, output: %s" % (status, output)) + test.fail(f"Ping failed, status: {status}, output: {output}") ratio = utils_test.get_loss_ratio(output) if ratio != 0: - err = "Package lost during ping %s from %s " % (dest_host, ifname) + err = f"Package lost during ping {dest_host} from {ifname} " test.fail(err) diff --git a/qemu/tests/create_snapshot_on_running_base.py b/qemu/tests/create_snapshot_on_running_base.py index 2254f9a1f7..3339b754ed 100644 --- a/qemu/tests/create_snapshot_on_running_base.py +++ b/qemu/tests/create_snapshot_on_running_base.py @@ -1,5 +1,4 @@ -from virttest import data_dir -from virttest import qemu_storage +from virttest import data_dir, qemu_storage from provider import qemu_img_utils as img_utils @@ -18,11 +17,13 @@ def run(test, params, env): :param params: Dictionary with the test parameters :param env: Dictionary with test environment. """ + def generate_images_from_image_chain(image_chain): root_dir = data_dir.get_data_dir() - return [qemu_storage.QemuImg( - params.object_params(image), root_dir, image) - for image in image_chain.split()] + return [ + qemu_storage.QemuImg(params.object_params(image), root_dir, image) + for image in image_chain.split() + ] params["image_name_image1"] = params["image_name"] params["image_format_image1"] = params["image_format"] @@ -33,25 +34,31 @@ def generate_images_from_image_chain(image_chain): md5sum_bin = params.get("md5sum_bin", "md5sum") sync_bin = params.get("sync_bin", "sync") - test.log.info("Boot a guest up from base image: %s, and create a" - " file %s on the disk.", base.tag, guest_temp_file) + test.log.info( + "Boot a guest up from base image: %s, and create a" " file %s on the disk.", + base.tag, + guest_temp_file, + ) vm = img_utils.boot_vm_with_images(test, params, env) session = vm.wait_for_login() img_utils.save_random_file_to_vm(vm, guest_temp_file, 1024 * 512, sync_bin) md5_value = img_utils.check_md5sum(guest_temp_file, md5sum_bin, session) session.close() - test.log.info("Create a snapshot %s on the running base image.", - snapshot.tag) + test.log.info("Create a snapshot %s on the running base image.", snapshot.tag) snapshot.create(snapshot.params) vm.destroy() - test.log.info("Boot the guest up from snapshot image: %s, and verify the" - " file %s's md5 on the disk.", snapshot.tag, guest_temp_file) - vm = img_utils.boot_vm_with_images(test, params, env, - images=(snapshot.tag,)) + test.log.info( + "Boot the guest up from snapshot image: %s, and verify the" + " file %s's md5 on the disk.", + snapshot.tag, + guest_temp_file, + ) + vm = img_utils.boot_vm_with_images(test, params, env, images=(snapshot.tag,)) session = vm.wait_for_login() - img_utils.check_md5sum(guest_temp_file, md5sum_bin, session, - 
md5_value_to_check=md5_value) + img_utils.check_md5sum( + guest_temp_file, md5sum_bin, session, md5_value_to_check=md5_value + ) session.close() vm.destroy() diff --git a/qemu/tests/ctrl_vlan.py b/qemu/tests/ctrl_vlan.py index d978215fcb..c0396c7f21 100644 --- a/qemu/tests/ctrl_vlan.py +++ b/qemu/tests/ctrl_vlan.py @@ -1,8 +1,6 @@ import time -from virttest import error_context -from virttest import utils_net -from virttest import utils_test +from virttest import error_context, utils_net, utils_test from virttest.utils_windows import virtio_win @@ -22,22 +20,28 @@ def run(test, params, env): :param params: Dictionary with the test parameters. :param env: Dictionary with test environment. """ + def verify_vlan_table(expect_vlan=None): error_context.context("Check vlan table in rx-filter", test.log.info) - query_cmd = "query-rx-filter name=%s" % vm.virtnet[0].device_id + query_cmd = f"query-rx-filter name={vm.virtnet[0].device_id}" vlan_table = vm.monitor.send_args_cmd(query_cmd)[0].get("vlan-table") if not expect_vlan: vlan_table.sort() - if (len(set(vlan_table)) == 4095 and vlan_table[0] == 0 and - vlan_table[-1] == 4094): + if ( + len(set(vlan_table)) == 4095 + and vlan_table[0] == 0 + and vlan_table[-1] == 4094 + ): pass else: - test.fail("Guest vlan table is not correct, expect: %s," - " actual: %s" - % (expect_vlan, vlan_table)) + test.fail( + f"Guest vlan table is not correct, expect: {expect_vlan}," + f" actual: {vlan_table}" + ) elif vlan_table and vlan_table[0] != int(expect_vlan): - test.fail("Guest vlan table is not correct, expect: %s, actual: %s" - % (expect_vlan, vlan_table[0])) + test.fail( + f"Guest vlan table is not correct, expect: {expect_vlan}, actual: {vlan_table[0]}" + ) def get_netkvmco_path(session): """ @@ -60,9 +64,9 @@ def get_netkvmco_path(session): err = "Could not get architecture dirname of the vm" test.error(err) - middle_path = "%s\\%s" % (guest_name, guest_arch) + middle_path = f"{guest_name}\\{guest_arch}" find_cmd = 'dir /b /s %s\\netkvmco.dll | findstr "\\%s\\\\"' - find_cmd %= (viowin_ltr, middle_path) + find_cmd %= (viowin_ltr, middle_path) netkvmco_path = session.cmd(find_cmd).strip() test.log.info("Found netkvmco.dll file at %s", netkvmco_path) return netkvmco_path @@ -72,52 +76,56 @@ def get_netkvmco_path(session): vm = env.get_vm(params["main_vm"]) vm.verify_alive() - if ("ctrl_vlan=on" in params["nic_extra_params"] and - params["os_type"] == "linux"): + if "ctrl_vlan=on" in params["nic_extra_params"] and params["os_type"] == "linux": expect_vlan = vm.virtnet[0].vlan else: expect_vlan = None if "ctrl_vlan=on" in params["nic_extra_params"]: - error_context.context( - "Add vlan tag for guest network", test.log.info) + error_context.context("Add vlan tag for guest network", test.log.info) vlan_set_cmd = params["vlan_set_cmd"] vlan_id = params["vlan_id"] try: if params["os_type"] == "linux": session = vm.wait_for_serial_login(timeout=login_timeout) verify_vlan_table(expect_vlan) - ifname = utils_net.get_linux_ifname( - session, vm.virtnet[0].mac) + ifname = utils_net.get_linux_ifname(session, vm.virtnet[0].mac) vlan_set_cmd = vlan_set_cmd % (ifname, ifname, ifname, ifname) - status, output = session.cmd_status_output(vlan_set_cmd, - safe=True) + status, output = session.cmd_status_output(vlan_set_cmd, safe=True) if status: - test.error("Error occured when set vlan tag for network interface: %s, " - "err info: %s " % (ifname, output)) + test.error( + f"Error occured when set vlan tag for network interface: {ifname}, " + f"err info: {output} " + ) else: 
driver_verifier = params["driver_verifier"] session = vm.wait_for_login(timeout=login_timeout) - error_context.context("Verify if netkvm.sys is enabled in guest", - test.log.info) + error_context.context( + "Verify if netkvm.sys is enabled in guest", test.log.info + ) session = utils_test.qemu.windrv_check_running_verifier( - session, vm, test, driver_verifier, timeout=120) + session, vm, test, driver_verifier, timeout=120 + ) verify_vlan_table(expect_vlan) ifname = utils_net.get_windows_nic_attribute( - session=session, key="netenabled", value=True, - target="netconnectionID") + session=session, + key="netenabled", + value=True, + target="netconnectionID", + ) session = vm.wait_for_serial_login(timeout=login_timeout) status, output = session.cmd_status_output(vlan_set_cmd % ifname) if status: - test.error("Error occured when set vlan tag for " - "network interface: %s, err info: %s " - % (ifname, output)) + test.error( + "Error occured when set vlan tag for " + f"network interface: {ifname}, err info: {output} " + ) # restart nic for windows guest dev_mac = vm.virtnet[0].mac connection_id = utils_net.get_windows_nic_attribute( - session, "macaddress", dev_mac, "netconnectionid") - utils_net.restart_windows_guest_network( - session, connection_id) + session, "macaddress", dev_mac, "netconnectionid" + ) + utils_net.restart_windows_guest_network(session, connection_id) time.sleep(10) finally: if session: diff --git a/qemu/tests/curl_cookie_with_secret.py b/qemu/tests/curl_cookie_with_secret.py index 556b5da1c3..049fad3a83 100644 --- a/qemu/tests/curl_cookie_with_secret.py +++ b/qemu/tests/curl_cookie_with_secret.py @@ -1,11 +1,8 @@ import os import signal -from virttest import utils_misc -from virttest import qemu_storage -from virttest import error_context - from avocado.utils import process +from virttest import error_context, qemu_storage, utils_misc @error_context.context_aware @@ -20,17 +17,17 @@ def run(test, params, env): :param params: Dictionary with the test parameters :param env: Dictionary with test environment. 
""" + def _get_tcpdump_pid(dump_file): - cmd = ("ps -ef|grep tcpdump|grep %s|grep -v grep|awk '{print $2}'" - % dump_file) - return process.system_output(cmd, shell=True, - ignore_status=True).strip() + cmd = f"ps -ef|grep tcpdump|grep {dump_file}|grep -v grep|awk '{{print $2}}'" + return process.system_output(cmd, shell=True, ignore_status=True).strip() def _wait_for_tcpdump_done(dump_file): - response_timeout = params.get_numeric('response_timeout', 10) - if not utils_misc.wait_for(lambda: not _get_tcpdump_pid(dump_file), - response_timeout, 0, 1): - test.fail('tcpdump is running unexpectedly') + response_timeout = params.get_numeric("response_timeout", 10) + if not utils_misc.wait_for( + lambda: not _get_tcpdump_pid(dump_file), response_timeout, 0, 1 + ): + test.fail("tcpdump is running unexpectedly") def _cleanup(dump_file): if os.path.exists(dump_file): @@ -40,30 +37,33 @@ def _cleanup(dump_file): if pid: os.kill(int(pid), signal.SIGKILL) - tag = params['remote_image_tag'] + tag = params["remote_image_tag"] img_params = params.object_params(tag) img_obj = qemu_storage.QemuImg(img_params, None, tag) - dump_file = utils_misc.generate_tmp_file_name('%s_access_tcpdump' % tag, - 'out') + dump_file = utils_misc.generate_tmp_file_name(f"{tag}_access_tcpdump", "out") - test.log.info('start tcpdump, save packets in %s', dump_file) + test.log.info("start tcpdump, save packets in %s", dump_file) process.system( - params['tcpdump_cmd'].format(server=img_params['curl_server'], - dump_file=dump_file), - shell=True, ignore_status=True, ignore_bg_processes=True + params["tcpdump_cmd"].format( + server=img_params["curl_server"], dump_file=dump_file + ), + shell=True, + ignore_status=True, + ignore_bg_processes=True, ) try: img_obj.info() _wait_for_tcpdump_done(dump_file) - with open(dump_file, 'rb') as fd: + with open(dump_file, "rb") as fd: for line in fd: - line = line.decode('utf-8', 'ignore') - if 'Cookie: %s' % img_params['curl_cookie_secret'] in line: - test.log.info('get "%s" from "%s"', - img_params['curl_cookie_secret'], line) + line = line.decode("utf-8", "ignore") + if "Cookie: {}".format(img_params["curl_cookie_secret"]) in line: + test.log.info( + 'get "%s" from "%s"', img_params["curl_cookie_secret"], line + ) break else: - test.fail('Failed to get cookie data from tcpdump output') + test.fail("Failed to get cookie data from tcpdump output") finally: _cleanup(dump_file) diff --git a/qemu/tests/cyginstall.py b/qemu/tests/cyginstall.py index 470c8ad376..75efde6e72 100644 --- a/qemu/tests/cyginstall.py +++ b/qemu/tests/cyginstall.py @@ -19,8 +19,7 @@ def run(test, params, env): cygwin_prompt = params.get("cygwin_prompt", r"\$\s+$") cygwin_start = params.get("cygwin_start") cygwin_verify_cmd = params.get("cygwin_verify_cmd", "ls") - cygwin_install_timeout = float(params.get("cygwin_install_timeout", - "2400")) + cygwin_install_timeout = float(params.get("cygwin_install_timeout", "2400")) timeout = float(params.get("login_timeout", 240)) cdrom_check_cmd = params.get("cdrom_check") cdrom_filter = params.get("cdrom_filter") @@ -33,8 +32,7 @@ def run(test, params, env): output = session.cmd_output(cdrom_check_cmd, timeout) cdrom = re.findall(cdrom_filter, output) if cdrom: - cygwin_install_cmd = re.sub("WINUTILS", cdrom[0], - cygwin_install_cmd) + cygwin_install_cmd = re.sub("WINUTILS", cdrom[0], cygwin_install_cmd) else: test.error("Can not find tools iso in guest") diff --git a/qemu/tests/device_bit_check.py b/qemu/tests/device_bit_check.py index 6dfda2decc..d75142ec60 100644 --- 
a/qemu/tests/device_bit_check.py +++ b/qemu/tests/device_bit_check.py @@ -1,7 +1,6 @@ import re -from virttest import error_context -from virttest import env_process +from virttest import env_process, error_context @error_context.context_aware @@ -27,7 +26,7 @@ def run(test, params, env): timeout = float(params.get("login_timeout", 240)) dev_type = params.get("dev_type", "virtio-blk-pci") dev_param_name = params.get("dev_param_name", "blk_extra_params") - dev_pattern = params.get("dev_pattern", "(dev: %s.*?)dev:" % dev_type) + dev_pattern = params.get("dev_pattern", f"(dev: {dev_type}.*?)dev:") pci_id_pattern = params.get("pci_id_pattern") ccw_id_pattern = params.get("ccw_id_pattern") convert_dict = {"1": ["on", "true"], "0": ["off", "false"]} @@ -38,16 +37,17 @@ def run(test, params, env): extra_params = orig_extra_params for index, value in enumerate(properties): if value != default_value[index]: - extra_params += ",%s=%s" % (options[index], - option_add[index]) + extra_params += f",{options[index]}={option_add[index]}" params[dev_param_name] = extra_params.lstrip(",") else: properties = default_value - error_context.context("Boot up guest with properites: %s value as: %s" - % (str(options), properties), test.log.info) + error_context.context( + f"Boot up guest with properites: {str(options)} value as: {properties}", + test.log.info, + ) vm_name = params["main_vm"] - params["start_vm"] = 'yes' + params["start_vm"] = "yes" env_process.preprocess_vm(test, params, env, vm_name) vm = env.get_vm(vm_name) @@ -59,46 +59,49 @@ def run(test, params, env): test.error("Can't get device info from qtree result.") for index, option in enumerate(options): - option_regex = r"%s\s+=\s+(\w+)" % option + option_regex = rf"{option}\s+=\s+(\w+)" option_value = re.findall(option_regex, dev_info[0], re.M) if not option_value: test.log.debug("dev info in qtree: %s", dev_info[0]) test.error("Can't get the property info from qtree result") if option_value[0] not in convert_dict[properties[index]]: - msg = "'%s' value get '%s', " % (option, option_value) - msg += "expect value '%s'" % convert_dict[properties[index]] + msg = f"'{option}' value get '{option_value}', " + msg += f"expect value '{convert_dict[properties[index]]}'" test.log.debug(msg) - test.fail("Properity bit for %s is wrong." 
% option) + test.fail(f"Property bit for {option} is wrong.") test.log.info("Properity bit in qtree is right for %s.", option) if params.get("check_in_guest", "yes") == "yes": - if params.get('machine_type').startswith("s390"): - id_pattern = \ - ccw_id_pattern + re.findall( - 'dev:virtio-scsi-ccw.*\n''.*\n.*\n.*\ndev_id=\"' - 'fe.0.(.*?)\"', qtree_info.replace(' ', ''))[0] + if params.get("machine_type").startswith("s390"): + id_pattern = ( + ccw_id_pattern + + re.findall( + "dev:virtio-scsi-ccw.*\n" + '.*\n.*\n.*\ndev_id="' + 'fe.0.(.*?)"', + qtree_info.replace(" ", ""), + )[0] + ) ccw_info = session.cmd_output("lscss") ccw_n = re.findall(id_pattern, ccw_info) if not ccw_n: test.error("Can't get the ccw id for device") - cmd = "cat /sys/bus/ccw/devices/%s/" % ccw_n[0] + cmd = f"cat /sys/bus/ccw/devices/{ccw_n[0]}/" else: pci_info = session.cmd_output("lspci -n") pci_n = re.findall(pci_id_pattern, pci_info) if not pci_n: test.error("Can't get the pci id for device") - cmd = "cat /sys/bus/pci/devices/0000:%s/" % pci_n[0] + cmd = f"cat /sys/bus/pci/devices/0000:{pci_n[0]}/" cmd += "virtio*/features" bitstr = session.cmd_output(cmd) bitstr = re.findall("[01]+", bitstr)[-1] if bitstr[int(options_offset[index])] != properties[index]: - msg = "bit string in guest: %s" % bitstr - msg += "expect bit string: %s" % properties[index] + msg = f"bit string in guest: {bitstr}" + msg += f"expect bit string: {properties[index]}" test.log.debug(msg) - test.fail("Properity bit for %s is wrong" - " inside guest." % option) - test.log.info("Properity bit in qtree is right for %s" - " in guest.", option) + test.fail(f"Property bit for {option} is wrong" " inside guest.") + test.log.info("Property bit in qtree is right for %s" " in guest.", option) session.close() vm.destroy() diff --git a/qemu/tests/device_option_check.py b/qemu/tests/device_option_check.py index 4d5cd2b1be..6104fe28d0 100644 --- a/qemu/tests/device_option_check.py +++ b/qemu/tests/device_option_check.py @@ -1,9 +1,6 @@ import re -from virttest import error_context -from virttest import utils_misc -from virttest import env_process -from virttest import qemu_qtree +from virttest import env_process, error_context, qemu_qtree, utils_misc @error_context.context_aware @@ -33,11 +30,11 @@ def run(test, params, env): parameter_len = int(params.get("parameter_len", 4)) random_ignore_str = params.get("ignore_str") func_generate_random_string = utils_misc.generate_random_string - args = (parameter_len, ) + args = (parameter_len,) if random_ignore_str: - args += ("ignore_str=%s" % random_ignore_str, ) + args += (f"ignore_str={random_ignore_str}",) if convert_str: - args += ("convert_str=%s" % convert_str, ) + args += (f"convert_str={convert_str}",) parameter_value = func_generate_random_string(*args) params[params_name] = parameter_prefix + parameter_value @@ -47,9 +44,9 @@ def run(test, params, env): env_process.preprocess_vm(test, params, env, vm.name) if convert_str: - tmp_str = re.sub(r'\\\\', 'Abackslash', parameter_value) - tmp_str = re.sub(r'\\', '', tmp_str) - tmp_str = re.sub('Abackslash', r"\\", tmp_str) + tmp_str = re.sub(r"\\\\", "Abackslash", parameter_value) + tmp_str = re.sub(r"\\", "", tmp_str) + tmp_str = re.sub("Abackslash", r"\\", tmp_str) parameter_value_raw = tmp_str else: parameter_value_raw = parameter_value @@ -58,10 +55,10 @@ def run(test, params, env): error_context.context("Check option in qtree", test.log.info) qtree = qemu_qtree.QtreeContainer() try: - qtree.parse_info_qtree(vm.monitor.info('qtree')) - keyword = 
params['qtree_check_keyword'] - qtree_check_value = params['qtree_check_value'] - qtree_check_option = params['qtree_check_option'] + qtree.parse_info_qtree(vm.monitor.info("qtree")) + keyword = params["qtree_check_keyword"] + qtree_check_value = params["qtree_check_value"] + qtree_check_option = params["qtree_check_option"] for qdev in qtree.get_nodes(): if keyword not in qdev.qtree: @@ -79,16 +76,19 @@ def run(test, params, env): break else: test.fail( - "Can not find property '%s' from info qtree where '%s' is " - "'%s'" % (qtree_check_option, keyword, qtree_check_value)) + f"Can not find property '{qtree_check_option}' from info qtree where '{keyword}' is " + f"'{qtree_check_value}'" + ) qtree_value = re.findall('"?(.*)"?$', qtree_value)[0] - if (qtree_value != parameter_value_raw and - parameter_value_raw not in qtree_value): + if ( + qtree_value != parameter_value_raw + and parameter_value_raw not in qtree_value + ): test.fail( "Value from info qtree is not match with the value from" - "command line: '%s' vs '%s'" % ( - qtree_value, parameter_value_raw)) + f"command line: '{qtree_value}' vs '{parameter_value_raw}'" + ) except AttributeError: test.log.debug("Monitor deson't support info qtree skip this test") @@ -97,27 +97,31 @@ def run(test, params, env): failed_log = "" for check_cmd in check_cmds.split(): check_cmd_params = params.object_params(check_cmd) - cmd = check_cmd_params['cmd'] + cmd = check_cmd_params["cmd"] cmd = utils_misc.set_winutils_letter(session, cmd) - pattern = check_cmd_params['pattern'] % parameter_value_raw + pattern = check_cmd_params["pattern"] % parameter_value_raw - error_context.context("Check option with command %s" % cmd, test.log.info) + error_context.context(f"Check option with command {cmd}", test.log.info) _, output = session.cmd_status_output(cmd) - if not re.findall(r'%s' % pattern, output): - failed_log += ("Can not find option %s from guest." - " Guest output is '%s'" % (params_name, - output)) + if not re.findall(rf"{pattern}", output): + failed_log += ( + f"Can not find option {params_name} from guest." + f" Guest output is '{output}'" + ) if sg_vpd_cmd: - error_context.context("Check serial number length with command %s" - % sg_vpd_cmd, test.log.info) + error_context.context( + f"Check serial number length with command {sg_vpd_cmd}", test.log.info + ) sg_vpd_cmd = utils_misc.set_winutils_letter(session, sg_vpd_cmd) output = session.cmd_output(sg_vpd_cmd) actual_len = sum(len(_.split()[-1]) for _ in output.splitlines()[1:3]) expected_len = len(params.get("drive_serial_image1")) + 4 if actual_len != expected_len: - test.fail("Incorrect serial number length return." - " Guest output serial number is %s" % actual_len) + test.fail( + "Incorrect serial number length return." 
+ f" Guest output serial number is {actual_len}" + ) session.close() diff --git a/qemu/tests/differential_backup.py b/qemu/tests/differential_backup.py index ae24d02c86..1a021f12f4 100644 --- a/qemu/tests/differential_backup.py +++ b/qemu/tests/differential_backup.py @@ -1,21 +1,19 @@ -import time import logging +import time from functools import partial from virttest import error_context -from provider import block_dirty_bitmap -from provider import job_utils -from provider import backup_utils + +from provider import backup_utils, block_dirty_bitmap, job_utils from qemu.tests import live_backup_base -LOG_JOB = logging.getLogger('avocado.test') +LOG_JOB = logging.getLogger("avocado.test") class DifferentialBackupTest(live_backup_base.LiveBackup): - def __init__(self, test, params, env, tag): - super(DifferentialBackupTest, self).__init__(test, params, env, tag) - self.device = "drive_%s" % tag + super().__init__(test, params, env, tag) + self.device = f"drive_{tag}" def generate_backup_params(self): """generate target image params""" @@ -40,8 +38,7 @@ def get_record_counts_of_bitmap(self, name): :return: record counts :rtype: int """ - bitmap = block_dirty_bitmap.get_bitmap_by_name( - self.vm, self.device, name) + bitmap = block_dirty_bitmap.get_bitmap_by_name(self.vm, self.device, name) return bitmap["count"] if bitmap else -1 def get_sha256_of_bitmap(self, name): @@ -54,9 +51,10 @@ def get_sha256_of_bitmap(self, name): return block_dirty_bitmap.debug_block_dirty_bitmap_sha256(**kwargs) def _make_bitmap_transaction_action( - self, operator="add", index=1, extra_options=None): + self, operator="add", index=1, extra_options=None + ): bitmap = "bitmap_%d" % index - action = "block-dirty-bitmap-%s" % operator + action = f"block-dirty-bitmap-{operator}" action = self.vm.monitor.get_workable_cmd(action) data = {"node": self.device, "name": bitmap} if isinstance(extra_options, dict): @@ -65,10 +63,8 @@ def _make_bitmap_transaction_action( return job_utils.make_transaction_action(action, data) def _bitmap_batch_operate_by_transaction(self, action, bitmap_index_list): - bitmap_lists = ",".join( - map(lambda x: "bitmap_%d" % x, bitmap_index_list)) - LOG_JOB.info("%s %s in a transaction", - action.capitalize(), bitmap_lists) + bitmap_lists = ",".join(map(lambda x: "bitmap_%d" % x, bitmap_index_list)) + LOG_JOB.info("%s %s in a transaction", action.capitalize(), bitmap_lists) func = partial(self._make_bitmap_transaction_action, action) actions = list(map(func, bitmap_index_list)) return self.vm.monitor.transaction(actions) @@ -81,23 +77,27 @@ def _track_file_with_bitmap(self, filename, action_items): :param action_items: list of bitmap action. 
eg, [{"operator": "add", "index": 1} """ - full_name = "%s/%s" % (self.params.get("mount_point", - "/mnt"), filename) + full_name = "{}/{}".format(self.params.get("mount_point", "/mnt"), filename) self.create_file(full_name) - actions = list([self._make_bitmap_transaction_action(**item) - for item in action_items]) + actions = list( + [self._make_bitmap_transaction_action(**item) for item in action_items] + ) self.vm.monitor.transaction(actions) def track_file1_with_bitmap2(self): """track file1 with bitmap2""" - action_items = [{"operator": "disable", "index": 2}, - {"operator": "add", "index": 3}] + action_items = [ + {"operator": "disable", "index": 2}, + {"operator": "add", "index": 3}, + ] self._track_file_with_bitmap("file1", action_items) def track_file2_with_bitmap3(self): """track file2 with bitmap3""" - action_items = [{"operator": "disable", "index": 1}, - {"operator": "disable", "index": 3}] + action_items = [ + {"operator": "disable", "index": 1}, + {"operator": "disable", "index": 3}, + ] self._track_file_with_bitmap("file2", action_items) def merge_bitmap2_and_bitmap3_to_bitmap4(self): @@ -106,23 +106,26 @@ def merge_bitmap2_and_bitmap3_to_bitmap4(self): args = { "bitmap_name": target_bitmap, "target_device": self.device, - "disabled": "on"} + "disabled": "on", + } block_dirty_bitmap.block_dirty_bitmap_add(self.vm, args) block_dirty_bitmap.block_dirty_bitmap_merge( - self.vm, self.device, source_bitmaps, target_bitmap) + self.vm, self.device, source_bitmaps, target_bitmap + ) time.sleep(5) def track_file3_with_bitmap5(self): """track file3 with bitmap5""" args = {"bitmap_name": "bitmap_5", "target_device": self.device} block_dirty_bitmap.block_dirty_bitmap_add(self.vm, args) - full_name = "%s/file3" % self.params.get("mount_point", "/mnt") + full_name = "{}/file3".format(self.params.get("mount_point", "/mnt")) self.create_file(full_name) def merge_bitmap5_to_bitmap4(self): source_bitmaps, target_bitmap = ["bitmap_5"], "bitmap_4" return block_dirty_bitmap.block_dirty_bitmap_merge( - self.vm, self.device, source_bitmaps, target_bitmap) + self.vm, self.device, source_bitmaps, target_bitmap + ) def do_full_backup(self, tag): """Do full backup""" @@ -140,15 +143,14 @@ def do_incremental_backup_with_bitmap4(self, base_node, tag): """Do incremental backup with bitmap4""" img = backup_utils.create_image_by_params(self.vm, self.params, tag) node_name = img.format.get_param("node-name") - backup_utils.incremental_backup( - self.vm, self.device, node_name, "bitmap_4") + backup_utils.incremental_backup(self.vm, self.device, node_name, "bitmap_4") self.trash_files.append(img.key) def clean(self): """Stop bitmaps and clear image files""" block_dirty_bitmap.clear_all_bitmaps_in_device(self.vm, self.device) block_dirty_bitmap.remove_all_bitmaps_in_device(self.vm, self.device) - super(DifferentialBackupTest, self).clean() + super().clean() @error_context.context_aware @@ -181,33 +183,32 @@ def run(test, params, env): backup_test.track_file1_with_bitmap2() error_context.context("track file2 in bitmap3", test.log.info) backup_test.track_file2_with_bitmap3() - error_context.context( - "Record counts & sha256 of bitmap1", test.log.info) + error_context.context("Record counts & sha256 of bitmap1", test.log.info) sha256_bitmap1 = backup_test.get_sha256_of_bitmap("bitmap_1") - record_counts_bitmap1 = backup_test.get_record_counts_of_bitmap( - "bitmap_1") - error_context.context( - "Merge bitmap2 and bitmap3 to bitmap4", test.log.info) + record_counts_bitmap1 = 
backup_test.get_record_counts_of_bitmap("bitmap_1") + error_context.context("Merge bitmap2 and bitmap3 to bitmap4", test.log.info) backup_test.merge_bitmap2_and_bitmap3_to_bitmap4() error_context.context("Record sha256 of bitmap4", test.log.info) sha256_bitmap4 = backup_test.get_sha256_of_bitmap("bitmap_4") error_context.context("Record count of bitmap4", test.log.info) - record_counts_bitmap4 = backup_test.get_record_counts_of_bitmap( - "bitmap_4") + record_counts_bitmap4 = backup_test.get_record_counts_of_bitmap("bitmap_4") if sha256_bitmap4 != sha256_bitmap1: - test.log.debug("sha256_bitmap1: %s, sha256_bitmap4: %s", - sha256_bitmap1, sha256_bitmap4) + test.log.debug( + "sha256_bitmap1: %s, sha256_bitmap4: %s", sha256_bitmap1, sha256_bitmap4 + ) raise test.fail("sha256 of bitmap4 not equal sha256 of bitmap1") if record_counts_bitmap4 != record_counts_bitmap1: - test.log.debug("count_bitmap1: %d, count_bitmap4: %d", - record_counts_bitmap1, record_counts_bitmap4) + test.log.debug( + "count_bitmap1: %d, count_bitmap4: %d", + record_counts_bitmap1, + record_counts_bitmap4, + ) raise test.fail("counts of bitmap4 not equal counts of bitmap4") error_context.context("track file3 in bitmap5", test.log.info) backup_test.track_file3_with_bitmap5() error_context.context("Merge bitmap5 in bitmap4", test.log.info) backup_test.merge_bitmap5_to_bitmap4() - error_context.context( - "Do incremental backup with bitmap4", test.log.info) + error_context.context("Do incremental backup with bitmap4", test.log.info) backup_test.do_incremental_backup_with_bitmap4(node_name, "inc") finally: backup_test.clean() diff --git a/qemu/tests/disable_win_update.py b/qemu/tests/disable_win_update.py index 0444aa6ca7..7b6fe619bf 100644 --- a/qemu/tests/disable_win_update.py +++ b/qemu/tests/disable_win_update.py @@ -1,7 +1,6 @@ import re -from virttest import error_context -from virttest import utils_misc +from virttest import error_context, utils_misc @error_context.context_aware @@ -13,6 +12,7 @@ def run(test, params, env): :param params: Dictionary with the test parameters :param env: Dictionary with test environment. """ + def disable_win_service(session, scname): """ :param session: VM session. @@ -20,24 +20,22 @@ def disable_win_service(session, scname): :return: return True if scname has been disabled. 
""" - session.cmd("sc config %s start= disabled" % scname) - output = session.cmd("sc qc %s" % scname) + session.cmd(f"sc config {scname} start= disabled") + output = session.cmd(f"sc qc {scname}") return re.search("disabled", output, re.M | re.I) vm = env.get_vm(params["main_vm"]) vm.verify_alive() - session = vm.wait_for_login( - timeout=float(params.get("login_timeout", 240))) + session = vm.wait_for_login(timeout=float(params.get("login_timeout", 240))) cmd_timeout = float(params.get("cmd_timeout", 180)) scname = params.get("win_update_service", "WuAuServ") - error_context.context("Turned off windows updates service.", - test.log.info) + error_context.context("Turned off windows updates service.", test.log.info) try: status = utils_misc.wait_for( - lambda: disable_win_service(session, scname), - timeout=cmd_timeout) + lambda: disable_win_service(session, scname), timeout=cmd_timeout + ) if not status: test.fail("Turn off updates service failed.") session = vm.reboot(session) diff --git a/qemu/tests/discard_for_raw_block_target.py b/qemu/tests/discard_for_raw_block_target.py index a8f13d4bd2..d8c5bd22cc 100644 --- a/qemu/tests/discard_for_raw_block_target.py +++ b/qemu/tests/discard_for_raw_block_target.py @@ -1,15 +1,13 @@ import os import re -from provider.qemu_img_utils import strace - from avocado import fail_on from avocado.utils import process - -from virttest import data_dir -from virttest import utils_numeric +from virttest import data_dir, utils_numeric from virttest.qemu_storage import QemuImg +from provider.qemu_img_utils import strace + def run(test, params, env): """ @@ -29,18 +27,21 @@ def _check_output(strace_event, strace_output, match_str): """Check whether the value is good in the output file.""" test.log.debug("Check the output file '%s'.", strace_output) with open(strace_output) as fd: - m = re.findall(match_str + r', \d+, \d+', fd.read()) + m = re.findall(match_str + r", \d+, \d+", fd.read()) if not m: - test.fail("The result of system call '%s' is not right, " - "check '%s' for more details." - % (strace_event, strace_output)) - last_lst = m[-1].split(',') + test.fail( + f"The result of system call '{strace_event}' is not right, " + f"check '{strace_output}' for more details." + ) + last_lst = m[-1].split(",") sum_size = int(last_lst[-1]) + int(last_lst[-2]) # get the source image size in byte unit byte_image_size = int(utils_numeric.normalize_data_size(image_size, "B")) if sum_size != byte_image_size: - test.fail("The target allocated size '%s' is different from the source image size, " - "check '%s' for more details." % (str(sum_size), strace_output)) + test.fail( + f"The target allocated size '{str(sum_size)}' is different from the source image size, " + f"check '{strace_output}' for more details." + ) src_image = params["images"] image_size = params["image_size_test"] @@ -48,19 +49,25 @@ def _check_output(strace_event, strace_output, match_str): source = QemuImg(params.object_params(src_image), root_dir, src_image) strace_event = params["strace_event"] strace_events = strace_event.split() - strace_output_file = os.path.join(test.debugdir, - "convert_to_block.log") + strace_output_file = os.path.join(test.debugdir, "convert_to_block.log") source.create(source.params) # Generate the target scsi block file. 
- tgt_disk = process.system_output("lsscsi | grep '%s' | awk '{print $NF}'" - % params["scsi_mod"], shell=True).decode() + tgt_disk = process.system_output( + "lsscsi | grep '{}' | awk '{{print $NF}}'".format(params["scsi_mod"]), + shell=True, + ).decode() params["image_name_target"] = tgt_disk - test.log.debug("Convert from %s to %s with cache mode none, strace log: %s.", - source.image_filename, tgt_disk, strace_output_file) + test.log.debug( + "Convert from %s to %s with cache mode none, strace log: %s.", + source.image_filename, + tgt_disk, + strace_output_file, + ) with strace(source, strace_events, strace_output_file, trace_child=True): fail_on((process.CmdError,))(source.convert)( - params.object_params(src_image), root_dir, cache_mode="none") + params.object_params(src_image), root_dir, cache_mode="none" + ) _check_output(strace_event, strace_output_file, "FALLOC_FL_PUNCH_HOLE") diff --git a/qemu/tests/disk_extension.py b/qemu/tests/disk_extension.py index a6fabdff33..def5115581 100644 --- a/qemu/tests/disk_extension.py +++ b/qemu/tests/disk_extension.py @@ -1,8 +1,12 @@ from avocado.utils import process - -from virttest import env_process, utils_misc, utils_test -from virttest import error_context, data_dir -from virttest import utils_disk +from virttest import ( + data_dir, + env_process, + error_context, + utils_disk, + utils_misc, + utils_test, +) from virttest.qemu_storage import QemuImg from virttest.utils_misc import get_linux_drive_path @@ -33,32 +37,28 @@ def run(test, params, env): """ def cleanup_test_env(dirname, loop_device_name): - cmd = "if losetup -l {0};then losetup -d {0};fi;".format( - loop_device_name) - cmd += "umount -l {0};rm -rf {0};".format(dirname) + cmd = f"if losetup -l {loop_device_name};then losetup -d {loop_device_name};fi;" + cmd += f"umount -l {dirname};rm -rf {dirname};" process.system_output(cmd, shell=True) def prepare_tmpfs_folder(dirname): - cmd = "umount -l {0};rm -rf {0};mkdir -p {0};".format(dirname) + cmd = f"umount -l {dirname};rm -rf {dirname};mkdir -p {dirname};" process.system_output(cmd, ignore_status=True, shell=True) - cmd = "mount -t tmpfs -o rw,nosuid,nodev,seclabel tmpfs {}".format( - dirname) + cmd = f"mount -t tmpfs -o rw,nosuid,nodev,seclabel tmpfs {dirname}" process.system_output(cmd, shell=True) def create_image_on_loop_device(backend_img, device_img): backend_img.create(backend_img.params) backend_filename = backend_img.image_filename loop_device_name = device_img.image_filename - cmd = "losetup -d {}".format(loop_device_name) + cmd = f"losetup -d {loop_device_name}" process.system_output(cmd, ignore_status=True, shell=True) - cmd = "losetup {0} {1} && chmod 666 {0}".format(loop_device_name, - backend_filename) + cmd = f"losetup {loop_device_name} {backend_filename} && chmod 666 {loop_device_name}" process.system_output(cmd, shell=True) device_img.create(device_img.params) def update_loop_device_backend_size(backend_img, device_img, size): - cmd = "qemu-img resize -f raw %s %s && losetup -c %s" % ( - backend_img.image_filename, size, device_img.image_filename) + cmd = f"qemu-img resize -f raw {backend_img.image_filename} {size} && losetup -c {device_img.image_filename}" process.system_output(cmd, shell=True) current_size = int(params["begin_size"][0:-1]) @@ -74,8 +74,7 @@ def update_loop_device_backend_size(backend_img, device_img, size): loop_device_backend_img_tag = params["loop_device_backend_img_tag"] loop_device_img_tag = params["loop_device_img_tag"] - loop_device_backend_img_param = params.object_params( - 
loop_device_backend_img_tag) + loop_device_backend_img_param = params.object_params(loop_device_backend_img_tag) loop_device_img_param = params.object_params(loop_device_img_tag) tmpfs_folder = params.get("tmpfs_folder", "/tmp/xtmpfs") @@ -86,11 +85,14 @@ def update_loop_device_backend_size(backend_img, device_img, size): prepare_tmpfs_folder(tmpfs_folder) error_context.context("Start to create image on loop device", test.log.info) - loop_device_backend_img = QemuImg(loop_device_backend_img_param, - data_dir.get_data_dir(), - loop_device_backend_img_tag) - loop_device_img = QemuImg(loop_device_img_param, data_dir.get_data_dir(), - loop_device_img_tag) + loop_device_backend_img = QemuImg( + loop_device_backend_img_param, + data_dir.get_data_dir(), + loop_device_backend_img_tag, + ) + loop_device_img = QemuImg( + loop_device_img_param, data_dir.get_data_dir(), loop_device_img_tag + ) create_image_on_loop_device(loop_device_backend_img, loop_device_img) try: @@ -107,21 +109,20 @@ def update_loop_device_backend_size(backend_img, device_img, size): vm.verify_alive() session = vm.wait_for_login(timeout=timeout) - if os_type == 'windows' and driver_name: - session = utils_test.qemu.windrv_check_running_verifier(session, - vm, - test, - driver_name, - timeout) - - if os_type == 'windows': + if os_type == "windows" and driver_name: + session = utils_test.qemu.windrv_check_running_verifier( + session, vm, test, driver_name, timeout + ) + + if os_type == "windows": img_size = loop_device_img_param["image_size"] guest_cmd = utils_misc.set_winutils_letter(session, guest_cmd) disk = utils_disk.get_windows_disks_index(session, img_size)[0] utils_disk.update_windows_disk_attributes(session, disk) test.log.info("Formatting disk:%s", disk) - driver = utils_disk.configure_empty_disk(session, disk, img_size, - os_type)[0] + driver = utils_disk.configure_empty_disk(session, disk, img_size, os_type)[ + 0 + ] output_path = driver + ":\\test.dat" else: @@ -144,11 +145,11 @@ def update_loop_device_backend_size(backend_img, device_img, size): current_size_string = str(current_size) + size_unit error_context.context( - "Update backend image size to %s" % current_size_string, - test.log.info) - update_loop_device_backend_size(loop_device_backend_img, - loop_device_img, - current_size_string) + f"Update backend image size to {current_size_string}", test.log.info + ) + update_loop_device_backend_size( + loop_device_backend_img, loop_device_img, current_size_string + ) vm.monitor.cmd("cont") diff --git a/qemu/tests/disk_extension_lvm.py b/qemu/tests/disk_extension_lvm.py index b70e615bb8..bf57f56cc4 100644 --- a/qemu/tests/disk_extension_lvm.py +++ b/qemu/tests/disk_extension_lvm.py @@ -1,12 +1,10 @@ """continually disk extension on lvm backend""" + import threading import time from avocado.utils import process - -from virttest import utils_misc, utils_test -from virttest import error_context -from virttest import utils_disk +from virttest import error_context, utils_disk, utils_misc, utils_test from virttest.utils_misc import get_linux_drive_path @@ -68,28 +66,26 @@ def _extend_lvm_daemon(): free_size = int(params["free_size"][0:-1]) if _get_free_size() < free_size: - test.cancel("No enough space to run this case %d %d" % ( - _get_free_size(), free_size)) + test.cancel( + "No enough space to run this case %d %d" % (_get_free_size(), free_size) + ) vm = env.get_vm(params["main_vm"]) vm.verify_alive() session = vm.wait_for_login(timeout=timeout) - if os_type == 'windows' and driver_name: - session = 
utils_test.qemu.windrv_check_running_verifier(session, - vm, - test, - driver_name, - timeout) - - if os_type == 'windows': + if os_type == "windows" and driver_name: + session = utils_test.qemu.windrv_check_running_verifier( + session, vm, test, driver_name, timeout + ) + + if os_type == "windows": img_size = params["disk_size"] guest_cmd = utils_misc.set_winutils_letter(session, guest_cmd) disk = _get_window_disk_index_by_serail(disk_serial) utils_disk.update_windows_disk_attributes(session, disk) test.log.info("Formatting disk:%s", disk) - driver = utils_disk.configure_empty_disk(session, disk, img_size, - os_type)[0] + driver = utils_disk.configure_empty_disk(session, disk, img_size, os_type)[0] output_path = driver + ":\\test.dat" else: output_path = get_linux_drive_path(session, disk_serial) @@ -120,7 +116,6 @@ def _extend_lvm_daemon(): else: test.log.debug("Ignore timeout.") else: - test.assertTrue( - vm.wait_for_status("running", wait_timeout)) + test.assertTrue(vm.wait_for_status("running", wait_timeout)) else: time.sleep(0.1) diff --git a/qemu/tests/drive_mirror.py b/qemu/tests/drive_mirror.py index 6167abe70f..6545a2c4d0 100644 --- a/qemu/tests/drive_mirror.py +++ b/qemu/tests/drive_mirror.py @@ -1,41 +1,37 @@ -import os import logging +import os from avocado.utils import process - -from virttest import error_context -from virttest import utils_misc -from virttest import storage -from virttest import qemu_storage -from virttest import nfs +from virttest import error_context, nfs, qemu_storage, storage, utils_misc from qemu.tests import block_copy -LOG_JOB = logging.getLogger('avocado.test') +LOG_JOB = logging.getLogger("avocado.test") class DriveMirror(block_copy.BlockCopy): - """ base class for block mirror tests; """ def __init__(self, test, params, env, tag): - super(DriveMirror, self).__init__(test, params, env, tag) + super().__init__(test, params, env, tag) self.target_image = self.get_target_image() def parser_test_args(self): """ paraser test args and set default value; """ - default_params = {"create_mode": "absolute-path", - "reopen_timeout": 60, - "full_copy": "full", - "check_event": "no"} + default_params = { + "create_mode": "absolute-path", + "reopen_timeout": 60, + "full_copy": "full", + "check_event": "no", + } self.default_params.update(default_params) - params = super(DriveMirror, self).parser_test_args() + params = super().parser_test_args() if params["block_mirror_cmd"].startswith("__"): - params["full_copy"] = (params["full_copy"] == "full") + params["full_copy"] = params["full_copy"] == "full" params = params.object_params(params["target_image"]) if params.get("image_type") == "iscsi": params.setdefault("host_setup_flag", 2) @@ -48,23 +44,19 @@ def get_target_image(self): if params.get("image_type") == "nfs": image = nfs.Nfs(params) image.setup() - utils_misc.wait_for(lambda: os.path.ismount(image.mount_dir), - timeout=30) + utils_misc.wait_for(lambda: os.path.ismount(image.mount_dir), timeout=30) elif params.get("image_type") == "iscsi": - image = qemu_storage.Iscsidev(params, self.data_dir, - params["target_image"]) + image = qemu_storage.Iscsidev(params, self.data_dir, params["target_image"]) return image.setup() - if (params["create_mode"] == "existing" and - not os.path.exists(target_image)): - image = qemu_storage.QemuImg(params, self.data_dir, - params["target_image"]) + if params["create_mode"] == "existing" and not os.path.exists(target_image): + image = qemu_storage.QemuImg(params, self.data_dir, params["target_image"]) image.create(params) 
return target_image def get_device(self): - params = super(DriveMirror, self).parser_test_args() + params = super().parser_test_args() image_file = storage.get_image_filename(params, self.data_dir) return self.vm.get_block({"file": image_file}) @@ -79,8 +71,9 @@ def check_node_name(self): node_name_exp = self.params["node_name"] node_name = info.get("node-name", "") if node_name != node_name_exp: - self.test.fail("node-name is: %s, while set value is: %s" % - (node_name, node_name_exp)) + self.test.fail( + f"node-name is: {node_name}, while set value is: {node_name_exp}" + ) @error_context.context_aware def start(self): @@ -94,18 +87,15 @@ def start(self): target_format = params["image_format"] create_mode = params["create_mode"] full_copy = params["full_copy"] - args = {"mode": create_mode, "speed": default_speed, - "format": target_format} - if 'granularity' and 'buf_count' in params: + args = {"mode": create_mode, "speed": default_speed, "format": target_format} + if "granularity" in params and "buf_count" in params: granularity = int(params["granularity"]) buf_size = granularity * int(params["buf_count"]) - args.update({"granularity": granularity, - "buf-size": buf_size}) - if 'node_name' in params: + args.update({"granularity": granularity, "buf-size": buf_size}) + if "node_name" in params: args.update({"node-name": params.get("node_name")}) error_context.context("Start to mirror block device", LOG_JOB.info) - self.vm.block_mirror(device, target_image, full_copy, - **args) + self.vm.block_mirror(device, target_image, full_copy, **args) if not self.get_status(): self.test.fail("No active mirroring job found") if params.get("image_type") != "iscsi": @@ -119,7 +109,7 @@ def reopen(self): """ params = self.parser_test_args() target_format = params["image_format"] - timeout = params["reopen_timeout"] + params["reopen_timeout"] error_context.context("reopen new target image", LOG_JOB.info) if self.vm.monitor.protocol == "qmp": self.vm.monitor.clear_event("BLOCK_JOB_COMPLETED") @@ -133,19 +123,20 @@ def action_after_reopen(self): return self.do_steps("after_reopen") def clean(self): - super(DriveMirror, self).clean() + super().clean() params = self.parser_test_args() if params.get("image_type") == "iscsi": params["host_setup_flag"] = int(params["host_setup_flag"]) qemu_img = utils_misc.get_qemu_img_binary(self.params) # Reformat it to avoid impact other test - cmd = "%s create -f %s %s %s" % (qemu_img, - params["image_format"], - self.target_image, - params["image_size"]) + cmd = "{} create -f {} {} {}".format( + qemu_img, + params["image_format"], + self.target_image, + params["image_size"], + ) process.system(cmd) - image = qemu_storage.Iscsidev(params, self.data_dir, - params["target_image"]) + image = qemu_storage.Iscsidev(params, self.data_dir, params["target_image"]) image.cleanup() elif params.get("image_type") == "nfs": image = nfs.Nfs(params) diff --git a/qemu/tests/drive_mirror_cancel.py b/qemu/tests/drive_mirror_cancel.py index ffc00f46c0..784a42fa1d 100644 --- a/qemu/tests/drive_mirror_cancel.py +++ b/qemu/tests/drive_mirror_cancel.py @@ -1,7 +1,5 @@ from avocado.utils import process - -from virttest import error_context -from virttest import utils_misc +from virttest import error_context, utils_misc from qemu.tests import drive_mirror @@ -21,8 +19,7 @@ def run_drive_mirror_cancel(test, params, env): mirror_test = drive_mirror.DriveMirror(test, params, env, tag) try: mirror_test.start() - error_context.context("Block network connection with iptables", - test.log.info) + 
error_context.context("Block network connection with iptables", test.log.info) process.run(params["start_firewall_cmd"], shell=True) bg = utils_misc.InterruptedThread(mirror_test.cancel) bg.start() diff --git a/qemu/tests/drive_mirror_complete.py b/qemu/tests/drive_mirror_complete.py index 08981a9fa3..ecf0a80987 100644 --- a/qemu/tests/drive_mirror_complete.py +++ b/qemu/tests/drive_mirror_complete.py @@ -1,7 +1,4 @@ -from virttest import data_dir -from virttest import env_process -from virttest import error_context -from virttest import qemu_storage +from virttest import data_dir, env_process, error_context, qemu_storage from qemu.tests import drive_mirror diff --git a/qemu/tests/drive_mirror_installation.py b/qemu/tests/drive_mirror_installation.py index 8b182312d5..7461cb6459 100644 --- a/qemu/tests/drive_mirror_installation.py +++ b/qemu/tests/drive_mirror_installation.py @@ -1,8 +1,7 @@ -import time import random +import time -from virttest import utils_test -from virttest import utils_misc +from virttest import utils_misc, utils_test from qemu.tests import drive_mirror @@ -20,8 +19,9 @@ def run(test, params, env): """ args = (test, params, env) - bg = utils_misc.InterruptedThread(utils_test.run_virt_sub_test, args, - {"sub_type": "unattended_install"}) + bg = utils_misc.InterruptedThread( + utils_test.run_virt_sub_test, args, {"sub_type": "unattended_install"} + ) bg.start() utils_misc.wait_for(bg.is_alive, timeout=10) time.sleep(random.uniform(60, 200)) diff --git a/qemu/tests/drive_mirror_powerdown.py b/qemu/tests/drive_mirror_powerdown.py index 46a9dcd057..c7a482eeab 100644 --- a/qemu/tests/drive_mirror_powerdown.py +++ b/qemu/tests/drive_mirror_powerdown.py @@ -1,17 +1,15 @@ import logging -from virttest import env_process -from virttest import error_context +from virttest import env_process, error_context from qemu.tests import drive_mirror_stress -LOG_JOB = logging.getLogger('avocado.test') +LOG_JOB = logging.getLogger("avocado.test") class DriveMirrorPowerdown(drive_mirror_stress.DriveMirrorStress): - def __init__(self, test, params, env, tag): - super(DriveMirrorPowerdown, self).__init__(test, params, env, tag) + super().__init__(test, params, env, tag) @error_context.context_aware def powerdown(self): @@ -27,7 +25,7 @@ def powerup(self): bootup guest with target image; """ params = self.parser_test_args() - vm_name = params['main_vm'] + vm_name = params["main_vm"] LOG_JOB.info("Target image: %s", self.target_image) error_context.context("powerup vm with target image", LOG_JOB.info) env_process.preprocess_vm(self.test, params, self.env, vm_name) diff --git a/qemu/tests/drive_mirror_simple.py b/qemu/tests/drive_mirror_simple.py index 970194c45e..d9708bb7d6 100644 --- a/qemu/tests/drive_mirror_simple.py +++ b/qemu/tests/drive_mirror_simple.py @@ -1,20 +1,18 @@ import logging -import time import random +import time from avocado.utils import process - from virttest import error_context from qemu.tests import drive_mirror -LOG_JOB = logging.getLogger('avocado.test') +LOG_JOB = logging.getLogger("avocado.test") class DriveMirrorSimple(drive_mirror.DriveMirror): - def __init__(self, test, params, env, tag): - super(DriveMirrorSimple, self).__init__(test, params, env, tag) + super().__init__(test, params, env, tag) @error_context.context_aware def query_status(self): @@ -28,14 +26,13 @@ def query_status(self): @error_context.context_aware def readonly_target(self): error_context.context("Set readonly bit on target image", LOG_JOB.info) - cmd = "chattr +i %s" % self.target_image + cmd 
= f"chattr +i {self.target_image}" return process.system(cmd) @error_context.context_aware def clear_readonly_bit(self): - error_context.context("Clear readonly bit on target image", - LOG_JOB.info) - cmd = "chattr -i %s" % self.target_image + error_context.context("Clear readonly bit on target image", LOG_JOB.info) + cmd = f"chattr -i {self.target_image}" return process.system(cmd) diff --git a/qemu/tests/drive_mirror_stress.py b/qemu/tests/drive_mirror_stress.py index c192c2cbcd..aadd9efc10 100644 --- a/qemu/tests/drive_mirror_stress.py +++ b/qemu/tests/drive_mirror_stress.py @@ -1,17 +1,14 @@ -import time import logging +import time -from virttest import utils_misc -from virttest import utils_test -from virttest import error_context +from virttest import error_context, utils_misc, utils_test from qemu.tests import drive_mirror -LOG_JOB = logging.getLogger('avocado.test') +LOG_JOB = logging.getLogger("avocado.test") class DriveMirrorStress(drive_mirror.DriveMirror): - @error_context.context_aware def load_stress(self): """ @@ -34,6 +31,7 @@ def unload_stress(self): """ stop stress app """ + def _unload_stress(): session = self.get_session() cmd = self.params.get("stop_cmd") @@ -42,11 +40,15 @@ def _unload_stress(): return self.app_running() error_context.context("stop stress app in guest", LOG_JOB.info) - stopped = utils_misc.wait_for(_unload_stress, first=2.0, - text="wait stress app quit", - step=1.0, timeout=120) + stopped = utils_misc.wait_for( + _unload_stress, + first=2.0, + text="wait stress app quit", + step=1.0, + timeout=120, + ) if not stopped: - LOG_JOB.warn("stress app is still running") + LOG_JOB.warning("stress app is still running") def app_running(self): """ @@ -71,8 +73,8 @@ def verify_steady(self): while time.time() < start + timeout: _offset = self.get_status()["offset"] if _offset < offset: - msg = "offset decreased, offset last: %s" % offset - msg += "offset now: %s" % _offset + msg = f"offset decreased, offset last: {offset}" + msg += f"offset now: {_offset}" self.test.fail(msg) offset = _offset diff --git a/qemu/tests/driver_in_use.py b/qemu/tests/driver_in_use.py index 69eeb3caaf..35f14c3d85 100644 --- a/qemu/tests/driver_in_use.py +++ b/qemu/tests/driver_in_use.py @@ -1,12 +1,9 @@ import re import time -from virttest import utils_misc -from virttest import utils_test -from virttest import env_process -from virttest import error_context -from provider import win_dump_utils -from provider import win_driver_utils +from virttest import env_process, error_context, utils_misc, utils_test + +from provider import win_driver_utils, win_dump_utils @error_context.context_aware @@ -21,8 +18,8 @@ def check_bg_running(vm, params): """ session = vm.wait_for_login() target_process = params.get("target_process", "") - if params['os_type'] == 'linux': - output = session.cmd_output_safe('pgrep -l %s' % target_process) + if params["os_type"] == "linux": + output = session.cmd_output_safe(f"pgrep -l {target_process}") else: list_cmd = params.get("list_cmd", "wmic process get name") output = session.cmd_output_safe(list_cmd, timeout=60) @@ -55,8 +52,7 @@ def run_bg_test_simu(bg_stress_test): :return: return the background case thread if it's successful; else raise error. 
""" - error_context.context("Run test %s background" % bg_stress_test, - test.log.info) + error_context.context(f"Run test {bg_stress_test} background", test.log.info) stress_thread = None wait_time = float(params.get("wait_bg_time", 60)) bg_stress_run_flag = params.get("bg_stress_run_flag") @@ -65,32 +61,36 @@ def run_bg_test_simu(bg_stress_test): env[bg_stress_run_flag] = False if params.get("bg_stress_test_is_cmd", "no") == "yes": session = vm.wait_for_login() - bg_stress_test = utils_misc.set_winutils_letter( - session, bg_stress_test) + bg_stress_test = utils_misc.set_winutils_letter(session, bg_stress_test) session.sendline(bg_stress_test) else: stress_thread = utils_misc.InterruptedThread( - utils_test.run_virt_sub_test, (test, params, env), - {"sub_type": bg_stress_test}) + utils_test.run_virt_sub_test, + (test, params, env), + {"sub_type": bg_stress_test}, + ) stress_thread.start() for event in params.get("check_setup_events", "").strip().split(): - if not utils_misc.wait_for(lambda: params.get(event), - 600, 0, 1): - test.error("Background test not in ready state since haven't " - "received event %s" % event) + if not utils_misc.wait_for(lambda: params.get(event), 600, 0, 1): + test.error( + "Background test not in ready state since haven't " + f"received event {event}" + ) # Clear event params[event] = False - check_bg_timeout = float(params.get('check_bg_timeout', 120)) - if not utils_misc.wait_for(lambda: check_bg_running(vm, params), - check_bg_timeout, 0, 1): - test.fail("Backgroud test %s is not alive!" % bg_stress_test) + check_bg_timeout = float(params.get("check_bg_timeout", 120)) + if not utils_misc.wait_for( + lambda: check_bg_running(vm, params), check_bg_timeout, 0, 1 + ): + test.fail(f"Backgroud test {bg_stress_test} is not alive!") if params.get("set_bg_stress_flag", "no") == "yes": test.log.info("Wait %s test start", bg_stress_test) - if not utils_misc.wait_for(lambda: env.get(bg_stress_run_flag), - wait_time, 0, 0.5): - err = "Fail to start %s test" % bg_stress_test + if not utils_misc.wait_for( + lambda: env.get(bg_stress_run_flag), wait_time, 0, 0.5 + ): + err = f"Fail to start {bg_stress_test} test" test.error(err) env["bg_status"] = 1 return stress_thread @@ -105,8 +105,7 @@ def run_bg_test_sep(sub_type): """ if params.get("bg_stress_test_is_cmd", "no") == "yes": session = vm.wait_for_login() - sub_type = utils_misc.set_winutils_letter( - session, sub_type) + sub_type = utils_misc.set_winutils_letter(session, sub_type) session.cmd(sub_type, timeout=600) session.close() else: @@ -114,25 +113,24 @@ def run_bg_test_sep(sub_type): driver = params["driver_name"] driver_verifier = params.get("driver_verifier", driver) - driver_running = params.get('driver_running', driver_verifier) + driver_running = params.get("driver_running", driver_verifier) timeout = int(params.get("login_timeout", 360)) - vm_name = params['main_vm'] + vm_name = params["main_vm"] if driver == "fwcfg": win_dump_utils.set_vm_for_dump(test, params) - params['start_vm'] = 'yes' + params["start_vm"] = "yes" env_process.preprocess_vm(test, params, env, vm_name) vm = env.get_vm(vm_name) vm.verify_alive() - error_context.context("Boot guest with %s device" % driver, test.log.info) + error_context.context(f"Boot guest with {driver} device", test.log.info) if params["os_type"] == "windows": session = vm.wait_for_login(timeout=timeout) - utils_test.qemu.windrv_verify_running(session, test, - driver_running) - session = utils_test.qemu.setup_win_driver_verifier(session, - driver_verifier, - vm) + 
utils_test.qemu.windrv_verify_running(session, test, driver_running) + session = utils_test.qemu.setup_win_driver_verifier( + session, driver_verifier, vm + ) session.close() env["bg_status"] = 0 run_bg_flag = params.get("run_bg_flag") @@ -142,8 +140,9 @@ def run_bg_test_sep(sub_type): suppress_exception = params.get("suppress_exception", "no") == "yes" wait_bg_finish = params.get("wait_bg_finish", "no") == "yes" - error_context.context("Run %s %s %s" % (main_test, run_bg_flag, - bg_stress_test), test.log.info) + error_context.context( + f"Run {main_test} {run_bg_flag} {bg_stress_test}", test.log.info + ) if run_bg_flag == "before_bg_test": utils_test.run_virt_sub_test(test, params, env, main_test) if vm.is_dead(): @@ -160,10 +159,13 @@ def run_bg_test_sep(sub_type): if wait_bg_finish: stress_thread.join(suppress_exception=suppress_exception) else: - stress_thread.join(timeout=timeout, suppress_exception=suppress_exception) + stress_thread.join( + timeout=timeout, suppress_exception=suppress_exception + ) if vm.is_alive(): if driver == "vioser": from qemu.tests import vioser_in_use + vioser_in_use.kill_host_serial_pid(params, vm) run_bg_test_sep(bg_stress_test) elif run_bg_flag == "after_bg_test": diff --git a/qemu/tests/driver_load.py b/qemu/tests/driver_load.py index 3e65ccc47c..591b95b028 100644 --- a/qemu/tests/driver_load.py +++ b/qemu/tests/driver_load.py @@ -1,8 +1,7 @@ import re import time -from virttest import utils_test -from virttest import utils_misc -from virttest import error_context + +from virttest import error_context, utils_misc, utils_test @error_context.context_aware @@ -34,7 +33,7 @@ def load_driver(cmd, driver_id): status, output = session.cmd_status_output(cmd) session.close() if status != 0: - test.fail("failed to load driver, %s" % output) + test.fail(f"failed to load driver, {output}") def unload_driver(cmd, driver_id): """ @@ -54,7 +53,7 @@ def unload_driver(cmd, driver_id): vm.reboot() session.close() else: - test.fail("failed to unload driver, %s" % output) + test.fail(f"failed to unload driver, {output}") def get_driver_id(cmd, pattern): """ @@ -67,11 +66,11 @@ def get_driver_id(cmd, pattern): output = session.cmd_output(cmd) driver_id = re.findall(pattern, output) if not driver_id: - test.fail("Didn't find driver info from guest %s" % output) + test.fail(f"Didn't find driver info from guest {output}") driver_id = driver_id[0] if params["os_type"] == "windows": - driver_id = '^&'.join(driver_id.split('&')) + driver_id = "^&".join(driver_id.split("&")) session.close() return driver_id @@ -95,12 +94,11 @@ def service_operate(cmd, ignore_error=False): start_service_cmd = params.get("start_service_cmd") driver_id_pattern = params["driver_id_pattern"] - driver_id_cmd = utils_misc.set_winutils_letter( - session, params["driver_id_cmd"]) - driver_load_cmd = utils_misc.set_winutils_letter( - session, params["driver_load_cmd"]) + driver_id_cmd = utils_misc.set_winutils_letter(session, params["driver_id_cmd"]) + driver_load_cmd = utils_misc.set_winutils_letter(session, params["driver_load_cmd"]) driver_unload_cmd = utils_misc.set_winutils_letter( - session, params["driver_unload_cmd"]) + session, params["driver_unload_cmd"] + ) session.close() if stop_service_cmd: @@ -109,8 +107,9 @@ def service_operate(cmd, ignore_error=False): try: for repeat in range(0, int(params.get("repeats", 1))): - error_context.context("Unload and load the driver. Round %s" % - repeat, test.log.info) + error_context.context( + f"Unload and load the driver. 
Round {repeat}", test.log.info + ) test.log.info("Get driver info from guest") driver_id = get_driver_id(driver_id_cmd, driver_id_pattern) @@ -127,5 +126,4 @@ def service_operate(cmd, ignore_error=False): test_after_load = params.get("test_after_load") if test_after_load: - utils_test.run_virt_sub_test(test, params, env, - sub_type=test_after_load) + utils_test.run_virt_sub_test(test, params, env, sub_type=test_after_load) diff --git a/qemu/tests/dump_guest_core.py b/qemu/tests/dump_guest_core.py index e163e3d351..1faa4a08cf 100644 --- a/qemu/tests/dump_guest_core.py +++ b/qemu/tests/dump_guest_core.py @@ -1,9 +1,7 @@ import os from avocado.utils import process - -from virttest import utils_package -from virttest import utils_misc +from virttest import utils_misc, utils_package def run(test, params, env): @@ -27,22 +25,23 @@ def check_env(): """ guest_kernel_version = session.cmd("uname -r").strip() if host_kernel_version != guest_kernel_version: - test.cancel("Please update your host and guest kernel " - "to same version.The host kernel version is %s" - "The guest kernel version is %s" - % (host_kernel_version, guest_kernel_version)) + test.cancel( + "Please update your host and guest kernel " + f"to same version.The host kernel version is {host_kernel_version}" + f"The guest kernel version is {guest_kernel_version}" + ) def check_core_file(arch): """ Use gdb to check core dump file """ - arch_map = {'x86_64': 'X86_64', 'ppc64le': 'ppc64-le'} + arch_map = {"x86_64": "X86_64", "ppc64le": "ppc64-le"} arch_name = arch_map.get(arch) arch = arch_name if arch_name else arch - command = ('echo -e "source %s\nset height 0\ndump-guest-memory' - ' %s %s\nbt\nquit" > %s' % (dump_guest_memory_file, - vmcore_file, arch, - gdb_command_file)) + command = ( + f'echo -e "source {dump_guest_memory_file}\nset height 0\ndump-guest-memory' + f' {vmcore_file} {arch}\nbt\nquit" > {gdb_command_file}' + ) process.run(command, shell=True) status, output = process.getstatusoutput(gdb_command, timeout=360) os.remove(gdb_command_file) @@ -58,20 +57,25 @@ def check_vmcore_file(): """ Use crash to check vmcore file """ - process.run('echo -e "bt\ntask 0\ntask 1\nquit" > %s' - % crash_script, shell=True) + process.run(f'echo -e "bt\ntask 0\ntask 1\nquit" > {crash_script}', shell=True) output = process.getoutput(crash_cmd, timeout=60) os.remove(crash_script) os.remove(vmcore_file) test.log.debug(output) - if "systemd" in output and 'swapper' in output: + if "systemd" in output and "swapper" in output: test.log.info("Crash command works as expected") else: test.fail("Vmcore corrupt") # install crash/gdb/kernel-debuginfo in host - packages = ["crash", "gdb", "kernel-debuginfo*", "qemu-kvm-debuginfo", - "qemu-kvm-debugsource", "qemu-kvm-core-debuginfo"] + packages = [ + "crash", + "gdb", + "kernel-debuginfo*", + "qemu-kvm-debuginfo", + "qemu-kvm-debugsource", + "qemu-kvm-core-debuginfo", + ] utils_package.package_install(packages) trigger_core_dump_command = params["trigger_core_dump_command"] @@ -88,7 +92,7 @@ def check_vmcore_file(): host_kernel_version = process.getoutput("uname -r").strip() vm = env.get_vm(params["main_vm"]) session = vm.wait_for_login() - if params.get('check_env', 'yes') == 'yes': + if params.get("check_env", "yes") == "yes": check_env() qemu_id = vm.get_pid() @@ -98,9 +102,9 @@ def check_vmcore_file(): test.log.info("trigger core dump command: %s", trigger_core_dump_command) process.run(trigger_core_dump_command) utils_misc.wait_for(lambda: os.path.exists(core_file), timeout=120) - if 
params.get('check_core_file', 'yes') == 'yes': + if params.get("check_core_file", "yes") == "yes": check_core_file(arch) - if dump_guest_core == 'on' and check_vmcore == 'yes': + if dump_guest_core == "on" and check_vmcore == "yes": crash_cmd %= host_kernel_version utils_misc.wait_for(lambda: os.path.exists(vmcore_file), timeout=60) check_vmcore_file() diff --git a/qemu/tests/dump_guest_memory.py b/qemu/tests/dump_guest_memory.py index d97f9e10dd..9bf7415919 100644 --- a/qemu/tests/dump_guest_memory.py +++ b/qemu/tests/dump_guest_memory.py @@ -2,9 +2,7 @@ import os from avocado.utils import process - -from virttest import utils_misc -from virttest import utils_package +from virttest import utils_misc, utils_package def run(test, params, env): @@ -29,8 +27,7 @@ def check_env(): """ guest_kernel_version = session.cmd("uname -r").strip() if host_kernel_version != guest_kernel_version: - test.cancel("Please update your host and guest kernel " - "to same version") + test.cancel("Please update your host and guest kernel " "to same version") def check_list(qmp_o, key, val=None, check_item_in_pair=True): """ @@ -55,7 +52,7 @@ def check_list(qmp_o, key, val=None, check_item_in_pair=True): elif isinstance(element, list): if check_list(element, key, val, check_item_in_pair): return True - elif element != '' and not check_item_in_pair: + elif element != "" and not check_item_in_pair: if key in str(element): return True return False @@ -88,7 +85,7 @@ def _check_dict(dic, key, val, check_item_in_pair=True): elif isinstance(value, list): if check_list(value, key, val, check_item_in_pair): return True - elif value != '' and not check_item_in_pair: + elif value != "" and not check_item_in_pair: if key in str(value): return True return False @@ -112,9 +109,11 @@ def check_result(qmp_o, expect_o=None): result = None if result_check == "equal": if not operator.eq(qmp_o, expect_o): - test.fail("QMP output does not equal to the expect result.\n " - "Expect result: '%s'\n" - "Actual result: '%s'" % (expect_o, qmp_o)) + test.fail( + "QMP output does not equal to the expect result.\n " + f"Expect result: '{expect_o}'\n" + f"Actual result: '{qmp_o}'" + ) elif result_check == "contain": if len(expect_o) == 0: result = True @@ -153,15 +152,15 @@ def check_dump_file(): """ Use crash to check dump file """ - process.getstatusoutput("echo bt > %s" % crash_script) - process.getstatusoutput("echo quit >> %s" % crash_script) + process.getstatusoutput(f"echo bt > {crash_script}") + process.getstatusoutput(f"echo quit >> {crash_script}") crash_cmd = "crash -i %s /usr/lib/debug/lib/modules/%s/vmlinux " crash_cmd %= (crash_script, host_kernel_version) crash_cmd += dump_file status, output = process.getstatusoutput(crash_cmd) os.remove(crash_script) test.log.debug(output) - if status != 0 or 'error' in output: + if status != 0 or "error" in output: test.fail("vmcore corrupt") # install crash/gdb/kernel-debuginfo in host @@ -188,9 +187,10 @@ def check_dump_file(): if check_dump == "True": # query dump status and wait for dump completed - utils_misc.wait_for(lambda: execute_qmp_cmd(query_qmp_cmd, - query_cmd_return_value), - dump_file_timeout) + utils_misc.wait_for( + lambda: execute_qmp_cmd(query_qmp_cmd, query_cmd_return_value), + dump_file_timeout, + ) check_dump_file() os.remove(dump_file) diff --git a/qemu/tests/edk2_basic.py b/qemu/tests/edk2_basic.py index 9db73fdb80..3890ab13ce 100644 --- a/qemu/tests/edk2_basic.py +++ b/qemu/tests/edk2_basic.py @@ -26,12 +26,12 @@ def run(test, params, env): vm.create() 
error_context.context("Check serial log result", test.log.info) try: - output = vm.serial_console.read_until_output_matches([check_messgae], - timeout=timeout) + output = vm.serial_console.read_until_output_matches( + [check_messgae], timeout=timeout + ) except Exception as msg: test.log.error(msg) - test.fail("No highlighted entry was detected " - "the boot was abnormal.") + test.fail("No highlighted entry was detected " "the boot was abnormal.") error_context.context("Check edk2 output information", test.log.info) if len(output[1].splitlines()) > line_numbers: test.fail("Warning edk2 line count exceeds %d." % line_numbers) diff --git a/qemu/tests/edk2_stability_test.py b/qemu/tests/edk2_stability_test.py index 58b90fcfd5..63a3ae1039 100644 --- a/qemu/tests/edk2_stability_test.py +++ b/qemu/tests/edk2_stability_test.py @@ -21,10 +21,10 @@ def run(test, params, env): vm.create() error_context.context("Check serial log result", test.log.info) try: - vm.serial_console.read_until_output_matches([check_messgae], - timeout=timeout) + vm.serial_console.read_until_output_matches( + [check_messgae], timeout=timeout + ) except Exception as msg: test.log.error(msg) - test.fail("No highlighted entry was detected " - "the boot was abnormal.") + test.fail("No highlighted entry was detected " "the boot was abnormal.") vm.destroy(gracefully=False) diff --git a/qemu/tests/eject_media.py b/qemu/tests/eject_media.py index 637c5aca80..cde572ff38 100644 --- a/qemu/tests/eject_media.py +++ b/qemu/tests/eject_media.py @@ -1,11 +1,11 @@ import time -from virttest import error_context -from provider.cdrom import QMPEventCheckCDEject, QMPEventCheckCDChange -from virttest import data_dir +from virttest import data_dir, error_context from virttest.qemu_capabilities import Flags from virttest.qemu_storage import QemuImg, get_image_json +from provider.cdrom import QMPEventCheckCDChange, QMPEventCheckCDEject + @error_context.context_aware def run(test, params, env): @@ -39,7 +39,7 @@ def eject_non_cdrom(device_name, force=False): if vm.check_capability(Flags.BLOCKDEV): for info_dict in vm.monitor.info("block"): if device_name in str(info_dict): - qdev = info_dict['qdev'] + qdev = info_dict["qdev"] break vm.monitor.blockdev_open_tray(qdev, force) return vm.monitor.blockdev_remove_medium(qdev) @@ -50,7 +50,7 @@ def eject_non_cdrom(device_name, force=False): p_dict = {"file": orig_img_name} device_name = vm.get_block(p_dict) if device_name is None: - msg = "Fail to get device using image %s" % orig_img_name + msg = f"Fail to get device using image {orig_img_name}" test.fail(msg) eject_check = QMPEventCheckCDEject(vm, device_name) @@ -61,11 +61,10 @@ def eject_non_cdrom(device_name, force=False): with eject_check: vm.eject_cdrom(device_name, force=True) if check_block(orig_img_name): - test.fail("Fail to eject cdrom %s. " % orig_img_name) + test.fail(f"Fail to eject cdrom {orig_img_name}. ") # eject second time - error_context.context("Eject original device for second time", - test.log.info) + error_context.context("Eject original device for second time", test.log.info) with eject_check: vm.eject_cdrom(device_name) @@ -75,48 +74,50 @@ def eject_non_cdrom(device_name, force=False): with change_check: vm.change_media(device_name, new_img_name) if not check_block(new_img_name): - test.fail("Fail to change cdrom to %s." 
% new_img_name) + test.fail(f"Fail to change cdrom to {new_img_name}.") # eject after change - error_context.context("Eject device after add new image by change command", - test.log.info) + error_context.context( + "Eject device after add new image by change command", test.log.info + ) with eject_check: vm.eject_cdrom(device_name, True) if check_block(new_img_name): - test.fail("Fail to eject cdrom %s." % orig_img_name) + test.fail(f"Fail to eject cdrom {orig_img_name}.") # change back to orig_img_name - error_context.context("Insert %s to device %s" % (orig_img_name, - device_name), - test.log.info) + error_context.context( + f"Insert {orig_img_name} to device {device_name}", test.log.info + ) with change_check: vm.change_media(device_name, orig_img_name) if not check_block(orig_img_name): - test.fail("Fail to change cdrom to %s." % orig_img_name) + test.fail(f"Fail to change cdrom to {orig_img_name}.") - error_context.context("Eject device after add org image by change command", - test.log.info) + error_context.context( + "Eject device after add org image by change command", test.log.info + ) with eject_check: vm.eject_cdrom(device_name, True) # change again - error_context.context("Insert %s to device %s" % (new_img_name, - device_name), - test.log.info) + error_context.context( + f"Insert {new_img_name} to device {device_name}", test.log.info + ) with change_check: vm.change_media(device_name, new_img_name) if not check_block(new_img_name): - test.fail("Fail to change cdrom to %s." % new_img_name) + test.fail(f"Fail to change cdrom to {new_img_name}.") # eject non-removable error_context.context("Try to eject non-removable device", test.log.info) p_dict = {"removable": False} device_name = vm.get_block(p_dict) if vm.check_capability(Flags.BLOCKDEV): - img_tag = params['images'].split()[0] + img_tag = params["images"].split()[0] root_dir = data_dir.get_data_dir() sys_image = QemuImg(params, root_dir, img_tag) filename = sys_image.image_filename - if sys_image.image_format == 'luks': + if sys_image.image_format == "luks": filename = get_image_json(img_tag, params, root_dir) device_name = vm.get_block({"filename": filename}) if device_name is None: diff --git a/qemu/tests/emulate_vf_reboot.py b/qemu/tests/emulate_vf_reboot.py index 480d9637fb..f801714fd9 100644 --- a/qemu/tests/emulate_vf_reboot.py +++ b/qemu/tests/emulate_vf_reboot.py @@ -1,5 +1,4 @@ -from virttest import utils_sriov -from virttest import error_context +from virttest import error_context, utils_sriov @error_context.context_aware @@ -24,7 +23,9 @@ def run(test, params, env): nic_pci = session.cmd_output(pci_id).strip() check_vf_num = params.get("get_vf_num") sriov_numvfs = int(session.cmd_output(check_vf_num % nic_pci)) - utils_sriov.set_vf(f'/sys/bus/pci/devices/{nic_pci}', vf_no=sriov_numvfs, session=session) + utils_sriov.set_vf( + f"/sys/bus/pci/devices/{nic_pci}", vf_no=sriov_numvfs, session=session + ) session = vm.reboot(session, params["reboot_method"]) error_context.context("Guest works well after create vf then reboot", test.log.info) session.close() diff --git a/qemu/tests/emulate_vf_shutdown.py b/qemu/tests/emulate_vf_shutdown.py index 8870aaf280..cc414e047a 100644 --- a/qemu/tests/emulate_vf_shutdown.py +++ b/qemu/tests/emulate_vf_shutdown.py @@ -1,5 +1,4 @@ -from virttest import utils_sriov -from virttest import error_context +from virttest import error_context, utils_sriov @error_context.context_aware @@ -24,10 +23,11 @@ def run(test, params, env): nic_pci = session.cmd_output(pci_id).strip() check_vf_num = 
params.get("get_vf_num") sriov_numvfs = int(session.cmd_output(check_vf_num % nic_pci)) - utils_sriov.set_vf(f'/sys/bus/pci/devices/{nic_pci}', vf_no=sriov_numvfs, session=session) + utils_sriov.set_vf( + f"/sys/bus/pci/devices/{nic_pci}", vf_no=sriov_numvfs, session=session + ) session.sendline(shutdown_command) - error_context.context("waiting VM to go down (shutdown shell cmd)", - test.log.info) + error_context.context("waiting VM to go down (shutdown shell cmd)", test.log.info) if not vm.wait_for_shutdown(360): test.fail("Guest refuses to go down") session.close() diff --git a/qemu/tests/enable_scatter_windows.py b/qemu/tests/enable_scatter_windows.py index 2027a717cf..e67a42a091 100644 --- a/qemu/tests/enable_scatter_windows.py +++ b/qemu/tests/enable_scatter_windows.py @@ -1,9 +1,6 @@ import time -from virttest import utils_test -from virttest import error_context -from virttest import utils_net -from virttest import utils_misc +from virttest import error_context, utils_misc, utils_net, utils_test from virttest.utils_windows import virtio_win @@ -26,6 +23,7 @@ def run(test, params, env): param params: the test params param env: test environment """ + def _is_process_finished(session, process_name): """ Check whether the target process is finished running @@ -50,14 +48,18 @@ def _start_wireshark_session(): guest_ip = vm.get_address() try: run_wireshark_cmd = run_wireshark_temp % (host_ip, guest_ip) - status, output = session_serial.cmd_status_output(run_wireshark_cmd, - timeout=timeout) + status, output = session_serial.cmd_status_output( + run_wireshark_cmd, timeout=timeout + ) if status: - test.error("Failed to start wireshark session, " - "status=%s, output=%s" % (status, output)) + test.error( + "Failed to start wireshark session, " + f"status={status}, output={output}" + ) is_started = utils_misc.wait_for( - lambda: not _is_process_finished(session_serial, "tshark.exe"), 20, 5, 1) + lambda: not _is_process_finished(session_serial, "tshark.exe"), 20, 5, 1 + ) if not is_started: test.error("Timeout when wait for wireshark start") finally: @@ -68,11 +70,9 @@ def _stop_wireshark_session(): Stop the running wireshark session """ error_context.context("Stop wireshark", test.log.info) - status, output = session.cmd_status_output(stop_wireshark_cmd, - timeout=timeout) + status, output = session.cmd_status_output(stop_wireshark_cmd, timeout=timeout) if status: - test.error("Failed to stop wireshark: status=%s, output=%s" - % (status, output)) + test.error(f"Failed to stop wireshark: status={status}, output={output}") def _parse_log_file(packet_filter): """ @@ -83,11 +83,11 @@ def _parse_log_file(packet_filter): """ error_context.context("Parse wireshark log file", test.log.info) parse_log_cmd = parse_log_temp % packet_filter - status, output = session.cmd_status_output( - parse_log_cmd, timeout=timeout) + status, output = session.cmd_status_output(parse_log_cmd, timeout=timeout) if status: - test.error("Failed to parse session log file," - " status=%s, output=%s" % (status, output)) + test.error( + "Failed to parse session log file," f" status={status}, output={output}" + ) return output def _get_traffic_log(packet_filter): @@ -122,8 +122,8 @@ def _get_driver_version(session): """ query_version_cmd = params["query_version_cmd"] output = session.cmd_output(query_version_cmd) - version_str = output.strip().split('=')[1] - version = version_str.split('.')[-1][0:3] + version_str = output.strip().split("=")[1] + version = version_str.split(".")[-1][0:3] return int(version) timeout = 
params.get("timeout", 360) @@ -142,14 +142,14 @@ def _get_driver_version(session): session = vm.wait_for_login(timeout=timeout) # make sure to enter desktop - vm.send_key('meta_l-d') + vm.send_key("meta_l-d") time.sleep(30) - error_context.context("Check if the driver is installed and " - "verified", test.log.info) - session = utils_test.qemu.windrv_check_running_verifier(session, vm, - test, - driver_verifier, - timeout) + error_context.context( + "Check if the driver is installed and " "verified", test.log.info + ) + session = utils_test.qemu.windrv_check_running_verifier( + session, vm, test, driver_verifier, timeout + ) if _get_driver_version(session) > 189: param_names.append("*JumboPacket") @@ -158,18 +158,16 @@ def _get_driver_version(session): error_context.context("Install winpcap", test.log.info) install_winpcap_cmd = params.get("install_winpcap_cmd") - install_winpcap_cmd = utils_misc.set_winutils_letter( - session, install_winpcap_cmd) - status, output = session.cmd_status_output(install_winpcap_cmd, - timeout=timeout) + install_winpcap_cmd = utils_misc.set_winutils_letter(session, install_winpcap_cmd) + status, output = session.cmd_status_output(install_winpcap_cmd, timeout=timeout) if status: - test.error("Failed to install pcap, status=%s, output=%s" - % (status, output)) + test.error(f"Failed to install pcap, status={status}, output={output}") test.log.info("Wait for pcap installation to complete") autoit_name = params.get("autoit_name") utils_misc.wait_for( - lambda: _is_process_finished(session, autoit_name), timeout, 20, 3) + lambda: _is_process_finished(session, autoit_name), timeout, 20, 3 + ) error_context.context("Check if wireshark is installed", test.log.info) check_installed_cmd = params.get("check_installed_cmd") @@ -178,15 +176,17 @@ def _get_driver_version(session): error_context.context("Install wireshark", test.log.info) install_wireshark_cmd = params.get("install_wireshark_cmd") install_wireshark_cmd = utils_misc.set_winutils_letter( - session, install_wireshark_cmd) - status, output = session.cmd_status_output(install_wireshark_cmd, - timeout=timeout) + session, install_wireshark_cmd + ) + status, output = session.cmd_status_output( + install_wireshark_cmd, timeout=timeout + ) if status: - test.error("Failed to install wireshark, status=%s, output=%s" - % (status, output)) + test.error(f"Failed to install wireshark, status={status}, output={output}") test.log.info("Wait for wireshark installation to complete") utils_misc.wait_for( - lambda: _is_process_finished(session, wireshark_name), timeout, 20, 3) + lambda: _is_process_finished(session, wireshark_name), timeout, 20, 3 + ) else: test.log.info("Wireshark is already installed") session.close() @@ -197,11 +197,12 @@ def _get_driver_version(session): session = vm.wait_for_login(timeout=timeout) error_context.context( - "Log network traffic with scatter gather enabled", test.log.info) + "Log network traffic with scatter gather enabled", test.log.info + ) output = _get_traffic_log("frame.len>1514") test.log.info("Check length > 1514 packets") if "Len" not in output: - test.fail("No packet length >= 1514, output=%s" % output) + test.fail(f"No packet length >= 1514, output={output}") session.close() error_context.context("Disable scatter gather", test.log.info) @@ -210,8 +211,9 @@ def _get_driver_version(session): session = vm.wait_for_login(timeout=timeout) error_context.context( - "Log network traffic with scatter gather disabled", test.log.info) + "Log network traffic with scatter gather disabled", 
test.log.info + ) test.log.info("Check length > 1014 packets") output = _get_traffic_log("frame.len>1014") if "Len" in output: - test.fail("Some packet length > 1014, output=%s" % output) + test.fail(f"Some packet length > 1014, output={output}") diff --git a/qemu/tests/enforce_quit.py b/qemu/tests/enforce_quit.py index 0c520b5e36..b4bf60f7cf 100644 --- a/qemu/tests/enforce_quit.py +++ b/qemu/tests/enforce_quit.py @@ -1,8 +1,6 @@ import re -from virttest import env_process -from virttest import error_context -from virttest import cpu +from virttest import cpu, env_process, error_context @error_context.context_aware @@ -45,9 +43,9 @@ def run(test, params, env): msg_unavailable = params.get("msg_unavailable", "").split(":") msg_unknow = params.get("msg_unknow", "not found") try: - error_context.context("boot guest with -cpu %s,%s" % - (guest_cpumodel, extra_flags), - test.log.info) + error_context.context( + f"boot guest with -cpu {guest_cpumodel},{extra_flags}", test.log.info + ) params["start_vm"] = "yes" env_process.preprocess_vm(test, params, env, params.get("main_vm")) except Exception as err: @@ -58,7 +56,7 @@ def run(test, params, env): if tmp_flag or msg_unknow in str(err): test.log.info("unavailable host feature, guest force quit") else: - test.fail("guest quit with error\n%s" % str(err)) + test.fail(f"guest quit with error\n{str(err)}") vm = env.get_vm(params["main_vm"]) if force_quit: diff --git a/qemu/tests/enospc.py b/qemu/tests/enospc.py index 4fe6f766cf..2e3be552dd 100644 --- a/qemu/tests/enospc.py +++ b/qemu/tests/enospc.py @@ -1,48 +1,41 @@ import logging -import time import os - -from virttest import virt_vm -from virttest import utils_misc -from virttest import qemu_storage -from virttest import data_dir -from virttest import error_context +import time from avocado.utils import process - +from virttest import data_dir, error_context, qemu_storage, utils_misc, virt_vm from virttest.utils_misc import get_linux_drive_path -LOG_JOB = logging.getLogger('avocado.test') - +LOG_JOB = logging.getLogger("avocado.test") -class EnospcConfig(object): +class EnospcConfig: """ Performs setup for the test enospc. This is a borg class, similar to a singleton. The idea is to keep state in memory for when we call cleanup() on postprocessing. 
""" + __shared_state = {} def __init__(self, test, params): self.__dict__ = self.__shared_state - root_dir = test.bindir self.tmpdir = test.tmpdir self.qemu_img_binary = utils_misc.get_qemu_img_binary(params) - self.raw_file_path = os.path.join(self.tmpdir, 'enospc.raw') + self.raw_file_path = os.path.join(self.tmpdir, "enospc.raw") # Here we're trying to choose fairly explanatory names so it's less # likely that we run in conflict with other devices in the system self.vgtest_name = params["vgtest_name"] self.lvtest_name = params["lvtest_name"] - self.lvtest_device = "/dev/%s/%s" % ( - self.vgtest_name, self.lvtest_name) - image_dir = os.path.join(data_dir.get_data_dir(), - os.path.dirname(params["image_name"])) - self.qcow_file_path = os.path.join(image_dir, 'enospc.qcow2') + self.lvtest_device = f"/dev/{self.vgtest_name}/{self.lvtest_name}" + image_dir = os.path.join( + data_dir.get_data_dir(), os.path.dirname(params["image_name"]) + ) + self.qcow_file_path = os.path.join(image_dir, "enospc.qcow2") try: - getattr(self, 'loopback') + getattr(self, "loopback") except AttributeError: - self.loopback = '' + self.loopback = "" @error_context.context_aware def setup(self): @@ -52,24 +45,25 @@ def setup(self): # Double check if there aren't any leftovers self.cleanup() try: - process.run("%s create -f raw %s 10G" % - (self.qemu_img_binary, self.raw_file_path)) + process.run( + f"{self.qemu_img_binary} create -f raw {self.raw_file_path} 10G" + ) # Associate a loopback device with the raw file. # Subject to race conditions, that's why try here to associate # it with the raw file as quickly as possible l_result = process.run("losetup -f") - process.run("losetup -f %s" % self.raw_file_path) + process.run(f"losetup -f {self.raw_file_path}") self.loopback = l_result.stdout.decode().strip() # Add the loopback device configured to the list of pvs # recognized by LVM - process.run("pvcreate %s" % self.loopback) - process.run("vgcreate %s %s" % (self.vgtest_name, self.loopback)) + process.run(f"pvcreate {self.loopback}") + process.run(f"vgcreate {self.vgtest_name} {self.loopback}") # Create an lv inside the vg with starting size of 200M - process.run("lvcreate -L 200M -n %s %s" % - (self.lvtest_name, self.vgtest_name)) + process.run(f"lvcreate -L 200M -n {self.lvtest_name} {self.vgtest_name}") # Create a 10GB qcow2 image in the logical volume - process.run("%s create -f qcow2 %s 10G" % - (self.qemu_img_binary, self.lvtest_device)) + process.run( + f"{self.qemu_img_binary} create -f qcow2 {self.lvtest_device} 10G" + ) # Let's symlink the logical volume with the image name that autotest # expects this device to have os.symlink(self.lvtest_device, self.qcow_file_path) @@ -77,32 +71,32 @@ def setup(self): try: self.cleanup() except Exception as e: - LOG_JOB.warn(e) + LOG_JOB.warning(e) raise @error_context.context_aware def cleanup(self): error_context.context("performing enospc cleanup", LOG_JOB.info) if os.path.islink(self.lvtest_device): - process.run("fuser -k %s" % self.lvtest_device, ignore_status=True) + process.run(f"fuser -k {self.lvtest_device}", ignore_status=True) time.sleep(2) l_result = process.run("lvdisplay") # Let's remove all volumes inside the volume group created if self.lvtest_name in l_result.stdout.decode(): - process.run("lvremove -f %s" % self.lvtest_device) + process.run(f"lvremove -f {self.lvtest_device}") # Now, removing the volume group itself v_result = process.run("vgdisplay") if self.vgtest_name in v_result.stdout.decode(): - process.run("vgremove -f %s" % self.vgtest_name) + 
process.run(f"vgremove -f {self.vgtest_name}") # Now, if we can, let's remove the physical volume from lvm list if self.loopback: p_result = process.run("pvdisplay") if self.loopback in p_result.stdout.decode(): - process.run("pvremove -f %s" % self.loopback) - l_result = process.run('losetup -a') + process.run(f"pvremove -f {self.loopback}") + l_result = process.run("losetup -a") if self.loopback and (self.loopback in l_result.stdout.decode()): try: - process.run("losetup -d %s" % self.loopback) + process.run(f"losetup -d {self.loopback}") except process.CmdError: LOG_JOB.error("Failed to liberate loopback %s", self.loopback) if os.path.islink(self.qcow_file_path): @@ -139,7 +133,7 @@ def run(test, params, env): vgtest_name = params["vgtest_name"] lvtest_name = params["lvtest_name"] - logical_volume = "/dev/%s/%s" % (vgtest_name, lvtest_name) + logical_volume = f"/dev/{vgtest_name}/{lvtest_name}" disk_serial = params["disk_serial"] devname = get_linux_drive_path(session_serial, disk_serial) @@ -156,27 +150,32 @@ def run(test, params, env): while i < iterations: if vm.monitor.verify_status("paused"): pause_n += 1 - error_context.context("Checking all images in use by %s" % vm.name, - test.log.info) + error_context.context( + f"Checking all images in use by {vm.name}", test.log.info + ) for image_name in vm.params.objects("images"): image_params = vm.params.object_params(image_name) try: - image = qemu_storage.QemuImg(image_params, - data_dir.get_data_dir(), image_name) - image.check_image(image_params, data_dir.get_data_dir(), force_share=True) + image = qemu_storage.QemuImg( + image_params, data_dir.get_data_dir(), image_name + ) + image.check_image( + image_params, data_dir.get_data_dir(), force_share=True + ) except virt_vm.VMError as e: test.log.error(e) - error_context.context("Guest paused, extending Logical Volume size", - test.log.info) + error_context.context( + "Guest paused, extending Logical Volume size", test.log.info + ) try: - process.run("lvextend -L +200M %s" % logical_volume) + process.run(f"lvextend -L +200M {logical_volume}") except process.CmdError as e: test.log.debug(e.result.stdout.decode()) error_context.context("Continue paused guest", test.log.info) vm.resume() elif not vm.monitor.verify_status("running"): status = str(vm.monitor.info("status")) - test.error("Unexpected guest status: %s" % status) + test.error(f"Unexpected guest status: {status}") time.sleep(10) i += 1 @@ -186,10 +185,9 @@ def run(test, params, env): try: enospc_config.cleanup() except Exception as e: - test.log.warn(e) + test.log.warning(e) if pause_n == 0: test.fail("Guest didn't pause during loop") else: - test.log.info("Guest paused %s times from %s iterations", - pause_n, iterations) + test.log.info("Guest paused %s times from %s iterations", pause_n, iterations) diff --git a/qemu/tests/ept_test.py b/qemu/tests/ept_test.py index 053614a80f..7654123ae5 100644 --- a/qemu/tests/ept_test.py +++ b/qemu/tests/ept_test.py @@ -1,4 +1,4 @@ -from avocado.utils import process, cpu +from avocado.utils import cpu, process from virttest import env_process @@ -16,7 +16,7 @@ def run(test, params, env): :param env: Dictionary with test environment. 
""" - if cpu.get_cpu_vendor_name() != 'intel': + if cpu.get_cpu_vendor_name() != "intel": test.cancel("This test is supposed to run on Intel host") unload_cmd = params["unload_cmd"] diff --git a/qemu/tests/expose_host_mtu.py b/qemu/tests/expose_host_mtu.py index 579c90f202..5d4e07c0bb 100644 --- a/qemu/tests/expose_host_mtu.py +++ b/qemu/tests/expose_host_mtu.py @@ -2,11 +2,7 @@ from avocado.utils.network.hosts import LocalHost from avocado.utils.network.interfaces import NetworkInterface - -from virttest import error_context -from virttest import utils_net -from virttest import utils_test -from virttest import env_process +from virttest import env_process, error_context, utils_net, utils_test @error_context.context_aware @@ -49,8 +45,9 @@ def cleanup_ovs_ports(netdst, ports): host_hw_interface = utils_net.Bridge().list_iface(netdst)[0] else: host_hw_interface = host_bridge.list_ports(netdst) - tmp_ports = re.findall(r"t[0-9]{1,}-[a-zA-Z0-9]{6}", - ' '.join(host_hw_interface)) + tmp_ports = re.findall( + r"t[0-9]{1,}-[a-zA-Z0-9]{6}", " ".join(host_hw_interface) + ) if tmp_ports: for p in tmp_ports: host_bridge.del_port(netdst, p) @@ -69,7 +66,7 @@ def cleanup_ovs_ports(netdst, ports): if netdst in utils_net.Bridge().list_br(): host_hw_iface = NetworkInterface(host_hw_interface, localhost) elif utils_net.ovs_br_exists(netdst) is True: - host_hw_iface = NetworkInterface(' '.join(host_hw_interface), localhost) + host_hw_iface = NetworkInterface(" ".join(host_hw_interface), localhost) else: raise OSError(f"invalid host iface {netdst}") host_mtu_origin = host_hw_iface.get_mtu() @@ -84,29 +81,27 @@ def cleanup_ovs_ports(netdst, ports): host_ip = utils_net.get_ip_address_by_interface(params["netdst"]) if os_type == "linux": session.cmd_output_safe(params["nm_stop_cmd"]) - guest_ifname = utils_net.get_linux_ifname(session, - vm.get_mac_address()) - output = session.cmd_output_safe( - params["check_linux_mtu_cmd"] % guest_ifname) + guest_ifname = utils_net.get_linux_ifname(session, vm.get_mac_address()) + output = session.cmd_output_safe(params["check_linux_mtu_cmd"] % guest_ifname) error_context.context(output, test.log.info) - match_string = "mtu %s" % params["mtu_value"] + match_string = "mtu {}".format(params["mtu_value"]) if match_string not in output: - test.fail("host mtu %s not exposed to guest" % params["mtu_value"]) + test.fail("host mtu {} not exposed to guest".format(params["mtu_value"])) elif os_type == "windows": connection_id = utils_net.get_windows_nic_attribute( - session, "macaddress", vm.get_mac_address(), "netconnectionid") - output = session.cmd_output_safe( - params["check_win_mtu_cmd"] % connection_id) + session, "macaddress", vm.get_mac_address(), "netconnectionid" + ) + output = session.cmd_output_safe(params["check_win_mtu_cmd"] % connection_id) error_context.context(output, test.log.info) lines = output.strip().splitlines() - lines_len = len(lines) - - line_table = lines[0].split(' ') - line_value = lines[2].split(' ') - while '' in line_table: - line_table.remove('') - while '' in line_value: - line_value.remove('') + len(lines) + + line_table = lines[0].split(" ") + line_value = lines[2].split(" ") + while "" in line_table: + line_table.remove("") + while "" in line_value: + line_value.remove("") index = 0 for name in line_table: if re.findall("MTU", name): @@ -115,12 +110,14 @@ def cleanup_ovs_ports(netdst, ports): guest_mtu_value = line_value[index] test.log.info("MTU is %s", guest_mtu_value) if not int(guest_mtu_value) == mtu_value: - test.fail("Host mtu %s is not 
exposed to " - "guest!" % params["mtu_value"]) + test.fail( + "Host mtu {} is not exposed to " "guest!".format(params["mtu_value"]) + ) test.log.info("Ping from guest to host with packet size 3972") - status, output = utils_test.ping(host_ip, 10, packetsize=3972, - timeout=30, session=session) + status, output = utils_test.ping( + host_ip, 10, packetsize=3972, timeout=30, session=session + ) ratio = utils_test.get_loss_ratio(output) if ratio != 0: test.fail("Loss ratio is %s", ratio) diff --git a/qemu/tests/file_copy_stress.py b/qemu/tests/file_copy_stress.py index 9c28ba23ca..b39579cdaa 100644 --- a/qemu/tests/file_copy_stress.py +++ b/qemu/tests/file_copy_stress.py @@ -1,8 +1,6 @@ import time -from virttest import error_context -from virttest import utils_misc -from virttest import utils_test +from virttest import error_context, utils_misc, utils_test @error_context.context_aware @@ -31,20 +29,17 @@ def run(test, params, env): try: stress_timeout = float(params.get("stress_timeout", "3600")) - error_context.context("Do file transfer between host and guest", - test.log.info) + error_context.context("Do file transfer between host and guest", test.log.info) start_time = time.time() stop_time = start_time + stress_timeout # here when set a run flag, when other case call this case as a # subprocess backgroundly, can set this run flag to False to stop # the stress test. env["file_transfer_run"] = True - while (env["file_transfer_run"] and time.time() < stop_time): + while env["file_transfer_run"] and time.time() < stop_time: scp_threads = [] for index in range(scp_sessions): - scp_threads.append((utils_test.run_file_transfer, (test, - params, - env))) + scp_threads.append((utils_test.run_file_transfer, (test, params, env))) utils_misc.parallel(scp_threads) finally: diff --git a/qemu/tests/fio_linux.py b/qemu/tests/fio_linux.py index c06e0c34cf..4e3921bc14 100644 --- a/qemu/tests/fio_linux.py +++ b/qemu/tests/fio_linux.py @@ -3,10 +3,11 @@ 1. Boot guest with "aio=native" or "aio=threads" CLI option and run fio tools. """ + import re -from virttest import error_context -from virttest import utils_misc +from virttest import error_context, utils_misc + from provider.storage_benchmark import generate_instance @@ -25,10 +26,11 @@ def run(test, params, env): :param params: Dictionary with the test parameters :param env: Dictionary with test environment. """ + def _get_data_disks(): - """ Get the data disks by serial or wwn options. 
""" + """Get the data disks by serial or wwn options.""" for data_image in data_images: - extra_params = params.get("blk_extra_params_%s" % data_image, '') + extra_params = params.get(f"blk_extra_params_{data_image}", "") match = re.search(r"(serial|wwn)=(\w+)", extra_params, re.M) if match: drive_id = match.group(2) @@ -36,28 +38,28 @@ def _get_data_disks(): continue drive_path = utils_misc.get_linux_drive_path(session, drive_id) if not drive_path: - test.error("Failed to get '%s' drive path" % data_image) + test.error(f"Failed to get '{data_image}' drive path") yield drive_path[5:] def _run_fio_test(target): - for option in params['fio_options'].split(';'): - fio.run('--filename=%s %s' % (target, option)) + for option in params["fio_options"].split(";"): + fio.run(f"--filename={target} {option}") data_images = params["images"].split()[1:] info = [] for image in data_images: - aio = params.get('image_aio_%s' % image, 'threads') - cache = params.get('drive_cache_%s' % image, 'none') - info.append('%s(\"aio=%s,cache=%s\")' % (image, aio, cache)) - test.log.info('Boot a guest with %s.', ', '.join(info)) + aio = params.get(f"image_aio_{image}", "threads") + cache = params.get(f"drive_cache_{image}", "none") + info.append(f'{image}("aio={aio},cache={cache}")') + test.log.info("Boot a guest with %s.", ", ".join(info)) vm = env.get_vm(params["main_vm"]) vm.verify_alive() session = vm.wait_for_login(timeout=float(params.get("login_timeout", 240))) - fio = generate_instance(params, vm, 'fio') + fio = generate_instance(params, vm, "fio") try: - if params.get('image_backend') == 'nvme_direct': - _run_fio_test(params.get('fio_filename')) + if params.get("image_backend") == "nvme_direct": + _run_fio_test(params.get("fio_filename")) else: for did in _get_data_disks(): _run_fio_test(did) diff --git a/qemu/tests/fio_perf.py b/qemu/tests/fio_perf.py index 153969afa8..f13079fd19 100644 --- a/qemu/tests/fio_perf.py +++ b/qemu/tests/fio_perf.py @@ -1,20 +1,23 @@ +import logging import os import re -import six -import time import threading -import logging +import time +import six from avocado.utils import process - -from virttest import utils_misc, utils_test, utils_numeric -from virttest import data_dir -from virttest import utils_disk -from virttest import error_context +from virttest import ( + data_dir, + error_context, + utils_disk, + utils_misc, + utils_numeric, + utils_test, +) from provider.storage_benchmark import generate_instance -LOG_JOB = logging.getLogger('avocado.test') +LOG_JOB = logging.getLogger("avocado.test") def format_result(result, base="12", fbase="2"): @@ -52,14 +55,22 @@ def check_disk_status(session, timeout, num): while time.time() < end_time: disks_str = session.cmd_output_safe(disk_status_cmd) LOG_JOB.info("disks_str is %s", disks_str) - disks = re.findall("Disk %s.*\n" % num, disks_str) + disks = re.findall(f"Disk {num}.*\n", disks_str) if not disks: continue return disks -def get_version(session, result_file, kvm_ver_chk_cmd, - guest_ver_cmd, type, driver_format, vfsd_ver_chk_cmd, timeout): +def get_version( + session, + result_file, + kvm_ver_chk_cmd, + guest_ver_cmd, + type, + driver_format, + vfsd_ver_chk_cmd, + timeout, +): """ collect qemu, kernel, virtiofsd version if needed and driver version info and write them info results file @@ -75,28 +86,27 @@ def get_version(session, result_file, kvm_ver_chk_cmd, kvm_ver = process.system_output(kvm_ver_chk_cmd, shell=True).decode() host_ver = os.uname()[2] - result_file.write("### kvm-userspace-ver : %s\n" % kvm_ver) - 
result_file.write("### kvm_version : %s\n" % host_ver) + result_file.write(f"### kvm-userspace-ver : {kvm_ver}\n") + result_file.write(f"### kvm_version : {host_ver}\n") if driver_format != "ide": result = session.cmd_output(guest_ver_cmd, timeout) if type == "windows": - guest_ver = re.findall(r".*?(\d{2}\.\d{2}\.\d{3}\.\d{4}).*?", - result) + guest_ver = re.findall(r".*?(\d{2}\.\d{2}\.\d{3}\.\d{4}).*?", result) result_file.write( - "### guest-kernel-ver :Microsoft Windows [Version %s]\n" % - guest_ver[0]) + f"### guest-kernel-ver :Microsoft Windows [Version {guest_ver[0]}]\n" + ) else: - result_file.write("### guest-kernel-ver :%s" % result) + result_file.write(f"### guest-kernel-ver :{result}") else: - result_file.write("### guest-kernel-ver : Microsoft Windows " - "[Version ide driver format]\n") + result_file.write( + "### guest-kernel-ver : Microsoft Windows " "[Version ide driver format]\n" + ) if vfsd_ver_chk_cmd: LOG_JOB.info("Check virtiofsd version on host.") - virtiofsd_ver = process.system_output(vfsd_ver_chk_cmd, - shell=True).decode() - result_file.write("### virtiofsd_version : %s\n" % virtiofsd_ver) + virtiofsd_ver = process.system_output(vfsd_ver_chk_cmd, shell=True).decode() + result_file.write(f"### virtiofsd_version : {virtiofsd_ver}\n") @error_context.context_aware @@ -120,7 +130,7 @@ def fio_thread(): run fio command in guest """ # generate instance with fio - fio = generate_instance(params, vm, 'fio') + fio = generate_instance(params, vm, "fio") try: fio.run(run_fio_options) finally: @@ -136,9 +146,9 @@ def clean_tmp_files(session, os_type, guest_result_file, timeout): :param timeout: Timeout in seconds """ if os_type == "linux": - session.cmd("rm -rf %s" % guest_result_file, timeout) + session.cmd(f"rm -rf {guest_result_file}", timeout) elif os_type == "windows": - session.cmd("del /f/s/q %s" % guest_result_file, timeout) + session.cmd(f"del /f/s/q {guest_result_file}", timeout) def _pin_vm_threads(node): """ @@ -183,13 +193,20 @@ def _pin_vm_threads(node): vfsd_ver_chk_cmd = params.get("vfsd_ver_chk_cmd") delete_test_file = params.get("delete_test_file", "no") - result_path = utils_misc.get_path(test.resultsdir, - "fio_result.RHS") + result_path = utils_misc.get_path(test.resultsdir, "fio_result.RHS") result_file = open(result_path, "w") # scratch host and windows guest version info - get_version(session, result_file, kvm_ver_chk_cmd, guest_ver_cmd, os_type, - driver_format, vfsd_ver_chk_cmd, cmd_timeout) + get_version( + session, + result_file, + kvm_ver_chk_cmd, + guest_ver_cmd, + os_type, + driver_format, + vfsd_ver_chk_cmd, + cmd_timeout, + ) if os_type == "windows": # turn off driver verifier @@ -208,22 +225,25 @@ def _pin_vm_threads(node): if diskstatus == "Offline": online_disk_cmd = params.get("online_disk_cmd") online_disk_run = online_disk_cmd % num - (s, o) = session.cmd_status_output(online_disk_run, - timeout=cmd_timeout) + (s, o) = session.cmd_status_output(online_disk_run, timeout=cmd_timeout) if s: - test.fail("Failed to online disk: %s" % o) + test.fail(f"Failed to online disk: {o}") for fs in params.objects("filesystems"): fs_params = params.object_params(fs) fs_target = fs_params.get("fs_target") fs_dest = fs_params.get("fs_dest") - fs_source = fs_params.get("fs_source_dir") - error_context.context("Create a destination directory %s " - "inside guest." 
% fs_dest, test.log.info) + fs_params.get("fs_source_dir") + error_context.context( + f"Create a destination directory {fs_dest} " "inside guest.", + test.log.info, + ) utils_misc.make_dirs(fs_dest, session) - error_context.context("Mount virtiofs target %s to %s inside" - " guest." % (fs_target, fs_dest), test.log.info) - if not utils_disk.mount(fs_target, fs_dest, 'virtiofs', session=session): - test.fail('Mount virtiofs target failed.') + error_context.context( + f"Mount virtiofs target {fs_target} to {fs_dest} inside" " guest.", + test.log.info, + ) + if not utils_disk.mount(fs_target, fs_dest, "virtiofs", session=session): + test.fail("Mount virtiofs target failed.") # format disk if format == "True": session.cmd(pre_cmd, cmd_timeout) @@ -231,98 +251,119 @@ def _pin_vm_threads(node): # get order_list order_line = "" for order in order_list.split(): - order_line += "%s|" % format_result(order) + order_line += f"{format_result(order)}|" # get result tested by each scenario for io_pattern in rw.split(): - result_file.write("Category:%s\n" % io_pattern) - result_file.write("%s\n" % order_line.rstrip("|")) + result_file.write(f"Category:{io_pattern}\n") + result_file.write("{}\n".format(order_line.rstrip("|"))) for bs in block_size.split(): for io_depth in iodepth.split(): for numjobs in threads.split(): line = "" - line += "%s|" % format_result(bs[:-1]) - line += "%s|" % format_result(io_depth) - line += "%s|" % format_result(numjobs) + line += f"{format_result(bs[:-1])}|" + line += f"{format_result(io_depth)}|" + line += f"{format_result(numjobs)}|" file_name = None if format == "True" or params.objects("filesystems"): file_name = io_pattern + "_" + bs + "_" + io_depth run_fio_options = fio_options % ( - io_pattern, bs, io_depth, file_name, numjobs) + io_pattern, + bs, + io_depth, + file_name, + numjobs, + ) else: run_fio_options = fio_options % ( - io_pattern, bs, io_depth, numjobs) + io_pattern, + bs, + io_depth, + numjobs, + ) test.log.info("run_fio_options are: %s", run_fio_options) if os_type == "linux": - (s, o) = session.cmd_status_output(drop_cache, - timeout=cmd_timeout) + (s, o) = session.cmd_status_output( + drop_cache, timeout=cmd_timeout + ) if s: - test.fail("Failed to free memory: %s" % o) + test.fail(f"Failed to free memory: {o}") cpu_file = os.path.join(data_dir.get_tmp_dir(), "cpus") - io_exits_b = int(process.system_output( - "cat /sys/kernel/debug/kvm/exits")) + io_exits_b = int( + process.system_output("cat /sys/kernel/debug/kvm/exits") + ) fio_t = threading.Thread(target=fio_thread) fio_t.start() - process.system_output("mpstat 1 60 > %s" % cpu_file, - shell=True) + process.system_output(f"mpstat 1 60 > {cpu_file}", shell=True) fio_t.join() if file_name and delete_test_file == "yes": test.log.info("Ready delete: %s", file_name) - session.cmd("rm -rf /mnt/%s" % file_name) - - io_exits_a = int(process.system_output( - "cat /sys/kernel/debug/kvm/exits")) - vm.copy_files_from(guest_result_file, - data_dir.get_tmp_dir()) - fio_result_file = os.path.join(data_dir.get_tmp_dir(), - "fio_result") - o = process.system_output("egrep '(read|write)' %s" % - fio_result_file).decode() + session.cmd(f"rm -rf /mnt/{file_name}") + + io_exits_a = int( + process.system_output("cat /sys/kernel/debug/kvm/exits") + ) + vm.copy_files_from(guest_result_file, data_dir.get_tmp_dir()) + fio_result_file = os.path.join(data_dir.get_tmp_dir(), "fio_result") + o = process.system_output( + f"egrep '(read|write)' {fio_result_file}" + ).decode() results = re.findall(pattern, o) - o = 
process.system_output("egrep 'lat' %s" % - fio_result_file).decode() + o = process.system_output(f"egrep 'lat' {fio_result_file}").decode() laten = re.findall( - r"\s{5}lat\s\((\wsec)\).*?avg=[\s]?(\d+(?:[\.][\d]+)?).*?", o) + r"\s{5}lat\s\((\wsec)\).*?avg=[\s]?(\d+(?:[\.][\d]+)?).*?", o + ) bw = float(utils_numeric.normalize_data_size(results[0][1])) - iops = float(utils_numeric.normalize_data_size( - results[0][0], order_magnitude="B", factor=1000)) + iops = float( + utils_numeric.normalize_data_size( + results[0][0], order_magnitude="B", factor=1000 + ) + ) if os_type == "linux" and not params.objects("filesystems"): - o = process.system_output("egrep 'util' %s" % - fio_result_file).decode() - util = float(re.findall(r".*?util=(\d+(?:[\.][\d]+))%", - o)[0]) - - lat = float(laten[0][1]) / 1000 if laten[0][0] == "usec" \ + o = process.system_output( + f"egrep 'util' {fio_result_file}" + ).decode() + util = float(re.findall(r".*?util=(\d+(?:[\.][\d]+))%", o)[0]) + + lat = ( + float(laten[0][1]) / 1000 + if laten[0][0] == "usec" else float(laten[0][1]) + ) if re.findall("rw", io_pattern): bw = bw + float( - utils_numeric.normalize_data_size(results[1][1])) + utils_numeric.normalize_data_size(results[1][1]) + ) iops = iops + float( utils_numeric.normalize_data_size( - results[1][0], order_magnitude="B", - factor=1000)) - lat1 = float(laten[1][1]) / 1000 \ - if laten[1][0] == "usec" else float(laten[1][1]) + results[1][0], order_magnitude="B", factor=1000 + ) + ) + lat1 = ( + float(laten[1][1]) / 1000 + if laten[1][0] == "usec" + else float(laten[1][1]) + ) lat = lat + lat1 - ret = process.system_output("tail -n 1 %s" % cpu_file) + ret = process.system_output(f"tail -n 1 {cpu_file}") idle = float(ret.split()[-1]) iowait = float(ret.split()[5]) cpu = 100 - idle - iowait normal = bw / cpu io_exits = io_exits_a - io_exits_b for result in bw, iops, lat, cpu, normal: - line += "%s|" % format_result(result) + line += f"{format_result(result)}|" if os_type == "windows": - line += "%s" % format_result(io_exits) + line += f"{format_result(io_exits)}" if os_type == "linux": if not params.objects("filesystems"): - line += "%s|" % format_result(io_exits) - line += "%s" % format_result(util) # pylint: disable=E0606 + line += f"{format_result(io_exits)}|" + line += f"{format_result(util)}" # pylint: disable=E0606 else: - line += "%s" % format_result(io_exits) - result_file.write("%s\n" % line) + line += f"{format_result(io_exits)}" + result_file.write(f"{line}\n") # del temporary files in guest os clean_tmp_files(session, os_type, guest_result_file, cmd_timeout) @@ -332,6 +373,6 @@ def _pin_vm_threads(node): fs_params = params.object_params(fs) fs_target = fs_params.get("fs_target") fs_dest = fs_params.get("fs_dest") - utils_disk.umount(fs_target, fs_dest, 'virtiofs', session=session) + utils_disk.umount(fs_target, fs_dest, "virtiofs", session=session) utils_misc.safe_rmdir(fs_dest, session=session) session.close() diff --git a/qemu/tests/fio_windows.py b/qemu/tests/fio_windows.py index 4d1ffecf9a..535b226707 100644 --- a/qemu/tests/fio_windows.py +++ b/qemu/tests/fio_windows.py @@ -1,8 +1,8 @@ import re import time -from virttest import error_context -from virttest import utils_misc +from virttest import error_context, utils_misc + from provider import win_driver_utils @@ -25,22 +25,22 @@ def run(test, params, env): fio_cmd = params.get("fio_cmd") timeout = float(params.get("login_timeout", 360)) cmd_timeout = int(params.get("cmd_timeout", "360")) - check_installed_cmd = 'dir "%s/fio"|findstr /I fio.exe' 
% install_path - check_installed_cmd = params.get("check_installed_cmd", - check_installed_cmd) + check_installed_cmd = f'dir "{install_path}/fio"|findstr /I fio.exe' + check_installed_cmd = params.get("check_installed_cmd", check_installed_cmd) vm = env.get_vm(params["main_vm"]) vm.verify_alive() session = vm.wait_for_login(timeout=timeout) - if not params.get('image_backend') == 'nvme_direct': + if not params.get("image_backend") == "nvme_direct": error_context.context("Format disk", test.log.info) - utils_misc.format_windows_disk(session, params["disk_index"], - mountpoint=params["disk_letter"]) + utils_misc.format_windows_disk( + session, params["disk_index"], mountpoint=params["disk_letter"] + ) try: installed = session.cmd_status(check_installed_cmd) == 0 if not installed: - dst = r"%s:\\" % utils_misc.get_winutils_vol(session) + dst = rf"{utils_misc.get_winutils_vol(session)}:\\" error_context.context("Install fio in guest", test.log.info) install_cmd = params["install_cmd"] @@ -52,16 +52,16 @@ def run(test, params, env): session.cmd(config_cmd) error_context.context("Start fio in guest.", test.log.info) - status, output = session.cmd_status_output(fio_cmd, timeout=(cmd_timeout*2)) + status, output = session.cmd_status_output(fio_cmd, timeout=(cmd_timeout * 2)) if status: - test.error("Failed to run fio, output: %s" % output) + test.error(f"Failed to run fio, output: {output}") finally: error_context.context("Copy fio log from guest to host.", test.log.info) try: vm.copy_files_from(fio_log_file, test.resultsdir) except Exception as err: - test.log.warn("Log file copy failed: %s", err) + test.log.warning("Log file copy failed: %s", err) if session: session.close() win_driver_utils.memory_leak_check(vm, test, params) diff --git a/qemu/tests/flag_check.py b/qemu/tests/flag_check.py index a8be3dd21f..caff935881 100644 --- a/qemu/tests/flag_check.py +++ b/qemu/tests/flag_check.py @@ -1,14 +1,8 @@ -import re import os.path +import re -from avocado.utils import download -from avocado.utils import process - -from virttest import error_context -from virttest import utils_misc -from virttest import cpu -from virttest import data_dir -from virttest import virt_vm +from avocado.utils import download, process +from virttest import cpu, data_dir, error_context, utils_misc, virt_vm @error_context.context_aware @@ -71,7 +65,7 @@ def qemu_support_flag(model_info, reg): :param model_info: model_info get from dump file :param reg: reg name, e.g feature_edx """ - reg_re = re.compile(r".*%s.*\((.*)\)\n" % reg) + reg_re = re.compile(rf".*{reg}.*\((.*)\)\n") flag = reg_re.search(model_info) try: if flag: @@ -103,8 +97,7 @@ def get_extra_flag(extra_flags, symbol, lack_check=False): return host supported flags if lack_check is false """ flags = [] - re_flags = [_[1:] for _ in extra_flags.split(",") - if _ and symbol == _[0]] + re_flags = [_[1:] for _ in extra_flags.split(",") if _ and symbol == _[0]] for flag in re_flags: if lack_check: flags.append(flag) @@ -119,7 +112,7 @@ def get_guest_cpuflags(vm_session): :param vm_session: session to checked vm. 
:return: [corespond flags] """ - flags_re = re.compile(r'^flags\s*:(.*)$', re.MULTILINE) + flags_re = re.compile(r"^flags\s*:(.*)$", re.MULTILINE) out = vm_session.cmd_output("cat /proc/cpuinfo") try: flags = flags_re.search(out).groups()[0].split() @@ -142,8 +135,9 @@ def get_guest_cpuflags(vm_session): guest_cpumodel = vm.cpuinfo.model extra_flags = params.get("cpu_model_flags", " ") - error_context.context("Boot guest with -cpu %s,%s" % - (guest_cpumodel, extra_flags), test.log.info) + error_context.context( + f"Boot guest with -cpu {guest_cpumodel},{extra_flags}", test.log.info + ) if params.get("start_vm") == "no" and "unknown,check" in extra_flags: params["start_vm"] = "yes" @@ -155,7 +149,7 @@ def get_guest_cpuflags(vm_session): except virt_vm.VMCreateError as detail: output = str(detail) if params["qemu_output"] not in output: - test.fail("no qemu output: %s" % params["qemu_output"]) + test.fail("no qemu output: {}".format(params["qemu_output"])) else: vm.verify_alive() timeout = float(params.get("login_timeout", 240)) @@ -167,12 +161,10 @@ def get_guest_cpuflags(vm_session): qemu_model = host_cpumodel[0] else: qemu_model = guest_cpumodel - error_context.context("Get model %s support flags" % qemu_model, - test.log.info) + error_context.context(f"Get model {qemu_model} support flags", test.log.info) # Get flags for every reg from model's info - models_info = process.system_output( - "cat %s" % cpuinfo_file).split("x86") + models_info = process.system_output(f"cat {cpuinfo_file}").split("x86") model_info = qemu_model_info(models_info, qemu_model) reg_list = params.get("reg_list", "feature_edx ").split() model_support_flags = " " @@ -180,9 +172,8 @@ def get_guest_cpuflags(vm_session): for reg in reg_list: reg_flags = qemu_support_flag(model_info, reg) if reg_flags: - model_support_flags += " %s" % reg_flags - model_support_flags = set(map(cpu.Flag, - model_support_flags.split())) + model_support_flags += f" {reg_flags}" + model_support_flags = set(map(cpu.Flag, model_support_flags.split())) error_context.context("Get guest flags", test.log.info) guest_flags = get_guest_cpuflags(session) @@ -198,15 +189,14 @@ def get_guest_cpuflags(vm_session): # option_flags are generated by kernel or kvm, which are not definded in # dump file, but can be displayed in guest option_flags = params.get("option_flags", " ").split() - if params['smp'] == '1' and 'up' not in option_flags: - option_flags.append('up') + if params["smp"] == "1" and "up" not in option_flags: + option_flags.append("up") option_flags = set(map(cpu.Flag, option_flags)) # add_flags are exposed by +flag add_flags = get_extra_flag(extra_flags, "+") # del_flags are disabled by -flag del_flags = get_extra_flag(extra_flags, "-", lack_check=True) - expected_flags = ((model_support_flags | add_flags) - - del_flags - out_flags) + expected_flags = (model_support_flags | add_flags) - del_flags - out_flags # get all flags for host lack flag checking check_flags = get_extra_flag(extra_flags, "+", lack_check=True) check_flags = check_flags - no_check_flags @@ -222,18 +212,16 @@ def get_guest_cpuflags(vm_session): if flag not in process_output: miss_warn.extend(flag.split()) if miss_warn: - test.fail("no warning for lack flag %s" % miss_warn) + test.fail(f"no warning for lack flag {miss_warn}") - error_context.context("Compare guest flags with expected flags", - test.log.info) + error_context.context("Compare guest flags with expected flags", test.log.info) all_support_flags = get_all_support_flags() missing_flags = expected_flags - guest_flags 
- unexpect_flags = (guest_flags - expected_flags - - all_support_flags - option_flags) + unexpect_flags = guest_flags - expected_flags - all_support_flags - option_flags if missing_flags or unexpect_flags: - test.fail("missing flags:\n %s\n" - "more flags than expected:\n %s\n" - "expected flags:\n %s\n" - "guest flags:\n %s\n" - % (missing_flags, unexpect_flags, expected_flags, - guest_flags)) + test.fail( + f"missing flags:\n {missing_flags}\n" + f"more flags than expected:\n {unexpect_flags}\n" + f"expected flags:\n {expected_flags}\n" + f"guest flags:\n {guest_flags}\n" + ) diff --git a/qemu/tests/floppy.py b/qemu/tests/floppy.py index 9073759ba4..87095ba917 100644 --- a/qemu/tests/floppy.py +++ b/qemu/tests/floppy.py @@ -1,16 +1,12 @@ -import time import os -import sys import re -import six +import sys +import time import aexpect - +import six from avocado.utils import process - -from virttest import data_dir -from virttest import env_process -from virttest import error_context +from virttest import data_dir, env_process, error_context from virttest.utils_test.qemu import migration @@ -49,13 +45,13 @@ def create_floppy(params, prepare=True): if not os.path.isabs(floppy): floppy = os.path.join(data_dir.get_data_dir(), floppy) if prepare: - process.run("dd if=/dev/zero of=%s bs=512 count=2880" % floppy) + process.run(f"dd if=/dev/zero of={floppy} bs=512 count=2880") return floppy def cleanup_floppy(path): - """ Removes created floppy """ + """Removes created floppy""" error_context.context("cleaning up temp floppy images", test.log.info) - os.remove("%s" % path) + os.remove(f"{path}") def lazy_copy(vm, dst_path, check_path, copy_timeout=None, dsize=None): """ @@ -69,17 +65,16 @@ def lazy_copy(vm, dst_path, check_path, copy_timeout=None, dsize=None): if copy_timeout is None: copy_timeout = 120 session = vm.wait_for_login(timeout=login_timeout) - cmd = ('nohup bash -c "while [ true ]; do echo \"1\" | ' - 'tee -a %s >> %s; sleep 0.1; done" 2> /dev/null &' % - (check_path, dst_path)) - pid = re.search(r"\[.+\] (.+)", - session.cmd_output(cmd, timeout=copy_timeout)) + cmd = ( + 'nohup bash -c "while [ true ]; do echo "1" | ' + f'tee -a {check_path} >> {dst_path}; sleep 0.1; done" 2> /dev/null &' + ) + pid = re.search(r"\[.+\] (.+)", session.cmd_output(cmd, timeout=copy_timeout)) return pid.group(1) - class MiniSubtest(object): - + class MiniSubtest: def __new__(cls, *args, **kargs): - self = super(MiniSubtest, cls).__new__(cls) + self = super().__new__(cls) ret = None exc_info = None if args is None: @@ -101,7 +96,6 @@ def __new__(cls, *args, **kargs): return ret class test_singlehost(MiniSubtest): - def test(self): create_floppy(params) params["start_vm"] = "yes" @@ -117,7 +111,7 @@ def test(self): # needs time to load and init floppy driver if self.dest_dir: lsmod = self.session.cmd("lsmod") - if 'floppy' not in lsmod: + if "floppy" not in lsmod: self.session.cmd("modprobe floppy") else: time.sleep(20) @@ -129,56 +123,57 @@ def test(self): if self.dest_dir: error_context.context("Mounting floppy") - self.session.cmd("mount %s %s" % (guest_floppy_path, - self.dest_dir)) + self.session.cmd(f"mount {guest_floppy_path} {self.dest_dir}") error_context.context("Testing floppy") self.session.cmd(params["test_floppy_cmd"]) error_context.context("Copying file to the floppy") md5_cmd = params.get("md5_cmd") if md5_cmd: - md5_source = self.session.cmd("%s %s" % (md5_cmd, source_file)) + md5_source = self.session.cmd(f"{md5_cmd} {source_file}") try: md5_source = md5_source.split(" ")[0] except 
IndexError: - test.error("Failed to get md5 from source file," - " output: '%s'" % md5_source) + test.error( + "Failed to get md5 from source file," f" output: '{md5_source}'" + ) else: md5_source = None - self.session.cmd("%s %s %s" % (params["copy_cmd"], source_file, - dest_file)) - test.log.info("Succeed to copy file '%s' into floppy disk", - source_file) + self.session.cmd( + "{} {} {}".format(params["copy_cmd"], source_file, dest_file) + ) + test.log.info("Succeed to copy file '%s' into floppy disk", source_file) - error_context.context("Checking if the file is unchanged " - "after copy") + error_context.context("Checking if the file is unchanged " "after copy") if md5_cmd: - md5_dest = self.session.cmd("%s %s" % (md5_cmd, dest_file)) + md5_dest = self.session.cmd(f"{md5_cmd} {dest_file}") try: md5_dest = md5_dest.split(" ")[0] except IndexError: - test.error("Failed to get md5 from dest file," - " output: '%s'" % md5_dest) + test.error( + "Failed to get md5 from dest file," f" output: '{md5_dest}'" + ) if md5_source != md5_dest: test.fail("File changed after copy to floppy") else: md5_dest = None - self.session.cmd("%s %s %s" % (params["diff_file_cmd"], - source_file, dest_file)) + self.session.cmd( + "{} {} {}".format(params["diff_file_cmd"], source_file, dest_file) + ) def clean(self): - clean_cmd = "%s %s" % (params["clean_cmd"], dest_file) + clean_cmd = "{} {}".format(params["clean_cmd"], dest_file) self.session.cmd(clean_cmd) if self.dest_dir: - self.session.cmd("umount %s" % self.dest_dir) + self.session.cmd(f"umount {self.dest_dir}") self.session.close() class Multihost(MiniSubtest): - def test(self): - error_context.context("Preparing migration env and floppies.", - test.log.info) + error_context.context( + "Preparing migration env and floppies.", test.log.info + ) mig_protocol = params.get("mig_protocol", "tcp") self.mig_type = migration.MultihostMigration if mig_protocol == "fd": @@ -192,7 +187,12 @@ def test(self): self.srchost = params["hosts"][0] self.dsthost = params["hosts"][1] self.is_src = params["hostid"] == self.srchost - self.mig = self.mig_type(test, params, env, False, ) + self.mig = self.mig_type( + test, + params, + env, + False, + ) if self.is_src: vm = env.get_vm(self.vms[0]) @@ -200,9 +200,13 @@ def test(self): self.floppy = create_floppy(params) self.floppy_dir = os.path.dirname(self.floppy) params["start_vm"] = "yes" - env_process.process(test, params, env, - env_process.preprocess_image, - env_process.preprocess_vm) + env_process.process( + test, + params, + env, + env_process.preprocess_image, + env_process.preprocess_vm, + ) vm = env.get_vm(self.vms[0]) vm.wait_for_login(timeout=login_timeout) else: @@ -215,11 +219,10 @@ def clean(self): cleanup_floppy(self.floppy) class test_multihost_write(Multihost): - def test(self): from autotest.client.shared.syncdata import SyncData - super(test_multihost_write, self).test() + super().test() copy_timeout = int(params.get("copy_timeout", 480)) self.mount_dir = params["mount_dir"] @@ -227,9 +230,7 @@ def test(self): check_copy_path = params["check_copy_path"] pid = None - sync_id = {'src': self.srchost, - 'dst': self.dsthost, - "type": "file_trasfer"} + sync_id = {"src": self.srchost, "dst": self.dsthost, "type": "file_trasfer"} filename = "orig" src_file = os.path.join(self.mount_dir, filename) @@ -238,16 +239,15 @@ def test(self): session = vm.wait_for_login(timeout=login_timeout) if self.mount_dir: - session.cmd("rm -f %s" % (src_file)) - session.cmd("rm -f %s" % (check_copy_path)) + session.cmd(f"rm -f 
{src_file}") + session.cmd(f"rm -f {check_copy_path}") # If mount_dir specified, treat guest as a Linux OS # Some Linux distribution does not load floppy at boot # and Windows needs time to load and init floppy driver - error_context.context("Prepare floppy for writing.", - test.log.info) + error_context.context("Prepare floppy for writing.", test.log.info) if self.mount_dir: lsmod = session.cmd("lsmod") - if 'floppy' not in lsmod: + if "floppy" not in lsmod: session.cmd("modprobe floppy") else: time.sleep(20) @@ -256,16 +256,21 @@ def test(self): error_context.context("Mount and copy data.", test.log.info) if self.mount_dir: - session.cmd("mount %s %s" % (guest_floppy_path, - self.mount_dir), - timeout=30) + session.cmd( + f"mount {guest_floppy_path} {self.mount_dir}", timeout=30 + ) error_context.context("File copying test.", test.log.info) pid = lazy_copy(vm, src_file, check_copy_path, copy_timeout) - sync = SyncData(self.mig.master_id(), self.mig.hostid, - self.mig.hosts, sync_id, self.mig.sync_server) + sync = SyncData( + self.mig.master_id(), + self.mig.hostid, + self.mig.hosts, + sync_id, + self.mig.sync_server, + ) pid = sync.sync(pid, timeout=floppy_prepare_timeout)[self.srchost] @@ -275,50 +280,54 @@ def test(self): vm = env.get_vm(self.vms[0]) session = vm.wait_for_login(timeout=login_timeout) error_context.context("Wait for copy finishing.", test.log.info) - status = session.cmd_status("kill %s" % pid, - timeout=copy_timeout) + status = session.cmd_status(f"kill {pid}", timeout=copy_timeout) if status != 0: - test.fail("Copy process was terminatted with" - " error code %s" % (status)) + test.fail( + "Copy process was terminatted with" f" error code {status}" + ) - session.cmd_status("kill -s SIGINT %s" % (pid), - timeout=copy_timeout) + session.cmd_status(f"kill -s SIGINT {pid}", timeout=copy_timeout) - error_context.context("Check floppy file checksum.", - test.log.info) + error_context.context("Check floppy file checksum.", test.log.info) md5_cmd = params.get("md5_cmd", "md5sum") if md5_cmd: - md5_floppy = session.cmd("%s %s" % (md5_cmd, src_file)) + md5_floppy = session.cmd(f"{md5_cmd} {src_file}") try: md5_floppy = md5_floppy.split(" ")[0] except IndexError: - test.error("Failed to get md5 from source file," - " output: '%s'" % md5_floppy) - md5_check = session.cmd("%s %s" % (md5_cmd, check_copy_path)) + test.error( + "Failed to get md5 from source file," + f" output: '{md5_floppy}'" + ) + md5_check = session.cmd(f"{md5_cmd} {check_copy_path}") try: md5_check = md5_check.split(" ")[0] except IndexError: - test.error("Failed to get md5 from dst file," - " output: '%s'" % md5_floppy) + test.error( + "Failed to get md5 from dst file," + f" output: '{md5_floppy}'" + ) if md5_check != md5_floppy: - test.fail("There is mistake in copying, " - "it is possible to check file on vm.") + test.fail( + "There is mistake in copying, " + "it is possible to check file on vm." 
+ ) - session.cmd("rm -f %s" % (src_file)) - session.cmd("rm -f %s" % (check_copy_path)) + session.cmd(f"rm -f {src_file}") + session.cmd(f"rm -f {check_copy_path}") - self.mig._hosts_barrier(self.mig.hosts, self.mig.hosts, - 'finish_floppy_test', login_timeout) + self.mig._hosts_barrier( + self.mig.hosts, self.mig.hosts, "finish_floppy_test", login_timeout + ) def clean(self): - super(test_multihost_write, self).clean() + super().clean() class test_multihost_eject(Multihost): - def test(self): from autotest.client.shared.syncdata import SyncData - super(test_multihost_eject, self).test() + super().test() self.mount_dir = params.get("mount_dir", None) format_floppy_cmd = params["format_floppy_cmd"] @@ -327,15 +336,12 @@ def test(self): if not os.path.isabs(floppy): floppy = os.path.join(data_dir.get_data_dir(), floppy) if not os.path.isabs(second_floppy): - second_floppy = os.path.join(data_dir.get_data_dir(), - second_floppy) + second_floppy = os.path.join(data_dir.get_data_dir(), second_floppy) if not self.is_src: self.floppy = create_floppy(params) pid = None - sync_id = {'src': self.srchost, - 'dst': self.dsthost, - "type": "file_trasfer"} + sync_id = {"src": self.srchost, "dst": self.dsthost, "type": "file_trasfer"} filename = "orig" src_file = os.path.join(self.mount_dir, filename) @@ -343,16 +349,15 @@ def test(self): vm = env.get_vm(self.vms[0]) session = vm.wait_for_login(timeout=login_timeout) - if self.mount_dir: # If linux - session.cmd("rm -f %s" % (src_file)) + if self.mount_dir: # If linux + session.cmd(f"rm -f {src_file}") # If mount_dir specified, treat guest as a Linux OS # Some Linux distribution does not load floppy at boot # and Windows needs time to load and init floppy driver - error_context.context("Prepare floppy for writing.", - test.log.info) - if self.mount_dir: # If linux + error_context.context("Prepare floppy for writing.", test.log.info) + if self.mount_dir: # If linux lsmod = session.cmd("lsmod") - if 'floppy' not in lsmod: + if "floppy" not in lsmod: session.cmd("modprobe floppy") else: time.sleep(20) @@ -364,48 +369,58 @@ def test(self): session.cmd(format_floppy_cmd) except aexpect.ShellCmdError as e: if e.status == 1: - test.log.error("First access to floppy failed, " - " Trying a second time as a workaround") + test.log.error( + "First access to floppy failed, " + " Trying a second time as a workaround" + ) session.cmd(format_floppy_cmd) error_context.context("Check floppy") - if self.mount_dir: # If linux - session.cmd("mount %s %s" % (guest_floppy_path, - self.mount_dir), timeout=30) - session.cmd("umount %s" % (self.mount_dir), timeout=30) + if self.mount_dir: # If linux + session.cmd( + f"mount {guest_floppy_path} {self.mount_dir}", timeout=30 + ) + session.cmd(f"umount {self.mount_dir}", timeout=30) written = None if self.mount_dir: filepath = os.path.join(self.mount_dir, "test.txt") - session.cmd("echo 'test' > %s" % (filepath)) - output = session.cmd("cat %s" % (filepath)) + session.cmd(f"echo 'test' > {filepath}") + output = session.cmd(f"cat {filepath}") written = "test\n" - else: # Windows version. + else: # Windows version. filepath = "A:\\test.txt" - session.cmd("echo test > %s" % (filepath)) - output = session.cmd("type %s" % (filepath)) + session.cmd(f"echo test > {filepath}") + output = session.cmd(f"type {filepath}") written = "test \n\n" if output != written: - test.fail("Data read from the floppy differs" - "from the data written to it." 
- " EXPECTED: %s GOT: %s" % - (repr(written), repr(output))) + test.fail( + "Data read from the floppy differs" + "from the data written to it." + f" EXPECTED: {repr(written)} GOT: {repr(output)}" + ) error_context.context("Change floppy.") vm.monitor.cmd("eject floppy0") - vm.monitor.cmd("change floppy %s" % (second_floppy)) + vm.monitor.cmd(f"change floppy {second_floppy}") session.cmd(format_floppy_cmd) error_context.context("Mount and copy data") - if self.mount_dir: # If linux - session.cmd("mount %s %s" % (guest_floppy_path, - self.mount_dir), timeout=30) + if self.mount_dir: # If linux + session.cmd( + f"mount {guest_floppy_path} {self.mount_dir}", timeout=30 + ) if second_floppy not in vm.monitor.info("block"): test.fail("Wrong floppy image is placed in vm.") - sync = SyncData(self.mig.master_id(), self.mig.hostid, - self.mig.hosts, sync_id, self.mig.sync_server) + sync = SyncData( + self.mig.master_id(), + self.mig.hostid, + self.mig.hosts, + sync_id, + self.mig.sync_server, + ) pid = sync.sync(pid, timeout=floppy_prepare_timeout)[self.srchost] @@ -417,30 +432,34 @@ def test(self): written = None if self.mount_dir: filepath = os.path.join(self.mount_dir, "test.txt") - session.cmd("echo 'test' > %s" % (filepath)) - output = session.cmd("cat %s" % (filepath)) + session.cmd(f"echo 'test' > {filepath}") + output = session.cmd(f"cat {filepath}") written = "test\n" - else: # Windows version. + else: # Windows version. filepath = "A:\\test.txt" - session.cmd("echo test > %s" % (filepath)) - output = session.cmd("type %s" % (filepath)) + session.cmd(f"echo test > {filepath}") + output = session.cmd(f"type {filepath}") written = "test \n\n" if output != written: - test.fail("Data read from the floppy differs" - "from the data written to it." - " EXPECTED: %s GOT: %s" % - (repr(written), repr(output))) + test.fail( + "Data read from the floppy differs" + "from the data written to it." + f" EXPECTED: {repr(written)} GOT: {repr(output)}" + ) - self.mig._hosts_barrier(self.mig.hosts, self.mig.hosts, - 'finish_floppy_test', login_timeout) + self.mig._hosts_barrier( + self.mig.hosts, self.mig.hosts, "finish_floppy_test", login_timeout + ) def clean(self): - super(test_multihost_eject, self).clean() + super().clean() test_type = params.get("test_type", "test_singlehost") - if (test_type in locals()): + if test_type in locals(): tests_group = locals()[test_type] tests_group() else: - test.fail("Test group '%s' is not defined in" - " migration_with_dst_problem test" % test_type) + test.fail( + f"Test group '{test_type}' is not defined in" + " migration_with_dst_problem test" + ) diff --git a/qemu/tests/flow_caches_stress_test.py b/qemu/tests/flow_caches_stress_test.py index 10b46e21b5..d9121c4cf5 100644 --- a/qemu/tests/flow_caches_stress_test.py +++ b/qemu/tests/flow_caches_stress_test.py @@ -2,12 +2,7 @@ import time from avocado.utils import process - -from virttest import error_context -from virttest import utils_netperf -from virttest import utils_net -from virttest import env_process -from virttest import data_dir +from virttest import data_dir, env_process, error_context, utils_net, utils_netperf # This decorator makes the test function aware of context strings @@ -31,13 +26,14 @@ def run(test, params, env): :param params: Dictionary with the test parameters. :param env: Dictionary with test environment. 
""" + def get_if_queues(ifname): """ Query interface queues with 'ethtool -l' :param ifname: interface name """ - cmd = "ethtool -l %s" % ifname + cmd = f"ethtool -l {ifname}" out = session.cmd_output(cmd) test.log.info(out) @@ -59,12 +55,14 @@ def get_if_queues(ifname): if "nf_conntrack" in session.cmd_output("lsmod"): msg = "Unload nf_conntrack module in guest." error_context.context(msg, test.log.info) - black_str = "#disable nf_conntrack\\nblacklist nf_conntrack\\n" \ - "blacklist nf_conntrack_ipv6\\nblacklist xt_conntrack\\n" \ - "blacklist nf_conntrack_ftp\\nblacklist xt_state\\n" \ - "blacklist iptable_nat\\nblacklist ipt_REDIRECT\\n" \ - "blacklist nf_nat\\nblacklist nf_conntrack_ipv4" - cmd = "echo -e '%s' >> /etc/modprobe.d/blacklist.conf" % black_str + black_str = ( + "#disable nf_conntrack\\nblacklist nf_conntrack\\n" + "blacklist nf_conntrack_ipv6\\nblacklist xt_conntrack\\n" + "blacklist nf_conntrack_ftp\\nblacklist xt_state\\n" + "blacklist iptable_nat\\nblacklist ipt_REDIRECT\\n" + "blacklist nf_nat\\nblacklist nf_conntrack_ipv4" + ) + cmd = f"echo -e '{black_str}' >> /etc/modprobe.d/blacklist.conf" session.cmd(cmd) session = vm.reboot(session, timeout=timeout) if "nf_conntrack" in session.cmd_output("lsmod"): @@ -73,22 +71,22 @@ def get_if_queues(ifname): error_context.context(err, test.log.info) session.cmd(nf_conntrack_max_set_cmd) - netperf_link = os.path.join(data_dir.get_deps_dir("netperf"), - params.get("netperf_link")) + netperf_link = os.path.join( + data_dir.get_deps_dir("netperf"), params.get("netperf_link") + ) md5sum = params.get("pkg_md5sum") client_num = params.get("netperf_client_num", 520) netperf_timeout = int(params.get("netperf_timeout", 600)) disable_firewall = params.get("disable_firewall", "") if int(params.get("queues", 1)) > 1 and params.get("os_type") == "linux": - error_context.context("Enable multi queues support in guest.", - test.log.info) + error_context.context("Enable multi queues support in guest.", test.log.info) guest_mac = vm.get_mac_address() ifname = utils_net.get_linux_ifname(session, guest_mac) get_if_queues(ifname) try: - cmd = "ethtool -L %s combined %s" % (ifname, params.get("queues")) + cmd = "ethtool -L {} combined {}".format(ifname, params.get("queues")) status, out = session.cmd_status_output(cmd) except Exception as err: get_if_queues(ifname) @@ -110,20 +108,23 @@ def get_if_queues(ifname): client = params.get("shell_client", "ssh") port = params.get("shell_port", "22") prompt = params.get("shell_prompt", r"^root@.*[\#\$]\s*$|#") - linesep = params.get( - "shell_linesep", "\n").encode().decode('unicode_escape') + linesep = params.get("shell_linesep", "\n").encode().decode("unicode_escape") status_test_command = params.get("status_test_command", "echo $?") compile_option_client = params.get("compile_option_client", "") - netperf_client = utils_netperf.NetperfClient(netperf_client_ip, - g_client_path, - md5sum, g_client_link, - client, port, - username=username, - password=password, - prompt=prompt, - linesep=linesep, - status_test_command=status_test_command, - compile_option=compile_option_client) + netperf_client = utils_netperf.NetperfClient( + netperf_client_ip, + g_client_path, + md5sum, + g_client_link, + client, + port, + username=username, + password=password, + prompt=prompt, + linesep=linesep, + status_test_command=status_test_command, + compile_option=compile_option_client, + ) error_context.context("Setup netperf in host", test.log.info) host_ip = utils_net.get_host_ip_address(params) @@ -133,25 +134,29 @@ def 
get_if_queues(ifname): server_passwd = params["hostpasswd"] server_username = params.get("host_username", "root") compile_option_server = params.get("compile_option_server", "") - netperf_server = utils_netperf.NetperfServer(host_ip, - server_path, - md5sum, - netperf_link, - server_shell_client, server_shell_port, - username=server_username, - password=server_passwd, - prompt=prompt, - linesep=linesep, - status_test_command=status_test_command, - compile_option=compile_option_server) + netperf_server = utils_netperf.NetperfServer( + host_ip, + server_path, + md5sum, + netperf_link, + server_shell_client, + server_shell_port, + username=server_username, + password=server_passwd, + prompt=prompt, + linesep=linesep, + status_test_command=status_test_command, + compile_option=compile_option_server, + ) try: error_context.base_context("Run netperf test between host and guest.") error_context.context("Start netserver in host.", test.log.info) netperf_server.start() - error_context.context("Start Netperf in guest for %ss." - % netperf_timeout, test.log.info) - test_option = "-t TCP_CRR -l %s -- -b 10 -D" % netperf_timeout + error_context.context( + f"Start Netperf in guest for {netperf_timeout}s.", test.log.info + ) + test_option = f"-t TCP_CRR -l {netperf_timeout} -- -b 10 -D" netperf_client.bg_start(host_ip, test_option, client_num) start_time = time.time() deviation_time = params.get_numeric("deviation_time") diff --git a/qemu/tests/format_disk.py b/qemu/tests/format_disk.py index f64f02f47d..0e534eacdd 100644 --- a/qemu/tests/format_disk.py +++ b/qemu/tests/format_disk.py @@ -1,9 +1,7 @@ import re import aexpect - -from virttest import error_context -from virttest import utils_misc +from virttest import error_context, utils_misc @error_context.context_aware @@ -32,14 +30,14 @@ def run(test, params, env): cmd_timeout = int(params.get("cmd_timeout", 360)) os_type = params["os_type"] - if os_type == 'linux': + if os_type == "linux": dmesg_cmd = params.get("dmesg_cmd", "dmesg -C") session.cmd(dmesg_cmd) drive_path = "" - if os_type == 'linux': + if os_type == "linux": drive_name = params.objects("images")[-1] - drive_id = params["blk_extra_params_%s" % drive_name].split("=")[1] + drive_id = params[f"blk_extra_params_{drive_name}"].split("=")[1] # If a device option(bool/str) in qemu cmd line doesn't have a value, # qemu assigns the value as "on". 
if drive_id == "NO_EQUAL_STRING": @@ -48,125 +46,119 @@ def run(test, params, env): drive_id = "" drive_path = utils_misc.get_linux_drive_path(session, drive_id) if not drive_path: - test.error("Failed to get '%s' drive path" % drive_name) + test.error(f"Failed to get '{drive_name}' drive path") # Create a partition on disk create_partition_cmd = params.get("create_partition_cmd") if create_partition_cmd: has_dispart = re.findall("diskpart", create_partition_cmd, re.I) - if (os_type == 'windows' and has_dispart): + if os_type == "windows" and has_dispart: error_context.context("Get disk list in guest") list_disk_cmd = params.get("list_disk_cmd") - status, output = session.cmd_status_output(list_disk_cmd, - timeout=cmd_timeout) + status, output = session.cmd_status_output( + list_disk_cmd, timeout=cmd_timeout + ) for i in re.findall(r"Disk*.(\d+)\s+Offline", output): - error_context.context("Set disk '%s' to online status" % i, - test.log.info) + error_context.context(f"Set disk '{i}' to online status", test.log.info) set_online_cmd = params.get("set_online_cmd") % i - status, output = session.cmd_status_output(set_online_cmd, - timeout=cmd_timeout) + status, output = session.cmd_status_output( + set_online_cmd, timeout=cmd_timeout + ) if status != 0: - test.fail("Can not set disk online %s" % output) + test.fail(f"Can not set disk online {output}") error_context.context("Create partition on disk", test.log.info) - status, output = session.cmd_status_output(create_partition_cmd, - timeout=cmd_timeout) + status, output = session.cmd_status_output( + create_partition_cmd, timeout=cmd_timeout + ) if status != 0: - test.fail("Failed to create partition with error: %s" % output) + test.fail(f"Failed to create partition with error: {output}") format_cmd = params.get("format_cmd", "").format(drive_path) if format_cmd: - if os_type == 'linux': + if os_type == "linux": show_mount_cmd = params["show_mount_cmd"].format(drive_path) status = session.cmd_status(show_mount_cmd) if not status: error_context.context("Umount before format", test.log.info) umount_cmd = params["umount_cmd"].format(drive_path) - status, output = session.cmd_status_output(umount_cmd, - timeout=cmd_timeout) + status, output = session.cmd_status_output( + umount_cmd, timeout=cmd_timeout + ) if status != 0: - test.fail("Failed to umount with error: %s" % output) + test.fail(f"Failed to umount with error: {output}") error_context.context("Wipe existing filesystem", test.log.info) wipefs_cmd = params["wipefs_cmd"].format(drive_path) session.cmd(wipefs_cmd) - error_context.context("Format the disk with cmd '%s'" % format_cmd, - test.log.info) - status, output = session.cmd_status_output(format_cmd, - timeout=cmd_timeout) + error_context.context(f"Format the disk with cmd '{format_cmd}'", test.log.info) + status, output = session.cmd_status_output(format_cmd, timeout=cmd_timeout) if status != 0: - test.fail("Failed to format with error: %s" % output) + test.fail(f"Failed to format with error: {output}") mount_cmd = params.get("mount_cmd", "").format(drive_path) if mount_cmd: - error_context.context("Mount the disk with cmd '%s'" % mount_cmd, - test.log.info) - status, output = session.cmd_status_output(mount_cmd, - timeout=cmd_timeout) + error_context.context(f"Mount the disk with cmd '{mount_cmd}'", test.log.info) + status, output = session.cmd_status_output(mount_cmd, timeout=cmd_timeout) if status != 0: show_dev_cmd = params.get("show_dev_cmd", "").format(drive_path) device_list = session.cmd_output_safe(show_dev_cmd) - 
test.log.debug("The devices which will be mounted are: %s", - device_list) - test.fail("Failed to mount with error: %s" % output) + test.log.debug("The devices which will be mounted are: %s", device_list) + test.fail(f"Failed to mount with error: {output}") testfile_name = params.get("testfile_name") if testfile_name: - error_context.context("Write some random string to test file", - test.log.info) + error_context.context("Write some random string to test file", test.log.info) ranstr = utils_misc.generate_random_string(100) writefile_cmd = params["writefile_cmd"] writefile_cmd = writefile_cmd % (ranstr, testfile_name) - status, output = session.cmd_status_output(writefile_cmd, - timeout=cmd_timeout) + status, output = session.cmd_status_output(writefile_cmd, timeout=cmd_timeout) if status != 0: - test.fail("Write to file error: %s" % output) + test.fail(f"Write to file error: {output}") - error_context.context("Read in the file to see whether " - "content has changed", test.log.info) + error_context.context( + "Read in the file to see whether " "content has changed", test.log.info + ) md5chk_cmd = params.get("md5chk_cmd") if md5chk_cmd: - status, output = session.cmd_status_output(md5chk_cmd, - timeout=cmd_timeout) + status, output = session.cmd_status_output(md5chk_cmd, timeout=cmd_timeout) if status != 0: test.fail("Check file md5sum error.") readfile_cmd = params["readfile_cmd"] readfile_cmd = readfile_cmd % testfile_name - status, output = session.cmd_status_output(readfile_cmd, - timeout=cmd_timeout) + status, output = session.cmd_status_output(readfile_cmd, timeout=cmd_timeout) if status != 0: - test.fail("Read file error: %s" % output) + test.fail(f"Read file error: {output}") if output.strip() != ranstr: - test.fail("The content written to file has changed, " - "from: %s, to: %s" % (ranstr, output.strip())) + test.fail( + "The content written to file has changed, " + f"from: {ranstr}, to: {output.strip()}" + ) umount_cmd = params.get("umount_cmd", "").format(drive_path) if umount_cmd: - error_context.context("Unmounting disk(s) after file " - "write/read operation") - status, output = session.cmd_status_output(umount_cmd, - timeout=cmd_timeout) + error_context.context("Unmounting disk(s) after file " "write/read operation") + status, output = session.cmd_status_output(umount_cmd, timeout=cmd_timeout) if status != 0: - show_mount_cmd = params.get( - "show_mount_cmd", "").format(drive_path) + show_mount_cmd = params.get("show_mount_cmd", "").format(drive_path) mount_list = session.cmd_output_safe(show_mount_cmd) test.log.debug("The mounted devices are: %s", mount_list) - test.fail("Failed to umount with error: %s" % output) + test.fail(f"Failed to umount with error: {output}") # Clean partition on disk clean_partition_cmd = params.get("clean_partition_cmd") if clean_partition_cmd: - status, output = session.cmd_status_output(clean_partition_cmd, - timeout=cmd_timeout) + status, output = session.cmd_status_output( + clean_partition_cmd, timeout=cmd_timeout + ) if status != 0: - test.fail("Failed to clean partition with error: %s" % output) + test.fail(f"Failed to clean partition with error: {output}") output = "" try: output = session.cmd("dmesg -c") - error_context.context("Checking if there are I/O error " - "messages in dmesg") + error_context.context("Checking if there are I/O error " "messages in dmesg") except aexpect.ShellCmdError: pass diff --git a/qemu/tests/fullscreen_setup.py b/qemu/tests/fullscreen_setup.py index 564e1db96e..7f3eaf2c90 100644 --- 
a/qemu/tests/fullscreen_setup.py +++ b/qemu/tests/fullscreen_setup.py @@ -28,23 +28,24 @@ def run(test, params, env): utils_spice.wait_timeout(20) for vm in params.get("vms").split(): - utils_spice.clear_interface(env.get_vm(vm), - int(params.get("login_timeout", "360"))) + utils_spice.clear_interface( + env.get_vm(vm), int(params.get("login_timeout", "360")) + ) utils_spice.wait_timeout(20) guest_vm = env.get_vm(params["guest_vm"]) guest_vm.verify_alive() guest_session = guest_vm.wait_for_login( - timeout=int(params.get("login_timeout", 360))) - guest_root_session = guest_vm.wait_for_login(username="root", - password="123456") + timeout=int(params.get("login_timeout", 360)) + ) + guest_root_session = guest_vm.wait_for_login(username="root", password="123456") client_vm = env.get_vm(params["client_vm"]) client_vm.verify_alive() client_session = client_vm.wait_for_login( - timeout=int(params.get("login_timeout", 360))) - client_root_session = client_vm.wait_for_login(username="root", - password="123456") + timeout=int(params.get("login_timeout", 360)) + ) + client_vm.wait_for_login(username="root", password="123456") test.log.debug("Exporting client display") client_session.cmd("export DISPLAY=:0.0") @@ -71,8 +72,11 @@ def run(test, params, env): # Changing the guest resolution client_session.cmd("xrandr -s " + newClientResolution) - test.log.info("The resolution on the client has been changed from %s to: %s", - current, newClientResolution) + test.log.info( + "The resolution on the client has been changed from %s to: %s", + current, + newClientResolution, + ) test.log.debug("Exporting guest display") guest_session.cmd("export DISPLAY=:0.0") diff --git a/qemu/tests/fwcfg.py b/qemu/tests/fwcfg.py index 87c949a7f8..e176ea8be2 100644 --- a/qemu/tests/fwcfg.py +++ b/qemu/tests/fwcfg.py @@ -1,10 +1,7 @@ from avocado.utils import process -from virttest import env_process -from virttest import error_context -from virttest import utils_disk -from virttest import utils_test -from provider import win_dump_utils -from provider import win_driver_utils +from virttest import env_process, error_context, utils_disk, utils_test + +from provider import win_driver_utils, win_dump_utils @error_context.context_aware @@ -19,8 +16,8 @@ def run(test, params, env): 5) Check the dump file can be open with windb tools. 
""" win_dump_utils.set_vm_for_dump(test, params) - vm_name = params['main_vm'] - params['start_vm'] = 'yes' + vm_name = params["main_vm"] + params["start_vm"] = "yes" env_process.preprocess_vm(test, params, env, vm_name) vm = env.get_vm(vm_name) @@ -31,33 +28,29 @@ def run(test, params, env): utils_test.qemu.windrv_verify_running(session, test, driver) if params.get("setup_verifier", "yes") == "yes": error_context.context("Enable fwcfg driver verified", test.log.info) - session = utils_test.qemu.setup_win_driver_verifier(session, - driver, vm) + session = utils_test.qemu.setup_win_driver_verifier(session, driver, vm) error_context.context("Disable security alert", test.log.info) win_dump_utils.disable_security_alert(params, session) - disk = sorted(session.cmd('wmic diskdrive get index').split()[1:])[-1] + disk = sorted(session.cmd("wmic diskdrive get index").split()[1:])[-1] utils_disk.update_windows_disk_attributes(session, disk) - disk_letter = utils_disk.configure_empty_disk(session, - disk, - params['image_size_stg'], - params["os_type"])[0] + disk_letter = utils_disk.configure_empty_disk( + session, disk, params["image_size_stg"], params["os_type"] + )[0] error_context.context("Generate the Memory.dmp file", test.log.info) - dump_file, dump_zip_file = win_dump_utils.generate_mem_dump(test, - params, - vm) + dump_file, dump_zip_file = win_dump_utils.generate_mem_dump(test, params, vm) try: - error_context.context("Copy the Memory.dmp.zip file " - "from host to guest", test.log.info) - vm.copy_files_to(dump_zip_file, "%s:\\Memory.dmp.zip" % disk_letter) + error_context.context( + "Copy the Memory.dmp.zip file " "from host to guest", test.log.info + ) + vm.copy_files_to(dump_zip_file, f"{disk_letter}:\\Memory.dmp.zip") unzip_cmd = params["unzip_cmd"] % (disk_letter, disk_letter) unzip_timeout = int(params.get("unzip_timeout", 1800)) - status, output = session.cmd_status_output(unzip_cmd, - timeout=unzip_timeout) + status, output = session.cmd_status_output(unzip_cmd, timeout=unzip_timeout) if status: - test.error("unzip dump file failed as:\n%s" % output) + test.error(f"unzip dump file failed as:\n{output}") session.cmd(params["move_cmd"].format(disk_letter)) session.cmd(params["save_path_cmd"].format(disk_letter)) windbg_installed = False @@ -65,15 +58,14 @@ def run(test, params, env): if not status: windbg_installed = True if not windbg_installed: - win_dump_utils.install_windbg(test, params, session, - timeout=wdbg_timeout) + win_dump_utils.install_windbg(test, params, session, timeout=wdbg_timeout) # TODO: A temporary workaround to clear up unexpected pop-up in guest if params.get("need_reboot", "no") == "yes": session = vm.reboot() win_dump_utils.dump_windbg_check(test, params, session) finally: - process.system("rm %s %s" % (dump_file, dump_zip_file), shell=True) - session.cmd("del %s" % params["dump_analyze_file"]) + process.system(f"rm {dump_file} {dump_zip_file}", shell=True) + session.cmd("del {}".format(params["dump_analyze_file"])) session.cmd(params["del_path_file"]) session.close() win_driver_utils.memory_leak_check(vm, test, params) diff --git a/qemu/tests/getfd.py b/qemu/tests/getfd.py index a51221be08..a37292f2cf 100644 --- a/qemu/tests/getfd.py +++ b/qemu/tests/getfd.py @@ -13,6 +13,7 @@ def run(test, params, env): :param params: Dictionary with the test parameters. :param env: Dictionary with test environment. 
""" + def has_fd(pid, filepath): """ Returns true if process has a file descriptor pointing to filepath @@ -21,7 +22,7 @@ def has_fd(pid, filepath): :param filepath: the full path for the file """ pathlist = [] - dirname = "/proc/%s/fd" % pid + dirname = f"/proc/{pid}/fd" dirlist = [os.path.join(dirname, f) for f in os.listdir(dirname)] for f in dirlist: if os.path.islink(f): @@ -40,29 +41,31 @@ def has_fd(pid, filepath): test.error("Fail to get process id for VM") # directory for storing temporary files - fdfiles_dir = os.path.join(test.tmpdir, 'fdfiles') + fdfiles_dir = os.path.join(test.tmpdir, "fdfiles") if not os.path.isdir(fdfiles_dir): os.mkdir(fdfiles_dir) # number of files nofiles = int(params.get("number_of_files", "900")) for n in range(nofiles): - name = "fdfile-%s" % n + name = f"fdfile-{n}" path = os.path.join(fdfiles_dir, name) fd = os.open(path, os.O_RDWR | os.O_CREAT) response = vm.monitor.getfd(fd, name) os.close(fd) # getfd is supposed to generate no output if response: - test.error("getfd returned error: %s" % response) + test.error(f"getfd returned error: {response}") # check if qemu process has a copy of the fd if not has_fd(pid, path): - test.error("QEMU process does not seem to have a file " - "descriptor pointing to file %s" % path) + test.error( + "QEMU process does not seem to have a file " + f"descriptor pointing to file {path}" + ) # clean up files for n in range(nofiles): - name = "fdfile-%s" % n + name = f"fdfile-{n}" path = os.path.join(fdfiles_dir, name) try: os.unlink(path) diff --git a/qemu/tests/gluster_boot_snap_boot.py b/qemu/tests/gluster_boot_snap_boot.py index d53a048954..dfb4121370 100644 --- a/qemu/tests/gluster_boot_snap_boot.py +++ b/qemu/tests/gluster_boot_snap_boot.py @@ -1,7 +1,4 @@ -from virttest import env_process -from virttest import error_context -from virttest import qemu_storage -from virttest import data_dir +from virttest import data_dir, env_process, error_context, qemu_storage @error_context.context_aware @@ -23,9 +20,9 @@ def run(test, params, env): image_name = params.get("image_name") timeout = int(params.get("login_timeout", 360)) # Workaroud wrong config file order. 
- params['image_name_backing_file_snapshot'] = params.get("image_name") - params['image_format_backing_file_snapshot'] = params.get("image_format") - params['image_name_snapshot'] = params.get("image_name") + "-snap" + params["image_name_backing_file_snapshot"] = params.get("image_name") + params["image_format_backing_file_snapshot"] = params.get("image_format") + params["image_name_snapshot"] = params.get("image_name") + "-snap" error_context.context("boot guest over glusterfs", test.log.info) vm = env.get_vm(params["main_vm"]) @@ -41,6 +38,10 @@ def run(test, params, env): image = qemu_storage.QemuImg(snapshot_params, base_dir, image_name) image.create(snapshot_params) - env_process.process(test, snapshot_params, env, - env_process.preprocess_image, - env_process.preprocess_vm) + env_process.process( + test, + snapshot_params, + env, + env_process.preprocess_image, + env_process.preprocess_vm, + ) diff --git a/qemu/tests/gluster_create_images.py b/qemu/tests/gluster_create_images.py index 55ee10a499..a2295faf49 100644 --- a/qemu/tests/gluster_create_images.py +++ b/qemu/tests/gluster_create_images.py @@ -1,5 +1,4 @@ -from virttest import env_process -from virttest import error_context +from virttest import env_process, error_context @error_context.context_aware diff --git a/qemu/tests/guest_iommu_group.py b/qemu/tests/guest_iommu_group.py index cc116fb9ed..11e437380e 100644 --- a/qemu/tests/guest_iommu_group.py +++ b/qemu/tests/guest_iommu_group.py @@ -14,18 +14,25 @@ def run(test, params, env): """ def verify_iommu_group(): - """ Verify whether the iommu group is seperated correctly. """ + """Verify whether the iommu group is seperated correctly.""" error_context.context( - 'Verify whether the iommu group is seperated correctly.', test.log.info) - device_id = session.cmd( - "lspci | grep 'PCIe\\|Virtio\\|USB\\|VGA\\|PCI' | awk '{print $1}'").strip().split() + "Verify whether the iommu group is seperated correctly.", test.log.info + ) + device_id = ( + session.cmd( + "lspci | grep 'PCIe\\|Virtio\\|USB\\|VGA\\|PCI' | awk '{print $1}'" + ) + .strip() + .split() + ) group_id = [] for id in device_id: g_id = session.cmd( """dmesg | grep "iommu group" | grep '%s' | awk -F " " '{print $NF}'""" - % ("0000:" + id)).strip() + % ("0000:" + id) + ).strip() if g_id == "": - test.fail("Device ID: '%s' didn't in iommu group" % id) + test.fail(f"Device ID: '{id}' didn't in iommu group") else: group_id.append(g_id) test.log.info("Group ID of %s: %s", id, g_id) diff --git a/qemu/tests/guest_iommu_test.py b/qemu/tests/guest_iommu_test.py index 352da0e692..101caba505 100644 --- a/qemu/tests/guest_iommu_test.py +++ b/qemu/tests/guest_iommu_test.py @@ -1,15 +1,12 @@ import logging import re -from virttest import error_context -from virttest import utils_misc -from virttest import utils_test -from virttest import utils_disk -from provider.storage_benchmark import generate_instance - from avocado.utils import cpu +from virttest import error_context, utils_disk, utils_misc, utils_test + +from provider.storage_benchmark import generate_instance -LOG_JOB = logging.getLogger('avocado.test') +LOG_JOB = logging.getLogger("avocado.test") def check_data_disks(test, params, env, vm, session): @@ -22,62 +19,61 @@ def check_data_disks(test, params, env, vm, session): :param vm: VM object :param session: VM session """ + def _get_mount_points(): - """ Get data disk mount point(s) """ + """Get data disk mount point(s)""" mount_points = [] os_type = params["os_type"] if os_type == "linux": - mounts = 
session.cmd_output_safe('cat /proc/mounts | grep /dev/') + mounts = session.cmd_output_safe("cat /proc/mounts | grep /dev/") for img in image_list: - size = params["image_size_%s" % img] - img_param = params["blk_extra_params_%s" % img].split('=')[1] + size = params[f"image_size_{img}"] + img_param = params[f"blk_extra_params_{img}"].split("=")[1] drive_path = utils_misc.get_linux_drive_path(session, img_param) if not drive_path: - test.error("Failed to get drive path of '%s'" % img) + test.error(f"Failed to get drive path of '{img}'") did = drive_path[5:] - for mp in re.finditer(r'/dev/%s\d+\s+(\S+)\s+' % did, mounts): + for mp in re.finditer(rf"/dev/{did}\d+\s+(\S+)\s+", mounts): mount_points.append(mp.group(1)) else: - mp = utils_disk.configure_empty_linux_disk(session, - did, size) + mp = utils_disk.configure_empty_linux_disk(session, did, size) mount_points.extend(mp) elif os_type == "windows": size_record = [] for img in image_list: - size = params["image_size_%s" % img] + size = params[f"image_size_{img}"] if size in size_record: continue size_record.append(size) disks = utils_disk.get_windows_disks_index(session, size) if not disks: - test.fail("Fail to list image %s" % img) - if not utils_disk.update_windows_disk_attributes(session, - disks): + test.fail(f"Fail to list image {img}") + if not utils_disk.update_windows_disk_attributes(session, disks): test.fail("Failed to update windows disk attributes") for disk in disks: - d_letter = utils_disk.configure_empty_windows_disk(session, - disk, - size) + d_letter = utils_disk.configure_empty_windows_disk( + session, disk, size + ) if not d_letter: test.fail("Fail to format disks") mount_points.extend(d_letter) else: - test.cancel("Unsupported OS type '%s'" % os_type) + test.cancel(f"Unsupported OS type '{os_type}'") return mount_points image_list = params.objects("images")[1:] - image_num = len(image_list) + len(image_list) error_context.context("Check data disks in monitor!", LOG_JOB.info) monitor_info_block = vm.monitor.info_block(False) blocks = monitor_info_block.keys() for image in image_list: - drive = "drive_%s" % image + drive = f"drive_{image}" if drive not in blocks: - test.fail("%s is missing: %s" % (drive, blocks)) + test.fail(f"{drive} is missing: {blocks}") error_context.context("Read and write data on data disks", LOG_JOB.info) - iozone_test = generate_instance(params, vm, 'iozone') + iozone_test = generate_instance(params, vm, "iozone") iozone_cmd = params["iozone_cmd"] iozone_timeout = float(params.get("iozone_timeout", 1800)) try: @@ -88,28 +84,28 @@ def _get_mount_points(): def verify_eim_status(test, params, session): - error_context.context('verify eim status.', test.log.info) + error_context.context("verify eim status.", test.log.info) variant_name = params.get("diff_parameter") if variant_name == "eim_off" or variant_name == "eim_on": - for key_words in params['check_key_words'].split(';'): - output = session.cmd_output("journalctl -k | grep -i \"%s\"" % key_words) + for key_words in params["check_key_words"].split(";"): + output = session.cmd_output(f'journalctl -k | grep -i "{key_words}"') if not output: - test.fail('journalctl -k | grep -i "%s"' - "from the systemd journal log." % key_words) + test.fail( + f'journalctl -k | grep -i "{key_words}"' "from the systemd journal log." 
+ ) test.log.debug(output) def verify_x2apic_status(test, params, session): - error_context.context('verify x2apic status.', test.log.info) + error_context.context("verify x2apic status.", test.log.info) variant_name = params.get("diff_parameter") if variant_name == "x2apic": - for key_words in params['check_key_words'].split(';'): - output = session.cmd_output( - "journalctl -k | grep -i \"%s\"" % key_words - ) + for key_words in params["check_key_words"].split(";"): + output = session.cmd_output(f'journalctl -k | grep -i "{key_words}"') if not output: - test.fail('journalctl -k | grep -i "%s"' - "from the systemd journal log." % key_words) + test.fail( + f'journalctl -k | grep -i "{key_words}"' "from the systemd journal log." + ) test.log.debug(output) @@ -131,7 +127,7 @@ def run(test, params, env): :param env: Dictionary with test environment. """ - if cpu.get_vendor() != 'intel': + if cpu.get_vendor() != "intel": test.cancel("This case only support Intel platform") vm = env.get_vm(params["main_vm"]) diff --git a/qemu/tests/hdparm.py b/qemu/tests/hdparm.py index 875fef23f3..e5d58356f7 100644 --- a/qemu/tests/hdparm.py +++ b/qemu/tests/hdparm.py @@ -1,7 +1,6 @@ import re import aexpect - from virttest import error_context @@ -18,26 +17,28 @@ def run(test, params, env): :param params: Dictionary with the test parameters. :param env: Dictionary with test environment. """ + def check_setting_result(set_cmd, timeout): params = re.findall("(-[a-zA-Z])([0-9]*)", set_cmd) disk = re.findall(r"(\/+[a-z]*\/[a-z]*$)", set_cmd)[0] unsupport_param = 0 - for (param, value) in params: + for param, value in params: check_value = True - cmd = "hdparm %s %s" % (param, disk) + cmd = f"hdparm {param} {disk}" (s, output) = session.cmd_status_output(cmd, timeout) failed_count = len(re.findall("failed:", output)) ignore_count = len(re.findall(ignore_string, output)) if failed_count > ignore_count: - test.error("Fail to get %s parameter value. " - "Output is:\n%s" % (param, output.strip())) + test.error( + f"Fail to get {param} parameter value. " + f"Output is:\n{output.strip()}" + ) else: check_value = False unsupport_param += 1 - test.log.warn("Disk %s not support parameter %s", disk, param) + test.log.warning("Disk %s not support parameter %s", disk, param) if check_value and value not in output: - test.fail("Fail to set %s parameter to value: %s" - % (param, value)) + test.fail(f"Fail to set {param} parameter to value: {value}") if len(params) == unsupport_param: test.cancel("All parameters are not supported. 
Skip the test") @@ -47,14 +48,16 @@ def perform_read_timing(disk, timeout, num=5): cmd = params["device_cache_read_cmd"] % disk (s, output) = session.cmd_status_output(cmd, timeout) if s != 0: - test.fail("Fail to perform device/cache read" - " timings \nOutput is: %s\n" % output) - test.log.info("Output of device/cache read timing check (%s of %s):", - i + 1, num) + test.fail( + "Fail to perform device/cache read" + f" timings \nOutput is: {output}\n" + ) + test.log.info( + "Output of device/cache read timing check (%s of %s):", i + 1, num + ) for line in output.strip().splitlines(): test.log.info(line) - (result, unit) = re.findall("= *([0-9]*.+[0-9]*) ([a-zA-Z]*)", - output)[1] + (result, unit) = re.findall("= *([0-9]*.+[0-9]*) ([a-zA-Z]*)", output)[1] if unit == "kB": result = float(result) / 1024.0 results += float(result) @@ -78,15 +81,21 @@ def perform_read_timing(disk, timeout, num=5): failed_count = len(re.findall("failed:", err.output)) ignore_count = len(re.findall(ignore_string, err.output)) if failed_count > ignore_count: - test.error("Fail to setting hard disk to lower " - "performance. Output is:%s", err.output) + test.error( + "Fail to setting hard disk to lower " "performance. Output is:%s", + err.output, + ) - error_context.context("Checking hard disk keyval under " - "lower performance settings") + error_context.context( + "Checking hard disk keyval under " "lower performance settings" + ) check_setting_result(cmd, timeout) low_result = perform_read_timing(disk, timeout) - test.log.info("Average buffered disk read speed under low performance " - "settings: %.2f MB/sec", low_result) + test.log.info( + "Average buffered disk read speed under low performance " + "settings: %.2f MB/sec", + low_result, + ) error_context.context("Setting hard disk to higher performance") cmd = params["high_status_cmd"] % disk @@ -96,15 +105,21 @@ def perform_read_timing(disk, timeout, num=5): failed_count = len(re.findall("failed:", err.output)) ignore_count = len(re.findall(ignore_string, err.output)) if failed_count > ignore_count: - test.error("Fail to setting hard disk to higher " - "performance. Output is:%s", err.output) + test.error( + "Fail to setting hard disk to higher " "performance. Output is:%s", + err.output, + ) - error_context.context("Checking hard disk keyval under " - "higher performance settings") + error_context.context( + "Checking hard disk keyval under " "higher performance settings" + ) check_setting_result(cmd, timeout) high_result = perform_read_timing(disk, timeout) - test.log.info("Average buffered disk read speed under high performance " - "settings: %.2f MB/sec", high_result) + test.log.info( + "Average buffered disk read speed under high performance " + "settings: %.2f MB/sec", + high_result, + ) if not float(high_result) > float(low_result): test.fail("High performance setting does not increase read speed") diff --git a/qemu/tests/hello_world.py b/qemu/tests/hello_world.py index ce81105dd7..d20970dda4 100644 --- a/qemu/tests/hello_world.py +++ b/qemu/tests/hello_world.py @@ -1,7 +1,6 @@ import time from avocado.utils import process - from virttest import error_context @@ -43,8 +42,9 @@ def run(test, params, env): session = vm.wait_for_login(timeout=timeout) # Send command to the guest, using session command. - error_context.context("Echo 'Hello, world!' in guest and get the output", - test.log.info) + error_context.context( + "Echo 'Hello, world!' 
in guest and get the output", test.log.info + ) # Here, timeout was passed explicitly to show it can be tweaked guest_cmd = "echo 'Hello, world!'" # If you just need the output, use session.cmd(). If the command fails, @@ -56,7 +56,7 @@ def run(test, params, env): test.log.info("Guest cmd output: '%s'", guest_cmd_output) # Here, we will fail a test if the guest outputs something unexpected - if guest_cmd_output != 'Hello, world!': + if guest_cmd_output != "Hello, world!": test.fail("Unexpected output from guest") # Send command to the guest, using monitor command. @@ -70,20 +70,19 @@ def run(test, params, env): vm.verify_status("running") # Send command to host - error_context.context("Echo 'Hello, world!' in the host using shell", - test.log.info) + error_context.context("Echo 'Hello, world!' in the host using shell", test.log.info) # If the command fails, it will raise a process.CmdError exception - host_cmd_output = process.system_output("echo 'Hello, world!'", - shell=True).decode() + host_cmd_output = process.system_output("echo 'Hello, world!'", shell=True).decode() test.log.info("Host cmd output '%s'", host_cmd_output) # Here, we will fail a test if the host outputs something unexpected - if host_cmd_output != 'Hello, world!': + if host_cmd_output != "Hello, world!": test.fail("Unexpected output from guest") # An example of getting a required parameter from the config file - error_context.context("Get a required parameter from the config file", - test.log.info) + error_context.context( + "Get a required parameter from the config file", test.log.info + ) sleep_time = int(params["sleep_time"]) test.log.info("Sleep for '%d' seconds", sleep_time) time.sleep(sleep_time) diff --git a/qemu/tests/hotplug_block_resize.py b/qemu/tests/hotplug_block_resize.py index cb5635443c..63340bf620 100644 --- a/qemu/tests/hotplug_block_resize.py +++ b/qemu/tests/hotplug_block_resize.py @@ -1,17 +1,19 @@ import json import re -from virttest import data_dir -from virttest import error_context -from virttest import qemu_storage -from virttest import storage -from virttest import utils_disk -from virttest import utils_misc -from virttest import utils_test +from virttest import ( + data_dir, + error_context, + qemu_storage, + storage, + utils_disk, + utils_misc, + utils_test, +) +from virttest.qemu_capabilities import Flags from virttest.utils_numeric import normalize_data_size from provider.block_devices_plug import BlockDevicesPlug -from virttest.qemu_capabilities import Flags from provider.storage_benchmark import generate_instance ENLARGE, SHRINK = ("enlarge", "shrink") @@ -35,58 +37,66 @@ def run(test, params, env): :param params: Dictionary with the test parameters. :param env: Dictionary with test environment. """ + def _change_vm_power(): - """ Change the vm power. """ - method, command = params['command_opts'].split(',') - test.log.info('Sending command(%s): %s', method, command) - if method == 'shell': + """Change the vm power.""" + method, command = params["command_opts"].split(",") + test.log.info("Sending command(%s): %s", method, command) + if method == "shell": power_session = vm.wait_for_login(timeout=360) power_session.sendline(command) else: getattr(vm.monitor, command)() if shutdown_vm: - if not utils_misc.wait_for( - lambda: vm.monitor.get_event("SHUTDOWN"), 600): + if not utils_misc.wait_for(lambda: vm.monitor.get_event("SHUTDOWN"), 600): raise test.fail("Not received SHUTDOWN QMP event.") def _check_vm_status(timeout=600): - """ Check the status of vm. 
""" - action = 'shutdown' if shutdown_vm else 'login' - if not getattr(vm, 'wait_for_%s' % action)(timeout=timeout): - test.fail('Failed to %s vm.' % action) + """Check the status of vm.""" + action = "shutdown" if shutdown_vm else "login" + if not getattr(vm, f"wait_for_{action}")(timeout=timeout): + test.fail(f"Failed to {action} vm.") def _block_resize(dev): - """ Resize the block size. """ - resize_size = int(float(normalize_data_size(re.search( - r'(\d+\.?(\d+)?\w)', params['resize_size']).group(1), "B"))) - size = str( - data_image_size + resize_size) if resize_op == ENLARGE else str( - data_image_size - resize_size) + """Resize the block size.""" + resize_size = int( + float( + normalize_data_size( + re.search(r"(\d+\.?(\d+)?\w)", params["resize_size"]).group(1), "B" + ) + ) + ) + size = ( + str(data_image_size + resize_size) + if resize_op == ENLARGE + else str(data_image_size - resize_size) + ) test.log.info("Start to %s %s to %sB.", resize_op, plug[0], size) - args = (None, size, dev) if vm.check_capability( - Flags.BLOCKDEV) else (dev, size) + args = (None, size, dev) if vm.check_capability(Flags.BLOCKDEV) else (dev, size) vm.monitor.block_resize(*args) return size def _check_img_size(size): - """ Check the size of image after resize. """ + """Check the size of image after resize.""" img = qemu_storage.QemuImg( - data_image_params, data_dir.get_data_dir(), data_image) - if json.loads(img.info(True, 'json'))['virtual-size'] != int(size): - test.fail('The virtual size is not equal to %sB after %s.' % - (size, resize_op)) + data_image_params, data_dir.get_data_dir(), data_image + ) + if json.loads(img.info(True, "json"))["virtual-size"] != int(size): + test.fail(f"The virtual size is not equal to {size}B after {resize_op}.") - shutdown_vm = params.get('shutdown_vm', 'no') == 'yes' - reboot = params.get('reboot_vm', 'no') == 'yes' + shutdown_vm = params.get("shutdown_vm", "no") == "yes" + reboot = params.get("reboot_vm", "no") == "yes" data_image = params.get("images").split()[-1] data_image_params = params.object_params(data_image) - data_image_size = int(float(normalize_data_size( - data_image_params.get("image_size"), "B"))) + data_image_size = int( + float(normalize_data_size(data_image_params.get("image_size"), "B")) + ) data_image_filename = storage.get_image_filename( - data_image_params, data_dir.get_data_dir()) - resize_op = SHRINK if '-' in params['resize_size'] else ENLARGE - os_type = params['os_type'] - is_windows = os_type == 'windows' + data_image_params, data_dir.get_data_dir() + ) + resize_op = SHRINK if "-" in params["resize_size"] else ENLARGE + os_type = params["os_type"] + is_windows = os_type == "windows" vm = env.get_vm(params["main_vm"]) vm.verify_alive() @@ -94,22 +104,26 @@ def _check_img_size(size): if is_windows: session = utils_test.qemu.windrv_check_running_verifier( - session, vm, test, params['driver_name'], 300) + session, vm, test, params["driver_name"], 300 + ) plug = BlockDevicesPlug(vm) plug.hotplug_devs_serial() - block_size = _block_resize(vm.get_block({'file': data_image_filename})) + block_size = _block_resize(vm.get_block({"file": data_image_filename})) _check_img_size(block_size) - iozone = generate_instance(params, vm, 'iozone') + iozone = generate_instance(params, vm, "iozone") try: if is_windows: if not utils_disk.update_windows_disk_attributes(session, plug[:]): - test.fail('Failed to clear readonly and online on %s.' 
% plug[:]) + test.fail(f"Failed to clear readonly and online on {plug[:]}.") mount_point = utils_disk.configure_empty_disk( - session, plug[0], block_size + 'B', os_type)[0] - iozone_size = str(int(float(normalize_data_size(block_size)))) + 'M' - iozone.run(params['iozone_options'].format(mount_point, iozone_size), - float(params['iozone_timeout'])) + session, plug[0], block_size + "B", os_type + )[0] + iozone_size = str(int(float(normalize_data_size(block_size)))) + "M" + iozone.run( + params["iozone_options"].format(mount_point, iozone_size), + float(params["iozone_timeout"]), + ) finally: iozone.clean() if shutdown_vm or reboot: diff --git a/qemu/tests/hotplug_mem.py b/qemu/tests/hotplug_mem.py index 33171dfc66..3255fd8383 100644 --- a/qemu/tests/hotplug_mem.py +++ b/qemu/tests/hotplug_mem.py @@ -2,25 +2,22 @@ import time from avocado.utils.wait import wait_for - -from virttest import utils_test -from virttest import error_context -from virttest.utils_test import BackgroundTest -from virttest.utils_test import run_virt_sub_test +from virttest import error_context, utils_test +from virttest.utils_test import BackgroundTest, run_virt_sub_test from virttest.utils_test.qemu import MemoryHotplugTest -LOG_JOB = logging.getLogger('avocado.test') +LOG_JOB = logging.getLogger("avocado.test") class MemoryHotplugSimple(MemoryHotplugTest): - def run_sub_test(self): - """ Run virt sub test before/after hotplug/unplug memory device""" + """Run virt sub test before/after hotplug/unplug memory device""" if self.params.get("sub_type"): - step = ("Run sub test '%s' %s %s memory device" % - (self.params["sub_test"], - self.params["stage"], - self.params["operation"])) + step = "Run sub test '{}' {} {} memory device".format( + self.params["sub_test"], + self.params["stage"], + self.params["operation"], + ) error_context.context(step, LOG_JOB.info) args = (self.test, self.params, self.env, self.params["sub_type"]) run_virt_sub_test(*args) @@ -48,9 +45,9 @@ def restore_memory(self, pre_vm, post_vm): elif len(mem_devs_origin) < len(mem_devs_post): mem_devs = mem_devs_post - mem_devs_origin vm, operation = pre_vm, "unplug" - func = getattr(self, "%s_memory" % operation) # pylint: disable=E0606 - for mem_dev in mem_devs: # pylint: disable=E0606 - func(vm, mem_dev) # pylint: disable=E0606 + func = getattr(self, f"{operation}_memory") # pylint: disable=E0606 + for mem_dev in mem_devs: # pylint: disable=E0606 + func(vm, mem_dev) # pylint: disable=E0606 def get_mem_by_name(self, vm, name): """ @@ -62,12 +59,12 @@ def get_mem_by_name(self, vm, name): def unplug_memory(self, vm, target_mem): """Unplug the target memory, if the memory not exists, - hotplug it, then unplug it + hotplug it, then unplug it """ devs = self.get_mem_by_name(vm, target_mem) if not devs and self.params.get("strict_check") != "yes": self.hotplug_memory(vm, target_mem) - return super(MemoryHotplugSimple, self).unplug_memory(vm, target_mem) + return super().unplug_memory(vm, target_mem) def start_test(self): operation = self.params["operation"] @@ -75,12 +72,11 @@ def start_test(self): stage = self.params.get("stage", "before") login_timeout = int(self.params.get("login_timeout", 360)) sub_test_runner = ( - stage == 'during' and [ - self.run_background_test] or [ - self.run_sub_test])[0] - func = getattr(self, "%s_memory" % operation) + stage == "during" and [self.run_background_test] or [self.run_sub_test] + )[0] + func = getattr(self, f"{operation}_memory") if not callable(func): - self.test.error("Unsupported memory operation '%s'" % 
operation) + self.test.error(f"Unsupported memory operation '{operation}'") vm = self.env.get_vm(self.params["main_vm"]) vm.wait_for_login(timeout=login_timeout) bootup_time = time.time() - vm.start_time @@ -89,7 +85,7 @@ def start_test(self): if stage != "after": sub_test = sub_test_runner() if self.params.get("sub_type") == "boot": - time.sleep(bootup_time/2) + time.sleep(bootup_time / 2) for target_mem in target_mems.split(): func(vm, target_mem) self.check_memory(vm) @@ -104,11 +100,9 @@ def start_test(self): vm.reboot() finally: try: - self.restore_memory( - vm, self.env.get_vm( - self.params['main_vm'])) + self.restore_memory(vm, self.env.get_vm(self.params["main_vm"])) except Exception as details: - LOG_JOB.warn("Error happen when restore vm: %s", details) + LOG_JOB.warning("Error happen when restore vm: %s", details) self.close_sessions() diff --git a/qemu/tests/hotplug_mem_migration.py b/qemu/tests/hotplug_mem_migration.py index cacd8f4ba5..e621e4ec8b 100644 --- a/qemu/tests/hotplug_mem_migration.py +++ b/qemu/tests/hotplug_mem_migration.py @@ -1,15 +1,14 @@ import logging +import math import re import time -import math - from decimal import Decimal from virttest import utils_test from virttest.utils_misc import NumaInfo from virttest.utils_test.qemu import MemoryHotplugTest -LOG_JOB = logging.getLogger('avocado.test') +LOG_JOB = logging.getLogger("avocado.test") def run(test, params, env): @@ -27,21 +26,24 @@ def run(test, params, env): :param params: Dictionary with the test parameters :param env: Dictionary with test environment. """ + def _check_online_mem(session): mem_device = session.cmd_output(cmd_check_online_mem, timeout=10) LOG_JOB.info(mem_device) - online_mem = re.search(r'online memory:\s+\d+(\.\d*)?G', mem_device).group() - online_mem = re.search(r'\d+(\.\d*)?', online_mem).group() + online_mem = re.search(r"online memory:\s+\d+(\.\d*)?G", mem_device).group() + online_mem = re.search(r"\d+(\.\d*)?", online_mem).group() return Decimal(online_mem) def _compare_mem_size(online_mem, expect_mem_size): if Decimal(online_mem) != expect_mem_size: - test.fail("The online mem size is %sG not match expected memory" - " %sG" % (online_mem, expect_mem_size)) + test.fail( + f"The online mem size is {online_mem}G not match expected memory" + f" {expect_mem_size}G" + ) - cmd_check_online_mem = params.get('cmd_check_online_mem') - cmd_new_folder = params.get('cmd_new_folder') - numa_test = params.get('numa_test') + cmd_check_online_mem = params.get("cmd_check_online_mem") + cmd_new_folder = params.get("cmd_new_folder") + numa_test = params.get("numa_test") mem_plug_size = params.get("size_mem") target_mems = params["target_mems"] @@ -50,7 +52,7 @@ def _compare_mem_size(online_mem, expect_mem_size): session = vm.wait_for_login() numa_info = NumaInfo(session=session) - mem_plug_size = Decimal(re.search(r'\d+', mem_plug_size).group()) + mem_plug_size = Decimal(re.search(r"\d+", mem_plug_size).group()) expect_mem_size = _check_online_mem(session) hotplug_test = MemoryHotplugTest(test, params, env) for target_mem in target_mems.split(): @@ -63,12 +65,11 @@ def _compare_mem_size(online_mem, expect_mem_size): for node in numa_info.get_online_nodes(): LOG_JOB.info("Use the hotplug memory by numa %s.", node) session.cmd(cmd_new_folder) - free_size = float(numa_info.read_from_node_meminfo(node, 'MemFree')) + free_size = float(numa_info.read_from_node_meminfo(node, "MemFree")) session.cmd(numa_test % (node, math.floor(free_size * 0.9)), timeout=600) try: stress_args = params.get("stress_args") 
- stress_test = utils_test.VMStress(vm, "stress", params, - stress_args=stress_args) + stress_test = utils_test.VMStress(vm, "stress", params, stress_args=stress_args) stress_test.load_stress_tool() except utils_test.StressError as info: test.error(info) diff --git a/qemu/tests/hotplug_mem_negative.py b/qemu/tests/hotplug_mem_negative.py index 7d6fa458a4..af180c710d 100644 --- a/qemu/tests/hotplug_mem_negative.py +++ b/qemu/tests/hotplug_mem_negative.py @@ -1,11 +1,7 @@ import re -from avocado.utils import memory -from avocado.utils import process - -from virttest import env_process -from virttest import error_context - +from avocado.utils import memory, process +from virttest import env_process, error_context from virttest.qemu_devices import qdevices from virttest.utils_numeric import normalize_data_size from virttest.utils_test.qemu import MemoryHotplugTest @@ -24,6 +20,7 @@ def run(test, params, env): :param params: Dictionary with the test parameters :param env: Dictionary with test environment. """ + @error_context.context_aware def _hotplug_memory(vm, name): hotplug_test = MemoryHotplugTest(test, params, env) @@ -31,12 +28,13 @@ def _hotplug_memory(vm, name): for dev in devices: if isinstance(dev, qdevices.Dimm): if params["set_addr"] == "yes": - addr = params["addr_dimm_%s" % name] + addr = params[f"addr_dimm_{name}"] else: addr = hotplug_test.get_mem_addr(vm, dev.get_qid()) dev.set_param("addr", addr) - error_context.context("Hotplug %s '%s' to VM" % - ("pc-dimm", dev.get_qid()), test.log.info) + error_context.context( + "Hotplug {} '{}' to VM".format("pc-dimm", dev.get_qid()), test.log.info + ) vm.devices.simple_hotplug(dev, vm.monitor) hotplug_test.update_vm_after_hotplug(vm, dev) return devices @@ -47,32 +45,36 @@ def collect_hotplug_info(): try: _hotplug_memory(vm, target_mem) except Exception as e: - error_context.context("Error happen %s: %s" % - (target_mem, e), test.log.info) + error_context.context(f"Error happen {target_mem}: {e}", test.log.info) details.update({target_mem: str(e)}) else: - error_context.context("Hotplug memory successful", - test.log.info) + error_context.context("Hotplug memory successful", test.log.info) details.update({target_mem: "Hotplug memory successful"}) return details def check_msg(keywords, msg): - if not re.search(r"%s" % keywords, msg): + if not re.search(rf"{keywords}", msg): test.fail("No valid keywords were found in the qemu prompt message") if params["size_mem"] == "": - ipa_limit_check = params.get('ipa_limit_check') + ipa_limit_check = params.get("ipa_limit_check") overcommit_mem = normalize_data_size("%sK" % (memory.memtotal() * 2), "G") - params["size_mem"] = "%sG" % round(float(overcommit_mem)) + params["size_mem"] = f"{round(float(overcommit_mem))}G" if ipa_limit_check: system_init_mem = int(params["system_init_mem"]) slots_mem = int(params["slots_mem"]) extend_mem_region = int(params["extend_mem_region"]) - ipa_limit = process.run(ipa_limit_check, shell=True).stdout.decode().strip() or 40 + ipa_limit = ( + process.run(ipa_limit_check, shell=True).stdout.decode().strip() or 40 + ) ipa_limit_size = 1 << int(ipa_limit) - ipa_limit_size = int(normalize_data_size("%sB" % ipa_limit_size, "G")) - limit_maxmem = (ipa_limit_size - system_init_mem - (1 * slots_mem) - extend_mem_region) - maxmem_mem = int(normalize_data_size("%s" % params["maxmem_mem"], "G")) + ipa_limit_size = int(normalize_data_size(f"{ipa_limit_size}B", "G")) + limit_maxmem = ( + ipa_limit_size - system_init_mem - (1 * slots_mem) - extend_mem_region + ) + maxmem_mem = 
int( + normalize_data_size("{}".format(params["maxmem_mem"]), "G") + ) params["maxmem_mem"] = "%dG" % min(limit_maxmem, maxmem_mem) if params["policy_mem"] == "bind": @@ -90,6 +92,7 @@ def check_msg(keywords, msg): else: for target_mem in params.objects("target_mems"): mem_params = params.object_params(target_mem) - error_context.context("Check %s qemu prompt " - "message." % target_mem, test.log.info) + error_context.context( + f"Check {target_mem} qemu prompt " "message.", test.log.info + ) check_msg(mem_params["keywords"], msg[target_mem]) diff --git a/qemu/tests/hotplug_mem_repeat.py b/qemu/tests/hotplug_mem_repeat.py index 6f78a6f7c8..8682b6936a 100644 --- a/qemu/tests/hotplug_mem_repeat.py +++ b/qemu/tests/hotplug_mem_repeat.py @@ -1,15 +1,12 @@ import logging -from virttest import error_context -from virttest import utils_test - +from virttest import error_context, utils_test from virttest.utils_test.qemu import MemoryHotplugTest -LOG_JOB = logging.getLogger('avocado.test') +LOG_JOB = logging.getLogger("avocado.test") class MemoryHotplugRepeat(MemoryHotplugTest): - def repeat_hotplug(self, vm, target_mems): """ Hotplug memory in target_mems @@ -39,30 +36,33 @@ def start_test(self): times = self.params.get_numeric("repeat_times", int) target_mems = [] for i in range(times): - target_mems.append("mem%s" % i) + target_mems.append(f"mem{i}") vm = self.env.get_vm(self.params["main_vm"]) session = vm.wait_for_login() if self.params.get("vm_arch_name", "") == "aarch64": self.test.log.info("Check basic page size on guest.") get_basic_page = self.params.get("get_basic_page") - if session.cmd(get_basic_page).strip() == '65536': - self.params['size_mem'] = self.params.get("size_mem_64k") + if session.cmd(get_basic_page).strip() == "65536": + self.params["size_mem"] = self.params.get("size_mem_64k") if self.params.get_boolean("mem_unplug_test", False): arg = "movable_node" utils_test.update_boot_option(vm, args_added=arg) original_mem = self.get_guest_total_mem(vm) if self.params["test_type"] == "scalability_test": - error_context.context("Repeat hotplug memory for %s times" - % times, LOG_JOB.info) + error_context.context( + f"Repeat hotplug memory for {times} times", LOG_JOB.info + ) self.repeat_hotplug(vm, target_mems) if self.params.get_boolean("mem_unplug_test", False): - error_context.context("Repeat unplug memory for %s times" - % times, LOG_JOB.info) + error_context.context( + f"Repeat unplug memory for {times} times", LOG_JOB.info + ) self.repeat_unplug(vm, target_mems) else: for target_mem in target_mems: - error_context.context("Hotplug and unplug memory %s" - % target_mem, LOG_JOB.info) + error_context.context( + f"Hotplug and unplug memory {target_mem}", LOG_JOB.info + ) self.hotplug_memory(vm, target_mem) if self.params.get_boolean("mem_unplug_test", False): self.unplug_memory(vm, target_mem) @@ -70,8 +70,10 @@ def start_test(self): if self.params.get_boolean("mem_unplug_test", False): current_mem = self.get_guest_total_mem(vm) if current_mem != original_mem: - self.test.fail("Guest memory changed about repeat" - " hotpug/unplug memory %d times" % times) + self.test.fail( + "Guest memory changed about repeat" + " hotpug/unplug memory %d times" % times + ) vm.verify_kernel_crash() session.close() diff --git a/qemu/tests/hotplug_mem_reserve.py b/qemu/tests/hotplug_mem_reserve.py index f7365fea15..5c1a8d79a2 100644 --- a/qemu/tests/hotplug_mem_reserve.py +++ b/qemu/tests/hotplug_mem_reserve.py @@ -1,8 +1,6 @@ from avocado.utils import process - -from virttest.utils_misc import 
normalize_data_size -from virttest.utils_misc import wait_for from virttest.staging import utils_memory +from virttest.utils_misc import normalize_data_size, wait_for from virttest.utils_test.qemu import MemoryHotplugTest @@ -18,6 +16,7 @@ def run(test, params, env): :param params: Dictionary with the test parameters :param env: Dictionary with test environment. """ + def get_hp_rsvd(): """ A generator to get HugePages_Rsvd until it does not change @@ -35,18 +34,18 @@ def get_hp_rsvd(): mem_name = params["target_mems"] hp_size = utils_memory.read_from_meminfo("Hugepagesize") hp_total = utils_memory.read_from_meminfo("HugePages_Total") - size_target_mem = params["size_mem_%s" % mem_name] - hp_target = int(float(normalize_data_size(size_target_mem, "K")) / hp_size)\ - + int(hp_total) - process.system("echo %s > /proc/sys/vm/nr_hugepages" % hp_target, - shell=True) + size_target_mem = params[f"size_mem_{mem_name}"] + hp_target = int(float(normalize_data_size(size_target_mem, "K")) / hp_size) + int( + hp_total + ) + process.system(f"echo {hp_target} > /proc/sys/vm/nr_hugepages", shell=True) hotplug_test = MemoryHotplugTest(test, params, env) hotplug_test.hotplug_memory(vm, mem_name) hotplug_test.check_memory(vm) timeout = int(params.get("check_timeout", 60)) rsvd_is_stable = get_hp_rsvd() if not wait_for(lambda: next(rsvd_is_stable), timeout, 5, 3): - test.error("HugePages_Rsvd is not stable in %ss" % timeout) + test.error(f"HugePages_Rsvd is not stable in {timeout}s") try: hugepage_rsvd = utils_memory.read_from_meminfo("HugePages_Rsvd") test.log.info("HugePages_Rsvd is %s after hotplug memory", hugepage_rsvd) @@ -54,16 +53,22 @@ def get_hp_rsvd(): hugepages_total = utils_memory.read_from_meminfo("HugePages_Total") hugepages_free = utils_memory.read_from_meminfo("HugePages_Free") hugepagesize = utils_memory.read_from_meminfo("Hugepagesize") - test.log.info("HugePages_Total is %s, hugepages_free is %s", - hugepages_total, hugepages_free) - plug_size = params["size_mem_%s" % mem_name] - numa_size = params["size_mem_%s" % params["mem_devs"]] - expected_size = float(normalize_data_size(plug_size, "K")) + \ - float(normalize_data_size(numa_size, "K")) + test.log.info( + "HugePages_Total is %s, hugepages_free is %s", + hugepages_total, + hugepages_free, + ) + plug_size = params[f"size_mem_{mem_name}"] + numa_size = params["size_mem_{}".format(params["mem_devs"])] + expected_size = float(normalize_data_size(plug_size, "K")) + float( + normalize_data_size(numa_size, "K") + ) page_number = hugepages_total - hugepages_free + hugepage_rsvd if page_number * hugepagesize != int(expected_size): - test.fail("HugePages_Total - HugePages_Free + HugePages_Rsvd is" - "not equal to memory backend size") + test.fail( + "HugePages_Total - HugePages_Free + HugePages_Rsvd is" + "not equal to memory backend size" + ) else: if hugepage_rsvd != 0: test.fail("HugePages_Rsvd is not 0 when reserve option is off") diff --git a/qemu/tests/hotplug_mem_share_discard_data.py b/qemu/tests/hotplug_mem_share_discard_data.py index bf7218cf96..1cdb2f1688 100644 --- a/qemu/tests/hotplug_mem_share_discard_data.py +++ b/qemu/tests/hotplug_mem_share_discard_data.py @@ -1,11 +1,8 @@ from avocado.utils import process - -from virttest import error_context -from virttest import env_process - +from virttest import env_process, error_context +from virttest.staging import utils_memory from virttest.utils_numeric import normalize_data_size from virttest.utils_test.qemu import MemoryHotplugTest -from virttest.staging import utils_memory 
@error_context.context_aware @@ -28,19 +25,21 @@ def run(test, params, env): timeout = int(params.get("login_timeout", 240)) - mem_dev = params.get('mem_devs') - size_mem = int(normalize_data_size(params['size_mem_%s' % mem_dev], 'K')) + mem_dev = params.get("mem_devs") + size_mem = int(normalize_data_size(params[f"size_mem_{mem_dev}"], "K")) total_hg_size = size_mem target_mem = params.get("target_mems") - if params.get('backend_mem_%s' % target_mem) == 'memory-backend-file': - size_target_mem = int(normalize_data_size(params['size_mem_%s' % target_mem], 'K')) + if params.get(f"backend_mem_{target_mem}") == "memory-backend-file": + size_target_mem = int( + normalize_data_size(params[f"size_mem_{target_mem}"], "K") + ) total_hg_size += size_target_mem hp_size = utils_memory.read_from_meminfo("Hugepagesize") - params['target_hugepages'] = int(total_hg_size // hp_size) - params['setup_hugepages'] = "yes" - params['not_preprocess'] = "no" + params["target_hugepages"] = int(total_hg_size // hp_size) + params["setup_hugepages"] = "yes" + params["not_preprocess"] = "no" env_process.preprocess(test, params, env) @@ -59,30 +58,32 @@ def run(test, params, env): vm.destroy() hp_total = int(utils_memory.read_from_meminfo("HugePages_Total")) - hp_free = int(utils_memory.read_from_meminfo('HugePages_Free')) + hp_free = int(utils_memory.read_from_meminfo("HugePages_Free")) - error_context.context("hp_total: %s" % str(hp_total), test.log.debug) - error_context.context("hp_free: %s" % str(hp_free), test.log.debug) + error_context.context(f"hp_total: {str(hp_total)}", test.log.debug) + error_context.context(f"hp_free: {str(hp_free)}", test.log.debug) if params.get("backend_mem_plug1") == "memory-backend-file": if not params.get_boolean("discard-data_plug1", True): try: - process.system("ls %s" % params["mem-path_plug1"]) + process.system("ls {}".format(params["mem-path_plug1"])) except process.CmdError: - test.fail("Error, %s not found." % params["mem-path_plug1"]) + test.fail("Error, {} not found.".format(params["mem-path_plug1"])) op = (hp_total - hp_free) * (hp_size / 1024) - hp_used = int(normalize_data_size("%sM" % str(op), "K")) + hp_used = int(normalize_data_size(f"{str(op)}M", "K")) - error_context.context("hp_used: %s" % str(hp_used), test.log.debug) + error_context.context(f"hp_used: {str(hp_used)}", test.log.debug) if hp_used != size_target_mem: test.fail("Error, total hugepages doesn't match with used memory") elif hp_total != hp_free: test.fail("Error, free hugepages doesn't match with total hugepages") # Deletes the mem-path file to avoid test error - process.system("rm -rf %s" % params["mem-path_plug1"]) + process.system("rm -rf {}".format(params["mem-path_plug1"])) # Compares free and total memory values after deleting mem-path file - hp_free_after_delete = int(utils_memory.read_from_meminfo('HugePages_Free')) + hp_free_after_delete = int(utils_memory.read_from_meminfo("HugePages_Free")) if hp_total != hp_free_after_delete: - test.fail("Error, free hugepages doesn't match with total hugepages after deleting mem-path file") + test.fail( + "Error, free hugepages doesn't match with total hugepages after deleting mem-path file" + ) diff --git a/qemu/tests/hotplug_mem_simple.py b/qemu/tests/hotplug_mem_simple.py index 36bacfe239..52ecb0bdb8 100644 --- a/qemu/tests/hotplug_mem_simple.py +++ b/qemu/tests/hotplug_mem_simple.py @@ -16,15 +16,14 @@ def run(test, params, env): :param env: Dictionary with test environment. 
""" vm = env.get_vm(params["main_vm"]) - session = vm.wait_for_login() + vm.wait_for_login() mem_name = params["target_mems"] hotplug_test = MemoryHotplugTest(test, params, env) hotplug_test.hotplug_memory(vm, mem_name) hotplug_test.check_memory(vm) - if params['os_type'] == 'linux': + if params["os_type"] == "linux": stress_args = params.get("stress_args") - stress_test = utils_test.VMStress(vm, "stress", params, - stress_args=stress_args) + stress_test = utils_test.VMStress(vm, "stress", params, stress_args=stress_args) stress_test.load_stress_tool() time.sleep(60) stress_test.unload_stress() diff --git a/qemu/tests/hotplug_mem_stress_ng.py b/qemu/tests/hotplug_mem_stress_ng.py index 8107f9dcc1..055e18a08f 100644 --- a/qemu/tests/hotplug_mem_stress_ng.py +++ b/qemu/tests/hotplug_mem_stress_ng.py @@ -1,5 +1,4 @@ -from virttest import utils_test -from virttest import error_context +from virttest import error_context, utils_test from virttest.utils_test.qemu import MemoryHotplugTest @@ -35,7 +34,7 @@ def run(test, params, env): cmd = run_stress % arg status, output = session.cmd_status_output(cmd, timeout=1500) if status: - test.fail("Stress_ng cmd '%s' failed with '%s'" % (cmd, output)) + test.fail(f"Stress_ng cmd '{cmd}' failed with '{output}'") hotplug_test.unplug_memory(vm, mem_name) hotplug_test.check_memory(vm) hotplug_test.hotplug_memory(vm, mem_name) @@ -45,5 +44,5 @@ def run(test, params, env): hotplug_test.check_memory(vm) finally: if session: - session.cmd_output_safe("rm -rf %s" % params["stress_ng_dir"]) + session.cmd_output_safe("rm -rf {}".format(params["stress_ng_dir"])) session.close() diff --git a/qemu/tests/hotplug_port_chardev_pci_with_console.py b/qemu/tests/hotplug_port_chardev_pci_with_console.py index e138753b86..29bce4e8ae 100644 --- a/qemu/tests/hotplug_port_chardev_pci_with_console.py +++ b/qemu/tests/hotplug_port_chardev_pci_with_console.py @@ -1,7 +1,6 @@ from virttest import error_context -from qemu.tests.virtio_console import add_chardev -from qemu.tests.virtio_console import add_virtio_ports_to_vm +from qemu.tests.virtio_console import add_chardev, add_virtio_ports_to_vm from qemu.tests.virtio_serial_file_transfer import transfer_data from qemu.tests.virtio_serial_hotplug_port_pci import get_buses_and_serial_devices @@ -14,8 +13,8 @@ def get_virtio_serial_pci(vm, serial_device): :param serial_device: serial device :return: virtio-serial-pci id """ - serial_device_bus = serial_device.get_param('bus') - serial_bus_id = serial_device_bus.split('.')[0] + serial_device_bus = serial_device.get_param("bus") + serial_bus_id = serial_device_bus.split(".")[0] return vm.devices.get(serial_bus_id) @@ -34,17 +33,16 @@ def run(test, params, env): :param params: Dictionary with the test parameters :param env: Dictionary with test environment """ - vm = env.get_vm(params['main_vm']) + vm = env.get_vm(params["main_vm"]) char_devices = add_chardev(vm, params) - serials = params.objects('extra_serials') - serial_devices = get_buses_and_serial_devices( - vm, params, char_devices, serials)[1] + serials = params.objects("extra_serials") + serial_devices = get_buses_and_serial_devices(vm, params, char_devices, serials)[1] vm.devices.simple_hotplug(char_devices[0], vm.monitor) vm.devices.simple_hotplug(serial_devices[0], vm.monitor) for device in serial_devices: add_virtio_ports_to_vm(vm, params, device) - params['file_transfer_serial_port'] = serials[0] - transfer_data(params, vm, sender='both') + params["file_transfer_serial_port"] = serials[0] + transfer_data(params, vm, 
sender="both") if params.get("unplug_pci") == "yes": virtio_serial_pci = get_virtio_serial_pci(vm, serial_devices[0]) vm.devices.simple_unplug(virtio_serial_pci, vm.monitor) diff --git a/qemu/tests/hotplug_unplug_during_io_repeat.py b/qemu/tests/hotplug_unplug_during_io_repeat.py index e5cb6cf349..ab3e210c29 100644 --- a/qemu/tests/hotplug_unplug_during_io_repeat.py +++ b/qemu/tests/hotplug_unplug_during_io_repeat.py @@ -1,10 +1,7 @@ import time from avocado.utils import process - -from virttest import error_context -from virttest import utils_misc -from virttest import utils_disk +from virttest import error_context, utils_disk, utils_misc from provider.block_devices_plug import BlockDevicesPlug from provider.storage_benchmark import generate_instance @@ -25,70 +22,82 @@ def run(test, params, env): :param params: Dictionary with the test parameters. :param env: Dictionary with test environment. """ + def _check_iozone_status(): ck_session = vm.wait_for_login(timeout=360) if not utils_misc.wait_for( - lambda: check_cmd[os_type].split()[-1].lower() in ck_session.cmd_output( - check_cmd[os_type]).lower(), 180, step=3.0): + lambda: check_cmd[os_type].split()[-1].lower() + in ck_session.cmd_output(check_cmd[os_type]).lower(), + 180, + step=3.0, + ): test.fail("Iozone is not alive!") ck_session.close() def _run_iozone_background(): test.log.info("Start iozone under background.") thread = utils_misc.InterruptedThread( - iozone.run, (params['iozone_options'].format(mount_point), # pylint: disable=E0606 - float(params['iozone_timeout']))) + iozone.run, + ( + params["iozone_options"].format(mount_point), # pylint: disable=E0606 + float(params["iozone_timeout"]), + ), + ) thread.start() _check_iozone_status() return thread - check_cmd = {'linux': 'pgrep -lx iozone', - 'windows': 'TASKLIST /FI "IMAGENAME eq IOZONE.EXE'} - os_type = params['os_type'] + check_cmd = { + "linux": "pgrep -lx iozone", + "windows": 'TASKLIST /FI "IMAGENAME eq IOZONE.EXE', + } + os_type = params["os_type"] vm = env.get_vm(params["main_vm"]) vm.verify_alive() session = vm.wait_for_login(timeout=360) - iozone = generate_instance(params, vm, 'iozone') + iozone = generate_instance(params, vm, "iozone") plug = BlockDevicesPlug(vm) need_format = True try: - for i in range(int(params['repeat_time'])): - test.log.info('Start to run testing.(iteration: %d).', (i + 1)) + for i in range(int(params["repeat_time"])): + test.log.info("Start to run testing.(iteration: %d).", (i + 1)) plug.hotplug_devs_serial() dev = plug[0] if need_format: - if os_type == 'windows': + if os_type == "windows": utils_disk.update_windows_disk_attributes(session, dev) else: full_dev = "/dev/" + dev - cmd = "dd if=/dev/zero of={0} bs=1M count=64 oflag=direct "\ - "&& sleep 1; partprobe {0}".format(full_dev) + cmd = ( + f"dd if=/dev/zero of={full_dev} bs=1M count=64 oflag=direct " + f"&& sleep 1; partprobe {full_dev}" + ) session.cmd(cmd) mount_point = utils_disk.configure_empty_disk( - session, dev, params['image_size_stg0'], os_type)[0] - if os_type == 'windows': + session, dev, params["image_size_stg0"], os_type + )[0] + if os_type == "windows": need_format = False iozone_thread = _run_iozone_background() - time.sleep(float(params['sleep_time'])) + time.sleep(float(params["sleep_time"])) _check_iozone_status() plug.unplug_devs_serial() iozone_thread.join(suppress_exception=True) - if need_format and os_type != 'windows': + if need_format and os_type != "windows": test.log.info("umount dev:%s", dev) - session.cmd( - "mount|grep {0} ; umount /mnt/{0}1 && sleep 
3".format(dev)) + session.cmd(f"mount|grep {dev} ; umount /mnt/{dev}1 && sleep 3") except Exception as e: pid = vm.get_pid() test.log.debug("Find %s Exception:'%s'.", pid, str(e)) if pid: logdir = test.logdir - process.getoutput("gstack %s > %s/gstack.log" % (pid, logdir)) + process.getoutput(f"gstack {pid} > {logdir}/gstack.log") process.getoutput( - "timeout 20 strace -tt -T -v -f -s 32 -p %s -o %s/strace.log" % ( - pid, logdir)) + f"timeout 20 strace -tt -T -v -f -s 32 -p {pid} -o {logdir}/strace.log" + ) else: test.log.debug("VM dead...") raise e diff --git a/qemu/tests/hotplug_virtio_mem.py b/qemu/tests/hotplug_virtio_mem.py index 0a3bc88c0c..c83ceff146 100644 --- a/qemu/tests/hotplug_virtio_mem.py +++ b/qemu/tests/hotplug_virtio_mem.py @@ -1,9 +1,6 @@ import time -from virttest import utils_qemu -from virttest import error_context -from virttest import utils_misc - +from virttest import error_context, utils_misc, utils_qemu from virttest.qemu_monitor import QMPCmdError from virttest.utils_misc import normalize_data_size from virttest.utils_test.qemu import MemoryHotplugTest @@ -45,7 +42,7 @@ def run(test, params, env): _, vmem_dev = hotplug_test.hotplug_memory(vm, target_mem) device_id = vmem_dev.get_qid() - requested_size_vmem_test = params.get("requested-size_test_%s" % target_mem) + requested_size_vmem_test = params.get(f"requested-size_test_{target_mem}") node_id = int(vmem_dev.get_param("node")) req_size = vmem_dev.get_param("requested-size") @@ -67,17 +64,16 @@ def run(test, params, env): virtio_mem_utils.check_numa_plugged_mem( node_id, requested_size, threshold, vm, test ) - if qemu_version in VersionInterval('[8.1.0,)'): + if qemu_version in VersionInterval("[8.1.0,)"): try: hotplug_test.unplug_memory(vm, target_mem) except QMPCmdError as e: if error_msg not in str(e.data): - test.fail("Unexpected error message: %s" % str(e.data)) + test.fail(f"Unexpected error message: {str(e.data)}") test.log.info(error_msg) else: test.fail( - "%s shouldn't have been unplugged! 'size' is greater than 0" - % target_mem + f"{target_mem} shouldn't have been unplugged! 'size' is greater than 0" ) vm.monitor.qom_set(device_id, "requested-size", 0) diff --git a/qemu/tests/hpt_huge_page_negative.py b/qemu/tests/hpt_huge_page_negative.py index fc9341e755..6d72e80f2d 100644 --- a/qemu/tests/hpt_huge_page_negative.py +++ b/qemu/tests/hpt_huge_page_negative.py @@ -1,7 +1,6 @@ import re -from virttest import error_context -from virttest import virt_vm +from virttest import error_context, virt_vm @error_context.context_aware @@ -17,15 +16,15 @@ def run(test, params, env): """ vm = env.get_vm(params["main_vm"]) - error_msg = params.get('error_msg') + error_msg = params.get("error_msg") try: vm.create(params=params) except virt_vm.VMCreateError as e: o = e.output else: test.fail("Test failed since vm shouldn't be launched") - error_context.context("Check the expected error message: %s" - % error_msg, test.log.info) + error_context.context( + f"Check the expected error message: {error_msg}", test.log.info + ) if not re.search(error_msg, o): # pylint: disable=E0601 - test.fail("Can not get expected error message: %s from %s" - % (error_msg, o)) + test.fail(f"Can not get expected error message: {error_msg} from {o}") diff --git a/qemu/tests/hpt_max_page_size.py b/qemu/tests/hpt_max_page_size.py index 8ec160dfbb..b40b6fc1c1 100644 --- a/qemu/tests/hpt_max_page_size.py +++ b/qemu/tests/hpt_max_page_size.py @@ -25,10 +25,12 @@ def run(test, params, env): :params params: Dictionary with the test parameters. 
:params env: Dictionary with test environment. """ + def _check_meminfo(key): - meminfo = session.cmd_output("grep %s /proc/meminfo" % key) - actual_value = re.search(r'\d{4,}', meminfo) + meminfo = session.cmd_output(f"grep {key} /proc/meminfo") + actual_value = re.search(r"\d{4,}", meminfo) return actual_value.group(0) if actual_value else "" + timeout = params.get_numeric("login_timeout", 240) vm = env.get_vm(params["main_vm"]) vm.verify_alive() @@ -36,19 +38,24 @@ def _check_meminfo(key): error_context.context("Check output Hugepage size.", test.log.info) if _check_meminfo("Hugepagesize") != params["expected_value"]: - test.fail("The hugepage size doesn't match, " - "please check meminfo: %s " % _check_meminfo("Huge")) + test.fail( + "The hugepage size doesn't match, " "please check meminfo: {} ".format( + _check_meminfo("Huge") + ) + ) # Please set 1G huge page as default huge page size on power9 if params.get("sub_type") == "hugepage_reset": origin_nr = params.get("origin_nr") - error_context.context("Setup hugepage number to %s in guest" % origin_nr, test.log.info) + error_context.context( + f"Setup hugepage number to {origin_nr} in guest", test.log.info + ) set_hugepage_cmd = params.get("set_hugepage_cmd") if session.cmd_status(set_hugepage_cmd): test.fail("Failed to assign nr in the guest") check_result_cmd = params.get("check_result_cmd") output = session.cmd_output(check_result_cmd) - result_value = re.search(r'\d{1,}', output).group(0) + result_value = re.search(r"\d{1,}", output).group(0) if result_value != origin_nr: - test.fail("Assigned nr %s doesn't match expected %s" % (result_value, origin_nr)) + test.fail(f"Assigned nr {result_value} doesn't match expected {origin_nr}") vm.verify_kernel_crash() vm.destroy() diff --git a/qemu/tests/hpt_miscellaneous.py b/qemu/tests/hpt_miscellaneous.py index b0a9d1214e..6afffadfaf 100644 --- a/qemu/tests/hpt_miscellaneous.py +++ b/qemu/tests/hpt_miscellaneous.py @@ -1,12 +1,10 @@ import logging import re - -from virttest import error_context +from virttest import error_context, utils_misc from virttest.utils_test.qemu import MemoryHotplugTest -from virttest import utils_misc -LOG_JOB = logging.getLogger('avocado.test') +LOG_JOB = logging.getLogger("avocado.test") def set_hpt(session, params, test, hpt_size): @@ -26,7 +24,7 @@ def set_hpt(session, params, test, hpt_size): if re.search(r"Input/output error", o): return else: - test.fail("Set hpt test failed,please check:'%s'" % o) + test.fail(f"Set hpt test failed,please check:'{o}'") def verify_hpt(test, params, session, hpt_size): @@ -38,18 +36,18 @@ def verify_hpt(test, params, session, hpt_size): test.fail("Fail to get HPT value") else: if int(get_hpt_value) != hpt_size: - test.fail("HPT order not match! '%s' vs '%s'" - % (get_hpt_value, hpt_size)) + test.fail(f"HPT order not match! 
'{get_hpt_value}' vs '{hpt_size}'") def check_mem_increase(session, params, orig_mem, increase_mem): """Check the size of memory increased.""" - new_mem = int(session.cmd_output(cmd=params['free_mem_cmd'])) + new_mem = int(session.cmd_output(cmd=params["free_mem_cmd"])) if (new_mem - orig_mem) == increase_mem: error_context.context( - 'Get guest free memory size after hotplug pc-dimm.', LOG_JOB.info) - LOG_JOB.debug('Guest free memory size is %d bytes', new_mem) - LOG_JOB.info("Guest memory size is increased %s.", params['expected_size']) + "Get guest free memory size after hotplug pc-dimm.", LOG_JOB.info + ) + LOG_JOB.debug("Guest free memory size is %d bytes", new_mem) + LOG_JOB.info("Guest memory size is increased %s.", params["expected_size"]) return True return False @@ -98,20 +96,22 @@ def run(test, params, env): hpt_default = int(get_hpt_def) test.log.debug("Default hpt order : '%s'", get_hpt_def) increment_sequence = params.get("increment_sequence").split() - error_context.context("hpt changes according to increment", - test.log.info) + error_context.context("hpt changes according to increment", test.log.info) if params.get("sub_type") == "mem": # For HPT reszing after hotplug memory - orig_mem = int(session.cmd_output(cmd=params['free_mem_cmd'])) + orig_mem = int(session.cmd_output(cmd=params["free_mem_cmd"])) hpt_mem = MemoryHotplugTest(test, params, env) - hpt_mem.hotplug_memory(vm, params['plug_mem_name']) - increase_mem = int(params['expected_size']) - test.log.debug('Guest free memory size is %d bytes', orig_mem) - plug_timeout = float(params.get('plug_timeout', 20)) - if not utils_misc.wait_for(lambda: check_mem_increase( - session, params, orig_mem, increase_mem), plug_timeout): - test.fail("Guest memory size is not increased %s in %s sec." - % (increase_mem, plug_timeout)) + hpt_mem.hotplug_memory(vm, params["plug_mem_name"]) + increase_mem = int(params["expected_size"]) + test.log.debug("Guest free memory size is %d bytes", orig_mem) + plug_timeout = float(params.get("plug_timeout", 20)) + if not utils_misc.wait_for( + lambda: check_mem_increase(session, params, orig_mem, increase_mem), + plug_timeout, + ): + test.fail( + f"Guest memory size is not increased {increase_mem} in {plug_timeout} sec." + ) for increm in increment_sequence: hpt_size = hpt_size + int(increm) set_hpt(session, params, test, hpt_size) diff --git a/qemu/tests/hugepage_mem_stress.py b/qemu/tests/hugepage_mem_stress.py index f37d0fe65e..0435bb3733 100644 --- a/qemu/tests/hugepage_mem_stress.py +++ b/qemu/tests/hugepage_mem_stress.py @@ -1,9 +1,5 @@ -from virttest import error_context -from virttest import utils_misc -from virttest import utils_test - -from virttest.utils_test import BackgroundTest -from virttest.utils_test import utils_memory +from virttest import error_context, utils_misc, utils_test +from virttest.utils_test import BackgroundTest, utils_memory @error_context.context_aware @@ -20,10 +16,13 @@ def run(test, params, env): :params params: Dictionary with the test parameters. :params env: Dictionary with test environment. 
""" + def heavyload_install(): if session.cmd_status(test_installed_cmd) != 0: - test.log.warning("Could not find installed heavyload in guest, will" - " install it via winutils.iso ") + test.log.warning( + "Could not find installed heavyload in guest, will" + " install it via winutils.iso " + ) winutil_drive = utils_misc.get_winutils_vol(session) if not winutil_drive: test.cancel("WIN_UTILS CDROM not found.") @@ -40,9 +39,11 @@ def heavyload_install(): error_context.context("Run memory heavy stress in guest", test.log.info) if os_type == "linux": stress_args = params["stress_custom_args"] % ( - params.get_numeric("mem") / 512) - stress_test = utils_test.VMStress(vm, "stress", - params, stress_args=stress_args) + params.get_numeric("mem") / 512 + ) + stress_test = utils_test.VMStress( + vm, "stress", params, stress_args=stress_args + ) try: stress_test.load_stress_tool() utils_misc.wait_for(lambda: (stress_test.app_running is False), 30) @@ -51,26 +52,33 @@ def heavyload_install(): stress_test.clean() else: install_path = params["install_path"] - test_installed_cmd = 'dir "%s" | findstr /I heavyload' % install_path + test_installed_cmd = f'dir "{install_path}" | findstr /I heavyload' heavyload_install() error_context.context("Run heavyload inside guest.", test.log.info) - heavyload_bin = r'"%s\heavyload.exe" ' % install_path - heavyload_options = ["/MEMORY %d" % (params.get_numeric("mem") / 512), - "/DURATION %d" % (stress_duration // 60), - "/AUTOEXIT", - "/START"] + heavyload_bin = rf'"{install_path}\heavyload.exe" ' + heavyload_options = [ + "/MEMORY %d" % (params.get_numeric("mem") / 512), + "/DURATION %d" % (stress_duration // 60), + "/AUTOEXIT", + "/START", + ] start_cmd = heavyload_bin + " ".join(heavyload_options) - stress_tool = BackgroundTest(session.cmd, (start_cmd, - stress_duration, stress_duration)) + stress_tool = BackgroundTest( + session.cmd, (start_cmd, stress_duration, stress_duration) + ) stress_tool.start() if not utils_misc.wait_for(stress_tool.is_alive, stress_duration): test.error("Failed to start heavyload process.") stress_tool.join(stress_duration) if params.get_boolean("non_existent_point"): - error_context.context("Check large memory pages free on host.", - test.log.info) - if utils_memory.get_num_huge_pages() != utils_memory.get_num_huge_pages_free(): + error_context.context( + "Check large memory pages free on host.", test.log.info + ) + if ( + utils_memory.get_num_huge_pages() + != utils_memory.get_num_huge_pages_free() + ): test.fail("HugePages leaked.") finally: session.close() diff --git a/qemu/tests/hugepage_reset.py b/qemu/tests/hugepage_reset.py index 3107d94f2e..d1fd4f00b4 100644 --- a/qemu/tests/hugepage_reset.py +++ b/qemu/tests/hugepage_reset.py @@ -1,10 +1,6 @@ import time -from virttest import env_process -from virttest import test_setup -from virttest import utils_test -from virttest import utils_misc -from virttest import error_context +from virttest import env_process, error_context, test_setup, utils_misc, utils_test from virttest.staging import utils_memory from virttest.utils_test import BackgroundTest @@ -31,80 +27,92 @@ def set_hugepage(): for h_size in (origin_nr - 2, origin_nr + 2): hp_config.target_hugepages = h_size hp_config.set_hugepages() - if params.get('on_numa_node'): - test.log.info('Set hugepage size %s to target node %s', - h_size, target_node) - hp_config.set_node_num_huge_pages(h_size, target_node, - hugepage_size) + if params.get("on_numa_node"): + test.log.info( + "Set hugepage size %s to target node %s", h_size, 
target_node + ) + hp_config.set_node_num_huge_pages( + h_size, target_node, hugepage_size + ) except ValueError as err: test.cancel(err) def run_stress(): def heavyload_install(): - if session.cmd_status(test_install_cmd) != 0: # pylint: disable=E0606 - test.log.warning("Could not find installed heavyload in guest, " - "will install it via winutils.iso ") + if session.cmd_status(test_install_cmd) != 0: # pylint: disable=E0606 + test.log.warning( + "Could not find installed heavyload in guest, " + "will install it via winutils.iso " + ) winutil_drive = utils_misc.get_winutils_vol(session) if not winutil_drive: test.cancel("WIN_UTILS CDROM not found.") install_cmd = params["install_cmd"] % winutil_drive session.cmd(install_cmd) - test.log.info('Loading stress on guest.') + test.log.info("Loading stress on guest.") stress_duration = params.get("stress_duration", 60) if params["os_type"] == "linux": - params['stress_args'] = '--vm %s --vm-bytes 256M --timeout %s' % ( - mem // 512, stress_duration) - stress = utils_test.VMStress(vm, 'stress', params) + params["stress_args"] = ( + f"--vm {mem // 512} --vm-bytes 256M --timeout {stress_duration}" + ) + stress = utils_test.VMStress(vm, "stress", params) stress.load_stress_tool() time.sleep(stress_duration) stress.unload_stress() else: session = vm.wait_for_login() install_path = params["install_path"] - test_install_cmd = 'dir "%s" | findstr /I heavyload' % install_path + test_install_cmd = f'dir "{install_path}" | findstr /I heavyload' heavyload_install() - heavyload_bin = r'"%s\heavyload.exe" ' % install_path - heavyload_options = ["/MEMORY 100", - "/DURATION %d" % (stress_duration // 60), - "/AUTOEXIT", - "/START"] + heavyload_bin = rf'"{install_path}\heavyload.exe" ' + heavyload_options = [ + "/MEMORY 100", + "/DURATION %d" % (stress_duration // 60), + "/AUTOEXIT", + "/START", + ] start_cmd = heavyload_bin + " ".join(heavyload_options) - stress_tool = BackgroundTest(session.cmd, (start_cmd, - stress_duration, - stress_duration)) + stress_tool = BackgroundTest( + session.cmd, (start_cmd, stress_duration, stress_duration) + ) stress_tool.start() if not utils_misc.wait_for(stress_tool.is_alive, 30): test.error("Failed to start heavyload process") stress_tool.join(stress_duration) - origin_nr = int(params['origin_nr']) + origin_nr = int(params["origin_nr"]) host_numa_node = utils_misc.NumaInfo() - mem = int(float(utils_misc.normalize_data_size("%sM" % params["mem"]))) - if params.get('on_numa_node'): + mem = int(float(utils_misc.normalize_data_size("{}M".format(params["mem"])))) + if params.get("on_numa_node"): for target_node in host_numa_node.get_online_nodes_withmem(): node_mem_free = host_numa_node.read_from_node_meminfo( - target_node, 'MemFree') + target_node, "MemFree" + ) if int(node_mem_free) > mem: - params['target_nodes'] = target_node - params["qemu_command_prefix"] = ("numactl --membind=%s" % - target_node) - params['target_num_node%s' % target_node] = origin_nr + params["target_nodes"] = target_node + params["qemu_command_prefix"] = f"numactl --membind={target_node}" + params[f"target_num_node{target_node}"] = origin_nr break test.log.info( - 'The free memory of node %s is %s, is not enough for' - ' guest memory: %s', target_node, node_mem_free, mem) + "The free memory of node %s is %s, is not enough for" + " guest memory: %s", + target_node, + node_mem_free, + mem, + ) else: - test.cancel("No node on your host has sufficient free memory for " - "this test.") + test.cancel( + "No node on your host has sufficient free memory for " 
"this test." + ) hp_config = test_setup.HugePageConfig(params) hp_config.target_hugepages = origin_nr - test.log.info('Setup hugepage number to %s', origin_nr) + test.log.info("Setup hugepage number to %s", origin_nr) hp_config.setup() hugepage_size = utils_memory.get_huge_page_size() params["hugepage_path"] = hp_config.hugepage_path - params['start_vm'] = "yes" - vm_name = params['main_vm'] + params["start_vm"] = "yes" + vm_name = params["main_vm"] env_process.preprocess_vm(test, params, env, vm_name) vm = env.get_vm(vm_name) run_stress() diff --git a/qemu/tests/hugepage_specify_node.py b/qemu/tests/hugepage_specify_node.py index 8d4e4affda..d08c759429 100644 --- a/qemu/tests/hugepage_specify_node.py +++ b/qemu/tests/hugepage_specify_node.py @@ -2,12 +2,7 @@ import re from avocado.utils import memory - -from virttest import env_process -from virttest import error_context -from virttest import test_setup -from virttest import utils_misc - +from virttest import env_process, error_context, test_setup, utils_misc from virttest.utils_numeric import normalize_data_size @@ -27,8 +22,8 @@ def run(test, params, env): """ memory.drop_caches() hugepage_size = params.get_numeric("hugepage_size", memory.get_huge_page_size()) - mem_size = int(normalize_data_size("%sM" % params["mem"], "K")) - idle_node_mem = int(normalize_data_size("%sM" % params["idle_node_mem"], "K")) + mem_size = int(normalize_data_size("{}M".format(params["mem"]), "K")) + idle_node_mem = int(normalize_data_size("{}M".format(params["idle_node_mem"]), "K")) error_context.context("Get host numa topological structure.", test.log.info) host_numa_node = utils_misc.NumaInfo() @@ -37,8 +32,10 @@ def run(test, params, env): node_meminfo = host_numa_node.get_all_node_meminfo() for node_id in node_list: - error_context.base_context("Check preprocess HugePages Free on host " - "numa node %s." % node_id, test.log.info) + error_context.base_context( + "Check preprocess HugePages Free on host " f"numa node {node_id}.", + test.log.info, + ) node_memfree = int(node_meminfo[node_id]["MemFree"]) if node_memfree < idle_node_mem: idle_node_list.remove(node_id) @@ -46,27 +43,34 @@ def run(test, params, env): node_list.remove(node_id) if len(idle_node_list) < 2 or not node_list: - test.cancel("Host node does not have enough nodes to run the test, " - "skipping test...") + test.cancel( + "Host node does not have enough nodes to run the test, " "skipping test..." + ) for node_id in node_list: - error_context.base_context("Specify qemu process only allocate " - "HugePages from node%s." % node_id, test.log.info) - params["target_nodes"] = "%s" % node_id + error_context.base_context( + "Specify qemu process only allocate " f"HugePages from node{node_id}.", + test.log.info, + ) + params["target_nodes"] = f"{node_id}" huge_pages_num = math.ceil(mem_size / hugepage_size) - params["target_num_node%s" % node_id] = huge_pages_num - error_context.context("Setup huge pages for specify node%s." % - node_id, test.log.info) + params[f"target_num_node{node_id}"] = huge_pages_num + error_context.context( + f"Setup huge pages for specify node{node_id}.", test.log.info + ) check_list = [_ for _ in idle_node_list if _ != node_id] for idle_node in check_list: - params["target_nodes"] += " %s" % idle_node - params["target_num_node%s" % idle_node] = math.ceil(idle_node_mem / hugepage_size) - error_context.context("Setup huge pages for idle node%s." 
% - idle_node, test.log.info) + params["target_nodes"] += f" {idle_node}" + params[f"target_num_node{idle_node}"] = math.ceil( + idle_node_mem / hugepage_size + ) + error_context.context( + f"Setup huge pages for idle node{idle_node}.", test.log.info + ) hp_config = test_setup.HugePageConfig(params) try: hp_config.setup() - params["qemu_command_prefix"] = "numactl --membind=%s" % node_id + params["qemu_command_prefix"] = f"numactl --membind={node_id}" params["start_vm"] = "yes" params["hugepage_path"] = hp_config.hugepage_path env_process.preprocess_vm(test, params, env, params["main_vm"]) @@ -77,23 +81,32 @@ def run(test, params, env): meminfo = host_numa_node.get_all_node_meminfo() for index in check_list: - error_context.base_context("Check process HugePages Free on host " - "numa node %s." % index, test.log.info) + error_context.base_context( + "Check process HugePages Free on host " f"numa node {index}.", + test.log.info, + ) hugepages_free = int(meminfo[index]["HugePages_Free"]) if int(node_meminfo[index]["HugePages_Free"]) > hugepages_free: - test.fail("Qemu still use HugePages from other node." - "Expect: node%s, used: node%s." % (node_id, index)) + test.fail( + "Qemu still use HugePages from other node." + f"Expect: node{node_id}, used: node{index}." + ) finally: vm.destroy() except Exception as details: # Check if there have enough contiguous Memory on the host node details = str(details) - if re.findall(r"Cannot set %s hugepages" % huge_pages_num, details): - test.cancel("Host node does not have enough contiguous memory " - "to run the test, skipping test...") + if re.findall(rf"Cannot set {huge_pages_num} hugepages", details): + test.cancel( + "Host node does not have enough contiguous memory " + "to run the test, skipping test..." + ) test.error(details) finally: hp_config.cleanup() utils_misc.wait_for( - lambda: int(hp_config.get_node_num_huge_pages(node_id, hugepage_size)) == 0, - first=2.0, timeout=10) + lambda: int(hp_config.get_node_num_huge_pages(node_id, hugepage_size)) + == 0, + first=2.0, + timeout=10, + ) diff --git a/qemu/tests/hv_avic.py b/qemu/tests/hv_avic.py index 03a79e3185..319443c97b 100644 --- a/qemu/tests/hv_avic.py +++ b/qemu/tests/hv_avic.py @@ -1,4 +1,5 @@ import time + from virttest import error_context @@ -46,15 +47,17 @@ def install_epel_repo(): status = session.cmd_status(cpuid_chk_cmd) if status: install_epel_repo() - status = session.cmd_status("yum -y install %s" % cpuid_pkg, timeout=300) + status = session.cmd_status(f"yum -y install {cpuid_pkg}", timeout=300) if status: test.error("Fail to install target cpuid") - error_context.context("Check the corresponding CPUID entries with " - "the flag 'hv-avic'", test.log.info) + error_context.context( + "Check the corresponding CPUID entries with " "the flag 'hv-avic'", + test.log.info, + ) output = session.cmd_output(check_cpuid_entry_cmd) - eax_value = output.splitlines()[-1].split()[2].split('0x')[-1] - eax_value = bin(int(eax_value, 16)).split('0b')[-1] - if eax_value[-4] != '0': - test.fail('CPUID 0x40000004.EAX BIT(3) not cleared') - if eax_value[-10] == '0': - test.fail('CPUID 0x40000004.EAX BIT(9) not set') + eax_value = output.splitlines()[-1].split()[2].split("0x")[-1] + eax_value = bin(int(eax_value, 16)).split("0b")[-1] + if eax_value[-4] != "0": + test.fail("CPUID 0x40000004.EAX BIT(3) not cleared") + if eax_value[-10] == "0": + test.fail("CPUID 0x40000004.EAX BIT(9) not set") diff --git a/qemu/tests/hv_check_cpu_utilization.py b/qemu/tests/hv_check_cpu_utilization.py index 
54d116913c..9936f0d357 100644 --- a/qemu/tests/hv_check_cpu_utilization.py +++ b/qemu/tests/hv_check_cpu_utilization.py @@ -1,14 +1,12 @@ -import time -import threading -import re import logging +import re +import threading +import time -from virttest import utils_misc -from virttest import utils_test -from virttest import error_context from avocado.utils import process +from virttest import error_context, utils_misc, utils_test -LOG_JOB = logging.getLogger('avocado.test') +LOG_JOB = logging.getLogger("avocado.test") def _check_cpu_usage(session): @@ -18,10 +16,9 @@ def _check_cpu_usage(session): param session: a session object to send wmic commands """ - status, output = session.cmd_status_output( - "wmic cpu get loadpercentage /value") + status, output = session.cmd_status_output("wmic cpu get loadpercentage /value") if not status: - result = re.search(r'LoadPercentage=(\d+)', output) + result = re.search(r"LoadPercentage=(\d+)", output) if result: percent = int(result.group(1)) if percent > 1: @@ -60,18 +57,17 @@ def _stop_service(test, params, session, service): service_stop_cmd = params.get("service_stop_cmd") s, o = session.cmd_status_output("sc query") if s: - test.error("Failed to query service list, " - "status=%s, output=%s" % (s, o)) - service_item = re.search( - r'SERVICE_NAME:\s+%s' % service, o, re.I | re.M) + test.error("Failed to query service list, " f"status={s}, output={o}") + service_item = re.search(rf"SERVICE_NAME:\s+{service}", o, re.I | re.M) if not service_item: return s, o = session.cmd_status_output(service_check_cmd % service) if s: - test.error("Failed to get status for service: %s, " - "status=%s, output=%s" % (service, s, o)) - if re.search(r'STOPPED', o, re.I | re.M): + test.error( + f"Failed to get status for service: {service}, " f"status={s}, output={o}" + ) + if re.search(r"STOPPED", o, re.I | re.M): return session.cmd(service_stop_cmd.format(service)) @@ -119,10 +115,10 @@ def run(test, params, env): # stop windows defender if set_owner_cmd and set_full_control_cmd: - set_owner_cmd = utils_misc.set_winutils_letter(session, - set_owner_cmd) + set_owner_cmd = utils_misc.set_winutils_letter(session, set_owner_cmd) set_full_control_cmd = utils_misc.set_winutils_letter( - session, set_full_control_cmd) + session, set_full_control_cmd + ) session.cmd(set_owner_cmd) session.cmd(set_full_control_cmd) session.cmd(params["reg_cmd"]) @@ -137,8 +133,9 @@ def run(test, params, env): time.sleep(1800) # start background checking guest cpu usage - thread = threading.Thread(target=_check_cpu_thread_func, - args=(session, guest_check_timeout)) + thread = threading.Thread( + target=_check_cpu_thread_func, args=(session, guest_check_timeout) + ) thread.start() time.sleep(60) @@ -147,14 +144,16 @@ def run(test, params, env): process.system(params["host_check_cmd"] % pid, shell=True) thread.join(guest_check_timeout + 360) - vcpu_thread_pattern = params.get("vcpu_thread_pattern", - r'thread_id.?[:|=]\s*(\d+)') + vcpu_thread_pattern = params.get("vcpu_thread_pattern", r"thread_id.?[:|=]\s*(\d+)") vcpu_ids = vm.get_vcpu_pids(vcpu_thread_pattern) for thread_id in vcpu_ids: # output result host_cpu_usage = process.system_output( - params["thread_process_cmd"] % thread_id, shell=True) + params["thread_process_cmd"] % thread_id, shell=True + ) host_cpu_usage = float(host_cpu_usage.decode()) if host_cpu_usage > thread_cpu_level: - test.fail("The cpu usage of thread %s is %s" - " > %s" % (thread_id, host_cpu_usage, thread_cpu_level)) + test.fail( + f"The cpu usage of thread 
{thread_id} is {host_cpu_usage}" + f" > {thread_cpu_level}" + ) diff --git a/qemu/tests/hv_crash.py b/qemu/tests/hv_crash.py index bed863abd3..07d71fc486 100644 --- a/qemu/tests/hv_crash.py +++ b/qemu/tests/hv_crash.py @@ -1,6 +1,4 @@ -from virttest import error_context -from virttest import env_process -from virttest import utils_misc +from virttest import env_process, error_context, utils_misc @error_context.context_aware diff --git a/qemu/tests/hv_enforce_cpuid_msr_check.py b/qemu/tests/hv_enforce_cpuid_msr_check.py index fa8a449f6c..08ef3ce6e4 100644 --- a/qemu/tests/hv_enforce_cpuid_msr_check.py +++ b/qemu/tests/hv_enforce_cpuid_msr_check.py @@ -1,4 +1,5 @@ from virttest import error_context + from provider import qemu_img_utils @@ -43,38 +44,41 @@ def _run_msr_tools(session): return res error_context.context("The case starts...", test.log.info) - error_context.context("Boot the guest with 'hv-enforce-cpuid ", - test.log.info) + error_context.context("Boot the guest with 'hv-enforce-cpuid ", test.log.info) session = None origin_flags = params["cpu_model_flags"] try: - params["cpu_model_flags"] = origin_flags + "," \ - + params.get("cpu_model_flags_with_enforce") + params["cpu_model_flags"] = ( + origin_flags + "," + params.get("cpu_model_flags_with_enforce") + ) vm = qemu_img_utils.boot_vm_with_images(test, params, env) session = vm.wait_for_login(timeout=360) _set_env(session) res_with_hv = _run_msr_tools(session) res_with_hv = res_with_hv.split("\n")[0] if res_with_hv != params.get("expect_result_with_enforce"): - test.fail("The output from the case of cpu with hv-enforce-cpuid " - "was NOT expected." - + " The tuple in return is : %s" % res_with_hv) + test.fail( + "The output from the case of cpu with hv-enforce-cpuid " + "was NOT expected." + f" The tuple in return is : {res_with_hv}" + ) finally: vm.destroy() - error_context.context("Boot the guest without 'hv-enforce-cpuid ", - test.log.info) + error_context.context("Boot the guest without 'hv-enforce-cpuid ", test.log.info) try: - params["cpu_model_flags"] = origin_flags + "," \ - + params.get("cpu_model_flags_without_enforce") + params["cpu_model_flags"] = ( + origin_flags + "," + params.get("cpu_model_flags_without_enforce") + ) vm = qemu_img_utils.boot_vm_with_images(test, params, env) session = vm.wait_for_login(timeout=360) _set_env(session) res_without_hv = _run_msr_tools(session) res_without_hv = res_without_hv.split("\n")[0] if res_without_hv != params.get("expect_result_without_enforce"): - test.fail("The output from the case of cpu without " - "hv-enforce-cpuid was NOT expected." - + " The tuple in return is : %s" % res_without_hv) + test.fail( + "The output from the case of cpu without " + "hv-enforce-cpuid was NOT expected." + + f" The tuple in return is : {res_without_hv}" + ) finally: vm.destroy() diff --git a/qemu/tests/hv_enforce_cpuid_smoke.py b/qemu/tests/hv_enforce_cpuid_smoke.py index f892ce7e7f..3a7122ea0a 100644 --- a/qemu/tests/hv_enforce_cpuid_smoke.py +++ b/qemu/tests/hv_enforce_cpuid_smoke.py @@ -1,4 +1,5 @@ from virttest import error_context + from provider import qemu_img_utils @@ -20,14 +21,17 @@ def run(test, params, env): cpu_model_flags_list = params.objects("cpu_model_flags_list") for cpu_model_flags in cpu_model_flags_list: try: - error_context.context("Start the guest with %s." 
- % cpu_model_flags, test.log.info) + error_context.context( + f"Start the guest with {cpu_model_flags}.", test.log.info + ) params["cpu_model_flags"] = cpu_model_flags vm = qemu_img_utils.boot_vm_with_images(test, params, env) vm.wait_for_login(timeout=360) except Exception: - res.append("Case was failed in smoke test with " - "parameter(s): %s \n " % cpu_model_flags) + res.append( + "Case was failed in smoke test with " + f"parameter(s): {cpu_model_flags} \n " + ) pass finally: if vm: @@ -37,6 +41,8 @@ def run(test, params, env): error_msg = "" for case in res: error_msg += case - test.fail("The failed message(s): \n" - + error_msg - + "The number of failed cases is: %s. " % len(res)) + test.fail( + "The failed message(s): \n" + + error_msg + + f"The number of failed cases is: {len(res)}. " + ) diff --git a/qemu/tests/hv_flag_cpuid_check.py b/qemu/tests/hv_flag_cpuid_check.py index 06d69d6af1..c3e2daa9f0 100644 --- a/qemu/tests/hv_flag_cpuid_check.py +++ b/qemu/tests/hv_flag_cpuid_check.py @@ -1,7 +1,7 @@ import re import time -from virttest import error_context -from virttest import env_process + +from virttest import env_process, error_context @error_context.context_aware @@ -43,10 +43,12 @@ def run_cpuid_check(check_register, check_bit): Param check_bit: Check bit in cpu register, like 14 bit in EAX register. """ - error_context.context("Check the corresponding CPUID entries with " - "the flag %s" % hv_flag, test.log.info) + error_context.context( + "Check the corresponding CPUID entries with " f"the flag {hv_flag}", + test.log.info, + ) output = session.cmd_output(check_cpuid_entry_cmd) - match = re.search(r'%s=0x([0-9a-fA-F]+)' % check_register, output) + match = re.search(rf"{check_register}=0x([0-9a-fA-F]+)", output) value = int(match.group(1), 16) bit_result = (value >> check_bit) & 0x01 return bit_result @@ -81,22 +83,22 @@ def install_epel_repo(): cpu_model_flags = params["cpu_model_flags"] hv_flags_to_ignore = params["hv_flags_to_ignore"].split() - error_context.context("Boot the guest with %s flag" % hv_flag, test.log.info) + error_context.context(f"Boot the guest with {hv_flag} flag", test.log.info) vm, session = _boot_guest_with_cpu_flag(cpu_model_flags) status = session.cmd_status(cpuid_chk_cmd) if status: install_epel_repo() - status = session.cmd_status("yum -y install %s" % cpuid_pkg) + status = session.cmd_status(f"yum -y install {cpuid_pkg}") if status: test.error("Fail to install target cpuid") if not run_cpuid_check(check_register, check_bit): - test.fail('CPUID %s BIT(%s) does not set' % (check_register, check_bit)) + test.fail(f"CPUID {check_register} BIT({check_bit}) does not set") vm.graceful_shutdown(timeout=timeout) - error_context.context("Boot the guest without %s flag" % hv_flag, test.log.info) - without_hv_flag = ','.join( - [_ for _ in cpu_model_flags.split(',') - if _ not in hv_flags_to_ignore]) + error_context.context(f"Boot the guest without {hv_flag} flag", test.log.info) + without_hv_flag = ",".join( + [_ for _ in cpu_model_flags.split(",") if _ not in hv_flags_to_ignore] + ) vm, session = _boot_guest_with_cpu_flag(without_hv_flag) if run_cpuid_check(check_register, check_bit): - test.fail('CPUID %s BIT(%s) was set' % (check_register, check_bit)) + test.fail(f"CPUID {check_register} BIT({check_bit}) was set") diff --git a/qemu/tests/hv_info_check.py b/qemu/tests/hv_info_check.py index 25edab4175..b7971f45d7 100644 --- a/qemu/tests/hv_info_check.py +++ b/qemu/tests/hv_info_check.py @@ -12,20 +12,23 @@ def run(test, params, env): cpu_model_flags = 
params["cpu_model_flags"] vm = env.get_vm(params["main_vm"]) - error_context.context("Query supported HyperV Enlightenments " - "by host", test.log.info) + error_context.context( + "Query supported HyperV Enlightenments " "by host", test.log.info + ) missing = [] - args = {"type": "full", "model": {"name": "host", - "props": {"hv-passthrough": True}}} - output = vm.monitor.cmd('query-cpu-model-expansion', args) - model = output.get('model') - model_prop = model.get('props') - cpu_model_flags = cpu_model_flags.replace('_', '-') - cpu_model_flags = [i for i in cpu_model_flags.split(',') if 'hv' in i] + args = { + "type": "full", + "model": {"name": "host", "props": {"hv-passthrough": True}}, + } + output = vm.monitor.cmd("query-cpu-model-expansion", args) + model = output.get("model") + model_prop = model.get("props") + cpu_model_flags = cpu_model_flags.replace("_", "-") + cpu_model_flags = [i for i in cpu_model_flags.split(",") if "hv" in i] for flag in cpu_model_flags: - if 'hv-spinlocks' in flag: + if "hv-spinlocks" in flag: continue if model_prop.get(flag) is not True: missing.append(flag) if missing: - test.fail('Check cpu model props failed, %s is not True' % missing) + test.fail(f"Check cpu model props failed, {missing} is not True") diff --git a/qemu/tests/hv_kvm_unit_test.py b/qemu/tests/hv_kvm_unit_test.py index 4712879112..b3896cea2d 100644 --- a/qemu/tests/hv_kvm_unit_test.py +++ b/qemu/tests/hv_kvm_unit_test.py @@ -1,10 +1,8 @@ -import re import json +import re -from virttest import error_context -from virttest import data_dir -from virttest import cpu from avocado.utils import process +from virttest import cpu, data_dir, error_context @error_context.context_aware @@ -37,8 +35,9 @@ def run(test, params, env): unit_test_cmd = test_cmd % (tmp_dir, unit_test, cpu_param) result_output = process.system_output(unit_test_cmd, shell=True) result_output = result_output.decode() - find_result = re.findall('^%s' % unit_test_result[0], result_output, re.M) + find_result = re.findall(f"^{unit_test_result[0]}", result_output, re.M) if len(find_result) != int(unit_test_result[1]): - test.fail("Unit test result mismatch target, " - "target=%s, output=%s" % - (unit_test_result[1], result_output)) + test.fail( + "Unit test result mismatch target, " + f"target={unit_test_result[1]}, output={result_output}" + ) diff --git a/qemu/tests/hv_time.py b/qemu/tests/hv_time.py index 2d44a78f37..7845186bee 100644 --- a/qemu/tests/hv_time.py +++ b/qemu/tests/hv_time.py @@ -1,8 +1,6 @@ import re -from virttest import error_context -from virttest import utils_misc -from virttest import env_process +from virttest import env_process, error_context, utils_misc @error_context.context_aware @@ -38,17 +36,17 @@ def _setup_environments(): session = vm.reboot(session, timeout=timeout) s, o = session.cmd_status_output(check_pltclk_cmd) if s: - test.error("Failed to check the useplatfromclock after reboot, " - "status=%s, output=%s" % (s, o)) - use_pltck = re.search(r'useplatformclock\s+no', o, re.I | re.M) + test.error( + "Failed to check the useplatfromclock after reboot, " + f"status={s}, output={o}" + ) + use_pltck = re.search(r"useplatformclock\s+no", o, re.I | re.M) if not use_pltck: - test.error("The useplatfromclock isn't off after reboot, " - "output=%s" % o) + test.error("The useplatfromclock isn't off after reboot, " f"output={o}") test.log.info("Copy the related files to the guest") for f in gettime_filenames: - copy_file_cmd = utils_misc.set_winutils_letter(session, - copy_cmd % f) + copy_file_cmd = 
utils_misc.set_winutils_letter(session, copy_cmd % f) session.cmd(copy_file_cmd) vm.graceful_shutdown(timeout=timeout) @@ -59,7 +57,7 @@ def _run_gettime(session): return: the cpu cycles amount of certain IO operation """ o = session.cmd_output_safe(run_gettime_cmd, timeout=timeout) - cycles = int(re.search(r'\d+', o).group(0)) + cycles = int(re.search(r"\d+", o).group(0)) test.log.info("The cycles with out hv_time is %d", cycles) return cycles @@ -105,10 +103,11 @@ def _check_result(cycles_without_flag, cycles_with_flag): """ factor = cycles_with_flag / float(cycles_without_flag) if factor > 0.1: - test.fail("Cycles with flag is %d, cycles without flag is %d, " - "the factor is %f > 0.1" % (cycles_with_flag, - cycles_without_flag, - factor)) + test.fail( + "Cycles with flag is %d, cycles without flag is %d, " + "the factor is %f > 0.1" + % (cycles_with_flag, cycles_without_flag, factor) + ) close_pltclk_cmd = params["close_pltclk_cmd"] check_pltclk_cmd = params["check_pltclk_cmd"] @@ -118,8 +117,9 @@ def _check_result(cycles_without_flag, cycles_with_flag): timeout = params.get("timeout", 360) hv_time_flags = params["hv_time_flags"].split() flags_with_hv_time = params["cpu_model_flags"] - flags_without_hv_time = ','.join( - [_ for _ in flags_with_hv_time.split(',') if _ not in hv_time_flags]) + flags_without_hv_time = ",".join( + [_ for _ in flags_with_hv_time.split(",") if _ not in hv_time_flags] + ) error_context.context("Setting up environments", test.log.info) _setup_environments() diff --git a/qemu/tests/hv_tlbflush.py b/qemu/tests/hv_tlbflush.py index e7fcc542b0..eb2034ab71 100644 --- a/qemu/tests/hv_tlbflush.py +++ b/qemu/tests/hv_tlbflush.py @@ -1,14 +1,16 @@ -import time import datetime import os +import time -from virttest import error_context -from virttest import env_process -from virttest import utils_misc -from virttest import utils_numeric -from virttest import data_dir -from virttest import utils_test from avocado.utils import cpu +from virttest import ( + data_dir, + env_process, + error_context, + utils_misc, + utils_numeric, + utils_test, +) @error_context.context_aware @@ -47,14 +49,16 @@ def _prepare_test_environment(): test.log.info("Copy tlbflush tool related files") for f in tlbflush_filenames: copy_file_cmd = utils_misc.set_winutils_letter( - session, copy_tlbflush_cmd % f) + session, copy_tlbflush_cmd % f + ) session.cmd(copy_file_cmd) test.log.info("Create a large file for test") create_test_file_cmd = params["create_test_file_cmd"] test_file_size = params["test_file_size"] test_file_size = utils_numeric.normalize_data_size( - test_file_size, order_magnitude="B") + test_file_size, order_magnitude="B" + ) session.cmd(create_test_file_cmd % test_file_size) vm.graceful_shutdown(timeout=timeout) @@ -65,10 +69,12 @@ def _prepare_test_environment(): host_cpu_count = cpu.total_cpus_count() host_stress = utils_test.HostStress( - stress_type, params, + stress_type, + params, download_type="tarball", downloaded_file_path=downloaded_file_path, - stress_args="--cpu %s > /dev/null 2>&1& " % host_cpu_count) + stress_args=f"--cpu {host_cpu_count} > /dev/null 2>&1& ", + ) return host_stress def _clean_test_environment(host_stress): @@ -147,20 +153,18 @@ def _run_tlbflush(session, host_stress): _start_host_stress(host_stress) test.log.info("Start run hv_tlbflush.exe on guest") - s, o = session.cmd_status_output(run_tlbflush_cmd, - run_tlbflush_timeout) + s, o = session.cmd_status_output(run_tlbflush_cmd, run_tlbflush_timeout) test.log.info("Stop stress on host") 
_stop_host_stress(host_stress) if s: - test.error("Run tlbflush error: status = %s, output = %s", - (s, o)) - time_str = o.strip().split('\n')[-1] + test.error("Run tlbflush error: status = %s, output = %s", (s, o)) + time_str = o.strip().split("\n")[-1] time_str = time_str.split(".")[0] s_t = time.strptime(time_str, "%H:%M:%S") - total_time = datetime.timedelta(hours=s_t.tm_hour, - minutes=s_t.tm_min, - seconds=s_t.tm_sec).total_seconds() + total_time = datetime.timedelta( + hours=s_t.tm_hour, minutes=s_t.tm_min, seconds=s_t.tm_sec + ).total_seconds() test.log.info("Running result: %f", total_time) return total_time @@ -174,9 +178,9 @@ def _run_tlbflush(session, host_stress): try: error_context.context("Boot guest with hv_tlbflush related flags") - hv_flag_without_tlbflush = ','.join( - [_ for _ in cpu_model_flags.split(',') - if _ not in hv_flags_to_ignore]) + hv_flag_without_tlbflush = ",".join( + [_ for _ in cpu_model_flags.split(",") if _ not in hv_flags_to_ignore] + ) vm, session = _boot_guest_with_cpu_flag(hv_flag_without_tlbflush) error_context.context("Run tlbflush without hv_tlbflush", test.log.info) @@ -192,10 +196,11 @@ def _run_tlbflush(session, host_stress): factor = time_with_flag / time_without_flag vm_arch = params.get("vm_arch_name") if factor >= 0.5 if vm_arch == "x86_64" else factor >= 1.0: - test.fail("The improvement factor=%d is not enough. " - "Time WITHOUT flag: %s, " - "Time WITH flag: %s" % - (factor, time_without_flag, time_with_flag)) + test.fail( + "The improvement factor=%d is not enough. " + "Time WITHOUT flag: %s, " + "Time WITH flag: %s" % (factor, time_without_flag, time_with_flag) + ) finally: _clean_test_environment(host_stress) diff --git a/qemu/tests/hv_type.py b/qemu/tests/hv_type.py index 24054ddecc..41d4c63ff3 100644 --- a/qemu/tests/hv_type.py +++ b/qemu/tests/hv_type.py @@ -1,7 +1,6 @@ import os -from virttest import data_dir -from virttest import error_context +from virttest import data_dir, error_context @error_context.context_aware @@ -36,8 +35,7 @@ def run(test, params, env): if status: test.error("Fail to uninstall existing virt-what") test.log.info("Copy target virt-what pkg to guest") - virt_what_pkg = os.path.join(data_dir.get_deps_dir("virt_what"), - virt_what_pkg) + virt_what_pkg = os.path.join(data_dir.get_deps_dir("virt_what"), virt_what_pkg) try: vm.copy_files_to(virt_what_pkg, virt_what_guest_dir) status = session.cmd_status(virt_what_install_cmd) diff --git a/qemu/tests/hv_vapic_test.py b/qemu/tests/hv_vapic_test.py index 0fe871d76b..335140f390 100644 --- a/qemu/tests/hv_vapic_test.py +++ b/qemu/tests/hv_vapic_test.py @@ -1,11 +1,8 @@ -import re import os +import re import time -from virttest import error_context -from virttest import utils_disk -from virttest import env_process -from virttest import data_dir +from virttest import data_dir, env_process, error_context, utils_disk from virttest.qemu_storage import QemuImg from provider.storage_benchmark import generate_instance @@ -58,7 +55,8 @@ def _format_tmpfs_disk(): disk_size = params["image_size_" + params["tmpfs_image_name"]] disk_id = utils_disk.get_windows_disks_index(session, disk_size)[0] drive_letter = utils_disk.configure_empty_windows_disk( - session, disk_id, disk_size)[0] + session, disk_id, disk_size + )[0] vm.graceful_shutdown(timeout=timeout) return drive_letter @@ -92,7 +90,7 @@ def _run_fio(session, drive_letter): test.log.info("Format tmpfs data disk") utils_disk.create_filesystem_windows(session, drive_letter, "ntfs") test.log.info("Start fio test") - fio = 
generate_instance(params, vm, 'fio') + fio = generate_instance(params, vm, "fio") o = fio.run(params["fio_options"] % drive_letter) return int(re.search(bw_search_reg, o, re.M).group(1)) @@ -111,8 +109,7 @@ def _run_fio(session, drive_letter): error_context.context("Start fio in guest", test.log.info) bw_with_hv_vapic = _run_fio(session, drive_letter) - error_context.context("Shutdown guest and boot without hv_vapnic", - test.log.info) + error_context.context("Shutdown guest and boot without hv_vapnic", test.log.info) vm.graceful_shutdown(timeout=timeout) cpu_model_flags = cpu_model_flags.replace(",hv_vapic", "") vm, session = _boot_guest_with_cpu_flag(cpu_model_flags) @@ -124,7 +121,8 @@ def _run_fio(session, drive_letter): improvement = (float)(bw_with_hv_vapic - bw_without_hv_vapic) improvement /= bw_without_hv_vapic if improvement < 0.05: - test.fail("Improvement not above 5%%." - " bw with hv_vapic: %s," - " bw without hv_vapic: %s" % - (bw_with_hv_vapic, bw_without_hv_vapic)) + test.fail( + "Improvement not above 5%." + f" bw with hv_vapic: {bw_with_hv_vapic}," + f" bw without hv_vapic: {bw_without_hv_vapic}" + ) diff --git a/qemu/tests/image_commit_bypass_host_cache.py b/qemu/tests/image_commit_bypass_host_cache.py index 7e9d4ef27b..356288ec98 100644 --- a/qemu/tests/image_commit_bypass_host_cache.py +++ b/qemu/tests/image_commit_bypass_host_cache.py @@ -2,10 +2,9 @@ from avocado import fail_on from avocado.utils import process +from virttest import data_dir, qemu_storage, virt_vm + from provider import qemu_img_utils as img_utils -from virttest import data_dir -from virttest import qemu_storage -from virttest import virt_vm def run(test, params, env): @@ -24,11 +23,12 @@ def run(test, params, env): trace_events = params["trace_events"].split() sync_bin = params.get("sync_bin", "sync") images = params["images"].split() - params["image_name_%s" % images[0]] = params["image_name"] - params["image_format_%s" % images[0]] = params["image_format"] + params[f"image_name_{images[0]}"] = params["image_name"] + params[f"image_format_{images[0]}"] = params["image_format"] - base, sn = (qemu_storage.QemuImg(params.object_params(tag), root_dir, tag) - for tag in images) + base, sn = ( + qemu_storage.QemuImg(params.object_params(tag), root_dir, tag) for tag in images + ) try: sn.create(sn.params) vm = img_utils.boot_vm_with_images(test, params, env, (sn.tag,)) @@ -36,8 +36,7 @@ def run(test, params, env): test.fail(str(detail)) guest_file = params["guest_tmp_filename"] - test.log.debug("Create tmp file %s in image %s", guest_file, - sn.image_filename) + test.log.debug("Create tmp file %s in image %s", guest_file, sn.image_filename) img_utils.save_random_file_to_vm(vm, guest_file, 2048 * 100, sync_bin) vm.destroy() @@ -45,18 +44,17 @@ def run(test, params, env): test.log.debug("commit snapshot, strace log %s", strace_log) with img_utils.strace(sn, trace_events, strace_log): fail_on((process.CmdError,))(sn.commit)() - fail_msg = "'O_DIRECT' is presented in system calls %s" % trace_events + fail_msg = f"'O_DIRECT' is presented in system calls {trace_events}" if img_utils.check_flag(strace_log, base.image_filename, "O_DIRECT"): test.fail(fail_msg) if img_utils.check_flag(strace_log, sn.image_filename, "O_DIRECT"): test.fail(fail_msg) strace_log = os.path.join(test.debugdir, "commit_bypass.log") - test.log.debug("commit snapshot with cache 'none', strace log: %s", - strace_log) + test.log.debug("commit snapshot with cache 'none', strace log: %s", strace_log) with img_utils.strace(sn, trace_events, 
strace_log): fail_on((process.CmdError,))(sn.commit)(cache_mode="none") - fail_msg = "'O_DIRECT' is missing in system calls %s" % trace_events + fail_msg = f"'O_DIRECT' is missing in system calls {trace_events}" if not img_utils.check_flag(strace_log, base.image_filename, "O_DIRECT"): test.fail(fail_msg) if not img_utils.check_flag(strace_log, sn.image_filename, "O_DIRECT"): diff --git a/qemu/tests/image_compare_bypass_host_cache.py b/qemu/tests/image_compare_bypass_host_cache.py index 79faf54811..071f1ac200 100644 --- a/qemu/tests/image_compare_bypass_host_cache.py +++ b/qemu/tests/image_compare_bypass_host_cache.py @@ -1,11 +1,9 @@ import os -from avocado.utils import path -from avocado.utils import process -from provider.qemu_img_utils import check_flag -from provider.qemu_img_utils import strace -from virttest import data_dir -from virttest import qemu_storage +from avocado.utils import path, process +from virttest import data_dir, qemu_storage + +from provider.qemu_img_utils import check_flag, strace def run(test, params, env): @@ -16,6 +14,7 @@ def run(test, params, env): 6. compare with source cache mode `none`. 7. check strace output that `O_DIRECT` is on. """ + def compare_images(source, target, source_cache_mode=None): ret = source.compare_to(target, source_cache_mode=source_cache_mode) if ret.exit_status == 0: @@ -50,7 +49,7 @@ def compare_images(source, target, source_cache_mode=None): strace_output_file = os.path.join(test.debugdir, "compare.log") with strace(source, strace_events, strace_output_file): compare_images(source, target) - fail_msg = "'O_DIRECT' is presented in system calls %s" % strace_events + fail_msg = f"'O_DIRECT' is presented in system calls {strace_events}" if check_flag(strace_output_file, source.image_filename, "O_DIRECT"): test.fail(fail_msg) if check_flag(strace_output_file, target.image_filename, "O_DIRECT"): @@ -59,7 +58,7 @@ def compare_images(source, target, source_cache_mode=None): strace_output_file = os.path.join(test.debugdir, "compare_bypass.log") with strace(source, strace_events, strace_output_file): compare_images(source, target, source_cache_mode="none") - fail_msg = "'O_DIRECT' is not presented in system calls %s" % strace_events + fail_msg = f"'O_DIRECT' is not presented in system calls {strace_events}" if not check_flag(strace_output_file, source.image_filename, "O_DIRECT"): test.fail(fail_msg) if not check_flag(strace_output_file, target.image_filename, "O_DIRECT"): diff --git a/qemu/tests/image_convert_bypass_host_cache.py b/qemu/tests/image_convert_bypass_host_cache.py index 8afa4c5df3..732443d17f 100644 --- a/qemu/tests/image_convert_bypass_host_cache.py +++ b/qemu/tests/image_convert_bypass_host_cache.py @@ -2,12 +2,9 @@ from avocado import fail_on from avocado.utils import process -from provider.qemu_img_utils import find_strace -from provider.qemu_img_utils import check_flag -from provider.qemu_img_utils import strace -from virttest import data_dir -from virttest import qemu_storage -from virttest import storage +from virttest import data_dir, qemu_storage, storage + +from provider.qemu_img_utils import check_flag, find_strace, strace def run(test, params, env): @@ -27,41 +24,51 @@ def run(test, params, env): image = qemu_storage.QemuImg(image_params, root_dir, image) convert_target1, convert_target2 = params["convert_target"].split() - strace_output_file = os.path.join(test.debugdir, - "convert_to_%s.log" % convert_target1) + strace_output_file = os.path.join( + test.debugdir, f"convert_to_{convert_target1}.log" + ) 
image_params["convert_target"] = convert_target1 - test.log.debug("Convert image from %s to %s, strace log: %s", image.tag, - convert_target1, strace_output_file) + test.log.debug( + "Convert image from %s to %s, strace log: %s", + image.tag, + convert_target1, + strace_output_file, + ) with strace(image, strace_events, strace_output_file): fail_on((process.CmdError,))(image.convert)(image_params, root_dir) convert_target1_filename = storage.get_image_filename( - params.object_params(convert_target1), root_dir) - fail_msg = "'O_DIRECT' is presented in system calls %s" % strace_events + params.object_params(convert_target1), root_dir + ) + fail_msg = f"'O_DIRECT' is presented in system calls {strace_events}" if check_flag(strace_output_file, image.image_filename, "O_DIRECT"): test.fail(fail_msg) if check_flag(strace_output_file, convert_target1_filename, "O_DIRECT"): test.fail(fail_msg) - strace_output_file = os.path.join(test.debugdir, - "convert_to_%s.log" % convert_target2) + strace_output_file = os.path.join( + test.debugdir, f"convert_to_{convert_target2}.log" + ) image_params["convert_target"] = convert_target2 - test.log.debug(("Convert image from %s to %s with cache mode " - "'none', strace log: %s"), image.tag, convert_target2, - strace_output_file) + test.log.debug( + ("Convert image from %s to %s with cache mode " "'none', strace log: %s"), + image.tag, + convert_target2, + strace_output_file, + ) with strace(image, strace_events, strace_output_file): fail_on((process.CmdError,))(image.convert)( - image_params, root_dir, - cache_mode="none", source_cache_mode="none") + image_params, root_dir, cache_mode="none", source_cache_mode="none" + ) convert_target2_filename = storage.get_image_filename( - params.object_params(convert_target2), root_dir) - fail_msg = "'O_DIRECT' is not presented in system calls %s" % strace_events + params.object_params(convert_target2), root_dir + ) + fail_msg = f"'O_DIRECT' is not presented in system calls {strace_events}" if not check_flag(strace_output_file, image.image_filename, "O_DIRECT"): test.fail(fail_msg) - if not check_flag(strace_output_file, - convert_target2_filename, "O_DIRECT"): + if not check_flag(strace_output_file, convert_target2_filename, "O_DIRECT"): test.fail(fail_msg) params["images"] += params["convert_target"] diff --git a/qemu/tests/image_create_with_large_size.py b/qemu/tests/image_create_with_large_size.py index 16c9f104f8..7fb0a2bd50 100755 --- a/qemu/tests/image_create_with_large_size.py +++ b/qemu/tests/image_create_with_large_size.py @@ -1,5 +1,4 @@ from avocado.core import exceptions - from virttest import data_dir from virttest.qemu_storage import QemuImg @@ -29,7 +28,7 @@ def run(test, params, env): large.create(large.params) except exceptions.TestError as err: if create_err_info not in str(err) or large_filename not in str(err): - test.fail("CML failed with unexpected output: %s" % err) + test.fail(f"CML failed with unexpected output: {err}") else: test.fail("There is no error when creating an image with large size.") @@ -40,4 +39,4 @@ def run(test, params, env): if status == 0: test.fail("There is no error when resizing an image with large size.") elif resize_err_info not in output: - test.fail("CML failed with unexpected output: %s" % output) + test.fail(f"CML failed with unexpected output: {output}") diff --git a/qemu/tests/image_create_with_preallocation.py b/qemu/tests/image_create_with_preallocation.py index 0235421123..fd1466a8b8 100644 --- a/qemu/tests/image_create_with_preallocation.py +++ 
b/qemu/tests/image_create_with_preallocation.py @@ -3,9 +3,7 @@ from avocado import fail_on from avocado.utils import process - -from virttest import data_dir -from virttest import qemu_storage +from virttest import data_dir, qemu_storage from provider import qemu_img_utils as img_utils @@ -45,8 +43,10 @@ def check_fallocate_syscall(trace_event): fail_on((process.CmdError,))(img_stg.create)(image_stg_params) with open(strace_log) as fd: if trace_event not in fd.read(): - test.fail("Not invoked fallocate system call when " - "creating an image with preallocation=falloc") + test.fail( + "Not invoked fallocate system call when " + "creating an image with preallocation=falloc" + ) def check_actual_size_field(): """ @@ -57,15 +57,17 @@ def check_actual_size_field(): info = json.loads(cmd_result) if params["preallocated_stg"] in ["full", "falloc"]: if info["actual-size"] < actual_size: - test.fail("The 'actual-size' field from qemu-img info " - "is not greater than or equal to %s. " - "The actual output is %s" - % (actual_size, cmd_result)) + test.fail( + "The 'actual-size' field from qemu-img info " + f"is not greater than or equal to {actual_size}. " + f"The actual output is {cmd_result}" + ) elif params["preallocated_stg"] in ["off", "metadata"]: if info["actual-size"] >= actual_size: - test.fail("The 'actual-size' field from qemu-img info " - "is not less than %s. The actual output is %s" - % (actual_size, cmd_result)) + test.fail( + "The 'actual-size' field from qemu-img info " + f"is not less than {actual_size}. The actual output is {cmd_result}" + ) trace_event = params.get("trace_event") image_stg = params["images"] diff --git a/qemu/tests/image_creation_lock_release.py b/qemu/tests/image_creation_lock_release.py index 738424b8a2..0948c6cabd 100755 --- a/qemu/tests/image_creation_lock_release.py +++ b/qemu/tests/image_creation_lock_release.py @@ -1,5 +1,4 @@ from avocado.utils import process - from virttest import data_dir from virttest.qemu_storage import QemuImg @@ -19,11 +18,13 @@ def run(test, params, env): test_filename = test_image.image_filename lock_err_info = 'Failed to get "consistent read" lock' try: - process.run("qemu-img create -f raw -o preallocation=full %s 1G & " - "sleep 0.5;qemu-io -c info -c close -r %s" - % (test_filename, test_filename), shell=True) + process.run( + f"qemu-img create -f raw -o preallocation=full {test_filename} 1G & " + f"sleep 0.5;qemu-io -c info -c close -r {test_filename}", + shell=True, + ) except process.CmdError as err: if lock_err_info in err.result.stderr.decode(): - test.fail("Image lock not released: %s" % err) + test.fail(f"Image lock not released: {err}") else: - test.error("Command line failed: %s" % err) + test.error(f"Command line failed: {err}") diff --git a/qemu/tests/image_creation_luks_with_non_utf8_secret.py b/qemu/tests/image_creation_luks_with_non_utf8_secret.py index 0b22bb8df0..7b0c298b19 100755 --- a/qemu/tests/image_creation_luks_with_non_utf8_secret.py +++ b/qemu/tests/image_creation_luks_with_non_utf8_secret.py @@ -2,9 +2,7 @@ import re from avocado.utils import process - -from virttest import data_dir -from virttest import utils_misc +from virttest import data_dir, utils_misc def run(test, params, env): @@ -29,13 +27,18 @@ def run(test, params, env): non_utf8_secret_file = os.path.join(tmp_dir, "non_utf8_secret") non_utf8_secret = params["echo_non_utf8_secret_cmd"] % non_utf8_secret_file process.run(non_utf8_secret, shell=True) - qemu_img_create_cmd = params["qemu_img_create_cmd"] % (non_utf8_secret_file, - 
image_stg_path) - cmd_result = process.run(qemu_img_create_cmd, - ignore_status=True, shell=True) + qemu_img_create_cmd = params["qemu_img_create_cmd"] % ( + non_utf8_secret_file, + image_stg_path, + ) + cmd_result = process.run(qemu_img_create_cmd, ignore_status=True, shell=True) if os.path.exists(image_stg_path): - test.fail("The image '%s' should not exist. Since created" - " it with non_utf8_secret." % image_stg_path) + test.fail( + f"The image '{image_stg_path}' should not exist. Since created" + " it with non_utf8_secret." + ) if not re.search(err_info, cmd_result.stderr.decode(), re.I): - test.fail("Failed to get error information. The actual error " - "information is %s." % cmd_result.stderr.decode()) + test.fail( + "Failed to get error information. The actual error " + f"information is {cmd_result.stderr.decode()}." + ) diff --git a/qemu/tests/image_locking_read_test.py b/qemu/tests/image_locking_read_test.py index ae14fe8c3b..ce19267285 100644 --- a/qemu/tests/image_locking_read_test.py +++ b/qemu/tests/image_locking_read_test.py @@ -1,8 +1,5 @@ from avocado import fail_on - -from virttest import env_process -from virttest import error_context -from virttest import virt_vm +from virttest import env_process, error_context, virt_vm from qemu.tests.qemu_disk_img import QemuImgTest @@ -21,12 +18,17 @@ def run(test, params, env): 6. boot second vm from sn12 and verify the temporary file is presented. """ - params.update({"image_name_image1": params["image_name"], - "image_format_image1": params["image_format"]}) + params.update( + { + "image_name_image1": params["image_name"], + "image_format_image1": params["image_format"], + } + ) error_context.context("boot first vm from first image chain", test.log.info) - env_process.process(test, params, env, env_process.preprocess_image, - env_process.preprocess_vm) + env_process.process( + test, params, env, env_process.preprocess_image, env_process.preprocess_vm + ) vm1 = env.get_vm(params["main_vm"]) vm1.verify_alive() @@ -37,37 +39,40 @@ def run(test, params, env): error_context.context("create the second snapshot chain", test.log.info) for image in images: - test.log.debug("create snapshot %s based on %s", image.image_filename, - image.base_image_filename) + test.log.debug( + "create snapshot %s based on %s", + image.image_filename, + image.base_image_filename, + ) image.create_snapshot() test.log.debug("boot from snapshot %s", image.image_filename) try: # ensure vm only boot with this snapshot - image.start_vm({"boot_drive_%s" % image.tag: "yes"}) + image.start_vm({f"boot_drive_{image.tag}": "yes"}) except virt_vm.VMCreateError: # add images in second chain to images so they could be deleted # in postprocess - params["images"] += " %s" % image - test.fail("fail to start vm from snapshot %s" % - image.image_filename) + params["images"] += f" {image}" + test.fail(f"fail to start vm from snapshot {image.image_filename}") else: if image is not images[-1]: image.destroy_vm() tmpfile = params.get("guest_tmp_filename") - error_context.context("create a temporary file: %s in %s" % - (tmpfile, image.image_filename), test.log.info) + error_context.context( + f"create a temporary file: {tmpfile} in {image.image_filename}", + test.log.info, + ) hash_val = image.save_file(tmpfile) test.log.debug("The hash of temporary file:\n%s", hash_val) image.destroy_vm() - error_context.context("commit image %s" % image.image_filename, - test.log.info) + error_context.context(f"commit image {image.image_filename}", test.log.info) fail_on()(image.commit)() 
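# Aside: the "check temporary file after commit" step below boils down to
# hashing the guest file before the commit and comparing that hash after
# booting from the committed base image (save_file/check_file are assumed to
# wrap exactly that).  A local, stdlib-only analogue with a hypothetical path:
import hashlib

def sha1_of(path):
    digest = hashlib.sha1()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
    return digest.hexdigest()

# before = sha1_of("/tmp/guest_tmp_file")   # prior to qemu-img commit
# after  = sha1_of("/tmp/guest_tmp_file")   # after booting the committed base
# assert before == after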
error_context.context("check temporary file after commit", test.log.info) image = images[-2] test.log.debug("boot vm from %s", image.image_filename) - image.start_vm({"boot_drive_%s" % image.tag: "yes"}) + image.start_vm({f"boot_drive_{image.tag}": "yes"}) if not image.check_file(tmpfile, hash_val): - test.fail("File %s's hash is different after commit" % tmpfile) + test.fail(f"File {tmpfile}'s hash is different after commit") diff --git a/qemu/tests/image_rebase_bypass_host_cache.py b/qemu/tests/image_rebase_bypass_host_cache.py index 4d620041f9..16da30db0e 100644 --- a/qemu/tests/image_rebase_bypass_host_cache.py +++ b/qemu/tests/image_rebase_bypass_host_cache.py @@ -3,18 +3,20 @@ from avocado import fail_on from avocado.utils import process +from virttest import data_dir, qemu_storage + from provider import qemu_img_utils as img_utils -from virttest import data_dir -from virttest import qemu_storage def coroutine(func): """Start coroutine.""" + @functools.wraps(func) def start(*args, **kargs): cr = func(*args, **kargs) cr.send(None) return cr + return start @@ -27,6 +29,7 @@ def run(test, params, env): 4) rebase sn2 to image1 with cache mode 'none' and check flag O_DIRECT is on. """ + def remove_snapshots(): """Remove snapshots created.""" while snapshots: @@ -58,9 +61,13 @@ def save_file_to_snapshot(): while True: snapshot = yield test.log.debug("boot vm from image %s", snapshot.tag) - vm = img_utils.boot_vm_with_images(test, params, env, - images=(snapshot.tag,), - vm_name="VM_%s" % snapshot.tag) + vm = img_utils.boot_vm_with_images( + test, + params, + env, + images=(snapshot.tag,), + vm_name=f"VM_{snapshot.tag}", + ) guest_file = params["guest_tmp_filename"] % snapshot.tag test.log.debug("create tmp file %s in %s", guest_file, snapshot.tag) img_utils.save_random_file_to_vm(vm, guest_file, 2048, sync_bin) @@ -68,8 +75,8 @@ def save_file_to_snapshot(): img_utils.find_strace() base = params["image_chain"].split()[0] - params["image_name_%s" % base] = params["image_name"] - params["image_format_%s" % base] = params["image_format"] + params[f"image_name_{base}"] = params["image_name"] + params[f"image_format_{base}"] = params["image_format"] root_dir = data_dir.get_data_dir() base = qemu_storage.QemuImg(params.object_params(base), root_dir, base) trace_events = params["trace_event"].split() @@ -82,7 +89,7 @@ def save_file_to_snapshot(): test.log.debug("rebase snapshot %s to %s", top.tag, base.tag) with img_utils.strace(top, trace_events, strace_log): top.base_tag = base.tag - fail_on((process.CmdError))(top.rebase)(params) + fail_on(process.CmdError)(top.rebase)(params) fail_msg = "'O_DIRECT' is presented in %s with file %s" for image in [base] + snapshots: @@ -94,18 +101,16 @@ def save_file_to_snapshot(): strace_log = os.path.join(test.debugdir, "rebase_bypass.log") top = snapshots[-1] - test.log.debug("rebase snapshot %s to %s in cache mode 'none'", - top.tag, base.tag) + test.log.debug("rebase snapshot %s to %s in cache mode 'none'", top.tag, base.tag) with img_utils.strace(top, trace_events, strace_log): top.base_tag = base.tag - fail_on((process.CmdError))(top.rebase)(params, - cache_mode="none", - source_cache_mode="none") + fail_on(process.CmdError)(top.rebase)( + params, cache_mode="none", source_cache_mode="none" + ) fail_msg = "'O_DIRECT' is missing in %s with file %s" for image in [base] + snapshots: - if not img_utils.check_flag(strace_log, - image.image_filename, "O_DIRECT"): + if not img_utils.check_flag(strace_log, image.image_filename, "O_DIRECT"): test.fail(fail_msg % 
(trace_events, image.image_filename)) remove_snapshots() diff --git a/qemu/tests/in_place_upgrade.py b/qemu/tests/in_place_upgrade.py index e61dc19f12..5c74129492 100644 --- a/qemu/tests/in_place_upgrade.py +++ b/qemu/tests/in_place_upgrade.py @@ -1,14 +1,11 @@ import logging - -from virttest import data_dir -from virttest import error_context -from virttest import storage from avocado.utils import process -from provider.in_place_upgrade_base import IpuTest +from virttest import data_dir, error_context, storage +from provider.in_place_upgrade_base import IpuTest -LOG_JOB = logging.getLogger('avocado.test') +LOG_JOB = logging.getLogger("avocado.test") @error_context.context_aware @@ -97,8 +94,9 @@ def run(test, params, env): ipu_timeout = int(params.get("ipu_after_timeout")) usr = params.get("user_assistant") passwd = params.get("user_assistant_pw") - upgrade_test.session = vm.wait_for_login(timeout=ipu_timeout, - username=usr, password=passwd) + upgrade_test.session = vm.wait_for_login( + timeout=ipu_timeout, username=usr, password=passwd + ) # restore settings in the guest upgrade_test.post_upgrade_restore(test) # post checking @@ -106,9 +104,12 @@ def run(test, params, env): post_rhel_ver = upgrade_test.run_guest_cmd(check_rhel_ver) vm.verify_kernel_crash() if params.get("device_cio_free_check_cmd"): - cio_status = str(upgrade_test.session.cmd_status_output( - params.get("device_cio_free_check_cmd"))) - if 'inactive' in cio_status: + cio_status = str( + upgrade_test.session.cmd_status_output( + params.get("device_cio_free_check_cmd") + ) + ) + if "inactive" in cio_status: test.fail("device_cio_free is not enabled after upgrading") finally: vm.graceful_shutdown(timeout=300) @@ -118,7 +119,6 @@ def run(test, params, env): image_path = params.get("images_base_dir", data_dir.get_data_dir()) old_name = storage.get_image_filename(image_params, image_path) upgraded_name = old_name.replace(pre_rhel_ver, post_rhel_ver) - process.run(params.get("image_clone_command") % - (old_name, upgraded_name)) + process.run(params.get("image_clone_command") % (old_name, upgraded_name)) except Exception as error: - test.log.warning("Failed to rename upgraded image:%s" % str(error)) + test.log.warning("Failed to rename upgraded image: %s", str(error)) diff --git a/qemu/tests/in_place_upgrade_legacy.py b/qemu/tests/in_place_upgrade_legacy.py index dcd11d8582..fcd02aaeb3 100644 --- a/qemu/tests/in_place_upgrade_legacy.py +++ b/qemu/tests/in_place_upgrade_legacy.py @@ -1,14 +1,12 @@ -import re import logging +import re - -from virttest import error_context -from virttest import data_dir -from virttest import storage from avocado.utils import process +from virttest import data_dir, error_context, storage + from provider.in_place_upgrade_base import IpuTest -LOG_JOB = logging.getLogger('avocado.test') +LOG_JOB = logging.getLogger("avocado.test") class IpuLegacyTest(IpuTest): @@ -18,8 +16,7 @@ class IpuLegacyTest(IpuTest): """ def __init__(self, test, params): - - super(IpuLegacyTest, self).__init__(test, params) + super().__init__(test, params) self.session = None self.test = test self.params = params @@ -43,13 +40,12 @@ def pre_upgrade_whitelist(self, test): fix_answer_sec = self.params.get("fix_answer_section") self.session.cmd(fix_answer_sec, timeout=1200) erase_old_kernel = self.params.get("clean_up_old_kernel") - s, output = self.session.cmd_status_output(erase_old_kernel, - timeout=1200) + s, output = self.session.cmd_status_output(erase_old_kernel, timeout=1200) error_info = self.params.get("error_info") if 
re.search(error_info, output): pass except Exception as info: - test.fail("Failed to fix known issues in advance :%s" % str(info)) + test.fail(f"Failed to fix known issues in advance :{str(info)}") @error_context.context_aware @@ -149,9 +145,12 @@ def run(test, params, env): post_rhel_ver = upgrade_test.run_guest_cmd(check_rhel_ver) vm.verify_kernel_crash() if params.get("device_cio_free_check_cmd"): - cio_status = str(upgrade_test.session.cmd_status_output( - params.get("device_cio_free_check_cmd"))) - if 'inactive' in cio_status: + cio_status = str( + upgrade_test.session.cmd_status_output( + params.get("device_cio_free_check_cmd") + ) + ) + if "inactive" in cio_status: test.fail("device_cio_free is not enabled after upgrading") finally: vm.graceful_shutdown(timeout=300) @@ -161,7 +160,6 @@ def run(test, params, env): image_path = params.get("images_base_dir", data_dir.get_data_dir()) old_name = storage.get_image_filename(image_params, image_path) upgraded_name = old_name.replace(pre_rhel_ver, post_rhel_ver + "0") - process.run(params.get("image_clone_command") % - (old_name, upgraded_name)) + process.run(params.get("image_clone_command") % (old_name, upgraded_name)) except Exception as error: - test.log.warning("Failed to rename upgraded image:%s" % str(error)) + test.log.warning("Failed to rename upgraded image: %s", str(error)) diff --git a/qemu/tests/insert_media.py b/qemu/tests/insert_media.py index 15d5ebafa3..c93d5ea966 100644 --- a/qemu/tests/insert_media.py +++ b/qemu/tests/insert_media.py @@ -1,6 +1,4 @@ -from virttest import env_process -from virttest import utils_misc - +from virttest import env_process, utils_misc from virttest.qemu_capabilities import Flags @@ -14,17 +12,18 @@ def run(test, params, env): :param params: Dictionary with the test parameters. :param env: Dictionary with test environment. """ + def move_tary(action, dev_id): - getattr(vm.monitor, 'blockdev_%s_tray' % action)(dev_id) + getattr(vm.monitor, f"blockdev_{action}_tray")(dev_id) if not utils_misc.wait_for( - lambda: vm.monitor.get_event(tray_move_event), 60, 0, 3): - test.fail('Failed to get event %s after %s tray.' 
% - (tray_move_event, action)) + lambda: vm.monitor.get_event(tray_move_event), 60, 0, 3 + ): + test.fail(f"Failed to get event {tray_move_event} after {action} tray.") - tray_move_event = params.get('tray_move_event') - dev_id = params.get('cdroms').split()[0] + tray_move_event = params.get("tray_move_event") + dev_id = params.get("cdroms").split()[0] params["start_vm"] = "yes" - vm_name = params.get('main_vm') + vm_name = params.get("main_vm") env_process.preprocess_vm(test, params, env, vm_name) vm = env.get_vm(vm_name) @@ -33,23 +32,23 @@ def move_tary(action, dev_id): vm.verify_alive() drive = vm.devices[dev_id] - top_node = vm.devices[drive.get_param('drive')] + top_node = vm.devices[drive.get_param("drive")] nodes = [top_node] - nodes.extend((n for n in top_node.get_child_nodes())) + nodes.extend(n for n in top_node.get_child_nodes()) for node in nodes: vm.devices.remove(node, True) if node is not top_node: top_node.del_child_node(node) - drive.set_param('drive', None) + drive.set_param("drive", None) vm.destroy(False) vm = vm.clone(copy_state=True) vm.create() - move_tary('open', dev_id) + move_tary("open", dev_id) vm.monitor.blockdev_remove_medium(dev_id) for node in reversed(nodes): vm.devices.simple_hotplug(node, vm.monitor) vm.monitor.blockdev_insert_medium(dev_id, top_node.get_qid()) - move_tary('close', dev_id) + move_tary("close", dev_id) vm.destroy() diff --git a/qemu/tests/interrupt_check.py b/qemu/tests/interrupt_check.py index 9cf892e8d9..74d4ae8bff 100644 --- a/qemu/tests/interrupt_check.py +++ b/qemu/tests/interrupt_check.py @@ -1,9 +1,6 @@ import time -from virttest import error_context -from virttest import utils_net -from virttest import utils_disk -from virttest import utils_misc +from virttest import error_context, utils_disk, utils_misc, utils_net @error_context.context_aware @@ -24,19 +21,23 @@ def get_irq_info(): """ Get interrupt information using specified pattern """ - return session.cmd_output("grep '%s' /proc/interrupts" % irq_pattern, - print_func=test.log.info).split() + return session.cmd_output( + f"grep '{irq_pattern}' /proc/interrupts", print_func=test.log.info + ).split() def analyze_interrupts(irq_before_test, irq_after_test): """ Compare interrupt information and analyze them """ error_context.context("Analyzing interrupts", test.log.info) - filtered_result = [x for x in zip(irq_before_test, irq_after_test) - if x[0] != x[1]] + filtered_result = [ + x for x in zip(irq_before_test, irq_after_test) if x[0] != x[1] + ] if not filtered_result: - test.fail("Number of interrupts on the CPUs have not changed after" - " test execution") + test.fail( + "Number of interrupts on the CPUs have not changed after" + " test execution" + ) elif any([int(x[1]) < int(x[0]) for x in filtered_result]): test.fail("The number of interrupts has decreased") @@ -78,19 +79,21 @@ def hotplug_test(): for vcpu_device in vcpu_devices: vm.hotplug_vcpu_device(vcpu_device) if not utils_misc.wait_for( - lambda: vm.get_cpu_count() == current_cpu + len(vcpu_devices), - 30): + lambda: vm.get_cpu_count() == current_cpu + len(vcpu_devices), 30 + ): test.fail("Actual number of guest CPUs is not equal to expected") guest_cpus = vm.get_cpu_count() irq_info_after_hotplug = get_irq_info() - if (len(irq_info_after_hotplug) != (len(irq_info_before_test) - + len(vcpu_devices))): - test.fail("Number of CPUs for %s is incorrect" % irq_pattern) - - irq_num_before_hotplug = irq_info_before_test[1: (current_cpu+1)] - irq_num_after_hotplug = irq_info_after_hotplug[1: (guest_cpus+1)] - if 
(sum(map(int, irq_num_after_hotplug)) <= - sum(map(int, irq_num_before_hotplug))): + if len(irq_info_after_hotplug) != ( + len(irq_info_before_test) + len(vcpu_devices) + ): + test.fail(f"Number of CPUs for {irq_pattern} is incorrect") + + irq_num_before_hotplug = irq_info_before_test[1 : (current_cpu + 1)] + irq_num_after_hotplug = irq_info_after_hotplug[1 : (guest_cpus + 1)] + if sum(map(int, irq_num_after_hotplug)) <= sum( + map(int, irq_num_before_hotplug) + ): test.fail("Abnormal number of interrupts") def standby_test(): @@ -107,11 +110,16 @@ def standby_test(): guest_ip = vm.get_address() guest_ifname = utils_net.get_linux_ifname(session, vm.get_mac_address(0)) irq_pattern = params["irq_pattern"].format(ifname=guest_ifname) - test_execution = {"dd": dd_test, "ping": ping_test, - "hotplug": hotplug_test, "standby": standby_test} - - error_context.base_context("Get interrupt info before executing test", - test.log.info) + test_execution = { + "dd": dd_test, + "ping": ping_test, + "hotplug": hotplug_test, + "standby": standby_test, + } + + error_context.base_context( + "Get interrupt info before executing test", test.log.info + ) irq_info_before_test = get_irq_info() error_context.context("Execute test to verify increased interrupts") diff --git a/qemu/tests/invalid_cpu_device_hotplug.py b/qemu/tests/invalid_cpu_device_hotplug.py index 3596763dc7..8be5b1f877 100644 --- a/qemu/tests/invalid_cpu_device_hotplug.py +++ b/qemu/tests/invalid_cpu_device_hotplug.py @@ -1,13 +1,10 @@ -import re import json +import re -from virttest import arch -from virttest import utils_misc -from virttest import error_context +from virttest import arch, error_context, utils_misc from virttest.qemu_monitor import QMPCmdError -from provider import cpu_utils -from provider import win_wora +from provider import cpu_utils, win_wora @error_context.context_aware @@ -23,8 +20,9 @@ def run(test, params, env): :param params: Dictionary with the test parameters :param env: Dictionary with test environment. 
""" - not_match_err = ("Hotplug %s failed but the error description does not " - "match: '%s'") + not_match_err = ( + "Hotplug %s failed but the error description does not " "match: '%s'" + ) expected_info = "Hotplug %s failed as expected, error description: '%s'" hotplug_pass_err = "Still able to hotplug %s via qmp" @@ -35,10 +33,12 @@ def hotplug_inuse_vcpu(): for vcpu_prop in vcpu_props: main_vcpu_props.setdefault(vcpu_prop, "0") vm.params["vcpu_props_main_vcpu"] = json.dumps(main_vcpu_props) - error_context.context("Define the invalid vcpu: %s" % "main_vcpu", - test.log.info) - in_use_vcpu_dev = vm.devices.vcpu_device_define_by_params(vm.params, - "main_vcpu") + error_context.context( + "Define the invalid vcpu: {}".format("main_vcpu"), test.log.info + ) + in_use_vcpu_dev = vm.devices.vcpu_device_define_by_params( + vm.params, "main_vcpu" + ) try: error_context.context("Hotplug the main vcpu", test.log.info) in_use_vcpu_dev.enable(vm.monitor) @@ -51,27 +51,26 @@ def hotplug_inuse_vcpu(): test.fail(hotplug_pass_err % "main vcpu") # New vCPU - error_context.context("hotplug vcpu device: %s" % vcpu_device_id, - test.log.info) + error_context.context(f"hotplug vcpu device: {vcpu_device_id}", test.log.info) vm.hotplug_vcpu_device(vcpu_device_id) if not utils_misc.wait_for( - lambda: cpu_utils.check_if_vm_vcpus_match_qemu(vm), 10): + lambda: cpu_utils.check_if_vm_vcpus_match_qemu(vm), 10 + ): test.fail("Actual number of guest CPUs is not equal to expected") # Duplicate vCPU - duplicate_vcpu_params = vm.devices.get_by_qid( - vcpu_device_id)[0].params.copy() + duplicate_vcpu_params = vm.devices.get_by_qid(vcpu_device_id)[0].params.copy() del duplicate_vcpu_params["id"] - vm.params["vcpu_props_duplicate_vcpu"] = json.dumps( - duplicate_vcpu_params) + vm.params["vcpu_props_duplicate_vcpu"] = json.dumps(duplicate_vcpu_params) duplicate_vcpu_dev = vm.devices.vcpu_device_define_by_params( - vm.params, "duplicate_vcpu") + vm.params, "duplicate_vcpu" + ) try: error_context.context("hotplug the duplicate vcpu", test.log.info) duplicate_vcpu_dev.enable(vm.monitor) except QMPCmdError as err: dev_count = maxcpus - if 'ppc64' in arch_name: + if "ppc64" in arch_name: dev_count //= threads qmp_desc = err.data["desc"] if not re.match(error_desc.format(str(dev_count - 1)), qmp_desc): @@ -110,14 +109,18 @@ def hotplug_outofrange_vcpu(): vcpu_device.enable(vm.monitor) except QMPCmdError as err: qmp_desc = err.data["desc"] - if error_desc.format(outofrange_vcpu_num, vcpu_props[0], - (vcpu_bus.addr_lengths[0] - 1)) != qmp_desc: + if ( + error_desc.format( + outofrange_vcpu_num, vcpu_props[0], (vcpu_bus.addr_lengths[0] - 1) + ) + != qmp_desc + ): test.error(not_match_err % ("out_of_range vcpu", qmp_desc)) test.log.info(expected_info, "out_of_range vcpu", qmp_desc) else: test.fail(hotplug_pass_err % "out_of_range vcpu") - arch_name = params.get('vm_arch_name', arch.ARCH) + arch_name = params.get("vm_arch_name", arch.ARCH) vcpu_device_id = params["vcpu_devices"] error_desc = params["error_desc"] vm = env.get_vm(params["main_vm"]) @@ -127,19 +130,22 @@ def hotplug_outofrange_vcpu(): if params.get_boolean("workaround_need"): win_wora.modify_driver(params, session) - error_context.context("Check the number of guest CPUs after startup", - test.log.info) + error_context.context("Check the number of guest CPUs after startup", test.log.info) if not cpu_utils.check_if_vm_vcpus_match_qemu(vm): - test.error("The number of guest CPUs is not equal to the qemu command " - "line configuration") + test.error( + "The number of 
guest CPUs is not equal to the qemu command " + "line configuration" + ) - vcpu_bus = vm.devices.get_buses({'aobject': 'vcpu'})[0] + vcpu_bus = vm.devices.get_buses({"aobject": "vcpu"})[0] vcpu_props = vcpu_bus.addr_items maxcpus = vm.cpuinfo.maxcpus threads = vm.cpuinfo.threads - invalid_hotplug_tests = {"in_use_vcpu": hotplug_inuse_vcpu, - "invalid_vcpu": hotplug_invalid_vcpu, - "out_of_range_vcpu": hotplug_outofrange_vcpu} + invalid_hotplug_tests = { + "in_use_vcpu": hotplug_inuse_vcpu, + "invalid_vcpu": hotplug_invalid_vcpu, + "out_of_range_vcpu": hotplug_outofrange_vcpu, + } invalid_hotplug_tests[params["execute_test"]]() session.close() diff --git a/qemu/tests/invalid_parameter.py b/qemu/tests/invalid_parameter.py index 9fbf32dc84..63e7a5fc26 100644 --- a/qemu/tests/invalid_parameter.py +++ b/qemu/tests/invalid_parameter.py @@ -1,5 +1,4 @@ -from virttest import env_process -from virttest import error_context +from virttest import env_process, error_context @error_context.context_aware @@ -14,7 +13,7 @@ def run(test, params, env): :param env: Dictionary with test environment. """ vm_name = params["main_vm"] - params['start_vm'] = "yes" + params["start_vm"] = "yes" try: error_context.context("Start guest with invalid parameters.") env_process.preprocess_vm(test, params, env, vm_name) diff --git a/qemu/tests/ioeventfd.py b/qemu/tests/ioeventfd.py index 5a5c0d2460..f187fb7758 100644 --- a/qemu/tests/ioeventfd.py +++ b/qemu/tests/ioeventfd.py @@ -3,13 +3,12 @@ 1. Test ioeventfd under stress. 2. Check the ioeventfd property. """ + import re -from virttest import error_context -from virttest import env_process -from virttest import utils_test -from virttest import qemu_qtree from avocado.utils import process +from virttest import env_process, error_context, qemu_qtree, utils_test + from provider.storage_benchmark import generate_instance from qemu.tests.virtio_serial_file_transfer import transfer_data @@ -59,6 +58,7 @@ def run(test, params, env): :param params: Dictionary with the test parameters :param env: Dictionary with test environment. """ + def _set_ioeventfd_options(): """ Set the ioeventfd options. @@ -67,47 +67,49 @@ def _set_ioeventfd_options(): """ dev_type = params.get("dev_type") if dev_type == "virtio_serial": - params['virtio_serial_extra_params_vs1'] = ioeventfd - dev_id = params.get('dev_id', 'virtio_serial_pci0') - elif params['drive_format'] == 'virtio': - params['blk_extra_params_image1'] = ioeventfd - dev_id = 'image1' - elif params['drive_format'] == 'scsi-hd': - params['bus_extra_params_image1'] = ioeventfd - dev_id = params.get('dev_id', 'virtio_scsi_pci0') + params["virtio_serial_extra_params_vs1"] = ioeventfd + dev_id = params.get("dev_id", "virtio_serial_pci0") + elif params["drive_format"] == "virtio": + params["blk_extra_params_image1"] = ioeventfd + dev_id = "image1" + elif params["drive_format"] == "scsi-hd": + params["bus_extra_params_image1"] = ioeventfd + dev_id = params.get("dev_id", "virtio_scsi_pci0") else: raise ValueError(f"unexpected dev_type: {dev_type}") return dev_id def _dd_test(session): - """ Execute dd testing inside guest. """ - test.log.info('Doing dd testing inside guest.') - test.log.debug(session.cmd(params['dd_cmd'], float(params['stress_timeout']))) + """Execute dd testing inside guest.""" + test.log.info("Doing dd testing inside guest.") + test.log.debug(session.cmd(params["dd_cmd"], float(params["stress_timeout"]))) def _fio_test(session): - """ Execute fio testing inside guest. 
""" - test.log.info('Doing fio testing inside guest.') + """Execute fio testing inside guest.""" + test.log.info("Doing fio testing inside guest.") session = utils_test.qemu.windrv_check_running_verifier( - session, vm, test, params["driver_name"]) - fio = generate_instance(params, vm, 'fio') + session, vm, test, params["driver_name"] + ) + fio = generate_instance(params, vm, "fio") try: - fio.run(params['fio_options'], float(params['stress_timeout'])) + fio.run(params["fio_options"], float(params["stress_timeout"])) finally: fio.clean() def _io_stress_test(): - """ Execute io stress testing inside guest. """ - {'windows': _fio_test, 'linux': _dd_test}[os_type](session) + """Execute io stress testing inside guest.""" + {"windows": _fio_test, "linux": _dd_test}[os_type](session) def _iozone_test(session): - """ Execute iozone testing inside guest. """ - test.log.info('Doing iozone inside guest.') - if os_type == 'windows': + """Execute iozone testing inside guest.""" + test.log.info("Doing iozone inside guest.") + if os_type == "windows": session = utils_test.qemu.windrv_check_running_verifier( - session, vm, test, params["driver_name"]) - iozone = generate_instance(params, vm, 'iozone') + session, vm, test, params["driver_name"] + ) + iozone = generate_instance(params, vm, "iozone") try: - iozone.run(params['iozone_options'], float(params['iozone_timeout'])) + iozone.run(params["iozone_options"], float(params["iozone_timeout"])) finally: iozone.clean() return session @@ -116,71 +118,76 @@ def _check_property(vm, ioeventfd_opt): """ Check the value of ioeventfd by sending "info qtree" in QMP. """ - ioevent_qtree_val = 'true' if 'on' in ioeventfd_opt else 'false' - test.log.info('Execute info qtree in QMP monitor.') + ioevent_qtree_val = "true" if "on" in ioeventfd_opt else "false" + test.log.info("Execute info qtree in QMP monitor.") qtree = qemu_qtree.QtreeContainer() - qtree.parse_info_qtree(vm.monitor.info('qtree')) + qtree.parse_info_qtree(vm.monitor.info("qtree")) for node in qtree.get_nodes(): if isinstance(node, qemu_qtree.QtreeDev) and ( - node.qtree.get('id', None) == dev_id): - if node.qtree.get('ioeventfd', None) is None: - test.fail('The qtree device %s has no property ioeventfd.' - % dev_id) - elif node.qtree['ioeventfd'] == ioevent_qtree_val: + node.qtree.get("id", None) == dev_id + ): + if node.qtree.get("ioeventfd", None) is None: + test.fail(f"The qtree device {dev_id} has no property ioeventfd.") + elif node.qtree["ioeventfd"] == ioevent_qtree_val: test.log.info( - 'The \"%s\" matches with qtree device \"%s\"(%s).', - ioeventfd_opt, dev_id, ioevent_qtree_val) + 'The "%s" matches with qtree device "%s"(%s).', + ioeventfd_opt, + dev_id, + ioevent_qtree_val, + ) break else: test.fail( - 'The \"%s\" mismatches with qtree device \"%s\"(%s).' % - (ioeventfd_opt, dev_id, ioevent_qtree_val)) + f'The "{ioeventfd_opt}" mismatches with qtree device "{dev_id}"({ioevent_qtree_val}).' + ) else: - test.error('No such \"%s\" qtree device.' % dev_id) + test.error(f'No such "{dev_id}" qtree device.') def _get_ioeventfds(ioeventfd_opt): """ Get the number of ioeventfds inside host. 
""" - test.log.info('Check the \"%s\" via /proc/$PID/fd/.', ioeventfd) - dst_log = 'off' if 'off' in ioeventfd_opt else 'on' - cmd = 'ls -l /proc/$(pgrep qemu-kvm)/fd > /tmp/{0}; cat /tmp/{0}'.format(dst_log) - test.log.debug('Running \'%s\'', cmd) + test.log.info('Check the "%s" via /proc/$PID/fd/.', ioeventfd) + dst_log = "off" if "off" in ioeventfd_opt else "on" + cmd = f"ls -l /proc/$(pgrep qemu-kvm)/fd > /tmp/{dst_log}; cat /tmp/{dst_log}" + test.log.debug("Running '%s'", cmd) s, o = process.getstatusoutput(cmd) test.log.debug(o) if s: - test.error('Failed to get the number of event fd.\n%s' % o) + test.error(f"Failed to get the number of event fd.\n{o}") def _compare_ioeventfds(): """ Compare fd number of ioeventfd=on between ioeventfd=off """ error_context.context( - 'Compare the output of \'ls -l /proc/$PID/fd/\'.', test.log.info) - cmd = 'grep -c eventfd /tmp/off /tmp/on;rm -rf /tmp/off /tmp/on' - test.log.debug('Running \'%s\'', cmd) + "Compare the output of 'ls -l /proc/$PID/fd/'.", test.log.info + ) + cmd = "grep -c eventfd /tmp/off /tmp/on;rm -rf /tmp/off /tmp/on" + test.log.debug("Running '%s'", cmd) s, o = process.getstatusoutput(cmd) test.log.debug(o) if s: - test.error('Failed to compare the outputs.\n%s' % s) - nums = re.findall(r'\w+:(\d+)', o, re.M) + test.error(f"Failed to compare the outputs.\n{s}") + nums = re.findall(r"\w+:(\d+)", o, re.M) if int(nums[0]) > int(nums[1]): - test.fail('The number of event fds with \"off\" ' - 'should be less than the one with \"on\".') - test.log.info('The number of event fds with \"off\" ' - 'is less than the one with \"on\".') + test.fail( + 'The number of event fds with "off" ' + 'should be less than the one with "on".' + ) + test.log.info( + 'The number of event fds with "off" ' 'is less than the one with "on".' + ) - params['start_vm'] = 'yes' - os_type = params['os_type'] + params["start_vm"] = "yes" + os_type = params["os_type"] timeout = float(params.get("login_timeout", 240)) - ioeventfds = (params['orig_ioeventfd'], params['new_ioeventfd']) + ioeventfds = (params["orig_ioeventfd"], params["new_ioeventfd"]) for ioeventfd in ioeventfds: dev_id = _set_ioeventfd_options() # Disable iothread when ioeventfd=off - if ioeventfd == "ioeventfd=off" and params.get( - "iothread_scheme"): - error_context.context("Disable iothread under %s" % ioeventfd, - test.log.info) + if ioeventfd == "ioeventfd=off" and params.get("iothread_scheme"): + error_context.context(f"Disable iothread under {ioeventfd}", test.log.info) clone_params = params.copy() clone_params["iothread_scheme"] = None clone_params["image_iothread"] = None @@ -188,25 +195,26 @@ def _compare_ioeventfds(): else: clone_params = params - error_context.context('Boot a guest with "%s".' 
% ioeventfd, test.log.info) + error_context.context(f'Boot a guest with "{ioeventfd}".', test.log.info) env_process.preprocess_vm(test, clone_params, env, params["main_vm"]) vm = env.get_vm(params["main_vm"]) vm.verify_alive() session = vm.wait_for_login(timeout=timeout) - if params.get('io_stress', 'no') == 'yes': + if params.get("io_stress", "no") == "yes": _io_stress_test() else: _check_property(vm, ioeventfd) _get_ioeventfds(ioeventfd) - if params.get('reboot', 'no') == 'yes': - error_context.context('Reboot the guest.', test.log.info) + if params.get("reboot", "no") == "yes": + error_context.context("Reboot the guest.", test.log.info) session = _iozone_test(vm.reboot(session, timeout=timeout)) - if params.get('data_transfer', 'no') == 'yes': - if os_type == 'windows': + if params.get("data_transfer", "no") == "yes": + if os_type == "windows": session = utils_test.qemu.windrv_check_running_verifier( - session, vm, test, params["driver_name"]) + session, vm, test, params["driver_name"] + ) transfer_data(params, vm) session.close() vm.destroy(gracefully=True) - if params.get('compare_fd', 'no') == 'yes': + if params.get("compare_fd", "no") == "yes": _compare_ioeventfds() diff --git a/qemu/tests/iozone_linux.py b/qemu/tests/iozone_linux.py index 6c170b0858..0ac5bada57 100644 --- a/qemu/tests/iozone_linux.py +++ b/qemu/tests/iozone_linux.py @@ -1,8 +1,7 @@ import re -from virttest import error_context -from virttest import utils_disk -from virttest import utils_misc +from virttest import error_context, utils_disk, utils_misc + from provider.storage_benchmark import generate_instance @@ -16,11 +15,12 @@ def run(test, params, env): :param params: Dictionary with the test parameters :param env: Dictionary with test environment. """ + def _get_data_disks(): - """ Get the data disks by serial or wwn options. """ + """Get the data disks by serial or wwn options.""" disks = {} for data_image in params["images"].split()[1:]: - extra_params = params.get("blk_extra_params_%s" % data_image, '') + extra_params = params.get(f"blk_extra_params_{data_image}", "") match = re.search(r"(serial|wwn)=(\w+)", extra_params, re.M) if match: drive_id = match.group(2) @@ -28,57 +28,60 @@ def _get_data_disks(): continue drive_path = utils_misc.get_linux_drive_path(session, drive_id) if not drive_path: - test.error("Failed to get '%s' drive path" % data_image) + test.error(f"Failed to get '{data_image}' drive path") disks[drive_path[5:]] = data_image return disks def _get_mounted_points(did, disks, mount_info): - """ Get the mounted points. """ + """Get the mounted points.""" points = [] - for id in re.finditer(r'(%s\d+)' % did, ' '.join(disks)): - s = re.search(r'/dev/%s\s+(\S+)\s+' % id.group(1), mount_info, re.M) + for id in re.finditer(rf"({did}\d+)", " ".join(disks)): + s = re.search(rf"/dev/{id.group(1)}\s+(\S+)\s+", mount_info, re.M) if s: points.append(s.group(1)) return points def _wait_for_procs_done(timeout=1800): - """ Wait all the processes are done. """ + """Wait all the processes are done.""" if not utils_misc.wait_for( - lambda: 'iozone' not in session.cmd_output('pgrep -xl iozone'), - timeout, step=3.0): - test.error('Not all iozone processes done in %s sec.' 
% timeout) + lambda: "iozone" not in session.cmd_output("pgrep -xl iozone"), + timeout, + step=3.0, + ): + test.error(f"Not all iozone processes done in {timeout} sec.") - iozone_test_dir = params.get('iozone_test_dir', '/home') - iozone_cmd_options = params['iozone_cmd_options'] + iozone_test_dir = params.get("iozone_test_dir", "/home") + iozone_cmd_options = params["iozone_cmd_options"] iozone_timeout = float(params.get("iozone_timeout", 1800)) - n_partitions = params.get('partitions_num', 1) - fstype = params.get('fstype', 'xfs') - labeltype = params.get('labeltype', utils_disk.PARTITION_TABLE_TYPE_GPT) + n_partitions = params.get("partitions_num", 1) + fstype = params.get("fstype", "xfs") + labeltype = params.get("labeltype", utils_disk.PARTITION_TABLE_TYPE_GPT) vm = env.get_vm(params["main_vm"]) vm.verify_alive() session = vm.wait_for_login(timeout=float(params.get("login_timeout", 360))) _wait_for_procs_done() error_context.context("Running IOzone command on guest.") - iozone = generate_instance(params, vm, 'iozone') + iozone = generate_instance(params, vm, "iozone") try: dids = _get_data_disks() if dids: - mount_info = session.cmd_output_safe('cat /proc/mounts | grep \'/dev/\'') + mount_info = session.cmd_output_safe("cat /proc/mounts | grep '/dev/'") disks = utils_disk.get_linux_disks(session, True) for did, image_name in dids.items(): - size = params.get('image_size_%s' % image_name) - start = params.get('image_start_%s' % image_name, "0M") + size = params.get(f"image_size_{image_name}") + start = params.get(f"image_start_{image_name}", "0M") mounted_points = _get_mounted_points(did, disks, mount_info) if not mounted_points: mounted_points = utils_disk.configure_empty_linux_disk( - session, did, size, start, n_partitions, fstype, labeltype) + session, did, size, start, n_partitions, fstype, labeltype + ) for mounted_point in mounted_points: iozone.run(iozone_cmd_options % mounted_point, iozone_timeout) utils_disk.clean_partition_linux(session, did) else: iozone.run(iozone_cmd_options % iozone_test_dir, iozone_timeout) finally: - if params.get('sub_test_shutdown_vm', 'no') == 'no': + if params.get("sub_test_shutdown_vm", "no") == "no": iozone.clean() session.close() diff --git a/qemu/tests/iperf_test.py b/qemu/tests/iperf_test.py index b6ca5bd882..7b878ada70 100644 --- a/qemu/tests/iperf_test.py +++ b/qemu/tests/iperf_test.py @@ -4,12 +4,7 @@ from aexpect import ShellCmdError from avocado.utils import process - -from virttest import data_dir -from virttest import utils_net -from virttest import utils_misc -from virttest import error_context -from virttest import utils_package +from virttest import data_dir, error_context, utils_misc, utils_net, utils_package @error_context.context_aware @@ -33,8 +28,11 @@ def iperf_compile(src_path, dst_path, session=None): """Compile iperf and return its binary file path.""" iperf_version = params["iperf_version"] iperf_source_path = os.path.join(dst_path, iperf_version) - compile_cmd = params["linux_compile_cmd"] % (src_path, dst_path, - iperf_source_path) + compile_cmd = params["linux_compile_cmd"] % ( + src_path, + dst_path, + iperf_source_path, + ) try: if session: test.log.info("Compiling %s in guest...", iperf_version) @@ -46,8 +44,8 @@ def iperf_compile(src_path, dst_path, session=None): test.log.error(err_msg) test.error("Failed to compile iperf") else: - iperf_bin_name = re.sub(r'[-2]', '', iperf_version.split('.')[0]) - return os.path.join(iperf_source_path, 'src', iperf_bin_name) + iperf_bin_name = re.sub(r"[-2]", "", 
iperf_version.split(".")[0]) + return os.path.join(iperf_source_path, "src", iperf_bin_name) def iperf_start(session, iperf_path, options, catch_data): """Start iperf session, analyze result if catch_data.""" @@ -59,8 +57,9 @@ def iperf_start(session, iperf_path, options, catch_data): data_info = session.cmd_output(iperf_cmd, timeout=120) else: test.log.info(info_text, "host", iperf_cmd) - data_info = process.system_output(iperf_cmd, timeout=120, - verbose=False).decode() + data_info = process.system_output( + iperf_cmd, timeout=120, verbose=False + ).decode() except Exception as err_msg: test.log.error(str(err_msg)) test.error("Failed to start iperf session") @@ -72,9 +71,11 @@ def iperf_start(session, iperf_path, options, catch_data): if not parallel_cur: test.fail("iperf client not connected to server") elif parallel_exp and parallel_cur != parallel_exp: - test.fail("Number of parallel threads running(%d) is " - "inconsistent with expectations(%d)" - % (parallel_cur, parallel_exp)) + test.fail( + "Number of parallel threads running(%d) is " + "inconsistent with expectations(%d)" + % (parallel_cur, parallel_exp) + ) test.log.info("iperf client successfully connected to server") def is_iperf_running(name_pattern, session=None): @@ -82,12 +83,13 @@ def is_iperf_running(name_pattern, session=None): check_iperf_cmd = params["check_iperf_cmd"] % name_pattern status = serial_session.cmd_status(check_iperf_cmd) else: - status = process.system("pgrep -f %s" % name_pattern, - ignore_status=True, verbose=False) + status = process.system( + f"pgrep -f {name_pattern}", ignore_status=True, verbose=False + ) return status == 0 def rss_check(): - if os_type == 'linux': + if os_type == "linux": ifname = utils_net.get_linux_ifname(guest_session, vm.get_mac_address()) check_rss_state_cmd = params.get("check_rss_state") output = guest_session.cmd_output(check_rss_state_cmd % ifname) @@ -96,18 +98,18 @@ def rss_check(): test.fail("Rss support for virtio-net driver is bad") else: test.log.info("Rss support for virtio-net driver is works well") - test.log.info('enable rxhash to check network if can works well') + test.log.info("enable rxhash to check network if can works well") enable_rxhash_cmd = params.get("enable_rxhash_cmd") status, output = guest_session.cmd_status_output(enable_rxhash_cmd % ifname) if status != 0: - test.fail("Can not enable rxhash: %s" % output) + test.fail(f"Can not enable rxhash: {output}") else: - test.log.info('Run the command "netkvm-wmi.cmd rss" to collect statistics') rss_test_cmd = utils_misc.set_winutils_letter( - guest_session, params["rss_test_cmd"]) + guest_session, params["rss_test_cmd"] + ) rss_statistics = guest_session.cmd_output(rss_test_cmd) - patterns = r'^((?:Errors)|(?:Misses))=0' + patterns = r"^((?:Errors)|(?:Misses))=0" result = re.findall(patterns, rss_statistics, re.M) if len(result) == 2: test.log.info("Rss support for virtio-net driver is works well") @@ -121,10 +123,10 @@ def rss_check(): iperf_test_duration = int(params["iperf_test_duration"]) iperf_deps_dir = data_dir.get_deps_dir("iperf") host_iperf_file = params["host_iperf_file"] - guest_iperf_file = params.get('guest_iperf_file', host_iperf_file) + guest_iperf_file = params.get("guest_iperf_file", host_iperf_file) host_iperf_src_path = os.path.join(iperf_deps_dir, host_iperf_file) guest_iperf_remote_path = os.path.join(iperf_deps_dir, guest_iperf_file) - guest_iperf_path = params.get('guest_iperf_path', tmp_dir) + guest_iperf_path = params.get("guest_iperf_path", tmp_dir) guest_iperf_src_path = 
os.path.join(guest_iperf_path, guest_iperf_file) vm = env.get_vm(params["main_vm"]) @@ -138,7 +140,7 @@ def rss_check(): guest_ip_addr = vm.get_address() host_iperf_bin = iperf_compile(host_iperf_src_path, tmp_dir) - if os_type == 'linux': + if os_type == "linux": if not utils_package.package_install("gcc-c++", guest_session): test.cancel("Please install gcc-c++ to proceed") guest_iperf__bin = iperf_compile(guest_iperf_src_path, tmp_dir, guest_session) @@ -146,13 +148,15 @@ def rss_check(): guest_iperf__bin = guest_iperf_src_path iperf_deplist = params.get("iperf_deplist") if iperf_deplist: - for d_name in iperf_deplist.split(','): + for d_name in iperf_deplist.split(","): dep_path = os.path.join(data_dir.get_deps_dir("iperf"), d_name) vm.copy_files_to(dep_path, guest_iperf_path) - search_pattern = {'host': host_iperf_bin.replace('src/', 'src/.*'), - 'linux': guest_iperf__bin.replace('src/', 'src/.*'), - 'windows': guest_iperf_file} + search_pattern = { + "host": host_iperf_bin.replace("src/", "src/.*"), + "linux": guest_iperf__bin.replace("src/", "src/.*"), + "windows": guest_iperf_file, + } if params.get("iperf_server") == params["main_vm"]: s_ip = params.get("multicast_addr", guest_ip_addr) @@ -161,12 +165,20 @@ def rss_check(): else: s_ip = params.get("multicast_addr", host_ip_addr) s_info = [search_pattern["host"], s_ip, None, host_iperf_bin] - c_info = [search_pattern[os_type], guest_ip_addr, guest_session, guest_iperf__bin] + c_info = [ + search_pattern[os_type], + guest_ip_addr, + guest_session, + guest_iperf__bin, + ] s_catch_data = params["catch_data"] % (s_info[1], c_info[1]) s_options = params["iperf_server_options"] - c_options = params["iperf_client_options"] % (s_info[1], c_info[1], - iperf_test_duration) + c_options = params["iperf_client_options"] % ( + s_info[1], + c_info[1], + iperf_test_duration, + ) s_info.extend([s_options, s_catch_data]) c_info.extend([c_options, None]) @@ -180,26 +192,33 @@ def rss_check(): bg_client = utils_misc.InterruptedThread(iperf_start, c_start_args) bg_server.start() - if not utils_misc.wait_for(lambda: is_iperf_running(s_info[0], - s_info[2]), 5, 2): + if not utils_misc.wait_for( + lambda: is_iperf_running(s_info[0], s_info[2]), 5, 2 + ): test.error("Failed to start iperf server.") error_context.context("iperf server has started.", test.log.info) bg_client.start() - if not utils_misc.wait_for(lambda: is_iperf_running(c_info[0], - c_info[2]), 5): + if not utils_misc.wait_for(lambda: is_iperf_running(c_info[0], c_info[2]), 5): test.error("Failed to start iperf client.") error_context.context("iperf client has started.", test.log.info) - utils_misc.wait_for(lambda: not is_iperf_running(c_info[0], c_info[2]), - iperf_test_duration, 0, 5, - "Waiting for iperf test to finish.") + utils_misc.wait_for( + lambda: not is_iperf_running(c_info[0], c_info[2]), + iperf_test_duration, + 0, + 5, + "Waiting for iperf test to finish.", + ) bg_server.join(timeout=60) bg_client.join(timeout=60) finally: test.log.info("Cleanup host environment...") if is_iperf_running(search_pattern["host"]): - process.run('pkill -9 -f %s' % search_pattern["host"], - verbose=False, ignore_status=True) - shutil.rmtree(host_iperf_bin.rsplit('/', 2)[0], ignore_errors=True) + process.run( + "pkill -9 -f {}".format(search_pattern["host"]), + verbose=False, + ignore_status=True, + ) + shutil.rmtree(host_iperf_bin.rsplit("/", 2)[0], ignore_errors=True) guest_session.close() serial_session.close() diff --git a/qemu/tests/ipi_x2apic.py b/qemu/tests/ipi_x2apic.py index 
39db03e1ab..1dc511e687 100644
--- a/qemu/tests/ipi_x2apic.py
+++ b/qemu/tests/ipi_x2apic.py
@@ -2,9 +2,7 @@
 import re
 
 import aexpect
-
-from virttest import env_process
-from virttest import error_context
+from virttest import env_process, error_context
 
 
 def get_re_average(opt, re_str):
@@ -43,8 +41,10 @@ def run(test, params, env):
     smp = params.get("smp")
     if int(smp) < 2:
         params["smp"] = 2
-        test.log.warn("This case need at least 2 vcpu, but only 1 specified in"
-                      " configuration. So change the vcpu to 2.")
+        test.log.warning(
+            "This case need at least 2 vcpu, but only 1 specified in"
+            " configuration. So change the vcpu to 2."
+        )
     vm_name = params.get("main_vm")
     error_context.context("Boot guest with x2apic cpu flag.", test.log.info)
     env_process.preprocess_vm(test, params, env, vm_name)
@@ -60,11 +60,11 @@ def run(test, params, env):
     x2apic_check_string = params.get("x2apic_check_string").split(",")
     for check_string in x2apic_check_string:
         if check_string.strip() not in x2apic_output:
-            msg = "%s is not displayed in output" % check_string
+            msg = f"{check_string} is not displayed in output"
             test.fail(msg)
 
     pipetest_cmd = params.get("pipetest_cmd")
-    if session.cmd_status("test -x %s" % pipetest_cmd):
+    if session.cmd_status(f"test -x {pipetest_cmd}"):
         file_link = os.path.join(test.virtdir, "scripts/pipetest.c")
         vm.copy_files_to(file_link, "/tmp/pipetest.c")
         build_pipetest_cmd = params.get("build_pipetest_cmd")
@@ -87,8 +87,7 @@ def run(test, params, env):
     vm.verify_alive()
     session = vm.wait_for_login(timeout=int(params.get("login_timeout", 360)))
     if check_x2apic_cmd:
-        error_context.context("Check x2apic flag in guest after reboot.",
-                              test.log.info)
+        error_context.context("Check x2apic flag in guest after reboot.", test.log.info)
         x2apic_output = session.cmd_output(check_x2apic_cmd).strip()
         test.log.info(x2apic_output)
         if x2apic_output:
@@ -100,14 +99,13 @@ def run(test, params, env):
     except aexpect.ShellTimeoutError as e:
         o = e
     val2 = get_re_average(o, re_str)
-    error_context.context("Compare the output of pipetest script.",
-                          test.log.info)
+    error_context.context("Compare the output of pipetest script.", test.log.info)
    if val1 >= val2:
         msg = "Overhead of IPI with x2apic is not smaller than that without"
-        msg += " x2apic. pipetest script output with x2apic: %s. " % val1
-        msg += "pipetest script output without x2apic: %s" % val2
+        msg += f" x2apic. pipetest script output with x2apic: {val1}. "
+        msg += f"pipetest script output without x2apic: {val2}"
         test.fail(msg)
-    msg = "pipetest script output with x2apic: %s. " % val1
-    msg += "pipetest script output without x2apic: %s" % val2
+    msg = f"pipetest script output with x2apic: {val1}. "
+    msg += f"pipetest script output without x2apic: {val2}"
     test.log.info(msg)
     session.close()
diff --git a/qemu/tests/kdump_with_stress.py b/qemu/tests/kdump_with_stress.py
index b6bbd6d2ba..1ffc02886b 100644
--- a/qemu/tests/kdump_with_stress.py
+++ b/qemu/tests/kdump_with_stress.py
@@ -1,7 +1,4 @@
-from virttest import error_context
-from virttest import utils_test
-from virttest import utils_misc
-from virttest import data_dir
+from virttest import data_dir, error_context, utils_misc, utils_test
 
 from generic.tests import kdump
 
@@ -20,6 +17,7 @@ def run(test, params, env):
     :param params: Dictionary with the test parameters
     :param env: Dictionary with test environment.
     """
+
     def install_stress_app(session):
         """
         Install stress app in guest.
@@ -27,7 +25,7 @@ def install_stress_app(session): stress_path = data_dir.get_deps_dir("stress") stress_guest_path = params["tmp_dir"] test.log.info("Copy stress package to guest.") - session.cmd_status_output("mkdir -p %s" % stress_guest_path) + session.cmd_status_output(f"mkdir -p {stress_guest_path}") vm.copy_files_to(stress_path, stress_guest_path) session.cmd(params["install_cmd"]) @@ -47,9 +45,11 @@ def start_stress(session): bg = "" bg_stress_test = params.get("run_bgstress") - bg = utils_misc.InterruptedThread(utils_test.run_virt_sub_test, - (test, params, env), - {"sub_type": bg_stress_test}) + bg = utils_misc.InterruptedThread( + utils_test.run_virt_sub_test, + (test, params, env), + {"sub_type": bg_stress_test}, + ) bg.start() if stress_type == "io": @@ -59,8 +59,9 @@ def start_stress(session): test.log.info("Launch stress app in guest with command: '%s'", cmd) session.sendline(cmd) - running = utils_misc.wait_for(lambda: stress_running(session), - timeout=150, step=5) + running = utils_misc.wait_for( + lambda: stress_running(session), timeout=150, step=5 + ) if not running: test.error("Stress isn't running") @@ -83,18 +84,20 @@ def stress_running(session): def_kdump_enable_cmd = "chkconfig kdump on && service kdump restart" kdump_enable_cmd = params.get("kdump_enable_cmd", def_kdump_enable_cmd) def_crash_kernel_prob_cmd = "grep -q 1 /sys/kernel/kexec_crash_loaded" - crash_kernel_prob_cmd = params.get("crash_kernel_prob_cmd", - def_crash_kernel_prob_cmd) + crash_kernel_prob_cmd = params.get( + "crash_kernel_prob_cmd", def_crash_kernel_prob_cmd + ) - session = kdump.kdump_enable(vm, vm.name, - crash_kernel_prob_cmd, kernel_param_cmd, - kdump_enable_cmd, timeout) + session = kdump.kdump_enable( + vm, vm.name, crash_kernel_prob_cmd, kernel_param_cmd, kdump_enable_cmd, timeout + ) try: start_stress(session) - error_context.context("Kdump Testing, force the Linux kernel to crash", - test.log.info) + error_context.context( + "Kdump Testing, force the Linux kernel to crash", test.log.info + ) crash_cmd = params.get("crash_cmd", "echo c > /proc/sysrq-trigger") if crash_cmd == "nmi": kdump.crash_test(test, vm, None, crash_cmd, timeout) diff --git a/qemu/tests/kernbench.py b/qemu/tests/kernbench.py index ffb1082330..78f937b70f 100644 --- a/qemu/tests/kernbench.py +++ b/qemu/tests/kernbench.py @@ -1,9 +1,7 @@ import os import re -from avocado.utils import cpu -from avocado.utils import process - +from avocado.utils import cpu, process from virttest import env_process @@ -19,9 +17,10 @@ def run(test, params, env): :param params: Dictionary with the test parameters :param env: Dictionary with test environment. 
""" + def download_if_not_exists(): if not os.path.exists(file_name): - cmd = "wget -t 10 -c -P %s %s" % (tmp_dir, file_link) + cmd = f"wget -t 10 -c -P {tmp_dir} {file_link}" process.system(cmd) def cmd_status_output(cmd, timeout=360): @@ -36,14 +35,14 @@ def cmd_status_output(cmd, timeout=360): def check_ept(): output = process.system_output("grep 'flags' /proc/cpuinfo") - flags = output.splitlines()[0].split(':')[1].split() + flags = output.splitlines()[0].split(":")[1].split() need_ept = params.get("need_ept", "no") - if 'ept' not in flags and "yes" in need_ept: + if "ept" not in flags and "yes" in need_ept: test.cancel("This test requires a host that supports EPT") - elif 'ept' in flags and "no" in need_ept: + elif "ept" in flags and "no" in need_ept: cmd = "modprobe -r kvm_intel && modprobe kvm_intel ept=0" process.system(cmd, timeout=100, shell=True) - elif 'ept' in flags and "yes" in need_ept: + elif "ept" in flags and "yes" in need_ept: cmd = "modprobe -r kvm_intel && modprobe kvm_intel ept=1" process.system(cmd, timeout=100, shell=True) @@ -56,17 +55,19 @@ def install_gcc(): libgcc_link = params.get("libgcc_link") (s, o) = cmd_status_output(cmd) if s: - cmd = "rpm -ivh %s --nodeps; rpm -ivh %s --nodeps; rpm -ivh %s"\ - " --nodeps; rpm -ivh %s --nodeps" % (libgomp_link, - libgcc_link, cpp_link, gcc_link) + cmd = ( + f"rpm -ivh {libgomp_link} --nodeps; rpm -ivh {libgcc_link} --nodeps; rpm -ivh {cpp_link}" + f" --nodeps; rpm -ivh {gcc_link} --nodeps" + ) else: gcc = o.splitlines()[0].strip() if gcc in gcc_link: - cmd = "rpm -e %s && rpm -ivh %s" % (gcc, gcc_link) + cmd = f"rpm -e {gcc} && rpm -ivh {gcc_link}" else: - cmd = "rpm -ivh %s --nodeps; rpm -ivh %s --nodeps; rpm -ivh"\ - " %s --nodeps; rpm -ivh %s --nodeps" % (libgomp_link, - libgcc_link, cpp_link, gcc_link) + cmd = ( + f"rpm -ivh {libgomp_link} --nodeps; rpm -ivh {libgcc_link} --nodeps; rpm -ivh" + f" {cpp_link} --nodeps; rpm -ivh {gcc_link} --nodeps" + ) (s, o) = cmd_status_output(cmd) if s: test.log.debug("Fail to install gcc.output:%s", o) @@ -76,14 +77,14 @@ def record_result(result): (m_value, s_value) = re.findall(re_result, result)[0] s_value = float(m_value) * 60 + float(s_value) shortname = params.get("shortname") - result_str = "%s: %ss\n" % (shortname, s_value) + result_str = f"{shortname}: {s_value}s\n" result_file = params.get("result_file") f1 = open(result_file, "a+") result = f1.read() result += result_str f1.write(result_str) f1.close() - open(os.path.basename(result_file), 'w').write(result) + open(os.path.basename(result_file), "w").write(result) test.log.info("Test result got from %s:\n%s", result_file, result) test_type = params.get("test_type") @@ -98,15 +99,14 @@ def record_result(result): env_process.preprocess_vm(test, params, env, vm_name) vm = env.get_vm(params["main_vm"]) vm.verify_alive() - session = vm.wait_for_login( - timeout=int(params.get("login_timeout", 360))) + session = vm.wait_for_login(timeout=int(params.get("login_timeout", 360))) # Create tmp folder and download files if need. 
if not os.path.exists(tmp_dir): - process.system("mkdir %s" % tmp_dir) + process.system(f"mkdir {tmp_dir}") files = params.get("files_need").split() for file in files: - file_link = params.get("%s_link" % file) + file_link = params.get(f"{file}_link") file_name = os.path.join(tmp_dir, os.path.basename(file_link)) download_if_not_exists() @@ -121,7 +121,7 @@ def record_result(result): pre_cmd = params.get("pre_cmd") (s, o) = cmd_status_output(pre_cmd, timeout=cmd_timeout) if s: - test.error("Fail command:%s\nOutput: %s" % (pre_cmd, o)) + test.error(f"Fail command:{pre_cmd}\nOutput: {o}") if "guest" in test_type: cpu_num = params.get("smp") @@ -131,7 +131,7 @@ def record_result(result): test.log.info("Start making the kernel ....") (s, o) = cmd_status_output(test_cmd, timeout=cmd_timeout) if s: - test.error("Fail command:%s\n Output:%s" % (test_cmd, o)) + test.error(f"Fail command:{test_cmd}\n Output:{o}") else: test.log.info("Output for command %s is:\n %s", test_cmd, o) record_result(o) diff --git a/qemu/tests/kernel_install.py b/qemu/tests/kernel_install.py index 738b381f93..8d60ceb124 100644 --- a/qemu/tests/kernel_install.py +++ b/qemu/tests/kernel_install.py @@ -1,11 +1,7 @@ import os -from avocado.utils import aurl -from avocado.utils import download - -from virttest import error_context -from virttest import utils_test -from virttest import data_dir +from avocado.utils import aurl, download +from virttest import data_dir, error_context, utils_test CLIENT_TEST = "kernelinstall" @@ -27,7 +23,7 @@ def run(test, params, env): :param params: Dictionary with the test parameters :param env: Dictionary with test environment. """ - sub_test_path = os.path.join(test.bindir, "../%s" % CLIENT_TEST) + sub_test_path = os.path.join(test.bindir, f"../{CLIENT_TEST}") _tmp_file_list = [] _tmp_params_dict = {} @@ -46,7 +42,7 @@ def _save_bootloader_config(session): try: default_kernel = session.cmd_output("grubby --default-kernel") except Exception as e: - test.log.warn("Save grub config failed: '%s'", e) + test.log.warning("Save grub config failed: '%s'", e) return default_kernel @@ -54,22 +50,23 @@ def _restore_bootloader_config(session, default_kernel): error_context.context("Restore the grub to old version") if not default_kernel: - test.log.warn("Could not get previous grub config, do noting.") + test.log.warning("Could not get previous grub config, do noting.") return - cmd = "grubby --set-default=%s" % default_kernel.strip() + cmd = f"grubby --set-default={default_kernel.strip()}" try: session.cmd(cmd) except Exception as e: - test.error("Restore grub failed: '%s'" % e) + test.error(f"Restore grub failed: '{e}'") def _clean_up_tmp_files(file_list): for f in file_list: try: os.unlink(f) except Exception as e: - test.log.warn("Could remove tmp file '%s', error message: '%s'", - f, e) + test.log.warning( + "Could remove tmp file '%s', error message: '%s'", f, e + ) def _build_params(param_str, default_value=""): param = _tmp_params_dict.get(param_str) @@ -86,8 +83,7 @@ def _build_params(param_str, default_value=""): timeout = float(params.get("login_timeout", 240)) session = vm.wait_for_login(timeout=timeout) - test.log.info("Guest kernel before install: %s", - session.cmd('uname -a').strip()) + test.log.info("Guest kernel before install: %s", session.cmd("uname -a").strip()) error_context.context("Save current default kernel information") default_kernel = _save_bootloader_config(session) @@ -104,38 +100,38 @@ def _build_params(param_str, default_value=""): sub_test_params = {} # rpm - 
sub_test_params.update(_build_params('kernel_rpm_path')) - sub_test_params.update(_build_params('kernel_deps_rpms')) + sub_test_params.update(_build_params("kernel_rpm_path")) + sub_test_params.update(_build_params("kernel_deps_rpms")) # koji - sub_test_params.update(_build_params('kernel_dep_pkgs')) - sub_test_params.update(_build_params('kernel_sub_pkgs')) - sub_test_params.update(_build_params('kernel_koji_tag')) - sub_test_params.update(_build_params('need_reboot')) + sub_test_params.update(_build_params("kernel_dep_pkgs")) + sub_test_params.update(_build_params("kernel_sub_pkgs")) + sub_test_params.update(_build_params("kernel_koji_tag")) + sub_test_params.update(_build_params("need_reboot")) # git - sub_test_params.update(_build_params('kernel_git_repo')) - sub_test_params.update(_build_params('kernel_git_repo_base')) - sub_test_params.update(_build_params('kernel_git_branch')) - sub_test_params.update(_build_params('kernel_git_commit')) - sub_test_params.update(_build_params('kernel_patch_list')) - sub_test_params.update(_build_params('kernel_config')) - sub_test_params.update(_build_params('kernel_config_list')) + sub_test_params.update(_build_params("kernel_git_repo")) + sub_test_params.update(_build_params("kernel_git_repo_base")) + sub_test_params.update(_build_params("kernel_git_branch")) + sub_test_params.update(_build_params("kernel_git_commit")) + sub_test_params.update(_build_params("kernel_patch_list")) + sub_test_params.update(_build_params("kernel_config")) + sub_test_params.update(_build_params("kernel_config_list")) # src - sub_test_params.update(_build_params('kernel_src_pkg')) - sub_test_params.update(_build_params('kernel_config')) - sub_test_params.update(_build_params('kernel_patch_list')) + sub_test_params.update(_build_params("kernel_src_pkg")) + sub_test_params.update(_build_params("kernel_config")) + sub_test_params.update(_build_params("kernel_patch_list")) - tag = params.get('kernel_tag') + tag = params.get("kernel_tag") error_context.context("Generate control file for kernel install test") # Generate control file from parameters control_base = "params = %s\n" control_base += "job.run_test('kernelinstall'" - control_base += ", install_type='%s'" % install_type + control_base += f", install_type='{install_type}'" control_base += ", params=params" if install_type == "tar" and tag: - control_base += ", tag='%s'" % tag + control_base += f", tag='{tag}'" control_base += ")" control_dir = os.path.join(data_dir.get_root_dir(), "shared", "control") test_control_file = "kernel_install.control" @@ -147,15 +143,16 @@ def _build_params(param_str, default_value=""): fd.write(control_str) fd.close() _tmp_file_list.append(os.path.abspath(test_control_path)) - except IOError as e: + except OSError as e: _clean_up_tmp_files(_tmp_file_list) - test.error("Fail to Generate control file, error message:\n '%s'" % e) + test.error(f"Fail to Generate control file, error message:\n '{e}'") params["test_control_file_install"] = test_control_file error_context.context("Launch kernel installation test in guest") - utils_test.run_virt_sub_test(test, params, env, - sub_type="autotest_control", tag="install") + utils_test.run_virt_sub_test( + test, params, env, sub_type="autotest_control", tag="install" + ) if params.get("need_reboot", "yes") == "yes": error_context.context("Reboot guest after kernel is installed") @@ -172,11 +169,11 @@ def _build_params(param_str, default_value=""): sub_test = params.get("sub_test") tag = params.get("sub_test_tag", "run") try: - 
utils_test.run_virt_sub_test(test, params, env, - sub_type=sub_test, tag=tag) + utils_test.run_virt_sub_test(test, params, env, sub_type=sub_test, tag=tag) except Exception as e: - test.log.error("Fail to run sub_test '%s', error message: '%s'", - sub_test, e) + test.log.error( + "Fail to run sub_test '%s', error message: '%s'", sub_test, e + ) if params.get("restore_defaut_kernel", "no") == "yes": # Restore grub @@ -187,13 +184,11 @@ def _build_params(param_str, default_value=""): except Exception as e: _clean_up_tmp_files(_tmp_file_list) session.close() - test.fail("Fail to restore to default kernel," - " error message:\n '%s'" % e) + test.fail("Fail to restore to default kernel," f" error message:\n '{e}'") vm.reboot() session = vm.wait_for_login(timeout=timeout) - test.log.info("Guest kernel after install: %s", - session.cmd('uname -a').strip()) + test.log.info("Guest kernel after install: %s", session.cmd("uname -a").strip()) # Finally, let me clean up the tmp files. _clean_up_tmp_files(_tmp_file_list) diff --git a/qemu/tests/kexec.py b/qemu/tests/kexec.py index d255fb60e4..fcfd6b0dcd 100644 --- a/qemu/tests/kexec.py +++ b/qemu/tests/kexec.py @@ -32,9 +32,10 @@ def install_new_kernel(): try: # pylint: disable=E0611 from qemu.tests import rh_kernel_update + rh_kernel_update.run_rh_kernel_update(test, params, env) except Exception as detail: - test.error("Failed to install a new kernel in guest: %s" % detail) + test.error(f"Failed to install a new kernel in guest: {detail}") vm = env.get_vm(params["main_vm"]) vm.verify_alive() @@ -68,9 +69,8 @@ def install_new_kernel(): if cur_kernel_version not in kernel: new_kernel = kernel[7:] if not new_kernel: - test.error("Could not find new kernel, " - "command line output: %s" % output) - msg = "Reboot to kernel %s through kexec" % new_kernel + test.error("Could not find new kernel, " f"command line output: {output}") + msg = f"Reboot to kernel {new_kernel} through kexec" error_context.context(msg, test.log.info) cmd = params.get("get_kernel_image") % new_kernel kernel_file = session.cmd_output(cmd).strip().splitlines()[0] @@ -84,8 +84,7 @@ def install_new_kernel(): kernel = session.cmd_output(check_cur_kernel_cmd).strip() test.log.info("Current kernel is: %s", kernel) if kernel.strip() != new_kernel.strip(): - test.fail("Fail to boot to kernel %s, current kernel is %s" - % (new_kernel, kernel)) + test.fail(f"Fail to boot to kernel {new_kernel}, current kernel is {kernel}") if "yes" in check_x2apic: check_x2apic_flag() session.close() diff --git a/qemu/tests/kill_app.py b/qemu/tests/kill_app.py index 4f5921d489..1b84972ed2 100644 --- a/qemu/tests/kill_app.py +++ b/qemu/tests/kill_app.py @@ -8,10 +8,11 @@ if application is running when it should . """ + import logging import os -LOG_JOB = logging.getLogger('avocado.test') +LOG_JOB = logging.getLogger("avocado.test") def run(test, params, env): @@ -26,7 +27,7 @@ def run(test, params, env): :param env: Dictionary with test environment. 
""" kill_on_vms = params.get("kill_on_vms", "") - vms = kill_on_vms.split(',') + vms = kill_on_vms.split(",") app_name = params.get("kill_app_name", None) test.log.debug("vms %s", vms) if not vms: @@ -48,14 +49,12 @@ def kill_app(vm_name, app_name, params, env): vm = env.get_vm(params[vm_name]) vm.verify_alive() - vm_session = vm.wait_for_login( - timeout=int(params.get("login_timeout", 360))) + vm_session = vm.wait_for_login(timeout=int(params.get("login_timeout", 360))) # get PID of remote-viewer and kill it LOG_JOB.info("Get PID of %s", app_name) - vm_session.cmd("pgrep %s" % app_name) + vm_session.cmd(f"pgrep {app_name}") LOG_JOB.info("Try to kill %s", app_name) - vm_session.cmd("pkill %s" % app_name - .split(os.path.sep)[-1]) + vm_session.cmd(f"pkill {app_name.split(os.path.sep)[-1]}") vm.verify_alive() vm_session.close() diff --git a/qemu/tests/ksm_base.py b/qemu/tests/ksm_base.py index 78980d557f..2099b99ddb 100644 --- a/qemu/tests/ksm_base.py +++ b/qemu/tests/ksm_base.py @@ -1,15 +1,11 @@ -import time -import random import os +import random import re +import time import aexpect - from avocado.utils import process - -from virttest import data_dir -from virttest import error_context -from virttest import utils_misc +from virttest import data_dir, error_context, utils_misc TMPFS_OVERHEAD = 0.0022 @@ -26,6 +22,7 @@ def run(test, params, env): :param params: Dictionary with test parameters. :param env: Dictionary with the test environment. """ + def _start_allocator(vm, session, timeout): """ Execute guest script and wait until it is initialized. @@ -36,14 +33,13 @@ def _start_allocator(vm, session, timeout): started properly. """ test.log.debug("Starting guest script on guest %s", vm.name) - session.sendline("$(command -v python python3 | head -1) " - "/tmp/ksm_overcommit_guest.py") + session.sendline( + "$(command -v python python3 | head -1) " "/tmp/ksm_overcommit_guest.py" + ) try: - _ = session.read_until_last_line_matches(["PASS:", "FAIL:"], - timeout) + _ = session.read_until_last_line_matches(["PASS:", "FAIL:"], timeout) except aexpect.ExpectProcessTerminatedError as exc: - test.fail("Command guest script on vm '%s' failed: %s" % - (vm.name, str(exc))) + test.fail(f"Command guest script on vm '{vm.name}' failed: {str(exc)}") def _execute_allocator(command, vm, session, timeout): """ @@ -57,16 +53,22 @@ def _execute_allocator(command, vm, session, timeout): :return: Tuple (match index, data) """ - test.log.debug("Executing '%s' on guest script loop, vm: %s, timeout: " - "%s", command, vm.name, timeout) + test.log.debug( + "Executing '%s' on guest script loop, vm: %s, timeout: " "%s", + command, + vm.name, + timeout, + ) session.sendline(command) try: (match, data) = session.read_until_last_line_matches( - ["PASS:", "FAIL:"], - timeout) + ["PASS:", "FAIL:"], timeout + ) except aexpect.ExpectProcessTerminatedError as exc: - e_str = ("Failed to execute command '%s' on guest script, " - "vm '%s': %s" % (command, vm.name, str(exc))) + e_str = ( + f"Failed to execute command '{command}' on guest script, " + f"vm '{vm.name}': {str(exc)}" + ) test.fail(e_str) return (match, data) @@ -79,13 +81,13 @@ def _execute_allocator(command, vm, session, timeout): # Prepare work in guest error_context.context("Turn off swap in guest", test.log.info) session.cmd_status_output("swapoff -a") - script_file_path = os.path.join(data_dir.get_root_dir(), - "shared/scripts/ksm_overcommit_guest.py") + script_file_path = os.path.join( + data_dir.get_root_dir(), "shared/scripts/ksm_overcommit_guest.py" + ) 
vm.copy_files_to(script_file_path, "/tmp") test_type = params.get("test_type") shared_mem = int(params["shared_mem"]) - get_free_mem_cmd = params.get("get_free_mem_cmd", - "grep MemFree /proc/meminfo") + get_free_mem_cmd = params.get("get_free_mem_cmd", "grep MemFree /proc/meminfo") free_mem = vm.get_memory_size(get_free_mem_cmd) max_mem = int(free_mem / (1 + TMPFS_OVERHEAD) - guest_script_overhead) @@ -100,44 +102,42 @@ def _execute_allocator(command, vm, session, timeout): query_cmd = re.sub("QEMU_PID", str(vm.process.get_pid()), query_cmd) - sharing_page_0 = process.run(query_cmd, - verbose=False, - ignore_status=True, - shell=True).stdout_text + sharing_page_0 = process.run( + query_cmd, verbose=False, ignore_status=True, shell=True + ).stdout_text if query_regex: sharing_page_0 = re.findall(query_regex, sharing_page_0)[0] error_context.context("Start to allocate pages inside guest", test.log.info) _start_allocator(vm, session, 60) error_context.context("Start to fill memory in guest", test.log.info) - mem_fill = "mem = MemFill(%s, 0, %s)" % (shared_mem, seed) + mem_fill = f"mem = MemFill({shared_mem}, 0, {seed})" _execute_allocator(mem_fill, vm, session, fill_timeout) cmd = "mem.value_fill()" _execute_allocator(cmd, vm, session, fill_timeout) time.sleep(120) - sharing_page_1 = process.run(query_cmd, - verbose=False, - ignore_status=True, - shell=True).stdout_text + sharing_page_1 = process.run( + query_cmd, verbose=False, ignore_status=True, shell=True + ).stdout_text if query_regex: sharing_page_1 = re.findall(query_regex, sharing_page_1)[0] - error_context.context("Start to fill memory with random value in guest", - test.log.info) + error_context.context( + "Start to fill memory with random value in guest", test.log.info + ) split = params.get("split") if split == "yes": if test_type == "negative": - cmd = "mem.static_random_fill(%s)" % random_bits + cmd = f"mem.static_random_fill({random_bits})" else: cmd = "mem.static_random_fill()" _execute_allocator(cmd, vm, session, fill_timeout) time.sleep(120) - sharing_page_2 = process.run(query_cmd, - verbose=False, - ignore_status=True, - shell=True).stdout_text + sharing_page_2 = process.run( + query_cmd, verbose=False, ignore_status=True, shell=True + ).stdout_text if query_regex: sharing_page_2 = re.findall(query_regex, sharing_page_2)[0] @@ -168,17 +168,20 @@ def _execute_allocator(command, vm, session, timeout): if int(sharing_page[1]) <= int(sharing_page[2]): fail_type += 4 - fail = ["Sharing page increased abnormally", - "Sharing page didn't increase", "Sharing page didn't split"] + fail = [ + "Sharing page increased abnormally", + "Sharing page didn't increase", + "Sharing page didn't split", + ] if fail_type != 0: turns = 0 - while (fail_type > 0): + while fail_type > 0: if fail_type % 2 == 1: test.log.error(fail[turns]) fail_type = fail_type / 2 turns += 1 - test.fail("KSM test failed: %s %s %s" % - (sharing_page_0, sharing_page_1, - sharing_page_2)) + test.fail( + f"KSM test failed: {sharing_page_0} {sharing_page_1} {sharing_page_2}" + ) session.close() diff --git a/qemu/tests/ksm_ksmtuned.py b/qemu/tests/ksm_ksmtuned.py index a9af1197e5..04b9776286 100644 --- a/qemu/tests/ksm_ksmtuned.py +++ b/qemu/tests/ksm_ksmtuned.py @@ -1,16 +1,11 @@ -import re import os - +import re from shutil import copyfile from avocado.utils import process - -from virttest import arch -from virttest import env_process -from virttest import utils_misc -from virttest.utils_test import VMStress -from virttest.utils_test import BackgroundTest +from 
virttest import arch, env_process, utils_misc from virttest.staging import utils_memory +from virttest.utils_test import BackgroundTest, VMStress def run(test, params, env): @@ -30,15 +25,19 @@ def run(test, params, env): :param params: Dictionary with test parameters. :param env: Dictionary with the test environment. """ + def check_ksm(mem, threshold_reached=False): """ :param mem: Boot guest with given memory, in KB :ksmtuned_enabled: ksmtuned threshold is reached or not """ + def heavyload_install(): - if session.cmd_status(test_install_cmd) != 0: # pylint: disable=E0606 - test.log.warning("Could not find installed heavyload in guest, " - "will install it via winutils.iso ") + if session.cmd_status(test_install_cmd) != 0: # pylint: disable=E0606 + test.log.warning( + "Could not find installed heavyload in guest, " + "will install it via winutils.iso " + ) winutil_drive = utils_misc.get_winutils_vol(session) if not winutil_drive: test.cancel("WIN_UTILS CDROM not found.") @@ -46,50 +45,58 @@ def heavyload_install(): session.cmd(install_cmd) def check_qemu_used_mem(qemu_pid, mem): - qemu_used_page = process.getoutput(get_qemu_used_mem % qemu_pid, - shell=True) + qemu_used_page = process.getoutput(get_qemu_used_mem % qemu_pid, shell=True) qemu_used_mem = float(qemu_used_page) * pagesize if qemu_used_mem < mem * mem_thres: return False return True - params['mem'] = mem // 1024 - params['start_vm'] = 'yes' - vm_name = params['main_vm'] + params["mem"] = mem // 1024 + params["start_vm"] = "yes" + vm_name = params["main_vm"] env_process.preprocess_vm(test, params, env, vm_name) vm = env.get_vm(vm_name) session = vm.wait_for_login() qemu_pid = vm.get_pid() if params["os_type"] == "linux": - params['stress_args'] = ('--cpu 4 --io 4 --vm 2 --vm-bytes %sM' % - (int(params['mem']) // 2)) + params["stress_args"] = "--cpu 4 --io 4 --vm 2 --vm-bytes %sM" % ( + int(params["mem"]) // 2 + ) stress_test = VMStress(vm, "stress", params) stress_test.load_stress_tool() else: install_path = params["install_path"] - test_install_cmd = 'dir "%s" | findstr /I heavyload' % install_path + test_install_cmd = f'dir "{install_path}" | findstr /I heavyload' heavyload_install() - heavyload_bin = r'"%s\heavyload.exe" ' % install_path + heavyload_bin = rf'"{install_path}\heavyload.exe" ' heavyload_options = ["/MEMORY 100", "/START"] start_cmd = heavyload_bin + " ".join(heavyload_options) - stress_tool = BackgroundTest(session.cmd, (start_cmd, - stress_timeout, - stress_timeout)) + stress_tool = BackgroundTest( + session.cmd, (start_cmd, stress_timeout, stress_timeout) + ) stress_tool.start() if not utils_misc.wait_for(stress_tool.is_alive, stress_timeout): test.error("Failed to start heavyload process") - if not utils_misc.wait_for(lambda: check_qemu_used_mem(qemu_pid, mem), - stress_timeout, 10, 10): - test.error("QEMU used memory doesn't reach %s of guest mem %sM in " - "%ss" % (mem_thres, mem // 1024, stress_timeout)) - cmd = params['cmd_check_ksm_status'] + if not utils_misc.wait_for( + lambda: check_qemu_used_mem(qemu_pid, mem), stress_timeout, 10, 10 + ): + test.error( + f"QEMU used memory doesn't reach {mem_thres} of guest mem {mem // 1024}M in " + f"{stress_timeout}s" + ) + cmd = params["cmd_check_ksm_status"] free_mem_host = utils_memory.freememtotal() - ksm_status = utils_misc.wait_for(lambda: '1' == process.getoutput(cmd), - 40, first=20.0) + ksm_status = utils_misc.wait_for( + lambda: "1" == process.getoutput(cmd), 40, first=20.0 + ) vm.destroy() - test.log.info("The ksm threshold is %sM, QEMU used memory is 
%sM, " - "and the total free memory on host is %sM", - ksm_thres // 1024, mem // 1024, free_mem_host // 1024) + test.log.info( + "The ksm threshold is %sM, QEMU used memory is %sM, " + "and the total free memory on host is %sM", + ksm_thres // 1024, + mem // 1024, + free_mem_host // 1024, + ) if threshold_reached: if free_mem_host > ksm_thres: test.error("Host memory is not consumed as much as expected") @@ -97,40 +104,39 @@ def check_qemu_used_mem(qemu_pid, mem): test.fail("KSM should be running") else: if free_mem_host < ksm_thres: - test.error("Host memory is consumed too much more than " - "expected") + test.error("Host memory is consumed too much more than " "expected") if ksm_status: test.fail("KSM should not be running") total_mem_host = utils_memory.memtotal() utils_memory.drop_caches() free_mem_host = utils_memory.freememtotal() - ksm_thres = process.getoutput(params['cmd_get_thres'], shell=True) - ksm_thres = int(total_mem_host * - (int(re.findall('\\d+', ksm_thres)[0]) / 100)) + ksm_thres = process.getoutput(params["cmd_get_thres"], shell=True) + ksm_thres = int(total_mem_host * (int(re.findall("\\d+", ksm_thres)[0]) / 100)) guest_mem = (free_mem_host - ksm_thres) // 2 - if arch.ARCH in ('ppc64', 'ppc64le'): + if arch.ARCH in ("ppc64", "ppc64le"): guest_mem = guest_mem - guest_mem % (256 * 1024) status_ksm_service = process.system( - params['cmd_status_ksmtuned'], ignore_status=True) + params["cmd_status_ksmtuned"], ignore_status=True + ) if status_ksm_service != 0: - process.run(params['cmd_start_ksmtuned']) + process.run(params["cmd_start_ksmtuned"]) stress_timeout = params.get("stress_timeout", 1800) mem_thres = float(params.get("mem_thres", 0.95)) - get_qemu_used_mem = params['cmd_get_qemu_used_mem'] + get_qemu_used_mem = params["cmd_get_qemu_used_mem"] pagesize = utils_memory.getpagesize() check_ksm(guest_mem) - ksm_config_file = params['ksm_config_file'] - backup_file = ksm_config_file + '.backup' + ksm_config_file = params["ksm_config_file"] + backup_file = ksm_config_file + ".backup" copyfile(ksm_config_file, backup_file) - threshold = params.get_numeric('ksm_threshold') + threshold = params.get_numeric("ksm_threshold") with open(ksm_config_file, "a+") as f: - f.write('%s=%s' % (params['ksm_thres_conf'], threshold)) - process.run(params['cmd_restart_ksmtuned']) + f.write("{}={}".format(params["ksm_thres_conf"], threshold)) + process.run(params["cmd_restart_ksmtuned"]) ksm_thres = total_mem_host * (threshold / 100) guest_mem = total_mem_host - ksm_thres // 2 - if arch.ARCH in ('ppc64', 'ppc64le'): + if arch.ARCH in ("ppc64", "ppc64le"): guest_mem = guest_mem - guest_mem % (256 * 1024) try: check_ksm(guest_mem, threshold_reached=True) @@ -138,6 +144,6 @@ def check_qemu_used_mem(qemu_pid, mem): copyfile(backup_file, ksm_config_file) os.remove(backup_file) if status_ksm_service != 0: - process.run(params['cmd_stop_ksmtuned']) + process.run(params["cmd_stop_ksmtuned"]) else: - process.run(params['cmd_restart_ksmtuned']) + process.run(params["cmd_restart_ksmtuned"]) diff --git a/qemu/tests/ksm_overcommit.py b/qemu/tests/ksm_overcommit.py index 1696ad60b3..187a852123 100644 --- a/qemu/tests/ksm_overcommit.py +++ b/qemu/tests/ksm_overcommit.py @@ -1,13 +1,11 @@ -import time -import random import math import os +import random +import time import aexpect - from avocado.utils import process - -from virttest import utils_misc, utils_test, env_process, data_dir +from virttest import data_dir, env_process, utils_misc, utils_test from virttest.staging import utils_memory @@ -61,6 +59,7 
@@ def run(test, params, env): :param cfg: ksm_perf_ratio - performance ratio, increase it when your machine is too slow """ + def _start_allocator(vm, session, timeout): """ Execute ksm_overcommit_guest.py on guest, wait until it's initialized. @@ -71,13 +70,13 @@ def _start_allocator(vm, session, timeout): ksm_overcommit_guest.py started properly. """ test.log.debug("Starting ksm_overcommit_guest.py on guest %s", vm.name) - session.sendline("$(command -v python python3 | head -1) " - "/tmp/ksm_overcommit_guest.py") + session.sendline( + "$(command -v python python3 | head -1) " "/tmp/ksm_overcommit_guest.py" + ) try: session.read_until_last_line_matches(["PASS:", "FAIL:"], timeout) except aexpect.ExpectProcessTerminatedError as details: - e_msg = ("Command ksm_overcommit_guest.py on vm '%s' failed: %s" % - (vm.name, str(details))) + e_msg = f"Command ksm_overcommit_guest.py on vm '{vm.name}' failed: {str(details)}" test.fail(e_msg) def _execute_allocator(command, vm, session, timeout): @@ -92,17 +91,22 @@ def _execute_allocator(command, vm, session, timeout): :return: Tuple (match index, data) """ - test.log.debug("Executing '%s' on ksm_overcommit_guest.py loop, " - "vm: %s, timeout: %s", command, vm.name, timeout) + test.log.debug( + "Executing '%s' on ksm_overcommit_guest.py loop, " "vm: %s, timeout: %s", + command, + vm.name, + timeout, + ) session.sendline(command) try: (match, data) = session.read_until_last_line_matches( - ["PASS:", "FAIL:"], - timeout) + ["PASS:", "FAIL:"], timeout + ) except aexpect.ExpectProcessTerminatedError as details: - e_msg = ("Failed to execute command '%s' on " - "ksm_overcommit_guest.py, vm '%s': %s" % - (command, vm.name, str(details))) + e_msg = ( + f"Failed to execute command '{command}' on " + f"ksm_overcommit_guest.py, vm '{vm.name}': {str(details)}" + ) test.fail(e_msg) return (match, data) @@ -112,11 +116,11 @@ def get_ksmstat(): :return: memory in MB """ - fpages = open('/sys/kernel/mm/ksm/pages_sharing') + fpages = open("/sys/kernel/mm/ksm/pages_sharing") ksm_pages = int(fpages.read()) fpages.close() sharing_mem = ksm_pages * pagesize - return int(float(utils_misc.normalize_data_size("%sK" % sharing_mem))) + return int(float(utils_misc.normalize_data_size(f"{sharing_mem}K"))) def initialize_guests(): """ @@ -140,30 +144,37 @@ def initialize_guests(): _execute_allocator(cmd, vm, lsessions[i], 60 * perf_ratio) cmd = "mem.value_fill(%d)" % skeys[0] - _execute_allocator(cmd, vm, lsessions[i], - fill_base_timeout * 2 * perf_ratio) + _execute_allocator( + cmd, vm, lsessions[i], fill_base_timeout * 2 * perf_ratio + ) # Let ksm_overcommit_guest.py do its job # (until shared mem reaches expected value) shm = 0 j = 0 - test.log.debug("Target shared meminfo for guest %s: %s", vm.name, - ksm_size) - while ((new_ksm and (shm < (ksm_size * (i + 1)))) or - (not new_ksm and (shm < (ksm_size)))): + test.log.debug("Target shared meminfo for guest %s: %s", vm.name, ksm_size) + while (new_ksm and (shm < (ksm_size * (i + 1)))) or ( + not new_ksm and (shm < (ksm_size)) + ): if j > 256: test.log.debug(utils_test.get_memory_info(lvms)) - test.error("SHM didn't merge the memory until " - "the DL on guest: %s" % vm.name) + test.error( + "SHM didn't merge the memory until " + f"the DL on guest: {vm.name}" + ) pause = ksm_size / 200 * perf_ratio test.log.debug("Waiting %ds before proceeding...", pause) time.sleep(pause) - if (new_ksm): + if new_ksm: shm = get_ksmstat() else: shm = vm.get_shared_meminfo() - test.log.debug("Shared meminfo for guest %s after " - "iteration 
%s: %s", vm.name, j, shm) + test.log.debug( + "Shared meminfo for guest %s after " "iteration %s: %s", + vm.name, + j, + shm, + ) j += 1 # Keep some reserve @@ -181,14 +192,19 @@ def separate_first_guest(): test.log.info("Phase 2: Split the pages on the first guest") cmd = "mem.static_random_fill()" - data = _execute_allocator(cmd, lvms[0], lsessions[0], - fill_base_timeout * 2 * perf_ratio)[1] + data = _execute_allocator( + cmd, lvms[0], lsessions[0], fill_base_timeout * 2 * perf_ratio + )[1] r_msg = data.splitlines()[-1] test.log.debug("Return message of static_random_fill: %s", r_msg) out = int(r_msg.split()[4]) - test.log.debug("Performance: %dMB * 1000 / %dms = %dMB/s", ksm_size, - out, (ksm_size * 1000 / out)) + test.log.debug( + "Performance: %dMB * 1000 / %dms = %dMB/s", + ksm_size, + out, + (ksm_size * 1000 / out), + ) test.log.debug(utils_test.get_memory_info(lvms)) test.log.debug("Phase 2: PASS") @@ -196,8 +212,9 @@ def split_guest(): """ Sequential split of pages on guests up to memory limit """ - test.log.info("Phase 3a: Sequential split of pages on guests up to " - "memory limit") + test.log.info( + "Phase 3a: Sequential split of pages on guests up to " "memory limit" + ) last_vm = 0 session = None vm = None @@ -205,48 +222,55 @@ def split_guest(): # Check VMs for j in range(0, vmsc): if not lvms[j].is_alive: - e_msg = ("VM %d died while executing static_random_fill on" - " VM %d in allocator loop" % (j, i)) + e_msg = ( + "VM %d died while executing static_random_fill on" + " VM %d in allocator loop" % (j, i) + ) test.fail(e_msg) vm = lvms[i] session = lsessions[i] cmd = "mem.static_random_fill()" - test.log.debug("Executing %s on ksm_overcommit_guest.py loop, " - "vm: %s", cmd, vm.name) + test.log.debug( + "Executing %s on ksm_overcommit_guest.py loop, " "vm: %s", cmd, vm.name + ) session.sendline(cmd) out = "" try: - test.log.debug("Watching host mem while filling vm %s memory", - vm.name) - while (not out.startswith("PASS") and - not out.startswith("FAIL")): + test.log.debug("Watching host mem while filling vm %s memory", vm.name) + while not out.startswith("PASS") and not out.startswith("FAIL"): if not vm.is_alive(): - e_msg = ("VM %d died while executing " - "static_random_fill on allocator loop" % i) + e_msg = ( + "VM %d died while executing " + "static_random_fill on allocator loop" % i + ) test.fail(e_msg) free_mem = int(utils_memory.read_from_meminfo("MemFree")) - if (ksm_swap): - free_mem = (free_mem + - int(utils_memory.read_from_meminfo("SwapFree"))) + if ksm_swap: + free_mem = free_mem + int( + utils_memory.read_from_meminfo("SwapFree") + ) test.log.debug("Free memory on host: %d", free_mem) # We need to keep some memory for python to run. 
- if (free_mem < 64000) or (ksm_swap and - free_mem < (450000 * perf_ratio)): + if (free_mem < 64000) or ( + ksm_swap and free_mem < (450000 * perf_ratio) + ): vm.pause() for j in range(0, i): lvms[j].destroy(gracefully=False) time.sleep(20) vm.resume() - test.log.debug("Only %s free memory, killing %d guests", - free_mem, (i - 1)) + test.log.debug( + "Only %s free memory, killing %d guests", free_mem, (i - 1) + ) last_vm = i out = session.read_nonblocking(0.1, 1) time.sleep(2) except OSError: - test.log.debug("Only %s host free memory, killing %d guests", - free_mem, (i - 1)) + test.log.debug( + "Only %s host free memory, killing %d guests", free_mem, (i - 1) + ) test.log.debug("Stopping %s", vm.name) vm.pause() for j in range(0, i): @@ -272,8 +296,9 @@ def split_guest(): # Verify last machine with randomly generated memory cmd = "mem.static_random_verify()" - _execute_allocator(cmd, lvms[last_vm], lsessions[last_vm], - (mem / 200 * 50 * perf_ratio)) + _execute_allocator( + cmd, lvms[last_vm], lsessions[last_vm], (mem / 200 * 50 * perf_ratio) + ) test.log.debug(utils_test.get_memory_info([lvms[last_vm]])) lsessions[last_vm].cmd_output("die()", 20) @@ -302,30 +327,33 @@ def split_parallel(): test.log.info("Phase 1: PASS") test.log.info("Phase 2a: Simultaneous merging") - test.log.debug("Memory used by allocator on guests = %dMB", - (ksm_size / max_alloc)) + test.log.debug( + "Memory used by allocator on guests = %dMB", (ksm_size / max_alloc) + ) for i in range(0, max_alloc): - cmd = "mem = MemFill(%d, %s, %s)" % ((ksm_size / max_alloc), - skeys[i], dkeys[i]) + cmd = "mem = MemFill(%d, %s, %s)" % ( + (ksm_size / max_alloc), + skeys[i], + dkeys[i], + ) _execute_allocator(cmd, vm, lsessions[i], 60 * perf_ratio) cmd = "mem.value_fill(%d)" % (skeys[0]) - _execute_allocator(cmd, vm, lsessions[i], - fill_base_timeout * perf_ratio) + _execute_allocator(cmd, vm, lsessions[i], fill_base_timeout * perf_ratio) # Wait until ksm_overcommit_guest.py merges pages (3 * ksm_size / 3) shm = 0 i = 0 test.log.debug("Target shared memory size: %s", ksm_size) - while (shm < ksm_size): + while shm < ksm_size: if i > 64: test.log.debug(utils_test.get_memory_info(lvms)) test.error("SHM didn't merge the memory until DL") pause = ksm_size / 200 * perf_ratio test.log.debug("Waiting %ds before proceed...", pause) time.sleep(pause) - if (new_ksm): + if new_ksm: shm = get_ksmstat() else: shm = vm.get_shared_meminfo() @@ -339,52 +367,63 @@ def split_parallel(): # Actual splitting for i in range(0, max_alloc): cmd = "mem.static_random_fill()" - data = _execute_allocator(cmd, vm, lsessions[i], - fill_base_timeout * perf_ratio)[1] + data = _execute_allocator( + cmd, vm, lsessions[i], fill_base_timeout * perf_ratio + )[1] data = data.splitlines()[-1] test.log.debug(data) out = int(data.split()[4]) - test.log.debug("Performance: %dMB * 1000 / %dms = %dMB/s", - (ksm_size / max_alloc), out, - (ksm_size * 1000 / out / max_alloc)) + test.log.debug( + "Performance: %dMB * 1000 / %dms = %dMB/s", + (ksm_size / max_alloc), + out, + (ksm_size * 1000 / out / max_alloc), + ) test.log.debug(utils_test.get_memory_info([vm])) test.log.info("Phase 2b: PASS") test.log.info("Phase 2c: Simultaneous verification") for i in range(0, max_alloc): cmd = "mem.static_random_verify()" - data = _execute_allocator(cmd, vm, lsessions[i], - (mem / 200 * 50 * perf_ratio))[1] + data = _execute_allocator( + cmd, vm, lsessions[i], (mem / 200 * 50 * perf_ratio) + )[1] test.log.info("Phase 2c: PASS") test.log.info("Phase 2d: Simultaneous merging") # Actual 
splitting for i in range(0, max_alloc): cmd = "mem.value_fill(%d)" % skeys[0] - data = _execute_allocator(cmd, vm, lsessions[i], - fill_base_timeout * 2 * perf_ratio)[1] + data = _execute_allocator( + cmd, vm, lsessions[i], fill_base_timeout * 2 * perf_ratio + )[1] test.log.debug(utils_test.get_memory_info([vm])) test.log.info("Phase 2d: PASS") test.log.info("Phase 2e: Simultaneous verification") for i in range(0, max_alloc): cmd = "mem.value_check(%d)" % skeys[0] - data = _execute_allocator(cmd, vm, lsessions[i], - (mem / 200 * 50 * perf_ratio))[1] + data = _execute_allocator( + cmd, vm, lsessions[i], (mem / 200 * 50 * perf_ratio) + )[1] test.log.info("Phase 2e: PASS") test.log.info("Phase 2f: Simultaneous spliting last 96B") for i in range(0, max_alloc): cmd = "mem.static_random_fill(96)" - data = _execute_allocator(cmd, vm, lsessions[i], - fill_base_timeout * perf_ratio)[1] + data = _execute_allocator( + cmd, vm, lsessions[i], fill_base_timeout * perf_ratio + )[1] data = data.splitlines()[-1] out = int(data.split()[4]) - test.log.debug("Performance: %dMB * 1000 / %dms = %dMB/s", - ksm_size / max_alloc, out, - (ksm_size * 1000 / out / max_alloc)) + test.log.debug( + "Performance: %dMB * 1000 / %dms = %dMB/s", + ksm_size / max_alloc, + out, + (ksm_size * 1000 / out / max_alloc), + ) test.log.debug(utils_test.get_memory_info([vm])) test.log.info("Phase 2f: PASS") @@ -392,8 +431,9 @@ def split_parallel(): test.log.info("Phase 2g: Simultaneous verification last 96B") for i in range(0, max_alloc): cmd = "mem.static_random_verify(96)" - _, data = _execute_allocator(cmd, vm, lsessions[i], - (mem / 200 * 50 * perf_ratio)) + _, data = _execute_allocator( + cmd, vm, lsessions[i], (mem / 200 * 50 * perf_ratio) + ) test.log.debug(utils_test.get_memory_info([vm])) test.log.info("Phase 2g: PASS") @@ -409,7 +449,7 @@ def split_parallel(): test.log.info("Killing ksmtuned...") process.run("killall ksmtuned") new_ksm = False - if (os.path.exists("/sys/kernel/mm/ksm/run")): + if os.path.exists("/sys/kernel/mm/ksm/run"): process.run("echo 50 > /sys/kernel/mm/ksm/sleep_millisecs", shell=True) process.run("echo 5000 > /sys/kernel/mm/ksm/pages_to_scan", shell=True) process.run("echo 1 > /sys/kernel/mm/ksm/run", shell=True) @@ -417,20 +457,20 @@ def split_parallel(): e_up = "/sys/kernel/mm/transparent_hugepage/enabled" e_rh = "/sys/kernel/mm/redhat_transparent_hugepage/enabled" if os.path.exists(e_up): - process.run("echo 'never' > %s" % e_up, shell=True) + process.run(f"echo 'never' > {e_up}", shell=True) if os.path.exists(e_rh): - process.run("echo 'never' > %s" % e_rh, shell=True) + process.run(f"echo 'never' > {e_rh}", shell=True) new_ksm = True else: try: process.run("modprobe ksm") process.run("ksmctl start 5000 100") except process.CmdError as details: - test.fail("Failed to load KSM: %s" % details) + test.fail(f"Failed to load KSM: {details}") # host_reserve: mem reserve kept for the host system to run host_reserve = int(params.get("ksm_host_reserve", -1)) - if (host_reserve == -1): + if host_reserve == -1: try: available = utils_memory.read_from_meminfo("MemAvailable") except process.CmdError: # ancient kernels @@ -438,7 +478,7 @@ def split_parallel(): available = utils_memory.read_from_meminfo("MemFree") # default host_reserve = UsedMem + one_minimal_guest(128MB) # later we add 64MB per additional guest - host_reserve = ((utils_memory.memtotal() - available) / 1024 + 128) + host_reserve = (utils_memory.memtotal() - available) / 1024 + 128 # using default reserve _host_reserve = True else: @@ -446,7 
+486,7 @@ def split_parallel(): # guest_reserve: mem reserve kept to avoid guest OS to kill processes guest_reserve = int(params.get("ksm_guest_reserve", -1)) - if (guest_reserve == -1): + if guest_reserve == -1: # In case of OOM, set guest_reserve to 1536M guest_reserve = 1536 # using default reserve @@ -462,14 +502,14 @@ def split_parallel(): vmsc = int(overcommit) + 1 vmsc = max(vmsc, max_vms) - if (params['ksm_mode'] == "serial"): + if params["ksm_mode"] == "serial": max_alloc = vmsc if _host_reserve: # First round of additional guest reserves host_reserve += vmsc * 64 _host_reserve = vmsc - host_mem = (int(utils_memory.memtotal()) / 1024 - host_reserve) + host_mem = int(utils_memory.memtotal()) / 1024 - host_reserve pagesize = utils_memory.getpagesize() ksm_swap = False @@ -483,17 +523,18 @@ def split_parallel(): else: perf_ratio = 1 - if (params['ksm_mode'] == "parallel"): + if params["ksm_mode"] == "parallel": vmsc = 1 overcommit = 1 mem = host_mem # 32bit system adjustment if "64" not in params.get("vm_arch_name"): - test.log.debug("Probably i386 guest architecture, " - "max allocator mem = 2G") + test.log.debug( + "Probably i386 guest architecture, " "max allocator mem = 2G" + ) # Guest can have more than 2G but # kvm mem + 1MB (allocator itself) can't - if (host_mem > 3100): + if host_mem > 3100: mem = 3100 if os.popen("uname -i").readline().startswith("i386"): @@ -512,14 +553,14 @@ def split_parallel(): mem = 8192 # 32bit system adjustment - if params["vm_arch_name"] == 'i686': - test.log.debug("Probably i386 guest architecture, " - "max allocator mem = 2G") + if params["vm_arch_name"] == "i686": + test.log.debug( + "Probably i386 guest architecture, " "max allocator mem = 2G" + ) # Guest can have more than 2G but # kvm mem + 1MB (allocator itself) can't if mem - guest_reserve - 1 > 3100: - vmsc = int(math.ceil((host_mem * overcommit) / - (3100 + guest_reserve))) + vmsc = int(math.ceil((host_mem * overcommit) / (3100 + guest_reserve))) if _host_reserve: host_reserve += (vmsc - _host_reserve) * 64 host_mem -= (vmsc - _host_reserve) * 64 @@ -530,8 +571,7 @@ def split_parallel(): test.log.debug("Host is i386 architecture, max guest mem is 2G") # Guest system with qemu overhead (64M) can't have more than 2G if mem > 3100 - 64: - vmsc = int(math.ceil((host_mem * overcommit) / - (3100 - 64.0))) + vmsc = int(math.ceil((host_mem * overcommit) / (3100 - 64.0))) if _host_reserve: host_reserve += (vmsc - _host_reserve) * 64 host_mem -= (vmsc - _host_reserve) * 64 @@ -545,8 +585,7 @@ def split_parallel(): swap = int(utils_memory.read_from_meminfo("SwapTotal")) / 1024 test.log.debug("Overcommit = %f", overcommit) - test.log.debug("True overcommit = %f ", (float(vmsc * mem) / - float(host_mem))) + test.log.debug("True overcommit = %f ", (float(vmsc * mem) / float(host_mem))) test.log.debug("Host memory = %dM", host_mem) test.log.debug("Guest memory = %dM", mem) test.log.debug("Using swap = %s", ksm_swap) @@ -579,11 +618,11 @@ def split_parallel(): # we need to specify and create them here vm_name = params["main_vm"] vm_arch_name = params["vm_arch_name"] - if (vm_arch_name in ("ppc64", "ppc64le")): + if vm_arch_name in ("ppc64", "ppc64le"): if divmod(mem, 256)[1] > 0: mem = 256 * (divmod(mem, 256)[0] + 1) - params['mem'] = mem - params['vms'] = vm_name + params["mem"] = mem + params["vms"] = vm_name # ksm_size: amount of memory used by allocator ksm_size = mem - guest_reserve @@ -609,13 +648,14 @@ def split_parallel(): # Last VM is later used to run more allocators simultaneously 
lvms.append(lvms[0].clone(vm_name, params)) env.register_vm(vm_name, lvms[i]) - params['vms'] += " " + vm_name + params["vms"] += " " + vm_name test.log.debug("Booting guest %s", lvms[i].name) lvms[i].create() if not lvms[i].is_alive(): - test.error("VM %s seems to be dead; Test requires a" - "living VM" % lvms[i].name) + test.error( + f"VM {lvms[i].name} seems to be dead; Test requires a" "living VM" + ) lsessions.append(lvms[i].wait_for_login(timeout=360)) @@ -626,18 +666,19 @@ def split_parallel(): test.log.debug(utils_test.get_memory_info(lvms)) # Copy ksm_overcommit_guest.py into guests - vksmd_src = os.path.join(data_dir.get_shared_dir(), - "scripts", "ksm_overcommit_guest.py") + vksmd_src = os.path.join( + data_dir.get_shared_dir(), "scripts", "ksm_overcommit_guest.py" + ) dst_dir = "/tmp" for vm in lvms: vm.copy_files_to(vksmd_src, dst_dir) test.log.info("Phase 0: PASS") - if params['ksm_mode'] == "parallel": + if params["ksm_mode"] == "parallel": test.log.info("Starting KSM test parallel mode") split_parallel() test.log.info("KSM test parallel mode: PASS") - elif params['ksm_mode'] == "serial": + elif params["ksm_mode"] == "serial": test.log.info("Starting KSM test serial mode") initialize_guests() separate_first_guest() diff --git a/qemu/tests/kvm_stat.py b/qemu/tests/kvm_stat.py index f6b457b218..53106d0183 100644 --- a/qemu/tests/kvm_stat.py +++ b/qemu/tests/kvm_stat.py @@ -1,10 +1,8 @@ -import re import os +import re from avocado.utils import process -from virttest import env_process -from virttest import utils_package -from virttest import utils_misc +from virttest import env_process, utils_misc, utils_package from virttest.utils_numeric import normalize_data_size @@ -30,25 +28,39 @@ def tweak_file(origin, target, filename): f.write(content) def separate_file(file_path, rotate_size_limit): - rotate_size_limit = int(normalize_data_size(rotate_size_limit, order_magnitude="B", factor=1024)) + rotate_size_limit = int( + normalize_data_size(rotate_size_limit, order_magnitude="B", factor=1024) + ) num = rotate_time + 1 while num > 0: - if utils_misc.wait_for(lambda: os.path.getsize(file_path) > rotate_size_limit, 60, 5, 5): - process.system('logrotate -v /etc/logrotate.d/kvm_stat') + if utils_misc.wait_for( + lambda: os.path.getsize(file_path) > rotate_size_limit, 60, 5, 5 + ): + process.system("logrotate -v /etc/logrotate.d/kvm_stat") num -= 1 def check_log(): test.log.info("check if log file num match with rotate") - check_log_num = int(process.system_output(params.get("check_log_num"), shell=True)) + check_log_num = int( + process.system_output(params.get("check_log_num"), shell=True) + ) if check_log_num == rotate_time: - test.log.info("Get the expected log file num %s" % check_log_num) + test.log.info("Get the expected log file num %s", check_log_num) else: - test.fail("Except %s log file, but get %s" % (rotate_time, check_log_num)) + test.fail(f"Except {rotate_time} log file, but get {check_log_num}") def restore_env(): test.log.info("Restore the host environment") - tweak_file(r"-s\s([\.\d]+)\s", "-s %s " % initial_kvm_stat_interval, kvm_stat_service_path) - tweak_file(r"size \s(.*)\s", "size %s" % initial_rotate_size, logrotate_config_file_path) + tweak_file( + r"-s\s([\.\d]+)\s", + f"-s {initial_kvm_stat_interval} ", + kvm_stat_service_path, + ) + tweak_file( + r"size \s(.*)\s", + f"size {initial_rotate_size}", + logrotate_config_file_path, + ) depends_pkgs = params.objects("depends_pkgs") test.log.info("Install packages: %s in host", depends_pkgs) @@ -56,12 +68,14 @@ def 
restore_env(): test.cancel("Install %s packages failed", depends_pkgs) kvm_stat_service_path = params.get("kvm_stat_service_path") - with open(kvm_stat_service_path, 'r') as fd: + with open(kvm_stat_service_path, "r") as fd: content = fd.read() - initial_kvm_stat_interval = re.search(r"ExecStart=.*-s\s([\.\d]+)\s", content).group(1) + initial_kvm_stat_interval = re.search( + r"ExecStart=.*-s\s([\.\d]+)\s", content + ).group(1) logrotate_config_file_path = params.get("logrotate_config_file_path") - with open(logrotate_config_file_path, 'r') as fd: + with open(logrotate_config_file_path, "r") as fd: content = fd.read() initial_rotate_size = re.search(r"size\s(.*)\s", content).group(1) rotate_time = int(re.search(r"rotate\s+(\d+)", content)[1]) @@ -70,8 +84,16 @@ def restore_env(): kvm_stat_interval = params.get("kvm_stat_interval") try: - test.log.info("Adjust the parameter '-s' is %s in %s", kvm_stat_interval, kvm_stat_service_path) - tweak_file(r"-s %s" % initial_kvm_stat_interval, "-s %s" % kvm_stat_interval, kvm_stat_service_path) + test.log.info( + "Adjust the parameter '-s' is %s in %s", + kvm_stat_interval, + kvm_stat_service_path, + ) + tweak_file( + rf"-s {initial_kvm_stat_interval}", + f"-s {kvm_stat_interval}", + kvm_stat_service_path, + ) test.log.info("Start kvm_stat.service") kvm_stat_start_cmd = params.get("kvm_stat_start_cmd") @@ -81,12 +103,16 @@ def restore_env(): else: test.log.info("Successfully to start the kvm_stat.service") - params["start_vm"] = 'yes' + params["start_vm"] = "yes" env_process.preprocess_vm(test, params, env, params.get("main_vm")) - vm = env.get_vm(params["main_vm"]) + env.get_vm(params["main_vm"]) test.log.info("Start logrotate command") - tweak_file(r"size %s" % initial_rotate_size, "size %s" % rotate_size_limit, logrotate_config_file_path) + tweak_file( + rf"size {initial_rotate_size}", + f"size {rotate_size_limit}", + logrotate_config_file_path, + ) log_file = params.get("log_file") separate_file(log_file, rotate_size_limit) check_log() diff --git a/qemu/tests/kvm_unit_test.py b/qemu/tests/kvm_unit_test.py index 809523bfe0..dee9a17456 100644 --- a/qemu/tests/kvm_unit_test.py +++ b/qemu/tests/kvm_unit_test.py @@ -1,13 +1,13 @@ +import glob import os import shutil -import glob + try: from configparser import ConfigParser except ImportError: from ConfigParser import ConfigParser -from virttest import utils_misc -from virttest import env_process +from virttest import env_process, utils_misc def run(test, params, env): @@ -22,111 +22,120 @@ def run(test, params, env): :param params: Dictionary with the test parameters :param env: Dictionary with test environment """ - unittest_dir = os.path.join(test.builddir, 'unittests') + unittest_dir = os.path.join(test.builddir, "unittests") if not os.path.isdir(unittest_dir): - test.cancel("No unittest dir %s available (did you run the " - "build test first?)" % unittest_dir) + test.cancel( + f"No unittest dir {unittest_dir} available (did you run the " + "build test first?)" + ) os.chdir(unittest_dir) - unittest_list = glob.glob('*.flat') + unittest_list = glob.glob("*.flat") if not unittest_list: - test.cancel("No unittest files available (did you run the " - "build test first?)") - test.log.debug('Flat file list: %s', unittest_list) + test.cancel( + "No unittest files available (did you run the " "build test first?)" + ) + test.log.debug("Flat file list: %s", unittest_list) - unittest_cfg = os.path.join(unittest_dir, 'unittests.cfg') + unittest_cfg = os.path.join(unittest_dir, "unittests.cfg") parser = 
ConfigParser() parser.read(unittest_cfg) test_list = parser.sections() if not test_list: - test.error("No tests listed on config file %s" % unittest_cfg) - test.log.debug('Unit test list: %s', test_list) + test.error(f"No tests listed on config file {unittest_cfg}") + test.log.debug("Unit test list: %s", test_list) - if params.get('unittest_test_list'): - test_list = params.get('unittest_test_list').split() - test.log.info('Original test list overriden by user') - test.log.info('User defined unit test list: %s', test_list) + if params.get("unittest_test_list"): + test_list = params.get("unittest_test_list").split() + test.log.info("Original test list overriden by user") + test.log.info("User defined unit test list: %s", test_list) - black_list = params.get('unittest_test_blacklist', '').split() + black_list = params.get("unittest_test_blacklist", "").split() if black_list: for b in black_list: if b in test_list: test_list.remove(b) - test.log.info('Tests blacklisted by user: %s', black_list) - test.log.info('Test list after blacklist: %s', test_list) + test.log.info("Tests blacklisted by user: %s", black_list) + test.log.info("Test list after blacklist: %s", test_list) nfail = 0 tests_failed = [] - timeout = int(params.get('unittest_timeout', 600)) + timeout = int(params.get("unittest_timeout", 600)) - extra_params_original = params.get('extra_params') + extra_params_original = params.get("extra_params") for t in test_list: - test.log.info('Running %s', t) + test.log.info("Running %s", t) flat_file = None - if parser.has_option(t, 'file'): - flat_file = parser.get(t, 'file') + if parser.has_option(t, "file"): + flat_file = parser.get(t, "file") if flat_file is None: nfail += 1 tests_failed.append(t) - test.log.error('Unittest config file %s has section %s but no ' - 'mandatory option file', unittest_cfg, t) + test.log.error( + "Unittest config file %s has section %s but no " + "mandatory option file", + unittest_cfg, + t, + ) continue if flat_file not in unittest_list: nfail += 1 tests_failed.append(t) - test.log.error('Unittest file %s referenced in config file %s but ' - 'was not found under the unittest dir', flat_file, - unittest_cfg) + test.log.error( + "Unittest file %s referenced in config file %s but " + "was not found under the unittest dir", + flat_file, + unittest_cfg, + ) continue smp = None - if parser.has_option(t, 'smp'): - smp = int(parser.get(t, 'smp')) - params['smp'] = smp + if parser.has_option(t, "smp"): + smp = int(parser.get(t, "smp")) + params["smp"] = smp extra_params = None - if parser.has_option(t, 'extra_params'): - extra_params = parser.get(t, 'extra_params') - if not params.get('extra_params'): - params['extra_params'] = "" - params['extra_params'] += ' %s' % extra_params + if parser.has_option(t, "extra_params"): + extra_params = parser.get(t, "extra_params") + if not params.get("extra_params"): + params["extra_params"] = "" + params["extra_params"] += f" {extra_params}" vm_name = params["main_vm"] - params['kernel'] = os.path.join(unittest_dir, flat_file) + params["kernel"] = os.path.join(unittest_dir, flat_file) - testlog_path = os.path.join(test.debugdir, "%s.log" % t) + testlog_path = os.path.join(test.debugdir, f"{t}.log") testlog = None try: try: - vm_name = params['main_vm'] + vm_name = params["main_vm"] env_process.preprocess_vm(test, params, env, vm_name) vm = env.get_vm(vm_name) vm.create() vm.resume() testlog = vm.get_testlog_filename() - msg = ("Waiting for unittest '%s' to complete, timeout %s" % - (t, timeout)) + msg = f"Waiting for unittest 
'{t}' to complete, timeout {timeout}" if os.path.isfile(testlog): - msg += (", output in %s" % testlog) + msg += f", output in {testlog}" else: testlog = None test.log.info(msg) if not utils_misc.wait_for(vm.is_dead, timeout): - test.fail("Timeout elapsed (%ss)" % timeout) + test.fail(f"Timeout elapsed ({timeout}s)") # Check qemu's exit status status = vm.process.get_status() # Check whether there's an isa_debugexit device in the vm - isa_debugexit = 'isa-debug-exit' in vm.qemu_command + isa_debugexit = "isa-debug-exit" in vm.qemu_command if isa_debugexit: good_status = 1 @@ -141,18 +150,20 @@ def run(test, params, env): except Exception as e: nfail += 1 tests_failed.append(t) - test.log.error('Exception happened during %s: %s', t, str(e)) + test.log.error("Exception happened during %s: %s", t, str(e)) finally: try: if testlog is not None: shutil.copy(vm.get_testlog_filename(), testlog_path) - test.log.info("Unit test log collected and available " - "under %s", testlog_path) - except (NameError, IOError): + test.log.info( + "Unit test log collected and available " "under %s", + testlog_path, + ) + except (OSError, NameError): test.log.error("Not possible to collect logs") # Restore the extra params so other tests can run normally - params['extra_params'] = extra_params_original + params["extra_params"] = extra_params_original if nfail != 0: - test.fail("Unit tests failed: %s" % " ".join(tests_failed)) + test.fail("Unit tests failed: {}".format(" ".join(tests_failed))) diff --git a/qemu/tests/kvm_unit_test_nested.py b/qemu/tests/kvm_unit_test_nested.py index 32afb21be6..47c593d606 100644 --- a/qemu/tests/kvm_unit_test_nested.py +++ b/qemu/tests/kvm_unit_test_nested.py @@ -33,5 +33,4 @@ def run(test, params, env): status, output = process.getstatusoutput(run_cmd, timeout) if output: - test.fail("kvm_unit_tests failed, status: %s, output: %s" % - (status, output)) + test.fail(f"kvm_unit_tests failed, status: {status}, output: {output}") diff --git a/qemu/tests/larger_buffer_with_none_cache_mode.py b/qemu/tests/larger_buffer_with_none_cache_mode.py index 7b4581e19a..1acd160c9b 100755 --- a/qemu/tests/larger_buffer_with_none_cache_mode.py +++ b/qemu/tests/larger_buffer_with_none_cache_mode.py @@ -1,13 +1,12 @@ import os -from provider.qemu_img_utils import strace - from avocado import fail_on from avocado.utils import process - from virttest import data_dir from virttest.qemu_storage import QemuImg +from provider.qemu_img_utils import strace + def run(test, params, env): """ @@ -26,24 +25,34 @@ def run(test, params, env): source = QemuImg(params.object_params(src_image), root_dir, src_image) strace_event = params["strace_event"] strace_events = strace_event.split() - strace_output_file = os.path.join(test.debugdir, - "convert_with_none.log") + strace_output_file = os.path.join(test.debugdir, "convert_with_none.log") src_filename = source.image_filename - process.run("dd if=/dev/urandom of=%s bs=1M count=100" % src_filename) - test.log.debug("Convert from %s to %s with cache mode none, strace log: " - "%s.", src_filename, tgt_image, strace_output_file) + process.run(f"dd if=/dev/urandom of={src_filename} bs=1M count=100") + test.log.debug( + "Convert from %s to %s with cache mode none, strace log: " "%s.", + src_filename, + tgt_image, + strace_output_file, + ) with strace(source, strace_events, strace_output_file, trace_child=True): fail_on((process.CmdError,))(source.convert)( - params.object_params(src_image), root_dir, cache_mode="none") + params.object_params(src_image), root_dir, 
cache_mode="none" + ) - test.log.debug("Check whether the max size of %s syscall is 2M in %s.", - strace_event, strace_output_file) + test.log.debug( + "Check whether the max size of %s syscall is 2M in %s.", + strace_event, + strace_output_file, + ) with open(strace_output_file) as fd: for line in fd.readlines(): if int(line.split()[-1]) == 2097152: break else: - test.fail("The max size of '%s' is not 2M, check '%s' please.", - strace_event, strace_output_file) + test.fail( + "The max size of '%s' is not 2M, check '%s' please.", + strace_event, + strace_output_file, + ) params["images"] += " " + tgt_image diff --git a/qemu/tests/libvirt_host_model_test.py b/qemu/tests/libvirt_host_model_test.py index 8fa4b97dae..3c577eeccf 100644 --- a/qemu/tests/libvirt_host_model_test.py +++ b/qemu/tests/libvirt_host_model_test.py @@ -2,12 +2,7 @@ import uuid from avocado.utils import path -from virttest import cpu -from virttest import virsh -from virttest import data_dir -from virttest import libvirt_xml -from virttest import env_process -from virttest import error_context +from virttest import cpu, data_dir, env_process, error_context, libvirt_xml, virsh def extend_flags_patterns(flags_dict): @@ -16,7 +11,7 @@ def extend_flags_patterns(flags_dict): :param flags_dict: The original dict of flags """ tmp_dict = {} - replace_char = [('_', ''), ('_', '-'), ('-', '_'), ('-', '')] + replace_char = [("_", ""), ("_", "-"), ("-", "_"), ("-", "")] for flag in flags_dict.keys(): tmp_list = [] tmp_list.extend(set(map(lambda x: flag.replace(*x), replace_char))) @@ -36,8 +31,8 @@ def get_cpu_info_from_dumpxml(name): cpu_model = cpu_xml.model cpu_features = {} for i in range(0, len(feature_list)): - feature_name = cpu_xml.get_feature(i).get('name') - feature_policy = cpu_xml.get_feature(i).get('policy') + feature_name = cpu_xml.get_feature(i).get("name") + feature_policy = cpu_xml.get_feature(i).get("policy") if feature_policy == "require": feature_policy = "on" elif feature_policy == "disable": @@ -64,16 +59,16 @@ def compare_cpu_info(test, params): except path.CmdNotFoundError: test.cancel("Virsh executable not set or found on path") - xml = """ + xml = f""" - %s + {name} 1 - hvm + hvm - """ % (name, vm_arch, machine) + """ xml_file = os.path.join(data_dir.get_tmp_dir(), "temp_xml_for_cpu") with open(xml_file, "w") as f: f.write(xml) @@ -92,14 +87,17 @@ def compare_cpu_info(test, params): cpu_model_libvirt = libvirt_cpu_info["model"] qemu_proc_cpu_flags = qemu_cpu_info["flags"] if cpu_model_qemu != cpu_model_libvirt: - test.log.error("mismatch cpu model bwteen qemu %s and libvirt %s", - cpu_model_qemu, cpu_model_libvirt) + test.log.error( + "mismatch cpu model bwteen qemu %s and libvirt %s", + cpu_model_qemu, + cpu_model_libvirt, + ) return False params["cpu_model"] = cpu_model_qemu - qemu_cpu_flags = cpu.parse_qemu_cpu_flags(qemu_cpu_info['flags']) - libvirt_cpu_flags = libvirt_cpu_info['features'] + qemu_cpu_flags = cpu.parse_qemu_cpu_flags(qemu_cpu_info["flags"]) + libvirt_cpu_flags = libvirt_cpu_info["features"] qemu_cpu_flags = extend_flags_patterns(qemu_cpu_flags) - exclude_map = eval(params.get('exclude_map', '{}')) + exclude_map = eval(params.get("exclude_map", "{}")) check_exclude = False exclude_map_flags = [] if cpu_model_qemu in exclude_map.keys(): @@ -110,7 +108,7 @@ def compare_cpu_info(test, params): result_bool = True for flag in libvirt_cpu_flags.keys(): if flag not in qemu_cpu_flags.keys(): - if libvirt_cpu_flags[flag] == 'on': + if libvirt_cpu_flags[flag] == "on": miss_flags.append(flag) elif 
libvirt_cpu_flags[flag] != qemu_cpu_flags[flag]: mismatch_flags.append(flag) @@ -124,8 +122,9 @@ def compare_cpu_info(test, params): result_bool = False break if mismatch_flags: - test.log.error("\nmismatch flags %s between libvirt and qemu\n", - mismatch_flags) + test.log.error( + "\nmismatch flags %s between libvirt and qemu\n", mismatch_flags + ) if not check_exclude: result_bool = False else: @@ -162,9 +161,10 @@ def run(test, params, env): cpu_flags = params.get("cpu_model_flags") params["cpu_model_flags"] = cpu.recombine_qemu_cpu_flags( - qemu_proc_cpu_flags, cpu_flags) + qemu_proc_cpu_flags, cpu_flags + ) params["start_vm"] = "yes" - vm_name = params['main_vm'] + vm_name = params["main_vm"] env_process.preprocess_vm(test, params, env, vm_name) vm = env.get_vm(vm_name) diff --git a/qemu/tests/live_backup.py b/qemu/tests/live_backup.py index bc99bdafd0..18bd7ffe66 100644 --- a/qemu/tests/live_backup.py +++ b/qemu/tests/live_backup.py @@ -1,4 +1,5 @@ from virttest import error_context + from qemu.tests import live_backup_base diff --git a/qemu/tests/live_backup_add_bitmap.py b/qemu/tests/live_backup_add_bitmap.py index 70acf2ae70..4d8789265c 100644 --- a/qemu/tests/live_backup_add_bitmap.py +++ b/qemu/tests/live_backup_add_bitmap.py @@ -6,6 +6,7 @@ """ from virttest import error_context + from provider import block_dirty_bitmap @@ -22,6 +23,7 @@ def run(test, params, env): :param params: test parameters dictionary :param env: test environment """ + def check_bitmap_existence_as_expected(bitmaps, existence_param): """Check bitmaps' existence.""" bitmap_dict = block_dirty_bitmap.get_bitmaps(vm.monitor.info("block")) @@ -30,11 +32,10 @@ def check_bitmap_existence_as_expected(bitmaps, existence_param): for bitmap_params in bitmaps: bitmap = bitmap_params.get("bitmap_name") existence = bitmap_params.get(existence_param, "yes") == "yes" - if not block_dirty_bitmap.check_bitmap_existence(bitmap_dict, - bitmap_params, - existence): - msg = "bitmap %s %s exists" % (bitmap, - "not" if existence else "") + if not block_dirty_bitmap.check_bitmap_existence( + bitmap_dict, bitmap_params, existence + ): + msg = "bitmap {} {} exists".format(bitmap, "not" if existence else "") msgs.append(msg) if msgs: test.fail("\n".join(msgs)) @@ -63,6 +64,5 @@ def check_bitmap_existence_as_expected(bitmaps, existence_param): # wait till boot finishes vm.wait_for_login(timeout=int(params.get("login_timeout", 360))).close() - error_context.context("check bitmap exsitence after shutdown", - test.log.info) + error_context.context("check bitmap exsitence after shutdown", test.log.info) check_bitmap_existence_as_expected(bitmaps, "existence_after_shutdown") diff --git a/qemu/tests/live_backup_base.py b/qemu/tests/live_backup_base.py index e9f5e0adbf..f74f31eed4 100644 --- a/qemu/tests/live_backup_base.py +++ b/qemu/tests/live_backup_base.py @@ -1,20 +1,17 @@ +import logging import re import time -import logging from avocado.utils import process - -from virttest import utils_misc -from virttest import qemu_storage +from virttest import qemu_storage, utils_misc from virttest.staging import utils_memory from qemu.tests import block_copy -LOG_JOB = logging.getLogger('avocado.test') +LOG_JOB = logging.getLogger("avocado.test") class LiveBackup(block_copy.BlockCopy): - """ Provide basic functions for live backup test cases. """ @@ -28,7 +25,7 @@ def __init__(self, test, params, env, tag): :param env: Dictionary with test environment. 
:param tag: Image tag defined in parameter images """ - super(LiveBackup, self).__init__(test, params, env, tag) + super().__init__(test, params, env, tag) self.image_chain = self.params.get("image_chain").split() self.image_cmd = utils_misc.get_qemu_img_binary(params) self.source_image = self.params.get("source_image") @@ -48,9 +45,9 @@ def generate_backup_params(self): backup_image = self.image_chain[0] for key, value in backup_params.items(): if key not in ["image_name", "image_format"]: - self.params["%s_%s" % (key, backup_image)] = value - self.params["image_name_%s" % backup_image] = "images/%s" % backup_image - self.params["image_format_%s" % backup_image] = self.backup_format + self.params[f"{key}_{backup_image}"] = value + self.params[f"image_name_{backup_image}"] = f"images/{backup_image}" + self.params[f"image_format_{backup_image}"] = self.backup_format def create_backup_image(self): """ @@ -60,8 +57,7 @@ def create_backup_image(self): backup_image = self.image_chain[self.backup_index] backup_params = self.params.object_params(backup_image) backup_params["base_format"] = self.params.get("format") - qemu_image = qemu_storage.QemuImg(backup_params, - self.data_dir, backup_image) + qemu_image = qemu_storage.QemuImg(backup_params, self.data_dir, backup_image) LOG_JOB.info("create backup image for %s", backup_image) backup_image_name, _ = qemu_image.create(backup_params) self.backup_index += 1 @@ -81,40 +77,47 @@ def create_backup(self, sync, backup_image_name=""): if sync == "full": mode = "absolute-paths" granularity = int(self.params.get("granularity", 65536)) - backup_image_name = "images/%s.%s" % (self.image_chain[0], - backup_format) + backup_image_name = f"images/{self.image_chain[0]}.{backup_format}" backup_image_name = utils_misc.get_path(self.data_dir, backup_image_name) self.trash_files.append(backup_image_name) if transaction == "yes": args_list = [] - bitmap_args = {"node": drive_name, - "name": bitmap_name, - "granularity": granularity} - self.transaction_add(args_list, "block-dirty-bitmap-add", - bitmap_args) - backup_args = {"device": drive_name, - "target": backup_image_name, - "format": backup_format, - "sync": sync, - "mode": mode, - "speed": speed} + bitmap_args = { + "node": drive_name, + "name": bitmap_name, + "granularity": granularity, + } + self.transaction_add(args_list, "block-dirty-bitmap-add", bitmap_args) + backup_args = { + "device": drive_name, + "target": backup_image_name, + "format": backup_format, + "sync": sync, + "mode": mode, + "speed": speed, + } self.transaction_add(args_list, "drive-backup", backup_args) - LOG_JOB.info("Create bitmap and drive-backup with transaction " - "for %s", drive_name) + LOG_JOB.info( + "Create bitmap and drive-backup with transaction " "for %s", + drive_name, + ) self.vm.monitor.transaction(args_list) if not self.get_status(): self.test.fail("full backup job not found") return None LOG_JOB.info("Create bitmap for %s", drive_name) - self.vm.monitor.operate_dirty_bitmap("add", drive_name, bitmap_name, granularity) + self.vm.monitor.operate_dirty_bitmap( + "add", drive_name, bitmap_name, granularity + ) if not backup_image_name: self.test.error("No backup target provided.") LOG_JOB.info("Create %s backup for %s", sync, drive_name) - self.vm.monitor.drive_backup(drive_name, backup_image_name, backup_format, - sync, speed, mode, bitmap_name) + self.vm.monitor.drive_backup( + drive_name, backup_image_name, backup_format, sync, speed, mode, bitmap_name + ) if not self.get_status(): - self.test.fail("%s backup job not found" 
% sync) + self.test.fail(f"{sync} backup job not found") utils_memory.drop_caches() def transaction_add(self, args_list, type, data): @@ -157,7 +160,8 @@ def get_image_size(self, image): qemu_image = qemu_storage.QemuImg(params, self.data_dir, image) if self.vm: pids = process.getoutput( - "lsof %s |grep -v PID|awk '{print $2}'" % qemu_image.image_filename) + f"lsof {qemu_image.image_filename} |grep -v PID|awk '{{print $2}}'" + ) force_share = str(self.vm.get_pid()) in pids if force_share and not self.vm.is_paused(): self.vm.pause() @@ -177,9 +181,8 @@ def reopen(self): Closing the vm and reboot it with the backup image. """ image_chain = self.image_chain - image_name = self.params.get("image_name_%s" % - image_chain[-1]) - super(LiveBackup, self).reopen(image_name) + image_name = self.params.get(f"image_name_{image_chain[-1]}") + super().reopen(image_name) def before_full_backup(self): """ @@ -228,8 +231,8 @@ def verify_job_info(self): value = info.get(check_param, "") if str(value) != target_value: self.test.fail( - "%s unmatched. Target is %s, result is %s" % - (check_param, target_value, value)) + f"{check_param} unmatched. Target is {target_value}, result is {value}" + ) def create_files(self): """ @@ -250,15 +253,16 @@ def verify_efficiency(self): Verify time and space efficiency for incremental backup. """ if not self.incremental_backup_time < self.full_backup_time: - self.test.fail("incremental backup time %s, " - "larger than full backup time %s " % - (self.incremental_backup_time, - self.full_backup_time)) + self.test.fail( + f"incremental backup time {self.incremental_backup_time}, " + f"larger than full backup time {self.full_backup_time} " + ) LOG_JOB.info("Incremental backup time efficiency check passed.") full_size = self.get_image_size(self.image_chain[0]) incre_size = self.get_image_size(self.image_chain[1]) if incre_size > full_size: - self.test.fail("incremental backup image size %s, " - "larger than full backup image size %s " % - (incre_size, full_size)) + self.test.fail( + f"incremental backup image size {incre_size}, " + f"larger than full backup image size {full_size} " + ) LOG_JOB.info("Incremental backup space efficiency check passed.") diff --git a/qemu/tests/live_snapshot.py b/qemu/tests/live_snapshot.py index 7ccbb5b772..5a17d8375a 100644 --- a/qemu/tests/live_snapshot.py +++ b/qemu/tests/live_snapshot.py @@ -1,11 +1,7 @@ import time from avocado.utils import process - -from virttest import error_context -from virttest import utils_misc -from virttest import utils_test -from virttest import data_dir +from virttest import data_dir, error_context, utils_misc, utils_test def run(test, params, env): @@ -31,7 +27,7 @@ def create_snapshot(vm): """ error_context.context("Creating live snapshot ...", test.log.info) block_info = vm.monitor.info("block") - if vm.monitor.protocol == 'qmp': + if vm.monitor.protocol == "qmp": device = block_info[0]["device"] else: device = "".join(block_info).split(":")[0] @@ -44,7 +40,7 @@ def create_snapshot(vm): test.log.error(snapshot_info) test.fail("Snapshot doesn't exist") - snapshot_file = "images/%s" % params.get("snapshot_file") + snapshot_file = "images/{}".format(params.get("snapshot_file")) snapshot_file = utils_misc.get_path(data_dir.get_data_dir(), snapshot_file) timeout = int(params.get("login_timeout", 360)) dd_timeout = int(params.get("dd_timeout", 900)) @@ -57,7 +53,7 @@ def runtime_test(): try: clean_cmd = params.get("clean_cmd") file_create = params.get("file_create") - clean_cmd += " %s" % file_create + clean_cmd += f" 
{file_create}" test.log.info("Clean file before creation") session.cmd(clean_cmd) # pylint: disable=E0606 @@ -97,8 +93,8 @@ def file_transfer_test(): def installation_test(): args = (test, params, env) bg = utils_misc.InterruptedThread( - utils_test.run_virt_sub_test, args, - {"sub_type": "unattended_install"}) + utils_test.run_virt_sub_test, args, {"sub_type": "unattended_install"} + ) bg.start() if bg.is_alive(): sleep_time = int(params.get("sleep_time", 60)) @@ -108,8 +104,9 @@ def installation_test(): bg.join() except Exception: raise + try: subcommand = params.get("subcommand") - eval("%s_test()" % subcommand) + eval(f"{subcommand}_test()") finally: - process.system("rm -f %s" % snapshot_file) + process.system(f"rm -f {snapshot_file}") diff --git a/qemu/tests/live_snapshot_base.py b/qemu/tests/live_snapshot_base.py index 30034dec3c..20a7f03651 100644 --- a/qemu/tests/live_snapshot_base.py +++ b/qemu/tests/live_snapshot_base.py @@ -1,10 +1,5 @@ -from avocado.utils import crypto -from avocado.utils import process - -from virttest import error_context -from virttest import utils_misc -from virttest import storage -from virttest import data_dir +from avocado.utils import crypto, process +from virttest import data_dir, error_context, storage, utils_misc @error_context.context_aware @@ -30,33 +25,31 @@ def run(test, params, env): copy_timeout = params.get("copy_timeoout", 600) base_file = storage.get_image_filename(params, data_dir.get_data_dir()) device = vm.get_block({"file": base_file}) - snapshot_file = "images/%s" % params.get("snapshot_file") + snapshot_file = "images/{}".format(params.get("snapshot_file")) snapshot_file = utils_misc.get_path(data_dir.get_data_dir(), snapshot_file) snapshot_format = params.get("snapshot_format", "qcow2") tmp_name = utils_misc.generate_random_string(5) - src = dst = "/tmp/%s" % tmp_name + src = dst = f"/tmp/{tmp_name}" if params.get("os_type") != "linux": - dst = "c:\\users\\public\\%s" % tmp_name + dst = f"c:\\users\\public\\{tmp_name}" try: - error_context.context("create file on host, copy it to guest", - test.log.info) + error_context.context("create file on host, copy it to guest", test.log.info) cmd = params.get("dd_cmd") % src process.system(cmd, timeout=dd_timeout, shell=True) md5 = crypto.hash_file(src, algorithm="md5") vm.copy_files_to(src, dst, timeout=copy_timeout) - process.system("rm -f %s" % src) + process.system(f"rm -f {src}") error_context.context("create live snapshot", test.log.info) - if vm.live_snapshot(base_file, snapshot_file, - snapshot_format) != device: + if vm.live_snapshot(base_file, snapshot_file, snapshot_format) != device: test.fail("Fail to create snapshot") backing_file = vm.monitor.get_backingfile(device) if backing_file != base_file: - test.log.error( - "backing file: %s, base file: %s", backing_file, base_file) + test.log.error("backing file: %s, base file: %s", backing_file, base_file) test.fail("Got incorrect backing file") - error_context.context("copy file to host, check content not changed", - test.log.info) + error_context.context( + "copy file to host, check content not changed", test.log.info + ) vm.copy_files_from(dst, src, timeout=copy_timeout) if md5 and (md5 != crypto.hash_file(src, algorithm="md5")): test.fail("diff md5 before/after create snapshot") @@ -64,4 +57,4 @@ def run(test, params, env): finally: if session: session.close() - process.system("rm -f %s %s" % (snapshot_file, src)) + process.system(f"rm -f {snapshot_file} {src}") diff --git a/qemu/tests/live_snapshot_basic.py 
b/qemu/tests/live_snapshot_basic.py index 75f9c3ec7f..d1266d19cf 100644 --- a/qemu/tests/live_snapshot_basic.py +++ b/qemu/tests/live_snapshot_basic.py @@ -1,17 +1,16 @@ import logging import re -from virttest import utils_misc -from virttest import data_dir -from virttest.qemu_storage import QemuImg from avocado.core import exceptions +from virttest import data_dir, utils_misc +from virttest.qemu_storage import QemuImg + from qemu.tests import block_copy -LOG_JOB = logging.getLogger('avocado.test') +LOG_JOB = logging.getLogger("avocado.test") class LiveSnapshot(block_copy.BlockCopy): - """ Provide basic functions for live snapshot test cases. """ @@ -23,15 +22,17 @@ def __init__(self, test, params, env, tag): :param params: A dict containing VM preprocessing parameters. :param env: The environment (a dict-like object). """ - super(LiveSnapshot, self).__init__(test, params, env, tag) + super().__init__(test, params, env, tag) self.default_params = {"login_timeout": 360} self.snapshot_file = self.params.get("snapshot_file") self.node_name = self.params.get("node_name") self.snapshot_node_name = self.params.get("snapshot_node_name") self.snapshot_mode = params.get("snapshot_mode", "absolute-paths") self.snapshot_format = params.get("snapshot_format", "qcow2") - self.snapshot_args = {"mode": self.snapshot_mode, - "format": self.snapshot_format} + self.snapshot_args = { + "mode": self.snapshot_mode, + "format": self.snapshot_format, + } if self.node_name: self.snapshot_args.update({"node-name": self.node_name}) if self.snapshot_node_name: @@ -42,7 +43,7 @@ def create_image(self): Create a image. """ image_name = self.params.get("image_name") - self.params['image_name_snapshot'] = image_name + "-snap" + self.params["image_name_snapshot"] = image_name + "-snap" snapshot_params = self.params.object_params("snapshot") base_dir = self.params.get("images_base_dir", data_dir.get_data_dir()) @@ -55,7 +56,7 @@ def get_snapshot_file(self): Get path of snapshot file. 
""" image_format = self.params["image_format"] - snapshot_file = "images/%s.%s" % (self.snapshot_file, image_format) + snapshot_file = f"images/{self.snapshot_file}.{image_format}" return utils_misc.get_path(data_dir.get_data_dir(), snapshot_file) def create_snapshot(self): @@ -69,8 +70,9 @@ def create_snapshot(self): self.snapshot_file = self.get_snapshot_file() self.trash_files.append(self.snapshot_file) LOG_JOB.info("Creating snapshot") - self.vm.monitor.live_snapshot(self.device, self.snapshot_file, - **self.snapshot_args) + self.vm.monitor.live_snapshot( + self.device, self.snapshot_file, **self.snapshot_args + ) LOG_JOB.info("Checking snapshot created successfully") self.check_snapshot() @@ -83,13 +85,13 @@ def check_snapshot(self): LOG_JOB.error(snapshot_info) raise exceptions.TestFail("Snapshot doesn't exist") if self.snapshot_node_name: - match_string = "u?'node-name': u?'%s'" % self.snapshot_node_name + match_string = f"u?'node-name': u?'{self.snapshot_node_name}'" if not re.search(match_string, snapshot_info): LOG_JOB.error(snapshot_info) - raise exceptions.TestFail("Can not find node name %s of" - " snapshot in block info %s" - % (self.snapshot_node_name, - snapshot_info)) + raise exceptions.TestFail( + f"Can not find node name {self.snapshot_node_name} of" + f" snapshot in block info {snapshot_info}" + ) def action_after_finished(self): """ diff --git a/qemu/tests/live_snapshot_chain.py b/qemu/tests/live_snapshot_chain.py index eeb843d36a..f65189fe55 100644 --- a/qemu/tests/live_snapshot_chain.py +++ b/qemu/tests/live_snapshot_chain.py @@ -2,11 +2,7 @@ import re import time -from virttest import error_context -from virttest import storage -from virttest import qemu_storage -from virttest import data_dir -from virttest import utils_misc +from virttest import data_dir, error_context, qemu_storage, storage, utils_misc @error_context.context_aware @@ -25,13 +21,14 @@ def run(test, params, env): :param params: Dictionary with the test parameters :param env: Dictionary with test environment. """ + def generate_snapshot_chain(snapshot_chain, snapshot_num): for i in range(snapshot_num): - snapshot_tag = "sn%s" % i - snapshot_chain += " %s" % snapshot_tag - params["image_name_%s" % snapshot_tag] = "images/%s" % snapshot_tag + snapshot_tag = f"sn{i}" + snapshot_chain += f" {snapshot_tag}" + params[f"image_name_{snapshot_tag}"] = f"images/{snapshot_tag}" if snapshot_num > 0: - params["check_base_image_%s" % snapshot_tag] = "yes" + params[f"check_base_image_{snapshot_tag}"] = "yes" return snapshot_chain def get_base_image(snapshot_chain, snapshot_file): @@ -70,14 +67,15 @@ def cleanup_images(snapshot_chain, params): image_params = params.object_params(image) if index != 0: image = qemu_storage.QemuImg( - image_params, data_dir.get_data_dir(), image) + image_params, data_dir.get_data_dir(), image + ) if not os.path.exists(image.image_filename): - errs.append("Image %s was not created during test." - % image.image_filename) + errs.append( + f"Image {image.image_filename} was not created during test." 
+ ) image.remove() except Exception as details: - errs.append("Fail to remove image %s: %s" - % (image.image_filename, details)) + errs.append(f"Fail to remove image {image.image_filename}: {details}") return errs vm = env.get_vm(params["main_vm"]) @@ -105,12 +103,14 @@ def cleanup_images(snapshot_chain, params): if image_params.get("file_create"): session.cmd(dir_create_cmd % file_dir) if index > 0: - snapshot_file = storage.get_image_filename(image_params, - data_dir.get_data_dir()) + snapshot_file = storage.get_image_filename( + image_params, data_dir.get_data_dir() + ) base_image = get_base_image(snapshot_chain, image) base_image_params = params.object_params(base_image) - base_file = storage.get_image_filename(base_image_params, - data_dir.get_data_dir()) + base_file = storage.get_image_filename( + base_image_params, data_dir.get_data_dir() + ) snapshot_format = image_params.get("image_format") error_context.context("Do pre snapshot operates", test.log.info) @@ -120,8 +120,7 @@ def cleanup_images(snapshot_chain, params): error_context.context("Do live snapshot ", test.log.info) vm.live_snapshot(base_file, snapshot_file, snapshot_format) - error_context.context("Do post snapshot operates", - test.log.info) + error_context.context("Do post snapshot operates", test.log.info) if image_params.get("post_snapshot_cmd"): do_operate(image_params, "post_snapshot_cmd") md5 = "" @@ -138,8 +137,7 @@ def cleanup_images(snapshot_chain, params): md5_value[image] = {image: md5} status, output = session.cmd_status_output(sync_cmd) if status != 0: - test.error("Execute '%s' with failures('%s') " % - (sync_cmd, output)) + test.error(f"Execute '{sync_cmd}' with failures('{output}') ") if image_params.get("check_alive_cmd"): session.cmd(image_params.get("check_alive_cmd")) if image_params.get("file_create"): @@ -168,22 +166,21 @@ def cleanup_images(snapshot_chain, params): for file in md5_value[image]: md5 = session.cmd_output(md5_cmd % file) if md5 != md5_value[image][file]: - error_message = "File %s in image %s changed " %\ - (file, image) - error_message += "from '%s' to '%s'(md5)" %\ - (md5_value[image][file], md5) + error_message = f"File {file} in image {image} changed " + error_message += ( + f"from '{md5_value[image][file]}' to '{md5}'(md5)" + ) test.fail(error_message) files_check = session.cmd(file_check_cmd % file_dir) if files_check != files_in_guest[image]: - error_message = "Files in image %s is not as expect:" %\ - image - error_message += "Before shut down: %s" %\ - files_in_guest[image] - error_message += "Now: %s" % files_check + error_message = f"Files in image {image} is not as expect:" + error_message += f"Before shut down: {files_in_guest[image]}" + error_message += f"Now: {files_check}" test.fail(error_message) if image_params.get("image_check"): image = qemu_storage.QemuImg( - image_params, data_dir.get_data_dir(), image) + image_params, data_dir.get_data_dir(), image + ) image.check_image(image_params, data_dir.get_data_dir()) session.close() @@ -191,11 +188,13 @@ def cleanup_images(snapshot_chain, params): if vm.is_alive(): vm.destroy() errs = cleanup_images(snapshot_chain, params) - test.assertFalse(errs, "Errors occurred while removing images:\n%s" - % "\n".join(errs)) + test.assertFalse( + errs, "Errors occurred while removing images:\n{}".format("\n".join(errs)) + ) except Exception as details: - error_context.context("Force-cleaning after exception: %s" % details, - test.log.error) + error_context.context( + f"Force-cleaning after exception: {details}", test.log.error + ) if 
vm.is_alive(): vm.destroy() cleanup_images(snapshot_chain, params) diff --git a/qemu/tests/live_snapshot_integrity.py b/qemu/tests/live_snapshot_integrity.py index 3bb8ceffe0..6b14bb9ba7 100644 --- a/qemu/tests/live_snapshot_integrity.py +++ b/qemu/tests/live_snapshot_integrity.py @@ -31,8 +31,8 @@ def run(test, params, env): stress_test.create_snapshot() stress_test.create_file(file_name_sn1) stress_test.action_after_finished() - format_postfix = ".%s" % params["image_format"] - snapshot = stress_test.snapshot_file.replace(format_postfix, '') + format_postfix = ".{}".format(params["image_format"]) + snapshot = stress_test.snapshot_file.replace(format_postfix, "") stress_test.reopen(snapshot) for name in file_names: stress_test.verify_md5(name) diff --git a/qemu/tests/live_snapshot_negative.py b/qemu/tests/live_snapshot_negative.py index f28e5a1fe0..a0b8285ee2 100644 --- a/qemu/tests/live_snapshot_negative.py +++ b/qemu/tests/live_snapshot_negative.py @@ -1,17 +1,15 @@ -import os import logging +import os -from virttest import utils_misc -from virttest import error_context -from virttest import data_dir -from qemu.tests.live_snapshot_basic import LiveSnapshot from avocado.core import exceptions +from virttest import data_dir, error_context, utils_misc -LOG_JOB = logging.getLogger('avocado.test') +from qemu.tests.live_snapshot_basic import LiveSnapshot +LOG_JOB = logging.getLogger("avocado.test") -class LiveSnapshotNegative(LiveSnapshot): +class LiveSnapshotNegative(LiveSnapshot): """ Provide basic functions for live snapshot negative test cases. """ @@ -20,39 +18,43 @@ def nonexist_snapshot_file(self): """ Generate a non-existed path of snapshot file. """ - error_context.context("Generate a non-existed path of" - " snapshot file", LOG_JOB.info) + error_context.context( + "Generate a non-existed path of" " snapshot file", LOG_JOB.info + ) tmp_name = utils_misc.generate_random_string(5) dst = os.path.join(data_dir.get_tmp_dir(), tmp_name) path = os.path.join(dst, self.snapshot_file) if not os.path.exists(path): return path - raise exceptions.TestFail("Path %s is existed." % path) + raise exceptions.TestFail(f"Path {path} is existed.") def create_snapshot(self): """ Create a live disk snapshot. """ self.snapshot_file = self.nonexist_snapshot_file() - kwargs = {"device": self.device, - "snapshot-file": self.snapshot_file, - "format": self.snapshot_format, - "mode": self.snapshot_mode} - if 'format' not in kwargs: + kwargs = { + "device": self.device, + "snapshot-file": self.snapshot_file, + "format": self.snapshot_format, + "mode": self.snapshot_mode, + } + if "format" not in kwargs: kwargs.update({"format": "qcow2"}) - if 'mode' not in kwargs: + if "mode" not in kwargs: kwargs.update({"mode": "absolute-paths"}) match_str = self.params.get("match_str") if self.snapshot_mode == "existing": match_str = match_str % self.snapshot_file - error_context.context("Create live snapshot with non-existed path.", - LOG_JOB.info) + error_context.context( + "Create live snapshot with non-existed path.", LOG_JOB.info + ) response = self.vm.monitor.cmd_qmp("blockdev-snapshot-sync", kwargs) if match_str not in str(response): - raise exceptions.TestFail("Fail to get expected result." - "%s is expected in %s" % - (match_str, response)) + raise exceptions.TestFail( + "Fail to get expected result." 
f"{match_str} is expected in {response}" + ) @error_context.context_aware diff --git a/qemu/tests/live_snapshot_runtime.py b/qemu/tests/live_snapshot_runtime.py index 2a9dfed868..c3f9b06d9f 100644 --- a/qemu/tests/live_snapshot_runtime.py +++ b/qemu/tests/live_snapshot_runtime.py @@ -1,13 +1,11 @@ -from virttest import error_context -from virttest import utils_misc +from virttest import error_context, utils_misc from qemu.tests import live_snapshot_basic class LiveSnapshotRuntime(live_snapshot_basic.LiveSnapshot): - def __init__(self, test, params, env, tag): - super(LiveSnapshotRuntime, self).__init__(test, params, env, tag) + super().__init__(test, params, env, tag) @error_context.context_aware def reboot(self): @@ -15,7 +13,7 @@ def reboot(self): Reset guest with system_reset; """ method = self.params.get("reboot_method", "system_reset") - return super(LiveSnapshotRuntime, self).reboot(method=method, boot_check=False) + return super().reboot(method=method, boot_check=False) @error_context.context_aware def action_when_start(self): @@ -23,7 +21,7 @@ def action_when_start(self): start pre-action in new threads; do live snapshot during pre-action. """ - tag = self.params.get("source_image", "image1") + self.params.get("source_image", "image1") for test in self.params.get("when_start").split(): if hasattr(self, test): fun = getattr(self, test) diff --git a/qemu/tests/live_snapshot_simple.py b/qemu/tests/live_snapshot_simple.py index c3c6a0e3e2..f9ff7dfbd5 100644 --- a/qemu/tests/live_snapshot_simple.py +++ b/qemu/tests/live_snapshot_simple.py @@ -1,4 +1,5 @@ from virttest import error_context + from qemu.tests import live_snapshot_basic diff --git a/qemu/tests/live_snapshot_stress.py b/qemu/tests/live_snapshot_stress.py index e474789f21..70df3faa9f 100644 --- a/qemu/tests/live_snapshot_stress.py +++ b/qemu/tests/live_snapshot_stress.py @@ -1,18 +1,15 @@ import logging -from virttest import utils_misc -from virttest import utils_test -from virttest import error_context +from virttest import error_context, utils_misc, utils_test from qemu.tests import live_snapshot_basic -LOG_JOB = logging.getLogger('avocado.test') +LOG_JOB = logging.getLogger("avocado.test") class LiveSnapshotStress(live_snapshot_basic.LiveSnapshot): - def __init__(self, test, params, env, tag): - super(LiveSnapshotStress, self).__init__(test, params, env, tag) + super().__init__(test, params, env, tag) @error_context.context_aware def load_stress(self): @@ -33,6 +30,7 @@ def unload_stress(self): """ stop stress app """ + def _unload_stress(): session = self.get_session() cmd = self.params.get("stop_cmd") @@ -40,8 +38,13 @@ def _unload_stress(): return not self.stress_app_running() error_context.context("stop stress app in guest", LOG_JOB.info) - utils_misc.wait_for(_unload_stress, first=2.0, - text="wait stress app quit", step=1.0, timeout=120) + utils_misc.wait_for( + _unload_stress, + first=2.0, + text="wait stress app quit", + step=1.0, + timeout=120, + ) def stress_app_running(self): """ diff --git a/qemu/tests/live_snapshot_transaction.py b/qemu/tests/live_snapshot_transaction.py index 116dd32200..487bcf6e65 100644 --- a/qemu/tests/live_snapshot_transaction.py +++ b/qemu/tests/live_snapshot_transaction.py @@ -20,25 +20,31 @@ def run(test, params, env): try: for image in params.objects("images"): image_params = params.object_params(image) - transaction_test = live_snapshot_basic.LiveSnapshot(test, - image_params, - env, image) + transaction_test = live_snapshot_basic.LiveSnapshot( + test, image_params, env, image + ) 
transaction_test.snapshot_args.update({"device": transaction_test.device}) transaction_test.snapshot_file = image + "-snap" snapshot_file = transaction_test.get_snapshot_file() transaction_test.snapshot_args.update({"snapshot-file": snapshot_file}) - args = {"type": "blockdev-snapshot-sync", - "data": transaction_test.snapshot_args} + args = { + "type": "blockdev-snapshot-sync", + "data": transaction_test.snapshot_args, + } arg_list.append(args) - error_context.context("Create multiple live snapshots simultaneously" - " with transaction", test.log.info) + error_context.context( + "Create multiple live snapshots simultaneously" " with transaction", + test.log.info, + ) output = transaction_test.vm.monitor.transaction(arg_list) # return nothing on successful transaction if bool(output): - test.fail("Live snapshot transatcion failed," - " there should be nothing on success.\n" - "More details: %s" % output) + test.fail( + "Live snapshot transatcion failed," + " there should be nothing on success.\n" + f"More details: {output}" + ) transaction_test.action_after_finished() finally: try: diff --git a/qemu/tests/luks_convert.py b/qemu/tests/luks_convert.py index 13d5ede8bc..678f820d4d 100644 --- a/qemu/tests/luks_convert.py +++ b/qemu/tests/luks_convert.py @@ -1,8 +1,7 @@ -from virttest import data_dir -from virttest import qemu_storage - from avocado import fail_on from avocado.utils import process +from virttest import data_dir, qemu_storage + from provider import qemu_img_utils as img_utils @@ -17,12 +16,10 @@ def run(test, params, env): sync_bin = params.get("sync_bin", "sync") test.log.debug("Create temporary file on guest: %s", guest_temp_file) - img_utils.save_random_file_to_vm(vm, guest_temp_file, 2048 * 512, - sync_bin) + img_utils.save_random_file_to_vm(vm, guest_temp_file, 2048 * 512, sync_bin) test.log.debug("Get md5 value of the temporary file") - md5_value = img_utils.check_md5sum(guest_temp_file, md5sum_bin, - session) + md5_value = img_utils.check_md5sum(guest_temp_file, md5sum_bin, session) session.close() vm.destroy() @@ -36,12 +33,12 @@ def run(test, params, env): cache_mode = params.get("cache_mode") test.log.debug("Convert from %s to %s", convert_source, convert_target) fail_on((process.CmdError,))(source.convert)( - source_params, root_dir, cache_mode=cache_mode) + source_params, root_dir, cache_mode=cache_mode + ) test.log.debug("Compare images: %s and %s", convert_source, convert_target) compare_cache_mode = params.get("compare_cache_mode") - compare_ret = source.compare_to(target, - source_cache_mode=compare_cache_mode) + compare_ret = source.compare_to(target, source_cache_mode=compare_cache_mode) if compare_ret.exit_status != 0: test.log.error(compare_ret.stdout_text) if compare_ret.exit_status == 1: @@ -49,12 +46,12 @@ def run(test, params, env): test.error(compare_ret.stdout_text) if tmp_file_check: - vm = img_utils.boot_vm_with_images(test, params, env, - (convert_target,)) + vm = img_utils.boot_vm_with_images(test, params, env, (convert_target,)) session = vm.wait_for_login() test.log.debug("Verify md5 value of the temporary file") - img_utils.check_md5sum(guest_temp_file, md5sum_bin, session, - md5_value_to_check=md5_value) + img_utils.check_md5sum( + guest_temp_file, md5sum_bin, session, md5_value_to_check=md5_value + ) session.close() vm.destroy() target.remove() diff --git a/qemu/tests/luks_image_over_qsd.py b/qemu/tests/luks_image_over_qsd.py index 2204cc7ebc..c8d0602275 100644 --- a/qemu/tests/luks_image_over_qsd.py +++ b/qemu/tests/luks_image_over_qsd.py @@ 
-1,37 +1,40 @@ -from provider.qsd import QsdDaemonDev - -from virttest import error_context -from virttest import qemu_storage -from virttest import utils_numeric -from virttest import data_dir -from virttest import utils_disk -from virttest import storage +from virttest import ( + data_dir, + error_context, + qemu_storage, + storage, + utils_disk, + utils_numeric, +) from virttest.qemu_monitor import QMPCmdError +from provider.qsd import QsdDaemonDev + # This decorator makes the test function aware of context strings @error_context.context_aware def run(test, params, env): """ - Export a luks image via QSD with NBD inet. - 1. Create a luks image. - 2. Start a QSD daemon, and export the luks image with NBD inet. - 3. Check the info of the image over NBD. - 4. Boot up a guest with the exported image as a data disk. - 5. Check there is the data disk in the guest. - 6. Connect to the QMP sock - 6.1 Query the block exports - 6.2 luks key management - 7. Stop the QSD daemon - :param test: VT test object. - :param params: Dictionary with the test parameters. - :param env: Dictionary with test environment. - """ + Export a luks image via QSD with NBD inet. + 1. Create a luks image. + 2. Start a QSD daemon, and export the luks image with NBD inet. + 3. Check the info of the image over NBD. + 4. Boot up a guest with the exported image as a data disk. + 5. Check there is the data disk in the guest. + 6. Connect to the QMP sock + 6.1 Query the block exports + 6.2 luks key management + 7. Stop the QSD daemon + :param test: VT test object. + :param params: Dictionary with the test parameters. + :param env: Dictionary with test environment. + """ + def pre_test(): qsd.start_daemon() def check_disk(image_params): - luks_image_size = image_params['image_size'] + luks_image_size = image_params["image_size"] login_timeout = params.get_numeric("login_timeout", 360) vm = env.get_vm(params["main_vm"]) try: @@ -41,7 +44,7 @@ def check_disk(image_params): disks = utils_disk.get_linux_disks(session, True) session.close() for _, attr in disks.items(): - if 'disk' in attr and luks_image_size in attr: + if "disk" in attr and luks_image_size in attr: break else: test.fail("Failed to find the luks image in guest") @@ -49,22 +52,28 @@ def check_disk(image_params): vm.destroy() def querey_block_exports(image_params): - luks_image_size = image_params['image_size'] + luks_image_size = image_params["image_size"] out = qsd.monitor.cmd("query-block-exports")[0] if out.get("type") != "nbd": test.fail("The exported type is not matched to 'nbd'.") out = qsd.monitor.cmd("query-named-block-nodes")[0] image_info = out.get("image") - image_size = utils_numeric.normalize_data_size( - str(image_info.get("virtual-size")), "G").split('.')[0] + 'G' + image_size = ( + utils_numeric.normalize_data_size( + str(image_info.get("virtual-size")), "G" + ).split(".")[0] + + "G" + ) image_name = image_info.get("filename") - expected_image_name = storage.get_image_filename(image_params, - data_dir.get_data_dir()) + expected_image_name = storage.get_image_filename( + image_params, data_dir.get_data_dir() + ) if image_size != luks_image_size or image_name != expected_image_name: test.fail( - "The image size(%s) or image name(%s) is not matched to the " - "original image." % (image_size, image_name)) + f"The image size({image_size}) or image name({image_name}) is not matched to the " + "original image." 
+ ) def hotplug_secret_objects(): args1 = {"qom-type": "secret", "id": "sec1", "data": "redhat1"} @@ -73,22 +82,29 @@ def hotplug_secret_objects(): out = qsd.monitor.cmd("object-add", args) if "error" in out: test.fail("Add secret object failed, check please") - args = {"node-name": "fmt_stg1", "job-id": "job_add_key1", - "options": {"driver": "luks", "state": "active", - "new-secret": "sec1", "keyslot": 1, - "iter-time": 10}} + args = { + "node-name": "fmt_stg1", + "job-id": "job_add_key1", + "options": { + "driver": "luks", + "state": "active", + "new-secret": "sec1", + "keyslot": 1, + "iter-time": 10, + }, + } try: qsd.monitor.cmd("x-blockdev-amend", args) except QMPCmdError as e: qmp_error_msg = params.get("qmp_error_msg") if qmp_error_msg not in str(e.data): - test.fail("The error msg(%s) is not correct." % str(e)) + test.fail(f"The error msg({str(e)}) is not correct.") else: test.fail("Unexpected success when running x-blockdev-amend") def run_test(): - luks_img_param = params.object_params('stg1') - nbd_image_tag = params['nbd_image_tag'] + luks_img_param = params.object_params("stg1") + nbd_image_tag = params["nbd_image_tag"] nbd_image_params = params.object_params(nbd_image_tag) qemu_img = qemu_storage.QemuImg(nbd_image_params, None, nbd_image_tag) qemu_img.info() @@ -100,7 +116,7 @@ def run_test(): def post_test(): qsd.stop_daemon() - qsd = QsdDaemonDev(params.objects('qsd_namespaces')[0], params) + qsd = QsdDaemonDev(params.objects("qsd_namespaces")[0], params) pre_test() try: run_test() diff --git a/qemu/tests/macvtap_event_notification.py b/qemu/tests/macvtap_event_notification.py index f80fe8d794..5894f7bb89 100644 --- a/qemu/tests/macvtap_event_notification.py +++ b/qemu/tests/macvtap_event_notification.py @@ -2,12 +2,7 @@ import time from avocado.utils import process - -from virttest import error_context -from virttest import utils_misc -from virttest import utils_net -from virttest import env_process - +from virttest import env_process, error_context, utils_misc, utils_net _system_output = functools.partial(process.system_output, shell=True) @@ -28,8 +23,7 @@ def run(test, params, env): qemu_binary = utils_misc.get_qemu_binary(params) if not utils_misc.qemu_has_option("qmp", qemu_binary): - test.cancel("This test case requires a host QEMU with QMP " - "monitor support") + test.cancel("This test case requires a host QEMU with QMP " "monitor support") if params.get("nettype", "macvtap") != "macvtap": test.cancel("This test case test macvtap.") @@ -48,12 +42,13 @@ def run(test, params, env): post_cmd = params.get("post_cmd") post_cmd_type = params.get("post_cmd_type") - session = vm.wait_for_serial_login(timeout=int(params.get("login_timeout", - 360))) + session = vm.wait_for_serial_login(timeout=int(params.get("login_timeout", 360))) - callback = {"host_cmd": _system_output, - "guest_cmd": session.cmd_output, - "qmp_cmd": vm.get_monitors_by_type("qmp")[0].send_args_cmd} + callback = { + "host_cmd": _system_output, + "guest_cmd": session.cmd_output, + "qmp_cmd": vm.get_monitors_by_type("qmp")[0].send_args_cmd, + } def send_cmd(cmd, cmd_type): if cmd_type in callback.keys(): @@ -69,13 +64,13 @@ def send_cmd(cmd, cmd_type): mac = vm.get_mac_address() interface_name = utils_net.get_linux_ifname(session, mac) - error_context.context("In guest, change network interface " - "to promisc state.", test.log.info) + error_context.context( + "In guest, change network interface " "to promisc state.", test.log.info + ) event_cmd = params.get("event_cmd") % interface_name 
send_cmd(event_cmd, event_cmd_type) - error_context.context("Try to get qmp events in %s seconds!" % timeout, - test.log.info) + error_context.context(f"Try to get qmp events in {timeout} seconds!", test.log.info) end_time = time.time() + timeout qmp_monitors = vm.get_monitors_by_type("qmp") qmp_num = len(qmp_monitors) @@ -83,8 +78,8 @@ def send_cmd(cmd, cmd_type): for monitor in qmp_monitors: event = monitor.get_event(event_check) if event: - txt = "Monitr %s " % monitor.name - txt += "receive qmp %s event notification" % event_check + txt = f"Monitr {monitor.name} " + txt += f"receive qmp {event_check} event notification" test.log.info(txt) qmp_num -= 1 qmp_monitors.remove(monitor) @@ -95,22 +90,22 @@ def send_cmd(cmd, cmd_type): output = session.cmd("ip link show") err = "Monitor(s) " for monitor in qmp_monitors: - err += "%s " % monitor.name - err += " did not receive qmp %s event notification." % event_check - err += " ip link show command output in guest: %s" % output + err += f"{monitor.name} " + err += f" did not receive qmp {event_check} event notification." + err += f" ip link show command output in guest: {output}" test.fail(err) if post_cmd: for nic in vm.virtnet: post_cmd = post_cmd % nic.device_id - error_context.context("Run post_cmd '%s'" % post_cmd, test.log.info) + error_context.context(f"Run post_cmd '{post_cmd}'", test.log.info) post_cmd_type = params.get("post_cmd_type", event_cmd_type) output = send_cmd(post_cmd, post_cmd_type) post_cmd_check = params.get("post_cmd_check") if post_cmd_check: if post_cmd_check not in str(output): - err = "Did not find '%s' in " % post_cmd_check - err += "'%s' command's output: %s" % (post_cmd, output) + err = f"Did not find '{post_cmd_check}' in " + err += f"'{post_cmd}' command's output: {output}" test.fail(err) if session: diff --git a/qemu/tests/macvtap_guest_communicate.py b/qemu/tests/macvtap_guest_communicate.py index 7ce13639e7..e509656e9c 100644 --- a/qemu/tests/macvtap_guest_communicate.py +++ b/qemu/tests/macvtap_guest_communicate.py @@ -1,10 +1,6 @@ import os -from virttest import error_context -from virttest import data_dir -from virttest import utils_net -from virttest import utils_misc -from virttest import utils_netperf +from virttest import data_dir, error_context, utils_misc, utils_net, utils_netperf @error_context.context_aware @@ -19,46 +15,50 @@ def run(test, params, env): :param params: Dictionary with the test parameters :param env: Dictionary with test environment. """ + def ping_test(): # Ping from guest1 to guest2 for 30 counts - status, output = utils_net.ping(dest=addresses[1], count=30, - timeout=60, - session=sessions[0]) + status, output = utils_net.ping( + dest=addresses[1], count=30, timeout=60, session=sessions[0] + ) if status: - test.fail("ping %s unexpected, output %s" % (vms[1], output)) + test.fail(f"ping {vms[1]} unexpected, output {output}") def netperf_test(): """ Netperf stress test between two guest. 
""" n_client = utils_netperf.NetperfClient( - addresses[0], params["client_path"], - netperf_source=os.path.join(data_dir.get_deps_dir("netperf"), - params.get("netperf_client_link")), + addresses[0], + params["client_path"], + netperf_source=os.path.join( + data_dir.get_deps_dir("netperf"), params.get("netperf_client_link") + ), client=params.get("shell_client"), port=params.get("shell_port"), prompt=params.get("shell_prompt", r"^root@.*[\#\$]\s*$|#"), username=params.get("username"), password=params.get("password"), - linesep=params.get("shell_linesep", "\n").encode().decode( - 'unicode_escape'), + linesep=params.get("shell_linesep", "\n").encode().decode("unicode_escape"), status_test_command=params.get("status_test_command", ""), - compile_option=params.get("compile_option_client", "")) + compile_option=params.get("compile_option_client", ""), + ) n_server = utils_netperf.NetperfServer( addresses[1], params["server_path"], - netperf_source=os.path.join(data_dir.get_deps_dir("netperf"), - params.get("netperf_server_link")), + netperf_source=os.path.join( + data_dir.get_deps_dir("netperf"), params.get("netperf_server_link") + ), username=params.get("username"), password=params.get("password"), client=params.get("shell_client"), port=params.get("shell_port"), prompt=params.get("shell_prompt", r"^root@.*[\#\$]\s*$|#"), - linesep=params.get("shell_linesep", "\n").encode().decode( - 'unicode_escape'), + linesep=params.get("shell_linesep", "\n").encode().decode("unicode_escape"), status_test_command=params.get("status_test_command", "echo $?"), - compile_option=params.get("compile_option_server", "")) + compile_option=params.get("compile_option_server", ""), + ) try: n_server.start() @@ -67,24 +67,30 @@ def netperf_test(): test_protocols = params.get("test_protocols", "TCP_STREAM") netperf_output_unit = params.get("netperf_output_unit") test_option = params.get("test_option", "") - test_option += " -l %s" % netperf_test_duration + test_option += f" -l {netperf_test_duration}" if netperf_output_unit in "GMKgmk": - test_option += " -f %s" % netperf_output_unit - t_option = "%s -t %s" % (test_option, test_protocols) - n_client.bg_start(addresses[1], - t_option, - params.get_numeric("netperf_para_sessions"), - params.get("netperf_cmd_prefix", ""), - package_sizes=params.get("netperf_sizes")) - if utils_misc.wait_for(n_client.is_netperf_running, 10, 0, 1, - "Wait netperf test start"): + test_option += f" -f {netperf_output_unit}" + t_option = f"{test_option} -t {test_protocols}" + n_client.bg_start( + addresses[1], + t_option, + params.get_numeric("netperf_para_sessions"), + params.get("netperf_cmd_prefix", ""), + package_sizes=params.get("netperf_sizes"), + ) + if utils_misc.wait_for( + n_client.is_netperf_running, 10, 0, 1, "Wait netperf test start" + ): test.log.info("Netperf test start successfully.") else: test.error("Can not start netperf client.") utils_misc.wait_for( lambda: not n_client.is_netperf_running(), - netperf_test_duration, 0, 5, - "Wait netperf test finish %ss" % netperf_test_duration) + netperf_test_duration, + 0, + 5, + f"Wait netperf test finish {netperf_test_duration}s", + ) finally: n_server.stop() n_server.cleanup(True) diff --git a/qemu/tests/max_channel_lun.py b/qemu/tests/max_channel_lun.py index 4d394a053b..6b0fbe27fd 100644 --- a/qemu/tests/max_channel_lun.py +++ b/qemu/tests/max_channel_lun.py @@ -1,11 +1,7 @@ -import re import random +import re -from virttest import error_context -from virttest import env_process -from virttest import virt_vm -from virttest import 
qemu_storage -from virttest import data_dir +from virttest import data_dir, env_process, error_context, qemu_storage, virt_vm @error_context.context_aware @@ -22,41 +18,40 @@ def run(test, params, env): :param env: Dictionary with test environment. """ - vm = env.get_vm(params['main_vm']) + vm = env.get_vm(params["main_vm"]) timeout = float(params.get("login_timeout", 240)) - stg_image_num = int(params.get('stg_image_num')) - stg_image_name = params.get('stg_image_name', 'images/%s') - channel = params.get('channel') + stg_image_num = int(params.get("stg_image_num")) + stg_image_name = params.get("stg_image_name", "images/%s") + channel = params.get("channel") error_info = params["error_info"] for i in range(stg_image_num): name = "stg%d" % i - params['images'] += " %s" % name - params["image_name_%s" % name] = stg_image_name % name - params["blk_extra_params_%s" % name] = channel - params["drive_port_%s" % name] = i - params["scsi_hba_%s" % name] = "spapr-vscsi" - if params['luns'] == "lun_33": + params["images"] += f" {name}" + params[f"image_name_{name}"] = stg_image_name % name + params[f"blk_extra_params_{name}"] = channel + params[f"drive_port_{name}"] = i + params[f"scsi_hba_{name}"] = "spapr-vscsi" + if params["luns"] == "lun_33": img_params = params.object_params("stg32") - image = qemu_storage.QemuImg(img_params, data_dir.get_data_dir(), - "stg32") - params["extra_params"] = "-blockdev node-name=file_stg32,\ -driver=file,auto-read-only=on,discard=unmap,aio=threads,filename=%s,\ + image = qemu_storage.QemuImg(img_params, data_dir.get_data_dir(), "stg32") + params["extra_params"] = f"-blockdev node-name=file_stg32,\ +driver=file,auto-read-only=on,discard=unmap,aio=threads,filename={image.image_filename},\ cache.direct=on,cache.no-flush=off -blockdev node-name=drive_stg32,\ driver=qcow2,read-only=off,cache.direct=on,cache.no-flush=off,\ file=file_stg32 -device scsi-hd,lun=32,id=stg32,bus=spapr_vscsi0.0,\ -drive=drive_stg32,write-cache=on,channel=0" % image.image_filename +drive=drive_stg32,write-cache=on,channel=0" image.create(params) - params['start_vm'] = 'yes' + params["start_vm"] = "yes" try: - env_process.process(test, params, env, - env_process.preprocess_image, - env_process.preprocess_vm) + env_process.process( + test, params, env, env_process.preprocess_image, env_process.preprocess_vm + ) except virt_vm.VMCreateError as e: if error_info not in e.output: - test.fail("%s is not reported by QEMU" % error_info) + test.fail(f"{error_info} is not reported by QEMU") - if params['luns'] == "lun_32": + if params["luns"] == "lun_32": session = vm.wait_for_login(timeout=timeout) o = session.cmd_output("lsblk -o SUBSYSTEMS|grep vio|wc -l") if int(o) != stg_image_num: @@ -64,15 +59,13 @@ def run(test, params, env): o = session.cmd_output("lsblk -o KNAME,SUBSYSTEMS|grep vio") disks = re.findall(r"(sd\w+)", o, re.M) disk = random.choice(disks) - cmd_w = "dd if=/dev/zero of=/dev/%s bs=1M count=8" % disk - cmd_r = "dd if=/dev/%s of=/dev/null bs=1M count=8" % disk - error_context.context('Do dd writing test on the data disk.', - test.log.info) + cmd_w = f"dd if=/dev/zero of=/dev/{disk} bs=1M count=8" + cmd_r = f"dd if=/dev/{disk} of=/dev/null bs=1M count=8" + error_context.context("Do dd writing test on the data disk.", test.log.info) status = session.cmd_status(cmd_w, timeout=timeout) if status != 0: test.error("dd writing test failed") - error_context.context('Do dd reading test on the data disk.', - test.log.info) + error_context.context("Do dd reading test on the data disk.", 
test.log.info) status = session.cmd_status(cmd_r, timeout=timeout) if status != 0: test.error("dd reading test failed") diff --git a/qemu/tests/memhp_threads.py b/qemu/tests/memhp_threads.py index 8027a59843..f8873caab3 100644 --- a/qemu/tests/memhp_threads.py +++ b/qemu/tests/memhp_threads.py @@ -1,10 +1,7 @@ import time from avocado.utils import process -from virttest import error_context -from virttest import utils_qemu -from virttest import utils_misc -from virttest import utils_test +from virttest import error_context, utils_misc, utils_qemu, utils_test from virttest.qemu_devices.qdevices import Memory from virttest.utils_test.qemu import MemoryHotplugTest from virttest.utils_version import VersionInterval @@ -27,12 +24,13 @@ def run(test, params, env): :param params: Dictionary with the test parameters :param env: Dictionary with test environment. """ + def get_qemu_threads(cmd, timeout=60): """ Get qemu threads when it's stable """ threads = 0 - start_time = time.time() + time.time() end_time = time.time() + float(timeout) while time.time() < end_time: cur_threads = int(process.system_output(cmd, shell=True)) @@ -41,26 +39,26 @@ def get_qemu_threads(cmd, timeout=60): time.sleep(1) else: return threads - test.error("Can't get stable qemu threads number in %ss." % timeout) + test.error(f"Can't get stable qemu threads number in {timeout}s.") vm = env.get_vm(params["main_vm"]) get_threads_cmd = params["get_threads_cmd"] % vm.get_pid() qemu_binary = utils_misc.get_qemu_binary(params) qemu_version = utils_qemu.get_qemu_version(qemu_binary)[0] target_mems = params.get("target_mems").split() - if qemu_version in VersionInterval('[7.1.0,)'): + if qemu_version in VersionInterval("[7.1.0,)"): threads_default = params.get_numeric("smp") else: - target_mems.remove('plug1') + target_mems.remove("plug1") for target_mem in target_mems: test.log.info("Get qemu threads number at beginning") pre_threads = get_qemu_threads(get_threads_cmd) - test.log.info("QEMU boot threads number is %s" % pre_threads) + test.log.info("QEMU boot threads number is %s", pre_threads) new_params = params.object_params(target_mem).object_params("mem") attrs = Memory.__attributes__[new_params["backend"]][:] new_params = new_params.copy_from_keys(attrs) dev = Memory(new_params["backend"], new_params) - dev.set_param("id", "%s-%s" % ("mem", target_mem)) + dev.set_param("id", "{}-{}".format("mem", target_mem)) args = [vm.monitor, vm.devices.qemu_version] bg = utils_test.BackgroundTest(dev.hotplug, args) test.log.info("Hotplug memory backend '%s' to guest", dev["id"]) @@ -69,17 +67,19 @@ def get_qemu_threads(cmd, timeout=60): if mem_params.get("prealloc-threads_mem"): threads_num = mem_params.get_numeric("prealloc-threads_mem") else: - threads_num = threads_default # pylint: disable=E0606 + threads_num = threads_default # pylint: disable=E0606 test.log.info("Get qemu threads number again") post_threads = get_qemu_threads(get_threads_cmd) if post_threads - pre_threads != threads_num: - test.fail("QEMU threads number is not right, pre is %s, post is %s" - % (pre_threads, post_threads)) + test.fail( + f"QEMU threads number is not right, pre is {pre_threads}, post is {post_threads}" + ) bg.join() memhp_test = MemoryHotplugTest(test, params, env) memhp_test.update_vm_after_hotplug(vm, dev) - dimm = vm.devices.dimm_device_define_by_params(params.object_params(target_mem), - target_mem) + dimm = vm.devices.dimm_device_define_by_params( + params.object_params(target_mem), target_mem + ) dimm.set_param("memdev", dev["id"]) 
test.log.info("Hotplug pc-dimm '%s' to guest", dimm["id"]) vm.devices.simple_hotplug(dimm, vm.monitor) diff --git a/qemu/tests/memory_leak_after_nichotplug.py b/qemu/tests/memory_leak_after_nichotplug.py index 7d6f1a6a26..d4196a504b 100644 --- a/qemu/tests/memory_leak_after_nichotplug.py +++ b/qemu/tests/memory_leak_after_nichotplug.py @@ -1,9 +1,6 @@ import time -from virttest import error_context -from virttest import utils_net -from virttest import utils_misc -from virttest import env_process +from virttest import env_process, error_context, utils_misc, utils_net @error_context.context_aware @@ -33,22 +30,25 @@ def run(test, params, env): else: session = vm.wait_for_serial_login(timeout=timeout) - free_mem_before_nichotplug = utils_misc.get_free_mem(session, - os_type) - test.log.info("Guest free memory before nic hotplug: %d", - free_mem_before_nichotplug) + free_mem_before_nichotplug = utils_misc.get_free_mem(session, os_type) + test.log.info( + "Guest free memory before nic hotplug: %d", free_mem_before_nichotplug + ) if os_type == "windows": - error_context.context("Add network devices through monitor cmd", - test.log.info) + error_context.context("Add network devices through monitor cmd", test.log.info) pci_model = params.get("pci_model") netdst = params.get("netdst", "virbr0") nettype = params.get("nettype", "bridge") for i in range(1, 100): - nic_name = 'hotadded%s' % i - vm.hotplug_nic(nic_model=pci_model, nic_name=nic_name, - netdst=netdst, nettype=nettype, - queues=params.get('queues')) + nic_name = f"hotadded{i}" + vm.hotplug_nic( + nic_model=pci_model, + nic_name=nic_name, + netdst=netdst, + nettype=nettype, + queues=params.get("queues"), + ) time.sleep(3) vm.hotunplug_nic(nic_name) time.sleep(3) @@ -57,23 +57,24 @@ def run(test, params, env): mac = vm.get_mac_address() guest_nic = utils_net.get_linux_ifname(session, mac) for i in range(1, 300): - session.cmd_output_safe("ip link add link %s name %s.%s type vlan id %s" % - (guest_nic, guest_nic, i, i)) + session.cmd_output_safe( + f"ip link add link {guest_nic} name {guest_nic}.{i} type vlan id {i}" + ) time.sleep(3) for i in range(1, 300): - session.cmd_output_safe("ip link delete %s.%s" % (guest_nic, i)) + session.cmd_output_safe(f"ip link delete {guest_nic}.{i}") - free_mem_after_nichotplug = utils_misc.get_free_mem(session, - os_type) - test.log.info("Guest free memory after nic hotplug: %d", - free_mem_after_nichotplug) + free_mem_after_nichotplug = utils_misc.get_free_mem(session, os_type) + test.log.info("Guest free memory after nic hotplug: %d", free_mem_after_nichotplug) mem_reduced = free_mem_before_nichotplug - free_mem_after_nichotplug - if (os_type == "windows" and mem_reduced > 1024) \ - or (os_type == "linux" and mem_reduced > 200): - test.error("There might be memory leak after hotplug nic. " - "Memory reduced %d" % mem_reduced) - error_context.context("Memory reduced = %d" % mem_reduced, - test.log.info) + if (os_type == "windows" and mem_reduced > 1024) or ( + os_type == "linux" and mem_reduced > 200 + ): + test.error( + "There might be memory leak after hotplug nic. 
" + "Memory reduced %d" % mem_reduced + ) + error_context.context("Memory reduced = %d" % mem_reduced, test.log.info) session.close() diff --git a/qemu/tests/microcode_test.py b/qemu/tests/microcode_test.py index 79defd3d76..d6478be32c 100644 --- a/qemu/tests/microcode_test.py +++ b/qemu/tests/microcode_test.py @@ -16,6 +16,7 @@ def run(test, params, env): :param params: Dictionary with the test parameters :param env: Dictionary with test environment. """ + def get_microcode_ver(cmd, session=None): """ Get microcde version in guest or host diff --git a/qemu/tests/migration.py b/qemu/tests/migration.py index 44e600054d..55dc02eeeb 100644 --- a/qemu/tests/migration.py +++ b/qemu/tests/migration.py @@ -1,16 +1,17 @@ +import ast +import re import time import types -import re import aexpect -import ast - -from virttest import utils_misc -from virttest import utils_test -from virttest import utils_package -from virttest import error_context -from virttest import qemu_monitor # For MonitorNotSupportedMigCapError -from virttest import qemu_migration +from virttest import ( + error_context, + qemu_migration, + qemu_monitor, # For MonitorNotSupportedMigCapError + utils_misc, + utils_package, + utils_test, +) # Define get_function-functions as global to allow importing from other tests @@ -34,12 +35,10 @@ def mig_set_speed(vm, params, test): def check_dma(vm, params, test): - dmesg_pattern = params.get("dmesg_pattern", - "ata.*?configured for PIO") + dmesg_pattern = params.get("dmesg_pattern", "ata.*?configured for PIO") dma_pattern = params.get("dma_pattern", r"DMA.*?\(\?\)$") pio_pattern = params.get("pio_pattern", r"PIO.*?pio\d+\s+$") - hdparm_cmd = params.get("hdparm_cmd", - "i=`ls /dev/[shv]da` ; hdparm -I $i") + hdparm_cmd = params.get("hdparm_cmd", "i=`ls /dev/[shv]da` ; hdparm -I $i") session_dma = vm.wait_for_login() hdparm_output = session_dma.cmd_output(hdparm_cmd) failed_msg = "" @@ -49,12 +48,12 @@ def check_dma(vm, params, test): failed_msg += "Failed in PIO check from hdparm output.\n" if failed_msg: - failed_msg += "hdparm output is: %s\n" % hdparm_output + failed_msg += f"hdparm output is: {hdparm_output}\n" dmesg = session_dma.cmd_output("dmesg") if not re.search(dmesg_pattern, dmesg): failed_msg += "Failed in dmesg check.\n" - failed_msg += " dmesg from guest is: %s\n" % dmesg + failed_msg += f" dmesg from guest is: {dmesg}\n" if failed_msg: test.fail(failed_msg) @@ -77,6 +76,7 @@ def run(test, params, env): :param params: Dictionary with test parameters. :param env: Dictionary with the test environment. 
""" + def guest_stress_start(guest_stress_test): """ Start a stress test in guest, Could be 'iozone', 'dd', 'stress' @@ -91,7 +91,7 @@ def guest_stress_start(guest_stress_test): test_type = params.get("test_type") func = autotest_control.run new_params = params.copy() - new_params["test_control_file"] = "%s.control" % test_type + new_params["test_control_file"] = f"{test_type}.control" args = (test, new_params, env) timeout = 60 @@ -100,9 +100,12 @@ def guest_stress_start(guest_stress_test): vm.verify_alive() session = vm.wait_for_login(timeout=login_timeout) func = session.cmd_output - args = ("for((;;)) do dd if=/dev/zero of=/tmp/test bs=5M " - "count=100; rm -f /tmp/test; done", - login_timeout, test.log.info) + args = ( + "for((;;)) do dd if=/dev/zero of=/tmp/test bs=5M " + "count=100; rm -f /tmp/test; done", + login_timeout, + test.log.info, + ) test.log.info("Start %s test in guest", guest_stress_test) bg = utils_test.BackgroundTest(func, args) # pylint: disable=E0606 @@ -139,10 +142,13 @@ def guest_stress_deamon(): vm.verify_alive() session = vm.wait_for_login() if stress_stop_cmd: - test.log.warn("Killing background stress process " - "with cmd '%s', you would see some " - "error message in client test result," - "it's harmless.", stress_stop_cmd) + test.log.warning( + "Killing background stress process " + "with cmd '%s', you would see some " + "error message in client test result," + "it's harmless.", + stress_stop_cmd, + ) session.cmd(stress_stop_cmd) bg.join(10) except Exception: @@ -158,7 +164,7 @@ def guest_stress_deamon(): mig_exec_cmd_dst = params.get("migration_exec_cmd_dst") if mig_exec_cmd_src and "gzip" in mig_exec_cmd_src: mig_exec_file = params.get("migration_exec_file", "/var/tmp/exec") - mig_exec_file += "-%s" % utils_misc.generate_random_string(8) + mig_exec_file += f"-{utils_misc.generate_random_string(8)}" mig_exec_cmd_src = mig_exec_cmd_src % mig_exec_file mig_exec_cmd_dst = mig_exec_cmd_dst % mig_exec_file offline = params.get("offline", "no") == "yes" @@ -170,7 +176,6 @@ def guest_stress_deamon(): vm.verify_alive() if living_guest_os: - session = vm.wait_for_login(timeout=login_timeout) # Get the output of migration_test_command @@ -193,8 +198,10 @@ def guest_stress_deamon(): try: check_command = params.get("migration_bg_check_command", "") - error_context.context("Checking the background command in the " - "guest pre migration", test.log.info) + error_context.context( + "Checking the background command in the " "guest pre migration", + test.log.info, + ) if session2.cmd_status(check_command, timeout=30) != 0: test.error("migration bg check command failed") session2.close() @@ -204,8 +211,7 @@ def guest_stress_deamon(): if guest_stress_test: guest_stress_start(guest_stress_test) params["action"] = "run" - deamon_thread = utils_test.BackgroundTest( - guest_stress_deamon, ()) + deamon_thread = utils_test.BackgroundTest(guest_stress_deamon, ()) deamon_thread.start() capabilities = ast.literal_eval(params.get("migrate_capabilities", "{}")) @@ -227,15 +233,21 @@ def guest_stress_deamon(): else: test.log.info("Round %s pong...", str(i / 2)) try: - vm.migrate(mig_timeout, mig_protocol, mig_cancel_delay, - offline, check, - migration_exec_cmd_src=mig_exec_cmd_src, - migration_exec_cmd_dst=mig_exec_cmd_dst, - migrate_capabilities=capabilities, - mig_inner_funcs=inner_funcs, - env=env, migrate_parameters=migrate_parameters) + vm.migrate( + mig_timeout, + mig_protocol, + mig_cancel_delay, + offline, + check, + migration_exec_cmd_src=mig_exec_cmd_src, + 
migration_exec_cmd_dst=mig_exec_cmd_dst, + migrate_capabilities=capabilities, + mig_inner_funcs=inner_funcs, + env=env, + migrate_parameters=migrate_parameters, + ) except qemu_monitor.MonitorNotSupportedMigCapError as e: - test.cancel("Unable to access capability: %s" % e) + test.cancel(f"Unable to access capability: {e}") except: raise @@ -253,8 +265,10 @@ def guest_stress_deamon(): test.log.info("Logged in after migration") # Make sure the background process is still running - error_context.context("Checking the background command in the " - "guest post migration", test.log.info) + error_context.context( + "Checking the background command in the " "guest post migration", + test.log.info, + ) session2.cmd(check_command, timeout=30) # Get the output of migration_test_command @@ -262,15 +276,22 @@ def guest_stress_deamon(): # Compare output to reference output if output != reference_output: - test.log.info("Command output before migration differs from " - "command output after migration") + test.log.info( + "Command output before migration differs from " + "command output after migration" + ) test.log.info("Command: %s", test_command) - test.log.info("Output before: %s", - utils_misc.format_str_for_message(reference_output)) - test.log.info("Output after: %s", - utils_misc.format_str_for_message(output)) - test.fail("Command '%s' produced different output " - "before and after migration" % test_command) + test.log.info( + "Output before: %s", + utils_misc.format_str_for_message(reference_output), + ) + test.log.info( + "Output after: %s", utils_misc.format_str_for_message(output) + ) + test.fail( + f"Command '{test_command}' produced different output " + "before and after migration" + ) finally: # Kill the background process @@ -288,8 +309,10 @@ def guest_stress_deamon(): if not int(details.status) == int(ignore_status): raise except aexpect.ShellTimeoutError: - test.log.debug("Remote session not responsive, " - "shutting down VM %s", vm.name) + test.log.debug( + "Remote session not responsive, " "shutting down VM %s", + vm.name, + ) vm.destroy(gracefully=True) if deamon_thread is not None: # Set deamon thread action to stop after migrate @@ -297,7 +320,13 @@ def guest_stress_deamon(): deamon_thread.join() else: # Just migrate without depending on a living guest OS - vm.migrate(mig_timeout, mig_protocol, mig_cancel_delay, offline, - check, migration_exec_cmd_src=mig_exec_cmd_src, - migration_exec_cmd_dst=mig_exec_cmd_dst, - migrate_parameters=migrate_parameters) + vm.migrate( + mig_timeout, + mig_protocol, + mig_cancel_delay, + offline, + check, + migration_exec_cmd_src=mig_exec_cmd_src, + migration_exec_cmd_dst=mig_exec_cmd_dst, + migrate_parameters=migrate_parameters, + ) diff --git a/qemu/tests/migration_after_nichotplug.py b/qemu/tests/migration_after_nichotplug.py index 7bed30cc31..b3f5792ec5 100644 --- a/qemu/tests/migration_after_nichotplug.py +++ b/qemu/tests/migration_after_nichotplug.py @@ -1,9 +1,6 @@ import time -from virttest import error_context -from virttest import utils_test -from virttest import virt_vm -from virttest import utils_net +from virttest import error_context, utils_net, utils_test, virt_vm @error_context.context_aware @@ -45,20 +42,22 @@ def check_nic_is_empty(): if guest_is_not_windows: # grep return code: match(0), un-match(1), error(2) if status != 1: - test.fail("nic is not empty with output: %s" % output) + test.fail(f"nic is not empty with output: {output}") else: if status: - test.error("Error occured when get nic status, " - "with status=%s, output=%s" 
% (status, output)) + test.error( + "Error occured when get nic status, " + f"with status={status}, output={output}" + ) if "Ethernet" in output: - test.fail("nic is not empty with output: %s" % output) + test.fail(f"nic is not empty with output: {output}") vm = env.get_vm(params["main_vm"]) vm.verify_alive() timeout = int(params.get("login_timeout", 360)) session_serial = vm.wait_for_serial_login(timeout=timeout) - guest_is_not_windows = (params.get("os_type") != 'windows') + guest_is_not_windows = params.get("os_type") != "windows" run_dhclient = params.get("run_dhclient", "no") mig_timeout = float(params.get("mig_timeout", "3600")) nettype = params.get("nettype", "bridge") @@ -72,33 +71,34 @@ def check_nic_is_empty(): # Modprobe the module if specified in config file module = params.get("modprobe_module") if guest_is_not_windows and module: - session_serial.cmd("modprobe %s" % module) + session_serial.cmd(f"modprobe {module}") if session_serial: session_serial.close() if with_unplug: nic_name = vm.virtnet[0].nic_name - error_context.context( - "Hot unplug %s before migration" % nic_name, test.log.info) + error_context.context(f"Hot unplug {nic_name} before migration", test.log.info) vm.hotunplug_nic(nic_name) - error_context.context( - "Check whether the guest's nic is empty", test.log.info) + error_context.context("Check whether the guest's nic is empty", test.log.info) check_nic_is_empty() vm.params["nics"] = "" if "q35" in params["machine_type"]: vm.params["pcie_extra_root_port"] = 1 else: - error_context.context("Add network devices through monitor cmd", - test.log.info) - nic_name = 'hotadded' + error_context.context("Add network devices through monitor cmd", test.log.info) + nic_name = "hotadded" enable_msix_vectors = params.get("enable_msix_vectors") - nic_info = vm.hotplug_nic(nic_model=pci_model, nic_name=nic_name, - netdst=netdst, nettype=nettype, - queues=params.get('queues'), - enable_msix_vectors=enable_msix_vectors) - nic_mac = nic_info['mac'] - vm.params['nics'] += " %s" % nic_name - vm.params['nic_model_%s' % nic_name] = nic_info['nic_model'] + nic_info = vm.hotplug_nic( + nic_model=pci_model, + nic_name=nic_name, + netdst=netdst, + nettype=nettype, + queues=params.get("queues"), + enable_msix_vectors=enable_msix_vectors, + ) + nic_mac = nic_info["mac"] + vm.params["nics"] += f" {nic_name}" + vm.params[f"nic_model_{nic_name}"] = nic_info["nic_model"] # Only run dhclient if explicitly set and guest is not running Windows. # Most modern Linux guests run NetworkManager, and thus do not need this. 
@@ -113,8 +113,7 @@ def check_nic_is_empty(): error_context.context("Disable the primary link of guest", test.log.info) set_link(nic_name, up=False) - error_context.context("Check if new interface gets ip address", - test.log.info) + error_context.context("Check if new interface gets ip address", test.log.info) try: ip = vm.wait_for_get_address(nic_name) except virt_vm.VMIPAddressMissingError: @@ -124,19 +123,19 @@ def check_nic_is_empty(): error_context.context("Ping guest's new ip from host", test.log.info) s, o = utils_test.ping(ip, 10, timeout=15) if s != 0: - test.fail("New nic failed ping test with output:\n %s" % o) + test.fail(f"New nic failed ping test with output:\n {o}") error_context.context("Re-enabling the primary link", test.log.info) set_link(nic_name, up=True) - error_context.context("Migrate from source VM to Destination VM", - test.log.info) + error_context.context("Migrate from source VM to Destination VM", test.log.info) vm.migrate(mig_timeout, mig_protocol, mig_cancel_delay, env=env) if with_unplug: error_context.context( "Check whether the guest's nic is still empty after migration", - test.log.info) + test.log.info, + ) check_nic_is_empty() else: error_context.context("Disable the primary link", test.log.info) @@ -145,16 +144,16 @@ def check_nic_is_empty(): error_context.context("Ping guest's new ip from host", test.log.info) s, o = utils_test.ping(ip, 10, timeout=15) if s != 0: - test.fail("New nic failed ping test with output:\n %s" % o) + test.fail(f"New nic failed ping test with output:\n {o}") error_context.context("Re-enabling the primary link", test.log.info) set_link(nic_name, up=True) - error_context.context("Reboot guest and verify new nic works", - test.log.info) + error_context.context("Reboot guest and verify new nic works", test.log.info) host_ip = utils_net.get_ip_address_by_interface(netdst) session = vm.reboot() - status, output = utils_test.ping(dest=host_ip, count=100, - timeout=240, session=session) + status, output = utils_test.ping( + dest=host_ip, count=100, timeout=240, session=session + ) if status != 0: test.fail("Fail to ping host form guest") diff --git a/qemu/tests/migration_after_vm_paused.py b/qemu/tests/migration_after_vm_paused.py index fe7f25a022..0d5f49717d 100644 --- a/qemu/tests/migration_after_vm_paused.py +++ b/qemu/tests/migration_after_vm_paused.py @@ -2,18 +2,13 @@ import time import aexpect +from virttest import error_context, utils_misc, utils_test -from virttest import error_context -from virttest import utils_misc -from virttest import utils_test +LOG_JOB = logging.getLogger("avocado.test") -LOG_JOB = logging.getLogger('avocado.test') - - -class MigrationAfterVmPaused(object): +class MigrationAfterVmPaused: def __init__(self, test, params, env): - self.test = test self.params = params self.env = env @@ -24,15 +19,13 @@ def __init__(self, test, params, env): self.mig_exec_cmd_src = self.params.get("migration_exec_cmd_src") self.mig_exec_cmd_dst = self.params.get("migration_exec_cmd_dst") if self.mig_exec_cmd_src and "gzip" in self.mig_exec_cmd_src: - self.mig_exec_file = self.params.get("migration_exec_file", - "/var/tmp/exec") - self.mig_exec_file += "-%s" % utils_misc.generate_random_string(8) + self.mig_exec_file = self.params.get("migration_exec_file", "/var/tmp/exec") + self.mig_exec_file += f"-{utils_misc.generate_random_string(8)}" self.mig_exec_cmd_src = self.mig_exec_cmd_src % self.mig_exec_file self.mig_exec_cmd_dst = self.mig_exec_cmd_dst % self.mig_exec_file self.offline = self.params.get("offline", "no") == 
"yes" self.check = self.params.get("vmstate_check", "no") == "yes" - self.living_guest_os = self.params.get("migration_living_guest", - "yes") == "yes" + self.living_guest_os = self.params.get("migration_living_guest", "yes") == "yes" self.vm = self.env.get_vm(self.params["main_vm"]) self.test_command = self.params.get("migration_test_command") self.background_command = self.params.get("migration_bg_command") @@ -42,11 +35,15 @@ def __init__(self, test, params, env): self.stress_stop_cmd = self.params.get("stress_stop_cmd") def stress_test_in_guest(self, timeout=60): - self.bg = utils_misc.InterruptedThread( utils_test.run_virt_sub_test, - args=(self.test, self.params, self.env,), - kwargs={"sub_type": self.guest_stress_test}) + args=( + self.test, + self.params, + self.env, + ), + kwargs={"sub_type": self.guest_stress_test}, + ) self.bg.start() LOG_JOB.info("sleep %ds waiting guest stress test start.", timeout) time.sleep(timeout) @@ -54,16 +51,18 @@ def stress_test_in_guest(self, timeout=60): self.test.fail("Failed to start guest stress test!") def stop_stress_test_in_guest(self): - if self.bg and self.bg.is_alive(): try: self.vm.verify_alive() session = self.vm.wait_for_login() if self.stress_stop_cmd: - LOG_JOB.warn("Killing background stress process " - "with cmd '%s', you would see some " - "error message in client test result," - "it's harmless.", self.stress_stop_cmd) + LOG_JOB.warning( + "Killing background stress process " + "with cmd '%s', you would see some " + "error message in client test result," + "it's harmless.", + self.stress_stop_cmd, + ) session.cmd(self.stress_stop_cmd) self.bg.join(10) except Exception: @@ -71,7 +70,6 @@ def stop_stress_test_in_guest(self): @error_context.context_aware def before_migration(self): - self.vm.verify_alive() if self.living_guest_os: session = self.vm.wait_for_login(timeout=self.login_timeout) @@ -83,41 +81,49 @@ def before_migration(self): # Start another session with the guest and make sure the background # process is running session2 = self.vm.wait_for_login(timeout=self.login_timeout) - error_context.context("Checking the background command in " - "the guest pre migration", LOG_JOB.info) + error_context.context( + "Checking the background command in " "the guest pre migration", + LOG_JOB.info, + ) session2.cmd(self.bg_check_command, timeout=30) session2.close() else: # Just migrate on a living guest OS - self.test.fail("The guest is not alive," - " this test must on a living guest OS.") + self.test.fail( + "The guest is not alive," " this test must on a living guest OS." 
+ ) @error_context.context_aware def after_migration(self): - LOG_JOB.info("Logging into guest after migration...") session2 = self.vm.wait_for_login(timeout=self.login_timeout) LOG_JOB.info("Logged in after migration") - error_context.context("Checking the background command in the guest " - "post migration", LOG_JOB.info) + error_context.context( + "Checking the background command in the guest " "post migration", + LOG_JOB.info, + ) session2.cmd(self.bg_check_command, timeout=30) output = session2.cmd_output(self.test_command) # Compare output to reference output if output != self.reference_output: - LOG_JOB.info("Command output before migration differs from " - "command output after migration") + LOG_JOB.info( + "Command output before migration differs from " + "command output after migration" + ) LOG_JOB.info("Command: %s", self.test_command) - LOG_JOB.info("Output before: %s", - utils_misc.format_str_for_message(self.reference_output)) - LOG_JOB.info("Output after: %s", - utils_misc.format_str_for_message(output)) - self.test.fail("Command '%s' produced different output " - "before and after migration" % self.test_command) + LOG_JOB.info( + "Output before: %s", + utils_misc.format_str_for_message(self.reference_output), + ) + LOG_JOB.info("Output after: %s", utils_misc.format_str_for_message(output)) + self.test.fail( + f"Command '{self.test_command}' produced different output " + "before and after migration" + ) # Kill the background process if session2 and session2.is_alive(): bg_kill_cmd = self.params.get("migration_bg_kill_command", None) - ignore_status = self.params.get( - "migration_bg_kill_ignore_status", 1) + ignore_status = self.params.get("migration_bg_kill_ignore_status", 1) if bg_kill_cmd is not None: try: session2.cmd(bg_kill_cmd) @@ -132,21 +138,23 @@ def after_migration(self): LOG_JOB.debug("Remote session not responsive.") def ping_pong_migration(self): - for i in range(int(self.ping_pong)): if i % 2 == 0: LOG_JOB.info("Round %s ping...", (i / 2)) else: LOG_JOB.info("Round %s pong...", (i / 2)) - self.vm.migrate(self.mig_timeout, self.mig_protocol, - self.mig_cancel_delay, - self.offline, self.check, - migration_exec_cmd_src=self.mig_exec_cmd_src, - migration_exec_cmd_dst=self.mig_exec_cmd_dst, - env=self.env) + self.vm.migrate( + self.mig_timeout, + self.mig_protocol, + self.mig_cancel_delay, + self.offline, + self.check, + migration_exec_cmd_src=self.mig_exec_cmd_src, + migration_exec_cmd_dst=self.mig_exec_cmd_dst, + env=self.env, + ) def start_test(self): - self.before_migration() if self.guest_stress_test: self.stress_test_in_guest() diff --git a/qemu/tests/migration_virtio_mem_ignore_shared.py b/qemu/tests/migration_virtio_mem_ignore_shared.py index b095fb7b13..ed61910b50 100644 --- a/qemu/tests/migration_virtio_mem_ignore_shared.py +++ b/qemu/tests/migration_virtio_mem_ignore_shared.py @@ -1,9 +1,7 @@ import ast import time -from virttest import error_context -from virttest import utils_test - +from virttest import error_context, utils_test from virttest.utils_misc import normalize_data_size from provider import virtio_mem_utils @@ -33,10 +31,11 @@ def run(test, params, env): vm = env.get_vm(params["main_vm"]) vm.verify_alive() # Stress in source VM - error_context.base_context("Install and compile stress tool", - test.log.info) + error_context.base_context("Install and compile stress tool", test.log.info) test_mem = params.get_numeric("mem", target_type=float) - params["stress_args"] = '--cpu 4 --io 4 --vm 2 --vm-bytes %fM' % float(test_mem * 0.8) + 
params["stress_args"] = ( + f"--cpu 4 --io 4 --vm 2 --vm-bytes {float(test_mem * 0.8):f}M" + ) clone = None try: stress_test = utils_test.VMStress(vm, "stress", params) @@ -44,31 +43,35 @@ def run(test, params, env): try: stress_test.load_stress_tool() # Migration - error_context.base_context("Set migrate capabilities and do migration", - test.log.info) - capabilities = ast.literal_eval(params.get("migrate_capabilities", - "{'x-ignore-shared': 'on'}")) + error_context.base_context( + "Set migrate capabilities and do migration", test.log.info + ) + capabilities = ast.literal_eval( + params.get("migrate_capabilities", "{'x-ignore-shared': 'on'}") + ) mig_timeout = params.get_numeric("mig_timeout", 1200, float) mig_protocol = params.get("migration_protocol", "tcp") - clone = vm.migrate(mig_timeout, - mig_protocol, - env=env, - migrate_capabilities=capabilities, - not_wait_for_migration=True) + clone = vm.migrate( + mig_timeout, + mig_protocol, + env=env, + migrate_capabilities=capabilities, + not_wait_for_migration=True, + ) vm.wait_for_migration(mig_timeout) clone.resume() error_context.context("Check the total ram migrated", test.log.info) - total_mem_migrated = str(vm.monitor.info("migrate")['ram']['total']) - total_mem_migrated = float(normalize_data_size(total_mem_migrated, 'M')) - test.log.debug("Total memory migrated: %f" % total_mem_migrated) + total_mem_migrated = str(vm.monitor.info("migrate")["ram"]["total"]) + total_mem_migrated = float(normalize_data_size(total_mem_migrated, "M")) + test.log.debug("Total memory migrated: %f", total_mem_migrated) mem_threshold = params.get_numeric("mem_threshold", target_type=float) if total_mem_migrated > test_mem * mem_threshold: test.error("Error, more memory than expected has been migrated!") - test.log.debug("Stress tool running status: %s" % stress_test.app_running()) + test.log.debug("Stress tool running status: %s", stress_test.app_running()) if not stress_test.app_running(): test.fail("Stress tool must be running at this point!") @@ -79,30 +82,26 @@ def run(test, params, env): stress_test.unload_stress() stress_test.clean() - error_context.base_context("Test virtio-mem device on destination VM", - test.log.info) - virtio_mem_model = 'virtio-mem-pci' - if '-mmio:' in params.get("machine_type"): - virtio_mem_model = 'virtio-mem-device' - vmem_dev = clone.devices.get_by_params({'driver': virtio_mem_model})[0] + error_context.base_context( + "Test virtio-mem device on destination VM", test.log.info + ) + virtio_mem_model = "virtio-mem-pci" + if "-mmio:" in params.get("machine_type"): + virtio_mem_model = "virtio-mem-device" + vmem_dev = clone.devices.get_by_params({"driver": virtio_mem_model})[0] device_id = vmem_dev.get_qid() requested_size_vmem = params.get("requested-size_test_vmem0") for requested_size in requested_size_vmem.split(): - req_size_normalized = int(float(normalize_data_size(requested_size, - 'B'))) + req_size_normalized = int(float(normalize_data_size(requested_size, "B"))) clone.monitor.qom_set(device_id, "requested-size", req_size_normalized) time.sleep(45) - virtio_mem_utils.check_memory_devices(device_id, - requested_size, - threshold, - clone, - test) - virtio_mem_utils.check_numa_plugged_mem(0, - requested_size, - threshold, - clone, - test) + virtio_mem_utils.check_memory_devices( + device_id, requested_size, threshold, clone, test + ) + virtio_mem_utils.check_numa_plugged_mem( + 0, requested_size, threshold, clone, test + ) finally: if clone: clone.destroy(gracefully=False) - env.unregister_vm("%s_clone" % vm.name) + 
env.unregister_vm(f"{vm.name}_clone") diff --git a/qemu/tests/migration_with_block.py b/qemu/tests/migration_with_block.py index 789c1cda68..6f6b6d745d 100644 --- a/qemu/tests/migration_with_block.py +++ b/qemu/tests/migration_with_block.py @@ -1,22 +1,17 @@ import ast -import re import os -import threading +import re import sys -import six - -from virttest import error_context -from virttest import utils_test -from virttest import utils_disk -from virttest import utils_misc -from virttest.remote import scp_to_remote -from virttest import data_dir +import threading +import six from avocado.utils import process +from virttest import data_dir, error_context, utils_disk, utils_misc, utils_test +from virttest.remote import scp_to_remote -from provider.storage_benchmark import generate_instance -from provider.cdrom import QMPEventCheckCDEject, QMPEventCheckCDChange from provider import win_driver_utils +from provider.cdrom import QMPEventCheckCDChange, QMPEventCheckCDEject +from provider.storage_benchmark import generate_instance @error_context.context_aware @@ -80,27 +75,33 @@ def run(self): self.exit_event.set() def scp_package(src, dst): - """ Copy file from the host to the guest. """ - scp_to_remote(vm.get_address(), '22', params.get('username'), - params.get('password'), src, dst) + """Copy file from the host to the guest.""" + scp_to_remote( + vm.get_address(), + "22", + params.get("username"), + params.get("password"), + src, + dst, + ) def unpack_package(session, src, dst): - """ Unpack the package. """ - session.cmd('tar -xvf %s -C %s' % (src, dst)) + """Unpack the package.""" + session.cmd(f"tar -xvf {src} -C {dst}") def install_package(session, src, dst): - """ Install the package. """ - cmd = ' && '.join(("cd %s && ./configure --prefix=%s", "make && make install")) + """Install the package.""" + cmd = " && ".join(("cd %s && ./configure --prefix=%s", "make && make install")) session.cmd(cmd % (src, dst), 300) def cleanup(session, src): - """ Remove files. """ - session.cmd('rm -rf %s' % src) + """Remove files.""" + session.cmd(f"rm -rf {src}") def _get_data_disks_linux(): - """ Get the data disks by serial or wwn options in linux. """ - for data_image in params['images'].split()[1:]: - extra_params = params.get("blk_extra_params_%s" % data_image, '') + """Get the data disks by serial or wwn options in linux.""" + for data_image in params["images"].split()[1:]: + extra_params = params.get(f"blk_extra_params_{data_image}", "") match = re.search(r"(serial|wwn)=(\w+)", extra_params, re.M) if match: drive_id = match.group(2) @@ -108,23 +109,22 @@ def _get_data_disks_linux(): continue drive_path = utils_misc.get_linux_drive_path(session, drive_id) if not drive_path: - test.error("Failed to get '%s' drive path" % data_image) - yield drive_path[5:], params.object_params(data_image)['image_size'] + test.error(f"Failed to get '{data_image}' drive path") + yield drive_path[5:], params.object_params(data_image)["image_size"] def _get_data_disks_win(): - """ Get the data disks in windows. """ - for data_image in params['images'].split()[1:]: - size = params.object_params(data_image)['image_size'] + """Get the data disks in windows.""" + for data_image in params["images"].split()[1:]: + size = params.object_params(data_image)["image_size"] yield utils_disk.get_windows_disks_index(session, size)[0], size def get_data_disks(): - """ Get the data disks. 
""" + """Get the data disks.""" _get_disks = _get_data_disks_win if windows else _get_data_disks_linux - for disk, size in _get_disks(): - yield disk, size + yield from _get_disks() def format_data_disks(): - """ Format the data disks. """ + """Format the data disks.""" for disk, size in get_data_disks(): if windows: if not utils_disk.update_windows_disk_attributes(session, disk): @@ -132,9 +132,9 @@ def format_data_disks(): yield utils_disk.configure_empty_disk(session, disk, size, os_type)[0] def run_iozone(timeout): - """ Do iozone testing inside guest. """ + """Do iozone testing inside guest.""" test.log.info("Do iozone testing on data disks.") - iozone = generate_instance(params, vm, 'iozone') + iozone = generate_instance(params, vm, "iozone") try: for target in format_data_disks(): iozone.run(stress_options.format(target), timeout) @@ -142,91 +142,97 @@ def run_iozone(timeout): iozone.clean() def run_stressapptest(timeout): - """ Do stressapptest testing inside guest. """ + """Do stressapptest testing inside guest.""" test.log.info("Do stressapptest testing on data disks.") sub_session = vm.wait_for_login(timeout=360) try: host_path = os.path.join( - data_dir.get_deps_dir('stress'), 'stressapptest.tar') - scp_package(host_path, '/home/') - unpack_package(sub_session, '/home/stressapptest.tar', '/home') - install_package(sub_session, '/home/stressapptest', '/home/stressapptest') - stress_bin_path = '/home/stressapptest/bin/stressapptest' - sub_session.cmd('{} {}'.format(stress_bin_path, stress_options), timeout) + data_dir.get_deps_dir("stress"), "stressapptest.tar" + ) + scp_package(host_path, "/home/") + unpack_package(sub_session, "/home/stressapptest.tar", "/home") + install_package(sub_session, "/home/stressapptest", "/home/stressapptest") + stress_bin_path = "/home/stressapptest/bin/stressapptest" + sub_session.cmd(f"{stress_bin_path} {stress_options}", timeout) finally: - cleanup(sub_session, '/home/stressapptest*') + cleanup(sub_session, "/home/stressapptest*") sub_session.close() def run_stress_background(timeout): - """ Run stress inside guest. """ - thread = _StressThread(stress_maps[stress_name], exit_event, (timeout, )) + """Run stress inside guest.""" + thread = _StressThread(stress_maps[stress_name], exit_event, (timeout,)) thread.start() return thread def get_cdrom_size(): - """ Get the size of cdrom device inside guest. """ + """Get the size of cdrom device inside guest.""" error_context.context("Get the cdrom's size in guest.", test.log.info) cmd = params["check_size"] if not utils_misc.wait_for( - lambda: re.search(r'(\d+)', session.cmd(cmd), re.M), 10): - test.fail('Failed to get the cdrom\'s size.') - cdrom_size = re.search(r'(\d+)', session.cmd(cmd), re.M).group(1) + lambda: re.search(r"(\d+)", session.cmd(cmd), re.M), 10 + ): + test.fail("Failed to get the cdrom's size.") + cdrom_size = re.search(r"(\d+)", session.cmd(cmd), re.M).group(1) cdrom_size = int(cdrom_size) * 512 if not windows else int(cdrom_size) test.log.info("The cdrom's size is %s in guest.", cdrom_size) return cdrom_size def get_iso_size(iso_file): - """ Get the size of iso on host.""" + """Get the size of iso on host.""" error_context.context("Get the iso size on host.", test.log.info) - return int(process.system_output( - 'ls -l %s | awk \'{print $5}\'' % iso_file, shell=True).decode()) + return int( + process.system_output( + f"ls -l {iso_file} | awk '{{print $5}}'", shell=True + ).decode() + ) def compare_cdrom_size(iso_file): - """ Compare the cdrom's size between host and guest. 
""" + """Compare the cdrom's size between host and guest.""" error_context.context( - "Compare the cdrom's size between host and guest.", test.log.info) + "Compare the cdrom's size between host and guest.", test.log.info + ) ios_size = get_iso_size(iso_file) if not utils_misc.wait_for(lambda: get_cdrom_size() == ios_size, 30, step=3): - test.fail('The size inside guest is not equal to iso size on host.') + test.fail("The size inside guest is not equal to iso size on host.") return get_cdrom_size() def check_cdrom_info_by_qmp(check_items): - """ Check the cdrom device info by qmp. """ + """Check the cdrom device info by qmp.""" error_context.context( - 'Check if the info \"%s\" are match with the output of query-block.' % - str(check_items), test.log.info) + f'Check if the info "{str(check_items)}" are match with the output of query-block.', + test.log.info, + ) blocks = vm.monitor.info_block() for key, val in check_items.items(): if blocks[device_name][key] == val: # pylint: disable=E0606 continue - test.fail( - 'No such \"%s: %s\" in the output of query-block.' % (key, val)) + test.fail(f'No such "{key}: {val}" in the output of query-block.') def check_block(block): - """ Check if the block device is existed in query-block.""" + """Check if the block device is existed in query-block.""" return True if block in str(vm.monitor.info("block")) else False def eject_cdrom(): - """ Eject cdrom. """ + """Eject cdrom.""" error_context.context("Eject the original device.", test.log.info) - with eject_check: # pylint: disable=E0606 + with eject_check: # pylint: disable=E0606 vm.eject_cdrom(device_name, True) if check_block(orig_img_name): # pylint: disable=E0606 - test.fail("Failed to eject cdrom %s. " % orig_img_name) + test.fail(f"Failed to eject cdrom {orig_img_name}. ") def change_cdrom(): - """ Change cdrom. """ + """Change cdrom.""" error_context.context("Insert new image to device.", test.log.info) with change_check: # pylint: disable=E0606 vm.change_media(device_name, new_img_name) # pylint: disable=E0606 if not check_block(new_img_name): - test.fail("Fail to change cdrom to %s." % new_img_name) + test.fail(f"Fail to change cdrom to {new_img_name}.") def change_vm_power(): - """ Change the vm power. """ - method, command = params['command_opts'].split(',') - test.log.info('Sending command(%s): %s', method, command) - if method == 'shell': + """Change the vm power.""" + method, command = params["command_opts"].split(",") + test.log.info("Sending command(%s): %s", method, command) + if method == "shell": p_session = vm.wait_for_login(timeout=360) p_session.sendline(command) p_session.close() @@ -234,18 +240,18 @@ def change_vm_power(): getattr(vm.monitor, command)() def check_vm_status(timeout=600): - """ Check the status of vm. """ - action = 'shutdown' if shutdown_vm else 'login' - if not getattr(vm, 'wait_for_%s' % action)(timeout=timeout): - test.fail('Failed to %s vm.' % action) + """Check the status of vm.""" + action = "shutdown" if shutdown_vm else "login" + if not getattr(vm, f"wait_for_{action}")(timeout=timeout): + test.fail(f"Failed to {action} vm.") def set_dst_params(): - """ Set the params of dst vm. """ - for name, val in ast.literal_eval(params.get('set_dst_params', '{}')).items(): + """Set the params of dst vm.""" + for name, val in ast.literal_eval(params.get("set_dst_params", "{}")).items(): vm.params[name] = val def ping_pong_migration(repeat_times): - """ Do ping pong migration. 
""" + """Do ping pong migration.""" for i in range(repeat_times): set_dst_params() if i % 2 == 0: @@ -254,85 +260,107 @@ def ping_pong_migration(repeat_times): test.log.info("Round %s pong...", str(i / 2)) if do_migration_background: args = (mig_timeout, mig_protocol, mig_cancel_delay) - kwargs = {'migrate_capabilities': capabilities, - 'mig_inner_funcs': inner_funcs, 'env': env} - migration_thread = utils_misc.InterruptedThread(vm.migrate, - args, kwargs) + kwargs = { + "migrate_capabilities": capabilities, + "mig_inner_funcs": inner_funcs, + "env": env, + } + migration_thread = utils_misc.InterruptedThread( + vm.migrate, args, kwargs + ) migration_thread.start() - if not utils_misc.wait_for(lambda: ( - bool(vm.monitor.query("migrate")) and ( - 'completed' != vm.monitor.query("migrate")['status'])), - timeout=60, first=10): - test.error('Migration thread is not alive.') + if not utils_misc.wait_for( + lambda: ( + bool(vm.monitor.query("migrate")) + and ("completed" != vm.monitor.query("migrate")["status"]) + ), + timeout=60, + first=10, + ): + test.error("Migration thread is not alive.") vm.monitor.wait_for_migrate_progress( - float(params['percent_start_post_copy'])) + float(params["percent_start_post_copy"]) + ) vm.monitor.migrate_start_postcopy() migration_thread.join() - test.log.info('Migration thread is done.') + test.log.info("Migration thread is done.") else: - vm.migrate(mig_timeout, mig_protocol, mig_cancel_delay, - migrate_capabilities=capabilities, - mig_inner_funcs=inner_funcs, env=env) + vm.migrate( + mig_timeout, + mig_protocol, + mig_cancel_delay, + migrate_capabilities=capabilities, + mig_inner_funcs=inner_funcs, + env=env, + ) def bg_stress_is_alive(session, name): - """ Check whether the background stress is alive. """ - return session.cmd_output('pgrep -xl %s' % name) + """Check whether the background stress is alive.""" + return session.cmd_output(f"pgrep -xl {name}") - shutdown_vm = params.get('shutdown_vm', 'no') == 'yes' - reboot = params.get('reboot_vm', 'no') == 'yes' - with_cdrom = params.get('with_cdrom', 'no') == 'yes' - os_type = params['os_type'] - windows = os_type == 'windows' - src_desc = params.get('src_addition_desc', '') - dst_desc = params.get('dst_addition_desc', '') + shutdown_vm = params.get("shutdown_vm", "no") == "yes" + reboot = params.get("reboot_vm", "no") == "yes" + with_cdrom = params.get("with_cdrom", "no") == "yes" + os_type = params["os_type"] + windows = os_type == "windows" + src_desc = params.get("src_addition_desc", "") + dst_desc = params.get("dst_addition_desc", "") mig_timeout = float(params.get("mig_timeout", "3600")) mig_protocol = params.get("migration_protocol", "tcp") mig_cancel_delay = int(params.get("mig_cancel") == "yes") * 2 inner_funcs = ast.literal_eval(params.get("migrate_inner_funcs", "[]")) capabilities = ast.literal_eval(params.get("migrate_capabilities", "{}")) - do_migration_background = params.get('do_migration_background', 'no') == 'yes' + do_migration_background = params.get("do_migration_background", "no") == "yes" - stress_name = params.get('stress_name') - stress_maps = {'iozone': run_iozone, 'stressapptest': run_stressapptest} - stress_options = params.get('stress_options') - stress_timeout = int(params.get('stress_timeout', '1800')) - do_stress_background = params.get('do_stress_background', 'no') == 'yes' - kill_bg_stress = params.get('kill_bg_stress', 'no') == 'yes' + stress_name = params.get("stress_name") + stress_maps = {"iozone": run_iozone, "stressapptest": run_stressapptest} + stress_options = 
params.get("stress_options") + stress_timeout = int(params.get("stress_timeout", "1800")) + do_stress_background = params.get("do_stress_background", "no") == "yes" + kill_bg_stress = params.get("kill_bg_stress", "no") == "yes" exit_event = threading.Event() - error_context.context('Boot guest %s on src host.' % src_desc, test.log.info) + error_context.context(f"Boot guest {src_desc} on src host.", test.log.info) vm = env.get_vm(params["main_vm"]) vm.verify_alive() session = vm.wait_for_login(timeout=360) if windows: session = utils_test.qemu.windrv_check_running_verifier( - session, vm, test, params["driver_name"]) + session, vm, test, params["driver_name"] + ) - if params.get('run_stress_before_migration', 'no') == 'yes': + if params.get("run_stress_before_migration", "no") == "yes": if do_stress_background: stress_thread = run_stress_background(stress_timeout) - if not utils_misc.wait_for(lambda: ( - stress_thread.exit_event.is_set() or bg_stress_is_alive( - session, stress_name)), 120, step=3): - test.error('The %s is not alive.' % stress_name) + if not utils_misc.wait_for( + lambda: ( + stress_thread.exit_event.is_set() + or bg_stress_is_alive(session, stress_name) + ), + 120, + step=3, + ): + test.error(f"The {stress_name} is not alive.") if stress_thread.exit_event.is_set(): stress_thread.exit_event.clear() - six.reraise(stress_thread.exc_info[0], - stress_thread.exc_info[1], - stress_thread.exc_info[2]) + six.reraise( + stress_thread.exc_info[0], + stress_thread.exc_info[1], + stress_thread.exc_info[2], + ) else: stress_maps[stress_name](stress_timeout) if with_cdrom: - cdrom_params = params.object_params(params['cdroms']) - check_orig_items = ast.literal_eval(cdrom_params['check_orig_items']) + cdrom_params = params.object_params(params["cdroms"]) + check_orig_items = ast.literal_eval(cdrom_params["check_orig_items"]) orig_img_name = params["cdrom_orig_file"] new_img_name = params["cdrom_new_file"] device_name = vm.get_block({"file": orig_img_name}) if device_name is None: - test.fail("Failed to get device using image %s." % orig_img_name) + test.fail(f"Failed to get device using image {orig_img_name}.") check_cdrom_info_by_qmp(check_orig_items) orig_size = compare_cdrom_size(orig_img_name) @@ -342,16 +370,16 @@ def bg_stress_is_alive(session, name): change_cdrom() device_name = vm.get_block({"file": new_img_name}) - check_new_items = ast.literal_eval(cdrom_params['check_new_items']) + check_new_items = ast.literal_eval(cdrom_params["check_new_items"]) check_cdrom_info_by_qmp(check_new_items) new_size = compare_cdrom_size(new_img_name) if new_size == orig_size: - test.fail('The new size inside guest is equal to the orig iso size.') + test.fail("The new size inside guest is equal to the orig iso size.") - error_context.context('Boot guest %s on dst host.' 
% dst_desc, test.log.info) - ping_pong_migration(int(params.get('repeat_ping_pong', '1'))) + error_context.context(f"Boot guest {dst_desc} on dst host.", test.log.info) + ping_pong_migration(int(params.get("repeat_ping_pong", "1"))) - if params.get('run_stress_after_migration', 'no') == 'yes': + if params.get("run_stress_after_migration", "no") == "yes": if do_stress_background: run_stress_background(stress_timeout) else: @@ -360,14 +388,16 @@ def bg_stress_is_alive(session, name): if do_stress_background: if bg_stress_is_alive(session, stress_name): if kill_bg_stress: - session.cmd('killall %s' % stress_name) + session.cmd(f"killall {stress_name}") else: stress_thread.join(stress_timeout) if stress_thread.exit_event.is_set(): stress_thread.exit_event.clear() - six.reraise(stress_thread.exc_info[0], - stress_thread.exc_info[1], - stress_thread.exc_info[2]) + six.reraise( + stress_thread.exc_info[0], + stress_thread.exc_info[1], + stress_thread.exc_info[2], + ) if shutdown_vm or reboot: change_vm_power() diff --git a/qemu/tests/migration_with_dst_problem.py b/qemu/tests/migration_with_dst_problem.py index 60f4230aa5..69f3ca7098 100644 --- a/qemu/tests/migration_with_dst_problem.py +++ b/qemu/tests/migration_with_dst_problem.py @@ -1,20 +1,19 @@ import os -import time import re import sys -import six +import time import aexpect - -from avocado.utils import data_factory -from avocado.utils import process - -from virttest import error_context -from virttest import env_process -from virttest import utils_misc -from virttest import utils_numeric -from virttest import qemu_storage -from virttest import data_dir +import six +from avocado.utils import data_factory, process +from virttest import ( + data_dir, + env_process, + error_context, + qemu_storage, + utils_misc, + utils_numeric, +) @error_context.context_aware @@ -53,19 +52,16 @@ def run(test, params, env): mount_path = None while mount_path is None or os.path.exists(mount_path): test_rand = data_factory.generate_random_string(3) - mount_path = ("%s/ni_mount_%s" % - (data_dir.get_data_dir(), test_rand)) + mount_path = f"{data_dir.get_data_dir()}/ni_mount_{test_rand}" mig_dst = os.path.join(mount_path, "mig_dst") - migration_exec_cmd_src = params.get("migration_exec_cmd_src", - "gzip -c > %s") - migration_exec_cmd_src = (migration_exec_cmd_src % (mig_dst)) - - class MiniSubtest(object): + migration_exec_cmd_src = params.get("migration_exec_cmd_src", "gzip -c > %s") + migration_exec_cmd_src = migration_exec_cmd_src % (mig_dst) + class MiniSubtest: def __new__(cls, *args, **kargs): - self = super(MiniSubtest, cls).__new__(cls) + self = super().__new__(cls) ret = None exc_info = None if args is None: @@ -95,23 +91,20 @@ def control_service(session, service, init_service, action, timeout=60): :param action: action with service (start|stop|restart) :param init_service: name of service for old service control. 
""" - status = utils_misc.get_guest_service_status(session, service, - service_former=init_service) + status = utils_misc.get_guest_service_status( + session, service, service_former=init_service + ) if action == "start" and status == "active": - test.log.debug("%s already started, no need start it again.", - service) + test.log.debug("%s already started, no need start it again.", service) return if action == "stop" and status == "inactive": - test.log.debug("%s already stopped, no need stop it again.", - service) + test.log.debug("%s already stopped, no need stop it again.", service) return try: session.cmd("systemctl --version", timeout=timeout) - session.cmd("systemctl %s %s.service" % (action, service), - timeout=timeout) + session.cmd(f"systemctl {action} {service}.service", timeout=timeout) except: - session.cmd("service %s %s" % (init_service, action), - timeout=timeout) + session.cmd(f"service {init_service} {action}", timeout=timeout) def set_nfs_server(vm, share_cfg): """ @@ -120,7 +113,7 @@ def set_nfs_server(vm, share_cfg): :param vm: Virtual machine for vm. """ session = vm.wait_for_login(timeout=login_timeout) - cmd = "echo '%s' > /etc/exports" % (share_cfg) + cmd = f"echo '{share_cfg}' > /etc/exports" control_service(session, "nfs-server", "nfs", "stop") session.cmd(cmd) control_service(session, "nfs-server", "nfs", "start") @@ -133,7 +126,7 @@ def umount(mount_path): :param mount_path: path where nfs dir will be placed. """ - process.run("umount -f %s" % (mount_path)) + process.run(f"umount -f {mount_path}") def create_file_disk(dst_path, size): """ @@ -142,8 +135,8 @@ def create_file_disk(dst_path, size): :param dst_path: Path to file. :param size: Size of file in MB """ - process.run("dd if=/dev/zero of=%s bs=1M count=%s" % (dst_path, size)) - process.run("mkfs.ext3 -F %s" % (dst_path)) + process.run(f"dd if=/dev/zero of={dst_path} bs=1M count={size}") + process.run(f"mkfs.ext3 -F {dst_path}") def mount(disk_path, mount_path, options=None): """ @@ -156,9 +149,9 @@ def mount(disk_path, mount_path, options=None): if options is None: options = "" else: - options = "%s" % options + options = f"{options}" - process.run("mount %s %s %s" % (options, disk_path, mount_path)) + process.run(f"mount {options} {disk_path} {mount_path}") def find_disk_vm(vm, disk_serial): """ @@ -172,7 +165,7 @@ def find_disk_vm(vm, disk_serial): session = vm.wait_for_login(timeout=login_timeout) disk_path = os.path.join("/", "dev", "disk", "by-id") - disks = session.cmd("ls %s" % disk_path).split("\n") + disks = session.cmd(f"ls {disk_path}").split("\n") session.close() disk = list(filter(lambda x: x.endswith(disk_serial), disks)) if not disk: @@ -187,8 +180,8 @@ def prepare_disk(vm, disk_path, mount_path): :param disk_path: Path to disk in guest system. 
""" session = vm.wait_for_login(timeout=login_timeout) - session.cmd("mkfs.ext3 -F %s" % (disk_path)) - session.cmd("mount %s %s" % (disk_path, mount_path)) + session.cmd(f"mkfs.ext3 -F {disk_path}") + session.cmd(f"mount {disk_path} {mount_path}") session.close() def disk_load(vm, src_path, dst_path, copy_timeout=None, dsize=None): @@ -204,14 +197,14 @@ def disk_load(vm, src_path, dst_path, copy_timeout=None, dsize=None): if dsize is None: dsize = 100 session = vm.wait_for_login(timeout=login_timeout) - cmd = ("nohup /bin/bash -c 'while true; do dd if=%s of=%s bs=1M " - "count=%s; done;' 2> /dev/null &" % (src_path, dst_path, dsize)) - pid = re.search(r"\[.+\] (.+)", - session.cmd_output(cmd, timeout=copy_timeout)) + cmd = ( + f"nohup /bin/bash -c 'while true; do dd if={src_path} of={dst_path} bs=1M " + f"count={dsize}; done;' 2> /dev/null &" + ) + pid = re.search(r"\[.+\] (.+)", session.cmd_output(cmd, timeout=copy_timeout)) return pid.group(1) - class IscsiServer_tgt(object): - + class IscsiServer_tgt: """ Class for set and start Iscsi server. """ @@ -237,14 +230,12 @@ def set_iscsi_server(self, vm_ds, disk_path, disk_size): """ session = vm_ds.wait_for_login(timeout=login_timeout) - session.cmd("dd if=/dev/zero of=%s bs=1M count=%s" % (disk_path, - disk_size)) + session.cmd(f"dd if=/dev/zero of={disk_path} bs=1M count={disk_size}") status, output = session.cmd_status_output("setenforce 0") if status not in [0, 127]: - test.log.warn("Function setenforce fails.\n %s", output) + test.log.warning("Function setenforce fails.\n %s", output) - config = self.config % (self.server_name, disk_path, - self.user, self.passwd) + config = self.config % (self.server_name, disk_path, self.user, self.passwd) cmd = "cat > /etc/tgt/conf.d/virt.conf << EOF" + config + "EOF" control_service(session, "tgtd", "tgtd", "stop") session.sendline(cmd) @@ -254,7 +245,7 @@ def set_iscsi_server(self, vm_ds, disk_path, disk_size): def find_disk(self): disk_path = os.path.join("/", "dev", "disk", "by-path") - disks = process.run("ls %s" % disk_path).stdout.split("\n") + disks = process.run(f"ls {disk_path}").stdout.split("\n") disk = list(filter(lambda x: self.server_name in x, disks)) if not disk: return None @@ -269,27 +260,33 @@ def connect(self, vm_ds): :return: path where disk is connected. 
""" ip_dst = vm_ds.get_address() - process.run("iscsiadm -m discovery -t st -p %s" % (ip_dst)) - - server_ident = ('iscsiadm -m node --targetname "%s:dev01"' - ' --portal %s' % (self.server_name, ip_dst)) - process.run("%s --op update --name node.session.auth.authmethod" - " --value CHAP" % (server_ident)) - process.run("%s --op update --name node.session.auth.username" - " --value %s" % (server_ident, self.user)) - process.run("%s --op update --name node.session.auth.password" - " --value %s" % (server_ident, self.passwd)) - process.run("%s --login" % (server_ident)) + process.run(f"iscsiadm -m discovery -t st -p {ip_dst}") + + server_ident = ( + f'iscsiadm -m node --targetname "{self.server_name}:dev01"' + f" --portal {ip_dst}" + ) + process.run( + f"{server_ident} --op update --name node.session.auth.authmethod" + " --value CHAP" + ) + process.run( + f"{server_ident} --op update --name node.session.auth.username" + f" --value {self.user}" + ) + process.run( + f"{server_ident} --op update --name node.session.auth.password" + f" --value {self.passwd}" + ) + process.run(f"{server_ident} --login") time.sleep(1.0) return self.find_disk() def disconnect(self): - server_ident = ('iscsiadm -m node --targetname "%s:dev01"' % - (self.server_name)) - process.run("%s --logout" % (server_ident)) - - class IscsiServer(object): + server_ident = f'iscsiadm -m node --targetname "{self.server_name}:dev01"' + process.run(f"{server_ident} --logout") + class IscsiServer: """ Iscsi server implementation interface. """ @@ -303,10 +300,9 @@ def __init__(self, iscsi_type, *args, **kargs): def __getattr__(self, name): if self.ic: return self.ic.__getattribute__(name) - raise AttributeError("Cannot find attribute %s in class" % name) + raise AttributeError(f"Cannot find attribute {name} in class") class test_read_only_dest(MiniSubtest): - """ Migration to read-only destination by using a migration to file. @@ -326,7 +322,7 @@ def test(self): vm_guest = env.get_vm("virt_test_vm1_guest") ro_timeout = int(params.get("read_only_timeout", "480")) exp_str = r".*Read-only file system.*" - process.run("mkdir -p %s" % (mount_path)) + process.run(f"mkdir -p {mount_path}") vm_ds.verify_alive() vm_guest.create() @@ -334,19 +330,24 @@ def test(self): set_nfs_server(vm_ds, "/mnt *(ro,async,no_root_squash)") - mount_src = "%s:/mnt" % (vm_ds.get_address()) - mount(mount_src, mount_path, - "-o hard,timeo=14,rsize=8192,wsize=8192") - vm_guest.migrate(mig_timeout, mig_protocol, - not_wait_for_migration=True, - migration_exec_cmd_src=migration_exec_cmd_src, - env=env) - - if not utils_misc.wait_for(lambda: process_output_check( - vm_guest.process, exp_str), - timeout=ro_timeout, first=2): - test.fail("The Read-only file system warning not" - " come in time limit.") + mount_src = f"{vm_ds.get_address()}:/mnt" + mount(mount_src, mount_path, "-o hard,timeo=14,rsize=8192,wsize=8192") + vm_guest.migrate( + mig_timeout, + mig_protocol, + not_wait_for_migration=True, + migration_exec_cmd_src=migration_exec_cmd_src, + env=env, + ) + + if not utils_misc.wait_for( + lambda: process_output_check(vm_guest.process, exp_str), + timeout=ro_timeout, + first=2, + ): + test.fail( + "The Read-only file system warning not" " come in time limit." + ) def clean(self): if os.path.exists(mig_dst): @@ -356,7 +357,6 @@ def clean(self): os.rmdir(mount_path) class test_low_space_dest(MiniSubtest): - """ Migrate to destination with low space. 
@@ -371,15 +371,16 @@ def test(self): self.disk_path = None while self.disk_path is None or os.path.exists(self.disk_path): self.disk_path = ( - "%s/disk_%s" % - (test.tmpdir, data_factory.generate_random_string(3))) + f"{test.tmpdir}/disk_{data_factory.generate_random_string(3)}" + ) - disk_size = int(utils_misc.normalize_data_size( - params.get("disk_size", "10M"), "M")) + disk_size = int( + utils_misc.normalize_data_size(params.get("disk_size", "10M"), "M") + ) exp_str = r".*gzip: stdout: No space left on device.*" vm_guest = env.get_vm("virt_test_vm1_guest") - process.run("mkdir -p %s" % (mount_path)) + process.run(f"mkdir -p {mount_path}") vm_guest.verify_alive() vm_guest.wait_for_login(timeout=login_timeout) @@ -387,16 +388,23 @@ def test(self): create_file_disk(self.disk_path, disk_size) mount(self.disk_path, mount_path, "-o loop") - vm_guest.migrate(mig_timeout, mig_protocol, - not_wait_for_migration=True, - migration_exec_cmd_src=migration_exec_cmd_src, - env=env) - - if not utils_misc.wait_for(lambda: process_output_check( - vm_guest.process, exp_str), - timeout=60, first=1): - test.fail("The migration to destination with low " - "storage space didn't fail as it should.") + vm_guest.migrate( + mig_timeout, + mig_protocol, + not_wait_for_migration=True, + migration_exec_cmd_src=migration_exec_cmd_src, + env=env, + ) + + if not utils_misc.wait_for( + lambda: process_output_check(vm_guest.process, exp_str), + timeout=60, + first=1, + ): + test.fail( + "The migration to destination with low " + "storage space didn't fail as it should." + ) def clean(self): if os.path.exists(mount_path): @@ -406,7 +414,6 @@ def clean(self): os.remove(self.disk_path) class test_extensive_io(MiniSubtest): - """ Migrate after extensive_io abstract class. This class only define basic funtionaly and define interface. For other tests. @@ -430,36 +437,39 @@ def test(self): if params.get("nettype") != "bridge": test.cancel("Unable start test without params nettype=bridge.") - self.disk_serial = params.get("drive_serial_image2_vm1", - "nfs-disk-image2-vm1") - self.disk_serial_src = params.get("drive_serial_image1_vm1", - "root-image1-vm1") + self.disk_serial = params.get( + "drive_serial_image2_vm1", "nfs-disk-image2-vm1" + ) + self.disk_serial_src = params.get( + "drive_serial_image1_vm1", "root-image1-vm1" + ) self.guest_mount_path = params.get("guest_disk_mount_path", "/mnt") self.copy_timeout = int(params.get("copy_timeout", "1024")) - self.copy_block_size = int(utils_numeric.normalize_data_size( - params.get("copy_block_size", "100M"), "M")) - self.disk_size = "%sM" % int(self.copy_block_size * 1.4) + self.copy_block_size = int( + utils_numeric.normalize_data_size( + params.get("copy_block_size", "100M"), "M" + ) + ) + self.disk_size = f"{int(self.copy_block_size * 1.4)}M" - self.server_recover_timeout = ( - int(params.get("server_recover_timeout", "240"))) + self.server_recover_timeout = int( + params.get("server_recover_timeout", "240") + ) - process.run("mkdir -p %s" % (mount_path)) + process.run(f"mkdir -p {mount_path}") self.test_params() self.config() self.vm_guest_params = params.copy() self.vm_guest_params["images_base_dir_image2_vm1"] = mount_path - self.vm_guest_params["image_name_image2_vm1"] = "ni_mount_%s/test" % (test_rand) + self.vm_guest_params["image_name_image2_vm1"] = f"ni_mount_{test_rand}/test" self.vm_guest_params["image_size_image2_vm1"] = self.disk_size self.vm_guest_params = self.vm_guest_params.object_params("vm1") - self.image2_vm_guest_params = (self.vm_guest_params. 
- object_params("image2")) + self.image2_vm_guest_params = self.vm_guest_params.object_params("image2") - env_process.preprocess_image(test, - self.image2_vm_guest_params, - env) + env_process.preprocess_image(test, self.image2_vm_guest_params, env) self.vm_guest.create(params=self.vm_guest_params) self.vm_guest.verify_alive() @@ -497,8 +507,13 @@ def workload(self): disk_path_src = find_disk_vm(self.vm_guest, self.disk_serial_src) dst_path = os.path.join(self.guest_mount_path, "test.data") - self.copier_pid = disk_load(self.vm_guest, disk_path_src, dst_path, - self.copy_timeout, self.copy_block_size) + self.copier_pid = disk_load( + self.vm_guest, + disk_path_src, + dst_path, + self.copy_timeout, + self.copy_block_size, + ) def restart_server(self): raise NotImplementedError() @@ -514,26 +529,31 @@ def clean(self): try: if self.vm_guest.is_alive(): session = self.vm_guest.wait_for_login(timeout=login_timeout) - session.cmd("kill -9 %s" % (self.copier_pid)) + session.cmd(f"kill -9 {self.copier_pid}") except: - test.log.warn("It was impossible to stop copier. Something " - "probably happened with GUEST or NFS server.") + test.log.warning( + "It was impossible to stop copier. Something " + "probably happened with GUEST or NFS server." + ) if params.get("kill_vm") == "yes": if self.vm_guest.is_alive(): self.vm_guest.destroy() - utils_misc.wait_for(lambda: self.vm_guest.is_dead(), 30, - 2, 2, "Waiting for dying of guest.") - qemu_img = qemu_storage.QemuImg(self.image2_vm_guest_params, - mount_path, - None) - qemu_img.check_image(self.image2_vm_guest_params, - mount_path) + utils_misc.wait_for( + lambda: self.vm_guest.is_dead(), + 30, + 2, + 2, + "Waiting for dying of guest.", + ) + qemu_img = qemu_storage.QemuImg( + self.image2_vm_guest_params, mount_path, None + ) + qemu_img.check_image(self.image2_vm_guest_params, mount_path) self.clean_test() class test_extensive_io_nfs(test_extensive_io): - """ Migrate after extensive io. @@ -563,28 +583,28 @@ def config(self): set_nfs_server(vm_ds, "/mnt *(rw,async,no_root_squash)") - mount_src = "%s:/mnt" % (vm_ds.get_address()) - mount(mount_src, mount_path, - "-o hard,timeo=14,rsize=8192,wsize=8192") + mount_src = f"{vm_ds.get_address()}:/mnt" + mount(mount_src, mount_path, "-o hard,timeo=14,rsize=8192,wsize=8192") def restart_server(self): time.sleep(10) # Wait for wail until copy start working. - control_service(self.control_session_ds, "nfs-server", - "nfs", "stop") # Stop NFS server + control_service( + self.control_session_ds, "nfs-server", "nfs", "stop" + ) # Stop NFS server time.sleep(5) - control_service(self.control_session_ds, "nfs-server", - "nfs", "start") # Start NFS server + control_service( + self.control_session_ds, "nfs-server", "nfs", "start" + ) # Start NFS server """ Touch waits until all previous requests are invalidated (NFS grace period). Without grace period qemu start takes to long and timers for machine creation dies. """ - qemu_img = qemu_storage.QemuImg(self.image2_vm_guest_params, - mount_path, - None) - process.run("touch %s" % (qemu_img.image_filename), - self.server_recover_timeout) + qemu_img = qemu_storage.QemuImg( + self.image2_vm_guest_params, mount_path, None + ) + process.run(f"touch {qemu_img.image_filename}", self.server_recover_timeout) def clean_test(self): if os.path.exists(mount_path): @@ -592,7 +612,6 @@ def clean_test(self): os.rmdir(mount_path) class test_extensive_io_iscsi(test_extensive_io): - """ Migrate after extensive io. 
@@ -628,30 +647,32 @@ def config(self): self.isci_server = IscsiServer("tgt") disk_path = os.path.join(self.guest_mount_path, "disk1") - self.isci_server.set_iscsi_server(vm_ds, disk_path, - (int(float(self.disk_size) * 1.1) / (1024 * 1024))) + self.isci_server.set_iscsi_server( + vm_ds, disk_path, (int(float(self.disk_size) * 1.1) / (1024 * 1024)) + ) self.host_disk_path = self.isci_server.connect(vm_ds) - process.run("mkfs.ext3 -F %s" % (self.host_disk_path)) + process.run(f"mkfs.ext3 -F {self.host_disk_path}") mount(self.host_disk_path, mount_path) def restart_server(self): time.sleep(10) # Wait for wail until copy start working. - control_service(self.control_session_ds, "tgtd", - "tgtd", "stop", 240) # Stop Iscsi server + control_service( + self.control_session_ds, "tgtd", "tgtd", "stop", 240 + ) # Stop Iscsi server time.sleep(5) - control_service(self.control_session_ds, "tgtd", - "tgtd", "start", 240) # Start Iscsi server + control_service( + self.control_session_ds, "tgtd", "tgtd", "start", 240 + ) # Start Iscsi server """ Wait for iscsi server after restart and will be again accessible. """ - qemu_img = qemu_storage.QemuImg(self.image2_vm_guest_params, - mount_path, - None) - process.run("touch %s" % (qemu_img.image_filename), - self.server_recover_timeout) + qemu_img = qemu_storage.QemuImg( + self.image2_vm_guest_params, mount_path, None + ) + process.run(f"touch {qemu_img.image_filename}", self.server_recover_timeout) def clean_test(self): if os.path.exists(mount_path): @@ -661,9 +682,11 @@ def clean_test(self): self.isci_server.disconnect() test_type = params.get("test_type") - if (test_type in locals()): + if test_type in locals(): tests_group = locals()[test_type] tests_group() else: - test.fail("Test group '%s' is not defined in" - " migration_with_dst_problem test" % test_type) + test.fail( + f"Test group '{test_type}' is not defined in" + " migration_with_dst_problem test" + ) diff --git a/qemu/tests/migration_with_file_transfer.py b/qemu/tests/migration_with_file_transfer.py index 743c0efcec..4956ee9d14 100644 --- a/qemu/tests/migration_with_file_transfer.py +++ b/qemu/tests/migration_with_file_transfer.py @@ -1,10 +1,7 @@ import os -from avocado.utils import crypto -from avocado.utils import process - -from virttest import error_context -from virttest import utils_misc +from avocado.utils import crypto, process +from virttest import error_context, utils_misc @error_context.context_aware @@ -32,26 +29,27 @@ def run(test, params, env): mig_protocol = params.get("migration_protocol", "tcp") mig_cancel_delay = int(params.get("mig_cancel") == "yes") * 2 - host_path = "/tmp/file-%s" % utils_misc.generate_random_string(6) - host_path_returned = "%s-returned" % host_path + host_path = f"/tmp/file-{utils_misc.generate_random_string(6)}" + host_path_returned = f"{host_path}-returned" guest_path = params.get("guest_path", "/tmp/file") file_size = params.get("file_size", "500") transfer_timeout = int(params.get("transfer_timeout", "240")) migrate_between_vhost_novhost = params.get("migrate_between_vhost_novhost") if mig_protocol == "exec": - mig_file = os.path.join(test.tmpdir, "tmp-%s" % - utils_misc.generate_random_string(8)) + mig_file = os.path.join( + test.tmpdir, f"tmp-{utils_misc.generate_random_string(8)}" + ) try: - process.run("dd if=/dev/urandom of=%s bs=1M count=%s" - % (host_path, file_size)) + process.run(f"dd if=/dev/urandom of={host_path} bs=1M count={file_size}") def run_and_migrate(bg): bg.start() try: while bg.is_alive(): - test.log.info("File transfer not ended, 
starting a round of " - "migration...") + test.log.info( + "File transfer not ended, starting a round of " "migration..." + ) if migrate_between_vhost_novhost == "yes": vhost_status = vm.params.get("vhost") if vhost_status == "vhost=on": @@ -63,10 +61,14 @@ def run_and_migrate(bg): if mig_protocol == "exec" and migration_exec_cmd_src: migration_exec_cmd_src %= mig_file # pylint: disable=E0606 migration_exec_cmd_dst %= mig_file - vm.migrate(mig_timeout, mig_protocol, mig_cancel_delay, - env=env, - migration_exec_cmd_src=migration_exec_cmd_src, - migration_exec_cmd_dst=migration_exec_cmd_dst) + vm.migrate( + mig_timeout, + mig_protocol, + mig_cancel_delay, + env=env, + migration_exec_cmd_src=migration_exec_cmd_src, + migration_exec_cmd_dst=migration_exec_cmd_dst, + ) except Exception: # If something bad happened in the main thread, ignore # exceptions raised in the background thread @@ -75,20 +77,24 @@ def run_and_migrate(bg): else: bg.join() - error_context.context("transferring file to guest while migrating", - test.log.info) + error_context.context( + "transferring file to guest while migrating", test.log.info + ) bg = utils_misc.InterruptedThread( vm.copy_files_to, (host_path, guest_path), - dict(verbose=True, timeout=transfer_timeout)) + dict(verbose=True, timeout=transfer_timeout), + ) run_and_migrate(bg) - error_context.context("transferring file back to host while migrating", - test.log.info) + error_context.context( + "transferring file back to host while migrating", test.log.info + ) bg = utils_misc.InterruptedThread( vm.copy_files_from, (guest_path, host_path_returned), - dict(verbose=True, timeout=transfer_timeout)) + dict(verbose=True, timeout=transfer_timeout), + ) run_and_migrate(bg) # Make sure the returned file is identical to the original one @@ -96,8 +102,10 @@ def run_and_migrate(bg): orig_hash = crypto.hash_file(host_path) returned_hash = crypto.hash_file(host_path_returned) if orig_hash != returned_hash: - test.fail("Returned file hash (%s) differs from " - "original one (%s)" % (returned_hash, orig_hash)) + test.fail( + f"Returned file hash ({returned_hash}) differs from " + f"original one ({orig_hash})" + ) error_context.context() finally: @@ -107,4 +115,4 @@ def run_and_migrate(bg): if os.path.isfile(host_path_returned): os.remove(host_path_returned) if mig_protocol == "exec": - process.run("rm -rf %s" % mig_file) + process.run(f"rm -rf {mig_file}") diff --git a/qemu/tests/migration_with_json_backing.py b/qemu/tests/migration_with_json_backing.py index 2dbbe7a4aa..972c4d77c5 100644 --- a/qemu/tests/migration_with_json_backing.py +++ b/qemu/tests/migration_with_json_backing.py @@ -1,13 +1,12 @@ -import logging import json +import logging from avocado.utils import process - -from virttest.qemu_storage import QemuImg, get_image_json, get_image_repr from virttest.data_dir import get_data_dir from virttest.qemu_devices.qdevices import QBlockdevFormatNode +from virttest.qemu_storage import QemuImg, get_image_json, get_image_repr -LOG_JOB = logging.getLogger('avocado.test') +LOG_JOB = logging.getLogger("avocado.test") def run(test, params, env): @@ -25,9 +24,11 @@ def run(test, params, env): :param params: Dictionary with test parameters. :param env: Dictionary with the test environment. 
""" + def get_img_objs(images): - return [QemuImg(params.object_params(tag), get_data_dir(), tag) - for tag in images] + return [ + QemuImg(params.object_params(tag), get_data_dir(), tag) for tag in images + ] def _create_image_with_backing(image): secids = [s.image_id for s in image.encryption_config.base_key_secrets] @@ -35,30 +36,30 @@ def _create_image_with_backing(image): image.create(params) else: base_params = params.object_params(image.base_tag) - backing_file = "'%s'" % get_image_json( - image.base_tag, base_params, get_data_dir()) - cmd = '{cmd} create -F {base_fmt} -b {backing} -f {fmt} {f} {size}' - qemu_img_cmd = cmd.format(cmd=image.image_cmd, - base_fmt=base_params['image_format'], - backing=backing_file, - fmt=image.image_format, - f=image.image_filename, - size=image.size) + backing_file = ( + f"'{get_image_json(image.base_tag, base_params, get_data_dir())}'" + ) + cmd = "{cmd} create -F {base_fmt} -b {backing} -f {fmt} {f} {size}" + qemu_img_cmd = cmd.format( + cmd=image.image_cmd, + base_fmt=base_params["image_format"], + backing=backing_file, + fmt=image.image_format, + f=image.image_filename, + size=image.size, + ) - msg = "Create image by command: %s" % qemu_img_cmd + msg = f"Create image by command: {qemu_img_cmd}" LOG_JOB.info(msg) - cmd_result = process.run( - qemu_img_cmd, shell=True, verbose=False, ignore_status=False) + process.run(qemu_img_cmd, shell=True, verbose=False, ignore_status=False) def src_sn_chain_setup(image_objects): - for image in image_objects[1:len(image_objects)-1]: + for image in image_objects[1 : len(image_objects) - 1]: _create_image_with_backing(image) def blockdev_add_image(tag): image_params = params.object_params(tag) - devices = vm.devices.images_define_by_params(tag, - image_params, - 'disk') + devices = vm.devices.images_define_by_params(tag, image_params, "disk") devices.pop() for dev in devices: if vm.devices.get_by_qid(dev.get_qid()): @@ -67,15 +68,14 @@ def blockdev_add_image(tag): dev.params["backing"] = None ret = vm.devices.simple_hotplug(dev, vm.monitor) if not ret[1]: - test.fail("Failed to hotplug '%s': %s." - % (dev, ret[0])) + test.fail(f"Failed to hotplug '{dev}': {ret[0]}.") def create_snapshot(): options = ["node", "overlay"] cmd = "blockdev-snapshot" arguments = params.copy_from_keys(options) - arguments.setdefault("node", "drive_%s" % params["base_tag"]) - arguments.setdefault("overlay", "drive_%s" % params["snapshot_tag"]) + arguments.setdefault("node", "drive_{}".format(params["base_tag"])) + arguments.setdefault("overlay", "drive_{}".format(params["snapshot_tag"])) return vm.monitor.cmd(cmd, dict(arguments)) def verify_backing_chain(info): @@ -84,19 +84,18 @@ def verify_backing_chain(info): base_image = None if image.base_tag: base_params = params.object_params(image.base_tag) - base_image = get_image_repr(image.base_tag, - base_params, - get_data_dir(), - 'filename') + base_image = get_image_repr( + image.base_tag, base_params, get_data_dir(), "filename" + ) backing_info = img_info.get("full-backing-filename") if backing_info and "json" in backing_info: back_info = backing_info.strip("json:") backing_info = json.loads(back_info)["file"]["filename"] if base_image != backing_info: - test.fail(("backing chain check for image %s failed, backing" - " file from info is %s, which should be %s.") % - (image.image_filename, backing_info, - base_image)) + test.fail( + f"backing chain check for image {image.image_filename} failed, backing" + f" file from info is {backing_info}, which should be {base_image}." 
+ ) def check_backing_file(image): out = json.loads(image.info(force_share=True, output="json")) @@ -107,7 +106,7 @@ def clean_images(image_objects): image.remove() images_tag = params.get("image_chain").split() - params["image_name_%s" % images_tag[0]] = params["image_name"] + params[f"image_name_{images_tag[0]}"] = params["image_name"] images = get_img_objs(images_tag) try: src_sn_chain_setup(images) diff --git a/qemu/tests/migration_with_netperf.py b/qemu/tests/migration_with_netperf.py index 26f0baef4f..35cbefcc53 100644 --- a/qemu/tests/migration_with_netperf.py +++ b/qemu/tests/migration_with_netperf.py @@ -1,9 +1,6 @@ import os -from virttest import error_context -from virttest import utils_netperf -from virttest import data_dir -from virttest import utils_net +from virttest import data_dir, error_context, utils_net, utils_netperf @error_context.context_aware @@ -33,15 +30,18 @@ def run(test, params, env): guest_address = vm.get_address() host_address = utils_net.get_host_ip_address(params) remote_ip = params.get("remote_host", host_address) - netperf_link = os.path.join(data_dir.get_deps_dir("netperf"), - params.get("netperf_link")) + netperf_link = os.path.join( + data_dir.get_deps_dir("netperf"), params.get("netperf_link") + ) netperf_server_link = params.get("netperf_server_link_win") if netperf_server_link: - netperf_server_link = os.path.join(data_dir.get_deps_dir("netperf"), - netperf_server_link) + netperf_server_link = os.path.join( + data_dir.get_deps_dir("netperf"), netperf_server_link + ) netperf_client_link = params.get("netperf_client_link_win", netperf_link) - netperf_client_link = os.path.join(data_dir.get_deps_dir("netperf"), - netperf_client_link) + netperf_client_link = os.path.join( + data_dir.get_deps_dir("netperf"), netperf_client_link + ) server_path = params.get("server_path", "/var/tmp/") client_path = params.get("client_path", "/var/tmp/") server_path_win = params.get("server_path_win") @@ -68,28 +68,32 @@ def run(test, params, env): netperf_server_h = None try: netperf_client_g = utils_netperf.NetperfClient( - guest_address, g_client_path, + guest_address, + g_client_path, netperf_source=g_client_link, client=params.get("shell_client"), port=params.get("shell_port"), prompt=params.get("shell_prompt", r"^root@.*[\#\$]\s*$|#"), username=params.get("username"), password=params.get("password"), - linesep=params.get("shell_linesep", "\n").encode().decode( - 'unicode_escape'), + linesep=params.get("shell_linesep", "\n").encode().decode("unicode_escape"), status_test_command=params.get("status_test_command", ""), - compile_option=params.get("compile_option_client_g", "")) + compile_option=params.get("compile_option_client_g", ""), + ) netperf_server_h = utils_netperf.NetperfServer( remote_ip, server_path, netperf_source=netperf_link, password=params.get("hostpassword"), - compile_option=params.get("compile_option", "")) + compile_option=params.get("compile_option", ""), + ) netperf_client_h = utils_netperf.NetperfClient( - remote_ip, client_path, + remote_ip, + client_path, netperf_source=netperf_link, password=params.get("hostpassword"), - compile_option=params.get("compile_option", "")) + compile_option=params.get("compile_option", ""), + ) netperf_server_g = utils_netperf.NetperfServer( guest_address, g_server_path, @@ -99,10 +103,10 @@ def run(test, params, env): client=params.get("shell_client"), port=params.get("shell_port"), prompt=params.get("shell_prompt", r"^root@.*[\#\$]\s*$|#"), - linesep=params.get("shell_linesep", "\n").encode().decode( - 
'unicode_escape'), + linesep=params.get("shell_linesep", "\n").encode().decode("unicode_escape"), status_test_command=params.get("status_test_command", "echo $?"), - compile_option=params.get("compile_option_server_g", "")) + compile_option=params.get("compile_option_server_g", ""), + ) error_context.base_context("Run netperf test between host and guest") error_context.context("Start netserver in guest.", test.log.info) netperf_server_g.start() @@ -111,7 +115,7 @@ def run(test, params, env): netperf_server_h.start() error_context.context("Start Netperf in host", test.log.info) - test_option = "-l %s" % netperf_timeout + test_option = f"-l {netperf_timeout}" netperf_client_h.bg_start(guest_address, test_option, client_num) if netperf_client_g: error_context.context("Start Netperf in guest", test.log.info) @@ -120,8 +124,9 @@ def run(test, params, env): m_count = 0 while netperf_client_h.is_netperf_running(): m_count += 1 - error_context.context("Start migration iterations: %s " % m_count, - test.log.info) + error_context.context( + f"Start migration iterations: {m_count} ", test.log.info + ) vm.migrate(mig_timeout, mig_protocol, mig_cancel_delay, env=env) finally: if netperf_server_g: diff --git a/qemu/tests/migration_with_numa.py b/qemu/tests/migration_with_numa.py index 70eec7fc5b..08baac9057 100644 --- a/qemu/tests/migration_with_numa.py +++ b/qemu/tests/migration_with_numa.py @@ -1,11 +1,8 @@ import re from avocado.utils import process -from virttest import env_process -from virttest import error_context -from virttest.utils_misc import get_mem_info -from virttest.utils_misc import normalize_data_size -from virttest.utils_misc import NumaInfo +from virttest import env_process, error_context +from virttest.utils_misc import NumaInfo, get_mem_info, normalize_data_size @error_context.context_aware @@ -23,7 +20,7 @@ def run(test, params, env): :param env: Dictionary with test environment """ - def get_nodes_size(size_type='MemTotal', session=None): + def get_nodes_size(size_type="MemTotal", session=None): """ Get the node size of each node in host/guest, descending sort with size @@ -38,24 +35,24 @@ def get_nodes_size(size_type='MemTotal', session=None): numa_nodes = numa_info.online_nodes for node in numa_nodes: node_size = numa_info.online_nodes_meminfo[node][size_type] - nodes_size[node] = float(normalize_data_size('%s KB' % node_size)) + nodes_size[node] = float(normalize_data_size(f"{node_size} KB")) nodes_size = sorted(nodes_size.items(), key=lambda item: item[1], reverse=True) return nodes_size - host_nodes_size = get_nodes_size(size_type='MemFree') - mem_devs = params.objects('mem_devs') + host_nodes_size = get_nodes_size(size_type="MemFree") + mem_devs = params.objects("mem_devs") if len(host_nodes_size) < len(mem_devs): test.cancel("Host do not have enough nodes for testing!") for mem_dev in mem_devs: - size_mem = params.object_params(mem_dev).get('size_mem') + size_mem = params.object_params(mem_dev).get("size_mem") size_mem = float(normalize_data_size(size_mem)) if host_nodes_size[0][1] >= size_mem: - params['host-nodes_mem_%s' % mem_dev] = str(host_nodes_size[0][0]) + params[f"host-nodes_mem_{mem_dev}"] = str(host_nodes_size[0][0]) del host_nodes_size[0] else: test.cancel("host nodes do not have enough memory for testing!") - params['start_vm'] = 'yes' + params["start_vm"] = "yes" env_process.preprocess(test, params, env) vm = env.get_vm(params["main_vm"]) session = vm.wait_for_login() @@ -66,48 +63,55 @@ def get_nodes_size(size_type='MemTotal', session=None): session = 
vm.wait_for_login() os_type = params["os_type"] - if os_type == 'linux': + if os_type == "linux": error_context.context("Check the numa memory size in guest", test.log.info) # Use 30 plus the gap of 'MemTotal' in OS and '-m' in cli as threshold - mem_total = get_mem_info(session, 'MemTotal') - mem_total = float(normalize_data_size('%s KB' % mem_total)) - error_context.context("MemTotal in guest os is %s MB" - % mem_total, test.log.info) + mem_total = get_mem_info(session, "MemTotal") + mem_total = float(normalize_data_size(f"{mem_total} KB")) + error_context.context(f"MemTotal in guest os is {mem_total} MB", test.log.info) threshold = float(params.get_numeric("mem") - mem_total) + 30 - error_context.context("The acceptable threshold is: %s" - % threshold, test.log.info) - guest_nodes_size = get_nodes_size(size_type='MemTotal', session=session) + error_context.context( + f"The acceptable threshold is: {threshold}", test.log.info + ) + guest_nodes_size = get_nodes_size(size_type="MemTotal", session=session) guest_nodes_size = dict(guest_nodes_size) - for nodenr, node in enumerate(params.objects('guest_numa_nodes')): + for nodenr, node in enumerate(params.objects("guest_numa_nodes")): mdev = params.get("numa_memdev_node%d" % nodenr) if mdev: - mdev = mdev.split('-')[1] - size = float(normalize_data_size(params.get("size_mem_%s" % mdev))) + mdev = mdev.split("-")[1] + size = float(normalize_data_size(params.get(f"size_mem_{mdev}"))) if abs(size - guest_nodes_size[nodenr]) > threshold: - test.fail("[Guest]Wrong size of numa node %d: %f. Expected:" - " %s" % (nodenr, guest_nodes_size[nodenr], size)) + test.fail( + "[Guest]Wrong size of numa node %d: %f. Expected:" + " %s" % (nodenr, guest_nodes_size[nodenr], size) + ) error_context.context("Check the numa memory policy in dest host", test.log.info) qemu_pid = vm.get_pid() for mem_dev in mem_devs: memdev_params = params.object_params(mem_dev) - size_mem = memdev_params.get('size_mem') - size_mem = int(float(normalize_data_size(size_mem, 'K'))) - smaps = process.getoutput("grep -E -B1 '^Size: *%d' /proc/%d/smaps" - % (size_mem, qemu_pid)) - mem_start_pattern = r'(\w+)-\w+\s+\w+-\w+\s+\w+\s+\w+:\w+\s\w+\s+\n'\ - r'Size:\s+%d' % size_mem + size_mem = memdev_params.get("size_mem") + size_mem = int(float(normalize_data_size(size_mem, "K"))) + smaps = process.getoutput( + "grep -E -B1 '^Size: *%d' /proc/%d/smaps" % (size_mem, qemu_pid) + ) + mem_start_pattern = ( + r"(\w+)-\w+\s+\w+-\w+\s+\w+\s+\w+:\w+\s\w+\s+\n" r"Size:\s+%d" % size_mem + ) match = re.search(mem_start_pattern, smaps) if not match: - test.error("Failed to get the mem start address in smaps: %s" % smaps) + test.error(f"Failed to get the mem start address in smaps: {smaps}") mem_start = match.groups()[0] - numa_maps = process.getoutput("grep %s /proc/%d/numa_maps" - % (mem_start, qemu_pid)) - node_match = re.search(r'bind:(\d+)', numa_maps) + numa_maps = process.getoutput( + "grep %s /proc/%d/numa_maps" % (mem_start, qemu_pid) + ) + node_match = re.search(r"bind:(\d+)", numa_maps) if not node_match: - test.fail("Failed to get the bind node in numa_maps: %s" % numa_maps) + test.fail(f"Failed to get the bind node in numa_maps: {numa_maps}") bind_node = node_match.groups()[0] - expected_node = memdev_params.get('host-nodes_mem') + expected_node = memdev_params.get("host-nodes_mem") if bind_node != expected_node: - test.fail("Host node for memdev %s in numa_maps is %s, while the " - "expected is:%s" % (mem_dev, bind_node, expected_node)) + test.fail( + f"Host node for memdev {mem_dev} in 
numa_maps is {bind_node}, while the " + f"expected is:{expected_node}" + ) diff --git a/qemu/tests/migration_with_reboot.py b/qemu/tests/migration_with_reboot.py index 0fd3d2c727..47d5342ca9 100644 --- a/qemu/tests/migration_with_reboot.py +++ b/qemu/tests/migration_with_reboot.py @@ -32,30 +32,35 @@ def run(test, params, env): mig_cancel_delay = int(params.get("mig_cancel") == "yes") * 2 migration_exec_cmd_src = params.get("migration_exec_cmd_src") migration_exec_cmd_dst = params.get("migration_exec_cmd_dst") - pre_migrate = migration.get_functions(params.get("pre_migrate"), - migration.__dict__) - post_migrate = migration.get_functions(params.get("post_migrate"), - migration.__dict__) + pre_migrate = migration.get_functions(params.get("pre_migrate"), migration.__dict__) + post_migrate = migration.get_functions( + params.get("post_migrate"), migration.__dict__ + ) if migration_exec_cmd_src and "%s" in migration_exec_cmd_src: - mig_file = os.path.join(tempfile.mkdtemp(prefix="migrate", - dir=test.workdir), - "migrate_file") + mig_file = os.path.join( + tempfile.mkdtemp(prefix="migrate", dir=test.workdir), "migrate_file" + ) migration_exec_cmd_src %= mig_file migration_exec_cmd_dst %= mig_file try: # Reboot the VM in the background bg = utils_misc.InterruptedThread( - vm.reboot, kwargs={'session': session, 'timeout': login_timeout}) + vm.reboot, kwargs={"session": session, "timeout": login_timeout} + ) bg.start() try: while bg.is_alive(): for func in pre_migrate: func(vm, params, test) - vm.migrate(mig_timeout, mig_protocol, mig_cancel_delay, - env=env, - migration_exec_cmd_src=migration_exec_cmd_src, - migration_exec_cmd_dst=migration_exec_cmd_dst) + vm.migrate( + mig_timeout, + mig_protocol, + mig_cancel_delay, + env=env, + migration_exec_cmd_src=migration_exec_cmd_src, + migration_exec_cmd_dst=migration_exec_cmd_dst, + ) # run some functions after migrate finish. for func in post_migrate: func(vm, params, test) diff --git a/qemu/tests/migration_with_sgx.py b/qemu/tests/migration_with_sgx.py index c64f09876d..8892ec2820 100644 --- a/qemu/tests/migration_with_sgx.py +++ b/qemu/tests/migration_with_sgx.py @@ -1,8 +1,7 @@ from virttest import error_context from virttest.utils_misc import verify_dmesg -from provider.sgx import SGXHostCapability -from provider.sgx import SGXChecker +from provider.sgx import SGXChecker, SGXHostCapability @error_context.context_aware @@ -41,8 +40,7 @@ def run(test, params, env): vm.migrate(mig_timeout, mig_protocol, env=env) session = vm.wait_for_login() verify_dmesg() - dmesg_output = session.cmd_output(params["guest_sgx_check"], - timeout=240).strip() + dmesg_output = session.cmd_output(params["guest_sgx_check"], timeout=240).strip() session.close() test_check = SGXChecker(test, params, vm) diff --git a/qemu/tests/migration_with_speed_measurement.py b/qemu/tests/migration_with_speed_measurement.py index cab5823de3..5e018ba05a 100644 --- a/qemu/tests/migration_with_speed_measurement.py +++ b/qemu/tests/migration_with_speed_measurement.py @@ -1,16 +1,14 @@ import os import re -import six import time -from virttest import utils_misc -from virttest import qemu_migration +import six +from virttest import qemu_migration, utils_misc from provider import cpuflags -class Statistic(object): - +class Statistic: """ Class to display and collect average, max and min values of a given data set. 
@@ -76,8 +74,7 @@ def run(test, params, env): vm_mem = int(params.get("mem", "512")) - get_mig_speed = re.compile(r"^transferred ram: (\d+) kbytes$", - re.MULTILINE) + get_mig_speed = re.compile(r"^transferred ram: (\d+) kbytes$", re.MULTILINE) mig_speed = params.get("mig_speed", "1G") mig_speed_accuracy = float(params.get("mig_speed_accuracy", "0.2")) @@ -91,11 +88,14 @@ def get_migration_statistic(vm): pass for _ in range(30): o = vm.monitor.info("migrate") - warning_msg = ("Migration already ended. Migration speed is" - " probably too high and will block vm while" - " filling its memory.") - fail_msg = ("Could not determine the transferred memory from" - " monitor data: %s" % o) + warning_msg = ( + "Migration already ended. Migration speed is" + " probably too high and will block vm while" + " filling its memory." + ) + fail_msg = ( + "Could not determine the transferred memory from" f" monitor data: {o}" + ) if isinstance(o, six.string_types): if "status: active" not in o: test.error(warning_msg) @@ -123,24 +123,27 @@ def get_migration_statistic(vm): try: # Reboot the VM in the background - cpuflags.install_cpuflags_util_on_vm(test, vm, install_path, - extra_flags="-msse3 -msse2") + cpuflags.install_cpuflags_util_on_vm( + test, vm, install_path, extra_flags="-msse3 -msse2" + ) qemu_migration.set_speed(vm, mig_speed) - cmd = ("%s/cpuflags-test --stressmem %d,%d" % - (os.path.join(install_path, "cpu_flags", "src"), - vm_mem * 4, vm_mem / 2)) + cmd = "%s/cpuflags-test --stressmem %d,%d" % ( + os.path.join(install_path, "cpu_flags", "src"), + vm_mem * 4, + vm_mem / 2, + ) test.log.debug("Sending command: %s", cmd) session.sendline(cmd) time.sleep(2) - clonevm = vm.migrate(mig_timeout, mig_protocol, - not_wait_for_migration=True, env=env) + clonevm = vm.migrate( + mig_timeout, mig_protocol, not_wait_for_migration=True, env=env + ) - mig_speed = int(float( - utils_misc.normalize_data_size(mig_speed, "M"))) + mig_speed = int(float(utils_misc.normalize_data_size(mig_speed, "M"))) mig_stat = get_migration_statistic(vm) @@ -148,25 +151,25 @@ def get_migration_statistic(vm): ack_speed = mig_speed * mig_speed_accuracy test.log.info("Target migration speed: %d MB/s.", mig_speed) - test.log.info( - "Average migration speed: %d MB/s", mig_stat.get_average()) + test.log.info("Average migration speed: %d MB/s", mig_stat.get_average()) test.log.info("Minimum migration speed: %d MB/s", mig_stat.get_min()) test.log.info("Maximum migration speed: %d MB/s", mig_stat.get_max()) - test.log.info("Maximum tolerable divergence: %3.1f%%", - mig_speed_accuracy * 100) + test.log.info("Maximum tolerable divergence: %3.1f%%", mig_speed_accuracy * 100) if real_speed < mig_speed - ack_speed: divergence = (1 - float(real_speed) / float(mig_speed)) * 100 - test.error("Average migration speed (%s MB/s) " - "is %3.1f%% lower than target (%s MB/s)" % - (real_speed, divergence, mig_speed)) + test.error( + f"Average migration speed ({real_speed} MB/s) " + f"is {divergence:3.1f}% lower than target ({mig_speed} MB/s)" + ) if real_speed > mig_speed + ack_speed: divergence = (1 - float(mig_speed) / float(real_speed)) * 100 - test.error("Average migration speed (%s MB/s) " - "is %3.1f%% higher than target (%s MB/s)" % - (real_speed, divergence, mig_speed)) + test.error( + f"Average migration speed ({real_speed} MB/s) " + f"is {divergence:3.1f}% higher than target ({mig_speed} MB/s)" + ) finally: session.close() diff --git a/qemu/tests/migration_with_vsock.py b/qemu/tests/migration_with_vsock.py index 08d32075fa..690ec7671f 100644 
--- a/qemu/tests/migration_with_vsock.py +++ b/qemu/tests/migration_with_vsock.py @@ -1,16 +1,15 @@ -import random import ast +import random + +from avocado.utils import path, process +from virttest import error_context, utils_misc -from avocado.utils import path -from avocado.utils import process -from virttest import error_context -from virttest import utils_misc from qemu.tests.vsock_test import ( + check_received_data, compile_nc_vsock, - vsock_listen, send_data_from_guest_to_host, - check_received_data, - vsock_connect + vsock_connect, + vsock_listen, ) @@ -39,13 +38,12 @@ def run(test, params, env): """ def ping_pong_migration(repeat_times): - """ Do ping pong migration. """ + """Do ping pong migration.""" mig_timeout = float(params.get("mig_timeout", "3600")) mig_protocol = params.get("migration_protocol", "tcp") mig_cancel_delay = int(params.get("mig_cancel") == "yes") * 2 inner_funcs = ast.literal_eval(params.get("migrate_inner_funcs", "[]")) - capabilities = ast.literal_eval( - params.get("migrate_capabilities", "{}")) + capabilities = ast.literal_eval(params.get("migrate_capabilities", "{}")) for i in range(repeat_times): if i % 2 == 0: test.log.info("Round %s ping...", str(i / 2)) @@ -97,31 +95,25 @@ def input_character_vsock(): # do migration ping_pong_migration(1) session = vm.wait_for_login(timeout=360) - if session.cmd_output("ss --vsock | grep %s" % port): - test.fail( - "vsock listening process inside guest does not exit after migrate") + if session.cmd_output(f"ss --vsock | grep {port}"): + test.fail("vsock listening process inside guest does not exit after migrate") host_vsock_session.close() # send data from guest to host - tmp_file = "/tmp/vsock_file_%s" % utils_misc.generate_random_string(6) - rec_session = send_data_from_guest_to_host( - session, tool_bin, guest_cid, tmp_file - ) + tmp_file = f"/tmp/vsock_file_{utils_misc.generate_random_string(6)}" + rec_session = send_data_from_guest_to_host(session, tool_bin, guest_cid, tmp_file) utils_misc.wait_for(lambda: not rec_session.is_alive(), timeout=20) - cmd_chksum = "md5sum %s" % tmp_file - md5_origin = session.cmd_output( - cmd_chksum, timeout=180, safe=True).split()[0] - md5_received = process.system_output( - cmd_chksum, timeout=180).split()[0].decode() + cmd_chksum = f"md5sum {tmp_file}" + md5_origin = session.cmd_output(cmd_chksum, timeout=180, safe=True).split()[0] + md5_received = process.system_output(cmd_chksum, timeout=180).split()[0].decode() host_vsock_session = input_character_vsock() ping_pong_migration(3) - cmd_rm = "rm -rf %s" % tmp_file + cmd_rm = f"rm -rf {tmp_file}" if vsock_test_tool == "nc_vsock": - cmd_rm += "; rm -rf %s*" % tool_bin + cmd_rm += f"; rm -rf {tool_bin}*" session.cmd_output_safe(cmd_rm, timeout=120) process.system(cmd_rm, shell=True, ignore_status=True) if md5_received != md5_origin: test.fail( "Data transfer not integrated, the original md5 value" - " is %s, while the md5 value received on host is %s" - % (md5_origin, md5_received) + f" is {md5_origin}, while the md5 value received on host is {md5_received}" ) diff --git a/qemu/tests/mlock_basic.py b/qemu/tests/mlock_basic.py index 4ef260aab1..6c78be5e64 100644 --- a/qemu/tests/mlock_basic.py +++ b/qemu/tests/mlock_basic.py @@ -1,14 +1,13 @@ import logging from resource import getpagesize -from virttest import env_process -from virttest import error_context +from virttest import env_process, error_context from virttest.staging.utils_memory import read_from_vmstat -LOG_JOB = logging.getLogger('avocado.test') +LOG_JOB = 
logging.getLogger("avocado.test") -class MlockBasic(object): +class MlockBasic: """ Base class for mlock test """ @@ -34,49 +33,63 @@ def _check_mlock_unevictable(self): nr_mlock = self.mlock_post - self.mlock_pre nr_unevictable = self.unevictable_post - self.unevictable_pre if nr_mlock < vm_pages: - self.test.fail("nr_mlock is not fit with VM memory" - " when mlock is %s!" - " nr_mlock = %d, vm_mem = %d." - % (self.realtime_mlock, nr_mlock, self.vm_mem)) + self.test.fail( + "nr_mlock is not fit with VM memory" + " when mlock is %s!" + " nr_mlock = %d, vm_mem = %d." + % (self.realtime_mlock, nr_mlock, self.vm_mem) + ) if nr_unevictable < vm_pages: - self.test.fail("nr_unevictable is not fit with VM memory" - " when mlock is %s!" - " nr_unevictable = %d, vm_mem = %d." - % (self.realtime_mlock, nr_unevictable, - self.vm_mem)) + self.test.fail( + "nr_unevictable is not fit with VM memory" + " when mlock is %s!" + " nr_unevictable = %d, vm_mem = %d." + % (self.realtime_mlock, nr_unevictable, self.vm_mem) + ) else: if self.mlock_post != self.mlock_pre: - self.test.fail("mlock_post != mlock_pre when mlock is %s!" - % self.realtime_mlock) + self.test.fail( + f"mlock_post != mlock_pre when mlock is {self.realtime_mlock}!" + ) if self.unevictable_post != self.unevictable_pre: - self.test.fail("unevictable_post != unevictable_pre" - " when mlock is %s!" - % self.realtime_mlock) + self.test.fail( + "unevictable_post != unevictable_pre" + f" when mlock is {self.realtime_mlock}!" + ) def start(self): """ Start mlock basic test """ - error_context.context("Get nr_mlock and nr_unevictable in host" - " before VM start!", LOG_JOB.info) + error_context.context( + "Get nr_mlock and nr_unevictable in host" " before VM start!", LOG_JOB.info + ) self.mlock_pre = read_from_vmstat("nr_mlock") self.unevictable_pre = read_from_vmstat("nr_unevictable") - LOG_JOB.info("mlock_pre is %d and unevictable_pre is %d.", - self.mlock_pre, self.unevictable_pre) + LOG_JOB.info( + "mlock_pre is %d and unevictable_pre is %d.", + self.mlock_pre, + self.unevictable_pre, + ) self.params["start_vm"] = "yes" error_context.context("Starting VM!", LOG_JOB.info) - env_process.preprocess_vm(self.test, self.params, - self.env, self.params["main_vm"]) + env_process.preprocess_vm( + self.test, self.params, self.env, self.params["main_vm"] + ) self.vm = self.env.get_vm(self.params["main_vm"]) self.vm.verify_alive() - error_context.context("Get nr_mlock and nr_unevictable in host" - " after VM start!", LOG_JOB.info) + error_context.context( + "Get nr_mlock and nr_unevictable in host" " after VM start!", LOG_JOB.info + ) self.mlock_post = read_from_vmstat("nr_mlock") self.unevictable_post = read_from_vmstat("nr_unevictable") - LOG_JOB.info("mlock_post is %d and unevictable_post is %d.", - self.mlock_post, self.unevictable_post) + LOG_JOB.info( + "mlock_post is %d and unevictable_post is %d.", + self.mlock_post, + self.unevictable_post, + ) self._check_mlock_unevictable() diff --git a/qemu/tests/mmu_basic.py b/qemu/tests/mmu_basic.py index 5086a78ff0..12e6ef096e 100644 --- a/qemu/tests/mmu_basic.py +++ b/qemu/tests/mmu_basic.py @@ -1,7 +1,6 @@ import re -from virttest import error_context -from virttest import utils_test +from virttest import error_context, utils_test @error_context.context_aware @@ -30,25 +29,25 @@ def cpu_info_match(pattern): session = vm.wait_for_login() error_context.context("Check the MMU mode.", test.log.info) - if cpu_info_match('MMU'): - if cpu_info_match('POWER9'): - if cpu_info_match('Radix') is False: + if 
cpu_info_match("MMU"): + if cpu_info_match("POWER9"): + if cpu_info_match("Radix") is False: test.fail("mmu mode is not Radix, doesn't meet expectations.") else: - if cpu_info_match('Hash') is False: + if cpu_info_match("Hash") is False: test.fail("mmu mode is not Hash, doesn't meet expectations.") else: - if params["mmu_option"] == 'yes': + if params["mmu_option"] == "yes": test.fail("There should be MMU mode.") utils_test.update_boot_option(vm, args_added="disable_radix") session = vm.wait_for_login() error_context.context("Check the MMU mode.", test.log.info) - if cpu_info_match('MMU'): - if cpu_info_match('Hash') is False: + if cpu_info_match("MMU"): + if cpu_info_match("Hash") is False: test.fail("mmu mode is not Hash, mmu mode disabled failure.") else: - if params["mmu_option"] == 'yes': + if params["mmu_option"] == "yes": test.fail("There should be MMU mode.") vm.verify_dmesg() diff --git a/qemu/tests/monitor_cmds_check.py b/qemu/tests/monitor_cmds_check.py index 1a2f277665..6ef04880e2 100644 --- a/qemu/tests/monitor_cmds_check.py +++ b/qemu/tests/monitor_cmds_check.py @@ -1,5 +1,4 @@ -from virttest import error_context -from virttest import qemu_monitor +from virttest import error_context, qemu_monitor @error_context.context_aware @@ -30,10 +29,12 @@ def is_supported(cmd): protocol = vm.monitor.protocol black_cmds = params.get("black_cmds", "").split() - error_context.context("Verify black commands are unavaliable in " - "'%s' monitor" % protocol, test.log.info) + error_context.context( + "Verify black commands are unavaliable in " f"'{protocol}' monitor", + test.log.info, + ) test.log.info("Black commands: %s", black_cmds) cmds = [cmd for cmd in black_cmds if is_supported(cmd)] if cmds: - msg = "Unexpected commands %s found in %s monitor" % (cmds, protocol) + msg = f"Unexpected commands {cmds} found in {protocol} monitor" test.fail(msg) diff --git a/qemu/tests/mq_change_qnum.py b/qemu/tests/mq_change_qnum.py index d04c5da9c4..d77c4bcd25 100644 --- a/qemu/tests/mq_change_qnum.py +++ b/qemu/tests/mq_change_qnum.py @@ -1,11 +1,7 @@ import re import aexpect - -from virttest import error_context -from virttest import utils_net -from virttest import utils_test -from virttest import utils_misc +from virttest import error_context, utils_misc, utils_net, utils_test @error_context.context_aware @@ -24,6 +20,7 @@ def run(test, params, env): :param params: Dictionary with the test parameters. :param env: Dictionary with test environment. 
""" + def change_queues_number(session, ifname, q_number, queues_status=None): """ Change queues number @@ -37,10 +34,15 @@ def change_queues_number(session, ifname, q_number, queues_status=None): err_msg = "" expect_q_number = q_number - if (q_number != queues_status[1] and q_number <= queues_status[0] - and q_number > 0): - if (cur_queues_status[1] != q_number - or cur_queues_status[0] != queues_status[0]): + if ( + q_number != queues_status[1] + and q_number <= queues_status[0] + and q_number > 0 + ): + if ( + cur_queues_status[1] != q_number + or cur_queues_status[0] != queues_status[0] + ): err_msg = "Param is valid, but change queues failed, " elif cur_queues_status != queues_status: if q_number != queues_status[1]: @@ -49,12 +51,12 @@ def change_queues_number(session, ifname, q_number, queues_status=None): expect_q_number = queues_status[1] if len(err_msg) > 0: - err_msg += "current queues set is %s, " % cur_queues_status[1] - err_msg += "max allow queues set is %s, " % cur_queues_status[0] - err_msg += "when run cmd: '%s', " % mq_set_cmd - err_msg += "expect queues are %s," % expect_q_number - err_msg += "expect max allow queues are %s, " % queues_status[0] - err_msg += "output: '%s'" % output + err_msg += f"current queues set is {cur_queues_status[1]}, " + err_msg += f"max allow queues set is {cur_queues_status[0]}, " + err_msg += f"when run cmd: '{mq_set_cmd}', " + err_msg += f"expect queues are {expect_q_number}," + err_msg += f"expect max allow queues are {queues_status[0]}, " + err_msg += f"output: '{output}'" test.fail(err_msg) return [int(_) for _ in cur_queues_status] @@ -63,15 +65,15 @@ def get_queues_status(session, ifname, timeout=240): """ Get queues status """ - mq_get_cmd = "ethtool -l %s" % ifname + mq_get_cmd = f"ethtool -l {ifname}" nic_mq_info = session.cmd_output(mq_get_cmd, timeout=timeout) queues_reg = re.compile(r"Combined:\s+(\d)", re.I) queues_info = queues_reg.findall(" ".join(nic_mq_info.splitlines())) if len(queues_info) != 2: err_msg = "Oops, get guest queues info failed, " err_msg += "make sure your guest support MQ.\n" - err_msg += "Check cmd is: '%s', " % mq_get_cmd - err_msg += "Command output is: '%s'." % nic_mq_info + err_msg += f"Check cmd is: '{mq_get_cmd}', " + err_msg += f"Command output is: '{nic_mq_info}'." test.cancel(err_msg) return [int(x) for x in queues_info] @@ -84,12 +86,13 @@ def enable_multi_queues(vm): change_queues_number(sess, ifname, queues) def ping_test(dest_ip, ping_time, lost_raito, session=None): - status, output = utils_test.ping(dest=dest_ip, timeout=ping_time, - session=session) + status, output = utils_test.ping( + dest=dest_ip, timeout=ping_time, session=session + ) packets_lost = utils_test.get_loss_ratio(output) if packets_lost > lost_raito: - err = " %s%% packages lost during ping. " % packets_lost - err += "Ping command log:\n %s" % "\n".join(output.splitlines()[-3:]) + err = f" {packets_lost}% packages lost during ping. 
" + err += "Ping command log:\n {}".format("\n".join(output.splitlines()[-3:])) test.fail(err) error_context.context("Init guest and try to login", test.log.info) @@ -116,18 +119,17 @@ def ping_test(dest_ip, ping_time, lost_raito, session=None): f_ping_time = int(params.get("final_ping_time", 60)) bg_test = None try: - ifnames = [] for nic_index, nic in enumerate(vm.virtnet): - ifname = utils_net.get_linux_ifname(session, - vm.virtnet[nic_index].mac) + ifname = utils_net.get_linux_ifname(session, vm.virtnet[nic_index].mac) ifnames.append(ifname) - error_context.context("Run test %s background" % bg_stress_test, - test.log.info) + error_context.context(f"Run test {bg_stress_test} background", test.log.info) stress_thread = utils_misc.InterruptedThread( - utils_test.run_virt_sub_test, (test, params, env), - {"sub_type": bg_stress_test}) + utils_test.run_virt_sub_test, + (test, params, env), + {"sub_type": bg_stress_test}, + ) stress_thread.start() if bg_ping == "yes": @@ -139,7 +141,7 @@ def ping_test(dest_ip, ping_time, lost_raito, session=None): error_context.context("Change queues number repeatly", test.log.info) repeat_counts = int(params.get("repeat_counts", 10)) for nic_index, nic in enumerate(vm.virtnet): - if "virtio" not in nic['nic_model']: + if "virtio" not in nic["nic_model"]: continue queues = int(vm.virtnet[nic_index].queues) if queues == 1: @@ -154,32 +156,33 @@ def ping_test(dest_ip, ping_time, lost_raito, session=None): change_list = default_change_list for repeat_num in range(1, repeat_counts + 1): - error_context.context("Change queues number -- %sth" - % repeat_num, test.log.info) + error_context.context( + f"Change queues number -- {repeat_num}th", test.log.info + ) try: queues_status = get_queues_status(session, ifname) for q_number in change_list: - queues_status = change_queues_number(session, - ifname, - int(q_number), - queues_status) + queues_status = change_queues_number( + session, ifname, int(q_number), queues_status + ) except aexpect.ShellProcessTerminatedError: vm = env.get_vm(params["main_vm"]) session = vm.wait_for_login(timeout=login_timeout) queues_status = get_queues_status(session, ifname) for q_number in change_list: - queues_status = change_queues_number(session, - ifname, - int(q_number), - queues_status) + queues_status = change_queues_number( + session, ifname, int(q_number), queues_status + ) if params.get("ping_after_changing_queues", "yes") == "yes": default_host = "www.redhat.com" ext_host = utils_net.get_default_gateway(session) if not ext_host: # Fallback to a hardcode host, eg: - test.log.warn("Can't get specified host," - " Fallback to default host '%s'", default_host) + test.log.warning( + "Can't get specified host," " Fallback to default host '%s'", + default_host, + ) ext_host = default_host s_session = vm.wait_for_login(timeout=login_timeout) txt = "ping %s after changing queues in guest." 
@@ -187,8 +190,7 @@ def ping_test(dest_ip, ping_time, lost_raito, session=None): ping_test(ext_host, f_ping_time, f_ping_lost_ratio, s_session) if stress_thread: - error_context.context("wait for background test finish", - test.log.info) + error_context.context("wait for background test finish", test.log.info) try: stress_thread.join() except Exception as err: @@ -202,11 +204,12 @@ def ping_test(dest_ip, ping_time, lost_raito, session=None): if s_session: s_session.close() if bg_test: - error_context.context("Wait for background ping test finish.", - test.log.info) + error_context.context( + "Wait for background ping test finish.", test.log.info + ) try: bg_test.join() except Exception as err: txt = "Fail to wait background ping test finish. " - txt += "Got error message %s" % err + txt += f"Got error message {err}" test.fail(txt) diff --git a/qemu/tests/mq_enabled_chk.py b/qemu/tests/mq_enabled_chk.py index 5bcb4edeb2..a8f6dec92a 100644 --- a/qemu/tests/mq_enabled_chk.py +++ b/qemu/tests/mq_enabled_chk.py @@ -1,9 +1,6 @@ import re -from virttest import env_process -from virttest import error_context -from virttest import utils_net -from virttest import utils_test +from virttest import env_process, error_context, utils_net, utils_test @error_context.context_aware @@ -31,16 +28,15 @@ def get_queues_status(session, ifname, timeout=240): :return: queues status list """ - mq_get_cmd = "ethtool -l %s" % ifname - nic_mq_info = session.cmd_output(mq_get_cmd, timeout=timeout, - safe=True) + mq_get_cmd = f"ethtool -l {ifname}" + nic_mq_info = session.cmd_output(mq_get_cmd, timeout=timeout, safe=True) queues_reg = re.compile(r"Combined:\s+(\d)", re.I) queues_info = queues_reg.findall(" ".join(nic_mq_info.splitlines())) if len(queues_info) != 2: err_msg = "Oops, get guest queues info failed, " err_msg += "make sure your guest support MQ.\n" - err_msg += "Check cmd is: '%s', " % mq_get_cmd - err_msg += "Command output is: '%s'." % nic_mq_info + err_msg += f"Check cmd is: '{mq_get_cmd}', " + err_msg += f"Command output is: '{nic_mq_info}'." test.cancel(err_msg) return [int(x) for x in queues_info] @@ -59,17 +55,18 @@ def chk_mq_enabled(vm, queues): queues_status_list = get_queues_status(session, ifname) session.close() - if not queues_status_list[0] == queues or \ - not queues_status_list[1] == min(queues, int(vm.cpuinfo.smp)): + if not queues_status_list[0] == queues or not queues_status_list[1] == min( + queues, int(vm.cpuinfo.smp) + ): txt = "Pre-set maximums Combined should equals to queues in qemu" txt += " cmd line.\n" txt += "Current hardware settings Combined should be the min of " txt += "queues and smp.\n" - txt += "Pre-set maximum Combined is: %s, " % queues_status_list[0] - txt += " queues in qemu cmd line is: %s.\n" % queues + txt += f"Pre-set maximum Combined is: {queues_status_list[0]}, " + txt += f" queues in qemu cmd line is: {queues}.\n" txt += "Current hardware settings Combined " - txt += "is: %s, " % queues_status_list[1] - txt += " smp in qemu cmd line is: %s." % int(vm.cpuinfo.smp) + txt += f"is: {queues_status_list[1]}, " + txt += f" smp in qemu cmd line is: {int(vm.cpuinfo.smp)}." 
test.fail(txt) error_context.context("Init the guest and try to login", test.log.info) @@ -85,8 +82,7 @@ def chk_mq_enabled(vm, queues): chk_mq_enabled(vm, int(queues)) guest_ip = vm.get_address() - status, output = utils_net.ping(guest_ip, 10, session=None, - timeout=20) + status, output = utils_net.ping(guest_ip, 10, session=None, timeout=20) if utils_test.get_loss_ratio(output) > 0: test.fail("Packet lost while doing ping test") diff --git a/qemu/tests/msi_change_flag.py b/qemu/tests/msi_change_flag.py index d132a5be6b..49854c4a10 100644 --- a/qemu/tests/msi_change_flag.py +++ b/qemu/tests/msi_change_flag.py @@ -1,11 +1,10 @@ +import ctypes import os import re -import ctypes from avocado.utils import crypto, process -from virttest import utils_misc -from virttest import utils_test -from virttest import error_context +from virttest import error_context, utils_misc, utils_test + from provider import win_dev @@ -26,13 +25,13 @@ def run(test, params, env): :param params: Dictionary with the test parameters :param env: Dictionary with test environment. """ + def irq_check(session, device_name, devcon_folder): - hwid = win_dev.get_hwids(session, device_name, devcon_folder, - login_timeout)[0] + hwid = win_dev.get_hwids(session, device_name, devcon_folder, login_timeout)[0] get_irq_cmd = params["get_irq_cmd"] % (devcon_folder, hwid) - irq_list = re.findall(r':\s+(\d+)', session.cmd_output(get_irq_cmd), re.M) + irq_list = re.findall(r":\s+(\d+)", session.cmd_output(get_irq_cmd), re.M) if not irq_list: - test.error("device %s's irq checked fail" % device_name) + test.error(f"device {device_name}'s irq checked fail") return irq_list def get_file_md5sum(file_name, session, timeout): @@ -40,10 +39,9 @@ def get_file_md5sum(file_name, session, timeout): return: Return the md5sum value of the guest. 
""" test.log.info("Get md5sum of the file:'%s'", file_name) - s, o = session.cmd_status_output("md5sum %s" % file_name, - timeout=timeout) + s, o = session.cmd_status_output(f"md5sum {file_name}", timeout=timeout) if s != 0: - test.error("Get file md5sum failed as %s" % o) + test.error(f"Get file md5sum failed as {o}") return re.findall(r"\w{32}", o)[0] tmp_dir = params["tmp_dir"] @@ -59,16 +57,20 @@ def get_file_md5sum(file_name, session, timeout): if params.get("os_type") == "linux": error_context.context("Check the pci msi in guest", test.log.info) pci_id = session.cmd_output_safe("lspci |grep Eth |awk {'print $1'}").strip() - status = session.cmd_output_safe("lspci -vvv -s %s|grep MSI-X" % pci_id).strip() - enable_status = re.search(r'Enable\+', status, re.M | re.I) + status = session.cmd_output_safe(f"lspci -vvv -s {pci_id}|grep MSI-X").strip() + enable_status = re.search(r"Enable\+", status, re.M | re.I) if enable_status.group() == "Enable+": error_context.context("Disable pci msi in guest", test.log.info) utils_test.update_boot_option(vm, args_added="pci=nomsi") session_msi = vm.wait_for_serial_login(timeout=login_timeout) - pci_id = session_msi.cmd_output_safe("lspci |grep Eth |awk {'print $1'}").strip() - status = session_msi.cmd_output_safe("lspci -vvv -s %s|grep MSI-X" % pci_id).strip() + pci_id = session_msi.cmd_output_safe( + "lspci |grep Eth |awk {'print $1'}" + ).strip() + status = session_msi.cmd_output_safe( + f"lspci -vvv -s {pci_id}|grep MSI-X" + ).strip() session_msi.close() - change_status = re.search(r'Enable\-', status, re.M | re.I) + change_status = re.search(r"Enable\-", status, re.M | re.I) if change_status.group() != "Enable-": test.fail("virtio device's statuts is not correct") elif enable_status.group() != "Enable+": @@ -78,38 +80,38 @@ def get_file_md5sum(file_name, session, timeout): driver_verifier = params.get("driver_verifier", driver) device_name = params["device_name"] - devcon_folder = utils_misc.set_winutils_letter(session, - params["devcon_folder"]) - error_context.context("Boot guest with %s device" % driver, - test.log.info) - session = utils_test.qemu.windrv_check_running_verifier(session, vm, - test, driver_verifier, - login_timeout) - error_context.context("Check %s's irq number" % device_name, - test.log.info) + devcon_folder = utils_misc.set_winutils_letter(session, params["devcon_folder"]) + error_context.context(f"Boot guest with {driver} device", test.log.info) + session = utils_test.qemu.windrv_check_running_verifier( + session, vm, test, driver_verifier, login_timeout + ) + error_context.context(f"Check {device_name}'s irq number", test.log.info) irq_list = irq_check(session, device_name, devcon_folder) irq_nums = len(irq_list) - if not irq_nums > 1 and\ - max(ctypes.c_int32(int(irq)).value for irq in irq_list) < 0: - test.fail("%s's irq is not correct." 
% device_name) + if ( + not irq_nums > 1 + and max(ctypes.c_int32(int(irq)).value for irq in irq_list) < 0 + ): + test.fail(f"{device_name}'s irq is not correct.") if params.get("msi_cmd"): error_context.context("Disable MSI in guest", test.log.info) - hwid_msi = win_dev.get_hwids(session, device_name, devcon_folder, - login_timeout)[0] + hwid_msi = win_dev.get_hwids( + session, device_name, devcon_folder, login_timeout + )[0] session.cmd(params["msi_cmd"] % (hwid_msi, 0)) session = vm.reboot(session=session) - error_context.context("Check %s's irq number" % device_name, - test.log.info) + error_context.context(f"Check {device_name}'s irq number", test.log.info) irq_list = irq_check(session, device_name, devcon_folder) irq_nums = len(irq_list) - if not irq_nums == 1 and \ - min(ctypes.c_int32(int(irq)).value for irq in irq_list) > 0: - test.fail("%s's irq is not correct." % device_name) + if ( + not irq_nums == 1 + and min(ctypes.c_int32(int(irq)).value for irq in irq_list) > 0 + ): + test.fail(f"{device_name}'s irq is not correct.") # prepare test data - guest_path = (tmp_dir + "src-%s" % utils_misc.generate_random_string(8)) - host_path = os.path.join(test.tmpdir, "tmp-%s" % - utils_misc.generate_random_string(8)) + guest_path = tmp_dir + f"src-{utils_misc.generate_random_string(8)}" + host_path = os.path.join(test.tmpdir, f"tmp-{utils_misc.generate_random_string(8)}") test.log.info("Test setup: Creating %dMB file on host", filesize) process.run(dd_cmd % host_path, shell=True) @@ -117,16 +119,15 @@ def get_file_md5sum(file_name, session, timeout): src_md5 = crypto.hash_file(host_path, algorithm="md5") test.log.info("md5 value of data from src: %s", src_md5) # transfer data - error_context.context("Transfer data from host to %s" % vm.name, - test.log.info) + error_context.context(f"Transfer data from host to {vm.name}", test.log.info) vm.copy_files_to(host_path, guest_path) - dst_md5 = get_file_md5sum(guest_path, session, - timeout=file_md5_check_timeout) + dst_md5 = get_file_md5sum(guest_path, session, timeout=file_md5_check_timeout) test.log.info("md5 value of data in %s: %s", vm.name, dst_md5) if dst_md5 != src_md5: - test.fail("File changed after transfer host -> %s" % vm.name) + test.fail(f"File changed after transfer host -> {vm.name}") finally: os.remove(host_path) - session.cmd(delete_cmd % guest_path, - timeout=login_timeout, ignore_all_errors=True) + session.cmd( + delete_cmd % guest_path, timeout=login_timeout, ignore_all_errors=True + ) session.close() diff --git a/qemu/tests/multi_disk.py b/qemu/tests/multi_disk.py index cacdfdd212..ed626f79a3 100644 --- a/qemu/tests/multi_disk.py +++ b/qemu/tests/multi_disk.py @@ -3,23 +3,19 @@ :copyright: 2011-2012 Red Hat Inc. 
""" -import re + import random +import re import string -from avocado.utils import astring -from avocado.utils import process +from avocado.utils import astring, process +from virttest import env_process, error_context, qemu_qtree, utils_disk, utils_misc -from virttest import env_process -from virttest import error_context -from virttest import qemu_qtree -from virttest import utils_misc -from virttest import utils_disk from provider.storage_benchmark import generate_instance -_RE_RANGE1 = re.compile(r'range\([ ]*([-]?\d+|n).*\)') -_RE_RANGE2 = re.compile(r',[ ]*([-]?\d+|n)') -_RE_BLANKS = re.compile(r'^([ ]*)') +_RE_RANGE1 = re.compile(r"range\([ ]*([-]?\d+|n).*\)") +_RE_RANGE2 = re.compile(r",[ ]*([-]?\d+|n)") +_RE_BLANKS = re.compile(r"^([ ]*)") @error_context.context_aware @@ -39,20 +35,20 @@ def _range(buf, n=None): return False out = [out.groups()[0]] out.extend(_RE_RANGE2.findall(buf)) - if 'n' in out: + if "n" in out: if n is None: # Don't know what to substitute, return the original return buf else: # Doesn't cover all cases and also it works it's way... n = int(n) - if out[0] == 'n': + if out[0] == "n": out[0] = int(n) - if len(out) > 1 and out[1] == 'n': + if len(out) > 1 and out[1] == "n": out[1] = int(out[0]) + n - if len(out) > 2 and out[2] == 'n': + if len(out) > 2 and out[2] == "n": out[2] = (int(out[1]) - int(out[0])) / n - if len(out) > 3 and out[3] == 'n': + if len(out) > 3 and out[3] == "n": _len = len(range(int(out[0]), int(out[1]), int(out[2]))) out[3] = n / _len if n % _len: @@ -94,13 +90,14 @@ def run(test, params, env): :param params: Dictionary with the test parameters :param env: Dictionary with test environment. """ + def _add_param(name, value): - """ Converts name+value to stg_params string """ + """Converts name+value to stg_params string""" if value: - value = re.sub(' ', '\\ ', value) - return " %s:%s " % (name, value) + value = re.sub(" ", "\\ ", value) + return f" {name}:{value} " else: - return '' + return "" def _do_post_cmd(session): cmd = params.get("post_cmd") @@ -109,8 +106,9 @@ def _do_post_cmd(session): session.close() def _get_windows_disks_index(image_size): - cmd_file = "disk_" + ''.join( - random.sample(string.ascii_letters + string.digits, 4)) + cmd_file = "disk_" + "".join( + random.sample(string.ascii_letters + string.digits, 4) + ) disk_indexs = [] list_disk_cmd = "echo list disk > " + cmd_file list_disk_cmd += " && echo exit >> " + cmd_file @@ -125,7 +123,7 @@ def _get_windows_disks_index(image_size): else: disk_size = image_size[:-1] + " GB" - regex_str = r'Disk (\d+).*?%s' % disk_size + regex_str = rf"Disk (\d+).*?{disk_size}" for cmd_file in all_disks.splitlines(): if cmd_file.startswith(" Disk"): @@ -136,33 +134,36 @@ def _get_windows_disks_index(image_size): def _get_data_disks(): if ostype == "windows": - error_context.context("Get windows disk index that to " - "be formatted", test.log.info) + error_context.context( + "Get windows disk index that to " "be formatted", test.log.info + ) data_disks = _get_windows_disks_index(stg_image_size) if len(data_disks) < stg_image_num: - test.fail("Fail to list all the volumes" - ", %s" % err_msg % len(data_disks)) + test.fail( + "Fail to list all the volumes" f", {err_msg}" % len(data_disks) + ) if len(data_disks) > drive_letters: black_list.extend(utils_misc.get_winutils_vol(session)) - data_disks = random.sample(data_disks, - drive_letters - len(black_list)) - error_context.context("Clear readonly for all disks and online " - "them in windows guest.", test.log.info) - if not 
utils_disk.update_windows_disk_attributes(session, - data_disks): + data_disks = random.sample(data_disks, drive_letters - len(black_list)) + error_context.context( + "Clear readonly for all disks and online " "them in windows guest.", + test.log.info, + ) + if not utils_disk.update_windows_disk_attributes(session, data_disks): test.fail("Failed to update windows disk attributes.") - dd_test = "no" else: - error_context.context("Get linux disk that to be " - "formatted", test.log.info) + error_context.context( + "Get linux disk that to be " "formatted", test.log.info + ) data_disks = [] all_disks = utils_disk.get_linux_disks(session, True) for kname, attr in all_disks.items(): if attr[1] == stg_image_size and attr[2] == "disk": data_disks.append(kname) if len(data_disks) < stg_image_num: - test.fail("Fail to list all the volumes" - ", %s" % err_msg % len(data_disks)) + test.fail( + "Fail to list all the volumes" f", {err_msg}" % len(data_disks) + ) return sorted(data_disks) error_context.context("Parsing test configuration", test.log.info) @@ -176,18 +177,17 @@ def _get_data_disks(): stg_params += _add_param("drive_cache", params.get("stg_drive_cache")) if params.get("stg_assign_index") != "no": # Assume 0 and 1 are already occupied (hd0 and cdrom) - stg_params += _add_param("drive_index", 'range(2,n)') + stg_params += _add_param("drive_index", "range(2,n)") param_matrix = {} - stg_params = stg_params.split(' ') + stg_params = stg_params.split(" ") i = 0 while i < len(stg_params) - 1: if not stg_params[i].strip(): i += 1 continue - if stg_params[i][-1] == '\\': - stg_params[i] = '%s %s' % (stg_params[i][:-1], - stg_params.pop(i + 1)) + if stg_params[i][-1] == "\\": + stg_params[i] = f"{stg_params[i][:-1]} {stg_params.pop(i + 1)}" i += 1 rerange = [] @@ -195,15 +195,17 @@ def _get_data_disks(): for i in range(len(stg_params)): if not stg_params[i].strip(): continue - (cmd, parm) = stg_params[i].split(':', 1) + (cmd, parm) = stg_params[i].split(":", 1) if cmd == "image_name": has_name = True if _RE_RANGE1.match(parm): parm = _range(parm) if parm is False: - test.error("Incorrect cfg: stg_params %s looks " - "like range(..) but doesn't contain " - "numbers." % cmd) + test.error( + f"Incorrect cfg: stg_params {cmd} looks " + "like range(..) but doesn't contain " + "numbers." + ) param_matrix[cmd] = parm if type(parm) is str: # When we know the stg_image_num, substitute it. 
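# Illustrative sketch (hypothetical input) of the range(...) placeholder
# syntax that the stg_params parsing above handles: _RE_RANGE1 captures the
# first argument of "range(2,n)" and _RE_RANGE2 the remaining ones, so the
# 'n' placeholders can later be substituted by _range() once stg_image_num
# is known.
import re

_RE_RANGE1 = re.compile(r"range\([ ]*([-]?\d+|n).*\)")
_RE_RANGE2 = re.compile(r",[ ]*([-]?\d+|n)")

buf = "range(2,n)"
out = [_RE_RANGE1.match(buf).groups()[0]]
out.extend(_RE_RANGE2.findall(buf))
print(out)  # ['2', 'n'] -- 'n' gets replaced once the disk count is known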
@@ -211,52 +213,55 @@ def _get_data_disks(): continue else: # ',' separated list of values - parm = parm.split(',') + parm = parm.split(",") j = 0 while j < len(parm) - 1: - if parm[j][-1] == '\\': - parm[j] = '%s,%s' % (parm[j][:-1], parm.pop(j + 1)) + if parm[j][-1] == "\\": + parm[j] = f"{parm[j][:-1]},{parm.pop(j + 1)}" j += 1 param_matrix[cmd] = parm stg_image_num = max(stg_image_num, len(parm)) - stg_image_num = int(params.get('stg_image_num', stg_image_num)) + stg_image_num = int(params.get("stg_image_num", stg_image_num)) for cmd in rerange: param_matrix[cmd] = _range(param_matrix[cmd], stg_image_num) # param_table* are for pretty print of param_matrix param_table = [] - param_table_header = ['name'] + param_table_header = ["name"] if not has_name: - param_table_header.append('image_name') + param_table_header.append("image_name") for _ in param_matrix: param_table_header.append(_) - stg_image_name = params.get('stg_image_name', 'images/%s') + stg_image_name = params.get("stg_image_name", "images/%s") for i in range(stg_image_num): name = "stg%d" % i - params['images'] += " %s" % name + params["images"] += f" {name}" param_table.append([]) param_table[-1].append(name) if not has_name: - params["image_name_%s" % name] = stg_image_name % name - param_table[-1].append(params.get("image_name_%s" % name)) + params[f"image_name_{name}"] = stg_image_name % name + param_table[-1].append(params.get(f"image_name_{name}")) for parm in param_matrix.items(): - params['%s_%s' % (parm[0], name)] = str(parm[1][i % len(parm[1])]) - param_table[-1].append(params.get('%s_%s' % (parm[0], name))) + params[f"{parm[0]}_{name}"] = str(parm[1][i % len(parm[1])]) + param_table[-1].append(params.get(f"{parm[0]}_{name}")) - if params.get("multi_disk_params_only") == 'yes': + if params.get("multi_disk_params_only") == "yes": # Only print the test param_matrix and finish - test.log.info('Newly added disks:\n%s', - astring.tabular_output(param_table, param_table_header)) + test.log.info( + "Newly added disks:\n%s", + astring.tabular_output(param_table, param_table_header), + ) return - disk_check_cmd = params.get('disk_check_cmd') - indirect_image_blacklist = params.get('indirect_image_blacklist').split() + disk_check_cmd = params.get("disk_check_cmd") + indirect_image_blacklist = params.get("indirect_image_blacklist").split() get_new_disks_cmd = params.get("get_new_disks_cmd") if disk_check_cmd: - new_images = process.run(get_new_disks_cmd, ignore_status=True, - shell=True).stdout_text + new_images = process.run( + get_new_disks_cmd, ignore_status=True, shell=True + ).stdout_text for black_disk in indirect_image_blacklist[:]: if re.search(black_disk, new_images): indirect_image_blacklist.remove(black_disk) @@ -266,8 +271,7 @@ def _get_data_disks(): error_context.context("Start the guest with new disks", test.log.info) for vm_name in params.objects("vms"): vm_params = params.object_params(vm_name) - env_process.process_images(env_process.preprocess_image, test, - vm_params) + env_process.process_images(env_process.preprocess_image, test, vm_params) error_context.context("Start the guest with those disks", test.log.info) vm = env.get_vm(params["main_vm"]) @@ -285,9 +289,9 @@ def _get_data_disks(): dd_test = params.get("dd_test", "no") pre_command = params.get("pre_command", "") labeltype = params.get("labeltype", "gpt") - iozone_target_num = int(params.get('iozone_target_num', '5')) - iozone_options = params.get('iozone_options') - iozone_timeout = float(params.get('iozone_timeout', '7200')) + iozone_target_num = 
int(params.get("iozone_target_num", "5")) + iozone_options = params.get("iozone_options") + iozone_timeout = float(params.get("iozone_timeout", "7200")) have_qtree = True out = vm.monitor.human_monitor_cmd("info qtree", debug=False) @@ -298,20 +302,20 @@ def _get_data_disks(): error_context.context("Verifying qtree vs. test params") err = 0 qtree = qemu_qtree.QtreeContainer() - qtree.parse_info_qtree(vm.monitor.info('qtree')) + qtree.parse_info_qtree(vm.monitor.info("qtree")) disks = qemu_qtree.QtreeDisksContainer(qtree.get_nodes()) (tmp1, tmp2) = disks.parse_info_block(vm.monitor.info_block()) err += tmp1 + tmp2 err += disks.generate_params() err += disks.check_disk_params(params) (tmp1, tmp2, _, _) = disks.check_guests_proc_scsi( - session.cmd_output('cat /proc/scsi/scsi')) + session.cmd_output("cat /proc/scsi/scsi") + ) err += tmp1 + tmp2 if err: - test.fail("%s errors occurred while verifying qtree vs." - " params" % err) - if params.get('multi_disk_only_qtree') == 'yes': + test.fail(f"{err} errors occurred while verifying qtree vs." " params") + if params.get("multi_disk_only_qtree") == "yes": return try: err_msg = "Set disks num: %d" % stg_image_num @@ -322,21 +326,25 @@ def _get_data_disks(): _do_post_cmd(session) raise if iozone_options: - iozone = generate_instance(params, vm, 'iozone') + iozone = generate_instance(params, vm, "iozone") random.shuffle(disks) try: for i in range(n_repeat): test.log.info("iterations: %s", (i + 1)) - test.log.info("Get disks:" + " ".join(disks)) + test.log.info("Get disks: %s", " ".join(disks)) for n, disk in enumerate(disks): - error_context.context("Format disk in guest: '%s'" % disk, - test.log.info) + error_context.context(f"Format disk in guest: '{disk}'", test.log.info) # Random select one file system from file_system index = random.randint(0, (len(file_system) - 1)) fstype = file_system[index].strip() partitions = utils_disk.configure_empty_disk( - session, disk, stg_image_size, ostype, - fstype=fstype, labeltype=labeltype) + session, + disk, + stg_image_size, + ostype, + fstype=fstype, + labeltype=labeltype, + ) if not partitions: test.fail("Fail to format disks.") cmd_list = params["cmd_list"] @@ -346,8 +354,10 @@ def _get_data_disks(): partition += ":" else: partition = partition.split("/")[-1] - error_context.context("Copy file into / out of partition:" - " %s..." % partition, test.log.info) + error_context.context( + "Copy file into / out of partition:" f" {partition}...", + test.log.info, + ) for cmd_l in cmd_list.split(): cmd = params.get(cmd_l) if cmd: @@ -356,17 +366,21 @@ def _get_data_disks(): key_word = params["check_result_key_word"] output = session.cmd_output(cmd) if iozone_options and n < iozone_target_num: - iozone.run(iozone_options.format(orig_partition), # pylint: disable=E0606 - iozone_timeout) + iozone.run( + iozone_options.format(orig_partition), # pylint: disable=E0606 + iozone_timeout, + ) if key_word not in output: test.fail("Files on guest os root fs and disk differ") if dd_test != "no": - error_context.context("dd test on partition: %s..." 
- % partition, test.log.info) + error_context.context( + f"dd test on partition: {partition}...", test.log.info + ) status, output = session.cmd_status_output( - dd_test % (partition, partition), timeout=cmd_timeout) + dd_test % (partition, partition), timeout=cmd_timeout + ) if status != 0: - test.fail("dd test fail: %s" % output) + test.fail(f"dd test fail: {output}") # When multiple SCSI disks are simulated by scsi_debug, # they could be viewed as multiple paths to the same # storage device. So need umount partition before operate @@ -374,10 +388,12 @@ def _get_data_disks(): # (xfs integrity checks error). if ostype == "linux" and "scsi_debug add_host" in pre_command: status, output = session.cmd_status_output( - "umount /dev/%s" % partition, timeout=cmd_timeout) + f"umount /dev/{partition}", timeout=cmd_timeout + ) if status != 0: - test.fail("Failed to umount partition '%s': %s" - % (partition, output)) + test.fail( + f"Failed to umount partition '{partition}': {output}" + ) need_reboot = params.get("need_reboot", "no") need_shutdown = params.get("need_shutdown", "no") if need_reboot == "yes": @@ -394,7 +410,7 @@ def _get_data_disks(): session = vm.wait_for_login(timeout=login_timeout) disks = _get_data_disks() - test.log.info("Get disks again:" + " ".join(disks)) + test.log.info("Get disks again: %s", " ".join(disks)) error_context.context("Delete partitions in guest.", test.log.info) for disk in disks: utils_disk.clean_partition(session, disk, ostype) diff --git a/qemu/tests/multi_disk_random_hotplug.py b/qemu/tests/multi_disk_random_hotplug.py index e312727c2d..669e0c72b6 100644 --- a/qemu/tests/multi_disk_random_hotplug.py +++ b/qemu/tests/multi_disk_random_hotplug.py @@ -3,21 +3,23 @@ :copyright: 2013 Red Hat Inc. """ + import random -import time import re +import time from avocado.utils import process - -from virttest import error_context -from virttest import funcatexit -from virttest import data_dir -from virttest import qemu_qtree -from virttest import utils_test -from virttest import utils_disk -from virttest import env_process -from virttest.remote import LoginTimeoutError +from virttest import ( + data_dir, + env_process, + error_context, + funcatexit, + qemu_qtree, + utils_disk, + utils_test, +) from virttest.qemu_monitor import Monitor +from virttest.remote import LoginTimeoutError from provider.block_devices_plug import BlockDevicesPlug from provider.storage_benchmark import generate_instance @@ -52,31 +54,41 @@ def convert_params(params, args): :return: Updated dictionary with the test parameters :rtype: virttest.utils_params.Params """ - convert = {'fmt': 'drive_format', 'cache': 'drive_cache', - 'werror': 'drive_werror', 'rerror': 'drive_rerror', - 'serial': 'drive_serial', 'snapshot': 'image_snapshot', - 'bus': 'drive_bus', 'unit': 'drive_unit', 'port': 'drive_port', - 'readonly': 'image_readonly', 'scsiid': 'drive_scsiid', - 'lun': 'drive_lun', 'aio': 'image_aio', - 'imgfmt': 'image_format', 'pci_addr': 'drive_pci_addr', - 'x_data_plane': 'x-data-plane', - 'scsi': 'virtio-blk-pci_scsi'} - name = args.pop('name') - params['images'] += " %s" % name - params['image_name_%s' % name] = args.pop('filename') - params["image_size_%s" % name] = params['stg_image_size'] - params['remove_image_%s' % name] = 'yes' - params['boot_drive_%s' % name] = 'no' - if params.get('image_format_%s' % name): - params['image_format_%s' % name] = params.get('image_format_%s' % name) + convert = { + "fmt": "drive_format", + "cache": "drive_cache", + "werror": "drive_werror", + "rerror": 
"drive_rerror", + "serial": "drive_serial", + "snapshot": "image_snapshot", + "bus": "drive_bus", + "unit": "drive_unit", + "port": "drive_port", + "readonly": "image_readonly", + "scsiid": "drive_scsiid", + "lun": "drive_lun", + "aio": "image_aio", + "imgfmt": "image_format", + "pci_addr": "drive_pci_addr", + "x_data_plane": "x-data-plane", + "scsi": "virtio-blk-pci_scsi", + } + name = args.pop("name") + params["images"] += f" {name}" + params[f"image_name_{name}"] = args.pop("filename") + params[f"image_size_{name}"] = params["stg_image_size"] + params[f"remove_image_{name}"] = "yes" + params[f"boot_drive_{name}"] = "no" + if params.get(f"image_format_{name}"): + params[f"image_format_{name}"] = params.get(f"image_format_{name}") else: - params['image_format_%s' % name] = params.get('image_format') - if params.get('image_iothread_%s' % name): - params['image_iothread_%s' % name] = params.get('image_iothread_%s' % name) + params[f"image_format_{name}"] = params.get("image_format") + if params.get(f"image_iothread_{name}"): + params[f"image_iothread_{name}"] = params.get(f"image_iothread_{name}") else: - params['image_iothread_%s' % name] = params.get('image_iothread') + params[f"image_iothread_{name}"] = params.get("image_iothread") for key, value in args.items(): - params["%s_%s" % (convert.get(key, key), name)] = value + params[f"{convert.get(key, key)}_{name}"] = value return params @@ -97,6 +109,7 @@ def run(test, params, env): :param params: Dictionary with the test parameters :param env: Dictionary with test environment. """ + def verify_qtree(params, info_qtree, info_block, qdev): """ Verifies that params, info qtree, info block and /proc/scsi/ matches @@ -121,103 +134,106 @@ def verify_qtree(params, info_qtree, info_block, qdev): test.log.error("info qtree:\n%s", info_qtree) test.log.error("info block:\n%s", info_block) test.log.error(qdev.str_bus_long()) - test.fail("%s errors occurred while verifying" - " qtree vs. params" % err) + test.fail(f"{err} errors occurred while verifying" " qtree vs. 
params") def _create_params_matrix(): matrix = {} - stg_image_name = params['stg_image_name'] + stg_image_name = params["stg_image_name"] if not stg_image_name[0] == "/": - stg_image_name = "%s/%s" % (data_dir.get_data_dir(), stg_image_name) - matrix['stg_image_name'] = stg_image_name - stg_params = params.get('stg_params', '').split(' ') + stg_image_name = f"{data_dir.get_data_dir()}/{stg_image_name}" + matrix["stg_image_name"] = stg_image_name + stg_params = params.get("stg_params", "").split(" ") for i in range(len(stg_params)): if not stg_params[i].strip(): continue - if stg_params[i][-1] == '\\': - stg_params[i] = '%s %s' % (stg_params[i][:-1], - stg_params.pop(i + 1)) + if stg_params[i][-1] == "\\": + stg_params[i] = f"{stg_params[i][:-1]} {stg_params.pop(i + 1)}" if not stg_params[i].strip(): continue - (cmd, parm) = stg_params[i].split(':', 1) + (cmd, parm) = stg_params[i].split(":", 1) # ',' separated list of values - parm = parm.split(',') + parm = parm.split(",") for j in range(len(parm)): - if parm[j][-1] == '\\': - parm[j] = '%s,%s' % (parm[j][:-1], parm.pop(j + 1)) + if parm[j][-1] == "\\": + parm[j] = f"{parm[j][:-1]},{parm.pop(j + 1)}" matrix[cmd] = parm return matrix def configure_images_params(params): params_matrix = _create_params_matrix() - _formats = params_matrix.pop('fmt', [params.get('drive_format')]) + _formats = params_matrix.pop("fmt", [params.get("drive_format")]) formats = _formats[:] usb_port_occupied = 0 - usb_max_port = params.get('usb_max_port', 6) - set_drive_bus = params.get('set_drive_bus', 'no') == 'yes' - no_disks = int(params['stg_image_num']) + usb_max_port = params.get("usb_max_port", 6) + set_drive_bus = params.get("set_drive_bus", "no") == "yes" + no_disks = int(params["stg_image_num"]) i = 0 while i < no_disks: # Set the format if len(formats) < 1: if i == 0: - test.error("Fail to add any disks, probably bad" - " configuration.") - test.log.warn("Can't create desired number '%s' of disk types " - "'%s'. Using '%d' no disks.", no_disks, - _formats, i) + test.error("Fail to add any disks, probably bad" " configuration.") + test.log.warning( + "Can't create desired number '%s' of disk types " + "'%s'. 
Using '%d' no disks.", + no_disks, + _formats, + i, + ) break - name = 'stg%d' % i - args = {'name': name, 'filename': params_matrix['stg_image_name'] % i} + name = "stg%d" % i + args = {"name": name, "filename": params_matrix["stg_image_name"] % i} fmt = random.choice(formats) drive_bus = None - if set_drive_bus and fmt != 'virtio': + if set_drive_bus and fmt != "virtio": drive_bus = str(i) - if fmt == 'virtio_scsi': - args['fmt'] = 'scsi-hd' - args['scsi_hba'] = 'virtio-scsi-pci' - elif fmt == 'lsi_scsi': - args['fmt'] = 'scsi-hd' - args['scsi_hba'] = 'lsi53c895a' - elif fmt == 'spapr_vscsi': - args['fmt'] = 'scsi-hd' - args['scsi_hba'] = 'spapr-vscsi' - elif fmt == 'usb2' or fmt == 'usb3': + if fmt == "virtio_scsi": + args["fmt"] = "scsi-hd" + args["scsi_hba"] = "virtio-scsi-pci" + elif fmt == "lsi_scsi": + args["fmt"] = "scsi-hd" + args["scsi_hba"] = "lsi53c895a" + elif fmt == "spapr_vscsi": + args["fmt"] = "scsi-hd" + args["scsi_hba"] = "spapr-vscsi" + elif fmt == "usb2" or fmt == "usb3": usb_port_occupied += 1 if usb_port_occupied > int(usb_max_port): continue - args['fmt'] = fmt + args["fmt"] = fmt else: - args['fmt'] = fmt - args['drive_bus'] = drive_bus + args["fmt"] = fmt + args["drive_bus"] = drive_bus # Other params for key, value in params_matrix.items(): args[key] = random.choice(value) env_process.preprocess_image( - test, convert_params(params, args).object_params(name), name) + test, convert_params(params, args).object_params(name), name + ) i += 1 def _postprocess_images(): # remove and check the images _disks = [] - for disk in params['images'].split(' '): + for disk in params["images"].split(" "): if disk.startswith("stg"): - env_process.postprocess_image( - test, params.object_params(disk), disk) + env_process.postprocess_image(test, params.object_params(disk), disk) else: _disks.append(disk) - params['images'] = " ".join(_disks) + params["images"] = " ".join(_disks) def verify_qtree_unsupported(params, info_qtree, info_block, qdev): - return test.log.warn("info qtree not supported. Can't verify qtree vs. " - "guest disks.") + return test.log.warning( + "info qtree not supported. Can't verify qtree vs. " "guest disks." 
+ ) def enable_driver_verifier(driver, timeout=300): return utils_test.qemu.windrv_check_running_verifier( - session, vm, test, driver, timeout) + session, vm, test, driver, timeout + ) def _initial_win_drives(): - size = params['stg_image_size'] + size = params["stg_image_size"] disks = utils_disk.get_windows_disks_index(session, size) if not utils_disk.update_windows_disk_attributes(session, disks): test.fail("Failed to update windows disk attributes.") @@ -226,10 +242,10 @@ def _initial_win_drives(): def run_stress_iozone(): error_context.context("Run iozone stress after hotplug", test.log.info) - iozone = generate_instance(params, vm, 'iozone') + iozone = generate_instance(params, vm, "iozone") try: - iozone_cmd_option = params['iozone_cmd_option'] - iozone_timeout = float(params['iozone_timeout']) + iozone_cmd_option = params["iozone_cmd_option"] + iozone_timeout = float(params["iozone_timeout"]) for letter in _initial_win_drives(): iozone.run(iozone_cmd_option.format(letter), iozone_timeout) finally: @@ -243,25 +259,26 @@ def run_stress_dd(): if not dev: continue if not re.findall(system_dev, dev): - session.cmd(params['dd_cmd'].format(dev), - int(params['dd_timeout'])) + session.cmd(params["dd_cmd"].format(dev), int(params["dd_timeout"])) - Monitor.CONNECT_TIMEOUT = params.get_numeric('connect_timeout', 60) + Monitor.CONNECT_TIMEOUT = params.get_numeric("connect_timeout", 60) BlockDevicesPlug.ACQUIRE_LOCK_TIMEOUT = params.get_numeric( - 'acquire_lock_timeout', 20) + "acquire_lock_timeout", 20 + ) BlockDevicesPlug.VERIFY_UNPLUG_TIMEOUT = params.get_numeric( - 'verify_unplug_timeout', 60) + "verify_unplug_timeout", 60 + ) configure_images_params(params) - params['start_vm'] = 'yes' + params["start_vm"] = "yes" env_process.preprocess_vm(test, params, env, params["main_vm"]) - vm = env.get_vm(params['main_vm']) + vm = env.get_vm(params["main_vm"]) session = vm.wait_for_login(timeout=int(params.get("login_timeout", 360))) - is_windows = params['os_type'] == 'windows' + is_windows = params["os_type"] == "windows" try: if is_windows: - session = enable_driver_verifier(params['driver_name']) + session = enable_driver_verifier(params["driver_name"]) out = vm.monitor.human_monitor_cmd("info qtree", debug=False) if "unknown command" in str(out): verify_qtree = verify_qtree_unsupported @@ -269,56 +286,66 @@ def run_stress_dd(): # Modprobe the module if specified in config file module = params.get("modprobe_module") if module: - session.cmd("modprobe %s" % module) + session.cmd(f"modprobe {module}") - stress_cmd = params.get('stress_cmd') + stress_cmd = params.get("stress_cmd") if stress_cmd: - funcatexit.register(env, params.get('type'), stop_stresser, vm, - params.get('stress_kill_cmd')) + funcatexit.register( + env, + params.get("type"), + stop_stresser, + vm, + params.get("stress_kill_cmd"), + ) stress_session = vm.wait_for_login(timeout=10) - for _ in range(int(params.get('no_stress_cmds', 1))): + for _ in range(int(params.get("no_stress_cmds", 1))): stress_session.sendline(stress_cmd) rp_times = int(params.get("repeat_times", 1)) queues = params.get("multi_disk_type") == "parallel" - timeout = params.get_numeric('plug_timeout', 300) - interval_time_unplug = params.get_numeric('interval_time_unplug', 0) + timeout = params.get_numeric("plug_timeout", 300) + interval_time_unplug = params.get_numeric("interval_time_unplug", 0) if queues: # parallel - hotplug, unplug = 'hotplug_devs_threaded', 'unplug_devs_threaded' - else: # serial - hotplug, unplug = 'hotplug_devs_serial', 
'unplug_devs_serial' + hotplug, unplug = "hotplug_devs_threaded", "unplug_devs_threaded" + else: # serial + hotplug, unplug = "hotplug_devs_serial", "unplug_devs_serial" context_msg = "Running sub test '%s' %s" plug = BlockDevicesPlug(vm) for iteration in range(rp_times): - error_context.context("Hotplugging/unplugging devices, iteration %d" - % iteration, test.log.info) + error_context.context( + "Hotplugging/unplugging devices, iteration %d" % iteration, + test.log.info, + ) sub_type = params.get("sub_type_before_plug") if sub_type: - error_context.context(context_msg % (sub_type, "before hotplug"), - test.log.info) + error_context.context( + context_msg % (sub_type, "before hotplug"), test.log.info + ) utils_test.run_virt_sub_test(test, params, env, sub_type) error_context.context("Hotplug the devices", test.log.debug) getattr(plug, hotplug)(timeout=timeout) - time.sleep(float(params.get('wait_after_hotplug', 0))) + time.sleep(float(params.get("wait_after_hotplug", 0))) error_context.context("Verify disks after hotplug", test.log.debug) - info_qtree = vm.monitor.info('qtree', False) + info_qtree = vm.monitor.info("qtree", False) info_block = vm.monitor.info_block(False) vm.verify_alive() verify_qtree(params, info_qtree, info_block, vm.devices) sub_type = params.get("sub_type_after_plug") if sub_type: - error_context.context(context_msg % (sub_type, "after hotplug"), - test.log.info) + error_context.context( + context_msg % (sub_type, "after hotplug"), test.log.info + ) utils_test.run_virt_sub_test(test, params, env, sub_type) run_stress_iozone() if is_windows else run_stress_dd() sub_type = params.get("sub_type_before_unplug") if sub_type: - error_context.context(context_msg % (sub_type, "before hotunplug"), - test.log.info) + error_context.context( + context_msg % (sub_type, "before hotunplug"), test.log.info + ) utils_test.run_virt_sub_test(test, params, env, sub_type) error_context.context("Unplug and remove the devices", test.log.debug) @@ -330,37 +357,40 @@ def run_stress_dd(): _postprocess_images() error_context.context("Verify disks after unplug", test.log.debug) - time.sleep(params.get_numeric('wait_after_unplug', 0, float)) - info_qtree = vm.monitor.info('qtree', False) + time.sleep(params.get_numeric("wait_after_unplug", 0, float)) + info_qtree = vm.monitor.info("qtree", False) info_block = vm.monitor.info_block(False) vm.verify_alive() verify_qtree(params, info_qtree, info_block, vm.devices) sub_type = params.get("sub_type_after_unplug") if sub_type: - error_context.context(context_msg % (sub_type, "after hotunplug"), - test.log.info) + error_context.context( + context_msg % (sub_type, "after hotunplug"), test.log.info + ) utils_test.run_virt_sub_test(test, params, env, sub_type) configure_images_params(params) # Check for various KVM failures - error_context.context("Validating VM after all disk hotplug/unplugs", - test.log.debug) + error_context.context( + "Validating VM after all disk hotplug/unplugs", test.log.debug + ) vm.verify_alive() - out = session.cmd_output('dmesg') + out = session.cmd_output("dmesg") if "I/O error" in out: - test.log.warn(out) - test.error("I/O error messages occured in dmesg, " - "check the log for details.") + test.log.warning(out) + test.error( + "I/O error messages occured in dmesg, " "check the log for details." 
+ ) except Exception as e: pid = vm.get_pid() test.log.debug("Find %s Exception:'%s'.", pid, str(e)) if pid: logdir = test.logdir - process.getoutput("gstack %s > %s/gstack.log" % (pid, logdir)) + process.getoutput(f"gstack {pid} > {logdir}/gstack.log") process.getoutput( - "timeout 20 strace -tt -T -v -f -s 32 -p %s -o %s/strace.log" % ( - pid, logdir)) + f"timeout 20 strace -tt -T -v -f -s 32 -p {pid} -o {logdir}/strace.log" + ) else: test.log.debug("VM dead...") raise e diff --git a/qemu/tests/multi_disk_wild_hotplug.py b/qemu/tests/multi_disk_wild_hotplug.py index 69f36a2350..156262fb30 100644 --- a/qemu/tests/multi_disk_wild_hotplug.py +++ b/qemu/tests/multi_disk_wild_hotplug.py @@ -1,13 +1,13 @@ """Wild hot-plug-unplug test""" + import copy -import time import os +import time from avocado.utils import process - -from virttest.qemu_monitor import QMPCmdError -from virttest import error_context, env_process from virttest import data_dir as virttest_data_dir +from virttest import env_process, error_context +from virttest.qemu_monitor import QMPCmdError # This decorator makes the test function aware of context strings @@ -48,12 +48,12 @@ def _simple_io_test(): def _configure_images_params(): for i in range(image_num): name = "stg%d" % i - params['image_name_%s' % name] = "images/%s" % name - params['image_size_%s' % name] = stg_image_size + params[f"image_name_{name}"] = f"images/{name}" + params[f"image_size_{name}"] = stg_image_size params["images"] = params["images"] + " " + name if params["drive_format"] == "scsi-hd": - params["drive_bus_%s" % name] = 0 if share_bus == "yes" else 1 - params["blk_extra_params_%s" % name] = "lun=%d" % (i+1) + params[f"drive_bus_{name}"] = 0 if share_bus == "yes" else 1 + params[f"blk_extra_params_{name}"] = "lun=%d" % (i + 1) image_params = params.object_params(name) env_process.preprocess_image(test, image_params, name) @@ -69,7 +69,7 @@ def _hotplug_images(): try: vm.monitor.cmd("device_add", images_params[name], debug=False) except QMPCmdError as e: - test.log.warning('Ignore hotplug error: %s', str(e)) + test.log.warning("Ignore hotplug error: %s", str(e)) def _hotunplug_images(): for i in range(1, image_num): @@ -77,7 +77,7 @@ def _hotunplug_images(): try: vm.monitor.cmd("device_del", {"id": name}, debug=False) except QMPCmdError as e: - test.log.warning('Ignore hotunplug error: %s', str(e)) + test.log.warning("Ignore hotunplug error: %s", str(e)) stg_image_size = params.get("stg_image_size", "256M") image_num = params.get_numeric("stg_image_num", 20) @@ -89,12 +89,11 @@ def _hotunplug_images(): images_params = {} error_context.context("Create images %d" % image_num, test.log.info) _configure_images_params() - params['start_vm'] = 'yes' + params["start_vm"] = "yes" env_process.preprocess_vm(test, params, env, params["main_vm"]) - vm = env.get_vm(params['main_vm']) + vm = env.get_vm(params["main_vm"]) try: - session = vm.wait_for_login( - timeout=params.get_numeric("login_timeout", 360)) + session = vm.wait_for_login(timeout=params.get_numeric("login_timeout", 360)) error_context.context("Get images params", test.log.info) _get_images_params() @@ -120,22 +119,22 @@ def _hotunplug_images(): time.sleep(wait_time) error_context.context("Check disks in guest.", test.log.info) # re-login in case previous session is expired - session = vm.wait_for_login( - timeout=params.get_numeric("relogin_timeout", 60)) + session = vm.wait_for_login(timeout=params.get_numeric("relogin_timeout", 60)) new_disks_num = int(session.cmd("lsblk -d -n|wc -l", timeout=300)) 
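# A minimal sketch (placeholder values) of the two formatting styles this
# patch keeps side by side: one-shot shell command strings become f-strings,
# while test.log.* calls keep lazy %-style arguments so the message is only
# rendered when that log level is actually emitted.
import logging

log = logging.getLogger("avocado.test")
pid, logdir = 4242, "/tmp/avocado-results"
cmd = f"gstack {pid} > {logdir}/gstack.log"  # built eagerly, used once
log.debug("collected backtrace for pid %s in %s", pid, logdir)  # formatted lazily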
test.log.info("There are total %d disks after hotplug", new_disks_num) if new_disks_num != disks_num: - test.log.warning("Find unmatched disk numbers %d %d", disks_num, - new_disks_num) + test.log.warning( + "Find unmatched disk numbers %d %d", disks_num, new_disks_num + ) except Exception as e: pid = vm.get_pid() test.log.debug("Find %s Exception:'%s'.", pid, str(e)) if pid: logdir = test.logdir - process.getoutput("gstack %s > %s/gstack.log" % (pid, logdir)) + process.getoutput(f"gstack {pid} > {logdir}/gstack.log") process.getoutput( - "timeout 20 strace -tt -T -v -f -s 32 -p %s -o %s/strace.log" % ( - pid, logdir)) + f"timeout 20 strace -tt -T -v -f -s 32 -p {pid} -o {logdir}/strace.log" + ) else: test.log.debug("VM dead...") raise e diff --git a/qemu/tests/multi_macvtap_devices.py b/qemu/tests/multi_macvtap_devices.py index ede4832b30..eb52a318e6 100644 --- a/qemu/tests/multi_macvtap_devices.py +++ b/qemu/tests/multi_macvtap_devices.py @@ -2,40 +2,37 @@ import time import aexpect - from avocado.utils import process +from virttest import env_process, error_context, utils_net -from virttest import error_context -from virttest import utils_net -from virttest import env_process - -LOG_JOB = logging.getLogger('avocado.test') +LOG_JOB = logging.getLogger("avocado.test") -def guest_ping(test, session, dst_ip, count=None, os_type="linux", - p_size=1472, timeout=360): +def guest_ping( + test, session, dst_ip, count=None, os_type="linux", p_size=1472, timeout=360 +): """ Do ping test in guest """ ping_cmd = "ping" if os_type == "linux": if count: - ping_cmd += " -c %s" % count - ping_cmd += " -s %s %s" % (p_size, dst_ip) + ping_cmd += f" -c {count}" + ping_cmd += f" -s {p_size} {dst_ip}" else: if not count: ping_cmd += " -t " - ping_cmd += " -l %s %s" % (p_size, dst_ip) + ping_cmd += f" -l {p_size} {dst_ip}" try: LOG_JOB.debug("Ping dst vm with cmd: '%s'", ping_cmd) session.cmd(ping_cmd, timeout=timeout) except aexpect.ShellTimeoutError as err: if count: - test.error("Error during ping guest ip, %s" % err) + test.error(f"Error during ping guest ip, {err}") def wait_guest_network_up(test, session, dst_ip, timeout=180): - txt = "Check whether guest network up by ping %s " % dst_ip + txt = f"Check whether guest network up by ping {dst_ip} " error_context.context(txt, LOG_JOB.info) end_time = time.time() + timeout while time.time() < end_time: @@ -76,19 +73,20 @@ def run(test, params, env): try: ext_host = process.system_output(ext_host_get_cmd, shell=True) except process.CmdError: - test.log.warn("Can't get specified host with cmd '%s'," - " Fallback to default host '%s'", - ext_host_get_cmd, default_host) + test.log.warning( + "Can't get specified host with cmd '%s'," " Fallback to default host '%s'", + ext_host_get_cmd, + default_host, + ) ext_host = default_host try: - txt = "Create and up %s macvtap devices in setting mode." % macvtap_num + txt = f"Create and up {macvtap_num} macvtap devices in setting mode." 
error_context.context(txt, test.log.info) for num in range(macvtap_num): mac = utils_net.generate_mac_address_simple() - ifname = "%s_%s" % (macvtap_mode, num) - tapfd = utils_net.create_and_open_macvtap(ifname, macvtap_mode, - 1, netdst, mac) - check_cmd = "ip -d link show %s" % ifname + ifname = f"{macvtap_mode}_{num}" + utils_net.create_and_open_macvtap(ifname, macvtap_mode, 1, netdst, mac) + check_cmd = f"ip -d link show {ifname}" output = process.system_output(check_cmd) test.log.debug(output) macvtap_ifnames.append(ifname) @@ -102,18 +100,17 @@ def run(test, params, env): vm.verify_alive() session = vm.wait_for_serial_login(timeout=timeout) if wait_guest_network_up(test, session, ext_host, timeout=timeout): - txt = " Ping from guests to %s for %s counts." % (ext_host, - ping_count) + txt = f" Ping from guests to {ext_host} for {ping_count} counts." error_context.context(txt, test.log.info) guest_ping(test, session, ext_host, 100) else: ipconfig_cmd = params.get("ipconfig_cmd", "ifconfig -a") out = session.cmd(ipconfig_cmd) msg = "Could not ping %s successful after %ss." - msg += "Guest network status (%s): %s" % (ipconfig_cmd, out) + msg += f"Guest network status ({ipconfig_cmd}): {out}" test.fail(msg) finally: error_context.context("Delete all macvtap interfaces.", test.log.info) for ifname in macvtap_ifnames: - del_cmd = "ip link delete %s" % ifname + del_cmd = f"ip link delete {ifname}" process.system(del_cmd, ignore_status=True) diff --git a/qemu/tests/multi_nics_stress.py b/qemu/tests/multi_nics_stress.py index 0e3d3c6892..8af590e2ae 100644 --- a/qemu/tests/multi_nics_stress.py +++ b/qemu/tests/multi_nics_stress.py @@ -1,32 +1,41 @@ -import os import logging +import os import time -from virttest import error_context -from virttest import utils_net -from virttest import utils_netperf -from virttest import utils_misc -from virttest import data_dir -from virttest import utils_test -from virttest import env_process +from virttest import ( + data_dir, + env_process, + error_context, + utils_misc, + utils_net, + utils_netperf, + utils_test, +) from virttest.staging import utils_memory -LOG_JOB = logging.getLogger('avocado.test') +LOG_JOB = logging.getLogger("avocado.test") -def launch_netperf_client(test, server_ips, netperf_clients, test_option, - test_duration, netperf_para_sess, - netperf_cmd_prefix, params): +def launch_netperf_client( + test, + server_ips, + netperf_clients, + test_option, + test_duration, + netperf_para_sess, + netperf_cmd_prefix, + params, +): """ start netperf client in guest. 
""" LOG_JOB.info("server_ips = %s", server_ips) for s_ip in server_ips: for n_client in netperf_clients: - n_client.bg_start(s_ip, test_option, - netperf_para_sess, netperf_cmd_prefix) - if utils_misc.wait_for(n_client.is_netperf_running, 10, 0, 3, - "Wait netperf test start"): + n_client.bg_start(s_ip, test_option, netperf_para_sess, netperf_cmd_prefix) + if utils_misc.wait_for( + n_client.is_netperf_running, 10, 0, 3, "Wait netperf test start" + ): LOG_JOB.info("Netperf test start successfully.") else: test.error("Can not start netperf client.") @@ -72,8 +81,7 @@ def run(test, params, env): os_type = params.get("os_type") shell_prompt = params.get("shell_prompt", r"^root@.*[\#\$]\s*$|#") disable_firewall = params.get("disable_firewall", "") - linesep = params.get( - "shell_linesep", "\n").encode().decode('unicode_escape') + linesep = params.get("shell_linesep", "\n").encode().decode("unicode_escape") status_test_command = params.get("status_test_command", "echo $?") ping_count = int(params.get("ping_count", 10)) compile_option_client = params.get("compile_option_client", "") @@ -90,8 +98,10 @@ def run(test, params, env): host_mem = utils_memory.memtotal() // (1024 * 1024) vm_mem = host_mem / (len(vms.split()) + 1) * 1024 if vm_mem < params.get_numeric("min_mem"): - test.cancel("Host total memory is insufficient for this test case," - "each VM's memory can not meet guest OS's requirement") + test.cancel( + "Host total memory is insufficient for this test case," + "each VM's memory can not meet guest OS's requirement" + ) params["mem"] = vm_mem params["start_vm"] = "yes" @@ -102,27 +112,23 @@ def run(test, params, env): server_vm = env.get_vm(server) server_vm.verify_alive() server_ctl = server_vm.wait_for_serial_login(timeout=login_timeout) - error_context.context("Stop fireware on netperf server guest.", - test.log.info) + error_context.context( + "Stop fireware on netperf server guest.", test.log.info + ) server_ctl.cmd(disable_firewall, ignore_all_errors=True) server_ip = server_vm.get_address() server_ips.append(server_ip) s_info["ip"] = server_ip - s_info["os_type"] = params.get("os_type_%s" % server, os_type) - s_info["username"] = params.get("username_%s" % server, - guest_username) - s_info["password"] = params.get("password_%s" % server, - guest_password) - s_info["shell_client"] = params.get("shell_client_%s" % server, - shell_client) - s_info["shell_port"] = params.get("shell_port_%s" % server, - shell_port) - s_info["shell_prompt"] = params.get("shell_prompt_%s" % server, - shell_prompt) - s_info["linesep"] = params.get("linesep_%s" % server, - linesep) - s_info["status_test_command"] = params.get("status_test_command_%s" % server, - status_test_command) + s_info["os_type"] = params.get(f"os_type_{server}", os_type) + s_info["username"] = params.get(f"username_{server}", guest_username) + s_info["password"] = params.get(f"password_{server}", guest_password) + s_info["shell_client"] = params.get(f"shell_client_{server}", shell_client) + s_info["shell_port"] = params.get(f"shell_port_{server}", shell_port) + s_info["shell_prompt"] = params.get(f"shell_prompt_{server}", shell_prompt) + s_info["linesep"] = params.get(f"linesep_{server}", linesep) + s_info["status_test_command"] = params.get( + f"status_test_command_{server}", status_test_command + ) else: err = "Only support setup netperf server in guest." 
test.error(err) @@ -135,12 +141,12 @@ def run(test, params, env): client_vm.verify_alive() client_ctl = client_vm.wait_for_serial_login(timeout=login_timeout) if params.get("dhcp_cmd"): - status, output = client_ctl.cmd_status_output(params["dhcp_cmd"], timeout=600) + status, output = client_ctl.cmd_status_output( + params["dhcp_cmd"], timeout=600 + ) if status: - test.log.warn("Failed to execute dhcp-command, output:\n %s", - output) - error_context.context("Stop fireware on netperf client guest.", - test.log.info) + test.log.warning("Failed to execute dhcp-command, output:\n %s", output) + error_context.context("Stop fireware on netperf client guest.", test.log.info) client_ctl.cmd(disable_firewall, ignore_all_errors=True) client_ip = client_vm.get_address() @@ -153,44 +159,42 @@ def run(test, params, env): client_ips.append(client_vm.get_address(index=i)) c_info["ip"] = client_ip - c_info["os_type"] = params.get("os_type_%s" % client, os_type) - c_info["username"] = params.get("username_%s" % client, - guest_username) - c_info["password"] = params.get("password_%s" % client, - guest_password) - c_info["shell_client"] = params.get("shell_client_%s" % client, - shell_client) - c_info["shell_port"] = params.get("shell_port_%s" % client, - shell_port) - c_info["shell_prompt"] = params.get("shell_prompt_%s" % client, - shell_prompt) - c_info["linesep"] = params.get("linesep_%s" % client, - linesep) - c_info["status_test_command"] = params.get("status_test_command_%s" % client, - status_test_command) + c_info["os_type"] = params.get(f"os_type_{client}", os_type) + c_info["username"] = params.get(f"username_{client}", guest_username) + c_info["password"] = params.get(f"password_{client}", guest_password) + c_info["shell_client"] = params.get(f"shell_client_{client}", shell_client) + c_info["shell_port"] = params.get(f"shell_port_{client}", shell_port) + c_info["shell_prompt"] = params.get(f"shell_prompt_{client}", shell_prompt) + c_info["linesep"] = params.get(f"linesep_{client}", linesep) + c_info["status_test_command"] = params.get( + f"status_test_command_{client}", status_test_command + ) else: err = "Only support setup netperf client in guest." 
test.error(err) client_infos.append(c_info) if params.get("os_type") == "linux": - error_context.context("Config static route in netperf server guest.", - test.log.info) + error_context.context( + "Config static route in netperf server guest.", test.log.info + ) nics_list = utils_net.get_linux_ifname(client_ctl) for ip in server_ips: index = server_ips.index(ip) % len(nics_list) - client_ctl.cmd("route add -host %s %s" % (ip, nics_list[index])) + client_ctl.cmd(f"route add -host {ip} {nics_list[index]}") netperf_link = params.get("netperf_link") netperf_link = os.path.join(data_dir.get_deps_dir("netperf"), netperf_link) md5sum = params.get("pkg_md5sum") netperf_server_link = params.get("netperf_server_link_win", netperf_link) - netperf_server_link = os.path.join(data_dir.get_deps_dir("netperf"), - netperf_server_link) + netperf_server_link = os.path.join( + data_dir.get_deps_dir("netperf"), netperf_server_link + ) server_md5sum = params.get("server_md5sum") netperf_client_link = params.get("netperf_client_link_win", netperf_link) - netperf_client_link = os.path.join(data_dir.get_deps_dir("netperf"), - netperf_client_link) + netperf_client_link = os.path.join( + data_dir.get_deps_dir("netperf"), netperf_client_link + ) client_md5sum = params.get("client_md5sum") server_path_linux = params.get("server_path", "/var/tmp") @@ -209,17 +213,20 @@ def run(test, params, env): else: netperf_link_c = netperf_link client_path = client_path_linux - n_client = utils_netperf.NetperfClient(c_info["ip"], - client_path, - md5sum, netperf_link_c, - client=c_info["shell_client"], - port=c_info["shell_port"], - username=c_info["username"], - password=c_info["password"], - prompt=c_info["shell_prompt"], - linesep=c_info["linesep"], - status_test_command=c_info["status_test_command"], - compile_option=compile_option_client) + n_client = utils_netperf.NetperfClient( + c_info["ip"], + client_path, + md5sum, + netperf_link_c, + client=c_info["shell_client"], + port=c_info["shell_port"], + username=c_info["username"], + password=c_info["password"], + prompt=c_info["shell_prompt"], + linesep=c_info["linesep"], + status_test_command=c_info["status_test_command"], + compile_option=compile_option_client, + ) netperf_clients.append(n_client) error_context.context("Setup netperf server.", test.log.info) for s_info in server_infos: @@ -230,17 +237,20 @@ def run(test, params, env): else: netperf_link_s = netperf_link server_path = server_path_linux - n_server = utils_netperf.NetperfServer(s_info["ip"], - server_path, - md5sum, netperf_link_s, - client=s_info["shell_client"], - port=s_info["shell_port"], - username=s_info["username"], - password=s_info["password"], - prompt=s_info["shell_prompt"], - linesep=s_info["linesep"], - status_test_command=s_info["status_test_command"], - compile_option=compile_option_server) + n_server = utils_netperf.NetperfServer( + s_info["ip"], + server_path, + md5sum, + netperf_link_s, + client=s_info["shell_client"], + port=s_info["shell_port"], + username=s_info["username"], + password=s_info["password"], + prompt=s_info["shell_prompt"], + linesep=s_info["linesep"], + status_test_command=s_info["status_test_command"], + compile_option=compile_option_server, + ) netperf_servers.append(n_server) try: @@ -254,29 +264,35 @@ def run(test, params, env): netperf_cmd_prefix = params.get("netperf_cmd_prefix", "") error_context.context("Start netperf clients.", test.log.info) for protocol in test_protocols.split(): - error_context.context("Testing %s protocol" % protocol, - test.log.info) + 
error_context.context(f"Testing {protocol} protocol", test.log.info) sessions_test = netperf_sessions.split() sizes_test = p_sizes.split() for size in sizes_test: for sess in sessions_test: test_option = params.get("test_option", "") - test_option += " -t %s -l %s " % (protocol, test_duration) - test_option += " -- -m %s" % size - launch_netperf_client(test, server_ips, netperf_clients, - test_option, test_duration, sess, - netperf_cmd_prefix, params) + test_option += f" -t {protocol} -l {test_duration} " + test_option += f" -- -m {size}" + launch_netperf_client( + test, + server_ips, + netperf_clients, + test_option, + test_duration, + sess, + netperf_cmd_prefix, + params, + ) error_context.context("Ping test after netperf testing.", test.log.info) for s_ip in server_ips: - status, output = utils_test.ping(s_ip, ping_count, - timeout=float(ping_count) * 1.5) + status, output = utils_test.ping( + s_ip, ping_count, timeout=float(ping_count) * 1.5 + ) if status != 0: - test.fail("Ping returns non-zero value %s" % output) + test.fail(f"Ping returns non-zero value {output}") package_lost = utils_test.get_loss_ratio(output) if package_lost != 0: - test.fail("%s packeage lost when ping server ip %s " % - (package_lost, server)) + test.fail(f"{package_lost} packeage lost when ping server ip {server} ") finally: for n_server in netperf_servers: n_server.stop() diff --git a/qemu/tests/multi_nics_verify.py b/qemu/tests/multi_nics_verify.py index 28bee82122..14f6353206 100644 --- a/qemu/tests/multi_nics_verify.py +++ b/qemu/tests/multi_nics_verify.py @@ -1,9 +1,6 @@ import os -from virttest import error_context -from virttest import utils_net -from virttest import env_process -from virttest import utils_misc +from virttest import env_process, error_context, utils_misc, utils_net @error_context.context_aware @@ -23,6 +20,7 @@ def run(test, params, env): :param params: Dictionary with the test parameters :param env: Dictionary with test environment. 
""" + def check_nics_num(expect_c, session): """ Check whether guest NICs number match with params set in cfg file @@ -40,13 +38,13 @@ def check_nics_num(expect_c, session): if not expect_c == actual_c: msg += "Nics count mismatch!\n" return (False, msg) - return (True, msg + 'Nics count match') + return (True, msg + "Nics count match") # Get the ethernet cards number from params nics_num = int(params.get("nics_num", 8)) for i in range(nics_num): - nics = "nic%s" % i - params["nics"] = ' '.join([params["nics"], nics]) + nics = f"nic{i}" + params["nics"] = " ".join([params["nics"], nics]) params["start_vm"] = "yes" env_process.preprocess_vm(test, params, env, params["main_vm"]) @@ -77,20 +75,26 @@ def check_nics_num(expect_c, session): if network_manager: for ifname in ifname_list: eth_keyfile_path = keyfile_path % ifname - cmd = "nmcli --offline connection add type ethernet con-name %s ifname %s" \ - " ipv4.method auto > %s" % (ifname, ifname, eth_keyfile_path) + cmd = ( + f"nmcli --offline connection add type ethernet con-name {ifname} ifname {ifname}" + f" ipv4.method auto > {eth_keyfile_path}" + ) s, o = session.cmd_status_output(cmd) if s != 0: err_msg = "Failed to create ether keyfile: %s\nReason is: %s" test.error(err_msg % (eth_keyfile_path, o)) - session.cmd("chown root:root /etc/NetworkManager/system-connections/*.nmconnection") - session.cmd("chmod 600 /etc/NetworkManager/system-connections/*.nmconnection") + session.cmd( + "chown root:root /etc/NetworkManager/system-connections/*.nmconnection" + ) + session.cmd( + "chmod 600 /etc/NetworkManager/system-connections/*.nmconnection" + ) session.cmd("nmcli connection reload") else: for ifname in ifname_list: eth_config_path = ifcfg_path % ifname - eth_config = "DEVICE=%s\\nBOOTPROTO=dhcp\\nONBOOT=yes" % ifname - cmd = "echo -e '%s' > %s" % (eth_config, eth_config_path) + eth_config = f"DEVICE={ifname}\\nBOOTPROTO=dhcp\\nONBOOT=yes" + cmd = f"echo -e '{eth_config}' > {eth_config_path}" s, o = session.cmd_status_output(cmd) if s != 0: err_msg = "Failed to create ether config file: %s\nReason is: %s" @@ -108,24 +112,28 @@ def check_nics_num(expect_c, session): def _check_ip_number(): for index, nic in enumerate(vm.virtnet): - guest_ip = utils_net.get_guest_ip_addr(session_srl, nic.mac, os_type, - ip_version="ipv4") + guest_ip = utils_net.get_guest_ip_addr( + session_srl, nic.mac, os_type, ip_version="ipv4" + ) if not guest_ip: return False return True # Check all the interfaces in guest get ips - session_srl = vm.wait_for_serial_login(timeout=int(params.get("login_timeout", 360))) + session_srl = vm.wait_for_serial_login( + timeout=int(params.get("login_timeout", 360)) + ) if not utils_misc.wait_for(_check_ip_number, 1000, step=10): test.error("Timeout when wait for nics to get ip") nic_interface = [] for index, nic in enumerate(vm.virtnet): test.log.info("index %s nic", index) - guest_ip = utils_net.get_guest_ip_addr(session_srl, nic.mac, os_type, - ip_version="ipv4") + guest_ip = utils_net.get_guest_ip_addr( + session_srl, nic.mac, os_type, ip_version="ipv4" + ) if not guest_ip: - err_log = "vm get interface %s's ip failed." % index + err_log = f"vm get interface {index}'s ip failed." 
test.fail(err_log) nic_interface.append(guest_ip) session_srl.close() diff --git a/qemu/tests/multi_vms_file_transfer.py b/qemu/tests/multi_vms_file_transfer.py index b9e4b65e57..1fe650d03a 100644 --- a/qemu/tests/multi_vms_file_transfer.py +++ b/qemu/tests/multi_vms_file_transfer.py @@ -1,13 +1,8 @@ -import time import os +import time -from avocado.utils import crypto -from avocado.utils import process - -from virttest import error_context -from virttest import remote -from virttest import utils_misc -from virttest import data_dir +from avocado.utils import crypto, process +from virttest import data_dir, error_context, remote, utils_misc @error_context.context_aware @@ -29,18 +24,19 @@ def run(test, params, env): :param params: Dictionary with the test parameters. :param env: Dictionary with test environment. """ + def md5_check(session, orig_md5): msg = "Compare copied file's md5 with original file." error_context.context(msg, test.log.info) - md5_cmd = "md5sum %s | awk '{print $1}'" % guest_path + md5_cmd = f"md5sum {guest_path} | awk '{{print $1}}'" s, o = session.cmd_status_output(md5_cmd) if s: - msg = "Fail to get md5 value from guest. Output is %s" % o + msg = f"Fail to get md5 value from guest. Output is {o}" test.error(msg) new_md5 = o.splitlines()[-1] if new_md5 != orig_md5: msg = "File changed after transfer host -> VM1. Original md5 value" - msg += " is %s. Current md5 value is %s" % (orig_md5, new_md5) + msg += f" is {orig_md5}. Current md5 value is {new_md5}" test.fail(msg) vm1 = env.get_vm(params["main_vm"]) @@ -65,76 +61,95 @@ def md5_check(session, orig_md5): if count == 0: count = 1 - host_path = os.path.join(tmp_dir, "tmp-%s" % - utils_misc.generate_random_string(8)) + host_path = os.path.join(tmp_dir, f"tmp-{utils_misc.generate_random_string(8)}") cmd = "dd if=/dev/zero of=%s bs=10M count=%d" % (host_path, count) - guest_path = os.path.join(tmp_dir_guest, "file_transfer-%s" % - utils_misc.generate_random_string(8)) + guest_path = os.path.join( + tmp_dir_guest, f"file_transfer-{utils_misc.generate_random_string(8)}" + ) try: - error_context.context("Creating %dMB file on host" % filesize, - test.log.info) + error_context.context("Creating %dMB file on host" % filesize, test.log.info) process.run(cmd) orig_md5 = crypto.hash_file(host_path, algorithm="md5") - error_context.context("Transferring file host -> VM1, timeout: %ss" % - transfer_timeout, test.log.info) + error_context.context( + f"Transferring file host -> VM1, timeout: {transfer_timeout}s", + test.log.info, + ) t_begin = time.time() vm1.copy_files_to(host_path, guest_path, timeout=transfer_timeout) t_end = time.time() throughput = filesize / (t_end - t_begin) - test.log.info("File transfer host -> VM1 succeed, " - "estimated throughput: %.2fMB/s", throughput) + test.log.info( + "File transfer host -> VM1 succeed, " "estimated throughput: %.2fMB/s", + throughput, + ) md5_check(session_vm1, orig_md5) ip_vm1 = vm1.get_address() ip_vm2 = vm2.get_address() for i in range(repeat_time): - log_vm1 = os.path.join( - test.debugdir, "remote_scp_to_vm1_%s.log" % i) - log_vm2 = os.path.join( - test.debugdir, "remote_scp_to_vm2_%s.log" % i) + log_vm1 = os.path.join(test.debugdir, f"remote_scp_to_vm1_{i}.log") + log_vm2 = os.path.join(test.debugdir, f"remote_scp_to_vm2_{i}.log") - msg = "Transferring file VM1 -> VM2, timeout: %ss." % transfer_timeout - msg += " Repeat: %s/%s" % (i + 1, repeat_time) + msg = f"Transferring file VM1 -> VM2, timeout: {transfer_timeout}s." 
+ msg += f" Repeat: {i + 1}/{repeat_time}" error_context.context(msg, test.log.info) t_begin = time.time() - s = remote.scp_between_remotes(src=ip_vm1, dst=ip_vm2, port=port, - s_passwd=password, d_passwd=password, - s_name=username, d_name=username, - s_path=guest_path, d_path=guest_path, - timeout=transfer_timeout, - log_filename=log_vm2) + remote.scp_between_remotes( + src=ip_vm1, + dst=ip_vm2, + port=port, + s_passwd=password, + d_passwd=password, + s_name=username, + d_name=username, + s_path=guest_path, + d_path=guest_path, + timeout=transfer_timeout, + log_filename=log_vm2, + ) t_end = time.time() throughput = filesize / (t_end - t_begin) - test.log.info("File transfer VM1 -> VM2 succeed, " - "estimated throughput: %.2fMB/s", throughput) + test.log.info( + "File transfer VM1 -> VM2 succeed, " "estimated throughput: %.2fMB/s", + throughput, + ) md5_check(session_vm2, orig_md5) - session_vm1.cmd("rm -rf %s" % guest_path) + session_vm1.cmd(f"rm -rf {guest_path}") - msg = "Transferring file VM2 -> VM1, timeout: %ss." % transfer_timeout - msg += " Repeat: %s/%s" % (i + 1, repeat_time) + msg = f"Transferring file VM2 -> VM1, timeout: {transfer_timeout}s." + msg += f" Repeat: {i + 1}/{repeat_time}" error_context.context(msg, test.log.info) t_begin = time.time() - remote.scp_between_remotes(src=ip_vm2, dst=ip_vm1, port=port, - s_passwd=password, d_passwd=password, - s_name=username, d_name=username, - s_path=guest_path, d_path=guest_path, - timeout=transfer_timeout, - log_filename=log_vm1) + remote.scp_between_remotes( + src=ip_vm2, + dst=ip_vm1, + port=port, + s_passwd=password, + d_passwd=password, + s_name=username, + d_name=username, + s_path=guest_path, + d_path=guest_path, + timeout=transfer_timeout, + log_filename=log_vm1, + ) t_end = time.time() throughput = filesize / (t_end - t_begin) - test.log.info("File transfer VM2 -> VM1 succeed, " - "estimated throughput: %.2fMB/s", throughput) + test.log.info( + "File transfer VM2 -> VM1 succeed, " "estimated throughput: %.2fMB/s", + throughput, + ) md5_check(session_vm1, orig_md5) - session_vm2.cmd("%s %s" % (clean_cmd, guest_path)) + session_vm2.cmd(f"{clean_cmd} {guest_path}") finally: try: - session_vm1.cmd("%s %s" % (clean_cmd, guest_path)) + session_vm1.cmd(f"{clean_cmd} {guest_path}") except Exception: pass try: - session_vm2.cmd("%s %s" % (clean_cmd, guest_path)) + session_vm2.cmd(f"{clean_cmd} {guest_path}") except Exception: pass try: diff --git a/qemu/tests/multi_vms_nics.py b/qemu/tests/multi_vms_nics.py index 460b506b21..c95b27eb5a 100644 --- a/qemu/tests/multi_vms_nics.py +++ b/qemu/tests/multi_vms_nics.py @@ -1,14 +1,14 @@ import re -from avocado.utils import process -from avocado.utils import cpu - -from virttest import error_context -from virttest import utils_test -from virttest import remote -from virttest import utils_net -from virttest import utils_misc -from virttest import env_process +from avocado.utils import cpu, process +from virttest import ( + env_process, + error_context, + remote, + utils_misc, + utils_net, + utils_test, +) from virttest.staging import utils_memory @@ -42,40 +42,59 @@ def run(test, params, env): """ def ping(session, nic, dst_ip, strick_check, flood_minutes): - d_packet_size = [1, 4, 48, 512, 1440, 1500, 1505, 4054, 4055, 4096, - 4192, 8878, 9000, 32767, 65507] + d_packet_size = [ + 1, + 4, + 48, + 512, + 1440, + 1500, + 1505, + 4054, + 4055, + 4096, + 4192, + 8878, + 9000, + 32767, + 65507, + ] packet_size = params.get("packet_size", "").split() or d_packet_size for size in packet_size: - 
error_context.context("Ping with packet size %s" % size, - test.log.info) - status, output = utils_test.ping(dst_ip, 10, interface=nic, - packetsize=size, timeout=30, - session=session) + error_context.context(f"Ping with packet size {size}", test.log.info) + status, output = utils_test.ping( + dst_ip, 10, interface=nic, packetsize=size, timeout=30, session=session + ) if strict_check: ratio = utils_test.get_loss_ratio(output) if ratio != 0: - test.fail("Loss ratio is %s for packet size" - " %s" % (ratio, size)) + test.fail(f"Loss ratio is {ratio} for packet size" f" {size}") else: if status != 0: - test.fail("Ping returns non-zero value %s" % output) + test.fail(f"Ping returns non-zero value {output}") error_context.context("Flood ping test", test.log.info) - utils_test.ping(dst_ip, None, interface=nic, flood=True, - output_func=None, timeout=flood_minutes * 60, - session=session) + utils_test.ping( + dst_ip, + None, + interface=nic, + flood=True, + output_func=None, + timeout=flood_minutes * 60, + session=session, + ) error_context.context("Final ping test", test.log.info) counts = params.get("ping_counts", 100) - status, output = utils_test.ping(dst_ip, counts, interface=nic, - timeout=float(counts) * 1.5, - session=session) + status, output = utils_test.ping( + dst_ip, counts, interface=nic, timeout=float(counts) * 1.5, session=session + ) if strick_check == "yes": ratio = utils_test.get_loss_ratio(output) if ratio != 0: - test.fail("Packet loss ratio is %s after flood" % ratio) + test.fail(f"Packet loss ratio is {ratio} after flood") else: if status != 0: - test.fail("Ping returns non-zero value %s" % output) + test.fail(f"Ping returns non-zero value {output}") def file_transfer(session, src, dst): username = params.get("username", "") @@ -84,32 +103,48 @@ def file_transfer(session, src, dst): dst_path = "/tmp/2" port = int(params["file_transfer_port"]) - cmd = "dd if=/dev/urandom of=%s bs=100M count=1" % src_path + cmd = f"dd if=/dev/urandom of={src_path} bs=100M count=1" cmd = params.get("file_create_cmd", cmd) - error_context.context("Create file by dd command, cmd: %s" % cmd, - test.log.info) + error_context.context(f"Create file by dd command, cmd: {cmd}", test.log.info) session.cmd(cmd) transfer_timeout = int(params.get("transfer_timeout")) - log_filename = "scp-from-%s-to-%s.log" % (src, dst) - error_context.context("Transfer file from %s to %s" % (src, dst), - test.log.info) - remote.scp_between_remotes(src, dst, port, password, password, - username, username, src_path, dst_path, - log_filename=log_filename, - timeout=transfer_timeout) + log_filename = f"scp-from-{src}-to-{dst}.log" + error_context.context(f"Transfer file from {src} to {dst}", test.log.info) + remote.scp_between_remotes( + src, + dst, + port, + password, + password, + username, + username, + src_path, + dst_path, + log_filename=log_filename, + timeout=transfer_timeout, + ) src_path = dst_path dst_path = "/tmp/3" - log_filename = "scp-from-%s-to-%s.log" % (dst, src) - error_context.context("Transfer file from %s to %s" % (dst, src), - test.log.info) - remote.scp_between_remotes(dst, src, port, password, password, - username, username, src_path, dst_path, - log_filename=log_filename, - timeout=transfer_timeout) - error_context.context("Compare original file and transferred file", - test.log.info) + log_filename = f"scp-from-{dst}-to-{src}.log" + error_context.context(f"Transfer file from {dst} to {src}", test.log.info) + remote.scp_between_remotes( + dst, + src, + port, + password, + password, + username, + 
username, + src_path, + dst_path, + log_filename=log_filename, + timeout=transfer_timeout, + ) + error_context.context( + "Compare original file and transferred file", test.log.info + ) cmd1 = "md5sum /tmp/1" cmd2 = "md5sum /tmp/3" @@ -119,14 +154,14 @@ def file_transfer(session, src, dst): test.error("File changed after transfer") nic_interface_list = [] - check_irqbalance_cmd = params.get("check_irqbalance_cmd", - "systemctl status irqbalance") - stop_irqbalance_cmd = params.get("stop_irqbalance_cmd", - "systemctl stop irqbalance") - start_irqbalance_cmd = params.get("start_irqbalance_cmd", - "systemctl start irqbalance") - status_irqbalance = params.get("status_irqbalance", - "Active: active|running") + check_irqbalance_cmd = params.get( + "check_irqbalance_cmd", "systemctl status irqbalance" + ) + stop_irqbalance_cmd = params.get("stop_irqbalance_cmd", "systemctl stop irqbalance") + start_irqbalance_cmd = params.get( + "start_irqbalance_cmd", "systemctl start irqbalance" + ) + status_irqbalance = params.get("status_irqbalance", "Active: active|running") vms = params["vms"].split() host_mem = utils_memory.memtotal() // (1024 * 1024) host_cpu_count = cpu.total_count() @@ -134,14 +169,15 @@ def file_transfer(session, src, dst): if params.get("vhost"): vhost_count = 1 if host_cpu_count < (1 + vhost_count) * len(vms): - test.error("The host don't have enough cpus to start guest" - "pcus: %d, minimum of vcpus and vhost: %d" % - (host_cpu_count, (1 + vhost_count) * len(vms))) - params['mem'] = host_mem // len(vms) * 1024 - params['smp'] = params['vcpu_maxcpus'] = \ - host_cpu_count // len(vms) - vhost_count - if params['smp'] % 2 != 0: - params['vcpu_sockets'] = 1 + test.error( + "The host don't have enough cpus to start guest" + "pcus: %d, minimum of vcpus and vhost: %d" + % (host_cpu_count, (1 + vhost_count) * len(vms)) + ) + params["mem"] = host_mem // len(vms) * 1024 + params["smp"] = params["vcpu_maxcpus"] = host_cpu_count // len(vms) - vhost_count + if params["smp"] % 2 != 0: + params["vcpu_sockets"] = 1 params["start_vm"] = "yes" for vm_name in vms: env_process.preprocess_vm(test, params, env, vm_name) @@ -151,15 +187,17 @@ def file_transfer(session, src, dst): host_ip = params.get("srchost", host_ip) flood_minutes = float(params["flood_minutes"]) error_context.context("Check irqbalance service status", test.log.info) - o = process.system_output(check_irqbalance_cmd, ignore_status=True, - shell=True).decode() + o = process.system_output( + check_irqbalance_cmd, ignore_status=True, shell=True + ).decode() check_stop_irqbalance = False if re.findall(status_irqbalance, o): test.log.debug("stop irqbalance") process.run(stop_irqbalance_cmd, shell=True) check_stop_irqbalance = True - o = process.system_output(check_irqbalance_cmd, ignore_status=True, - shell=True).decode() + o = process.system_output( + check_irqbalance_cmd, ignore_status=True, shell=True + ).decode() if re.findall(status_irqbalance, o): test.error("Can not stop irqbalance") thread_list = [] @@ -171,14 +209,13 @@ def file_transfer(session, src, dst): session = vm.wait_for_login(timeout=timeout) thread_list.extend(vm.vcpu_threads) thread_list.extend(vm.vhost_threads) - error_context.context("Check all the nics available or not", - test.log.info) + error_context.context("Check all the nics available or not", test.log.info) for index, nic in enumerate(vm.virtnet): guest_ifname = utils_net.get_linux_ifname(session, nic.mac) guest_ip = vm.get_address(index) if not (guest_ifname and guest_ip): - err_log = "vms %s get ip or ifname 
failed." % vm_name - err_log = "ifname: %s, ip: %s." % (guest_ifname, guest_ip) + err_log = f"vms {vm_name} get ip or ifname failed." + err_log = f"ifname: {guest_ifname}, ip: {guest_ip}." test.fail(err_log) nic_interface = [guest_ifname, guest_ip, session] nic_interface_list.append(nic_interface) @@ -191,8 +228,11 @@ def file_transfer(session, src, dst): if vthread_num >= len(thread_list): break vcpu_tid = thread_list[vthread_num] - test.log.debug("pin vcpu/vhost thread(%s) to cpu(%s)", - vcpu_tid, numa_node.pin_cpu(vcpu_tid)) + test.log.debug( + "pin vcpu/vhost thread(%s) to cpu(%s)", + vcpu_tid, + numa_node.pin_cpu(vcpu_tid), + ) vthread_num += 1 nic_interface_list_len = len(nic_interface_list) @@ -200,20 +240,19 @@ def file_transfer(session, src, dst): for src_ip_index in range(nic_interface_list_len): error_context.context("Ping test from guest to host", test.log.info) src_ip_info = nic_interface_list[src_ip_index] - ping(src_ip_info[2], src_ip_info[0], host_ip, strict_check, - flood_minutes) - error_context.context("File transfer test between guest and host", - test.log.info) + ping(src_ip_info[2], src_ip_info[0], host_ip, strict_check, flood_minutes) + error_context.context( + "File transfer test between guest and host", test.log.info + ) file_transfer(src_ip_info[2], src_ip_info[1], host_ip) for dst_ip in nic_interface_list[src_ip_index:]: if src_ip_info[1] == dst_ip[1]: continue - txt = "Ping test between %s and %s" % (src_ip_info[1], dst_ip[1]) + txt = f"Ping test between {src_ip_info[1]} and {dst_ip[1]}" error_context.context(txt, test.log.info) - ping(src_ip_info[2], src_ip_info[0], dst_ip[1], strict_check, - flood_minutes) - txt = "File transfer test between %s " % src_ip_info[1] - txt += "and %s" % dst_ip[1] + ping(src_ip_info[2], src_ip_info[0], dst_ip[1], strict_check, flood_minutes) + txt = f"File transfer test between {src_ip_info[1]} " + txt += f"and {dst_ip[1]}" error_context.context(txt, test.log.info) file_transfer(src_ip_info[2], src_ip_info[1], dst_ip[1]) if check_stop_irqbalance: diff --git a/qemu/tests/multi_vms_with_stress.py b/qemu/tests/multi_vms_with_stress.py index d9925b9ad3..db67147867 100644 --- a/qemu/tests/multi_vms_with_stress.py +++ b/qemu/tests/multi_vms_with_stress.py @@ -2,14 +2,8 @@ import re import shutil -from avocado.utils import cpu -from avocado.utils import process - -from virttest import data_dir -from virttest import env_process -from virttest import error_context -from virttest import qemu_storage -from virttest import utils_misc +from avocado.utils import cpu, process +from virttest import data_dir, env_process, error_context, qemu_storage, utils_misc @error_context.context_aware @@ -26,50 +20,54 @@ def run(test, params, env): :param params: Dictionary with the test parameters. :param env: Dictionary with test environment. 
""" + def unpack_stress_pkg(): """Unpack the stress package.""" - process.system("tar -xzvf %s -C %s" % (archive_path, stress_inst_dir)) + process.system(f"tar -xzvf {archive_path} -C {stress_inst_dir}") def install_stress_pkg(): """Install the stress package.""" cmd_configure = "cd {0} && ./configure --prefix={0}".format( - os.path.join(stress_inst_dir, params['stress_ver'])) + os.path.join(stress_inst_dir, params["stress_ver"]) + ) cmd_make = "make && make install" process.system(" && ".join((cmd_configure, cmd_make)), shell=True) def run_stress_background(): """Run stress in background.""" - process.system( - params['stress_cmd'], shell=True, ignore_bg_processes=True) + process.system(params["stress_cmd"], shell=True, ignore_bg_processes=True) def is_stress_alive(): """Whether the stress process is alive.""" - cmd = 'pgrep -xl stress' - if not utils_misc.wait_for(lambda: re.search( - r'\d+\s+stress', process.system_output( - cmd, ignore_status=True).decode()), 10): + cmd = "pgrep -xl stress" + if not utils_misc.wait_for( + lambda: re.search( + r"\d+\s+stress", process.system_output(cmd, ignore_status=True).decode() + ), + 10, + ): test.error("The stress process is not alive.") def copy_base_vm_image(): """Copy the base vm image for VMs.""" src_img = qemu_storage.QemuImg( - params, data_dir.get_data_dir(), params['images']) + params, data_dir.get_data_dir(), params["images"] + ) src_filename = src_img.image_filename src_format = src_img.image_format dst_dir = os.path.dirname(src_filename) for vm_name in vms_list: - dst_filename = os.path.join( - dst_dir, '%s.%s' % (vm_name, src_format)) - test.log.info('Copying %s to %s.', src_filename, dst_filename) + dst_filename = os.path.join(dst_dir, f"{vm_name}.{src_format}") + test.log.info("Copying %s to %s.", src_filename, dst_filename) shutil.copy(src_filename, dst_filename) def configure_images_copied(): """Configure the images copied for VMs.""" for vm_name in vms_list: - params['images_%s' % vm_name] = vm_name - image_name = 'image_name_{0}_{0}'.format(vm_name) - params[image_name] = 'images/%s' % vm_name - params['remove_image_%s' % vm_name] = 'yes' + params[f"images_{vm_name}"] = vm_name + image_name = f"image_name_{vm_name}_{vm_name}" + params[image_name] = f"images/{vm_name}" + params[f"remove_image_{vm_name}"] = "yes" def wait_for_login_all_vms(): """Wait all VMs to login.""" @@ -78,36 +76,36 @@ def wait_for_login_all_vms(): def wait_for_shutdown_all_vms(vms, sessions): """Wait all VMs to shutdown.""" for vm, session in zip(vms, sessions): - test.log.info('Shutting down %s.', vm.name) + test.log.info("Shutting down %s.", vm.name) session.sendline(params["shutdown_command"]) if not vm.wait_for_shutdown(): - test.fail("Failed to shutdown %s." 
% vm.name) + test.fail(f"Failed to shutdown {vm.name}.") - vms_list = params['vms'].split()[1:] + vms_list = params["vms"].split()[1:] copy_base_vm_image() configure_images_copied() - stress_inst_dir = params['stress_inst_dir'] - stress_deps_dir = data_dir.get_deps_dir('stress') - archive_path = os.path.join(stress_deps_dir, params['stress_pkg_name']) + stress_inst_dir = params["stress_inst_dir"] + stress_deps_dir = data_dir.get_deps_dir("stress") + archive_path = os.path.join(stress_deps_dir, params["stress_pkg_name"]) unpack_stress_pkg() install_stress_pkg() run_stress_background() is_stress_alive() - if params.get_boolean('set_maxcpus'): - num_vms = int(len(params.objects('vms'))) + if params.get_boolean("set_maxcpus"): + num_vms = int(len(params.objects("vms"))) online_cpu = cpu.online_count() * 2 // num_vms if (online_cpu % 2) != 0: online_cpu += 1 - params['smp'] = online_cpu - params['vcpu_maxcpus'] = params['smp'] + params["smp"] = online_cpu + params["vcpu_maxcpus"] = params["smp"] - params['start_vm'] = 'yes' - env_process.process(test, params, env, - env_process.preprocess_image, - env_process.preprocess_vm) + params["start_vm"] = "yes" + env_process.process( + test, params, env, env_process.preprocess_image, env_process.preprocess_vm + ) vms = env.get_all_vms() for vm in vms: vm.verify_alive() diff --git a/qemu/tests/nbd_long_export_name.py b/qemu/tests/nbd_long_export_name.py index 6d1825d377..979d8fbea8 100644 --- a/qemu/tests/nbd_long_export_name.py +++ b/qemu/tests/nbd_long_export_name.py @@ -1,9 +1,7 @@ import socket from avocado.utils import process - -from virttest import qemu_storage -from virttest import error_context +from virttest import error_context, qemu_storage from provider.nbd_image_export import QemuNBDExportImage @@ -24,46 +22,52 @@ def run(test, params, env): :param params: Dictionary with the test parameters :param env: Dictionary with test environment. 
""" + def _make_export_name(length): - return process.run( - params['create_export_name_cmd'].format(length=length), - ignore_status=True, - shell=True - ).stdout.decode().strip() + return ( + process.run( + params["create_export_name_cmd"].format(length=length), + ignore_status=True, + shell=True, + ) + .stdout.decode() + .strip() + ) tag = params["images"].split()[0] - params['nbd_export_name'] = _make_export_name( - params['max_export_name_len']) + params["nbd_export_name"] = _make_export_name(params["max_export_name_len"]) nbd_export = QemuNBDExportImage(params, tag) nbd_export.export_image() - nbd_image_tag = params['nbd_image_tag'] + nbd_image_tag = params["nbd_image_tag"] nbd_image_params = params.object_params(nbd_image_tag) localhost = socket.gethostname() - nbd_image_params['nbd_server'] = localhost if localhost else 'localhost' + nbd_image_params["nbd_server"] = localhost if localhost else "localhost" qemu_img = qemu_storage.QemuImg(nbd_image_params, None, nbd_image_tag) try: # Access image with the export name, just make sure # qemu-img info can access image successfully out = qemu_img.info() - if 'file format: raw' not in out: - test.fail('Failed to access image, output(%s)' % out) + if "file format: raw" not in out: + test.fail(f"Failed to access image, output({out})") # Access image with wrong export names - for length in params['access_export_name_lens'].split(): - nbd_image_params['nbd_export_name'] = _make_export_name(length) - qemu_img = qemu_storage.QemuImg(nbd_image_params, - None, nbd_image_tag) + for length in params["access_export_name_lens"].split(): + nbd_image_params["nbd_export_name"] = _make_export_name(length) + qemu_img = qemu_storage.QemuImg(nbd_image_params, None, nbd_image_tag) try: out = qemu_img.info() except process.CmdError as e: - if params['errmsg_check_%s' % length] not in str(e): - test.fail('Failed to get export name(%s) from output(%s)' - % (qemu_img.params['nbd_export_name'], out)) + if params[f"errmsg_check_{length}"] not in str(e): + test.fail( + "Failed to get export name({}) from output({})".format( + qemu_img.params["nbd_export_name"], out + ) + ) else: - test.fail('qemu-img should fail due to wrong export name') + test.fail("qemu-img should fail due to wrong export name") finally: nbd_export.stop_export() diff --git a/qemu/tests/nbd_map_snapshots.py b/qemu/tests/nbd_map_snapshots.py index 7d30557406..5e36b0b21d 100644 --- a/qemu/tests/nbd_map_snapshots.py +++ b/qemu/tests/nbd_map_snapshots.py @@ -1,9 +1,7 @@ import socket from avocado.utils import process - -from virttest import data_dir -from virttest import qemu_storage +from virttest import data_dir, qemu_storage from virttest.qemu_io import QemuIOSystem from virttest.qemu_storage import QemuImg @@ -23,13 +21,13 @@ def run(test, params, env): :param params: Dictionary with the test parameters. :param env: Dictionary with test environment. """ + def _qemu_io(img, cmd): """Run qemu-io cmd to a given img.""" try: QemuIOSystem(test, params, img.image_filename).cmd_output(cmd, 120) except process.CmdError as err: - test.fail( - "qemu-io to '%s' failed: %s." 
% (img.image_filename, str(err))) + test.fail(f"qemu-io to '{img.image_filename}' failed: {str(err)}.") images = params["image_chain"].split() base_img = images[0] @@ -51,10 +49,10 @@ def _qemu_io(img, cmd): nbd_export = QemuNBDExportImage(params, top_img) nbd_export.export_image() - nbd_image_tag = params['nbd_image_tag'] + nbd_image_tag = params["nbd_image_tag"] nbd_image_params = params.object_params(nbd_image_tag) localhost = socket.gethostname() - nbd_image_params['nbd_server'] = localhost if localhost else 'localhost' + nbd_image_params["nbd_server"] = localhost if localhost else "localhost" qemu_img = qemu_storage.QemuImg(nbd_image_params, None, nbd_image_tag) nbd_image = qemu_img.image_filename map_cmd = params["map_cmd"] @@ -62,13 +60,14 @@ def _qemu_io(img, cmd): test.log.info("Dump the info of '%s'", nbd_image) try: - result = process.run(map_cmd + " " + nbd_image, ignore_status=True, - shell=True) + result = process.run(map_cmd + " " + nbd_image, ignore_status=True, shell=True) if result.exit_status != 0: - test.fail('Failed to execute the map command, error message: %s' - % result.stderr.decode()) + test.fail( + f"Failed to execute the map command, error message: {result.stderr.decode()}" + ) elif check_msg not in result.stdout.decode().strip(): - test.fail("Message '%s' mismatched with '%s'" % ( - check_msg, result.stdout.decode())) + test.fail( + f"Message '{check_msg}' mismatched with '{result.stdout.decode()}'" + ) finally: nbd_export.stop_export() diff --git a/qemu/tests/nbd_unix_connection.py b/qemu/tests/nbd_unix_connection.py index de9ecee9a8..471a38e33c 100644 --- a/qemu/tests/nbd_unix_connection.py +++ b/qemu/tests/nbd_unix_connection.py @@ -18,8 +18,10 @@ def run(test, params, env): def run_nbd_connect_cmd(cmd): result = process.run(cmd, timeout=5, ignore_status=True, shell=True) if result.exit_status != -15: - test.fail('Failed to connect nbd by unix socket,' - 'command error: %s' % result.stderr.decode()) + test.fail( + "Failed to connect nbd by unix socket," + f"command error: {result.stderr.decode()}" + ) # local image to be exported nbd_export = QemuNBDExportImage(params, params["local_image_tag"]) @@ -33,5 +35,5 @@ def run_nbd_connect_cmd(cmd): for iteration in range(5): run_nbd_connect_cmd(nbd_connect_cmd) finally: - test.log.info('Stop export test image.') + test.log.info("Stop export test image.") nbd_export.stop_export() diff --git a/qemu/tests/negative_create.py b/qemu/tests/negative_create.py index d56a8d164b..dbca7faa48 100644 --- a/qemu/tests/negative_create.py +++ b/qemu/tests/negative_create.py @@ -1,12 +1,9 @@ import re -from virttest import virt_vm -from virttest import utils_net -from virttest import env_process +from virttest import env_process, utils_net, virt_vm class VMCreateSuccess(Exception): - def __str__(self): return "VM succeeded to create. This was not expected" @@ -29,8 +26,7 @@ def run(test, params, env): env_process.preprocess_vm(test, params, env, params["main_vm"]) except (virt_vm.VMError, utils_net.NetError) as err: message = str(err) - test.log.debug("VM Failed to create. This was expected. Reason:\n%s", - message) + test.log.debug("VM Failed to create. This was expected. 
Reason:\n%s", message) error_msg = params.get("error_msg") if error_msg and not re.search(error_msg, message, re.M | re.I): diff --git a/qemu/tests/nested_block_resize.py b/qemu/tests/nested_block_resize.py index 673b6cea63..e28d407b3a 100644 --- a/qemu/tests/nested_block_resize.py +++ b/qemu/tests/nested_block_resize.py @@ -3,11 +3,9 @@ import time from avocado.utils import process +from virttest import data_dir, error_context, storage -from virttest import error_context -from provider import message_queuing, ansible -from virttest import storage -from virttest import data_dir +from provider import ansible, message_queuing # This decorator makes the test function aware of context strings @@ -43,9 +41,10 @@ def _on_resize(obj, msg): test.log.info("Receive resize msg:%s", msg) data_image_params = params.object_params("stg0") data_image_size = params.get_numeric("new_image_size_stg0") - data_image_filename = storage.get_image_filename(data_image_params, - data_dir.get_data_dir()) - data_image_dev = vm.get_block({'file': data_image_filename}) + data_image_filename = storage.get_image_filename( + data_image_params, data_dir.get_data_dir() + ) + data_image_dev = vm.get_block({"file": data_image_filename}) args = (None, data_image_size, data_image_dev) vm.monitor.block_resize(*args) @@ -81,8 +80,9 @@ def _on_status(obj, msg): ansible_extra_vars = params.get("ansible_extra_vars", "{}") playbook_repo = params["playbook_repo"] playbook_timeout = params.get_numeric("playbook_timeout") - playbook_dir = params.get("playbook_dir", - os.path.join(test.workdir, "ansible_playbook")) + playbook_dir = params.get( + "playbook_dir", os.path.join(test.workdir, "ansible_playbook") + ) toplevel_playbook = os.path.join(playbook_dir, params["toplevel_playbook"]) # Use this directory to copy some logs back from the guest test_harness_log_dir = test.logdir @@ -91,16 +91,15 @@ def _on_status(obj, msg): guest_ip_list = [vm.get_address()] test.log.info("Cloning %s", playbook_repo) - process.run("git clone {src} {dst}".format(src=playbook_repo, - dst=playbook_dir), - verbose=False) - - error_context.base_context("Generate playbook related options.", - test.log.info) - extra_vars = {"ansible_ssh_extra_args": ansible_ssh_extra_args, - "ansible_ssh_pass": guest_passwd, - "mq_port": mq_listen_port, - "test_harness_log_dir": test_harness_log_dir} + process.run(f"git clone {playbook_repo} {playbook_dir}", verbose=False) + + error_context.base_context("Generate playbook related options.", test.log.info) + extra_vars = { + "ansible_ssh_extra_args": ansible_ssh_extra_args, + "ansible_ssh_pass": guest_passwd, + "mq_port": mq_listen_port, + "test_harness_log_dir": test_harness_log_dir, + } extra_vars.update(json.loads(ansible_extra_vars)) error_context.context("Execute the ansible playbook.", test.log.info) @@ -110,7 +109,7 @@ def _on_status(obj, msg): remote_user=guest_user, extra_vars=json.dumps(extra_vars), callback_plugin=ansible_callback_plugin, - addl_opts=ansible_addl_opts + addl_opts=ansible_addl_opts, ) # Handle cases @@ -118,12 +117,11 @@ def _on_status(obj, msg): mq_port = params.get("mq_port", 5000) wait_response_timeout = params.get_numeric("wait_response_timeout", 1800) - mq_publisher = message_queuing.MQPublisher(mq_port, - other_options="--broker") + mq_publisher = message_queuing.MQPublisher(mq_port, other_options="--broker") host = "127.0.0.1" - test.log.info("host:{} port:{}".format(host, mq_port)) + test.log.info("host:%s port:%s", host, mq_port) client = message_queuing.MQClient(host, mq_port) time.sleep(2) @@ 
-144,11 +142,11 @@ def _on_status(obj, msg): if playbook_executor.get_status() != 0: test.fail( "Ansible playbook execution failed, please check the " - "{} for details.".format(ansible_log)) + f"{ansible_log} for details." + ) test.log.info("Ansible playbook execution passed.") finally: - playbook_executor.store_playbook_log(test_harness_log_dir, - ansible_log) + playbook_executor.store_playbook_log(test_harness_log_dir, ansible_log) playbook_executor.close() client.close() mq_publisher.close() diff --git a/qemu/tests/nested_block_resize_l1.py b/qemu/tests/nested_block_resize_l1.py index f8723035d7..24e0d6fa06 100644 --- a/qemu/tests/nested_block_resize_l1.py +++ b/qemu/tests/nested_block_resize_l1.py @@ -1,9 +1,10 @@ -import time import re +import time from avocado.utils import process -from virttest import error_context, env_process +from virttest import env_process, error_context from virttest.utils_misc import get_linux_drive_path + from provider import message_queuing @@ -32,20 +33,20 @@ def _on_status(obj, msg): test.log.info("Receive status msg:%s", msg) vm_status = dict(vm.monitor.get_status()) - test.log.info(str(vm_status['status'])) - obj.send_message("status-rsp:" + vm_status['status']) + test.log.info(str(vm_status["status"])) + obj.send_message("status-rsp:" + vm_status["status"]) test.log.info("Finish handle on_status") def _get_host_drive_path(did): """ Get drive path in host by drive serial or wwn """ - cmd = 'for dev_path in `ls -d /sys/block/*`; do ' - cmd += 'echo `udevadm info -q property -p $dev_path`; done' + cmd = "for dev_path in `ls -d /sys/block/*`; do " + cmd += "echo `udevadm info -q property -p $dev_path`; done" status, output = process.getstatusoutput(cmd) if status != 0: return "" - p = r"DEVNAME=([^\s]+)\s.*(?:ID_SERIAL|ID_SERIAL_SHORT|ID_WWN)=%s" % did + p = rf"DEVNAME=([^\s]+)\s.*(?:ID_SERIAL|ID_SERIAL_SHORT|ID_WWN)={did}" dev = re.search(p, output, re.M) if dev: return dev.groups()[0] @@ -59,7 +60,7 @@ def _get_host_drive_path(did): test.fail("Can not find expected disk") params["image_name_stg"] = pass_path - params["start_vm"] = 'yes' + params["start_vm"] = "yes" test.log.info(pass_path) error_context.context("Boot the main VM", test.log.info) @@ -72,12 +73,11 @@ def _get_host_drive_path(did): host = params.get("mq_publisher") mq_port = params.get("mq_port", 5000) - test.log.info("host:{} port:{}".format(host, mq_port)) + test.log.info("host:%s port:%s", host, mq_port) client = message_queuing.MQClient(host, mq_port) time.sleep(2) cmd_dd = params["cmd_dd"] % guest_path - error_context.context('Do dd writing test on the data disk.', - test.log.info) + error_context.context("Do dd writing test on the data disk.", test.log.info) session.sendline(cmd_dd) time.sleep(2) diff --git a/qemu/tests/nested_hyperv_on_kvm.py b/qemu/tests/nested_hyperv_on_kvm.py index 811cb79685..4f0712daf2 100644 --- a/qemu/tests/nested_hyperv_on_kvm.py +++ b/qemu/tests/nested_hyperv_on_kvm.py @@ -1,10 +1,8 @@ -import time import os +import time -from avocado.utils import download -from avocado.utils import process -from virttest import data_dir -from virttest import error_context +from avocado.utils import download, process +from virttest import data_dir, error_context @error_context.context_aware @@ -25,10 +23,12 @@ def get_vhdx(): image_dir = params.get("images_base_dir", data_dir.get_data_dir()) md5value = params.get("md5value") vhdx_dest = params.get("vhdx_dest") - test.log.info("Parameters: %s %s %s %s" % (download_url, image_dir, md5value, vhdx_dest)) + test.log.info( + 
"Parameters: %s %s %s %s", download_url, image_dir, md5value, vhdx_dest + ) image_name = os.path.basename(download_url) image_path = os.path.join(image_dir, image_name) - vhdx_name = image_name.replace('qcow2', 'vhdx') + vhdx_name = image_name.replace("qcow2", "vhdx") vhdx_path = os.path.join(image_dir, vhdx_name) download.get_file(download_url, image_path, hash_expected=md5value) @@ -38,7 +38,7 @@ def get_vhdx(): status, output = process.getstatusoutput(cmd_covert, timeout) if status != 0: - test.error("qemu-img convert failed, status: %s, output: %s" % (status, output)) + test.error(f"qemu-img convert failed, status: {status}, output: {output}") vm.copy_files_to(vhdx_path, vhdx_dest, timeout=300) vm = env.get_vm(params["main_vm"]) @@ -50,8 +50,12 @@ def get_vhdx(): get_vhdx() need_reboot = 0 - status = session.cmd_status("powershell Get-VM \ - -ErrorAction SilentlyContinue", timeout=120, safe=True) + status = session.cmd_status( + "powershell Get-VM \ + -ErrorAction SilentlyContinue", + timeout=120, + safe=True, + ) if status: need_reboot = 1 @@ -60,7 +64,7 @@ def get_vhdx(): test.log.info("Hyper-V powershell module has been installed") nested_dest = params.get("nested_dest") - path_cmd = r"powershell Remove-Item %s -recurse -force -ErrorAction SilentlyContinue" % nested_dest + path_cmd = rf"powershell Remove-Item {nested_dest} -recurse -force -ErrorAction SilentlyContinue" try: session.cmd(path_cmd) @@ -77,19 +81,25 @@ def get_vhdx(): # set RemoteSigned policy mainly for windows 10/11, it is default for windows server session.cmd("powershell Set-ExecutionPolicy -ExecutionPolicy RemoteSigned -Force") # powershell C:\nested-hyperv-on-kvm\hyperv_env.ps1 - status, output = session.cmd_status_output(r"powershell %s\hyperv_env.ps1" % nested_dest, timeout=1200) + status, output = session.cmd_status_output( + rf"powershell {nested_dest}\hyperv_env.ps1", timeout=1200 + ) if status != 0: test.error("Setup Hyper-v enviroment error: %s", output) else: test.log.info("Setup Hyper-v enviroment pass: %s", output) if need_reboot: - test.log.info("VM will reboot to make Hyper-V powershell module installation work") + test.log.info( + "VM will reboot to make Hyper-V powershell module installation work" + ) session = vm.reboot(session, timeout=360) time.sleep(5) # powershell C:\nested-hyperv-on-kvm\hyperv_run.ps1 - status, output = session.cmd_status_output(r"powershell %s\hyperv_run.ps1" % nested_dest, timeout=1800) + status, output = session.cmd_status_output( + rf"powershell {nested_dest}\hyperv_run.ps1", timeout=1800 + ) if status != 0: test.fail("Test failed, script output is: %s", output) else: diff --git a/qemu/tests/nested_interactive_agent.py b/qemu/tests/nested_interactive_agent.py index d4db5535bb..b4c93a85ab 100644 --- a/qemu/tests/nested_interactive_agent.py +++ b/qemu/tests/nested_interactive_agent.py @@ -22,12 +22,12 @@ def run(test, params, env): :param env: Dictionary with test environment. 
""" - run_stress = params.get_boolean('run_stress') - mq_publisher = params['mq_publisher'] - mq_port = params.get('mq_port') - wait_response_timeout = params.get_numeric('wait_response_timeout', 600) + run_stress = params.get_boolean("run_stress") + mq_publisher = params["mq_publisher"] + mq_port = params.get("mq_port") + wait_response_timeout = params.get_numeric("wait_response_timeout", 600) - vm = env.get_vm(params['main_vm']) + vm = env.get_vm(params["main_vm"]) vm.verify_alive() vm.wait_for_login() stress_tool = None @@ -39,19 +39,23 @@ def run(test, params, env): mq_subscriber = message_queuing.MQSubscriber(mq_publisher, mq_port) try: - error_context.context('Receive the "APPROVE" message from MQ publisher ' - 'to continue the test.', test.log.info) + error_context.context( + 'Receive the "APPROVE" message from MQ publisher ' "to continue the test.", + test.log.info, + ) try: event = mq_subscriber.receive_event(wait_response_timeout) if event == "NOTIFY": test.log.warning('Got "NOTIFY" message to finish test') return - elif event != 'APPROVE': - test.fail('Got unwanted message from MQ publisher.') + elif event != "APPROVE": + test.fail("Got unwanted message from MQ publisher.") except message_queuing.UnknownEventError as err: test.log.error(err) - test.error('The MQ publisher did not enter the "APPROVE" message ' - 'within the expected time.') + test.error( + 'The MQ publisher did not enter the "APPROVE" message ' + "within the expected time." + ) test.log.info('Already captured the "APPROVE" message.') if not stress_tool: diff --git a/qemu/tests/nested_libguestfs_unittest.py b/qemu/tests/nested_libguestfs_unittest.py index 52a872f58a..36ba7f8643 100644 --- a/qemu/tests/nested_libguestfs_unittest.py +++ b/qemu/tests/nested_libguestfs_unittest.py @@ -1,11 +1,8 @@ -import re import os +import re from avocado.utils import cpu - -from virttest import arch -from virttest import error_context -from virttest import utils_package +from virttest import arch, error_context, utils_package @error_context.context_aware @@ -22,10 +19,9 @@ def run(test, params, env): :param params: Dictionary with the test parameters. :param env: Dictionary with test environment. 
""" - kvm_module = arch.get_kvm_module_list()[-1].replace('-', '_') + kvm_module = arch.get_kvm_module_list()[-1].replace("-", "_") is_kvm_mode = params["nested_flag"] == "nested_flag_on" - nested_file = os.path.join("/sys/module/", kvm_module, - "parameters/nested") + nested_file = os.path.join("/sys/module/", kvm_module, "parameters/nested") unittest_timeout = params.get_numeric("unittest_timeout") cpu_vendor = cpu.get_vendor() @@ -42,47 +38,56 @@ def run(test, params, env): vm.verify_alive() session = vm.wait_for_login() - error_context.context("Check if libguestfs-tools is installed.", - test.log.info) + error_context.context("Check if libguestfs-tools is installed.", test.log.info) sm = utils_package.RemotePackageMgr(session, "libguestfs-tools") if not (sm.is_installed("libguestfs-tools") or sm.install()): test.cancel("Unable to install libguestfs-tools inside guest.") try: - error_context.context("Execute the libguestfs-test-tool unittest " - "directly launching qemu.", test.log.info) + error_context.context( + "Execute the libguestfs-test-tool unittest " "directly launching qemu.", + test.log.info, + ) stderr_file = "/tmp/lgf_stderr" - lgf_cmd = ("LIBGUESTFS_BACKEND=direct libguestfs-test-tool " - "--timeout {} 2> {}".format(unittest_timeout, - stderr_file)) - lgf_s, lgf_o = session.cmd_status_output(lgf_cmd, - timeout=unittest_timeout) + lgf_cmd = ( + "LIBGUESTFS_BACKEND=direct libguestfs-test-tool " + f"--timeout {unittest_timeout} 2> {stderr_file}" + ) + lgf_s, lgf_o = session.cmd_status_output(lgf_cmd, timeout=unittest_timeout) test.log.debug("libguestfs-test-tool stdout:\n%s", lgf_o) lgf_stderr = session.cmd_output("cat " + stderr_file) lgf_tcg = re.search("Back to tcg accelerator", lgf_stderr) - error_context.context("Analyze the libguestfs-test-tool test result.", - test.log.info) - fail_msg = ("the exit status is non-zero" if lgf_s else - "back to tcg accelerator" if lgf_tcg and is_kvm_mode else "") + error_context.context( + "Analyze the libguestfs-test-tool test result.", test.log.info + ) + fail_msg = ( + "the exit status is non-zero" + if lgf_s + else "back to tcg accelerator" + if lgf_tcg and is_kvm_mode + else "" + ) if fail_msg: test.log.debug("libguestfs-test-tool stderr:\n%s", lgf_stderr) - test.fail("libguestfs-test-tool execution failed due to: %s. " - % fail_msg) + test.fail(f"libguestfs-test-tool execution failed due to: {fail_msg}. ") if cpu_arch != "s390": error_context.context("Check the nested file status.", test.log.info) file_s, file_o = session.cmd_status_output("cat " + nested_file) if re.match(r"[1Y]", file_o) and is_kvm_mode: - test.log.info("Guest runs with nested flag, the nested feature has " - "been enabled.") + test.log.info( + "Guest runs with nested flag, the nested feature has " + "been enabled." + ) elif file_s == 1 and not is_kvm_mode: - test.log.info("Guest runs without nested flag, so the nested file " - "does not exist.") + test.log.info( + "Guest runs without nested flag, so the nested file " + "does not exist." 
+ ) else: test.log.error("Nested file status: %s, output: %s", file_s, file_o) - test.fail("Getting the status of nested file has unexpected " - "result.") + test.fail("Getting the status of nested file has unexpected " "result.") finally: session.cmd("rm -f " + stderr_file, ignore_all_errors=True) session.close() diff --git a/qemu/tests/nested_system_reset.py b/qemu/tests/nested_system_reset.py index a865ebd1a9..0f42bbfee3 100644 --- a/qemu/tests/nested_system_reset.py +++ b/qemu/tests/nested_system_reset.py @@ -1,5 +1,4 @@ -from virttest import error_context -from virttest import utils_package +from virttest import error_context, utils_package @error_context.context_aware diff --git a/qemu/tests/nested_test.py b/qemu/tests/nested_test.py index c840ed6cc1..38e7b02247 100644 --- a/qemu/tests/nested_test.py +++ b/qemu/tests/nested_test.py @@ -1,13 +1,11 @@ -import os import json +import os -from avocado.utils import cpu -from avocado.utils import process +from avocado.utils import cpu, process from avocado.utils.software_manager import manager - -from virttest import error_context -from virttest import data_dir as virttest_data_dir from virttest import cpu as virttest_cpu +from virttest import data_dir as virttest_data_dir +from virttest import error_context @error_context.context_aware @@ -74,14 +72,13 @@ def generate_parameter_file(params): if variant_name == "check_cpu_model_l2": host_cpu_models = virttest_cpu.get_host_cpu_models() - case_name = ','.join(["%s.%s" % (case_name, i) - for i in host_cpu_models]) + case_name = ",".join([f"{case_name}.{i}" for i in host_cpu_models]) - kar_cmd += " --%s=%s " % (test_type, case_name) + kar_cmd += f" --{test_type}={case_name} " l2_guest_name = params.get("l2_guest_name") if l2_guest_name: - kar_cmd += " --guestname=%s" % l2_guest_name + kar_cmd += f" --guestname={l2_guest_name}" clone = params.get("install_node") if clone == "yes": kar_cmd += " --clone=yes" @@ -90,7 +87,7 @@ def generate_parameter_file(params): l2_kar_options = params.get("l2_kar_options") if l2_kar_options: - kar_cmd += " %s" % l2_kar_options + kar_cmd += f" {l2_kar_options}" test.log.info("Kar cmd: %s", kar_cmd) @@ -100,14 +97,16 @@ def generate_parameter_file(params): kar_repo = params.get("kar_repo") cert_url = params.get("cert_url") - data = {"guest_password": guest_password, - "bootstrap_options": bootstrap_options, - "accept_cancel": accept_cancel, - "command_line": kar_cmd, - "setup_br_sh": setup_bridge_sh, - "host_log_files_dir": results_dir, - "kar_repo": kar_repo, - "cert_url": cert_url} + data = { + "guest_password": guest_password, + "bootstrap_options": bootstrap_options, + "accept_cancel": accept_cancel, + "command_line": kar_cmd, + "setup_br_sh": setup_bridge_sh, + "host_log_files_dir": results_dir, + "kar_repo": kar_repo, + "cert_url": cert_url, + } json_file = open(os.path.join(tmp_dir, file_name), "w") json.dump(data, json_file) @@ -115,8 +114,7 @@ def generate_parameter_file(params): return json_file.name - if (params.get('check_vendor', 'no') == 'yes' and - cpu.get_vendor() != 'intel'): + if params.get("check_vendor", "no") == "yes" and cpu.get_vendor() != "intel": test.cancel("We only test this case with Intel platform now") sm = manager.SoftwareManager() @@ -132,14 +130,15 @@ def generate_parameter_file(params): params_file = generate_parameter_file(params) - ansible_cmd = "export ANSIBLE_SSH_ARGS=\"-C -o ControlMaster=auto " \ - "-o ControlPersist=60s " \ - "-o StrictHostKeyChecking=no " \ - "-o UserKnownHostsFile=/dev/null\"; " \ - "ansible-playbook %s " 
\ - "--extra-vars \"@%s\" " \ - "-i %s " \ - % (playbook_file, params_file, invent_file) + ansible_cmd = ( + 'export ANSIBLE_SSH_ARGS="-C -o ControlMaster=auto ' + "-o ControlPersist=60s " + "-o StrictHostKeyChecking=no " + '-o UserKnownHostsFile=/dev/null"; ' + f"ansible-playbook {playbook_file} " + f'--extra-vars "@{params_file}" ' + f"-i {invent_file} " + ) test.log.debug("ansible cmd: %s", ansible_cmd) @@ -147,5 +146,4 @@ def generate_parameter_file(params): status, output = process.getstatusoutput(ansible_cmd, timeout) if status != 0: - test.fail("ansible_cmd failed, status: %s, output: %s" % - (status, output)) + test.fail(f"ansible_cmd failed, status: {status}, output: {output}") diff --git a/qemu/tests/nested_vsock_con_sockets.py b/qemu/tests/nested_vsock_con_sockets.py index 3c468d0415..e7b068b377 100644 --- a/qemu/tests/nested_vsock_con_sockets.py +++ b/qemu/tests/nested_vsock_con_sockets.py @@ -2,13 +2,10 @@ import os import time -from avocado.utils import path -from avocado.utils import process - -from virttest import error_context -from virttest import utils_misc -from provider import message_queuing, ansible +from avocado.utils import path, process +from virttest import error_context, utils_misc +from provider import ansible, message_queuing from qemu.tests import vsock_test @@ -33,25 +30,26 @@ def _on_exit(obj, msg): def _send_file_from_host_to_l1(obj, msg): test.log.info("Received message: %s", msg) - test.log.info("vm.address: %s" % vm.get_address(timeout=120)) + test.log.info("vm.address: %s", vm.get_address(timeout=120)) vsock_port = params.get_numeric("vsock_port", 2345) - dd_cmd = 'dd if=/dev/urandom of=%s count=10240 bs=1024' % tmp_file + dd_cmd = f"dd if=/dev/urandom of={tmp_file} count=10240 bs=1024" process.system(dd_cmd, shell=True) - md5_origin = process.system_output("md5sum %s" % tmp_file).split()[0] + md5_origin = process.system_output(f"md5sum {tmp_file}").split()[0] cmd_transfer = None if vsock_test_tool == "ncat": tool_bin = path.find_command("ncat") - cmd_transfer = '%s --vsock --send-only -l %s < %s &' % ( - tool_bin, vsock_port, tmp_file) + cmd_transfer = ( + f"{tool_bin} --vsock --send-only -l {vsock_port} < {tmp_file} &" + ) if vsock_test_tool == "nc_vsock": tool_bin = vsock_test.compile_nc_vsock(test, vm, session) - cmd_transfer = '%s -l %s < %s &' % (tool_bin, vsock_port, tmp_file) + cmd_transfer = f"{tool_bin} -l {vsock_port} < {tmp_file} &" if cmd_transfer is None: raise ValueError(f"unsupported test tool: {vsock_test_tool}") - test.log.info("cmd_transfer: %s" % cmd_transfer) + test.log.info("cmd_transfer: %s", cmd_transfer) process.run(cmd_transfer, ignore_bg_processes=True, shell=True) md5_origin = "md5_origin:" + md5_origin.decode() @@ -65,7 +63,7 @@ def _send_file_from_host_to_l1(obj, msg): session = vm.wait_for_login() vsock_test_tool = params["vsock_test_tool"] - tmp_file = "/var/tmp/vsock_file_%s" % utils_misc.generate_random_string(6) + tmp_file = f"/var/tmp/vsock_file_{utils_misc.generate_random_string(6)}" disable_firewall = params.get("disable_firewall") session.cmd(disable_firewall, ignore_all_errors=True) @@ -80,8 +78,9 @@ def _send_file_from_host_to_l1(obj, msg): ansible_extra_vars = params.get("ansible_extra_vars", "{}") playbook_repo = params["playbook_repo"] playbook_timeout = params.get_numeric("playbook_timeout") - playbook_dir = params.get("playbook_dir", - os.path.join(test.workdir, "ansible_playbook")) + playbook_dir = params.get( + "playbook_dir", os.path.join(test.workdir, "ansible_playbook") + ) toplevel_playbook = 
os.path.join(playbook_dir, params["toplevel_playbook"]) # Use this directory to copy some logs back from the guest test_harness_log_dir = test.logdir @@ -90,16 +89,15 @@ def _send_file_from_host_to_l1(obj, msg): guest_ip = vm.get_address() test.log.info("Cloning %s", playbook_repo) - process.run("git clone {src} {dst}".format(src=playbook_repo, - dst=playbook_dir), - verbose=False) - - error_context.base_context("Generate playbook related options.", - test.log.info) - extra_vars = {"ansible_ssh_extra_args": ansible_ssh_extra_args, - "ansible_ssh_pass": guest_passwd, - "mq_port": mq_port, - "test_harness_log_dir": test_harness_log_dir} + process.run(f"git clone {playbook_repo} {playbook_dir}", verbose=False) + + error_context.base_context("Generate playbook related options.", test.log.info) + extra_vars = { + "ansible_ssh_extra_args": ansible_ssh_extra_args, + "ansible_ssh_pass": guest_passwd, + "mq_port": mq_port, + "test_harness_log_dir": test_harness_log_dir, + } extra_vars.update(json.loads(ansible_extra_vars)) error_context.context("Execute the ansible playbook.", test.log.info) @@ -109,22 +107,21 @@ def _send_file_from_host_to_l1(obj, msg): remote_user=guest_user, extra_vars=json.dumps(extra_vars), callback_plugin=ansible_callback_plugin, - addl_opts=ansible_addl_opts + addl_opts=ansible_addl_opts, ) # Handle test cases wait_response_timeout = params.get_numeric("wait_response_timeout", 600) - mq_publisher = message_queuing.MQPublisher(mq_port, - other_options="--broker") + mq_publisher = message_queuing.MQPublisher(mq_port, other_options="--broker") host = "127.0.0.1" - test.log.info("host:{} port:{}".format(host, mq_port)) + test.log.info("host:%s port:%s", host, mq_port) client = message_queuing.MQClient(host, mq_port) time.sleep(2) - test.log.info("vm.address: %s" % vm.get_address()) + test.log.info("vm.address: %s", vm.get_address()) client.register_msg("L1_up", _send_file_from_host_to_l1) client.register_msg("exit", _on_exit) @@ -141,13 +138,13 @@ def _send_file_from_host_to_l1(obj, msg): if playbook_executor.get_status() != 0: test.fail( "Ansible playbook execution failed, please check the " - "{} for details.".format(ansible_log)) + f"{ansible_log} for details." 
+ ) test.log.info("Ansible playbook execution passed.") finally: - playbook_executor.store_playbook_log(test_harness_log_dir, - ansible_log) + playbook_executor.store_playbook_log(test_harness_log_dir, ansible_log) playbook_executor.close() client.close() mq_publisher.close() test.log.debug("MQ closed") - process.system("rm -f %s" % tmp_file) + process.system(f"rm -f {tmp_file}") diff --git a/qemu/tests/nested_vsock_con_sockets_l1.py b/qemu/tests/nested_vsock_con_sockets_l1.py index 0e35dfbcd0..bd16c5f994 100644 --- a/qemu/tests/nested_vsock_con_sockets_l1.py +++ b/qemu/tests/nested_vsock_con_sockets_l1.py @@ -1,11 +1,9 @@ import os import time -from avocado.utils import path -from avocado.utils import process +from avocado.utils import path, process +from virttest import data_dir, error_context -from virttest import data_dir -from virttest import error_context from provider import message_queuing @@ -18,16 +16,15 @@ def compile_nc_vsock_guest(test, vm, session): :param session: vm session :return: Path to binary nc-vsock or None if compile failed """ - nc_vsock_dir = '/home/' - nc_vsock_bin = 'nc-vsock' - nc_vsock_c = 'nc-vsock.c' + nc_vsock_dir = "/home/" + nc_vsock_bin = "nc-vsock" + nc_vsock_c = "nc-vsock.c" src_file = os.path.join(data_dir.get_deps_dir("nc_vsock"), nc_vsock_c) bin_path = os.path.join(nc_vsock_dir, nc_vsock_bin) - rm_cmd = 'rm -rf %s*' % bin_path + rm_cmd = f"rm -rf {bin_path}*" session.cmd(rm_cmd) vm.copy_files_to(src_file, nc_vsock_dir) - compile_cmd = "cd %s && gcc -o %s %s" % ( - nc_vsock_dir, nc_vsock_bin, nc_vsock_c) + compile_cmd = f"cd {nc_vsock_dir} && gcc -o {nc_vsock_bin} {nc_vsock_c}" guest_status = session.cmd_status(compile_cmd) if guest_status != 0: session.cmd_output_safe(rm_cmd) @@ -57,18 +54,17 @@ def _get_file_l2(obj, msg): session.sendline(cmd_receive) time.sleep(10) - chksum_cmd = 'md5sum %s' % tmp_file + chksum_cmd = f"md5sum {tmp_file}" md5_received = session.cmd_output(chksum_cmd, timeout=60).split()[0] md5_origin = msg.split(":")[1] - test.log.info("md5_origin:" + md5_origin) - test.log.info("md5_received:" + md5_received) + test.log.info("md5_origin: %s", md5_origin) + test.log.info("md5_received: %s", md5_received) obj.set_msg_loop(False) obj.send_message("exit") if md5_origin != md5_received: - test.fail( - "File got on L2 is not identical with the file on the host.") + test.fail("File got on L2 is not identical with the file on the host.") test.log.info("Test ended.") @@ -88,20 +84,18 @@ def _get_file_l2(obj, msg): host = params.get("mq_publisher") mq_port = params.get_numeric("mq_port", 2000) - test.log.info("host:{} port:{}".format(host, mq_port)) + test.log.info("host:%s port:%s", host, mq_port) client = message_queuing.MQClient(host, mq_port) time.sleep(5) cmd_receive = None if vsock_test_tool == "ncat": tool_bin = path.find_command("ncat") - cmd_receive = '%s --vsock %s %s > %s &' % ( - tool_bin, host_cid, vsock_port, tmp_file) + cmd_receive = f"{tool_bin} --vsock {host_cid} {vsock_port} > {tmp_file} &" if vsock_test_tool == "nc_vsock": tool_bin = compile_nc_vsock_guest(test, vm, session) - cmd_receive = '%s %s %s > %s &' % ( - tool_bin, host_cid, vsock_port, tmp_file) + cmd_receive = f"{tool_bin} {host_cid} {vsock_port} > {tmp_file} &" if cmd_receive is None: raise ValueError(f"unexpected test tool: {vsock_test_tool}") @@ -114,6 +108,6 @@ def _get_file_l2(obj, msg): finally: client.close() test.log.debug("MQ closed") - session.cmd_output_safe("rm -rf %s" % tmp_file) + session.cmd_output_safe(f"rm -rf {tmp_file}") if vsock_test_tool == 
"nc_vsock": - session.cmd_output_safe("rm -rf %s*" % tool_bin) + session.cmd_output_safe(f"rm -rf {tool_bin}*") diff --git a/qemu/tests/netkvm_change_param_value_test.py b/qemu/tests/netkvm_change_param_value_test.py index c1cbf81569..19e1721efa 100644 --- a/qemu/tests/netkvm_change_param_value_test.py +++ b/qemu/tests/netkvm_change_param_value_test.py @@ -1,6 +1,4 @@ -from virttest import utils_test -from virttest import error_context -from virttest import utils_net +from virttest import error_context, utils_net, utils_test from virttest.utils_windows import virtio_win @@ -28,8 +26,7 @@ def start_test(param_name, param_value): param param_name: the netkvm driver parameter to modify param param_value: the value to set to """ - error_context.context("Start set %s to %s" % (param_name, param_value), - test.log.info) + error_context.context(f"Start set {param_name} to {param_value}", test.log.info) utils_net.set_netkvm_param_value(vm, param_name, param_value) test.log.info("Check value after setting %s", param_name) @@ -43,10 +40,10 @@ def start_test(param_name, param_value): guest_ip = vm.get_address() status, output = utils_test.ping(guest_ip, 10, timeout=15) if status: - test.fail("Ping returns non-zero value %s" % output) + test.fail(f"Ping returns non-zero value {output}") package_lost = utils_test.get_loss_ratio(output) if package_lost != 0: - test.fail("Ping test got %s package lost" % package_lost) + test.fail(f"Ping test got {package_lost} package lost") def _get_driver_version(session): """ @@ -55,8 +52,8 @@ def _get_driver_version(session): """ query_version_cmd = params["query_version_cmd"] output = session.cmd_output(query_version_cmd) - version_str = output.strip().split('=')[1] - version = version_str.split('.')[-1][0:3] + version_str = output.strip().split("=")[1] + version = version_str.split(".")[-1][0:3] return int(version) timeout = params.get("timeout", 360) @@ -66,13 +63,13 @@ def _get_driver_version(session): vm.verify_alive() session = vm.wait_for_login(timeout=timeout) - error_context.context("Check if the driver is installed and " - "verified", test.log.info) + error_context.context( + "Check if the driver is installed and " "verified", test.log.info + ) driver_verifier = params["driver_verifier"] - session = utils_test.qemu.windrv_check_running_verifier(session, vm, - test, - driver_verifier, - timeout) + session = utils_test.qemu.windrv_check_running_verifier( + session, vm, test, driver_verifier, timeout + ) driver_version = _get_driver_version(session) session.close() @@ -82,7 +79,7 @@ def _get_driver_version(session): elif driver_version > 189 and "MTU" in param_names: param_names.remove("MTU") for name in param_names: - attr_name = "param_values_%s" % name + attr_name = f"param_values_{name}" param_values = params.get(attr_name, param_values_default) for value in param_values.split(): start_test(name, value) diff --git a/qemu/tests/netkvm_cpu_mapping.py b/qemu/tests/netkvm_cpu_mapping.py index eaf5fe111f..d54951bc9a 100644 --- a/qemu/tests/netkvm_cpu_mapping.py +++ b/qemu/tests/netkvm_cpu_mapping.py @@ -1,7 +1,6 @@ import re -from virttest import error_context -from virttest import utils_net +from virttest import error_context, utils_net @error_context.context_aware @@ -18,17 +17,15 @@ def run(test, params, env): # boot the vm with the queues queues = int(params["queues"]) - error_context.context("Boot the guest with queues = %s" % queues, - test.log.info) + error_context.context(f"Boot the guest with queues = {queues}", test.log.info) vm = 
env.get_vm(params["main_vm"]) vm.verify_alive() nic = vm.virtnet[0] - nic_vectors = int(nic['vectors']) if nic['vectors'] else (2 * queues + 2) + nic_vectors = int(nic["vectors"]) if nic["vectors"] else (2 * queues + 2) error_context.context("Get CPU mapping info by traceview", test.log.info) output = utils_net.dump_traceview_log_windows(params, vm) check_reg = "SetupInterrruptAffinity.*?Option = 0x0" mapping_count = len(re.findall(check_reg, output)) if mapping_count != nic_vectors: - test.fail("Mapping info count %s not match vectors %s" % - (mapping_count, nic_vectors)) + test.fail(f"Mapping info count {mapping_count} not match vectors {nic_vectors}") diff --git a/qemu/tests/netkvm_in_use.py b/qemu/tests/netkvm_in_use.py index 5882068101..eaf886522c 100644 --- a/qemu/tests/netkvm_in_use.py +++ b/qemu/tests/netkvm_in_use.py @@ -1,7 +1,8 @@ import time -from virttest.utils_test import qemu from virttest import error_context +from virttest.utils_test import qemu + from provider import netperf @@ -18,14 +19,16 @@ def run(test, params, env): :param params: Dictionary with the test parameters :param env: Dictionary with test environment. """ + def shutdown(): """ shutdown the vm by shell command """ shutdown_command = params.get("shutdown_command") session.sendline(shutdown_command) - error_context.context("waiting VM to go down (shutdown shell cmd)", - test.log.info) + error_context.context( + "waiting VM to go down (shutdown shell cmd)", test.log.info + ) if not vm.wait_for_shutdown(360): test.fail("Guest refuses to go down") @@ -35,13 +38,11 @@ def stop_continue(): """ error_context.base_context("Stop the VM", test.log.info) vm.pause() - error_context.context("Verify the status of VM is 'paused'", - test.log.info) + error_context.context("Verify the status of VM is 'paused'", test.log.info) if vm.verify_status("paused") is False: test.error("VM status is not paused") vm.resume() - error_context.context("Verify the status of VM is 'running'", - test.log.info) + error_context.context("Verify the status of VM is 'running'", test.log.info) if vm.verify_status("running") is False: test.error("VM status is not running") @@ -60,10 +61,14 @@ def nic_hotplug(): nettype = params.get("nettype", "bridge") nic_hotplug_count = params.get_numeric("nic_hotplug_count", 10) for i in range(1, nic_hotplug_count): - nic_name = 'hotadded%s' % i - vm.hotplug_nic(nic_model=pci_model, nic_name=nic_name, - netdst=netdst, nettype=nettype, - queues=params.get('queues')) + nic_name = f"hotadded{i}" + vm.hotplug_nic( + nic_model=pci_model, + nic_name=nic_name, + netdst=netdst, + nettype=nettype, + queues=params.get("queues"), + ) time.sleep(3) vm.hotunplug_nic(nic_name) time.sleep(3) @@ -71,13 +76,13 @@ def nic_hotplug(): netkvm_sub_test = params["netkvm_sub_test"] driver = params["driver_name"] driver_verifier = params.get("driver_verifier", driver) - driver_running = params.get('driver_running', driver_verifier) + driver_running = params.get("driver_running", driver_verifier) timeout = int(params.get("login_timeout", 360)) - vm_name = params['main_vm'] + vm_name = params["main_vm"] vm = env.get_vm(vm_name) vm.verify_alive() - error_context.context("Boot guest with %s device" % driver, test.log.info) + error_context.context(f"Boot guest with {driver} device", test.log.info) session = vm.wait_for_login(timeout=timeout) qemu.windrv_verify_running(session, test, driver_running) @@ -86,8 +91,9 @@ def nic_hotplug(): error_context.context("Start netperf test", test.log.info) netperf_test = netperf.NetperfTest(params, vm) if 
netperf_test.start_netperf_test(): - error_context.context("Start %s test during netperf test" - % netkvm_sub_test, test.log.info) - eval("%s()" % netkvm_sub_test) + error_context.context( + f"Start {netkvm_sub_test} test during netperf test", test.log.info + ) + eval(f"{netkvm_sub_test}()") else: test.fail("Failed to start netperf test") diff --git a/qemu/tests/netkvm_protocol_binding.py b/qemu/tests/netkvm_protocol_binding.py index 23382e3228..ae3aab5b3b 100644 --- a/qemu/tests/netkvm_protocol_binding.py +++ b/qemu/tests/netkvm_protocol_binding.py @@ -1,9 +1,5 @@ from aexpect import ShellTimeoutError - -from virttest import error_context -from virttest import utils_test -from virttest import utils_misc -from virttest import utils_net +from virttest import error_context, utils_misc, utils_net, utils_test from virttest.utils_windows import virtio_win @@ -22,6 +18,7 @@ def run(test, params, env): :param params: Dictionary with the test parameters. :param env: Dictionary with test environment. """ + # workaround for driver has no signature def send_key(vm, key): # Send key to guest @@ -31,20 +28,19 @@ def send_key(vm, key): timeout = params.get_numeric("timeout", 360) vm = env.get_vm(params["main_vm"]) session = vm.wait_for_login(timeout=timeout) - error_context.context("Check if the driver is installed and " - "verified", test.log.info) + error_context.context( + "Check if the driver is installed and " "verified", test.log.info + ) driver_verifier = params["driver_verifier"] - session = utils_test.qemu.windrv_check_running_verifier(session, vm, - test, - driver_verifier, - timeout) + session = utils_test.qemu.windrv_check_running_verifier( + session, vm, test, driver_verifier, timeout + ) error_context.context("Install VIOPROT protocol", test.log.info) media_type = params["virtio_win_media_type"] try: - get_drive_letter = getattr(virtio_win, "drive_letter_%s" % media_type) - get_product_dirname = getattr(virtio_win, - "product_dirname_%s" % media_type) - get_arch_dirname = getattr(virtio_win, "arch_dirname_%s" % media_type) + get_drive_letter = getattr(virtio_win, f"drive_letter_{media_type}") + get_product_dirname = getattr(virtio_win, f"product_dirname_{media_type}") + get_arch_dirname = getattr(virtio_win, f"arch_dirname_{media_type}") except AttributeError: test.error("Not supported virtio win media type '%s'", media_type) viowin_ltr = get_drive_letter(session) @@ -57,9 +53,9 @@ def send_key(vm, key): if not guest_arch: test.error("Could not get architecture dirname of the vm") - inf_middle_path = ("{name}\\{arch}" if media_type == "iso" - else "{arch}\\{name}").format(name=guest_name, - arch=guest_arch) + inf_middle_path = ( + "{name}\\{arch}" if media_type == "iso" else "{arch}\\{name}" + ).format(name=guest_name, arch=guest_arch) inf_find_cmd = 'dir /b /s %s\\vioprot.inf | findstr "\\%s\\\\"' inf_find_cmd %= (viowin_ltr, inf_middle_path) inf_path = session.cmd(inf_find_cmd, timeout=timeout).strip() @@ -74,8 +70,12 @@ def send_key(vm, key): except ShellTimeoutError: send_key(vm, key_to_install_driver) if utils_misc.wait_for( - lambda: params["check_info"] in - session.cmd_output(params["check_installation_cmd"]), 30, 15, 5): + lambda: params["check_info"] + in session.cmd_output(params["check_installation_cmd"]), + 30, + 15, + 5, + ): test.log.info("inf file installation successfully") else: test.error("inf file installation failed") @@ -83,15 +83,15 @@ def send_key(vm, key): error_context.context("Bind netkvm protocol to netkvm adapter") nic_mac = vm.get_mac_address(0) connection_id 
= utils_net.get_windows_nic_attribute(
-        session, "macaddress", nic_mac, "netconnectionid", timeout=timeout)
+        session, "macaddress", nic_mac, "netconnectionid", timeout=timeout
+    )
     bind_cmd = params["bind_cmd"] % connection_id
     status, output = session.cmd_status_output(bind_cmd, timeout=timeout)
     if status:
-        test.error("Bind netkvm protocol failed, output=%s" % output)
+        test.error(f"Bind netkvm protocol failed, output={output}")
 
     error_context.context("Ping out from guest", test.log.info)
     host_ip = utils_net.get_host_ip_address(params)
-    status, output = utils_net.ping(host_ip, count=10, timeout=60,
-                                    session=session)
+    status, output = utils_net.ping(host_ip, count=10, timeout=60, session=session)
     if status:
-        test.fail("Ping %s failed, output=%s" % (host_ip, output))
+        test.fail(f"Ping {host_ip} failed, output={output}")
diff --git a/qemu/tests/netkvm_rss_test.py b/qemu/tests/netkvm_rss_test.py
index 2a58db7ce2..0958b32092 100644
--- a/qemu/tests/netkvm_rss_test.py
+++ b/qemu/tests/netkvm_rss_test.py
@@ -13,14 +13,15 @@ def run(test, params, env):
     :param params: Dictionary with the test parameters
     :param env: Dictionary with test environmen.
     """
+
     def execute_command(command, timeout=60, omit=False):
         """
         Execute command and return the output
         """
-        test.log.info("Sending command: %s" % command)
+        test.log.info("Sending command: %s", command)
         status, output = session.cmd_status_output(command, timeout)
         if status != 0 and omit is False:
-            test.error("execute command fail: %s" % output)
+            test.error(f"execute command fail: {output}")
         return output
 
     vm = env.get_vm(params["main_vm"])
@@ -30,10 +31,11 @@ def execute_command(command, timeout=60, omit=False):
     speedtest_host_path = data_dir.get_deps_dir("speedtest")
     dst_path = params["dst_path"]
     test.log.info("Copy Speedtest to guest.")
-    s, o = session.cmd_status_output("mkdir %s" % dst_path)
+    s, o = session.cmd_status_output(f"mkdir {dst_path}")
     if s and "already exists" not in o:
-        test.error("Could not create Speedtest directory in "
-                   "VM '%s', detail: '%s'" % (vm.name, o))
+        test.error(
+            "Could not create Speedtest directory in " f"VM '{vm.name}', detail: '{o}'"
+        )
     vm.copy_files_to(speedtest_host_path, dst_path)
 
     # set up adapterrss in guest
diff --git a/qemu/tests/netperf_stress.py b/qemu/tests/netperf_stress.py
index 5b8e9cd456..b1c0668934 100644
--- a/qemu/tests/netperf_stress.py
+++ b/qemu/tests/netperf_stress.py
@@ -1,12 +1,14 @@
 import os
 import time
 
-from virttest import error_context
-from virttest import utils_net
-from virttest import utils_netperf
-from virttest import utils_misc
-from virttest import data_dir
-from virttest import env_process
+from virttest import (
+    data_dir,
+    env_process,
+    error_context,
+    utils_misc,
+    utils_net,
+    utils_netperf,
+)
 
 
 @error_context.context_aware
@@ -23,6 +25,7 @@ def run(test, params, env):
     :param params: Dictionary with the test parameters
     :param env: Dictionary with test environment.
""" + def netperf_test(duration): while duration < max_run_time: time.sleep(10) @@ -50,14 +53,12 @@ def netperf_test(duration): shell_port = params.get("shell_port") os_type = params.get("os_type") shell_prompt = params.get("shell_prompt", r"^root@.*[\#\$]\s*$|#") - linesep = params.get( - "shell_linesep", "\n").encode().decode('unicode_escape') + linesep = params.get("shell_linesep", "\n").encode().decode("unicode_escape") status_test_command = params.get("status_test_command", "echo $?") compile_option_client = params.get("compile_option_client", "") compile_option_server = params.get("compile_option_server", "") disable_firewall = params.get("disable_firewall", "") - if (params.get("netperf_vlan_test", "no") == "yes" and - params.get("host_vlan_ip")): + if params.get("netperf_vlan_test", "no") == "yes" and params.get("host_vlan_ip"): host_ip = params.get("host_vlan_ip") else: host_ip = utils_net.get_host_ip_address(params) @@ -69,15 +70,17 @@ def netperf_test(duration): s_info = {} if server in vms: if params.get("os_type") == "windows": - if params.get_numeric("smp") > 32 or params.get_numeric("vcpu_maxcpus") > 32: + if ( + params.get_numeric("smp") > 32 + or params.get_numeric("vcpu_maxcpus") > 32 + ): params["smp"] = params["vcpu_maxcpus"] = 32 params["start_vm"] = "yes" env_process.preprocess_vm(test, params, env, server) server_vm = env.get_vm(server) server_vm.verify_alive() session = server_vm.wait_for_login(timeout=login_timeout) - session.cmd(disable_firewall, - ignore_all_errors=True) + session.cmd(disable_firewall, ignore_all_errors=True) if params.get("netperf_vlan_test", "no") == "yes": vlan_nic = params.get("vlan_nic") server_ip = utils_net.get_linux_ipaddr(session, vlan_nic)[0] @@ -85,43 +88,34 @@ def netperf_test(duration): server_ip = server_vm.get_address() s_info["ip"] = server_ip - s_info["os_type"] = params.get("os_type_%s" % server, os_type) - s_info["username"] = params.get("username_%s" % server, - guest_username) - s_info["password"] = params.get("password_%s" % server, - guest_password) - s_info["shell_client"] = params.get("shell_client_%s" % server, - shell_client) - s_info["shell_port"] = params.get("shell_port_%s" % server, - shell_port) - s_info["shell_prompt"] = params.get("shell_prompt_%s" % server, - shell_prompt) - s_info["linesep"] = params.get("linesep_%s" % server, - linesep) - s_info["status_test_command"] = params.get("status_test_command_%s" % server, - status_test_command) + s_info["os_type"] = params.get(f"os_type_{server}", os_type) + s_info["username"] = params.get(f"username_{server}", guest_username) + s_info["password"] = params.get(f"password_{server}", guest_password) + s_info["shell_client"] = params.get(f"shell_client_{server}", shell_client) + s_info["shell_port"] = params.get(f"shell_port_{server}", shell_port) + s_info["shell_prompt"] = params.get(f"shell_prompt_{server}", shell_prompt) + s_info["linesep"] = params.get(f"linesep_{server}", linesep) + s_info["status_test_command"] = params.get( + f"status_test_command_{server}", status_test_command + ) else: if server == "localhost": s_info["ip"] = host_ip - s_info["password"] = params.get("password_%s" % server, - host_password) + s_info["password"] = params.get(f"password_{server}", host_password) else: s_info["ip"] = server - s_info["password"] = params.get("password_%s" % server, - "redhat") - s_info["os_type"] = params.get("os_type_%s" % server, "linux") - s_info["username"] = params.get("username_%s" % server, - "root") - s_info["shell_client"] = params.get("shell_client_%s" % 
server, - "ssh") - s_info["shell_port"] = params.get("shell_port_%s" % server, - "22") - s_info["shell_prompt"] = params.get("shell_prompt_%s" % server, - r"^\[.*\][\#\$]\s*$") - s_info["linesep"] = params.get("linesep_%s" % server, - "\n") - s_info["status_test_command"] = params.get("status_test_command_%s" % server, - "echo $?") + s_info["password"] = params.get(f"password_{server}", "redhat") + s_info["os_type"] = params.get(f"os_type_{server}", "linux") + s_info["username"] = params.get(f"username_{server}", "root") + s_info["shell_client"] = params.get(f"shell_client_{server}", "ssh") + s_info["shell_port"] = params.get(f"shell_port_{server}", "22") + s_info["shell_prompt"] = params.get( + f"shell_prompt_{server}", r"^\[.*\][\#\$]\s*$" + ) + s_info["linesep"] = params.get(f"linesep_{server}", "\n") + s_info["status_test_command"] = params.get( + f"status_test_command_{server}", "echo $?" + ) server_infos.append(s_info) for client in netperf_client: @@ -130,63 +124,55 @@ def netperf_test(duration): client_vm = env.get_vm(client) client_vm.verify_alive() session = client_vm.wait_for_login(timeout=login_timeout) - session.cmd(disable_firewall, - ignore_all_errors=True) + session.cmd(disable_firewall, ignore_all_errors=True) if params.get("netperf_vlan_test", "no") == "yes": vlan_nic = params.get("vlan_nic") client_ip = utils_net.get_linux_ipaddr(session, vlan_nic)[0] else: client_ip = client_vm.get_address() c_info["ip"] = client_ip - c_info["os_type"] = params.get("os_type_%s" % client, os_type) - c_info["username"] = params.get("username_%s" % client, - guest_username) - c_info["password"] = params.get("password_%s" % client, - guest_password) - c_info["shell_client"] = params.get("shell_client_%s" % client, - shell_client) - c_info["shell_port"] = params.get("shell_port_%s" % client, - shell_port) - c_info["shell_prompt"] = params.get("shell_prompt_%s" % client, - shell_prompt) - c_info["linesep"] = params.get("linesep_%s" % client, - linesep) - c_info["status_test_command"] = params.get("status_test_command_%s" % client, - status_test_command) + c_info["os_type"] = params.get(f"os_type_{client}", os_type) + c_info["username"] = params.get(f"username_{client}", guest_username) + c_info["password"] = params.get(f"password_{client}", guest_password) + c_info["shell_client"] = params.get(f"shell_client_{client}", shell_client) + c_info["shell_port"] = params.get(f"shell_port_{client}", shell_port) + c_info["shell_prompt"] = params.get(f"shell_prompt_{client}", shell_prompt) + c_info["linesep"] = params.get(f"linesep_{client}", linesep) + c_info["status_test_command"] = params.get( + f"status_test_command_{client}", status_test_command + ) else: if client == "localhost": c_info["ip"] = host_ip - c_info["password"] = params.get("password_%s" % client, - host_password) + c_info["password"] = params.get(f"password_{client}", host_password) else: c_info["ip"] = client - c_info["password"] = params.get("password_%s" % client, - "redhat") - c_info["os_type"] = params.get("os_type_%s" % client, "linux") - c_info["username"] = params.get("username_%s" % client, - "root") - c_info["shell_client"] = params.get("shell_client_%s" % client, - "ssh") - c_info["shell_port"] = params.get("shell_port_%s" % client, - "23") - c_info["shell_prompt"] = params.get("shell_prompt_%s" % client, - r"^\[.*\][\#\$]\s*$") - c_info["linesep"] = params.get("linesep_%s" % client, - "\n") - c_info["status_test_command"] = params.get("status_test_command_%s" % client, - "echo $?") + c_info["password"] = 
params.get(f"password_{client}", "redhat") + c_info["os_type"] = params.get(f"os_type_{client}", "linux") + c_info["username"] = params.get(f"username_{client}", "root") + c_info["shell_client"] = params.get(f"shell_client_{client}", "ssh") + c_info["shell_port"] = params.get(f"shell_port_{client}", "23") + c_info["shell_prompt"] = params.get( + f"shell_prompt_{client}", r"^\[.*\][\#\$]\s*$" + ) + c_info["linesep"] = params.get(f"linesep_{client}", "\n") + c_info["status_test_command"] = params.get( + f"status_test_command_{client}", "echo $?" + ) client_infos.append(c_info) netperf_link = params.get("netperf_link") netperf_link = os.path.join(data_dir.get_deps_dir("netperf"), netperf_link) md5sum = params.get("pkg_md5sum") netperf_server_link = params.get("netperf_server_link_win", netperf_link) - netperf_server_link = os.path.join(data_dir.get_deps_dir("netperf"), - netperf_server_link) + netperf_server_link = os.path.join( + data_dir.get_deps_dir("netperf"), netperf_server_link + ) server_md5sum = params.get("server_md5sum") netperf_client_link = params.get("netperf_client_link_win", netperf_link) - netperf_client_link = os.path.join(data_dir.get_deps_dir("netperf"), - netperf_client_link) + netperf_client_link = os.path.join( + data_dir.get_deps_dir("netperf"), netperf_client_link + ) client_md5sum = params.get("client_md5sum") server_path_linux = params.get("server_path", "/var/tmp") @@ -204,17 +190,20 @@ def netperf_test(duration): else: netperf_link_c = netperf_link client_path = client_path_linux - n_client = utils_netperf.NetperfClient(c_info["ip"], - client_path, - md5sum, netperf_link_c, - client=c_info["shell_client"], - port=c_info["shell_port"], - username=c_info["username"], - password=c_info["password"], - prompt=c_info["shell_prompt"], - linesep=c_info["linesep"], - status_test_command=c_info["status_test_command"], - compile_option=compile_option_client) + n_client = utils_netperf.NetperfClient( + c_info["ip"], + client_path, + md5sum, + netperf_link_c, + client=c_info["shell_client"], + port=c_info["shell_port"], + username=c_info["username"], + password=c_info["password"], + prompt=c_info["shell_prompt"], + linesep=c_info["linesep"], + status_test_command=c_info["status_test_command"], + compile_option=compile_option_client, + ) netperf_clients.append(n_client) for s_info in server_infos: @@ -225,17 +214,20 @@ def netperf_test(duration): else: netperf_link_s = netperf_link server_path = server_path_linux - n_server = utils_netperf.NetperfServer(s_info["ip"], - server_path, - md5sum, netperf_link_s, - client=s_info["shell_client"], - port=s_info["shell_port"], - username=s_info["username"], - password=s_info["password"], - prompt=s_info["shell_prompt"], - linesep=s_info["linesep"], - status_test_command=s_info["status_test_command"], - compile_option=compile_option_server) + n_server = utils_netperf.NetperfServer( + s_info["ip"], + server_path, + md5sum, + netperf_link_s, + client=s_info["shell_client"], + port=s_info["shell_port"], + username=s_info["username"], + password=s_info["password"], + prompt=s_info["shell_prompt"], + linesep=s_info["linesep"], + status_test_command=s_info["status_test_command"], + compile_option=compile_option_server, + ) netperf_servers.append(n_server) # Get range of message size. 
@@ -251,27 +243,31 @@ def netperf_test(duration): netperf_output_unit = params.get("netperf_output_unit", " ") netperf_package_sizes = params.get("netperf_package_sizes") test_option = params.get("test_option", "") - test_option += " -l %s" % test_duration + test_option += f" -l {test_duration}" if params.get("netperf_remote_cpu") == "yes": test_option += " -C" if params.get("netperf_local_cpu") == "yes": test_option += " -c" if netperf_output_unit in "GMKgmk": - test_option += " -f %s" % netperf_output_unit + test_option += f" -f {netperf_output_unit}" num = 0 s_len = len(server_infos) for protocol in test_protocols.split(): - error_context.context("Testing %s protocol" % protocol, - test.log.info) - t_option = "%s -t %s" % (test_option, protocol) + error_context.context(f"Testing {protocol} protocol", test.log.info) + t_option = f"{test_option} -t {protocol}" for n_client in netperf_clients: index = num % s_len server_ip = server_infos[index]["ip"] - n_client.bg_start(server_ip, t_option, - netperf_para_sess, netperf_cmd_prefix, - package_sizes=netperf_package_sizes) - if utils_misc.wait_for(n_client.is_netperf_running, 10, 0, 3, - "Wait netperf test start"): + n_client.bg_start( + server_ip, + t_option, + netperf_para_sess, + netperf_cmd_prefix, + package_sizes=netperf_package_sizes, + ) + if utils_misc.wait_for( + n_client.is_netperf_running, 10, 0, 3, "Wait netperf test start" + ): test.log.info("Netperf test start successfully.") else: test.error("Can not start netperf client.") diff --git a/qemu/tests/netperf_udp.py b/qemu/tests/netperf_udp.py index aaa3b110ec..5bf400f161 100644 --- a/qemu/tests/netperf_udp.py +++ b/qemu/tests/netperf_udp.py @@ -1,10 +1,7 @@ import os import re -from virttest import error_context -from virttest import utils_net -from virttest import utils_netperf -from virttest import data_dir +from virttest import data_dir, error_context, utils_net, utils_netperf @error_context.context_aware @@ -40,17 +37,18 @@ def run(test, params, env): error_context.context("Test env prepare", test.log.info) netperf_link = params.get("netperf_link") if netperf_link: - netperf_link = os.path.join(data_dir.get_deps_dir("netperf"), - netperf_link) + netperf_link = os.path.join(data_dir.get_deps_dir("netperf"), netperf_link) md5sum = params.get("pkg_md5sum") netperf_server_link = params.get("netperf_server_link_win") if netperf_server_link: - netperf_server_link = os.path.join(data_dir.get_deps_dir("netperf"), - netperf_server_link) + netperf_server_link = os.path.join( + data_dir.get_deps_dir("netperf"), netperf_server_link + ) netperf_client_link = params.get("netperf_client_link_win") if netperf_client_link: - netperf_client_link = os.path.join(data_dir.get_deps_dir("netperf"), - netperf_client_link) + netperf_client_link = os.path.join( + data_dir.get_deps_dir("netperf"), netperf_client_link + ) server_md5sum = params.get("server_md5sum") client_md5sum = params.get("client_md5sum") @@ -61,12 +59,11 @@ def run(test, params, env): client_path_win = params.get("client_path_win", "c:\\") guest_username = params.get("username", "") guest_password = params.get("password", "") - host_password = params.get("hostpassword") + params.get("hostpassword") client = params.get("shell_client") port = params.get("shell_port") prompt = params.get("shell_prompt", r"^root@.*[\#\$]\s*$|#") - linesep = params.get( - "shell_linesep", "\n").encode().decode('unicode_escape') + linesep = params.get("shell_linesep", "\n").encode().decode("unicode_escape") status_test_command = 
params.get("status_test_command", "echo $?") compile_option_client = params.get("compile_option_client", "") compile_option_server = params.get("compile_option_server", "") @@ -97,10 +94,10 @@ def run(test, params, env): server_interface = params.get("netdst", "switch") host_nic = utils_net.Interface(server_interface) netserver_ip = host_nic.get_ip() - s_client = params.get("shell_client_%s" % dsthost, "ssh") - s_port = params.get("shell_port_%s" % dsthost, "22") - s_username = params.get("username_%s" % dsthost, "root") - s_password = params.get("password_%s" % dsthost, "redhat") + s_client = params.get(f"shell_client_{dsthost}", "ssh") + s_port = params.get(f"shell_port_{dsthost}", "22") + s_username = params.get(f"username_{dsthost}", "root") + s_password = params.get(f"password_{dsthost}", "redhat") s_link = netperf_link s_path = server_path s_md5sum = md5sum @@ -114,28 +111,35 @@ def run(test, params, env): c_md5sum = md5sum c_link = netperf_link - netperf_client = utils_netperf.NetperfClient(main_vm_ip, - c_path, - c_md5sum, c_link, - client, port, - username=guest_username, - password=guest_password, - prompt=prompt, - linesep=linesep, - status_test_command=status_test_command, - compile_option=compile_option_client) - - netperf_server = utils_netperf.NetperfServer(netserver_ip, - s_path, - s_md5sum, - s_link, - s_client, s_port, - username=s_username, - password=s_password, - prompt=prompt, - linesep=linesep, - status_test_command=status_test_command, - compile_option=compile_option_server) + netperf_client = utils_netperf.NetperfClient( + main_vm_ip, + c_path, + c_md5sum, + c_link, + client, + port, + username=guest_username, + password=guest_password, + prompt=prompt, + linesep=linesep, + status_test_command=status_test_command, + compile_option=compile_option_client, + ) + + netperf_server = utils_netperf.NetperfServer( + netserver_ip, + s_path, + s_md5sum, + s_link, + s_client, + s_port, + username=s_username, + password=s_password, + prompt=prompt, + linesep=linesep, + status_test_command=status_test_command, + compile_option=compile_option_server, + ) # Get range of message size. message_size = params.get("message_size_range", "580 590 1").split() @@ -152,7 +156,7 @@ def run(test, params, env): msg = "Detail result of netperf test with different packet size.\n" for m_size in range(start_size, end_size + 1, step): test_protocol = params.get("test_protocol", "UDP_STREAM") - test_option = "-t %s -- -m %s" % (test_protocol, m_size) + test_option = f"-t {test_protocol} -- -m {m_size}" txt = "Run netperf client with protocol: '%s', packet size: '%s'" error_context.context(txt % (test_protocol, m_size), test.log.info) output = netperf_client.start(netserver_ip, test_option) @@ -161,8 +165,8 @@ def run(test, params, env): try: line_tokens = re.findall(re_str, output)[0].split() except IndexError: - txt = "Fail to get Throughput for %s." % m_size - txt += " netprf client output: %s" % output + txt = f"Fail to get Throughput for {m_size}." 
+ txt += f" netprf client output: {output}" test.error(txt) if not line_tokens: test.error("Output format is not expected") @@ -176,7 +180,7 @@ def run(test, params, env): netperf_server.cleanup() netperf_client.cleanup() except Exception as e: - test.log.warn("Cleanup failed:\n%s\n", e) + test.log.warning("Cleanup failed:\n%s\n", e) with open(os.path.join(test.debugdir, "udp_results"), "w") as result_file: result_file.write(msg) @@ -185,8 +189,8 @@ def run(test, params, env): for i in range(len(throughput) - 1): if abs(throughput[i] - throughput[i + 1]) > throughput[i] * failratio: txt = "The gap between adjacent throughput is greater than" - txt += "%f." % failratio - txt += "Please refer to log file for details:\n %s" % msg + txt += f"{failratio:f}." + txt += f"Please refer to log file for details:\n {msg}" test.fail(txt) test.log.info("The UDP performance as measured via netperf is ok.") test.log.info("Throughput of netperf command: %s", throughput) diff --git a/qemu/tests/netperf_udp_perf.py b/qemu/tests/netperf_udp_perf.py index 2c62ba530f..fac04d6e69 100644 --- a/qemu/tests/netperf_udp_perf.py +++ b/qemu/tests/netperf_udp_perf.py @@ -4,13 +4,11 @@ import time from avocado.utils import process +from virttest import error_context, remote, virt_vm -from virttest import virt_vm -from virttest import remote -from virttest import error_context from provider import netperf_base -LOG_JOB = logging.getLogger('avocado.test') +LOG_JOB = logging.getLogger("avocado.test") @error_context.context_aware @@ -37,7 +35,7 @@ def run(test, params, env): except virt_vm.VMIPAddressMissingError: pass - mac = vm.get_mac_address(0) + vm.get_mac_address(0) server_ip = vm.wait_for_get_address(0, timeout=90) if len(params.get("nics", "").split()) > 1: @@ -47,40 +45,49 @@ def run(test, params, env): server_ctl = vm.wait_for_login(timeout=login_timeout) server_ctl_ip = server_ip - test.log.debug(process.system_output("numactl --hardware", - verbose=False, ignore_status=True, - shell=True).decode()) - test.log.debug(process.system_output("numactl --show", - verbose=False, ignore_status=True, - shell=True).decode()) + test.log.debug( + process.system_output( + "numactl --hardware", verbose=False, ignore_status=True, shell=True + ).decode() + ) + test.log.debug( + process.system_output( + "numactl --show", verbose=False, ignore_status=True, shell=True + ).decode() + ) # pin guest vcpus/memory/vhost threads to last numa node of host by default - numa_node = netperf_base.pin_vm_threads(vm, params.get("numa_node")) + netperf_base.pin_vm_threads(vm, params.get("numa_node")) host = params.get("host", "localhost") host_ip = host client = params.get("client", "localhost") client_ip = client client_pub_ip = params.get("client_public_ip") - client = remote.wait_for_login(params["shell_client_client"], - client_pub_ip, - params["shell_port_client"], - params["username_client"], - params["password_client"], - params["shell_prompt_client"]) - cmd = "ifconfig %s %s up" % (params.get("client_physical_nic"), - client_ip) + client = remote.wait_for_login( + params["shell_client_client"], + client_pub_ip, + params["shell_port_client"], + params["username_client"], + params["password_client"], + params["shell_prompt_client"], + ) + cmd = "ifconfig {} {} up".format(params.get("client_physical_nic"), client_ip) netperf_base.ssh_cmd(client, cmd) error_context.context("Prepare env of server/client/host", test.log.info) prepare_list = set([server_ctl, client, host]) tag_dict = {server_ctl: "server", client: "client", host: "host"} - 
ip_dict = {server_ctl: server_ctl_ip, client: client_pub_ip, - host: host_ip} + ip_dict = {server_ctl: server_ctl_ip, client: client_pub_ip, host: host_ip} for i in prepare_list: params_tmp = params.object_params(tag_dict[i]) - netperf_base.env_setup(test, params, i, ip_dict[i], - username=params_tmp["username"], - shell_port=int(params_tmp["shell_port"]), - password=params_tmp["password"]) + netperf_base.env_setup( + test, + params, + i, + ip_dict[i], + username=params_tmp["username"], + shell_port=int(params_tmp["shell_port"]), + password=params_tmp["password"], + ) netperf_base.tweak_tuned_profile(params, server_ctl, client, host) @@ -88,17 +95,23 @@ def run(test, params, env): try: error_context.context("Start netperf udp stream testing", test.log.info) - start_test(server_ip, server_ctl, host, client, test.resultsdir, - test_duration=int(params.get('test_duration')), - burst_time=params.get('burst_time'), - numbers_per_burst=params.get('numbers_per_burst'), - params=params, test=test) + start_test( + server_ip, + server_ctl, + host, + client, + test.resultsdir, + test_duration=int(params.get("test_duration")), + burst_time=params.get("burst_time"), + numbers_per_burst=params.get("numbers_per_burst"), + params=params, + test=test, + ) if params.get("log_hostinfo_script"): src = os.path.join(test.virtdir, params.get("log_hostinfo_script")) path = os.path.join(test.resultsdir, "systeminfo") - process.system_output("bash %s %s &> %s" % ( - src, test.resultsdir, path), shell=True) + process.system_output(f"bash {src} {test.resultsdir} &> {path}", shell=True) if params.get("log_guestinfo_script") and params.get("log_guestinfo_exec"): src = os.path.join(test.virtdir, params.get("log_guestinfo_script")) @@ -106,7 +119,7 @@ def run(test, params, env): destpath = params.get("log_guestinfo_path", "/tmp/log_guestinfo.sh") vm.copy_files_to(src, destpath) logexec = params.get("log_guestinfo_exec", "bash") - output = server_ctl.cmd_output("%s %s" % (logexec, destpath)) + output = server_ctl.cmd_output(f"{logexec} {destpath}") logfile = open(path, "a+") logfile.write(output) logfile.close() @@ -115,11 +128,18 @@ def run(test, params, env): @error_context.context_aware -def start_test(server, server_ctl, host, client, resultsdir, - test_duration="20", - burst_time="1", - numbers_per_burst="1000 1500 2000 2500 3000", - params=None, test=None): +def start_test( + server, + server_ctl, + host, + client, + resultsdir, + test_duration="20", + burst_time="1", + numbers_per_burst="1000 1500 2000 2500 3000", + params=None, + test=None, +): """ Start to test with different combination of burst_time and numbers_per_burst @@ -127,85 +147,95 @@ def start_test(server, server_ctl, host, client, resultsdir, if params is None: params = {} - fd = open("%s/netperf-udp-perf.result.%s.RHS" % ( - resultsdir, time.time()), "w") - netperf_base.record_env_version(test, params, host, server_ctl, - fd, test_duration) + fd = open(f"{resultsdir}/netperf-udp-perf.result.{time.time()}.RHS", "w") + netperf_base.record_env_version(test, params, host, server_ctl, fd, test_duration) error_context.context("Start Netserver on guest", LOG_JOB.info) netperf_version = params.get("netperf_version", "2.6.0") - client_path = "/tmp/netperf-%s/src/netperf" % netperf_version - server_path = "/tmp/netperf-%s/src/netserver" % netperf_version + client_path = f"/tmp/netperf-{netperf_version}/src/netperf" + server_path = f"/tmp/netperf-{netperf_version}/src/netserver" LOG_JOB.info("Netserver start cmd is '%s'", server_path) - 
netperf_base.ssh_cmd(server_ctl, "pidof netserver || %s" % server_path) + netperf_base.ssh_cmd(server_ctl, f"pidof netserver || {server_path}") base = params.get("format_base", "18") fbase = params.get("format_fbase", "2") pid = str(os.getpid()) - fname = "/tmp/netperf.%s.nf" % pid + fname = f"/tmp/netperf.{pid}.nf" numa_enable = params.get("netperf_with_numa", "yes") == "yes" - def thread_cmd(params, numa_enable, burst_time, numbers_per_burst, - client, server, test_duration, fname): - option = "%s -t UDP_STREAM -w %s -b %s -H %s -l %s" % ( - client_path, burst_time, numbers_per_burst, - server, test_duration) + def thread_cmd( + params, + numa_enable, + burst_time, + numbers_per_burst, + client, + server, + test_duration, + fname, + ): + option = f"{client_path} -t UDP_STREAM -w {burst_time} -b {numbers_per_burst} -H {server} -l {test_duration}" netperf_base.netperf_thread(params, numa_enable, client, option, fname) def thu_result(fname): - with open(fname, 'rt') as filehandle: + with open(fname, "rt") as filehandle: file = filehandle.readlines()[5:] results = [] for thu in file: - thu_tmp = thu.rstrip('\n').split(" ") + thu_tmp = thu.rstrip("\n").split(" ") thu_result = thu_tmp[-1] results.append(thu_result) return results record_header = True - record_list = ['burst_time', 'numbers_per_burst', 'send_throughput', - 'receive_throughput', 'drop_ratio'] + record_list = [ + "burst_time", + "numbers_per_burst", + "send_throughput", + "receive_throughput", + "drop_ratio", + ] for i in burst_time.split(): for j in numbers_per_burst.split(): - client_thread = threading.Thread(target=thread_cmd, args=( - params, numa_enable, i, j, client, server, test_duration, fname)) + client_thread = threading.Thread( + target=thread_cmd, + args=(params, numa_enable, i, j, client, server, test_duration, fname), + ) client_thread.start() - time.sleep(test_duration+1) + time.sleep(test_duration + 1) client_thread.join() ret = {} - ret['burst_time'] = int(i) - ret['numbers_per_burst'] = int(j) + ret["burst_time"] = int(i) + ret["numbers_per_burst"] = int(j) - finished_result = netperf_base.ssh_cmd(client, "cat %s" % fname) + finished_result = netperf_base.ssh_cmd(client, f"cat {fname}") f = open(fname, "w") f.write(finished_result) f.close() thu_all = thu_result(fname) - ret['send_throughput'] = float(thu_all[0]) - ret['receive_throughput'] = float(thu_all[1]) - ret['drop_ratio'] = float(ret['receive_throughput'] / - ret['send_throughput']) + ret["send_throughput"] = float(thu_all[0]) + ret["receive_throughput"] = float(thu_all[1]) + ret["drop_ratio"] = float( + ret["receive_throughput"] / ret["send_throughput"] + ) row, key_list = netperf_base.netperf_record( - ret, record_list, - header=record_header, - base=base, - fbase=fbase) + ret, record_list, header=record_header, base=base, fbase=fbase + ) if record_header: record_header = False - prefix = '%s--%s' % (i, j) + prefix = f"{i}--{j}" for key in key_list: - test.write_test_keyval( - {'%s--%s' % (prefix, key): ret[key]}) + test.write_test_keyval({f"{prefix}--{key}": ret[key]}) LOG_JOB.info(row) fd.write(row + "\n") fd.flush() LOG_JOB.debug("Remove temporary files") - process.system_output("rm -f %s" % fname, verbose=False, - ignore_status=True, shell=True) - netperf_base.ssh_cmd(client, "rm -f %s" % fname) + process.system_output( + f"rm -f {fname}", verbose=False, ignore_status=True, shell=True + ) + netperf_base.ssh_cmd(client, f"rm -f {fname}") fd.close() diff --git a/qemu/tests/netuser_buffer_test.py b/qemu/tests/netuser_buffer_test.py index 
0fab7d525f..75a300bd31 100644 --- a/qemu/tests/netuser_buffer_test.py +++ b/qemu/tests/netuser_buffer_test.py @@ -2,9 +2,7 @@ import time from avocado.utils import process -from virttest import error_context -from virttest import data_dir -from virttest import utils_net +from virttest import data_dir, error_context, utils_net @error_context.context_aware @@ -24,7 +22,7 @@ def run(test, params, env): vm.wait_for_login(timeout=login_timeout) exp_path = params["exp_path"] test_path = os.path.join(data_dir.get_deps_dir(), exp_path) - vm.copy_files_to(test_path, '~') + vm.copy_files_to(test_path, "~") vm.destroy() params["nettype"] = "user" @@ -36,17 +34,16 @@ def run(test, params, env): def mtu_test(): test.log.info("Set mtu value and verfied") serial_session.cmd(params["fw_stop_cmd"], ignore_all_errors=True) - guest_ifname = utils_net.get_linux_ifname(serial_session, - vm.get_mac_address(0)) - if guest_ifname != 'eth0': + guest_ifname = utils_net.get_linux_ifname(serial_session, vm.get_mac_address(0)) + if guest_ifname != "eth0": test.cancel("Guest device name is not expected") serial_session.cmd(params["set_mtu_cmd"] % guest_ifname) - output = serial_session.cmd_output(params["check_mtu_cmd"] % - guest_ifname) - match_string = "mtu %s" % params["mtu_value"] + output = serial_session.cmd_output(params["check_mtu_cmd"] % guest_ifname) + match_string = "mtu {}".format(params["mtu_value"]) if match_string not in output: - test.fail("Guest mtu is not the expected value %s" % - params["mtu_value"]) + test.fail( + "Guest mtu is not the expected value {}".format(params["mtu_value"]) + ) def pkg_buffer_test(): test.log.info("Compile the script and execute") @@ -55,7 +52,9 @@ def pkg_buffer_test(): time.sleep(60) s = process.getstatusoutput( "ps -aux|grep /usr/bin/gnome-calculator |grep -v grep", - timeout=60, shell=True)[0] + timeout=60, + shell=True, + )[0] if s == 0: test.fail("Virtual machine has security issues") serial_session.send_ctrl("^c") @@ -66,6 +65,7 @@ def pkg_buffer_test(): mtu_test() pkg_buffer_test() finally: - serial_session.cmd("rm -rf ~/exp ~/exp.c", - timeout=login_timeout, ignore_all_errors=True) + serial_session.cmd( + "rm -rf ~/exp ~/exp.c", timeout=login_timeout, ignore_all_errors=True + ) serial_session.close() diff --git a/qemu/tests/nfs_perf.py b/qemu/tests/nfs_perf.py index 4ec6189f08..1cad14f049 100644 --- a/qemu/tests/nfs_perf.py +++ b/qemu/tests/nfs_perf.py @@ -1,10 +1,8 @@ -import re import os +import re from avocado.utils import process - -from virttest import error_context -from virttest import utils_misc +from virttest import error_context, utils_misc STEP_1, STEP_2, STEP_3, STEP_4, STEP_5, STEP_6 = range(6) @@ -29,6 +27,7 @@ def run(test, params, env): :param params: Dictionary with the test parameters :param env: Dictionary with test environment """ + def _do_clean_up(func, *args): try: if args: @@ -36,22 +35,25 @@ def _do_clean_up(func, *args): else: func() except Exception as e: - test.log.warn("Failed to execute function '%s'." - " error message:\n%s", func.__name__, e) + test.log.warning( + "Failed to execute function '%s'." " error message:\n%s", + func.__name__, + e, + ) def _clean_up(step_cnt): error_context.context("Clean up", test.log.info) if step_cnt >= STEP_5: # remove test file. - cmd = "rm -f %s" % " ".join(test_file_list) + cmd = "rm -f {}".format(" ".join(test_file_list)) _do_clean_up(session.cmd, cmd) if step_cnt >= STEP_4: # umount nfs partition. 
- cmd = "umount %s" % mnt_point + cmd = f"umount {mnt_point}" _do_clean_up(session.cmd, cmd) if step_cnt >= STEP_3: # remove mount ponit directory. - cmd = "rm -rf %s" % mnt_point + cmd = f"rm -rf {mnt_point}" _do_clean_up(session.cmd, cmd) if step_cnt >= STEP_2: # close result file. @@ -64,12 +66,15 @@ def _do_write_test(blk_size, test_file): # Clean up caches session.cmd("echo 3 >/proc/sys/vm/drop_caches") - error_context.context("test %s size block write performance in guest" - " using dd commands" % blk_size, test.log.info) + error_context.context( + f"test {blk_size} size block write performance in guest" + " using dd commands", + test.log.info, + ) dd_cmd = "dd" dd_cmd += " if=/dev/zero" - dd_cmd += " of=%s" % test_file - dd_cmd += " bs=%s" % blk_size + dd_cmd += f" of={test_file}" + dd_cmd += f" bs={blk_size}" dd_cmd += " oflag=direct" dd_cmd += " count=10000" try: @@ -84,12 +89,15 @@ def _do_read_test(blk_size, test_file): # Clean up caches session.cmd("echo 3 >/proc/sys/vm/drop_caches") - error_context.context("test %s size block read performance in guest" - " using dd commands" % blk_size, test.log.info) + error_context.context( + f"test {blk_size} size block read performance in guest" + " using dd commands", + test.log.info, + ) dd_cmd = "dd" - dd_cmd += " if=%s" % test_file + dd_cmd += f" if={test_file}" dd_cmd += " of=/dev/null" - dd_cmd += " bs=%s" % blk_size + dd_cmd += f" bs={blk_size}" dd_cmd += " iflag=direct" try: out = session.cmd_output(dd_cmd, timeout=test_timeout) @@ -101,8 +109,9 @@ def _do_read_test(blk_size, test_file): return out if not hasattr(test, "write_perf_keyval"): - test.cancel("There is no 'write_perf_keyval' method in" - " test object, skip this test") + test.cancel( + "There is no 'write_perf_keyval' method in" " test object, skip this test" + ) error_context.context("boot guest over virtio driver", test.log.info) vm = env.get_vm(params["main_vm"]) @@ -122,8 +131,9 @@ def _do_read_test(blk_size, test_file): qemu_version = "Unknown" else: qemu_path = utils_misc.get_qemu_binary(params) - version_line = process.system_output("%s -help | head -n 1" - % qemu_path, shell=True) + version_line = process.system_output( + f"{qemu_path} -help | head -n 1", shell=True + ) matches = re.findall("version .*?,", version_line, re.I) if matches: qemu_version = " ".join(matches[0].split()[1:]).strip(",") @@ -134,31 +144,32 @@ def _do_read_test(blk_size, test_file): try: result_name = params.get("result_name", "nfs-perf.RHS") result_file_path = utils_misc.get_path(test.resultsdir, result_name) - result_file = open(result_file_path, 'w') + result_file = open(result_file_path, "w") except Exception: _clean_up(STEP_1) raise # After STEP 2 - error_context.context("mount nfs server in guest with tcp protocol", - test.log.info) + error_context.context("mount nfs server in guest with tcp protocol", test.log.info) nfs_server = params.get("nfs_server") nfs_path = params.get("nfs_path") mnt_option = params.get("mnt_option") - mnt_point = "/tmp/nfs_perf_%s" % utils_misc.generate_random_string(4) - test_file_prefix = os.path.join(mnt_point, "test_%si_" % - utils_misc.generate_random_string(4)) + mnt_point = f"/tmp/nfs_perf_{utils_misc.generate_random_string(4)}" + test_file_prefix = os.path.join( + mnt_point, f"test_{utils_misc.generate_random_string(4)}i_" + ) blk_size_list = params.get("blk_size_list", "8k").split() test_file_list = list(map(lambda x: test_file_prefix + x, blk_size_list)) if (not nfs_server) or (not nfs_path) or (not mnt_point): _clean_up(STEP_2) - 
test.error("Missing configuration for nfs partition." - " Check your config files") + test.error( + "Missing configuration for nfs partition." " Check your config files" + ) try: - session.cmd("mkdir -p %s" % mnt_point) + session.cmd(f"mkdir -p {mnt_point}") except Exception: _clean_up(STEP_2) raise @@ -167,10 +178,10 @@ def _do_read_test(blk_size, test_file): mnt_cmd = "mount" mnt_cmd += " -t nfs" if mnt_option: - mnt_cmd += " -o %s" % mnt_option - mnt_cmd += " %s:%s" % (nfs_server, nfs_path) + mnt_cmd += f" -o {mnt_option}" + mnt_cmd += f" {nfs_server}:{nfs_path}" mnt_cmd_out = mnt_cmd + " /tmp/***_****_****" - mnt_cmd += " %s" % mnt_point + mnt_cmd += f" {mnt_point}" try: session.cmd(mnt_cmd) except Exception: @@ -180,21 +191,20 @@ def _do_read_test(blk_size, test_file): # Record mount command in result file. try: - result_file.write("### kvm-userspace-ver : %s\n" % qemu_version) - result_file.write("### kvm_version : %s\n" % host_ver) - result_file.write("### guest-kernel-ver : %s\n" % guest_ver) - result_file.write("### %s\n" % mnt_cmd_out) + result_file.write(f"### kvm-userspace-ver : {qemu_version}\n") + result_file.write(f"### kvm_version : {host_ver}\n") + result_file.write(f"### guest-kernel-ver : {guest_ver}\n") + result_file.write(f"### {mnt_cmd_out}\n") result_file.write("Category:ALL\n") - except (IOError, ValueError) as e: - test.log.error("Failed to write to result file," - " error message:\n%s", e) + except (OSError, ValueError) as e: + test.log.error("Failed to write to result file," " error message:\n%s", e) result_list = ["%s|%016s|%016s" % ("blk_size", "Write", "Read")] speed_pattern = r"(\d+ bytes).*?([\d\.]+ s).*?([\d\.]+ [KkMmGgTt])B/s" try: prefix = "nfs" for blk_size in blk_size_list: - prefix += "--%s" % blk_size + prefix += f"--{blk_size}" test_file = test_file_list[blk_size_list.index(blk_size)] result = "%08s|" % blk_size[:-1] # Get write test result. @@ -202,31 +212,32 @@ def _do_read_test(blk_size, test_file): tmp_list = re.findall(speed_pattern, out) if not tmp_list: _clean_up(STEP_5) - test.error("Could not get correct write result." - " dd cmd output:\n%s" % out) + test.error( + "Could not get correct write result." f" dd cmd output:\n{out}" + ) _, _, speed = tmp_list[0] speed = utils_misc.normalize_data_size(speed) result += "%016s|" % speed - test.write_perf_keyval({"%s--%s" % (prefix, "write"): speed}) + test.write_perf_keyval({"{}--{}".format(prefix, "write"): speed}) # Get read test result. out = _do_read_test(blk_size, test_file) tmp_list = re.findall(speed_pattern, out) if not tmp_list: _clean_up(STEP_6) - test.error("Could not get correct read result." - " dd cmd output:\n%s" % out) + test.error( + "Could not get correct read result." f" dd cmd output:\n{out}" + ) _, _, speed = tmp_list[0] speed = utils_misc.normalize_data_size(speed) result += "%016s" % speed - test.write_perf_keyval({"%s--%s" % (prefix, "read"): speed}) + test.write_perf_keyval({"{}--{}".format(prefix, "read"): speed}) # Append result into result list. 
result_list.append(result) finally: try: result_file.write("\n".join(result_list)) - except (IOError, ValueError) as e: - test.log.error("Failed to write to result file," - " error message:\n%s", e) + except (OSError, ValueError) as e: + test.log.error("Failed to write to result file," " error message:\n%s", e) _clean_up(STEP_6) diff --git a/qemu/tests/nic_acpi_index.py b/qemu/tests/nic_acpi_index.py index 9f1845fef7..8244d4cb7d 100644 --- a/qemu/tests/nic_acpi_index.py +++ b/qemu/tests/nic_acpi_index.py @@ -2,11 +2,7 @@ import re from avocado.utils import process - -from virttest import utils_net -from virttest import virt_vm -from virttest import utils_misc -from virttest import error_context +from virttest import error_context, utils_misc, utils_net, virt_vm @error_context.context_aware @@ -29,19 +25,20 @@ def run(test, params, env): def renew_ip_address(session, mac): ifname = utils_net.get_linux_ifname(session, mac) - make_conf = "nmcli connection add type ethernet con-name %s ifname " \ - "%s autoconnect yes" % (ifname, ifname) + make_conf = ( + f"nmcli connection add type ethernet con-name {ifname} ifname " + f"{ifname} autoconnect yes" + ) arp_clean = "arp -n|awk '/^[1-9]/{print \"arp -d \" $1}'|sh" session.cmd_output_safe(make_conf) - session.cmd_output_safe("ip link set dev %s up" % ifname) + session.cmd_output_safe(f"ip link set dev {ifname} up") dhcp_cmd = params.get("dhcp_cmd") session.cmd_output_safe(dhcp_cmd % ifname, timeout=240) session.cmd_output_safe(arp_clean) def verified_nic_name(): - ifname = utils_net.get_linux_ifname(session, - vm.get_mac_address()) - pattern = int(re.findall(r'\d+', ifname)[-1]) + ifname = utils_net.get_linux_ifname(session, vm.get_mac_address()) + pattern = int(re.findall(r"\d+", ifname)[-1]) nic_name_number = params.get_numeric("nic_name_number") if pattern == nic_name_number: test.log.info("nic name match") @@ -52,20 +49,21 @@ def ping_test(): host_ip = utils_net.get_host_ip_address(params) status, output = utils_net.ping(host_ip, 10, timeout=30) if status: - test.fail( - "%s ping %s unexpected, output %s" % (vm.name, host_ip, output)) + test.fail(f"{vm.name} ping {host_ip} unexpected, output {output}") def get_hotplug_nic_ip(vm, nic, session): def _get_address(): try: - index = [_idx for _idx, _nic in enumerate(vm.virtnet) - if _nic == nic][0] + index = [_idx for _idx, _nic in enumerate(vm.virtnet) if _nic == nic][0] return vm.wait_for_get_address(index, timeout=90) except IndexError: - test.error("Nic '%s' not exists in VM '%s'" - % (nic["nic_name"], vm.name)) - except (virt_vm.VMIPAddressMissingError, - virt_vm.VMAddressVerificationError): + test.error( + "Nic '{}' not exists in VM '{}'".format(nic["nic_name"], vm.name) + ) + except ( + virt_vm.VMIPAddressMissingError, + virt_vm.VMAddressVerificationError, + ): renew_ip_address(session, nic["mac"]) return @@ -86,13 +84,14 @@ def _get_address(): session = vm.wait_for_serial_login(timeout=login_timeout) for iteration in range(repeat_times): - error_context.context("Start test iteration %s" % (iteration + 1), - test.log.info) + error_context.context( + "Start test iteration %s" % (iteration + 1), test.log.info + ) nic_hotplug_count = int(params.get("nic_hotplug_count", 1)) nic_hotplugged = [] for nic_index in range(1, nic_hotplug_count + 1): s_session = vm.wait_for_serial_login(timeout=login_timeout) - nic_name = "hotplug_nic%s" % nic_index + nic_name = f"hotplug_nic{nic_index}" nic_params = params.object_params(nic_name) nic_model = nic_params["nic_model"] nic_params["nic_model"] = nic_model 
diff --git a/qemu/tests/nic_acpi_index_boot.py b/qemu/tests/nic_acpi_index_boot.py index 3986159326..43855ae606 100644 --- a/qemu/tests/nic_acpi_index_boot.py +++ b/qemu/tests/nic_acpi_index_boot.py @@ -1,7 +1,6 @@ import re -from virttest import utils_net -from virttest import error_context +from virttest import error_context, utils_net @error_context.context_aware @@ -22,9 +21,8 @@ def run(test, params, env): vm = env.get_vm(params["main_vm"]) vm.verify_alive() session = vm.wait_for_serial_login(timeout=login_timeout) - ifname = utils_net.get_linux_ifname(session, - vm.get_mac_address()) - pattern = int(re.findall(r'\d+', ifname)[-1]) + ifname = utils_net.get_linux_ifname(session, vm.get_mac_address()) + pattern = int(re.findall(r"\d+", ifname)[-1]) nic_name_number = params.get_numeric("nic_name_number") if pattern == nic_name_number: test.log.info("nic name match") @@ -33,6 +31,6 @@ def run(test, params, env): host_ip = utils_net.get_host_ip_address(params) status, output = utils_net.ping(host_ip, 10, timeout=30) if status: - test.fail("%s ping %s unexpected, output %s" % (vm.name, host_ip, output)) + test.fail(f"{vm.name} ping {host_ip} unexpected, output {output}") if session: session.close() diff --git a/qemu/tests/nic_bonding.py b/qemu/tests/nic_bonding.py index c7bc32099a..19e47ed7bc 100644 --- a/qemu/tests/nic_bonding.py +++ b/qemu/tests/nic_bonding.py @@ -1,12 +1,10 @@ -import time import os import random +import time import aexpect - from avocado.utils import crypto, process -from virttest import utils_net -from virttest import utils_misc +from virttest import utils_misc, utils_net def run(test, params, env): @@ -36,7 +34,8 @@ def run(test, params, env): ssh_login_cmd = ( "echo LoginGraceTime 5m >> /etc/ssh/sshd_config &&" - " systemctl restart sshd.service || service sshd restart") + " systemctl restart sshd.service || service sshd restart" + ) session_serial.cmd_output_safe(ssh_login_cmd) # get params of bonding @@ -45,18 +44,16 @@ def run(test, params, env): modprobe_cmd = "modprobe -r bonding; modprobe bonding" bonding_params = params.get("bonding_params") if bonding_params: - modprobe_cmd += " %s" % bonding_params + modprobe_cmd += f" {bonding_params}" session_serial.cmd_output_safe(modprobe_cmd) - session_serial.cmd_output_safe("ip link set dev bond0 addr %s up" % mac) + session_serial.cmd_output_safe(f"ip link set dev bond0 addr {mac} up") setup_cmd = "ifenslave bond0 " + " ".join(ifnames) session_serial.cmd_output_safe(setup_cmd) dhcp_cmd = params.get("dhcp_cmd") session_serial.cmd_output_safe(dhcp_cmd, timeout=240) # prepare test data - guest_path = os.path.join(tmp_dir + "dst-%s" % - utils_misc.generate_random_string(8)) - host_path = os.path.join(test.tmpdir, "tmp-%s" % - utils_misc.generate_random_string(8)) + guest_path = os.path.join(tmp_dir + f"dst-{utils_misc.generate_random_string(8)}") + host_path = os.path.join(test.tmpdir, f"tmp-{utils_misc.generate_random_string(8)}") test.log.info("Test setup: Creating %dMB file on host", filesize) process.run(dd_cmd % host_path, shell=True) @@ -68,7 +65,8 @@ def run(test, params, env): test.log.info("md5 value of data original: %s", original_md5) test.log.info("Failover test with file transfer") transfer_thread = utils_misc.InterruptedThread( - vm.copy_files_to, (host_path, guest_path)) + vm.copy_files_to, (host_path, guest_path) + ) transfer_thread.start() try: while transfer_thread.is_alive(): @@ -83,11 +81,12 @@ def run(test, params, env): else: transfer_thread.join() - test.log.info('Cleaning temp file on host') + 
test.log.info("Cleaning temp file on host") os.remove(host_path) test.log.info("Failover test 2 with file transfer") transfer_thread = utils_misc.InterruptedThread( - vm.copy_files_from, (guest_path, host_path)) + vm.copy_files_from, (guest_path, host_path) + ) transfer_thread.start() try: nic_num = len(ifnames) @@ -98,7 +97,9 @@ def run(test, params, env): session_serial.cmd_output_safe(link_set_cmd % (ifnames[up_index], "up")) nic_indexes.remove(up_index) for num in nic_indexes: - session_serial.cmd_output_safe(link_set_cmd % (ifnames[num], "down")) + session_serial.cmd_output_safe( + link_set_cmd % (ifnames[num], "down") + ) time.sleep(random.randint(3, 5)) up_index += 1 except aexpect.ShellProcessTerminatedError: @@ -109,8 +110,7 @@ def run(test, params, env): current_md5 = crypto.hash_file(host_path, algorithm="md5") test.log.info("md5 value of data current: %s", current_md5) if original_md5 != current_md5: - test.fail("File changed after transfer host -> guest " - "and guest -> host") + test.fail("File changed after transfer host -> guest " "and guest -> host") finally: session_serial.sendline("ifenslave -d bond0 " + " ".join(ifnames)) diff --git a/qemu/tests/nic_bonding_host.py b/qemu/tests/nic_bonding_host.py index 4abed5fabb..691a45e82e 100644 --- a/qemu/tests/nic_bonding_host.py +++ b/qemu/tests/nic_bonding_host.py @@ -1,13 +1,8 @@ -import time import re +import time from avocado.utils import process - -from virttest import error_context -from virttest import utils_test -from virttest import utils_misc -from virttest import env_process -from virttest import utils_net +from virttest import env_process, error_context, utils_misc, utils_net, utils_test @error_context.context_aware @@ -37,27 +32,26 @@ def run(test, params, env): bonding_mode = params.get("bonding_mode", "1") bonding_miimon = params.get("bonding_miimon", "100") bonding_max_bonds = params.get("bonding_max_bonds", "1") - params['netdst'] = bond_br_name + params["netdst"] = bond_br_name host_bridges = utils_net.Bridge() - error_context.context("Load bonding module with mode 802.3ad", - test.log.info) - if not process.system("lsmod|grep bonding", ignore_status=True, - shell=True): + error_context.context("Load bonding module with mode 802.3ad", test.log.info) + if not process.system("lsmod|grep bonding", ignore_status=True, shell=True): process.system("modprobe -r bonding") - process.system("modprobe bonding mode=%s miimon=%s max_bonds=%s" % - (bonding_mode, bonding_miimon, bonding_max_bonds)) + process.system( + f"modprobe bonding mode={bonding_mode} miimon={bonding_miimon} max_bonds={bonding_max_bonds}" + ) - error_context.context("Bring up %s" % bond_iface, test.log.info) + error_context.context(f"Bring up {bond_iface}", test.log.info) host_ifaces = utils_net.get_host_iface() if bond_iface not in host_ifaces: - test.error("Can not find %s in host" % bond_iface) + test.error(f"Can not find {bond_iface} in host") bond_iface = utils_net.Interface(bond_iface) bond_iface.up() - bond_mac = bond_iface.get_mac() + bond_iface.get_mac() host_ph_iface_pre = params.get("host_ph_iface_prefix", "en") host_iface_bonding = int(params.get("host_iface_bonding", 2)) @@ -68,30 +62,29 @@ def run(test, params, env): ifaces_in_use = host_bridges.list_iface() host_ph_ifaces_un = list(set(host_ph_ifaces) - set(ifaces_in_use)) - if (len(host_ph_ifaces_un) < 2 or - len(host_ph_ifaces_un) < host_iface_bonding): - test.cancel("Host need %s nics at least." 
% host_iface_bonding) + if len(host_ph_ifaces_un) < 2 or len(host_ph_ifaces_un) < host_iface_bonding: + test.cancel(f"Host need {host_iface_bonding} nics at least.") - error_context.context("Add nics to %s" % bond_iface.name, test.log.info) + error_context.context(f"Add nics to {bond_iface.name}", test.log.info) host_ifaces_bonding = host_ph_ifaces_un[:host_iface_bonding] - ifenslave_cmd = "ifenslave %s" % bond_iface.name + ifenslave_cmd = f"ifenslave {bond_iface.name}" op_ifaces = [] for host_iface_bonding in host_ifaces_bonding: op_ifaces.append(utils_net.Interface(host_iface_bonding)) - ifenslave_cmd += " %s" % host_iface_bonding + ifenslave_cmd += f" {host_iface_bonding}" process.system(ifenslave_cmd) - error_context.context("Add a new bridge and add %s to it." - % bond_iface.name, test.log.info) + error_context.context( + f"Add a new bridge and add {bond_iface.name} to it.", test.log.info + ) if bond_br_name not in host_bridges.list_br(): host_bridges.add_bridge(bond_br_name) host_bridges.add_port(bond_br_name, bond_iface.name) error_context.context("Get ip address for bridge", test.log.info) - process.system("dhclient -r; dhclient %s" % bond_br_name, shell=True) + process.system(f"dhclient -r; dhclient {bond_br_name}", shell=True) - error_context.context("Boot up guest with bridge %s" % bond_br_name, - test.log.info) + error_context.context(f"Boot up guest with bridge {bond_br_name}", test.log.info) params["start_vm"] = "yes" vm_name = params.get("main_vm") env_process.preprocess_vm(test, params, env, vm_name) @@ -106,14 +99,21 @@ def run(test, params, env): error_context.context("Start file transfer", test.log.info) f_transfer = utils_misc.InterruptedThread( utils_test.run_virt_sub_test, - args=(test, params, env,), - kwargs={"sub_type": "file_transfer"}) + args=( + test, + params, + env, + ), + kwargs={"sub_type": "file_transfer"}, + ) f_transfer.start() utils_misc.wait_for( - lambda: process.system_output("pidof scp", ignore_status=True), 30) + lambda: process.system_output("pidof scp", ignore_status=True), 30 + ) - error_context.context("Disable and enable physical " - "interfaces in %s" % bond_br_name, test.log.info) + error_context.context( + "Disable and enable physical " f"interfaces in {bond_br_name}", test.log.info + ) while True: for op_iface in op_ifaces: test.log.debug("Turn down %s", op_iface.name) diff --git a/qemu/tests/nic_hotplug.py b/qemu/tests/nic_hotplug.py index c1c76296cb..ecf413218c 100644 --- a/qemu/tests/nic_hotplug.py +++ b/qemu/tests/nic_hotplug.py @@ -1,13 +1,8 @@ import random from avocado.utils import process - -from virttest import utils_test -from virttest import utils_net -from virttest import virt_vm -from virttest import utils_misc +from virttest import error_context, utils_misc, utils_net, utils_test, virt_vm from virttest.qemu_devices import qdevices -from virttest import error_context @error_context.context_aware @@ -39,23 +34,24 @@ def run(test, params, env): :param params: Dictionary with the test parameters. :param env: Dictionary with test environment. 
""" + def renew_ip_address(session, mac, is_linux_guest=True): if not is_linux_guest: - utils_net.restart_windows_guest_network_by_key(session, - "macaddress", - mac) + utils_net.restart_windows_guest_network_by_key(session, "macaddress", mac) return None ifname = utils_net.get_linux_ifname(session, mac) if params.get("make_change") == "yes": - p_cfg = "/etc/sysconfig/network-scripts/ifcfg-%s" % ifname - cfg_con = "DEVICE=%s\nBOOTPROTO=dhcp\nONBOOT=yes" % ifname - make_conf = "test -f %s || echo '%s' > %s" % (p_cfg, cfg_con, p_cfg) + p_cfg = f"/etc/sysconfig/network-scripts/ifcfg-{ifname}" + cfg_con = f"DEVICE={ifname}\nBOOTPROTO=dhcp\nONBOOT=yes" + make_conf = f"test -f {p_cfg} || echo '{cfg_con}' > {p_cfg}" else: - make_conf = "nmcli connection add type ethernet con-name %s ifname" \ - " %s autoconnect yes" % (ifname, ifname) + make_conf = ( + f"nmcli connection add type ethernet con-name {ifname} ifname" + f" {ifname} autoconnect yes" + ) arp_clean = "arp -n|awk '/^[1-9]/{print \"arp -d \" $1}'|sh" session.cmd_output_safe(make_conf) - session.cmd_output_safe("ip link set dev %s up" % ifname) + session.cmd_output_safe(f"ip link set dev {ifname} up") dhcp_cmd = params.get("dhcp_cmd") session.cmd_output_safe(dhcp_cmd % ifname, timeout=240) session.cmd_output_safe(arp_clean) @@ -64,16 +60,16 @@ def renew_ip_address(session, mac, is_linux_guest=True): def get_hotplug_nic_ip(vm, nic, session, is_linux_guest=True): def __get_address(): try: - index = [ - _idx for _idx, _nic in enumerate( - vm.virtnet) if _nic == nic][0] + index = [_idx for _idx, _nic in enumerate(vm.virtnet) if _nic == nic][0] return vm.wait_for_get_address(index, timeout=90) except IndexError: test.error( - "Nic '%s' not exists in VM '%s'" % - (nic["nic_name"], vm.name)) - except (virt_vm.VMIPAddressMissingError, - virt_vm.VMAddressVerificationError): + "Nic '{}' not exists in VM '{}'".format(nic["nic_name"], vm.name) + ) + except ( + virt_vm.VMIPAddressMissingError, + virt_vm.VMAddressVerificationError, + ): renew_ip_address(session, nic["mac"], is_linux_guest) return @@ -93,8 +89,8 @@ def ping_hotplug_nic(ip, mac, session, is_linux_guest): if not is_linux_guest: return status, output ifname = utils_net.get_linux_ifname(session, mac) - add_route_cmd = "route add %s dev %s" % (ip, ifname) - del_route_cmd = "route del %s dev %s" % (ip, ifname) + add_route_cmd = f"route add {ip} dev {ifname}" + del_route_cmd = f"route del {ip} dev {ifname}" test.log.warning("Failed to ping %s from host.") test.log.info("Add route and try again") session.cmd_output_safe(add_route_cmd) @@ -110,21 +106,17 @@ def device_add_nic(pci_model, netdev, device_id): :param netdev: netdev id for virtual device :param device_id: device id for virtual device """ - pci_add_cmd = "device_add id=%s, driver=%s, netdev=%s" % (device_id, - pci_model, - netdev) - bus = vm.devices.get_buses({'aobject': params.get('nic_bus', 'pci.0')})[0] + pci_add_cmd = f"device_add id={device_id}, driver={pci_model}, netdev={netdev}" + bus = vm.devices.get_buses({"aobject": params.get("nic_bus", "pci.0")})[0] if isinstance(bus, qdevices.QPCIEBus): root_port_id = bus.get_free_root_port() if root_port_id: - pci_add_cmd += ",bus=%s" % root_port_id + pci_add_cmd += f",bus={root_port_id}" if used_sameid != "yes": root_port = vm.devices.get_buses({"aobject": root_port_id})[0] - root_port.insert(qdevices.QBaseDevice(pci_model, - aobject=device_id)) + root_port.insert(qdevices.QBaseDevice(pci_model, aobject=device_id)) else: - test.error("No free root port for device %s to plug." 
- % device_id) + test.error(f"No free root port for device {device_id} to plug.") add_output = vm.monitor.send_args_cmd(pci_add_cmd) return add_output @@ -138,7 +130,7 @@ def run_sub_test(params, plug_tag): :return: whether vm was successfully shut-down if needed """ - sub_type = params.get("sub_type_%s" % plug_tag) + sub_type = params.get(f"sub_type_{plug_tag}") login_timeout = params.get_numeric("login_timeout", 360) session = vm.wait_for_serial_login(timeout=login_timeout) shutdown_method = params.get("shutdown_method", "shell") @@ -146,8 +138,9 @@ def run_sub_test(params, plug_tag): if sub_type == "reboot": test.log.info("Running sub test '%s' %s", sub_type, plug_tag) if params.get("reboot_method"): - vm.reboot(session, params["reboot_method"], 0, - login_timeout, serial=True) + vm.reboot( + session, params["reboot_method"], 0, login_timeout, serial=True + ) elif sub_type == "shutdown": test.log.info("Running sub test '%s' %s", sub_type, plug_tag) if shutdown_method == "shell": @@ -162,26 +155,27 @@ def run_sub_test(params, plug_tag): vm = env.get_vm(params["main_vm"]) vm.verify_alive() primary_nic = [nic for nic in vm.virtnet] - guest_is_linux = ("linux" == params.get("os_type", "")) + guest_is_linux = "linux" == params.get("os_type", "") host_ip_addr = utils_net.get_host_ip_address(params) if guest_is_linux: # Modprobe the module if specified in config file module = params.get("modprobe_module") if module: s_session = vm.wait_for_serial_login(timeout=login_timeout) - s_session.cmd_output_safe("modprobe %s" % module) + s_session.cmd_output_safe(f"modprobe {module}") s_session.close() for iteration in range(repeat_times): - error_context.context("Start test iteration %s" % (iteration + 1), - test.log.info) + error_context.context( + "Start test iteration %s" % (iteration + 1), test.log.info + ) nic_hotplug_count = int(params.get("nic_hotplug_count", 1)) nic_hotplugged = [] for nic_index in range(1, nic_hotplug_count + 1): # need to reconnect serial port after # guest reboot for windows guest s_session = vm.wait_for_serial_login(timeout=login_timeout) - nic_name = "hotplug_nic%s" % nic_index + nic_name = f"hotplug_nic{nic_index}" nic_params = params.object_params(nic_name) nic_model = nic_params["pci_model"] nic_params["nic_model"] = nic_model @@ -199,8 +193,10 @@ def run_sub_test(params, plug_tag): s_session.close() return else: - test.fail("Hot-plug error message is not as expected: " - "%s" % str(err_msg)) + test.fail( + "Hot-plug error message is not as expected: " + f"{str(err_msg)}" + ) else: test.fail("Qemu should failed hot-plugging nic with error") else: @@ -209,31 +205,33 @@ def run_sub_test(params, plug_tag): for nic in disable_nic_list: if guest_is_linux: ifname = utils_net.get_linux_ifname(s_session, nic["mac"]) - s_session.cmd_output_safe("ifconfig %s 0.0.0.0" % ifname) + s_session.cmd_output_safe(f"ifconfig {ifname} 0.0.0.0") else: s_session.cmd_output_safe("ipconfig /release all") vm.set_link(nic.device_id, up=False) - test.log.debug("Hotplug %sth '%s' nic named '%s'", - nic_index, nic_model, nic_name) + test.log.debug( + "Hotplug %sth '%s' nic named '%s'", nic_index, nic_model, nic_name + ) hotplug_nic = vm.hotplug_nic(**nic_params) test.log.info("Check if new interface gets ip address") - hotnic_ip = get_hotplug_nic_ip(vm, hotplug_nic, - s_session, guest_is_linux) + hotnic_ip = get_hotplug_nic_ip(vm, hotplug_nic, s_session, guest_is_linux) if not hotnic_ip: test.log.info("Reboot vm after hotplug nic") # reboot vm via serial port since some guest can't auto up # hotplug 
nic and next step will check is hotplug nic works. s_session = vm.reboot(session=s_session, serial=True) vm.verify_alive() - hotnic_ip = get_hotplug_nic_ip(vm, hotplug_nic, - s_session, guest_is_linux) + hotnic_ip = get_hotplug_nic_ip( + vm, hotplug_nic, s_session, guest_is_linux + ) if not hotnic_ip: test.fail("Hotplug nic still can't get ip after reboot vm") test.log.info("Got the ip address of new nic: %s", hotnic_ip) test.log.info("Ping guest's new ip from host") - status, output = ping_hotplug_nic(host_ip_addr, hotplug_nic["mac"], - s_session, guest_is_linux) + status, output = ping_hotplug_nic( + host_ip_addr, hotplug_nic["mac"], s_session, guest_is_linux + ) if status: err_msg = "New nic failed ping test, error info: '%s'" test.fail(err_msg % output) @@ -243,11 +241,12 @@ def run_sub_test(params, plug_tag): test.log.info("Resume vm") vm.resume() test.log.info("Ping guest's new ip after resume") - status, output = ping_hotplug_nic(host_ip_addr, hotplug_nic["mac"], - s_session, guest_is_linux) + status, output = ping_hotplug_nic( + host_ip_addr, hotplug_nic["mac"], s_session, guest_is_linux + ) if status: err_msg = "New nic failed ping test after stop/cont, " - err_msg += "error info: '%s'" % output + err_msg += f"error info: '{output}'" test.fail(err_msg) # random hotunplug nic diff --git a/qemu/tests/nic_opt.py b/qemu/tests/nic_opt.py index 0666c493f0..a01f8c46b7 100644 --- a/qemu/tests/nic_opt.py +++ b/qemu/tests/nic_opt.py @@ -2,12 +2,14 @@ import time from avocado.utils import cpu -from virttest import data_dir -from virttest import error_context -from virttest import utils_misc -from virttest import utils_net -from virttest import utils_netperf -from virttest import utils_test +from virttest import ( + data_dir, + error_context, + utils_misc, + utils_net, + utils_netperf, + utils_test, +) @error_context.context_aware @@ -18,29 +20,39 @@ def run(test, params, env): :param params: Dictionary with the test parameters :param env: Dictionary with test environment. """ + def test_netperf(): """ Netperf stress test for nic option. 
""" - netperf_client_link = os.path.join(data_dir.get_deps_dir("netperf"), - params.get("netperf_client_link")) + netperf_client_link = os.path.join( + data_dir.get_deps_dir("netperf"), params.get("netperf_client_link") + ) client_path = params.get("client_path") - n_client = utils_netperf.NetperfClient(vm.get_address(), client_path, - netperf_source=netperf_client_link, client=params.get("shell_client"), - port=params.get("shell_port"), username=params.get("username"), - password=params.get("password"), prompt=params.get("shell_prompt"), - linesep=params.get("shell_linesep", "\n").encode().decode( - 'unicode_escape'), - status_test_command=params.get("status_test_command", ""), - compile_option=params.get("compile_option", "")) + n_client = utils_netperf.NetperfClient( + vm.get_address(), + client_path, + netperf_source=netperf_client_link, + client=params.get("shell_client"), + port=params.get("shell_port"), + username=params.get("username"), + password=params.get("password"), + prompt=params.get("shell_prompt"), + linesep=params.get("shell_linesep", "\n").encode().decode("unicode_escape"), + status_test_command=params.get("status_test_command", ""), + compile_option=params.get("compile_option", ""), + ) - n_server = utils_netperf.NetperfServer(utils_net.get_host_ip_address(params), - params.get("server_path", "/var/tmp"), - netperf_source=os.path.join(data_dir.get_deps_dir("netperf"), - params.get("netperf_server_link")), - password=params.get("hostpassword"), - compile_option=params.get("compile_option", "")) + n_server = utils_netperf.NetperfServer( + utils_net.get_host_ip_address(params), + params.get("server_path", "/var/tmp"), + netperf_source=os.path.join( + data_dir.get_deps_dir("netperf"), params.get("netperf_server_link") + ), + password=params.get("hostpassword"), + compile_option=params.get("compile_option", ""), + ) try: n_server.start() @@ -53,23 +65,27 @@ def test_netperf(): netperf_output_unit = params.get("netperf_output_unit") netperf_package_sizes = params.get("netperf_sizes") test_option = params.get("test_option", "") - test_option += " -l %s" % test_duration + test_option += f" -l {test_duration}" if params.get("netperf_remote_cpu") == "yes": test_option += " -C" if params.get("netperf_local_cpu") == "yes": test_option += " -c" if netperf_output_unit in "GMKgmk": - test_option += " -f %s" % netperf_output_unit + test_option += f" -f {netperf_output_unit}" num = 0 for protocol in test_protocols.split(): - error_context.context("Testing %s protocol" % protocol, - test.log.info) - t_option = "%s -t %s" % (test_option, protocol) - n_client.bg_start(utils_net.get_host_ip_address(params), t_option, - netperf_para_sess, netperf_cmd_prefix, - package_sizes=netperf_package_sizes) - if utils_misc.wait_for(n_client.is_netperf_running, 10, 0, 3, - "Wait netperf test start"): + error_context.context(f"Testing {protocol} protocol", test.log.info) + t_option = f"{test_option} -t {protocol}" + n_client.bg_start( + utils_net.get_host_ip_address(params), + t_option, + netperf_para_sess, + netperf_cmd_prefix, + package_sizes=netperf_package_sizes, + ) + if utils_misc.wait_for( + n_client.is_netperf_running, 10, 0, 3, "Wait netperf test start" + ): test.log.info("Netperf test start successfully.") else: test.error("Can not start netperf client.") @@ -105,12 +121,16 @@ def test_ping(): guest_ip = vm.get_address() for size in package_sizes: - error_context.context("From host ping to '%s' with guest '%s'" - " with package size %s. 
" % - (vm.name, guest_ip, size), test.log.info) - status, output = utils_net.ping(guest_ip, count=10, packetsize=size, timeout=30) + error_context.context( + f"From host ping to '{vm.name}' with guest '{guest_ip}'" + f" with package size {size}. ", + test.log.info, + ) + status, output = utils_net.ping( + guest_ip, count=10, packetsize=size, timeout=30 + ) if status != 0: - test.fail("host ping %s unexpected, output %s" % (guest_ip, output)) + test.fail(f"host ping {guest_ip} unexpected, output {output}") check_type = params.get("check_type") smp_value = params.get_numeric("smp") or params.get_numeric("vcpu_maxcpus") @@ -123,13 +143,15 @@ def test_ping(): match_string = "unable to start vhost net" output = vm.process.get_output() if match_string in output: - test.fail("Qemu output error info: %s" % output) + test.fail(f"Qemu output error info: {output}") if params["os_type"] == "windows": driver_verifier = params["driver_verifier"] - error_context.context("Verify if netkvm.sys is enabled in guest", - test.log.info) - session = utils_test.qemu.windrv_check_running_verifier(session, vm, - test, driver_verifier) + error_context.context( + "Verify if netkvm.sys is enabled in guest", test.log.info + ) + session = utils_test.qemu.windrv_check_running_verifier( + session, vm, test, driver_verifier + ) func_name = {"ping": test_ping, "netperf": test_netperf} func_name[check_type]() finally: diff --git a/qemu/tests/nic_teaming.py b/qemu/tests/nic_teaming.py index adc5b534e3..edcd4f54a5 100644 --- a/qemu/tests/nic_teaming.py +++ b/qemu/tests/nic_teaming.py @@ -1,11 +1,8 @@ -import time -import re import random +import re +import time -from virttest import error_context -from virttest import utils_test -from virttest import utils_net -from virttest import utils_misc +from virttest import error_context, utils_misc, utils_net, utils_test @error_context.context_aware @@ -23,6 +20,7 @@ def run(test, params, env): :param params: Dictionary with the test parameters. :param env: Dictionary with test environment. """ + def team_port_add(ifnames, team_if): """Team0 add ports and return the ip link result for debuging""" for port in ifnames: @@ -30,16 +28,15 @@ def team_port_add(ifnames, team_if): session_serial.cmd_output_safe(params["setdown_cmd"] % port) session_serial.cmd_output_safe(params["addport_cmd"] % port) output_teamnl = session_serial.cmd_output_safe(params["portchk_cmd"]) - ports = re.findall(r"%s" % params["ptn_teamnl"], output_teamnl) + ports = re.findall(r"{}".format(params["ptn_teamnl"]), output_teamnl) for port in ifnames: if port not in ports: - test.fail("Add %s to %s failed." 
% (port, team_if)) + test.fail(f"Add {port} to {team_if} failed.") session_serial.cmd_output_safe(params["killdhclient_cmd"]) - output = session_serial.cmd_output_safe(params["getip_cmd"], - timeout=300) - team_ip = re.search(r"%s" % params["ptn_ipv4"], output).group() + output = session_serial.cmd_output_safe(params["getip_cmd"], timeout=300) + team_ip = re.search(r"{}".format(params["ptn_ipv4"]), output).group() if not team_ip: - test.fail("Failed to get ip address of %s" % team_if) + test.fail(f"Failed to get ip address of {team_if}") return ports, team_ip def failover(ifnames, timeout): @@ -62,21 +59,25 @@ def failover(ifnames, timeout): break def check_ping(status, output): - """ ratio <5% is acceptance.""" + """ratio <5% is acceptance.""" if status != 0: - test.fail("Ping failed, staus:%s, output:%s" % (status, output)) + test.fail(f"Ping failed, staus:{status}, output:{output}") # if status != 0 the ping process seams hit issue. ratio = utils_test.get_loss_ratio(output) if ratio == -1: - test.fail("The ratio is %s, and status is %s, " - "output is %s" % (ratio, status, output)) + test.fail( + f"The ratio is {ratio}, and status is {status}, " f"output is {output}" + ) elif ratio > int(params["failed_ratio"]): - test.fail("The loss raito is %s, test failed" % ratio) - test.log.info("ping pass with loss raito:%s, that less than %s", - ratio, params["failed_ratio"]) + test.fail(f"The loss raito is {ratio}, test failed") + test.log.info( + "ping pass with loss raito:%s, that less than %s", + ratio, + params["failed_ratio"], + ) def team_if_exist(): - """ judge if team is alive well.""" + """judge if team is alive well.""" team_exists_cmd = params.get("team_if_exists_cmd") return session_serial.cmd_status(team_exists_cmd, safe=True) == 0 @@ -87,26 +88,25 @@ def team_if_exist(): vm.verify_alive() timeout = int(params.get("login_timeout", 1200)) session_serial = vm.wait_for_serial_login(timeout=timeout) - ifnames = [utils_net.get_linux_ifname(session_serial, - vm.get_mac_address(vlan)) - for vlan, nic in enumerate(vm.virtnet)] + ifnames = [ + utils_net.get_linux_ifname(session_serial, vm.get_mac_address(vlan)) + for vlan, nic in enumerate(vm.virtnet) + ] session_serial.cmd_output_safe(params["nm_stop_cmd"]) team_if = params.get("team_if") # initial - error_context.context("Step1: Configure the team environment", - test.log.info) + error_context.context("Step1: Configure the team environment", test.log.info) # steps of building the teaming environment starts modprobe_cmd = "modprobe team" session_serial.cmd_output_safe(modprobe_cmd) session_serial.cmd_output_safe(params["createteam_cmd"]) # this cmd is to create the team0 and correspoding userspace daemon if not team_if_exist(): - test.fail("Interface %s is not created." 
% team_if) + test.fail(f"Interface {team_if} is not created.") # check if team0 is created successfully ports, team_ip = team_port_add(ifnames, team_if) - test.log.debug("The list of the ports that added to %s : %s", - team_if, ports) + test.log.debug("The list of the ports that added to %s : %s", team_if, ports) test.log.debug("The ip address of %s : %s", team_if, team_ip) output = session_serial.cmd_output_safe(params["team_debug_cmd"]) test.log.debug("team interface configuration: %s", output) @@ -122,29 +122,30 @@ def team_if_exist(): dest = utils_net.get_ip_address_by_interface(params["netdst"]) count = params.get("count") timeout = float(count) * 2 - error_context.context("Step2: Check if guest can ping out:", - test.log.info) - status, output = utils_test.ping(dest=dest, count=10, - interface=team_if, - timeout=30, - session=session) + error_context.context("Step2: Check if guest can ping out:", test.log.info) + status, output = utils_test.ping( + dest=dest, count=10, interface=team_if, timeout=30, session=session + ) check_ping(status, output) # small ping check if the team0 works w/o failover - error_context.context("Step3: Start failover testing until " - "ping finished", test.log.info) - failover_thread = utils_misc.InterruptedThread(failover, - (ifnames, timeout)) + error_context.context( + "Step3: Start failover testing until " "ping finished", test.log.info + ) + failover_thread = utils_misc.InterruptedThread(failover, (ifnames, timeout)) failover_thread.start() # start failover loop until ping finished - error_context.context("Step4: Start ping host for %s counts" - % count, test.log.info) + error_context.context( + f"Step4: Start ping host for {count} counts", test.log.info + ) if failover_thread.is_alive(): - status, output = utils_test.ping(dest=dest, count=count, - interface=team_if, - timeout=float(count) * 1.5, - session=session) - error_context.context("Step5: Check if ping succeeded", - test.log.info) + status, output = utils_test.ping( + dest=dest, + count=count, + interface=team_if, + timeout=float(count) * 1.5, + session=session, + ) + error_context.context("Step5: Check if ping succeeded", test.log.info) check_ping(status, output) else: test.error("The failover thread is not alive") @@ -157,7 +158,7 @@ def team_if_exist(): # finish the main steps and check the result session_serial.cmd_output_safe(params["killteam_cmd"]) if team_if_exist(): - test.fail("Remove %s failed" % team_if) + test.fail(f"Remove {team_if} failed") test.log.info("%s removed", team_if) # remove the team0 and the daemon, check if succeed finally: diff --git a/qemu/tests/nmi_bsod_catch.py b/qemu/tests/nmi_bsod_catch.py index acfd8d42f2..6d8222a04b 100644 --- a/qemu/tests/nmi_bsod_catch.py +++ b/qemu/tests/nmi_bsod_catch.py @@ -1,7 +1,7 @@ import time -from virttest import error_context -from virttest import utils_test +from virttest import error_context, utils_test + from provider import win_dump_utils @@ -34,9 +34,9 @@ def run(test, params, env): driver_name = params.get("driver_name") if driver_name: - session = utils_test.qemu.windrv_check_running_verifier(session, - vm, test, - driver_name) + session = utils_test.qemu.windrv_check_running_verifier( + session, vm, test, driver_name + ) if del_dump_cmd: session.sendline(del_dump_cmd) @@ -49,11 +49,11 @@ def run(test, params, env): msg = "Configure the guest" for reg_cmd in reg_cmds: cmd = params.get(reg_cmd.strip()) - msg += " The command is %s " % cmd + msg += f" The command is {cmd} " error_context.context(msg) s, o = 
session.cmd_status_output(cmd, 360) if s: - test.fail("Fail command: %s. Output: %s" % (cmd, o)) + test.fail(f"Fail command: {cmd}. Output: {o}") if params.get("reboot_after_config") == "yes": error_context.context("Reboot guest", test.log.info) @@ -61,34 +61,35 @@ def run(test, params, env): try: if nmi_cmd: - error_context.context("Send inject-nmi or nmi from host to guest", - test.log.info) + error_context.context( + "Send inject-nmi or nmi from host to guest", test.log.info + ) vm.monitor.send_args_cmd(nmi_cmd) # Wait guest create dump file. if manual_reboot_cmd: bsod_time = params.get("bsod_time", 160) - test.log.info("Waiting guest for creating dump file" - " (%ssec)", bsod_time) + test.log.info("Waiting guest for creating dump file" " (%ssec)", bsod_time) time.sleep(bsod_time) - error_context.context("Send a system_reset monitor command", - test.log.info) + error_context.context("Send a system_reset monitor command", test.log.info) vm.monitor.send_args_cmd(manual_reboot_cmd) session = vm.wait_for_login(timeout=timeout) if check_dump_cmd: - error_context.context("Verify whether the dump files are " - "generated", test.log.info) + error_context.context( + "Verify whether the dump files are " "generated", test.log.info + ) s, o = session.cmd_status_output(check_dump_cmd, 360) test.log.debug("Output for check_dump_cmd command: %s", o) if s: - err_msg = "Could not find dump files in guest. Output: '%s'" % o + err_msg = f"Could not find dump files in guest. Output: '{o}'" test.fail(err_msg) error_context.context("Analyze dump file with windbg", test.log.info) if session.cmd_status(params["chk_sdk_ins"]): - win_dump_utils.install_windbg(test, params, session, - timeout=params.get("wdbg_timeout", 600)) + win_dump_utils.install_windbg( + test, params, session, timeout=params.get("wdbg_timeout", 600) + ) error_context.context("Disable security alert", test.log.info) win_dump_utils.disable_security_alert(params, session) session.cmd(params["save_path_cmd"]) @@ -100,4 +101,4 @@ def run(test, params, env): except Exception as e: # Ignore cleanup exception to avoid it overriding # the actual fault. 
- test.log.warn("Failed to delete dump files: '%s'", e) + test.log.warning("Failed to delete dump files: '%s'", e) diff --git a/qemu/tests/nmi_watchdog.py b/qemu/tests/nmi_watchdog.py index 37feeba45b..b5d6553d4f 100644 --- a/qemu/tests/nmi_watchdog.py +++ b/qemu/tests/nmi_watchdog.py @@ -23,15 +23,18 @@ def run(test, params, env): get_nmi_cmd = params["get_nmi_cmd"] kernel_version = session.cmd_output("uname -r").strip() nmi_watchdog_type = int(params["nmi_watchdog_type"]) - update_kernel_cmd = ("grubby --update-kernel=/boot/vmlinuz-%s " - "--args='nmi_watchdog=%d'" % - (kernel_version, nmi_watchdog_type)) + update_kernel_cmd = ( + "grubby --update-kernel=/boot/vmlinuz-%s " + "--args='nmi_watchdog=%d'" % (kernel_version, nmi_watchdog_type) + ) - error_context.context("Add 'nmi_watchdog=%d' to guest kernel " - "cmdline and reboot" % nmi_watchdog_type) + error_context.context( + "Add 'nmi_watchdog=%d' to guest kernel " + "cmdline and reboot" % nmi_watchdog_type + ) session.cmd(update_kernel_cmd) time.sleep(int(params.get("sleep_before_reset", 10))) - session = vm.reboot(session, method='shell', timeout=timeout) + session = vm.reboot(session, method="shell", timeout=timeout) try: error_context.context("Getting guest's number of vcpus") guest_cpu_num = session.cmd(params["cpu_chk_cmd"]) @@ -41,8 +44,7 @@ def run(test, params, env): test.log.debug(output.strip()) nmi_counter1 = output.split()[1:] - test.log.info("Waiting 60 seconds to see if guest's NMI counter " - "increases") + test.log.info("Waiting 60 seconds to see if guest's NMI counter " "increases") time.sleep(60) error_context.context("Getting guest's NMI counter 2nd time") @@ -52,10 +54,13 @@ def run(test, params, env): error_context.context("") for i in range(int(guest_cpu_num)): - test.log.info("vcpu: %s, nmi_counter1: %s, nmi_counter2: %s", - i, nmi_counter1[i], nmi_counter2[i]) + test.log.info( + "vcpu: %s, nmi_counter1: %s, nmi_counter2: %s", + i, + nmi_counter1[i], + nmi_counter2[i], + ) if int(nmi_counter2[i]) <= int(nmi_counter1[i]): - test.fail("Guest's NMI counter did not increase " - "after 60 seconds") + test.fail("Guest's NMI counter did not increase " "after 60 seconds") finally: session.close() diff --git a/qemu/tests/nonexist_vcpu_hotplug.py b/qemu/tests/nonexist_vcpu_hotplug.py index fe350d28b4..06799e09be 100644 --- a/qemu/tests/nonexist_vcpu_hotplug.py +++ b/qemu/tests/nonexist_vcpu_hotplug.py @@ -1,6 +1,4 @@ -from virttest import error_context -from virttest import utils_misc -from virttest import cpu +from virttest import cpu, error_context, utils_misc @error_context.context_aware @@ -21,13 +19,16 @@ def run(test, params, env): hotplug_cmd = "cpu_set %s online" - error_context.context("boot the vm, with '-smp X,maxcpus=Y' option," - "thus allow hotplug vcpu", test.log.info) + error_context.context( + "boot the vm, with '-smp X,maxcpus=Y' option," "thus allow hotplug vcpu", + test.log.info, + ) vm = env.get_vm(params["main_vm"]) vm.verify_alive() - error_context.context("check if CPUs in guest matches qemu cmd " - "before hot-plug", test.log.info) + error_context.context( + "check if CPUs in guest matches qemu cmd " "before hot-plug", test.log.info + ) smp_by_cmd = int(params.get("smp")) if not cpu.check_if_vm_vcpu_match(smp_by_cmd, vm): test.error("CPU quantity mismatch cmd before hotplug !") @@ -36,15 +37,21 @@ def run(test, params, env): vcpus_need_hotplug = params.get("nonexist_vcpu", "-1 161").split(" ") for vcpu in vcpus_need_hotplug: try: - error_context.context("hot-pluging vCPU %s" % vcpu, test.log.info) 
+ error_context.context(f"hot-pluging vCPU {vcpu}", test.log.info) output = vm.monitor.send_args_cmd(hotplug_cmd % vcpu) finally: - error_context.context("output from monitor is: %s" % output, - test.log.info) + error_context.context(f"output from monitor is: {output}", test.log.info) # Windows is a little bit lazy that needs more secs to recognize. - error_context.context("hotplugging finished, let's wait a few sec and" - " check cpus quantity in guest.", test.log.info) - if not utils_misc.wait_for(lambda: cpu.check_if_vm_vcpu_match( - smp_by_cmd, vm), - 60, first=10, step=5.0, text="retry later"): + error_context.context( + "hotplugging finished, let's wait a few sec and" + " check cpus quantity in guest.", + test.log.info, + ) + if not utils_misc.wait_for( + lambda: cpu.check_if_vm_vcpu_match(smp_by_cmd, vm), + 60, + first=10, + step=5.0, + text="retry later", + ): test.fail("CPU quantity mismatch cmd after hotplug !") diff --git a/qemu/tests/numa_basic.py b/qemu/tests/numa_basic.py index 4afca53aa7..58914269fb 100644 --- a/qemu/tests/numa_basic.py +++ b/qemu/tests/numa_basic.py @@ -1,7 +1,4 @@ -from virttest import env_process -from virttest import utils_misc -from virttest import utils_test -from virttest import error_context +from virttest import env_process, error_context, utils_misc, utils_test from virttest.staging import utils_memory @@ -25,13 +22,13 @@ def run(test, params, env): host_numa_node = utils_misc.NumaInfo() node_list = host_numa_node.online_nodes for node_id in node_list: - error_context.base_context("Bind qemu process to numa node %s" % node_id, - test.log.info) - vm = "vm_bind_to_%s" % node_id - params['qemu_command_prefix'] = "numactl --cpunodebind=%s" % node_id + error_context.base_context( + f"Bind qemu process to numa node {node_id}", test.log.info + ) + vm = f"vm_bind_to_{node_id}" + params["qemu_command_prefix"] = f"numactl --cpunodebind={node_id}" utils_memory.drop_caches() - node_MemFree = int(host_numa_node.read_from_node_meminfo(node_id, - "MemFree")) + node_MemFree = int(host_numa_node.read_from_node_meminfo(node_id, "MemFree")) if node_MemFree < int(params["mem"]) * 1024: test.cancel("No enough free memory in node %d." % node_id) env_process.preprocess_vm(test, params, env, vm) @@ -40,20 +37,25 @@ def run(test, params, env): session = vm.wait_for_login(timeout=timeout) session.close() - error_context.context("Check the memory use status of qemu process", - test.log.info) - memory_status, _ = utils_test.qemu.get_numa_status(host_numa_node, - vm.get_pid()) + error_context.context( + "Check the memory use status of qemu process", test.log.info + ) + memory_status, _ = utils_test.qemu.get_numa_status(host_numa_node, vm.get_pid()) node_used_most = 0 memory_sz_used_most = 0 for index in range(len(node_list)): if memory_sz_used_most < memory_status[index]: memory_sz_used_most = memory_status[index] node_used_most = node_list[index] - test.log.debug("Qemu used %s pages in node" - " %s", memory_status[index], node_list[index]) + test.log.debug( + "Qemu used %s pages in node" " %s", + memory_status[index], + node_list[index], + ) if node_used_most != node_id: - test.fail("Qemu still use memory from other node. " - "Expect: %s, used: %s" % (node_id, node_used_most)) + test.fail( + "Qemu still use memory from other node. 
" + f"Expect: {node_id}, used: {node_used_most}" + ) error_context.context("Destroy guest.", test.log.info) vm.destroy() diff --git a/qemu/tests/numa_consistency.py b/qemu/tests/numa_consistency.py index eb05bcdbcb..0d7011a9c4 100644 --- a/qemu/tests/numa_consistency.py +++ b/qemu/tests/numa_consistency.py @@ -1,10 +1,6 @@ import os -from virttest import data_dir -from virttest import env_process -from virttest import error_context -from virttest import utils_misc -from virttest import utils_test +from virttest import data_dir, env_process, error_context, utils_misc, utils_test from virttest.staging import utils_memory @@ -15,8 +11,7 @@ def get_node_used_memory(qemu_pid, node): :param qemu_pid: the process id of qemu-kvm :param node: the NUMA node """ - qemu_memory_status = utils_memory.read_from_numa_maps(qemu_pid, - "N%d" % node) + qemu_memory_status = utils_memory.read_from_numa_maps(qemu_pid, "N%d" % node) used_memory = sum([int(_) for _ in list(qemu_memory_status.values())]) return used_memory @@ -42,12 +37,10 @@ def run(test, params, env): test.cancel("Host only has one NUMA node, skipping test...") node_alloc = node_list[0] - node_mem_alloc = int(host_numa_node.read_from_node_meminfo(node_alloc, - 'MemFree')) + node_mem_alloc = int(host_numa_node.read_from_node_meminfo(node_alloc, "MemFree")) # Get the node with more free memory for node in node_list[1:]: - node_mem_free = int(host_numa_node.read_from_node_meminfo(node, - 'MemFree')) + node_mem_free = int(host_numa_node.read_from_node_meminfo(node, "MemFree")) if node_mem_free > node_mem_alloc: node_mem_alloc = node_mem_free node_alloc = node @@ -59,7 +52,7 @@ def run(test, params, env): params["qemu_command_prefix"] = "numactl -m %d " % node_alloc params["start_vm"] = "yes" - vm_name = params['main_vm'] + vm_name = params["main_vm"] env_process.preprocess_vm(test, params, env, vm_name) vm = env.get_vm(vm_name) vm.verify_alive() @@ -69,27 +62,35 @@ def run(test, params, env): test_mem = float(params.get("mem")) * mem_ratio guest_stress_args = params.get("guest_stress_args", "-a -p -l %sM") guest_stress_args = guest_stress_args % int(test_mem) - stress_path = os.path.join(data_dir.get_deps_dir('mem_mapping'), - mem_map_tool) + stress_path = os.path.join(data_dir.get_deps_dir("mem_mapping"), mem_map_tool) utils_memory.drop_caches() - error_context.base_context("Get the qemu memory use for node: %d before stress" - % node_alloc, test.log.info) + error_context.base_context( + "Get the qemu memory use for node: %d before stress" % node_alloc, + test.log.info, + ) memory_before = get_node_used_memory(qemu_pid, node_alloc) try: - guest_stress = utils_test.VMStress(vm, "mem_mapping", params, - download_url=stress_path, - stress_args=guest_stress_args) + guest_stress = utils_test.VMStress( + vm, + "mem_mapping", + params, + download_url=stress_path, + stress_args=guest_stress_args, + ) guest_stress.load_stress_tool() except utils_test.StressError as guest_info: test.error(guest_info) guest_stress.unload_stress() guest_stress.clean() utils_memory.drop_caches() - error_context.context("Get the qemu memory used in node: %d after stress" - % node_alloc, test.log.debug) + error_context.context( + "Get the qemu memory used in node: %d after stress" % node_alloc, + test.log.debug, + ) memory_after = get_node_used_memory(qemu_pid, node_alloc) - test.log.debug("memory_before %d, memory_after: %d" - % (memory_before, memory_after)) + test.log.debug( + "memory_before %d, memory_after: %d", memory_before, memory_after + ) if memory_after <= 
memory_before: test.error("Memory usage has not increased after the allocation!") finally: diff --git a/qemu/tests/numa_cpu.py b/qemu/tests/numa_cpu.py index c19f2d7482..8e8147630a 100644 --- a/qemu/tests/numa_cpu.py +++ b/qemu/tests/numa_cpu.py @@ -1,7 +1,6 @@ import re -from virttest import error_context -from virttest import utils_package +from virttest import error_context, utils_package from virttest.utils_misc import NumaInfo @@ -12,11 +11,13 @@ def run(test, params, env): and guest os match with the qemu cli """ - def convert_cpu_topology_to_ids(socketid=None, dieid=None, clusterid=None, - coreid=None, threadid=None): + def convert_cpu_topology_to_ids( + socketid=None, dieid=None, clusterid=None, coreid=None, threadid=None + ): """ Convert the cpu topology to cpu id list """ + def _get_boundary(value, max_value, weight): """ Get the data range of one bit @@ -29,24 +30,34 @@ def _get_boundary(value, max_value, weight): max_boundary = int(value if value is not None else (max_value - 1)) * weight return (min_boundary, max_boundary) - if vm_arch in ('x86_64', 'i686'): - socket_min, socket_max = _get_boundary(socketid, vcpu_sockets, socket_weight) + if vm_arch in ("x86_64", "i686"): + socket_min, socket_max = _get_boundary( + socketid, vcpu_sockets, socket_weight + ) die_min, die_max = _get_boundary(dieid, vcpu_dies, die_weight) core_min, core_max = _get_boundary(coreid, vcpu_cores, core_weight) - thread_min, thread_max = _get_boundary(threadid, vcpu_threads, thread_weight) + thread_min, thread_max = _get_boundary( + threadid, vcpu_threads, thread_weight + ) cpu_min = socket_min + die_min + core_min + thread_min cpu_max = socket_max + die_max + core_max + thread_max - elif vm_arch in ('ppc64', 'ppc64le'): + elif vm_arch in ("ppc64", "ppc64le"): cpu_min = int(coreid) cpu_max = int(coreid) + vcpu_threads - 1 - elif vm_arch == 'aarch64': - socket_min, socket_max = _get_boundary(socketid, vcpu_sockets, socket_weight) - cluster_min, cluster_max = _get_boundary(clusterid, vcpu_clusters, cluster_weight) + elif vm_arch == "aarch64": + socket_min, socket_max = _get_boundary( + socketid, vcpu_sockets, socket_weight + ) + cluster_min, cluster_max = _get_boundary( + clusterid, vcpu_clusters, cluster_weight + ) core_min, core_max = _get_boundary(coreid, vcpu_cores, core_weight) - thread_min, thread_max = _get_boundary(threadid, vcpu_threads, thread_weight) + thread_min, thread_max = _get_boundary( + threadid, vcpu_threads, thread_weight + ) cpu_min = socket_min + cluster_min + core_min + thread_min cpu_max = socket_max + cluster_max + core_max + thread_max - cpu_list = list(range(cpu_min, cpu_max + 1)) # pylint: disable=E0606 + cpu_list = list(range(cpu_min, cpu_max + 1)) # pylint: disable=E0606 return cpu_list def numa_cpu_guest(): @@ -55,7 +66,7 @@ def numa_cpu_guest(): """ error_context.context("Get cpus in guest os", test.log.info) numa_cpu_guest = [] - if vm_arch in ('ppc64', 'ppc64le'): + if vm_arch in ("ppc64", "ppc64le"): numa_info_guest = NumaInfo(session=session) # pylint: disable=E0606 nodes_guest = numa_info_guest.online_nodes for node in nodes_guest: @@ -66,13 +77,18 @@ def numa_cpu_guest(): error_context.context("Get SRAT ACPI table", test.log.info) if not utils_package.package_install("acpidump", session): test.cancel("Please install acpidump in guest to proceed") - content = session.cmd_output('cd /tmp && acpidump -n SRAT -b && ' - 'iasl -d srat.dat && cat srat.dsl') - pattern = re.compile(r'Proximity Domain Low\(8\)\s+:\s+([0-9A-Fa-f]+)' - r'\n.*Apic ID\s+:\s+([0-9A-Fa-f]+)') - if 
vm_arch == 'aarch64': - pattern = re.compile(r'Proximity Domain\s+:\s+([0-9A-Fa-f]+)' - r'\n.*Acpi Processor UID\s+:\s+([0-9A-Fa-f]+)') + content = session.cmd_output( + "cd /tmp && acpidump -n SRAT -b && " "iasl -d srat.dat && cat srat.dsl" + ) + pattern = re.compile( + r"Proximity Domain Low\(8\)\s+:\s+([0-9A-Fa-f]+)" + r"\n.*Apic ID\s+:\s+([0-9A-Fa-f]+)" + ) + if vm_arch == "aarch64": + pattern = re.compile( + r"Proximity Domain\s+:\s+([0-9A-Fa-f]+)" + r"\n.*Acpi Processor UID\s+:\s+([0-9A-Fa-f]+)" + ) node_cpus = pattern.findall(content) tmp = {} @@ -91,7 +107,9 @@ def numa_cpu_cli(): """ Get the cpu id list for each node according to the qemu cli, sort with nodeid. """ - error_context.context("Get the expected cpus in qemu command line", test.log.info) + error_context.context( + "Get the expected cpus in qemu command line", test.log.info + ) numa_cpus = params.objects("guest_numa_cpus") numa_cpu_cli = [] tmp = {} @@ -120,7 +138,7 @@ def numa_cpu_setted(numa_cpu_options): numa_cpu_setted = [] tmp = {} for cpu in numa_cpu_options: - nodeid = cpu['node_id'] + nodeid = cpu["node_id"] socket = cpu.get("socket_id") die = cpu.get("die_id") cluster = cpu.get("cluster_id") @@ -146,7 +164,7 @@ def get_hotpluggable_cpus(): tmp = {} out = vm.monitor.info("hotpluggable-cpus") for vcpu_info in out: - vcpus_count = vcpu_info["vcpus-count"] + vcpu_info["vcpus-count"] vcpu_info = vcpu_info["props"] nodeid = vcpu_info.get("node-id") socket = vcpu_info.get("socket-id") @@ -155,14 +173,21 @@ def get_hotpluggable_cpus(): core = vcpu_info.get("core-id") thread = vcpu_info.get("thread-id") if nodeid is not None: - cpu_list = convert_cpu_topology_to_ids(socket, die, cluster, core, thread) + cpu_list = convert_cpu_topology_to_ids( + socket, die, cluster, core, thread + ) if nodeid in tmp.keys(): tmp[nodeid] += cpu_list else: tmp[nodeid] = cpu_list else: - options = {'socket_id': socket, 'die_id': die, 'cluster_id': cluster, - 'core_id': core, 'thread_id': thread} + options = { + "socket_id": socket, + "die_id": die, + "cluster_id": cluster, + "core_id": core, + "thread_id": thread, + } for key in list(options.keys()): if options[key] is None: del options[key] @@ -177,21 +202,21 @@ def get_hotpluggable_cpus(): os_type = params["os_type"] vm_arch = params["vm_arch_name"] - vcpu_threads = params.get_numeric('vcpu_threads') - if vm_arch in ('x86_64', 'i686'): - vcpu_sockets = params.get_numeric('vcpu_sockets') - vcpu_dies = params.get_numeric('vcpu_dies') - vcpu_cores = params.get_numeric('vcpu_cores') + vcpu_threads = params.get_numeric("vcpu_threads") + if vm_arch in ("x86_64", "i686"): + vcpu_sockets = params.get_numeric("vcpu_sockets") + vcpu_dies = params.get_numeric("vcpu_dies") + vcpu_cores = params.get_numeric("vcpu_cores") socket_weight = vcpu_dies * vcpu_cores * vcpu_threads die_weight = vcpu_cores * vcpu_threads core_weight = vcpu_threads thread_weight = 1 - if vm_arch == 'aarch64': - vcpu_sockets = params.get_numeric('vcpu_sockets') - vcpu_clusters = params.get_numeric('vcpu_clusters') - vcpu_cores = params.get_numeric('vcpu_cores') + if vm_arch == "aarch64": + vcpu_sockets = params.get_numeric("vcpu_sockets") + vcpu_clusters = params.get_numeric("vcpu_clusters") + vcpu_cores = params.get_numeric("vcpu_cores") socket_weight = vcpu_clusters * vcpu_cores * vcpu_threads cluster_weight = vcpu_cores * vcpu_threads @@ -200,32 +225,33 @@ def get_hotpluggable_cpus(): numa_cpu_cli = numa_cpu_cli() - if vm_arch != 'aarch64': + if vm_arch != "aarch64": specified_cpus, unspecified_cpus = get_hotpluggable_cpus() 
if specified_cpus != numa_cpu_cli: - test.fail("cpu ids for each node with 'info hotpluggable-cpus' is: %s," - "but the seting in qemu cli is: %s" - % (specified_cpus, numa_cpu_cli)) + test.fail( + f"cpu ids for each node with 'info hotpluggable-cpus' is: {specified_cpus}," + f"but the seting in qemu cli is: {numa_cpu_cli}" + ) if qemu_preconfig: node_ids = [] - for node in params.objects('guest_numa_nodes'): + for node in params.objects("guest_numa_nodes"): node_params = params.object_params(node) - node_ids.append(node_params.get_numeric('numa_nodeid')) + node_ids.append(node_params.get_numeric("numa_nodeid")) node_ids = sorted(node_ids) # Set unspecified cpus from node 0 to max, and set the left cpus to node 0 set_numa_node_options = [] - for index, cpu_option in enumerate(unspecified_cpus): # pylint: disable=E0606 + for index, cpu_option in enumerate(unspecified_cpus): # pylint: disable=E0606 try: - cpu_option.update({'node_id': node_ids[index]}) + cpu_option.update({"node_id": node_ids[index]}) except IndexError: - cpu_option.update({'node_id': 0}) + cpu_option.update({"node_id": 0}) set_numa_node_options.append(cpu_option) for options in set_numa_node_options: - vm.monitor.set_numa_node('cpu', **options) + vm.monitor.set_numa_node("cpu", **options) numa_cpu_setted = numa_cpu_setted(set_numa_node_options) @@ -235,12 +261,13 @@ def get_hotpluggable_cpus(): for item in zip(numa_cpu_cli, numa_cpu_setted): expected_cpus.append(sorted(item[0] + item[1])) - if vm_arch != 'aarch64': + if vm_arch != "aarch64": new_specified_cpus = get_hotpluggable_cpus()[0] if new_specified_cpus != expected_cpus: - test.fail("cpu ids for each node with 'info hotpluggable-cpus' after" - "numa_cpu_set is %s, but expected result is: %s" - % (new_specified_cpus, expected_cpus)) + test.fail( + "cpu ids for each node with 'info hotpluggable-cpus' after" + f"numa_cpu_set is {new_specified_cpus}, but expected result is: {expected_cpus}" + ) vm.monitor.exit_preconfig() vm.resume() @@ -249,14 +276,18 @@ def get_hotpluggable_cpus(): numa_cpu_monitor = [sorted(list(item[1])) for item in vm.monitor.info_numa()] if numa_cpu_monitor != expected_cpus: - test.fail("cpu ids for each node with 'info numa' after setted is: %s, " - "but expected result is: %s" % (numa_cpu_monitor, expected_cpus)) + test.fail( + f"cpu ids for each node with 'info numa' after setted is: {numa_cpu_monitor}, " + f"but expected result is: {expected_cpus}" + ) # check numa cpus in guest os, only for Linux - if os_type == 'linux': + if os_type == "linux": session = vm.wait_for_login() numa_cpu_guest = numa_cpu_guest() session.close() if numa_cpu_guest != expected_cpus: - test.fail("cpu ids for each node in guest os is: %s, but the " - "expected result is: %s" % (numa_cpu_guest, expected_cpus)) + test.fail( + f"cpu ids for each node in guest os is: {numa_cpu_guest}, but the " + f"expected result is: {expected_cpus}" + ) diff --git a/qemu/tests/numa_dist.py b/qemu/tests/numa_dist.py index 1993c9361f..ff13bb5a0f 100644 --- a/qemu/tests/numa_dist.py +++ b/qemu/tests/numa_dist.py @@ -13,13 +13,13 @@ def run(test, params, env): vm.verify_alive() os_type = params["os_type"] session = vm.wait_for_login() - if os_type == 'windows': + if os_type == "windows": return expected_numa_dist = {} guest_numa_nodes = params.objects("guest_numa_nodes") for numa_node in guest_numa_nodes: - numa_node_dist_value = ['unset' for i in range(len(guest_numa_nodes))] + numa_node_dist_value = ["unset" for i in range(len(guest_numa_nodes))] numa_params = 
params.object_params(numa_node) numa_nodeid = numa_params["numa_nodeid"] numa_dist = ast.literal_eval(numa_params.get("numa_dist", "[]")) @@ -31,9 +31,9 @@ def run(test, params, env): for src_id, dist_info in expected_numa_dist.items(): # The distance from a node to itself is always 10 - dist_info[src_id] = '10' + dist_info[src_id] = "10" for dst_id, val in enumerate(dist_info): - if val == 'unset': + if val == "unset": # when distances are only given in one direction for each pair # of nodes, the distances in the opposite directions are assumed # to be the same @@ -44,5 +44,7 @@ def run(test, params, env): guest_numa_dist = numa_info_guest.distances if guest_numa_dist != expected_numa_dist: - test.fail("The actual numa distance info in guest os is: %s, but the " - "expected result is: %s" % (guest_numa_dist, expected_numa_dist)) + test.fail( + f"The actual numa distance info in guest os is: {guest_numa_dist}, but the " + f"expected result is: {expected_numa_dist}" + ) diff --git a/qemu/tests/numa_hmat.py b/qemu/tests/numa_hmat.py index c90f291c59..544f92fcaa 100644 --- a/qemu/tests/numa_hmat.py +++ b/qemu/tests/numa_hmat.py @@ -1,9 +1,7 @@ -import re import os +import re -from virttest import utils_misc -from virttest import env_process - +from virttest import env_process, utils_misc from virttest.utils_version import VersionInterval @@ -17,23 +15,23 @@ def run(test, params, env): """ qemu_path = utils_misc.get_qemu_binary(params) qemu_version = env_process._get_qemu_version(qemu_path) - version_pattern = r'%s-(\d+\.\d+\.\d+)' % os.path.basename(qemu_path) + version_pattern = rf"{os.path.basename(qemu_path)}-(\d+\.\d+\.\d+)" host_qemu = re.findall(version_pattern, qemu_version)[0] - if host_qemu in VersionInterval('[,5.2.0)'): - params['numa_hmat_caches_size_hmat_cache1'] = '50K' - params['numa_hmat_caches_size_hmat_cache2'] = '40K' - params['numa_hmat_caches_size_hmat_cache3'] = '80K' - params['numa_hmat_caches_size_hmat_cache4'] = '70K' - params['numa_hmat_caches_size_hmat_cache5'] = '60K' + if host_qemu in VersionInterval("[,5.2.0)"): + params["numa_hmat_caches_size_hmat_cache1"] = "50K" + params["numa_hmat_caches_size_hmat_cache2"] = "40K" + params["numa_hmat_caches_size_hmat_cache3"] = "80K" + params["numa_hmat_caches_size_hmat_cache4"] = "70K" + params["numa_hmat_caches_size_hmat_cache5"] = "60K" else: - params['numa_hmat_caches_size_hmat_cache1'] = '40K' - params['numa_hmat_caches_size_hmat_cache2'] = '50K' - params['numa_hmat_caches_size_hmat_cache3'] = '60K' - params['numa_hmat_caches_size_hmat_cache4'] = '70K' - params['numa_hmat_caches_size_hmat_cache5'] = '80K' + params["numa_hmat_caches_size_hmat_cache1"] = "40K" + params["numa_hmat_caches_size_hmat_cache2"] = "50K" + params["numa_hmat_caches_size_hmat_cache3"] = "60K" + params["numa_hmat_caches_size_hmat_cache4"] = "70K" + params["numa_hmat_caches_size_hmat_cache5"] = "80K" - params['start_vm'] = 'yes' - vm_name = params['main_vm'] + params["start_vm"] = "yes" + vm_name = params["main_vm"] env_process.preprocess_vm(test, params, env, vm_name) vm = env.get_vm(vm_name) vm.wait_for_login() diff --git a/qemu/tests/numa_maxnodes.py b/qemu/tests/numa_maxnodes.py index 8ea8da5793..ed6f317f69 100644 --- a/qemu/tests/numa_maxnodes.py +++ b/qemu/tests/numa_maxnodes.py @@ -1,5 +1,5 @@ -from virttest import env_process -from virttest import error_context +from virttest import env_process, error_context + from qemu.tests import numa_memdev_options @@ -17,19 +17,20 @@ def run(test, params, env): :param params: Dictionary with the test 
parameters :param env: Dictionary with test environment """ - error_context.context("Modify params to boot guest with 128 numa nodes", - test.log.info) + error_context.context( + "Modify params to boot guest with 128 numa nodes", test.log.info + ) node_num = int(params["numa_nodes"]) node_size = params["node_size"] prealloc_mem = params.get("prealloc_mem", "no") mem_devs = "" guest_numa_nodes = "" for index in range(node_num): - guest_numa_nodes += "node%s " % index - mem_devs += "mem%s " % index - params["numa_memdev_node%s" % index] = "mem-mem%s" % index - params["size_mem%s" % index] = node_size - params["prealloc_mem%s" % index] = prealloc_mem + guest_numa_nodes += f"node{index} " + mem_devs += f"mem{index} " + params[f"numa_memdev_node{index}"] = f"mem-mem{index}" + params[f"size_mem{index}"] = node_size + params[f"prealloc_mem{index}"] = prealloc_mem params["guest_numa_nodes"] = guest_numa_nodes params["mem_devs"] = mem_devs @@ -54,8 +55,9 @@ def run(test, params, env): numa_expected = params["numa_expected"] guest_numa = session.cmd_output(numa_cmd).strip() if guest_numa != numa_expected: - test.fail("Guest numa node is %s while expected numa node is %s" - % (guest_numa, numa_expected)) + test.fail( + f"Guest numa node is {guest_numa} while expected numa node is {numa_expected}" + ) error_context.context("Check if error and calltrace in guest", test.log.info) vm.verify_kernel_crash() session.close() diff --git a/qemu/tests/numa_memdev_mlock.py b/qemu/tests/numa_memdev_mlock.py index 757c517044..149c642f1c 100644 --- a/qemu/tests/numa_memdev_mlock.py +++ b/qemu/tests/numa_memdev_mlock.py @@ -1,8 +1,7 @@ -from virttest import error_context +from virttest import env_process, error_context from qemu.tests import numa_memdev_options from qemu.tests.mlock_basic import MlockBasic -from virttest import env_process @error_context.context_aware @@ -24,13 +23,15 @@ def run(test, params, env): error_context.context("Check host's numa node(s)!", test.log.info) valid_nodes = numa_memdev_options.get_host_numa_node() if len(valid_nodes) < 2: - test.cancel("The host numa nodes that whose size is not zero should be " - "at least 2! But there is %d." % len(valid_nodes)) + test.cancel( + "The host numa nodes that whose size is not zero should be " + "at least 2! But there is %d." 
% len(valid_nodes) + ) - if params.get('policy_mem') != 'default': + if params.get("policy_mem") != "default": error_context.context("Assign host's numa node(s)!", test.log.info) - params['host-nodes_mem0'] = valid_nodes[0] - params['host-nodes_mem1'] = valid_nodes[1] + params["host-nodes_mem0"] = valid_nodes[0] + params["host-nodes_mem1"] = valid_nodes[1] env_process.preprocess_vm(test, params, env, params["main_vm"]) numa_mlock_test = MlockBasic(test, params, env) diff --git a/qemu/tests/numa_memdev_options.py b/qemu/tests/numa_memdev_options.py index 5e0a091230..879f15ca49 100644 --- a/qemu/tests/numa_memdev_options.py +++ b/qemu/tests/numa_memdev_options.py @@ -1,11 +1,7 @@ import re -from avocado.utils import astring -from avocado.utils import process - -from virttest import error_context -from virttest import utils_misc -from virttest import env_process +from avocado.utils import astring, process +from virttest import env_process, error_context, utils_misc from virttest.staging import utils_memory from virttest.utils_numeric import normalize_data_size @@ -19,7 +15,7 @@ def get_host_numa_node(): numa_info = process.getoutput("numactl -H") for i in host_numa: node_size = re.findall(r"node %d size: \d+ \w" % i, numa_info)[0].split()[-2] - if node_size != '0': + if node_size != "0": node_list.append(str(i)) return node_list @@ -32,30 +28,40 @@ def check_query_memdev(test, params, vm): :param params: Dictionary with the test parameters :param vm: VM object """ - mem_devs = params['mem_devs'].split() + mem_devs = params["mem_devs"].split() query_list = vm.monitor.info("memdev") if len(mem_devs) != len(query_list): - test.fail("%d memory devices in query-memdev, but not %d!" - " query-memdev: %s" - % (len(query_list), len(mem_devs), - [item["id"] for item in query_list])) - policy = params['policy_mem'] + test.fail( + "%d memory devices in query-memdev, but not %d!" + " query-memdev: %s" + % (len(query_list), len(mem_devs), [item["id"] for item in query_list]) + ) + policy = params["policy_mem"] for dev in query_list: - mem_dev = dev['id'].split('-')[1] + mem_dev = dev["id"].split("-")[1] memdev_params = params.object_params(mem_dev) - if dev['policy'] != policy: - test.fail("memdev = %s: 'policy' is '%s', but not '%s'!" - % (mem_dev, dev['policy'], policy)) - prealloc = (memdev_params['prealloc'] == 'yes') - if dev['prealloc'] != prealloc: - test.fail("memdev = %s: 'prealloc' is not '%s'!" - % (mem_dev, memdev_params['prealloc'])) - if policy == 'default': + if dev["policy"] != policy: + test.fail( + "memdev = {}: 'policy' is '{}', but not '{}'!".format( + mem_dev, dev["policy"], policy + ) + ) + prealloc = memdev_params["prealloc"] == "yes" + if dev["prealloc"] != prealloc: + test.fail( + "memdev = {}: 'prealloc' is not '{}'!".format( + mem_dev, memdev_params["prealloc"] + ) + ) + if policy == "default": continue - host_node = str(dev['host-nodes'][0]) - if host_node != memdev_params['host-nodes']: - test.fail("memdev = %s: 'host-nodes' is '%s', but not '%s'!" 
- % (mem_dev, host_node, memdev_params["host-nodes"])) + host_node = str(dev["host-nodes"][0]) + if host_node != memdev_params["host-nodes"]: + test.fail( + "memdev = {}: 'host-nodes' is '{}', but not '{}'!".format( + mem_dev, host_node, memdev_params["host-nodes"] + ) + ) def check_memory_in_procfs(test, params, vm): @@ -67,39 +73,45 @@ def check_memory_in_procfs(test, params, vm): :param vm: VM object """ qemu_pid = vm.get_pid() - policy = params['policy_mem'] - if policy == 'preferred': - policy = 'prefer' - for mem_dev in params['mem_devs'].split(): + policy = params["policy_mem"] + if policy == "preferred": + policy = "prefer" + for mem_dev in params["mem_devs"].split(): memdev_params = params.object_params(mem_dev) - mem_size = memdev_params['size'] + mem_size = memdev_params["size"] mem_size = int(float(utils_misc.normalize_data_size(mem_size, "K"))) - smaps = process.system_output(r"grep -B1 -E '^Size:\s+%d' /proc/%d/smaps" - % (mem_size, qemu_pid)) + smaps = process.system_output( + r"grep -B1 -E '^Size:\s+%d' /proc/%d/smaps" % (mem_size, qemu_pid) + ) smaps = astring.to_text(smaps).strip() mem_path = memdev_params.get("mem-path") if mem_path and (mem_path not in smaps): - test.fail("memdev = %s: mem-path '%s' is not in smaps '%s'!" - % (mem_dev, mem_path, smaps)) - mem_start = re.findall('^([0-9a-fA-F]+)-', smaps, re.M)[0] - numa_maps = process.system_output("grep %s /proc/%d/numa_maps" - % (mem_start, qemu_pid)) + test.fail( + f"memdev = {mem_dev}: mem-path '{mem_path}' is not in smaps '{smaps}'!" + ) + mem_start = re.findall("^([0-9a-fA-F]+)-", smaps, re.M)[0] + numa_maps = process.system_output( + "grep %s /proc/%d/numa_maps" % (mem_start, qemu_pid) + ) numa_maps = astring.to_text(numa_maps).strip() if mem_path and (mem_path not in numa_maps): - test.fail("memdev = %s: mem-path '%s' is not in numa_maps '%s'!" - % (mem_dev, mem_path, numa_maps)) - numa_maps = re.sub(r'\s+\(many\)', '', numa_maps) - policy_numa = numa_maps.split()[1].split(':') + test.fail( + f"memdev = {mem_dev}: mem-path '{mem_path}' is not in numa_maps '{numa_maps}'!" + ) + numa_maps = re.sub(r"\s+\(many\)", "", numa_maps) + policy_numa = numa_maps.split()[1].split(":") if policy != policy_numa[0]: - test.fail("memdev = %s:" - " 'policy' in numa_maps is '%s', but not '%s'!" - % (mem_dev, policy_numa[0], policy)) - elif (policy != 'default'): - host_node = memdev_params['host-nodes'] - if (policy_numa[1] != host_node): - test.fail("memdev = %s:" - " 'host-nodes' in numa_maps is '%s', but not '%s'!" - % (mem_dev, policy_numa[1], host_node)) + test.fail( + f"memdev = {mem_dev}:" + f" 'policy' in numa_maps is '{policy_numa[0]}', but not '{policy}'!" + ) + elif policy != "default": + host_node = memdev_params["host-nodes"] + if policy_numa[1] != host_node: + test.fail( + f"memdev = {mem_dev}:" + f" 'host-nodes' in numa_maps is '{policy_numa[1]}', but not '{host_node}'!" + ) @error_context.context_aware @@ -118,29 +130,31 @@ def run(test, params, env): error_context.context("Check host's numa node(s)!", test.log.info) valid_nodes = get_host_numa_node() if len(valid_nodes) < 2: - test.cancel("The host numa nodes that whose size is not zero should be " - "at least 2! But there is %d." % len(valid_nodes)) + test.cancel( + "The host numa nodes that whose size is not zero should be " + "at least 2! But there is %d." 
% len(valid_nodes) + ) node1 = valid_nodes[0] node2 = valid_nodes[1] - if params.get('policy_mem') != 'default': + if params.get("policy_mem") != "default": error_context.context("Assign host's numa node(s)!", test.log.info) - params['host-nodes_mem0'] = node1 - params['host-nodes_mem1'] = node2 + params["host-nodes_mem0"] = node1 + params["host-nodes_mem1"] = node2 - if params.get('set_node_hugepage') == 'yes': + if params.get("set_node_hugepage") == "yes": hugepage_size = utils_memory.get_huge_page_size() - normalize_total_hg1 = int(normalize_data_size(params['size_mem0'], 'K')) + normalize_total_hg1 = int(normalize_data_size(params["size_mem0"], "K")) hugepage_num1 = normalize_total_hg1 // hugepage_size - if 'numa_hugepage' in params['shortname']: - params['target_nodes'] = "%s %s" % (node1, node2) - normalize_total_hg2 = int(normalize_data_size(params['size_mem1'], 'K')) + if "numa_hugepage" in params["shortname"]: + params["target_nodes"] = f"{node1} {node2}" + normalize_total_hg2 = int(normalize_data_size(params["size_mem1"], "K")) hugepage_num2 = normalize_total_hg2 // hugepage_size - params['target_num_node%s' % node2] = hugepage_num2 + params[f"target_num_node{node2}"] = hugepage_num2 else: - params['target_nodes'] = node1 - params['target_num_node%s' % node1] = hugepage_num1 - params['setup_hugepages'] = 'yes' + params["target_nodes"] = node1 + params[f"target_num_node{node1}"] = hugepage_num1 + params["setup_hugepages"] = "yes" env_process.preprocess(test, params, env) error_context.context("Starting VM!", test.log.info) diff --git a/qemu/tests/numa_negative.py b/qemu/tests/numa_negative.py index 750b3537e9..a9ccc8b475 100644 --- a/qemu/tests/numa_negative.py +++ b/qemu/tests/numa_negative.py @@ -1,7 +1,6 @@ import re -from virttest import error_context -from virttest import virt_vm +from virttest import error_context, virt_vm @error_context.context_aware @@ -17,22 +16,23 @@ def run(test, params, env): """ vm = env.get_vm(params["main_vm"]) - params['start_vm'] = 'yes' - negative_type = params.get('negative_type') - error_msg = params.get('error_msg', '') + params["start_vm"] = "yes" + negative_type = params.get("negative_type") + error_msg = params.get("error_msg", "") try: vm.create(params=params) output = vm.process.get_output() except virt_vm.VMCreateError as e: output = str(e) - if negative_type == 'non-fatal': - test.fail("Create VM failed as unexpected: %s" % output) + if negative_type == "non-fatal": + test.fail(f"Create VM failed as unexpected: {output}") - error_context.context("Check the expected error message: %s" - % error_msg, test.log.info) + error_context.context( + f"Check the expected error message: {error_msg}", test.log.info + ) if not re.search(error_msg, output): - test.fail("Can not get expected error message: %s" % error_msg) + test.fail(f"Can not get expected error message: {error_msg}") - if negative_type == 'non-fatal': + if negative_type == "non-fatal": vm.verify_alive() vm.verify_kernel_crash() diff --git a/qemu/tests/numa_node_affinity.py b/qemu/tests/numa_node_affinity.py index 5790822b7b..5604d61a8a 100644 --- a/qemu/tests/numa_node_affinity.py +++ b/qemu/tests/numa_node_affinity.py @@ -1,9 +1,6 @@ import re -from virttest import env_process -from virttest import error_context -from virttest import utils_misc - +from virttest import env_process, error_context, utils_misc from virttest.qemu_monitor import QMPCmdError @@ -55,10 +52,9 @@ def run(test, params, env): node_affinity = vm.monitor.qom_get(thread_context_device_id, "node-affinity") except 
QMPCmdError as e: if not re.search(error_msg, str(e.data)): - test.fail("Cannot get expected error message: %s" % error_msg) - test.log.debug("Get the expected error message: %s" % error_msg) + test.fail(f"Cannot get expected error message: {error_msg}") + test.log.debug("Get the expected error message: %s", error_msg) else: test.fail( - "Got the node-affinity: %s however it is expected to be a non-readable property" - % str(node_affinity) + f"Got the node-affinity: {str(node_affinity)} however it is expected to be a non-readable property" ) diff --git a/qemu/tests/numa_opts.py b/qemu/tests/numa_opts.py index 8f8d7607de..6ce8cf686a 100644 --- a/qemu/tests/numa_opts.py +++ b/qemu/tests/numa_opts.py @@ -1,8 +1,5 @@ from virttest import error_context - -from virttest.utils_misc import normalize_data_size -from virttest.utils_misc import get_mem_info -from virttest.utils_misc import NumaInfo +from virttest.utils_misc import NumaInfo, get_mem_info, normalize_data_size @error_context.context_aware @@ -31,15 +28,15 @@ def numa_info_guest(): numa_guest = [] nodes_guest = numa_info_guest.online_nodes for node in nodes_guest: - node_size = numa_info_guest.online_nodes_meminfo[node]['MemTotal'] - node_size = float(normalize_data_size('%s KB' % node_size)) + node_size = numa_info_guest.online_nodes_meminfo[node]["MemTotal"] + node_size = float(normalize_data_size(f"{node_size} KB")) node_cpus = numa_info_guest.online_nodes_cpus[node] node_cpus = set([int(v) for v in node_cpus.split()]) numa_guest.append((node_size, node_cpus)) # It is a known WONTFIX issue for x86 and ARM, node info of node0 and # node1 is opposite in guest os when vm have 2 nodes - if (vm_arch in ("x86_64", "i686", "aarch64") and len(numa_guest) == 2): + if vm_arch in ("x86_64", "i686", "aarch64") and len(numa_guest) == 2: numa_guest.reverse() return numa_guest @@ -52,29 +49,32 @@ def numa_info_guest(): # Get numa info from monitor numa_monitor = vm.monitors[0].info_numa() - error_context.context("numa info in monitor: %r" % numa_monitor, test.log.info) + error_context.context(f"numa info in monitor: {numa_monitor!r}", test.log.info) monitor_expect_nodes = params.get_numeric("monitor_expect_nodes") if len(numa_monitor) != monitor_expect_nodes: - test.fail("[Monitor]Wrong number of numa nodes: %d. Expected: %d" % - (len(numa_monitor), monitor_expect_nodes)) + test.fail( + "[Monitor]Wrong number of numa nodes: %d. Expected: %d" + % (len(numa_monitor), monitor_expect_nodes) + ) - if os_type == 'linux': + if os_type == "linux": # Get numa info in guest os, only for Linux numa_guest = numa_info_guest() - error_context.context("numa info in guest: %r" % numa_guest, test.log.info) - guest_expect_nodes = int(params.get("guest_expect_nodes", - monitor_expect_nodes)) + error_context.context(f"numa info in guest: {numa_guest!r}", test.log.info) + guest_expect_nodes = int(params.get("guest_expect_nodes", monitor_expect_nodes)) if len(numa_guest) != guest_expect_nodes: - test.fail("[Guest]Wrong number of numa nodes: %d. Expected: %d" % - (len(numa_guest), guest_expect_nodes)) + test.fail( + "[Guest]Wrong number of numa nodes: %d. 
Expected: %d" + % (len(numa_guest), guest_expect_nodes) + ) # Use 30 plus the gap of 'MemTotal' in OS and '-m' in cli as threshold - MemTotal = get_mem_info(session, 'MemTotal') - MemTotal = float(normalize_data_size('%s KB' % MemTotal)) - error_context.context("MemTotal in guest os is %s MB" - % MemTotal, test.log.info) + MemTotal = get_mem_info(session, "MemTotal") + MemTotal = float(normalize_data_size(f"{MemTotal} KB")) + error_context.context(f"MemTotal in guest os is {MemTotal} MB", test.log.info) threshold = float(params.get_numeric("mem") - MemTotal) + 30 - error_context.context("The acceptable threshold is: %s" - % threshold, test.log.info) + error_context.context( + f"The acceptable threshold is: {threshold}", test.log.info + ) else: numa_guest = numa_monitor error_context.context("Check if error and calltrace in guest", test.log.info) @@ -84,8 +84,8 @@ def numa_info_guest(): for nodenr, node in enumerate(numa_guest): mdev = params.get("numa_memdev_node%d" % (nodenr)) if mdev: - mdev = mdev.split('-')[1] - size = float(normalize_data_size(params.get("size_%s" % mdev))) + mdev = mdev.split("-")[1] + size = float(normalize_data_size(params.get(f"size_{mdev}"))) else: size = params.get_numeric("mem") @@ -93,20 +93,28 @@ def numa_info_guest(): if cpus is not None: cpus = set([int(v) for v in cpus.split(",")]) else: - cpus = set([int(v) for v in range(params.get_numeric('smp'))]) + cpus = set([int(v) for v in range(params.get_numeric("smp"))]) if len(numa_monitor) != 0: if size != numa_monitor[nodenr][0]: - test.fail("[Monitor]Wrong size of numa node %d: %f. Expected: %f" - % (nodenr, numa_monitor[nodenr][0], size)) + test.fail( + "[Monitor]Wrong size of numa node %d: %f. Expected: %f" + % (nodenr, numa_monitor[nodenr][0], size) + ) if cpus != numa_monitor[nodenr][1]: - test.fail("[Monitor]Wrong CPU set on numa node %d: %s. Expected: %s" - % (nodenr, numa_monitor[nodenr][1], cpus)) + test.fail( + "[Monitor]Wrong CPU set on numa node %d: %s. Expected: %s" + % (nodenr, numa_monitor[nodenr][1], cpus) + ) - if os_type == 'linux': + if os_type == "linux": if size - numa_guest[nodenr][0] > threshold: - test.fail("[Guest]Wrong size of numa node %d: %f. Expected: %f" - % (nodenr, numa_guest[nodenr][0], size)) + test.fail( + "[Guest]Wrong size of numa node %d: %f. Expected: %f" + % (nodenr, numa_guest[nodenr][0], size) + ) if cpus != numa_guest[nodenr][1]: - test.fail("[Guest]Wrong CPU set on numa node %d: %s. Expected: %s" - % (nodenr, numa_guest[nodenr][1], cpus)) + test.fail( + "[Guest]Wrong CPU set on numa node %d: %s. 
Expected: %s" + % (nodenr, numa_guest[nodenr][1], cpus) + ) diff --git a/qemu/tests/numa_prealloc_handling.py b/qemu/tests/numa_prealloc_handling.py index 01a465818e..8bf495479a 100644 --- a/qemu/tests/numa_prealloc_handling.py +++ b/qemu/tests/numa_prealloc_handling.py @@ -1,7 +1,5 @@ from avocado.utils import process - -from virttest import utils_misc -from virttest import utils_package +from virttest import utils_misc, utils_package def run(test, params, env): @@ -21,15 +19,15 @@ def run(test, params, env): cmd_without_tc = params.get("cmd_without_tc") % qemu_path cmd_with_tc = params.get("cmd_with_tc") % qemu_path - execution_time = float(process.getoutput(cmd_without_tc, - ignore_status=True, - shell=True)) - test.log.debug("Execution time without thread_context: %f" % execution_time) + execution_time = float( + process.getoutput(cmd_without_tc, ignore_status=True, shell=True) + ) + test.log.debug("Execution time without thread_context: %f", execution_time) - execution_time_tc = float(process.getoutput(cmd_with_tc, - ignore_status=True, - shell=True)) - test.log.debug("Execution time with thread_context: %f" % execution_time_tc) + execution_time_tc = float( + process.getoutput(cmd_with_tc, ignore_status=True, shell=True) + ) + test.log.debug("Execution time with thread_context: %f", execution_time_tc) if execution_time <= execution_time_tc: test.fail("There is no boot time speedup when using thread-context!") diff --git a/qemu/tests/numa_prealloc_threads.py b/qemu/tests/numa_prealloc_threads.py index f1053a7dd8..d8491901a5 100644 --- a/qemu/tests/numa_prealloc_threads.py +++ b/qemu/tests/numa_prealloc_threads.py @@ -1,11 +1,8 @@ import re -from avocado.utils import cpu - -from virttest import env_process -from virttest import error_context +from avocado.utils import cpu, process +from virttest import env_process, error_context from virttest.qemu_monitor import QMPCmdError -from avocado.utils import process def check_affinity(affinity, cmd_taskset, stage, test): @@ -16,11 +13,11 @@ def check_affinity(affinity, cmd_taskset, stage, test): :param test: QEMU test object """ output = process.getoutput(cmd_taskset) - actual_affinity = re.search("%s affinity list: (%s)" % (stage, affinity), - output).group(1) + actual_affinity = re.search(f"{stage} affinity list: ({affinity})", output).group(1) if actual_affinity != affinity: - test.fail("Expect %s cpu affinity '%s', but get '%s'" - % (stage, affinity, actual_affinity)) + test.fail( + f"Expect {stage} cpu affinity '{affinity}', but get '{actual_affinity}'" + ) def convert_affinity(affinity): @@ -29,13 +26,13 @@ def convert_affinity(affinity): and qemu-kvm command line style (ex: 1-3) """ if isinstance(affinity, str): - start, end = affinity.split('-') + start, end = affinity.split("-") output = list(range(int(start), int(end) + 1)) elif isinstance(affinity, list): if len(affinity) == 1: output = str(affinity[0]) else: - output = "%s-%s" % (affinity[0], affinity[-1]) + output = f"{affinity[0]}-{affinity[-1]}" else: raise TypeError(f"unexpected affinity type: {type(affinity).__name__}") return output @@ -62,10 +59,11 @@ def run(test, params, env): host_cpus = int(cpu.online_count()) smp_fixed = params.get_numeric("smp_fixed") if host_cpus < smp_fixed: - test.cancel("The host only has %d CPUs, it needs at least %d!" - % (host_cpus, smp_fixed)) + test.cancel( + "The host only has %d CPUs, it needs at least %d!" 
% (host_cpus, smp_fixed) + ) - params['not_preprocess'] = "no" + params["not_preprocess"] = "no" first_cpu_affinity = params.get("first_cpu-affinity") second_cpu_affinity = params.get("second_cpu-affinity") operation_type = params.get("operation") @@ -88,29 +86,34 @@ def run(test, params, env): qemu_cpu_affinity = thread_context_device.get_param("cpu-affinity", "0") cpu_affinity = vm.monitor.qom_get(thread_context_device_id, "cpu-affinity") affinity = convert_affinity(cpu_affinity) - test.log.debug("The affinity: %s and the qemu_cpu_affinity: %s" - % (affinity, qemu_cpu_affinity)) + test.log.debug( + "The affinity: %s and the qemu_cpu_affinity: %s", affinity, qemu_cpu_affinity + ) if qemu_cpu_affinity != affinity: test.fail("Test and QEMU cpu-affinity does not match!") - cmd_taskset = "taskset -c -p %s" % thread_id + cmd_taskset = f"taskset -c -p {thread_id}" check_affinity(affinity, cmd_taskset, "current", test) sandbox = params.get("qemu_sandbox", "on") - error_context.base_context("Setting cpu-affinity: %s" % first_cpu_affinity, - test.log.info) + error_context.base_context( + f"Setting cpu-affinity: {first_cpu_affinity}", test.log.info + ) try: - vm.monitor.qom_set(thread_context_device_id, - "cpu-affinity", - convert_affinity(first_cpu_affinity)) + vm.monitor.qom_set( + thread_context_device_id, + "cpu-affinity", + convert_affinity(first_cpu_affinity), + ) except QMPCmdError as e: if sandbox == "off": - test.fail("Set cpu-affinity '%s' failed as: %s" - % (first_cpu_affinity, str(e.data))) + test.fail( + f"Set cpu-affinity '{first_cpu_affinity}' failed as: {str(e.data)}" + ) if not re.search(error_msg, str(e.data)): - test.fail("Cannot get expected error message: %s" % error_msg) - test.log.debug("Get the expected error message: %s" % error_msg) + test.fail(f"Cannot get expected error message: {error_msg}") + test.log.debug("Get the expected error message: %s", error_msg) else: if sandbox == "on": test.fail("Set cpu-affinity should fail when sandbox=on") @@ -118,14 +121,13 @@ def run(test, params, env): check_affinity(affinity, cmd_taskset, "current", test) if operation_type != "boot_cpu_affinity": - error_context.base_context("Set externally a new CPU affinity", - test.log.info) - cmd_taskset = "taskset -c -p %s %s" % (second_cpu_affinity, - str(thread_id)) + error_context.base_context("Set externally a new CPU affinity", test.log.info) + cmd_taskset = f"taskset -c -p {second_cpu_affinity} {str(thread_id)}" error_context.context("Verify the new cpu-affinity", test.log.info) check_affinity(second_cpu_affinity, cmd_taskset, "new", test) - error_context.context("Checking QEMU main thread remains untouched", - test.log.info) - cmd_taskset = "taskset -c -p %s" % vm.get_pid() + error_context.context( + "Checking QEMU main thread remains untouched", test.log.info + ) + cmd_taskset = f"taskset -c -p {vm.get_pid()}" check_affinity(qemu_cpu_affinity, cmd_taskset, "current", test) diff --git a/qemu/tests/numa_stress.py b/qemu/tests/numa_stress.py index fa52d5c6d8..c4dc33f5fc 100644 --- a/qemu/tests/numa_stress.py +++ b/qemu/tests/numa_stress.py @@ -1,12 +1,8 @@ -import os import math +import os from avocado.utils import process - -from virttest import error_context -from virttest import utils_misc -from virttest import utils_test -from virttest import data_dir +from virttest import data_dir, error_context, utils_misc, utils_test from virttest.staging import utils_memory @@ -71,46 +67,74 @@ def run(test, params, env): test_count = len(node_list) try: test_mem = float(params.get("mem")) * 
mem_ratio - guest_stress_args = "-a -p -l %sM" % int(test_mem) - stress_path = os.path.join(data_dir.get_deps_dir('mem_mapping'), mem_map_tool) + guest_stress_args = f"-a -p -l {int(test_mem)}M" + stress_path = os.path.join(data_dir.get_deps_dir("mem_mapping"), mem_map_tool) test.log.info("Compile the mem_mapping tool") - cmd_cp_mmap_tool = cmd_cp_mmap_tool % (stress_path, tmp_directory, tmp_directory) + cmd_cp_mmap_tool = cmd_cp_mmap_tool % ( + stress_path, + tmp_directory, + tmp_directory, + ) process.run(cmd_cp_mmap_tool, shell=True) utils_memory.drop_caches() for test_round in range(test_count): cmd_mmap = params.get("cmd_mmap") - error_context.context("Executing stress test round: %s" % test_round, test.log.info) + error_context.context( + f"Executing stress test round: {test_round}", test.log.info + ) try: - error_context.context("Get the qemu process memory use status", test.log.info) + error_context.context( + "Get the qemu process memory use status", test.log.info + ) most_used_node, memory_used = max_mem_map_node(host_numa_node, qemu_pid) numa_node_malloc = most_used_node - mmap_size = math.floor(float(node_meminfo(numa_node_malloc, 'MemTotal')) * mem_ratio) + mmap_size = math.floor( + float(node_meminfo(numa_node_malloc, "MemTotal")) * mem_ratio + ) cmd_mmap = cmd_mmap % (tmp_directory, numa_node_malloc, mmap_size) - error_context.context("Run mem_mapping on host node " - "%s." % numa_node_malloc, test.log.info) + error_context.context( + "Run mem_mapping on host node " f"{numa_node_malloc}.", + test.log.info, + ) process.system(cmd_mmap, shell=True, ignore_bg_processes=True) error_context.context("Run memory heavy stress in guest", test.log.info) try: - guest_stress = utils_test.VMStress(vm, "mem_mapping", params, - download_url=stress_path, - stress_args=guest_stress_args) + guest_stress = utils_test.VMStress( + vm, + "mem_mapping", + params, + download_url=stress_path, + stress_args=guest_stress_args, + ) guest_stress.load_stress_tool() except utils_test.StressError as guest_info: test.error(guest_info) - error_context.context("Get the qemu process memory use status", test.log.info) + error_context.context( + "Get the qemu process memory use status", test.log.info + ) node_after, memory_after = max_mem_map_node(host_numa_node, qemu_pid) if node_after == most_used_node and memory_after >= memory_used: idle_nodes = node_list.copy() idle_nodes.remove(numa_node_malloc) - error_context.context("Run migratepages on host from node " - "%s to node %s." 
% (numa_node_malloc, - idle_nodes[0]), test.log.info) - migrate_pages = cmd_migrate_pages % (qemu_pid, numa_node_malloc, idle_nodes[0]) + error_context.context( + "Run migratepages on host from node " + f"{numa_node_malloc} to node {idle_nodes[0]}.", + test.log.info, + ) + migrate_pages = cmd_migrate_pages % ( + qemu_pid, + numa_node_malloc, + idle_nodes[0], + ) process.system_output(migrate_pages, shell=True) - error_context.context("Get the qemu process memory use status again", test.log.info) - node_after, memory_after = max_mem_map_node(host_numa_node, qemu_pid) + error_context.context( + "Get the qemu process memory use status again", test.log.info + ) + node_after, memory_after = max_mem_map_node( + host_numa_node, qemu_pid + ) if node_after == most_used_node and memory_after >= memory_used: - test.fail("Memory still stick in node %s" % numa_node_malloc) + test.fail(f"Memory still stick in node {numa_node_malloc}") finally: guest_stress.unload_stress() guest_stress.clean() diff --git a/qemu/tests/nvdimm.py b/qemu/tests/nvdimm.py index 0d7828bee3..032c6de219 100644 --- a/qemu/tests/nvdimm.py +++ b/qemu/tests/nvdimm.py @@ -1,18 +1,15 @@ -import os import logging +import os import time from avocado.utils import process - -from virttest import env_process -from virttest import error_context -from virttest import utils_package +from virttest import env_process, error_context, utils_package from virttest.utils_test.qemu import MemoryHotplugTest -LOG_JOB = logging.getLogger('avocado.test') +LOG_JOB = logging.getLogger("avocado.test") -class NvdimmTest(object): +class NvdimmTest: """ Class for NVDIMM test """ @@ -39,7 +36,7 @@ def run_guest_cmd(self, cmd, check_status=True, timeout=240): """ status, output = self.session.cmd_status_output(cmd, timeout=timeout) if check_status and status != 0: - self.test.fail("Execute command '%s' failed, output: %s" % (cmd, output)) + self.test.fail(f"Execute command '{cmd}' failed, output: {output}") return output.strip() def verify_nvdimm(self, vm, mems): @@ -49,13 +46,15 @@ def verify_nvdimm(self, vm, mems): :params vm: VM object :params mems: memory objects """ - dimms_expect = set("dimm-%s" % mem for mem in mems) + dimms_expect = set(f"dimm-{mem}" for mem in mems) LOG_JOB.info("Check if dimm %s in memory-devices", dimms_expect) - dimms_monitor = set([info["data"]["id"] for info in vm.monitor.info("memory-devices")]) + dimms_monitor = set( + [info["data"]["id"] for info in vm.monitor.info("memory-devices")] + ) if not dimms_expect.issubset(dimms_monitor): invisible_dimms = dimms_expect - dimms_monitor - self.test.fail("%s dimms are invisible in monitor" % invisible_dimms) - check_cmd = "test -b %s" % self.params.get("dev_path", "/dev/pmem0") + self.test.fail(f"{invisible_dimms} dimms are invisible in monitor") + check_cmd = "test -b {}".format(self.params.get("dev_path", "/dev/pmem0")) self.run_guest_cmd(check_cmd) def format_nvdimm(self): @@ -80,7 +79,7 @@ def umount_nvdimm(self): """ Umount nvdimm device in guest. 
""" - umount_cmd = "umount %s" % self.params["dev_path"] + umount_cmd = "umount {}".format(self.params["dev_path"]) self.run_guest_cmd(umount_cmd) def md5_hash(self, file): @@ -90,7 +89,7 @@ def md5_hash(self, file): :param file: A file with fullpath :return: The md5 value of the file """ - cmd = "md5sum %s" % file + cmd = f"md5sum {file}" return self.run_guest_cmd(cmd) @@ -127,8 +126,10 @@ def run(test, params, env): test.error("ndctl is not available in host!") ndctl_ver = process.system_output("ndctl -v", shell=True) if float(ndctl_ver) < 56: - test.cancel("ndctl version should be equal or greater than 56!" - "Current ndctl version is %s." % ndctl_ver) + test.cancel( + "ndctl version should be equal or greater than 56!" + f"Current ndctl version is {ndctl_ver}." + ) try: process.system(params["create_dax_cmd"], shell=True) except process.CmdError: @@ -136,7 +137,7 @@ def run(test, params, env): if not os.path.exists(params["nv_backend"]): test.fail("Check nv_backend in host failed!") params["start_vm"] = "yes" - vm_name = params['main_vm'] + vm_name = params["main_vm"] env_process.preprocess_vm(test, params, env, vm_name) nvdimm_test = NvdimmTest(test, params, env) @@ -173,9 +174,12 @@ def run(test, params, env): nvdimm_test.run_guest_cmd(params["run_test"], timeout=3600) return nv_file = params.get("nv_file", "/mnt/nv") - error_context.context("Create a file in nvdimm mount dir in guest, and get " - "original md5 of the file", test.log.info) - dd_cmd = "dd if=/dev/urandom of=%s bs=1K count=200" % nv_file + error_context.context( + "Create a file in nvdimm mount dir in guest, and get " + "original md5 of the file", + test.log.info, + ) + dd_cmd = f"dd if=/dev/urandom of={nv_file} bs=1K count=200" nvdimm_test.run_guest_cmd(dd_cmd) orig_md5 = nvdimm_test.md5_hash(nv_file) nvdimm_test.umount_nvdimm() @@ -186,8 +190,9 @@ def run(test, params, env): new_md5 = nvdimm_test.md5_hash(nv_file) error_context.context("Compare current md5 to original md5", test.log.info) if new_md5 != orig_md5: - test.fail("'%s' changed. The original md5 is '%s', current md5 is '%s'" - % (nv_file, orig_md5, new_md5)) + test.fail( + f"'{nv_file}' changed. 
The original md5 is '{orig_md5}', current md5 is '{new_md5}'" + ) nvdimm_test.umount_nvdimm() error_context.context("Check if error and calltrace in guest", test.log.info) vm.verify_kernel_crash() @@ -195,11 +200,11 @@ def run(test, params, env): finally: if nvdimm_test.session: if params.get("nvml_dir"): - nvdimm_test.run_guest_cmd("rm -rf %s" % params.get("nvml_dir")) + nvdimm_test.run_guest_cmd("rm -rf {}".format(params.get("nvml_dir"))) nvdimm_test.session.close() vm.destroy() if params.get("nvdimm_dax") == "yes": try: process.system(params["del_dax_cmd"], timeout=240, shell=True) except process.CmdError: - test.log.warn("Host dax configuration cannot be deleted!") + test.log.warning("Host dax configuration cannot be deleted!") diff --git a/qemu/tests/nvdimm_mapsync.py b/qemu/tests/nvdimm_mapsync.py index 9c664c8da8..9058153d8f 100644 --- a/qemu/tests/nvdimm_mapsync.py +++ b/qemu/tests/nvdimm_mapsync.py @@ -1,11 +1,9 @@ -import re import os import pathlib +import re from avocado.utils import process - -from virttest import env_process -from virttest import error_context +from virttest import env_process, error_context @error_context.context_aware @@ -24,8 +22,10 @@ def run(test, params, env): dev_path = params["dev_path"] p = pathlib.Path(dev_path) if not p.is_block_device(): - test.error("There is no nvdimm device in host, please add kernel param" - "'memmap' to emulate one") + test.error( + "There is no nvdimm device in host, please add kernel param" + "'memmap' to emulate one" + ) format_cmd = params["format_command"] mount_cmd = params["mount_command"] @@ -40,14 +40,14 @@ def run(test, params, env): else: try: params["start_vm"] = "yes" - env_process.preprocess_vm(test, params, env, params['main_vm']) + env_process.preprocess_vm(test, params, env, params["main_vm"]) vm = env.get_vm(params["main_vm"]) vm.verify_alive() vm.wait_for_login() vm_pid = vm.get_pid() error_context.context("Check vmflags in smaps file", test.log.info) - with open('/proc/%s/smaps' % vm_pid, 'r') as fd: + with open(f"/proc/{vm_pid}/smaps", "r") as fd: content = fd.read() check_pattern = params["check_pattern"] vmflags_match = re.search(check_pattern, content, re.M) @@ -56,7 +56,7 @@ def run(test, params, env): test.log.info("Get vmflags: %s", vmflags) else: test.error("Didn't find VmFlags in smaps file") - if 'sf' not in vmflags.split(): + if "sf" not in vmflags.split(): test.fail("Flag 'sf' is not present in smaps file") finally: vm.destroy() diff --git a/qemu/tests/nvdimm_mode.py b/qemu/tests/nvdimm_mode.py index 9af2005c65..336a35adf5 100644 --- a/qemu/tests/nvdimm_mode.py +++ b/qemu/tests/nvdimm_mode.py @@ -1,5 +1,4 @@ -from virttest import utils_package -from virttest import error_context +from virttest import error_context, utils_package @error_context.context_aware @@ -27,7 +26,7 @@ def run(test, params, env): output = session.cmd_output(params["ndctl_check_cmd"]) output = eval(output) for item in output: - if item['mode'] != 'devdax': + if item["mode"] != "devdax": test.fail("Change both nvdimm to dax mode failed") finally: utils_package.package_remove("ndctl", session) diff --git a/qemu/tests/nvdimm_negative.py b/qemu/tests/nvdimm_negative.py index 121df94de2..37cf3a236d 100644 --- a/qemu/tests/nvdimm_negative.py +++ b/qemu/tests/nvdimm_negative.py @@ -1,6 +1,6 @@ import re -from virttest import error_context -from virttest import virt_vm + +from virttest import error_context, virt_vm @error_context.context_aware @@ -16,15 +16,16 @@ def run(test, params, env): """ vm = env.get_vm(params["main_vm"]) - 
params['start_vm'] = 'yes' - error_msg = params.get('error_msg', '') + params["start_vm"] = "yes" + error_msg = params.get("error_msg", "") try: vm.create(params=params) output = vm.process.get_output() except virt_vm.VMCreateError as e: output = str(e) - error_context.context("Check the expected error message: %s" - % error_msg, test.log.info) + error_context.context( + f"Check the expected error message: {error_msg}", test.log.info + ) if not re.search(error_msg, output): - test.fail("Can not get expected error message: %s" % error_msg) + test.fail(f"Can not get expected error message: {error_msg}") diff --git a/qemu/tests/nvdimm_redis.py b/qemu/tests/nvdimm_redis.py index 2c597e7de1..7a562a275e 100644 --- a/qemu/tests/nvdimm_redis.py +++ b/qemu/tests/nvdimm_redis.py @@ -1,5 +1,4 @@ -from virttest import error_context -from virttest import utils_package +from virttest import error_context, utils_package @error_context.context_aware @@ -29,13 +28,13 @@ def run(test, params, env): for cmd in cmds: s, o = session.cmd_status_output(cmd, timeout=600) if s: - test.error("Failed to run cmd '%s', output: %s" % (cmd, o)) + test.error(f"Failed to run cmd '{cmd}', output: {o}") error_context.context("Run redis test in guest", test.log.info) s, o = session.cmd_status_output(params["run_test"], timeout=3600) if s: - test.fail("Run redis test failed, output: %s" % o) + test.fail(f"Run redis test failed, output: {o}") vm.verify_kernel_crash() finally: if session: - session.cmd_output_safe('rm -rf %s' % params["redis_dir"]) + session.cmd_output_safe("rm -rf {}".format(params["redis_dir"])) vm.destroy() diff --git a/qemu/tests/nvme_plug.py b/qemu/tests/nvme_plug.py index 4c67aa103f..dd525c622c 100644 --- a/qemu/tests/nvme_plug.py +++ b/qemu/tests/nvme_plug.py @@ -1,5 +1,4 @@ -from virttest import env_process -from virttest import utils_disk +from virttest import env_process, utils_disk from virttest.tests import unattended_install from provider.block_devices_plug import BlockDevicesPlug @@ -27,12 +26,12 @@ def run(test, params, env): """ unattended_install.run(test, params, env) - if params.get('remove_options'): - for option in params.get('remove_options').split(): + if params.get("remove_options"): + for option in params.get("remove_options").split(): del params[option] - params['cdroms'] = params.get('default_cdroms') + params["cdroms"] = params.get("default_cdroms") - params['start_vm'] = 'yes' + params["start_vm"] = "yes" env_process.preprocess_vm(test, params, env, params["main_vm"]) vm = env.get_vm(params["main_vm"]) vm.verify_alive() @@ -40,17 +39,17 @@ def run(test, params, env): plug = BlockDevicesPlug(vm) plug.hotplug_devs_serial() - target = '/dev/%s' % plug[0] - os_type = params['os_type'] - data_img_size = params.get('image_size_%s' % params.get('data_img_tag')) - if os_type == 'windows': + target = f"/dev/{plug[0]}" + os_type = params["os_type"] + data_img_size = params.get("image_size_{}".format(params.get("data_img_tag"))) + if os_type == "windows": utils_disk.update_windows_disk_attributes(session, plug[0]) - drive_letter = utils_disk.configure_empty_disk(session, plug[0], - data_img_size, - os_type)[0] - target = r'%s\:\\%s' % (drive_letter, params.get('fio_filename')) - fio = generate_instance(params, vm, 'fio') - for option in params['fio_options'].split(';'): - fio.run('--filename=%s %s' % (target, option)) + drive_letter = utils_disk.configure_empty_disk( + session, plug[0], data_img_size, os_type + )[0] + target = r"{}\:\\{}".format(drive_letter, params.get("fio_filename")) + fio 
= generate_instance(params, vm, "fio") + for option in params["fio_options"].split(";"): + fio.run(f"--filename={target} {option}") plug.unplug_devs_serial() vm.reboot(session) diff --git a/qemu/tests/nx.py b/qemu/tests/nx.py index 642aefaf1b..6aab43e7df 100644 --- a/qemu/tests/nx.py +++ b/qemu/tests/nx.py @@ -1,7 +1,6 @@ import os -from virttest import data_dir -from virttest import error_context +from virttest import data_dir, error_context @error_context.context_aware @@ -23,9 +22,9 @@ def run(test, params, env): session = vm.wait_for_login(timeout=int(params.get("login_timeout", 360))) exploit_cmd = params.get("exploit_cmd", "") - if not exploit_cmd or session.cmd_status("test -x %s" % exploit_cmd): - exploit_file = os.path.join(data_dir.get_deps_dir(), 'nx', 'x64_sc_rdo.c') - dst_dir = '/tmp' + if not exploit_cmd or session.cmd_status(f"test -x {exploit_cmd}"): + exploit_file = os.path.join(data_dir.get_deps_dir(), "nx", "x64_sc_rdo.c") + dst_dir = "/tmp" error_context.context("Copy the Exploit file to guest.", test.log.info) vm.copy_files_to(exploit_file, dst_dir) @@ -41,30 +40,31 @@ def run(test, params, env): # if nx is enabled (by default), the program failed. # segmentation error. return value of shell is not zero. exec_res = session.cmd_status(exploit_cmd) - nx_on = params.get('nx_on', 'yes') - if nx_on == 'yes': + nx_on = params.get("nx_on", "yes") + if nx_on == "yes": if exec_res: - test.log.info('NX works good.') - error_context.context("Using execstack to remove the protection.", - test.log.info) - enable_exec = 'execstack -s %s' % exploit_cmd + test.log.info("NX works good.") + error_context.context( + "Using execstack to remove the protection.", test.log.info + ) + enable_exec = f"execstack -s {exploit_cmd}" if session.cmd_status(enable_exec): if session.cmd_status("execstack --help"): msg = "Please make sure guest have execstack command." test.error(msg) - test.error('Failed to enable the execstack') + test.error("Failed to enable the execstack") if session.cmd_status(exploit_cmd): - test.fail('NX is still protecting. Error.') + test.fail("NX is still protecting. Error.") else: - test.log.info('NX is disabled as desired. good') + test.log.info("NX is disabled as desired. good") else: - test.fail('Fatal Error: NX does not protect anything!') + test.fail("Fatal Error: NX does not protect anything!") else: if exec_res: msg = "qemu fail to disable 'nx' flag or the exploit is corrupted." 
test.error(msg) else: - test.log.info('NX is disabled, and this Test Case passed.') + test.log.info("NX is disabled, and this Test Case passed.") if session: session.close() diff --git a/qemu/tests/offload_checksum_windows.py b/qemu/tests/offload_checksum_windows.py index a1e0fabc16..b691c64916 100644 --- a/qemu/tests/offload_checksum_windows.py +++ b/qemu/tests/offload_checksum_windows.py @@ -1,6 +1,4 @@ -from virttest import utils_test -from virttest import error_context -from virttest import utils_net +from virttest import error_context, utils_net, utils_test from virttest.utils_windows import virtio_win @@ -39,8 +37,9 @@ def start_test(checksum_config="tcp"): param config: the setting config for checksum, tcp or disable """ - error_context.context("Start set tx/rx checksum offload to %s" % checksum_config, - test.log.info) + error_context.context( + f"Start set tx/rx checksum offload to {checksum_config}", test.log.info + ) set_offload_checksum_windows(vm, True, checksum_config) set_offload_checksum_windows(vm, False, checksum_config) @@ -52,13 +51,13 @@ def start_test(checksum_config="tcp"): vm.verify_alive() session = vm.wait_for_login(timeout=timeout) - error_context.context("Check if the driver is installed and " - "verified", test.log.info) + error_context.context( + "Check if the driver is installed and " "verified", test.log.info + ) driver_name = params.get("driver_name", "netkvm") - session = utils_test.qemu.windrv_check_running_verifier(session, vm, - test, - driver_name, - timeout) + session = utils_test.qemu.windrv_check_running_verifier( + session, vm, test, driver_name, timeout + ) session.close() virtio_win.prepare_netkvmco(vm) diff --git a/qemu/tests/openflow_acl_test.py b/qemu/tests/openflow_acl_test.py index aa21d6ace0..07d0aba3bc 100644 --- a/qemu/tests/openflow_acl_test.py +++ b/qemu/tests/openflow_acl_test.py @@ -1,19 +1,14 @@ import functools -import re import os +import re import aexpect - from avocado.utils import process +from virttest import data_dir, error_context, remote, utils_net -from virttest import error_context -from virttest import utils_net -from virttest import remote -from virttest import data_dir - - -_system_statusoutput = functools.partial(process.getstatusoutput, shell=True, - ignore_status=False) +_system_statusoutput = functools.partial( + process.getstatusoutput, shell=True, ignore_status=False +) @error_context.context_aware @@ -35,8 +30,8 @@ def run(test, params, env): :param params: Dictionary with the test parameters :param env: Dictionary with test environment. 
""" - def access_service(access_sys, access_targets, disabled, host_ip, - ref=False): + + def access_service(access_sys, access_targets, disabled, host_ip, ref=False): err_msg = "" err_type = "" for asys in access_sys: @@ -44,7 +39,7 @@ def access_service(access_sys, access_targets, disabled, host_ip, test.log.debug("Try to access target %s from %s", atgt, asys) access_params = access_sys[asys] - atgt_disabled = access_params['disabled_%s' % atgt] + atgt_disabled = access_params[f"disabled_{atgt}"] if asys in vms_tags: vm = env.get_vm(asys) session = vm.wait_for_login(timeout=timeout) @@ -61,10 +56,12 @@ def access_service(access_sys, access_targets, disabled, host_ip, else: access_re_sub_string = host_ip - access_cmd = re.sub("ACCESS_TARGET", access_re_sub_string, - access_params['access_cmd']) - ref_cmd = re.sub("ACCESS_TARGET", access_re_sub_string, - access_params['ref_cmd']) + access_cmd = re.sub( + "ACCESS_TARGET", access_re_sub_string, access_params["access_cmd"] + ) + ref_cmd = re.sub( + "ACCESS_TARGET", access_re_sub_string, access_params["ref_cmd"] + ) if access_cmd in ["ssh", "telnet"]: if atgt in vms_tags: @@ -76,21 +73,23 @@ def access_service(access_sys, access_targets, disabled, host_ip, out = "" out_err = "" try: - out = remote_login(access_cmd, target_ip, - remote_src, params, host_ip) + out = remote_login( + access_cmd, target_ip, remote_src, params, host_ip + ) stat = 0 except remote.LoginError as err: stat = 1 - out_err = "Failed to login %s " % atgt - out_err += "from %s, err: %s" % (asys, err.output) + out_err = f"Failed to login {atgt} " + out_err += f"from {asys}, err: {err.output}" if "TelnetServer" in params.get("setup_cmd_windows", ""): try: - out += remote_login(access_cmd, ssh_src_ip, - target_vm, params, host_ip) + out += remote_login( + access_cmd, ssh_src_ip, target_vm, params, host_ip + ) except remote.LoginError as err: stat += 1 - out_err += "Failed to login %s " % asys - out_err += "from %s, err: %s" % (atgt, err.output) + out_err += f"Failed to login {asys} " + out_err += f"from {atgt}, err: {err.output}" if out_err: out = out_err else: @@ -108,27 +107,27 @@ def access_service(access_sys, access_targets, disabled, host_ip, if access_params.get("clean_cmd"): try: - run_func(access_params['clean_cmd']) + run_func(access_params["clean_cmd"]) except Exception: pass if disabled and atgt_disabled and stat == 0: - err_msg += "Still can access %s after" % atgt + err_msg += f"Still can access {atgt} after" err_msg += " disable it from ovs. " - err_msg += "Command: %s " % access_cmd - err_msg += "Output: %s" % out + err_msg += f"Command: {access_cmd} " + err_msg += f"Output: {out}" if disabled and atgt_disabled and stat != 0: test.log.debug("Can not access target as expect.") if not disabled and stat != 0: if ref: - err_msg += "Can not access %s at the" % atgt + err_msg += f"Can not access {atgt} at the" err_msg += " beginning. Please check your setup." err_type = "ref" else: - err_msg += "Still can not access %s" % atgt + err_msg += f"Still can not access {atgt}" err_msg += " after enable the access. " - err_msg += "Command: %s " % access_cmd - err_msg += "Output: %s" % out + err_msg += f"Command: {access_cmd} " + err_msg += f"Output: {out}" if err_msg: if err_type == "ref": test.cancel(err_msg) @@ -153,8 +152,8 @@ def access_service(access_sys, access_targets, disabled, host_ip, else: err_msg += "Reference command failed after setup" err_msg += " the rules. 
" - err_msg += "Command: %s " % ref_cmd - err_msg += "Output: %s" % out + err_msg += f"Command: {ref_cmd} " + err_msg += f"Output: {out}" if err_msg: if err_type == "ref": test.cancel(err_msg) @@ -162,11 +161,11 @@ def access_service(access_sys, access_targets, disabled, host_ip, def get_acl_cmd(protocol, in_port, action, extra_options): acl_cmd = protocol.strip() - acl_cmd += ",in_port=%s" % in_port.strip() + acl_cmd += f",in_port={in_port.strip()}" if extra_options.strip(): - acl_cmd += ",%s" % ",".join(extra_options.strip().split()) + acl_cmd += ",{}".format(",".join(extra_options.strip().split())) if action.strip(): - acl_cmd += ",action=%s" % action.strip() + acl_cmd += f",action={action.strip()}" return acl_cmd def acl_rules_check(acl_rules, acl_setup_cmd): @@ -193,7 +192,7 @@ def remote_login(client, host, src, params_login, host_ip): username = params_login["username"] password = params_login["password"] prompt = params_login["shell_prompt"] - linesep = eval("'%s'" % params_login.get("shell_linesep", r"\n")) + linesep = eval("'{}'".format(params_login.get("shell_linesep", r"\n"))) quit_cmd = params.get("quit_cmd", "exit") if host == host_ip: # Try to login from guest to host. @@ -205,12 +204,13 @@ def remote_login(client, host, src, params_login, host_ip): if client == "ssh": # We only support ssh for Linux in this test - cmd = ("ssh -o UserKnownHostsFile=/dev/null " - "-o StrictHostKeyChecking=no " - "-o PreferredAuthentications=password -p %s %s@%s" % - (port, username, host)) + cmd = ( + "ssh -o UserKnownHostsFile=/dev/null " + "-o StrictHostKeyChecking=no " + f"-o PreferredAuthentications=password -p {port} {username}@{host}" + ) elif client == "telnet": - cmd = "telnet -l %s %s %s" % (username, host, port) + cmd = f"telnet -l {username} {host} {port}" else: raise remote.LoginBadClientError(client) @@ -220,18 +220,19 @@ def remote_login(client, host, src, params_login, host_ip): else: if params_login.get("os_type") == "windows": if client == "telnet": - cmd = "C:\\telnet.py %s %s " % (host, username) - cmd += "%s \"%s\" && " % (password, prompt) + cmd = f"C:\\telnet.py {host} {username} " + cmd += f'{password} "{prompt}" && ' cmd += "C:\\wait_for_quit.py" - cmd = "%s || ping 127.0.0.1 -n 5 -w 1000 > nul" % cmd + cmd = f"{cmd} || ping 127.0.0.1 -n 5 -w 1000 > nul" else: cmd += " || sleep 5" session = src.wait_for_login() test.log.debug("Sending login command: %s", cmd) session.sendline(cmd) try: - out = remote.handle_prompts(session, username, password, - prompt, timeout, debug=True) + out = remote.handle_prompts( + session, username, password, prompt, timeout, debug=True + ) except Exception as err: session.close() raise err @@ -256,22 +257,25 @@ def setup_service(setup_target): setup_params = params.object_params(os_type) setup_cmd = setup_params.get("setup_cmd", "service SERVICE restart") prepare_cmd = setup_params.get("prepare_cmd") - setup_cmd = re.sub("SERVICE", setup_params.get("service", ""), - setup_cmd) + setup_cmd = re.sub("SERVICE", setup_params.get("service", ""), setup_cmd) - error_context.context("Set up %s service in %s" - % (setup_params.get("service"), setup_target), - test.log.info) + error_context.context( + "Set up {} service in {}".format(setup_params.get("service"), setup_target), + test.log.info, + ) if params.get("copy_ftp_site") and setup_target != "localhost": - ftp_site = os.path.join(data_dir.get_deps_dir(), - params.get("copy_ftp_site")) + ftp_site = os.path.join( + data_dir.get_deps_dir(), params.get("copy_ftp_site") + ) ftp_dir = 
params.get("ftp_dir") setup_vm.copy_files_to(ftp_site, ftp_dir) access_param = setup_params.object_params(setup_target) if "ftp" in access_param.get("access_cmd") and os_type == "linux": setup_func( - "sed -i 's/anonymous_enable=NO/anonymous_enable=YES/g' %s" - % params["vsftpd_conf"]) + "sed -i 's/anonymous_enable=NO/anonymous_enable=YES/g' {}".format( + params["vsftpd_conf"] + ) + ) if prepare_cmd: setup_func(prepare_cmd, timeout=setup_timeout) setup_func(setup_cmd, timeout=setup_timeout) @@ -292,12 +296,12 @@ def stop_service(setup_target): setup_params = params.object_params(os_type) stop_cmd = setup_params.get("stop_cmd", "service SERVICE stop") cleanup_cmd = setup_params.get("cleanup_cmd") - stop_cmd = re.sub("SERVICE", setup_params.get("service", ""), - stop_cmd) + stop_cmd = re.sub("SERVICE", setup_params.get("service", ""), stop_cmd) - error_context.context("Stop %s service in %s" - % (setup_params.get("service"), setup_target), - test.log.info) + error_context.context( + "Stop {} service in {}".format(setup_params.get("service"), setup_target), + test.log.info, + ) if stop_cmd: setup_func(stop_cmd, timeout=setup_timeout) @@ -307,9 +311,9 @@ def stop_service(setup_target): if setup_target != "localhost": setup_session.close() - timeout = int(params.get("login_timeout", '360')) + timeout = int(params.get("login_timeout", "360")) op_timeout = int(params.get("op_timeout", "360")) - acl_protocol = params['acl_protocol'] + acl_protocol = params["acl_protocol"] acl_extra_options = params.get("acl_extra_options", "") for vm in env.get_all_vms(): @@ -325,7 +329,7 @@ def stop_service(setup_target): vm.copy_files_to(script_path, tmp_dir) if params.get("copy_curl") and params.get("os_type") == "windows": curl_win_path = params.get("curl_win_path", "C:\\curl\\") - session.cmd("dir {0} || mkdir {0}".format(curl_win_path)) + session.cmd(f"dir {curl_win_path} || mkdir {curl_win_path}") for script in params.get("copy_curl").split(): curl_win_link = os.path.join(data_dir.get_deps_dir("curl"), script) vm.copy_files_to(curl_win_link, curl_win_path, timeout=60) @@ -334,7 +338,7 @@ def stop_service(setup_target): vms_tags = params.objects("vms") br_name = params.get("netdst") if br_name == "private": - br_name = params.get("priv_brname", 'atbr0') + br_name = params.get("priv_brname", "atbr0") for setup_target in params.get("setup_targets", "").split(): setup_service(setup_target) @@ -371,57 +375,53 @@ def stop_service(setup_target): check_from_output = access_param.get("check_from_output") access_sys[target] = {} - access_sys[target]['access_cmd'] = access_param['access_cmd'] - access_sys[target]['ref_cmd'] = access_param.get('ref_cmd', "") - access_sys[target]['clean_cmd'] = access_param.get('clean_guest', - "") + access_sys[target]["access_cmd"] = access_param["access_cmd"] + access_sys[target]["ref_cmd"] = access_param.get("ref_cmd", "") + access_sys[target]["clean_cmd"] = access_param.get("clean_guest", "") if check_from_output: - access_sys[target]['check_from_output'] = check_from_output + access_sys[target]["check_from_output"] = check_from_output for tgt in access_targets: tgt_param = access_param.object_params(tgt) acl_disabled = tgt_param.get("acl_disabled") == "yes" - access_sys[target]['disabled_%s' % tgt] = acl_disabled + access_sys[target][f"disabled_{tgt}"] = acl_disabled - error_context.context("Try to access target before setup the rules", - test.log.info) + error_context.context("Try to access target before setup the rules", test.log.info) access_service(access_sys, access_targets, 
False, host_ip, ref=True) error_context.context("Disable the access in ovs", test.log.info) br_infos = utils_net.openflow_manager(br_name, "show").stdout.decode() - if_port = re.findall(r"(\d+)\(%s\)" % if_name, br_infos) + if_port = re.findall(rf"(\d+)\({if_name}\)", br_infos) if not if_port: - test.cancel("Can not find %s in bridge %s" % (if_name, br_name)) + test.cancel(f"Can not find {if_name} in bridge {br_name}") if_port = if_port[0] acl_cmd = get_acl_cmd(acl_protocol, if_port, "drop", acl_extra_options) utils_net.openflow_manager(br_name, "add-flow", acl_cmd) - acl_rules = utils_net.openflow_manager( - br_name, "dump-flows").stdout.decode() + acl_rules = utils_net.openflow_manager(br_name, "dump-flows").stdout.decode() if not acl_rules_check(acl_rules, acl_cmd): - test.fail("Can not find the rules from ovs-ofctl: %s" % acl_rules) + test.fail(f"Can not find the rules from ovs-ofctl: {acl_rules}") - error_context.context("Try to acess target to exam the disable rules", - test.log.info) + error_context.context( + "Try to access target to examine the disable rules", test.log.info + ) access_service(access_sys, access_targets, True, host_ip) error_context.context("Enable the access in ovs", test.log.info) acl_cmd = get_acl_cmd(acl_protocol, if_port, "normal", acl_extra_options) utils_net.openflow_manager(br_name, "mod-flows", acl_cmd) - acl_rules = utils_net.openflow_manager( - br_name, "dump-flows").stdout.decode() + acl_rules = utils_net.openflow_manager(br_name, "dump-flows").stdout.decode() if not acl_rules_check(acl_rules, acl_cmd): - test.fail("Can not find the rules from ovs-ofctl: %s" % acl_rules) + test.fail(f"Can not find the rules from ovs-ofctl: {acl_rules}") - error_context.context("Try to acess target to exam the enable rules", - test.log.info) + error_context.context("Try to access target to examine the enable rules", test.log.info) access_service(access_sys, access_targets, False, host_ip) error_context.context("Delete the access rules in ovs", test.log.info) acl_cmd = get_acl_cmd(acl_protocol, if_port, "", acl_extra_options) utils_net.openflow_manager(br_name, "del-flows", acl_cmd) - acl_rules = utils_net.openflow_manager( - br_name, "dump-flows").stdout.decode() + acl_rules = utils_net.openflow_manager(br_name, "dump-flows").stdout.decode() if acl_rules_check(acl_rules, acl_cmd): - test.fail("Still can find the rules from ovs-ofctl: %s" % acl_rules) - error_context.context("Try to acess target to exam after delete the rules", - test.log.info) + test.fail(f"Still can find the rules from ovs-ofctl: {acl_rules}") + error_context.context( + "Try to access target to examine after deleting the rules", test.log.info + ) access_service(access_sys, access_targets, False, host_ip) for setup_target in params.get("setup_targets", "").split(): diff --git a/qemu/tests/openflow_test.py b/qemu/tests/openflow_test.py index b9d4c5fba9..23ff49b30c 100644 --- a/qemu/tests/openflow_test.py +++ b/qemu/tests/openflow_test.py @@ -1,11 +1,7 @@ import re import time -from virttest import error_context -from virttest import utils_net -from virttest import utils_test -from virttest import utils_misc -from virttest import remote +from virttest import error_context, remote, utils_misc, utils_net, utils_test @error_context.context_aware @@ -21,6 +17,7 @@ def run(test, params, env): :param params: Dictionary with the test parameters :param env: Dictionary with test environment. 
""" + def run_tcpdump_bg(vm, addresses, dump_protocol): """ Run tcpdump in background, tcpdump will exit once catch a packet @@ -29,12 +26,14 @@ def run_tcpdump_bg(vm, addresses, dump_protocol): bg_session = vm.wait_for_login() if tcpdump_is_alive(bg_session): bg_session.cmd("killall -9 tcpdump") - tcpdump_cmd = ("setsid tcpdump -iany -n -v %s and 'src %s and dst %s'" - " -c 1 >/dev/null 2>&1") - bg_session.sendline(tcpdump_cmd % (dump_protocol, addresses[0], - addresses[1])) - if not utils_misc.wait_for(lambda: tcpdump_is_alive(bg_session), - 30, 0, 1, "Waiting tcpdump start..."): + tcpdump_cmd = ( + "setsid tcpdump -iany -n -v %s and 'src %s and dst %s'" + " -c 1 >/dev/null 2>&1" + ) + bg_session.sendline(tcpdump_cmd % (dump_protocol, addresses[0], addresses[1])) + if not utils_misc.wait_for( + lambda: tcpdump_is_alive(bg_session), 30, 0, 1, "Waiting tcpdump start..." + ): test.cancel("Error, can not run tcpdump") bg_session.close() @@ -42,7 +41,7 @@ def dump_catch_data(session, dump_log, catch_reg): """ Search data from dump_log """ - dump_info = session.cmd_output("cat %s" % dump_log) + dump_info = session.cmd_output(f"cat {dump_log}") if re.findall(catch_reg, dump_info, re.I): return True return False @@ -68,9 +67,11 @@ def tcpdump_catch_packet_test(session, drop_flow=False): err_msg += "%s " % (packet_receive and "can" or "can not") err_msg += "receive the packets" test.error(err_msg) - test.log.info("Correct, flow %s dropped, tcpdump %s receive the packet", - (drop_flow and "was" or "was not"), - (packet_receive and "can" or "can not")) + test.log.info( + "Correct, flow %s dropped, tcpdump %s receive the packet", + (drop_flow and "was" or "was not"), + (packet_receive and "can" or "can not"), + ) def arp_entry_clean(entry=None): """ @@ -79,7 +80,7 @@ def arp_entry_clean(entry=None): if not entry: arp_clean_cmd = "arp -n | awk '/^[1-2]/{print \"arp -d \" $1}'|sh" else: - arp_clean_cmd = "arp -d %s" % entry + arp_clean_cmd = f"arp -d {entry}" for session in sessions: session.cmd_output_safe(arp_clean_cmd) @@ -90,20 +91,22 @@ def check_arp_info(session, entry, vm, match_mac=None): match_string = match_mac or "incomplete" if not arp_entries: - test.error("Can not find arp entry in %s: %s" - % (vm.name, arp_info)) + test.error(f"Can not find arp entry in {vm.name}: {arp_info}") if not re.findall(match_string, arp_entries[0], re.I): - test.fail("Can not find the mac address" - " %s of %s in arp" - " entry %s" % (match_mac, vm.name, arp_entries[0])) + test.fail( + "Can not find the mac address" + f" {match_mac} of {vm.name} in arp" + f" entry {arp_entries[0]}" + ) def ping_test(session, dst, drop_flow=False): """ Ping test, check icmp """ - ping_status, ping_output = utils_test.ping(dest=dst, count=10, - timeout=20, session=session) + ping_status, ping_output = utils_test.ping( + dest=dst, count=10, timeout=20, session=session + ) # when drop_flow is true, ping should failed(return not zero) # drop_flow is false, ping should success packets_lost = 100 @@ -118,15 +121,18 @@ def ping_test(session, dst, drop_flow=False): info_msg = "Correct, icmp flow %s dropped, ping '%s', " info_msg += "packets lost rate is: '%s'" - test.log.info(info_msg, (drop_flow and "was" or "was not"), - (ping_status and "failed" or "success"), - packets_lost) + test.log.info( + info_msg, + (drop_flow and "was" or "was not"), + (ping_status and "failed" or "success"), + packets_lost, + ) def run_ping_bg(vm, dst): """ Run ping in background """ - ping_cmd = "ping %s" % dst + ping_cmd = f"ping {dst}" session = 
vm.wait_for_login() test.log.info("Ping %s in background", dst) session.sendline(ping_cmd) @@ -138,8 +144,7 @@ def check_bg_ping(session): ping_failed_pattern = r"From .*? icmp_seq=\d+ Destination" ping_failed_pattern += r" Host Unreachable" try: - out = session.read_until_output_matches([ping_pattern, - ping_failed_pattern]) + out = session.read_until_output_matches([ping_pattern, ping_failed_pattern]) if re.search(ping_failed_pattern, out[1]): return False, out[1] else: @@ -156,23 +161,27 @@ def file_transfer(sessions, addresses, timeout): password = params.get("password") sessions[0].cmd(prepare_cmd, timeout=timeout) ori_md5 = sessions[0].cmd_output(md5_cmd) - scp_cmd = (r"scp -v -o UserKnownHostsFile=/dev/null " - r"-o StrictHostKeyChecking=no " - r"-o PreferredAuthentications=password -r " - r"-P %s /tmp/copy_file %s@\[%s\]:/tmp/copy_file" % - (port, username, addresses[1])) + scp_cmd = ( + r"scp -v -o UserKnownHostsFile=/dev/null " + r"-o StrictHostKeyChecking=no " + r"-o PreferredAuthentications=password -r " + rf"-P {port} /tmp/copy_file {username}@\[{addresses[1]}\]:/tmp/copy_file" + ) sessions[0].sendline(scp_cmd) remote.handle_prompts(sessions[0], username, password, prompt, 600) new_md5 = sessions[1].cmd_output(md5_cmd) for session in sessions: session.cmd("rm -f /tmp/copy_file") if new_md5 != ori_md5: - test.fail("Md5 value changed after file transfer, " - "original is %s and the new file" - " is: %s" % (ori_md5, new_md5)) - - def nc_connect_test(sessions, addresses, drop_flow=False, nc_port="8899", - udp_model=False): + test.fail( + "Md5 value changed after file transfer, " + f"original is {ori_md5} and the new file" + f" is: {new_md5}" + ) + + def nc_connect_test( + sessions, addresses, drop_flow=False, nc_port="8899", udp_model=False + ): """ Nc connect test, check tcp and udp """ @@ -191,24 +200,30 @@ def nc_connect_test(sessions, addresses, drop_flow=False, nc_port="8899", nc_protocol = udp_model and "UDP" or "TCP" nc_connect = False if utils_misc.wait_for( - lambda: dump_catch_data(sessions[1], nc_log, "client"), - 10, 0, 2, text="Wait '%s' connect" % nc_protocol): + lambda: dump_catch_data(sessions[1], nc_log, "client"), + 10, + 0, + 2, + text=f"Wait '{nc_protocol}' connect", + ): nc_connect = True if nc_connect == drop_flow: - err_msg = "Error, '%s' " % nc_protocol + err_msg = f"Error, '{nc_protocol}' " err_msg += "flow %s " % (drop_flow and "was" or "was not") err_msg += "dropped, nc connect should" err_msg += " '%s'" % (nc_connect and "failed" or "success") test.error(err_msg) - test.log.info("Correct, '%s' flow %s dropped, and nc connect %s", - nc_protocol, (drop_flow and "was" or "was not"), - (nc_connect and "success" or "failed")) + test.log.info( + "Correct, '%s' flow %s dropped, and nc connect %s", + nc_protocol, + (drop_flow and "was" or "was not"), + (nc_connect and "success" or "failed"), + ) finally: for session in sessions: session.cmd_output_safe("killall nc || killall ncat") - session.cmd("%s %s" % (clean_cmd, nc_log), - ignore_all_errors=True) + session.cmd(f"{clean_cmd} {nc_log}", ignore_all_errors=True) def acl_rules_check(acl_rules, flow_options): flow_options = re.sub("action=", "actions=", flow_options) @@ -231,13 +246,12 @@ def acl_rules_check(acl_rules, flow_options): def remove_plus_items(open_flow_rules): plus_items = ["duration", "n_packets", "n_bytes", "idle_age", "hard_age"] for plus_item in plus_items: - open_flow_rules = re.sub("%s=.*?," % plus_item, "", - open_flow_rules) + open_flow_rules = re.sub(f"{plus_item}=.*?,", "", 
open_flow_rules) return open_flow_rules br_name = params.get("netdst", "ovs0") - timeout = int(params.get("login_timeout", '360')) - prepare_timeout = int(params.get("prepare_timeout", '360')) + timeout = int(params.get("login_timeout", "360")) + prepare_timeout = int(params.get("prepare_timeout", "360")) clean_cmd = params.get("clean_cmd", "rm -f") sessions = [] addresses = [] @@ -245,7 +259,7 @@ def remove_plus_items(open_flow_rules): bg_ping_session = None if not utils_net.ovs_br_exists(br_name): - test.cancel("%s isn't an openvswith bridge" % br_name) + test.cancel(f"{br_name} isn't an openvswith bridge") error_context.context("Init boot the vms") for vm_name in params.objects("vms"): @@ -257,20 +271,20 @@ def remove_plus_items(open_flow_rules): # set openflow rules: f_protocol = params.get("flow", "arp") - f_base_options = "%s,nw_src=%s,nw_dst=%s" % (f_protocol, addresses[0], - addresses[1]) + f_base_options = f"{f_protocol},nw_src={addresses[0]},nw_dst={addresses[1]}" for session in sessions: - session.cmd("systemctl stop firewalld || service firewalld stop", - ignore_all_errors=True) + session.cmd( + "systemctl stop firewalld || service firewalld stop", ignore_all_errors=True + ) try: for drop_flow in [True, False]: if drop_flow: f_command = "add-flow" f_options = f_base_options + ",action=drop" - drop_icmp = eval(params.get("drop_icmp", 'True')) - drop_tcp = eval(params.get("drop_tcp", 'True')) - drop_udp = eval(params.get("drop_udp", 'True')) + drop_icmp = eval(params.get("drop_icmp", "True")) + drop_tcp = eval(params.get("drop_tcp", "True")) + drop_udp = eval(params.get("drop_udp", "True")) else: f_command = "mod-flows" f_options = f_base_options + ",action=normal" @@ -279,53 +293,55 @@ def remove_plus_items(open_flow_rules): drop_udp = False error_context.base_context("Test prepare") - error_context.context("Do %s %s on %s" - % (f_command, f_options, br_name)) + error_context.context(f"Do {f_command} {f_options} on {br_name}") utils_net.openflow_manager(br_name, f_command, f_options) acl_rules = utils_net.openflow_manager( - br_name, "dump-flows").stdout.decode() + br_name, "dump-flows" + ).stdout.decode() if not acl_rules_check(acl_rules, f_options): - test.fail("Can not find the rules from" - " ovs-ofctl: %s" % acl_rules) + test.fail("Can not find the rules from" f" ovs-ofctl: {acl_rules}") - error_context.context("Run tcpdump in guest %s" % vms[1].name, - test.log.info) + error_context.context(f"Run tcpdump in guest {vms[1].name}", test.log.info) run_tcpdump_bg(vms[1], addresses, f_protocol) - if drop_flow or f_protocol is not "arp": - error_context.context("Clean arp cache in both guest", - test.log.info) + if drop_flow or f_protocol != "arp": + error_context.context("Clean arp cache in both guest", test.log.info) arp_entry_clean(addresses[1]) error_context.base_context( - "Exec '%s' flow '%s' test" % - (f_protocol, drop_flow and "drop" or "normal")) + "Exec '{}' flow '{}' test".format( + f_protocol, drop_flow and "drop" or "normal" + ) + ) if drop_flow: - error_context.context("Ping test form %s to %s" % - (vms[0].name, vms[1].name), test.log.info) + error_context.context( + f"Ping test form {vms[0].name} to {vms[1].name}", + test.log.info, + ) ping_test(sessions[0], addresses[1], drop_icmp) if params.get("run_file_transfer") == "yes": - error_context.context("Transfer file form %s to %s" % - (vms[0].name, vms[1].name), - test.log.info) + error_context.context( + f"Transfer file form {vms[0].name} to {vms[1].name}", + test.log.info, + ) file_transfer(sessions, addresses, 
prepare_timeout) else: - error_context.context("Ping test form %s to %s in background" % - (vms[0].name, vms[1].name), test.log.info) + error_context.context( + f"Ping test form {vms[0].name} to {vms[1].name} in background", + test.log.info, + ) bg_ping_session = run_ping_bg(vms[0], addresses[1]) - if f_protocol == 'arp' and drop_flow: - error_context.context("Check arp inside %s" % vms[0].name, - test.log.info) + if f_protocol == "arp" and drop_flow: + error_context.context(f"Check arp inside {vms[0].name}", test.log.info) check_arp_info(sessions[0], addresses[1], vms[0]) - elif f_protocol == 'arp' or params.get("check_arp") == "yes": + elif f_protocol == "arp" or params.get("check_arp") == "yes": time.sleep(2) error_context.context("Check arp inside guests.", test.log.info) for index, address in enumerate(addresses): sess_index = (index + 1) % 2 mac = vms[index].virtnet.get_mac_address(0) - check_arp_info(sessions[sess_index], address, vms[index], - mac) + check_arp_info(sessions[sess_index], address, vms[index], mac) error_context.context("Run nc connect test via tcp", test.log.info) nc_connect_test(sessions, addresses, drop_tcp) @@ -337,14 +353,17 @@ def remove_plus_items(open_flow_rules): tcpdump_catch_packet_test(sessions[1], drop_flow) finally: openflow_rules_ori = utils_net.openflow_manager( - br_name, "dump-flows").stdout.decode() + br_name, "dump-flows" + ).stdout.decode() openflow_rules_ori = remove_plus_items(openflow_rules_ori) utils_net.openflow_manager(br_name, "del-flows", f_protocol) openflow_rules = utils_net.openflow_manager( - br_name, "dump-flows").stdout.decode() + br_name, "dump-flows" + ).stdout.decode() openflow_rules = remove_plus_items(openflow_rules) - removed_rule = list(set(openflow_rules_ori.splitlines()) - - set(openflow_rules.splitlines())) + removed_rule = list( + set(openflow_rules_ori.splitlines()) - set(openflow_rules.splitlines()) + ) if f_protocol == "tcp": error_context.context("Run nc connect test via tcp", test.log.info) @@ -356,15 +375,16 @@ def remove_plus_items(open_flow_rules): for session in sessions: session.close() failed_msg = [] - if (not removed_rule or - not acl_rules_check(removed_rule[0], f_options)): - failed_msg.append("Failed to delete %s" % f_options) + if not removed_rule or not acl_rules_check(removed_rule[0], f_options): + failed_msg.append(f"Failed to delete {f_options}") if bg_ping_session: bg_ping_ok = check_bg_ping(bg_ping_session) bg_ping_session.close() if not bg_ping_ok[0]: - failed_msg.append("There is something wrong happen in " - "background ping: %s" % bg_ping_ok[1]) + failed_msg.append( + "There is something wrong happen in " + f"background ping: {bg_ping_ok[1]}" + ) if failed_msg: test.fail(failed_msg) diff --git a/qemu/tests/ovmf_check_efi.py b/qemu/tests/ovmf_check_efi.py index 9644258105..17b20175d5 100644 --- a/qemu/tests/ovmf_check_efi.py +++ b/qemu/tests/ovmf_check_efi.py @@ -21,8 +21,9 @@ def run(test, params, env): dmesg_cmd = params["dmesg_cmd"] dmesg_output = session.cmd_output_safe(dmesg_cmd) if efi_info not in check_output or not dmesg_output: - test.fail("No 'EFI System Partition' info in output of 'gdisk -l', " - "or no efi related info in dmesg") + test.fail( + "No 'EFI System Partition' info in output of 'gdisk -l', " + "or no efi related info in dmesg" + ) if os_type == "windows" and efi_info not in check_output: - test.fail("BIOS version of guest is %s, it should be UEFI" - % check_output) + test.fail(f"BIOS version of guest is {check_output}, it should be UEFI") diff --git 
a/qemu/tests/ovs_enslave_port.py b/qemu/tests/ovs_enslave_port.py index c14ffab738..5d59bc0cfc 100644 --- a/qemu/tests/ovs_enslave_port.py +++ b/qemu/tests/ovs_enslave_port.py @@ -1,6 +1,5 @@ from avocado.utils import process -from virttest import error_context -from virttest import utils_net +from virttest import error_context, utils_net @error_context.context_aware @@ -21,25 +20,28 @@ def run(test, params, env): netdst = params["netdst"] if utils_net.ovs_br_exists(netdst) is not True: - test.cancel("%s isn't an openvswith bridge" % netdst) + test.cancel(f"{netdst} isn't an openvswith bridge") new_br_name = params.get("new_ovs_bridge_name", "temp_ovs_bridge") host_bridge = utils_net.find_bridge_manager(netdst) if host_bridge.br_exist(new_br_name) is True: host_bridge.del_br(new_br_name) host_bridge.add_br(new_br_name) - error_context.context("OVS bridge %s created." % new_br_name, - test.log.info) + error_context.context(f"OVS bridge {new_br_name} created.", test.log.info) try: ports = host_bridge.list_ports(netdst) host_bridge.add_port(new_br_name, ports[0]) except process.CmdError as e: if "already exists on bridge" not in e.result.stderr_text: - test.fail("Port %s should not be enslaved to another bridge." - " Output: %s" % (ports[0], e.result.stderr_text)) + test.fail( + f"Port {ports[0]} should not be enslaved to another bridge." + f" Output: {e.result.stderr_text}" + ) else: - test.fail("Add port cmd successfully excuted. However, port %s " - "should not be enslaved to another bridge." % ports[0]) + test.fail( + f"Add port cmd successfully excuted. However, port {ports[0]} " + "should not be enslaved to another bridge." + ) finally: host_bridge.del_br(new_br_name) diff --git a/qemu/tests/ovs_host_vlan.py b/qemu/tests/ovs_host_vlan.py index 095b1e2895..a4707fc820 100644 --- a/qemu/tests/ovs_host_vlan.py +++ b/qemu/tests/ovs_host_vlan.py @@ -2,19 +2,14 @@ import os.path from avocado.utils import process +from virttest import data_dir, env_process, error_context, remote, utils_net, utils_test -from virttest import error_context -from virttest import remote -from virttest import data_dir -from virttest import utils_net -from virttest import utils_test -from virttest import env_process +LOG_JOB = logging.getLogger("avocado.test") -LOG_JOB = logging.getLogger('avocado.test') - -def create_file_in_guest(test, session, file_path, size=100, - os_type="linux", timeout=360): +def create_file_in_guest( + test, session, file_path, size=100, os_type="linux", timeout=360 +): """ Create a file with 'dd' in guest. @@ -29,11 +24,11 @@ def create_file_in_guest(test, session, file_path, size=100, if os_type == "linux": cmd = "dd if=/dev/zero of=%s bs=1M count=%d" % (file_path, size) else: - cmd = "fsutil file createnew %s %s" % (file_path, size * 1024 * 1024) + cmd = f"fsutil file createnew {file_path} {size * 1024 * 1024}" status, output = session.cmd_status_output(cmd, timeout=timeout) if status: err = "Fail to create file in guest." - err += " command '%s', output '%s'." % (cmd, output) + err += f" command '{cmd}', output '{output}'." test.error(err) @@ -47,11 +42,11 @@ def ping_result_check(test, loss_ratio, same_vlan): """ if same_vlan and loss_ratio > 0: msg = "Package lost when ping guest in same vlan." - msg += "Loss ratio is %s" % loss_ratio + msg += f"Loss ratio is {loss_ratio}" test.fail(msg) if not same_vlan and loss_ratio != 100: msg = "Ping between guest in different vlan successful." 
- msg += "Loss ratio is %s" % loss_ratio + msg += f"Loss ratio is {loss_ratio}" test.fail(msg) @@ -68,17 +63,14 @@ def ping(test, os_type, match_error, dest, count, session, same_vlan): :param same_vlan: whether the two guests are in the same vlan """ if os_type == "linux": - status, output = utils_test.ping(dest, count, - timeout=60, - session=session) + status, output = utils_test.ping(dest, count, timeout=60, session=session) loss_ratio = utils_test.get_loss_ratio(output) ping_result_check(test, loss_ratio, same_vlan) LOG_JOB.debug(output) elif os_type == "windows": # TODO, not supported by now - status, output = utils_test.ping(dest, count, timeout=60, - session=session) + status, output = utils_test.ping(dest, count, timeout=60, session=session) if match_error in str(output): - ratio = 100 + pass else: loss_ratio = utils_test.get_loss_ratio(output) ping_result_check(test, loss_ratio, same_vlan) @@ -101,28 +93,24 @@ def netperf_setup(test, params, env): if params.get("os_type") == "linux": netperf_link = params["netperf_link"] netperf_path = params["netperf_path"] - src_link = os.path.join(data_dir.get_deps_dir("netperf"), - netperf_link) + src_link = os.path.join(data_dir.get_deps_dir("netperf"), netperf_link) vm.copy_files_to(src_link, netperf_path, timeout=60) setup_cmd = params.get("setup_cmd") - (status, output) = session.cmd_status_output(setup_cmd % - netperf_path, - timeout=600) + (status, output) = session.cmd_status_output( + setup_cmd % netperf_path, timeout=600 + ) if status != 0: err = "Fail to setup netperf on guest os." - err += " Command output:\n%s" % output + err += f" Command output:\n{output}" test.error(err) elif params.get("os_type") == "windows": # TODO, not suppoted by now - s_link = params.get("netperf_server_link_win", - "netserver-2.6.0.exe") - src_link = os.path.join(data_dir.get_deps_dir("netperf"), - s_link) + s_link = params.get("netperf_server_link_win", "netserver-2.6.0.exe") + src_link = os.path.join(data_dir.get_deps_dir("netperf"), s_link) netperf_path = params["netperf_path"] vm.copy_files_to(src_link, netperf_path, timeout=60) s_link = params.get("netperf_client_link_win", "netperf.exe") - src_link = os.path.join(data_dir.get_deps_dir("netperf"), - s_link) + src_link = os.path.join(data_dir.get_deps_dir("netperf"), s_link) vm.copy_files_to(src_link, netperf_path, timeout=60) finally: if session: @@ -165,8 +153,7 @@ def run(test, params, env): txt = "Stop NetworkManager service in host." error_context.context(txt, test.log.info) - process.system(params["stop_network_manager"], timeout=120, - ignore_status=True) + process.system(params["stop_network_manager"], timeout=120, ignore_status=True) txt = "Create a new private ovs bridge, which has" txt += " no physical nics inside." 
@@ -176,7 +163,7 @@ def run(test, params, env): try: process.system(ovs_br_create_cmd, shell=True) except process.CmdError: - test.fail("Fail to create ovs bridge %s" % bridge_name) + test.fail(f"Fail to create ovs bridge {bridge_name}") sessions = [] try: @@ -193,57 +180,77 @@ def run(test, params, env): ifname = vm.virtnet[0]["ifname"] guest_ip = vm.virtnet[0].ip vlan = vm_params["ovs_port_vlan"] - create_port_cmd = "ovs-vsctl set Port %s tag=%s" % (ifname, vlan) + create_port_cmd = f"ovs-vsctl set Port {ifname} tag={vlan}" try: - output = process.system_output(create_port_cmd, timeout=120, - ignore_status=False).decode() + output = process.system_output( + create_port_cmd, timeout=120, ignore_status=False + ).decode() process.system_output("ovs-vsctl show") except process.CmdError: - err = "Fail to create ovs port %s " % ifname - err += "on bridge %s." % bridge_name - err += " Command: %s, " % create_port_cmd - err += "output: %s." % output + err = f"Fail to create ovs port {ifname} " + err += f"on bridge {bridge_name}." + err += f" Command: {create_port_cmd}, " + err += f"output: {output}." test.fail(err) session_ctl = vm.wait_for_serial_login(timeout=login_timeout) if os_type == "linux": - txt = "Stop NetworkManager service in guest %s." % vm_name + txt = f"Stop NetworkManager service in guest {vm_name}." test.log.info(txt) session_ctl.cmd(params["stop_network_manager"], timeout=120) mac = vm.get_mac_address() - txt = "Set guest %s mac %s IP to %s" % (vm_name, mac, guest_ip) + txt = f"Set guest {vm_name} mac {mac} IP to {guest_ip}" error_context.context(txt, test.log.info) - utils_net.set_guest_ip_addr(session_ctl, mac, guest_ip, - os_type=os_type) + utils_net.set_guest_ip_addr(session_ctl, mac, guest_ip, os_type=os_type) utils_net.Interface(ifname).down() utils_net.Interface(ifname).up() ips.append(guest_ip) sessions.append(session_ctl) - txt = "Ping between two guests in same vlan. %s -> %s" % (vms[0], - vms[1]) + txt = f"Ping between two guests in same vlan. {vms[0]} -> {vms[1]}" error_context.context(txt, test.log.info) - ping(test, os_type, match_error, ips[1], count=10, - session=sessions[0], same_vlan=True) + ping( + test, + os_type, + match_error, + ips[1], + count=10, + session=sessions[0], + same_vlan=True, + ) txt = "Ping between two guests in different " - txt += "vlan. %s -> %s" % (vms[0], vms[2]) + txt += f"vlan. {vms[0]} -> {vms[2]}" error_context.context(txt, test.log.info) - ping(test, os_type, match_error, ips[2], count=10, - session=sessions[0], same_vlan=False) + ping( + test, + os_type, + match_error, + ips[2], + count=10, + session=sessions[0], + same_vlan=False, + ) txt = "Ping between two guests in another " - txt += "vlan. %s -> %s" % (vms[2], vms[3]) + txt += f"vlan. {vms[2]} -> {vms[3]}" error_context.context(txt, test.log.info) - ping(test, os_type, match_error, ips[3], count=10, - session=sessions[2], same_vlan=True) + ping( + test, + os_type, + match_error, + ips[3], + count=10, + session=sessions[2], + same_vlan=True, + ) txt = "Netperf test between two guests in same vlan." 
- txt += "%s -> %s" % (vms[0], vms[1]) + txt += f"{vms[0]} -> {vms[1]}" error_context.context(txt, test.log.info) - txt = "Run netserver in VM %s" % vms[0] + txt = f"Run netserver in VM {vms[0]}" error_context.context(txt, test.log.info) shutdown_firewall_cmd = params["shutdown_firewall"] sessions[0].cmd_status_output(shutdown_firewall_cmd, timeout=10) @@ -253,10 +260,10 @@ def run(test, params, env): status, output = sessions[0].cmd_status_output(cmd, timeout=60) if status != 0: err = "Fail to start netserver in VM." - err += " Command output %s" % output + err += f" Command output {output}" test.error(err) - txt = "Run netperf client in VM %s" % vms[1] + txt = f"Run netperf client in VM {vms[1]}" error_context.context(txt, test.log.info) sessions[1].cmd_status_output(shutdown_firewall_cmd, timeout=10) test_duration = int(params.get("netperf_test_duration", 60)) @@ -265,14 +272,12 @@ def run(test, params, env): netperf_cmd = os.path.join(netperf_path, netperf_cmd) cmd = netperf_cmd % (test_duration, ips[0]) if test_protocol: - cmd += " -t %s" % test_protocol + cmd += f" -t {test_protocol}" cmd_timeout = test_duration + 20 - status, output = sessions[1].cmd_status_output(cmd, - timeout=cmd_timeout) + status, output = sessions[1].cmd_status_output(cmd, timeout=cmd_timeout) if status != 0: - err = "Fail to run netperf test between %s and %s." % (vms[0], - vms[1]) - err += " Command output:\n%s" % output + err = f"Fail to run netperf test between {vms[0]} and {vms[1]}." + err += f" Command output:\n{output}" test.fail(err) if params.get("file_transfer_test", "yes") == "yes": @@ -280,35 +285,44 @@ def run(test, params, env): file_create_timeout = int(params.get("file_create_timeout", 720)) file_path = params.get("file_path", "/var/tmp/src_file") - txt = "Create %s MB file %s in %s" % (filesize, - file_path, - vms[0]) + txt = f"Create {filesize} MB file {file_path} in {vms[0]}" error_context.context(txt, test.log.info) - create_file_in_guest(test, session=sessions[0], - file_path=file_path, - size=filesize, os_type=os_type, - timeout=file_create_timeout) - - txt = "Transfer file %s between guests in same " % file_path - txt += "vlan. %s -> %s" % (vms[0], vms[1]) + create_file_in_guest( + test, + session=sessions[0], + file_path=file_path, + size=filesize, + os_type=os_type, + timeout=file_create_timeout, + ) + + txt = f"Transfer file {file_path} between guests in same " + txt += f"vlan. 
{vms[0]} -> {vms[1]}" error_context.context(txt, test.log.info) password = params.get("password", "kvmautotest") username = params.get("username", "root") f_tmout = int(params.get("file_transfer_timeout", 1200)) shell_port = params.get("shell_port", "22") data_port = params.get("nc_file_transfer_port", "9000") - log_file = "file_transfer_from_%s_to_%s.log" % (ips[0], ips[1]) + f"file_transfer_from_{ips[0]}_to_{ips[1]}.log" if os_type == "linux": # TODO, windows will be supported later - remote.nc_copy_between_remotes(ips[0], ips[1], shell_port, - password, password, - username, username, - file_path, file_path, - d_port=data_port, - timeout=2, - check_sum=True, - s_session=sessions[0], - d_session=sessions[1], - file_transfer_timeout=f_tmout) + remote.nc_copy_between_remotes( + ips[0], + ips[1], + shell_port, + password, + password, + username, + username, + file_path, + file_path, + d_port=data_port, + timeout=2, + check_sum=True, + s_session=sessions[0], + d_session=sessions[1], + file_transfer_timeout=f_tmout, + ) finally: for session in sessions: if session: diff --git a/qemu/tests/ovs_mirror.py b/qemu/tests/ovs_mirror.py index acc814e18d..8a7ac02347 100644 --- a/qemu/tests/ovs_mirror.py +++ b/qemu/tests/ovs_mirror.py @@ -1,15 +1,12 @@ -import re -import os import glob +import os +import re import shutil import time -from avocado.utils import process from avocado.utils import path as utils_path - -from virttest import error_context -from virttest import utils_net -from virttest import env_process +from avocado.utils import process +from virttest import env_process, error_context, utils_net @error_context.context_aware @@ -30,8 +27,7 @@ def run(test, params, env): :param env: Dictionary with test environment. """ - def make_mirror_cmd( - mirror_port, target_port, direction="all", ovs="ovs0"): + def make_mirror_cmd(mirror_port, target_port, direction="all", ovs="ovs0"): """ Generate create ovs port mirror command. @@ -42,24 +38,20 @@ def make_mirror_cmd( :return: string of ovs port mirror command. """ - cmd = ["ovs-vsctl set Bridge %s mirrors=@m " % ovs] + cmd = [f"ovs-vsctl set Bridge {ovs} mirrors=@m "] for port in [mirror_port, target_port]: - cmd.append("-- --id=@%s get Port %s " % (port, port)) + cmd.append(f"-- --id=@{port} get Port {port} ") if direction == "input": - cmd.append( - "-- --id=@m create Mirror name=input_of_%s" % - target_port) - cmd.append("select-dst-port=@%s" % target_port) + cmd.append(f"-- --id=@m create Mirror name=input_of_{target_port}") + cmd.append(f"select-dst-port=@{target_port}") elif direction == "output": - cmd.append( - "-- --id=@m create Mirror name=output_of_%s" % target_port) - cmd.append("select-src-port=@%s" % target_port) + cmd.append(f"-- --id=@m create Mirror name=output_of_{target_port}") + cmd.append(f"select-src-port=@{target_port}") else: - cmd.append( - "-- --id=@m create Mirror name=mirror_%s" % target_port) - cmd.append("select-src-port=@%s" % target_port) - cmd.append("select-dst-port=@%s" % target_port) - cmd.append("output-port=@%s" % mirror_port) + cmd.append(f"-- --id=@m create Mirror name=mirror_{target_port}") + cmd.append(f"select-src-port=@{target_port}") + cmd.append(f"select-dst-port=@{target_port}") + cmd.append(f"output-port=@{mirror_port}") return " ".join(cmd) def create_mirror_port(mirror_port, target_port, direction, ovs): @@ -90,12 +82,12 @@ def check_tcpdump(output, target_ip, host_ip, direction): :return: bool type result. 
""" - rex = r".*IP (%s|%s) > " % (host_ip, target_ip) - rex += "(%s|%s).*ICMP echo.*" % (target_ip, host_ip) + rex = rf".*IP ({host_ip}|{target_ip}) > " + rex += f"({target_ip}|{host_ip}).*ICMP echo.*" if direction == "input": - rex = r".*IP %s > %s.*ICMP echo reply.*" % (host_ip, target_ip) + rex = rf".*IP {host_ip} > {target_ip}.*ICMP echo reply.*" if direction == "output": - rex = r".*IP %s > %s.*ICMP echo request.*" % (target_ip, host_ip) + rex = rf".*IP {target_ip} > {host_ip}.*ICMP echo request.*" for idx, _ in enumerate(output.splitlines()): if not re.match(rex, _): test.log.debug("Unexpect packet in line %d: %s", idx, _) @@ -125,7 +117,7 @@ def check_tcpdump(output, target_ip, host_ip, direction): env_process.preprocess_vm(test, params, env, p_vm) o_vm = env.get_vm(p_vm) o_vm.verify_alive() - ip = params["ip_%s" % p_vm] + ip = params[f"ip_{p_vm}"] mac = o_vm.get_mac_address() ses = o_vm.wait_for_serial_login(timeout=login_timeout) ses.cmd(pre_guest_cmd) @@ -143,56 +135,53 @@ def check_tcpdump(output, target_ip, host_ip, direction): error_context.context("Create mirror port in ovs", test.log.info) create_mirror_port(mirror_ifname, target_ifname, direction, ovs_name) - ping_cmd = "ping -c 10 %s" % host_ip + ping_cmd = f"ping -c 10 {host_ip}" status, output = session.cmd_status_output(ping_cmd, timeout=60) if status == 0: ifcfg = session.cmd_output_safe("ifconfig") test.log.debug("Guest network info: %s", ifcfg) test.log.debug("Ping results: %s", output) - test.fail("All packets from %s to host should lost" % mirror_vm) + test.fail(f"All packets from {mirror_vm} to host should lost") - error_context.context("Start tcpdump threads in %s" % mirror_vm, - test.log.info) - ifup_cmd = "ifconfig %s 0 up" % mirror_nic + error_context.context(f"Start tcpdump threads in {mirror_vm}", test.log.info) + ifup_cmd = f"ifconfig {mirror_nic} 0 up" session.cmd(ifup_cmd, timeout=60) for vm, ip in [(target_vm, target_ip), (refer_vm, refer_ip)]: - tcpdump_cmd = "tcpdump -l -n host %s and icmp >" % ip - tcpdump_cmd += "/tmp/tcpdump-%s.txt &" % vm + tcpdump_cmd = f"tcpdump -l -n host {ip} and icmp >" + tcpdump_cmd += f"/tmp/tcpdump-{vm}.txt &" test.log.info("tcpdump command: %s", tcpdump_cmd) session.sendline(tcpdump_cmd) - error_context.context("Start ping threads in %s %s" - % (target_vm, refer_vm), test.log.info) + error_context.context( + f"Start ping threads in {target_vm} {refer_vm}", test.log.info + ) for vm in [target_vm, refer_vm]: ses = vms_info[vm][3] nic_name = vms_info[vm][4] ip = vms_info[vm][2] - ifup_cmd = "ifconfig %s %s/%s up" % (nic_name, ip, net_mask) + ifup_cmd = f"ifconfig {nic_name} {ip}/{net_mask} up" ses.cmd(ifup_cmd) time.sleep(0.5) test.log.info("Ping host from %s", vm) - ses.cmd("ping %s -c 100" % host_ip, timeout=150) + ses.cmd(f"ping {host_ip} -c 100", timeout=150) error_context.context("Check tcpdump results", test.log.info) session.cmd_output_safe("pkill tcpdump") - process.system("ovs-vsctl clear bridge %s mirrors" % ovs_name) - ifup_cmd = "ifconfig %s %s/%s up" % (mirror_nic, mirror_ip, net_mask) + process.system(f"ovs-vsctl clear bridge {ovs_name} mirrors") + ifup_cmd = f"ifconfig {mirror_nic} {mirror_ip}/{net_mask} up" session.cmd(ifup_cmd, timeout=60) time.sleep(0.5) for vm in [target_vm, refer_vm]: - src_file = "/tmp/tcpdump-%s.txt" % vm - dst_file = os.path.join(test.resultsdir, "tcpdump-%s.txt" % vm) + src_file = f"/tmp/tcpdump-{vm}.txt" + dst_file = os.path.join(test.resultsdir, f"tcpdump-{vm}.txt") vms_info[mirror_vm][0].copy_files_from(src_file, dst_file) fd = 
open(dst_file, "r") content = fd.read().strip() fd.close() if vm == refer_vm and content: - test.fail( - "should not packet from %s dumped in %s" % - (refer_vm, mirror_vm)) + test.fail(f"should not packet from {refer_vm} dumped in {mirror_vm}") elif not check_tcpdump(content, target_ip, host_ip, direction): - test.fail( - "Unexpect packages from %s dumped in %s" % (vm, mirror_vm)) + test.fail(f"Unexpect packages from {vm} dumped in {mirror_vm}") finally: for vm in vms_info: vms_info[vm][0].destroy(gracefully=False) diff --git a/qemu/tests/ovs_qos.py b/qemu/tests/ovs_qos.py index 4a53b8c6ce..5bfbcbcfba 100644 --- a/qemu/tests/ovs_qos.py +++ b/qemu/tests/ovs_qos.py @@ -1,16 +1,13 @@ +import glob +import itertools import os import re -import time -import glob import shutil -import itertools +import time -from avocado.utils import process from avocado.utils import path as utils_path - -from virttest import error_context -from virttest import utils_netperf -from virttest import data_dir +from avocado.utils import process +from virttest import data_dir, error_context, utils_netperf @error_context.context_aware @@ -35,12 +32,11 @@ def set_ovs_port_attr(iface, attribute, value): """ Set OVS port attribute. """ - cmd = "ovs-vsctl set interface %s %s=%s" % (iface, attribute, value) + cmd = f"ovs-vsctl set interface {iface} {attribute}={value}" test.log.info("execute host command: %s", cmd) status = process.system(cmd, ignore_status=True) if status != 0: - err_msg = "set %s to %s for interface '%s' " % ( - attribute, value, iface) + err_msg = f"set {attribute} to {value} for interface '{iface}' " err_msg += "exited with nozero statu '%d'" % status test.error(err_msg) @@ -54,17 +50,19 @@ def set_port_qos(vm, rate, burst): :param brust: value of ingress_policing_brust """ iface = vm.get_ifname() - error_context.context("Set QoS for tap '%s' use by vm '%s'" - % (iface, vm.name), test.log.info) - attributes = zip(['ingress_policing_rate', - 'ingress_policing_burst'], - [rate, burst]) + error_context.context( + f"Set QoS for tap '{iface}' use by vm '{vm.name}'", test.log.info + ) + attributes = zip( + ["ingress_policing_rate", "ingress_policing_burst"], [rate, burst] + ) for k, v in attributes: set_ovs_port_attr(iface, k, v) time.sleep(0.1) - def get_throughout(netperf_server, server_vm, netperf_client, - client_vm, client_options=" -l 60"): + def get_throughout( + netperf_server, server_vm, netperf_client, client_vm, client_options=" -l 60" + ): """ Get network throughout by netperf. @@ -75,13 +73,15 @@ def get_throughout(netperf_server, server_vm, netperf_client, :return: float type throughout Kbps. 
""" - error_context.context("Set '%s' as netperf server" % server_vm.name, - test.log.info) + error_context.context( + f"Set '{server_vm.name}' as netperf server", test.log.info + ) if not netperf_server.is_server_running(): netperf_server.start() - error_context.context("Set '%s' as netperf client" % client_vm.name, - test.log.info) + error_context.context( + f"Set '{client_vm.name}' as netperf client", test.log.info + ) server_ip = server_vm.get_address() output = netperf_client.start(server_ip, client_options) test.log.debug("netperf client output: %s", output) @@ -109,15 +109,16 @@ def report_test_results(datas): if fails: msg = "OVS Qos test failed, " for tap, throughout, rate, burst in fails: - msg += "netperf throughout(%s) on '%s' " % (throughout, tap) - msg += "should be near ingress_policing_rate(%s), " % rate - msg += "ingress_policing_burst is %s;\n" % burst + msg += f"netperf throughout({throughout}) on '{tap}' " + msg += f"should be near ingress_policing_rate({rate}), " + msg += f"ingress_policing_burst is {burst};\n" test.fail(msg) def clear_qos_setting(iface): - error_context.context("Clear qos setting for ovs port '%s'" % iface, - test.log.info) - clear_cmd = "ovs-vsctl clear Port %s qos" % iface + error_context.context( + f"Clear qos setting for ovs port '{iface}'", test.log.info + ) + clear_cmd = f"ovs-vsctl clear Port {iface} qos" process.system(clear_cmd) test.log.info("Clear ovs command: %s", clear_cmd) @@ -125,6 +126,7 @@ def setup_netperf_env(): """ Setup netperf envrioments in vms """ + def __get_vminfo(): """ Get vms information; @@ -132,13 +134,19 @@ def __get_vminfo(): login_timeout = float(params.get("login_timeout", 360)) stop_firewall_cmd = "systemctl stop firewalld||" stop_firewall_cmd += "service firewalld stop" - guest_info = ["status_test_command", "shell_linesep", "shell_prompt", - "username", "password", "shell_client", "shell_port", "os_type"] + guest_info = [ + "status_test_command", + "shell_linesep", + "shell_prompt", + "username", + "password", + "shell_client", + "shell_port", + "os_type", + ] vms_info = [] for _ in params.get("vms").split(): - info = list(map( - lambda x: params.object_params(_).get(x), - guest_info)) + info = list(map(lambda x: params.object_params(_).get(x), guest_info)) vm = env.get_vm(_) vm.verify_alive() session = vm.wait_for_login(timeout=login_timeout) @@ -147,20 +155,16 @@ def __get_vminfo(): return vms_info netperf_link = params.get("netperf_link") - netperf_link = os.path.join( - data_dir.get_deps_dir("netperf"), - netperf_link) + netperf_link = os.path.join(data_dir.get_deps_dir("netperf"), netperf_link) md5sum = params.get("pkg_md5sum") - netperf_server_link = params.get( - "netperf_server_link_win", - netperf_link) - netperf_server_link = os.path.join(data_dir.get_deps_dir("netperf"), - netperf_server_link) - netperf_client_link = params.get( - "netperf_client_link_win", - netperf_link) - netperf_client_link = os.path.join(data_dir.get_deps_dir("netperf"), - netperf_client_link) + netperf_server_link = params.get("netperf_server_link_win", netperf_link) + netperf_server_link = os.path.join( + data_dir.get_deps_dir("netperf"), netperf_server_link + ) + netperf_client_link = params.get("netperf_client_link_win", netperf_link) + netperf_client_link = os.path.join( + data_dir.get_deps_dir("netperf"), netperf_client_link + ) server_path_linux = params.get("server_path", "/var/tmp") client_path_linux = params.get("client_path", "/var/tmp") @@ -188,9 +192,10 @@ def __get_vminfo(): password=info[-4], username=info[-5], 
prompt=info[-6], - linesep=info[-7].encode().decode('unicode_escape'), + linesep=info[-7].encode().decode("unicode_escape"), status_test_command=info[-8], - compile_option=compile_option_server) + compile_option=compile_option_server, + ) netperf_servers.append((server, vm)) continue else: @@ -210,21 +215,20 @@ def __get_vminfo(): password=info[-4], username=info[-5], prompt=info[-6], - linesep=info[-7].encode().decode('unicode_escape'), + linesep=info[-7].encode().decode("unicode_escape"), status_test_command=info[-8], - compile_option=compile_option_client) + compile_option=compile_option_client, + ) netperf_clients.append((client, vm)) continue return netperf_clients, netperf_servers utils_path.find_command("ovs-vsctl") - if (params.get("netdst") not in - process.system_output("ovs-vsctl show").decode()): + if params.get("netdst") not in process.system_output("ovs-vsctl show").decode(): test.error("This is a openvswitch only test") extra_options = params.get("netperf_client_options", " -l 60") rate_brust_pairs = params.get("rate_brust_pairs").split() - rate_brust_pairs = list( - map(lambda x: map(int, x.split(',')), rate_brust_pairs)) + rate_brust_pairs = list(map(lambda x: map(int, x.split(",")), rate_brust_pairs)) results = [] try: netperf_clients, netperf_servers = setup_netperf_env() @@ -235,11 +239,9 @@ def __get_vminfo(): for rate, burst in rate_brust_pairs: set_port_qos(client_vm, rate, burst) time.sleep(3) - throughout = get_throughout(netperf_server, - server_vm, - netperf_client, - client_vm, - extra_options) + throughout = get_throughout( + netperf_server, server_vm, netperf_client, client_vm, extra_options + ) iface = client_vm.get_ifname() clear_qos_setting(iface) results.append([iface, throughout, rate, burst]) @@ -251,7 +253,7 @@ def __get_vminfo(): for ntpf, _ in itertools.chain(netperf_clients, netperf_servers): ntpf.cleanup() except Exception as e: - test.log.warn("Cleanup failed:\n%s\n", e) + test.log.warning("Cleanup failed:\n%s\n", e) for f in glob.glob("/var/log/openvswith/*.log"): dst = os.path.join(test.resultsdir, os.path.basename(f)) shutil.copy(f, dst) diff --git a/qemu/tests/ovs_quit.py b/qemu/tests/ovs_quit.py index 56a2469fbd..f9fa12aa87 100644 --- a/qemu/tests/ovs_quit.py +++ b/qemu/tests/ovs_quit.py @@ -1,9 +1,11 @@ -from virttest import data_dir -from virttest import env_process -from virttest import error_context -from virttest import virt_vm -from virttest import utils_misc -from virttest import utils_net +from virttest import ( + data_dir, + env_process, + error_context, + utils_misc, + utils_net, + virt_vm, +) @error_context.context_aware @@ -22,7 +24,7 @@ def run(test, params, env): netdst = params.get("netdst") if not utils_net.ovs_br_exists(netdst): - test.cancel("%s isn't an openvswith bridge" % netdst) + test.cancel(f"{netdst} isn't an openvswith bridge") host_bridge = utils_net.find_bridge_manager(netdst) deps_dir = data_dir.get_deps_dir("ovs") @@ -45,6 +47,6 @@ def run(test, params, env): if ports: for p in ports: host_bridge.del_port(netdst, p) - test.fail("%s not delete after qemu quit." 
% ports) + test.fail(f"{ports} not delete after qemu quit.") else: test.fail("Qemu should quit with error") diff --git a/qemu/tests/passthrough_fc_with_lun_device.py b/qemu/tests/passthrough_fc_with_lun_device.py index 83afd96ffc..b4fa279b84 100644 --- a/qemu/tests/passthrough_fc_with_lun_device.py +++ b/qemu/tests/passthrough_fc_with_lun_device.py @@ -1,16 +1,15 @@ """Pass-through fc device as lun device io test""" -import time -import random -import json + import copy +import json +import random import string +import time + from avocado.utils import process -from virttest import env_process -from virttest import utils_misc, utils_test -from virttest import error_context -from virttest import utils_disk -from virttest.utils_misc import get_linux_drive_path +from virttest import env_process, error_context, utils_disk, utils_misc, utils_test from virttest.utils_disk import configure_empty_disk +from virttest.utils_misc import get_linux_drive_path @error_context.context_aware @@ -27,8 +26,9 @@ def run(test, params, env): """ def _clean_disk_windows(index): - tmp_file = "disk_" + ''.join( - random.sample(string.ascii_letters + string.digits, 4)) + tmp_file = "disk_" + "".join( + random.sample(string.ascii_letters + string.digits, 4) + ) online_cmd = "echo select disk %s > " + tmp_file online_cmd += " && echo clean >> " + tmp_file online_cmd += " && echo rescan >> " + tmp_file @@ -38,11 +38,11 @@ def _clean_disk_windows(index): return session.cmd(online_cmd % index, timeout=timeout) def _get_window_disk_index_by_wwn(uid): - cmd = "powershell -command \"get-disk| Where-Object" - cmd += " {$_.UniqueId -eq '%s'}|select number|FL\"" % uid + cmd = 'powershell -command "get-disk| Where-Object' + cmd += f" {{$_.UniqueId -eq '{uid}'}}|select number|FL\"" status, output = session.cmd_status_output(cmd) if status != 0: - test.fail("execute command fail: %s" % output) + test.fail(f"execute command fail: {output}") output = "".join([s for s in output.splitlines(True) if s.strip()]) test.log.debug(output) info = output.split(":") @@ -56,12 +56,12 @@ def _get_fc_devices(): status, output = process.getstatusoutput(cmd) devs_str = output.strip().replace("\n", " ") if devs_str: - cmd = "lsblk -Jpo 'NAME,HCTL,SERIAL,TRAN,FSTYPE,WWN' %s" % devs_str + cmd = f"lsblk -Jpo 'NAME,HCTL,SERIAL,TRAN,FSTYPE,WWN' {devs_str}" status, output = process.getstatusoutput(cmd) devs = copy.deepcopy(json.loads(output)["blockdevices"]) for dev in devs: - cmd = "lsscsi -gb %s|awk '{print $3}'" % dev["hctl"] + cmd = "lsscsi -gb {}|awk '{{print $3}}'".format(dev["hctl"]) status, output = process.getstatusoutput(cmd) dev["sg_dev"] = output test.log.debug(devs) @@ -73,7 +73,7 @@ def _get_fc_devices(): fc_dev = fc_devs[0] test.log.debug(fc_dev) - vm = env.get_vm(params['main_vm']) + vm = env.get_vm(params["main_vm"]) timeout = float(params.get("timeout", 240)) drive_type = params.get("drive_type") os_type = params["os_type"] @@ -89,25 +89,24 @@ def _get_fc_devices(): params["image_name_stg0"] = fc_dev["sg_dev"] clean_cmd = clean_cmd % params["image_name_stg0"] - error_context.context("run clean cmd %s" % clean_cmd, test.log.info) + error_context.context(f"run clean cmd {clean_cmd}", test.log.info) process.getstatusoutput(clean_cmd) - params['start_vm'] = 'yes' - env_process.process(test, params, env, env_process.preprocess_image, - env_process.preprocess_vm) + params["start_vm"] = "yes" + env_process.process( + test, params, env, env_process.preprocess_image, env_process.preprocess_vm + ) session = vm.wait_for_login(timeout=timeout) 
disk_wwn = fc_dev["wwn"] disk_wwn = disk_wwn.replace("0x", "") - if os_type == 'windows' and driver_name: - session = utils_test.qemu.windrv_check_running_verifier(session, - vm, - test, - driver_name, - timeout) - - if os_type == 'windows': + if os_type == "windows" and driver_name: + session = utils_test.qemu.windrv_check_running_verifier( + session, vm, test, driver_name, timeout + ) + + if os_type == "windows": part_size = params["part_size"] guest_cmd = utils_misc.set_winutils_letter(session, guest_cmd) did = _get_window_disk_index_by_wwn(disk_wwn) @@ -132,7 +131,7 @@ def _get_fc_devices(): test.log.debug("Get output file path %s", output_path) guest_cmd = guest_cmd.format(output_path) - error_context.context('Start io test...', test.log.info) + error_context.context("Start io test...", test.log.info) session.cmd(guest_cmd, timeout=360) if not vm.monitor.verify_status("running"): test.fail("Guest not run after dd") diff --git a/qemu/tests/passthrough_with_multipath_device.py b/qemu/tests/passthrough_with_multipath_device.py index 7328e3deb8..ae6bc3dcd8 100644 --- a/qemu/tests/passthrough_with_multipath_device.py +++ b/qemu/tests/passthrough_with_multipath_device.py @@ -1,12 +1,9 @@ -import time import random import re +import time from avocado.utils import process - -from virttest import error_context -from virttest import env_process -from virttest import utils_misc +from virttest import env_process, error_context, utils_misc @error_context.context_aware @@ -30,7 +27,7 @@ def run(test, params, env): :param env: Dictionary with test environment. """ - vm = env.get_vm(params['main_vm']) + vm = env.get_vm(params["main_vm"]) timeout = float(params.get("login_timeout", 240)) get_id_cmd = params.get("get_id_cmd") get_mpath_cmd = params.get("get_mpath_cmd") @@ -38,28 +35,28 @@ def run(test, params, env): get_tdev_cmd = params.get("get_tdev_cmd") set_path_cmd = params.get("set_path_cmd") cmd_dd = params.get("cmd_dd") - post_cmd = params.get("post_cmd") + params.get("post_cmd") repeat_time = params.get_numeric("repeat_time") id = process.getoutput(get_id_cmd).strip() get_mpath_cmd = get_mpath_cmd % (id, id) mpath = process.getoutput(get_mpath_cmd).strip() - params["image_name_stg0"] = "/dev/mapper/%s" % mpath - params['start_vm'] = 'yes' + params["image_name_stg0"] = f"/dev/mapper/{mpath}" + params["start_vm"] = "yes" time.sleep(5) - env_process.process(test, params, env, env_process.preprocess_image, - env_process.preprocess_vm) + env_process.process( + test, params, env, env_process.preprocess_image, env_process.preprocess_vm + ) session = vm.wait_for_login(timeout=timeout) out = session.cmd_output(get_tdev_cmd) cmd_dd = cmd_dd % out - error_context.context('Do dd writing test on the data disk.', - test.log.info) + error_context.context("Do dd writing test on the data disk.", test.log.info) session.sendline(cmd_dd) if not vm.monitor.verify_status("running"): test.fail("Guest did not run after dd") get_mdev_cmd = get_mdev_cmd % id o = process.getoutput(get_mdev_cmd) mdev = re.findall(r"sd.", o, re.M) - error_context.context('Alternately close a path every 10 seconds on host') + error_context.context("Alternately close a path every 10 seconds on host") for dev in mdev: process.getoutput(set_path_cmd % ("running", dev)) for i in range(repeat_time): @@ -75,20 +72,17 @@ def run(test, params, env): time.sleep(1) for dev in mdev: process.getoutput(set_path_cmd % ("running", dev)) - if not utils_misc.wait_for(lambda: vm.monitor.verify_status("running"), - timeout=20): + if not 
utils_misc.wait_for(lambda: vm.monitor.verify_status("running"), timeout=20): test.fail("Guest did not run after change path") for dev in mdev: process.getoutput(set_path_cmd % ("offline", dev)) - if not utils_misc.wait_for(lambda: vm.monitor.verify_status("paused"), - timeout=20): + if not utils_misc.wait_for(lambda: vm.monitor.verify_status("paused"), timeout=20): test.fail("Guest did not pause after offline") dev = random.choice(mdev) process.getoutput(set_path_cmd % ("running", dev)) if vm.monitor.verify_status("paused"): vm.monitor.send_args_cmd("c") - if not utils_misc.wait_for(lambda: vm.monitor.verify_status("running"), - timeout=20): + if not utils_misc.wait_for(lambda: vm.monitor.verify_status("running"), timeout=20): test.fail("Guest did not run after online") session.close() vm.destroy(gracefully=True) diff --git a/qemu/tests/pci_bridge.py b/qemu/tests/pci_bridge.py index 5240ee7a29..d4e4efc8a5 100644 --- a/qemu/tests/pci_bridge.py +++ b/qemu/tests/pci_bridge.py @@ -1,13 +1,9 @@ import logging -from virttest import error_context -from virttest import env_process -from virttest import utils_test -from virttest import utils_misc -from virttest import utils_disk +from virttest import env_process, error_context, utils_disk, utils_misc, utils_test from virttest.qemu_capabilities import Flags -LOG_JOB = logging.getLogger('avocado.test') +LOG_JOB = logging.getLogger("avocado.test") def prepare_pci_bridge(test, params, pci_bridge_num): @@ -42,34 +38,34 @@ def prepare_images(test, params, image_num, pci_bridge_num, opr, device_num=0): fmt_list = params.objects("disk_driver") for i in range(image_num): - image = "stg%s" % i - params["images"] = ' '.join([params["images"], image]) - params["image_name_%s" % image] = "images/%s" % image - params["image_size_%s" % image] = params["data_image_size"] - params["force_create_image_%s" % image] = "yes" - params["remove_image_%s" % image] = "yes" - params["blk_extra_params_%s" % image] = "serial=TARGET_DISK%s" % i + image = f"stg{i}" + params["images"] = " ".join([params["images"], image]) + params[f"image_name_{image}"] = f"images/{image}" + params[f"image_size_{image}"] = params["data_image_size"] + params[f"force_create_image_{image}"] = "yes" + params[f"remove_image_{image}"] = "yes" + params[f"blk_extra_params_{image}"] = f"serial=TARGET_DISK{i}" if opr != "hotplug_unplug": if i >= len(fmt_list): - params["drive_format_%s" % image] = "virtio" + params[f"drive_format_{image}"] = "virtio" else: - params["drive_format_%s" % image] = fmt_list[i] - d_format = params["drive_format_%s" % image] + params[f"drive_format_{image}"] = fmt_list[i] + d_format = params[f"drive_format_{image}"] if d_format == "scsi-hd" and params["drive_format"] == "scsi-hd": - params["drive_bus_%s" % image] = 1 + params[f"drive_bus_{image}"] = 1 if d_format == "usb3": params["usbs"] += " xhci" params["usb_type_xhci"] = "nec-usb-xhci" content = "usbc_pci_bus_xhci" else: - content = "disk_pci_bus_%s" % image + content = f"disk_pci_bus_{image}" if pci_bridge_num == 1: params[content] = pci_bridges[0] else: index = i % pci_bridge_num params[content] = pci_bridges[index] else: - params["boot_drive_%s" % image] = "no" + params[f"boot_drive_{image}"] = "no" device_num = device_num + 1 return device_num @@ -91,19 +87,19 @@ def prepare_nics(test, params, pci_bridge_num, opr, device_num=0): if opr != "hotplug_unplug": if pci_bridge_num == 1: if device_num >= 31: - test.fail("There are already %d devices on %s" - % (device_num, pci_bridges[0])) - params["nic_pci_bus_%s" % nic] = 
pci_bridges[0] + test.fail( + "There are already %d devices on %s" % (device_num, pci_bridges[0]) + ) + params[f"nic_pci_bus_{nic}"] = pci_bridges[0] else: index = device_num % pci_bridge_num - params["nic_pci_bus_%s" % nic] = pci_bridges[index] + params[f"nic_pci_bus_{nic}"] = pci_bridges[index] device_num += 1 return device_num -def disk_hotplug(test, params, vm, session, image_name, - drive_format, parent_bus): +def disk_hotplug(test, params, vm, session, image_name, drive_format, parent_bus): """ Hotplug new disk. @@ -115,44 +111,51 @@ def disk_hotplug(test, params, vm, session, image_name, :param drive_format: Drive subsystem type (virtio, scsi, usb3) :param parent_bus: Bus(es), in which this device is plugged in """ + def check_usb_in_guest(): """ Check USB in guest """ output = session.cmd(params["chk_usb_cmd"]) - return (usb_serial in output) # pylint: disable=E0606 + return usb_serial in output # pylint: disable=E0606 - if drive_format not in ('virtio', 'scsi-hd', 'usb3'): - test.cancel("Unsupported drive format: %s" % drive_format) + if drive_format not in ("virtio", "scsi-hd", "usb3"): + test.cancel(f"Unsupported drive format: {drive_format}") image_params = params.object_params(image_name) image_params["drive_format"] = drive_format devices = [] - if drive_format == 'usb3': - usbc_params = {'usb_type': 'nec-usb-xhci'} - devices = vm.devices.usbc_by_params(drive_format, - usbc_params, - pci_bus={'aobject': parent_bus}) - - devices += vm.devices.images_define_by_params(image_name, image_params, - 'disk', None, False, None, - pci_bus={'aobject': parent_bus}) + if drive_format == "usb3": + usbc_params = {"usb_type": "nec-usb-xhci"} + devices = vm.devices.usbc_by_params( + drive_format, usbc_params, pci_bus={"aobject": parent_bus} + ) + + devices += vm.devices.images_define_by_params( + image_name, + image_params, + "disk", + None, + False, + None, + pci_bus={"aobject": parent_bus}, + ) for dev in devices: ret = vm.devices.simple_hotplug(dev, vm.monitor) if ret[1] is False: - test.fail("Failed to hotplug device '%s'." - "Output:\n%s" % (dev, ret[0])) + test.fail(f"Failed to hotplug device '{dev}'." f"Output:\n{ret[0]}") - if drive_format == 'usb3': - usb_serial = params["blk_extra_params_%s" % image_name].split("=")[1] - res = utils_misc.wait_for(check_usb_in_guest, timeout=360, - text="Wait for getting usb device info") + if drive_format == "usb3": + usb_serial = params[f"blk_extra_params_{image_name}"].split("=")[1] + res = utils_misc.wait_for( + check_usb_in_guest, timeout=360, text="Wait for getting usb device info" + ) if res is None: - test.fail("Could not find the usb device serial:[%s]" % usb_serial) + test.fail(f"Could not find the usb device serial:[{usb_serial}]") - if drive_format == 'virtio': + if drive_format == "virtio": return [devices[-1]] else: if Flags.BLOCKDEV in vm.devices.caps: @@ -176,10 +179,10 @@ def check_data_disks(test, params, env, vm, session): error_context.context("Check data disks in monitor!", LOG_JOB.info) monitor_info_block = vm.monitor.info_block(False) - blocks = ','.join(monitor_info_block.keys()) + blocks = ",".join(monitor_info_block.keys()) for image in image_list: if image not in blocks: - test.fail("drive_%s is missed: %s!" 
% (image, blocks)) + test.fail(f"drive_{image} is missed: {blocks}!") error_context.context("Read and write on data disks!", LOG_JOB.info) os_type = params["os_type"] @@ -202,24 +205,24 @@ def check_data_disks(test, params, env, vm, session): if disk_num < image_num: err_msg = "set disk num: %d" % image_num err_msg += ", get in guest: %d" % disk_num - test.fail("Fail to list all the volumes, %s" % err_msg) + test.fail(f"Fail to list all the volumes, {err_msg}") if not utils_disk.update_windows_disk_attributes(session, disks): test.fail("Failed to update windows disk attributes.") for disk in disks: - drive_letter = utils_disk.configure_empty_disk(session, disk, - data_image_size, - os_type) + drive_letter = utils_disk.configure_empty_disk( + session, disk, data_image_size, os_type + ) if not drive_letter: test.fail("Fail to format disks.") iozone_cmd_disk = iozone_cmd % drive_letter[0] - status, output = session.cmd_status_output(iozone_cmd_disk, - timeout=3600) + status, output = session.cmd_status_output(iozone_cmd_disk, timeout=3600) if status: - test.fail("Check block device '%s' failed! Output: %s" - % (drive_letter[0], output)) + test.fail( + f"Check block device '{drive_letter[0]}' failed! Output: {output}" + ) utils_disk.clean_partition(session, disk, os_type) else: - test.cancel("Unsupported OS type '%s'" % os_type) + test.cancel(f"Unsupported OS type '{os_type}'") @error_context.context_aware @@ -282,11 +285,13 @@ def run(test, params, env): index = i % pci_bridge_num pci_bridge_id = pci_bridges[index] if fmt == "scsi-hd" and params["drive_format"] == "scsi-hd": - params["drive_bus_%s" % image] = 1 - error_context.context("Hotplug a %s disk on %s!" - % (fmt, pci_bridge_id), test.log.info) - device_list += disk_hotplug(test, params, vm, session, - image, fmt, pci_bridge_id) + params[f"drive_bus_{image}"] = 1 + error_context.context( + f"Hotplug a {fmt} disk on {pci_bridge_id}!", test.log.info + ) + device_list += disk_hotplug( + test, params, vm, session, image, fmt, pci_bridge_id + ) check_data_disks(test, params, env, vm, session) @@ -296,7 +301,7 @@ def run(test, params, env): if status: test.fail("Ping guest failed!") elif utils_test.get_loss_ratio(output) == 100: - test.fail("All packets lost during ping guest %s." % guest_ip) + test.fail(f"All packets lost during ping guest {guest_ip}.") if opr == "hotplug_unplug": error_context.context("Unplug those hotplugged devices!", test.log.info) @@ -304,8 +309,7 @@ def run(test, params, env): for dev in device_list: ret = vm.devices.simple_unplug(dev, vm.monitor) if ret[1] is False: - test.fail("Failed to unplug device '%s'." - "Output:\n%s" % (dev, ret[0])) + test.fail(f"Failed to unplug device '{dev}'." f"Output:\n{ret[0]}") elif opr == "with_migration": error_context.context("Migrating...", test.log.info) vm.migrate(float(params.get("mig_timeout", "3600"))) diff --git a/qemu/tests/pci_devices.py b/qemu/tests/pci_devices.py index 7654b8370b..f257e1ecf8 100755 --- a/qemu/tests/pci_devices.py +++ b/qemu/tests/pci_devices.py @@ -4,20 +4,18 @@ :author: Lukas Doktor :copyright: 2013 Red Hat, Inc. 
""" + import logging import random import re -import six -from virttest import env_process -from virttest import error_context -from virttest import qemu_qtree +import six +from virttest import env_process, error_context, qemu_qtree -LOG_JOB = logging.getLogger('avocado.test') +LOG_JOB = logging.getLogger("avocado.test") class PCIBusInfo: - """ Structured info about PCI bus """ @@ -26,13 +24,13 @@ def __init__(self, device): self.name = device.aobject if device.child_bus: bus = device.child_bus[0] - self.type = bus.type == 'PCI' + self.type = bus.type == "PCI" self.first = bus.first_port[0] self.last = bus.addr_lengths[0] else: - self.type = True # PCI - self.first = 0 # (first usable) - self.last = 32 # (last + 1) + self.type = True # PCI + self.first = 0 # (first usable) + self.last = 32 # (last + 1) def process_qdev(qdev): @@ -41,20 +39,22 @@ def process_qdev(qdev): """ qdev_devices = {} qdev_devices_noid = [] - for bus in qdev.get_buses({'type': ('PCI', 'PCIE')}): + for bus in qdev.get_buses({"type": ("PCI", "PCIE")}): for device in bus: if isinstance(device, six.string_types): LOG_JOB.error("Not a device %s (bus %s)", device, bus) continue - dev_id = device.get_param('id') - addr = [int(_, 16) for _ in device.get_param('addr').split('.')] + dev_id = device.get_param("id") + addr = [int(_, 16) for _ in device.get_param("addr").split(".")] if len(addr) == 1: addr.append(0) - addr = "%02x.%x" % (addr[0], addr[1]) - dev = {'id': dev_id, - 'type': device.get_param('driver'), - 'bus': device.get_param('bus'), - 'addr': addr} + addr = f"{addr[0]:02x}.{addr[1]:x}" + dev = { + "id": dev_id, + "type": device.get_param("driver"), + "bus": device.get_param("bus"), + "addr": addr, + } if dev_id is None: qdev_devices_noid.append(dev) else: @@ -70,23 +70,28 @@ def process_qtree(qtree): qtree_devices_noid = [] qtree_pciinfo = [] for node in qtree.get_nodes(): - if node.parent and node.parent.qtree.get('type') in ('PCI', 'PCIE'): - dev_id = node.qtree.get('id') - dev = {'id': dev_id, - 'type': node.qtree.get('type'), - 'bus': node.parent.qtree.get('id'), - 'addr': node.qtree.get('addr')} + if node.parent and node.parent.qtree.get("type") in ("PCI", "PCIE"): + dev_id = node.qtree.get("id") + dev = { + "id": dev_id, + "type": node.qtree.get("type"), + "bus": node.parent.qtree.get("id"), + "addr": node.qtree.get("addr"), + } if dev_id is None: # HOOK for VGA - if 'vga' in dev['type'].lower(): - dev['type'] = None + if "vga" in dev["type"].lower(): + dev["type"] = None qtree_devices_noid.append(dev) else: qtree_devices[dev_id] = dev - qtree_pciinfo.append({'class_addr': node.qtree.get('class_addr'), - 'class_pciid': node.qtree.get('class_pciid') - }) + qtree_pciinfo.append( + { + "class_addr": node.qtree.get("class_addr"), + "class_pciid": node.qtree.get("class_pciid"), + } + ) return (qtree_devices, qtree_devices_noid, qtree_pciinfo) @@ -94,11 +99,14 @@ def process_lspci(lspci): """ Get info about PCI devices from lspci """ - lspci = re.findall(r'(\w\w:\w\w.\w) "[^"]+ \[\w{4}\]" "[^"]+ ' - r'\[(\w{4})\]" "[^"]+ \[(\w{4})\].*', lspci) - return [{'class_addr': info[0], - 'class_pciid': "%s:%s" % (info[1], info[2])} - for info in lspci] + lspci = re.findall( + r'(\w\w:\w\w.\w) "[^"]+ \[\w{4}\]" "[^"]+ ' + r'\[(\w{4})\]" "[^"]+ \[(\w{4})\].*', + lspci, + ) + return [ + {"class_addr": info[0], "class_pciid": f"{info[1]}:{info[2]}"} for info in lspci + ] def verify_qdev_vs_qtree(qdev_info, qtree_info): @@ -111,19 +119,18 @@ def verify_qdev_vs_qtree(qdev_info, qtree_info): errors = "" for dev_id, device in 
qtree_devices.items(): if dev_id not in qdev_devices: - errors += "Device %s is in qtree but not in qdev.\n" % dev_id + errors += f"Device {dev_id} is in qtree but not in qdev.\n" continue for key, value in device.items(): err = "" if qdev_devices[dev_id][key] != value: - err += " %s != %s\n" % (qdev_devices[dev_id][key], value) + err += f" {qdev_devices[dev_id][key]} != {value}\n" if err: - errors += ("Device %s properties mismatch:\n%s" - % (dev_id, err)) + errors += f"Device {dev_id} properties mismatch:\n{err}" for dev_id in qdev_devices: if dev_id not in qtree_devices: - errors += "Device %s is in qdev but not in qtree\n" % dev_id + errors += f"Device {dev_id} is in qdev but not in qtree\n" for device in qtree_devices_noid: for qdev_device in qdev_devices_noid: @@ -131,9 +138,9 @@ def verify_qdev_vs_qtree(qdev_info, qtree_info): qdev_devices_noid.remove(device) break else: - errors += "No match in qdev for device without id %s\n" % device + errors += f"No match in qdev for device without id {device}\n" for device in qdev_devices_noid: - errors += "No match in qtree for device without id %s\n" % device + errors += f"No match in qtree for device without id {device}\n" return errors @@ -145,11 +152,11 @@ def verify_lspci(info_lspci, info_qtree): errors = "" for lspci_dev in info_lspci: if lspci_dev not in info_qtree: - errors += "Device %s is in lspci but not in qtree\n" % lspci_dev + errors += f"Device {lspci_dev} is in lspci but not in qtree\n" for qtree_dev in info_qtree: if qtree_dev not in info_lspci: - errors += "Device %s is in qtree but not in lspci\n" % qtree_dev + errors += f"Device {qtree_dev} is in qtree but not in lspci\n" return errors @@ -158,18 +165,18 @@ def add_bus(qdev, params, bus_type, name, parent_bus): """ Define new bus in params """ - if bus_type == 'bridge': - if parent_bus.type is True: # PCI - bus_type = 'pci-bridge' - else: # PCIE - bus_type = 'i82801b11-bridge' - elif bus_type == 'switch': - bus_type = 'x3130' - elif bus_type == 'root': - bus_type = 'ioh3420' - params['pci_controllers'] += " %s" % name - params['type_%s' % name] = bus_type - params['pci_bus_%s' % name] = parent_bus.name + if bus_type == "bridge": + if parent_bus.type is True: # PCI + bus_type = "pci-bridge" + else: # PCIE + bus_type = "i82801b11-bridge" + elif bus_type == "switch": + bus_type = "x3130" + elif bus_type == "root": + bus_type = "ioh3420" + params["pci_controllers"] += f" {name}" + params[f"type_{name}"] = bus_type + params[f"pci_bus_{name}"] = parent_bus.name pci_params = params.object_params(name) bus = PCIBusInfo(qdev.pcic_by_params(name, pci_params)) return params, bus @@ -197,9 +204,9 @@ def add_devices_random(params, name_idxs, bus, add_device): Define three devices in first, last and random ports of the given bus """ params, name_idxs = add_device(params, name_idxs, bus.name, bus.first) - params, name_idxs = add_device(params, name_idxs, bus.name, - random.randrange(bus.first + 1, - bus.last - 1)) + params, name_idxs = add_device( + params, name_idxs, bus.name, random.randrange(bus.first + 1, bus.last - 1) + ) params, name_idxs = add_device(params, name_idxs, bus.name, bus.last - 1) return params, name_idxs @@ -211,15 +218,14 @@ def add_device_usb(params, name_idxs, parent_bus, addr, device): idx = name_idxs.get(device[0], 0) + 1 name_idxs[device[0]] = idx name = "test_%s%d" % (device[0], idx) - params['usbs'] += ' %s' % name - params['pci_bus_%s' % name] = parent_bus - params['pci_addr_%s' % name] = addr - params['usb_type_%s' % name] = device[1] - if not 
params.get('reserved_slots_%s' % parent_bus): - params['reserved_slots_%s' % parent_bus] = "" - params['reserved_slots_%s' % parent_bus] += " 0x%x-0x0" % addr - LOG_JOB.debug("Add test device %s %s %s addr:%s", name, device[1], - parent_bus, addr) + params["usbs"] += f" {name}" + params[f"pci_bus_{name}"] = parent_bus + params[f"pci_addr_{name}"] = addr + params[f"usb_type_{name}"] = device[1] + if not params.get(f"reserved_slots_{parent_bus}"): + params[f"reserved_slots_{parent_bus}"] = "" + params[f"reserved_slots_{parent_bus}"] += f" 0x{addr:x}-0x0" + LOG_JOB.debug("Add test device %s %s %s addr:%s", name, device[1], parent_bus, addr) return params, name_idxs @@ -227,24 +233,23 @@ def add_device_usb_uhci(params, name_idxs, parent_bus, addr): """ Creates ehci usb controller """ - return add_device_usb(params, name_idxs, parent_bus, - addr, ('uhci', 'ich9-usb-uhci1')) + return add_device_usb( + params, name_idxs, parent_bus, addr, ("uhci", "ich9-usb-uhci1") + ) def add_device_usb_ehci(params, name_idxs, parent_bus, addr): """ Creates ehci usb controller """ - return add_device_usb(params, name_idxs, parent_bus, - addr, ('ehci', 'usb-ehci')) + return add_device_usb(params, name_idxs, parent_bus, addr, ("ehci", "usb-ehci")) def add_device_usb_xhci(params, name_idxs, parent_bus, addr): """ Creates xhci usb controller """ - return add_device_usb(params, name_idxs, parent_bus, - addr, ('xhci', 'nec-usb-xhci')) + return add_device_usb(params, name_idxs, parent_bus, addr, ("xhci", "nec-usb-xhci")) def add_virtio_disk(params, name_idxs, parent_bus, addr): @@ -254,19 +259,18 @@ def add_virtio_disk(params, name_idxs, parent_bus, addr): idx = name_idxs.get("virtio_disk", 0) + 1 name_idxs["virtio_disk"] = idx name = "test_virtio_disk%d" % idx - params['images'] += ' %s' % name - params['image_name_%s' % name] = 'images/%s' % name - params['pci_bus_%s' % name] = parent_bus - params['drive_bus_%s' % name] = addr - params['drive_format_%s' % name] = 'virtio' - params['force_create_image_%s' % name] = 'yes' - params['remove_image_%s' % name] = 'yes' - params['image_size_%s' % name] = '1M' - if not params.get('reserved_slots_%s' % parent_bus): - params['reserved_slots_%s' % parent_bus] = "" - params['reserved_slots_%s' % parent_bus] += " 0x%x-0x0" % addr - LOG_JOB.debug("Add test device %s virtio_disk %s addr:%s", name, - parent_bus, addr) + params["images"] += f" {name}" + params[f"image_name_{name}"] = f"images/{name}" + params[f"pci_bus_{name}"] = parent_bus + params[f"drive_bus_{name}"] = addr + params[f"drive_format_{name}"] = "virtio" + params[f"force_create_image_{name}"] = "yes" + params[f"remove_image_{name}"] = "yes" + params[f"image_size_{name}"] = "1M" + if not params.get(f"reserved_slots_{parent_bus}"): + params[f"reserved_slots_{parent_bus}"] = "" + params[f"reserved_slots_{parent_bus}"] += f" 0x{addr:x}-0x0" + LOG_JOB.debug("Add test device %s virtio_disk %s addr:%s", name, parent_bus, addr) return params, name_idxs @@ -274,8 +278,12 @@ def add_device_random(params, name_idxs, parent_bus, addr): """ Add device of random type """ - variants = (add_device_usb_uhci, add_device_usb_ehci, add_device_usb_xhci, - add_virtio_disk) + variants = ( + add_device_usb_uhci, + add_device_usb_ehci, + add_device_usb_xhci, + add_virtio_disk, + ) return random.choice(variants)(params, name_idxs, parent_bus, addr) @@ -296,31 +304,31 @@ def run(test, params, env): error_context.context("Creating early names representation") env_process.preprocess_vm(test, params, env, params["main_vm"]) vm = 
env.get_vm(params["main_vm"]) - qdev = vm.make_create_command() # parse params into qdev + qdev = vm.make_create_command() # parse params into qdev if isinstance(qdev, tuple): qdev = qdev[0] error_context.context("Getting main PCI bus info") error_context.context("Processing test params") - test_params = params['test_setup'] - test_devices = params['test_devices'] - test_device_type = params['test_device_type'] - if not params.get('pci_controllers'): - params['pci_controllers'] = '' - _lasts = [PCIBusInfo(qdev.get_by_properties({'aobject': 'pci.0'})[0])] - _lasts[0].first = 7 # first 6 slots might be already occupied on pci.0 - _lasts[0].last -= 1 # last port is usually used by the VM + test_params = params["test_setup"] + test_devices = params["test_devices"] + test_device_type = params["test_device_type"] + if not params.get("pci_controllers"): + params["pci_controllers"] = "" + _lasts = [PCIBusInfo(qdev.get_by_properties({"aobject": "pci.0"})[0])] + _lasts[0].first = 7 # first 6 slots might be already occupied on pci.0 + _lasts[0].last -= 1 # last port is usually used by the VM use_buses = [] names = {} test.log.info("Test setup") - for line in test_params.split('\\n'): + for line in test_params.split("\\n"): _idx = 0 out = "" - for device in line.split('->'): + for device in line.split("->"): device = device.strip() if device: - if device == 'devices': + if device == "devices": use_buses.append(_lasts[_idx]) out += "->(test_devices)" break @@ -330,7 +338,7 @@ def run(test, params, env): params, bus = add_bus(qdev, params, device, name, _lasts[_idx]) # we inserted a device, increase the upper bus first idx _lasts[_idx].first += 1 - out += "->%s" % (name) + out += f"->{name}" _idx += 1 if len(_lasts) > _idx: _lasts = _lasts[:_idx] @@ -340,20 +348,22 @@ def run(test, params, env): out += " " * (len(_lasts[_idx].name) + 2) test.log.info(out) - add_devices = {'first': add_devices_first, - 'all': add_devices_all}.get(test_devices, - add_devices_random) - add_device = {'uhci': add_device_usb_uhci, - 'ehci': add_device_usb_ehci, - 'xhci': add_device_usb_xhci, - 'virtio_disk': add_virtio_disk, - }.get(test_device_type, add_device_random) + add_devices = {"first": add_devices_first, "all": add_devices_all}.get( + test_devices, add_devices_random + ) + add_device = { + "uhci": add_device_usb_uhci, + "ehci": add_device_usb_ehci, + "xhci": add_device_usb_xhci, + "virtio_disk": add_virtio_disk, + }.get(test_device_type, add_device_random) name_idxs = {} for bus in use_buses: params, name_idxs = add_devices(params, name_idxs, bus, add_device) - params['start_vm'] = 'yes' - env_process.process(test, params, env, env_process.preprocess_image, - env_process.preprocess_vm) + params["start_vm"] = "yes" + env_process.process( + test, params, env, env_process.preprocess_image, env_process.preprocess_vm + ) vm = env.get_vm(params["main_vm"]) # PCI devices are initialized by firmware, which might require some time @@ -363,7 +373,7 @@ def run(test, params, env): error_context.context("Verify qtree vs. qemu devices", test.log.info) qtree = qemu_qtree.QtreeContainer() - _info_qtree = vm.monitor.info('qtree', False) + _info_qtree = vm.monitor.info("qtree", False) qtree.parse_info_qtree(_info_qtree) info_qdev = process_qdev(vm.devices) info_qtree = process_qtree(qtree) @@ -377,8 +387,8 @@ def run(test, params, env): errors += "qdev vs. qtree, " error_context.context("Verify lspci vs. 
qtree", test.log.info) - if params.get('lspci_cmd'): - _info_lspci = session.cmd_output(params['lspci_cmd']) + if params.get("lspci_cmd"): + _info_lspci = session.cmd_output(params["lspci_cmd"]) info_lspci = process_lspci(_info_lspci) err = verify_lspci(info_lspci, info_qtree[2]) if err: @@ -389,5 +399,7 @@ def run(test, params, env): error_context.context("Results") if errors: - test.fail("Errors occurred while comparing %s. Please check" - " the log for details." % errors[:-2]) + test.fail( + f"Errors occurred while comparing {errors[:-2]}. Please check" + " the log for details." + ) diff --git a/qemu/tests/pci_hotplug.py b/qemu/tests/pci_hotplug.py index 32c42ab0f0..46af301546 100644 --- a/qemu/tests/pci_hotplug.py +++ b/qemu/tests/pci_hotplug.py @@ -1,13 +1,15 @@ import re -import aexpect -from virttest import error_context -from virttest import utils_misc -from virttest import storage -from virttest import utils_test -from virttest import data_dir -from virttest import arch -from virttest import qemu_monitor +import aexpect +from virttest import ( + arch, + data_dir, + error_context, + qemu_monitor, + storage, + utils_misc, + utils_test, +) @error_context.context_aware @@ -28,32 +30,34 @@ def run(test, params, env): :param params: Dictionary with the test parameters. :param env: Dictionary with test environment. """ + # Select an image file def find_image(pci_num): - image_params = params.object_params("%s" % img_list[pci_num + 1]) + image_params = params.object_params(f"{img_list[pci_num + 1]}") o = storage.get_image_filename(image_params, data_dir.get_data_dir()) return o def pci_add_nic(pci_num): - pci_add_cmd = "pci_add pci_addr=auto nic model=%s" % pci_model + pci_add_cmd = f"pci_add pci_addr=auto nic model={pci_model}" return pci_add(pci_add_cmd) def pci_add_block(pci_num): image_filename = find_image(pci_num) - pci_add_cmd = ("pci_add pci_addr=auto storage file=%s,if=%s" % - (image_filename, pci_model)) + pci_add_cmd = ( + f"pci_add pci_addr=auto storage file={image_filename},if={pci_model}" + ) return pci_add(pci_add_cmd) def pci_add(pci_add_cmd): - error_context.context("Adding pci device with command 'pci_add'", - test.log.info) + error_context.context("Adding pci device with command 'pci_add'", test.log.info) add_output = vm.monitor.send_args_cmd(pci_add_cmd, convert=False) - pci_info.append(['', '', add_output, pci_model]) + pci_info.append(["", "", add_output, pci_model]) if "OK domain" not in add_output: - test.fail("Add PCI device failed. " - "Monitor command is: %s, Output: %r" % - (pci_add_cmd, add_output)) + test.fail( + "Add PCI device failed. " + f"Monitor command is: {pci_add_cmd}, Output: {add_output!r}" + ) return vm.monitor.info("pci") def is_supported_command(cmd1, cmd2): @@ -75,18 +79,22 @@ def is_supported_device(dev): test.error("Unknown version of qemu") # Probe qemu for a list of supported devices - probe_output = vm.monitor.human_monitor_cmd("%s ?" 
% cmd_type, - debug=False) - devices_supported = [j.strip('"') for j in - re.findall(r'\"[a-z|0-9|\-|\_|\,|\.]*\"', - probe_output, re.MULTILINE)] - test.log.debug("QEMU reported the following supported devices for " - "PCI hotplug: %s", devices_supported) - return (dev in devices_supported) + probe_output = vm.monitor.human_monitor_cmd(f"{cmd_type} ?", debug=False) + devices_supported = [ + j.strip('"') + for j in re.findall( + r"\"[a-z|0-9|\-|\_|\,|\.]*\"", probe_output, re.MULTILINE + ) + ] + test.log.debug( + "QEMU reported the following supported devices for " "PCI hotplug: %s", + devices_supported, + ) + return dev in devices_supported def verify_supported_device(dev): if not is_supported_device(dev): - test.error("%s doesn't support device: %s" % (cmd_type, dev)) + test.error(f"{cmd_type} doesn't support device: {dev}") def device_add_nic(pci_num, queues=1): device_id = pci_type + "-" + utils_misc.generate_random_id() @@ -96,8 +104,7 @@ def device_add_nic(pci_num, queues=1): if pci_model == "virtio": pci_model = "virtio-net-pci" verify_supported_device(pci_model) - pci_add_cmd = "device_add id=%s,driver=%s" % (pci_info[pci_num][1], - pci_model) + pci_add_cmd = f"device_add id={pci_info[pci_num][1]},driver={pci_model}" if queues > 1 and "virtio" in pci_model: pci_add_cmd += ",mq=on" return device_add(pci_num, pci_add_cmd) @@ -106,12 +113,12 @@ def device_add_block(pci_num, queues=1): device_id = pci_type + "-" + utils_misc.generate_random_id() pci_info.append([device_id, device_id]) - image_format = params.get("image_format_%s" % img_list[pci_num + 1]) + image_format = params.get(f"image_format_{img_list[pci_num + 1]}") if not image_format: image_format = params.get("image_format", "qcow2") image_filename = find_image(pci_num) data_image = params.get("images").split()[-1] - serial_id = params["blk_extra_params_%s" % data_image].split("=")[1] + serial_id = params[f"blk_extra_params_{data_image}"].split("=")[1] pci_model = params.get("pci_model") controller_model = None @@ -121,7 +128,7 @@ def device_add_block(pci_num, queues=1): if pci_model == "scsi" or pci_model == "scsi-hd": if pci_model == "scsi": pci_model = "scsi-disk" - if arch.ARCH in ('ppc64', 'ppc64le'): + if arch.ARCH in ("ppc64", "ppc64le"): controller_model = "spapr-vscsi" else: controller_model = "lsi53c895a" @@ -130,28 +137,25 @@ def device_add_block(pci_num, queues=1): verify_supported_device(controller_model) controller_id = "controller-" + device_id if vm.monitor.protocol == "human": - controller_add_cmd = ("device_add %s,id=%s" % - (controller_model, controller_id)) + controller_add_cmd = f"device_add {controller_model},id={controller_id}" else: - controller_add_cmd = ("device_add driver=%s,id=%s" % - (controller_model, controller_id)) + controller_add_cmd = ( + f"device_add driver={controller_model},id={controller_id}" + ) error_context.context("Adding SCSI controller.", test.log.info) vm.monitor.send_args_cmd(controller_add_cmd, convert=False) verify_supported_device(pci_model) - driver_add_cmd = ("%s auto file=%s,if=none,format=%s,id=%s,serial=%s" % - (drive_cmd_type, image_filename, image_format, - pci_info[pci_num][0], serial_id)) + driver_add_cmd = f"{drive_cmd_type} auto file={image_filename},if=none,format={image_format},id={pci_info[pci_num][0]},serial={serial_id}" if drive_cmd_type == "__com.redhat_drive_add": - driver_add_cmd = ("%s file=%s,format=%s,id=%s,serial=%s" % - (drive_cmd_type, image_filename, image_format, - pci_info[pci_num][0], serial_id)) + driver_add_cmd = f"{drive_cmd_type} 
file={image_filename},format={image_format},id={pci_info[pci_num][0]},serial={serial_id}" # add block device to vm device container image_name = img_list[pci_num + 1] image_params = params.object_params(image_name) image_name = pci_info[pci_num][0] - blk_insert = vm.devices.images_define_by_params(image_name, - image_params, 'disk') + blk_insert = vm.devices.images_define_by_params( + image_name, image_params, "disk" + ) vm.devices.insert(blk_insert) env.register_vm(vm.name, vm) @@ -159,14 +163,14 @@ def device_add_block(pci_num, queues=1): error_context.context("Adding driver.", test.log.info) vm.monitor.send_args_cmd(driver_add_cmd, convert=False) - pci_add_cmd = ("device_add id=%s,driver=%s,drive=%s" % - (pci_info[pci_num][1], pci_model, pci_info[pci_num][0])) + pci_add_cmd = f"device_add id={pci_info[pci_num][1]},driver={pci_model},drive={pci_info[pci_num][0]}" return device_add(pci_num, pci_add_cmd) def device_add(pci_num, pci_add_cmd): - error_context.context("Adding pci device with command 'device_add'", - test.log.info) - if vm.monitor.protocol == 'qmp': + error_context.context( + "Adding pci device with command 'device_add'", test.log.info + ) + if vm.monitor.protocol == "qmp": add_output = vm.monitor.send_args_cmd(pci_add_cmd, convert=False) else: add_output = vm.monitor.send_args_cmd(pci_add_cmd, convert=False) @@ -175,10 +179,13 @@ def device_add(pci_num, pci_add_cmd): after_add = vm.monitor.info("pci") if pci_info[pci_num][1] not in str(after_add): - test.log.error("Could not find matched id in monitor:" - " %s", pci_info[pci_num][1]) - test.fail("Add device failed. Monitor command is: %s" - ". Output: %r" % (pci_add_cmd, add_output)) + test.log.error( + "Could not find matched id in monitor:" " %s", pci_info[pci_num][1] + ) + test.fail( + f"Add device failed. Monitor command is: {pci_add_cmd}" + f". Output: {add_output!r}" + ) return after_add # Hot add a pci device @@ -188,10 +195,9 @@ def add_device(pci_num, queues=1): try: # get function for adding device. - add_fuction = local_functions["%s_%s" % (cmd_type, pci_type)] + add_fuction = local_functions[f"{cmd_type}_{pci_type}"] except Exception: - test.error("No function for adding '%s' dev with '%s'" % - (pci_type, cmd_type)) + test.error(f"No function for adding '{pci_type}' dev with '{cmd_type}'") after_add = None if add_fuction: # Do add pci device. @@ -214,32 +220,40 @@ def _find_pci(): return True return False - error_context.context("Start checking new added device", - test.log.info) + error_context.context("Start checking new added device", test.log.info) # Compare the output of 'info pci' if after_add == info_pci_ref: - test.fail("No new PCI device shown after executing " - "monitor command: 'info pci'") + test.fail( + "No new PCI device shown after executing " + "monitor command: 'info pci'" + ) secs = int(params.get("wait_secs_for_hook_up", 3)) if not utils_misc.wait_for(_new_shown, test_timeout, secs, 3): - test.fail("No new device shown in output of command " - "executed inside the guest: %s" % reference_cmd) + test.fail( + "No new device shown in output of command " + f"executed inside the guest: {reference_cmd}" + ) if not utils_misc.wait_for(_find_pci, test_timeout, 3, 3): - test.fail("PCI %s %s device not found in guest. " - "Command was: %s" % - (pci_model, pci_type, params.get("find_pci_cmd"))) + test.fail( + "PCI {} {} device not found in guest. 
" "Command was: {}".format( + pci_model, pci_type, params.get("find_pci_cmd") + ) + ) # Test the newly added device try: if params.get("pci_test_cmd"): - test_cmd = re.sub("PCI_NUM", "%s" % (pci_num + 1), - params.get("pci_test_cmd")) + test_cmd = re.sub( + "PCI_NUM", "%s" % (pci_num + 1), params.get("pci_test_cmd") + ) session.cmd(test_cmd, timeout=disk_op_timeout) except aexpect.ShellError as e: - test.fail("Check for %s device failed after PCI " - "hotplug. Output: %r" % (pci_type, e.output)) + test.fail( + f"Check for {pci_type} device failed after PCI " + f"hotplug. Output: {e.output!r}" + ) except Exception: pci_del(pci_num, ignore_failure=True) @@ -255,36 +269,40 @@ def _device_removed(): blk_removed = [] if cmd_type == "pci_add": slot_id = int(pci_info[pci_num][2].split(",")[2].split()[1]) - cmd = "pci_del pci_addr=%s" % hex(slot_id) + cmd = f"pci_del pci_addr={hex(slot_id)}" vm.monitor.send_args_cmd(cmd, convert=False) blk_removed.append(pci_info[pci_num][1]) elif cmd_type == "device_add": if vm.monitor.protocol == "human": - cmd = "device_del %s" % pci_info[pci_num][1] + cmd = f"device_del {pci_info[pci_num][1]}" else: - cmd = "device_del id=%s" % pci_info[pci_num][1] + cmd = f"device_del id={pci_info[pci_num][1]}" vm.monitor.send_args_cmd(cmd, convert=False) if params.get("cmd_after_unplug_dev"): - cmd = re.sub("PCI_NUM", "%s" % (pci_num + 1), - params.get("cmd_after_unplug_dev")) + cmd = re.sub( + "PCI_NUM", "%s" % (pci_num + 1), params.get("cmd_after_unplug_dev") + ) session.cmd(cmd, timeout=disk_op_timeout) blk_removed.append(pci_info[pci_num][1]) pci_model = params.get("pci_model") if pci_model == "scsi" or pci_model == "scsi-hd": controller_id = "controller-" + pci_info[pci_num][0] if vm.monitor.protocol == "human": - controller_del_cmd = "device_del %s" % controller_id + controller_del_cmd = f"device_del {controller_id}" else: - controller_del_cmd = "device_del id=%s" % controller_id - error_context.context("Deleting SCSI controller.", - test.log.info) + controller_del_cmd = f"device_del id={controller_id}" + error_context.context("Deleting SCSI controller.", test.log.info) vm.monitor.send_args_cmd(controller_del_cmd, convert=False) blk_removed.append(controller_id) - if (not utils_misc.wait_for(_device_removed, test_timeout, 0, 1) and - not ignore_failure): - test.fail("Failed to hot remove PCI device: %s. " - "Monitor command: %s" % (pci_info[pci_num][3], cmd)) + if ( + not utils_misc.wait_for(_device_removed, test_timeout, 0, 1) + and not ignore_failure + ): + test.fail( + f"Failed to hot remove PCI device: {pci_info[pci_num][3]}. 
" + f"Monitor command: {cmd}" + ) # Remove the device from vm device container for device in vm.devices: if device.str_short() in blk_removed: @@ -306,7 +324,7 @@ def _device_removed(): # Modprobe the module if specified in config file module = params.get("modprobe_module") if module: - session.cmd("modprobe %s" % module) + session.cmd(f"modprobe {module}") # check monitor type qemu_binary = utils_misc.get_qemu_binary(params) @@ -314,16 +332,20 @@ def _device_removed(): # Probe qemu to verify what is the supported syntax for PCI hotplug cmd_type = is_supported_command("device_add", "pci_add") if not cmd_type: - test.error("Could find a suitable method for hotplugging" - " device in this version of qemu") + test.error( + "Could find a suitable method for hotplugging" + " device in this version of qemu" + ) # Determine syntax of drive hotplug # __com.redhat_drive_add == qemu-kvm-0.12 on RHEL 6 # drive_add == qemu-kvm-0.13 onwards drive_cmd_type = is_supported_command("drive_add", "__com.redhat_drive_add") if not drive_cmd_type: - test.error("Could find a suitable method for hotplugging" - " drive in this version of qemu") + test.error( + "Could find a suitable method for hotplugging" + " drive in this version of qemu" + ) local_functions = locals() @@ -343,38 +365,40 @@ def _device_removed(): for pci_num in range(pci_num_range): sub_type = params.get("sub_type_before_plug") if sub_type: - error_context.context(context_msg - % (sub_type, "before hotplug"), - test.log.info) + error_context.context( + context_msg % (sub_type, "before hotplug"), test.log.info + ) utils_test.run_virt_sub_test(test, params, env, sub_type) - error_context.context("Start hot-adding pci device, repeat %d" % j, - test.log.info) + error_context.context( + "Start hot-adding pci device, repeat %d" % j, test.log.info + ) add_device(pci_num, queues) sub_type = params.get("sub_type_after_plug") if sub_type: - error_context.context(context_msg - % (sub_type, "after hotplug"), - test.log.info) + error_context.context( + context_msg % (sub_type, "after hotplug"), test.log.info + ) utils_test.run_virt_sub_test(test, params, env, sub_type) for pci_num in range(pci_num_range): sub_type = params.get("sub_type_before_unplug") if sub_type: - error_context.context(context_msg - % (sub_type, "before hotunplug"), - test.log.info) + error_context.context( + context_msg % (sub_type, "before hotunplug"), test.log.info + ) utils_test.run_virt_sub_test(test, params, env, sub_type) - error_context.context("start hot-deleting pci device, repeat %d" - % j, test.log.info) + error_context.context( + "start hot-deleting pci device, repeat %d" % j, test.log.info + ) pci_del(-(pci_num + 1)) sub_type = params.get("sub_type_after_unplug") if sub_type: - error_context.context(context_msg - % (sub_type, "after hotunplug"), - test.log.info) + error_context.context( + context_msg % (sub_type, "after hotunplug"), test.log.info + ) utils_test.run_virt_sub_test(test, params, env, sub_type) if params.get("reboot_vm", "no") == "yes": diff --git a/qemu/tests/pci_hotplug_check.py b/qemu/tests/pci_hotplug_check.py index 01380eb12d..f123c2d228 100644 --- a/qemu/tests/pci_hotplug_check.py +++ b/qemu/tests/pci_hotplug_check.py @@ -1,15 +1,9 @@ +import random import re import time -import random import aexpect - -from virttest import data_dir -from virttest import error_context -from virttest import utils_misc -from virttest import storage -from virttest import arch -from virttest import env_process +from virttest import arch, data_dir, env_process, error_context, 
storage, utils_misc @error_context.context_aware @@ -32,17 +26,16 @@ def run(test, params, env): """ def prepare_image_params(params): - pci_num = int(params['pci_num']) + pci_num = int(params["pci_num"]) for i in range(pci_num): - image_name = '%s_%s' % ('stg', i) - params['images'] = ' '.join([params['images'], image_name]) - image_image_name = '%s_%s' % ('image_name', image_name) - params[image_image_name] = '%s_%s' % ('storage', i) - image_image_format = '%s_%s' % ('image_format', image_name) - params[image_image_format] = params.get('image_format_extra', - 'qcow2') - image_image_size = '%s_%s' % ('image_size', image_name) - params[image_image_size] = params.get('image_size_extra', '128K') + image_name = "{}_{}".format("stg", i) + params["images"] = " ".join([params["images"], image_name]) + image_image_name = "{}_{}".format("image_name", image_name) + params[image_image_name] = "{}_{}".format("storage", i) + image_image_format = "{}_{}".format("image_format", image_name) + params[image_image_format] = params.get("image_format_extra", "qcow2") + image_image_size = "{}_{}".format("image_size", image_name) + params[image_image_size] = params.get("image_size_extra", "128K") return params def find_new_device(check_cmd, device_string, chk_timeout=30): @@ -73,14 +66,15 @@ def find_del_device(check_cmd, device_string, chk_timeout=30): # Select an image file def find_image(pci_num): - image_params = params.object_params("%s" % img_list[pci_num + 1]) + image_params = params.object_params(f"{img_list[pci_num + 1]}") o = storage.get_image_filename(image_params, data_dir.get_data_dir()) return o def pci_add_block(pci_num, queues, pci_id): image_filename = find_image(pci_num) - pci_add_cmd = ("pci_add pci_addr=auto storage file=%s,if=%s" % - (image_filename, pci_model)) + pci_add_cmd = ( + f"pci_add pci_addr=auto storage file={image_filename},if={pci_model}" + ) return pci_add(pci_add_cmd) def pci_add(pci_add_cmd): @@ -88,11 +82,12 @@ def pci_add(pci_add_cmd): error_context.context("Adding pci device with command 'pci_add'") add_output = vm.monitor.send_args_cmd(pci_add_cmd, convert=False) guest_device = find_new_device(chk_cmd, guest_devices) - pci_info.append(['', '', add_output, pci_model, guest_device]) + pci_info.append(["", "", add_output, pci_model, guest_device]) if "OK domain" not in add_output: - test.fail("Add PCI device failed. " - "Monitor command is: %s, Output: %r" % - (pci_add_cmd, add_output)) + test.fail( + "Add PCI device failed. " + f"Monitor command is: {pci_add_cmd}, Output: {add_output!r}" + ) return vm.monitor.info("pci") def is_supported_device(dev): @@ -106,17 +101,22 @@ def is_supported_device(dev): test.error("Unknown version of qemu") # Probe qemu for a list of supported devices - probe_output = vm.monitor.human_monitor_cmd("%s ?" 
% cmd_type) # pylint: disable=E0606 - devices_supported = [j.strip('"') for j in - re.findall(r'\"[a-z|0-9|\-|\_|\,|\.]*\"', - probe_output, re.MULTILINE)] - test.log.debug("QEMU reported the following supported devices for " - "PCI hotplug: %s", devices_supported) - return (dev in devices_supported) + probe_output = vm.monitor.human_monitor_cmd(f"{cmd_type} ?") # pylint: disable=E0606 + devices_supported = [ + j.strip('"') + for j in re.findall( + r"\"[a-z|0-9|\-|\_|\,|\.]*\"", probe_output, re.MULTILINE + ) + ] + test.log.debug( + "QEMU reported the following supported devices for " "PCI hotplug: %s", + devices_supported, + ) + return dev in devices_supported def verify_supported_device(dev): if not is_supported_device(dev): - test.error("%s doesn't support device: %s" % (cmd_type, dev)) + test.error(f"{cmd_type} doesn't support device: {dev}") def device_add_block(pci_num, queues=1, pci_id=None): if pci_id is not None: @@ -125,7 +125,7 @@ def device_add_block(pci_num, queues=1, pci_id=None): device_id = pci_type + "-" + utils_misc.generate_random_id() pci_info.append([device_id, device_id]) - image_format = params.get("image_format_%s" % img_list[pci_num + 1]) + image_format = params.get(f"image_format_{img_list[pci_num + 1]}") if not image_format: image_format = params.get("image_format", "qcow2") image_filename = find_image(pci_num) @@ -133,8 +133,11 @@ def device_add_block(pci_num, queues=1, pci_id=None): pci_model = params.get("pci_model") controller_model = None bus_option = "" - if "q35" in params["machine_type"] or "arm64" in params["machine_type"]\ - and drive_format == "virtio": + if ( + "q35" in params["machine_type"] + or "arm64" in params["machine_type"] + and drive_format == "virtio" + ): bus_option = ",bus=pcie_extra_root_port_%d" % pci_num if pci_model == "virtio": @@ -142,15 +145,14 @@ def device_add_block(pci_num, queues=1, pci_id=None): if pci_model == "scsi": pci_model = "scsi-disk" - if arch.ARCH in ('ppc64', 'ppc64le'): + if arch.ARCH in ("ppc64", "ppc64le"): controller_model = "spapr-vscsi" else: controller_model = "lsi53c895a" if nonlocal_vars["verify_device_flag"]: verify_supported_device(controller_model) controller_id = "controller-" + device_id - controller_add_cmd = ("device_add %s,id=%s" % - (controller_model, controller_id)) + controller_add_cmd = f"device_add {controller_model},id={controller_id}" error_context.context("Adding SCSI controller.") vm.monitor.send_args_cmd(controller_add_cmd) @@ -158,20 +160,14 @@ def device_add_block(pci_num, queues=1, pci_id=None): verify_supported_device(pci_model) nonlocal_vars["verify_device_flag"] = False - add_cmd = "{0} driver=file,filename={1},node-name=file_{2}".format( - drive_cmd_type, image_filename, pci_info[pci_num][0]) - add_cmd += ";{0} driver={1},node-name={2},file=file_{2}".format( - drive_cmd_type, image_format, pci_info[pci_num][0]) + add_cmd = f"{drive_cmd_type} driver=file,filename={image_filename},node-name=file_{pci_info[pci_num][0]}" + add_cmd += f";{drive_cmd_type} driver={image_format},node-name={pci_info[pci_num][0]},file=file_{pci_info[pci_num][0]}" driver_add_cmd = add_cmd if drive_cmd_type == "drive_add": - driver_add_cmd = ("%s auto file=%s,if=none,format=%s,id=%s" % - (drive_cmd_type, image_filename, image_format, - pci_info[pci_num][0])) + driver_add_cmd = f"{drive_cmd_type} auto file={image_filename},if=none,format={image_format},id={pci_info[pci_num][0]}" elif drive_cmd_type == "__com.redhat_drive_add": - driver_add_cmd = ("%s file=%s,format=%s,id=%s" % - (drive_cmd_type, image_filename, 
image_format, - pci_info[pci_num][0])) + driver_add_cmd = f"{drive_cmd_type} file={image_filename},format={image_format},id={pci_info[pci_num][0]}" # add driver. error_context.context("Adding driver.") if drive_cmd_type != "blockdev-add": @@ -179,15 +175,13 @@ def device_add_block(pci_num, queues=1, pci_id=None): elif pci_id is None: vm.monitor.send_args_cmd(driver_add_cmd, convert=False) - pci_add_cmd = ("device_add id=%s,driver=%s,drive=%s%s" % - (pci_info[pci_num][1], pci_model, - pci_info[pci_num][0], bus_option)) + pci_add_cmd = f"device_add id={pci_info[pci_num][1]},driver={pci_model},drive={pci_info[pci_num][0]}{bus_option}" return device_add(pci_num, pci_add_cmd, pci_id=pci_id) def device_add(pci_num, pci_add_cmd, pci_id=None): error_context.context("Adding pci device with command 'device_add'") guest_devices = session.cmd_output(chk_cmd) - if vm.monitor.protocol == 'qmp': + if vm.monitor.protocol == "qmp": add_output = vm.monitor.send_args_cmd(pci_add_cmd) else: add_output = vm.monitor.send_args_cmd(pci_add_cmd, convert=False) @@ -201,10 +195,13 @@ def device_add(pci_num, pci_add_cmd, pci_id=None): after_add = vm.monitor.info("pci") if pci_info[pci_num][1] not in str(after_add): - test.log.error("Could not find matched id in monitor:" - " %s", pci_info[pci_num][1]) - test.fail("Add device failed. Monitor command is: %s" - ". Output: %r" % (pci_add_cmd, add_output)) + test.log.error( + "Could not find matched id in monitor:" " %s", pci_info[pci_num][1] + ) + test.fail( + f"Add device failed. Monitor command is: {pci_add_cmd}" + f". Output: {add_output!r}" + ) return after_add # Hot add a pci device @@ -214,10 +211,11 @@ def add_device(pci_num, queues=1, pci_id=None): try: # get function for adding device. - add_fuction = local_functions["%s_%s" % (cmd_type, pci_type)] + add_fuction = local_functions[f"{cmd_type}_{pci_type}"] except Exception: - test.error("No function for adding " + "'%s' dev " % pci_type + - "with '%s'" % cmd_type) + test.error( + "No function for adding " + f"'{pci_type}' dev " + f"with '{cmd_type}'" + ) after_add = None if add_fuction: # Do add pci device. @@ -243,27 +241,36 @@ def _find_pci(): error_context.context("Start checking new added device") # Compare the output of 'info pci' if after_add == info_pci_ref: - test.fail("No new PCI device shown after " - "executing monitor command: 'info pci'") + test.fail( + "No new PCI device shown after " + "executing monitor command: 'info pci'" + ) secs = int(params.get("wait_secs_for_hook_up", 3)) if not utils_misc.wait_for(_new_shown, test_timeout, secs, 3): - test.fail("No new device shown in output of" + - "command executed inside the " + - "guest: %s" % reference_cmd) + test.fail( + "No new device shown in output of" + + "command executed inside the " + + f"guest: {reference_cmd}" + ) if not utils_misc.wait_for(_find_pci, test_timeout, 3, 3): - test.fail("PCI %s %s " % (pci_model, pci_type) + - "device not found in guest. Command " + - "was: %s" % params.get("find_pci_cmd")) + test.fail( + f"PCI {pci_model} {pci_type} " + + "device not found in guest. Command " + + "was: {}".format(params.get("find_pci_cmd")) + ) # Test the newly added device try: error_context.context("Check disk in guest", test.log.info) session.cmd(params.get("pci_test_cmd") % (pci_num + 1)) except aexpect.ShellError as e: - test.fail("Check for %s device failed" % pci_type + - "after PCI hotplug." + "Output: %r" % e.output) + test.fail( + f"Check for {pci_type} device failed" + + "after PCI hotplug." 
+ + f"Output: {e.output!r}" + ) except Exception: pci_del(pci_num, ignore_failure=True) @@ -278,16 +285,20 @@ def _device_removed(): before_del = vm.monitor.info("pci") if cmd_type == "pci_add": slot_id = int(pci_info[pci_num][2].split(",")[2].split()[1]) - cmd = "pci_del pci_addr=%s" % hex(slot_id) + cmd = f"pci_del pci_addr={hex(slot_id)}" vm.monitor.send_args_cmd(cmd, convert=False) elif cmd_type == "device_add": - cmd = "device_del id=%s" % pci_info[pci_num][1] + cmd = f"device_del id={pci_info[pci_num][1]}" vm.monitor.send_args_cmd(cmd) - if (not utils_misc.wait_for(_device_removed, test_timeout, 2, 3) and - not ignore_failure): - test.fail("Failed to hot remove PCI device: %s. " - "Monitor command: %s" % (pci_info[pci_num][3], cmd)) + if ( + not utils_misc.wait_for(_device_removed, test_timeout, 2, 3) + and not ignore_failure + ): + test.fail( + f"Failed to hot remove PCI device: {pci_info[pci_num][3]}. " + f"Monitor command: {cmd}" + ) nonlocal_vars = {"verify_device_flag": True} machine_type = params.get("machine_type") @@ -308,21 +319,22 @@ def _device_removed(): # Modprobe the module if specified in config file module = params.get("modprobe_module") if module: - session.cmd("modprobe %s" % module) + session.cmd(f"modprobe {module}") # check monitor type qemu_binary = utils_misc.get_qemu_binary(params) # Probe qemu to verify what is the supported syntax for PCI hotplug - if vm.monitor.protocol == 'qmp': + if vm.monitor.protocol == "qmp": cmd_output = vm.monitor.info("commands") else: cmd_output = vm.monitor.human_monitor_cmd("help", debug=False) - cmd_type = utils_misc.find_substring(str(cmd_output), "device_add", - "pci_add") + cmd_type = utils_misc.find_substring(str(cmd_output), "device_add", "pci_add") if not cmd_type: - test.error("Could find a suitable method for hotplugging" - " device in this version of qemu") + test.error( + "Could find a suitable method for hotplugging" + " device in this version of qemu" + ) # Determine syntax of drive hotplug # __com.redhat_drive_add == qemu-kvm-0.12 on RHEL 6 @@ -330,9 +342,9 @@ def _device_removed(): drive_cmd_type = utils_misc.find_substring(str(cmd_output), "blockdev-add") if not drive_cmd_type: - drive_cmd_type = utils_misc.find_substring(str(cmd_output), - "__com.redhat_drive_add", - "drive_add") + drive_cmd_type = utils_misc.find_substring( + str(cmd_output), "__com.redhat_drive_add", "drive_add" + ) if not drive_cmd_type: test.error("Unknown version of qemu") @@ -349,8 +361,9 @@ def _device_removed(): pci_info = [] # Add block device into guest for pci_num in range(pci_num_range): - error_context.context("Prepare the %d removable pci device" % pci_num, - test.log.info) + error_context.context( + "Prepare the %d removable pci device" % pci_num, test.log.info + ) add_device(pci_num) if pci_info[pci_num][4] is not None: partition = pci_info[pci_num][4] @@ -370,7 +383,8 @@ def _device_removed(): pci_num = random.randint(0, len(pci_info) - 1) error_context.context( "start unplug device, repeat %d of %d-%d" % (j, rp_times, pci_num), - test.log.info) + test.log.info, + ) guest_devices = session.cmd_output(chk_cmd) pci_del(pci_num) device_del = find_del_device(chk_cmd, guest_devices) @@ -379,15 +393,15 @@ def _device_removed(): # sleep to wait delete event time.sleep(5) - error_context.context("Start plug pci device, repeat %d" % j, - test.log.info) + error_context.context("Start plug pci device, repeat %d" % j, test.log.info) guest_devices = session.cmd_output(chk_cmd) add_device(pci_num, pci_id=pci_info[pci_num][0]) device_del = 
find_new_device(chk_cmd, guest_devices) if device_del != pci_info[pci_num][4]: - test.fail("Device partition changed from %s to %s" % - (pci_info[pci_num][4], device_del)) + test.fail( + f"Device partition changed from {pci_info[pci_num][4]} to {device_del}" + ) cmd = confirm_cmd % (pci_info[pci_num][4], offset) confirm_info = session.cmd_output(cmd) if device_del not in confirm_info: - test.fail("Can not find partition tag in Guest: %s" % confirm_info) + test.fail(f"Can not find partition tag in Guest: {confirm_info}") diff --git a/qemu/tests/pci_hotunplug.py b/qemu/tests/pci_hotunplug.py index 57dd4673db..7494e719e1 100644 --- a/qemu/tests/pci_hotunplug.py +++ b/qemu/tests/pci_hotunplug.py @@ -1,8 +1,6 @@ import re -from virttest import error_context -from virttest import utils_misc -from virttest import utils_test +from virttest import error_context, utils_misc, utils_test @error_context.context_aware @@ -33,15 +31,19 @@ def _device_removed(): before_del = vm.monitor.info("pci") if cmd_type == "device_del": - cmd = "device_del id=%s" % device + cmd = f"device_del id={device}" vm.monitor.send_args_cmd(cmd) else: test.fail("device_del command is not supported") - if (not utils_misc.wait_for(_device_removed, test_timeout, 0, 1) and - not ignore_failure): - test.fail("Failed to hot remove PCI device: %s. " - "Monitor command: %s" % (pci_model, cmd)) + if ( + not utils_misc.wait_for(_device_removed, test_timeout, 0, 1) + and not ignore_failure + ): + test.fail( + f"Failed to hot remove PCI device: {pci_model}. " + f"Monitor command: {cmd}" + ) vm = env.get_vm(params["main_vm"]) vm.verify_alive() @@ -53,14 +55,14 @@ def _device_removed(): pci_num = int(params.get("unplug_pci_num", 1)) pci_model = params.get("pci_model", "pci-assign") # Need udpate match_string if you use a card other than 82576 - match_string = params.get("match_string", "dev: %s, id \"(.*)\"") + match_string = params.get("match_string", 'dev: %s, id "(.*)"') match_string = match_string % pci_model # Modprobe the module if specified in config file module = params.get("modprobe_module") if module: - error_context.context("modprobe the module %s" % module, test.log.info) - session.cmd("modprobe %s" % module) + error_context.context(f"modprobe the module {module}", test.log.info) + session.cmd(f"modprobe {module}") # Probe qemu to verify what is the supported syntax for PCI hotplug if vm.monitor.protocol == "qmp": @@ -76,20 +78,18 @@ def _device_removed(): context_msg = "Running sub test '%s' %s" sub_type = params.get("sub_type_before_unplug") if sub_type: - error_context.context(context_msg % (sub_type, "before unplug"), - test.log.info) + error_context.context(context_msg % (sub_type, "before unplug"), test.log.info) utils_test.run_virt_sub_test(test, params, env, sub_type) if devices: for device in devices[:pci_num]: # (lmr) I think here is the place where pci_info should go - pci_info = [] - error_context.context("Hot unplug device %s" % device, - test.log.info) + error_context.context(f"Hot unplug device {device}", test.log.info) pci_del(device) sub_type = params.get("sub_type_after_unplug") if sub_type: - error_context.context(context_msg % (sub_type, "after hotunplug"), - test.log.info) + error_context.context( + context_msg % (sub_type, "after hotunplug"), test.log.info + ) utils_test.run_virt_sub_test(test, params, env, sub_type) diff --git a/qemu/tests/pcie_hotplug_opt.py b/qemu/tests/pcie_hotplug_opt.py index 1b6c847fb4..bad70b1c40 100644 --- a/qemu/tests/pcie_hotplug_opt.py +++ b/qemu/tests/pcie_hotplug_opt.py @@ 
-34,16 +34,16 @@ def hotplug_scsi():
         Hot-plug virtio-scsi-pci
         """
         pci_add_cmd = "device_add driver=virtio-scsi-pci, id=plug"
-        pci_add_cmd += ",bus=%s" % free_root_port_id
+        pci_add_cmd += f",bus={free_root_port_id}"
         vm.monitor.send_args_cmd(pci_add_cmd)
 
     def hotplug_nic():
         """
         Hot-plug virtio-net-pci
         """
-        nic_name = 'plug'
+        nic_name = "plug"
         nic_params = params.object_params(nic_name)
-        nic_params["nic_model"] = 'virtio-net-pci'
+        nic_params["nic_model"] = "virtio-net-pci"
         nic_params["nic_name"] = nic_name
         vm.hotplug_nic(**nic_params)
 
@@ -53,21 +53,26 @@ def unplug_device(device):
 
         :param device: QDevice object
         """
-        parent_bus = device.get_param('bus')
-        driver = device.get_param('driver')
-        device_id = device.get_param('id')
-        error_context.context("Hot-unplug %s" % driver, test.log.info)
+        parent_bus = device.get_param("bus")
+        driver = device.get_param("driver")
+        device.get_param("id")
+        error_context.context(f"Hot-unplug {driver}", test.log.info)
         error_pattern = unplug_error_pattern % (parent_bus, parent_bus)
         try:
             device.unplug(vm.monitor)
         except QMPCmdError as e:
             if not re.search(error_pattern, e.data["desc"]):
-                test.fail("Hot-unplug failed but '%s' isn't the expected error"
-                          % e.data["desc"])
-            error_context.context("Hot-unplug %s failed as expected: %s"
-                                  % (driver, e.data["desc"]), test.log.info)
+                test.fail(
+                    "Hot-unplug failed but '{}' isn't the expected error".format(
+                        e.data["desc"]
+                    )
+                )
+            error_context.context(
+                "Hot-unplug {} failed as expected: {}".format(driver, e.data["desc"]),
+                test.log.info,
+            )
         else:
-            test.fail("Hot-unplug %s should not success" % driver)
+            test.fail(f"Hot-unplug {driver} should not succeed")
 
     def plug_device(driver):
         """
@@ -75,24 +80,29 @@ def plug_device(driver):
 
         :param driver: the driver name
        """
-        error_context.context("Hot-plug %s" % driver, test.log.info)
+        error_context.context(f"Hot-plug {driver}", test.log.info)
         error_pattern = hotplug_error_pattern % (free_root_port_id, free_root_port_id)
         try:
             callback[driver]()
         except QMPCmdError as e:
             if not re.search(error_pattern, e.data["desc"]):
-                test.fail("Hot-plug failed but '%s' isn't the expected error"
-                          % e.data["desc"])
-            error_context.context("Hot-plug %s failed as expected: %s"
-                                  % (driver, e.data["desc"]), test.log.info)
+                test.fail(
+                    "Hot-plug failed but '{}' isn't the expected error".format(
+                        e.data["desc"]
+                    )
+                )
+            error_context.context(
+                "Hot-plug {} failed as expected: {}".format(driver, e.data["desc"]),
+                test.log.info,
+            )
         else:
-            test.fail("Hot-plug %s should not success" % driver)
+            test.fail(f"Hot-plug {driver} should not succeed")
 
     vm = env.get_vm(params["main_vm"])
     vm.wait_for_login()
-    images = params.objects('images')
-    hotplug_error_pattern = params.get('hotplug_error_pattern')
-    unplug_error_pattern = params.get('unplug_error_pattern')
+    images = params.objects("images")
+    hotplug_error_pattern = params.get("hotplug_error_pattern")
+    unplug_error_pattern = params.get("unplug_error_pattern")
     unplug_devs = []
 
     blk_image = images[1]
@@ -101,7 +111,7 @@ def plug_device(driver):
 
     # In this case only one virtio-scsi-pci device, and the drive name is
     # fixed 'virtio-scsi-pci' for q35
-    scsi_pci_dev = vm.devices.get_by_params({'driver': 'virtio-scsi-pci'})[0]
+    scsi_pci_dev = vm.devices.get_by_params({"driver": "virtio-scsi-pci"})[0]
     unplug_devs.append(scsi_pci_dev)
 
     nic_id = vm.virtnet[0].device_id
@@ -113,21 +123,25 @@ def plug_device(driver):
     # TODO: eject device in windows guest
 
     # one free root port is enough, use the default one provided by framework
-    bus = 
vm.devices.get_buses({'aobject': 'pci.0'})[0] + bus = vm.devices.get_buses({"aobject": "pci.0"})[0] free_root_port_dev = bus.get_free_root_port() free_root_port_id = free_root_port_dev.child_bus[0].busid plug_image = images[-1] plug_image_params = params.object_params(plug_image) - image_devs = vm.devices.images_define_by_params(plug_image, - plug_image_params, - 'disk') - error_context.context("Hot-plug the Drive/BlockdevNode first, " - "will be used by virtio-blk-pci", test.log.info) + image_devs = vm.devices.images_define_by_params( + plug_image, plug_image_params, "disk" + ) + error_context.context( + "Hot-plug the Drive/BlockdevNode first, " "will be used by virtio-blk-pci", + test.log.info, + ) for image_dev in image_devs[:-1]: vm.devices.simple_hotplug(image_dev, vm.monitor) - callback = {"virtio-blk-pci": hotplug_blk, - "virtio-scsi-pci": hotplug_scsi, - "virtio-net-pci": hotplug_nic} - for driver in ['virtio-blk-pci', 'virtio-scsi-pci', 'virtio-net-pci']: + callback = { + "virtio-blk-pci": hotplug_blk, + "virtio-scsi-pci": hotplug_scsi, + "virtio-net-pci": hotplug_nic, + } + for driver in ["virtio-blk-pci", "virtio-scsi-pci", "virtio-net-pci"]: plug_device(driver) diff --git a/qemu/tests/perf_kvm.py b/qemu/tests/perf_kvm.py index fcc94174dd..7ad3a5aca2 100644 --- a/qemu/tests/perf_kvm.py +++ b/qemu/tests/perf_kvm.py @@ -21,17 +21,18 @@ def run(test, params, env): # Prepare test environment in guest session = vm.wait_for_login(timeout=login_timeout) - session.cmd("cat /proc/kallsyms > %s" % vm_kallsyms_path) - session.cmd("cat /proc/modules > %s" % vm_modules_path) + session.cmd(f"cat /proc/kallsyms > {vm_kallsyms_path}") + session.cmd(f"cat /proc/modules > {vm_modules_path}") vm.copy_files_from("/tmp/guest_kallsyms", "/tmp", timeout=transfer_timeout) vm.copy_files_from("/tmp/guest_modules", "/tmp", timeout=transfer_timeout) - perf_record_cmd = "perf kvm --host --guest --guestkallsyms=%s" % vm_kallsyms_path - perf_record_cmd += " --guestmodules=%s record -a -o /tmp/perf.data sleep %s " % ( - vm_modules_path, perf_record_timeout) - perf_report_cmd = "perf kvm --host --guest --guestkallsyms=%s" % vm_kallsyms_path - perf_report_cmd += " --guestmodules=%s report -i /tmp/perf.data --force " % vm_modules_path + perf_record_cmd = f"perf kvm --host --guest --guestkallsyms={vm_kallsyms_path}" + perf_record_cmd += f" --guestmodules={vm_modules_path} record -a -o /tmp/perf.data sleep {perf_record_timeout} " + perf_report_cmd = f"perf kvm --host --guest --guestkallsyms={vm_kallsyms_path}" + perf_report_cmd += ( + f" --guestmodules={vm_modules_path} report -i /tmp/perf.data --force " + ) process.system(perf_record_cmd) process.system(perf_report_cmd) diff --git a/qemu/tests/performance.py b/qemu/tests/performance.py index be7d3f6b1b..849b251b23 100644 --- a/qemu/tests/performance.py +++ b/qemu/tests/performance.py @@ -1,24 +1,21 @@ import os import re -import shutil import shelve -import six +import shutil import threading + +import six + try: from queue import Queue except ImportError: from Queue import Queue -from avocado.utils import download -from avocado.utils import process +from avocado.utils import download, process +from virttest import data_dir, utils_misc, utils_test -from virttest import utils_test -from virttest import utils_misc -from virttest import data_dir - -def cmd_runner_monitor(test, vm, monitor_cmd, test_cmd, - guest_path, timeout=300): +def cmd_runner_monitor(test, vm, monitor_cmd, test_cmd, guest_path, timeout=300): """ For record the env information such as cpu 
utilization, meminfo while run guest test in guest. @@ -29,14 +26,15 @@ def cmd_runner_monitor(test, vm, monitor_cmd, test_cmd, @timeout: longest time for monitor running Return: tag the suffix of the results """ + def thread_kill(cmd, p_file): fd = shelve.open(p_file) - o = process.system_output("pstree -p %s" % fd["pid"], verbose=False, - ignore_status=True) + o = process.system_output( + "pstree -p {}".format(fd["pid"]), verbose=False, ignore_status=True + ) tmp = re.split(r"\s+", cmd)[0] - pid = re.findall(r"%s.(\d+)" % tmp, o)[0] - cmd_result = process.run("kill -9 %s" % pid, verbose=False, - ignore_status=True) + pid = re.findall(rf"{tmp}.(\d+)", o)[0] + cmd_result = process.run(f"kill -9 {pid}", verbose=False, ignore_status=True) fd.close() return (cmd_result.exit_status, cmd_result.stdout) @@ -44,13 +42,13 @@ def monitor_thread(m_cmd, p_file, r_file): fd = shelve.open(p_file) fd["pid"] = os.getpid() fd.close() - os.system("%s &> %s" % (m_cmd, r_file)) + os.system(f"{m_cmd} &> {r_file}") def test_thread(session, m_cmd, t_cmd, p_file, flag, timeout): flag.put(True) s, o = session.cmd_status_output(t_cmd, timeout) if s != 0: - test.fail("Test failed or timeout: %s" % o) + test.fail(f"Test failed or timeout: {o}") if not flag.empty(): flag.get() thread_kill(m_cmd, p_file) @@ -58,14 +56,16 @@ def test_thread(session, m_cmd, t_cmd, p_file, flag, timeout): kill_thread_flag = Queue(1) session = vm.wait_for_login(timeout=300) tag = vm.instance - pid_file = "/tmp/monitor_pid_%s" % tag - result_file = "/tmp/host_monitor_result_%s" % tag - - monitor = threading.Thread(target=monitor_thread, args=(monitor_cmd, - pid_file, result_file)) - test_runner = threading.Thread(target=test_thread, args=(session, - monitor_cmd, test_cmd, pid_file, - kill_thread_flag, timeout)) + pid_file = f"/tmp/monitor_pid_{tag}" + result_file = f"/tmp/host_monitor_result_{tag}" + + monitor = threading.Thread( + target=monitor_thread, args=(monitor_cmd, pid_file, result_file) + ) + test_runner = threading.Thread( + target=test_thread, + args=(session, monitor_cmd, test_cmd, pid_file, kill_thread_flag, timeout), + ) monitor.start() test_runner.start() monitor.join(int(timeout)) @@ -74,10 +74,10 @@ def test_thread(session, m_cmd, t_cmd, p_file, flag, timeout): thread_kill(monitor_cmd, pid_file) thread_kill("sh", pid_file) - guest_result_file = "/tmp/guest_result_%s" % tag - guest_monitor_result_file = "/tmp/guest_monitor_result_%s" % tag + guest_result_file = f"/tmp/guest_result_{tag}" + guest_monitor_result_file = f"/tmp/guest_monitor_result_{tag}" vm.copy_files_from(guest_path, guest_result_file) - vm.copy_files_from("%s_monitor" % guest_path, guest_monitor_result_file) + vm.copy_files_from(f"{guest_path}_monitor", guest_monitor_result_file) return tag @@ -107,7 +107,7 @@ def run(test, params, env): # Prepare test environment in guest session = vm.wait_for_login(timeout=login_timeout) - prefix = test.outputdir.split(".performance.")[0] + test.outputdir.split(".performance.")[0] summary_results = params.get("summary_results") guest_ver = session.cmd_output("uname -r").strip() @@ -128,7 +128,7 @@ def run(test, params, env): session.cmd("rm -rf /tmp/src*") session.cmd("mkdir -p /tmp/src_tmp") - session.cmd("tar -xf /tmp/%s -C %s" % (tar_name, "/tmp/src_tmp")) + session.cmd("tar -xf /tmp/{} -C {}".format(tar_name, "/tmp/src_tmp")) # Find the newest file in src tmp directory cmd = "ls -rt /tmp/src_tmp" @@ -137,17 +137,18 @@ def run(test, params, env): new_file = re.findall("(.*)\n", o)[-1] else: test.error("Can not 
decompress test file in guest") - session.cmd("mv /tmp/src_tmp/%s /tmp/src" % new_file) # pylint: disable=E0606 + session.cmd(f"mv /tmp/src_tmp/{new_file} /tmp/src") # pylint: disable=E0606 if test_patch: - test_patch_path = os.path.join(data_dir.get_deps_dir(), 'performance', - test_patch) + test_patch_path = os.path.join( + data_dir.get_deps_dir(), "performance", test_patch + ) vm.copy_files_to(test_patch_path, "/tmp/src") - session.cmd("cd /tmp/src && patch -p1 < /tmp/src/%s" % test_patch) + session.cmd(f"cd /tmp/src && patch -p1 < /tmp/src/{test_patch}") compile_cmd = params.get("compile_cmd") if compile_cmd: - session.cmd("cd /tmp/src && %s" % compile_cmd) + session.cmd(f"cd /tmp/src && {compile_cmd}") prepare_cmd = params.get("prepare_cmd") if prepare_cmd: @@ -155,20 +156,23 @@ def run(test, params, env): if s != 0: test.error("Fail to prepare test env in guest") - cmd = "cd /tmp/src && python /tmp/cmd_runner.py \"%s &> " % monitor_cmd - cmd += "/tmp/guest_result_monitor\" \"/tmp/src/%s" % test_cmd - cmd += " &> %s \" \"/tmp/guest_result\"" - cmd += " %s" % int(test_timeout) + cmd = f'cd /tmp/src && python /tmp/cmd_runner.py "{monitor_cmd} &> ' + cmd += f'/tmp/guest_result_monitor" "/tmp/src/{test_cmd}' + cmd += ' &> %s " "/tmp/guest_result"' + cmd += f" {int(test_timeout)}" test_cmd = cmd # Run guest test with monitor - tag = cmd_runner_monitor(test, vm, monitor_cmd, test_cmd, - guest_path, timeout=test_timeout) + tag = cmd_runner_monitor( + test, vm, monitor_cmd, test_cmd, guest_path, timeout=test_timeout + ) # Result collecting - result_list = ["/tmp/guest_result_%s" % tag, - "/tmp/host_monitor_result_%s" % tag, - "/tmp/guest_monitor_result_%s" % tag] + result_list = [ + f"/tmp/guest_result_{tag}", + f"/tmp/host_monitor_result_{tag}", + f"/tmp/guest_monitor_result_{tag}", + ] guest_results_dir = os.path.join(test.outputdir, "guest_results") if not os.path.exists(guest_results_dir): os.mkdir(guest_results_dir) @@ -177,24 +181,25 @@ def run(test, params, env): row_pattern = params.get("row_pattern") for i in result_list: if re.findall("monitor_result", i): - result = utils_test.summary_up_result(i, ignore_pattern, - head_pattern, row_pattern) - fd = open("%s.sum" % i, "w") + result = utils_test.summary_up_result( + i, ignore_pattern, head_pattern, row_pattern + ) + fd = open(f"{i}.sum", "w") sum_info = {} head_line = "" for keys in result: - head_line += "\t%s" % keys + head_line += f"\t{keys}" for col in result[keys]: - col_sum = "line %s" % col + col_sum = f"line {col}" if col_sum in sum_info: - sum_info[col_sum] += "\t%s" % result[keys][col] + sum_info[col_sum] += f"\t{result[keys][col]}" else: - sum_info[col_sum] = "%s\t%s" % (col, result[keys][col]) - fd.write("%s\n" % head_line) + sum_info[col_sum] = f"{col}\t{result[keys][col]}" + fd.write(f"{head_line}\n") for keys in sum_info: - fd.write("%s\n" % sum_info[keys]) + fd.write(f"{sum_info[keys]}\n") fd.close() - shutil.copy("%s.sum" % i, guest_results_dir) + shutil.copy(f"{i}.sum", guest_results_dir) shutil.copy(i, guest_results_dir) session.cmd("rm -rf /tmp/src") @@ -209,7 +214,7 @@ def mpstat_ana(filename): :param filename: filename of the mpstat summary file """ - mpstat_result = open(filename, 'r') + mpstat_result = open(filename, "r") key_value = "%idle" index = 0 result = {} @@ -221,7 +226,7 @@ def mpstat_ana(filename): if data[0] == "all": vcpu = "all" else: - vcpu = "vcpu%s" % data[0] + vcpu = f"vcpu{data[0]}" cpu_use = "%20.2f" % (100 - utils_misc.aton(data[index])) result[vcpu] = cpu_use return result @@ -290,17 
+295,21 @@ def result_sum(topdir, params, guest_ver, resultsdir, test): category_key = re.split(case_type, category_key)[0] category_key = re.sub(r"\.repeat\d+", "", category_key) - kvm_ver = process.system_output(params.get('ver_cmd', "rpm -q qemu-kvm"), - shell=True) + kvm_ver = process.system_output( + params.get("ver_cmd", "rpm -q qemu-kvm"), shell=True + ) host_ver = os.uname()[2] - test.write_test_keyval({'kvm-userspace-ver': kvm_ver}) - test.write_test_keyval({'host-kernel-ver': host_ver}) - test.write_test_keyval({'guest-kernel-ver': guest_ver}) + test.write_test_keyval({"kvm-userspace-ver": kvm_ver}) + test.write_test_keyval({"host-kernel-ver": host_ver}) + test.write_test_keyval({"guest-kernel-ver": guest_ver}) # Find the results files results_files = {} - file_list = ['guest_result', 'guest_monitor_result.*sum', - 'host_monitor_result.*sum'] + file_list = [ + "guest_result", + "guest_monitor_result.*sum", + "host_monitor_result.*sum", + ] if params.get("file_list"): file_list = params.get("file_list").split() @@ -314,13 +323,16 @@ def result_sum(topdir, params, guest_ver, resultsdir, test): if jump_flag: continue file_dir_norpt = re.sub(r"\.repeat\d+", "", files[0]) - if (repeatn in files[0] and - category_key in file_dir_norpt and - case_type in files[0]): + if ( + repeatn in files[0] + and category_key in file_dir_norpt + and case_type in files[0] + ): for i, pattern in enumerate(file_list): if re.findall(pattern, file): - prefix = re.findall(r"%s\.[\d\w_\.]+" % case_type, - file_dir_norpt)[0] + prefix = re.findall( + rf"{case_type}\.[\d\w_\.]+", file_dir_norpt + )[0] prefix = re.sub(r"\.|_", "--", prefix) if prefix not in results_files.keys(): results_files[prefix] = [] @@ -361,8 +373,7 @@ def result_sum(topdir, params, guest_ver, resultsdir, test): category = "-".join(case_infos) if refresh_order_list: order_list = [] - if (category not in results_matrix.keys() and - category not in no_table_list): + if category not in results_matrix.keys() and category not in no_table_list: results_matrix[category] = {} if threads: if threads not in results_matrix[category].keys(): @@ -372,7 +383,7 @@ def result_sum(topdir, params, guest_ver, resultsdir, test): elif category not in no_table_list: tmp_dic = results_matrix[category] - result_context_file = open(results_files[prefix][0], 'r') + result_context_file = open(results_files[prefix][0], "r") result_context = result_context_file.read() result_context_file.close() for mark in marks: @@ -392,17 +403,18 @@ def result_sum(topdir, params, guest_ver, resultsdir, test): no_table_results[mark_tag] = utils_misc.aton(data) perf_value = no_table_results[mark_tag] else: - tmp_dic[mark_tag] = utils_misc.aton(data) # pylint: disable=E0606 + tmp_dic[mark_tag] = utils_misc.aton(data) # pylint: disable=E0606 perf_value = tmp_dic[mark_tag] else: - test.error("Can not get the right data from result." - "Please check the debug file.") + test.error( + "Can not get the right data from result." + "Please check the debug file." 
+ ) if mark_tag not in no_table_list and mark_tag not in order_list: order_list.append(mark_tag) - test.write_perf_keyval({'%s-%s' % (prefix_perf, mark_tag): - perf_value}) # pylint: disable=E0606 + test.write_perf_keyval({f"{prefix_perf}-{mark_tag}": perf_value}) # pylint: disable=E0606 # start analyze the mpstat results - if params.get('mpstat') == "yes": + if params.get("mpstat") == "yes": guest_cpu_infos = mpstat_ana(results_files[prefix][1]) for vcpu in guest_cpu_infos: if vcpu != "all": @@ -413,8 +425,7 @@ def result_sum(topdir, params, guest_ver, resultsdir, test): order_list.append("Hostcpu") # Add some special key for cases if case_type == "ffsb": - tmp_dic["MBps_per_Hostcpu"] = (tmp_dic["Thro-MBps"] / - tmp_dic["Hostcpu"]) + tmp_dic["MBps_per_Hostcpu"] = tmp_dic["Thro-MBps"] / tmp_dic["Hostcpu"] order_list.append("MBps_per_Hostcpu") elif case_type == "iozone": sum_kbps = 0 @@ -433,22 +444,21 @@ def result_sum(topdir, params, guest_ver, resultsdir, test): else: headline = "" for index, tag in enumerate(order_list): - headline += "%s|" % format_result(tag) + headline += f"{format_result(tag)}|" order_line += "DATA%d|" % index headline = headline.rstrip("|") order_line = order_line.rstrip("|") - result_path = utils_misc.get_path(resultsdir, - "%s-result.RHS" % case_type) + result_path = utils_misc.get_path(resultsdir, f"{case_type}-result.RHS") if os.path.isfile(result_path): result_file = open(result_path, "r+") else: result_file = open(result_path, "w") - result_file.write("### kvm-userspace-version : %s\n" % kvm_ver) - result_file.write("### kvm-version : %s\n" % host_ver) - result_file.write("### guest-kernel-version :%s\n" % guest_ver) + result_file.write(f"### kvm-userspace-version : {kvm_ver}\n") + result_file.write(f"### kvm-version : {host_ver}\n") + result_file.write(f"### guest-kernel-version :{guest_ver}\n") - test.write_test_keyval({'category': headline}) + test.write_test_keyval({"category": headline}) result_file.write("Category:ALL\n") matrix_order = params.get("matrix_order", "").split() if not matrix_order: @@ -456,38 +466,38 @@ def result_sum(topdir, params, guest_ver, resultsdir, test): matrix_order.sort() for category in matrix_order: out_loop_line = order_line - result_file.write("%s\n" % category) + result_file.write(f"{category}\n") line = "" write_out_loop = True - result_file.write("%s\n" % headline) + result_file.write(f"{headline}\n") for item in results_matrix[category]: if isinstance(results_matrix[category][item], dict): tmp_dic = results_matrix[category][item] - line = "%s|" % format_result(item) + line = f"{format_result(item)}|" for tag in order_list: - line += "%s|" % format_result(tmp_dic[tag]) + line += f"{format_result(tmp_dic[tag])}|" if tag in sum_marks: - sum_matrix = get_sum_result(sum_matrix, tmp_dic[tag], - tag) - result_file.write("%s\n" % line.rstrip("|")) + sum_matrix = get_sum_result(sum_matrix, tmp_dic[tag], tag) + result_file.write("{}\n".format(line.rstrip("|"))) write_out_loop = False else: - #line += "%s|" % format_result(results_matrix[category][item]) - re_data = "DATA%s" % order_list.index(item) - out_loop_line = re.sub(re_data, - format_result( - results_matrix[category][item]), - out_loop_line) + # line += "%s|" % format_result(results_matrix[category][item]) + re_data = f"DATA{order_list.index(item)}" + out_loop_line = re.sub( + re_data, + format_result(results_matrix[category][item]), + out_loop_line, + ) if tag in sum_marks: - sum_matrix = get_sum_result(sum_matrix, tmp_dic[tag], - tag) + sum_matrix = 
get_sum_result(sum_matrix, tmp_dic[tag], tag) if write_out_loop: - result_file.write("%s\n" % out_loop_line) + result_file.write(f"{out_loop_line}\n") if sum_matrix: if case_type == "ffsb": - sum_matrix["MBps_per_Hostcpu"] = (sum_matrix["Thro-MBps"] / - sum_matrix["Hostcpu"]) + sum_matrix["MBps_per_Hostcpu"] = ( + sum_matrix["Thro-MBps"] / sum_matrix["Hostcpu"] + ) sum_marks.append("MBps_per_Hostcpu") result_file.write("Category:SUM\n") headline = "" @@ -498,10 +508,10 @@ def result_sum(topdir, params, guest_ver, resultsdir, test): line += "%20d|" % 0 for tag in sum_marks: headline += "%20s|" % tag - line += "%s|" % format_result(sum_matrix[tag]) + line += f"{format_result(sum_matrix[tag])}|" - result_file.write("%s\n" % headline.rstrip("|")) - result_file.write("%s\n" % line.rstrip("|")) + result_file.write("{}\n".format(headline.rstrip("|"))) + result_file.write("{}\n".format(line.rstrip("|"))) if no_table_results: no_table_order = params.get("no_table_order", "").split() @@ -509,6 +519,6 @@ def result_sum(topdir, params, guest_ver, resultsdir, test): no_table_order = no_table_results.keys() no_table_order.sort() for item in no_table_order: - result_file.write("%s: %s\n" % (item, no_table_results[item])) + result_file.write(f"{item}: {no_table_results[item]}\n") result_file.close() diff --git a/qemu/tests/physical_resources_check.py b/qemu/tests/physical_resources_check.py index d2c0ba9316..4d607e342e 100644 --- a/qemu/tests/physical_resources_check.py +++ b/qemu/tests/physical_resources_check.py @@ -1,16 +1,17 @@ -import re import random +import re import string from avocado.utils import process - -from virttest import error_context -from virttest import qemu_monitor -from virttest import storage -from virttest import utils_misc -from virttest import env_process -from virttest import data_dir -from virttest import qemu_qtree +from virttest import ( + data_dir, + env_process, + error_context, + qemu_monitor, + qemu_qtree, + storage, + utils_misc, +) @error_context.context_aware @@ -28,23 +29,24 @@ def run(test, params, env): :param params: Dictionary with the test parameters. :param env: Dictionary with test environment. 
""" + # Define a function for checking number of hard drivers & NICs def check_num(devices, info_cmd, check_str): f_fail = [] expected_num = params.objects(devices).__len__() o = "" try: - o = vm.monitor.human_monitor_cmd("info %s " % info_cmd) + o = vm.monitor.human_monitor_cmd(f"info {info_cmd} ") except qemu_monitor.MonitorError as e: fail_log = str(e) + "\n" - fail_log += "info/query monitor command failed (%s)" % info_cmd + fail_log += f"info/query monitor command failed ({info_cmd})" f_fail.append(fail_log) test.log.error(fail_log) - ovmf_fd_num = o.count('%s.fd' % check_str) # Exclude ovmf fd drive + ovmf_fd_num = o.count(f"{check_str}.fd") # Exclude ovmf fd drive actual_num = o.count(check_str) - ovmf_fd_num if expected_num != actual_num: - fail_log = "%s number mismatch:\n" % str(devices) + fail_log = f"{str(devices)} number mismatch:\n" fail_log += " Assigned to VM: %d\n" % expected_num fail_log += " Reported by OS: %d" % actual_num f_fail.append(fail_log) @@ -61,10 +63,10 @@ def chk_fmt_model(device, fmt_model, info_cmd, regexp): expected = "rtl8139" o = "" try: - o = vm.monitor.human_monitor_cmd("info %s" % info_cmd) + o = vm.monitor.human_monitor_cmd(f"info {info_cmd}") except qemu_monitor.MonitorError as e: fail_log = str(e) + "\n" - fail_log += "info/query monitor command failed (%s)" % info_cmd + fail_log += f"info/query monitor command failed ({info_cmd})" f_fail.append(fail_log) test.log.error(fail_log) @@ -76,9 +78,9 @@ def chk_fmt_model(device, fmt_model, info_cmd, regexp): found = True if not found: - fail_log = "%s model mismatch:\n" % str(device) - fail_log += " Assigned to VM: %s\n" % expected - fail_log += " Reported by OS: %s" % device_found + fail_log = f"{str(device)} model mismatch:\n" + fail_log += f" Assigned to VM: {expected}\n" + fail_log += f" Reported by OS: {device_found}" f_fail.append(fail_log) test.log.error(fail_log) return f_fail @@ -89,9 +91,9 @@ def verify_device(expect, name, verify_cmd): if verify_cmd: actual = session.cmd_output(verify_cmd) if not re.findall(expect, actual, re.I): - fail_log = "%s mismatch:\n" % name - fail_log += " Assigned to VM: %s\n" % expect.upper() - fail_log += " Reported by OS: %s" % actual + fail_log = f"{name} mismatch:\n" + fail_log += f" Assigned to VM: {expect.upper()}\n" + fail_log += f" Reported by OS: {actual}" f_fail.append(fail_log) test.log.error(fail_log) return f_fail @@ -106,18 +108,18 @@ def get_cpu_number(chk_type, chk_timeout): :return: Actual number of guest cpu number. """ chk_str = params["mem_chk_re_str"] - chk_cmd = params.get("cpu_%s_chk_cmd" % chk_type) + chk_cmd = params.get(f"cpu_{chk_type}_chk_cmd") if chk_cmd is None: - fail_log = "Unknown cpu number checking type: '%s'" % chk_type + fail_log = f"Unknown cpu number checking type: '{chk_type}'" test.log.error(fail_log) return -1 s, output = session.cmd_status_output(chk_cmd, timeout=chk_timeout) num = re.findall(chk_str, output) if s != 0 or not num: - fail_log = "Failed to get guest %s number, " % chk_type - fail_log += "guest output: '%s'" % output + fail_log = f"Failed to get guest {chk_type} number, " + fail_log += f"guest output: '{output}'" test.log.error(fail_log) return -2 @@ -137,13 +139,13 @@ def check_cpu_number(chk_type, actual_n, expected_n): f_fail = [] if actual_n == -1: - fail_log = "Unknown cpu number checking type: '%s'" % chk_type + fail_log = f"Unknown cpu number checking type: '{chk_type}'" test.log.error(fail_log) f_fail.append(fail_log) return f_fail if actual_n == -2: - fail_log = "Failed to get guest %s number." 
% chk_type + fail_log = f"Failed to get guest {chk_type} number." test.log.error(fail_log) f_fail.append(fail_log) return f_fail @@ -151,15 +153,19 @@ def check_cpu_number(chk_type, actual_n, expected_n): test.log.info("CPU %s number check", chk_type.capitalize()) if actual_n != expected_n: - fail_log = "%s output mismatch:\n" % chk_type.capitalize() - fail_log += " Assigned to VM: '%s'\n" % expected_n - fail_log += " Reported by OS: '%s'" % actual_n + fail_log = f"{chk_type.capitalize()} output mismatch:\n" + fail_log += f" Assigned to VM: '{expected_n}'\n" + fail_log += f" Reported by OS: '{actual_n}'" f_fail.append(fail_log) test.log.error(fail_log) return f_fail - test.log.debug("%s check pass. Expected: '%s', Actual: '%s'", - chk_type.capitalize(), expected_n, actual_n) + test.log.debug( + "%s check pass. Expected: '%s', Actual: '%s'", + chk_type.capitalize(), + expected_n, + actual_n, + ) return f_fail def verify_machine_type(): @@ -174,10 +180,11 @@ def verify_machine_type(): if status != 0: test.error("Failed to get machine type from vm") - machine_type_cmd = "%s -M ?" % utils_misc.get_qemu_binary(params) + machine_type_cmd = f"{utils_misc.get_qemu_binary(params)} -M ?" machine_types = process.system_output( - machine_type_cmd, ignore_status=True).decode() - machine_types = machine_types.split(':')[-1] + machine_type_cmd, ignore_status=True + ).decode() + machine_types = machine_types.split(":")[-1] machine_type_map = {} for machine_type in machine_types.splitlines(): if not machine_type: @@ -186,23 +193,30 @@ def verify_machine_type(): if len(type_pair) == 1 and len(type_pair[0]) == 2: machine_type_map[type_pair[0][0]] = type_pair[0][1] else: - test.log.warn("Unexpect output from qemu-kvm -M " - "?: '%s'", machine_type) + test.log.warning( + "Unexpect output from qemu-kvm -M " "?: '%s'", machine_type + ) try: - expect_mtype = machine_type_map[params['machine_type']].strip() + expect_mtype = machine_type_map[params["machine_type"]].strip() except KeyError: - test.log.warn("Can not find machine type '%s' from qemu-kvm -M ?" - " output. Skip this test.", params['machine_type']) + test.log.warning( + "Can not find machine type '%s' from qemu-kvm -M ?" + " output. Skip this test.", + params["machine_type"], + ) return f_fail if expect_mtype not in actual_mtype: - fail_log += " Assigned to VM: '%s' \n" % expect_mtype - fail_log += " Reported by OS: '%s'" % actual_mtype + fail_log += f" Assigned to VM: '{expect_mtype}' \n" + fail_log += f" Reported by OS: '{actual_mtype}'" f_fail.append(fail_log) test.log.error(fail_log) else: - test.log.info("MachineType check pass. Expected: %s, Actual: %s", - expect_mtype, actual_mtype) + test.log.info( + "MachineType check pass. 
Expected: %s, Actual: %s", + expect_mtype, + actual_mtype, + ) return f_fail if params.get("catch_serial_cmd") is not None: @@ -229,13 +243,15 @@ def verify_machine_type(): qtree = qemu_qtree.QtreeContainer() try: - qtree.parse_info_qtree(vm.monitor.info('qtree')) + qtree.parse_info_qtree(vm.monitor.info("qtree")) except AttributeError: # monitor doesn't support info qtree qtree = None test.log.info("Starting physical resources check test") - test.log.info("Values assigned to VM are the values we expect " - "to see reported by the Operating System") + test.log.info( + "Values assigned to VM are the values we expect " + "to see reported by the Operating System" + ) # Define a failure counter, as we want to check all physical # resources to know which checks passed and which ones failed n_fail = [] @@ -251,25 +267,27 @@ def verify_machine_type(): cpu_threads_num = get_cpu_number("threads", chk_timeout) cpu_sockets_num = get_cpu_number("sockets", chk_timeout) - if ((params.get("os_type") == 'windows') and cpu_cores_num > 0 and - cpu_lp_num > 0 and cpu_sockets_num > 0): + if ( + (params.get("os_type") == "windows") + and cpu_cores_num > 0 + and cpu_lp_num > 0 + and cpu_sockets_num > 0 + ): actual_cpu_nr = cpu_lp_num * cpu_sockets_num cpu_threads_num = cpu_lp_num / cpu_cores_num if vm.cpuinfo.smp != actual_cpu_nr: fail_log = "CPU count mismatch:\n" - fail_log += " Assigned to VM: %s \n" % vm.cpuinfo.smp - fail_log += " Reported by OS: %s" % actual_cpu_nr + fail_log += f" Assigned to VM: {vm.cpuinfo.smp} \n" + fail_log += f" Reported by OS: {actual_cpu_nr}" n_fail.append(fail_log) test.log.error(fail_log) n_fail.extend(check_cpu_number("cores", cpu_cores_num, vm.cpuinfo.cores)) - n_fail.extend(check_cpu_number("threads", - cpu_threads_num, vm.cpuinfo.threads)) + n_fail.extend(check_cpu_number("threads", cpu_threads_num, vm.cpuinfo.threads)) - n_fail.extend(check_cpu_number("sockets", - cpu_sockets_num, vm.cpuinfo.sockets)) + n_fail.extend(check_cpu_number("sockets", cpu_sockets_num, vm.cpuinfo.sockets)) # Check the cpu vendor_id expected_vendor_id = params.get("cpu_model_vendor") @@ -279,8 +297,8 @@ def verify_machine_type(): if expected_vendor_id not in output: fail_log = "CPU vendor id check failed.\n" - fail_log += " Assigned to VM: '%s'\n" % expected_vendor_id - fail_log += " Reported by OS: '%s'" % output + fail_log += f" Assigned to VM: '{expected_vendor_id}'\n" + fail_log += f" Reported by OS: '{output}'" n_fail.append(fail_log) test.log.error(fail_log) @@ -290,11 +308,11 @@ def verify_machine_type(): vm_mem_limit = params.get("vm_mem_limit") actual_mem = vm.get_memory_size() if vm_mem_limit: - error_context.context("Skip memory checking %s" % vm_mem_limit, test.log.info) + error_context.context(f"Skip memory checking {vm_mem_limit}", test.log.info) elif actual_mem != expected_mem: fail_log = "Memory size mismatch:\n" - fail_log += " Assigned to VM: %s\n" % expected_mem - fail_log += " Reported by OS: %s\n" % actual_mem + fail_log += f" Assigned to VM: {expected_mem}\n" + fail_log += f" Reported by OS: {actual_mem}\n" n_fail.append(fail_log) test.log.error(fail_log) @@ -312,20 +330,22 @@ def verify_machine_type(): if qtree is not None: error_context.context("Images params check", test.log.info) - test.log.debug("Found devices: %s", params.objects('images')) + test.log.debug("Found devices: %s", params.objects("images")) qdisks = qemu_qtree.QtreeDisksContainer(qtree.get_nodes()) - disk_errors = sum(qdisks.parse_info_block( - vm.monitor.info_block())) + disk_errors = 
sum(qdisks.parse_info_block(vm.monitor.info_block())) disk_errors += qdisks.generate_params() disk_errors += qdisks.check_disk_params(params) if disk_errors: - disk_errors = ("Images check failed with %s errors, " - "check the log for details" % disk_errors) + disk_errors = ( + f"Images check failed with {disk_errors} errors, " + "check the log for details" + ) test.log.error(disk_errors) n_fail.append("\n".join(qdisks.errors)) else: - test.log.info("Images check param skipped (qemu monitor doesn't " - "support 'info qtree')") + test.log.info( + "Images check param skipped (qemu monitor doesn't " "support 'info qtree')" + ) error_context.context("Network card MAC check", test.log.info) o = "" @@ -344,20 +364,18 @@ def verify_machine_type(): mac = vm.get_mac_address(nic_index) if mac.lower() not in found_mac_addresses: fail_log = "MAC address mismatch:\n" - fail_log += " Assigned to VM (not found): %s" % mac + fail_log += f" Assigned to VM (not found): {mac}" n_fail.append(fail_log) test.log.error(fail_log) error_context.context("UUID check", test.log.info) if vm.get_uuid(): - f_fail = verify_device(vm.get_uuid(), "UUID", - params.get("catch_uuid_cmd")) + f_fail = verify_device(vm.get_uuid(), "UUID", params.get("catch_uuid_cmd")) n_fail.extend(f_fail) error_context.context("Hard Disk serial number check", test.log.info) catch_serial_cmd = params.get("catch_serial_cmd") - f_fail = verify_device(params.get("drive_serial"), "Serial", - catch_serial_cmd) + f_fail = verify_device(params.get("drive_serial"), "Serial", catch_serial_cmd) n_fail.extend(f_fail) error_context.context("Machine Type Check", test.log.info) @@ -366,8 +384,10 @@ def verify_machine_type(): if n_fail: session.close() - test.fail("Physical resources check test " - "reported %s failures:\n%s" % - (len(n_fail), "\n".join(n_fail))) + test.fail( + "Physical resources check test " "reported {} failures:\n{}".format( + len(n_fail), "\n".join(n_fail) + ) + ) session.close() diff --git a/qemu/tests/ping_kill_test.py b/qemu/tests/ping_kill_test.py index fbe0f7b0e9..9c11601dac 100644 --- a/qemu/tests/ping_kill_test.py +++ b/qemu/tests/ping_kill_test.py @@ -1,11 +1,8 @@ import time import aexpect - from avocado.utils import process - -from virttest import error_context -from virttest import utils_net +from virttest import error_context, utils_net @error_context.context_aware @@ -19,15 +16,16 @@ def run(test, params, env): :param params: Dictionary with the test parameters :param env: Dictionary with test environment. """ + def kill_and_check(vm): """ Kill the vm and check vm is dead """ qemu_pid = vm.get_pid() - cmd = "kill -9 %s" % qemu_pid + cmd = f"kill -9 {qemu_pid}" process.system(cmd) if not vm.wait_until_dead(timeout=10): - test.fail("VM is not dead, 10s after '%s' sent." 
% cmd) + test.fail(f"VM is not dead, 10s after '{cmd}' sent.") test.log.info("Vm is dead as expected") def guest_ping(session, dst_ip, count=None): @@ -42,18 +40,18 @@ def guest_ping(session, dst_ip, count=None): ping_cmd = "ping" if os_type == "linux": if count: - ping_cmd += " -c %s" % count - ping_cmd += " -s %s %s" % (packetsize, dst_ip) + ping_cmd += f" -c {count}" + ping_cmd += f" -s {packetsize} {dst_ip}" else: if not count: ping_cmd += " -t " - ping_cmd += " -l %s %s" % (packetsize, dst_ip) + ping_cmd += f" -l {packetsize} {dst_ip}" try: test.log.debug("Ping dst vm with cmd: '%s'", ping_cmd) test_runner(ping_cmd) except aexpect.ShellTimeoutError as err: if count: - test.error("Error during ping guest ip, %s" % err) + test.error(f"Error during ping guest ip, {err}") def ping_is_alive(session): """ @@ -61,7 +59,7 @@ def ping_is_alive(session): else return False """ os_type = params.get("os_type") - if os_type == 'linux': + if os_type == "linux": return not session.cmd_status("pidof ping") else: return not session.cmd_status("tasklist | findstr /I ping.exe") @@ -72,7 +70,7 @@ def manage_guest_nic(session, ifname, disabled=True): """ os_type = params.get("os_type", "linux") if os_type == "linux": - shut_down_cmd = "ifconfig %s " % ifname + shut_down_cmd = f"ifconfig {ifname} " if disabled: shut_down_cmd += " down" else: @@ -104,18 +102,17 @@ def manage_guest_nic(session, ifname, disabled=True): error_context.context("Ping dst guest", test.log.info) guest_ping(session, dst_ip, count=4) - error_context.context("Disable the dst guest nic interface", - test.log.info) + error_context.context("Disable the dst guest nic interface", test.log.info) macaddress = dst_vm.get_mac_address() if params.get("os_type") == "linux": ifname = utils_net.get_linux_ifname(session_serial, macaddress) else: - ifname = utils_net.get_windows_nic_attribute(session_serial, - "macaddress", macaddress, "netconnectionid") + ifname = utils_net.get_windows_nic_attribute( + session_serial, "macaddress", macaddress, "netconnectionid" + ) manage_guest_nic(session_serial, ifname) - error_context.context("Ping dst guest after disabling it's nic", - test.log.info) + error_context.context("Ping dst guest after disabling it's nic", test.log.info) ping_timeout = float(params.get("ping_timeout", 21600)) guest_ping(session, dst_ip) # This test need do infinite ping for a long time(6h) @@ -126,7 +123,7 @@ def manage_guest_nic(session, ifname, disabled=True): if not ping_is_alive(check_sess): test.cancel("Ping process is not alive") except Exception as err: - test.error("Check ping status error '%s'" % err) + test.error(f"Check ping status error '{err}'") else: time.sleep(60) diff --git a/qemu/tests/pktgen.py b/qemu/tests/pktgen.py index f791925919..bbd608f97d 100644 --- a/qemu/tests/pktgen.py +++ b/qemu/tests/pktgen.py @@ -1,17 +1,10 @@ +import os import re import time -import os import aexpect - from avocado.utils import process - -from virttest import error_context -from virttest import remote -from virttest import data_dir -from virttest import utils_net -from virttest import utils_test -from virttest import utils_misc +from virttest import data_dir, error_context, remote, utils_misc, utils_net, utils_test @error_context.context_aware @@ -57,14 +50,18 @@ def run(test, params, env): runner = server_session.cmd pktgen_ip = vm_pktgen.get_address() pktgen_mac = vm_pktgen.get_mac_address() - server_interface = utils_net.get_linux_ifname(server_session, - pktgen_mac) + server_interface = utils_net.get_linux_ifname(server_session, 
pktgen_mac) # pktgen server is a external host assigned elif re.match(r"((\d){1,3}\.){3}(\d){1,3}", pktgen_server): pktgen_ip = pktgen_server - server_session = remote.wait_for_login(s_shell_client, pktgen_ip, - s_shell_port, s_username, - s_passwd, s_shell_prompt) + server_session = remote.wait_for_login( + s_shell_client, + pktgen_ip, + s_shell_port, + s_username, + s_passwd, + s_shell_prompt, + ) runner = server_session.cmd server_interface = params.get("server_interface") if not server_interface: @@ -78,18 +75,16 @@ def run(test, params, env): runner = process.system # copy pktgen_test scipt to the test server. - local_path = os.path.join(data_dir.get_root_dir(), - "shared/scripts/pktgen.sh") + local_path = os.path.join(data_dir.get_root_dir(), "shared/scripts/pktgen.sh") remote_path = "/tmp/pktgen.sh" - remote.scp_to_remote(pktgen_ip, s_shell_port, s_username, s_passwd, - local_path, remote_path) + remote.scp_to_remote( + pktgen_ip, s_shell_port, s_username, s_passwd, local_path, remote_path + ) error_context.context("Run pktgen test", test.log.info) run_threads = params.get("pktgen_threads", 1) pktgen_stress_timeout = float(params.get("pktgen_test_timeout", 600)) - exec_cmd = "%s %s %s %s %s" % (remote_path, vm.get_address(), - vm.get_mac_address(), - server_interface, run_threads) + exec_cmd = f"{remote_path} {vm.get_address()} {vm.get_mac_address()} {server_interface} {run_threads}" try: env["pktgen_run"] = True try: @@ -97,30 +92,33 @@ def run(test, params, env): # backgroud process, can set run flag to False to stop this case. start_time = time.time() stop_time = start_time + pktgen_stress_timeout - while (env["pktgen_run"] and time.time() < stop_time): + while env["pktgen_run"] and time.time() < stop_time: runner(exec_cmd, timeout=pktgen_stress_timeout) # using ping to kill the pktgen stress except aexpect.ShellTimeoutError: - session.cmd("ping %s" % pktgen_ip, ignore_all_errors=True) + session.cmd(f"ping {pktgen_ip}", ignore_all_errors=True) finally: env["pktgen_run"] = False - error_context.context("Verify Host and guest kernel no error " - "and call trace", test.log.info) + error_context.context( + "Verify Host and guest kernel no error " "and call trace", test.log.info + ) vm.verify_kernel_crash() utils_misc.verify_dmesg() error_context.context("Ping external host after pktgen test", test.log.info) session_ping = vm.wait_for_login(timeout=login_timeout) - status, output = utils_test.ping(dest=external_host, session=session_ping, - timeout=240, count=20) + status, output = utils_test.ping( + dest=external_host, session=session_ping, timeout=240, count=20 + ) loss_ratio = utils_test.get_loss_ratio(output) - if (loss_ratio > int(params.get("packet_lost_ratio", 5)) or - loss_ratio == -1): + if loss_ratio > int(params.get("packet_lost_ratio", 5)) or loss_ratio == -1: test.log.debug("Ping %s output: %s", external_host, output) - test.fail("Guest network connction unusable, " - "packet lost ratio is '%d%%'" % loss_ratio) + test.fail( + "Guest network connction unusable, " + "packet lost ratio is '%d%%'" % loss_ratio + ) if server_session: server_session.close() if session: diff --git a/qemu/tests/ple_test.py b/qemu/tests/ple_test.py index 2f5a8666e8..36a2eec011 100644 --- a/qemu/tests/ple_test.py +++ b/qemu/tests/ple_test.py @@ -1,10 +1,7 @@ import re -from avocado.utils import cpu -from avocado.utils import process -from virttest import env_process -from virttest import error_context -from virttest import utils_package +from avocado.utils import cpu, process +from virttest import 
env_process, error_context, utils_package @error_context.context_aware @@ -23,12 +20,13 @@ def run(test, params, env): :param params: Dictionary with the test parameters :param env: Dictionary with test environment. """ + def reload_module(value): """ Reload module """ - process.system("rmmod %s" % module) - cmd = "modprobe %s %s=%s" % (module, mod_param, value) + process.system(f"rmmod {module}") + cmd = f"modprobe {module} {mod_param}={value}" process.system(cmd) def run_unixbench(cmd): @@ -37,13 +35,12 @@ def run_unixbench(cmd): """ error_context.context("Run unixbench inside guest", test.log.info) output = session.cmd_output_safe(cmd, timeout=4800) - scores = re.findall(r"System Benchmarks Index Score\s+(\d+\.?\d+)", - output) + scores = re.findall(r"System Benchmarks Index Score\s+(\d+\.?\d+)", output) return [float(i) for i in scores] module = params["module_name"] mod_param = params["mod_param"] - read_cmd = "cat /sys/module/%s/parameters/%s" % (module, mod_param) + read_cmd = f"cat /sys/module/{module}/parameters/{mod_param}" origin_ple = process.getoutput(read_cmd) error_context.context("Enable ple on host if it's disabled", test.log.info) if origin_ple == 0: @@ -74,11 +71,11 @@ def run_unixbench(cmd): session = vm.wait_for_login() scores_off = run_unixbench(cmd) test.log.info("Unixbench scores are %s when ple is off", scores_off) - scores_off = [x*0.96 for x in scores_off] + scores_off = [x * 0.96 for x in scores_off] if scores_on[0] < scores_off[0] or scores_on[1] < scores_off[1]: test.fail("Scores is much lower when ple is on than off") finally: - session.cmd_output_safe("rm -rf %s" % params["unixbench_dir"]) + session.cmd_output_safe("rm -rf {}".format(params["unixbench_dir"])) session.close() vm.destroy() reload_module(origin_ple) diff --git a/qemu/tests/plug_cdrom.py b/qemu/tests/plug_cdrom.py index b693e7316a..ce5a9cebf2 100644 --- a/qemu/tests/plug_cdrom.py +++ b/qemu/tests/plug_cdrom.py @@ -1,13 +1,11 @@ import ast import re -from virttest import error_context -from virttest import utils_test -from virttest import utils_misc +from virttest import error_context, utils_misc, utils_test from provider.block_devices_plug import BlockDevicesPlug -HOTPLUG, UNPLUG = ('hotplug', 'unplug') +HOTPLUG, UNPLUG = ("hotplug", "unplug") @error_context.context_aware @@ -33,11 +31,13 @@ def run(test, params, env): :param params: Dictionary with the test parameters. :param env: Dictionary with test environment. """ + def _shutdown_vm(session): """Shutdown vm.""" shutdown_command = params["shutdown_cmd"] error_context.context( - "Shutting down VM by \"%s\"." % shutdown_command, test.log.info) + f'Shutting down VM by "{shutdown_command}".', test.log.info + ) session.sendline(shutdown_command) if not vm.wait_for_shutdown(): test.fail("Failed to shutdown vm.") @@ -50,45 +50,50 @@ def _reboot_vm(session): def _check_cdrom_info_by_qmp(items): """Check the cdrom device info by qmp.""" error_context.context( - 'Check if the info \"%s\" are match with the output of query-block.' % - str(items), test.log.info) + f'Check if the info "{str(items)}" are match with the output of query-block.', + test.log.info, + ) blocks = vm.monitor.info_block() for key, val in items.items(): - if (key == 'device' and val == dev_id) or blocks[dev_id][key] == val: + if (key == "device" and val == dev_id) or blocks[dev_id][key] == val: continue - test.fail( - 'No such \"%s: %s\" in the output of query-block.' 
% (key, val)) + test.fail(f'No such "{key}: {val}" in the output of query-block.') def _check_cdrom_info_by_guest(): """Check cdrom info inside guest.""" - test.log.info('Check if the file \"%s\" is in the cdrom.', iso_name) - cmd_map = {'linux': 'mount /dev/sr{0} /mnt && ls /mnt && umount /mnt', - 'windows': 'dir {0}:\\'} - cd_exp_map = {'linux': r'sr([0-9])', 'windows': r'(\w):'} - get_cd_map = {'linux': 'lsblk -nb', - 'windows': 'wmic logicaldisk where (Description=' - '\'CD-ROM Disc\') get DeviceID'} - letters = utils_misc.wait_for(lambda: - re.findall(cd_exp_map[os_type], - session.cmd(get_cd_map[os_type]), - re.M), 3) + test.log.info('Check if the file "%s" is in the cdrom.', iso_name) + cmd_map = { + "linux": "mount /dev/sr{0} /mnt && ls /mnt && umount /mnt", + "windows": "dir {0}:\\", + } + cd_exp_map = {"linux": r"sr([0-9])", "windows": r"(\w):"} + get_cd_map = { + "linux": "lsblk -nb", + "windows": "wmic logicaldisk where (Description=" + "'CD-ROM Disc') get DeviceID", + } + letters = utils_misc.wait_for( + lambda: re.findall( + cd_exp_map[os_type], session.cmd(get_cd_map[os_type]), re.M + ), + 3, + ) if not letters: - test.error('No available CD-ROM devices') + test.error("No available CD-ROM devices") for index in range(len(cdroms)): - if iso_name in session.cmd(cmd_map[os_type].format( - letters[index])).lower(): + if iso_name in session.cmd(cmd_map[os_type].format(letters[index])).lower(): break else: - test.fail('No such the file \"%s\" in cdrom.' % iso_name) + test.fail(f'No such the file "{iso_name}" in cdrom.') def _check_cdrom_info(items): _check_cdrom_info_by_qmp(items) _check_cdrom_info_by_guest() - os_type = params['os_type'] - cdroms = params['cdroms'].split() - is_windows = os_type == 'windows' - action = HOTPLUG if params.get('do_hotplug', 'no') == 'yes' else UNPLUG + os_type = params["os_type"] + cdroms = params["cdroms"].split() + is_windows = os_type == "windows" + action = HOTPLUG if params.get("do_hotplug", "no") == "yes" else UNPLUG vm = env.get_vm(params["main_vm"]) vm.verify_alive() @@ -96,19 +101,20 @@ def _check_cdrom_info(items): if is_windows: session = utils_test.qemu.windrv_check_running_verifier( - session, vm, test, params['driver_name'], 300) + session, vm, test, params["driver_name"], 300 + ) plug = BlockDevicesPlug(vm) for cdrom in cdroms: cdrom_params = params.object_params(cdrom) if cdrom_params["cd_format"] == "ide": test.cancel("Hot-plug cd_format IDE not available, skipped") - items_checked = ast.literal_eval(cdrom_params.get('items_checked')) - dev_id = items_checked['device'] - iso_name = cdrom_params.get('iso_name') + items_checked = ast.literal_eval(cdrom_params.get("items_checked")) + dev_id = items_checked["device"] + iso_name = cdrom_params.get("iso_name") if action == UNPLUG: _check_cdrom_info(items_checked) - getattr(plug, '%s_devs_serial' % action)(cdrom) + getattr(plug, f"{action}_devs_serial")(cdrom) if action == HOTPLUG: _check_cdrom_info(items_checked) diff --git a/qemu/tests/power_htm.py b/qemu/tests/power_htm.py index f9b98a182e..55ec1ed7f6 100644 --- a/qemu/tests/power_htm.py +++ b/qemu/tests/power_htm.py @@ -1,9 +1,7 @@ import re -from virttest import error_context -from virttest import utils_misc -from virttest import utils_package from avocado.utils import process +from virttest import error_context, utils_misc, utils_package @error_context.context_aware @@ -29,23 +27,23 @@ def run(test, params, env): for cmd in cmds: s, o = process.getstatusoutput(cmd, timeout=3600) if s: - test.error("Failed to run cmd '%s', output: %s" 
% (cmd, o)) + test.error(f"Failed to run cmd '{cmd}', output: {o}") error_context.context("Run htm unit test on host", test.log.info) s, o = process.getstatusoutput(params["run_htm_test"], timeout=3600) if s: - test.fail("Run htm unit test failed, output: %s" % o) + test.fail(f"Run htm unit test failed, output: {o}") # Make sure if host is available by do commands on host - status, output = process.getstatusoutput("rm -rf %s" - % params["htm_dir"]) + status, output = process.getstatusoutput("rm -rf {}".format(params["htm_dir"])) if status: - test.fail("Please check host's status: %s" % output) + test.fail(f"Please check host's status: {output}") utils_misc.verify_dmesg() else: check_exist_cmd = params["check_htm_env"] s, o = process.getstatusoutput(check_exist_cmd) if s: - test.error("Please check htm is supported or not by '%s', output: %s" - % (check_exist_cmd, o)) + test.error( + f"Please check htm is supported or not by '{check_exist_cmd}', output: {o}" + ) vm = env.get_vm(params["main_vm"]) session = vm.wait_for_login() pkgs = params["depends_pkgs"].split() @@ -53,12 +51,13 @@ def run(test, params, env): test.error("Install dependency packages failed") session.cmd(params["get_htm_dir"]) download_htm_demo = params["download_htm_demo"] - status = session.cmd_status("wget %s" % download_htm_demo) + status = session.cmd_status(f"wget {download_htm_demo}") if status: - test.error("Failed to download test file, please configure it in cfg : %s" - % download_htm_demo) + test.error( + f"Failed to download test file, please configure it in cfg : {download_htm_demo}" + ) else: status, output = session.cmd_status_output(params["test_htm_command"]) if not re.search(params["expected_htm_test_result"], output): - test.fail("Test failed and please check : %s" % output) + test.fail(f"Test failed and please check : {output}") vm.verify_kernel_crash() diff --git a/qemu/tests/power_nvram.py b/qemu/tests/power_nvram.py index bf0adc6346..82894de1b0 100644 --- a/qemu/tests/power_nvram.py +++ b/qemu/tests/power_nvram.py @@ -1,8 +1,6 @@ import re -from virttest import error_context -from virttest import virt_vm -from virttest import qemu_monitor +from virttest import error_context, qemu_monitor, virt_vm @error_context.context_aware @@ -18,18 +16,20 @@ def run(test, params, env): """ vm = env.get_vm(params["main_vm"]) - error_msg = params.get('nvram_expected_result') + error_msg = params.get("nvram_expected_result") nvram_sub_type = params.get("nvram_sub_type") if nvram_sub_type != "normal": try: vm.create(params=params) except virt_vm.VMCreateError as e: output = e.output - error_context.context("Check the expected error message: %s" - % error_msg, test.log.info) + error_context.context( + f"Check the expected error message: {error_msg}", test.log.info + ) if not re.search(error_msg, output): - test.fail("Can not get expected error message: %s from %s" - % (error_msg, output)) + test.fail( + f"Can not get expected error message: {error_msg} from {output}" + ) except qemu_monitor.MonitorConnectError: pass else: diff --git a/qemu/tests/ppc_change_smt.py b/qemu/tests/ppc_change_smt.py index e496ee65cf..e66a9589e3 100644 --- a/qemu/tests/ppc_change_smt.py +++ b/qemu/tests/ppc_change_smt.py @@ -20,21 +20,20 @@ def run(test, params, env): """ def _check_smt_state(value): - value = ("1" if value == "off" else str(threads) if value == "on" - else value) + value = "1" if value == "off" else str(threads) if value == "on" else value smt_info = session.cmd_output("ppc64_cpu --smt -n") - if not re.match(r"SMT=%s" % value, 
smt_info): + if not re.match(rf"SMT={value}", smt_info): test.log.info("smt info of guest: %s", smt_info) test.fail("The smt state is inconsistent with expected") test.log.info("smt state matched: %s", value) def _change_smt_state(value): try: - session.cmd("ppc64_cpu --smt=%s" % value) + session.cmd(f"ppc64_cpu --smt={value}") _check_smt_state(value) except ShellCmdError as err: test.log.error(str(err)) - test.error("Failed to change smt state of guest to %s." % value) + test.error(f"Failed to change smt state of guest to {value}.") def _smt_state(n_threads): for i in range(int(log(n_threads, 2)) + 1): @@ -47,18 +46,19 @@ def _smt_state(n_threads): session = vm.wait_for_login() threads = vm.cpuinfo.threads - error_context.context("Check if the number of threads on guest is equal to" - " SMP threads", test.log.info) + error_context.context( + "Check if the number of threads on guest is equal to" " SMP threads", + test.log.info, + ) _check_smt_state(threads) for state in _smt_state(threads): - error_context.context("Change the guest's smt state to %s" % state, - test.log.info) + error_context.context(f"Change the guest's smt state to {state}", test.log.info) _change_smt_state(state) - cpu_count = (threads if state == "on" else 1 if state == "off" - else int(state)) - error_context.context("Check if the online CPU per core is equal to %s" - % cpu_count, test.log.info) + cpu_count = threads if state == "on" else 1 if state == "off" else int(state) + error_context.context( + f"Check if the online CPU per core is equal to {cpu_count}", test.log.info + ) for core_info in session.cmd_output("ppc64_cpu --info").splitlines(): if cpu_count != core_info.count("*"): test.log.info("core_info:\n%s", core_info) diff --git a/qemu/tests/ppc_check_cpu_and_mmu.py b/qemu/tests/ppc_check_cpu_and_mmu.py index a2541b5b7c..15fc288a36 100644 --- a/qemu/tests/ppc_check_cpu_and_mmu.py +++ b/qemu/tests/ppc_check_cpu_and_mmu.py @@ -17,11 +17,13 @@ def run(test, params, env): :param params: the test params. :param env: test environment. 
""" + def get_cpu_mmu(session=None): cmd_func = session.cmd if session else process.getoutput cpu_info = cmd_func("tail -n 11 /proc/cpuinfo") - cpu_info = re.findall(r"(?:cpu\s+:\s+(\w+\d+)).*(?:MMU\s+:\s+(\w+))", - cpu_info, re.S) + cpu_info = re.findall( + r"(?:cpu\s+:\s+(\w+\d+)).*(?:MMU\s+:\s+(\w+))", cpu_info, re.S + ) if cpu_info: return cpu_info[0] test.error("Unable to get the CPU information of this system.") @@ -30,15 +32,17 @@ def get_cpu_mmu(session=None): vm.verify_alive() guest_session = vm.wait_for_login() - error_context.base_context("Get CPU information of host and guest.", - test.log.info) + error_context.base_context("Get CPU information of host and guest.", test.log.info) host_cpu_model, host_mmu_mode = get_cpu_mmu() guest_cpu_model, guest_mmu_mode = get_cpu_mmu(guest_session) - error_context.context("Assert CPU model and MMU mode of host and guest.", - test.log.info) - assert (guest_cpu_model == host_cpu_model), ("The CPU model of the host " - "and guest do not match") - assert (guest_mmu_mode == host_mmu_mode), ("The MMU mode of the host and " - "guest do not match") + error_context.context( + "Assert CPU model and MMU mode of host and guest.", test.log.info + ) + assert guest_cpu_model == host_cpu_model, ( + "The CPU model of the host " "and guest do not match" + ) + assert guest_mmu_mode == host_mmu_mode, ( + "The MMU mode of the host and " "guest do not match" + ) test.log.info("CPU model and MMU mode of host and guest are matched.") diff --git a/qemu/tests/ppc_ic_mode_check.py b/qemu/tests/ppc_ic_mode_check.py index 219ab673c1..3b7f5fc37a 100644 --- a/qemu/tests/ppc_ic_mode_check.py +++ b/qemu/tests/ppc_ic_mode_check.py @@ -26,8 +26,9 @@ def run(test, params, env): try: vm.create(params=params) except VMCreateError as e: - if re.search(r"kernel_irqchip requested but unavailable|" - r"XIVE-only machines", e.output): + if re.search( + r"kernel_irqchip requested but unavailable|" r"XIVE-only machines", e.output + ): test.cancel(e.output) raise else: @@ -36,12 +37,11 @@ def run(test, params, env): error_context.context("Get irqchip and ic-mode information.", test.log.info) pic_o = vm.monitor.info("pic") - irqchip_match = re.search(r"^irqchip: %s" % kernel_irqchip, pic_o, re.M) - ic_mode_match = session.cmd_status("grep %s /proc/interrupts" - % ic_mode.upper()) == 0 + irqchip_match = re.search(rf"^irqchip: {kernel_irqchip}", pic_o, re.M) + ic_mode_match = session.cmd_status(f"grep {ic_mode.upper()} /proc/interrupts") == 0 error_context.context("Check wherever irqchip/ic-mode match.", test.log.info) if not irqchip_match: - test.fail("irqchip does not match to '%s'." % kernel_irqchip) + test.fail(f"irqchip does not match to '{kernel_irqchip}'.") elif not ic_mode_match: - test.fail("ic-mode does not match to '%s'." % ic_mode) + test.fail(f"ic-mode does not match to '{ic_mode}'.") diff --git a/qemu/tests/ppc_nested_compat.py b/qemu/tests/ppc_nested_compat.py index 1238d7388e..1d0e857a0a 100644 --- a/qemu/tests/ppc_nested_compat.py +++ b/qemu/tests/ppc_nested_compat.py @@ -19,19 +19,20 @@ def run(test, params, env): :param env: test environment. 
""" - params['start_vm'] = 'yes' - error_msg = params['error_msg'] - vm = env.get_vm(params['main_vm']) + params["start_vm"] = "yes" + error_msg = params["error_msg"] + vm = env.get_vm(params["main_vm"]) - error_context.base_context('Try to create a qemu instance...', test.log.info) + error_context.base_context("Try to create a qemu instance...", test.log.info) try: vm.create(params=params) except VMCreateError as e: if not re.search(error_msg, e.output): test.log.error(e.output) - test.error('The error message could not be searched at qemu ' - 'outputs.') - test.log.info('qemu terminated with the expected error message.') + test.error("The error message could not be searched at qemu " "outputs.") + test.log.info("qemu terminated with the expected error message.") else: - test.fail('The qemu instance should not be launched with ' - '"cap-nested-hv=on" and "max-cpu-compat=power8".') + test.fail( + "The qemu instance should not be launched with " + '"cap-nested-hv=on" and "max-cpu-compat=power8".' + ) diff --git a/qemu/tests/pvpanic.py b/qemu/tests/pvpanic.py index 9a906ef866..6e45a9a33c 100644 --- a/qemu/tests/pvpanic.py +++ b/qemu/tests/pvpanic.py @@ -2,13 +2,10 @@ import random import aexpect - -from virttest import error_context -from virttest import utils_test -from virttest import utils_misc from avocado.utils.wait import wait_for +from virttest import error_context, utils_misc, utils_test -LOG_JOB = logging.getLogger('avocado.test') +LOG_JOB = logging.getLogger("avocado.test") def setup_test_environment(test, params, vm, session): @@ -27,16 +24,14 @@ def setup_test_environment(test, params, vm, session): timeout = int(params.get("timeout", 360)) if params.get("os_type") == "linux": # stop kdump service and enable unknown_nmi_panic - setup_cmds = [params.get("set_kdump_cmd"), - params.get("set_panic_cmd")] + setup_cmds = [params.get("set_kdump_cmd"), params.get("set_panic_cmd")] else: # modify the register for windows setup_cmds = [params.get("set_panic_cmd")] for cmd in setup_cmds: status, output = session.cmd_status_output(cmd, timeout) if status: - test.error("Command '%s' failed, status: %s, output: %s" % - (cmd, status, output)) + test.error(f"Command '{cmd}' failed, status: {status}, output: {output}") if params.get("os_type") == "windows": vm.reboot(session, timeout=timeout) @@ -52,6 +47,7 @@ def check_qmp_events(vm, event_names, timeout=360): :return: True if one of the events given by `event_names` appeared, otherwise None """ + def _do_check(vm, event_names): for name in event_names: if vm.monitor.get_event(name): @@ -60,8 +56,7 @@ def _do_check(vm, event_names): return True return False - LOG_JOB.info("Try to get qmp events %s in %s seconds!", - event_names, timeout) + LOG_JOB.info("Try to get qmp events %s in %s seconds!", event_names, timeout) return wait_for(lambda: _do_check(vm, event_names), timeout, 5, 5) @@ -87,20 +82,26 @@ def trigger_crash(test, vm, params): cmd = params["notmyfault_cmd"] % random.randint(1, 8) notmyfault_cmd = utils_misc.set_winutils_letter(session, cmd) try: - status, output = session.cmd_status_output(cmd=notmyfault_cmd, - timeout=timeout) + status, output = session.cmd_status_output( + cmd=notmyfault_cmd, timeout=timeout + ) if status: - test.error("Command '%s' failed, status: %s, output: %s" % - (cmd, status, output)) + test.error( + f"Command '{cmd}' failed, status: {status}, output: {output}" + ) # notmyfault_app triggers BSOD of the guest, and it terminates # qemu process, so sometimes, it can not get the status of the cmd. 
- except (aexpect.ShellTimeoutError, - aexpect.ShellProcessTerminatedError, - aexpect.ShellStatusError): + except ( + aexpect.ShellTimeoutError, + aexpect.ShellProcessTerminatedError, + aexpect.ShellStatusError, + ): pass else: - test.cancel("Crash trigger method %s not supported, " - "please check cfg file for mistake." % crash_method) + test.cancel( + f"Crash trigger method {crash_method} not supported, " + "please check cfg file for mistake." + ) PVPANIC_PANICKED = 1 @@ -135,13 +136,13 @@ def run(test, params, env): vm.verify_alive() session = vm.wait_for_login(timeout=timeout) if params.get("os_type") == "windows": - error_context.context("Check if the driver is installed and " - "verified", test.log.info) + error_context.context( + "Check if the driver is installed and " "verified", test.log.info + ) driver_name = params.get("driver_name", "pvpanic") - session = utils_test.qemu.windrv_check_running_verifier(session, vm, - test, - driver_name, - timeout) + session = utils_test.qemu.windrv_check_running_verifier( + session, vm, test, driver_name, timeout + ) check_empty = False if with_events: if debug_type == 2: diff --git a/qemu/tests/pvpanic_event_check.py b/qemu/tests/pvpanic_event_check.py index fdd980a128..8cb9f558d9 100644 --- a/qemu/tests/pvpanic_event_check.py +++ b/qemu/tests/pvpanic_event_check.py @@ -1,7 +1,5 @@ import aexpect - -from virttest import error_context -from virttest import utils_misc +from virttest import error_context, utils_misc @error_context.context_aware @@ -33,9 +31,7 @@ def run(test, params, env): check_ISA_cmd = params["check_ISA_cmd"] device_cmd = params["device_cmd"] - error_context.context( - "Setup crash_kexec_post_notifiers=1 in guest", test.log.info - ) + error_context.context("Setup crash_kexec_post_notifiers=1 in guest", test.log.info) session.cmd(setup_guest_cmd) session = vm.reboot(session) s, o = session.cmd_status_output(check_kexec_cmd) @@ -44,13 +40,14 @@ def run(test, params, env): error_context.context("Check kdump server status in guest", test.log.info) if not utils_misc.wait_for( - lambda: session.cmd_output(check_kdump_service).startswith( - kdump_expect_status - ), timeout=20, first=0.0, step=5.0 + lambda: session.cmd_output(check_kdump_service).startswith(kdump_expect_status), + timeout=20, + first=0.0, + step=5.0, ): test.fail( - "Kdump service did not reach %s status " - "within the timeout period" % kdump_expect_status + f"Kdump service did not reach {kdump_expect_status} status " + "within the timeout period" ) error_context.context("Check ISA Bridge in the guest", test.log.info) @@ -60,13 +57,11 @@ def run(test, params, env): o = session.cmd_output(device_cmd) if o.strip() != params["expected_cap"]: test.fail( - "The capability value of the Pvpanic device is %s, " % o + - "while %s is expected" % params["expected_cap"] + f"The capability value of the Pvpanic device is {o}, " + + "while {} is expected".format(params["expected_cap"]) ) - error_context.context( - "Trigger a crash in guest and check qmp event", test.log.info - ) + error_context.context("Trigger a crash in guest and check qmp event", test.log.info) try: session.cmd(trigger_crash_cmd, timeout=5) except aexpect.ShellTimeoutError: @@ -75,6 +70,6 @@ def run(test, params, env): test.fail("Guest should crash.") finally: if vm.monitor.get_event(expect_event) is None: - test.fail("Not found expect event: %s" % expect_event) + test.fail(f"Not found expect event: {expect_event}") if session: session.close() diff --git a/qemu/tests/pvpanic_memory_leak.py 
b/qemu/tests/pvpanic_memory_leak.py index 0801f2e9f1..8d5878cc5f 100644 --- a/qemu/tests/pvpanic_memory_leak.py +++ b/qemu/tests/pvpanic_memory_leak.py @@ -1,5 +1,5 @@ -from virttest import error_context -from virttest import utils_test +from virttest import error_context, utils_test + from provider import win_driver_utils @@ -21,10 +21,12 @@ def run(test, params, env): vm = env.get_vm(params["main_vm"]) session = vm.wait_for_login() - error_context.context("Check if the driver is installed and " - "verified", test.log.info) + error_context.context( + "Check if the driver is installed and " "verified", test.log.info + ) session = utils_test.qemu.windrv_check_running_verifier( - session, vm, test, params["driver_name"]) + session, vm, test, params["driver_name"] + ) try: win_driver_utils.memory_leak_check(vm, test, params) diff --git a/qemu/tests/pxe_query_cpus.py b/qemu/tests/pxe_query_cpus.py index c168549d22..0ab2199773 100644 --- a/qemu/tests/pxe_query_cpus.py +++ b/qemu/tests/pxe_query_cpus.py @@ -1,29 +1,27 @@ +import logging import os import time -import logging import aexpect from avocado.utils import process +from virttest import cpu, env_process, error_context, qemu_monitor, utils_misc, virt_vm -from virttest import error_context -from virttest import utils_misc -from virttest import cpu -from virttest import qemu_monitor -from virttest import env_process -from virttest import virt_vm - -LOG_JOB = logging.getLogger('avocado.test') +LOG_JOB = logging.getLogger("avocado.test") @error_context.context_aware def _capture_tftp(test, vm, timeout): error_context.context("Snoop packet in the tap device", LOG_JOB.info) - output = aexpect.run_fg("tcpdump -nli %s port '(tftp or bootps)'" % vm.get_ifname(), - LOG_JOB.debug, "(pxe capture) ", timeout)[1] + output = aexpect.run_fg( + f"tcpdump -nli {vm.get_ifname()} port '(tftp or bootps)'", + LOG_JOB.debug, + "(pxe capture) ", + timeout, + )[1] error_context.context("Analyzing the tcpdump result", LOG_JOB.info) if "tftp" not in output: - test.fail("Couldn't find any TFTP packets after %s seconds" % timeout) + test.fail(f"Couldn't find any TFTP packets after {timeout} seconds") LOG_JOB.info("Found TFTP packet") @@ -34,7 +32,7 @@ def _kill_vms(params, env): env.unregister_vm(vm.name) qemu_bin = os.path.basename(params["qemu_binary"]) - process.run("killall -g %s" % qemu_bin, ignore_status=True) + process.run(f"killall -g {qemu_bin}", ignore_status=True) time.sleep(5) @@ -56,16 +54,16 @@ def run(test, params, env): error_context.context("Enable ept/npt", test.log.info) try: - flag = list(filter(lambda x: x in cpu.get_cpu_flags(), - ['ept', 'npt']))[0] + flag = list(filter(lambda x: x in cpu.get_cpu_flags(), ["ept", "npt"]))[0] except IndexError: test.log.info("Host doesn't support ept/npt, skip the configuration") else: - enable_mmu_cmd = params["enable_mmu_cmd_%s" % flag] - check_mmu_cmd = params["check_mmu_cmd_%s" % flag] - restore_mmu_cmd = params["restore_mmu_cmd_%s" % flag] - status = process.system(check_mmu_cmd, timeout=120, ignore_status=True, - shell=True) + enable_mmu_cmd = params[f"enable_mmu_cmd_{flag}"] + check_mmu_cmd = params[f"check_mmu_cmd_{flag}"] + restore_mmu_cmd = params[f"restore_mmu_cmd_{flag}"] + status = process.system( + check_mmu_cmd, timeout=120, ignore_status=True, shell=True + ) if status != 0: _kill_vms(params, env) process.run(enable_mmu_cmd, shell=True) @@ -88,13 +86,16 @@ def run(test, params, env): count += 1 try: vm.monitor.info("cpus", debug=False) - if params.get('machine_type').startswith("s390"): - if 
vm.monitor.get_status()['status'] in ['running', - 'guest-panicked']: + if params.get("machine_type").startswith("s390"): + if vm.monitor.get_status()["status"] in [ + "running", + "guest-panicked", + ]: pass else: - raise virt_vm.VMStatusError('Unexpected VM status: "%s"' - % vm.monitor.get_status()) + raise virt_vm.VMStatusError( + f'Unexpected VM status: "{vm.monitor.get_status()}"' + ) else: vm.verify_status("running") if not bg.is_alive(): diff --git a/qemu/tests/qcow2perf.py b/qemu/tests/qcow2perf.py index f278c4ee83..c6f328f0c6 100644 --- a/qemu/tests/qcow2perf.py +++ b/qemu/tests/qcow2perf.py @@ -1,9 +1,7 @@ import re from avocado.utils import process - -from virttest import data_dir -from virttest import error_context +from virttest import data_dir, error_context from virttest.qemu_storage import QemuImg @@ -49,30 +47,44 @@ def run(test, params, env): sn_list.append((sn_tmp, image_params)) # Write to the test image - error_context.context("Prepare the image with write a certain size block", - test.log.info) - dropcache = 'echo 3 > /proc/sys/vm/drop_caches && sleep 5' + error_context.context( + "Prepare the image with write a certain size block", test.log.info + ) + dropcache = "echo 3 > /proc/sys/vm/drop_caches && sleep 5" snapshot_file = sn_list[test_image][0].image_filename if op_type != "writeoffset1": offset = 0 - writecmd0 = writecmd % (write_round, offset, interval_size, - write_unit, interval_size, write_unit) + writecmd0 = writecmd % ( + write_round, + offset, + interval_size, + write_unit, + interval_size, + write_unit, + ) iocmd0 = iocmd % (writecmd0, io_options, snapshot_file) test.log.info("writecmd-offset-0: %s", writecmd0) process.run(dropcache, shell=True) output = process.run(iocmd0, shell=True) else: offset = 1 - writecmd1 = writecmd % (write_round, offset, interval_size, - write_unit, interval_size, write_unit) + writecmd1 = writecmd % ( + write_round, + offset, + interval_size, + write_unit, + interval_size, + write_unit, + ) iocmd1 = iocmd % (writecmd1, io_options, snapshot_file) test.log.info("writecmd-offset-1: %s", writecmd1) process.run(dropcache, shell=True) output = process.run(iocmd1, shell=True) - error_context.context("Do one operations to the image and " - "measure the time", test.log.info) + error_context.context( + "Do one operations to the image and " "measure the time", test.log.info + ) if op_type == "read": readcmd = opcmd % (io_options, snapshot_file) @@ -85,11 +97,9 @@ def run(test, params, env): process.run(dropcache, shell=True) output = process.run(commitcmd, shell=True) elif op_type == "rebase": - new_base_img = QemuImg(params.object_params(new_base), image_dir, - new_base) + new_base_img = QemuImg(params.object_params(new_base), image_dir, new_base) new_base_img.create(params.object_params(new_base)) - rebasecmd = opcmd % (new_base_img.image_filename, - cache_mode, snapshot_file) + rebasecmd = opcmd % (new_base_img.image_filename, cache_mode, snapshot_file) test.log.info("rebase: %s", rebasecmd) process.run(dropcache, shell=True) output = process.run(rebasecmd, shell=True) @@ -101,8 +111,9 @@ def run(test, params, env): output = process.run(convertcmd, shell=True) error_context.context("Result recording", test.log.info) - result_file = open("%s/%s_%s_results" % - (test.resultsdir, "qcow2perf", op_type), 'w') - result_file.write("%s:%s\n" % (op_type, output)) + result_file = open( + "{}/{}_{}_results".format(test.resultsdir, "qcow2perf", op_type), "w" + ) + result_file.write(f"{op_type}:{output}\n") test.log.info("%s takes %s", op_type, 
output) result_file.close() diff --git a/qemu/tests/qemu_disk_img.py b/qemu/tests/qemu_disk_img.py index 8fa5f817b9..5967ee1446 100644 --- a/qemu/tests/qemu_disk_img.py +++ b/qemu/tests/qemu_disk_img.py @@ -1,23 +1,22 @@ -import re import logging +import re -from avocado.utils import process - -from virttest import data_dir -from virttest import env_process -from virttest import error_context -from virttest import storage -from virttest import qemu_storage -from virttest import utils_test -from virttest import utils_misc -from virttest import error_context from avocado.core import exceptions +from avocado.utils import process +from virttest import ( + data_dir, + env_process, + error_context, + qemu_storage, + storage, + utils_misc, + utils_test, +) -LOG_JOB = logging.getLogger('avocado.test') +LOG_JOB = logging.getLogger("avocado.test") class QemuImgTest(qemu_storage.QemuImg): - def __init__(self, test, params, env, tag): self.vm = None self.test = test @@ -27,7 +26,7 @@ def __init__(self, test, params, env, tag): self.data_dir = data_dir.get_data_dir() self.trash = [] t_params = params.object_params(tag) - super(QemuImgTest, self).__init__(t_params, self.data_dir, tag) + super().__init__(t_params, self.data_dir, tag) @error_context.context_aware def create_snapshot(self, t_params=None): @@ -42,7 +41,7 @@ def create_snapshot(self, t_params=None): return {} snapshot = storage.get_image_filename(params, self.data_dir) storage.file_remove(params, snapshot) - super(QemuImgTest, self).create(params) + super().create(params) self.trash.append(snapshot) return params @@ -59,7 +58,8 @@ def commit(self, drop=False, cache_mode=None, base=None): cmds.extend(["-t", cache_mode]) if base: base_image_filename = storage.get_image_filename( - self.params.object_params(base), self.root_dir) + self.params.object_params(base), self.root_dir + ) cmds.extend(["-b", base_image_filename]) cmds.extend(["-f", self.image_format, self.image_filename]) LOG_JOB.info("Commit image %s", self.image_filename) @@ -77,8 +77,8 @@ def start_vm(self, t_params=None): base_image = params.get("images", "image1").split()[0] params["start_vm"] = "yes" try: - del params["image_name_%s" % base_image] - del params["image_format_%s" % base_image] + del params[f"image_name_{base_image}"] + del params[f"image_format_{base_image}"] except KeyError: pass vm_name = params["main_vm"] @@ -109,7 +109,7 @@ def __md5sum(self, dst): login_timeout = int(self.params.get("login_timeout", 360)) session = self.vm.wait_for_login(timeout=login_timeout) md5bin = self.params.get("md5sum_bin", "md5sum") - cmd = "%s %s" % (md5bin, dst) + cmd = f"{md5bin} {dst}" status, output = session.cmd_status_output(cmd, timeout=300) if status != 0: LOG_JOB.error("Execute '%s' with failures('%s') ", cmd, output) @@ -121,8 +121,7 @@ def __md5sum(self, dst): def save_file(self, dst): login_timeout = int(self.params.get("login_timeout", 360)) cmd = self.params.get("sync_bin", "sync") - error_context.context("save file('%s') md5sum in guest" % dst, - LOG_JOB.info) + error_context.context(f"save file('{dst}') md5sum in guest", LOG_JOB.info) self.__create_file(dst) session = self.vm.wait_for_login(timeout=login_timeout) LOG_JOB.info("sync guest data") @@ -136,12 +135,12 @@ def save_file(self, dst): @error_context.context_aware def check_file(self, dst, md5): - error_context.context("check file('%s') md5sum in guest" % dst, - LOG_JOB.info) + error_context.context(f"check file('{dst}') md5sum in guest", LOG_JOB.info) if md5 != self.__md5sum(dst): - err = ("Md5 value does not 
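# Illustrative sketch, not a hunk from the patch: the hunks above convert failure
# and error messages to f-strings but deliberately leave logging calls such as
#     test.log.info("%s takes %s", op_type, output)
# in lazy "%"-placeholder form.  The usual reason is that the logging module only
# interpolates the arguments if the record is actually emitted; a minimal,
# self-contained sketch (logger name and values are hypothetical):
import logging

logging.basicConfig(level=logging.WARNING)
log = logging.getLogger("example")
op_type, elapsed = "commit", "3.2s"
log.info("%s takes %s", op_type, elapsed)           # below WARNING: no formatting work is done
log.warning("op=%s elapsed=%s", op_type, elapsed)   # emitted: formatted lazily by the logging module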
match. " - "Expected value: %s Actual value: %s" % - (md5, self.__md5sum(dst))) + err = ( + "Md5 value does not match. " + f"Expected value: {md5} Actual value: {self.__md5sum(dst)}" + ) LOG_JOB.error(err) return False return True @@ -155,15 +154,16 @@ def destroy_vm(self): @error_context.context_aware def check_image(self, t_params=None): - error_context.context("check image file ('%s')" % self.image_filename, - LOG_JOB.info) + error_context.context( + f"check image file ('{self.image_filename}')", LOG_JOB.info + ) t_params = t_params or {} - return super(QemuImgTest, self).check_image(t_params, self.data_dir) + return super().check_image(t_params, self.data_dir) @error_context.context_aware def get_info(self): - error_context.context("get image file ('%s')" % self.image_filename) - return super(QemuImgTest, self).info() + error_context.context(f"get image file ('{self.image_filename}')") + return super().info() @error_context.context_aware def verify_info(self, params=None): @@ -195,34 +195,34 @@ def verify_info(self, params=None): else: evalue = params.get(option) if avalue is not None and avalue != evalue: - msg = "Get wrong %s from image %s!" % (option, image_filename) - msg += "Expect: %s, actual: %s" % (evalue, avalue) + msg = f"Get wrong {option} from image {image_filename}!" + msg += f"Expect: {evalue}, actual: {avalue}" self.test.fail(msg) @error_context.context_aware def check_backingfile(self): - error_context.context("check image('%s') backing file" % - self.image_filename, LOG_JOB.info) + error_context.context( + f"check image('{self.image_filename}') backing file", LOG_JOB.info + ) out = self.get_info() try: - backingfile = re.search(r'backing file: +(.*)', out, re.M).group(1) + backingfile = re.search(r"backing file: +(.*)", out, re.M).group(1) if not self.base_tag or self.base_tag == "null": - msg = ("Expected backing file is null") - msg += " Actual backing file: %s" % backingfile + msg = "Expected backing file is null" + msg += f" Actual backing file: {backingfile}" raise exceptions.TestFail(msg) else: base_params = self.params.object_params(self.base_tag) base_image_repr = qemu_storage.get_image_repr( - self.base_tag, base_params, self.root_dir) + self.base_tag, base_params, self.root_dir + ) if base_image_repr != backingfile: - msg = ("Expected backing file: %s" % - self.base_image_filename) - msg += " Actual backing file: %s" % backingfile + msg = f"Expected backing file: {self.base_image_filename}" + msg += f" Actual backing file: {backingfile}" raise exceptions.TestFail(msg) except AttributeError: if self.base_tag and self.base_tag != "null": - msg = ("Could not find backing file for image '%s'" % - self.image_filename) + msg = f"Could not find backing file for image '{self.image_filename}'" raise exceptions.TestFail(msg) @error_context.context_aware @@ -230,7 +230,7 @@ def clean(self): error_context.context("clean up useless images") self.destroy_vm() for temp in self.trash: - process.run("rm -f %s" % temp) + process.run(f"rm -f {temp}") def run(test, params, env): @@ -246,7 +246,6 @@ def generate_base_snapshot_pair(image_chain): image_chain = image_chain.split() n = len(image_chain) if n < 2: - raise ValueError("Image_chain should contain at" - "least 2 items, got %s." 
% n) + raise ValueError("Image_chain should contain at" f"least 2 items, got {n}.") for i in range(1, n): yield [image_chain[i - 1], image_chain[i]] diff --git a/qemu/tests/qemu_disk_img_commit.py b/qemu/tests/qemu_disk_img_commit.py index c6c6d1a3e3..e4c1a8ff5a 100644 --- a/qemu/tests/qemu_disk_img_commit.py +++ b/qemu/tests/qemu_disk_img_commit.py @@ -1,24 +1,20 @@ -import logging import json - -from virttest import data_dir -from virttest.qemu_storage import QemuImg +import logging from avocado.utils import process - -from virttest import error_context +from virttest import data_dir, error_context +from virttest.qemu_storage import QemuImg from qemu.tests import qemu_disk_img -LOG_JOB = logging.getLogger('avocado.test') +LOG_JOB = logging.getLogger("avocado.test") class CommitTest(qemu_disk_img.QemuImgTest): - def __init__(self, test, params, env): self.tag = params.get("image_commit", "image1") t_params = params.object_params(self.tag) - super(CommitTest, self).__init__(test, t_params, env, self.tag) + super().__init__(test, t_params, env, self.tag) @error_context.context_aware def commit(self, t_params=None): @@ -51,8 +47,11 @@ def _get_img_obj(tag): base_image = params.get("images", "image1").split()[0] params.update( - {"image_name_%s" % base_image: params["image_name"], - "image_format_%s" % base_image: params["image_format"]}) + { + f"image_name_{base_image}": params["image_name"], + f"image_format_{base_image}": params["image_format"], + } + ) t_file = params["guest_file_name"] commit_test = CommitTest(test, params, env) n_params = commit_test.create_snapshot() diff --git a/qemu/tests/qemu_disk_img_convert.py b/qemu/tests/qemu_disk_img_convert.py index 7e6f5a7644..e4a26872d2 100644 --- a/qemu/tests/qemu_disk_img_convert.py +++ b/qemu/tests/qemu_disk_img_convert.py @@ -2,21 +2,18 @@ from avocado.core import exceptions from avocado.utils import process - -from virttest import storage -from virttest import error_context +from virttest import error_context, storage from qemu.tests import qemu_disk_img -LOG_JOB = logging.getLogger('avocado.test') +LOG_JOB = logging.getLogger("avocado.test") class ConvertTest(qemu_disk_img.QemuImgTest): - def __init__(self, test, params, env): self.tag = params["convert_source"] t_params = params.object_params(self.tag) - super(ConvertTest, self).__init__(test, t_params, env, self.tag) + super().__init__(test, t_params, env, self.tag) @error_context.context_aware def convert(self, t_params=None): @@ -28,8 +25,7 @@ def convert(self, t_params=None): if t_params: params.update(t_params) cache_mode = params.get("cache_mode") - conv = super(ConvertTest, self).convert( - params, self.data_dir, cache_mode) + conv = super().convert(params, self.data_dir, cache_mode) params = params.object_params(conv) converted = storage.get_image_filename(params, self.data_dir) process.run("sync") @@ -44,15 +40,14 @@ def compare_test(self, t_params): :param t_params: Dictionary with the test parameters """ for mode in t_params.objects("compare_mode_list"): - error_context.context("Compare images in %s mode" % mode, - LOG_JOB.info) + error_context.context(f"Compare images in {mode} mode", LOG_JOB.info) cmd_result = None - is_strict = ("strict" == mode) + is_strict = "strict" == mode image1 = self.image_filename image2 = storage.get_image_filename(t_params, self.data_dir) try: cmd_result = self.compare_images(image1, image2, is_strict) - except (exceptions.TestFail, exceptions.TestError) as detail: + except (exceptions.TestFail, exceptions.TestError): if not is_strict: raise if 
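# Illustrative sketch, not a hunk from the patch: the reformatted calls keep
# black-style implicit string concatenation, e.g.
#     "Check if the driver is installed and " "verified"
# Adjacent literals are joined at compile time, so the message text is unchanged.
# One pre-existing message is worth a follow-up, though: the ValueError above
# joins "...contain at" with "least 2 items...", which still renders as "atleast"
# (the missing space predates this patch and is carried over unchanged).
joined = "Check if the driver is installed and " "verified"
assert joined == "Check if the driver is installed and verified"

missing_space = "Image_chain should contain at" "least 2 items."
assert "atleast" in missing_space  # the separating space has to live inside one of the literals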
is_strict and cmd_result: @@ -70,8 +65,11 @@ def run(test, params, env): base_image = params.get("images", "image1").split()[0] params.update( - {"image_name_%s" % base_image: params["image_name"], - "image_format_%s" % base_image: params["image_format"]}) + { + f"image_name_{base_image}": params["image_name"], + f"image_format_{base_image}": params["image_format"], + } + ) t_file = params["guest_file_name"] convert_test = ConvertTest(test, params, env) n_params = convert_test.create_snapshot() diff --git a/qemu/tests/qemu_disk_img_info.py b/qemu/tests/qemu_disk_img_info.py index ed20583e61..bc6c0a29e1 100644 --- a/qemu/tests/qemu_disk_img_info.py +++ b/qemu/tests/qemu_disk_img_info.py @@ -1,20 +1,17 @@ import logging -from virttest import env_process -from virttest import error_context -from virttest import storage +from virttest import env_process, error_context, storage from qemu.tests import qemu_disk_img -LOG_JOB = logging.getLogger('avocado.test') +LOG_JOB = logging.getLogger("avocado.test") class InfoTest(qemu_disk_img.QemuImgTest): - def __init__(self, test, params, env, tag): self.tag = tag t_params = params.object_params(self.tag) - super(InfoTest, self).__init__(test, t_params, env, self.tag) + super().__init__(test, t_params, env, self.tag) @error_context.context_aware def start_vm(self, t_params=None): @@ -53,12 +50,13 @@ def run(test, params, env): base_image = params.get("images", "image1").split()[0] update_params = { - "image_name_%s" % base_image: params["image_name"], - "image_format_%s" % base_image: params["image_format"] + f"image_name_{base_image}": params["image_name"], + f"image_format_{base_image}": params["image_format"], } - optval = (lambda opt, img, p, default: p.get('%s_%s' % (opt, img), - p.get(opt, default))) + def optval(opt, img, p, default): + return p.get(f"{opt}_{img}", p.get(opt, default)) + enable_ceph = params.get("enable_ceph") == "yes" enable_iscsi = params.get("enable_iscsi") == "yes" enable_gluster = params.get("enable_gluster") == "yes" @@ -66,60 +64,78 @@ def run(test, params, env): enable_curl = params.get("enable_curl") == "yes" enable_ssh = params.get("enable_ssh") == "yes" if enable_ceph: - update_params.update({ - "enable_ceph_%s" % base_image: optval("enable_ceph", - base_image, - params, "no"), - "storage_type_%s" % base_image: optval("storage_type", - base_image, - params, "filesystem")}) + update_params.update( + { + f"enable_ceph_{base_image}": optval( + "enable_ceph", base_image, params, "no" + ), + f"storage_type_{base_image}": optval( + "storage_type", base_image, params, "filesystem" + ), + } + ) elif enable_iscsi: - update_params.update({ - "enable_iscsi_%s" % base_image: optval("enable_iscsi", - base_image, - params, "no"), - "storage_type_%s" % base_image: optval("storage_type", - base_image, - params, "filesystem"), - "image_raw_device_%s" % base_image: optval("image_raw_device", - base_image, - params, "no"), - "lun_%s" % base_image: optval("lun", base_image, params, "0")}) + update_params.update( + { + f"enable_iscsi_{base_image}": optval( + "enable_iscsi", base_image, params, "no" + ), + f"storage_type_{base_image}": optval( + "storage_type", base_image, params, "filesystem" + ), + f"image_raw_device_{base_image}": optval( + "image_raw_device", base_image, params, "no" + ), + f"lun_{base_image}": optval("lun", base_image, params, "0"), + } + ) elif enable_gluster: - update_params.update({ - "enable_gluster_%s" % base_image: optval("enable_gluster", - base_image, - params, "no"), - "storage_type_%s" % base_image: 
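# Illustrative sketch, not a hunk from the patch: the qemu_disk_img_info.py hunk
# above replaces a lambda assigned to a name with a small def, the form linters
# such as ruff/flake8 prefer (rule E731), without changing the lookup behaviour.
# A self-contained comparison over a plain dict (values are hypothetical):
params = {"image_format_sn1": "qcow2", "image_format": "raw"}

optval_lambda = lambda opt, img, p, default: p.get(f"{opt}_{img}", p.get(opt, default))  # old style

def optval(opt, img, p, default):  # new style, same lookup order
    return p.get(f"{opt}_{img}", p.get(opt, default))

assert optval("image_format", "sn1", params, "raw") == optval_lambda("image_format", "sn1", params, "raw") == "qcow2"
assert optval("image_size", "sn1", params, "20G") == "20G"  # falls back to the default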
optval("storage_type", - base_image, - params, "filesystem")}) + update_params.update( + { + f"enable_gluster_{base_image}": optval( + "enable_gluster", base_image, params, "no" + ), + f"storage_type_{base_image}": optval( + "storage_type", base_image, params, "filesystem" + ), + } + ) elif enable_nbd: - update_params.update({ - "enable_nbd_%s" % base_image: optval("enable_nbd", - base_image, - params, "no"), - "nbd_port_%s" % base_image: optval("nbd_port", - base_image, - params, "10809"), - "storage_type_%s" % base_image: optval("storage_type", - base_image, - params, "filesystem")}) + update_params.update( + { + f"enable_nbd_{base_image}": optval( + "enable_nbd", base_image, params, "no" + ), + f"nbd_port_{base_image}": optval( + "nbd_port", base_image, params, "10809" + ), + f"storage_type_{base_image}": optval( + "storage_type", base_image, params, "filesystem" + ), + } + ) elif enable_curl: - update_params.update({ - "enable_curl_%s" % base_image: optval("enable_curl", - base_image, - params, "no"), - "storage_type_%s" % base_image: optval("storage_type", - base_image, - params, "filesystem")}) + update_params.update( + { + f"enable_curl_{base_image}": optval( + "enable_curl", base_image, params, "no" + ), + f"storage_type_{base_image}": optval( + "storage_type", base_image, params, "filesystem" + ), + } + ) elif enable_ssh: - update_params.update({ - "enable_ssh_%s" % base_image: optval("enable_ssh", - base_image, - params, "no"), - "storage_type_%s" % base_image: optval("storage_type", - base_image, - params, "filesystem")}) + update_params.update( + { + f"enable_ssh_{base_image}": optval( + "enable_ssh", base_image, params, "no" + ), + f"storage_type_{base_image}": optval( + "storage_type", base_image, params, "filesystem" + ), + } + ) params.update(update_params) image_chain = params.get("image_chain", "").split() @@ -127,10 +143,10 @@ def run(test, params, env): md5_dict = {} for idx, tag in enumerate(image_chain): # VM cannot boot up from a readonly image - if params.object_params(tag).get('image_readonly') == 'yes': + if params.object_params(tag).get("image_readonly") == "yes": continue - params["image_chain"] = " ".join(image_chain[:idx + 1]) + params["image_chain"] = " ".join(image_chain[: idx + 1]) info_test = InfoTest(test, params, env, tag) n_params = info_test.create_snapshot() info_test.start_vm(n_params) @@ -138,9 +154,9 @@ def run(test, params, env): for _file in check_files: ret = info_test.check_file(_file, md5_dict[_file]) if not ret: - test.error("Check md5sum fail (file:%s)" % _file) + test.error(f"Check md5sum fail (file:{_file})") # save file in guest - t_file = params["guest_file_name_%s" % tag] + t_file = params[f"guest_file_name_{tag}"] md5 = info_test.save_file(t_file) if not md5: test.error("Fail to save tmp file") diff --git a/qemu/tests/qemu_disk_img_rebase.py b/qemu/tests/qemu_disk_img_rebase.py index 5528d9b9b9..7fcde6ab77 100644 --- a/qemu/tests/qemu_disk_img_rebase.py +++ b/qemu/tests/qemu_disk_img_rebase.py @@ -2,21 +2,18 @@ import logging from avocado.utils import process - -from virttest import error_context -from virttest import storage +from virttest import error_context, storage from qemu.tests import qemu_disk_img -LOG_JOB = logging.getLogger('avocado.test') +LOG_JOB = logging.getLogger("avocado.test") class RebaseTest(qemu_disk_img.QemuImgTest): - def __init__(self, test, params, env, tag): self.tag = tag t_params = params.object_params(tag) - super(RebaseTest, self).__init__(test, t_params, env, tag) + super().__init__(test, t_params, env, 
tag) @error_context.context_aware def rebase(self, t_params=None): @@ -28,7 +25,7 @@ def rebase(self, t_params=None): if t_params: params.update(t_params) cache_mode = params.get("cache_mode") - super(RebaseTest, self).rebase(params, cache_mode) + super().rebase(params, cache_mode) return params def clean(self): @@ -36,7 +33,7 @@ def clean(self): for sn in params.get("image_chain").split()[1:]: _params = params.object_params(sn) _image = storage.get_image_filename(_params, self.data_dir) - process.run("rm -f %s" % _image) + process.run(f"rm -f {_image}") def run(test, params, env): @@ -51,15 +48,18 @@ def run(test, params, env): params_bak = copy.deepcopy(params) md5_dict = {} params.update( - {"image_name_%s" % base_image: params["image_name"], - "image_format_%s" % base_image: params["image_format"]}) + { + f"image_name_{base_image}": params["image_name"], + f"image_format_{base_image}": params["image_format"], + } + ) image_chain = params.get("image_chain", "").split() for idx, tag in enumerate(image_chain): - params["image_chain"] = " ".join(image_chain[:idx + 1]) + params["image_chain"] = " ".join(image_chain[: idx + 1]) rebase_test = RebaseTest(test, params, env, tag) n_params = rebase_test.create_snapshot() rebase_test.start_vm(n_params) - t_file = params["guest_file_name_%s" % tag] + t_file = params[f"guest_file_name_{tag}"] md5 = rebase_test.save_file(t_file) if not md5: test.error("Fail to save tmp file") @@ -88,7 +88,7 @@ def run(test, params, env): for _file in check_files: ret = rebase_test.check_file(_file, md5_dict[_file]) if not ret: - test.error("Check md5sum fail (file:%s)" % _file) + test.error(f"Check md5sum fail (file:{_file})") rebase_test.destroy_vm() rebase_test.check_image() diff --git a/qemu/tests/qemu_disk_img_snapshot.py b/qemu/tests/qemu_disk_img_snapshot.py index d566fc17cd..ed766df53e 100644 --- a/qemu/tests/qemu_disk_img_snapshot.py +++ b/qemu/tests/qemu_disk_img_snapshot.py @@ -1,6 +1,7 @@ -from qemu.tests import qemu_disk_img from avocado.core import exceptions +from qemu.tests import qemu_disk_img + def run(test, params, env): """ @@ -21,8 +22,11 @@ def run(test, params, env): base_image = params.get("images", "image1").split()[0] params.update( - {"image_name_%s" % base_image: params["image_name"], - "image_format_%s" % base_image: params["image_format"]}) + { + f"image_name_{base_image}": params["image_name"], + f"image_format_{base_image}": params["image_format"], + } + ) t_file = params["guest_file_name"] snapshot_test = qemu_disk_img.QemuImgTest(test, params, env, base_image) @@ -37,8 +41,9 @@ def run(test, params, env): snapshot_tag = snapshot_test.snapshot_create() output = snapshot_test.snapshot_list() if snapshot_tag not in output: - raise exceptions.TestFail("Snapshot created failed or missed;" - "snapshot list is: \n%s" % output) + raise exceptions.TestFail( + "Snapshot created failed or missed;" f"snapshot list is: \n{output}" + ) test.log.info("Step3. 
change tmp file before apply snapshot") snapshot_test.start_vm(params) diff --git a/qemu/tests/qemu_guest_agent.py b/qemu/tests/qemu_guest_agent.py index cf7a94b4b4..754f77e97a 100644 --- a/qemu/tests/qemu_guest_agent.py +++ b/qemu/tests/qemu_guest_agent.py @@ -1,40 +1,40 @@ +import base64 +import json import logging -import time import os -import re -import base64 import random +import re import string -import json +import time import aexpect - -from avocado.utils import genio -from avocado.utils import path as avo_path -from avocado.utils import process -from avocado.core import exceptions from aexpect.exceptions import ShellTimeoutError - -from virttest import error_context -from virttest import guest_agent -from virttest import utils_misc -from virttest import utils_disk -from virttest import env_process -from virttest import utils_net -from virttest import data_dir -from virttest import storage -from virttest import qemu_migration +from avocado.core import exceptions +from avocado.utils import genio, process +from avocado.utils import path as avo_path +from virttest import ( + data_dir, + env_process, + error_context, + guest_agent, + qemu_migration, + storage, + utils_disk, + utils_misc, + utils_net, +) from virttest.utils_version import VersionInterval - from virttest.utils_windows import virtio_win -from provider.win_driver_installer_test import (uninstall_gagent, - run_installer_with_interaction) -LOG_JOB = logging.getLogger('avocado.test') +from provider.win_driver_installer_test import ( + run_installer_with_interaction, + uninstall_gagent, +) +LOG_JOB = logging.getLogger("avocado.test") -class BaseVirtTest(object): +class BaseVirtTest: def __init__(self, test, params, env): self.test = test self.params = params @@ -91,7 +91,6 @@ def execute(self, test, params, env): class QemuGuestAgentTest(BaseVirtTest): - def __init__(self, test, params, env): BaseVirtTest.__init__(self, test, params, env) @@ -126,15 +125,16 @@ def _check_ga_pkg(self, session, cmd_check_pkg): :param session: use for sending cmd :param cmd_check_pkg: cmd to check if ga pkg is installed """ - error_context.context("Check whether qemu-ga is installed.", - LOG_JOB.info) + error_context.context("Check whether qemu-ga is installed.", LOG_JOB.info) s, o = session.cmd_status_output(cmd_check_pkg) - if s == 0 and self.params.get("os_variant", "") == 'rhel8': - error_context.context("Check if the installed pkg is the specific" - " one for rhel8 guest.", LOG_JOB.info) + if s == 0 and self.params.get("os_variant", "") == "rhel8": + error_context.context( + "Check if the installed pkg is the specific" " one for rhel8 guest.", + LOG_JOB.info, + ) version_list = [] - full_qga_rpm_info = session.cmd("readlink %s" % self.qga_pkg_path) - build_specific = re.sub(r'/', '-', full_qga_rpm_info) + full_qga_rpm_info = session.cmd(f"readlink {self.qga_pkg_path}") + build_specific = re.sub(r"/", "-", full_qga_rpm_info) if full_qga_rpm_info: for pkg in [o, build_specific]: pattern = r"guest-agent-(\d+.\d+.\d+-\d+).module" @@ -142,12 +142,15 @@ def _check_ga_pkg(self, session, cmd_check_pkg): if qga_v: version_list.append(qga_v[0]) self.qga_v = version_list[-1] - LOG_JOB.info("The installed and the specific pkg " - "version is %s", version_list) + LOG_JOB.info( + "The installed and the specific pkg " "version is %s", version_list + ) if len(version_list) < 2: - self.test.error("Does not find proper qga package at %s, it" - "is recommended to install a specific version" - "of qga rpm in advance" % self.qga_pkg_path) + self.test.error( + 
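# Illustrative sketch, not a hunk from the patch: the qemu_guest_agent.py hunk
# above applies two Python-3-only simplifications used throughout the patch:
#     class BaseVirtTest(object):            ->  class BaseVirtTest:
#     super(QemuImgTest, self).__init__(...) ->  super().__init__(...)
# Both spellings are equivalent on Python 3; a minimal sketch with made-up classes:
class Base:                        # implicit object base, same MRO as "class Base(object)"
    def __init__(self, tag):
        self.tag = tag

class Child(Base):
    def __init__(self, tag):
        super().__init__(tag)      # zero-argument super(), same as super(Child, self)

assert Child("image1").tag == "image1"
assert Base.__mro__[-1] is object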
f"Does not find proper qga package at {self.qga_pkg_path}, it" + "is recommended to install a specific version" + "of qga rpm in advance" + ) elif version_list[1] != version_list[0]: return False else: @@ -163,8 +166,7 @@ def _check_ga_service(self, session, cmd_check_status): :param session: use for sending cmd :param cmd_check_status: cmd to check if ga service is started """ - error_context.context("Check whether qemu-ga service is started.", - LOG_JOB.info) + error_context.context("Check whether qemu-ga service is started.", LOG_JOB.info) s, o = session.cmd_status_output(cmd_check_status) return s == 0 @@ -183,7 +185,7 @@ def _get_qga_version(self, session, vm, main_ver=True): ver_main = int(re.findall(pattern, qga_ver)[0]) return ver_main else: - match = re.search(r'qemu-guest(-(agent))?-(\d+\.\d+\.\d+-\d+)', qga_ver) + match = re.search(r"qemu-guest(-(agent))?-(\d+\.\d+\.\d+-\d+)", qga_ver) full_ver = match.group(3) return full_ver @@ -193,32 +195,37 @@ def gagent_install(self, session, vm): :param session: use for sending cmd :param vm: guest object. """ - error_context.context("Try to install 'qemu-guest-agent' package.", - LOG_JOB.info) - if self.params.get("os_variant", "") == 'rhel8': + error_context.context( + "Try to install 'qemu-guest-agent' package.", LOG_JOB.info + ) + if self.params.get("os_variant", "") == "rhel8": cmd = self.params["gagent_pkg_check_cmd"] s_check, o_check = session.cmd_status_output(cmd) if s_check == 0: - error_context.context("Remove the original guest agent pkg.", - LOG_JOB.info) - session.cmd("rpm -e %s" % o_check.strip()) - self.gagent_install_cmd = "rpm -ivh %s" % self.qga_pkg_path + error_context.context( + "Remove the original guest agent pkg.", LOG_JOB.info + ) + session.cmd(f"rpm -e {o_check.strip()}") + self.gagent_install_cmd = f"rpm -ivh {self.qga_pkg_path}" - error_context.context("Install qemu-guest-agent pkg in guest.", - LOG_JOB.info) + error_context.context("Install qemu-guest-agent pkg in guest.", LOG_JOB.info) s_inst, o_inst = session.cmd_status_output(self.gagent_install_cmd) if s_inst != 0: - self.test.fail("qemu-guest-agent install failed," - " the detailed info:\n%s." % o_inst) - if self.params.get("os_variant", "") == 'rhel8' and s_check == 0: - error_context.context("A new pkg is installed, so restart" - " qemu-guest-agent service.", - LOG_JOB.info) + self.test.fail( + "qemu-guest-agent install failed," f" the detailed info:\n{o_inst}." + ) + if self.params.get("os_variant", "") == "rhel8" and s_check == 0: + error_context.context( + "A new pkg is installed, so restart" " qemu-guest-agent service.", + LOG_JOB.info, + ) restart_cmd = self.params["gagent_restart_cmd"] s_rst, o_rst = session.cmd_status_output(restart_cmd) if s_rst != 0: - self.test.fail("qemu-guest-agent service restart failed," - " the detailed info:\n%s." % o_rst) + self.test.fail( + "qemu-guest-agent service restart failed," + f" the detailed info:\n{o_rst}." + ) @error_context.context_aware def gagent_uninstall(self, session, vm): @@ -227,12 +234,15 @@ def gagent_uninstall(self, session, vm): :param session: use for sending cmd :param vm: guest object. 
""" - error_context.context("Try to uninstall 'qemu-guest-agent' package.", - LOG_JOB.info) + error_context.context( + "Try to uninstall 'qemu-guest-agent' package.", LOG_JOB.info + ) s, o = session.cmd_status_output(self.gagent_uninstall_cmd) if s: - self.test.fail("Could not uninstall qemu-guest-agent package " - "in VM '%s', detail: '%s'" % (vm.name, o)) + self.test.fail( + "Could not uninstall qemu-guest-agent package " + f"in VM '{vm.name}', detail: '{o}'" + ) @error_context.context_aware def gagent_start(self, session, vm): @@ -246,8 +256,9 @@ def gagent_start(self, session, vm): # if start a running service, for rhel guest return code is zero, # for windows guest,return code is not zero if s and "already been started" not in o: - self.test.fail("Could not start qemu-ga service in VM '%s'," - "detail: '%s'" % (vm.name, o)) + self.test.fail( + f"Could not start qemu-ga service in VM '{vm.name}'," f"detail: '{o}'" + ) @error_context.context_aware def gagent_stop(self, session, vm): @@ -262,8 +273,9 @@ def gagent_stop(self, session, vm): # if stop a stopped service,for rhel guest return code is zero, # for windows guest,return code is not zero. if s and "is not started" not in o: - self.test.fail("Could not stop qemu-ga service in VM '%s', " - "detail: '%s'" % (vm.name, o)) + self.test.fail( + f"Could not stop qemu-ga service in VM '{vm.name}', " f"detail: '{o}'" + ) @error_context.context_aware def gagent_create(self, params, vm, *args): @@ -280,8 +292,9 @@ def gagent_create(self, params, vm, *args): filename = vm.get_serial_console_filename(gagent_name) gagent_params = params.object_params(gagent_name) gagent_params["monitor_filename"] = filename - gagent = guest_agent.QemuAgent(vm, gagent_name, gagent_serial_type, - gagent_params, get_supported_cmds=True) + gagent = guest_agent.QemuAgent( + vm, gagent_name, gagent_serial_type, gagent_params, get_supported_cmds=True + ) self.gagent = gagent return self.gagent @@ -291,8 +304,7 @@ def gagent_verify(self, params, vm): error_context.context("Check if guest agent work.", LOG_JOB.info) if not self.gagent: - self.test.error("Could not find guest agent object " - "for VM '%s'" % vm.name) + self.test.error("Could not find guest agent object " f"for VM '{vm.name}'") self.gagent.verify_responsive() LOG_JOB.info(self.gagent.cmd("guest-info")) @@ -309,15 +321,17 @@ def gagent_setsebool_value(self, value, params, vm): """ session = self._get_session(params, vm) self._open_session_list.append(session) - error_context.context("Turn %s virt_qemu_ga_read_nonsecurity_files." % - value, LOG_JOB.info) + error_context.context( + f"Turn {value} virt_qemu_ga_read_nonsecurity_files.", LOG_JOB.info + ) set_selinux_bool_cmd = params["setsebool_cmd"] % value session.cmd(set_selinux_bool_cmd).strip() - get_sebool_cmd = params['getsebool_cmd'] + get_sebool_cmd = params["getsebool_cmd"] value_selinux_bool_guest = session.cmd_output(get_sebool_cmd).strip() if value_selinux_bool_guest != value: - self.test.error("Set boolean virt_qemu_ga_read_nonsecurity_files " - "failed.") + self.test.error( + "Set boolean virt_qemu_ga_read_nonsecurity_files " "failed." 
+ ) @error_context.context_aware def log_persistence(self, params, session): @@ -333,14 +347,15 @@ def setup(self, test, params, env): if self.start_vm == "yes": session = self._get_session(params, self.vm) self._open_session_list.append(session) - if self.params.get("os_variant", "") == 'rhel8': - error_context.context("Get the qemu-guest-agent pkg" - " for rhel8 guest.", LOG_JOB.info) + if self.params.get("os_variant", "") == "rhel8": + error_context.context( + "Get the qemu-guest-agent pkg" " for rhel8 guest.", LOG_JOB.info + ) cmd_check_qga_installlog = params["cmd_check_qga_installlog"] s, o = session.cmd_status_output(cmd_check_qga_installlog) if not s: - output = session.cmd_output("cat %s" % o) - test.fail("Failed to get qga, details: %s" % output) + output = session.cmd_output(f"cat {o}") + test.fail(f"Failed to get qga, details: {output}") else: self.qga_pkg_path = params["qga_rpm_path"] if self._check_ga_pkg(session, params.get("gagent_pkg_check_cmd")): @@ -349,24 +364,21 @@ def setup(self, test, params, env): LOG_JOB.info("qemu-ga is not installed or need to update.") self.gagent_install(session, self.vm) - error_context.context("Check qga service running status", - LOG_JOB.info) - if self._check_ga_service( - session, params.get("gagent_status_cmd")): - output = session.cmd_output(params["cmd_check_qgaservice"], - timeout=5) + error_context.context("Check qga service running status", LOG_JOB.info) + if self._check_ga_service(session, params.get("gagent_status_cmd")): + output = session.cmd_output(params["cmd_check_qgaservice"], timeout=5) if output and "qemu-guest-agent" in output: - test.error("qemu-ga service may have some issues, please" - "check more details from %s" % output) + test.error( + "qemu-ga service may have some issues, please" + f"check more details from {output}" + ) else: LOG_JOB.info("qemu-ga service is already running well.") else: LOG_JOB.info("qemu-ga service is not running.") self.gagent_start(session, self.vm) - args = [ - params.get("gagent_serial_type"), - params.get("gagent_name")] + args = [params.get("gagent_serial_type"), params.get("gagent_name")] self.gagent_create(params, self.vm, *args) def run_once(self, test, params, env): @@ -379,7 +391,6 @@ def cleanup(self, test, params, env): class QemuGuestAgentBasicCheck(QemuGuestAgentTest): - def __init__(self, test, params, env): QemuGuestAgentTest.__init__(self, test, params, env) @@ -406,8 +417,7 @@ def gagent_check_install_uninstall(self, test, params, env): session = self._get_session(params, self.vm) for i in range(repeats): - error_context.context("Repeat: %s/%s" % (i + 1, repeats), - LOG_JOB.info) + error_context.context(f"Repeat: {i + 1}/{repeats}", LOG_JOB.info) if self._check_ga_pkg(session, params.get("gagent_pkg_check_cmd")): self.gagent_uninstall(session, self.vm) self.gagent_install(session, self.vm) @@ -433,8 +443,7 @@ def gagent_check_stop_start(self, test, params, env): self.vm.verify_alive() session = self._get_session(params, self.vm) for i in range(repeats): - error_context.context("Repeat: %s/%s" % (i + 1, repeats), - LOG_JOB.info) + error_context.context(f"Repeat: {i + 1}/{repeats}", LOG_JOB.info) self.gagent_stop(session, self.vm) time.sleep(1) self.gagent_start(session, self.vm) @@ -454,8 +463,7 @@ def gagent_check_sync(self, test, params, env): :param params: Dictionary with the test parameters :param env: Dictionary with test environment. 
""" - error_context.context("Check guest agent command 'guest-sync'", - LOG_JOB.info) + error_context.context("Check guest agent command 'guest-sync'", LOG_JOB.info) self.gagent.sync() @error_context.context_aware @@ -474,24 +482,27 @@ def gagent_check_guest_info(self, test, params, env): session = self._get_session(params, None) self._open_session_list.append(session) - error_context.context("Check guest agent command 'guest-info'", - LOG_JOB.info) - qga_ver_qga = self.gagent.guest_info()['version'].strip() - if params.get("os_type") == 'windows': - qga_ver_guest = session.cmd_output(params["cmd_qga_build"] - ).strip() + error_context.context("Check guest agent command 'guest-info'", LOG_JOB.info) + qga_ver_qga = self.gagent.guest_info()["version"].strip() + if params.get("os_type") == "windows": + qga_ver_guest = session.cmd_output(params["cmd_qga_build"]).strip() else: qga_ver_guest_raw = str(self.qga_v) pattern = r"(\d+.\d+.\d+)" qga_ver_guest = re.findall(pattern, qga_ver_guest_raw, re.I)[0] if qga_ver_qga != qga_ver_guest: - test.fail("The qga version %s from qga is different with %s " - "from guest." % (qga_ver_qga, qga_ver_guest)) + test.fail( + f"The qga version {qga_ver_qga} from qga is different with {qga_ver_guest} " + "from guest." + ) @error_context.context_aware def __gagent_check_shutdown(self, shutdown_mode): - error_context.context("Check guest agent command 'guest-shutdown'" - ", shutdown mode '%s'" % shutdown_mode, LOG_JOB.info) + error_context.context( + "Check guest agent command 'guest-shutdown'" + f", shutdown mode '{shutdown_mode}'", + LOG_JOB.info, + ) if not self.env or not self.params: self.test.error("You should run 'setup' method before test") @@ -533,18 +544,21 @@ def _gagent_check_shutdown(self): time.sleep(20) env_process.preprocess_vm(test, params, env, params["main_vm"]) self.vm = env.get_vm(params["main_vm"]) - session = self.vm.wait_for_login(timeout=int(params.get("login_timeout", - 360))) + session = self.vm.wait_for_login( + timeout=int(params.get("login_timeout", 360)) + ) - error_context.context("Check if guest-agent crash after reboot.", - LOG_JOB.info) + error_context.context( + "Check if guest-agent crash after reboot.", LOG_JOB.info + ) output = session.cmd_output(params["cmd_query_log"], timeout=10) try: if "core-dump" in output: - test.fail("Guest-agent aborts after guest-shutdown" - " detail: '%s'" % output) + test.fail( + "Guest-agent aborts after guest-shutdown" f" detail: '{output}'" + ) finally: - session.cmd('rm -rf %s' % params['journal_file']) + session.cmd("rm -rf {}".format(params["journal_file"])) else: _gagent_check_shutdown(self) @@ -559,7 +573,7 @@ def gagent_check_reboot(self, test, params, env): """ self.__gagent_check_shutdown(self.gagent.SHUTDOWN_MODE_REBOOT) pattern = params["gagent_guest_reboot_pattern"] - error_context.context("Verify serial output has '%s'" % pattern) + error_context.context(f"Verify serial output has '{pattern}'") rebooted = self.__gagent_check_serial_output(pattern) if not rebooted: test.fail("Could not reboot VM via guest agent") @@ -568,8 +582,7 @@ def gagent_check_reboot(self, test, params, env): session = self._get_session(self.params, None) session.close() except Exception as detail: - test.fail("Could not login to guest" - " detail: '%s'" % detail) + test.fail("Could not login to guest" f" detail: '{detail}'") @error_context.context_aware def gagent_check_halt(self, test, params, env): @@ -582,7 +595,7 @@ def gagent_check_halt(self, test, params, env): """ 
self.__gagent_check_shutdown(self.gagent.SHUTDOWN_MODE_HALT) pattern = params["gagent_guest_shutdown_pattern"] - error_context.context("Verify serial output has '%s'" % pattern) + error_context.context(f"Verify serial output has '{pattern}'") halted = self.__gagent_check_serial_output(pattern) if not halted: test.fail("Could not halt VM via guest agent") @@ -590,8 +603,9 @@ def gagent_check_halt(self, test, params, env): try: self.vm.destroy(gracefully=False) except Exception as detail: - LOG_JOB.warn("Got an exception when force destroying guest:" - " '%s'", detail) + LOG_JOB.warning( + "Got an exception when force destroying guest:" " '%s'", detail + ) @error_context.context_aware def gagent_check_sync_delimited(self, test, params, env): @@ -602,8 +616,9 @@ def gagent_check_sync_delimited(self, test, params, env): :param params: Dictionary with the test parameters :param env: Dictionary with test environment. """ - error_context.context("Check guest agent command 'guest-sync-delimited'", - LOG_JOB.info) + error_context.context( + "Check guest agent command 'guest-sync-delimited'", LOG_JOB.info + ) self.gagent.sync("guest-sync-delimited") @error_context.context_aware @@ -628,17 +643,16 @@ def gagent_check_set_user_password(self, test, params, env): error_context.context("Change guest's password.") try: self.gagent.set_user_password(new_password, crypted, ga_username) - error_context.context("Check if the guest could be login by new password", - LOG_JOB.info) + error_context.context( + "Check if the guest could be login by new password", LOG_JOB.info + ) self._gagent_verify_password(self.vm, new_password) except guest_agent.VAgentCmdError: test.fail("Failed to set the new password for guest") finally: - error_context.context( - "Reset back the password of guest", - LOG_JOB.info) + error_context.context("Reset back the password of guest", LOG_JOB.info) self.gagent.set_user_password(old_password, username=ga_username) @error_context.context_aware @@ -657,8 +671,7 @@ def gagent_check_get_vcpus(self, test, params, env): session = self._get_session(params, self.vm) self._open_session_list.append(session) - error_context.context("Check can-offline field of guest agent.", - LOG_JOB.info) + error_context.context("Check can-offline field of guest agent.", LOG_JOB.info) vcpus_info = self.gagent.get_vcpus() cpu_num_qga = len(vcpus_info) for vcpu in vcpus_info: @@ -671,28 +684,31 @@ def gagent_check_get_vcpus(self, test, params, env): else: vcpu_can_offline_guest = False if vcpu_can_offline_qga != vcpu_can_offline_guest: - test.fail("The first logical vcpu's can-offline field" - " isn't aligned with what it's in guest.") + test.fail( + "The first logical vcpu's can-offline field" + " isn't aligned with what it's in guest." + ) if vcpu["logical-id"] != 0 and vcpu["can-offline"] is False: - test.fail("The vcpus should be able to offline " - "except vcpu0.") + test.fail("The vcpus should be able to offline " "except vcpu0.") if params.get("os_type") == "windows" and vcpu["can-offline"]: - test.fail("All vcpus should not be able to offline in" - " windows guest.") + test.fail( + "All vcpus should not be able to offline in" " windows guest." 
+ ) error_context.context("Check cpu number.", LOG_JOB.info) output = session.cmd_output(params["get_cpu_cmd"]) if params.get("os_type") == "windows": - cpu_list = output.strip().split('\n') + cpu_list = output.strip().split("\n") cpu_num_guest = sum(map(int, cpu_list)) else: cpu_num_guest = int(output) if cpu_num_qga != cpu_num_guest: - test.fail("CPU number doen't match.\n" - "number from guest os is %s,number from guest-agent is %s." % - (cpu_num_guest, cpu_num_qga)) + test.fail( + "CPU number doen't match.\n" + f"number from guest os is {cpu_num_guest},number from guest-agent is {cpu_num_qga}." + ) @error_context.context_aware def gagent_check_set_vcpus(self, test, params, env): @@ -709,12 +725,14 @@ def gagent_check_set_vcpus(self, test, params, env): if vcpus_num < 2: test.error("the vpus number of guest should be more than 1") for index in range(0, vcpus_num - 1): - if (vcpus_info[index]["online"] is True and - vcpus_info[index]["can-offline"] is True and - vcpus_info[index]['logical-id'] != 0): + if ( + vcpus_info[index]["online"] is True + and vcpus_info[index]["can-offline"] is True + and vcpus_info[index]["logical-id"] != 0 + ): vcpus_info[index]["online"] = False del vcpus_info[index]["can-offline"] - action = {'vcpus': [vcpus_info[index]]} + action = {"vcpus": [vcpus_info[index]]} self.gagent.set_vcpus(action) # Check if the result is as expected vcpus_info = self.gagent.get_vcpus() @@ -750,13 +768,11 @@ def gagent_check_set_mem_blocks(self, test, params, env): # record the memory blocks phys-index which is set to offline mem_off_phys_index_list = [] - error_context.context("Get the size of memory block unit.", - LOG_JOB.info) + error_context.context("Get the size of memory block unit.", LOG_JOB.info) mem_block_info = self.gagent.get_memory_block_info()["size"] mem_unit_size = mem_block_info / float(1024 * 1024) - error_context.context("Offline one memory block in guest.", - LOG_JOB.info) + error_context.context("Offline one memory block in guest.", LOG_JOB.info) mem_size_original = session.cmd_output(cmd_get_mem).strip().split()[1] mem_blocks = self.gagent.get_memory_blocks() mem_list_index = 0 @@ -771,24 +787,30 @@ def gagent_check_set_mem_blocks(self, test, params, env): return session.cmd(cmd_offline_mem % mem_phys_index) - error_context.context("Verify it's changed to offline status via" - " agent.", LOG_JOB.info) + error_context.context( + "Verify it's changed to offline status via" " agent.", LOG_JOB.info + ) mem_blocks = self.gagent.get_memory_blocks() if mem_blocks[mem_list_index]["online"] is not False: - test.fail("%s phys-index memory block is still online" - " via agent." % mem_phys_index) + test.fail( + f"{mem_phys_index} phys-index memory block is still online" + " via agent." + ) - error_context.context("Verify the memory block unit size.", - LOG_JOB.info) + error_context.context("Verify the memory block unit size.", LOG_JOB.info) mem_size = session.cmd_output(cmd_get_mem) mem_size_aft_offline_guest = mem_size.strip().split()[1] delta = float(mem_size_original) - float(mem_size_aft_offline_guest) if delta != mem_unit_size: - test.fail("Memory block info is not correct\nit's %s via agent\n" - "it's %s via guest." % (mem_unit_size, delta)) + test.fail( + f"Memory block info is not correct\nit's {mem_unit_size} via agent\n" + f"it's {delta} via guest." 
+ ) - error_context.context("Offline some memory blocks which can be" - " offline via agent.", LOG_JOB.info) + error_context.context( + "Offline some memory blocks which can be" " offline via agent.", + LOG_JOB.info, + ) # record the memory blocks which will be offline mem_blocks_list = [] count = 0 @@ -797,59 +819,77 @@ def gagent_check_set_mem_blocks(self, test, params, env): if memory["online"] and memory["can-offline"]: mem_phys_index = memory["phys-index"] mem_off_phys_index_list.append(mem_phys_index) - mem_obj = {"online": False, "can-offline": True, - "phys-index": mem_phys_index} + mem_obj = { + "online": False, + "can-offline": True, + "phys-index": mem_phys_index, + } mem_blocks_list.append(mem_obj) count += 1 if count >= 5: break if mem_blocks_list is not None: self.gagent.set_memory_blocks(mem_blocks_list) - error_context.context("Verify memory size is decreased after" - " offline.", LOG_JOB.info) + error_context.context( + "Verify memory size is decreased after" " offline.", LOG_JOB.info + ) mem_size = session.cmd_output(cmd_get_mem) mem_size_aft_offline_qga = mem_size.strip().split()[1] - if float(mem_size_aft_offline_qga) >= \ - float(mem_size_aft_offline_guest): - test.fail("Memory isn't decreased\nsize before is %s\n" - "size after is %s" % (mem_size_aft_offline_guest, - mem_size_aft_offline_qga)) + if float(mem_size_aft_offline_qga) >= float(mem_size_aft_offline_guest): + test.fail( + f"Memory isn't decreased\nsize before is {mem_size_aft_offline_guest}\n" + f"size after is {mem_size_aft_offline_qga}" + ) else: - LOG_JOB.info("The memory blocks are already offline," - " no need to do offline operation.") + LOG_JOB.info( + "The memory blocks are already offline," + " no need to do offline operation." + ) - error_context.context("Recovery the memory blocks which are set to" - " offline before.", LOG_JOB.info) + error_context.context( + "Recovery the memory blocks which are set to" " offline before.", + LOG_JOB.info, + ) # record the memory blocks which will be online mem_blocks_list = [] for mem_phys_index in mem_off_phys_index_list: - mem_obj = {"online": True, "can-offline": True, - "phys-index": mem_phys_index} + mem_obj = { + "online": True, + "can-offline": True, + "phys-index": mem_phys_index, + } mem_blocks_list.append(mem_obj) self.gagent.set_memory_blocks(mem_blocks_list) mem_size_final = session.cmd_output(cmd_get_mem).strip().split()[1] if float(mem_size_final) != float(mem_size_original): - test.fail("Memory is not the same with original\n" - "original size is %s\nfinal size is %s." % - (mem_size_original, mem_size_final)) + test.fail( + "Memory is not the same with original\n" + f"original size is {mem_size_original}\nfinal size is {mem_size_final}." + ) - error_context.context("Offline one memory block which can't be" - " offline.", LOG_JOB.info) + error_context.context( + "Offline one memory block which can't be" " offline.", LOG_JOB.info + ) mem_blocks = self.gagent.get_memory_blocks() for memory in mem_blocks: if memory["online"] and memory["can-offline"] is False: mem_obj_index = memory["phys-index"] break else: - LOG_JOB.info("There is no required memory block that can-offline" - " attribute is False.") + LOG_JOB.info( + "There is no required memory block that can-offline" + " attribute is False." 
+ ) return - mem_blocks_list = [{"online": False, "can-offline": True, - "phys-index": mem_obj_index}] + mem_blocks_list = [ + {"online": False, "can-offline": True, "phys-index": mem_obj_index} + ] result = self.gagent.set_memory_blocks(mem_blocks_list) if "operation-failed" not in result[0]["response"]: - test.fail("Didn't return the suitable description," - " the output info is %s." % result) + test.fail( + "Didn't return the suitable description," + f" the output info is {result}." + ) @error_context.context_aware def gagent_check_get_time(self, test, params, env): @@ -864,17 +904,22 @@ def gagent_check_get_time(self, test, params, env): get_guest_time_cmd = params["get_guest_time_cmd"] error_context.context("get the time of the guest", LOG_JOB.info) nanoseconds_time = self.gagent.get_time() - error_context.context("the time get by guest-get-time is '%d' " - % nanoseconds_time, LOG_JOB.info) + error_context.context( + "the time get by guest-get-time is '%d' " % nanoseconds_time, LOG_JOB.info + ) guest_time = session.cmd_output(get_guest_time_cmd) if not guest_time: test.error("can't get the guest time for contrast") - error_context.context("the time get inside guest by shell cmd is '%d' " - % int(guest_time), LOG_JOB.info) + error_context.context( + "the time get inside guest by shell cmd is '%d' " % int(guest_time), + LOG_JOB.info, + ) delta = abs(int(guest_time) - nanoseconds_time / 1000000000) if delta > 3: - test.fail("the time get by guest agent is not the same " - "with that by time check cmd inside guest") + test.fail( + "the time get by guest agent is not the same " + "with that by time check cmd inside guest" + ) @error_context.context_aware def gagent_check_set_time(self, test, params, env): @@ -898,14 +943,20 @@ def gagent_check_set_time(self, test, params, env): guest_time_before = session.cmd_output(get_guest_time_cmd) if not guest_time_before: test.error("can't get the guest time for contrast") - error_context.context("the time before being moved back into past is '%d' " - % int(guest_time_before), LOG_JOB.info) + error_context.context( + "the time before being moved back into past is '%d' " + % int(guest_time_before), + LOG_JOB.info, + ) # Need to move the guest time one week into the past target_time = (int(guest_time_before) - 604800) * 1000000000 self.gagent.set_time(target_time) guest_time_after = session.cmd_output(get_guest_time_cmd) - error_context.context("the time after being moved back into past is '%d' " - % int(guest_time_after), LOG_JOB.info) + error_context.context( + "the time after being moved back into past is '%d' " + % int(guest_time_after), + LOG_JOB.info, + ) delta = abs(int(guest_time_after) - target_time / 1000000000) if delta > 3: test.fail("the time set for guest is not the same with target") @@ -913,8 +964,7 @@ def gagent_check_set_time(self, test, params, env): # set invalid guest time if needed invalid_time_test = params.get_boolean("invalid_time_test") if invalid_time_test: - error_context.context("Set time to an invalid value.", - LOG_JOB.info) + error_context.context("Set time to an invalid value.", LOG_JOB.info) guest_time_before_invalid = session.cmd_output(get_guest_time_cmd) target_time_invalid = int(guest_time_before) * 1000000000000 try: @@ -924,8 +974,7 @@ def gagent_check_set_time(self, test, params, env): if expected not in e.edata["desc"]: test.fail(str(e)) guest_time_after_invalid = session.cmd_output(get_guest_time_cmd) - delta = abs(int(guest_time_after_invalid) - int( - guest_time_before_invalid)) + delta = 
abs(int(guest_time_after_invalid) - int(guest_time_before_invalid)) # time should have no change after invalid time set, 1min is # acceptable as there are some check during test if delta > 60: @@ -936,19 +985,24 @@ def gagent_check_set_time(self, test, params, env): move_time_cmd = params["move_time_cmd"] session.cmd("hwclock -w") guest_hwclock_after_set = session.cmd_output("date +%s") - error_context.context("hwclock is '%d' " % int(guest_hwclock_after_set), - LOG_JOB.info) + error_context.context( + "hwclock is '%d' " % int(guest_hwclock_after_set), LOG_JOB.info + ) session.cmd(move_time_cmd) time_after_move = session.cmd_output("date +%s") - error_context.context("the time after move back is '%d' " - % int(time_after_move), LOG_JOB.info) + error_context.context( + "the time after move back is '%d' " % int(time_after_move), LOG_JOB.info + ) self.gagent.set_time() guest_time_after_reset = session.cmd_output(get_guest_time_cmd) - error_context.context("the time after being reset is '%d' " - % int(guest_time_after_reset), LOG_JOB.info) + error_context.context( + "the time after being reset is '%d' " % int(guest_time_after_reset), + LOG_JOB.info, + ) guest_hwclock = session.cmd_output("date +%s") - error_context.context("hwclock for compare is '%d' " % int(guest_hwclock), - LOG_JOB.info) + error_context.context( + "hwclock for compare is '%d' " % int(guest_hwclock), LOG_JOB.info + ) delta = abs(int(guest_time_after_reset) - int(guest_hwclock)) if delta > 3: test.fail("The guest time can't be set from hwclock on host") @@ -970,6 +1024,7 @@ def gagent_check_time_sync(self, test, params, env): :param params: Dictionary with the test parameters :param env: Dictionary with test environment. """ + def time_drift(): """ Get the time diff between host and guest @@ -978,8 +1033,7 @@ def time_drift(): host_time = process.system_output("date +%s") get_guest_time_cmd = params["get_guest_time_cmd"] guest_time = session.cmd_output(get_guest_time_cmd) - LOG_JOB.info("Host time is %s,guest time is %s.", host_time, - guest_time) + LOG_JOB.info("Host time is %s,guest time is %s.", host_time, guest_time) time_diff = abs(int(host_time) - int(guest_time)) return time_diff @@ -994,8 +1048,9 @@ def time_drift(): if session.cmd_status(time_service_status_cmd): session.cmd(time_service_start_cmd) - error_context.context("Config time resource and restart time" - " service.", LOG_JOB.info) + error_context.context( + "Config time resource and restart time" " service.", LOG_JOB.info + ) session.cmd(time_config_cmd) session.cmd(time_service_stop_cmd) session.cmd(time_service_start_cmd) @@ -1005,7 +1060,7 @@ def time_drift(): self.vm.verify_status("paused") pause_time = float(params["pause_time"]) - error_context.context("Sleep %s seconds." % pause_time, LOG_JOB.info) + error_context.context(f"Sleep {pause_time} seconds.", LOG_JOB.info) time.sleep(pause_time) error_context.context("Resume the VM", LOG_JOB.info) @@ -1014,7 +1069,7 @@ def time_drift(): time_diff_before = time_drift() if time_diff_before < (pause_time - 5): - test.error("Time is not paused about %s seconds." 
% pause_time) + test.error(f"Time is not paused about {pause_time} seconds.") error_context.context("Execute guest-set-time cmd.", LOG_JOB.info) self.gagent.set_time() @@ -1044,8 +1099,9 @@ def _get_mem_used(self, session, cmd): memory_usage = int(output.split(" ")[-2].replace(",", "")) return memory_usage except Exception: - raise exceptions.TestError("Get invalid memory usage by " - "cmd '%s' (%s)" % (cmd, output)) + raise exceptions.TestError( + "Get invalid memory usage by " f"cmd '{cmd}' ({output})" + ) @error_context.context_aware def gagent_check_memory_leak(self, test, params, env): @@ -1060,31 +1116,38 @@ def gagent_check_memory_leak(self, test, params, env): timeout = float(params.get("login_timeout", 240)) test_command = params.get("test_command", "guest-info") - memory_usage_cmd = params.get("memory_usage_cmd", - "tasklist | findstr /I qemu-ga.exe") + memory_usage_cmd = params.get( + "memory_usage_cmd", "tasklist | findstr /I qemu-ga.exe" + ) session = self.vm.wait_for_login(timeout=timeout) - error_context.context("get the memory usage of qemu-ga before run '%s'" % - test_command, LOG_JOB.info) + error_context.context( + f"get the memory usage of qemu-ga before run '{test_command}'", + LOG_JOB.info, + ) memory_usage_before = self._get_mem_used(session, memory_usage_cmd) session.close() repeats = int(params.get("repeats", 1)) for i in range(repeats): - error_context.context("execute '%s' %s times" % (test_command, i + 1), - LOG_JOB.info) + error_context.context( + f"execute '{test_command}' {i + 1} times", LOG_JOB.info + ) return_msg = self.gagent.guest_info() LOG_JOB.info(str(return_msg)) self.vm.verify_alive() - error_context.context("get the memory usage of qemu-ga after run '%s'" % - test_command, LOG_JOB.info) + error_context.context( + f"get the memory usage of qemu-ga after run '{test_command}'", + LOG_JOB.info, + ) session = self.vm.wait_for_login(timeout=timeout) memory_usage_after = self._get_mem_used(session, memory_usage_cmd) session.close() # less than 500K is acceptable. if memory_usage_after - memory_usage_before > 500: - test.fail("The memory usages are different, " - "before run command is %skb and " - "after run command is %skb" % (memory_usage_before, - memory_usage_after)) + test.fail( + "The memory usages are different, " + f"before run command is {memory_usage_before}kb and " + f"after run command is {memory_usage_after}kb" + ) @error_context.context_aware def gagent_check_fstrim(self, test, params, env): @@ -1095,15 +1158,17 @@ def gagent_check_fstrim(self, test, params, env): :param env: Dictionary with test environment. """ + def get_host_scsi_disk(): """ Get latest scsi disk which enulated by scsi_debug module Return the device name and the id in host """ scsi_disk_info = process.system_output( - avo_path.find_command('lsscsi'), shell=True) + avo_path.find_command("lsscsi"), shell=True + ) scsi_disk_info = scsi_disk_info.decode().splitlines() - scsi_debug = [_ for _ in scsi_disk_info if 'scsi_debug' in _][-1] + scsi_debug = [_ for _ in scsi_disk_info if "scsi_debug" in _][-1] scsi_debug = scsi_debug.split() host_id = scsi_debug[0][1:-1] device_name = scsi_debug[-1] @@ -1123,8 +1188,8 @@ def get_provisioning_mode(device, host_id): depends on params for scsi_debug module. 
""" device_name = os.path.basename(device) - path = "/sys/block/%s/device/scsi_disk" % device_name - path += "/%s/provisioning_mode" % host_id + path = f"/sys/block/{device_name}/device/scsi_disk" + path += f"/{host_id}/provisioning_mode" return genio.read_one_line(path).strip() def get_allocation_bitmap(): @@ -1134,9 +1199,10 @@ def get_allocation_bitmap(): path = "/sys/bus/pseudo/drivers/scsi_debug/map" try: return genio.read_one_line(path).strip() - except IOError: - LOG_JOB.warn("could not get bitmap info, path '%s' is " - "not exist", path) + except OSError: + LOG_JOB.warning( + "could not get bitmap info, path '%s' is " "not exist", path + ) return "" for vm in env.get_all_vms(): @@ -1153,17 +1219,15 @@ def get_allocation_bitmap(): vm_name = params["main_vm"] test_image = "scsi_debug" params["start_vm"] = "yes" - params["image_name_%s" % test_image] = disk_name - params["image_format_%s" % test_image] = "raw" - params["image_raw_device_%s" % test_image] = "yes" - params["force_create_image_%s" % test_image] = "no" - params["drive_format_%s" % test_image] = "scsi-block" - params["drv_extra_params_%s" % test_image] = "discard=on" + params[f"image_name_{test_image}"] = disk_name + params[f"image_format_{test_image}"] = "raw" + params[f"image_raw_device_{test_image}"] = "yes" + params[f"force_create_image_{test_image}"] = "no" + params[f"drive_format_{test_image}"] = "scsi-block" + params[f"drv_extra_params_{test_image}"] = "discard=on" params["images"] = " ".join([params["images"], test_image]) - error_context.context( - "boot guest with disk '%s'" % - disk_name, LOG_JOB.info) + error_context.context(f"boot guest with disk '{disk_name}'", LOG_JOB.info) env_process.preprocess_vm(test, params, env, vm_name) self.initialize(test, params, env) @@ -1171,17 +1235,16 @@ def get_allocation_bitmap(): timeout = float(params.get("login_timeout", 240)) session = self.vm.wait_for_login(timeout=timeout) device_name = get_guest_discard_disk(session) - self.gagent_setsebool_value('on', params, self.vm) + self.gagent_setsebool_value("on", params, self.vm) - error_context.context( - "format disk '%s' in guest" % - device_name, LOG_JOB.info) + error_context.context(f"format disk '{device_name}' in guest", LOG_JOB.info) format_disk_cmd = params["format_disk_cmd"] format_disk_cmd = format_disk_cmd.replace("DISK", device_name) session.cmd(format_disk_cmd) - error_context.context("mount disk with discard options '%s'" % device_name, - LOG_JOB.info) + error_context.context( + f"mount disk with discard options '{device_name}'", LOG_JOB.info + ) mount_disk_cmd = params["mount_disk_cmd"] mount_disk_cmd = mount_disk_cmd.replace("DISK", device_name) session.cmd(mount_disk_cmd) @@ -1190,8 +1253,7 @@ def get_allocation_bitmap(): write_disk_cmd = params["write_disk_cmd"] session.cmd(write_disk_cmd) - error_context.context("Delete the file created before on disk", - LOG_JOB.info) + error_context.context("Delete the file created before on disk", LOG_JOB.info) delete_file_cmd = params["delete_file_cmd"] session.cmd(delete_file_cmd) @@ -1199,31 +1261,39 @@ def get_allocation_bitmap(): bitmap_before_trim = get_allocation_bitmap() if not re.match(r"\d+-\d+", bitmap_before_trim): test.fail("didn't get the bitmap of the target disk") - error_context.context("the bitmap_before_trim is %s" % bitmap_before_trim, - LOG_JOB.info) - total_block_before_trim = abs(sum([eval(i) for i in - bitmap_before_trim.split(',')])) - error_context.context("the total_block_before_trim is %d" - % total_block_before_trim, LOG_JOB.info) + 
error_context.context( + f"the bitmap_before_trim is {bitmap_before_trim}", LOG_JOB.info + ) + total_block_before_trim = abs( + sum([eval(i) for i in bitmap_before_trim.split(",")]) + ) + error_context.context( + "the total_block_before_trim is %d" % total_block_before_trim, LOG_JOB.info + ) error_context.context("execute the guest-fstrim cmd", LOG_JOB.info) self.gagent.fstrim() - self.gagent_setsebool_value('off', params, self.vm) + self.gagent_setsebool_value("off", params, self.vm) # check the bitmap after trim bitmap_after_trim = get_allocation_bitmap() if not re.match(r"\d+-\d+", bitmap_after_trim): test.fail("didn't get the bitmap of the target disk") - error_context.context("the bitmap_after_trim is %s" % bitmap_after_trim, - LOG_JOB.info) - total_block_after_trim = abs(sum([eval(i) for i in - bitmap_after_trim.split(',')])) - error_context.context("the total_block_after_trim is %d" - % total_block_after_trim, LOG_JOB.info) + error_context.context( + f"the bitmap_after_trim is {bitmap_after_trim}", LOG_JOB.info + ) + total_block_after_trim = abs( + sum([eval(i) for i in bitmap_after_trim.split(",")]) + ) + error_context.context( + "the total_block_after_trim is %d" % total_block_after_trim, LOG_JOB.info + ) if total_block_after_trim > total_block_before_trim: - test.fail("the bitmap_after_trim is lager, the command" - "guest-fstrim may not work") + test.fail( + "the bitmap_after_trim is lager, the command" + "guest-fstrim may not work" + ) if self.vm: self.vm.destroy() @@ -1244,50 +1314,60 @@ def gagent_check_get_disks(self, test, params, env): session = self._get_session(params, None) self._open_session_list.append(session) - error_context.context("Check all disks info in a loop.", - LOG_JOB.info) + error_context.context("Check all disks info in a loop.", LOG_JOB.info) disks_info_qga = self.gagent.get_disks() - cmd_diskinfo_guest = params['diskinfo_guest_cmd'] + cmd_diskinfo_guest = params["diskinfo_guest_cmd"] disks_info_guest = session.cmd_output(cmd_diskinfo_guest) - disks_info_guest = json.loads(disks_info_guest)['blockdevices'] + disks_info_guest = json.loads(disks_info_guest)["blockdevices"] for disk_info_guest in disks_info_guest: - diskname = disk_info_guest['kname'] - error_context.context("Check properties of disk %s" - % diskname, LOG_JOB.info) + diskname = disk_info_guest["kname"] + error_context.context(f"Check properties of disk {diskname}", LOG_JOB.info) for disk_info_qga in disks_info_qga: - if diskname == disk_info_qga['name']: - error_context.context("Check dependencies of disk %s" - % diskname, LOG_JOB.info) - dependencies = disk_info_qga['dependencies'] - if disk_info_guest['type'] == 'disk': - if (dependencies and dependencies[0] != 'null'): - test.error("Disk %s dependencies " - "should be [] or ['null']." % diskname) + if diskname == disk_info_qga["name"]: + error_context.context( + f"Check dependencies of disk {diskname}", LOG_JOB.info + ) + dependencies = disk_info_qga["dependencies"] + if disk_info_guest["type"] == "disk": + if dependencies and dependencies[0] != "null": + test.error( + f"Disk {diskname} dependencies " + "should be [] or ['null']." + ) else: - if (not dependencies or (dependencies[0] != - disk_info_guest['pkname'])): - test.fail("Disk %s dependencies is different " - "between guest and qga." 
% diskname) - - error_context.context("Check partition of disk %s" - % diskname, LOG_JOB.info) - partition = False if disk_info_guest['type'] != "part" else True + if not dependencies or ( + dependencies[0] != disk_info_guest["pkname"] + ): + test.fail( + f"Disk {diskname} dependencies is different " + "between guest and qga." + ) + + error_context.context( + f"Check partition of disk {diskname}", LOG_JOB.info + ) + partition = False if disk_info_guest["type"] != "part" else True if disk_info_qga["partition"] != partition: - test.fail("Disk %s partition is different " - "between guest and qga." % diskname) - - if disk_info_guest['type'] == 'lvm': - error_context.context("Check alias of disk %s" - % diskname, LOG_JOB.info) + test.fail( + f"Disk {diskname} partition is different " + "between guest and qga." + ) + + if disk_info_guest["type"] == "lvm": + error_context.context( + f"Check alias of disk {diskname}", LOG_JOB.info + ) cmd_get_disk_alias = params["cmd_get_disk_alias"] % diskname disk_alias = session.cmd_output(cmd_get_disk_alias).strip() - if disk_info_qga['alias'] != disk_alias: - test.fail("Disk %s alias is defferent " - "between guest and qga." % diskname) + if disk_info_qga["alias"] != disk_alias: + test.fail( + f"Disk {diskname} alias is defferent " + "between guest and qga." + ) break else: - test.fail("Failed to get disk %s with qga." % diskname) + test.fail(f"Failed to get disk {diskname} with qga.") @error_context.context_aware def gagent_check_ssh_public_key_injection(self, test, params, env): @@ -1317,8 +1397,7 @@ def ssh_key_test(operation, guest_name, *keys, **kwargs): :param kwargs: optional keyword arguments """ - op_func = getattr(self.gagent, 'ssh_%s_authorized_keys' % - operation) + op_func = getattr(self.gagent, f"ssh_{operation}_authorized_keys") op_func(guest_name, *keys, **kwargs) keys_ga = self.gagent.ssh_get_authorized_keys(guest_name) @@ -1357,8 +1436,10 @@ def _generate_host_keys(): process.system(params["cmd_clean_keys"], shell=True) status = process.system(params["ssh_keygen_cmd"], shell=True) if status: - test.error("Can not generate ssh key with no " - "interaction, please have a check.") + test.error( + "Can not generate ssh key with no " + "interaction, please have a check." + ) cmd_get_hostkey = params["cmd_get_hostkey"] host_key = process.getoutput(cmd_get_hostkey) return host_key @@ -1372,14 +1453,14 @@ def _login_guest_test(guest_ip): cmd_login_guest = params["test_login_guest"] % guest_ip output = process.system_output(cmd_login_guest, shell=True) - output = output.strip().decode(encoding="utf-8", - errors="strict") + output = output.strip().decode(encoding="utf-8", errors="strict") if output_check_str not in output: - test.error("Can not login guest without interaction," - " basic function test is fail.") + test.error( + "Can not login guest without interaction," + " basic function test is fail." + ) - def _value_compared_ga_guest(return_value_ga, - return_value_guest, status): + def _value_compared_ga_guest(return_value_ga, return_value_guest, status): """ Compare the return value from guest and ga. @@ -1392,43 +1473,43 @@ def _value_compared_ga_guest(return_value_ga, keys_guest = return_value_guest.replace("\n", ",") for keys_ga in keys_ga_list: if keys_ga not in keys_guest: - test.fail("Key %s is not same with guest, " - "%s ssh keys failed." 
% keys_ga, status) + test.fail( + "Key {} is not same with guest, " "{} ssh keys failed.".format( + *keys_ga + ), + status, + ) session = self._get_session(params, None) self._open_session_list.append(session) mac_addr = self.vm.get_mac_address() os_type = self.params["os_type"] - guest_user = self.params['guest_user'] + guest_user = self.params["guest_user"] output_check_str = self.params["output_check_str"] - guest_ip_ipv4 = utils_net.get_guest_ip_addr(session, mac_addr, - os_type) + guest_ip_ipv4 = utils_net.get_guest_ip_addr(session, mac_addr, os_type) _prepared_n_restore_env() - error_context.context("Check the basic function ", - LOG_JOB.info) + error_context.context("Check the basic function ", LOG_JOB.info) host_key1 = _generate_host_keys() ssh_key_test("add", guest_user, host_key1, reset=False) _login_guest_test(guest_ip_ipv4) - error_context.context("Check whether can add existed key.", - LOG_JOB.info) + error_context.context("Check whether can add existed key.", LOG_JOB.info) host_key2 = _generate_host_keys() ssh_key_test("add", guest_user, host_key1, host_key2, reset=False) _login_guest_test(guest_ip_ipv4) - error_context.context("Check whether can remove keys", - LOG_JOB.info) + error_context.context("Check whether can remove keys", LOG_JOB.info) host_key3 = "ssh-rsa ANotExistKey" keys_qga, keys_guest = ssh_key_test("remove", guest_user, host_key1, host_key3) for key in [host_key1, host_key3]: if key in keys_guest.replace("\n", ","): - test.fail("Key %s is still in guest," - "Can not remove key in guest." % key) + test.fail( + f"Key {key} is still in guest," "Can not remove key in guest." + ) _login_guest_test(guest_ip_ipv4) - error_context.context("Check whether can reset keys", - LOG_JOB.info) + error_context.context("Check whether can reset keys", LOG_JOB.info) host_key4 = _generate_host_keys() ssh_key_test("add", guest_user, host_key4, reset=True) _login_guest_test(guest_ip_ipv4) @@ -1451,26 +1532,32 @@ def gagent_check_get_cpustats(self, test, params, env): session = self._get_session(params, self.vm) self._open_session_list.append(session) - error_context.context("Check cpustats info of guest and " - "number of cpus.", LOG_JOB.info) + error_context.context( + "Check cpustats info of guest and " "number of cpus.", LOG_JOB.info + ) cs_info_qga = self.gagent.get_cpustats() cpu_num_guest = int(session.cmd_output(params["cpu_num_guest"])) cpu_num_qga = 0 os_type = self.params["os_type"] improper_list = [] for cs in cs_info_qga: - if cs['type'] != os_type: + if cs["type"] != os_type: test.fail("Cpustats info 'type' doesn't match.") for key in cs.keys(): - if (key != "type" and key != "cpu" and - key not in list(params["cpustats_info_list"].split(","))): - improper_list.append({("cpu%s" % cpu_num_qga): key}) + if ( + key != "type" + and key != "cpu" + and key not in list(params["cpustats_info_list"].split(",")) + ): + improper_list.append({(f"cpu{cpu_num_qga}"): key}) cpu_num_qga += 1 if improper_list: - test.fail("Cpustats info is not totally correct: %s" % improper_list) - if cpu_num_qga != int(cpu_num_guest-1): - test.fail("Number of cpus is not correct, cpu_num_qga is: %d" - "cpu_num_guest is %d" % (cpu_num_qga, int(cpu_num_guest-1))) + test.fail(f"Cpustats info is not totally correct: {improper_list}") + if cpu_num_qga != int(cpu_num_guest - 1): + test.fail( + "Number of cpus is not correct, cpu_num_qga is: %d" + "cpu_num_guest is %d" % (cpu_num_qga, int(cpu_num_guest - 1)) + ) @error_context.context_aware def gagent_check_get_diskstats(self, test, params, env): @@ 
-1494,51 +1581,60 @@ def gagent_check_get_diskstats(self, test, params, env): error_context.context("Check diskstats argument numbers.", LOG_JOB.info) # check the number of arguments whether is correct # with official document - num_arg_guest = int(session.cmd_output(params['count_num_arg'])) + num_arg_guest = int(session.cmd_output(params["count_num_arg"])) key_list = params["diskstats_info_list"].split(",") num_arg_def = len(list(key_list)) if num_arg_guest != num_arg_def: - test.error("Diskstats argument numbers may change, " - "please take a look.") + test.error("Diskstats argument numbers may change, " "please take a look.") disk_num_qga = 0 improper_list = [] for ds in ds_info_qga: - ds_name = ds['name'] - error_context.context("Check %s 'major' and 'minor' value." % ds_name, - LOG_JOB.info) - ds_major_qga = int(ds['major']) - ds_minor_qga = int(ds['minor']) + ds_name = ds["name"] + error_context.context( + f"Check {ds_name} 'major' and 'minor' value.", LOG_JOB.info + ) + ds_major_qga = int(ds["major"]) + ds_minor_qga = int(ds["minor"]) ds_major_guest = int(session.cmd_output(params["cmd_ds_major"] % ds_name)) ds_minor_guest = int(session.cmd_output(params["cmd_ds_minor"] % ds_name)) if ds_major_qga != ds_major_guest or ds_minor_qga != ds_minor_guest: - test.fail("Major's or minor's value is not correct, " - "major&minor in guest are: %d, %d" - "by qga are: %d, %d. Since the following checkpoints " - "are not detected, print all cfg file contents here: %s" % - (ds_major_guest, ds_minor_guest, ds_major_qga, - ds_minor_qga, params["diskstats_info_list"])) + test.fail( + "Major's or minor's value is not correct, " + "major&minor in guest are: %d, %d" + "by qga are: %d, %d. Since the following checkpoints " + "are not detected, print all cfg file contents here: %s" + % ( + ds_major_guest, + ds_minor_guest, + ds_major_qga, + ds_minor_qga, + params["diskstats_info_list"], + ) + ) for Key in ds.keys(): if Key == "stats": - for key in ds['stats'].keys(): + for key in ds["stats"].keys(): if key not in params["diskstats_info_list"]: improper_list.append({ds["name"]: key}) elif Key not in params["diskstats_info_list"]: improper_list.append({ds["name"]: key}) disk_num_qga += 1 - error_context.context("Check diskstats arguments whether are " - "corresponding.", LOG_JOB.info) + error_context.context( + "Check diskstats arguments whether are " "corresponding.", LOG_JOB.info + ) if improper_list: - test.fail("Diskstats info is not totally correct: %s" - % improper_list) + test.fail(f"Diskstats info is not totally correct: {improper_list}") error_context.context("Check disks numbers.", LOG_JOB.info) disk_num_guest = session.cmd_output(params["disk_num_guest"]) if disk_num_qga != int(disk_num_guest): - test.fail("Number of disks is not correct, disk_num_qga is %s;" - "disk_num_guest is %s" % (disk_num_qga, disk_num_guest)) + test.fail( + f"Number of disks is not correct, disk_num_qga is {disk_num_qga};" + f"disk_num_guest is {disk_num_guest}" + ) @error_context.context_aware def gagent_check_get_interfaces(self, test, params, env): @@ -1563,6 +1659,7 @@ def gagent_check_get_interfaces(self, test, params, env): :param params: Dictionary with the test parameters :param env: Dictionary with test environment. """ + def get_interface(ret_list, mac_addr): """ Get the available interface name. 
@@ -1572,8 +1669,10 @@ def get_interface(ret_list, mac_addr): interface_name = "" if_index = 0 for interface in ret_list: - if "hardware-address" in interface and \ - interface["hardware-address"] == mac_addr: + if ( + "hardware-address" in interface + and interface["hardware-address"] == mac_addr + ): interface_name = interface["name"] break if_index += 1 @@ -1589,12 +1688,10 @@ def ip_addr_check(session, mac_addr, ret_list, if_index, if_name): :param if_index: the interface's index in ret :param if_name: interface name """ - guest_ip_ipv4 = utils_net.get_guest_ip_addr(session, mac_addr, - os_type) - guest_ip_ipv6 = utils_net.get_guest_ip_addr(session, mac_addr, - os_type, - ip_version="ipv6", - linklocal=True) + guest_ip_ipv4 = utils_net.get_guest_ip_addr(session, mac_addr, os_type) + guest_ip_ipv6 = utils_net.get_guest_ip_addr( + session, mac_addr, os_type, ip_version="ipv6", linklocal=True + ) ip_lists = ret_list[if_index]["ip-addresses"] for ip in ip_lists: if ip["ip-address-type"] == "ipv4": @@ -1602,42 +1699,50 @@ def ip_addr_check(session, mac_addr, ret_list, if_index, if_name): elif ip["ip-address-type"] == "ipv6": ip_addr_qga_ipv6 = ip["ip-address"].split("%")[0] else: - test.fail("The ip address type is %s, but it should be" - " ipv4 or ipv6." % ip["ip-address-type"]) - if (guest_ip_ipv4 != ip_addr_qga_ipv4 # pylint: disable=E0606 - or guest_ip_ipv6 != ip_addr_qga_ipv6): # pylint: disable=E0601 - test.fail("Get the wrong ip address for %s interface:\n" - "ipv4 address from qga is %s, the expected is %s;\n" - "ipv6 address from qga is %s, the expected is %s." - % (if_name, ip_addr_qga_ipv4, - guest_ip_ipv4, ip_addr_qga_ipv6, - guest_ip_ipv6)) + test.fail( + "The ip address type is {}, but it should be" + " ipv4 or ipv6.".format(ip["ip-address-type"]) + ) + if ( + guest_ip_ipv4 != ip_addr_qga_ipv4 # pylint: disable=E0606 + or guest_ip_ipv6 != ip_addr_qga_ipv6 + ): # pylint: disable=E0601 + test.fail( + f"Get the wrong ip address for {if_name} interface:\n" + f"ipv4 address from qga is {ip_addr_qga_ipv4}, the expected is {guest_ip_ipv4};\n" + f"ipv6 address from qga is {ip_addr_qga_ipv6}, the expected is {guest_ip_ipv6}." + ) session = self.vm.wait_for_login() session_serial = self.vm.wait_for_serial_login() mac_addr = self.vm.get_mac_address() os_type = self.params["os_type"] - error_context.context("Get the available interface name via" - " guest-network-get-interfaces cmd.", - LOG_JOB.info) + error_context.context( + "Get the available interface name via" " guest-network-get-interfaces cmd.", + LOG_JOB.info, + ) ret = self.gagent.get_network_interface() if_name, if_index = get_interface(ret, mac_addr) if not if_name: - test.fail("Did not get the expected interface," - " the network info is \n%s." % ret) + test.fail( + "Did not get the expected interface," f" the network info is \n{ret}." + ) - error_context.context("Check the available interface name %s" - " via qga." 
% if_name, LOG_JOB.info) + error_context.context( + f"Check the available interface name {if_name}" " via qga.", LOG_JOB.info + ) if os_type == "linux": - if_name_guest = utils_net.get_linux_ifname(session_serial, - mac_addr) + if_name_guest = utils_net.get_linux_ifname(session_serial, mac_addr) else: if_name_guest = utils_net.get_windows_nic_attribute( - session_serial, "macaddress", mac_addr, "netconnectionid") + session_serial, "macaddress", mac_addr, "netconnectionid" + ) if if_name != if_name_guest: - test.fail("Get the wrong interface name, value from qga is: %s; " - "the expected is: %s" % (if_name, if_name_guest)) + test.fail( + f"Get the wrong interface name, value from qga is: {if_name}; " + f"the expected is: {if_name_guest}" + ) error_context.context("Check ip address via qga.", LOG_JOB.info) ip_addr_check(session, mac_addr, ret, if_index, if_name) @@ -1647,8 +1752,10 @@ def ip_addr_check(session, mac_addr, ret_list, if_index, if_name): # disable interface for windows guest and check it # from guest agent if os_type == "linux": - error_context.context("Create a new bridge in guest and check the" - "result from qga.", LOG_JOB.info) + error_context.context( + "Create a new bridge in guest and check the" "result from qga.", + LOG_JOB.info, + ) add_brige_cmd = "ip link add name br0 type bridge" session.cmd(add_brige_cmd) interfaces_after_add = self.gagent.get_network_interface() @@ -1661,21 +1768,23 @@ def ip_addr_check(session, mac_addr, ret_list, if_index, if_name): del_brige_cmd = "ip link del br0" session.cmd(del_brige_cmd) else: - error_context.context("Set down the interface in windows guest.", - LOG_JOB.info) + error_context.context( + "Set down the interface in windows guest.", LOG_JOB.info + ) session_serial.cmd(self.params["cmd_disable_network"] % if_name) ret_after_down = self.gagent.get_network_interface() if_name_down = get_interface(ret_after_down, mac_addr)[0] if if_name_down: - test.fail("From qga result that the interface is still" - " enabled, detailed info is:\n %s" - % ret_after_down) - error_context.context("Set up the interface in guest.", - LOG_JOB.info) + test.fail( + "From qga result that the interface is still" + f" enabled, detailed info is:\n {ret_after_down}" + ) + error_context.context("Set up the interface in guest.", LOG_JOB.info) session_serial.cmd(self.params["cmd_enable_network"] % if_name) - error_context.context("Change ipv4 address and check the result " - "from qga.", LOG_JOB.info) + error_context.context( + "Change ipv4 address and check the result " "from qga.", LOG_JOB.info + ) # for linux guest, need to delete ip address first if os_type == "linux": ip_lists = ret[if_index]["ip-addresses"] @@ -1683,15 +1792,19 @@ def ip_addr_check(session, mac_addr, ret_list, if_index, if_name): if ip["ip-address-type"] == "ipv4": ip_addr_qga_ipv4 = ip["ip-address"] break - session_serial.cmd("ip addr del %s dev %s" % (ip_addr_qga_ipv4, - if_name)) - utils_net.set_guest_ip_addr(session_serial, mac_addr, "192.168.10.10", - os_type=os_type) + session_serial.cmd(f"ip addr del {ip_addr_qga_ipv4} dev {if_name}") + utils_net.set_guest_ip_addr( + session_serial, mac_addr, "192.168.10.10", os_type=os_type + ) ret_ip_change = self.gagent.get_network_interface() - if_name_ip_change, if_index_ip_change = get_interface( - ret_ip_change, mac_addr) - ip_addr_check(session_serial, mac_addr, ret_ip_change, - if_index_ip_change, if_name_ip_change) + if_name_ip_change, if_index_ip_change = get_interface(ret_ip_change, mac_addr) + ip_addr_check( + session_serial, + mac_addr, 
+ ret_ip_change, + if_index_ip_change, + if_name_ip_change, + ) if session: session.close() @@ -1712,18 +1825,18 @@ def gagent_check_reboot_shutdown(self, test, params, env): gagent = self.gagent gagent.fsfreeze() try: - for mode in (gagent.SHUTDOWN_MODE_POWERDOWN, - gagent.SHUTDOWN_MODE_REBOOT): + for mode in (gagent.SHUTDOWN_MODE_POWERDOWN, gagent.SHUTDOWN_MODE_REBOOT): try: gagent.shutdown(mode) except guest_agent.VAgentCmdError as detail: - if not re.search('guest-shutdown has been disabled', - str(detail)): - test.fail("This is not the desired information: ('%s')" - % str(detail)) + if not re.search("guest-shutdown has been disabled", str(detail)): + test.fail( + f"This is not the desired information: ('{str(detail)}')" + ) else: - test.fail("agent shutdown command shouldn't succeed for " - "freeze FS") + test.fail( + "agent shutdown command shouldn't succeed for " "freeze FS" + ) finally: try: gagent.fsthaw(check_status=False) @@ -1741,30 +1854,42 @@ def _change_bl(self, session): cmd_blacklist_backup = self.params["black_file_backup"] session.cmd(cmd_blacklist_backup) full_qga_ver = self._get_qga_version(session, self.vm, main_ver=False) - value_full_qga_ver = (full_qga_ver in VersionInterval('[8.1.0-5,)')) + value_full_qga_ver = full_qga_ver in VersionInterval("[8.1.0-5,)") black_list_spec = self.params["black_list_spec"] cmd_black_list = self.params["black_list"] black_list_change_cmd = self.params["black_list_change_cmd"] if value_full_qga_ver: black_list_spec = "allow-rpcs" cmd_black_list = self.params["black_list_new"] - black_list_change_cmd = "sed -i 's/allow-rpcs.*/allow-rpcs=%s\"/g' /etc/sysconfig/qemu-ga" - elif full_qga_ver in VersionInterval('[7.2.0-4,)'): + black_list_change_cmd = ( + "sed -i 's/allow-rpcs.*/allow-rpcs=%s\"/g' /etc/sysconfig/qemu-ga" + ) + elif full_qga_ver in VersionInterval("[7.2.0-4,)"): black_list_spec = "BLOCK_RPCS" for black_cmd in cmd_black_list.split(): - bl_check_cmd = self.params["black_list_check_cmd"] % (black_list_spec, black_cmd) + bl_check_cmd = self.params["black_list_check_cmd"] % ( + black_list_spec, + black_cmd, + ) bl_change_cmd = black_list_change_cmd % black_cmd session.cmd(bl_change_cmd) output = session.cmd_output(bl_check_cmd) - if (output == "" and value_full_qga_ver or - (not output == "" and not value_full_qga_ver)): - self.test.fail("Failed to change the cmd to " - "white list, the output is %s" % output) + if ( + output == "" + and value_full_qga_ver + or (not output == "" and not value_full_qga_ver) + ): + self.test.fail( + "Failed to change the cmd to " + f"white list, the output is {output}" + ) s, o = session.cmd_status_output(self.params["gagent_restart_cmd"]) if s: - self.test.fail("Could not restart qemu-ga in VM after changing" - " list, detail: %s" % o) + self.test.fail( + "Could not restart qemu-ga in VM after changing" + f" list, detail: {o}" + ) def _change_bl_back(self, session): """ @@ -1785,11 +1910,13 @@ def _read_check(self, ret_handle, content, count=None): LOG_JOB.info("Read content and do check.") ret_read = self.gagent.guest_file_read(ret_handle, count=count) content_read = base64.b64decode(ret_read["buf-b64"]).decode() - LOG_JOB.info("The read content is '%s'; the real content is '%s'.", - content_read, content) + LOG_JOB.info( + "The read content is '%s'; the real content is '%s'.", content_read, content + ) if not content_read.strip() == content.strip(): - self.test.fail("The read content is '%s'; the real content is '%s'." 
- % (content_read, content)) + self.test.fail( + f"The read content is '{content_read}'; the real content is '{content}'." + ) def _guest_file_prepare(self): """ @@ -1803,7 +1930,7 @@ def _guest_file_prepare(self): ranstr = utils_misc.generate_random_string(5) file_name = "qgatest" + ranstr - guest_file = "%s%s" % (self.params["file_path"], file_name) + guest_file = "{}{}".format(self.params["file_path"], file_name) return session, guest_file @error_context.context_aware @@ -1827,8 +1954,9 @@ def gagent_check_file_seek(self, test, params, env): :param params: Dictionary with the test parameters :param env: Dictionary with test environment. """ - error_context.context("Change guest-file related cmd to white list" - " and get guest file name.") + error_context.context( + "Change guest-file related cmd to white list" " and get guest file name." + ) session, tmp_file = self._guest_file_prepare() error_context.context("Write content to file.", LOG_JOB.info) @@ -1837,8 +1965,10 @@ def gagent_check_file_seek(self, test, params, env): self.gagent.guest_file_write(ret_handle, content) self.gagent.guest_file_flush(ret_handle) - error_context.context("Seek to one position and read file with " - "file-seek/read cmd.", LOG_JOB.info) + error_context.context( + "Seek to one position and read file with " "file-seek/read cmd.", + LOG_JOB.info, + ) self.gagent.guest_file_seek(ret_handle, 0, 0) self._read_check(ret_handle, content) @@ -1846,8 +1976,9 @@ def gagent_check_file_seek(self, test, params, env): self.gagent.guest_file_seek(ret_handle, 0, 0) self._read_check(ret_handle, "he", 2) - LOG_JOB.info("Seek the position to file beginning, offset is 2, and " - "read 2 bytes.") + LOG_JOB.info( + "Seek the position to file beginning, offset is 2, and " "read 2 bytes." + ) self.gagent.guest_file_seek(ret_handle, 2, 0) self._read_check(ret_handle, "ll", 2) @@ -1855,13 +1986,14 @@ def gagent_check_file_seek(self, test, params, env): self.gagent.guest_file_seek(ret_handle, 2, 1) self._read_check(ret_handle, "world", 5) - LOG_JOB.info("Seek from the file end position, offset is -5 and " - "read 3 byte.") + LOG_JOB.info( + "Seek from the file end position, offset is -5 and " "read 3 byte." + ) self.gagent.guest_file_seek(ret_handle, -5, 2) self._read_check(ret_handle, "orl", 3) self.gagent.guest_file_close(ret_handle) - cmd_del_file = "%s %s" % (params["cmd_del"], tmp_file) + cmd_del_file = "{} {}".format(params["cmd_del"], tmp_file) session.cmd(cmd_del_file) self._change_bl_back(session) @@ -1880,18 +2012,19 @@ def gagent_check_file_write(self, test, params, env): :param params: Dictionary with the test parameters :param env: Dictionary with test environment. """ - error_context.context("Change guest-file related cmd to white list" - " and get guest file name.") + error_context.context( + "Change guest-file related cmd to white list" " and get guest file name." + ) session, tmp_file = self._guest_file_prepare() - error_context.context("Create new file with mode 'w' and do file" - " write test", LOG_JOB.info) + error_context.context( + "Create new file with mode 'w' and do file" " write test", LOG_JOB.info + ) ret_handle = int(self.gagent.guest_file_open(tmp_file, mode="w+")) content = "hello world\n" content_check = "" for cnt in range(1, 10, 2): - error_context.context("Write %s bytes to guest file." 
- % cnt, LOG_JOB.info) + error_context.context(f"Write {cnt} bytes to guest file.", LOG_JOB.info) self.gagent.guest_file_seek(ret_handle, 0, 2) self.gagent.guest_file_write(ret_handle, content, cnt) self.gagent.guest_file_flush(ret_handle) @@ -1899,8 +2032,9 @@ def gagent_check_file_write(self, test, params, env): content_check += content[: int(cnt)] self._read_check(ret_handle, content_check) - error_context.context("Write more than all counts bytes to" - " guest file.", LOG_JOB.info) + error_context.context( + "Write more than all counts bytes to" " guest file.", LOG_JOB.info + ) try: self.gagent.guest_file_write(ret_handle, content, 15) except guest_agent.VAgentCmdError as e: @@ -1908,11 +2042,13 @@ def gagent_check_file_write(self, test, params, env): if expected not in e.edata["desc"]: self.test.fail(e) else: - self.test.fail("Cmd 'guest-file-write' is executed " - "successfully after freezing FS! " - "But it should return error.") + self.test.fail( + "Cmd 'guest-file-write' is executed " + "successfully after freezing FS! " + "But it should return error." + ) self.gagent.guest_file_close(ret_handle) - cmd_del_file = "%s %s" % (params["cmd_del"], tmp_file) + cmd_del_file = "{} {}".format(params["cmd_del"], tmp_file) session.cmd(cmd_del_file) self._change_bl_back(session) @@ -1933,6 +2069,7 @@ def gagent_check_file_read(self, test, params, env): :param params: Dictionary with the test parameters :param env: Dictionary with test environment. """ + def _read_guest_file_with_count(count_num): """ Read a guest file with count number. @@ -1944,24 +2081,29 @@ def _read_guest_file_with_count(count_num): except guest_agent.VAgentCmdError as detail: info_insuffi = "Insufficient system resources exist to" info_insuffi += " complete the requested service" - if info_insuffi not in detail.edata['desc']: - test.fail("Return error but is not the desired information: " - "('%s')" % str(detail)) + if info_insuffi not in detail.edata["desc"]: + test.fail( + "Return error but is not the desired information: " + f"('{str(detail)}')" + ) - error_context.context("Change guest-file related cmd to white list" - " and get guest file name.") + error_context.context( + "Change guest-file related cmd to white list" " and get guest file name." 
+ ) session, tmp_file = self._guest_file_prepare() content = "helloworld\n" error_context.context("Create a new small file in guest", LOG_JOB.info) - cmd_create_file = "echo helloworld > %s" % tmp_file + cmd_create_file = f"echo helloworld > {tmp_file}" session.cmd(cmd_create_file) - error_context.context("Open guest file via guest-file-open with" - " read only mode.", LOG_JOB.info) + error_context.context( + "Open guest file via guest-file-open with" " read only mode.", LOG_JOB.info + ) # default is read mode ret_handle = int(self.gagent.guest_file_open(tmp_file)) - error_context.context("Read the content and check the result via" - " guest-file cmd", LOG_JOB.info) + error_context.context( + "Read the content and check the result via" " guest-file cmd", LOG_JOB.info + ) self._read_check(ret_handle, content) self.gagent.guest_file_close(ret_handle) @@ -1969,12 +2111,15 @@ def _read_guest_file_with_count(count_num): process.run("dd if=/dev/urandom of=/tmp/big_file bs=1024 count=200") self.vm.copy_files_to("/tmp/big_file", tmp_file) - error_context.context("Open the big guest file via guest-file-open with" - " read only mode.", LOG_JOB.info) + error_context.context( + "Open the big guest file via guest-file-open with" " read only mode.", + LOG_JOB.info, + ) ret_handle = int(self.gagent.guest_file_open(tmp_file)) - error_context.context("Read the big file with an invalid count number", - LOG_JOB.info) + error_context.context( + "Read the big file with an invalid count number", LOG_JOB.info + ) if params.get("os_type") == "linux": main_qga_ver = self._get_qga_version(session, self.vm) if params.get("os_type") == "linux" and main_qga_ver <= 2: # pylint: disable=E0606 @@ -1984,8 +2129,7 @@ def _read_guest_file_with_count(count_num): try: self.gagent.guest_file_seek(ret_handle, 0, 0) except guest_agent.VAgentCmdError as detail: - if re.search("handle '%s' has not been found" % ret_handle, - str(detail)): + if re.search(f"handle '{ret_handle}' has not been found", str(detail)): msg = "As resouce is not sufficient, " msg += "file is closed, so open the file again to " msg += "continue the following tests." @@ -1998,40 +2142,48 @@ def _read_guest_file_with_count(count_num): self.gagent.guest_file_read(ret_handle, count=10000000000) except guest_agent.VAgentCmdError as detail: if not re.search("invalid for argument count", str(detail)): - test.fail("Return error but is not the desired info: " - "('%s')" % str(detail)) + test.fail( + "Return error but is not the desired info: " + f"('{str(detail)}')" + ) else: - LOG_JOB.info("The count number is invalid for windows" - " guest and linux guest in which qga version" - " is bigger than 2.") + LOG_JOB.info( + "The count number is invalid for windows" + " guest and linux guest in which qga version" + " is bigger than 2." + ) else: test.fail("Did not get the expected result.") - error_context.context("Read the file with an valid big count" - " number.", LOG_JOB.info) + error_context.context( + "Read the file with an valid big count" " number.", LOG_JOB.info + ) self.gagent.guest_file_seek(ret_handle, 0, 0) # if guest os resource is enough, will return no error. 
# else it will return error like "insufficient system resource" # which is expected - count = 1000000000 if (params["os_type"] == 'linux' and - main_qga_ver < 5) else 10000000 + count = ( + 1000000000 + if (params["os_type"] == "linux" and main_qga_ver < 5) + else 10000000 + ) _read_guest_file_with_count(count) self.gagent.guest_file_close(ret_handle) - error_context.context("Open a none existing file with read only mode.", - LOG_JOB.info) + error_context.context( + "Open a none existing file with read only mode.", LOG_JOB.info + ) try: self.gagent.guest_file_open("none_exist_file") except guest_agent.VAgentCmdError as detail: res_linux = "No such file or directory" res_windows = "system cannot find the file" if res_windows not in str(detail) and res_linux not in str(detail): - test.fail("This is not the desired information: " - "('%s')" % str(detail)) + test.fail("This is not the desired information: " f"('{str(detail)}')") else: test.fail("Should not pass with none existing file.") - cmd_del_file = "%s %s" % (params["cmd_del"], tmp_file) + cmd_del_file = "{} {}".format(params["cmd_del"], tmp_file) session.cmd(cmd_del_file) self._change_bl_back(session) @@ -2048,36 +2200,38 @@ def gagent_check_with_fsfreeze(self, test, params, env): :param params: Dictionary with the test parameters :param env: Dictionary with test environment. """ - error_context.context("Change guest-file related cmd to white list" - " and get guest file name.") + error_context.context( + "Change guest-file related cmd to white list" " and get guest file name." + ) session, tmp_file = self._guest_file_prepare() content = "hello world\n" - error_context.context("Freeze fs and try to open guest file.", - LOG_JOB.info) + error_context.context("Freeze fs and try to open guest file.", LOG_JOB.info) self.gagent.fsfreeze() try: self.gagent.guest_file_open(tmp_file, mode="a+") except guest_agent.VAgentCmdError as detail: - if not re.search('guest-file-open has been disabled', - str(detail)): - self.test.fail("This is not the desired information: " - "('%s')" % str(detail)) + if not re.search("guest-file-open has been disabled", str(detail)): + self.test.fail( + "This is not the desired information: " f"('{str(detail)}')" + ) else: - self.test.fail("guest-file-open command shouldn't succeed " - "for freeze FS.") + self.test.fail( + "guest-file-open command shouldn't succeed " "for freeze FS." + ) finally: self.gagent.fsthaw() - error_context.context("After thaw fs, try to operate guest" - " file.", LOG_JOB.info) + error_context.context( + "After thaw fs, try to operate guest" " file.", LOG_JOB.info + ) ret_handle = int(self.gagent.guest_file_open(tmp_file, mode="a+")) self.gagent.guest_file_write(ret_handle, content) self.gagent.guest_file_flush(ret_handle) self.gagent.guest_file_seek(ret_handle, 0, 0) self._read_check(ret_handle, "hello world") self.gagent.guest_file_close(ret_handle) - cmd_del_file = "%s %s" % (params["cmd_del"], tmp_file) + cmd_del_file = "{} {}".format(params["cmd_del"], tmp_file) session.cmd(cmd_del_file) self._change_bl_back(session) @@ -2099,6 +2253,7 @@ def gagent_check_with_selinux(self, test, params, env): :param params: Dictionary with the test parameters :param env: Dictionary with test environment. """ + def file_operation(guest_file, open_mode): """ open/write/flush/close file test. 
@@ -2106,8 +2261,7 @@ def file_operation(guest_file, open_mode): :param guest_file: file in guest :param open_mode: open file mode, "r" is the default value """ - ret_handle = self.gagent.guest_file_open(guest_file, - mode=open_mode) + ret_handle = self.gagent.guest_file_open(guest_file, mode=open_mode) self.gagent.guest_file_write(ret_handle, content) self.gagent.guest_file_flush(ret_handle) self.gagent.guest_file_close(ret_handle) @@ -2118,48 +2272,56 @@ def result_check_enforcing(): when selinux policy mode is enforcing.But can open temp file with append mode via guest agent """ + def check(guest_file, open_mode): - error_context.context("Try to open %s with %s mode via" - " guest agent in enforcing" - " selinux policy." % - (guest_file, open_mode), - LOG_JOB.info) + error_context.context( + f"Try to open {guest_file} with {open_mode} mode via" + " guest agent in enforcing" + " selinux policy.", + LOG_JOB.info, + ) if "/tmp" in guest_file and open_mode == "a+": # can open and operate guest file successfully file_operation(guest_file, open_mode) else: try: - self.gagent.guest_file_open(guest_file, - mode=open_mode) + self.gagent.guest_file_open(guest_file, mode=open_mode) except guest_agent.VAgentCmdError as detail: msg = r"failed to open file.*Permission denied" if not re.search(msg, str(detail)): - test.fail("This is not the desired information: " - "('%s')" % str(detail)) + test.fail( + "This is not the desired information: " + f"('{str(detail)}')" + ) else: - test.fail("When selinux policy is 'Enforcing', guest" - " agent should not open %s with %s mode." % - (guest_file, open_mode)) + test.fail( + "When selinux policy is 'Enforcing', guest" + f" agent should not open {guest_file} with {open_mode} mode." + ) + for ch_file in [guest_temp_file, guest_file]: - check(ch_file, 'a+') - check(ch_file, 'w+') + check(ch_file, "a+") + check(ch_file, "w+") def result_check_permissive(): """ Open guest file via guest agent with different open-mode when selinux policy mode is permissive. """ + def check(guest_file, open_mode): - error_context.context("Try to open %s with %s mode via" - " guest agent in permissive" - " selinux policy." 
% - (guest_file, open_mode), - LOG_JOB.info) + error_context.context( + f"Try to open {guest_file} with {open_mode} mode via" + " guest agent in permissive" + " selinux policy.", + LOG_JOB.info, + ) # can open and operate guest file successfully file_operation(guest_file, open_mode) + for ch_file in [guest_temp_file, guest_file]: - check(ch_file, 'a+') - check(ch_file, 'w+') + check(ch_file, "a+") + check(ch_file, "w+") content = "hello world\n" guest_temp_file = "/tmp/testqga" @@ -2169,19 +2331,22 @@ def check(guest_file, open_mode): LOG_JOB.info("Change guest-file related cmd to white list.") self._change_bl(session) - error_context.context("Create and write content to temp file and" - " non temp file.", LOG_JOB.info) - session.cmd("echo 'hello world' > %s" % guest_temp_file) - session.cmd("echo 'hello world' > %s" % guest_file) + error_context.context( + "Create and write content to temp file and" " non temp file.", LOG_JOB.info + ) + session.cmd(f"echo 'hello world' > {guest_temp_file}") + session.cmd(f"echo 'hello world' > {guest_file}") - error_context.context("Set selinux policy to 'Enforcing' mode in" - " guest.", LOG_JOB.info) + error_context.context( + "Set selinux policy to 'Enforcing' mode in" " guest.", LOG_JOB.info + ) if session.cmd_output("getenforce").strip() != "Enforcing": session.cmd("setenforce 1") result_check_enforcing() - error_context.context("Set selinux policy to 'Permissive' mode in" - " guest.", LOG_JOB.info) + error_context.context( + "Set selinux policy to 'Permissive' mode in" " guest.", LOG_JOB.info + ) session.cmd("setenforce 0") result_check_permissive() self._change_bl_back(session) @@ -2207,8 +2372,9 @@ def gagent_check_guest_exec(self, test, params, env): :param params: Dictionary with the test parameters """ - def _guest_cmd_run(guest_cmd, cmd_args=None, env_qga=None, - input=None, capture_output=None): + def _guest_cmd_run( + guest_cmd, cmd_args=None, env_qga=None, input=None, capture_output=None + ): """ Execute guest-exec cmd and get the result in timeout. @@ -2224,9 +2390,13 @@ def _guest_cmd_run(guest_cmd, cmd_args=None, env_qga=None, # change cmd_args to be a list needed by guest-exec. if cmd_args: cmd_args = cmd_args.split() - ret = self.gagent.guest_exec(path=guest_cmd, arg=cmd_args, - env=env_qga, input_data=input, - capture_output=capture_output) + ret = self.gagent.guest_exec( + path=guest_cmd, + arg=cmd_args, + env=env_qga, + input_data=input, + capture_output=capture_output, + ) end_time = time.time() + float(params["guest_cmd_timeout"]) while time.time() < end_time: result = self.gagent.guest_exec_status(ret["pid"]) @@ -2236,8 +2406,10 @@ def _guest_cmd_run(guest_cmd, cmd_args=None, env_qga=None, time.sleep(5) if not result["exited"]: - test.error("Guest cmd is still running, pls login guest to" - " handle it or extend your timeout.") + test.error( + "Guest cmd is still running, pls login guest to" + " handle it or extend your timeout." 
+ ) # check the exitcode and output/error data if capture_output # is true if capture_output is not True: @@ -2245,35 +2417,39 @@ def _guest_cmd_run(guest_cmd, cmd_args=None, env_qga=None, if params.get("os_type") == "linux": if result["exitcode"] == 0: if "out-data" in result: - out_data = base64.b64decode(result["out-data"]).\ - decode() - LOG_JOB.info("The guest cmd is executed successfully," - "the output is:\n%s.", out_data) + out_data = base64.b64decode(result["out-data"]).decode() + LOG_JOB.info( + "The guest cmd is executed successfully," + "the output is:\n%s.", + out_data, + ) elif "err-data" in result: - test.fail("When exitcode is 0, should not return" - " error data.") - else: test.fail( - "There is no output with capture_output is true.") + "When exitcode is 0, should not return" " error data." + ) + else: + test.fail("There is no output with capture_output is true.") else: if "out-data" in result: - test.fail("When exitcode is 1, should not return" - " output data.") + test.fail( + "When exitcode is 1, should not return" " output data." + ) elif "err-data" in result: - err_data = base64.b64decode(result["err-data"]).\ - decode() - LOG_JOB.info("The guest cmd failed," - "the error info is:\n%s", err_data) + err_data = base64.b64decode(result["err-data"]).decode() + LOG_JOB.info( + "The guest cmd failed," "the error info is:\n%s", err_data + ) else: - test.fail("There is no output with capture_output is " - "true.") + test.fail("There is no output with capture_output is " "true.") else: # for windows guest,no matter what exitcode is, # the return key is out-data if "out-data" in result: out_data = base64.b64decode(result["out-data"]).decode() - LOG_JOB.info("The guest cmd is executed successfully," - "the output is:\n%s.", out_data) + LOG_JOB.info( + "The guest cmd is executed successfully," "the output is:\n%s.", + out_data, + ) else: test.fail("There is no output with capture_output is true.") return result @@ -2281,23 +2457,25 @@ def _guest_cmd_run(guest_cmd, cmd_args=None, env_qga=None, session = self._get_session(params, self.vm) self._open_session_list.append(session) - error_context.context("Change guest-exec related cmd to white list.", - LOG_JOB.info) + error_context.context( + "Change guest-exec related cmd to white list.", LOG_JOB.info + ) self._change_bl(session) guest_cmd = params["guest_cmd"] guest_cmd_args = params["guest_cmd_args"] - error_context.context("Execute guest cmd and get the output.", - LOG_JOB.info) - result = _guest_cmd_run(guest_cmd=guest_cmd, cmd_args=guest_cmd_args, - capture_output=True) + error_context.context("Execute guest cmd and get the output.", LOG_JOB.info) + result = _guest_cmd_run( + guest_cmd=guest_cmd, cmd_args=guest_cmd_args, capture_output=True + ) if "out-data" not in result and "err-data" not in result: test.fail("There is no output in result.") - error_context.context("Execute guest cmd and no need to get the output.", - LOG_JOB.info) + error_context.context( + "Execute guest cmd and no need to get the output.", LOG_JOB.info + ) result = _guest_cmd_run(guest_cmd=guest_cmd, cmd_args=guest_cmd_args) if "out-data" in result or "err-data" in result: @@ -2307,22 +2485,21 @@ def _guest_cmd_run(guest_cmd, cmd_args=None, env_qga=None, try: self.gagent.guest_exec(path="invalid_cmd") except guest_agent.VAgentCmdError as detail: - if not re.search('Failed to execute child process', str(detail)): - test.fail("This is not the desired information: ('%s')" - % str(detail)) + if not re.search("Failed to execute child process", str(detail)): + 
test.fail(f"This is not the desired information: ('{str(detail)}')") else: test.fail("Should not success for invalid cmd.") - error_context.context("Execute guest cmd with wrong args.", - LOG_JOB.info) + error_context.context("Execute guest cmd with wrong args.", LOG_JOB.info) if params.get("os_type") == "linux": guest_cmd = "cd" guest_cmd_args = "/tmp/qga_empty_dir" else: guest_cmd = "ping" guest_cmd_args = "invalid-address" - result = _guest_cmd_run(guest_cmd=guest_cmd, cmd_args=guest_cmd_args, - capture_output=True) + result = _guest_cmd_run( + guest_cmd=guest_cmd, cmd_args=guest_cmd_args, capture_output=True + ) if result["exitcode"] == 0: test.fail("The cmd should be failed with wrong args.") self._change_bl_back(session) @@ -2371,8 +2548,7 @@ def _action_after_fsthaw(self, *args): LOG_JOB.info("FS is thawed as expected, can write in guest.") @error_context.context_aware - def _fsfreeze(self, fsfreeze_list=False, mountpoints=None, - check_mountpoints=None): + def _fsfreeze(self, fsfreeze_list=False, mountpoints=None, check_mountpoints=None): """ Test guest agent commands "guest-fsfreeze-freeze/status/thaw/ fsfreeze-list" @@ -2407,11 +2583,12 @@ def _fsfreeze(self, fsfreeze_list=False, mountpoints=None, self.gagent.fsthaw(check_status=False) self._action_before_fsfreeze() - error_context.context("Freeze the FS when fsfreeze_list is" - " %s and mountpoints is %s." % - (fsfreeze_list, mountpoints), LOG_JOB.info) - self.gagent.fsfreeze(fsfreeze_list=fsfreeze_list, - mountpoints=mountpoints) + error_context.context( + "Freeze the FS when fsfreeze_list is" + f" {fsfreeze_list} and mountpoints is {mountpoints}.", + LOG_JOB.info, + ) + self.gagent.fsfreeze(fsfreeze_list=fsfreeze_list, mountpoints=mountpoints) try: if fsfreeze_list: if check_mountpoints: @@ -2439,8 +2616,9 @@ def _fsfreeze(self, fsfreeze_list=False, mountpoints=None, self.gagent.fsthaw(check_status=False) except Exception as detail: # Ignore exception for this thaw action. - LOG_JOB.warn("Finally failed to thaw guest fs," - " detail: '%s'", detail) + LOG_JOB.warning( + "Finally failed to thaw guest fs," " detail: '%s'", detail + ) raise # check after fsthaw self._action_after_fsthaw(write_cmd_guest, write_cmd_timeout) @@ -2499,54 +2677,61 @@ def gagent_check_fsfreeze_list(self, test, params, env): self._open_session_list.append(session) image_size_stg0 = params["image_size_stg0"] - error_context.context("Format the new data disk and mount it.", - LOG_JOB.info) + error_context.context("Format the new data disk and mount it.", LOG_JOB.info) mount_points = [] if params.get("os_type") == "linux": - self.gagent_setsebool_value('on', params, self.vm) + self.gagent_setsebool_value("on", params, self.vm) disk_data = list(utils_disk.get_linux_disks(session).keys()) mnt_point_data = utils_disk.configure_empty_disk( - session, disk_data[0], image_size_stg0, "linux", - labeltype="msdos")[0] + session, disk_data[0], image_size_stg0, "linux", labeltype="msdos" + )[0] mount_points = ["/", mnt_point_data] else: disk_index = utils_misc.wait_for( - lambda: utils_disk.get_windows_disks_index(session, - image_size_stg0), - 120) + lambda: utils_disk.get_windows_disks_index(session, image_size_stg0), + 120, + ) if disk_index: - LOG_JOB.info("Clear readonly for disk and online it in" - " windows guest.") - if not utils_disk.update_windows_disk_attributes(session, - disk_index): + LOG_JOB.info( + "Clear readonly for disk and online it in" " windows guest." 
+ ) + if not utils_disk.update_windows_disk_attributes(session, disk_index): test.error("Failed to update windows disk attributes.") mnt_point_data = utils_disk.configure_empty_disk( - session, disk_index[0], image_size_stg0, "windows", - labeltype="msdos")[0] - mount_points = ["C:\\", "%s:\\" % mnt_point_data] + session, + disk_index[0], + image_size_stg0, + "windows", + labeltype="msdos", + )[0] + mount_points = ["C:\\", f"{mnt_point_data}:\\"] else: test.error("Didn't find any disk_index except system disk.") - error_context.context("Freeze fs without parameter of mountpoints.", - LOG_JOB.info) + error_context.context( + "Freeze fs without parameter of mountpoints.", LOG_JOB.info + ) self._fsfreeze(fsfreeze_list=True, check_mountpoints=mount_points) - error_context.context("Freeze fs with two mount point.", - LOG_JOB.info) + error_context.context("Freeze fs with two mount point.", LOG_JOB.info) self._fsfreeze(fsfreeze_list=True, mountpoints=mount_points) - error_context.context("Freeze fs with every mount point.", - LOG_JOB.info) + error_context.context("Freeze fs with every mount point.", LOG_JOB.info) for mpoint in mount_points: - mpoint = ["%s" % mpoint] + mpoint = [f"{mpoint}"] self._fsfreeze(fsfreeze_list=True, mountpoints=mpoint) - error_context.context("Freeze fs with one valid mountpoint and" - " one invalid mountpoint.", LOG_JOB.info) + error_context.context( + "Freeze fs with one valid mountpoint and" " one invalid mountpoint.", + LOG_JOB.info, + ) if params.get("os_type") == "linux": mount_points_n = ["/", "/invalid"] check_mp = ["/"] - self._fsfreeze(fsfreeze_list=True, mountpoints=mount_points_n, - check_mountpoints=check_mp) - self.gagent_setsebool_value('off', params, self.vm) + self._fsfreeze( + fsfreeze_list=True, + mountpoints=mount_points_n, + check_mountpoints=check_mp, + ) + self.gagent_setsebool_value("off", params, self.vm) else: mount_points_n = ["C:\\", "X:\\"] LOG_JOB.info("Make sure the current status is thaw.") @@ -2557,18 +2742,21 @@ def gagent_check_fsfreeze_list(self, test, params, env): # Thaw guest FS if the fs status is incorrect. self.gagent.fsthaw(check_status=False) try: - self.gagent.fsfreeze(fsfreeze_list=True, - mountpoints=mount_points_n) + self.gagent.fsfreeze(fsfreeze_list=True, mountpoints=mount_points_n) except guest_agent.VAgentCmdError as e: expected = "failed to add X:\\ to snapshot set" if expected not in e.edata["desc"]: test.fail(e) else: - test.fail("Cmd 'guest-fsfreeze-freeze-list' is executed" - " successfully, but it should return error.") + test.fail( + "Cmd 'guest-fsfreeze-freeze-list' is executed" + " successfully, but it should return error." 
+ ) finally: - if self.gagent.get_fsfreeze_status() == \ - self.gagent.FSFREEZE_STATUS_FROZEN: + if ( + self.gagent.get_fsfreeze_status() + == self.gagent.FSFREEZE_STATUS_FROZEN + ): self.gagent.fsthaw(check_status=False) @error_context.context_aware @@ -2588,8 +2776,10 @@ def gagent_check_thaw_unfrozen(self, test, params, env): error_context.context("Thaw the unfrozen FS", LOG_JOB.info) ret = self.gagent.fsthaw(check_status=False) if ret != 0: - test.fail("The return value of thawing an unfrozen fs is %s," - "it should be zero" % ret) + test.fail( + f"The return value of thawing an unfrozen fs is {ret}," + "it should be zero" + ) @error_context.context_aware def gagent_check_freeze_frozen(self, test, params, env): @@ -2605,13 +2795,17 @@ def gagent_check_freeze_frozen(self, test, params, env): try: self.gagent.fsfreeze(check_status=False) except guest_agent.VAgentCmdError as e: - expected = ("Command guest-fsfreeze-freeze has been disabled: " - "the agent is in frozen state") + expected = ( + "Command guest-fsfreeze-freeze has been disabled: " + "the agent is in frozen state" + ) if expected not in e.edata["desc"]: test.fail(e) else: - test.fail("Cmd 'guest-fsfreeze-freeze' is executed successfully " - "after freezing FS! But it should return error.") + test.fail( + "Cmd 'guest-fsfreeze-freeze' is executed successfully " + "after freezing FS! But it should return error." + ) finally: if self.gagent.get_fsfreeze_status() == self.gagent.FSFREEZE_STATUS_FROZEN: self.gagent.fsthaw(check_status=False) @@ -2627,13 +2821,16 @@ def gagent_check_after_init(self, test, params, env): error_context.context("Run init 3 in guest", LOG_JOB.info) session = self._get_session(params, self.vm) session.cmd("init 3") - error_context.context("Check guest agent status after running init 3", - LOG_JOB.info) + error_context.context( + "Check guest agent status after running init 3", LOG_JOB.info + ) if self._check_ga_service(session, params.get("gagent_status_cmd")): LOG_JOB.info("Guest agent service is still running after init 3.") else: - test.fail("Guest agent service is stopped after running init 3! It " - "should be running.") + test.fail( + "Guest agent service is stopped after running init 3! It " + "should be running." + ) @error_context.context_aware def gagent_check_hotplug_frozen(self, test, params, env): @@ -2644,12 +2841,12 @@ def gagent_check_hotplug_frozen(self, test, params, env): :param params: Dictionary with the test parameters :param env: Dictionary with test environment """ + def get_new_disk(disks_before_plug, disks_after_plug): """ Get the new added disks by comparing two disk lists. 
""" - disk = list( - set(disks_after_plug).difference(set(disks_before_plug))) + disk = list(set(disks_after_plug).difference(set(disks_before_plug))) return disk session = self._get_session(params, self.vm) @@ -2665,9 +2862,9 @@ def get_new_disk(disks_before_plug, disks_after_plug): error_context.context("Hotplug a disk to guest", LOG_JOB.info) image_name_plug = params["images"].split()[1] image_params_plug = params.object_params(image_name_plug) - devs = self.vm.devices.images_define_by_params(image_name_plug, - image_params_plug, - 'disk') + devs = self.vm.devices.images_define_by_params( + image_name_plug, image_params_plug, "disk" + ) for dev in devs: self.vm.devices.simple_hotplug(dev, self.vm.monitor) disk_write_cmd = params["disk_write_cmd"] @@ -2678,29 +2875,51 @@ def get_new_disk(disks_before_plug, disks_after_plug): new_disks = utils_misc.wait_for( lambda: get_new_disk( disks_before_plug.keys(), - utils_disk.get_linux_disks(session, True).keys()), - pause) + utils_disk.get_linux_disks(session, True).keys(), + ), + pause, + ) if not new_disks: test.fail("Can't detect the new hotplugged disks in guest") try: mnt_point = utils_disk.configure_empty_disk( - session, new_disks[0], image_size_stg0, "linux", labeltype="msdos") + session, + new_disks[0], + image_size_stg0, + "linux", + labeltype="msdos", + ) except aexpect.ShellTimeoutError: self.gagent.fsthaw() mnt_point = utils_disk.configure_empty_disk( - session, new_disks[0], image_size_stg0, "linux", labeltype="msdos") + session, + new_disks[0], + image_size_stg0, + "linux", + labeltype="msdos", + ) elif params.get("os_type") == "windows": disk_index = utils_misc.wait_for( - lambda: utils_disk.get_windows_disks_index(session, image_size_stg0), 120) + lambda: utils_disk.get_windows_disks_index( + session, image_size_stg0 + ), + 120, + ) if disk_index: - LOG_JOB.info("Clear readonly for disk and online it in " - "windows guest.") + LOG_JOB.info( + "Clear readonly for disk and online it in " "windows guest." 
+ ) if not utils_disk.update_windows_disk_attributes( - session, disk_index): + session, disk_index + ): test.error("Failed to update windows disk attributes.") mnt_point = utils_disk.configure_empty_disk( - session, disk_index[0], image_size_stg0, "windows", - labeltype="msdos") + session, + disk_index[0], + image_size_stg0, + "windows", + labeltype="msdos", + ) session.cmd(disk_write_cmd % mnt_point[0]) error_context.context("Unplug the added disk", LOG_JOB.info) self.vm.devices.simple_unplug(devs[-1], self.vm.monitor) @@ -2709,10 +2928,12 @@ def get_new_disk(disks_before_plug, disks_after_plug): try: self.gagent.fsthaw(check_status=False) except guest_agent.VAgentCmdError as detail: - if not re.search("fsfreeze is limited up to 10 seconds", - str(detail)): - test.error("guest-fsfreeze-thaw cmd failed with:" - "('%s')" % str(detail)) + if not re.search( + "fsfreeze is limited up to 10 seconds", str(detail) + ): + test.error( + "guest-fsfreeze-thaw cmd failed with:" f"('{str(detail)}')" + ) self.vm.verify_alive() if params.get("os_type") == "linux": utils_disk.umount(new_disks[0], mnt_point[0], session=session) @@ -2732,31 +2953,35 @@ def gagent_check_path_fsfreeze_hook(self, test, params, env): error_context.context("Start gagent with -F option", LOG_JOB.info) self.gagent_start(session, self.vm) - error_context.context("Get the default path of fsfreeze-hook in" - " qemu-ga help.", LOG_JOB.info) + error_context.context( + "Get the default path of fsfreeze-hook in" " qemu-ga help.", LOG_JOB.info + ) s, o = session.cmd_status_output(params["cmd_get_help_info"]) - help_cmd_hook_path = o.strip().replace(')', '').split()[-1] + help_cmd_hook_path = o.strip().replace(")", "").split()[-1] - error_context.context("Get the default path of fsfreeze-hook in" - " man page.", LOG_JOB.info) + error_context.context( + "Get the default path of fsfreeze-hook in" " man page.", LOG_JOB.info + ) LOG_JOB.info("Export qemu-ga man page to guest file.") qga_man_file = "/tmp/man_file" session.cmd(params["cmd_get_man_page"] % qga_man_file) LOG_JOB.info("Get fsfreeze-hook script default path in the file.") - cmd_get_hook_path = r'cat %s |grep /fsfreeze-hook' % qga_man_file + cmd_get_hook_path = rf"cat {qga_man_file} |grep /fsfreeze-hook" output = session.cmd_output(cmd_get_hook_path).strip() - hook_pattern = r'/etc.*fsfreeze-hook' + hook_pattern = r"/etc.*fsfreeze-hook" man_cmd_hook_path = re.findall(hook_pattern, output, re.I)[0] # the expected hook path hook_path_expected = "/etc/qemu-kvm/fsfreeze-hook" - if help_cmd_hook_path != hook_path_expected \ - or man_cmd_hook_path != hook_path_expected: + if ( + help_cmd_hook_path != hook_path_expected + or man_cmd_hook_path != hook_path_expected + ): msg = "The hook path is not correct in qemu-ga -h or man page\n" - msg += "it's in help cmd is %s\n" % help_cmd_hook_path - msg += "it's in man page is %s\n" % man_cmd_hook_path + msg += f"it's in help cmd is {help_cmd_hook_path}\n" + msg += f"it's in man page is {man_cmd_hook_path}\n" test.fail(msg) - session.cmd("rm -rf %s" % qga_man_file) + session.cmd(f"rm -rf {qga_man_file}") @error_context.context_aware def gagent_check_fsfreeze_hook_script(self, test, params, env): @@ -2779,54 +3004,60 @@ def gagent_check_fsfreeze_hook_script(self, test, params, env): """ def log_check(action): - msg = "testing %s:%s" % (user_script_path, action) - hook_log = session.cmd_output("cat %s" % log_path) + msg = f"testing {user_script_path}:{action}" + hook_log = session.cmd_output(f"cat {log_path}") if msg not in 
hook_log.strip().splitlines()[-2]: - test.fail("Fsfreeze hook test failed\nthe fsfreeze" - " hook log is %s." % hook_log) + test.fail( + "Fsfreeze hook test failed\nthe fsfreeze" + f" hook log is {hook_log}." + ) session = self._get_session(self.params, None) self._open_session_list.append(session) - error_context.context("Checking fsfreeze hook related scripts.", - LOG_JOB.info) + error_context.context("Checking fsfreeze hook related scripts.", LOG_JOB.info) cmd_get_hook_files = "rpm -ql qemu-guest-agent |grep fsfreeze-hook" hook_files = session.cmd_output(cmd_get_hook_files) expect_file_nums = 4 - os_ver = params['os_variant'] + os_ver = params["os_variant"] pattern = r"rhel(\d+)" - if 'rhel' in os_ver and int(re.findall(pattern, os_ver)[0]) <= 8: + if "rhel" in os_ver and int(re.findall(pattern, os_ver)[0]) <= 8: expect_file_nums = 5 if len(hook_files.strip().split()) < expect_file_nums: - test.fail("Fsfreeze hook files are missed, the output is" - " %s" % hook_files) + test.fail("Fsfreeze hook files are missed, the output is" f" {hook_files}") - error_context.context("Checking fsfreeze hook path set in config" - " file.", LOG_JOB.info) + error_context.context( + "Checking fsfreeze hook path set in config" " file.", LOG_JOB.info + ) config_file = "/etc/sysconfig/qemu-ga" - cmd_get_hook_path = "cat %s | grep" \ - " ^FSFREEZE_HOOK_PATHNAME" % config_file + cmd_get_hook_path = f"cat {config_file} | grep" " ^FSFREEZE_HOOK_PATHNAME" o_path = session.cmd_output(cmd_get_hook_path) hook_path = o_path.strip().split("=")[1] - detail = session.cmd_output("ll %s" % hook_path) + detail = session.cmd_output(f"ll {hook_path}") if not re.search(r".*x.*x.*x", detail): - test.fail("Not all users have executable permission" - " of fsfreeze hook, the detail is %s." % detail) + test.fail( + "Not all users have executable permission" + f" of fsfreeze hook, the detail is {detail}." + ) - error_context.context("Checking if agent service is using the" - " fsfreeze hook.", LOG_JOB.info) + error_context.context( + "Checking if agent service is using the" " fsfreeze hook.", LOG_JOB.info + ) cmd_get_hook = "ps aux |grep /usr/bin/qemu-ga |grep fsfreeze-hook" hook_path_info = session.cmd_output(cmd_get_hook).strip() - if params['os_variant'] == 'rhel6': - error_context.context("For rhel6 guest,need to enable fsfreeze" - " hook and restart agent service.", - LOG_JOB.info) + if params["os_variant"] == "rhel6": + error_context.context( + "For rhel6 guest,need to enable fsfreeze" + " hook and restart agent service.", + LOG_JOB.info, + ) if not session.cmd_output(cmd_get_hook): - cmd_enable_hook = "sed -i 's/FSFREEZE_HOOK_ENABLE=0/" \ - "FSFREEZE_HOOK_ENABLE=1/g' %s" % \ - config_file + cmd_enable_hook = ( + "sed -i 's/FSFREEZE_HOOK_ENABLE=0/" + f"FSFREEZE_HOOK_ENABLE=1/g' {config_file}" + ) session.cmd(cmd_enable_hook) session.cmd(params["gagent_restart_cmd"]) hook_path_info = session.cmd_output(cmd_get_hook).strip() @@ -2835,26 +3066,32 @@ def log_check(action): hook_path_service = hook_path_info.split("-F")[-1] if hook_path_service != hook_path: - test.fail("Fsfreeze hook in qemu-guest-agent service is different" - " from config.\nit's %s from service\n" - "it's %s from config." 
% (hook_path_service, hook_path)) - - error_context.context("Create a simple script to verify fsfreeze" - " hook.", LOG_JOB.info) - cmd_get_user_path = "rpm -ql qemu-guest-agent |grep fsfreeze-hook.d" \ - " |grep -v /usr/share" + test.fail( + "Fsfreeze hook in qemu-guest-agent service is different" + f" from config.\nit's {hook_path_service} from service\n" + f"it's {hook_path} from config." + ) + + error_context.context( + "Create a simple script to verify fsfreeze" " hook.", LOG_JOB.info + ) + cmd_get_user_path = ( + "rpm -ql qemu-guest-agent |grep fsfreeze-hook.d" " |grep -v /usr/share" + ) output = session.cmd_output(cmd_get_user_path) user_script_path = output.strip().split("\n")[-1] user_script_path += "/user_script.sh" - cmd_create_script = 'echo "printf \'testing %%s:%%s\\n\' \\$0 \\$@"' \ - ' > %s' % user_script_path + cmd_create_script = ( + "echo \"printf 'testing %s:%s\\n' \\$0 \\$@\"" f" > {user_script_path}" + ) session.cmd(cmd_create_script) - session.cmd("chmod +x %s" % user_script_path) + session.cmd(f"chmod +x {user_script_path}") - error_context.context("Issue fsfreeze and thaw commands and check" - " logs.", LOG_JOB.info) - cmd_get_log_path = "cat %s |grep ^LOGFILE" % hook_path + error_context.context( + "Issue fsfreeze and thaw commands and check" " logs.", LOG_JOB.info + ) + cmd_get_log_path = f"cat {hook_path} |grep ^LOGFILE" log_path = session.cmd_output(cmd_get_log_path).strip().split("=")[-1] self.gagent.fsfreeze() log_check("freeze") @@ -2870,6 +3107,7 @@ def gagent_check_query_chardev(self, test, params, env): :param params: Dictionary with the test parameters :param env: Dictionary with test environment """ + def check_value_frontend_open(out, expected): """ Get value of 'frontend-open' after executing 'query-chardev' @@ -2882,16 +3120,21 @@ def check_value_frontend_open(out, expected): if ret is expected: break else: - test.fail("The value of parameter 'frontend-open' " - "is %s, it should be %s" % (ret, expected)) - error_context.context("Execute query-chardev when guest agent service " - "is on", LOG_JOB.info) + test.fail( + "The value of parameter 'frontend-open' " + f"is {ret}, it should be {expected}" + ) + + error_context.context( + "Execute query-chardev when guest agent service " "is on", LOG_JOB.info + ) out = self.vm.monitor.query("chardev") check_value_frontend_open(out, True) session = self._get_session(params, self.vm) self.gagent_stop(session, self.vm) - error_context.context("Execute query-chardev when guest agent service " - "is off", LOG_JOB.info) + error_context.context( + "Execute query-chardev when guest agent service " "is off", LOG_JOB.info + ) out = self.vm.monitor.query("chardev") check_value_frontend_open(out, False) session.close() @@ -2907,14 +3150,12 @@ def gagent_check_qgastatus_after_remove_qga(self, test, params, env): error_context.context("Remove qga.service.", LOG_JOB.info) self.gagent_uninstall(session, self.vm) - error_context.context("Check qga.service after removing it.", - LOG_JOB.info) + error_context.context("Check qga.service after removing it.", LOG_JOB.info) try: if self._check_ga_service(session, params.get("gagent_status_cmd")): test.fail("QGA service should be removed.") finally: - error_context.context("Recover test env that start qga.", - LOG_JOB.info) + error_context.context("Recover test env that start qga.", LOG_JOB.info) self.gagent_install(session, self.vm) self.gagent_start(session, self.vm) self.gagent_verify(params, self.vm) @@ -2928,30 +3169,30 @@ def gagent_check_frozen_io(self, test, params, env): 
:param params: Dictionary with the test parameters :param env: Dictionary with test environment """ - error_context.context("Before freeze/thaw the FS, run the iozone test", - LOG_JOB.info) + error_context.context( + "Before freeze/thaw the FS, run the iozone test", LOG_JOB.info + ) session = self._get_session(self.params, None) self._open_session_list.append(session) - iozone_cmd = utils_misc.set_winutils_letter(session, - params["iozone_cmd"]) + iozone_cmd = utils_misc.set_winutils_letter(session, params["iozone_cmd"]) session.cmd(iozone_cmd, timeout=360) error_context.context("Freeze the FS.", LOG_JOB.info) try: self.gagent.fsfreeze() except guest_agent.VAgentCmdError as detail: - if not re.search("timeout when try to receive Frozen event from" - " VSS provider", str(detail)): - test.fail("guest-fsfreeze-freeze cmd failed with:" - "('%s')" % str(detail)) - if self.gagent.verify_fsfreeze_status( - self.gagent.FSFREEZE_STATUS_FROZEN): + if not re.search( + "timeout when try to receive Frozen event from" " VSS provider", + str(detail), + ): + test.fail("guest-fsfreeze-freeze cmd failed with:" f"('{str(detail)}')") + if self.gagent.verify_fsfreeze_status(self.gagent.FSFREEZE_STATUS_FROZEN): try: self.gagent.fsthaw(check_status=False) except guest_agent.VAgentCmdError as detail: - if not re.search("fsfreeze is limited up to 10 seconds", - str(detail)): - test.error("guest-fsfreeze-thaw cmd failed with:" - "('%s')" % str(detail)) + if not re.search("fsfreeze is limited up to 10 seconds", str(detail)): + test.error( + "guest-fsfreeze-thaw cmd failed with:" f"('{str(detail)}')" + ) self.gagent_verify(self.params, self.vm) @@ -2972,23 +3213,25 @@ def gagent_check_vss_status(self, test, params, env): :param params: Dictionary with the test parameters :param env: Dictionary with test environment """ + def check_vss_info(cmd_type, key, expect_value): - cmd_vss = "sc %s \"QEMU Guest Agent VSS Provider\" | findstr /i %s" % \ - (cmd_type, key) + cmd_vss = ( + f'sc {cmd_type} "QEMU Guest Agent VSS Provider" | findstr /i {key}' + ) status, output = session.cmd_status_output(cmd_vss) if status: - test.error("Command to check VSS service info failed," - "detailed info is:\n%s" % output) + test.error( + "Command to check VSS service info failed," + f"detailed info is:\n{output}" + ) vss_result = output.split()[-1] if vss_result != expect_value: - test.fail("The output is %s which is not expected." 
- % vss_result) + test.fail(f"The output is {vss_result} which is not expected.") session = self._get_session(self.params, None) self._open_session_list.append(session) - error_context.context("Check VSS Provider service start type.", - LOG_JOB.info) + error_context.context("Check VSS Provider service start type.", LOG_JOB.info) check_vss_info("qc", "START_TYPE", "DEMAND_START") error_context.context("Check VSS Provider status.", LOG_JOB.info) @@ -2997,18 +3240,15 @@ def check_vss_info(cmd_type, key, expect_value): error_context.context("Freeze fs.", LOG_JOB.info) self.gagent.fsfreeze() - error_context.context("Check VSS Provider status after fsfreeze.", - LOG_JOB.info) + error_context.context("Check VSS Provider status after fsfreeze.", LOG_JOB.info) check_vss_info("query", "STATE", "RUNNING") error_context.context("Thaw fs.", LOG_JOB.info) try: self.gagent.fsthaw() except guest_agent.VAgentCmdError as detail: - if not re.search("fsfreeze is limited up to 10 seconds", - str(detail)): - test.error("guest-fsfreeze-thaw cmd failed with:" - "('%s')" % str(detail)) + if not re.search("fsfreeze is limited up to 10 seconds", str(detail)): + test.error("guest-fsfreeze-thaw cmd failed with:" f"('{str(detail)}')") @error_context.context_aware def gagent_check_fsinfo(self, test, params, env): @@ -3027,6 +3267,7 @@ def gagent_check_fsinfo(self, test, params, env): :param env: Dictionary with test environment. """ + def qga_guest_diskusage(mountpoint): """ Send cmd in guest to get disk usage. @@ -3057,60 +3298,62 @@ def check_usage_qga_guest(mount_point): diff_total_qgaguest = int(disk_usage_guest[0]) diff_used_qgaguest = int(disk_usage_guest[1]) if diff_total_qgaguest != 0: - test.fail("File System %s Total bytes doesn't match." % - mount_point) + test.fail(f"File System {mount_point} Total bytes doesn't match.") if diff_used_qgaguest != 0: - if mount_point != 'C:' and mount_point != '/': - test.fail("File system %s used bytes doesn't match." % - mount_point) + if mount_point != "C:" and mount_point != "/": + test.fail(f"File system {mount_point} used bytes doesn't match.") else: # Disk 'C:' and '/' used space usage have a floating interval, # so set a safe value '10485760'. - LOG_JOB.info("Need to check the floating interval for C: " - "or /.") + LOG_JOB.info("Need to check the floating interval for C: " "or /.") if diff_used_qgaguest > 10485760: - test.fail("File System floating interval is too large," - "Something must go wrong.") + test.fail( + "File System floating interval is too large," + "Something must go wrong." + ) else: - LOG_JOB.info("File system '%s' usages are within the safe " - "floating range.", mount_point) + LOG_JOB.info( + "File system '%s' usages are within the safe " + "floating range.", + mount_point, + ) session = self._get_session(params, None) self._open_session_list.append(session) serial_num = params["blk_extra_params_image1"].split("=")[1] - error_context.context("Check all file system info in a loop.", - LOG_JOB.info) + error_context.context("Check all file system info in a loop.", LOG_JOB.info) fs_info_qga = self.gagent.get_fsinfo() for fs in fs_info_qga: device_id = fs["name"] mount_pt = fs["mountpoint"] - if (params["os_type"] == "windows" and - mount_pt != "System Reserved"): + if params["os_type"] == "windows" and mount_pt != "System Reserved": mount_pt = mount_pt[:2] - error_context.context("Check file system '%s' usage statistics." 
% - mount_pt, LOG_JOB.info) - if mount_pt != 'System Reserved': + error_context.context( + f"Check file system '{mount_pt}' usage statistics.", LOG_JOB.info + ) + if mount_pt != "System Reserved": # disk usage statistic for System Reserved # volume is not supported. check_usage_qga_guest(mount_pt) else: LOG_JOB.info("'%s' disk usage statistic is not supported", mount_pt) - error_context.context("Check file system type of '%s' mount point." - % mount_pt, LOG_JOB.info) + error_context.context( + f"Check file system type of '{mount_pt}' mount point.", LOG_JOB.info + ) fs_type_qga = fs["type"] cmd_get_disk = params["cmd_get_disk"] % mount_pt.replace("/", r"\/") if params["os_type"] == "windows": - cmd_get_disk = params["cmd_get_disk"] % device_id.replace("\\", - r"\\") + cmd_get_disk = params["cmd_get_disk"] % device_id.replace("\\", r"\\") disk_info_guest = session.cmd(cmd_get_disk).strip().split() fs_type_guest = disk_info_guest[1] if fs_type_qga != fs_type_guest: - test.fail("File System doesn't match.\n" - "from guest-agent is %s.\nfrom guest os is %s." - % (fs_type_qga, fs_type_guest)) + test.fail( + "File System doesn't match.\n" + f"from guest-agent is {fs_type_qga}.\nfrom guest os is {fs_type_guest}." + ) else: LOG_JOB.info("File system type is %s which is expected.", fs_type_qga) @@ -3118,27 +3361,28 @@ def check_usage_qga_guest(mount_point): disk_name_qga = fs["name"] disk_name_guest = disk_info_guest[0] if params["os_type"] == "linux": - if not re.findall(r'^/\w*/\w*$', disk_name_guest): - disk_name_guest = session.cmd("readlink %s" % - disk_name_guest).strip() - disk_name_guest = disk_name_guest.split('/')[-1] + if not re.findall(r"^/\w*/\w*$", disk_name_guest): + disk_name_guest = session.cmd(f"readlink {disk_name_guest}").strip() + disk_name_guest = disk_name_guest.split("/")[-1] if disk_name_qga != disk_name_guest: - test.fail("Device name doesn't match.\n" - "from guest-agent is %s.\nit's from guest os is %s." - % (disk_name_qga, disk_name_guest)) + test.fail( + "Device name doesn't match.\n" + f"from guest-agent is {disk_name_qga}.\nit's from guest os is {disk_name_guest}." + ) else: LOG_JOB.info("Disk name is %s which is expected.", disk_name_qga) - error_context.context("Check serial number of some disk.", - LOG_JOB.info) + error_context.context("Check serial number of some disk.", LOG_JOB.info) if fs_type_qga == "UDF" or fs_type_qga == "CDFS": LOG_JOB.info("Only check block disk's serial info, no cdrom.") continue serial_qga = fs["disk"][0]["serial"] if not re.findall(serial_num, serial_qga): - test.fail("Serial name is not correct via qga.\n" - "from guest-agent is %s.\n" - "but it should include %s." % (serial_qga, serial_num)) + test.fail( + "Serial name is not correct via qga.\n" + f"from guest-agent is {serial_qga}.\n" + f"but it should include {serial_num}." 
+ ) else: LOG_JOB.info("Serial number is %s which is expected.", serial_qga) @@ -3154,16 +3398,18 @@ def gagent_check_nonexistent_cmd(self, test, params, env): """ session = self._get_session(params, None) self._open_session_list.append(session) - error_context.context("Issue the no existed guest-agent " - "cmd via qga.", LOG_JOB.info) + error_context.context( + "Issue the no existed guest-agent " "cmd via qga.", LOG_JOB.info + ) cmd_wrong = params["wrong_cmd"] try: self.gagent.cmd(cmd_wrong) except guest_agent.VAgentCmdError as detail: - pattern = "command %s has not been found" % cmd_wrong + pattern = f"command {cmd_wrong} has not been found" if not re.search(pattern, str(detail), re.I): - test.fail("The error info is not correct, the return is" - " %s." % str(detail)) + test.fail( + "The error info is not correct, the return is" f" {str(detail)}." + ) else: test.fail("Should return error info.") @@ -3180,25 +3426,23 @@ def gagent_check_log(self, test, params, env): :param params: Dictionary with the test parameterspy :param env: Dictionary with test environment. """ + def log_check(qga_cmd): """ check guest agent log. """ - error_context.context("Check %s cmd in agent log." % qga_cmd, - LOG_JOB.info) - log_str = session.cmd_output(get_log_cmd).strip().split('\n')[-1] - pattern = r"%s" % qga_cmd + error_context.context(f"Check {qga_cmd} cmd in agent log.", LOG_JOB.info) + log_str = session.cmd_output(get_log_cmd).strip().split("\n")[-1] + pattern = rf"{qga_cmd}" if not re.findall(pattern, log_str, re.M | re.I): - test.fail("The %s command is not recorded in agent" - " log." % qga_cmd) + test.fail(f"The {qga_cmd} command is not recorded in agent" " log.") get_log_cmd = params["get_log_cmd"] session = self._get_session(self.params, self.vm) self._open_session_list.append(session) self._change_bl(session) - error_context.context("Issue some common guest agent commands.", - LOG_JOB.info) + error_context.context("Issue some common guest agent commands.", LOG_JOB.info) self.gagent.get_time() log_check("guest-get-time") @@ -3223,12 +3467,14 @@ def gagent_check_with_migrate(self, test, params, env): :param params: Dictionary with the test parameters :param env: Dictionary with test environment. """ - error_context.context("Migrate guest while guest agent service is" - " running.", LOG_JOB.info) + error_context.context( + "Migrate guest while guest agent service is" " running.", LOG_JOB.info + ) qemu_migration.set_speed(self.vm, params.get("mig_speed", "1G")) self.vm.migrate() - error_context.context("Recreate a QemuAgent object after vm" - " migration.", LOG_JOB.info) + error_context.context( + "Recreate a QemuAgent object after vm" " migration.", LOG_JOB.info + ) self.gagent = None args = [params.get("gagent_serial_type"), params.get("gagent_name")] self.gagent_create(params, self.vm, *args) @@ -3253,6 +3499,7 @@ def gagent_check_umount_frozen(self, test, params, env): :param params: Dictionary with the test parameters :param env: Dictionary with test environment """ + def wrap_windows_cmd(cmd): """ add header and footer for cmd in order to run it in diskpart tool. @@ -3260,8 +3507,9 @@ def wrap_windows_cmd(cmd): :param cmd: cmd to be wrapped. 
:return: wrapped cmd """ - disk = "disk_" + ''.join(random.sample(string.ascii_letters + - string.digits, 4)) + disk = "disk_" + "".join( + random.sample(string.ascii_letters + string.digits, 4) + ) cmd_header = "echo list disk > " + disk cmd_header += " && echo select disk %s >> " + disk cmd_footer = " echo exit >> " + disk @@ -3274,29 +3522,32 @@ def wrap_windows_cmd(cmd): self._open_session_list.append(session) image_size_stg0 = params["image_size_stg0"] - error_context.context("Format the new data disk and mount it.", - LOG_JOB.info) + error_context.context("Format the new data disk and mount it.", LOG_JOB.info) if params.get("os_type") == "linux": - self.gagent_setsebool_value('on', params, self.vm) + self.gagent_setsebool_value("on", params, self.vm) disk_data = list(utils_disk.get_linux_disks(session).keys()) mnt_point = utils_disk.configure_empty_disk( - session, disk_data[0], image_size_stg0, "linux", - labeltype="msdos") - src = "/dev/%s1" % disk_data[0] + session, disk_data[0], image_size_stg0, "linux", labeltype="msdos" + ) + src = f"/dev/{disk_data[0]}1" else: disk_index = utils_misc.wait_for( - lambda: utils_disk.get_windows_disks_index(session, - image_size_stg0), - 120) + lambda: utils_disk.get_windows_disks_index(session, image_size_stg0), + 120, + ) if disk_index: - LOG_JOB.info("Clear readonly for disk and online it in windows" - " guest.") - if not utils_disk.update_windows_disk_attributes(session, - disk_index): + LOG_JOB.info( + "Clear readonly for disk and online it in windows" " guest." + ) + if not utils_disk.update_windows_disk_attributes(session, disk_index): test.error("Failed to update windows disk attributes.") mnt_point = utils_disk.configure_empty_disk( - session, disk_index[0], image_size_stg0, "windows", - labeltype="msdos") + session, + disk_index[0], + image_size_stg0, + "windows", + labeltype="msdos", + ) else: test.error("Didn't find any disk_index except system disk.") @@ -3305,64 +3556,64 @@ def wrap_windows_cmd(cmd): session.cmd("restorecon -Rv /", timeout=180) self.gagent.fsfreeze() - error_context.context("Umount fs or offline disk in guest.", - LOG_JOB.info) + error_context.context("Umount fs or offline disk in guest.", LOG_JOB.info) if params.get("os_type") == "linux": - if params['os_variant'] == 'rhel6': + if params["os_variant"] == "rhel6": try: - session.cmd("umount %s" % mnt_point[0]) + session.cmd(f"umount {mnt_point[0]}") except ShellTimeoutError: - LOG_JOB.info("For rhel6 guest, umount fs will fail after" - " fsfreeze.") + LOG_JOB.info( + "For rhel6 guest, umount fs will fail after" " fsfreeze." + ) else: - test.error("For rhel6 guest, umount fs should fail after" - " fsfreeze.") + test.error( + "For rhel6 guest, umount fs should fail after" " fsfreeze." + ) else: if not utils_disk.umount(src, mnt_point[0], session=session): - test.fail("For rhel7+ guest, umount fs should success" - " after fsfreeze.") + test.fail( + "For rhel7+ guest, umount fs should success" " after fsfreeze." 
+ ) else: - detail_cmd = ' echo detail disk' + detail_cmd = " echo detail disk" detail_cmd = wrap_windows_cmd(detail_cmd) - offline_cmd = ' echo offline disk' + offline_cmd = " echo offline disk" offline_cmd = wrap_windows_cmd(offline_cmd) did = disk_index[0] LOG_JOB.info("Detail for 'Disk%s'", did) details = session.cmd_output(detail_cmd % did) if re.search("Status.*Online", details, re.I | re.M): LOG_JOB.info("Offline 'Disk%s'", did) - status, output = session.cmd_status_output(offline_cmd % did, - timeout=120) + status, output = session.cmd_status_output( + offline_cmd % did, timeout=120 + ) if status != 0: - test.fail("Can not offline disk: %s with" - " fsfreeze." % output) + test.fail(f"Can not offline disk: {output} with" " fsfreeze.") error_context.context("Thaw fs.", LOG_JOB.info) try: self.gagent.fsthaw() except guest_agent.VAgentCmdError as detail: - if not re.search("fsfreeze is limited up to 10 seconds", - str(detail)): - test.error("guest-fsfreeze-thaw cmd failed with: ('%s')" - % str(detail)) + if not re.search("fsfreeze is limited up to 10 seconds", str(detail)): + test.error(f"guest-fsfreeze-thaw cmd failed with: ('{str(detail)}')") - error_context.context("Mount fs or online disk in guest.", - LOG_JOB.info) + error_context.context("Mount fs or online disk in guest.", LOG_JOB.info) if params.get("os_type") == "linux": try: if not utils_disk.mount(src, mnt_point[0], session=session): - if params['os_variant'] != 'rhel6': - test.fail("For rhel7+ guest, mount fs should success" - " after fsthaw.") + if params["os_variant"] != "rhel6": + test.fail( + "For rhel7+ guest, mount fs should success" " after fsthaw." + ) else: - if params['os_variant'] == 'rhel6': - test.fail("For rhel6 guest, mount fs should fail after" - " fsthaw.") + if params["os_variant"] == "rhel6": + test.fail( + "For rhel6 guest, mount fs should fail after" " fsthaw." + ) finally: - self.gagent_setsebool_value('off', params, self.vm) + self.gagent_setsebool_value("off", params, self.vm) else: - if not utils_disk.update_windows_disk_attributes(session, - disk_index): + if not utils_disk.update_windows_disk_attributes(session, disk_index): test.fail("Can't online disk with fsthaw") @error_context.context_aware @@ -3383,10 +3634,12 @@ def gagent_check_user_logoff(self, test, params, env): error_context.context("Make the user log out.", LOG_JOB.info) try: - session.cmd("logoff %s" % login_user_id) - except (aexpect.ShellProcessTerminatedError, - aexpect.ShellProcessTerminatedError, - aexpect.ShellStatusError): + session.cmd(f"logoff {login_user_id}") + except ( + aexpect.ShellProcessTerminatedError, + aexpect.ShellProcessTerminatedError, + aexpect.ShellStatusError, + ): pass else: test.fail("The user logoff failed.") @@ -3403,6 +3656,7 @@ def gagent_check_blacklist(self, test, params, env): :param params: Dictionary with the test parameters :param env: Dictionary with test environment. """ + def bl_check(qga_cmd): """ check if qga cmd is disabled. @@ -3413,55 +3667,59 @@ def bl_check(qga_cmd): else: self.gagent.cmd(qga_cmd) except guest_agent.VAgentCmdError as detail: - if re.search("%s has been disabled" % qga_cmd, str(detail)): + if re.search(f"{qga_cmd} has been disabled", str(detail)): LOG_JOB.info("%s cmd is disabled.", qga_cmd) else: - test.fail("%s cmd failed with:" - "('%s')" % (qga_cmd, str(detail))) + test.fail(f"{qga_cmd} cmd failed with:" f"('{str(detail)}')") else: - test.fail("%s cmd is not in blacklist," - " pls have a check." 
% qga_cmd) + test.fail(f"{qga_cmd} cmd is not in blacklist," " pls have a check.") session = self._get_session(params, None) self._open_session_list.append(session) - error_context.context("Try to execute guest-file-open command which" - " is in blacklist by default.", LOG_JOB.info) + error_context.context( + "Try to execute guest-file-open command which" + " is in blacklist by default.", + LOG_JOB.info, + ) randstr = utils_misc.generate_random_string(5) guest_file = "/tmp/qgatest" + randstr bl_check("guest-file-open") - error_context.context("Try to execute guest-info command which is" - " not in blacklist.", - LOG_JOB.info) + error_context.context( + "Try to execute guest-info command which is" " not in blacklist.", + LOG_JOB.info, + ) self.gagent.cmd("guest-info") - error_context.context("Change command in blacklist and restart" - " agent service.", LOG_JOB.info) + error_context.context( + "Change command in blacklist and restart" " agent service.", LOG_JOB.info + ) session.cmd("cp /etc/sysconfig/qemu-ga /etc/sysconfig/qemu-ga-bk") full_qga_ver = self._get_qga_version(session, self.vm, main_ver=False) black_list_spec = "BLACKLIST_RPC" - if full_qga_ver in VersionInterval('[8.1.0-5,)'): + if full_qga_ver in VersionInterval("[8.1.0-5,)"): black_list_spec, black_list_spec_replace = "allow-rpcs", "block-rpcs" - elif full_qga_ver in VersionInterval('[7.2.0-4,)'): + elif full_qga_ver in VersionInterval("[7.2.0-4,)"): black_list_spec = "BLOCK_RPCS" if black_list_spec == "allow-rpcs": - black_list_change_cmd = "sed -i 's/%s.*/%s=guest-info\"/g' /etc/sysconfig/qemu-ga" % (black_list_spec, black_list_spec_replace) # pylint: disable=E0606 + black_list_change_cmd = f"sed -i 's/{black_list_spec}.*/{black_list_spec_replace}=guest-info\"/g' /etc/sysconfig/qemu-ga" # pylint: disable=E0606 else: - black_list_change_cmd = "sed -i 's/%s.*/%s=guest-info/g' /etc/sysconfig/qemu-ga" % (black_list_spec, black_list_spec) + black_list_change_cmd = f"sed -i 's/{black_list_spec}.*/{black_list_spec}=guest-info/g' /etc/sysconfig/qemu-ga" try: session.cmd(black_list_change_cmd) session.cmd(params["gagent_restart_cmd"]) - error_context.context("Try to execute guest-file-open and " - "guest-info commands again.", LOG_JOB.info) - ret_handle = int(self.gagent.guest_file_open(guest_file, - mode="a+")) + error_context.context( + "Try to execute guest-file-open and " "guest-info commands again.", + LOG_JOB.info, + ) + ret_handle = int(self.gagent.guest_file_open(guest_file, mode="a+")) self.gagent.guest_file_close(ret_handle) bl_check("guest-info") finally: - session.cmd("rm -rf %s" % guest_file) + session.cmd(f"rm -rf {guest_file}") cmd = "mv -f /etc/sysconfig/qemu-ga-bk /etc/sysconfig/qemu-ga" session.cmd(cmd) @@ -3480,49 +3738,55 @@ def gagent_check_virtio_device(self, test, params, env): def _result_check(rsult_qga, rsult_guest): if rsult_qga != rsult_guest: msg = "The result is different between qga and guest\n" - msg += "from qga: %s\n" % rsult_qga - msg += "from guest: %s\n" % rsult_guest + msg += f"from qga: {rsult_qga}\n" + msg += f"from guest: {rsult_guest}\n" test.fail(msg) devs_list = self.gagent.get_virtio_device() check_driver_cmd_org = params["check_driver_powershell_cmd"] - main_qga_ver = int(self.gagent.guest_info()["version"].split('.')[0]) + main_qga_ver = int(self.gagent.guest_info()["version"].split(".")[0]) for device in devs_list: driver_name = device["driver-name"] - error_context.context("Check %s info." 
% driver_name, LOG_JOB.info) + error_context.context(f"Check {driver_name} info.", LOG_JOB.info) driver_date = device["driver-date"] driver_version = device["driver-version"] # main_qga_ver includes windows and rhel these individual situations. - device_address = (device["id"] if 5 <= main_qga_ver < 10 or - main_qga_ver >= 102 else device["address"]["data"]) + device_address = ( + device["id"] + if 5 <= main_qga_ver < 10 or main_qga_ver >= 102 + else device["address"]["data"] + ) device_id = device_address["device-id"] vendor_id = device_address["vendor-id"] - filter_name = ("friendlyname" if "Ethernet" in driver_name else - "devicename") - check_driver_cmd = check_driver_cmd_org % ( - filter_name, driver_name) + filter_name = "friendlyname" if "Ethernet" in driver_name else "devicename" + check_driver_cmd = check_driver_cmd_org % (filter_name, driver_name) driver_info_guest = session.cmd_output(check_driver_cmd) # check driver date # driverdate : 20200219000000.******+*** - date_group = re.search(r"driverdate.*\:\s(\d{4})(\d{2})(\d{2})", - driver_info_guest, re.I).groups() + date_group = re.search( + r"driverdate.*\:\s(\d{4})(\d{2})(\d{2})", driver_info_guest, re.I + ).groups() driver_date_guest = "-".join(date_group) if 5 <= main_qga_ver < 10 or main_qga_ver >= 102: - driver_date_guest_timearray = time.strptime(driver_date_guest, - "%Y-%m-%d") + driver_date_guest_timearray = time.strptime( + driver_date_guest, "%Y-%m-%d" + ) driver_date_guest = time.mktime(driver_date_guest_timearray) - if abs(driver_date_guest - int(driver_date/1000000000)) > 86400: - test.fail("The difference of driver_date between guest and qga " - "shoudn't differ by more than 86400 seconds") + if abs(driver_date_guest - int(driver_date / 1000000000)) > 86400: + test.fail( + "The difference of driver_date between guest and qga " + "shoudn't differ by more than 86400 seconds" + ) else: _result_check(driver_date, driver_date_guest) # check driver version - driver_ver_guest = re.search(r"driverversion.*\:\s(\S+)", - driver_info_guest, re.I).group(1) + driver_ver_guest = re.search( + r"driverversion.*\:\s(\S+)", driver_info_guest, re.I + ).group(1) _result_check(driver_version, driver_ver_guest) # check vender id and device id @@ -3557,8 +3821,8 @@ def gagent_check_os_basic_info(self, test, params, env): def _result_check(rsult_qga, rsult_guest): if rsult_qga != rsult_guest: msg = "The result is different between qga and guest\n" - msg += "from qga: %s\n" % rsult_qga - msg += "from guest: %s\n" % rsult_guest + msg += f"from qga: {rsult_qga}\n" + msg += f"from guest: {rsult_guest}\n" test.fail(msg) error_context.context("Check host name of guest.", LOG_JOB.info) @@ -3570,8 +3834,9 @@ def _result_check(rsult_qga, rsult_guest): if params["os_type"] == "linux": # this step that set new hostname and # check it out just for linux. 
- error_context.context("Check host name after setting new host name.", - LOG_JOB.info) + error_context.context( + "Check host name after setting new host name.", LOG_JOB.info + ) cmd_set_host_name = params["cmd_set_host_name"] host_name_guest = session.cmd_output(cmd_set_host_name).strip() host_name_ga = self.gagent.get_host_name()["host-name"] @@ -3584,28 +3849,24 @@ def _result_check(rsult_qga, rsult_guest): LOG_JOB.info("Check timezone name.") cmd_get_timezone_name = params["cmd_get_timezone_name"] - timezone_name_guest = session.cmd_output( - cmd_get_timezone_name).strip() + timezone_name_guest = session.cmd_output(cmd_get_timezone_name).strip() if params["os_type"] == "windows": # there are standard name and daylight name for windows os, # both are accepted. cmd_dlight_name = params["cmd_get_timezone_dlight_name"] - timezone_dlight_name_guest = session.cmd_output( - cmd_dlight_name).strip() - timezone_name_list = [timezone_name_guest, - timezone_dlight_name_guest] + timezone_dlight_name_guest = session.cmd_output(cmd_dlight_name).strip() + timezone_name_list = [timezone_name_guest, timezone_dlight_name_guest] if timezone_name_ga not in timezone_name_list: msg = "The result is different between qga and guest\n" - msg += "from qga: %s\n" % timezone_name_ga - msg += "from guest: %s\n" % timezone_name_list + msg += f"from qga: {timezone_name_ga}\n" + msg += f"from guest: {timezone_name_list}\n" test.fail(msg) else: _result_check(timezone_name_ga, timezone_name_guest) LOG_JOB.info("Check timezone offset.") cmd_get_timezone_offset = params["cmd_get_timezone_offset"] - timezone_offset_guest = session.cmd_output( - cmd_get_timezone_offset).strip() + timezone_offset_guest = session.cmd_output(cmd_get_timezone_offset).strip() # +08:00 # (UTC+08:00) Beijing, Chongqing, Hong Kong, Urumqi pattern = r"(\S)(\d\d):\d\d" @@ -3616,14 +3877,14 @@ def _result_check(rsult_qga, rsult_guest): else: offset_seconds = int(timezone_list[0][1]) * 3600 if timezone_list[0][0] == "-": - timezone_offset_guest_seconds = int(timezone_list[0][0] - + str(offset_seconds)) + timezone_offset_guest_seconds = int( + timezone_list[0][0] + str(offset_seconds) + ) else: timezone_offset_guest_seconds = int(offset_seconds) _result_check(timezone_offset_ga, timezone_offset_guest_seconds) - error_context.context("Check the current active users number.", - LOG_JOB.info) + error_context.context("Check the current active users number.", LOG_JOB.info) user_qga_list = self.gagent.get_users() user_num_qga = len(user_qga_list) cmd_get_users = params["cmd_get_users"] @@ -3638,7 +3899,7 @@ def _result_check(rsult_qga, rsult_guest): else: user_name_list_guest = [] for user in user_guest_list: - user = user.strip(' >') + user = user.strip(" >") user_name = user.split()[0] user_name_list_guest.append(user_name) # get non duplicate user name @@ -3647,18 +3908,16 @@ def _result_check(rsult_qga, rsult_guest): if user_num_qga != user_num_guest: msg = "Currently active users number are different" msg += " between qga and guest\n" - msg += "from qga: %s\n" % len(user_num_qga) - msg += "from guest: %s\n" % len(user_num_guest) + msg += f"from qga: {len(user_num_qga)}\n" + msg += f"from guest: {len(user_num_guest)}\n" test.fail(msg) - error_context.context("Check the current active users info.", - LOG_JOB.info) + error_context.context("Check the current active users info.", LOG_JOB.info) for user_qga in user_qga_list: login_time_qga = user_qga["login-time"] user_name_qga = user_qga["user"] - error_context.context("Check %s user info." 
% user_name_qga, - LOG_JOB.info) + error_context.context(f"Check {user_name_qga} user info.", LOG_JOB.info) # only have domain key in windows guest if params["os_type"] == "windows": # username is lowercase letters in windows guest @@ -3675,26 +3934,27 @@ def _result_check(rsult_qga, rsult_guest): # loggin many times. cmd_get_user = params["cmd_get_user"] % user_name records = session.cmd_output(cmd_get_user).strip().splitlines() - error_context.context("Check active users logging time, if " - "multiple instances of the user are " - "logged in, record the earliest one.", - LOG_JOB.info) - first_login = float('inf') + error_context.context( + "Check active users logging time, if " + "multiple instances of the user are " + "logged in, record the earliest one.", + LOG_JOB.info, + ) + first_login = float("inf") time_pattern = params["time_pattern"] cmd_time_trans = params["cmd_time_trans"] for record in records: login_time_guest = re.search(time_pattern, record).group(1) cmd_time_trans_guest = cmd_time_trans % login_time_guest - login_time_guest = session.cmd_output( - cmd_time_trans_guest).strip() + login_time_guest = session.cmd_output(cmd_time_trans_guest).strip() first_login = min(first_login, float(login_time_guest)) delta = abs(float(login_time_qga) - float(first_login)) if delta > 60: - msg = "%s login time are different between" % user_name_qga + msg = f"{user_name_qga} login time are different between" msg += " qga and guest\n" - msg += "from qga: %s\n" % login_time_qga - msg += "from guest: %s\n" % first_login + msg += f"from qga: {login_time_qga}\n" + msg += f"from guest: {first_login}\n" test.fail(msg) @error_context.context_aware @@ -3706,11 +3966,12 @@ def gagent_check_os_info(self, test, params, env): :param params: Dictionary with the test parameters :param env: Dictionary with test environment. 
""" + def _result_check(rsult_qga, rsult_guest): if rsult_qga.lower() != rsult_guest.lower(): msg = "The result is different between qga and guest\n" - msg += "from qga: %s\n" % rsult_qga - msg += "from guest: %s\n" % rsult_guest + msg += f"from qga: {rsult_qga}\n" + msg += f"from guest: {rsult_guest}\n" test.fail(msg) session = self._get_session(params, None) @@ -3738,57 +3999,55 @@ def _result_check(rsult_qga, rsult_guest): if os_type == "windows": os_name = "Microsoft Windows" else: - os_name = re.search(r'(Red Hat.*) release', - os_name_full_guest, re.I).group(1) + os_name = re.search(r"(Red Hat.*) release", os_name_full_guest, re.I).group( + 1 + ) _result_check(os_id_qga, os_id) _result_check(os_name_qga, os_name) error_context.context("Check os pretty name.", LOG_JOB.info) if os_type == "windows": os_pretty_name_guest = re.search( - r'Microsoft (.*)', os_name_full_guest, re.M).group(1) + r"Microsoft (.*)", os_name_full_guest, re.M + ).group(1) else: os_pretty_name_guest = os_name_full_guest if "release" in os_name_full_guest: - os_pretty_name_guest = re.sub(r'release ', '', - os_name_full_guest) + os_pretty_name_guest = re.sub(r"release ", "", os_name_full_guest) _result_check(os_pretty_name_qga, os_pretty_name_guest) error_context.context("Check os version info.", LOG_JOB.info) # 2019, 8.1, 2012 R2, 8 pattern = r"(\d+(.)?(?(2)(\d+))( R2)?)" - os_version_id_guest = re.search(pattern, - os_name_full_guest, re.I).group(1) + os_version_id_guest = re.search(pattern, os_name_full_guest, re.I).group(1) if os_type == "windows": - os_version_guest = re.search(r'(Microsoft.*\d)', - os_name_full_guest, re.I).group(1) + os_version_guest = re.search( + r"(Microsoft.*\d)", os_name_full_guest, re.I + ).group(1) # 2012 R2 if "R2" in os_version_id_guest: - os_version_id_guest = re.sub(r' R2', 'R2', - os_version_id_guest) + os_version_id_guest = re.sub(r" R2", "R2", os_version_id_guest) else: - os_version_guest = re.search(r'release (\d.*)', - os_name_full_guest, re.I).group(1) + os_version_guest = re.search( + r"release (\d.*)", os_name_full_guest, re.I + ).group(1) if "Beta" in os_version_guest: - os_version_guest = re.sub(r'Beta ', '', os_version_guest) + os_version_guest = re.sub(r"Beta ", "", os_version_guest) _result_check(os_version_qga, os_version_guest) _result_check(os_version_id_qga, os_version_id_guest) - error_context.context("Check kernel version and release version.", - LOG_JOB.info) + error_context.context("Check kernel version and release version.", LOG_JOB.info) cmd_get_kernel_ver = params["cmd_get_kernel_ver"] kernel_info_guest = session.cmd_output(cmd_get_kernel_ver).strip() if os_type == "windows": - kernel_g = re.search(r'(\d+\.\d+)\.(\d+)', - kernel_info_guest, re.I) + kernel_g = re.search(r"(\d+\.\d+)\.(\d+)", kernel_info_guest, re.I) kernel_version_guest = kernel_g.group(1) kernel_release_guest = kernel_g.group(2) else: kernel_version_guest = kernel_info_guest cmd_get_kernel_rel = params["cmd_get_kernel_rel"] - kernel_release_guest = session.cmd_output( - cmd_get_kernel_rel).strip() + kernel_release_guest = session.cmd_output(cmd_get_kernel_rel).strip() _result_check(kernel_version_qga, kernel_version_guest) _result_check(kernel_release_qga, kernel_release_guest) @@ -3796,8 +4055,9 @@ def _result_check(rsult_qga, rsult_guest): variant_qga = os_info_qga.get("variant", "") if variant_qga: variant_id_qga = os_info_qga["variant-id"] - variant_guest = "server" \ - if "server" in os_name_full_guest.lower() else "client" + variant_guest = ( + "server" if "server" in 
os_name_full_guest.lower() else "client" + ) _result_check(variant_qga, variant_guest) _result_check(variant_id_qga, variant_guest) @@ -3818,7 +4078,7 @@ def run_once(self, test, params, env): QemuGuestAgentTest.run_once(self, test, params, env) gagent_check_type = self.params["gagent_check_type"] - chk_type = "gagent_check_%s" % gagent_check_type + chk_type = f"gagent_check_{gagent_check_type}" if hasattr(self, chk_type): func = getattr(self, chk_type) func(test, params, env) @@ -3827,7 +4087,6 @@ def run_once(self, test, params, env): class QemuGuestAgentBasicCheckWin(QemuGuestAgentBasicCheck): - """ Qemu guest agent test class for windows guest. """ @@ -3876,8 +4135,7 @@ def _add_cert(session, cert_path, store): media_type = params["virtio_win_media_type"] check_serial = params.get("cmd_check_serial") - error_context.context("Check whether serial drive is running.", - LOG_JOB.info) + error_context.context("Check whether serial drive is running.", LOG_JOB.info) if check_serial: check_serial_result = session.cmd_output(check_serial).strip() if "Running" not in check_serial_result: @@ -3885,65 +4143,67 @@ def _add_cert(session, cert_path, store): installed_any = False # wait for cdroms having driver installed in case that # they are new appeared in this test - utils_misc.wait_for(lambda: utils_misc.get_winutils_vol(session), - timeout=120, step=10) - devcon_path = utils_misc.set_winutils_letter(session, - params["devcon_path"]) - s, o = session.cmd_status_output("dir %s" % devcon_path, - timeout=120) + utils_misc.wait_for( + lambda: utils_misc.get_winutils_vol(session), timeout=120, step=10 + ) + devcon_path = utils_misc.set_winutils_letter( + session, params["devcon_path"] + ) + s, o = session.cmd_status_output(f"dir {devcon_path}", timeout=120) if s: - test.error("Not found devcon.exe, details: %s" % o) - vm_infos = {'drive_letter': '', - 'product_dirname': '', 'arch_dirname': ''} + test.error(f"Not found devcon.exe, details: {o}") + vm_infos = { + "drive_letter": "", + "product_dirname": "", + "arch_dirname": "", + } for chk_point in vm_infos.keys(): try: - get_content_func = getattr(virtio_win, - "%s_%s" % (chk_point, - media_type)) + get_content_func = getattr( + virtio_win, f"{chk_point}_{media_type}" + ) except AttributeError: - test.error("Not supported virtio " - "win media type '%s'", media_type) + test.error( + "Not supported virtio " "win media type '%s'", media_type + ) vm_infos[chk_point] = get_content_func(session) if not vm_infos[chk_point]: - test.error("Could not get %s of guest" % chk_point) - inf_middle_path = ("{name}\\{arch}" if media_type == "iso" - else "{arch}\\{name}" - ).format(name=vm_infos['product_dirname'], - arch=vm_infos['arch_dirname']) + test.error(f"Could not get {chk_point} of guest") + inf_middle_path = ( + "{name}\\{arch}" if media_type == "iso" else "{arch}\\{name}" + ).format( + name=vm_infos["product_dirname"], arch=vm_infos["arch_dirname"] + ) inf_find_cmd = 'dir /b /s %s\\%s.inf | findstr "\\%s\\\\"' - inf_find_cmd %= (vm_infos['drive_letter'], - driver_name, inf_middle_path) - inf_path = session.cmd(inf_find_cmd, - timeout=120).strip() + inf_find_cmd %= (vm_infos["drive_letter"], driver_name, inf_middle_path) + inf_path = session.cmd(inf_find_cmd, timeout=120).strip() LOG_JOB.info("Found inf file '%s'", inf_path) - error_context.context("Installing certificates", - LOG_JOB.info) - cert_files = params['cert_files'] - cert_files = utils_misc.set_winutils_letter(session, - cert_files) - cert_files = [cert.split("=", - 1) for cert in 
cert_files.split()] + error_context.context("Installing certificates", LOG_JOB.info) + cert_files = params["cert_files"] + cert_files = utils_misc.set_winutils_letter(session, cert_files) + cert_files = [cert.split("=", 1) for cert in cert_files.split()] for store, cert in cert_files: _chk_cert(session, cert) _add_cert(session, cert, store) for hwid in device_hwid.split(): - output = session.cmd_output("%s find %s" % - (devcon_path, hwid)) + output = session.cmd_output(f"{devcon_path} find {hwid}") if re.search("No matching devices found", output, re.I): continue - inst_cmd = "%s updateni %s %s" % (devcon_path, - inf_path, hwid) + inst_cmd = f"{devcon_path} updateni {inf_path} {hwid}" status, output = session.cmd_status_output(inst_cmd, 360) # acceptable status: OK(0), REBOOT(1) if status > 1: - test.error("Failed to install driver '%s', " - "details:\n%s" % (driver_name, output)) + test.error( + f"Failed to install driver '{driver_name}', " + f"details:\n{output}" + ) installed_any |= True if not installed_any: - test.error("Failed to find target devices " - "by hwids: '%s'" % device_hwid) + test.error( + "Failed to find target devices " f"by hwids: '{device_hwid}'" + ) @error_context.context_aware def get_qga_pkg_path(self, qemu_ga_pkg, test, session, params, vm): @@ -3959,65 +4219,74 @@ def get_qga_pkg_path(self, qemu_ga_pkg, test, session, params, vm): :param vm: Virtual machine object. :return qemu_ga_pkg_path: Return the guest agent pkg path. """ - error_context.context("Get %s path where it locates." % qemu_ga_pkg, - LOG_JOB.info) + error_context.context(f"Get {qemu_ga_pkg} path where it locates.", LOG_JOB.info) qemu_ga_pkg_path = "" if self.gagent_src_type == "url": gagent_host_path = params["gagent_host_path"] gagent_download_url = params["gagent_download_url"] - mqgaw_ver = re.search(r'(?:\d+\.){2}\d+', gagent_download_url) - mqgaw_ver_list = list(map(int, mqgaw_ver.group(0).split('.'))) - mqgaw_name = (params["qga_bin"] if mqgaw_ver_list >= [105, 0, 1] - else params["qga_bin_legacy"]) + mqgaw_ver = re.search(r"(?:\d+\.){2}\d+", gagent_download_url) + mqgaw_ver_list = list(map(int, mqgaw_ver.group(0).split("."))) + mqgaw_name = ( + params["qga_bin"] + if mqgaw_ver_list >= [105, 0, 1] + else params["qga_bin_legacy"] + ) src_qgarpm_path = params["src_qgarpm_path"] % mqgaw_name cmd_get_qgamsi_path = params["get_qgamsi_path"] % mqgaw_name - qga_msi = params['qemu_ga_pkg'] + qga_msi = params["qemu_ga_pkg"] rpm_install = "rpm_install" in gagent_download_url if rpm_install: gagent_download_url = gagent_download_url.split("rpm_install:")[-1] - gagent_download_cmd = 'wget -qP %s %s' % (gagent_host_path, - gagent_download_url) + gagent_download_cmd = ( + f"wget -qP {gagent_host_path} {gagent_download_url}" + ) gagent_host_path += src_qgarpm_path else: gagent_host_path += qga_msi - gagent_download_cmd = 'wget %s %s' % (gagent_host_path, - gagent_download_url) + gagent_download_cmd = f"wget {gagent_host_path} {gagent_download_url}" - error_context.context("Download qemu-ga package from website " - "and copy it to guest.", LOG_JOB.info) + error_context.context( + "Download qemu-ga package from website " "and copy it to guest.", + LOG_JOB.info, + ) process.system(gagent_download_cmd) - s, gagent_host_path = process.getstatusoutput('ls %s' % gagent_host_path) + s, gagent_host_path = process.getstatusoutput(f"ls {gagent_host_path}") if s != 0: - test.error("qemu-ga package is not exist, maybe it is not " - "successfully downloaded ") - s, o = session.cmd_status_output("mkdir %s" % 
self.gagent_guest_dir) + test.error( + "qemu-ga package is not exist, maybe it is not " + "successfully downloaded " + ) + s, o = session.cmd_status_output(f"mkdir {self.gagent_guest_dir}") if s and "already exists" not in o: - test.error("Could not create qemu-ga directory in " - "VM '%s', detail: '%s'" % (vm.name, o)) + test.error( + "Could not create qemu-ga directory in " + f"VM '{vm.name}', detail: '{o}'" + ) if rpm_install: get_qga_msi = params["installrpm_getmsi"] % gagent_host_path process.system(get_qga_msi, shell=True, timeout=10) - qgamsi_path = process.system_output(cmd_get_qgamsi_path, - shell=True) - qgamsi_path = qgamsi_path.decode(encoding="utf-8", - errors="strict").split("\n") + qgamsi_path = process.system_output(cmd_get_qgamsi_path, shell=True) + qgamsi_path = qgamsi_path.decode( + encoding="utf-8", errors="strict" + ).split("\n") tmp_dir = params["gagent_host_path"] - process.system("cp -r %s %s %s" % (qgamsi_path[0], - qgamsi_path[1], - tmp_dir)) + process.system(f"cp -r {qgamsi_path[0]} {qgamsi_path[1]} {tmp_dir}") gagent_host_path = tmp_dir + qga_msi error_context.context("Copy qemu-ga.msi to guest", LOG_JOB.info) vm.copy_files_to(gagent_host_path, self.gagent_guest_dir) - qemu_ga_pkg_path = r"%s\%s" % (self.gagent_guest_dir, qemu_ga_pkg) + qemu_ga_pkg_path = rf"{self.gagent_guest_dir}\{qemu_ga_pkg}" elif self.gagent_src_type == "virtio-win": vol_virtio_key = "VolumeName like '%virtio-win%'" vol_virtio = utils_misc.get_win_disk_vol(session, vol_virtio_key) - qemu_ga_pkg_path = r"%s:\%s\%s" % (vol_virtio, "guest-agent", - qemu_ga_pkg) + qemu_ga_pkg_path = r"{}:\{}\{}".format( + vol_virtio, "guest-agent", qemu_ga_pkg + ) else: - test.error("Only support 'url' and 'virtio-win' method to " - "download qga installer now.") + test.error( + "Only support 'url' and 'virtio-win' method to " + "download qga installer now." + ) LOG_JOB.info("The qemu-ga pkg full path is %s", qemu_ga_pkg_path) return qemu_ga_pkg_path @@ -4029,10 +4298,12 @@ def setup(self, test, params, env): if self.start_vm == "yes": session = self._get_session(params, self.vm) self._open_session_list.append(session) - qemu_ga_pkg_path = self.get_qga_pkg_path(self.qemu_ga_pkg, test, - session, params, self.vm) - self.gagent_install_cmd = params.get("gagent_install_cmd" - ) % qemu_ga_pkg_path + qemu_ga_pkg_path = self.get_qga_pkg_path( + self.qemu_ga_pkg, test, session, params, self.vm + ) + self.gagent_install_cmd = ( + params.get("gagent_install_cmd") % qemu_ga_pkg_path + ) if self._check_ga_pkg(session, params.get("gagent_pkg_check_cmd")): LOG_JOB.info("qemu-ga is already installed.") @@ -4047,7 +4318,7 @@ def setup(self, test, params, env): self.gagent_start(session, self.vm) time.sleep(5) - if params["check_vioser"] == 'yes': + if params["check_vioser"] == "yes": self._check_serial_driver(test, params, env) args = [params.get("gagent_serial_type"), params.get("gagent_name")] self.gagent_create(params, self.vm, *args) @@ -4066,20 +4337,23 @@ def gagent_check_get_disks(self, test, params, env): session = self._get_session(params, None) self._open_session_list.append(session) - error_context.context("Check all disks info in a loop.", - LOG_JOB.info) + error_context.context("Check all disks info in a loop.", LOG_JOB.info) # Due Since the disk information obtained in windows does not include # partition, the partition attribute is detected as always false. 
disks_info_qga = self.gagent.get_disks() for disk_info in disks_info_qga: - diskname = disk_info['name'].replace("\\", r"\\") - disk_info_guest = session.cmd_output(params["cmd_get_diskinfo"] - % diskname).strip().split() + diskname = disk_info["name"].replace("\\", r"\\") + disk_info_guest = ( + session.cmd_output(params["cmd_get_diskinfo"] % diskname) + .strip() + .split() + ) error_context.context("Check disk name", LOG_JOB.info) if diskname.upper() != disk_info_guest[0].replace("\\", r"\\"): - test.fail("Disk %s name is different " - "between guest and qga." % diskname) + test.fail( + f"Disk {diskname} name is different " "between guest and qga." + ) @error_context.context_aware def gagent_check_fsfreeze_vss_test(self, test, params, env): @@ -4100,15 +4374,18 @@ def gagent_check_fsfreeze_vss_test(self, test, params, env): :param params: Dictionary with the test parameters :param env: Dictionary with test environment """ + @error_context.context_aware def background_start(session): """ Before freeze or thaw guest file system, start a background test. """ - LOG_JOB.info("Write time stamp to guest file per second " - "as a background job.") + LOG_JOB.info( + "Write time stamp to guest file per second " "as a background job." + ) fswrite_cmd = utils_misc.set_winutils_letter( - session, self.params["gagent_fs_test_cmd"]) + session, self.params["gagent_fs_test_cmd"] + ) session.cmd(fswrite_cmd, timeout=360) @@ -4121,20 +4398,23 @@ def result_check(flag, write_timeout, session): :param write_timeout: timeout of writing to guest file """ time.sleep(write_timeout) - k_cmd = "wmic process where \"name='python.exe' and " \ - "CommandLine Like '%fsfreeze%'\" call terminate" + k_cmd = ( + "wmic process where \"name='python.exe' and " + "CommandLine Like '%fsfreeze%'\" call terminate" + ) s, o = session.cmd_status_output(k_cmd) if s: - self.test.error("Command '%s' failed, status: %s," - " output: %s" % (k_cmd, s, o)) + self.test.error( + f"Command '{k_cmd}' failed, status: {s}," f" output: {o}" + ) error_context.context("Check guest FS status.", LOG_JOB.info) # init fs status to 'thaw' fs_status = "thaw" - file_name = "/tmp/fsfreeze_%s.txt" % flag - process.system("rm -rf %s" % file_name) + file_name = f"/tmp/fsfreeze_{flag}.txt" + process.system(f"rm -rf {file_name}") self.vm.copy_files_from("C:\\fsfreeze.txt", file_name) - with open(file_name, 'r') as f: + with open(file_name, "r") as f: list_time = f.readlines() for i in list(range(0, len(list_time))): @@ -4143,16 +4423,17 @@ def result_check(flag, write_timeout, session): for i in list(range(1, len(list_time))): num_d = float(list_time[i]) - float(list_time[i - 1]) if num_d > 8: - LOG_JOB.info("Time stamp is not continuous," - " so the FS is frozen.") + LOG_JOB.info( + "Time stamp is not continuous," " so the FS is frozen." + ) fs_status = "frozen" break if not fs_status == flag: - self.test.fail("FS is not %s, it's %s." % (flag, fs_status)) + self.test.fail(f"FS is not {flag}, it's {fs_status}.") - error_context.context("Check guest agent command " - "'guest-fsfreeze-freeze/thaw'", - LOG_JOB.info) + error_context.context( + "Check guest agent command " "'guest-fsfreeze-freeze/thaw'", LOG_JOB.info + ) session = self._get_session(self.params, None) self._open_session_list.append(session) @@ -4165,40 +4446,47 @@ def result_check(flag, write_timeout, session): # Thaw guest FS if the fs status is incorrect. 
self.gagent.fsthaw(check_status=False) - error_context.context("Before freeze/thaw the FS, run the background " - "job.", LOG_JOB.info) + error_context.context( + "Before freeze/thaw the FS, run the background " "job.", LOG_JOB.info + ) background_start(session) error_context.context("Freeze the FS.", LOG_JOB.info) self.gagent.fsfreeze() try: - error_context.context("Waiting %s, then finish writing the time " - "stamp in guest file." % write_timeout) + error_context.context( + f"Waiting {write_timeout}, then finish writing the time " + "stamp in guest file." + ) result_check("frozen", write_timeout, session) # Next, thaw guest fs. - error_context.context("Before freeze/thaw the FS, run the background " - "job.", LOG_JOB.info) + error_context.context( + "Before freeze/thaw the FS, run the background " "job.", LOG_JOB.info + ) background_start(session) error_context.context("Thaw the FS.", LOG_JOB.info) try: self.gagent.fsthaw() except guest_agent.VAgentCmdError as detail: - if re.search("fsfreeze is limited up to 10 seconds", - str(detail)): + if re.search("fsfreeze is limited up to 10 seconds", str(detail)): LOG_JOB.info("FS is thaw as it's limited up to 10 seconds.") else: - test.fail("guest-fsfreeze-thaw cmd failed with:" - "('%s')" % str(detail)) + test.fail( + "guest-fsfreeze-thaw cmd failed with:" f"('{str(detail)}')" + ) except Exception: # Thaw fs finally, avoid problem in following cases. try: self.gagent.fsthaw(check_status=False) except Exception as detail: # Ignore exception for this thaw action. - LOG_JOB.warn("Finally failed to thaw guest fs," - " detail: '%s'", detail) + LOG_JOB.warning( + "Finally failed to thaw guest fs," " detail: '%s'", detail + ) raise - error_context.context("Waiting %s, then finish writing the time " - "stamp in guest file." % write_timeout) + error_context.context( + f"Waiting {write_timeout}, then finish writing the time " + "stamp in guest file." + ) result_check("thaw", write_timeout, session) @error_context.context_aware @@ -4218,12 +4506,13 @@ def gagent_check_fstrim(self, test, params, env): :param params: Dictionary with the test parameters :param env: Dictionary with test environment. """ + def get_blocks(): """ Get the used blocks of data disk. 
:return: the used blocks """ - blocks = process.system_output("stat -t %s" % image_filename_stg) + blocks = process.system_output(f"stat -t {image_filename_stg}") return blocks.strip().split()[2] session = self._get_session(params, None) @@ -4232,53 +4521,53 @@ def get_blocks(): error_context.context("Format data disk.", LOG_JOB.info) image_size_stg = params["image_size_stg"] disk_index = utils_misc.wait_for( - lambda: utils_disk.get_windows_disks_index(session, - image_size_stg), 120) + lambda: utils_disk.get_windows_disks_index(session, image_size_stg), 120 + ) if not disk_index: test.error("Didn't get windows disk index.") LOG_JOB.info("Clear readonly of disk and online it in windows guest.") if not utils_disk.update_windows_disk_attributes(session, disk_index): test.error("Failed to update windows disk attributes.") mnt_point = utils_disk.configure_empty_windows_disk( - session, disk_index[0], image_size_stg, quick_format=False) + session, disk_index[0], image_size_stg, quick_format=False + ) - error_context.context("Check the original blocks of data disk.", - LOG_JOB.info) + error_context.context("Check the original blocks of data disk.", LOG_JOB.info) image_params_stg = params.object_params("stg") image_filename_stg = storage.get_image_filename( - image_params_stg, data_dir.get_data_dir()) + image_params_stg, data_dir.get_data_dir() + ) error_context.context("Create fragment in data disk.", LOG_JOB.info) - guest_dir = r"%s:" % mnt_point[0] - data_file = os.path.join(guest_dir, - "qga_fstrim%s" % - utils_misc.generate_random_string(5)) + guest_dir = rf"{mnt_point[0]}:" + data_file = os.path.join( + guest_dir, f"qga_fstrim{utils_misc.generate_random_string(5)}" + ) for i in range(2): count = 1000 * (i + 1) LOG_JOB.info("Create %sM file in guest.", count) cmd = "dd if=/dev/random of=%s bs=1M count=%d" % (data_file, count) session.cmd(cmd, timeout=600) - delete_file_cmd = "%s %s" % (params["delete_file_cmd"], - data_file.replace("/", "\\")) + delete_file_cmd = "{} {}".format( + params["delete_file_cmd"], + data_file.replace("/", "\\"), + ) LOG_JOB.info("Delete the guest file created just now.") session.cmd(delete_file_cmd) - error_context.context("Check blocks of data disk before fstrim.", - LOG_JOB.info) + error_context.context("Check blocks of data disk before fstrim.", LOG_JOB.info) blocks_before_fstrim = get_blocks() - error_context.context("Execute the guest-fstrim cmd via qga.", - LOG_JOB.info) + error_context.context("Execute the guest-fstrim cmd via qga.", LOG_JOB.info) self.gagent.fstrim() - error_context.context("Check blocks of data disk after fstrim.", - LOG_JOB.info) + error_context.context("Check blocks of data disk after fstrim.", LOG_JOB.info) blocks_after_fstrim = get_blocks() if int(blocks_after_fstrim) >= int(blocks_before_fstrim): msg = "Fstrim failed\n" - msg += "the blocks before fstrim is %s\n" % blocks_before_fstrim - msg += "the blocks after fstrim is %s." % blocks_after_fstrim + msg += f"the blocks before fstrim is {blocks_before_fstrim}\n" + msg += f"the blocks after fstrim is {blocks_after_fstrim}." 
test.fail(msg) @error_context.context_aware @@ -4294,9 +4583,8 @@ def gagent_check_resource_leak(self, test, params, env): def execute_qga_cmds_loop(): for i in range(repeats): - if os.environ["AVCADO_TP_QEMU_GUEST_AGENT_SIGNAL"] == 'True': - LOG_JOB.info("execute 'get-osinfo/devices'" - " %s times", (i + 1)) + if os.environ["AVCADO_TP_QEMU_GUEST_AGENT_SIGNAL"] == "True": + LOG_JOB.info("execute 'get-osinfo/devices'" " %s times", (i + 1)) self.gagent.get_osinfo() self.gagent.get_virtio_device() else: @@ -4310,7 +4598,7 @@ def process_resource(): :return qga_memory: the memory of qga.service """ - get_resour = self.params['cmd_get_qga_resource'] + get_resour = self.params["cmd_get_qga_resource"] qga_resources = session.cmd_output(get_resour).strip().split(" ") qga_handles = qga_resources[0] qga_memory = int(qga_resources[-2]) / 1024 @@ -4319,13 +4607,12 @@ def process_resource(): def _is_increase(_list, n, ignore_stable_end=True): if n == 0: return True - if (ignore_stable_end and n != len(_list) - 1 and - _list[n] == _list[n - 1]): + if ignore_stable_end and n != len(_list) - 1 and _list[n] == _list[n - 1]: return _is_increase(_list, n - 1) - return (_list[n] > _list[n - 1] and _is_increase(_list, n - 1)) + return _list[n] > _list[n - 1] and _is_increase(_list, n - 1) def get_index(_list): - return (len(_list)-1) if len(_list) >= 2 else 0 + return (len(_list) - 1) if len(_list) >= 2 else 0 def seperate_list(sum_list): new_l = [] @@ -4342,18 +4629,16 @@ def seperate_list(sum_list): trough_l.append(new_l[i]) return peak_l, trough_l - def check_leak(check_list, check_type='memory'): + def check_leak(check_list, check_type="memory"): peak_l, trough_l = seperate_list(check_list) - idx_p, idx_t = (get_index(peak_l), - get_index(trough_l)) + idx_p, idx_t = (get_index(peak_l), get_index(trough_l)) idx_cl = get_index(check_list) - if (_is_increase(peak_l, idx_p) or - _is_increase(trough_l, idx_t)): - if _is_increase(check_list, idx_cl, - ignore_stable_end=False): - test.fail("QGA commands caused resource leak " - "anyway. %s is %s" % (check_type, - check_list[-1])) + if _is_increase(peak_l, idx_p) or _is_increase(trough_l, idx_t): + if _is_increase(check_list, idx_cl, ignore_stable_end=False): + test.fail( + "QGA commands caused resource leak " + f"anyway. 
{check_type} is {check_list[-1]}" + ) def _base_on_bg_check_resource_leak(): """ @@ -4374,25 +4659,28 @@ def _base_on_bg_check_resource_leak(): time.sleep(5) check_leak(memory_list) if qga_handles > qga_handle_threshold: - check_leak(handle_list, 'handle') + check_leak(handle_list, "handle") return False return True session = self._get_session(params, None) self._open_session_list.append(session) - error_context.context("Check whether resources leak during executing" - " get-osinfo/devices in a loop.", LOG_JOB.info) + error_context.context( + "Check whether resources leak during executing" + " get-osinfo/devices in a loop.", + LOG_JOB.info, + ) repeats = int(params.get("repeat_times", 1)) qga_mem_threshold = int(params.get("qga_mem_threshold", 1)) qga_handle_threshold = int(params.get("qga_handle_threshold", 1)) check_timeout = int(params.get("check_timeout", 1)) - os.environ["AVCADO_TP_QEMU_GUEST_AGENT_SIGNAL"] = 'True' + os.environ["AVCADO_TP_QEMU_GUEST_AGENT_SIGNAL"] = "True" bg = utils_misc.InterruptedThread(execute_qga_cmds_loop) bg.start() utils_misc.wait_for(lambda: _base_on_bg_check_resource_leak(), 600) - os.environ["AVCADO_TP_QEMU_GUEST_AGENT_SIGNAL"] = 'False' + os.environ["AVCADO_TP_QEMU_GUEST_AGENT_SIGNAL"] = "False" bg.join() @error_context.context_aware @@ -4409,15 +4697,20 @@ def gagent_check_run_qga_as_program(self, test, params, env): run_gagent_program_cmd = params["run_gagent_program_cmd"] kill_qga_program_cmd = params["kill_qga_program_cmd"] - error_context.context("Check qemu-ga service status and stop it, " - "then run qemu-ga as a program", test.log.info) + error_context.context( + "Check qemu-ga service status and stop it, " + "then run qemu-ga as a program", + test.log.info, + ) if self._check_ga_service(session, params.get("gagent_status_cmd")): test.log.info("qemu-ga service is running, stopping it now.") self.gagent_stop(session, self.vm) time.sleep(5) else: - test.error("WARNING: qemu-ga service is not running currently." - " It's better to have a check.") + test.error( + "WARNING: qemu-ga service is not running currently." + " It's better to have a check." + ) session.cmd(run_gagent_program_cmd) _osinfo = self.gagent.get_osinfo() if not _osinfo: @@ -4452,18 +4745,22 @@ def gagent_check_driver_update_via_installer_tool(self, test, params, env): qga_ver_pkg = str(self.gagent.guest_info()["version"]) uninstall_gagent(session, test, gagent_uninstall_cmd) session = vm.reboot(session) - session = run_installer_with_interaction(vm, session, test, params, - run_install_cmd, - copy_files_params=params) + session = run_installer_with_interaction( + vm, session, test, params, run_install_cmd, copy_files_params=params + ) qga_ver_installer = str(self.gagent.guest_info()["version"]) - error_context.context("Check if qga version is corresponding between" - "msi and installer.exe", test.log.info) + error_context.context( + "Check if qga version is corresponding between" "msi and installer.exe", + test.log.info, + ) if not qga_ver_installer: test.fail("Qemu-ga.exe can't work normally.") elif str(qga_ver_installer) != str(qga_ver_pkg): - test.error("Qemu-ga version is not corresponding between " - "installer.exe and msi package.") + test.error( + "Qemu-ga version is not corresponding between " + "installer.exe and msi package." 
+ ) session.close() @@ -4479,21 +4776,25 @@ def gagent_check_debugview_VSS_DLL(self, test, params, env): session = vm.wait_for_login() gagent = self.gagent - cmd_run_debugview = utils_misc.set_winutils_letter(session, - params["cmd_run_debugview"]) + cmd_run_debugview = utils_misc.set_winutils_letter( + session, params["cmd_run_debugview"] + ) cmd_check_string_VSS = params["cmd_check_string_VSS"] error_context.context("Check if debugview can capture log info", test.log.info) s, o = session.cmd_status_output(cmd_run_debugview) if s: - test.error("Debugviewconsole.exe run failed, " - "Please check the output is: %s" % o) + test.error( + "Debugviewconsole.exe run failed, " f"Please check the output is: {o}" + ) gagent.fsfreeze() gagent.fsthaw() s, o = session.cmd_status_output(cmd_check_string_VSS) if s: - test.fail("debugview can't capture expected log info, " - "the actual output is %s" % o) + test.fail( + "debugview can't capture expected log info, " + f"the actual output is {o}" + ) def run(test, params, env): diff --git a/qemu/tests/qemu_guest_agent_hotplug.py b/qemu/tests/qemu_guest_agent_hotplug.py index 93fe3cde2f..84628a8b87 100644 --- a/qemu/tests/qemu_guest_agent_hotplug.py +++ b/qemu/tests/qemu_guest_agent_hotplug.py @@ -20,12 +20,12 @@ def check_status_unplug(out, dev): if out is True: test.log.debug("Unplug %s successfully", dev) else: - test.fail("Error occurred while unpluging %s" % dev) + test.fail(f"Error occurred while unpluging {dev}") vm = env.get_vm(params["main_vm"]) vm.verify_alive() - char_backend = params["backend_char_plug"] + params["backend_char_plug"] char_id = params["id_char_plug"] gagent_name = params["gagent_name"] char_path = vm.get_serial_console_filename(gagent_name) diff --git a/qemu/tests/qemu_guest_agent_snapshot.py b/qemu/tests/qemu_guest_agent_snapshot.py index b0959dec11..2695c6039d 100644 --- a/qemu/tests/qemu_guest_agent_snapshot.py +++ b/qemu/tests/qemu_guest_agent_snapshot.py @@ -1,31 +1,25 @@ import logging import re -from avocado.utils import crypto -from avocado.utils import process - -from virttest import error_context -from virttest import utils_misc +from avocado.utils import crypto, process +from virttest import error_context, utils_misc from provider.blockdev_snapshot_base import BlockDevSnapshotTest from qemu.tests.qemu_guest_agent import QemuGuestAgentBasicCheckWin -LOG_JOB = logging.getLogger('avocado.test') +LOG_JOB = logging.getLogger("avocado.test") class QemuGuestAgentSnapshotTest(QemuGuestAgentBasicCheckWin): - def __init__(self, test, params, env): - super(QemuGuestAgentSnapshotTest, self).__init__(test, params, - env) - self.snapshot_create = BlockDevSnapshotTest(self.test, self.params, - self.env) + super().__init__(test, params, env) + self.snapshot_create = BlockDevSnapshotTest(self.test, self.params, self.env) @error_context.context_aware def setup(self, test, params, env): # pylint: disable=E1003 if params["os_type"] == "windows": - super(QemuGuestAgentSnapshotTest, self).setup(test, params, env) + super().setup(test, params, env) else: super(QemuGuestAgentBasicCheckWin, self).setup(test, params, env) @@ -34,20 +28,22 @@ def _action_before_fsfreeze(self, *args): copy_timeout = int(self.params.get("copy_timeoout", 600)) file_size = int(self.params.get("file_size", "1024")) tmp_name = utils_misc.generate_random_string(5) - self.host_path = self.guest_path = "/tmp/%s" % tmp_name + self.host_path = self.guest_path = f"/tmp/{tmp_name}" if self.params.get("os_type") != "linux": - self.guest_path = r"c:\%s" % tmp_name + 
self.guest_path = rf"c:\{tmp_name}" error_context.context("Create a file in host.") - process.run("dd if=/dev/urandom of=%s bs=1M count=%s" - % (self.host_path, file_size)) + process.run(f"dd if=/dev/urandom of={self.host_path} bs=1M count={file_size}") self.orig_hash = crypto.hash_file(self.host_path) - error_context.context("Transfer file from %s to %s" % - (self.host_path, self.guest_path), LOG_JOB.info) + error_context.context( + f"Transfer file from {self.host_path} to {self.guest_path}", + LOG_JOB.info, + ) self.bg = utils_misc.InterruptedThread( self.vm.copy_files_to, (self.host_path, self.guest_path), - dict(verbose=True, timeout=copy_timeout)) + dict(verbose=True, timeout=copy_timeout), + ) self.bg.start() def check_snapshot(self): @@ -57,31 +53,33 @@ def check_snapshot(self): snapshot_info = str(self.vm.monitor.info("block")) snapshot_node_name = self.params.get("snapshot_node_name") if self.params.get("snapshot_file") not in snapshot_info: - self.test.fail("Snapshot doesn't exist:%s" % snapshot_info) + self.test.fail(f"Snapshot doesn't exist:{snapshot_info}") LOG_JOB.info("Found snapshot in guest") if snapshot_node_name: - match_string = "u?'node-name': u?'%s'" % snapshot_node_name + match_string = f"u?'node-name': u?'{snapshot_node_name}'" if not re.search(match_string, snapshot_info): - self.test.fail("Can not find node name %s of" - " snapshot in block info %s" - % (snapshot_node_name, snapshot_info)) + self.test.fail( + f"Can not find node name {snapshot_node_name} of" + f" snapshot in block info {snapshot_info}" + ) LOG_JOB.info("Match node-name if they are same with expected") def cleanup(self, test, params, env): - super(QemuGuestAgentSnapshotTest, self).cleanup(test, params, env) + super().cleanup(test, params, env) self.snapshot_create.snapshot_image.remove() @error_context.context_aware def _action_after_fsfreeze(self, *args): if self.bg.is_alive(): image_tag = self.params.get("image_name", "image1") - image_params = self.params.object_params(image_tag) + self.params.object_params(image_tag) error_context.context("Creating snapshot", LOG_JOB.info) self.snapshot_create.prepare_snapshot_file() self.snapshot_create.create_snapshot() - error_context.context("Checking snapshot created successfully", - LOG_JOB.info) + error_context.context( + "Checking snapshot created successfully", LOG_JOB.info + ) self.check_snapshot() @error_context.context_aware @@ -94,23 +92,23 @@ def _action_after_fsthaw(self, *args): self.bg.join() # Make sure the returned file is identical to the original one try: - self.host_path_returned = "%s-returned" % self.host_path + self.host_path_returned = f"{self.host_path}-returned" self.vm.copy_files_from(self.guest_path, self.host_path_returned) error_context.context("comparing hashes", LOG_JOB.info) self.curr_hash = crypto.hash_file(self.host_path_returned) if self.orig_hash != self.curr_hash: - self.test.fail("Current file hash (%s) differs from " - "original one (%s)" % (self.curr_hash, - self.orig_hash)) + self.test.fail( + f"Current file hash ({self.curr_hash}) differs from " + f"original one ({self.orig_hash})" + ) finally: error_context.context("Delete the created files.", LOG_JOB.info) - process.run("rm -rf %s %s" % (self.host_path, - self.host_path_returned)) + process.run(f"rm -rf {self.host_path} {self.host_path_returned}") session = self._get_session(self.params, None) self._open_session_list.append(session) - cmd_del_file = "rm -rf %s" % self.guest_path + cmd_del_file = f"rm -rf {self.guest_path}" if self.params.get("os_type") == 
"windows": - cmd_del_file = r"del /f /q %s" % self.guest_path + cmd_del_file = rf"del /f /q {self.guest_path}" session.cmd(cmd_del_file) diff --git a/qemu/tests/qemu_guest_agent_suspend.py b/qemu/tests/qemu_guest_agent_suspend.py index 2e94a2fe4f..375b6975cb 100644 --- a/qemu/tests/qemu_guest_agent_suspend.py +++ b/qemu/tests/qemu_guest_agent_suspend.py @@ -1,16 +1,14 @@ import logging -from virttest import error_context -from virttest import guest_agent +from virttest import error_context, guest_agent from generic.tests.guest_suspend import GuestSuspendBaseTest from qemu.tests.qemu_guest_agent import QemuGuestAgentTest -LOG_JOB = logging.getLogger('avocado.test') +LOG_JOB = logging.getLogger("avocado.test") class SuspendViaGA(GuestSuspendBaseTest): - guest_agent = None suspend_mode = "" @@ -25,7 +23,6 @@ def start_suspend(self, **args): class QemuGASuspendTest(QemuGuestAgentTest): - """ Test qemu guest agent, this case will: 1) Start VM with virtio serial port. diff --git a/qemu/tests/qemu_guest_agent_update.py b/qemu/tests/qemu_guest_agent_update.py index df9ab3c41a..837cb08949 100644 --- a/qemu/tests/qemu_guest_agent_update.py +++ b/qemu/tests/qemu_guest_agent_update.py @@ -3,20 +3,15 @@ import time from avocado.utils import process - -from virttest import env_process -from virttest import error_context -from virttest import utils_misc -from virttest import data_dir +from virttest import data_dir, env_process, error_context, utils_misc from virttest.utils_windows import wmic from qemu.tests.qemu_guest_agent import QemuGuestAgentBasicCheckWin -LOG_JOB = logging.getLogger('avocado.test') +LOG_JOB = logging.getLogger("avocado.test") class QemuGuestAgentUpdateTest(QemuGuestAgentBasicCheckWin): - @error_context.context_aware def gagent_check_pkg_update(self, test, params, env): """ @@ -43,8 +38,7 @@ def _change_agent_media(cdrom_virtio): :param cdrom_virtio: iso file """ LOG_JOB.info("Change cdrom to %s", cdrom_virtio) - virtio_iso = utils_misc.get_path(data_dir.get_data_dir(), - cdrom_virtio) + virtio_iso = utils_misc.get_path(data_dir.get_data_dir(), cdrom_virtio) vm.change_media("drive_virtio", virtio_iso) LOG_JOB.info("Wait until device is ready") @@ -53,8 +47,7 @@ def _change_agent_media(cdrom_virtio): end_time = time.time() + timeout while time.time() < end_time: time.sleep(2) - virtio_win_letter = utils_misc.get_win_disk_vol(session, - vol_virtio_key) + virtio_win_letter = utils_misc.get_win_disk_vol(session, vol_virtio_key) if virtio_win_letter: break if not virtio_win_letter: @@ -67,9 +60,10 @@ def _get_pkg_download_cmd(): """ qga_html = "/tmp/qemu-ga.html" qga_url = params["qga_url"] - qga_html_download_cmd = "wget %s -O %s" % (qga_url, qga_html) - process.system(qga_html_download_cmd, - float(params.get("login_timeout", 360))) + qga_html_download_cmd = f"wget {qga_url} -O {qga_html}" + process.system( + qga_html_download_cmd, float(params.get("login_timeout", 360)) + ) with open(qga_html, "r") as f: lines = f.readlines() @@ -79,26 +73,24 @@ def _get_pkg_download_cmd(): list_qga.append(line) tgt_line = list_qga[-2] # qemu-ga-win-7.5.0-2.el7ev - qga_pattern = re.compile(r"%s" % params["qga_pattern"]) + qga_pattern = re.compile(r"{}".format(params["qga_pattern"])) qga_pre_pkg = qga_pattern.findall(tgt_line)[0] LOG_JOB.info("The previous qemu-ga version is %s.", qga_pre_pkg) # https://fedorapeople.org/groups/virt/virtio-win/direct-downloads/ # archive-qemu-ga/qemu-ga-win-7.5.0-2.el7ev/ - qga_url_pre = r"%s/%s/%s" % (qga_url, qga_pre_pkg, - self.qemu_ga_pkg) + qga_url_pre = 
rf"{qga_url}/{qga_pre_pkg}/{self.qemu_ga_pkg}" qga_host_path = params["gagent_host_path"] - params["gagent_download_cmd"] = "wget %s -O %s" % (qga_url_pre, - qga_host_path) + params["gagent_download_cmd"] = f"wget {qga_url_pre} -O {qga_host_path}" def _qga_install(): """ Install qemu-ga pkg. """ qga_pkg_path = self.get_qga_pkg_path( - self.qemu_ga_pkg, test, session, params, vm) - self.gagent_install_cmd = params.get("gagent_install_cmd" - ) % qga_pkg_path + self.qemu_ga_pkg, test, session, params, vm + ) + self.gagent_install_cmd = params.get("gagent_install_cmd") % qga_pkg_path self.gagent_install(session, vm) error_context.context("Boot up vm.", LOG_JOB.info) @@ -109,30 +101,29 @@ def _qga_install(): session = self._get_session(params, vm) if params.get("driver_uninstall", "no") == "yes": - error_context.context("Uninstall vioser driver in guest.", - LOG_JOB.info) + error_context.context("Uninstall vioser driver in guest.", LOG_JOB.info) device_name = params["device_name"] driver_name = params["driver_name"] - inf_names_get_cmd = wmic.make_query("path win32_pnpsigneddriver", - "DeviceName like '%s'" % - device_name, - props=["InfName"], - get_swch=wmic.FMT_TYPE_LIST) - inf_names = wmic.parse_list(session.cmd(inf_names_get_cmd, - timeout=360)) + inf_names_get_cmd = wmic.make_query( + "path win32_pnpsigneddriver", + f"DeviceName like '{device_name}'", + props=["InfName"], + get_swch=wmic.FMT_TYPE_LIST, + ) + inf_names = wmic.parse_list(session.cmd(inf_names_get_cmd, timeout=360)) for inf_name in inf_names: pnp_cmd = "pnputil /delete-driver %s /uninstall /force" - uninst_store_cmd = params.get("uninst_store_cmd", - pnp_cmd) % inf_name + uninst_store_cmd = params.get("uninst_store_cmd", pnp_cmd) % inf_name s, o = session.cmd_status_output(uninst_store_cmd, 360) if s not in (0, 3010): # for vioser, they need system reboot # acceptable status: OK(0), REBOOT(3010) - test.error("Failed to uninstall driver '%s' from store, " - "details:\n%s" % (driver_name, o)) + test.error( + f"Failed to uninstall driver '{driver_name}' from store, " + f"details:\n{o}" + ) - error_context.context("Install the previous qemu-ga in guest.", - LOG_JOB.info) + error_context.context("Install the previous qemu-ga in guest.", LOG_JOB.info) gagent_download_url = params["gagent_download_url"] rpm_install = "rpm_install" in gagent_download_url if self._check_ga_pkg(session, params["gagent_pkg_check_cmd"]): @@ -148,8 +139,7 @@ def _qga_install(): _qga_install() - error_context.context("Update qemu-ga to the latest one.", - LOG_JOB.info) + error_context.context("Update qemu-ga to the latest one.", LOG_JOB.info) if self.gagent_src_type == "virtio-win" or rpm_install: _change_agent_media(params["cdrom_virtio"]) else: diff --git a/qemu/tests/qemu_img.py b/qemu/tests/qemu_img.py index 297407b480..0647218268 100644 --- a/qemu/tests/qemu_img.py +++ b/qemu/tests/qemu_img.py @@ -1,18 +1,12 @@ import os -import time import re import shutil -import tempfile import signal +import tempfile +import time from avocado.utils import process - -from virttest import error_context -from virttest import utils_misc -from virttest import env_process -from virttest import storage -from virttest import data_dir -from virttest import gluster +from virttest import data_dir, env_process, error_context, gluster, storage, utils_misc from virttest.utils_numeric import normalize_data_size @@ -43,8 +37,9 @@ def remove(path): except OSError: pass - def _get_image_filename(img_name, enable_gluster=False, - enable_nvme=False, img_fmt=None): + def 
_get_image_filename( + img_name, enable_gluster=False, enable_nvme=False, img_fmt=None + ): """ Generate an image path. @@ -55,16 +50,15 @@ def _get_image_filename(img_name, enable_gluster=False, """ if enable_gluster: gluster_uri = gluster.create_gluster_uri(params) - image_filename = "%s%s" % (gluster_uri, img_name) + image_filename = f"{gluster_uri}{img_name}" if img_fmt: - image_filename += ".%s" % img_fmt + image_filename += f".{img_fmt}" elif enable_nvme: image_filename = image_name else: if img_fmt: - img_name = "%s.%s" % (img_name, img_fmt) - image_filename = utils_misc.get_path(data_dir.get_data_dir(), - img_name) + img_name = f"{img_name}.{img_fmt}" + image_filename = utils_misc.get_path(data_dir.get_data_dir(), img_name) return image_filename def _check(cmd, img): @@ -74,9 +68,10 @@ def _check(cmd, img): :param cmd: qemu-img base command. :param img: image to be checked """ - cmd += " check %s" % img - error_context.context("Checking image '%s' by command '%s'" - % (img, cmd), test.log.info) + cmd += f" check {img}" + error_context.context( + f"Checking image '{img}' by command '{cmd}'", test.log.info + ) try: output = process.system_output(cmd, verbose=False).decode() except process.CmdError as err: @@ -96,30 +91,35 @@ def check_test(cmd): :param cmd: qemu-img base command. """ - test_image = _get_image_filename(params["image_name_dd"], - enable_gluster) + test_image = _get_image_filename(params["image_name_dd"], enable_gluster) create_image_cmd = params["create_image_cmd"] create_image_cmd = create_image_cmd % test_image - msg = " Create image %s by command %s" % (test_image, create_image_cmd) + msg = f" Create image {test_image} by command {create_image_cmd}" error_context.context(msg, test.log.info) process.system(create_image_cmd, verbose=False, shell=True) status, output = _check(cmd, test_image) if not status: - test.fail("Check image '%s' failed with error: %s" % - (test_image, output)) + test.fail(f"Check image '{test_image}' failed with error: {output}") for fmt in params["supported_image_formats"].split(): - output_image = test_image + ".%s" % fmt + output_image = test_image + f".{fmt}" _convert(cmd, fmt, test_image, output_image) status, output = _check(cmd, output_image) if not status: - test.fail("Check image '%s' got error: %s" % - (output_image, output)) + test.fail(f"Check image '{output_image}' got error: {output}") remove(output_image) remove(test_image) - def _create(cmd, img_name, fmt, img_size=None, base_img=None, - base_img_fmt=None, encrypted="no", - preallocated="off", cluster_size=None): + def _create( + cmd, + img_name, + fmt, + img_size=None, + base_img=None, + base_img_fmt=None, + encrypted="no", + preallocated="off", + cluster_size=None, + ): """ Simple wrapper of 'qemu-img create' @@ -139,30 +139,30 @@ def _create(cmd, img_name, fmt, img_size=None, base_img=None, if encrypted == "yes": cmd += " -e" if base_img: - cmd += " -b %s" % base_img + cmd += f" -b {base_img}" if base_img_fmt: - cmd += " -F %s" % base_img_fmt + cmd += f" -F {base_img_fmt}" - cmd += " -f %s" % fmt + cmd += f" -f {fmt}" options = [] if preallocated != "off": - options.append("preallocation=%s" % preallocated) + options.append(f"preallocation={preallocated}") if cluster_size is not None: - options.append("cluster_size=%s" % cluster_size) + options.append(f"cluster_size={cluster_size}") if options: - cmd += " -o %s" % ",".join(options) + cmd += " -o {}".format(",".join(options)) - cmd += " %s" % img_name + cmd += f" {img_name}" if img_size: - cmd += " %s" % img_size + cmd += f" 
{img_size}" - msg = "Creating image %s by command %s" % (img_name, cmd) + msg = f"Creating image {img_name} by command {cmd}" error_context.context(msg, test.log.info) process.system(cmd, verbose=False) status, out = _check(qemu_img_binary, img_name) if not status: - test.fail("Check image '%s' got error: %s" % (img_name, out)) + test.fail(f"Check image '{img_name}' got error: {out}") def create_test(cmd): """ @@ -173,14 +173,19 @@ def create_test(cmd): image_large = params["image_name_large"] device = params.get("device") if not device: - img = _get_image_filename(image_large, enable_gluster, - enable_nvme, image_format) + img = _get_image_filename( + image_large, enable_gluster, enable_nvme, image_format + ) else: img = device - _create(cmd, img_name=img, fmt=image_format, - img_size=params["image_size_large"], - preallocated=params.get("preallocated", "off"), - cluster_size=params.get("image_cluster_size")) + _create( + cmd, + img_name=img, + fmt=image_format, + img_size=params["image_size_large"], + preallocated=params.get("preallocated", "off"), + cluster_size=params.get("image_cluster_size"), + ) remove(img) def send_signal(timeout=360): @@ -191,9 +196,13 @@ def send_signal(timeout=360): test.log.info("Send signal to qemu-img") end_time = time.time() + timeout while time.time() < end_time: - pid = process.system_output("pidof qemu-img", - ignore_status=True, - verbose=False).decode().strip() + pid = ( + process.system_output( + "pidof qemu-img", ignore_status=True, verbose=False + ) + .decode() + .strip() + ) if bool(pid): break time.sleep(0.1) @@ -211,7 +220,7 @@ def check_command_output(CmdResult): test.log.info("Check result of command") check_output = params.get("check_output", "exit_status") if not hasattr(CmdResult, check_output): - test.error("Unknown check output '%s'" % check_output) + test.error(f"Unknown check output '{check_output}'") output = getattr(CmdResult, check_output) if check_output == "exit_status" and output == 0: return None @@ -221,11 +230,18 @@ def check_command_output(CmdResult): pattern = params.get("command_result_pattern") if not re.findall(pattern, output.decode()): err_msg = "Fail to get expected result!" - err_msg += "Output: %s, expected pattern: %s" % (output, pattern) + err_msg += f"Output: {output}, expected pattern: {pattern}" test.fail(err_msg) - def _convert(cmd, output_fmt, img_name, output_filename, - fmt=None, compressed="no", encrypted="no"): + def _convert( + cmd, + output_fmt, + img_name, + output_filename, + fmt=None, + compressed="no", + encrypted="no", + ): """ Simple wrapper of 'qemu-img convert' function. @@ -246,19 +262,19 @@ def _convert(cmd, output_fmt, img_name, output_filename, if show_progress == "on": cmd += " -p" if fmt: - cmd += " -f %s" % fmt - cmd += " -O %s" % output_fmt + cmd += f" -f {fmt}" + cmd += f" -O {output_fmt}" options = params.get("qemu_img_options") if options: options = options.split() cmd += " -o " for option in options: value = params.get(option) - cmd += "%s=%s," % (option, value) + cmd += f"{option}={value}," cmd = cmd.rstrip(",") - cmd += " %s %s" % (img_name, output_filename) - msg = "Converting '%s' from format '%s'" % (img_name, fmt) - msg += " to '%s'" % output_fmt + cmd += f" {img_name} {output_filename}" + msg = f"Converting '{img_name}' from format '{fmt}'" + msg += f" to '{output_fmt}'" error_context.context(msg, test.log.info) if show_progress == "off": bg = utils_misc.InterruptedThread(send_signal) @@ -272,14 +288,19 @@ def convert_test(cmd): :param cmd: qemu-img base command. 
""" dest_img_fmt = params["dest_image_format"] - output_filename = "%s.converted_%s.%s" % (image_name, - dest_img_fmt, dest_img_fmt) - - _convert(cmd, dest_img_fmt, image_name, output_filename, - image_format, params["compressed"], params["encrypted"]) + output_filename = f"{image_name}.converted_{dest_img_fmt}.{dest_img_fmt}" + + _convert( + cmd, + dest_img_fmt, + image_name, + output_filename, + image_format, + params["compressed"], + params["encrypted"], + ) orig_img_name = params.get("image_name") - img_name = "%s.%s.converted_%s" % (orig_img_name, - image_format, dest_img_fmt) + img_name = f"{orig_img_name}.{image_format}.converted_{dest_img_fmt}" _boot(img_name, dest_img_fmt) if dest_img_fmt == "qcow2": @@ -287,8 +308,9 @@ def convert_test(cmd): if status: remove(output_filename) else: - test.fail("Check image '%s' failed with error: %s" % - (output_filename, output)) + test.fail( + f"Check image '{output_filename}' failed with error: {output}" + ) else: remove(output_filename) @@ -303,14 +325,15 @@ def _info(cmd, img, sub_info=None, fmt=None): """ cmd += " info" if fmt: - cmd += " -f %s" % fmt - cmd += " %s" % img + cmd += f" -f {fmt}" + cmd += f" {img}" try: output = process.system_output(cmd).decode() except process.CmdError as err: - test.log.error("Get info of image '%s' failed: %s", - img, err.result.stderr.decode()) + test.log.error( + "Get info of image '%s' failed: %s", img, err.result.stderr.decode() + ) return None if not sub_info: @@ -319,8 +342,8 @@ def _info(cmd, img, sub_info=None, fmt=None): sub_info += ": (.*)" matches = re.findall(sub_info, output) if "virtual size" in sub_info: - p = re.compile(r'\.0*(G|K)$') - return p.sub(r'\1', matches[0].split()[0]) + p = re.compile(r"\.0*(G|K)$") + return p.sub(r"\1", matches[0].split()[0]) if matches: return matches[0] return None @@ -334,12 +357,11 @@ def info_test(cmd): img_info = _info(cmd, image_name) test.log.info("Info of image '%s':\n%s", image_name, img_info) if image_format not in img_info: - test.fail("Got unexpected format of image '%s'" - " in info test" % image_name) - if not re.search(r'%s\s+bytes' % normalize_data_size( - image_size, "B"), img_info): - test.fail("Got unexpected size of image '%s'" - " in info test" % image_name) + test.fail(f"Got unexpected format of image '{image_name}'" " in info test") + if not re.search( + r"{}\s+bytes".format(normalize_data_size(image_size, "B")), img_info + ): + test.fail(f"Got unexpected size of image '{image_name}'" " in info test") def snapshot_test(cmd): """ @@ -351,34 +373,32 @@ def snapshot_test(cmd): for i in range(2): crtcmd = cmd sn_name = "snapshot%d" % i - crtcmd += " -c %s %s" % (sn_name, image_name) - msg = "Created snapshot '%s' in '%s' by command %s" % (sn_name, - image_name, - crtcmd) + crtcmd += f" -c {sn_name} {image_name}" + msg = f"Created snapshot '{sn_name}' in '{image_name}' by command {crtcmd}" error_context.context(msg, test.log.info) cmd_result = process.run(crtcmd, verbose=False, ignore_status=True) status, output = cmd_result.exit_status, cmd_result.stdout.decode() if status != 0: - test.fail("Create snapshot failed via command: %s;" - "Output is: %s" % (crtcmd, output)) + test.fail( + f"Create snapshot failed via command: {crtcmd};" + f"Output is: {output}" + ) listcmd = cmd - listcmd += " -l %s" % image_name + listcmd += f" -l {image_name}" cmd_result = process.run(listcmd, verbose=False, ignore_status=True) status, out = cmd_result.exit_status, cmd_result.stdout.decode() if not ("snapshot0" in out and "snapshot1" in out and status == 0): - 
test.fail("Snapshot created failed or missed;" - "snapshot list is: \n%s" % out) + test.fail("Snapshot created failed or missed;" f"snapshot list is: \n{out}") for i in range(2): sn_name = "snapshot%d" % i delcmd = cmd - delcmd += " -d %s %s" % (sn_name, image_name) - msg = "Delete snapshot '%s' by command %s" % (sn_name, delcmd) + delcmd += f" -d {sn_name} {image_name}" + msg = f"Delete snapshot '{sn_name}' by command {delcmd}" error_context.context(msg, test.log.info) cmd_result = process.run(delcmd, verbose=False, ignore_status=True) status, output = cmd_result.exit_status, cmd_result.stdout.decode() if status != 0: - test.fail("Delete snapshot '%s' failed: %s" % - (sn_name, output)) + test.fail(f"Delete snapshot '{sn_name}' failed: {output}") def commit_test(cmd): """ @@ -396,28 +416,24 @@ def commit_test(cmd): """ test.log.info("Commit testing started!") - base_image_name = storage.get_image_filename(params, - data_dir.get_data_dir()) - pre_name = '.'.join(image_name.split('.')[:-1]) + base_image_name = storage.get_image_filename(params, data_dir.get_data_dir()) + pre_name = ".".join(image_name.split(".")[:-1]) base_image_format = params.get("image_format", "qcow2") - overlay_file_name = "%s_overlay.qcow2" % pre_name - file_create_cmd = params.get("file_create_cmd", - "touch /commit_testfile") - file_info_cmd = params.get("file_info_cmd", - "ls / | grep commit_testfile") - file_exist_chk_cmd = params.get("file_exist_chk_cmd", - "[ -e /commit_testfile ] && echo $?") - file_del_cmd = params.get("file_del_cmd", - "rm -f /commit_testfile") + overlay_file_name = f"{pre_name}_overlay.qcow2" + file_create_cmd = params.get("file_create_cmd", "touch /commit_testfile") + file_info_cmd = params.get("file_info_cmd", "ls / | grep commit_testfile") + file_exist_chk_cmd = params.get( + "file_exist_chk_cmd", "[ -e /commit_testfile ] && echo $?" 
+ ) + file_del_cmd = params.get("file_del_cmd", "rm -f /commit_testfile") try: # Remove the existing overlay file if os.path.isfile(overlay_file_name): remove(overlay_file_name) # Create the new overlay file - create_cmd = "%s create -b %s -F %s -f qcow2 %s" % ( - cmd, base_image_name, base_image_format, overlay_file_name) - msg = "Create overlay file by command: %s" % create_cmd + create_cmd = f"{cmd} create -b {base_image_name} -F {base_image_format} -f qcow2 {overlay_file_name}" + msg = f"Create overlay file by command: {create_cmd}" error_context.context(msg, test.log.info) try: process.system(create_cmd, verbose=False) @@ -426,15 +442,13 @@ def commit_test(cmd): test.log.info("overlay file (%s) created!", overlay_file_name) # Set the qemu harddisk to the overlay file - test.log.info( - "Original image_name is: %s", params.get('image_name')) - params['image_name'] = '.'.join(overlay_file_name.split('.')[:-1]) - test.log.info("Param image_name changed to: %s", - params.get('image_name')) + test.log.info("Original image_name is: %s", params.get("image_name")) + params["image_name"] = ".".join(overlay_file_name.split(".")[:-1]) + test.log.info("Param image_name changed to: %s", params.get("image_name")) msg = "Start a new VM, using overlay file as its harddisk" error_context.context(msg, test.log.info) - vm_name = params['main_vm'] + vm_name = params["main_vm"] env_process.preprocess_vm(test, params, env, vm_name) vm = env.get_vm(vm_name) vm.verify_alive() @@ -448,15 +462,16 @@ def commit_test(cmd): output = session.cmd(file_info_cmd) test.log.info("Output of %s: %s", file_info_cmd, output) except Exception as err: - test.fail("Could not create commit_testfile in the " - "overlay file %s" % err) + test.fail( + "Could not create commit_testfile in the " f"overlay file {err}" + ) vm.destroy() # Execute the commit command - cmitcmd = "%s commit -f %s %s" % (cmd, image_format, - overlay_file_name) - error_context.context("Committing image by command %s" % cmitcmd, - test.log.info) + cmitcmd = f"{cmd} commit -f {image_format} {overlay_file_name}" + error_context.context( + f"Committing image by command {cmitcmd}", test.log.info + ) try: process.system(cmitcmd, verbose=False) except process.CmdError: @@ -465,8 +480,8 @@ def commit_test(cmd): msg = "Start a new VM, using image_name as its harddisk" error_context.context(msg, test.log.info) - params['image_name'] = pre_name - vm_name = params['main_vm'] + params["image_name"] = pre_name + vm_name = params["main_vm"] env_process.preprocess_vm(test, params, env, vm_name) vm = env.get_vm(vm_name) vm.verify_alive() @@ -501,9 +516,8 @@ def _rebase(cmd, img_name, base_img, backing_fmt, mode="unsafe"): cmd += " -u" if show_progress == "on": cmd += " -p" - cmd += " -b %s -F %s %s" % (base_img, backing_fmt, img_name) - msg = "Trying to rebase '%s' to '%s' by command %s" % (img_name, - base_img, cmd) + cmd += f" -b {base_img} -F {backing_fmt} {img_name}" + msg = f"Trying to rebase '{img_name}' to '{base_img}' by command {cmd}" error_context.context(msg, test.log.info) if show_progress == "off": bg = utils_misc.InterruptedThread(send_signal) @@ -521,10 +535,13 @@ def rebase_test(cmd): :param cmd: qemu-img base command. 
""" - if 'rebase' not in process.system_output(cmd + ' --help', - ignore_status=True).decode(): - test.cancel("Current kvm user space version does not" - " support 'rebase' subcommand") + if ( + "rebase" + not in process.system_output(cmd + " --help", ignore_status=True).decode() + ): + test.cancel( + "Current kvm user space version does not" " support 'rebase' subcommand" + ) sn_fmt = params.get("snapshot_format", "qcow2") sn1 = params["image_name_snapshot1"] sn1 = _get_image_filename(sn1, enable_gluster, img_fmt=sn_fmt) @@ -534,8 +551,8 @@ def rebase_test(cmd): rebase_mode = params.get("rebase_mode", "safe") if rebase_mode == "safe": # Boot snapshot1 image before create snapshot2 - img_format = sn1.split('.')[-1] - img_name = ".".join(sn1.split('.')[:-1]) + img_format = sn1.split(".")[-1] + img_name = ".".join(sn1.split(".")[:-1]) _boot(img_name, img_format) # Create snapshot2 based on snapshot1 @@ -544,8 +561,8 @@ def rebase_test(cmd): _create(cmd, sn2, sn_fmt, base_img=sn1, base_img_fmt=sn_fmt) # Boot snapshot2 image before rebase - img_format = sn2.split('.')[-1] - img_name = ".".join(sn2.split('.')[:-1]) + img_format = sn2.split(".")[-1] + img_name = ".".join(sn2.split(".")[:-1]) _boot(img_name, img_format) if rebase_mode == "unsafe": @@ -553,21 +570,23 @@ def rebase_test(cmd): _rebase(cmd, sn2, base_img, image_format, mode=rebase_mode) # Boot snapshot image after rebase - img_format = sn2.split('.')[-1] - img_name = ".".join(sn2.split('.')[:-1]) + img_format = sn2.split(".")[-1] + img_name = ".".join(sn2.split(".")[:-1]) _boot(img_name, img_format) # Check sn2's format and backing_file actual_base_img = _info(cmd, sn2, "backing file") base_img_name = os.path.basename(base_img) if base_img_name not in actual_base_img: - test.fail("After rebase the backing_file of 'sn2' is " - "'%s' which is not expected as '%s'" - % (actual_base_img, base_img_name)) + test.fail( + "After rebase the backing_file of 'sn2' is " + f"'{actual_base_img}' which is not expected as '{base_img_name}'" + ) status, output = _check(cmd, sn2) if not status: - test.fail("Check image '%s' failed after rebase;" - "got error: %s" % (sn2, output)) + test.fail( + f"Check image '{sn2}' failed after rebase;" f"got error: {output}" + ) remove(sn2) remove(sn1) @@ -581,19 +600,19 @@ def _amend(cmd, img_name, img_fmt, options): :param options: a comma separated list of format specific options """ - msg = "Amend '%s' with options '%s'" % (img_name, options) + msg = f"Amend '{img_name}' with options '{options}'" cmd += " amend" if img_fmt: - cmd += " -f %s" % img_fmt - cache = params.get("cache_mode", '') + cmd += f" -f {img_fmt}" + cache = params.get("cache_mode", "") if cache: - cmd += " -t %s" % cache + cmd += f" -t {cache}" if options: cmd += " -o " for option in options: - cmd += "%s=%s," % (option, params.get(option)) - cmd = cmd.rstrip(',') - cmd += " %s" % img_name + cmd += f"{option}={params.get(option)}," + cmd = cmd.rstrip(",") + cmd += f" {img_name}" error_context.context(msg, test.log.info) check_command_output(process.run(cmd, ignore_status=True)) @@ -617,13 +636,14 @@ def amend_test(cmd): option = "virtual size" actual = _info(cmd, img, option) if actual is not None and actual != expect: - msg = "Get wrong %s from image %s!" % (option, img_name) - msg += "Expect: %s, actual: %s" % (expect, actual) + msg = f"Get wrong {option} from image {img_name}!" 
+ msg += f"Expect: {expect}, actual: {actual}" test.fail(msg) status, output = _check(cmd, img) if not status: - test.fail("Check image '%s' failed after rebase;" - "got error: %s" % (img, output)) + test.fail( + f"Check image '{img}' failed after rebase;" f"got error: {output}" + ) def _boot(img_name, img_fmt): """ @@ -635,15 +655,15 @@ def _boot(img_name, img_fmt): :param img_name: image name :param img_fmt: image format """ - params['image_name'] = img_name - params['image_format'] = img_fmt - dd_file_size = params.get('dd_file_size', 1000) - image_name = "%s.%s" % (img_name, img_fmt) - msg = "Try to boot vm with image %s" % image_name + params["image_name"] = img_name + params["image_format"] = img_fmt + dd_file_size = params.get("dd_file_size", 1000) + image_name = f"{img_name}.{img_fmt}" + msg = f"Try to boot vm with image {image_name}" error_context.context(msg, test.log.info) vm_name = params.get("main_vm") dd_timeout = int(params.get("dd_timeout", 60)) - params['vms'] = vm_name + params["vms"] = vm_name env_process.preprocess_vm(test, params, env, vm_name) vm = env.get_vm(params.get("main_vm")) vm.verify_alive() @@ -651,8 +671,8 @@ def _boot(img_name, img_fmt): session = vm.wait_for_login(timeout=login_timeout) # Run dd in linux guest - if params.get("os_type") == 'linux': - cmd = "dd if=/dev/zero of=/mnt/test bs=%s count=1000" % dd_file_size + if params.get("os_type") == "linux": + cmd = f"dd if=/dev/zero of=/mnt/test bs={dd_file_size} count=1000" status = session.cmd_status(cmd, timeout=dd_timeout) if status != 0: test.error("dd failed") @@ -661,9 +681,9 @@ def _boot(img_name, img_fmt): try: vm.graceful_shutdown(timeout=login_timeout) except Exception: - image_filename = _get_image_filename(img_name, - enable_gluster, - img_fmt=img_fmt) + image_filename = _get_image_filename( + img_name, enable_gluster, img_fmt=img_fmt + ) backup_img_chain(image_filename) raise finally: @@ -681,19 +701,17 @@ def backup_img_chain(image_file): gluster.glusterfs_mount(g_uri, mount_point) image_name = os.path.basename(image_file) image_file = os.path.join(mount_point, image_name) - test.log.warn("backup %s to %s", image_file, test.resultsdir) + test.log.warning("backup %s to %s", image_file, test.resultsdir) shutil.copy(image_file, test.resultsdir) backing_file = _info(qemu_img, image_file, "backing file", None) if backing_file: backup_img_chain(backing_file) elif enable_gluster: - utils_misc.umount(g_uri, mount_point, - "glusterfs", False, - "fuse.glusterfs") + utils_misc.umount(g_uri, mount_point, "glusterfs", False, "fuse.glusterfs") shutil.rmtree(mount_point) return None # Here starts test subcommand = params["subcommand"] - error_context.context("Running %s_test(cmd)" % subcommand, test.log.info) - eval("%s_test(cmd)" % subcommand) + error_context.context(f"Running {subcommand}_test(cmd)", test.log.info) + eval(f"{subcommand}_test(cmd)") diff --git a/qemu/tests/qemu_img_bitmap.py b/qemu/tests/qemu_img_bitmap.py index 659ea91ccd..ff7df28d8c 100644 --- a/qemu/tests/qemu_img_bitmap.py +++ b/qemu/tests/qemu_img_bitmap.py @@ -2,13 +2,9 @@ import socket from avocado.utils import process - -from virttest import data_dir -from virttest import utils_misc -from virttest.qemu_storage import QemuImg +from virttest import data_dir, utils_misc, utils_qemu from virttest.qemu_io import QemuIOSystem -from virttest import utils_qemu - +from virttest.qemu_storage import QemuImg from virttest.utils_version import VersionInterval from provider.nbd_image_export import QemuNBDExportImage @@ -34,38 +30,31 @@ def 
_qemu_io(img, cmd): try: QemuIOSystem(test, params, img.image_filename).cmd_output(cmd, 120) except process.CmdError as err: - test.fail( - "qemu-io to '%s' failed: %s." % (img.image_filename, str(err))) + test.fail(f"qemu-io to '{img.image_filename}' failed: {str(err)}.") def _map_nbd_bitmap(nbd_server, nbd_port, bitmap_name): - """"qemu-img map an image over NBD with bitmap""" + """ "qemu-img map an image over NBD with bitmap""" map_from = "--image-opts driver=nbd,server.type=inet,server.host=%s" map_from += ",server.port=%s,x-dirty-bitmap=qemu:dirty-bitmap:%s" map_from %= (nbd_server, nbd_port, bitmap_name) - qemu_map_cmd = "qemu-img map --output=json %s" % map_from + qemu_map_cmd = f"qemu-img map --output=json {map_from}" result = process.run(qemu_map_cmd, shell=True).stdout_text json_data = json.loads(result) return json_data def _check_bitmap_add(img, bitmap_name): - """"Check the result of bitmap add""" + """ "Check the result of bitmap add""" add_bitmap_info = [ - { - "flags": [ - "auto" - ], - "name": "%s" % bitmap_name, - "granularity": 65536 - } + {"flags": ["auto"], "name": f"{bitmap_name}", "granularity": 65536} ] info_output = json.loads(img.info(output="json")) if "bitmaps" not in info_output["format-specific"]["data"]: - test.fail("Add bitmap failed, and image info is '%s'" % info_output) - elif add_bitmap_info != info_output["format-specific"]["data"][ - "bitmaps"]: + test.fail(f"Add bitmap failed, and image info is '{info_output}'") + elif add_bitmap_info != info_output["format-specific"]["data"]["bitmaps"]: test.fail( "The bitmap info is not correct, and the complete image info " - "is '%s'" % info_output) + f"is '{info_output}'" + ) images = params["images"].split() bitmap = images[0] @@ -86,40 +75,44 @@ def _check_bitmap_add(img, bitmap_name): bitmap_img.bitmap_disable(bitmap_name) info_output = json.loads(bitmap_img.info(output="json")) if info_output["format-specific"]["data"]["bitmaps"][0]["flags"]: - test.fail("Disable bitmap failed, and image info is '%s'" % info_output) + test.fail(f"Disable bitmap failed, and image info is '{info_output}'") # --enable command test.log.info("Enable bitmap of the test image.") bitmap_img.bitmap_enable(bitmap_name) info_output = json.loads(bitmap_img.info(output="json")) - if "auto" not in info_output["format-specific"]["data"]["bitmaps"][0][ - "flags"]: - test.fail("Enable bitmap failed, and image info is '%s'" % info_output) + if "auto" not in info_output["format-specific"]["data"]["bitmaps"][0]["flags"]: + test.fail(f"Enable bitmap failed, and image info is '{info_output}'") # --clear command test.log.info("Clear bitmap of the test image.") - _qemu_io(bitmap_img, 'write -P1 0 1M') + _qemu_io(bitmap_img, "write -P1 0 1M") # export the image over NBD nbd_export = QemuNBDExportImage(params, bitmap) nbd_export.export_image() - nbd_image_tag = params['nbd_image_tag'] + nbd_image_tag = params["nbd_image_tag"] nbd_image_params = params.object_params(nbd_image_tag) localhost = socket.gethostname() - nbd_image_params['nbd_server'] = localhost if localhost else 'localhost' + nbd_image_params["nbd_server"] = localhost if localhost else "localhost" # Map the image over NBD with bitmap - nbd_server = nbd_image_params['nbd_server'] - nbd_port = params['nbd_port_bitmap_test'] - nbd_export_bitmap = params['bitmap_name'] + nbd_server = nbd_image_params["nbd_server"] + nbd_port = params["nbd_port_bitmap_test"] + nbd_export_bitmap = params["bitmap_name"] try: res = _map_nbd_bitmap(nbd_server, nbd_port, nbd_export_bitmap) - match_info = {"start": 
0, "length": 1048576, "depth": 0, - "present": False, "zero": False, "data": False} - if qemu_version in VersionInterval('[8.2.0,)'): + match_info = { + "start": 0, + "length": 1048576, + "depth": 0, + "present": False, + "zero": False, + "data": False, + } + if qemu_version in VersionInterval("[8.2.0,)"): match_info["compressed"] = False if match_info not in res: - test.fail( - "The dumped info is not correct, and the info is %s" % res) + test.fail(f"The dumped info is not correct, and the info is {res}") finally: nbd_export.stop_export() # Execute bitmap clear @@ -129,8 +122,7 @@ def _check_bitmap_add(img, bitmap_name): try: res = _map_nbd_bitmap(nbd_server, nbd_port, nbd_export_bitmap) if match_info in res: - test.fail( - "Clear the bitmap data failed, and the dumped info is %s" % res) + test.fail(f"Clear the bitmap data failed, and the dumped info is {res}") finally: nbd_export.stop_export() @@ -143,40 +135,55 @@ def _check_bitmap_add(img, bitmap_name): # add bitmap to top image test.log.info("Add bitmap to the top image.") top_img.bitmap_add(bitmap_name_top) - _check_bitmap_add(top_img, params['bitmap_name_top']) + _check_bitmap_add(top_img, params["bitmap_name_top"]) # write data to test and top images - _qemu_io(bitmap_img, 'write -P1 0 1M') - _qemu_io(top_img, 'write -P2 1M 1M') + _qemu_io(bitmap_img, "write -P1 0 1M") + _qemu_io(top_img, "write -P2 1M 1M") # check the info before merging nbd_export.export_image() try: res = _map_nbd_bitmap(nbd_server, nbd_port, nbd_export_bitmap) - match_info = {"start": 0, "length": 1048576, "depth": 0, - "present": False, "zero": False, "data": False} - if qemu_version in VersionInterval('[8.2.0,)'): + match_info = { + "start": 0, + "length": 1048576, + "depth": 0, + "present": False, + "zero": False, + "data": False, + } + if qemu_version in VersionInterval("[8.2.0,)"): match_info["compressed"] = False if match_info not in res: test.fail( "Add the bitmap data to base image failed, and the dumped " - "info is %s" % res) + f"info is {res}" + ) finally: nbd_export.stop_export() # merge the bitmap of top image to base image bitmap_source = params["bitmap_source"] - bitmap_img.bitmap_merge(params, root_dir, bitmap_name_top, bitmap_name, - bitmap_source) + bitmap_img.bitmap_merge( + params, root_dir, bitmap_name_top, bitmap_name, bitmap_source + ) # Check the info of base after merging nbd_export.export_image() try: res = _map_nbd_bitmap(nbd_server, nbd_port, nbd_export_bitmap) - match_info = {"start": 0, "length": 2097152, "depth": 0, - "present": False, "zero": False, "data": False} - if qemu_version in VersionInterval('[8.2.0,)'): + match_info = { + "start": 0, + "length": 2097152, + "depth": 0, + "present": False, + "zero": False, + "data": False, + } + if qemu_version in VersionInterval("[8.2.0,)"): match_info["compressed"] = False if match_info not in res: test.fail( "Add the bitmap data to base image failed, and the dumped " - "info is %s" % res) + f"info is {res}" + ) finally: nbd_export.stop_export() @@ -185,4 +192,4 @@ def _check_bitmap_add(img, bitmap_name): bitmap_img.bitmap_remove(bitmap_name) info_output = json.loads(bitmap_img.info(output="json")) if "bitmaps" in info_output["format-specific"]["data"]: - test.fail("Remove bitmap failed, and image info is '%s'" % info_output) + test.fail(f"Remove bitmap failed, and image info is '{info_output}'") diff --git a/qemu/tests/qemu_img_check_data_integrity.py b/qemu/tests/qemu_img_check_data_integrity.py index cb18af05f1..f6e41297fb 100644 --- a/qemu/tests/qemu_img_check_data_integrity.py +++ 
b/qemu/tests/qemu_img_check_data_integrity.py @@ -1,7 +1,6 @@ import time -from virttest import utils_misc -from virttest import data_dir +from virttest import data_dir, utils_misc from virttest.qemu_storage import QemuImg from provider import qemu_img_utils as img_utils @@ -36,14 +35,12 @@ def kill_vm_process(vm): :param vm: vm object """ pid = vm.process.get_pid() - test.log.debug("Ending VM %s process (killing PID %s)", - vm.name, pid) + test.log.debug("Ending VM %s process (killing PID %s)", vm.name, pid) try: utils_misc.kill_process_tree(pid, 9, timeout=60) test.log.debug("VM %s down (process killed)", vm.name) except RuntimeError: - test.error("VM %s (PID %s) is a zombie!" - % (vm.name, vm.process.get_pid())) + test.error(f"VM {vm.name} (PID {vm.process.get_pid()}) is a zombie!") def run_iozone_background(vm): """ @@ -52,8 +49,8 @@ def run_iozone_background(vm): :param vm: vm object """ test.log.debug("Start iozone in background.") - iozone = generate_instance(params, vm, 'iozone') - args = (params['iozone_cmd_opitons'], int(params['iozone_timeout'])) + iozone = generate_instance(params, vm, "iozone") + args = (params["iozone_cmd_opitons"], int(params["iozone_timeout"])) iozone_thread = utils_misc.InterruptedThread(iozone.run, args) iozone_thread.start() if not utils_misc.wait_for(lambda: iozone_thread.is_alive, 60): @@ -68,18 +65,17 @@ def run_iozone_background(vm): md5sum_bin = params.get("md5sum_bin", "md5sum") sync_bin = params.get("sync_bin", "sync") test.log.debug("Create temporary file on guest: %s", guest_temp_file) - img_utils.save_random_file_to_vm(vm, guest_temp_file, 2048 * 512, - sync_bin) + img_utils.save_random_file_to_vm(vm, guest_temp_file, 2048 * 512, sync_bin) test.log.debug("Get md5 value of the temporary file") - md5_value = img_utils.check_md5sum(guest_temp_file, - md5sum_bin, session) + md5_value = img_utils.check_md5sum(guest_temp_file, md5sum_bin, session) session.close() kill_vm_process(vm) vm = img_utils.boot_vm_with_images(test, params, env) session = vm.wait_for_login() test.log.debug("Verify md5 value of the temporary file") - img_utils.check_md5sum(guest_temp_file, md5sum_bin, session, - md5_value_to_check=md5_value) + img_utils.check_md5sum( + guest_temp_file, md5sum_bin, session, md5_value_to_check=md5_value + ) session.cmd(params["rm_testfile_cmd"] % guest_temp_file) else: iozone_testfile = params["iozone_testfile"] diff --git a/qemu/tests/qemu_img_check_data_integrity_with_lazy_refcounts.py b/qemu/tests/qemu_img_check_data_integrity_with_lazy_refcounts.py index 5de08711cf..0badb68750 100644 --- a/qemu/tests/qemu_img_check_data_integrity_with_lazy_refcounts.py +++ b/qemu/tests/qemu_img_check_data_integrity_with_lazy_refcounts.py @@ -1,8 +1,6 @@ from avocado import fail_on from avocado.utils import process - -from virttest import utils_misc -from virttest import data_dir +from virttest import data_dir, utils_misc from virttest.qemu_storage import QemuImg from provider import qemu_img_utils as img_utils @@ -32,14 +30,12 @@ def kill_vm_process(vm): :param vm: vm object """ pid = vm.process.get_pid() - test.log.debug("Ending VM %s process (killing PID %s)", - vm.name, pid) + test.log.debug("Ending VM %s process (killing PID %s)", vm.name, pid) try: utils_misc.kill_process_tree(pid, 9, timeout=60) test.log.debug("VM %s down (process killed)", vm.name) except RuntimeError: - test.error("VM %s (PID %s) is a zombie!" 
- % (vm.name, vm.process.get_pid())) + test.error(f"VM {vm.name} (PID {vm.process.get_pid()}) is a zombie!") src_image = params["convert_source"] tgt_image = params["convert_target"] @@ -57,11 +53,9 @@ def kill_vm_process(vm): md5sum_bin = params.get("md5sum_bin", "md5sum") sync_bin = params.get("sync_bin", "sync") test.log.debug("Create temporary file on guest: %s", guest_temp_file) - img_utils.save_random_file_to_vm(vm, guest_temp_file, 2048 * 512, - sync_bin) + img_utils.save_random_file_to_vm(vm, guest_temp_file, 2048 * 512, sync_bin) test.log.debug("Get md5 value of the temporary file") - md5_value = img_utils.check_md5sum(guest_temp_file, - md5sum_bin, session) + md5_value = img_utils.check_md5sum(guest_temp_file, md5sum_bin, session) session.close() kill_vm_process(vm) @@ -74,7 +68,8 @@ def kill_vm_process(vm): vm = img_utils.boot_vm_with_images(test, params, env, (tgt_image,)) session = vm.wait_for_login() test.log.debug("Verify md5 value of the temporary file") - img_utils.check_md5sum(guest_temp_file, md5sum_bin, session, - md5_value_to_check=md5_value) + img_utils.check_md5sum( + guest_temp_file, md5sum_bin, session, md5_value_to_check=md5_value + ) session.cmd(params["rm_testfile_cmd"] % guest_temp_file) session.close() diff --git a/qemu/tests/qemu_img_check_fragmentation.py b/qemu/tests/qemu_img_check_fragmentation.py index aa67088184..7c58451ef7 100755 --- a/qemu/tests/qemu_img_check_fragmentation.py +++ b/qemu/tests/qemu_img_check_fragmentation.py @@ -1,9 +1,7 @@ import re from avocado.utils import process - -from virttest import data_dir -from virttest import storage +from virttest import data_dir, storage def run(test, params, env): @@ -21,24 +19,28 @@ def run(test, params, env): """ image_stg = params["images"] root_dir = data_dir.get_data_dir() - image_stg_name = storage.get_image_filename(params.object_params(image_stg), - root_dir) + image_stg_name = storage.get_image_filename( + params.object_params(image_stg), root_dir + ) timeout = float(params.get("timeout", 1800)) offset = params["offsets"].split() fragmentation_maximum = params["fragmentation_maximum"] qemu_img_bench_cmd = params["qemu_img_bench_cmd"] for o in offset: - process.run(qemu_img_bench_cmd % (image_stg_name, o), - timeout=timeout, shell=True) + process.run( + qemu_img_bench_cmd % (image_stg_name, o), timeout=timeout, shell=True + ) check_fragmentation_cmd = params["check_fragmentation_cmd"] % image_stg_name cmd_result = process.run(check_fragmentation_cmd, shell=True) extents_number_pattern = params["extents_number_pattern"] fragmentation_maximum = int(params["fragmentation_maximum"]) - extents_number = re.search(extents_number_pattern, - cmd_result.stdout.decode()) + extents_number = re.search(extents_number_pattern, cmd_result.stdout.decode()) if not extents_number: - test.fail("Failed to get extents number. " - "The output is '%s'." % cmd_result.stdout.decode()) + test.fail( + "Failed to get extents number. " + f"The output is '{cmd_result.stdout.decode()}'." + ) if int(extents_number.group(1)) >= fragmentation_maximum: - test.fail("The extents should less than %s, the actual result is %s." - % (fragmentation_maximum, extents_number.group(1))) + test.fail( + f"The extents should less than {fragmentation_maximum}, the actual result is {extents_number.group(1)}." 
+        )
diff --git a/qemu/tests/qemu_img_convert_from_vdi.py b/qemu/tests/qemu_img_convert_from_vdi.py
index 5ef0f7d4e1..ea59d22854 100644
--- a/qemu/tests/qemu_img_convert_from_vdi.py
+++ b/qemu/tests/qemu_img_convert_from_vdi.py
@@ -1,6 +1,5 @@
 from avocado import fail_on
 from avocado.utils import process
-
 from virttest import data_dir
 from virttest.qemu_storage import QemuImg
 
@@ -22,7 +21,7 @@ def run(test, params, env):
     img_dir = data_dir.get_data_dir()
 
     vdi_image_url = params["vdi_image_address"]
-    vdi_image_download_cmd = "wget -P %s/images %s" % (img_dir, vdi_image_url)
+    vdi_image_download_cmd = f"wget -P {img_dir}/images {vdi_image_url}"
     process.system(vdi_image_download_cmd)
 
     for format in ("qcow2", "raw"):
diff --git a/qemu/tests/qemu_img_convert_image_with_unaligned_size.py b/qemu/tests/qemu_img_convert_image_with_unaligned_size.py
index a075714496..79571466d4 100644
--- a/qemu/tests/qemu_img_convert_image_with_unaligned_size.py
+++ b/qemu/tests/qemu_img_convert_image_with_unaligned_size.py
@@ -1,6 +1,5 @@
 from avocado import fail_on
 from avocado.utils import process
-
 from virttest import data_dir
 from virttest.qemu_io import QemuIOSystem
 from virttest.qemu_storage import QemuImg
@@ -18,24 +17,26 @@ def run(test, params, env):
     :param params: Dictionary with the test parameters
     :param env: Dictionary with test environment.
     """
+
     def _qemu_io(img, cmd):
         """Run qemu-io cmd to a given img."""
         test.log.info("Run qemu-io %s", img.image_filename)
         try:
             QemuIOSystem(test, params, img.image_filename).cmd_output(cmd, 120)
         except process.CmdError:
-            test.fail("qemu-io to '%s' failed." % img.image_filename)
+            test.fail(f"qemu-io to '{img.image_filename}' failed.")
 
     src_image = params["images"]
     tgt_image = params["convert_target"]
     img_dir = data_dir.get_data_dir()
 
     source = QemuImg(params.object_params(src_image), img_dir, src_image)
-    with open(source.image_filename, mode='wb') as fd:
+    with open(source.image_filename, mode="wb") as fd:
         fd.truncate(int(params["truncate_size"]))
-    _qemu_io(source, 'write -P 1 0 %s' % params["write_size"])
+    _qemu_io(source, "write -P 1 0 {}".format(params["write_size"]))
 
     fail_on((process.CmdError,))(source.convert)(
-        source.params, img_dir, cache_mode="none", source_cache_mode="none")
+        source.params, img_dir, cache_mode="none", source_cache_mode="none"
+    )
 
     params["images"] += " " + tgt_image
diff --git a/qemu/tests/qemu_img_convert_with_backing_file.py b/qemu/tests/qemu_img_convert_with_backing_file.py
index addff10b2e..16ac1cd23c 100644
--- a/qemu/tests/qemu_img_convert_with_backing_file.py
+++ b/qemu/tests/qemu_img_convert_with_backing_file.py
@@ -1,10 +1,9 @@
 import json
 
-from virttest import data_dir
-from virttest import qemu_storage
-
 from avocado import fail_on
 from avocado.utils import process
+from virttest import data_dir, qemu_storage
+
 from provider import qemu_img_utils as img_utils
 
 
@@ -36,12 +35,13 @@ def prepare_images_from_params(image_chain, params):
         """Parse params to initialize a QImage."""
         params["image_chain"] = image_chain
         image_chain = params["image_chain"].split()
-        base, sn = (qemu_storage.QemuImg(params.object_params(tag), root_dir, tag)
-                    for tag in image_chain)
+        base, sn = (
+            qemu_storage.QemuImg(params.object_params(tag), root_dir, tag)
+            for tag in image_chain
+        )
         return base, sn
 
-    def convert_images_from_params(convert_source, convert_target,
-                                   backing_file=None):
+    def convert_images_from_params(convert_source, convert_target, backing_file=None):
         """Convert images with specified parameters"""
         source_params = convert_source.params
         target_params = convert_target.params
@@ -50,26 +50,25 @@ def convert_images_from_params(convert_source, convert_target,
         source_cache_mode = params.get("source_cache_mode")
         source_params["convert_target"] = convert_target.tag
         source_params["convert_backing_file"] = backing_file
-        test.log.info("Convert from %s to %s",
-                      convert_source.tag, convert_target.tag)
+        test.log.info("Convert from %s to %s", convert_source.tag, convert_target.tag)
         fail_on((process.CmdError,))(convert_source.convert)(
-            source_params, root_dir, cache_mode,
-            source_cache_mode, skip_target_creation)
+            source_params, root_dir, cache_mode, source_cache_mode, skip_target_creation
+        )
 
     def check_image_size(image):
         """Check image is not fully allocated"""
-        test.log.info("Verify qemu-img does not allocate the "
-                      "entire image after image convert")
+        test.log.info(
+            "Verify qemu-img does not allocate the " "entire image after image convert"
+        )
         info = json.loads(image.info(output="json"))
         virtual_size = info["virtual-size"]
         actual_size = info["actual-size"]
         if actual_size >= virtual_size:
-            test.fail("qemu-img wrongly allocates to %s the entire image",
-                      image.tag)
+            test.fail("qemu-img wrongly allocates to %s the entire image", image.tag)
 
     images = params["images"].split()
-    params["image_name_%s" % images[0]] = params["image_name"]
-    params["image_format_%s" % images[0]] = params["image_format"]
+    params[f"image_name_{images[0]}"] = params["image_name"]
+    params[f"image_format_{images[0]}"] = params["image_format"]
     vm = img_utils.boot_vm_with_images(test, params, env, (images[0],))
     session = vm.wait_for_login()
     guest_temp_file = params["guest_temp_file"]
@@ -97,8 +96,9 @@ def check_image_size(image):
     vm = img_utils.boot_vm_with_images(test, params, env, (sn2.tag,))
     session = vm.wait_for_login()
     test.log.info("Verify md5 value of the temporary file")
-    img_utils.check_md5sum(guest_temp_file, md5sum_bin, session,
-                           md5_value_to_check=md5_value)
+    img_utils.check_md5sum(
+        guest_temp_file, md5sum_bin, session, md5_value_to_check=md5_value
+    )
     session.close()
     vm.destroy()
 
@@ -109,9 +109,13 @@ def check_image_size(image):
     info = json.loads(sn2.info(output="json"))
     full_backing_filename = info["full-backing-filename"]
     if full_backing_filename != base2.image_filename:
-        test.fail("The full-backing-filename of %s is incorrect."
-                  "It should be %s, but it is %s.",
-                  sn2.tag, base2.image_filename, full_backing_filename)
+        test.fail(
+            "The full-backing-filename of %s is incorrect."
+            "It should be %s, but it is %s.",
+            sn2.tag,
+            base2.image_filename,
+            full_backing_filename,
+        )
     base2.remove()
     sn1.remove()
     sn2.remove()
diff --git a/qemu/tests/qemu_img_convert_with_copy_offloading.py b/qemu/tests/qemu_img_convert_with_copy_offloading.py
index d98156c1ef..0b389db98d 100644
--- a/qemu/tests/qemu_img_convert_with_copy_offloading.py
+++ b/qemu/tests/qemu_img_convert_with_copy_offloading.py
@@ -1,5 +1,4 @@
 from avocado.utils import process
-
 from virttest import data_dir
 from virttest.qemu_io import QemuIOSystem
 from virttest.qemu_storage import QemuImg
@@ -20,6 +19,7 @@ def run(test, params, env):
     :param params: Dictionary with the test parameters
     :param env: Dictionary with test environment.
""" + def _qemu_io(img, cmd): """Run qemu-io cmd to a given img.""" test.log.info("Run qemu-io %s", img.image_filename) @@ -28,12 +28,13 @@ def _qemu_io(img, cmd): def _convert_with_copy_offloading_and_verify(src, tgt): """Verify whether copy_offloading works.""" test.log.info("Verify whether copy_offloading works for commit.") - cmd = ("strace -e trace=copy_file_range -f qemu-img convert -C -f " - "%s %s -O %s %s " % (src.image_format, src.image_filename, - tgt.image_format, tgt.image_filename)) + cmd = ( + "strace -e trace=copy_file_range -f qemu-img convert -C -f " + f"{src.image_format} {src.image_filename} -O {tgt.image_format} {tgt.image_filename} " + ) sts, text = process.getstatusoutput(cmd, verbose=True) if sts != 0: - test.fail("Convert with copy_offloading failed: %s." % text) + test.fail(f"Convert with copy_offloading failed: {text}.") src_image = params["src_image"] tgt_image = params["tgt_image"] @@ -41,7 +42,7 @@ def _convert_with_copy_offloading_and_verify(src, tgt): source = QemuImg(params.object_params(src_image), img_dir, src_image) source.create(source.params) - _qemu_io(source, 'write -P 1 0 %s' % params["write_size"]) + _qemu_io(source, "write -P 1 0 {}".format(params["write_size"])) target = QemuImg(params.object_params(tgt_image), img_dir, tgt_image) _convert_with_copy_offloading_and_verify(source, target) diff --git a/qemu/tests/qemu_img_convert_with_inconsistent_bitmap.py b/qemu/tests/qemu_img_convert_with_inconsistent_bitmap.py index dcc74d7830..524d5958f4 100644 --- a/qemu/tests/qemu_img_convert_with_inconsistent_bitmap.py +++ b/qemu/tests/qemu_img_convert_with_inconsistent_bitmap.py @@ -1,10 +1,7 @@ import json from avocado.utils import process - -from virttest import data_dir -from virttest import utils_misc -from virttest import qemu_storage +from virttest import data_dir, qemu_storage, utils_misc from provider.nbd_image_export import QemuNBDExportImage @@ -33,7 +30,7 @@ def run(test, params, env): def add_persistent_bitmap_to_image(image, bitmap): """Add persistent bitmap to image""" qemu_img = utils_misc.get_qemu_img_binary(params) - add_bitmap_cmd = "%s bitmap %s --add %s" % (qemu_img, image, bitmap) + add_bitmap_cmd = f"{qemu_img} bitmap {image} --add {bitmap}" process.run(add_bitmap_cmd, ignore_status=False, shell=True) def export_image_with_bitmap(params, tag): @@ -66,8 +63,13 @@ def check_bitmap_not_in_image(image, bitmap_name): def convert_image_with_bitmaps(src_fmt, tar_fmt, src_name, tar_name): qemu_img = utils_misc.get_qemu_img_binary(params) - convert_cmd = params["convert_cmd"] % (qemu_img, src_fmt, tar_fmt, - src_name, tar_name) + convert_cmd = params["convert_cmd"] % ( + qemu_img, + src_fmt, + tar_fmt, + src_name, + tar_name, + ) try: process.system(convert_cmd, ignore_status=False, shell=True) except process.CmdError as e: @@ -97,9 +99,9 @@ def get_image_param_by_tag(root_dir, tag): add_persistent_bitmap_to_image(src_name, bitmaps[1]) check_bitmap_in_image(src_image, bitmaps[1]) try: - convert_image_with_bitmaps(src_params["image_format"], - dst_params["image_format"], - src_name, dst_name) + convert_image_with_bitmaps( + src_params["image_format"], dst_params["image_format"], src_name, dst_name + ) check_bitmap_not_in_image(dst_image, bitmaps[0]) check_bitmap_in_image(dst_image, bitmaps[1]) finally: diff --git a/qemu/tests/qemu_img_convert_with_rate_limit.py b/qemu/tests/qemu_img_convert_with_rate_limit.py index 0b4488f549..880ec226c2 100644 --- a/qemu/tests/qemu_img_convert_with_rate_limit.py +++ 
b/qemu/tests/qemu_img_convert_with_rate_limit.py @@ -1,8 +1,7 @@ -from virttest import data_dir -from virttest import qemu_storage - from avocado import fail_on from avocado.utils import process +from virttest import data_dir, qemu_storage + from provider import qemu_img_utils as img_utils @@ -48,7 +47,8 @@ def run(test, params, env): vm = img_utils.boot_vm_with_images(test, params, env, (convert_target,)) session = vm.wait_for_login() test.log.debug("Verify md5 value of the temporary file") - img_utils.check_md5sum(guest_temp_file, md5sum_bin, session, - md5_value_to_check=md5_value) + img_utils.check_md5sum( + guest_temp_file, md5sum_bin, session, md5_value_to_check=md5_value + ) session.close() target.remove() diff --git a/qemu/tests/qemu_img_convert_with_target_is_zero.py b/qemu/tests/qemu_img_convert_with_target_is_zero.py index 7a1c10eb8b..50eaf01edf 100644 --- a/qemu/tests/qemu_img_convert_with_target_is_zero.py +++ b/qemu/tests/qemu_img_convert_with_target_is_zero.py @@ -1,8 +1,7 @@ -from virttest import data_dir -from virttest import qemu_storage - from avocado import fail_on from avocado.utils import process +from virttest import data_dir, qemu_storage + from provider import qemu_img_utils as img_utils @@ -48,15 +47,16 @@ def run(test, params, env): test.log.info("Convert from %s to %s", convert_source, convert_target) fail_on((process.CmdError,))(source.convert)( - source_params, root_dir, cache_mode, - source_cache_mode, skip_target_creation) + source_params, root_dir, cache_mode, source_cache_mode, skip_target_creation + ) test.log.debug("sync host data after convert") process.system("sync") vm = img_utils.boot_vm_with_images(test, params, env, (convert_target,)) session = vm.wait_for_login() test.log.info("Verify md5 value of the temporary file") - img_utils.check_md5sum(guest_temp_file, md5sum_bin, session, - md5_value_to_check=md5_value) + img_utils.check_md5sum( + guest_temp_file, md5sum_bin, session, md5_value_to_check=md5_value + ) session.close() target.remove() diff --git a/qemu/tests/qemu_img_create_snapshot_on_qcow2_target_from_raw_source.py b/qemu/tests/qemu_img_create_snapshot_on_qcow2_target_from_raw_source.py index 55b1083e48..de131aa74b 100644 --- a/qemu/tests/qemu_img_create_snapshot_on_qcow2_target_from_raw_source.py +++ b/qemu/tests/qemu_img_create_snapshot_on_qcow2_target_from_raw_source.py @@ -1,8 +1,7 @@ from virttest import data_dir from virttest.qemu_storage import QemuImg -from qemu.tests.qemu_disk_img import QemuImgTest -from qemu.tests.qemu_disk_img import generate_base_snapshot_pair +from qemu.tests.qemu_disk_img import QemuImgTest, generate_base_snapshot_pair def run(test, params, env): @@ -21,6 +20,7 @@ def run(test, params, env): :param params: Dictionary with the test parameters :param env: Dictionary with test environment """ + def _get_img_obj_and_params(tag): """Get an QemuImg object and its params based on the tag.""" img_param = params.object_params(tag) @@ -31,8 +31,11 @@ def _get_img_obj_and_params(tag): initial_tag = params["images"].split()[0] c_tag = params["convert_target"] - test.log.info("Boot a guest up with initial image: %s, and create a" - " file %s on the disk.", initial_tag, file) + test.log.info( + "Boot a guest up with initial image: %s, and create a" " file %s on the disk.", + initial_tag, + file, + ) base_qit = QemuImgTest(test, params, env, initial_tag) base_qit.start_vm() md5 = base_qit.save_file(file) @@ -54,7 +57,8 @@ def _get_img_obj_and_params(tag): sn_qit.create_snapshot() sn_qit.start_vm() if not 
sn_qit.check_file(file, md5): - test.fail("The file %s's md5 on initial image and" - " snapshot are different." % file) + test.fail( + f"The file {file}'s md5 on initial image and" " snapshot are different." + ) for qit in (base_qit, sn_qit): qit.clean() diff --git a/qemu/tests/qemu_img_extent_size_hint.py b/qemu/tests/qemu_img_extent_size_hint.py index 17c1e1d6f7..b40d038bb5 100644 --- a/qemu/tests/qemu_img_extent_size_hint.py +++ b/qemu/tests/qemu_img_extent_size_hint.py @@ -19,6 +19,7 @@ def run(test, params, env): :param params: Dictionary with the test parameters :param env: Dictionary with test environment. """ + def _image_create(): """Create an image.""" image_name = params.get("images") @@ -35,8 +36,7 @@ def ck_extent_size_hint(img, expect): es_hint = re.findall(parttern, output) if es_hint: if es_hint[0] != expect: - test.fail("Extent_size_hint %s is not expected value %s" - % (es_hint, expect)) + test.fail(f"Extent_size_hint {es_hint} is not expected value {expect}") elif expect != "0": test.fail("Failed to get extent_size_hint info") @@ -47,11 +47,10 @@ def ck_extent_size_hint(img, expect): else: params["image_extent_size_hint"] = es_hint esh = es_hint - esh_tmp = utils_misc.normalize_data_size(esh, 'B') + esh_tmp = utils_misc.normalize_data_size(esh, "B") esh_expect = esh_tmp.split(".")[0] image = _image_create() - test.log.info("Check extent size hint when it sets to %s" - % es_hint) + test.log.info("Check extent size hint when it sets to %s", es_hint) ck_extent_size_hint(image, esh_expect) diff --git a/qemu/tests/qemu_img_info_while_vm_running.py b/qemu/tests/qemu_img_info_while_vm_running.py index d6b772cde7..ed67c8ca52 100644 --- a/qemu/tests/qemu_img_info_while_vm_running.py +++ b/qemu/tests/qemu_img_info_while_vm_running.py @@ -1,10 +1,8 @@ from avocado.utils import process -from virttest import data_dir -from virttest import error_context +from virttest import data_dir, error_context from virttest.qemu_storage import QemuImg -from qemu.tests.qemu_disk_img import QemuImgTest -from qemu.tests.qemu_disk_img import generate_base_snapshot_pair +from qemu.tests.qemu_disk_img import QemuImgTest, generate_base_snapshot_pair @error_context.context_aware @@ -30,21 +28,21 @@ def run(test, params, env): :param params: Dictionary with the test parameters :param env: Dictionary with test environment. """ + def _boot_vm(boot_img): - error_context.context("Boot vm with %s." % boot_img, test.log.info) + error_context.context(f"Boot vm with {boot_img}.", test.log.info) vm.params["images"] = boot_img vm.create() vm.verify_alive() def _qemu_img_info(info_img): - error_context.context("Check qemu-img info with %s." % info_img, - test.log.info) + error_context.context(f"Check qemu-img info with {info_img}.", test.log.info) img_param = params.object_params(info_img) img = QemuImg(img_param, data_dir.get_data_dir(), info_img) try: img.info() except process.CmdError: - test.fail("qemu-img info %s failed." 
% info_img) + test.fail(f"qemu-img info {info_img} failed.") def _qemu_img_info_while_vm_running(boot_img, info_img): _boot_vm(boot_img) diff --git a/qemu/tests/qemu_img_lock_reject_boot.py b/qemu/tests/qemu_img_lock_reject_boot.py index 081f82238d..01136cee19 100644 --- a/qemu/tests/qemu_img_lock_reject_boot.py +++ b/qemu/tests/qemu_img_lock_reject_boot.py @@ -1,11 +1,9 @@ import time +from avocado.utils import process from virttest import env_process -from qemu.tests.qemu_disk_img import QemuImgTest -from qemu.tests.qemu_disk_img import generate_base_snapshot_pair - -from avocado.utils import process +from qemu.tests.qemu_disk_img import QemuImgTest, generate_base_snapshot_pair def run(test, params, env): @@ -21,6 +19,7 @@ def run(test, params, env): :param params: Dictionary with the test parameters :param env: Dictionary with test environment """ + def _create_os_snapshot(): """Crate one external snapshot based on the os image.""" test.log.info("Create a qcow2 snapshot based on the os image.") @@ -31,16 +30,17 @@ def _create_os_snapshot(): QemuImgTest(test, params, env, snapshot).create_snapshot() def _verify_write_lock_err_msg(test, img_file=None): - test.log.info("Verify qemu-img write lock err msg.",) - msgs = ['"write" lock', - 'Is another process using the image'] + test.log.info( + "Verify qemu-img write lock err msg.", + ) + msgs = ['"write" lock', "Is another process using the image"] # Avoid timing issues between writing to log and the check itself - check_lock_timeout = params.get_numeric('check_lock_timeout', 5) + check_lock_timeout = params.get_numeric("check_lock_timeout", 5) time.sleep(check_lock_timeout) # Check expected error messages directly in the test log output = process.run( r"cat " + test.logfile + r"| grep '\[qemu output\]' | grep -v 'warning'", - shell=True + shell=True, ).stdout_text.strip() if img_file: msgs.append(img_file) diff --git a/qemu/tests/qemu_img_luks_key_management.py b/qemu/tests/qemu_img_luks_key_management.py index 711fc23f54..5359bd6678 100644 --- a/qemu/tests/qemu_img_luks_key_management.py +++ b/qemu/tests/qemu_img_luks_key_management.py @@ -1,8 +1,7 @@ -import re import json +import re -from virttest import data_dir -from virttest import qemu_storage +from virttest import data_dir, qemu_storage def run(test, params, env): @@ -64,31 +63,36 @@ def run(test, params, env): # e.g. amend_encrypt.state = inactive # luks likes amend_state = inactive encrypt = "encrypt." if stg_img.image_format == "qcow2" else "" - stg_params.pop("amend_%snew-secret" % encrypt) - stg_params["amend_%sstate" % encrypt] = "inactive" + stg_params.pop(f"amend_{encrypt}new-secret") + stg_params[f"amend_{encrypt}state"] = "inactive" if erase_password == "password": - stg_params.pop("amend_%skeyslot" % encrypt) - stg_params["amend_%sold-secret" % encrypt] = stg_params["amend_secret_id"] + stg_params.pop(f"amend_{encrypt}keyslot") + stg_params[f"amend_{encrypt}old-secret"] = stg_params["amend_secret_id"] cmd_result = stg_img.amend(stg_params, ignore_status=True) if err_info: if not re.search(err_info, cmd_result.stderr.decode(), re.I): - test.fail("Failed to get error information. The actual error " - "information is %s." % cmd_result.stderr.decode()) + test.fail( + "Failed to get error information. The actual error " + f"information is {cmd_result.stderr.decode()}." + ) elif cmd_result.exit_status != 0: - test.fail("Failed to amend image %s. The error information is " - "%s." 
% (stg_img.image_filename, cmd_result.stderr.decode())) + test.fail( + f"Failed to amend image {stg_img.image_filename}. The error information is " + f"{cmd_result.stderr.decode()}." + ) else: info = json.loads(stg_img.info(output="json")) if stg_img.image_format == "qcow2": key_state = stg_params["amend_encrypt.state"] key_slot = params.get_numeric("amend_encrypt.keyslot", 1) - state = info["format-specific"]["data"]["encrypt"]["slots"][key_slot]["active"] + state = info["format-specific"]["data"]["encrypt"]["slots"][key_slot][ + "active" + ] else: key_state = stg_params["amend_state"] key_slot = params.get_numeric("amend_keyslot", 1) state = info["format-specific"]["data"]["slots"][key_slot]["active"] key_state = True if key_state == "active" else False if key_state != state: - test.fail("The key state is %s, it should be %s." - % (state, key_state)) + test.fail(f"The key state is {state}, it should be {key_state}.") diff --git a/qemu/tests/qemu_img_map_unaligned_image.py b/qemu/tests/qemu_img_map_unaligned_image.py index e77ba06043..99567ee77b 100644 --- a/qemu/tests/qemu_img_map_unaligned_image.py +++ b/qemu/tests/qemu_img_map_unaligned_image.py @@ -1,13 +1,10 @@ -import random import json +import random import string from avocado.utils import process -from virttest import data_dir +from virttest import data_dir, utils_misc, utils_qemu from virttest.qemu_storage import QemuImg -from virttest import utils_misc -from virttest import utils_qemu - from virttest.utils_version import VersionInterval @@ -23,10 +20,13 @@ def run(test, params, env): :param params: Dictionary with the test parameters :param env: Dictionary with test environment """ + def _generate_random_string(max_len=19): """Generate random alphabets string in the range of [1, max_len+1].""" - random_str = ''.join(random.choice( - string.ascii_lowercase) for _ in range(random.randint(1, max_len))) + random_str = "".join( + random.choice(string.ascii_lowercase) + for _ in range(random.randint(1, max_len)) + ) return random_str, len(random_str) def _verify_qemu_img_map(output, str_len): @@ -34,43 +34,87 @@ def _verify_qemu_img_map(output, str_len): test.log.info("Verify the dumped mete-data of the unaligned image.") qemu_binary = utils_misc.get_qemu_binary(params) qemu_version = utils_qemu.get_qemu_version(qemu_binary)[0] - if qemu_version in VersionInterval('[8.2.0,)'): + if qemu_version in VersionInterval("[8.2.0,)"): expected = [ - {"start": 0, "length": str_len, "depth": 0, "present": True, - "zero": False, "data": True, "compressed": False, "offset": 0}, - {"start": str_len, "length": 512 - (str_len % 512), "depth": 0, - "present": True, "zero": True, "data": False, - "compressed": False, "offset": str_len}] - elif qemu_version in VersionInterval('[6.1.0,)'): + { + "start": 0, + "length": str_len, + "depth": 0, + "present": True, + "zero": False, + "data": True, + "compressed": False, + "offset": 0, + }, + { + "start": str_len, + "length": 512 - (str_len % 512), + "depth": 0, + "present": True, + "zero": True, + "data": False, + "compressed": False, + "offset": str_len, + }, + ] + elif qemu_version in VersionInterval("[6.1.0,)"): expected = [ - {"start": 0, "length": str_len, "depth": 0, "present": True, - "zero": False, "data": True, "offset": 0}, - {"start": str_len, "length": 512 - (str_len % 512), "depth": 0, - "present": True, "zero": True, "data": False, - "offset": str_len}] + { + "start": 0, + "length": str_len, + "depth": 0, + "present": True, + "zero": False, + "data": True, + "offset": 0, + }, + { + "start": 
str_len, + "length": 512 - (str_len % 512), + "depth": 0, + "present": True, + "zero": True, + "data": False, + "offset": str_len, + }, + ] else: - expected = [{"start": 0, "length": str_len, "depth": 0, - "zero": False, "data": True, "offset": 0}, - {"start": str_len, "length": 512 - (str_len % 512), - "depth": 0, "zero": True, "data": False, - "offset": str_len}] + expected = [ + { + "start": 0, + "length": str_len, + "depth": 0, + "zero": False, + "data": True, + "offset": 0, + }, + { + "start": str_len, + "length": 512 - (str_len % 512), + "depth": 0, + "zero": True, + "data": False, + "offset": str_len, + }, + ] res = json.loads(output) if res != expected: - test.fail("The dumped mete-data of the unaligned " - "image '%s' is not correct." % img.image_filename) + test.fail( + "The dumped mete-data of the unaligned " + f"image '{img.image_filename}' is not correct." + ) img_param = params.object_params("test") img = QemuImg(img_param, data_dir.get_data_dir(), "test") test.log.info("Create a new file %s using truncate.", img.image_filename) - process.run("rm -f %s" % img.image_filename) - process.run("truncate -s 1G %s " % img.image_filename) + process.run(f"rm -f {img.image_filename}") + process.run(f"truncate -s 1G {img.image_filename} ") random_str, str_len = _generate_random_string() test.log.info("Write '%s' into the file %s.", random_str, img.image_filename) - process.run("echo -n '%s' > %s" % (random_str, img.image_filename), - shell=True) + process.run(f"echo -n '{random_str}' > {img.image_filename}", shell=True) res = img.map(output="json") if res.exit_status != 0: - test.fail("qemu-img map error: %s." % res.stderr_text) + test.fail(f"qemu-img map error: {res.stderr_text}.") _verify_qemu_img_map(res.stdout_text, str_len) diff --git a/qemu/tests/qemu_img_measure_convert_image.py b/qemu/tests/qemu_img_measure_convert_image.py index 7272343bff..e0dd5d9484 100644 --- a/qemu/tests/qemu_img_measure_convert_image.py +++ b/qemu/tests/qemu_img_measure_convert_image.py @@ -2,9 +2,8 @@ from avocado.utils import process from virttest import data_dir -from virttest.qemu_storage import QemuImg -from virttest.qemu_storage import get_image_json from virttest.qemu_io import QemuIOSystem +from virttest.qemu_storage import QemuImg, get_image_json def run(test, params, env): @@ -26,6 +25,7 @@ def run(test, params, env): :param params: Dictionary with the test parameters :param env: Dictionary with test environment. 
""" + def _get_img_obj_and_params(tag): """Get an QemuImg object and its params based on the tag.""" img_param = params.object_params(tag) @@ -39,7 +39,7 @@ def _qemu_io(img, cmd): if img.image_format == "luks": image_secret_object = img._secret_objects[-1] image_json_str = get_image_json(img.tag, img.params, img.root_dir) - image_json_str = " '%s'" % image_json_str + image_json_str = f" '{image_json_str}'" image_filename = image_secret_object + image_json_str q = QemuIOSystem(test, params, image_filename) q.cmd_output(cmd, 120) @@ -47,26 +47,34 @@ def _qemu_io(img, cmd): def _get_file_size(img): """Get the image file size of a given QemuImg object.""" test.log.info("Get %s's file size.", img.image_filename) - cmd = "stat -c %s {0}".format(img.image_filename) + cmd = f"stat -c %s {img.image_filename}" return int(process.system_output(cmd).decode()) def _verify_file_size_with_benchmark(tag, file_size, key): """Verify image file size with the qemu-img measure benchmark.""" - test.log.info("Verify the %s's size with benchmark.\n" - "The image size %s does not exceed the benchmark '%s'" - " size %s.", tag, file_size, key, benchmark[key]) + test.log.info( + "Verify the %s's size with benchmark.\n" + "The image size %s does not exceed the benchmark '%s'" + " size %s.", + tag, + file_size, + key, + benchmark[key], + ) if file_size > benchmark[key]: - test.fail("The %s's file size should not exceed benchmark '%s'" - " size %s, got %s." % (tag, key, - benchmark[key], file_size)) + test.fail( + f"The {tag}'s file size should not exceed benchmark '{key}'" + f" size {benchmark[key]}, got {file_size}." + ) img, img_param = _get_img_obj_and_params(params["images"]) img.create(img_param) - _qemu_io(img, 'write 0 %s' % params["write_size"]) + _qemu_io(img, "write 0 {}".format(params["write_size"])) test.log.info("Using qemu-img measure to get the benchmark size.") - benchmark = json.loads(img.measure(target_fmt=params["target_format"], - output="json").stdout_text) + benchmark = json.loads( + img.measure(target_fmt=params["target_format"], output="json").stdout_text + ) for c_tag in params["convert_tags"].split(): img_param["convert_target"] = c_tag diff --git a/qemu/tests/qemu_img_measure_new_image.py b/qemu/tests/qemu_img_measure_new_image.py index f7a1695649..c27bfaae45 100644 --- a/qemu/tests/qemu_img_measure_new_image.py +++ b/qemu/tests/qemu_img_measure_new_image.py @@ -20,6 +20,7 @@ def run(test, params, env): :param params: Dictionary with the test parameters :param env: Dictionary with test environment. 
""" + def _get_img_obj_and_params(tag): """Get an QemuImg object and its params based on the tag.""" img_param = params.object_params(tag) @@ -29,25 +30,36 @@ def _get_img_obj_and_params(tag): def _get_file_size(img): """Get the image file size of a given QemuImg object.""" test.log.info("Get %s's file size.", img.image_filename) - cmd = "stat -c %s {0}".format(img.image_filename) + cmd = f"stat -c %s {img.image_filename}" return int(process.system_output(cmd).decode()) def _verify_file_size_with_benchmark(tag, file_size, key): """Verify image file size with the qemu-img measure benchmark.""" - test.log.info("Verify the %s's size with benchmark.\n" - "The image size %s does not exceed the benchmark '%s'" - " size %s.", tag, file_size, key, benchmark[key]) + test.log.info( + "Verify the %s's size with benchmark.\n" + "The image size %s does not exceed the benchmark '%s'" + " size %s.", + tag, + file_size, + key, + benchmark[key], + ) if file_size > benchmark[key]: - test.fail("The %s's file size should not exceed benchmark '%s'" - " size %s, got %s." % (tag, key, - benchmark[key], file_size)) + test.fail( + f"The {tag}'s file size should not exceed benchmark '{key}'" + f" size {benchmark[key]}, got {file_size}." + ) for tag in params["images"].split(): img, img_param = _get_img_obj_and_params(tag) test.log.info("Using qemu-img measure to get the benchmark size.") - benchmark = json.loads(img.measure(target_fmt=params["image_format"], - size=params["image_size"], - output="json").stdout_text) + benchmark = json.loads( + img.measure( + target_fmt=params["image_format"], + size=params["image_size"], + output="json", + ).stdout_text + ) img.create(img_param) size = _get_file_size(img) diff --git a/qemu/tests/qemu_img_supports_convert_coroutines_complete.py b/qemu/tests/qemu_img_supports_convert_coroutines_complete.py index 10c8dc709b..5a0f8ea606 100644 --- a/qemu/tests/qemu_img_supports_convert_coroutines_complete.py +++ b/qemu/tests/qemu_img_supports_convert_coroutines_complete.py @@ -20,6 +20,7 @@ def run(test, params, env): :param params: Dictionary with the test parameters :param env: Dictionary with test environment. 
""" + def _get_img_obj_and_params(tag): """Get an QemuImg object and its params based on the tag.""" img_param = params.object_params(tag) @@ -34,8 +35,9 @@ def _qemu_io(img, cmd): def _create_error_cfg(file): test.log.info("Create error cfg %s.", file) - error_cfg = ('[inject-error]\nevent = "write_aio"\n' - 'sector = "819200"\nonce = "on"') + error_cfg = ( + '[inject-error]\nevent = "write_aio"\n' 'sector = "819200"\nonce = "on"' + ) with open(file, "w") as cfg: cfg.write(error_cfg) @@ -44,9 +46,11 @@ def _inject_error_and_verify(file): pkg = utils_package.LocalPackageMgr("valgrind") pkg.install() _create_error_cfg(file) - cmd = ("valgrind --soname-synonyms=somalloc=libtcmalloc.so " - "qemu-img convert -npWO qcow2 source.qcow2 " - "blkdebug:%s:target.qcow2" % file) + cmd = ( + "valgrind --soname-synonyms=somalloc=libtcmalloc.so " + "qemu-img convert -npWO qcow2 source.qcow2 " + f"blkdebug:{file}:target.qcow2" + ) stderr = process.run(cmd, ignore_status=True).stderr_text if "ERROR SUMMARY: 0 errors from 0 contexts" not in stderr: test.fail("There should be no errors in the summary.") @@ -59,6 +63,6 @@ def _inject_error_and_verify(file): img, img_param = _get_img_obj_and_params(tag) img.create(img_param) if tag == "source": - _qemu_io(img, 'write 0 1G') + _qemu_io(img, "write 0 1G") _inject_error_and_verify(params["error_cfg"]) diff --git a/qemu/tests/qemu_io.py b/qemu/tests/qemu_io.py index f0685717e3..f7a50af44b 100644 --- a/qemu/tests/qemu_io.py +++ b/qemu/tests/qemu_io.py @@ -1,26 +1,22 @@ +import logging import os import re -import logging import time import aexpect - from avocado.utils import process +from virttest import data_dir, error_context, utils_misc -from virttest import error_context -from virttest import utils_misc -from virttest import data_dir +LOG_JOB = logging.getLogger("avocado.test") -LOG_JOB = logging.getLogger('avocado.test') - - -class QemuIOConfig(object): +class QemuIOConfig: """ Performs setup for the test qemu_io. This is a borg class, similar to a singleton. The idea is to keep state in memory for when we call cleanup() on postprocessing. """ + __shared_state = {} def __init__(self, test, params): @@ -28,16 +24,16 @@ def __init__(self, test, params): self.tmpdir = test.tmpdir self.qemu_img_binary = utils_misc.get_qemu_img_binary(params) self.raw_files = ["stg1.raw", "stg2.raw"] - self.raw_files = list(map(lambda f: os.path.join(self.tmpdir, f), - self.raw_files)) + self.raw_files = list( + map(lambda f: os.path.join(self.tmpdir, f), self.raw_files) + ) # Here we're trying to choose fairly explanatory names so it's less # likely that we run in conflict with other devices in the system self.vgtest_name = params.get("vgtest_name", "vg_kvm_test_qemu_io") self.lvtest_name = params.get("lvtest_name", "lv_kvm_test_qemu_io") - self.lvtest_device = "/dev/%s/%s" % ( - self.vgtest_name, self.lvtest_name) + self.lvtest_device = f"/dev/{self.vgtest_name}/{self.lvtest_name}" try: - getattr(self, 'loopback') + getattr(self, "loopback") except AttributeError: self.loopback = [] @@ -49,56 +45,55 @@ def setup(self): self.cleanup() try: for f in self.raw_files: - process.run("%s create -f raw %s 10G" % - (self.qemu_img_binary, f)) + process.run(f"{self.qemu_img_binary} create -f raw {f} 10G") # Associate a loopback device with the raw file. 
# Subject to race conditions, that's why try here to associate # it with the raw file as quickly as possible l_result = process.run("losetup -f") - process.run("losetup -f %s" % f) + process.run(f"losetup -f {f}") loopback = l_result.stdout.strip() self.loopback.append(loopback) # Add the loopback device configured to the list of pvs # recognized by LVM - process.run("pvcreate %s" % loopback) + process.run(f"pvcreate {loopback}") loopbacks = " ".join(self.loopback) - process.run("vgcreate %s %s" % (self.vgtest_name, loopbacks)) + process.run(f"vgcreate {self.vgtest_name} {loopbacks}") # Create an lv inside the vg with starting size of 200M - process.run("lvcreate -L 19G -n %s %s" % - (self.lvtest_name, self.vgtest_name)) + process.run(f"lvcreate -L 19G -n {self.lvtest_name} {self.vgtest_name}") except Exception: try: self.cleanup() except Exception as e: - LOG_JOB.warn(e) + LOG_JOB.warning(e) raise @error_context.context_aware def cleanup(self): error_context.context("performing qemu_io cleanup", LOG_JOB.debug) if os.path.isfile(self.lvtest_device): - process.run("fuser -k %s" % self.lvtest_device) + process.run(f"fuser -k {self.lvtest_device}") time.sleep(2) l_result = process.run("lvdisplay") # Let's remove all volumes inside the volume group created if self.lvtest_name in l_result.stdout: - process.run("lvremove -f %s" % self.lvtest_device) + process.run(f"lvremove -f {self.lvtest_device}") # Now, removing the volume group itself v_result = process.run("vgdisplay") if self.vgtest_name in v_result.stdout: - process.run("vgremove -f %s" % self.vgtest_name) + process.run(f"vgremove -f {self.vgtest_name}") # Now, if we can, let's remove the physical volume from lvm list p_result = process.run("pvdisplay") - l_result = process.run('losetup -a') + l_result = process.run("losetup -a") for l in self.loopback: if l in p_result.stdout: - process.run("pvremove -f %s" % l) + process.run(f"pvremove -f {l}") if l in l_result.stdout: try: - process.run("losetup -d %s" % l) + process.run(f"losetup -d {l}") except process.CmdError as e: - LOG_JOB.error("Failed to liberate loopback %s, " - "error msg: '%s'", l, e) + LOG_JOB.error( + "Failed to liberate loopback %s, " "error msg: '%s'", l, e + ) for f in self.raw_files: if os.path.isfile(f): @@ -122,15 +117,12 @@ def run(test, params, env): qemu_io_config = QemuIOConfig(test, params) qemu_io_config.setup() - test_script = os.path.join(data_dir.get_shared_dir(), - 'scripts/qemu_iotests.sh') - test_image = params.get("test_image", - os.path.join(test.tmpdir, "test.qcow2")) - test.log.info("Run script(%s) with image(%s)", - test_script, test_image) - s, test_result = aexpect.run_fg("sh %s %s" % (test_script, - test_image), - test.log.debug, timeout=1800) + test_script = os.path.join(data_dir.get_shared_dir(), "scripts/qemu_iotests.sh") + test_image = params.get("test_image", os.path.join(test.tmpdir, "test.qcow2")) + test.log.info("Run script(%s) with image(%s)", test_script, test_image) + s, test_result = aexpect.run_fg( + f"sh {test_script} {test_image}", test.log.debug, timeout=1800 + ) err_string = { "err_nums": r"\d errors were found on the image.", @@ -153,4 +145,4 @@ def run(test, params, env): if qemu_io_config: qemu_io_config.cleanup() except Exception as e: - test.log.warn(e) + test.log.warning(e) diff --git a/qemu/tests/qemu_io_blkdebug.py b/qemu/tests/qemu_io_blkdebug.py index 563c7c91d2..4fac491a85 100644 --- a/qemu/tests/qemu_io_blkdebug.py +++ b/qemu/tests/qemu_io_blkdebug.py @@ -1,16 +1,13 @@ import os import re + try: from configparser 
import ConfigParser except ImportError: from ConfigParser import ConfigParser from avocado.utils import process - -from virttest import error_context -from virttest import qemu_io -from virttest import data_dir -from virttest import utils_misc +from virttest import data_dir, error_context, qemu_io, utils_misc from virttest.qemu_storage import QemuImg @@ -28,13 +25,14 @@ def run(test, params, env): :param params: Dictionary with the test parameters :param env: Dictionary with test environment. """ - if params.get("blkdebug_event_name_separator") == 'underscore': - blkdebug_event = params.get('err_event') + if params.get("blkdebug_event_name_separator") == "underscore": + blkdebug_event = params.get("err_event") if "." in blkdebug_event: - params['err_event'] = blkdebug_event.replace(".", "_") + params["err_event"] = blkdebug_event.replace(".", "_") tmp_dir = params.get("tmp_dir", "/tmp") - blkdebug_cfg = utils_misc.get_path(tmp_dir, params.get("blkdebug_cfg", - "blkdebug.cfg")) + blkdebug_cfg = utils_misc.get_path( + tmp_dir, params.get("blkdebug_cfg", "blkdebug.cfg") + ) err_command = params["err_command"] err_event = params["err_event"] errn_list = re.split(r"\s+", params["errn_list"].strip()) @@ -47,8 +45,7 @@ def run(test, params, env): del_snapshot = params.get("del_snapshot", "no") == "yes" error_context.context("Create image", test.log.info) - image_io = QemuImg( - params.object_params(image), data_dir.get_data_dir(), image) + image_io = QemuImg(params.object_params(image), data_dir.get_data_dir(), image) image_name, _ = image_io.create(params.object_params(image)) template_name = utils_misc.get_path(test.virtdir, blkdebug_default) @@ -56,31 +53,32 @@ def run(test, params, env): template.read(template_name) for errn in errn_list: - log_filename = utils_misc.get_path(test.outputdir, - "qemu-io-log-%s" % errn) + log_filename = utils_misc.get_path(test.outputdir, f"qemu-io-log-{errn}") error_context.context("Write the blkdebug config file", test.log.info) - template.set("inject-error", "event", '"%s"' % err_event) - template.set("inject-error", "errno", '"%s"' % errn) + template.set("inject-error", "event", f'"{err_event}"') + template.set("inject-error", "errno", f'"{errn}"') error_context.context("Write blkdebug config file", test.log.info) blkdebug = None try: - blkdebug = open(blkdebug_cfg, 'w') + blkdebug = open(blkdebug_cfg, "w") template.write(blkdebug) finally: if blkdebug is not None: blkdebug.close() error_context.context("Create image", test.log.info) - image_io = QemuImg(params.object_params( - image), data_dir.get_data_dir(), image) + image_io = QemuImg(params.object_params(image), data_dir.get_data_dir(), image) image_name = image_io.create(params.object_params(image))[0] - error_context.context("Operate in qemu-io to trigger the error", - test.log.info) - session = qemu_io.QemuIOShellSession(test, params, image_name, - blkdebug_cfg=blkdebug_cfg, - log_filename=log_filename) + error_context.context("Operate in qemu-io to trigger the error", test.log.info) + session = qemu_io.QemuIOShellSession( + test, + params, + image_name, + blkdebug_cfg=blkdebug_cfg, + log_filename=log_filename, + ) if pre_err_commands: for cmd in re.split(",", pre_err_commands.strip()): session.cmd_output(cmd, timeout=test_timeout) @@ -89,9 +87,13 @@ def run(test, params, env): if pre_snapshot: image_io.snapshot_create() image_sn = image_io.snapshot_tag - session = qemu_io.QemuIOShellSession(test, params, image_name, - blkdebug_cfg=blkdebug_cfg, - log_filename=log_filename) + session = 
qemu_io.QemuIOShellSession( + test, + params, + image_name, + blkdebug_cfg=blkdebug_cfg, + log_filename=log_filename, + ) if not del_snapshot: output = session.cmd_output(err_command, timeout=test_timeout) @@ -108,16 +110,14 @@ def run(test, params, env): image_io.remove() if pre_snapshot and not del_snapshot: params_sn = params.object_params(image_sn) - image_snapshot = QemuImg( - params_sn, data_dir.get_data_dir(), image_sn) + image_snapshot = QemuImg(params_sn, data_dir.get_data_dir(), image_sn) image_snapshot.remove() error_context.context("Get error message", test.log.info) try: std_msg = os.strerror(int(errn)) except ValueError: - test.error("Can not find error message:\n" - " error code is %s" % errn) + test.error("Can not find error message:\n" f" error code is {errn}") session.close() error_context.context("Compare the error message", test.log.info) @@ -125,6 +125,6 @@ def run(test, params, env): test.log.info("Error message is correct in qemu-io") else: fail_log = "The error message is mismatch:\n" - fail_log += " qemu-io reports: '%s',\n" % output - fail_log += " os.strerror reports: '%s'" % std_msg + fail_log += f" qemu-io reports: '{output}',\n" + fail_log += f" os.strerror reports: '{std_msg}'" test.fail(fail_log) diff --git a/qemu/tests/qemu_iotests.py b/qemu/tests/qemu_iotests.py index 2da17f3b9b..bb5dc33b05 100644 --- a/qemu/tests/qemu_iotests.py +++ b/qemu/tests/qemu_iotests.py @@ -1,10 +1,7 @@ import os -from avocado.utils import git -from avocado.utils import process - -from virttest import error_context -from virttest import utils_misc +from avocado.utils import git, process +from virttest import error_context, utils_misc @error_context.context_aware @@ -23,14 +20,20 @@ def run(test, params, env): # First, let's get qemu-io std = "http://git.kernel.org/pub/scm/virt/kvm/qemu-kvm.git" uri = params.get("qemu_io_uri", std) - branch = params.get("qemu_io_branch", 'master') - lbranch = params.get("qemu_io_lbranch", 'master') + branch = params.get("qemu_io_branch", "master") + lbranch = params.get("qemu_io_lbranch", "master") commit = params.get("qemu_io_commit", None) base_uri = params.get("qemu_io_base_uri", None) iotests_dir = params.get("qemu_iotests_dir", "tests/qemu-iotests") destination_dir = os.path.join(test.workdir, "qemu_io_tests") - git.get_repo(uri=uri, branch=branch, lbranch=lbranch, commit=commit, - destination_dir=destination_dir, base_uri=base_uri) + git.get_repo( + uri=uri, + branch=branch, + lbranch=lbranch, + commit=commit, + destination_dir=destination_dir, + base_uri=base_uri, + ) # Then, set the qemu paths for the use of the testsuite os.environ["QEMU_PROG"] = utils_misc.get_qemu_binary(params) @@ -42,10 +45,9 @@ def run(test, params, env): image_format = params["qemu_io_image_format"] extra_options = params.get("qemu_io_extra_options", "") - cmd = './check' + cmd = "./check" if extra_options: cmd += extra_options - error_context.context("running qemu-iotests for image format %s" - % image_format) - process.system("%s -%s" % (cmd, image_format), shell=True) + error_context.context(f"running qemu-iotests for image format {image_format}") + process.system(f"{cmd} -{image_format}", shell=True) diff --git a/qemu/tests/qemu_killer_report.py b/qemu/tests/qemu_killer_report.py index 01bc9d3cda..ddf4ee7644 100644 --- a/qemu/tests/qemu_killer_report.py +++ b/qemu/tests/qemu_killer_report.py @@ -1,8 +1,7 @@ import os import re -from virttest import error_context -from virttest import utils_misc +from virttest import error_context, utils_misc 
@error_context.context_aware @@ -43,16 +42,14 @@ def killer_report(re_str): error_context.context("Kill VM by signal 15", test.log.info) thread_pid = kill_vm_by_signal_15() # Wait QEMU print error log. - results = utils_misc.wait_for(lambda: killer_report(re_str), - 60, 2, 2) - error_context.context("Check that QEMU can report who killed it", - test.log.info) + results = utils_misc.wait_for(lambda: killer_report(re_str), 60, 2, 2) + error_context.context("Check that QEMU can report who killed it", test.log.info) if not results: test.fail("QEMU did not tell us who killed it") elif int(results[-1]) != thread_pid: msg = "QEMU identified the process that killed it incorrectly. " - msg += "Killer PID: %s, " % thread_pid - msg += "QEMU reported PID: %s" % int(results[-1]) + msg += f"Killer PID: {thread_pid}, " + msg += f"QEMU reported PID: {int(results[-1])}" test.fail(msg) else: test.log.info("QEMU identified the process that killed it properly") diff --git a/qemu/tests/qemu_no_shutdown.py b/qemu/tests/qemu_no_shutdown.py index d04c4f7871..7994c6e8a6 100644 --- a/qemu/tests/qemu_no_shutdown.py +++ b/qemu/tests/qemu_no_shutdown.py @@ -1,5 +1,4 @@ -from virttest import error_context -from virttest import utils_misc +from virttest import error_context, utils_misc @error_context.context_aware @@ -30,27 +29,28 @@ def run(test, params, env): test.log.info("The guest bootup successfully.") for i in range(repeat_times): - error_context.context("Round %s : Send monitor cmd system_powerdown." - % str(i + 1), test.log.info) + error_context.context( + f"Round {str(i + 1)} : Send monitor cmd system_powerdown.", test.log.info + ) # Send a system_powerdown monitor command vm.monitor.system_powerdown() # Wait for the session to become unresponsive and close it - if not utils_misc.wait_for(lambda: not session.is_responsive(), - timeout, 0, 1): + if not utils_misc.wait_for(lambda: not session.is_responsive(), timeout, 0, 1): test.fail("Oops, Guest refuses to go down!") if session: session.close() # Check the qemu id is not change if not utils_misc.wait_for(lambda: vm.is_alive(), 5, 0, 1): - test.fail("VM not responsive after system_powerdown " - "with -no-shutdown!") + test.fail("VM not responsive after system_powerdown " "with -no-shutdown!") if vm.get_pid() != qemu_process_id: test.fail("Qemu pid changed after system_powerdown!") test.log.info("Round %s -> System_powerdown successfully.", str(i + 1)) # Send monitor command system_reset and cont - error_context.context("Round %s : Send monitor command system_reset " - "and cont." % str(i + 1), test.log.info) + error_context.context( + f"Round {str(i + 1)} : Send monitor command system_reset " "and cont.", + test.log.info, + ) vm.monitor.cmd("system_reset") vm.resume() diff --git a/qemu/tests/qemu_nobody.py b/qemu/tests/qemu_nobody.py index 2576b77e66..dcd5f01f66 100644 --- a/qemu/tests/qemu_nobody.py +++ b/qemu/tests/qemu_nobody.py @@ -1,14 +1,12 @@ import re from avocado.utils import process - -from virttest import env_process -from virttest import error_context - +from virttest import env_process, error_context try: cmp except NameError: + def cmp(x, y): return (x > y) - (x < y) @@ -24,12 +22,13 @@ def run(test, params, env): :param params: Dictionary with the test parameters. :param env: Dictionary with test environment. 
""" + def get_user_ugid(username): """ return user uid and gid as a list """ - user_uid = process.getoutput("id -u %s" % username).split() - user_gid = process.getoutput("id -g %s" % username).split() + user_uid = process.getoutput(f"id -u {username}").split() + user_gid = process.getoutput(f"id -g {username}").split() return user_uid, user_gid def get_ugid_from_processid(pid): @@ -43,42 +42,48 @@ def get_ugid_from_processid(pid): if ugid: return ugid else: - test.error("Could not find the correct UID for process %s" % pid) + test.error(f"Could not find the correct UID for process {pid}") exec_username = params.get("user_runas", "nobody") - error_context.base_context("Run QEMU %s test:" % exec_username) + error_context.base_context(f"Run QEMU {exec_username} test:") error_context.context("Get the user uid and gid,using 'id -u/g username'") (exec_uid, exec_gid) = get_user_ugid(exec_username) - error_context.context("Run the qemu as user '%s'" % exec_username) - test.log.info("The user %s :uid='%s', gid='%s'", - exec_username, exec_uid, exec_gid) + error_context.context(f"Run the qemu as user '{exec_username}'") + test.log.info("The user %s :uid='%s', gid='%s'", exec_username, exec_uid, exec_gid) - params["extra_params"] = " -runas %s" % exec_username + params["extra_params"] = f" -runas {exec_username}" params["start_vm"] = "yes" env_process.preprocess_vm(test, params, env, params.get("main_vm")) vm = env.get_vm(params["main_vm"]) failures = [] for pid in process.get_children_pids(vm.get_shell_pid()): - error_context.context("Get the process '%s' u/gid, using 'cat " - "/proc/%s/status'" % (pid, pid), test.log.info) + error_context.context( + f"Get the process '{pid}' u/gid, using 'cat " f"/proc/{pid}/status'", + test.log.info, + ) qemu_ugid = get_ugid_from_processid(pid) - test.log.info("Process run as uid=%s,euid=%s,suid=%s,fsuid=%s", - *tuple(qemu_ugid[0:4])) - test.log.info("Process run as gid=%s,egid=%s,sgid=%s,fsgid=%s", - *tuple(qemu_ugid[4:])) + test.log.info( + "Process run as uid=%s,euid=%s,suid=%s,fsuid=%s", *tuple(qemu_ugid[0:4]) + ) + test.log.info( + "Process run as gid=%s,egid=%s,sgid=%s,fsgid=%s", *tuple(qemu_ugid[4:]) + ) - error_context.context("Check if the user %s ugid is equal to the " - "process %s" % (exec_username, pid)) + error_context.context( + f"Check if the user {exec_username} ugid is equal to the " f"process {pid}" + ) # generate user uid, euid, suid, fsuid, gid, egid, sgid, fsgid user_ugid_extend = exec_uid * 4 + exec_gid * 4 if cmp(user_ugid_extend, qemu_ugid) != 0: - e_msg = ("Process %s error, expect ugid is %s, real is %s" - % (pid, user_ugid_extend, qemu_ugid)) + e_msg = f"Process {pid} error, expect ugid is {user_ugid_extend}, real is {qemu_ugid}" failures.append(e_msg) if failures: - test.fail("FAIL: Test reported %s failures:\n%s" % - (len(failures), "\n".join(failures))) + test.fail( + "FAIL: Test reported {} failures:\n{}".format( + len(failures), "\n".join(failures) + ) + ) diff --git a/qemu/tests/qemu_option_check.py b/qemu/tests/qemu_option_check.py index aee47982a0..9620209498 100644 --- a/qemu/tests/qemu_option_check.py +++ b/qemu/tests/qemu_option_check.py @@ -1,9 +1,7 @@ import re from avocado.utils import process - -from virttest import error_context -from virttest import utils_misc +from virttest import error_context, utils_misc @error_context.context_aware @@ -19,14 +17,17 @@ def run(test, params, env): :param params: Dictionary with the test parameters. :param env: Dictionary with test environment. 
""" + def get_qemu_support_device(qemu_binary): """ Get qemu support device list """ - support_device = process.system_output("%s -device ? 2>&1" - % qemu_binary, timeout=10, - ignore_status=True, - shell=True).decode() + support_device = process.system_output( + f"{qemu_binary} -device ? 2>&1", + timeout=10, + ignore_status=True, + shell=True, + ).decode() if not support_device: test.cancel("Can not get qemu support device list") device_list = re.findall(r'name\s+"(.*)",', support_device) @@ -42,22 +43,23 @@ def get_device_option(qemu_binary, device_name): err_msg = "Oops, Your qemu version doesn't support devic '%s', " err_msg += "make sure you have inputted a correct device name" test.cancel(err_msg % device_name) - device_support_option = process.system_output("%s -device %s,? 2>&1" % - (qemu_binary, - device_name), - timeout=10, - ignore_status=True, - shell=True) + device_support_option = process.system_output( + f"{qemu_binary} -device {device_name},? 2>&1", + timeout=10, + ignore_status=True, + shell=True, + ) device_support_option = device_support_option.decode().strip() - if not re.findall(r"%s\.(.*)=(.*)" % - device_name, device_support_option): + if not re.findall(rf"{device_name}\.(.*)=(.*)", device_support_option): test.fail("Qemu option check Failed") - test.log.info("Qemu options check successful. output is:\n%s", - device_support_option) + test.log.info( + "Qemu options check successful. output is:\n%s", device_support_option + ) device_name = params.get("device_name") qemu_binary = utils_misc.get_qemu_binary(params) - error_context.context("Get qemu support %s device options" % device_name, - test.log.info) + error_context.context( + f"Get qemu support {device_name} device options", test.log.info + ) get_device_option(qemu_binary, device_name) diff --git a/qemu/tests/qemu_output.py b/qemu/tests/qemu_output.py index 75765a80fb..b22f5d7c94 100644 --- a/qemu/tests/qemu_output.py +++ b/qemu/tests/qemu_output.py @@ -19,16 +19,16 @@ def run(test, params, env): """ def check_qemu_output(): - error_context.context('Check qemu outputs.', test.log.info) + error_context.context("Check qemu outputs.", test.log.info) output = vm.process.get_output() if re.search(check_pattern, output, re.I): - test.log.debug('qemu outputs: %s', output) - test.fail('Error message is captured in qemu output.') - test.log.info('No error message was found in the qemu output.') + test.log.debug("qemu outputs: %s", output) + test.fail("Error message is captured in qemu output.") + test.log.info("No error message was found in the qemu output.") - check_pattern = params['check_pattern'] + check_pattern = params["check_pattern"] - vm = env.get_vm(params['main_vm']) + vm = env.get_vm(params["main_vm"]) vm.verify_alive() vm.wait_for_login() try: diff --git a/qemu/tests/qmp_basic.py b/qemu/tests/qmp_basic.py index 4c876dfe35..39c78752fd 100644 --- a/qemu/tests/qmp_basic.py +++ b/qemu/tests/qmp_basic.py @@ -1,6 +1,5 @@ from virttest import utils_misc - try: unicode except NameError: @@ -35,12 +34,12 @@ def run(test, params, env): o Are all those check_*() functions really needed? Wouldn't a specialized class (eg. a Response class) do better? 
""" + def fail_no_key(qmp_dict, key): if not isinstance(qmp_dict, dict): - test.fail("qmp_dict is not a dict (it's '%s')" % type(qmp_dict)) + test.fail(f"qmp_dict is not a dict (it's '{type(qmp_dict)}')") if key not in qmp_dict: - test.fail("'%s' key doesn't exist in dict ('%s')" % - (key, str(qmp_dict))) + test.fail(f"'{key}' key doesn't exist in dict ('{str(qmp_dict)}')") def check_dict_key(qmp_dict, key, keytype): """ @@ -54,8 +53,9 @@ def check_dict_key(qmp_dict, key, keytype): """ fail_no_key(qmp_dict, key) if not isinstance(qmp_dict[key], keytype): - test.fail("'%s' key is not of type '%s', it's '%s'" % - (key, keytype, type(qmp_dict[key]))) + test.fail( + f"'{key}' key is not of type '{keytype}', it's '{type(qmp_dict[key])}'" + ) def check_key_is_dict(qmp_dict, key): check_dict_key(qmp_dict, key, dict) @@ -69,22 +69,23 @@ def check_key_is_str(qmp_dict, key): def check_str_key(qmp_dict, keyname, value=None): check_dict_key(qmp_dict, keyname, unicode) if value and value != qmp_dict[keyname]: - test.fail("'%s' key value '%s' should be '%s'" % - (keyname, str(qmp_dict[keyname]), str(value))) + test.fail( + f"'{keyname}' key value '{str(qmp_dict[keyname])}' should be '{str(value)}'" + ) def check_key_is_int(qmp_dict, key): fail_no_key(qmp_dict, key) try: int(qmp_dict[key]) except Exception: - test.fail("'%s' key is not of type int, it's '%s'" % - (key, type(qmp_dict[key]))) + test.fail(f"'{key}' key is not of type int, it's '{type(qmp_dict[key])}'") def check_bool_key(qmp_dict, keyname, value=None): check_dict_key(qmp_dict, keyname, bool) if value and value != qmp_dict[keyname]: - test.fail("'%s' key value '%s' should be '%s'" % - (keyname, str(qmp_dict[keyname]), str(value))) + test.fail( + f"'{keyname}' key value '{str(qmp_dict[keyname])}' should be '{str(value)}'" + ) def check_success_resp(resp, empty=False): """ @@ -95,7 +96,7 @@ def check_success_resp(resp, empty=False): """ check_key_is_dict(resp, "return") if empty and len(resp["return"]) > 0: - test.fail("success response is not empty ('%s')" % str(resp)) + test.fail(f"success response is not empty ('{str(resp)}')") def check_error_resp(resp, classname=None, datadict=None): """ @@ -108,11 +109,17 @@ def check_error_resp(resp, classname=None, datadict=None): check_key_is_dict(resp, "error") check_key_is_str(resp["error"], "class") if classname and resp["error"]["class"] != classname: - test.fail("got error class '%s' expected '%s'" % - (resp["error"]["class"], classname)) + test.fail( + "got error class '{}' expected '{}'".format( + resp["error"]["class"], classname + ) + ) if datadict and resp["error"]["desc"] != datadict: - test.fail("got error desc '%s' expected '%s'" % - (resp["error"]["desc"], datadict)) + test.fail( + "got error desc '{}' expected '{}'".format( + resp["error"]["desc"], datadict + ) + ) def test_version(version): """ @@ -170,12 +177,12 @@ def json_parsing_errors_suite(monitor): # # NOTE: sending only "}" seems to break QMP # NOTE: Duplicate keys are accepted (should it?) 
- bad_json.append("{ \"execute\" }") - bad_json.append("{ \"execute\": \"query-version\", }") - bad_json.append("{ 1: \"query-version\" }") - bad_json.append("{ true: \"query-version\" }") - bad_json.append("{ []: \"query-version\" }") - bad_json.append("{ {}: \"query-version\" }") + bad_json.append('{ "execute" }') + bad_json.append('{ "execute": "query-version", }') + bad_json.append('{ 1: "query-version" }') + bad_json.append('{ true: "query-version" }') + bad_json.append('{ []: "query-version" }') + bad_json.append('{ {}: "query-version" }') for cmd in bad_json: resp = monitor.cmd_raw(cmd) @@ -197,13 +204,19 @@ def test_id_key(monitor): check_str_key(resp, "id", id_key) # The "id" key can be any json-object - for id_key in (True, 1234, "string again!", [1, [], {}, True, "foo"], - {"key": {}}): + for id_key in ( + True, + 1234, + "string again!", + [1, [], {}, True, "foo"], + {"key": {}}, + ): resp = monitor.cmd_qmp("query-status", q_id=id_key) check_success_resp(resp) if resp["id"] != id_key: - test.fail("expected id '%s' but got '%s'" % - (str(id_key), str(resp["id"]))) + test.fail( + "expected id '{}' but got '{}'".format(str(id_key), str(resp["id"])) + ) def test_invalid_arg_key(monitor): """ @@ -212,7 +225,9 @@ def test_invalid_arg_key(monitor): names must be detected. """ resp = monitor.cmd_obj({"execute": "eject", "foobar": True}) - check_error_resp(resp, "GenericError", "QMP input member 'foobar' is unexpected") + check_error_resp( + resp, "GenericError", "QMP input member 'foobar' is unexpected" + ) def test_bad_arguments_key_type(monitor): """ @@ -224,8 +239,9 @@ def test_bad_arguments_key_type(monitor): """ for item in (True, [], 1, "foo"): resp = monitor.cmd_obj({"execute": "eject", "arguments": item}) - check_error_resp(resp, "GenericError", - "QMP input member 'arguments' must be an object") + check_error_resp( + resp, "GenericError", "QMP input member 'arguments' must be an object" + ) def test_bad_execute_key_type(monitor): """ @@ -233,16 +249,21 @@ def test_bad_execute_key_type(monitor): """ for item in (False, 1, {}, []): resp = monitor.cmd_obj({"execute": item}) - check_error_resp(resp, "GenericError", - "QMP input member 'execute' must be a string") + check_error_resp( + resp, "GenericError", "QMP input member 'execute' must be a string" + ) def test_no_execute_key(monitor): """ The "execute" key must exist, we also test for some stupid parsing errors. """ - for cmd in ({}, {"execut": "qmp_capabilities"}, - {"executee": "qmp_capabilities"}, {"foo": "bar"}): + for cmd in ( + {}, + {"execut": "qmp_capabilities"}, + {"executee": "qmp_capabilities"}, + {"foo": "bar"}, + ): resp = monitor.cmd_obj(cmd) check_error_resp(resp) # XXX: check class and data dict? 
@@ -267,8 +288,7 @@ def test_good_input_obj(monitor): check_success_resp(resp) idd = "1234foo" - resp = monitor.cmd_obj({"id": idd, "execute": "query-version", - "arguments": {}}) + resp = monitor.cmd_obj({"id": idd, "execute": "query-version", "arguments": {}}) check_success_resp(resp) check_str_key(resp, "id", idd) @@ -307,8 +327,7 @@ def argument_checker_suite(monitor): check_error_resp(resp, "GenericError", "Parameter 'filename' is missing") # 'bar' is not a valid argument - resp = monitor.cmd_qmp("screendump", {"filename": "outfile", - "bar": "bar"}) + resp = monitor.cmd_qmp("screendump", {"filename": "outfile", "bar": "bar"}) check_error_resp(resp, "GenericError", "Parameter 'bar' is unexpected") # test optional argument: 'force' is omitted, but it's optional, so @@ -320,32 +339,48 @@ def argument_checker_suite(monitor): # filename argument must be a json-string for arg in ({}, [], 1, True): resp = monitor.cmd_qmp("screendump", {"filename": arg}) - check_error_resp(resp, "GenericError", - "Invalid parameter type for 'filename', expected: string") + check_error_resp( + resp, + "GenericError", + "Invalid parameter type for 'filename', expected: string", + ) # force argument must be a json-bool for arg in ({}, [], 1, "foo"): resp = monitor.cmd_qmp("eject", {"force": arg, "device": "foo"}) - check_error_resp(resp, "GenericError", - "Invalid parameter type for 'force', expected: boolean") + check_error_resp( + resp, + "GenericError", + "Invalid parameter type for 'force', expected: boolean", + ) # val argument must be a json-int for arg in ({}, [], True, "foo"): - resp = monitor.cmd_qmp("memsave", {"val": arg, "filename": "foo", - "size": 10}) - check_error_resp(resp, "GenericError", - "Invalid parameter type for 'val', expected: integer") + resp = monitor.cmd_qmp( + "memsave", {"val": arg, "filename": "foo", "size": 10} + ) + check_error_resp( + resp, + "GenericError", + "Invalid parameter type for 'val', expected: integer", + ) # value argument must be a json-number for arg in ({}, [], True, "foo"): if utils_misc.compare_qemu_version(5, 1, 0, is_rhev=False) is True: - resp = monitor.cmd_qmp("migrate-set-parameters", {"downtime-limit": arg}) - check_error_resp(resp, "GenericError", - "Parameter 'downtime-limit' expects uint64") + resp = monitor.cmd_qmp( + "migrate-set-parameters", {"downtime-limit": arg} + ) + check_error_resp( + resp, "GenericError", "Parameter 'downtime-limit' expects uint64" + ) else: resp = monitor.cmd_qmp("migrate_set_downtime", {"value": arg}) - check_error_resp(resp, "GenericError", - "Invalid parameter type for 'value', expected: number") + check_error_resp( + resp, + "GenericError", + "Invalid parameter type for 'value', expected: number", + ) # qdev-type commands have their own argument checker, all QMP does # is to skip its checking and pass arguments through. 
Check this @@ -353,11 +388,11 @@ def argument_checker_suite(monitor): # an error message from qdev resp = monitor.cmd_qmp("device_add", {"driver": "e1000", "foo": "bar"}) if params["machine_type"] == "q35": - check_error_resp(resp, "GenericError", - "Bus 'pcie.0' does not support hotplugging") + check_error_resp( + resp, "GenericError", "Bus 'pcie.0' does not support hotplugging" + ) else: - check_error_resp(resp, "GenericError", - "Property 'e1000.foo' not found") + check_error_resp(resp, "GenericError", "Property 'e1000.foo' not found") def unknown_commands_suite(monitor): """ @@ -366,7 +401,9 @@ def unknown_commands_suite(monitor): # We also call a HMP-only command, to be sure it will fail as expected for cmd in ("bar", "query-", "query-foo", "q", "help"): resp = monitor.cmd_qmp(cmd) - check_error_resp(resp, "CommandNotFound", "The command %s has not been found" % (cmd)) + check_error_resp( + resp, "CommandNotFound", f"The command {cmd} has not been found" + ) vm = env.get_vm(params["main_vm"]) vm.verify_alive() @@ -376,7 +413,7 @@ def unknown_commands_suite(monitor): if qmp_monitor: qmp_monitor = qmp_monitor[0] else: - test.error('Could not find a QMP monitor, aborting test') + test.error("Could not find a QMP monitor, aborting test") # Run all suites greeting_suite(qmp_monitor) @@ -387,4 +424,4 @@ def unknown_commands_suite(monitor): # check if QMP is still alive if not qmp_monitor.is_responsive(): - test.fail('QMP monitor is not responsive after testing') + test.fail("QMP monitor is not responsive after testing") diff --git a/qemu/tests/qmp_basic_rhel6.py b/qemu/tests/qmp_basic_rhel6.py index f5afd9dcbc..bf78b1265c 100644 --- a/qemu/tests/qmp_basic_rhel6.py +++ b/qemu/tests/qmp_basic_rhel6.py @@ -32,12 +32,12 @@ def run(test, params, env): o Are all those check_*() functions really needed? Wouldn't a specialized class (eg. a Response class) do better? 
""" + def fail_no_key(qmp_dict, key): if not isinstance(qmp_dict, dict): - test.fail("qmp_dict is not a dict (it's '%s')" % type(qmp_dict)) + test.fail(f"qmp_dict is not a dict (it's '{type(qmp_dict)}')") if key not in qmp_dict: - test.fail("'%s' key doesn't exist in dict ('%s')" % - (key, str(qmp_dict))) + test.fail(f"'{key}' key doesn't exist in dict ('{str(qmp_dict)}')") def check_dict_key(qmp_dict, key, keytype): """ @@ -51,8 +51,9 @@ def check_dict_key(qmp_dict, key, keytype): """ fail_no_key(qmp_dict, key) if not isinstance(qmp_dict[key], keytype): - test.fail("'%s' key is not of type '%s', it's '%s'" % - (key, keytype, type(qmp_dict[key]))) + test.fail( + f"'{key}' key is not of type '{keytype}', it's '{type(qmp_dict[key])}'" + ) def check_key_is_dict(qmp_dict, key): check_dict_key(qmp_dict, key, dict) @@ -66,22 +67,23 @@ def check_key_is_str(qmp_dict, key): def check_str_key(qmp_dict, keyname, value=None): check_dict_key(qmp_dict, keyname, unicode) if value and value != qmp_dict[keyname]: - test.fail("'%s' key value '%s' should be '%s'" % - (keyname, str(qmp_dict[keyname]), str(value))) + test.fail( + f"'{keyname}' key value '{str(qmp_dict[keyname])}' should be '{str(value)}'" + ) def check_key_is_int(qmp_dict, key): fail_no_key(qmp_dict, key) try: int(qmp_dict[key]) except Exception: - test.fail("'%s' key is not of type int, it's '%s'" % - (key, type(qmp_dict[key]))) + test.fail(f"'{key}' key is not of type int, it's '{type(qmp_dict[key])}'") def check_bool_key(qmp_dict, keyname, value=None): check_dict_key(qmp_dict, keyname, bool) if value and value != qmp_dict[keyname]: - test.fail("'%s' key value '%s' should be '%s'" % - (keyname, str(qmp_dict[keyname]), str(value))) + test.fail( + f"'{keyname}' key value '{str(qmp_dict[keyname])}' should be '{str(value)}'" + ) def check_success_resp(resp, empty=False): """ @@ -92,7 +94,7 @@ def check_success_resp(resp, empty=False): """ check_key_is_dict(resp, "return") if empty and len(resp["return"]) > 0: - test.fail("success response is not empty ('%s')" % str(resp)) + test.fail(f"success response is not empty ('{str(resp)}')") def check_error_resp(resp, classname=None, datadict=None): """ @@ -106,12 +108,18 @@ def check_error_resp(resp, classname=None, datadict=None): check_key_is_dict(resp, "error") check_key_is_str(resp["error"], "class") if classname and resp["error"]["class"] != classname: - test.fail("got error class '%s' expected '%s'" % - (resp["error"]["class"], classname)) + test.fail( + "got error class '{}' expected '{}'".format( + resp["error"]["class"], classname + ) + ) check_key_is_dict(resp["error"], "data") if datadict and resp["error"]["data"] != datadict: - test.fail("got data dict '%s' expected '%s'" % - (resp["error"]["data"], datadict)) + test.fail( + "got data dict '{}' expected '{}'".format( + resp["error"]["data"], datadict + ) + ) def test_version(version): """ @@ -167,12 +175,12 @@ def json_parsing_errors_suite(monitor): # # NOTE: sending only "}" seems to break QMP # NOTE: Duplicate keys are accepted (should it?) 
- bad_json.append("{ \"execute\" }") - bad_json.append("{ \"execute\": \"query-version\", }") - bad_json.append("{ 1: \"query-version\" }") - bad_json.append("{ true: \"query-version\" }") - bad_json.append("{ []: \"query-version\" }") - bad_json.append("{ {}: \"query-version\" }") + bad_json.append('{ "execute" }') + bad_json.append('{ "execute": "query-version", }') + bad_json.append('{ 1: "query-version" }') + bad_json.append('{ true: "query-version" }') + bad_json.append('{ []: "query-version" }') + bad_json.append('{ {}: "query-version" }') for cmd in bad_json: resp = monitor.cmd_raw(cmd) @@ -194,13 +202,19 @@ def test_id_key(monitor): check_str_key(resp, "id", id_key) # The "id" key can be any json-object - for id_key in (True, 1234, "string again!", [1, [], {}, True, "foo"], - {"key": {}}): + for id_key in ( + True, + 1234, + "string again!", + [1, [], {}, True, "foo"], + {"key": {}}, + ): resp = monitor.cmd_qmp("query-status", q_id=id_key) check_success_resp(resp) if resp["id"] != id_key: - test.fail("expected id '%s' but got '%s'" % - (str(id_key), str(resp["id"]))) + test.fail( + "expected id '{}' but got '{}'".format(str(id_key), str(resp["id"])) + ) def test_invalid_arg_key(monitor): """ @@ -223,8 +237,11 @@ def test_bad_arguments_key_type(monitor): """ for item in (True, [], 1, "foo"): resp = monitor.cmd_obj({"execute": "eject", "arguments": item}) - check_error_resp(resp, "QMPBadInputObjectMember", - {"member": "arguments", "expected": "object"}) + check_error_resp( + resp, + "QMPBadInputObjectMember", + {"member": "arguments", "expected": "object"}, + ) def test_bad_execute_key_type(monitor): """ @@ -232,16 +249,23 @@ def test_bad_execute_key_type(monitor): """ for item in (False, 1, {}, []): resp = monitor.cmd_obj({"execute": item}) - check_error_resp(resp, "QMPBadInputObjectMember", - {"member": "execute", "expected": "string"}) + check_error_resp( + resp, + "QMPBadInputObjectMember", + {"member": "execute", "expected": "string"}, + ) def test_no_execute_key(monitor): """ The "execute" key must exist, we also test for some stupid parsing errors. """ - for cmd in ({}, {"execut": "qmp_capabilities"}, - {"executee": "qmp_capabilities"}, {"foo": "bar"}): + for cmd in ( + {}, + {"execut": "qmp_capabilities"}, + {"executee": "qmp_capabilities"}, + {"foo": "bar"}, + ): resp = monitor.cmd_obj(cmd) check_error_resp(resp) # XXX: check class and data dict? 
@@ -266,8 +290,9 @@ def test_good_input_obj(monitor): check_success_resp(resp) id_key = "1234foo" - resp = monitor.cmd_obj({"id": id_key, "execute": "query-version", - "arguments": {}}) + resp = monitor.cmd_obj( + {"id": id_key, "execute": "query-version", "arguments": {}} + ) check_success_resp(resp) check_str_key(resp, "id", id_key) @@ -311,24 +336,28 @@ def argument_checker_suite(monitor): # val argument must be a json-int for arg in ({}, [], True, "foo"): - resp = monitor.cmd_qmp("memsave", {"val": arg, "filename": "foo", - "size": 10}) - check_error_resp(resp, "InvalidParameterType", - {"name": "val", "expected": "int"}) + resp = monitor.cmd_qmp( + "memsave", {"val": arg, "filename": "foo", "size": 10} + ) + check_error_resp( + resp, "InvalidParameterType", {"name": "val", "expected": "int"} + ) # value argument must be a json-number for arg in ({}, [], True, "foo"): resp = monitor.cmd_qmp("migrate_set_speed", {"value": arg}) - check_error_resp(resp, "InvalidParameterType", - {"name": "value", "expected": "number"}) + check_error_resp( + resp, "InvalidParameterType", {"name": "value", "expected": "number"} + ) # qdev-type commands have their own argument checker, all QMP does # is to skip its checking and pass arguments through. Check this # works by providing invalid options to device_add and expecting # an error message from qdev resp = monitor.cmd_qmp("device_add", {"driver": "e1000", "foo": "bar"}) - check_error_resp(resp, "PropertyNotFound", - {"device": "e1000", "property": "foo"}) + check_error_resp( + resp, "PropertyNotFound", {"device": "e1000", "property": "foo"} + ) def unknown_commands_suite(monitor): """ @@ -347,7 +376,7 @@ def unknown_commands_suite(monitor): if qmp_monitor: qmp_monitor = qmp_monitor[0] else: - test.error('Could not find a QMP monitor, aborting test') + test.error("Could not find a QMP monitor, aborting test") # Run all suites greeting_suite(qmp_monitor) @@ -358,4 +387,4 @@ def unknown_commands_suite(monitor): # check if QMP is still alive if not qmp_monitor.is_responsive(): - test.fail('QMP monitor is not responsive after testing') + test.fail("QMP monitor is not responsive after testing") diff --git a/qemu/tests/qmp_command.py b/qemu/tests/qmp_command.py index b7c3310158..ebad80edfb 100644 --- a/qemu/tests/qmp_command.py +++ b/qemu/tests/qmp_command.py @@ -1,11 +1,9 @@ import operator -import time import platform +import time from avocado.utils import process - -from virttest import utils_misc -from virttest import qemu_monitor +from virttest import qemu_monitor, utils_misc from virttest.qemu_capabilities import Flags @@ -45,7 +43,7 @@ def check_list(qmp_o, key, val=None, check_item_in_pair=True): elif isinstance(element, list): if check_list(element, key, val, check_item_in_pair): return True - elif element != '' and not check_item_in_pair: + elif element != "" and not check_item_in_pair: if strict_match: if operator.eq(key, element): return True @@ -82,7 +80,7 @@ def _check_dict(dic, key, val, check_item_in_pair=True): elif isinstance(value, list): if check_list(value, key, val, check_item_in_pair): return True - elif value != '' and not check_item_in_pair: + elif value != "" and not check_item_in_pair: if strict_match: if operator.eq(key, value): return True @@ -107,9 +105,11 @@ def check_result(qmp_o, expect_o=None): test.log.info("Actual result that get from qmp_cmd/post_cmd is %s", qmp_o) if result_check == "equal": if not operator.eq(qmp_o, expect_o): - test.fail("QMP output does not equal to the expect result.\n " - "Expect result: '%s'\n" - 
"Actual result: '%s'" % (expect_o, qmp_o)) + test.fail( + "QMP output does not equal to the expect result.\n " + f"Expect result: '{expect_o}'\n" + f"Actual result: '{qmp_o}'" + ) elif result_check == "contain": for o in expect_o: if isinstance(o, dict): @@ -123,9 +123,11 @@ def check_result(qmp_o, expect_o=None): if result: test.log.info("QMP output contain the expect value %s", o) else: - test.fail("QMP output does not contain the expect value.\n" - "Missed expect value: '%s'\n" - "Actual result: '%s'\n" % (o, qmp_o)) + test.fail( + "QMP output does not contain the expect value.\n" + f"Missed expect value: '{o}'\n" + f"Actual result: '{qmp_o}'\n" + ) elif result_check == "not_contain": for o in expect_o: if isinstance(o, dict): @@ -137,9 +139,11 @@ def check_result(qmp_o, expect_o=None): result = check_list(qmp_o, o, check_item_in_pair=False) if result: - test.fail("QMP output contain the unexpect result.\n" - "Unexpect result: '%s'\n" - "Actual result: '%s'" % (o, qmp_o)) + test.fail( + "QMP output contain the unexpect result.\n" + f"Unexpect result: '{o}'\n" + f"Actual result: '{qmp_o}'" + ) qemu_binary = utils_misc.get_qemu_binary(params) if not utils_misc.qemu_has_option("qmp", qemu_binary): @@ -153,19 +157,21 @@ def check_result(qmp_o, expect_o=None): module = params.get("modprobe_module") if module: test.log.info("modprobe the module %s", module) - session.cmd("modprobe %s" % module) + session.cmd(f"modprobe {module}") - qmp_ports = vm.get_monitors_by_type('qmp') + qmp_ports = vm.get_monitors_by_type("qmp") if qmp_ports: qmp_port = qmp_ports[0] else: test.error("Incorrect configuration, no QMP monitor found.") - callback = {"host_cmd": lambda cmd: process.system_output(cmd, shell=True).decode(), - "guest_cmd": session.cmd_output, - "qmp_cmd": qmp_port.send_args_cmd} # pylint: disable=E0606 + callback = { + "host_cmd": lambda cmd: process.system_output(cmd, shell=True).decode(), + "guest_cmd": session.cmd_output, + "qmp_cmd": qmp_port.send_args_cmd, + } # pylint: disable=E0606 def send_cmd(cmd): - """ Helper to execute command on host/ssh guest/qmp monitor """ + """Helper to execute command on host/ssh guest/qmp monitor""" if cmd_type in callback.keys(): return callback[cmd_type](cmd) else: @@ -176,7 +182,7 @@ def send_cmd(cmd): post_cmd = params.get("post_cmd") cmd_type = params.get("event_cmd_type") result_check = params.get("cmd_result_check") - strict_match = params.get("strict_match", "yes") == 'yes' + strict_match = params.get("strict_match", "yes") == "yes" expect_o = eval(params.get("cmd_return_value", "[]")) # Pre command @@ -194,14 +200,12 @@ def send_cmd(cmd): qmp_o = [qmp_o] test.log.debug("QMP command: '%s' \n Output: '%s'", qmp_cmd, qmp_o) except qemu_monitor.QMPCmdError as err: - if params.get("negative_test") == 'yes': - test.log.debug("Negative QMP command: '%s'\n output:'%s'", qmp_cmd, - err) + if params.get("negative_test") == "yes": + test.log.debug("Negative QMP command: '%s'\n output:'%s'", qmp_cmd, err) if params.get("negative_check_pattern"): check_pattern = params.get("negative_check_pattern") if check_pattern not in str(err): - test.fail("'%s' not in exception '%s'" - % (check_pattern, err)) + test.fail(f"'{check_pattern}' not in exception '{err}'") else: test.fail(err) except qemu_monitor.MonitorProtocolError as err: @@ -210,7 +214,7 @@ def send_cmd(cmd): test.fail(err) # sleep 10s to make netdev_del take effect - if 'netdev_del' in qmp_cmd: + if "netdev_del" in qmp_cmd: time.sleep(10) # Post command @@ -225,28 +229,32 @@ def send_cmd(cmd): 
test.log.info("Verify qmp command '%s' works as designed.", qmp_cmd) if qmp_cmd == "query-name": vm_name = params["main_vm"] - expect_o = [{'name': vm_name}] + expect_o = [{"name": vm_name}] elif qmp_cmd == "query-uuid": uuid_input = params["uuid"] - expect_o = [{'UUID': uuid_input}] + expect_o = [{"UUID": uuid_input}] elif qmp_cmd == "query-version": - qemu_version_cmd = "rpm -qa | grep -E 'qemu-kvm(-(rhev|ma))?-[0-9]' | head -n 1" + qemu_version_cmd = ( + "rpm -qa | grep -E 'qemu-kvm(-(rhev|ma))?-[0-9]' | head -n 1" + ) host_arch = platform.machine() - qemu_version = callback["host_cmd"](qemu_version_cmd).replace('.%s' % host_arch, '') + qemu_version = callback["host_cmd"](qemu_version_cmd).replace( + f".{host_arch}", "" + ) expect_o = [str(qemu_version)] elif qmp_cmd == "query-block": - images = params['images'].split() + images = params["images"].split() image_info = {} for image in images: image_params = params.object_params(image) - image_format = image_params['image_format'] - image_drive = "drive_%s" % image + image_format = image_params["image_format"] + image_drive = f"drive_{image}" if vm.check_capability(Flags.BLOCKDEV): - image_info['node-name'] = image_drive + image_info["node-name"] = image_drive else: - image_info['device'] = image_drive - image_info['qdev'] = image - image_info['format'] = image_format + image_info["device"] = image_drive + image_info["qdev"] = image + image_info["format"] = image_format expect_o.append(image_info) elif qmp_cmd == "query-target": host_arch = platform.machine() @@ -255,14 +263,18 @@ def send_cmd(cmd): expect_o = [{"arch": host_arch}] elif qmp_cmd == "query-machines": # Remove avocado machine type - vm_machines = params["machine_type"].split(':', 1)[-1] - expect_o = [{'alias': vm_machines}] + vm_machines = params["machine_type"].split(":", 1)[-1] + expect_o = [{"alias": vm_machines}] elif qmp_cmd == "query-vnc": vnc_port = vm.get_vnc_port() - expect_o = [{'service': str(vnc_port)}, {'enabled': True}, {'host': '0.0.0.0'}] + expect_o = [ + {"service": str(vnc_port)}, + {"enabled": True}, + {"host": "0.0.0.0"}, + ] check_result(qmp_o, expect_o) elif result_check.startswith("post_"): test.log.info("Verify post qmp command '%s' works as designed.", post_cmd) - result_check = result_check.split('_', 1)[1] + result_check = result_check.split("_", 1)[1] check_result(post_o, expect_o) session.close() diff --git a/qemu/tests/qmp_event_notification.py b/qemu/tests/qmp_event_notification.py index 61fc14dfc0..7e7482fcff 100644 --- a/qemu/tests/qmp_event_notification.py +++ b/qemu/tests/qmp_event_notification.py @@ -2,10 +2,8 @@ from functools import partial from avocado.utils import process - from virttest import utils_misc - _system_output = partial(process.system_output, shell=True) @@ -23,19 +21,19 @@ def run(test, params, env): qemu_binary = utils_misc.get_qemu_binary(params) if not utils_misc.qemu_has_option("qmp", qemu_binary): - test.cancel("This test case requires a host QEMU with QMP " - "monitor support") + test.cancel("This test case requires a host QEMU with QMP " "monitor support") vm = env.get_vm(params["main_vm"]) vm.verify_alive() session = vm.wait_for_login(timeout=int(params.get("login_timeout", 360))) qmp_monitor = list(filter(lambda x: x.protocol == "qmp", vm.monitors))[0] - humam_monitor = list( - filter(lambda x: x.protocol == "human", vm.monitors))[0] - callback = {"host_cmd": _system_output, - "guest_cmd": session.cmd, - "monitor_cmd": humam_monitor.send_args_cmd, - "qmp_cmd": qmp_monitor.send_args_cmd} + humam_monitor = 
list(filter(lambda x: x.protocol == "human", vm.monitors))[0] + callback = { + "host_cmd": _system_output, + "guest_cmd": session.cmd, + "monitor_cmd": humam_monitor.send_args_cmd, + "qmp_cmd": qmp_monitor.send_args_cmd, + } def send_cmd(cmd, cmd_type, options={}): if cmd_type in callback.keys(): @@ -47,21 +45,21 @@ def send_cmd(cmd, cmd_type, options={}): pre_event_cmd = params.get("pre_event_cmd", "") pre_event_cmd_type = params.get("pre_event_cmd_type", cmd_type) pre_event_cmd_options = eval( - "dict({0})".format(params.get("pre_event_cmd_options", ""))) + "dict({})".format(params.get("pre_event_cmd_options", "")) + ) event_cmd = params.get("event_cmd") - event_cmd_options = eval( - "dict({0})".format(params.get("event_cmd_options", ""))) + event_cmd_options = eval("dict({})".format(params.get("event_cmd_options", ""))) post_event_cmd = params.get("post_event_cmd", "") post_event_cmd_type = params.get("post_event_cmd_type", cmd_type) post_event_cmd_options = eval( - "dict({0})".format(params.get("post_event_cmd_options", ""))) + "dict({})".format(params.get("post_event_cmd_options", "")) + ) event_check = params.get("event_check") timeout = int(params.get("check_timeout", 360)) watchdog_action = params.get("watchdog_action") if pre_event_cmd: - send_cmd(pre_event_cmd, pre_event_cmd_type, - pre_event_cmd_options) + send_cmd(pre_event_cmd, pre_event_cmd_type, pre_event_cmd_options) send_cmd(event_cmd, cmd_type, event_cmd_options) @@ -73,15 +71,15 @@ def send_cmd(cmd, cmd_type, options={}): for monitor in qmp_monitors: event = monitor.get_event(event_check) if event_check == "WATCHDOG": - if event and event['data']['action'] == watchdog_action: - test.log.info("Receive watchdog %s event notification", - watchdog_action) + if event and event["data"]["action"] == watchdog_action: + test.log.info( + "Receive watchdog %s event notification", watchdog_action + ) qmp_num -= 1 qmp_monitors.remove(monitor) else: if event: - test.log.info("Receive qmp %s event notification", - event_check) + test.log.info("Receive qmp %s event notification", event_check) qmp_num -= 1 qmp_monitors.remove(monitor) time.sleep(5) @@ -89,10 +87,9 @@ def send_cmd(cmd, cmd_type, options={}): break if qmp_num > 0: - test.fail("Did not receive qmp %s event notification" % event_check) + test.fail(f"Did not receive qmp {event_check} event notification") if post_event_cmd: - send_cmd(post_event_cmd, post_event_cmd_type, - post_event_cmd_options) + send_cmd(post_event_cmd, post_event_cmd_type, post_event_cmd_options) if session: session.close() diff --git a/qemu/tests/qsd_backup_pull.py b/qemu/tests/qsd_backup_pull.py index dae07aa121..ee008a8434 100644 --- a/qemu/tests/qsd_backup_pull.py +++ b/qemu/tests/qsd_backup_pull.py @@ -1,21 +1,16 @@ import socket -from provider import backup_utils -from provider import blockdev_base -from provider import job_utils +from virttest import qemu_storage, utils_disk +from provider import backup_utils, blockdev_base, job_utils from provider.nbd_image_export import InternalNBDExportImage -from provider.virt_storage.storage_admin import sp_admin from provider.qsd import QsdDaemonDev, add_vubp_into_boot - -from virttest import qemu_storage -from virttest import utils_disk +from provider.virt_storage.storage_admin import sp_admin class QSDBackupPullMode(blockdev_base.BlockdevBaseTest): - def __init__(self, test, params, env): - super(QSDBackupPullMode, self).__init__(test, params, env) + super().__init__(test, params, env) self.source_images = [] self.full_backups = [] self.inc_backups = [] @@ 
-31,52 +26,56 @@ def __init__(self, test, params, env): self.inc_backup_nbd_images = [] self.src_img_tags = params.objects("source_images") localhost = socket.gethostname() - self.params['nbd_server'] = localhost if localhost else 'localhost' + self.params["nbd_server"] = localhost if localhost else "localhost" list(map(self._init_arguments_by_params, self.src_img_tags)) def _init_arguments_by_params(self, tag): image_params = self.params.object_params(tag) bk_tags = image_params.objects("backup_images") - self.source_images.append("drive_%s" % tag) + self.source_images.append(f"drive_{tag}") # fleecing image used for full backup, to be exported by nbd - self.full_backups.append("drive_%s" % bk_tags[0]) - self.full_backup_bitmaps.append("full_bitmap_%s" % tag) + self.full_backups.append(f"drive_{bk_tags[0]}") + self.full_backup_bitmaps.append(f"full_bitmap_{tag}") # fleecing image used for inc backup, to be exported by nbd - self.inc_backups.append("drive_%s" % bk_tags[1]) - self.inc_backup_bitmaps.append("inc_bitmap_%s" % tag) + self.inc_backups.append(f"drive_{bk_tags[1]}") + self.inc_backup_bitmaps.append(f"inc_bitmap_{tag}") # nbd export image used full backup - nbd_image = self.params['nbd_image_%s' % bk_tags[0]] - disk = qemu_storage.QemuImg(self.params.object_params(nbd_image), - None, nbd_image) + nbd_image = self.params[f"nbd_image_{bk_tags[0]}"] + disk = qemu_storage.QemuImg( + self.params.object_params(nbd_image), None, nbd_image + ) self.full_backup_nbd_images.append(disk) # nbd export image used for inc backup - nbd_image = self.params['nbd_image_%s' % bk_tags[1]] - disk = qemu_storage.QemuImg(self.params.object_params(nbd_image), - None, nbd_image) + nbd_image = self.params[f"nbd_image_{bk_tags[1]}"] + disk = qemu_storage.QemuImg( + self.params.object_params(nbd_image), None, nbd_image + ) self.inc_backup_nbd_images.append(disk) # local image used for copying data from nbd export image(full backup) - client_image = self.params['client_image_%s' % bk_tags[0]] + client_image = self.params[f"client_image_{bk_tags[0]}"] disk = self.source_disk_define_by_params( - self.params.object_params(client_image), client_image) + self.params.object_params(client_image), client_image + ) disk.create(self.params) self.trash.append(disk) self.full_backup_client_images.append(disk) # local image used for copying data from nbd export images(inc backup) - client_image = self.params['client_image_%s' % bk_tags[1]] + client_image = self.params[f"client_image_{bk_tags[1]}"] disk = self.source_disk_define_by_params( - self.params.object_params(client_image), client_image) + self.params.object_params(client_image), client_image + ) disk.create(self.params) self.trash.append(disk) self.inc_backup_client_images.append(disk) # disable bitmap created in full backup when doing inc backup - self.disabled_bitmaps.append("full_bitmap_%s" % tag) + self.disabled_bitmaps.append(f"full_bitmap_{tag}") def get_qsd_demon(self): qsd_name = self.params["qsd_namespaces"] @@ -92,25 +91,30 @@ def _init_nbd_exports(tag): bk_tags = self.params.object_params(tag).objects("backup_images") self.full_backup_nbd_objs.append( - InternalNBDExportImage(self.qsd, self.params, bk_tags[0])) + InternalNBDExportImage(self.qsd, self.params, bk_tags[0]) + ) - self.params['nbd_export_bitmaps_%s' % - bk_tags[1]] = "full_bitmap_%s" % tag + self.params[f"nbd_export_bitmaps_{bk_tags[1]}"] = f"full_bitmap_{tag}" self.inc_backup_nbd_objs.append( - InternalNBDExportImage(self.qsd, self.params, bk_tags[1])) + InternalNBDExportImage(self.qsd, 
self.params, bk_tags[1]) + ) list(map(_init_nbd_exports, self.src_img_tags)) def full_copyif(self): for i, nbd_obj in enumerate(self.full_backup_nbd_images): - backup_utils.copyif(self.params, nbd_obj.tag, - self.full_backup_client_images[i].tag) + backup_utils.copyif( + self.params, nbd_obj.tag, self.full_backup_client_images[i].tag + ) def inc_copyif(self): for i, nbd_obj in enumerate(self.inc_backup_nbd_images): - backup_utils.copyif(self.params, nbd_obj.tag, - self.inc_backup_client_images[i].tag, - self.full_backup_bitmaps[i]) + backup_utils.copyif( + self.params, + nbd_obj.tag, + self.inc_backup_client_images[i].tag, + self.full_backup_bitmaps[i], + ) def export_full_backups(self): for i, obj in enumerate(self.full_backup_nbd_objs): @@ -132,8 +136,8 @@ def stop_export_inc_backups(self): def cancel_backup_jobs(self): for job_id in self.backup_jobs: - arguments = {'id': job_id} - self.qsd.monitor.cmd('job-cancel', arguments) + arguments = {"id": job_id} + self.qsd.monitor.cmd("job-cancel", arguments) def do_full_backup(self): extra_options = {"sync": "none", "wait_job_complete": False} @@ -142,40 +146,48 @@ def do_full_backup(self): self.source_images, self.full_backups, self.full_backup_bitmaps, - **extra_options) - self.backup_jobs = [job['id'] - for job in job_utils.query_jobs(self.qsd)] + **extra_options, + ) + self.backup_jobs = [job["id"] for job in job_utils.query_jobs(self.qsd)] def generate_inc_files(self): return list(map(self.generate_data_file, self.src_img_tags)) - def add_target_data_disks(self, bktype='full'): + def add_target_data_disks(self, bktype="full"): """Hot add target disk to VM with qmp monitor""" for tag in self.params.objects("source_images"): image_params = self.params.object_params(tag) - img = image_params['full_backup_image'] if bktype == 'full' else image_params['inc_backup_image'] + img = ( + image_params["full_backup_image"] + if bktype == "full" + else image_params["inc_backup_image"] + ) disk = self.target_disk_define_by_params(self.params, img) disk.hotplug(self.qsd) self.trash.append(disk) def do_incremental_backup(self): - extra_options = {"sync": "none", - "disabled_bitmaps": self.disabled_bitmaps, - "wait_job_complete": False} + extra_options = { + "sync": "none", + "disabled_bitmaps": self.disabled_bitmaps, + "wait_job_complete": False, + } backup_utils.blockdev_batch_backup( self.qsd, self.source_images, self.inc_backups, self.inc_backup_bitmaps, - **extra_options) - self.backup_jobs = [job['id'] - for job in job_utils.query_jobs(self.qsd)] + **extra_options, + ) + self.backup_jobs = [job["id"] for job in job_utils.query_jobs(self.qsd)] def restart_vm_with_backup_images(self): """restart vm with back2 as its data disk""" self.main_vm.destroy() self.qsd.stop_daemon() - self.params['qsd_images_qsd1'] = ' '.join([i.tag for i in self.inc_backup_client_images]) + self.params["qsd_images_qsd1"] = " ".join( + [i.tag for i in self.inc_backup_client_images] + ) for addr, inc in enumerate(self.inc_backup_client_images, start=15): add_vubp_into_boot(inc.tag, self.params, addr) self.start_qsd() @@ -185,12 +197,12 @@ def restart_vm_with_backup_images(self): def clean_images(self): for img in self.trash: try: - if hasattr(img, 'remove'): + if hasattr(img, "remove"): img.remove() else: sp_admin.remove_volume(img) except Exception as e: - self.test.log.warn(str(e)) + self.test.log.warning(str(e)) def rebase_backup_image(self): """rebase image back2 onto back1""" @@ -211,33 +223,33 @@ def verify_data_files(self): disks_info[data_img] = 
self.disks_info[data_img] # Check md5sum for the first two files - super(QSDBackupPullMode, self).verify_data_files() + super().verify_data_files() # Check the files should not exist on back2 session = self.clone_vm.wait_for_login() try: for tag, info in disks_info.items(): utils_disk.mount(info[0], info[1], session=session) - file_path = "%s/%s" % (info[1], non_existed_files[tag]) - cat_cmd = "cat %s" % file_path + file_path = f"{info[1]}/{non_existed_files[tag]}" + cat_cmd = f"cat {file_path}" - self.test.log.info('Check %s should not exist', file_path) + self.test.log.info("Check %s should not exist", file_path) s, o = session.cmd_status_output(cat_cmd) if s == 0: - self.test.fail('File (%s) exists' % non_existed_files[tag]) - elif 'No such file' not in o.strip(): - self.test.fail('Unknown error: %s' % o) + self.test.fail(f"File ({non_existed_files[tag]}) exists") + elif "No such file" not in o.strip(): + self.test.fail(f"Unknown error: {o}") finally: if session: session.close() def prepare_test(self): self.start_qsd() - add_vubp_into_boot(self.params['source_images'], self.params) - super(QSDBackupPullMode, self).prepare_test() + add_vubp_into_boot(self.params["source_images"], self.params) + super().prepare_test() def post_test(self): - super(QSDBackupPullMode, self).post_test() + super().post_test() self.qsd.stop_daemon() def do_test(self): @@ -248,7 +260,7 @@ def do_test(self): self.full_copyif() self.cancel_backup_jobs() self.stop_export_full_backups() - self.add_target_data_disks('inc') + self.add_target_data_disks("inc") self.do_incremental_backup() self.export_inc_backups() self.generate_inc_files() diff --git a/qemu/tests/qsd_backup_push.py b/qemu/tests/qsd_backup_push.py index 7b6512131e..f0fcec8d3a 100644 --- a/qemu/tests/qsd_backup_push.py +++ b/qemu/tests/qsd_backup_push.py @@ -1,38 +1,33 @@ from functools import partial from avocado.utils import memory +from virttest import error_context, utils_misc -from virttest import utils_misc -from virttest import error_context - -from provider import backup_utils -from provider import blockdev_base +from provider import backup_utils, blockdev_base from provider.qsd import QsdDaemonDev class QSDBackupTest(blockdev_base.BlockdevBaseTest): - def __init__(self, test, params, env): - super(QSDBackupTest, self).__init__(test, params, env) + super().__init__(test, params, env) self.source_images = [] self.full_backups = [] self.inc_backups = [] self.bitmaps = [] self.rebase_targets = [] - for tag in params.objects('source_images'): + for tag in params.objects("source_images"): image_params = params.object_params(tag) image_chain = image_params.objects("image_backup_chain") - self.source_images.append("fmt_%s" % tag) - self.full_backups.append("drive_%s" % image_chain[0]) - self.inc_backups.append("drive_%s" % image_chain[1]) - self.bitmaps.append("bitmap_%s" % tag) + self.source_images.append(f"fmt_{tag}") + self.full_backups.append(f"drive_{image_chain[0]}") + self.inc_backups.append(f"drive_{image_chain[1]}") + self.bitmaps.append(f"bitmap_{tag}") inc_img_tag = image_chain[-1] inc_img_params = params.object_params(inc_img_tag) # rebase 'inc' image onto 'base' image, so inc's backing is base - inc_img_params['image_chain'] = image_params['image_backup_chain'] - inc_img = self.source_disk_define_by_params( - inc_img_params, inc_img_tag) + inc_img_params["image_chain"] = image_params["image_backup_chain"] + inc_img = self.source_disk_define_by_params(inc_img_params, inc_img_tag) target_func = partial(inc_img.rebase, params=inc_img_params) 
self.rebase_targets.append(target_func) @@ -63,20 +58,22 @@ def do_full_backup(self): self.source_images, self.full_backups, self.bitmaps, - **extra_options) + **extra_options, + ) def generate_inc_files(self): for tag in self.params.objects("source_images"): self.generate_data_file(tag) def do_incremental_backup(self): - extra_options = {'sync': 'incremental', 'auto_disable_bitmap': False} + extra_options = {"sync": "incremental", "auto_disable_bitmap": False} backup_utils.blockdev_batch_backup( self.qsd, self.source_images, self.inc_backups, self.bitmaps, - **extra_options) + **extra_options, + ) def rebase_target_disk(self): self.qsd.stop_daemon() @@ -89,7 +86,7 @@ def prepare_clone_vm(self): clone_params = self.main_vm.params.copy() for tag in self.params.objects("source_images"): img_params = self.params.object_params(tag) - image_chain = img_params.objects('image_backup_chain') + image_chain = img_params.objects("image_backup_chain") images = images.replace(tag, image_chain[-1]) qsd_images.append(image_chain[-1]) self.params["qsd_images_qsd1"] = " ".join(qsd_images) @@ -118,10 +115,10 @@ def verify_target_disk(self): def prepare_test(self): self.start_qsd() - super(QSDBackupTest, self).prepare_test() + super().prepare_test() def post_test(self): - super(QSDBackupTest, self).post_test() + super().post_test() self.qsd.stop_daemon() diff --git a/qemu/tests/qsd_block_commit.py b/qemu/tests/qsd_block_commit.py index f98fa6445e..0c600db42b 100644 --- a/qemu/tests/qsd_block_commit.py +++ b/qemu/tests/qsd_block_commit.py @@ -1,10 +1,9 @@ from virttest import data_dir +from provider import backup_utils, job_utils from provider.blockdev_commit_base import BlockDevCommitTest -from provider.virt_storage.storage_admin import sp_admin from provider.qsd import QsdDaemonDev, add_vubp_into_boot -from provider import backup_utils -from provider import job_utils +from provider.virt_storage.storage_admin import sp_admin class QSDCommitTest(BlockDevCommitTest): @@ -19,13 +18,12 @@ def start_qsd(self): def get_node_name(self, tag): if tag in self.params["device_tag"]: - return "fmt_%s" % tag + return f"fmt_{tag}" else: - return "drive_%s" % tag + return f"drive_{tag}" def prepare_snapshot_file(self, snapshot_tags): - self.snapshot_images = list( - map(self.get_image_by_tag, snapshot_tags)) + self.snapshot_images = list(map(self.get_image_by_tag, snapshot_tags)) params = self.params.copy() params.setdefault("target_path", data_dir.get_data_dir()) for tag in snapshot_tags: @@ -42,8 +40,7 @@ def create_snapshots(self, snapshot_tags, device): if idx == 0: arguments["node"] = self.device_node else: - arguments["node"] = self.get_node_name( - snapshot_tags[idx - 1]) + arguments["node"] = self.get_node_name(snapshot_tags[idx - 1]) self.qsd.monitor.cmd(cmd, dict(arguments)) for info in self.disks_info: if device in info: @@ -75,11 +72,12 @@ def commit_snapshots(self): def pre_test(self): self.start_qsd() self.main_vm.params["extra_params"] = add_vubp_into_boot( - self.params["device_tag"], self.params) - super(QSDCommitTest, self).pre_test() + self.params["device_tag"], self.params + ) + super().pre_test() def post_test(self): - super(QSDCommitTest, self).post_test() + super().post_test() self.qsd.stop_daemon() diff --git a/qemu/tests/qsd_block_mirror.py b/qemu/tests/qsd_block_mirror.py index 0f29b1c716..776296f7fa 100644 --- a/qemu/tests/qsd_block_mirror.py +++ b/qemu/tests/qsd_block_mirror.py @@ -6,10 +6,9 @@ class QSDMirrorTest(BlockdevMirrorWaitTest): - def __init__(self, test, params, env): - 
super(QSDMirrorTest, self).__init__(test, params, env) - self._source_nodes = ["fmt_%s" % src for src in self._source_images] + super().__init__(test, params, env) + self._source_nodes = [f"fmt_{src}" for src in self._source_images] def get_qsd_demon(self): qsd_name = self.params["qsd_namespaces"] @@ -24,7 +23,8 @@ def add_target_data_disks(self): """Hot plug target disks to VM with qmp monitor""" for tag in self._target_images: disk = self.target_disk_define_by_params( - self.params.object_params(tag), tag) + self.params.object_params(tag), tag + ) disk.hotplug(self.qsd) self.trash.append(disk) @@ -32,17 +32,19 @@ def blockdev_mirror(self): """Run block-mirror and wait job done""" try: for idx, source_node in enumerate(self._source_nodes): - backup_utils.blockdev_mirror(self.qsd, source_node, - self._target_nodes[idx], - **self._backup_options[idx]) + backup_utils.blockdev_mirror( + self.qsd, + source_node, + self._target_nodes[idx], + **self._backup_options[idx], + ) finally: memory.drop_caches() def _check_mirrored_block_node_attached(self, source_qdev, target_node): out = self.qsd.monitor.cmd("query-named-block-nodes")[0] if out.get("node-name") != target_node: - self.test.fail("Device is not attached to target node(%s)" - % target_node) + self.test.fail(f"Device is not attached to target node({target_node})") def clone_vm_with_mirrored_images(self): """Boot VM with mirrored data disks""" @@ -53,16 +55,16 @@ def clone_vm_with_mirrored_images(self): params = self.main_vm.params.copy() self.clone_vm = self.main_vm.clone(params=params) - self.params.update({"qsd_images_qsd1": ' '.join(self._target_images)}) + self.params.update({"qsd_images_qsd1": " ".join(self._target_images)}) self.start_qsd() self.clone_vm.create() self.clone_vm.verify_alive() - self.env.register_vm("%s_clone" % self.clone_vm.name, self.clone_vm) + self.env.register_vm(f"{self.clone_vm.name}_clone", self.clone_vm) def prepare_test(self): self.start_qsd() - super(QSDMirrorTest, self).prepare_test() + super().prepare_test() def do_test(self): self.blockdev_mirror() @@ -71,7 +73,7 @@ def do_test(self): self.verify_data_files() def post_test(self): - super(QSDMirrorTest, self).post_test() + super().post_test() self.qsd.stop_daemon() diff --git a/qemu/tests/qsd_block_stream.py b/qemu/tests/qsd_block_stream.py index 38c6c96b7c..552b801c78 100644 --- a/qemu/tests/qsd_block_stream.py +++ b/qemu/tests/qsd_block_stream.py @@ -1,13 +1,12 @@ -import time import json +import time -from virttest import error_context -from virttest import data_dir +from virttest import data_dir, error_context +from provider import backup_utils from provider.blockdev_stream_base import BlockDevStreamTest from provider.qsd import QsdDaemonDev, add_vubp_into_boot from provider.virt_storage.storage_admin import sp_admin -from provider import backup_utils class QSDStreamTest(BlockDevStreamTest): @@ -32,7 +31,7 @@ def check_backing_file(self): out = self.snapshot_image.info(output="json") info = json.loads(out) backing_file = info.get("backing-filename") - assert not backing_file, "Unexpect backing file(%s) found!" % backing_file + assert not backing_file, f"Unexpect backing file({backing_file}) found!" 
def do_test(self): self.snapshot_test() @@ -41,7 +40,8 @@ def do_test(self): self.params.update({"qsd_images_qsd1": self.snapshot_tag}) self.start_qsd() self.clone_vm.params["extra_params"] = add_vubp_into_boot( - self.snapshot_tag, self.params) + self.snapshot_tag, self.params + ) self.clone_vm.create() self.mount_data_disks() self.verify_data_file() @@ -49,23 +49,23 @@ def do_test(self): def pre_test(self): self.start_qsd() self.main_vm.params["extra_params"] = add_vubp_into_boot( - self.base_tag, self.params) - super(QSDStreamTest, self).pre_test() + self.base_tag, self.params + ) + super().pre_test() def create_snapshot(self): options = ["node", "overlay"] cmd = "blockdev-snapshot" arguments = self.params.copy_from_keys(options) - arguments.setdefault("overlay", "drive_%s" % self.snapshot_tag) + arguments.setdefault("overlay", f"drive_{self.snapshot_tag}") return self.qsd.monitor.cmd(cmd, dict(arguments)) def blockdev_stream(self): - backup_utils.blockdev_stream(self.qsd, self._top_device, - **self._stream_options) + backup_utils.blockdev_stream(self.qsd, self._top_device, **self._stream_options) time.sleep(0.5) def post_test(self): - super(QSDStreamTest, self).post_test() + super().post_test() self.qsd.stop_daemon() diff --git a/qemu/tests/qsd_blockdev_check.py b/qemu/tests/qsd_blockdev_check.py index d2201f30e0..5bc9f7e3e6 100644 --- a/qemu/tests/qsd_blockdev_check.py +++ b/qemu/tests/qsd_blockdev_check.py @@ -1,9 +1,11 @@ """QSD blockdev option test""" + import json -from provider.qsd import QsdDaemonDev from virttest import error_context +from provider.qsd import QsdDaemonDev + # This decorator makes the test function aware of context strings @error_context.context_aware @@ -16,14 +18,14 @@ def run(test, params, env): """ def _verify_blockdev(img, data): - prot_attrs = json.loads(params.get("qsd_image_protocol_%s" % img, "{}")) - fmt_attrs = json.loads(params.get("qsd_image_format_%s" % img, "{}")) + prot_attrs = json.loads(params.get(f"qsd_image_protocol_{img}", "{}")) + fmt_attrs = json.loads(params.get(f"qsd_image_format_{img}", "{}")) prot_node = {} fmt_node = {} for node in data: - if node["node-name"] == "prot_%s" % img: + if node["node-name"] == f"prot_{img}": prot_node = node - if node["node-name"] == "fmt_%s" % img: + if node["node-name"] == f"fmt_{img}": fmt_node = node if not fmt_node or not prot_node: test.fail("Can not find blockdev node") @@ -37,7 +39,7 @@ def _verify_blockdev(img, data): logger.info("Checking img %s %s ", img, k) if k == "cache": v["writeback"] = True - test.assertEqual(v, node[k], "Find unequal key %s" % k) + test.assertEqual(v, node[k], f"Find unequal key {k}") logger = test.log qsd = None diff --git a/qemu/tests/qsd_export_vhub_check.py b/qemu/tests/qsd_export_vhub_check.py index 042a021710..923f29e555 100644 --- a/qemu/tests/qsd_export_vhub_check.py +++ b/qemu/tests/qsd_export_vhub_check.py @@ -1,8 +1,9 @@ """QSD export vhost-user-blk option test""" -from provider.qsd import QsdDaemonDev from virttest import error_context +from provider.qsd import QsdDaemonDev + @error_context.context_aware def run(test, params, env): diff --git a/qemu/tests/qsd_hotplug_vubp.py b/qemu/tests/qsd_hotplug_vubp.py index e53b8980f7..705a05a8b3 100644 --- a/qemu/tests/qsd_hotplug_vubp.py +++ b/qemu/tests/qsd_hotplug_vubp.py @@ -1,11 +1,17 @@ """QSD hotplug vhost-user-blk-pci device test""" + import time -from provider.qsd import QsdDaemonDev, add_vubp_into_boot -from provider.qsd import plug_vubp_devices, unplug_vubp_devices from virttest import env_process, 
utils_disk, utils_misc from virttest.utils_disk import clean_partition_windows +from provider.qsd import ( + QsdDaemonDev, + add_vubp_into_boot, + plug_vubp_devices, + unplug_vubp_devices, +) + def run(test, params, env): """ @@ -22,7 +28,7 @@ def run(test, params, env): def _get_disk_by_size(img_tag, check_exist_flag=None): disk_params = params.object_params(img_tag) disk_size = disk_params["image_size"] - os_type = params['os_type'] + os_type = params["os_type"] disk = None if os_type != "windows": disks = utils_disk.get_linux_disks(session, True) @@ -36,7 +42,7 @@ def _get_disk_by_size(img_tag, check_exist_flag=None): if check_exist_flag is not None: if bool(disk) != check_exist_flag: - test.fail("Disk should exist %s" % check_exist_flag) + test.fail(f"Disk should exist {check_exist_flag}") logger.debug("Find disk is:%s", disk) return disk @@ -49,16 +55,18 @@ def _configure_disk(img_tag): os_type = params["os_type"] if os_type != "windows": - driver = utils_disk.configure_empty_linux_disk( - session, disk_id, disk_size)[0] + driver = utils_disk.configure_empty_linux_disk(session, disk_id, disk_size)[ + 0 + ] logger.debug("mount_point is %s", driver) - output_path = r"%s/test.dat" % driver + output_path = rf"{driver}/test.dat" else: guest_cmd = utils_misc.set_winutils_letter(session, guest_cmd) utils_disk.update_windows_disk_attributes(session, disk_id) driver = utils_disk.configure_empty_windows_disk( - session, disk_id, disk_size)[0] - output_path = r"%s:\\test.dat" % driver + session, disk_id, disk_size + )[0] + output_path = rf"{driver}:\\test.dat" guest_cmd = guest_cmd % output_path session.cmd(guest_cmd) @@ -69,10 +77,10 @@ def _configure_disk(img_tag): qsd_name = params["qsd_namespaces"] qsd = QsdDaemonDev(qsd_name, params) qsd.start_daemon() - img = params["qsd_images_%s" % qsd_name] + img = params[f"qsd_images_{qsd_name}"] add_vubp_into_boot(img, params, 6) - params["start_vm"] = 'yes' + params["start_vm"] = "yes" login_timeout = params.get_numeric("login_timeout", 360) env_process.preprocess_vm(test, params, env, params.get("main_vm")) diff --git a/qemu/tests/qsd_install.py b/qemu/tests/qsd_install.py index 96be88030a..6bdd03aa94 100644 --- a/qemu/tests/qsd_install.py +++ b/qemu/tests/qsd_install.py @@ -1,8 +1,9 @@ """QSD installation test""" -from provider.qsd import QsdDaemonDev, add_vubp_into_boot from virttest.tests import unattended_install +from provider.qsd import QsdDaemonDev, add_vubp_into_boot + def run(test, params, env): """ diff --git a/qemu/tests/qsd_live_snapshot.py b/qemu/tests/qsd_live_snapshot.py index 5f6f911687..b0903bd507 100644 --- a/qemu/tests/qsd_live_snapshot.py +++ b/qemu/tests/qsd_live_snapshot.py @@ -1,11 +1,11 @@ """Live snpashot test with qsd exposed image""" -from provider.qsd import QsdDaemonDev, add_vubp_into_boot + +from virttest import data_dir, error_context + from provider.blockdev_snapshot_base import BlockDevSnapshotTest +from provider.qsd import QsdDaemonDev, add_vubp_into_boot from provider.virt_storage.storage_admin import sp_admin -from virttest import error_context -from virttest import data_dir - class QSDSnapshotTest(BlockDevSnapshotTest): def get_qsd_demon(self): @@ -35,7 +35,8 @@ def verify_snapshot(self): self.params.update({"qsd_images_qsd1": self.snapshot_tag}) self.start_qsd() self.clone_vm.params["extra_params"] = add_vubp_into_boot( - self.snapshot_tag, self.params) + self.snapshot_tag, self.params + ) self.clone_vm.create() self.clone_vm.verify_alive() if self.base_tag != "image1": @@ -45,19 +46,20 @@ def 
verify_snapshot(self): def pre_test(self): self.start_qsd() self.main_vm.params["extra_params"] = add_vubp_into_boot( - self.base_tag, self.params) + self.base_tag, self.params + ) self.main_vm.create() - super(QSDSnapshotTest, self).pre_test() + super().pre_test() def post_test(self): - super(QSDSnapshotTest, self).post_test() + super().post_test() self.qsd.stop_daemon() def create_snapshot(self): options = ["node", "overlay"] cmd = "blockdev-snapshot" arguments = self.params.copy_from_keys(options) - arguments.setdefault("overlay", "drive_%s" % self.snapshot_tag) + arguments.setdefault("overlay", f"drive_{self.snapshot_tag}") return self.qsd.monitor.cmd(cmd, dict(arguments)) diff --git a/qemu/tests/qsd_object_check.py b/qemu/tests/qsd_object_check.py index 22183a103b..46387168d9 100644 --- a/qemu/tests/qsd_object_check.py +++ b/qemu/tests/qsd_object_check.py @@ -1,8 +1,9 @@ """QSD throttle object test""" -from provider.qsd import QsdDaemonDev from virttest import error_context +from provider.qsd import QsdDaemonDev + # This decorator makes the test function aware of context strings @error_context.context_aware @@ -24,9 +25,11 @@ def run(test, params, env): for tg in params["check_groups"].split(): logger.info("Check throttle %s", tg) out = qsd.monitor.qom_get(tg, "limits") - test.assertEqual(out[params["key_%s" % tg]], - int(params["value_%s" % tg]), - "Unexpected throttle values :%s" % tg) + test.assertEqual( + out[params[f"key_{tg}"]], + int(params[f"value_{tg}"]), + f"Unexpected throttle values :{tg}", + ) qsd.stop_daemon() qsd = None finally: diff --git a/qemu/tests/qsd_pidfile_check.py b/qemu/tests/qsd_pidfile_check.py index ee540f756c..846beeac8d 100644 --- a/qemu/tests/qsd_pidfile_check.py +++ b/qemu/tests/qsd_pidfile_check.py @@ -1,10 +1,12 @@ """QSD pidfile option test""" + import os from avocado.utils import process -from provider.qsd import QsdDaemonDev from virttest import error_context +from provider.qsd import QsdDaemonDev + # This decorator makes the test function aware of context strings @error_context.context_aware @@ -27,7 +29,7 @@ def run(test, params, env): logger.info("Check pidfile %s", pidfile) test.assertTrue(os.path.exists(pidfile), "QSD pidfile is nonexistent") - pid_check_cmd = params['pid_check_cmd'] % (pidfile, qsd.sock_path) + pid_check_cmd = params["pid_check_cmd"] % (pidfile, qsd.sock_path) process.system(pid_check_cmd, shell=True) qsd.monitor = None diff --git a/qemu/tests/qsd_qmp_cmd_check.py b/qemu/tests/qsd_qmp_cmd_check.py index 31474a1436..f688cf398c 100644 --- a/qemu/tests/qsd_qmp_cmd_check.py +++ b/qemu/tests/qsd_qmp_cmd_check.py @@ -1,9 +1,11 @@ """QSD QMP commands test""" + import json -from provider.qsd import QsdDaemonDev from virttest.qemu_monitor import QMPCmdError +from provider.qsd import QsdDaemonDev + def run(test, params, env): """ @@ -34,8 +36,7 @@ def run(test, params, env): export_opts = img_attrs["export"] logger.debug("Check the export list") out = monitor.cmd("query-block-exports") - test.assertTrue(out[0]["id"] == export_opts["id"], - "Can not find export") + test.assertTrue(out[0]["id"] == export_opts["id"], "Can not find export") logger.debug("Delete the export,blockdev and object") monitor.block_export_del(export_opts["id"]) @@ -50,17 +51,19 @@ def run(test, params, env): monitor.cmd("object-add", obj_throttle) monitor.blockdev_add(prot_opts) monitor.blockdev_add(fmt_opts) - filter_opts = {"driver": "throttle", "node-name": "filter_node", - "throttle-group": obj_throttle["id"], - "file": fmt_opts["node-name"]} + filter_opts = { 
+ "driver": "throttle", + "node-name": "filter_node", + "throttle-group": obj_throttle["id"], + "file": fmt_opts["node-name"], + } monitor.blockdev_add(filter_opts) out = monitor.query_named_block_nodes() test.assertTrue(len(out) == 3, "Can not find blockdev") export_opts["node-name"] = filter_opts["node-name"] monitor.cmd("block-export-add", export_opts) out = monitor.query_block_exports() - test.assertTrue(out[0]["id"] == export_opts["id"], - "Can not find export") + test.assertTrue(out[0]["id"] == export_opts["id"], "Can not find export") logger.debug("Re-Delete the export,blockdev and object") monitor.block_export_del(export_opts["id"]) diff --git a/qemu/tests/qsd_vubp_options.py b/qemu/tests/qsd_vubp_options.py index 55c58d2cbe..69f39d8b20 100644 --- a/qemu/tests/qsd_vubp_options.py +++ b/qemu/tests/qsd_vubp_options.py @@ -1,8 +1,9 @@ """QSD vhost-user-blk-pci device options test""" -from provider.qsd import QsdDaemonDev, add_vubp_into_boot from virttest import env_process, utils_disk, utils_misc +from provider.qsd import QsdDaemonDev, add_vubp_into_boot + def run(test, params, env): """ @@ -16,7 +17,7 @@ def run(test, params, env): def _get_disk_by_size(img_tag, check_exist_flag=None): disk_params = params.object_params(img_tag) disk_size = disk_params["image_size"] - os_type = params['os_type'] + os_type = params["os_type"] disk = None if os_type != "windows": disks = utils_disk.get_linux_disks(session, True) @@ -30,7 +31,7 @@ def _get_disk_by_size(img_tag, check_exist_flag=None): if check_exist_flag is not None: if bool(disk) != check_exist_flag: - test.fail("Disk should exist %s" % check_exist_flag) + test.fail(f"Disk should exist {check_exist_flag}") logger.debug("Find disk is:%s", disk) return disk @@ -43,16 +44,18 @@ def _configure_disk(img_tag): os_type = params["os_type"] if os_type != "windows": - driver = utils_disk.configure_empty_linux_disk( - session, disk_id, disk_size)[0] + driver = utils_disk.configure_empty_linux_disk(session, disk_id, disk_size)[ + 0 + ] logger.debug("mount_point is %s", driver) - output_path = r"%s/test.dat" % driver + output_path = rf"{driver}/test.dat" else: guest_cmd = utils_misc.set_winutils_letter(session, guest_cmd) utils_disk.update_windows_disk_attributes(session, disk_id) driver = utils_disk.configure_empty_windows_disk( - session, disk_id, disk_size)[0] - output_path = r"%s:\\test.dat" % driver + session, disk_id, disk_size + )[0] + output_path = rf"{driver}:\\test.dat" guest_cmd = guest_cmd % output_path session.cmd(guest_cmd) @@ -63,10 +66,10 @@ def _configure_disk(img_tag): qsd_name = params["qsd_namespaces"] qsd = QsdDaemonDev(qsd_name, params) qsd.start_daemon() - img = params["qsd_images_%s" % qsd_name] + img = params[f"qsd_images_{qsd_name}"] add_vubp_into_boot(img, params, 6) - params["start_vm"] = 'yes' + params["start_vm"] = "yes" login_timeout = params.get_numeric("login_timeout", 360) env_process.preprocess_vm(test, params, env, params.get("main_vm")) diff --git a/qemu/tests/queues_number_test.py b/qemu/tests/queues_number_test.py index 980cc0e8f2..a934514ded 100644 --- a/qemu/tests/queues_number_test.py +++ b/qemu/tests/queues_number_test.py @@ -1,9 +1,6 @@ import re -from virttest import error_context -from virttest import utils_net -from virttest import utils_test -from virttest import utils_misc +from virttest import error_context, utils_misc, utils_net, utils_test from provider import netperf_test @@ -22,6 +19,7 @@ def run(test, params, env): :param params: Dictionary with the test parameters. 
:param env: Dictionary with test environment. """ + def change_queues_number(ifname, q_number, queues_status=None): """ change queues number @@ -35,8 +33,10 @@ def change_queues_number(ifname, q_number, queues_status=None): err_msg = "" expect_q_number = q_number if q_number != queues_status[1] and q_number <= queues_status[0]: - if (cur_queues_status[1] != q_number - or cur_queues_status[0] != queues_status[0]): + if ( + cur_queues_status[1] != q_number + or cur_queues_status[0] != queues_status[0] + ): err_msg = "Param is valid, but change queues failed, " elif cur_queues_status != queues_status: if q_number != queues_status[1]: @@ -45,12 +45,12 @@ def change_queues_number(ifname, q_number, queues_status=None): expect_q_number = queues_status[1] if len(err_msg) > 0: - err_msg += "current queues set is %s, " % cur_queues_status[1] - err_msg += "max allow queues set is %s, " % cur_queues_status[0] - err_msg += "when run cmd: '%s', " % mq_set_cmd - err_msg += "expect queues are %s," % expect_q_number - err_msg += "expect max allow queues are %s, " % queues_status[0] - err_msg += "output: '%s'" % output + err_msg += f"current queues set is {cur_queues_status[1]}, " + err_msg += f"max allow queues set is {cur_queues_status[0]}, " + err_msg += f"when run cmd: '{mq_set_cmd}', " + err_msg += f"expect queues are {expect_q_number}," + err_msg += f"expect max allow queues are {queues_status[0]}, " + err_msg += f"output: '{output}'" test.fail(err_msg) return cur_queues_status @@ -59,15 +59,15 @@ def get_queues_status(ifname): """ Get queues status """ - mq_get_cmd = "ethtool -l %s" % ifname + mq_get_cmd = f"ethtool -l {ifname}" nic_mq_info = session.cmd_output(mq_get_cmd) queues_reg = re.compile(r"Combined:\s+(\d)", re.I) queues_info = queues_reg.findall(" ".join(nic_mq_info.splitlines())) if len(queues_info) != 2: err_msg = "Oops, get guest queues info failed, " err_msg += "make sure your guest support MQ.\n" - err_msg += "Check cmd is: '%s', " % mq_get_cmd - err_msg += "Command output is: '%s'." % nic_mq_info + err_msg += f"Check cmd is: '{mq_get_cmd}', " + err_msg += f"Command output is: '{nic_mq_info}'." test.cancel(err_msg) return [int(x) for x in queues_info] @@ -78,9 +78,8 @@ def ping_test(dest_ip, ping_time, ping_lost_ratio): _, output = utils_net.ping(dest=dest_ip, timeout=ping_time) packets_lost = utils_test.get_loss_ratio(output) if packets_lost > ping_lost_ratio: - err = " %s%% packages lost during ping. " % packets_lost - err += "Ping command log:\n %s" % "\n".join( - output.splitlines()[-3:]) + err = f" {packets_lost}% packages lost during ping. 
" + err += "Ping command log:\n {}".format("\n".join(output.splitlines()[-3:])) test.fail(err) login_timeout = params.get_numeric("login_timeout", 360) @@ -102,10 +101,12 @@ def ping_test(dest_ip, ping_time, ping_lost_ratio): ifname = utils_net.get_linux_ifname(session_serial, nic.mac) queues = int(nic.queues) change_queues_number(ifname, queues) - error_context.context("Run test %s background" % netperf_stress, - test.log.info) + error_context.context( + f"Run test {netperf_stress} background", test.log.info + ) stress_thread = utils_misc.InterruptedThread( - netperf_test.netperf_stress, (test, params, vm)) + netperf_test.netperf_stress, (test, params, vm) + ) stress_thread.start() # ping test @@ -114,8 +115,7 @@ def ping_test(dest_ip, ping_time, ping_lost_ratio): bg_ping = utils_misc.InterruptedThread(ping_test, args) bg_ping.start() - error_context.context("Change queues number repeatedly", - test.log.info) + error_context.context("Change queues number repeatedly", test.log.info) repeat_counts = params.get_numeric("repeat_counts") for nic in vm.virtnet: queues = int(nic.queues) @@ -125,13 +125,14 @@ def ping_test(dest_ip, ping_time, ping_lost_ratio): ifname = utils_net.get_linux_ifname(session_serial, nic.mac) change_list = params.get("change_list").split(",") for repeat_num in range(repeat_counts): - error_context.context("Change queues number -- %sth" - % repeat_num, test.log.info) + error_context.context( + f"Change queues number -- {repeat_num}th", test.log.info + ) queues_status = get_queues_status(ifname) for q_number in change_list: - queues_status = change_queues_number(ifname, - int(q_number), - queues_status) + queues_status = change_queues_number( + ifname, int(q_number), queues_status + ) test.log.info("wait for background test finish") try: @@ -146,7 +147,7 @@ def ping_test(dest_ip, ping_time, ping_lost_ratio): bg_ping.join() except Exception as err: txt = "Fail to wait background ping test finish. " - txt += "Got error message %s" % err + txt += f"Got error message {err}" test.fail(txt) if required_reboot: diff --git a/qemu/tests/raw_image_create_and_check.py b/qemu/tests/raw_image_create_and_check.py index 3cc399298f..c92996b640 100755 --- a/qemu/tests/raw_image_create_and_check.py +++ b/qemu/tests/raw_image_create_and_check.py @@ -1,7 +1,6 @@ import json from avocado.utils import process - from virttest import data_dir from virttest.qemu_io import QemuIOSystem from virttest.qemu_storage import QemuImg @@ -18,24 +17,27 @@ def run(test, params, env): :param params: Dictionary with the test parameters :param env: Dictionary with test environment. """ + def _qemu_io(img, cmd): """Run qemu-io cmd to a given img.""" test.log.info("Run qemu-io %s", img.image_filename) try: QemuIOSystem(test, params, img.image_filename).cmd_output(cmd, 120) except process.CmdError as err: - test.fail("qemu-io to '%s' failed: %s." 
% (img.image_filename, err)) + test.fail(f"qemu-io to '{img.image_filename}' failed: {err}.") def _check_img_size(img_info, defined_sizes, size_keys): """Check the size info of the image""" for defined_size, size_key in zip(defined_sizes, size_keys): - test.log.info("Check the '%s' size info of %s", - size_key, source.image_filename) + test.log.info( + "Check the '%s' size info of %s", size_key, source.image_filename + ) defined_size = normalize_data_size(defined_size, "B") get_size = img_info[size_key] if int(defined_size) != int(get_size): - test.fail("Got unexpected size '%s', expected size is '%s'" - % (get_size, defined_size)) + test.fail( + f"Got unexpected size '{get_size}', expected size is '{defined_size}'" + ) src_image = params["images"] img_dir = data_dir.get_data_dir() @@ -43,8 +45,11 @@ def _check_img_size(img_info, defined_sizes, size_keys): source = QemuImg(params.object_params(src_image), img_dir, src_image) source.create(source.params) - _qemu_io(source, 'write -P 1 0 %s' % write_size) + _qemu_io(source, f"write -P 1 0 {write_size}") src_info = json.loads(source.info(output="json")) - _check_img_size(src_info, [write_size, params["image_size_test"]], - ["actual-size", "virtual-size"]) + _check_img_size( + src_info, + [write_size, params["image_size_test"]], + ["actual-size", "virtual-size"], + ) diff --git a/qemu/tests/rdtsc_sync_test.py b/qemu/tests/rdtsc_sync_test.py index a8062fb8c0..7d2488fcf6 100644 --- a/qemu/tests/rdtsc_sync_test.py +++ b/qemu/tests/rdtsc_sync_test.py @@ -3,10 +3,7 @@ import time from avocado.utils import process - -from virttest import utils_misc -from virttest import data_dir -from virttest import error_context +from virttest import data_dir, error_context, utils_misc @error_context.context_aware @@ -29,28 +26,33 @@ def run(test, params, env): test_cmd = params["test_cmd"] % src_test_binary qemu_bin = utils_misc.get_qemu_binary(params) - qemu_cmd = '%s %s' % (qemu_bin, test_cmd) - test.log.info("Send host command: %s" % qemu_cmd) + qemu_cmd = f"{qemu_bin} {test_cmd}" + test.log.info("Send host command: %s", qemu_cmd) process.run(cmd=qemu_cmd, verbose=True, ignore_status=True, shell=True) - qemu_pid = process.getoutput("pgrep -f %s" % src_test_binary, shell=True) + qemu_pid = process.getoutput(f"pgrep -f {src_test_binary}", shell=True) if not qemu_pid: test.fail("QEMU start failed!") time.sleep(5) - process.run('echo -e \'{"execute":"qmp_capabilities"}' - '{"execute":"system_reset"}\'|nc -U /tmp/mm', - shell=True, verbose=True) + process.run( + 'echo -e \'{"execute":"qmp_capabilities"}' + '{"execute":"system_reset"}\'|nc -U /tmp/mm', + shell=True, + verbose=True, + ) time.sleep(5) - process.run('echo -e \'{"execute":"qmp_capabilities"}' - '{"execute":"quit"}\'|nc -U /tmp/mm', - shell=True, verbose=True) + process.run( + 'echo -e \'{"execute":"qmp_capabilities"}' '{"execute":"quit"}\'|nc -U /tmp/mm', + shell=True, + verbose=True, + ) - qemu_pid = process.getoutput("pgrep -f %s" % src_test_binary, shell=True) + qemu_pid = process.getoutput(f"pgrep -f {src_test_binary}", shell=True) if qemu_pid: test.fail("QEMU quit failed!") is_file = os.path.exists(log_file) if not is_file: - test.fail("Can't find the log file %s" % log_file) + test.fail(f"Can't find the log file {log_file}") value_a = [] value_b = [] @@ -59,12 +61,12 @@ def run(test, params, env): f = open(log_file) lines = f.readlines() for line in lines[2:]: - if not reset and line.startswith('rdtsc'): + if not reset and line.startswith("rdtsc"): rdtsc = int(re.findall(r"\d+", line)[0]) 
value_a.append(rdtsc) elif line.startswith("PM"): reset = True - elif reset and line.startswith('rdtsc'): + elif reset and line.startswith("rdtsc"): rdtsc = int(re.findall(r"\d+", line)[0]) value_b.append(rdtsc) @@ -76,7 +78,7 @@ def run(test, params, env): test.fail("rdtsc isn't increasing order after system_reset.") if value_a[-1] <= value_b[0]: test.fail("rdtsc doesn't decrease at first after system_reset.") - test.log.info('Test passed as rdtsc behaviour is same!') + test.log.info("Test passed as rdtsc behaviour is same!") finally: f.close() - process.run("rm -rf %s" % log_file) + process.run(f"rm -rf {log_file}") diff --git a/qemu/tests/readonly_disk.py b/qemu/tests/readonly_disk.py index 7c83654731..674bb21bd5 100644 --- a/qemu/tests/readonly_disk.py +++ b/qemu/tests/readonly_disk.py @@ -1,7 +1,4 @@ -from virttest import error_context -from virttest import env_process -from virttest import utils_misc -from virttest import utils_disk +from virttest import env_process, error_context, utils_disk, utils_misc @error_context.context_aware @@ -18,39 +15,44 @@ def run(test, params, env): :param params: Dictionary with the test parameters :param env: Dictionary with test environment. """ - error_context.context( - "TEST STEPS 1: Try to log into guest.", test.log.info) + error_context.context("TEST STEPS 1: Try to log into guest.", test.log.info) vm = env.get_vm(params["main_vm"]) vm.verify_alive() timeout = float(params.get("login_timeout", 240)) session = vm.wait_for_login(timeout=timeout) error_context.context( - "TEST STEPS 2: Format the disk and copy file to it", test.log.info) + "TEST STEPS 2: Format the disk and copy file to it", test.log.info + ) os_type = params["os_type"] copy_cmd = params.get("copy_cmd", "copy %s %s") fstype = params.get("fstype", "ntfs") data_image_size = params.get("image_size_data", "1G") - data_image_num = int(params.get("data_image_num", - len(params.objects("images")) - 1)) - error_context.context("Get windows disk index that to " - "be formatted", test.log.info) + data_image_num = int( + params.get("data_image_num", len(params.objects("images")) - 1) + ) + error_context.context( + "Get windows disk index that to " "be formatted", test.log.info + ) disk_index_list = utils_disk.get_windows_disks_index(session, data_image_size) if len(disk_index_list) < data_image_num: - test.fail("Fail to list all data disks. " - "Set disk number: %d, " - "get disk number in guest: %d." - % (data_image_num, len(disk_index_list))) + test.fail( + "Fail to list all data disks. " + "Set disk number: %d, " + "get disk number in guest: %d." % (data_image_num, len(disk_index_list)) + ) src_file = utils_misc.set_winutils_letter( - session, params["src_file"], label="WIN_UTILS") - error_context.context("Clear readonly for all disks and online " - "them in guest.", test.log.info) + session, params["src_file"], label="WIN_UTILS" + ) + error_context.context( + "Clear readonly for all disks and online " "them in guest.", test.log.info + ) if not utils_disk.update_windows_disk_attributes(session, disk_index_list): test.fail("Failed to update windows disk attributes.") - error_context.context("Format disk %s in guest." 
% disk_index_list[0], - test.log.info) + error_context.context(f"Format disk {disk_index_list[0]} in guest.", test.log.info) drive_letter = utils_disk.configure_empty_disk( - session, disk_index_list[0], data_image_size, os_type, fstype=fstype) + session, disk_index_list[0], data_image_size, os_type, fstype=fstype + ) if not drive_letter: test.fail("Fail to format disks.") dst_file = params["dst_file"] % drive_letter[0] @@ -63,8 +65,8 @@ def run(test, params, env): vm.destroy() data_img = params.get("images").split()[-1] - params["image_readonly_%s" % data_img] = "yes" - params["force_create_image_%s" % data_img] = "no" + params[f"image_readonly_{data_img}"] = "yes" + params[f"force_create_image_{data_img}"] = "no" env_process.preprocess(test, params, env) vm = env.get_vm(params["main_vm"]) vm.verify_alive() @@ -72,16 +74,19 @@ def run(test, params, env): error_context.context( "TEST STEPS 4: Write to the readonly disk expect:" - "The media is write protected", test.log.info) + "The media is write protected", + test.log.info, + ) dst_file_readonly = params["dst_file_readonly"] % drive_letter[0] o = session.cmd_output(copy_cmd % (src_file, dst_file_readonly)) if not o.find("write protect"): - test.fail("Write in readonly disk should failed\n. {}".format(o)) + test.fail(f"Write in readonly disk should failed\n. {o}") error_context.context( - "TEST STEPS 5: Try to read from the readonly disk", test.log.info) + "TEST STEPS 5: Try to read from the readonly disk", test.log.info + ) s, o = session.cmd_status_output(copy_cmd % (dst_file, r"C:\\")) if s != 0: - test.fail("Read file failed\n. {}".format(o)) + test.fail(f"Read file failed\n. {o}") session.close() diff --git a/qemu/tests/readonly_floppy.py b/qemu/tests/readonly_floppy.py index 22f37b58af..7e8c10650b 100644 --- a/qemu/tests/readonly_floppy.py +++ b/qemu/tests/readonly_floppy.py @@ -1,9 +1,7 @@ -import time import re +import time -from virttest import data_dir -from virttest import env_process -from virttest import error_context +from virttest import data_dir, env_process, error_context @error_context.context_aware @@ -42,8 +40,10 @@ def run(test, params, env): # if it is a windows OS,wait for 20 seconds until the floppies # are ready for testing if sleep: - test.log.info("Windows system being tested,sleep for 20" - " seconds until floppies are ready to be use") + test.log.info( + "Windows system being tested,sleep for 20" + " seconds until floppies are ready to be use" + ) time.sleep(20) try: # if it is a linux OS,load the floppy module @@ -57,28 +57,39 @@ def run(test, params, env): # Format floppy disk to test if it is readonly floppy_count = len(params.get("floppies", "").split()) - format_cmd_list = [params.get("format_floppy0_cmd"), - params.get("format_floppy1_cmd")] + format_cmd_list = [ + params.get("format_floppy0_cmd"), + params.get("format_floppy1_cmd"), + ] for floppy_index in range(floppy_count): - error_context.context("Format the %s floppy disk" % floppy_index, - test.log.info) + error_context.context( + f"Format the {floppy_index} floppy disk", test.log.info + ) s, o = session.cmd_status_output( format_cmd_list[floppy_index], - timeout=float(params.get("format_floppy_timeout", 60))) + timeout=float(params.get("format_floppy_timeout", 60)), + ) if s == 0: - test.error("Floppy disk %s is not readonly and" - " it's formatted successfully" % floppy_index) - error_context.context("Check the %s floppy is readonly" - % floppy_index, test.log.info) - found = re.search('(Read-only)|(protected)', o) + test.error( + f"Floppy disk 
{floppy_index} is not readonly and" + " it's formatted successfully" + ) + error_context.context( + f"Check the {floppy_index} floppy is readonly", test.log.info + ) + found = re.search("(Read-only)|(protected)", o) test.log.debug("Output of format command: %s", o) if not found: - test.error("Floppy disk %s cannot be formatted" - " for reasons other than readonly" % floppy_index) + test.error( + f"Floppy disk {floppy_index} cannot be formatted" + " for reasons other than readonly" + ) else: - test.log.info("Floppy disk %s is Read-only and cannot be" - " formatted", floppy_index) + test.log.info( + "Floppy disk %s is Read-only and cannot be" " formatted", + floppy_index, + ) finally: if session: diff --git a/qemu/tests/rebase_negative_test.py b/qemu/tests/rebase_negative_test.py index 2aa336737e..0408f28fd9 100644 --- a/qemu/tests/rebase_negative_test.py +++ b/qemu/tests/rebase_negative_test.py @@ -1,10 +1,7 @@ import re -from virttest import qemu_storage -from virttest import error_context -from virttest import data_dir - from avocado.utils import process +from virttest import data_dir, error_context, qemu_storage @error_context.context_aware @@ -21,8 +18,7 @@ def run(test, params, env): :param env: Dictionary with test environment """ rebase_chain = params.get("rebase_list", "").split(";") - error_context.context("Change the backing file of snapshot", - test.log.info) + error_context.context("Change the backing file of snapshot", test.log.info) for images in rebase_chain: output = "" images = re.split(r"\s*>\s*", images) @@ -33,42 +29,39 @@ def run(test, params, env): msg = "Invalid format of'rebase_chain' params \n" msg += "format like: 'image > base;image> base2'" test.error(msg) - negtive_test = params.get("negtive_test_%s" % image, "no") + negtive_test = params.get(f"negtive_test_{image}", "no") params["image_chain"] = " ".join([base, image]) params["base_image_filename"] = image t_params = params.object_params(image) cache_mode = t_params.get("cache_mode", None) - rebase_test = qemu_storage.QemuImg(t_params, - data_dir.get_data_dir(), image) + rebase_test = qemu_storage.QemuImg(t_params, data_dir.get_data_dir(), image) try: rebase_test.rebase(t_params, cache_mode) if negtive_test == "yes": - msg = "Fail to trigger negative image('%s') rebase" % image + msg = f"Fail to trigger negative image('{image}') rebase" test.fail(msg) except process.CmdError as err: output = err.result.stderr.decode() - test.log.info("Rebase image('%s') failed: %s.", - image, output) + test.log.info("Rebase image('%s') failed: %s.", image, output) if negtive_test == "no": - msg = "Fail to rebase image('%s'): %s" % (image, output) + msg = f"Fail to rebase image('{image}'): {output}" test.fail(msg) if "(core dumped)" in output: msg = "qemu-img core dumped when change" - msg += " image('%s') backing file to %s" % (image, base) + msg += f" image('{image}') backing file to {base}" test.fail(msg) image_info = rebase_test.info() if not image_info: - msg = "Fail to get image('%s') info" % image + msg = f"Fail to get image('{image}') info" test.fail(msg) - backingfile = re.search(r'backing file: +(.*)', - image_info, re.M) + backingfile = re.search(r"backing file: +(.*)", image_info, re.M) base_name = rebase_test.base_image_filename if not output: if not backingfile: - msg = "Expected backing file: %s" % base_name + msg = f"Expected backing file: {base_name}" msg += " Actual backing file is null!" 
test.fail(msg) elif base_name not in backingfile.group(0): - msg = "Expected backing file: %s" % base_name - msg += " Actual backing file: %s" % backingfile + msg = f"Expected backing file: {base_name}" + msg += f" Actual backing file: {backingfile}" test.fail(msg) diff --git a/qemu/tests/rebase_onto_no_backing_file.py b/qemu/tests/rebase_onto_no_backing_file.py index a4339d579b..7ee20176c8 100644 --- a/qemu/tests/rebase_onto_no_backing_file.py +++ b/qemu/tests/rebase_onto_no_backing_file.py @@ -2,9 +2,9 @@ from avocado import fail_on from avocado.utils import process +from virttest import data_dir, qemu_storage + from provider import qemu_img_utils as img_utils -from virttest import data_dir -from virttest import qemu_storage def run(test, params, env): @@ -25,33 +25,33 @@ def run(test, params, env): :param params: Dictionary with the test parameters :param env: Dictionary with test environment """ + def _verify_image_backing_file(info_output, base): """Verify backing image filename and format.""" backing_filename = info_output["backing-filename"] backing_format = info_output.get("backing-filename-format") - backing_filename_desired = qemu_storage.get_image_repr(base.tag, - params, - root_dir) + backing_filename_desired = qemu_storage.get_image_repr( + base.tag, params, root_dir + ) if backing_filename != backing_filename_desired: - test.fail("backing image name mismatch, got %s, expect %s" % ( - backing_filename, backing_filename_desired - )) + test.fail( + f"backing image name mismatch, got {backing_filename}, expect {backing_filename_desired}" + ) if backing_format: backing_format_desired = base.image_format if backing_format != backing_format_desired: test.fail( - "backing image format mismatch, got %s, expect %s" % ( - backing_format, backing_format_desired - )) + f"backing image format mismatch, got {backing_format}, expect {backing_format_desired}" + ) def _verify_qcow2_compatible(info_output, image): """Verify qcow2 compat version.""" compat = info_output["format-specific"]["data"]["compat"] compat_desired = image.params.get("qcow2_compatible", "1.1") if compat != compat_desired: - test.fail("%s image compat version mismatch, got %s, expect %s" % ( - image.tag, compat, compat_desired - )) + test.fail( + f"{image.tag} image compat version mismatch, got {compat}, expect {compat_desired}" + ) def _verify_no_backing_file(info_output): """Verify snapshot has no backing file for this case.""" @@ -61,11 +61,12 @@ def _verify_no_backing_file(info_output): test.fail("the snapshot has backing file after rebase.") images = params["image_chain"].split() - params["image_name_%s" % images[0]] = params["image_name"] - params["image_format_%s" % images[0]] = params["image_format"] + params[f"image_name_{images[0]}"] = params["image_name"] + params[f"image_format_{images[0]}"] = params["image_format"] root_dir = data_dir.get_data_dir() - base, sn = (qemu_storage.QemuImg(params.object_params(tag), root_dir, tag) - for tag in images) + base, sn = ( + qemu_storage.QemuImg(params.object_params(tag), root_dir, tag) for tag in images + ) md5sum_bin = params.get("md5sum_bin", "md5sum") sync_bin = params.get("sync_bin", "sync") @@ -110,10 +111,9 @@ def _verify_no_backing_file(info_output): test.log.info("check the md5 value of tmp file %s after rebase", guest_file) session = vm.wait_for_login() - img_utils.check_md5sum(guest_file, md5sum_bin, session, - md5_value_to_check=hashval) + img_utils.check_md5sum(guest_file, md5sum_bin, session, md5_value_to_check=hashval) session.close() vm.destroy() # if 
nothing goes wrong, remove snapshot - params["remove_image_%s" % sn.tag] = "yes" + params[f"remove_image_{sn.tag}"] = "yes" diff --git a/qemu/tests/rebase_onto_qcow2.py b/qemu/tests/rebase_onto_qcow2.py index 75b8c6dc33..c497738099 100644 --- a/qemu/tests/rebase_onto_qcow2.py +++ b/qemu/tests/rebase_onto_qcow2.py @@ -2,9 +2,9 @@ from avocado import fail_on from avocado.utils import process +from virttest import data_dir, qemu_storage + from provider import qemu_img_utils as img_utils -from virttest import data_dir -from virttest import qemu_storage def run(test, params, env): @@ -21,25 +21,29 @@ def verify_backing_file(image): """Verify image backing file.""" info_output = json.loads(image.info(output="json")) backing_params = image.params.object_params(image.base_tag) - backing_file = qemu_storage.get_image_repr(image.base_tag, - backing_params, root_dir) + backing_file = qemu_storage.get_image_repr( + image.base_tag, backing_params, root_dir + ) backing_file_info = info_output["backing-filename"] if backing_file != backing_file_info: - err_msg = "backing file mismatch, got %s, expected %s." % \ - (backing_file_info, backing_file) + err_msg = f"backing file mismatch, got {backing_file_info}, expected {backing_file}." raise ValueError(err_msg) timeout = int(params.get("timeout", 360)) root_dir = data_dir.get_data_dir() images = params["image_chain"].split() - params["image_name_%s" % images[0]] = params["image_name"] - params["image_format_%s" % images[0]] = params["image_format"] - images = [qemu_storage.QemuImg(params.object_params(tag), - root_dir, tag) for tag in images] + params[f"image_name_{images[0]}"] = params["image_name"] + params[f"image_format_{images[0]}"] = params["image_format"] + images = [ + qemu_storage.QemuImg(params.object_params(tag), root_dir, tag) for tag in images + ] for image in images[1:]: - test.log.debug("create snapshot %s based on %s", - image.image_filename, image.base_image_filename) + test.log.debug( + "create snapshot %s based on %s", + image.image_filename, + image.base_image_filename, + ) image.create(image.params) md5sum_bin = params.get("md5sum_bin", "md5sum") @@ -48,21 +52,20 @@ def verify_backing_file(image): for image in images: vm = img_utils.boot_vm_with_images(test, params, env, (image.tag,)) guest_file = params["guest_tmp_filename"] % image.tag - test.log.debug("save tmp file %s in image %s", - guest_file, image.image_filename) + test.log.debug("save tmp file %s in image %s", guest_file, image.image_filename) img_utils.save_random_file_to_vm(vm, guest_file, 2048 * 100, sync_bin) session = vm.wait_for_login(timeout=timeout) - hashes[guest_file] = img_utils.check_md5sum(guest_file, - md5sum_bin, session) + hashes[guest_file] = img_utils.check_md5sum(guest_file, md5sum_bin, session) session.close() vm.destroy() snapshot = images[-1] rebase_target = params["rebase_target"] # ensure size equals to the base - params["image_size_%s" % rebase_target] = images[0].size + params[f"image_size_{rebase_target}"] = images[0].size rebase_target = qemu_storage.QemuImg( - params.object_params(rebase_target), root_dir, rebase_target) + params.object_params(rebase_target), root_dir, rebase_target + ) rebase_target.create(rebase_target.params) test.log.debug("rebase snapshot") snapshot.base_tag = rebase_target.tag @@ -73,11 +76,12 @@ def verify_backing_file(image): vm = img_utils.boot_vm_with_images(test, params, env, (snapshot.tag,)) session = vm.wait_for_login(timeout=timeout) for guest_file, hashval in hashes.items(): - img_utils.check_md5sum(guest_file, 
md5sum_bin, session, - md5_value_to_check=hashval) + img_utils.check_md5sum( + guest_file, md5sum_bin, session, md5_value_to_check=hashval + ) session.close() vm.destroy() # if nothing goes wrong, remove newly created images - params["remove_image_%s" % snapshot.tag] = "yes" - params["images"] += " %s" % rebase_target.tag + params[f"remove_image_{snapshot.tag}"] = "yes" + params["images"] += f" {rebase_target.tag}" diff --git a/qemu/tests/rebase_second_snapshot_to_base.py b/qemu/tests/rebase_second_snapshot_to_base.py index 3ce9d59d54..d6af15a5d6 100644 --- a/qemu/tests/rebase_second_snapshot_to_base.py +++ b/qemu/tests/rebase_second_snapshot_to_base.py @@ -1,9 +1,7 @@ import json from avocado import fail_on - -from virttest import data_dir -from virttest import qemu_storage +from virttest import data_dir, qemu_storage from provider import qemu_img_utils as img_utils @@ -26,13 +24,17 @@ def run(test, params, env): :param params: Dictionary with the test parameters :param env: Dictionary with test environment """ + def get_img_objs(images, params): - return [qemu_storage.QemuImg(params.object_params(tag), root_dir, tag) - for tag in images] + return [ + qemu_storage.QemuImg(params.object_params(tag), root_dir, tag) + for tag in images + ] @fail_on((AssertionError,)) def verify_qemu_img_info_backing_chain(output): """Verify qemu-img info output for this case.""" + def _get_compat_version(): """Get compat version from params.""" return params.get("qcow2_compatible", "1.1") @@ -43,23 +45,23 @@ def _get_compat_version(): if not image.base_tag: continue base_params = params.object_params(image.base_tag) - base_image = qemu_storage.get_image_repr(image.base_tag, - base_params, root_dir) + base_image = qemu_storage.get_image_repr( + image.base_tag, base_params, root_dir + ) base_format = image.base_format compat = _get_compat_version() base_image_info = img_info.get("backing-filename") assert base_image == base_image_info, "backing image mismatches" if base_image_info and not base_image_info.startswith("json"): base_format_info = img_info.get("backing-filename-format") - assert base_format == base_format_info, \ - "backing format mismatches" + assert base_format == base_format_info, "backing format mismatches" compat_info = img_info["format-specific"]["data"]["compat"] assert compat == compat_info, "compat mode mismatches" timeout = int(params.get("timeout", 240)) images = params["image_chain"].split() - params["image_name_%s" % images[0]] = params["image_name"] - params["image_format_%s" % images[0]] = params["image_format"] + params[f"image_name_{images[0]}"] = params["image_name"] + params[f"image_format_{images[0]}"] = params["image_format"] root_dir = data_dir.get_data_dir() images = get_img_objs(images, params) base, active_layer = images[0], images[-1] @@ -67,8 +69,11 @@ def _get_compat_version(): md5sum_bin = params.get("md5sum_bin", "md5sum") sync_bin = params.get("sync_bin", "sync") for image in images[1:]: - test.log.debug("Create snapshot %s based on %s", - image.image_filename, image.base_image_filename) + test.log.debug( + "Create snapshot %s based on %s", + image.image_filename, + image.base_image_filename, + ) image.create(image.params) info_output = json.loads(image.info(output="json")) verify_qemu_img_info_backing_chain(info_output) @@ -80,14 +85,13 @@ def _get_compat_version(): if rebase_mode == "safe" or image in (base, active_layer): vm = img_utils.boot_vm_with_images(test, params, env, (image.tag,)) guest_file = params["guest_tmp_filename"] % image.tag - 
test.log.debug("Create tmp file %s in image %s", guest_file, - image.image_filename) - img_utils.save_random_file_to_vm(vm, guest_file, - 2048 * 100, sync_bin) + test.log.debug( + "Create tmp file %s in image %s", guest_file, image.image_filename + ) + img_utils.save_random_file_to_vm(vm, guest_file, 2048 * 100, sync_bin) session = vm.wait_for_login(timeout=timeout) test.log.debug("Get md5 value fo the temporary file") - hashes[guest_file] = img_utils.check_md5sum(guest_file, - md5sum_bin, session) + hashes[guest_file] = img_utils.check_md5sum(guest_file, md5sum_bin, session) session.close() vm.destroy() @@ -95,13 +99,14 @@ def _get_compat_version(): if rebase_mode == "unsafe": for image in images: if image not in (base, active_layer): - test.log.debug("Remove the snapshot %s before rebase.", - image.image_filename) + test.log.debug( + "Remove the snapshot %s before rebase.", image.image_filename + ) image.remove() cache_mode = params.get("cache_mode") msg = "Rebase the snapshot %s to %s" - msg += "with cache %s." % cache_mode if cache_mode else "." + msg += f"with cache {cache_mode}." if cache_mode else "." test.log.info(msg) active_layer.base_tag = base.tag active_layer.rebase(active_layer.params, cache_mode) @@ -111,8 +116,9 @@ def _get_compat_version(): vm = img_utils.boot_vm_with_images(test, params, env, (active_layer.tag,)) session = vm.wait_for_login(timeout=timeout) for guest_file, hash_val in hashes.items(): - img_utils.check_md5sum(guest_file, md5sum_bin, session, - md5_value_to_check=hash_val) + img_utils.check_md5sum( + guest_file, md5sum_bin, session, md5_value_to_check=hash_val + ) session.close() vm.destroy() for image in images: diff --git a/qemu/tests/reboot_time.py b/qemu/tests/reboot_time.py index b8836914d8..343786c2b2 100644 --- a/qemu/tests/reboot_time.py +++ b/qemu/tests/reboot_time.py @@ -1,6 +1,4 @@ -from virttest import utils_misc -from virttest import env_process -from virttest import error_context +from virttest import env_process, error_context, utils_misc from virttest.staging import utils_memory @@ -26,12 +24,12 @@ def run(test, params, env): session = vm.wait_for_login(timeout=timeout) error_context.context("Set guest run level to 1", test.log.info) - single_user_cmd = params['single_user_cmd'] + single_user_cmd = params["single_user_cmd"] session.cmd(single_user_cmd) try: error_context.context("Restart guest", test.log.info) - session.cmd('sync') + session.cmd("sync") vm.destroy() error_context.context("Boot up guest", test.log.info) @@ -39,36 +37,37 @@ def run(test, params, env): vm.verify_alive() session = vm.wait_for_serial_login(timeout=timeout) - error_context.context("Send a 'reboot' command to the guest", - test.log.info) + error_context.context("Send a 'reboot' command to the guest", test.log.info) utils_memory.drop_caches() - session.cmd('reboot & exit', timeout=1, ignore_all_errors=True) + session.cmd("reboot & exit", timeout=1, ignore_all_errors=True) before_reboot_stamp = utils_misc.monotonic_time() - error_context.context("Boot up the guest and measure the boot time", - test.log.info) + error_context.context( + "Boot up the guest and measure the boot time", test.log.info + ) session = vm.wait_for_serial_login(timeout=timeout) reboot_time = utils_misc.monotonic_time() - before_reboot_stamp - test.write_test_keyval({'result': "%ss" % reboot_time}) + test.write_test_keyval({"result": f"{reboot_time}s"}) expect_time = int(params.get("expect_reboot_time", "30")) test.log.info("Reboot time: %ss", reboot_time) finally: try: 
error_context.context("Restore guest run level", test.log.info) - restore_level_cmd = params['restore_level_cmd'] + restore_level_cmd = params["restore_level_cmd"] session.cmd(restore_level_cmd) - session.cmd('sync') + session.cmd("sync") vm.destroy(gracefully=False) env_process.preprocess_vm(test, params, env, vm.name) vm.verify_alive() vm.wait_for_login(timeout=timeout) except Exception: - test.log.warning("Can not restore guest run level, " - "need restore the image") + test.log.warning( + "Can not restore guest run level, " "need restore the image" + ) params["restore_image_after_testing"] = "yes" if reboot_time > expect_time: - test.fail("Guest reboot is taking too long: %ss" % reboot_time) + test.fail(f"Guest reboot is taking too long: {reboot_time}s") session.close() diff --git a/qemu/tests/reject_qemu_img_info.py b/qemu/tests/reject_qemu_img_info.py index 07f271f8bb..100e6d41cf 100644 --- a/qemu/tests/reject_qemu_img_info.py +++ b/qemu/tests/reject_qemu_img_info.py @@ -1,10 +1,8 @@ from avocado.utils import process -from virttest import data_dir -from virttest import error_context +from virttest import data_dir, error_context from virttest.qemu_storage import QemuImg -from qemu.tests.qemu_disk_img import QemuImgTest -from qemu.tests.qemu_disk_img import generate_base_snapshot_pair +from qemu.tests.qemu_disk_img import QemuImgTest, generate_base_snapshot_pair @error_context.context_aware @@ -29,27 +27,28 @@ def run(test, params, env): :param params: Dictionary with the test parameters :param env: Dictionary with test environment. """ + def _boot_vm(boot_img): - error_context.context("Boot vm with %s." % boot_img, test.log.info) + error_context.context(f"Boot vm with {boot_img}.", test.log.info) vm.params["images"] = boot_img vm.create() vm.verify_alive() def _qemu_img_info(info_img, force_share=False): - error_context.context("Check qemu-img info with %s." % info_img, - test.log.info) + error_context.context(f"Check qemu-img info with {info_img}.", test.log.info) img_param = params.object_params(info_img) img = QemuImg(img_param, data_dir.get_data_dir(), info_img) img.info(force_share) def _verify_write_lock_err_msg(e, img_tag): - error_context.context("Verify qemu-img write lock err msg.", - test.log.info) + error_context.context("Verify qemu-img write lock err msg.", test.log.info) img_param = params.object_params(img_tag) img = QemuImg(img_param, data_dir.get_data_dir(), img_tag) - msgs = ['"write" lock', - 'Is another process using the image', - img.image_filename] + msgs = [ + '"write" lock', + "Is another process using the image", + img.image_filename, + ] if not all(msg in e.result.stderr.decode() for msg in msgs): test.fail("Image lock information is not as expected.") @@ -60,11 +59,11 @@ def _qemu_img_info_to_verify_image_lock(boot_img, info_img, img_tag): except process.CmdError as e: _verify_write_lock_err_msg(e, img_tag) else: - test.fail("The image %s is not locked." % img_tag) + test.fail(f"The image {img_tag} is not locked.") try: _qemu_img_info(info_img, True) except process.CmdError: - test.fail("qemu-img info %s failed." 
% info_img) + test.fail(f"qemu-img info {info_img} failed.") vm = env.get_vm(params["main_vm"]) if params.get("create_snapshot", "no") == "yes": diff --git a/qemu/tests/remote_block_resize.py b/qemu/tests/remote_block_resize.py index 93879757b9..7a99fd531e 100644 --- a/qemu/tests/remote_block_resize.py +++ b/qemu/tests/remote_block_resize.py @@ -1,15 +1,9 @@ import json from avocado.utils import wait - -from virttest import error_context -from virttest import utils_test -from virttest import utils_disk -from virttest import qemu_storage -from virttest import data_dir -from virttest.utils_windows import drive - +from virttest import data_dir, error_context, qemu_storage, utils_disk, utils_test from virttest.qemu_capabilities import Flags +from virttest.utils_windows import drive @error_context.context_aware @@ -32,11 +26,15 @@ def verify_disk_size(session, os_type, disk): """ current_size = utils_disk.get_disk_size(session, os_type, disk) accept_ratio = float(params.get("accept_ratio", 0)) - if (current_size <= block_size and - current_size >= block_size * (1 - accept_ratio)): - test.log.info("Block Resizing Finished !!! \n" - "Current size %s is same as the expected %s", - current_size, block_size) + if current_size <= block_size and current_size >= block_size * ( + 1 - accept_ratio + ): + test.log.info( + "Block Resizing Finished !!! \n" + "Current size %s is same as the expected %s", + current_size, + block_size, + ) return True else: test.log.error("Current: %s\nExpect: %s\n", current_size, block_size) @@ -50,8 +48,7 @@ def verify_disk_size(session, os_type, disk): img_size = params.get("image_size_stg", "10G") data_image = params.get("images").split()[-1] data_image_params = params.object_params(data_image) - img = qemu_storage.QemuImg(data_image_params, data_dir.get_data_dir(), - data_image) + img = qemu_storage.QemuImg(data_image_params, data_dir.get_data_dir(), data_image) filters = {} data_image_dev = "" if vm.check_capability(Flags.BLOCKDEV): @@ -67,16 +64,15 @@ def verify_disk_size(session, os_type, disk): if not data_image_dev: test.error("Cannot find device to resize.") - block_virtual_size = json.loads(img.info(force_share=True, - output="json"))["virtual-size"] + block_virtual_size = json.loads(img.info(force_share=True, output="json"))[ + "virtual-size" + ] session = vm.wait_for_login(timeout=timeout) if os_type == "windows" and driver_name: - session = utils_test.qemu.windrv_check_running_verifier(session, - vm, - test, - driver_name, - timeout) + session = utils_test.qemu.windrv_check_running_verifier( + session, vm, test, driver_name, timeout + ) if os_type == "linux": disk = sorted(utils_disk.get_linux_disks(session).keys())[0] else: @@ -87,8 +83,9 @@ def verify_disk_size(session, os_type, disk): # The new size must be a multiple of 512 for windows if os_type == "windows" and block_size % 512 != 0: block_size = int(block_size / 512) * 512 - error_context.context("Change disk size to %s in monitor" - % block_size, test.log.info) + error_context.context( + f"Change disk size to {block_size} in monitor", test.log.info + ) if vm.check_capability(Flags.BLOCKDEV): args = (None, block_size, data_image_dev) @@ -104,9 +101,9 @@ def verify_disk_size(session, os_type, disk): if params.get("need_rescan") == "yes": drive.rescan_disks(session) - if not wait.wait_for(lambda: verify_disk_size(session, os_type, - disk), 20, 0, 1, - "Block Resizing"): + if not wait.wait_for( + lambda: verify_disk_size(session, os_type, disk), 20, 0, 1, "Block Resizing" + ): test.fail("The current block 
size is not the same as expected.\n") session.close() diff --git a/qemu/tests/remote_convert.py b/qemu/tests/remote_convert.py index 014aa1b75c..c64df36e84 100644 --- a/qemu/tests/remote_convert.py +++ b/qemu/tests/remote_convert.py @@ -1,9 +1,7 @@ -from virttest import data_dir -from virttest import qemu_storage -from virttest import storage - from avocado import fail_on from avocado.utils import process +from virttest import data_dir, qemu_storage, storage + from provider import qemu_img_utils as img_utils @@ -22,8 +20,9 @@ def _check_file(boot_image, md5_value): session = vm.wait_for_login() guest_temp_file = params["guest_temp_file"] md5sum_bin = params.get("md5sum_bin", "md5sum") - img_utils.check_md5sum(guest_temp_file, md5sum_bin, session, - md5_value_to_check=md5_value) + img_utils.check_md5sum( + guest_temp_file, md5sum_bin, session, md5_value_to_check=md5_value + ) session.close() vm.destroy() @@ -34,8 +33,7 @@ def _check_file(boot_image, md5_value): sync_bin = params.get("sync_bin", "sync") test.log.info("Create temporary file on guest: %s", guest_temp_file) - img_utils.save_random_file_to_vm(vm, guest_temp_file, 2048 * 512, - sync_bin) + img_utils.save_random_file_to_vm(vm, guest_temp_file, 2048 * 512, sync_bin) md5_value = img_utils.check_md5sum(guest_temp_file, md5sum_bin, session) test.log.info("Get md5 value of the temporary file: %s", md5_value) @@ -49,8 +47,7 @@ def _check_file(boot_image, md5_value): img_pairs = [(params["convert_source"], params["convert_target"])] if params.get("convert_target_remote"): # local -> remote - img_pairs.append((params["convert_target"], - params["convert_target_remote"])) + img_pairs.append((params["convert_target"], params["convert_target_remote"])) # Convert images for source, target in img_pairs: @@ -75,9 +72,12 @@ def _check_file(boot_image, md5_value): source_cache_mode = params.get("source_cache_mode") test.log.info("Convert %s to %s", source, target) fail_on((process.CmdError,))(source_image.convert)( - params, root_dir, cache_mode=cache_mode, + params, + root_dir, + cache_mode=cache_mode, source_cache_mode=source_cache_mode, - skip_target_creation=skip_target_creation) + skip_target_creation=skip_target_creation, + ) _check_file(target, md5_value) diff --git a/qemu/tests/remote_image_compress.py b/qemu/tests/remote_image_compress.py index 6990187513..0554a8f027 100644 --- a/qemu/tests/remote_image_compress.py +++ b/qemu/tests/remote_image_compress.py @@ -1,11 +1,9 @@ import os from avocado.utils import process - -from virttest import data_dir -from virttest import error_context -from virttest.qemu_storage import QemuImg, get_image_repr +from virttest import data_dir, error_context from virttest.qemu_io import QemuIOSystem +from virttest.qemu_storage import QemuImg, get_image_repr from provider.nbd_image_export import QemuNBDExportImage @@ -23,25 +21,25 @@ def run(test, params, env): :param params: Dictionary with the test parameters :param env: Dictionary with test environment. 
""" + def _write_to_image(img, write_size, write_timeout): - """ Writes data to the given image - :param img: QemuImg instance which data will be written to - :param write_size: amount of data written into target image - :param write_timeout: maximum time for write command to complete + """Writes data to the given image + :param img: QemuImg instance which data will be written to + :param write_size: amount of data written into target image + :param write_timeout: maximum time for write command to complete """ io_handler = QemuIOSystem(test, params, img.image_filename) - test.log.info(f"Running qemu-io into {img.image_filename}") + test.log.info("Running qemu-io into %s", img.image_filename) try: - io_handler.cmd_output(f"write -P 1 0 {write_size}", - write_timeout) + io_handler.cmd_output(f"write -P 1 0 {write_size}", write_timeout) except process.CmdError: test.fail(f"Couldn't write to {img.image_filename} file by qemu-io") def _get_image_size(img, root_dir): - """ Returns size in bytes that the given image is actually using - :param img: QemuImg instance of the image being checked - :param root_dir: root data dir in which images are left - :returns: an int with the disk usage of the image + """Returns size in bytes that the given image is actually using + :param img: QemuImg instance of the image being checked + :param root_dir: root data dir in which images are left + :returns: an int with the disk usage of the image """ img_path = os.path.join(root_dir, img.image_filename) try: @@ -69,24 +67,27 @@ def _get_image_size(img, root_dir): write_size = params.get("write_size", "1G") _write_to_image(src_image, write_size, write_timeout) # 2) Export dst image by nbd - test.log.info(f"Exporting NBD Image") + test.log.info("Exporting NBD Image") nbd_export_filters = dst_params.get_list("nbd_export_filters") nbd_export_filter_ids = [] for index, filter_type in enumerate(nbd_export_filters): nbd_export_filter_ids.append(f"filt{index}") dst_params[f"image_filter_driver_type_filt{index}"] = filter_type dst_params["image_filter_drivers"] = " ".join(nbd_export_filter_ids) - dst_params["nbd_export_image_opts"] = get_image_repr(dst, dst_params, - root_dir, "opts") + dst_params["nbd_export_image_opts"] = get_image_repr( + dst, dst_params, root_dir, "opts" + ) nbd_dst = QemuNBDExportImage(dst_params, dst) nbd_dst.export_image() # 3) Copy src into dst - test.log.info(f"Executing source conversion onto remote target") + test.log.info("Executing source conversion onto remote target") try: src_image.convert(params, root_dir, skip_target_creation=True) except process.CmdError as exception_details: - test.error(f"Couldn't convert {src} image onto {dst}." - f"Have a look at:\n{exception_details}") + test.error( + f"Couldn't convert {src} image onto {dst}." + f"Have a look at:\n{exception_details}" + ) finally: # End qemu-nbd export in any case nbd_dst.stop_export() @@ -96,11 +97,15 @@ def _get_image_size(img, root_dir): dst_du = _get_image_size(dst_image, root_dir) if src_du <= dst_du: # Assert dst size is smaller than src due to the compress driver - test.fail(f"dst size is {dst_du} and src size is {src_du}.\nExpected " - "dst to be smaller due to NBD compress driver.") + test.fail( + f"dst size is {dst_du} and src size is {src_du}.\nExpected " + "dst to be smaller due to NBD compress driver." 
+ ) # 5) Check src == dst in content - test.log.info(f"Running qemu-img compare over the resulting local imgs") + test.log.info("Running qemu-img compare over the resulting local imgs") compare_res = src_image.compare_to(dst_image) if compare_res.exit_status: - test.fail(f"src and dst images differ. {compare_res.stderr_text}" - f"{compare_res.stdout_text}") + test.fail( + f"src and dst images differ. {compare_res.stderr_text}" + f"{compare_res.stdout_text}" + ) diff --git a/qemu/tests/remote_image_guestfish_access.py b/qemu/tests/remote_image_guestfish_access.py index 9466395534..f8be515016 100644 --- a/qemu/tests/remote_image_guestfish_access.py +++ b/qemu/tests/remote_image_guestfish_access.py @@ -1,7 +1,5 @@ from avocado.utils import process - -from virttest import qemu_storage -from virttest import error_context +from virttest import error_context, qemu_storage @error_context.context_aware @@ -16,40 +14,41 @@ def run(test, params, env): :param env: Dictionary with test environment. """ image_tag = params.get("images").split()[0] - image_object = qemu_storage.QemuImg(params.object_params(image_tag), - None, image_tag) + image_object = qemu_storage.QemuImg( + params.object_params(image_tag), None, image_tag + ) if image_object.image_access: - test.cancel('Access remote image with tls-creds is ' - 'not supported by guestfish, skip the test') + test.cancel( + "Access remote image with tls-creds is " + "not supported by guestfish, skip the test" + ) # Make sure the image holds an OS instance vm = env.get_vm(params["main_vm"]) vm.verify_alive() try: - session = vm.wait_for_login( - timeout=params.get_numeric("login_timeout", 360)) + session = vm.wait_for_login(timeout=params.get_numeric("login_timeout", 360)) session.close() finally: vm.destroy() - msg = params['msg_check'] - testfile = params['guest_file_name'] - write_cmd = params['write_cmd'].format(fmt=image_object.image_format, - uri=image_object.image_filename) - read_cmd = params['read_cmd'].format(fmt=image_object.image_format, - uri=image_object.image_filename) + msg = params["msg_check"] + testfile = params["guest_file_name"] + write_cmd = params["write_cmd"].format( + fmt=image_object.image_format, uri=image_object.image_filename + ) + read_cmd = params["read_cmd"].format( + fmt=image_object.image_format, uri=image_object.image_filename + ) test.log.info("Write file '%s'", testfile) result = process.run(write_cmd, ignore_status=True, shell=True) if result.exit_status != 0: - test.fail('Failed to write a file, error message: %s' - % result.stderr.decode()) + test.fail(f"Failed to write a file, error message: {result.stderr.decode()}") test.log.info("Read file '%s'", testfile) result = process.run(read_cmd, ignore_status=True, shell=True) if result.exit_status != 0: - test.fail('Failed to read a file, error message: %s' - % result.stderr.decode()) + test.fail(f"Failed to read a file, error message: {result.stderr.decode()}") elif result.stdout.decode().strip() != msg: - test.fail("Message '%s' mismatched with '%s'" - % (msg, result.stdout.decode())) + test.fail(f"Message '{msg}' mismatched with '{result.stdout.decode()}'") diff --git a/qemu/tests/remote_image_multiwrite.py b/qemu/tests/remote_image_multiwrite.py index 4c31f1239d..4160f933df 100644 --- a/qemu/tests/remote_image_multiwrite.py +++ b/qemu/tests/remote_image_multiwrite.py @@ -1,8 +1,6 @@ import re -from virttest import error_context -from virttest import utils_disk -from virttest import utils_misc +from virttest import error_context, utils_disk, utils_misc 
@error_context.context_aware @@ -32,12 +30,12 @@ def run(test, params, env): drive_id = match.group(2) drive_path = utils_misc.get_linux_drive_path(session, drive_id) did = drive_path[5:] - test.log.info("Format %s(size=%s) with %s type.", did, stg_size, - stg_fstype) - mnts = utils_disk.configure_empty_linux_disk(session, did, stg_size, - fstype=stg_fstype) + test.log.info("Format %s(size=%s) with %s type.", did, stg_size, stg_fstype) + mnts = utils_disk.configure_empty_linux_disk( + session, did, stg_size, fstype=stg_fstype + ) if not mnts: - test.fail("Failed to create %s on disk %s." % (stg_fstype, did)) + test.fail(f"Failed to create {stg_fstype} on disk {did}.") finally: if session: session.close() diff --git a/qemu/tests/remote_image_ncat_access.py b/qemu/tests/remote_image_ncat_access.py index f8b8a83c66..dc6f11c2e5 100644 --- a/qemu/tests/remote_image_ncat_access.py +++ b/qemu/tests/remote_image_ncat_access.py @@ -1,15 +1,10 @@ import socket from avocado.utils import process - -from virttest import error_context -from virttest import qemu_storage -from virttest import storage -from virttest import data_dir - -from provider.nbd_image_export import InternalNBDExportImage +from virttest import data_dir, error_context, qemu_storage, storage from provider import qemu_img_utils as img_utils +from provider.nbd_image_export import InternalNBDExportImage @error_context.context_aware @@ -25,64 +20,66 @@ def run(test, params, env): :param params: Dictionary with the test parameters :param env: Dictionary with test environment. """ + def _create_image(): - result = qemu_storage.QemuImg( - params, None, params['images'].split()[0]).dd( - output=storage.get_image_filename( - params.object_params(params["local_image_tag"]), - data_dir.get_data_dir() - ), - bs=1024*1024 - ) + result = qemu_storage.QemuImg(params, None, params["images"].split()[0]).dd( + output=storage.get_image_filename( + params.object_params(params["local_image_tag"]), data_dir.get_data_dir() + ), + bs=1024 * 1024, + ) if result.exit_status != 0: - test.fail('Failed to clone the system image, error: %s' - % result.stderr.decode()) + test.fail( + f"Failed to clone the system image, error: {result.stderr.decode()}" + ) def _start_vm_without_image(): - params['images'] = '' + params["images"] = "" vm = None try: vm = img_utils.boot_vm_with_images(test, params, env) vm.verify_alive() finally: # let VT remove it - params['images'] = ' %s' % params['local_image_tag'] + params["images"] = " {}".format(params["local_image_tag"]) return vm def _make_ncat_cmd(): - ncat = '' - if params.get('nbd_unix_socket_%s' % params['nbd_image_tag']): - ncat = params['ncat_cmd'] + ncat = "" + if params.get("nbd_unix_socket_{}".format(params["nbd_image_tag"])): + ncat = params["ncat_cmd"] else: localhost = socket.gethostname() - params['nbd_server'] = localhost if localhost else 'localhost' - ncat = params['ncat_cmd'].format(localhost=params['nbd_server']) + params["nbd_server"] = localhost if localhost else "localhost" + ncat = params["ncat_cmd"].format(localhost=params["nbd_server"]) return ncat _create_image() vm = _start_vm_without_image() - nbd_export = InternalNBDExportImage(vm, params, params['local_image_tag']) + nbd_export = InternalNBDExportImage(vm, params, params["local_image_tag"]) nbd_export.hotplug_tls() nbd_export.hotplug_image() nbd_export.export_image() - params['nbd_export_name'] = nbd_export.get_export_name() + params["nbd_export_name"] = nbd_export.get_export_name() ncat_cmd = _make_ncat_cmd() result = process.run(ncat_cmd, 
ignore_status=True, shell=True) - if params['errmsg_check'] not in result.stderr.decode().strip(): - test.fail('Failed to read message(%s) from output(%s)' - % (params['errmsg_check'], result.stderr.decode())) + if params["errmsg_check"] not in result.stderr.decode().strip(): + test.fail( + "Failed to read message({}) from output({})".format( + params["errmsg_check"], result.stderr.decode() + ) + ) vm2 = None try: # Start another VM from the nbd exported image - vm2 = img_utils.boot_vm_with_images(test, params, env, - (params["nbd_image_tag"],), - 'vm2') - session = vm2.wait_for_login( - timeout=params.get_numeric("login_timeout", 480)) + vm2 = img_utils.boot_vm_with_images( + test, params, env, (params["nbd_image_tag"],), "vm2" + ) + session = vm2.wait_for_login(timeout=params.get_numeric("login_timeout", 480)) session.close() finally: if vm2: diff --git a/qemu/tests/remote_image_nmap_access.py b/qemu/tests/remote_image_nmap_access.py index 9bf5675e4e..480512396c 100644 --- a/qemu/tests/remote_image_nmap_access.py +++ b/qemu/tests/remote_image_nmap_access.py @@ -1,7 +1,6 @@ import socket from avocado.utils import process - from virttest import error_context from provider.nbd_image_export import QemuNBDExportImage @@ -22,23 +21,25 @@ def run(test, params, env): nbd_export.export_image() h = socket.gethostname() - params['nbd_server_%s' % params["nbd_image_tag"]] = h if h else 'localhost' - nmap_cmd = params['nmap_cmd'].format( - localhost=params['nbd_server_%s' % params["nbd_image_tag"]]) + params["nbd_server_{}".format(params["nbd_image_tag"])] = h if h else "localhost" + nmap_cmd = params["nmap_cmd"].format( + localhost=params["nbd_server_{}".format(params["nbd_image_tag"])] + ) try: result = process.run(nmap_cmd, ignore_status=True, shell=True) if result.exit_status != 0: - test.fail('Failed to execute nmap, error: %s' - % result.stderr.decode()) + test.fail(f"Failed to execute nmap, error: {result.stderr.decode()}") nbd_export.list_exported_image( - params["nbd_image_tag"], - params.object_params(params["nbd_image_tag"]) + params["nbd_image_tag"], params.object_params(params["nbd_image_tag"]) ) - if params.get('msg_check'): - if params['msg_check'] not in result.stdout.decode().strip(): - test.fail('Failed to read message(%s) from output(%s)' - % (params['msg_check'], result.stderr.decode())) + if params.get("msg_check"): + if params["msg_check"] not in result.stdout.decode().strip(): + test.fail( + "Failed to read message({}) from output({})".format( + params["msg_check"], result.stderr.decode() + ) + ) finally: nbd_export.stop_export() diff --git a/qemu/tests/remote_image_qemu_info_access.py b/qemu/tests/remote_image_qemu_info_access.py index ffa27e2950..c65d109d08 100644 --- a/qemu/tests/remote_image_qemu_info_access.py +++ b/qemu/tests/remote_image_qemu_info_access.py @@ -1,5 +1,4 @@ -from virttest import qemu_storage -from virttest import error_context +from virttest import error_context, qemu_storage @error_context.context_aware @@ -15,21 +14,20 @@ def run(test, params, env): :param params: Dictionary with the test parameters :param env: Dictionary with test environment. 
""" - img_params = params.object_params(params['remote_image_tag']) + img_params = params.object_params(params["remote_image_tag"]) image_name_list = [ - img_params['curl_path'], - img_params['curl_path'].replace(params['replace_symbol'], - params['ascii_symbol']) + img_params["curl_path"], + img_params["curl_path"].replace( + params["replace_symbol"], params["ascii_symbol"] + ), ] for image_name in image_name_list: - img_params['curl_path'] = image_name - img_obj = qemu_storage.QemuImg(img_params, None, - params['remote_image_tag']) + img_params["curl_path"] = image_name + img_obj = qemu_storage.QemuImg(img_params, None, params["remote_image_tag"]) - test.log.info('Access image: %s', img_obj.image_filename) + test.log.info("Access image: %s", img_obj.image_filename) out = img_obj.info() if img_obj.image_filename not in out: - test.fail('Failed to get url(%s) from output(%s)' - % (img_obj.image_filename, out)) + test.fail(f"Failed to get url({img_obj.image_filename}) from output({out})") diff --git a/qemu/tests/remote_image_qemu_io_access.py b/qemu/tests/remote_image_qemu_io_access.py index 462a9d8e5c..96ef8589f1 100644 --- a/qemu/tests/remote_image_qemu_io_access.py +++ b/qemu/tests/remote_image_qemu_io_access.py @@ -1,12 +1,7 @@ import socket from avocado.utils import process - -from virttest import data_dir -from virttest import storage -from virttest import qemu_storage -from virttest import utils_misc -from virttest import error_context +from virttest import data_dir, error_context, qemu_storage, storage, utils_misc from provider.nbd_image_export import QemuNBDExportImage @@ -22,60 +17,70 @@ def run(test, params, env): :param params: Dictionary with the test parameters :param env: Dictionary with test environment. """ + def _prepare(): tag = params["local_image_tag"] image_params = params.object_params(tag) - if image_params.get('create_description_cmd'): - params['nbd_export_description_%s' % tag] = process.run( - image_params['create_description_cmd'], - ignore_status=True, - shell=True - ).stdout.decode().strip() - - if image_params.get('create_image_cmd'): - params['create_image_cmd_%s' % tag] = image_params['create_image_cmd'].format( - desc=params['nbd_export_description_%s' % tag], - filename=storage.get_image_filename(image_params, - data_dir.get_data_dir()) + if image_params.get("create_description_cmd"): + params[f"nbd_export_description_{tag}"] = ( + process.run( + image_params["create_description_cmd"], + ignore_status=True, + shell=True, + ) + .stdout.decode() + .strip() + ) + + if image_params.get("create_image_cmd"): + params[f"create_image_cmd_{tag}"] = image_params["create_image_cmd"].format( + desc=params[f"nbd_export_description_{tag}"], + filename=storage.get_image_filename( + image_params, data_dir.get_data_dir() + ), ) # update nbd image's server to the local host localhost = socket.gethostname() - params['nbd_server_%s' % params["nbd_image_tag"] - ] = localhost if localhost else 'localhost' + params["nbd_server_{}".format(params["nbd_image_tag"])] = ( + localhost if localhost else "localhost" + ) def _get_tls_creds_obj(tag, params): - tls_str = '--object tls-creds-x509,id={t.aid},endpoint=client,dir={t.tls_creds}' + tls_str = "--object tls-creds-x509,id={t.aid},endpoint=client,dir={t.tls_creds}" tls = storage.StorageAuth.auth_info_define_by_params(tag, params) - return tls_str.format(t=tls) if tls else '' + return tls_str.format(t=tls) if tls else "" def _get_secret_obj(tag, params): - secret_str = '--object secret,id={s.aid},data={s.data}' + secret_str = 
"--object secret,id={s.aid},data={s.data}" secret = storage.ImageSecret.image_secret_define_by_params(tag, params) - return secret_str.format(s=secret) if secret else '' + return secret_str.format(s=secret) if secret else "" def _make_qemu_io_cmd(): nbd_image = params["nbd_image_tag"] nbd_image_params = params.object_params(nbd_image) nbd_image_filename = storage.get_image_filename(nbd_image_params, None) - nbd_image_format = '-f %s' % nbd_image_params['image_format'] + nbd_image_format = "-f {}".format(nbd_image_params["image_format"]) tls_obj = _get_tls_creds_obj(nbd_image, nbd_image_params) sec_obj = _get_secret_obj(nbd_image, nbd_image_params) if tls_obj or sec_obj: - nbd_image_format = '' - nbd_image_filename = "'%s'" % qemu_storage.get_image_json( - nbd_image, nbd_image_params, None) + nbd_image_format = "" + nbd_image_filename = ( + f"'{qemu_storage.get_image_json(nbd_image, nbd_image_params, None)}'" + ) qemu_io = utils_misc.get_qemu_io_binary(params) - return params['qemu_io_cmd'].format(qemu_io=qemu_io, - tls_creds=tls_obj, - secret=sec_obj, - fmt=nbd_image_format, - subcmd=params['qemu_io_subcmd'], - filename=nbd_image_filename) + return params["qemu_io_cmd"].format( + qemu_io=qemu_io, + tls_creds=tls_obj, + secret=sec_obj, + fmt=nbd_image_format, + subcmd=params["qemu_io_subcmd"], + filename=nbd_image_filename, + ) _prepare() @@ -88,17 +93,18 @@ def _make_qemu_io_cmd(): try: result = process.run(qemu_io_cmd, ignore_status=True, shell=True) if result.exit_status != 0: - test.fail('Failed to execute qemu-io, error: %s' - % result.stderr.decode()) + test.fail(f"Failed to execute qemu-io, error: {result.stderr.decode()}") - if params.get('msg_check'): - if params['msg_check'] not in result.stdout.decode().strip(): - test.fail('Failed to read message(%s) from output(%s)' - % (params['msg_check'], result.stderr.decode())) + if params.get("msg_check"): + if params["msg_check"] not in result.stdout.decode().strip(): + test.fail( + "Failed to read message({}) from output({})".format( + params["msg_check"], result.stderr.decode() + ) + ) nbd_export.list_exported_image( - params["nbd_image_tag"], - params.object_params(params["nbd_image_tag"]) + params["nbd_image_tag"], params.object_params(params["nbd_image_tag"]) ) finally: nbd_export.stop_export() diff --git a/qemu/tests/remote_image_unix_socket_access.py b/qemu/tests/remote_image_unix_socket_access.py index 5843b213d0..3e0e1847d1 100644 --- a/qemu/tests/remote_image_unix_socket_access.py +++ b/qemu/tests/remote_image_unix_socket_access.py @@ -1,10 +1,6 @@ -from virttest import data_dir -from virttest import storage -from virttest import qemu_storage -from virttest import error_context +from virttest import data_dir, error_context, qemu_storage, storage from provider import qemu_img_utils as img_utils - from provider.nbd_image_export import QemuNBDExportImage @@ -19,19 +15,19 @@ def run(test, params, env): :param params: Dictionary with the test parameters :param env: Dictionary with test environment. 
""" + def _prepare(): test.log.info("Clone system image with qemu-img") - result = qemu_storage.QemuImg( - params, None, params['images'].split()[0]).dd( - output=storage.get_image_filename( - params.object_params(params["local_image_tag"]), - data_dir.get_data_dir() - ), - bs=1024*1024 + result = qemu_storage.QemuImg(params, None, params["images"].split()[0]).dd( + output=storage.get_image_filename( + params.object_params(params["local_image_tag"]), data_dir.get_data_dir() + ), + bs=1024 * 1024, ) if result.exit_status != 0: - test.fail('Failed to clone the system image, error: %s' - % result.stderr.decode()) + test.fail( + f"Failed to clone the system image, error: {result.stderr.decode()}" + ) # Remove the image after test by avocado-vt # params['images'] += ' %s' % params["local_image_tag"] @@ -47,12 +43,12 @@ def _prepare(): try: # Start VM from the nbd exported image - vm = img_utils.boot_vm_with_images(test, params, env, - (params["nbd_image_tag"],)) - session = vm.wait_for_login( - timeout=int(params.get("login_timeout", 360))) + vm = img_utils.boot_vm_with_images( + test, params, env, (params["nbd_image_tag"],) + ) + session = vm.wait_for_login(timeout=int(params.get("login_timeout", 360))) if not session: - test.fail('Failed to log into VM') + test.fail("Failed to log into VM") finally: if session: session.close() diff --git a/qemu/tests/remote_server_disconnected.py b/qemu/tests/remote_server_disconnected.py index 7715b153fa..fd019ec940 100644 --- a/qemu/tests/remote_server_disconnected.py +++ b/qemu/tests/remote_server_disconnected.py @@ -1,6 +1,6 @@ -import os import ipaddress import json +import os from avocado.utils import process @@ -26,14 +26,20 @@ def _check_hosts(hosts): test.cancel("2 remote servers at least are required.") for h in hosts: if os.path.exists(h) or _is_ipv6_addr(h): - test.cancel("Neither ipv6 nor unix domain" - " socket is supported by now.") + test.cancel( + "Neither ipv6 nor unix domain" " socket is supported by now." 
+ ) hosts = [] if params.get_boolean("enable_gluster"): hosts.append(params["gluster_server"]) - hosts.extend([peer['host'] for peer in json.loads( - params.get('gluster_peers', '[]')) if 'host' in peer]) + hosts.extend( + [ + peer["host"] + for peer in json.loads(params.get("gluster_peers", "[]")) + if "host" in peer + ] + ) _check_hosts(hosts) hosts.pop() # The last server should be accessible @@ -54,25 +60,35 @@ def _check_hosts(hosts): try: for host in hosts: test.log.info("Disconnect to %s", host) - process.system(disconn_cmd.format(source=host), - ignore_status=True, shell=True) - if process.system(conn_check_cmd.format(source=host), - ignore_status=True, shell=True) == 0: + process.system( + disconn_cmd.format(source=host), ignore_status=True, shell=True + ) + if ( + process.system( + conn_check_cmd.format(source=host), ignore_status=True, shell=True + ) + == 0 + ): test.error("Failed to disconnect to remote server") disconn_hosts.append(host) test.log.info("Do disk I/O in VM") s, o = session.cmd_status_output(disk_op_cmd, timeout=disk_op_tm) if s != 0: - test.fail("Failed to do I/O in VM: %s" % o) + test.fail(f"Failed to do I/O in VM: {o}") finally: for host in disconn_hosts: test.log.info("Recover connection to %s", host) - process.system(recover_cmd.format(source=host), - ignore_status=True, shell=True) - if process.system(conn_check_cmd.format(source=host), - ignore_status=True, shell=True) != 0: - test.log.warn("Failed to recover connection to %s", host) + process.system( + recover_cmd.format(source=host), ignore_status=True, shell=True + ) + if ( + process.system( + conn_check_cmd.format(source=host), ignore_status=True, shell=True + ) + != 0 + ): + test.log.warning("Failed to recover connection to %s", host) if session: session.close() vm.destroy() diff --git a/qemu/tests/remove_interface_from_host.py b/qemu/tests/remove_interface_from_host.py index d5ab54cb84..01a17d414e 100644 --- a/qemu/tests/remove_interface_from_host.py +++ b/qemu/tests/remove_interface_from_host.py @@ -1,14 +1,11 @@ """ Remove tap/interface in host while guest is using it. """ + import logging import time -from virttest import error_context -from virttest import utils_net -from virttest import utils_misc -from virttest import utils_test -from virttest import env_process +from virttest import env_process, error_context, utils_misc, utils_net, utils_test @error_context.context_aware @@ -37,68 +34,63 @@ def run(test, params, env): # Step 2, ping should work guest_ip = vm.get_address() - error_context.context("Get the guest ip %s" % guest_ip, test.log.info) + error_context.context(f"Get the guest ip {guest_ip}", test.log.info) - error_context.context("Ping test from host to guest, should work", - test.log.info) + error_context.context("Ping test from host to guest, should work", test.log.info) status, output = utils_test.ping(guest_ip, 30, timeout=20) if status != 0: - test.fail("Ping failed, status: %s, output: %s" % (status, output)) + test.fail(f"Ping failed, status: {status}, output: {output}") host_ifname_name = vm.get_ifname() - error_context.context("Get interface name: %s. " % host_ifname_name, - test.log.info) + error_context.context(f"Get interface name: {host_ifname_name}. ", test.log.info) host_ifname = utils_net.Interface(host_ifname_name) # Step 3,4, disable interface and ping should fail - error_context.context("Set interface %s down." 
% host_ifname_name, - test.log.info) + error_context.context(f"Set interface {host_ifname_name} down.", test.log.info) host_ifname.down() time.sleep(secs_after_iplink_action) - error_context.context("After disable the ifname, " - "Ping test from host to guest, should fail.", - test.log.info) + error_context.context( + "After disable the ifname, " "Ping test from host to guest, should fail.", + test.log.info, + ) status, output = utils_test.ping(guest_ip, 30, timeout=20) if status == 0: - test.fail("Ping should fail, status: %s, output: %s" - % (status, output)) + test.fail(f"Ping should fail, status: {status}, output: {output}") # Step 5, enable interface, ping should work - error_context.context("Set interface %s up." % host_ifname_name, - test.log.info) + error_context.context(f"Set interface {host_ifname_name} up.", test.log.info) host_ifname.up() time.sleep(secs_after_iplink_action) - error_context.context("After enable the ifname, " - "Ping test from host to guest, should work", - test.log.info) + error_context.context( + "After enable the ifname, " "Ping test from host to guest, should work", + test.log.info, + ) status, output = utils_test.ping(guest_ip, 30, timeout=20) if status != 0: - test.fail("Ping should work, status: %s, output: %s" - % (status, output)) + test.fail(f"Ping should work, status: {status}, output: {output}") # Step 6, delete the interface, qemu should not crash, # ping should fail - error_context.context("Delete the interface %s." % host_ifname_name, - test.log.info) + error_context.context(f"Delete the interface {host_ifname_name}.", test.log.info) host_ifname.dellink() time.sleep(secs_after_iplink_action) - error_context.context("After delete the ifname, " - "VM and qemu should not crash, ping should fail", - test.log.info) + error_context.context( + "After delete the ifname, " "VM and qemu should not crash, ping should fail", + test.log.info, + ) vm.verify_alive() status, output = utils_test.ping(guest_ip, 30, timeout=20) if status == 0: - test.fail("Ping should fail, status: %s, output: %s" - % (status, output)) + test.fail(f"Ping should fail, status: {status}, output: {output}") # Step 7, shutdown guest, and restart a guest error_context.context("Shutdown the VM.", test.log.info) session = vm.wait_for_serial_login() shutdown_cmd = params.get("shutdown_command", "shutdown") - logging .debug("Shutdown guest with command %s", shutdown_cmd) + logging.debug("Shutdown guest with command %s", shutdown_cmd) session.sendline(shutdown_cmd) error_context.context("Waiting VM to go down", test.log.info) @@ -115,11 +107,10 @@ def run(test, params, env): vm.wait_for_login(timeout=login_timeout) guest_ip = vm.get_address() - error_context.context("Get the guest ip %s" % guest_ip, test.log.info) + error_context.context(f"Get the guest ip {guest_ip}", test.log.info) # Repeat step 2, ping should work - error_context.context("Ping test from host to guest, should work", - test.log.info) + error_context.context("Ping test from host to guest, should work", test.log.info) status, output = utils_test.ping(guest_ip, 30, timeout=20) if status != 0: - test.fail("Ping failed, status: %s, output: %s" % (status, output)) + test.fail(f"Ping failed, status: {status}, output: {output}") diff --git a/qemu/tests/resize_qemu_img.py b/qemu/tests/resize_qemu_img.py index e4d52397a1..89862bb5a6 100644 --- a/qemu/tests/resize_qemu_img.py +++ b/qemu/tests/resize_qemu_img.py @@ -1,8 +1,6 @@ import json -from virttest import data_dir -from virttest import error_context -from virttest import 
utils_numeric +from virttest import data_dir, error_context, utils_numeric from virttest.qemu_storage import QemuImg @@ -19,6 +17,7 @@ def run(test, params, env): :param params: Dictionary with the test parameters :param env: Dictionary with test environment. """ + def _sum_size_changes(size_changes): """ Sum the list of size changes. @@ -27,34 +26,45 @@ def _sum_size_changes(size_changes): """ res = [] for change in size_changes: - s = int(utils_numeric.normalize_data_size(change, "B") - ) * (-1 if '-' in change else 1) + s = int(utils_numeric.normalize_data_size(change, "B")) * ( + -1 if "-" in change else 1 + ) res.append(s) return sum(res) def _verify_resize_image(img_size, expected_size): """Verify the image size is as expected after resize.""" - test.log.info("Verify the size of %s is %s.", - img.image_filename, expected_size) + test.log.info( + "Verify the size of %s is %s.", img.image_filename, expected_size + ) if img_size != expected_size: - test.fail("Got image virtual size: %s, should be: %s." % - (img_size, expected_size)) + test.fail( + f"Got image virtual size: {img_size}, should be: {expected_size}." + ) def _verify_resize_disk(disk_size, expected_size): """ Verify the disk size is as expected after resize. """ - test.log.info("Verify the disk size of the image %s is %sG.", - img.image_filename, expected_size) + test.log.info( + "Verify the disk size of the image %s is %sG.", + img.image_filename, + expected_size, + ) if disk_size != expected_size: - test.fail("Got image actual size: %sG, should be: %sG." - % (disk_size, expected_size)) + test.fail( + f"Got image actual size: {disk_size}G, should be: {expected_size}G." + ) def _resize(size_changes, preallocation): """Resize the image and verify its size.""" for idx, size in enumerate(size_changes): - test.log.info("Resize the raw image %s %s with preallocation %s.", - img.image_filename, size, preallocation) + test.log.info( + "Resize the raw image %s %s with preallocation %s.", + img.image_filename, + size, + preallocation, + ) shrink = True if "-" in size else False img.resize(size, shrink=shrink, preallocation=preallocation) @@ -62,17 +72,18 @@ def _resize(size_changes, preallocation): disk_size = json.loads(img.info(output="json"))["actual-size"] # Set the magnitude order to GiB, allow some bytes deviation disk_size = float( - utils_numeric.normalize_data_size(str(disk_size), "G")) + utils_numeric.normalize_data_size(str(disk_size), "G") + ) expected_disk_size = size[1] _verify_resize_disk(int(disk_size), int(expected_disk_size)) img_size = json.loads(img.info(output="json"))["virtual-size"] - expected_size = (int(utils_numeric.normalize_data_size( - params["image_size_test"], "B")) + - _sum_size_changes(size_changes[:idx + 1])) + expected_size = int( + utils_numeric.normalize_data_size(params["image_size_test"], "B") + ) + _sum_size_changes(size_changes[: idx + 1]) _verify_resize_image(img_size, expected_size) - img_param = params.object_params('test') - img = QemuImg(img_param, data_dir.get_data_dir(), 'test') + img_param = params.object_params("test") + img = QemuImg(img_param, data_dir.get_data_dir(), "test") size_changes = params["size_changes"].split() preallocation = params.get("preallocation") diff --git a/qemu/tests/resize_short_overlay.py b/qemu/tests/resize_short_overlay.py index 71648fcd99..c54a7d2fbe 100644 --- a/qemu/tests/resize_short_overlay.py +++ b/qemu/tests/resize_short_overlay.py @@ -1,6 +1,5 @@ from avocado import fail_on from avocado.utils import process - from virttest import data_dir from 
virttest.qemu_io import QemuIOSystem from virttest.qemu_storage import QemuImg @@ -20,13 +19,14 @@ def run(test, params, env): :param params: Dictionary with the test parameters. :param env: Dictionary with test environment. """ + def _qemu_io(img, cmd): """Run qemu-io cmd to a given img.""" test.log.info("Run qemu-io %s", img.image_filename) try: QemuIOSystem(test, params, img.image_filename).cmd_output(cmd, 120) except process.CmdError as err: - test.fail("qemu-io to '%s' failed: %s." % (img.image_filename, err)) + test.fail(f"qemu-io to '{img.image_filename}' failed: {err}.") images = params["image_chain"].split() root_dir = data_dir.get_data_dir() diff --git a/qemu/tests/rh_kernel_update.py b/qemu/tests/rh_kernel_update.py index 28b128a7b6..c1d49d17dd 100644 --- a/qemu/tests/rh_kernel_update.py +++ b/qemu/tests/rh_kernel_update.py @@ -2,11 +2,7 @@ import time from avocado.utils import process - -from virttest import error_context -from virttest import storage -from virttest import data_dir - +from virttest import data_dir, error_context, storage QUERY_TIMEOUT = 180 INSTALL_TIMEOUT = 600 @@ -34,16 +30,16 @@ def run(test, params, env): :param params: Dictionary with the test parameters :param env: Dictionary with test environment. """ - def install_rpm(session, url, upgrade=False, nodeps=False, - timeout=INSTALL_TIMEOUT): - cmd = "rpm -ivhf %s" % url + + def install_rpm(session, url, upgrade=False, nodeps=False, timeout=INSTALL_TIMEOUT): + cmd = f"rpm -ivhf {url}" if upgrade: - cmd = "rpm -Uvhf %s" % url + cmd = f"rpm -Uvhf {url}" if nodeps: cmd += " --nodeps" s, o = session.cmd_status_output(cmd, timeout=timeout) if s != 0 and ("already" not in o): - test.fail("Failed to install %s: %s" % (url, o)) + test.fail(f"Failed to install {url}: {o}") def get_brew_latest_pkg(topdir, tag, pkg, arch=None, list_path=False): """ @@ -57,10 +53,10 @@ def get_brew_latest_pkg(topdir, tag, pkg, arch=None, list_path=False): :return: content returned by `latest-pkg`. 
""" - cmd = "brew --topdir='%s' latest-pkg %s %s" % (topdir, tag, pkg) + cmd = f"brew --topdir='{topdir}' latest-pkg {tag} {pkg}" cmd += " --quiet" if bool(arch): - cmd += " --arch=%s" % arch + cmd += f" --arch={arch}" if list_path: cmd += " --paths" return process.system_output(cmd, timeout=QUERY_TIMEOUT).decode() @@ -72,13 +68,12 @@ def get_kernel_info(): o = get_brew_latest_pkg(download_root, tag, build_name) try: - build = re.findall(r"%s[^\s]+" % build_name, o)[0] + build = re.findall(rf"{build_name}[^\s]+", o)[0] except IndexError: - test.error("Could not get the latest kernel build name: %s" % o) + test.error(f"Could not get the latest kernel build name: {o}") test.log.info("The latest build for tag '%s' is '%s'", tag, build) - info_cmd = "brew --topdir='%s' buildinfo %s" % (download_root, build) - buildinfo = process.system_output(info_cmd, - timeout=QUERY_TIMEOUT).decode() + info_cmd = f"brew --topdir='{download_root}' buildinfo {build}" + buildinfo = process.system_output(info_cmd, timeout=QUERY_TIMEOUT).decode() ver_rev = re.sub(build_name, "", build).lstrip("-") # customize it since old kernel not has arch name in release name @@ -90,12 +85,11 @@ def get_kernel_info(): pkg_params = params.object_params(pkg_name) pkg_arch = pkg_params["pkg_arch"] # package pattern: n-v-r.a.rpm - pkg_pattern = "%s-%s.%s.rpm" % (pkg_name, ver_rev, pkg_arch) - pkg_pattern = re.compile(".*/%s" % re.escape(pkg_pattern)) + pkg_pattern = f"{pkg_name}-{ver_rev}.{pkg_arch}.rpm" + pkg_pattern = re.compile(f".*/{re.escape(pkg_pattern)}") match = pkg_pattern.search(buildinfo, re.M | re.I) if not match: - test.error("Could not get the link of '%s' in buildinfo" - % pkg_name) + test.error(f"Could not get the link of '{pkg_name}' in buildinfo") pkg_path = match.group(0) pkg_links.append(pkg_path) @@ -125,15 +119,16 @@ def get_guest_pkgs(session, pkg, qformat=""): :return: list of packages. :rtype: list """ - cmd = "rpm -q --whatrequires %s" % pkg + cmd = f"rpm -q --whatrequires {pkg}" if qformat: - cmd += " --queryformat='%s\n'" % qformat + cmd += f" --queryformat='{qformat}\n'" pkgs = session.cmd_output(cmd, timeout=QUERY_TIMEOUT).splitlines() pkgs.append(pkg) return pkgs - def upgrade_guest_pkgs(session, pkg, arch, debuginfo=False, - nodeps=True, timeout=INSTALL_TIMEOUT): + def upgrade_guest_pkgs( + session, pkg, arch, debuginfo=False, nodeps=True, timeout=INSTALL_TIMEOUT + ): """ upgrade given packages in guest os. @@ -143,13 +138,13 @@ def upgrade_guest_pkgs(session, pkg, arch, debuginfo=False, :parm nodeps: bool type, if True, ignore deps when install rpm. :parm timeout: float type, timeout value when install rpm. 
""" - error_context.context("Upgrade package '%s' in guest" % pkg, - test.log.info) + error_context.context(f"Upgrade package '{pkg}' in guest", test.log.info) pkgs = get_guest_pkgs(session, pkg, "%{NAME}") tag = params.get("brew_tag") - pkg_urls = get_brew_latest_pkg(download_root, tag, pkg, arch, - list_path=True).splitlines() + pkg_urls = get_brew_latest_pkg( + download_root, tag, pkg, arch, list_path=True + ).splitlines() for url in pkg_urls: if "debuginfo" in url and not debuginfo: continue @@ -163,10 +158,10 @@ def get_guest_kernel_version(session): def compare_version(current, expected): if current == expected: return 0 - cur_ver = re.split('[.+-]', current) + cur_ver = re.split("[.+-]", current) cur_ver = [int(item) for item in cur_ver if item.isdigit()] cur_len = len(cur_ver) - exp_ver = re.split('[.+-]', expected) + exp_ver = re.split("[.+-]", expected) exp_ver = [int(item) for item in exp_ver if item.isdigit()] exp_len = len(exp_ver) if cur_len != exp_len: @@ -209,8 +204,9 @@ def compare_version(current, expected): kernel_ver = cur_ver updated = False else: - test.log.info("Guest current kernel does not match the " - "requirement, processing upgrade") + test.log.info( + "Guest current kernel does not match the " "requirement, processing upgrade" + ) for pkg in kernel_deps_pkgs: pkg_params = params.object_params(pkg) arch = pkg_params["pkg_arch"] @@ -219,72 +215,69 @@ def compare_version(current, expected): error_context.context("Install guest kernel package(s)", test.log.info) # not install kernel pkgs via rpm since need to install them atomically kernel_pkg_dir = "/tmp/kernel_packages" - session.cmd("mkdir -p %s" % kernel_pkg_dir) + session.cmd(f"mkdir -p {kernel_pkg_dir}") # old guest not support installing via url directly download_cmd = "curl -kL %s -o %s/%s" for pkg_url in kernel_pkgs: pkg_name = pkg_url.rsplit("/", 1)[-1] status, output = session.cmd_status_output( download_cmd % (pkg_url, kernel_pkg_dir, pkg_name), - timeout=DOWNLOAD_TIMEOUT) + timeout=DOWNLOAD_TIMEOUT, + ) if status: - test.fail("Failed to download %s: %s" % (pkg_url, output)) + test.fail(f"Failed to download {pkg_url}: {output}") pm_bin = "dnf" - if session.cmd_status("command -v %s" % pm_bin) != 0: + if session.cmd_status(f"command -v {pm_bin}") != 0: pm_bin = "yum" - inst_cmd = ("%s localinstall %s/* -y --nogpgcheck" - % (pm_bin, kernel_pkg_dir)) - status, output = session.cmd_status_output(inst_cmd, - timeout=inst_timeout) + inst_cmd = f"{pm_bin} localinstall {kernel_pkg_dir}/* -y --nogpgcheck" + status, output = session.cmd_status_output(inst_cmd, timeout=inst_timeout) if status != 0: - test.fail("Failed to install kernel package(s): %s" % output) - session.cmd("rm -rf %s" % kernel_pkg_dir) + test.fail(f"Failed to install kernel package(s): {output}") + session.cmd(f"rm -rf {kernel_pkg_dir}") - kernel_path = "/boot/vmlinuz-%s" % kernel_ver + kernel_path = f"/boot/vmlinuz-{kernel_ver}" if install_virtio: error_context.context("Installing virtio driver", test.log.info) - initrd_prob_cmd = "grubby --info=%s" % kernel_path + initrd_prob_cmd = f"grubby --info={kernel_path}" s, o = session.cmd_status_output(initrd_prob_cmd) if s != 0: - test.error("Could not get guest kernel information: %s" % o) + test.error(f"Could not get guest kernel information: {o}") try: initrd_path = re.findall("initrd=(.*)", o)[0] except IndexError: - test.error("Could not get initrd path from guest: %s" % o) + test.error(f"Could not get initrd path from guest: {o}") error_context.context("Update initrd file", test.log.info) - 
driver_list = ["--with=%s" % drv for drv in virtio_drivers] - mkinitrd_cmd = "mkinitrd -f %s " % initrd_path + driver_list = [f"--with={drv}" for drv in virtio_drivers] + mkinitrd_cmd = f"mkinitrd -f {initrd_path} " mkinitrd_cmd += " ".join(driver_list) - mkinitrd_cmd += " %s" % kernel_ver + mkinitrd_cmd += f" {kernel_ver}" s, o = session.cmd_status_output(mkinitrd_cmd, timeout=360) if s != 0: - test.fail("Failed to install virtio driver: %s" % o) + test.fail(f"Failed to install virtio driver: {o}") # make sure the newly installed kernel as default if updated: - error_context.context("Make the new installed kernel as default", - test.log.info) - make_def_cmd = "grubby --set-default=%s " % kernel_path + error_context.context("Make the new installed kernel as default", test.log.info) + make_def_cmd = f"grubby --set-default={kernel_path} " s, o = session.cmd_status_output(make_def_cmd) if s != 0: - test.error("Fail to set default kernel: %s" % o) + test.error(f"Fail to set default kernel: {o}") # remove or add the required arguments update_kernel_cmd = "" if args_removed: - update_kernel_cmd += ' --remove-args="%s"' % " ".join(args_removed) + update_kernel_cmd += ' --remove-args="{}"'.format(" ".join(args_removed)) if args_added: - update_kernel_cmd += ' --args="%s"' % " ".join(args_added) + update_kernel_cmd += ' --args="{}"'.format(" ".join(args_added)) if update_kernel_cmd: - update_kernel_cmd = ("grubby --update-kernel=%s %s" - % (kernel_path, update_kernel_cmd)) + update_kernel_cmd = f"grubby --update-kernel={kernel_path} {update_kernel_cmd}" update_kernel_cmd = params.get("update_kernel_cmd", update_kernel_cmd) if update_kernel_cmd: error_context.context("Update the guest kernel cmdline", test.log.info) s, o = session.cmd_status_output(update_kernel_cmd) if s != 0: - test.error("Fail to modify kernel cmdline: %s" % o) + test.error(f"Fail to modify kernel cmdline: {o}") # upgrade listed packages to latest version. 
for pkg in params.get("upgrade_pkgs", "").split(): @@ -292,15 +285,13 @@ def compare_version(current, expected): arch = pkg_info["pkg_arch"] nodeps = pkg_info.get("ignore_deps") == "yes" install_debuginfo = pkg_info.get("install_debuginfo") == "yes" - ver_before = session.cmd_output("rpm -q %s" % pkg) - upgrade_guest_pkgs(session, pkg, arch, install_debuginfo, nodeps, - inst_timeout) - ver_after = session.cmd_output("rpm -q %s" % pkg) + ver_before = session.cmd_output(f"rpm -q {pkg}") + upgrade_guest_pkgs(session, pkg, arch, install_debuginfo, nodeps, inst_timeout) + ver_after = session.cmd_output(f"rpm -q {pkg}") if "not installed" in ver_before: - mesg = "Install '%s' in guest" % ver_after + mesg = f"Install '{ver_after}' in guest" else: - mesg = "Upgrade '%s' from '%s' to '%s'" % ( - pkg, ver_before, ver_after) + mesg = f"Upgrade '{pkg}' from '{ver_before}' to '{ver_after}'" test.log.info(mesg) # reboot guest and do verify @@ -309,8 +300,10 @@ def compare_version(current, expected): session = vm.reboot(session) cur_ver = get_guest_kernel_version(session) if compare_version(cur_ver, kernel_ver) != 0: - test.fail("Failed to verify the guest kernel, expected version '%s' " - "vs current version '%s'" % (kernel_ver, cur_ver)) + test.fail( + f"Failed to verify the guest kernel, expected version '{kernel_ver}' " + f"vs current version '{cur_ver}'" + ) if verify_virtio: error_context.context("Verifying the virtio drivers", test.log.info) if not is_virtio_driver_installed(): @@ -322,4 +315,4 @@ def compare_version(current, expected): image_filename = storage.get_image_filename(params, base_dir) block = vm.get_block({"backing_file": image_filename}) vm.monitor.cmd("stop") - vm.monitor.send_args_cmd("commit %s" % block) + vm.monitor.send_args_cmd(f"commit {block}") diff --git a/qemu/tests/rh_qemu_iotests.py b/qemu/tests/rh_qemu_iotests.py index 3417da335a..86be6a55af 100644 --- a/qemu/tests/rh_qemu_iotests.py +++ b/qemu/tests/rh_qemu_iotests.py @@ -1,11 +1,9 @@ import os import re -from virttest import utils_misc -from virttest import error_context - -from avocado.utils import process from avocado.core import exceptions +from avocado.utils import process +from virttest import error_context, utils_misc @error_context.context_aware @@ -23,6 +21,7 @@ def run(test, params, env): :param params: Dictionary with the test parameters. :param env: Dictionary with test environment. 
""" + def retry_command(cmd): """ Retry the command when it fails, and raise the error once @@ -40,8 +39,9 @@ def retry_command(cmd): except process.CmdError as detail: msg = "Fail to execute command" test.log.error("%s: %s.", msg, detail) - raise exceptions.TestError("%s after %s times retry: %s" % - (msg, max_retry, detail)) + raise exceptions.TestError( + f"{msg} after {max_retry} times retry: {detail}" + ) def install_test(build_root): """ @@ -56,13 +56,14 @@ def install_test(build_root): query_format = params["query_format"] download_rpm_cmd = params["download_rpm_cmd"] get_src_cmd = params["get_src_cmd"] - qemu_spec = params.get("qemu_spec", 'SPECS/qemu-kvm.spec') - get_rpm_name_cmd = ("rpm -qf %s --queryformat=%s" % - (utils_misc.get_qemu_binary(params), query_format)) + qemu_spec = params.get("qemu_spec", "SPECS/qemu-kvm.spec") + get_rpm_name_cmd = ( + f"rpm -qf {utils_misc.get_qemu_binary(params)} --queryformat={query_format}" + ) src_rpm_name = process.system_output(get_rpm_name_cmd, shell=True) retry_command(download_rpm_cmd % src_rpm_name) spec = os.path.join(build_root, qemu_spec) - build_dir = os.path.join(build_root, 'BUILD') + build_dir = os.path.join(build_root, "BUILD") cmd = get_src_cmd % (src_rpm_name, spec) process.system(cmd, shell=True) src_dir = os.listdir(build_dir)[0] @@ -81,7 +82,7 @@ def config_test(qemu_src_dir): os.chdir(qemu_src_dir) cmd = "./configure" if make_socket_scm_helper: - cmd += " %s" % make_socket_scm_helper + cmd += f" {make_socket_scm_helper}" process.system(cmd, shell=True) def run_test(qemu_src_dir): @@ -94,23 +95,24 @@ def run_test(qemu_src_dir): extra_options = params.get("qemu_io_extra_options", "") image_format = params.get("qemu_io_image_format") result_pattern = params.get("iotests_result_pattern") - error_context.context("running qemu-iotests for image format %s" - % image_format, test.log.info) + error_context.context( + f"running qemu-iotests for image format {image_format}", test.log.info + ) os.environ["QEMU_PROG"] = utils_misc.get_qemu_binary(params) os.environ["QEMU_IMG_PROG"] = utils_misc.get_qemu_img_binary(params) os.environ["QEMU_IO_PROG"] = utils_misc.get_qemu_io_binary(params) - os.environ["QEMU_NBD_PROG"] = utils_misc.get_binary('qemu-nbd', params) + os.environ["QEMU_NBD_PROG"] = utils_misc.get_binary("qemu-nbd", params) os.chdir(os.path.join(qemu_src_dir, iotests_root)) - cmd = './check' + cmd = "./check" if extra_options: - cmd += " %s" % extra_options - cmd += " -%s" % image_format + cmd += f" {extra_options}" + cmd += f" -{image_format}" output = process.system_output(cmd, ignore_status=True, shell=True) match = re.search(result_pattern, output, re.I | re.M) if match: - iotests_log_file = "qemu_iotests_%s.log" % image_format + iotests_log_file = f"qemu_iotests_{image_format}.log" iotests_log_file = utils_misc.get_path(test.debugdir, iotests_log_file) - with open(iotests_log_file, 'w+') as log: + with open(iotests_log_file, "w+") as log: log.write(output) log.flush() msg = "Total test %s cases, %s failed" @@ -118,7 +120,7 @@ def run_test(qemu_src_dir): build_root = params.get("build_root", "/root/rpmbuild") rpmbuild_clean_cmd = params["rpmbuild_clean_cmd"] - cmd = "%s -version" % utils_misc.get_qemu_binary(params) + cmd = f"{utils_misc.get_qemu_binary(params)} -version" output = process.system_output(cmd, shell=True) cwd = os.getcwd() (qemu_src_dir, spec) = install_test(build_root) @@ -131,4 +133,4 @@ def run_test(qemu_src_dir): os.chdir(cwd) process.system(rpmbuild_clean_cmd % spec, shell=True) except Exception: - 
test.log.warn("Fail to clean test environment") + test.log.warning("Fail to clean test environment") diff --git a/qemu/tests/rh_qemu_update.py b/qemu/tests/rh_qemu_update.py index b937dd8132..3594b4cf46 100644 --- a/qemu/tests/rh_qemu_update.py +++ b/qemu/tests/rh_qemu_update.py @@ -1,5 +1,6 @@ -from avocado.utils import process, distro from distutils.version import LooseVersion + +from avocado.utils import distro, process from virttest import error_context QUERY_TIMEOUT = 360 @@ -22,26 +23,18 @@ def run(test, params, env): :param params: Dictionary with the test parameters :param env: Dictionary with test environment """ - qemu_package = params['qemu_package_install'] - pm_tool = params['pm_tool'] + qemu_package = params["qemu_package_install"] + pm_tool = params["pm_tool"] def install_qemu(session): """ Install compose version of qemu-kvm pkg by the name provided in cfg """ - cmd = "{} install -y {}".format( - pm_tool, qemu_package - ) + cmd = f"{pm_tool} install -y {qemu_package}" s, o = session.cmd_status_output(cmd, timeout=OPERATION_TIMEOUT) if s != 0: - test.error("Installation of '{}' failed with: {}".format( - qemu_package, o - ) - ) - test.log.info("Installation of '{}' succeeded".format( - qemu_package - ) - ) + test.error(f"Installation of '{qemu_package}' failed with: {o}") + test.log.info("Installation of '%s' succeeded", qemu_package) def install_component_management(session): """ @@ -49,22 +42,16 @@ def install_component_management(session): repositories containing qemu-kvm build or whole virt module """ cmd_clone = "git clone --depth=1 {} -b {} {}".format( - params['cm_repo'], - params['cm_branch'], - params['cm_path'] + params["cm_repo"], params["cm_branch"], params["cm_path"] ) s_clone, o_clone = session.cmd_status_output( cmd_clone, timeout=OPERATION_TIMEOUT - ) + ) if s_clone != 0: - test.error("Clonning of '{}' failed with: {}".format( - params['cm_repo'], o_clone - ) + test.error( + "Clonning of '{}' failed with: {}".format(params["cm_repo"], o_clone) ) - test.log.info("Clonning of '{}' succeeded".format( - params['cm_repo'] - ) - ) + test.log.info("Clonning of '%s' succeeded", params["cm_repo"]) def _get_installed_qemu_info(session=None): """ @@ -74,16 +61,19 @@ def _get_installed_qemu_info(session=None): cmd = f"rpm -q {qemu_package}" if session is not None: out = session.cmd(cmd, timeout=QUERY_TIMEOUT) - tgt = process.run( - "cat /etc/os-release | grep VERSION_ID | cut -d'=' -f2", - shell=True - ).stdout_text.strip().replace("\"", "") + tgt = ( + process.run( + "cat /etc/os-release | grep VERSION_ID | cut -d'=' -f2", shell=True + ) + .stdout_text.strip() + .replace('"', "") + ) else: out = process.run(cmd, shell=True).stdout_text.strip() distro_details = distro.detect() tgt = f"{distro_details.version}.{distro_details.release}" # Drop arch information from NVR e.g. 
'.x86_64' - nvr = out.rsplit('.', 1)[0] + nvr = out.rsplit(".", 1)[0] return { "nvr": nvr, "target": tgt, @@ -98,26 +88,23 @@ def verify_qemu_version(host_qemu, guest_qemu): if host_qemu["target"] != guest_qemu["target"]: test.cancel( "Guest target target '{}' differs from host '{}'".format( - guest_qemu['target'], - host_qemu['target'] + guest_qemu["target"], host_qemu["target"] ) ) # Check if qemu-versions in the available and guest one differs if LooseVersion(host_qemu["nvr"]) > LooseVersion(guest_qemu["nvr"]): test.log.info( - "Available qemu-kvm '{}' is newer compared to guest's '{}'".format( - host_qemu["nvr"], - guest_qemu["nvr"] - ) + "Available qemu-kvm '%s' is newer compared to guest's '%s'", + host_qemu["nvr"], + guest_qemu["nvr"], ) else: test.cancel( "Available qemu-kvm '{}' is older or same compared to guest's '{}'".format( - host_qemu["nvr"], - guest_qemu["nvr"] + host_qemu["nvr"], guest_qemu["nvr"] ) ) - return host_qemu['nvr'] + return host_qemu["nvr"] def update_guest_qemu(session, install_id): """ @@ -126,69 +113,47 @@ def update_guest_qemu(session, install_id): """ # Prepare module or build repo containing newer version of qemu-kvm cmd = f"python3 {params['cm_path']}{params['cm_cmd']} {install_id}" - test.log.info(f"Running: {cmd}") + test.log.info("Running: %s", cmd) try: session.cmd(cmd, timeout=OPERATION_TIMEOUT) - test.log.info("Creation of repo '{}' succeeded".format( - install_id - ) - ) + test.log.info("Creation of repo '%s' succeeded", install_id) except Exception as e: - test.error("Creation of repo '{}' failed with: {}".format( - install_id, e - ) - ) + test.error(f"Creation of repo '{install_id}' failed with: {e}") # Disable and enable new module if module is used if "+" in install_id: # Get virt module stream ('rhel' or 'av') on the host stream = process.run( - f"{pm_tool} module list --enabled | grep virt" + - "| awk -F ' ' '{{print $2}}' | head -1", - shell=True + f"{pm_tool} module list --enabled | grep virt" + + "| awk -F ' ' '{{print $2}}' | head -1", + shell=True, ).stdout_text.strip() disable_cmd = f"{pm_tool} module disable -y virt" s_disable, o_disable = session.cmd_status_output( disable_cmd, timeout=QUERY_TIMEOUT - ) + ) if s_disable != 0: - test.fail("Disable of module virt failed with: {}".format( - o_disable - ) - ) + test.fail(f"Disable of module virt failed with: {o_disable}") else: test.log.info("Disable of module virt succeeded") enable_cmd = f"{pm_tool} module enable -y virt:{stream}" s_enable, o_enable = session.cmd_status_output( enable_cmd, timeout=QUERY_TIMEOUT - ) + ) if s_enable != 0: - test.fail("Enable of module virt:{} failed with: {}".format( - stream, o_enable - ) - ) + test.fail(f"Enable of module virt:{stream} failed with: {o_enable}") else: - test.log.info("Enable of module virt:{} succeeded".format( - stream - ) - ) + test.log.info("Enable of module virt:%s succeeded", stream) # Run upgrade to newer qemu-kvm version if "+" in install_id: cmd_upgrade = f"{pm_tool} module update -y virt:{stream}" else: - cmd_upgrade = "{} upgrade -y {}".format( - pm_tool, qemu_package - ) + cmd_upgrade = f"{pm_tool} upgrade -y {qemu_package}" s_upgrade, o_upgrade = session.cmd_status_output( - cmd_upgrade, timeout=INSTALL_TIMEOUT) - if s_upgrade != 0: - test.fail("Upgrade of '{}' failed with: {}".format( - qemu_package, o_upgrade - ) - ) - test.log.info("Upgrade of '{}' succeeded".format( - qemu_package - ) + cmd_upgrade, timeout=INSTALL_TIMEOUT ) + if s_upgrade != 0: + test.fail(f"Upgrade of '{qemu_package}' failed with: {o_upgrade}") + 
test.log.info("Upgrade of '%s' succeeded", qemu_package) def verify_installed_qemu(host_qemu, guest_qemu): """ @@ -197,16 +162,10 @@ def verify_installed_qemu(host_qemu, guest_qemu): expected_nvr = host_qemu["nvr"] installed_nvr = guest_qemu["nvr"] if installed_nvr == expected_nvr: - test.log.info( - "NVR of installed pkg '{}' is correct".format( - installed_nvr - ) - ) + test.log.info("NVR of installed pkg '%s' is correct", installed_nvr) else: test.fail( - "NVR of installed pkg '{}' differs from expected '{}'".format( - installed_nvr, expected_nvr - ) + f"NVR of installed pkg '{installed_nvr}' differs from expected '{expected_nvr}'" ) vm = env.get_vm(params["main_vm"]) diff --git a/qemu/tests/rng_bat.py b/qemu/tests/rng_bat.py index 26df21071c..f453c15a64 100644 --- a/qemu/tests/rng_bat.py +++ b/qemu/tests/rng_bat.py @@ -1,12 +1,11 @@ import re -import aexpect import time -from virttest import utils_misc -from virttest import error_context -from virttest import utils_test -from virttest.utils_windows import system +import aexpect from avocado.utils import process +from virttest import error_context, utils_misc, utils_test +from virttest.utils_windows import system + from provider import win_driver_utils @@ -33,16 +32,16 @@ def is_dev_used_by_qemu(dev_file, vm_pid): :param vm_pid: qemu process ID. :return: Match objects or None. """ - lsof_cmd = "lsof %s" % dev_file + lsof_cmd = f"lsof {dev_file}" output = process.system_output(lsof_cmd, ignore_status=True).decode() - return re.search(r"\s+%s\s+" % vm_pid, output, re.M) + return re.search(rf"\s+{vm_pid}\s+", output, re.M) def _is_rngd_running(): """ Check whether rngd is running """ output = session.cmd_output(check_rngd_service) # pylint: disable=E0606 - if 'running' not in output: + if "running" not in output: return False return True @@ -52,8 +51,7 @@ def _is_rngd_running(): rng_dll_register_cmd = params.get("rng_dll_register_cmd") read_rng_timeout = float(params.get("read_rng_timeout", "360")) cmd_timeout = float(params.get("session_cmd_timeout", "360")) - rng_src = params.get("rng_src", - "WIN_UTILS:\\random_%PROCESSOR_ARCHITECTURE%.exe") + rng_src = params.get("rng_src", "WIN_UTILS:\\random_%PROCESSOR_ARCHITECTURE%.exe") driver_name = params["driver_name"] read_rng_cmd = params["read_rng_cmd"] rng_dst = params.get("rng_dst", "c:\\random_%PROCESSOR_ARCHITECTURE%.exe") @@ -63,21 +61,20 @@ def _is_rngd_running(): vm_pid = vm.get_pid() if dev_file: - error_context.context("Check '%s' used by qemu" % dev_file, - test.log.info) + error_context.context(f"Check '{dev_file}' used by qemu", test.log.info) if not is_dev_used_by_qemu(dev_file, vm_pid): msg = "Qemu (pid=%d) not using host passthrough " % vm_pid - msg += "device '%s'" % dev_file + msg += f"device '{dev_file}'" test.fail(msg) session = vm.wait_for_login(timeout=timeout) if os_type == "windows": - session = utils_test.qemu.windrv_check_running_verifier(session, vm, - test, driver_name, - timeout) + session = utils_test.qemu.windrv_check_running_verifier( + session, vm, test, driver_name, timeout + ) if not system.file_exists(session, rng_dst): rng_src = utils_misc.set_winutils_letter(session, rng_src) - session.cmd("copy %s %s /y" % (rng_src, rng_dst)) + session.cmd(f"copy {rng_src} {rng_dst} /y") else: update_driver = params.get("update_driver") if update_driver: @@ -87,17 +84,16 @@ def _is_rngd_running(): try: output = session.cmd_output_safe(verify_cmd, timeout=cmd_timeout) except aexpect.ShellTimeoutError: - err = "%s timeout, pls check if it's a product bug" % verify_cmd + err 
= f"{verify_cmd} timeout, pls check if it's a product bug" test.fail(err) - if not re.search(r"%s" % driver_name, output, re.M): + if not re.search(rf"{driver_name}", output, re.M): msg = "Verify device driver failed, " - msg += "guest report driver is %s, " % output - msg += "expect is '%s'" % driver_name + msg += f"guest report driver is {output}, " + msg += f"expect is '{driver_name}'" test.fail(msg) - error_context.context("Read virtio-rng device to get random number", - test.log.info) + error_context.context("Read virtio-rng device to get random number", test.log.info) if rng_dll_register_cmd: test.log.info("register 'viorngum.dll' into system") @@ -115,15 +111,13 @@ def _is_rngd_running(): if params.get("test_duration"): start_time = time.time() while (time.time() - start_time) < float(params.get("test_duration")): - output = session.cmd_output(read_rng_cmd, - timeout=read_rng_timeout) + output = session.cmd_output(read_rng_cmd, timeout=read_rng_timeout) if len(re.findall(rng_data_rex, output, re.M)) < 2: - test.fail("Unable to read random numbers from guest: %s" - % output) + test.fail(f"Unable to read random numbers from guest: {output}") else: output = session.cmd_output(read_rng_cmd, timeout=read_rng_timeout) if len(re.findall(rng_data_rex, output, re.M)) < 2: - test.fail("Unable to read random numbers from guest: %s" % output) + test.fail(f"Unable to read random numbers from guest: {output}") session.close() # for windows guest, disable/uninstall driver to get memory leak based on # driver verifier is enabled diff --git a/qemu/tests/rng_driver_negative.py b/qemu/tests/rng_driver_negative.py index 1fd96caf55..813502abcc 100644 --- a/qemu/tests/rng_driver_negative.py +++ b/qemu/tests/rng_driver_negative.py @@ -1,7 +1,6 @@ import aexpect - -from virttest import error_context from avocado.core import exceptions +from virttest import error_context @error_context.context_aware @@ -25,29 +24,26 @@ def run(test, params, env): driver_unload_cmd = params["driver_unload_cmd"] - error_context.context("Read virtio-rng device in background", - test.log.info) + error_context.context("Read virtio-rng device in background", test.log.info) read_rng_cmd = params["read_rng_cmd"] pid = session.cmd_output(read_rng_cmd) pid = pid.split("\n")[1] test.log.info("Check if random read process exist") - status = session.cmd_status("ps -p %s" % pid) + status = session.cmd_status(f"ps -p {pid}") if status != 0: raise exceptions.TestFail("random read is not running background") - error_context.context("Unload the driver during random read", - test.log.info) + error_context.context("Unload the driver during random read", test.log.info) try: session.cmd(driver_unload_cmd) except aexpect.ShellTimeoutError: pass - error_context.context("Check if there is call trace in guest", - test.log.info) + error_context.context("Check if there is call trace in guest", test.log.info) try: vm.verify_kernel_crash() finally: try: - session.cmd("kill -9 %s" % pid) + session.cmd(f"kill -9 {pid}") session.close() except Exception: pass diff --git a/qemu/tests/rng_host_guest_read.py b/qemu/tests/rng_host_guest_read.py index 1c9073ab01..87988d8798 100644 --- a/qemu/tests/rng_host_guest_read.py +++ b/qemu/tests/rng_host_guest_read.py @@ -1,6 +1,6 @@ -from virttest import error_context -from virttest import utils_test from avocado.utils import process +from virttest import error_context, utils_test + from provider import win_driver_utils @@ -42,14 +42,12 @@ def host_read_clean(host_read_process): vm = env.get_vm(params["main_vm"]) 
vm.wait_for_login() - error_context.context("Host read random numbers in the background", - test.log.info) + error_context.context("Host read random numbers in the background", test.log.info) host_read_process = host_read_start(host_read_cmd) try: if host_read_process.poll() is None: - error_context.context("Guest begin to read random numbers", - test.log.info) + error_context.context("Guest begin to read random numbers", test.log.info) utils_test.run_virt_sub_test(test, params, env, guest_rng_test) else: test.error("Host reading data is not alive!") diff --git a/qemu/tests/rng_hotplug.py b/qemu/tests/rng_hotplug.py index dbb79f241a..7aa7297aae 100644 --- a/qemu/tests/rng_hotplug.py +++ b/qemu/tests/rng_hotplug.py @@ -1,9 +1,9 @@ import time -from virttest import error_context -from virttest.qemu_devices import qdevices -from virttest import utils_test from avocado.core import exceptions +from virttest import error_context, utils_test +from virttest.qemu_devices import qdevices + from provider import win_driver_utils @@ -36,7 +36,7 @@ def get_rng_id(vm): return device_list def hotplug_rng(vm, dev): - error_context.context("Hotplug %s" % dev, test.log.info) + error_context.context(f"Hotplug {dev}", test.log.info) out, ver_out = vm.devices.simple_hotplug(dev, vm.monitor) if not ver_out: msg = "no % device in qtree after hotplug" % dev @@ -44,10 +44,10 @@ def hotplug_rng(vm, dev): test.log.info("%s is hotpluged successfully", dev) def unplug_rng(vm, dev): - error_context.context("Hot-unplug %s" % dev, test.log.info) + error_context.context(f"Hot-unplug {dev}", test.log.info) out, ver_out = vm.devices.simple_unplug(dev, vm.monitor) if not ver_out: - msg = "Still get %s in qtree after unplug" % dev + msg = f"Still get {dev} in qtree after unplug" raise exceptions.TestFail(msg) time.sleep(15) test.log.info("%s is unpluged successfully", dev) @@ -64,8 +64,7 @@ def restart_rngd(vm): def stop_rngd(vm): if params.get("stop_rngd"): session = vm.wait_for_login() - error_context.context("Disable rngd service before unplug", - test.log.info) + error_context.context("Disable rngd service before unplug", test.log.info) status, output = session.cmd_status_output(params.get("stop_rngd")) if status != 0: raise exceptions.TestError(output) @@ -76,7 +75,7 @@ def run_subtest(sub_test): Run subtest(e.g. 
rng_bat,reboot,shutdown) when it's not None :param sub_test: subtest name """ - error_context.context("Run %s subtest" % sub_test) + error_context.context(f"Run {sub_test} subtest") utils_test.run_virt_sub_test(test, params, env, sub_test) login_timeout = int(params.get("login_timeout", 360)) @@ -106,13 +105,13 @@ def run_subtest(sub_test): for i in range(repeat_times): dev_list = [] - error_context.context("Hotplug/unplug rng devices the %s time" - % (i+1), test.log.info) + error_context.context( + "Hotplug/unplug rng devices the %s time" % (i + 1), test.log.info + ) for num in range(rng_num): vm.devices.set_dirty() - new_dev = qdevices.QDevice(rng_driver, - {'id': '%s-%d' % (rng_driver, num)}) + new_dev = qdevices.QDevice(rng_driver, {"id": "%s-%d" % (rng_driver, num)}) hotplug_rng(vm, new_dev) dev_list.append(new_dev) diff --git a/qemu/tests/rng_maxbytes_period.py b/qemu/tests/rng_maxbytes_period.py index 910cd9dc0c..2b16f59c64 100644 --- a/qemu/tests/rng_maxbytes_period.py +++ b/qemu/tests/rng_maxbytes_period.py @@ -1,11 +1,7 @@ import re -from virttest import error_context -from virttest import env_process -from virttest import virt_vm -from virttest import utils_misc - from aexpect.exceptions import ShellTimeoutError +from virttest import env_process, error_context, utils_misc, virt_vm @error_context.context_aware @@ -20,12 +16,13 @@ def run(test, params, env): :param params: Dictionary with the test parameters :param env: Dictionary with test environment. """ + def _is_rngd_running(): """ Check whether rngd is running """ output = session.cmd_output(check_rngd_service) - return 'running' in output + return "running" in output timeout = params.get_numeric("login_timeout", 360) read_rng_timeout = float(params.get("read_rng_timeout", 3600)) @@ -37,24 +34,29 @@ def _is_rngd_running(): if not max_bytes and not period: test.error("Please specify the expected max-bytes and/or period.") if not max_bytes or not period: - if max_bytes != '0': + if max_bytes != "0": error_info = params["expected_error_info"] try: - env_process.process(test, params, env, - env_process.preprocess_image, - env_process.preprocess_vm) + env_process.process( + test, + params, + env, + env_process.preprocess_image, + env_process.preprocess_vm, + ) except virt_vm.VMCreateError as e: if error_info not in e.output: - test.fail("Expected error info '%s' is not reported, " - "output: %s" % (error_info, e.output)) + test.fail( + f"Expected error info '{error_info}' is not reported, " + f"output: {e.output}" + ) return vm = env.get_vm(params["main_vm"]) vm.verify_alive() session = vm.wait_for_login(timeout=timeout) - error_context.context("Read virtio-rng device to get random number", - test.log.info) + error_context.context("Read virtio-rng device to get random number", test.log.info) update_driver = params.get("update_driver") if update_driver: session.cmd(update_driver, timeout=cmd_timeout) @@ -66,23 +68,24 @@ def _is_rngd_running(): if status: test.error(output) - if max_bytes == '0': + if max_bytes == "0": try: s, o = session.cmd_status_output(read_rng_cmd, timeout=read_rng_timeout) except ShellTimeoutError: pass else: - test.fail("Unexpected dd result, status: %s, output: %s" % (s, o)) + test.fail(f"Unexpected dd result, status: {s}, output: {o}") else: s, o = session.cmd_status_output(read_rng_cmd, timeout=read_rng_timeout) if s: test.error(o) test.log.info(o) - data_rate = re.search(r'\s(\d+\.\d+) kB/s', o, re.M) + data_rate = re.search(r"\s(\d+\.\d+) kB/s", o, re.M) expected_data_rate = 
float(params["expected_data_rate"]) if float(data_rate.group(1)) > expected_data_rate * 1.1: - test.error("Read data rate is not as expected. " - "data rate: %s kB/s, max-bytes: %s, period: %s" % - (data_rate.group(1), max_bytes, period)) + test.error( + "Read data rate is not as expected. " + f"data rate: {data_rate.group(1)} kB/s, max-bytes: {max_bytes}, period: {period}" + ) session.close() diff --git a/qemu/tests/rng_stress.py b/qemu/tests/rng_stress.py index bdb6546394..9222814414 100644 --- a/qemu/tests/rng_stress.py +++ b/qemu/tests/rng_stress.py @@ -1,11 +1,9 @@ -import aexpect -import time import re +import time +import aexpect from avocado.utils import process - -from virttest import error_context -from virttest import utils_test +from virttest import error_context, utils_test from virttest.qemu_devices import qdevices @@ -41,9 +39,9 @@ def get_available_rng(session): driver_name = params["driver_name"] try: output = session.cmd_output_safe(verify_cmd) - rng_devices = re.findall(r"%s(?:\.\d+)?" % driver_name, output) + rng_devices = re.findall(rf"{driver_name}(?:\.\d+)?", output) except aexpect.ShellTimeoutError: - err = "%s timeout, pls check if it's a product bug" % verify_cmd + err = f"{verify_cmd} timeout, pls check if it's a product bug" test.fail(err) return rng_devices @@ -55,28 +53,28 @@ def get_available_rng(session): if params.get("pre_cmd"): error_context.context("Fetch data from host", test.log.info) - process.system(params.get("pre_cmd"), shell=True, - ignore_bg_processes=True) + process.system(params.get("pre_cmd"), shell=True, ignore_bg_processes=True) error_context.context("Read rng device in guest", test.log.info) utils_test.run_virt_sub_test(test, params, env, sub_test) if params.get("os_type") == "linux": - error_context.context("Query virtio rng device in guest", - test.log.info) + error_context.context("Query virtio rng device in guest", test.log.info) rng_devices = get_available_rng(session) rng_attached = get_rng_list(vm) if len(rng_devices) != len(rng_attached): - test.fail("The devices get from rng_arriable" - " don't match the rng devices attached") + test.fail( + "The devices get from rng_arriable" + " don't match the rng devices attached" + ) if len(rng_devices) > 1: for rng_device in rng_devices: - error_context.context("Change virtio rng device to %s" % - rng_device, test.log.info) + error_context.context( + f"Change virtio rng device to {rng_device}", test.log.info + ) session.cmd_status(params.get("switch_rng_cmd") % rng_device) - error_context.context("Read from %s in guest" % rng_device, - test.log.info) + error_context.context(f"Read from {rng_device} in guest", test.log.info) utils_test.run_virt_sub_test(test, params, env, sub_test) if params.get("post_cmd"): @@ -85,6 +83,7 @@ def get_available_rng(session): s = process.system( params.get("post_cmd"), ignore_status=(params.get("ignore_status") == "yes"), - shell=True) + shell=True, + ) if s == 0: break diff --git a/qemu/tests/rv_audio.py b/qemu/tests/rv_audio.py index cbd7aaf99f..052ef2f173 100644 --- a/qemu/tests/rv_audio.py +++ b/qemu/tests/rv_audio.py @@ -5,11 +5,12 @@ Requires: rv_connect test """ + import logging from virttest import utils_misc -LOG_JOB = logging.getLogger('avocado.test') +LOG_JOB = logging.getLogger("avocado.test") def verify_recording(recording, params): @@ -26,7 +27,7 @@ def verify_recording(recording, params): threshold = int(params.get("rv_audio_threshold", "25000")) config_test = params.get("config_test", None) - if (len(rec) - rec.count('\0') < 50): + if len(rec) - 
rec.count("\0") < 50: LOG_JOB.info("Recording is empty") if disable_audio != "yes": return False @@ -37,7 +38,7 @@ def verify_recording(recording, params): pause = False try: for index, value in enumerate(rec): - if value == '\0': + if value == "\0": if not pause: pauses.append([index]) pause = True @@ -51,9 +52,11 @@ def verify_recording(recording, params): if len(pauses): LOG_JOB.error("%d pauses detected:", len(pauses)) for i in pauses: - LOG_JOB.info("start: %10fs duration: %10fs", - (float(i[0]) / (2 * 2 * 44100)), - (float(i[1] - i[0]) / (2 * 2 * 44100))) + LOG_JOB.info( + "start: %10fs duration: %10fs", + (float(i[0]) / (2 * 2 * 44100)), + (float(i[1] - i[0]) / (2 * 2 * 44100)), + ) # Two small hiccups are allowed when migrating if len(pauses) < 3 and config_test == "migration": return True @@ -70,59 +73,60 @@ def verify_recording(recording, params): def run(test, params, env): - guest_vm = env.get_vm(params["guest_vm"]) guest_vm.verify_alive() guest_session = guest_vm.wait_for_login( - timeout=int(params.get("login_timeout", 360))) + timeout=int(params.get("login_timeout", 360)) + ) client_vm = env.get_vm(params["client_vm"]) client_vm.verify_alive() client_session = client_vm.wait_for_login( - timeout=int(params.get("login_timeout", 360))) + timeout=int(params.get("login_timeout", 360)) + ) - if guest_session.cmd_status("ls %s" % params.get("audio_tgt")): + if guest_session.cmd_status("ls {}".format(params.get("audio_tgt"))): test.log.info(params.get("audio_src")) test.log.info(params.get("audio_tgt")) - guest_vm.copy_files_to( - params.get("audio_src"), - params.get("audio_tgt")) - if client_session.cmd_status("ls %s" % params.get("audio_tgt")): - client_vm.copy_files_to( - params.get("audio_src"), - params.get("audio_tgt")) + guest_vm.copy_files_to(params.get("audio_src"), params.get("audio_tgt")) + if client_session.cmd_status("ls {}".format(params.get("audio_tgt"))): + client_vm.copy_files_to(params.get("audio_src"), params.get("audio_tgt")) if params.get("rv_record") == "yes": test.log.info("rv_record set; Testing recording") - player = client_vm.wait_for_login( - timeout=int(params.get("login_timeout", 360))) + player = client_vm.wait_for_login(timeout=int(params.get("login_timeout", 360))) recorder_session = guest_vm.wait_for_login( - timeout=int(params.get("login_timeout", 360))) + timeout=int(params.get("login_timeout", 360)) + ) recorder_session_vm = guest_vm else: test.log.info("rv_record not set; Testing playback") - player = guest_vm.wait_for_login( - timeout=int(params.get("login_timeout", 360))) + player = guest_vm.wait_for_login(timeout=int(params.get("login_timeout", 360))) recorder_session = client_vm.wait_for_login( - timeout=int(params.get("login_timeout", 360))) + timeout=int(params.get("login_timeout", 360)) + ) recorder_session_vm = client_vm - player.cmd("aplay %s &> /dev/null &" % # starts playback - params.get("audio_tgt"), timeout=30) + player.cmd( + "aplay {} &> /dev/null &".format(params.get("audio_tgt")), + timeout=30, + ) if params.get("config_test", "no") == "migration": bg = utils_misc.InterruptedThread(guest_vm.migrate, kwargs={}) bg.start() - recorder_session.cmd("arecord -d %s -f cd -D hw:0,1 %s" % ( # records - params.get("audio_time", "200"), # duration - params.get("audio_rec")), # target - timeout=500) + recorder_session.cmd( + "arecord -d {} -f cd -D hw:0,1 {}".format( # records + params.get("audio_time", "200"), # duration + params.get("audio_rec"), + ), # target + timeout=500, + ) if params.get("config_test", "no") == "migration": 
bg.join() - recorder_session_vm.copy_files_from( - params.get("audio_rec"), "./recorded.wav") + recorder_session_vm.copy_files_from(params.get("audio_rec"), "./recorded.wav") if not verify_recording("./recorded.wav", params): test.fail("Test failed") diff --git a/qemu/tests/rv_build_install.py b/qemu/tests/rv_build_install.py index d88c7140d2..4eb7995373 100644 --- a/qemu/tests/rv_build_install.py +++ b/qemu/tests/rv_build_install.py @@ -5,17 +5,16 @@ Requires: connected binaries remote-viewer, Xorg, gnome session, git """ + import logging import os -import time import re +import time from aexpect import ShellCmdError +from virttest import data_dir, utils_spice -from virttest import utils_spice -from virttest import data_dir - -LOG_JOB = logging.getLogger('avocado.test') +LOG_JOB = logging.getLogger("avocado.test") def connect_to_vm(vm_name, env, params): @@ -31,7 +30,9 @@ def connect_to_vm(vm_name, env, params): vm.verify_alive() vm_root_session = vm.wait_for_login( timeout=int(params.get("login_timeout", 360)), - username="root", password="123456") + username="root", + password="123456", + ) LOG_JOB.info("VM %s is up and running", vm_name) return (vm, vm_root_session) @@ -48,13 +49,12 @@ def install_req_pkgs(pkgsRequired, vm_root_session, params): for pkgName in pkgsRequired: LOG_JOB.info("Checking to see if %s is installed", pkgName) try: - vm_root_session.cmd("rpm -q %s" % pkgName) + vm_root_session.cmd(f"rpm -q {pkgName}") except: rpm = params.get(re.sub("-", "_", pkgName) + "_url") LOG_JOB.info("Installing %s from %s", pkgName, rpm) try: - vm_root_session.cmd("yum -y localinstall %s" % rpm, - timeout=300) + vm_root_session.cmd(f"yum -y localinstall {rpm}", timeout=300) except: LOG_JOB.info("Could not install %s", pkgName) @@ -74,14 +74,13 @@ def build_install_spiceprotocol(test, vm_root_session, vm_script_path, params): # the main product repo if "release 6" in vm_root_session.cmd("cat /etc/redhat-release"): try: - cmd = "yum --disablerepo=\"*\" " + \ - "--enablerepo=\"epel\" -y install pyparsing" + cmd = 'yum --disablerepo="*" ' + '--enablerepo="epel" -y install pyparsing' output = vm_root_session.cmd(cmd, timeout=300) LOG_JOB.info(output) except: LOG_JOB.error("Not able to install pyparsing!") - output = vm_root_session.cmd("%s -p spice-protocol" % (vm_script_path)) + output = vm_root_session.cmd(f"{vm_script_path} -p spice-protocol") LOG_JOB.info(output) if re.search("Return code", output): test.fail("spice-protocol was not installed properly") @@ -97,13 +96,16 @@ def build_install_qxl(test, vm_root_session, vm_script_path, params): """ # Checking to see if required packages exist and if not, install them - pkgsRequired = ["libpciaccess-devel", "xorg-x11-util-macros", - "xorg-x11-server-devel", "libfontenc-devel", - "libXfont-devel"] + pkgsRequired = [ + "libpciaccess-devel", + "xorg-x11-util-macros", + "xorg-x11-server-devel", + "libfontenc-devel", + "libXfont-devel", + ] install_req_pkgs(pkgsRequired, vm_root_session, params) - output = vm_root_session.cmd("%s -p xf86-video-qxl" % (vm_script_path), - timeout=600) + output = vm_root_session.cmd(f"{vm_script_path} -p xf86-video-qxl", timeout=600) LOG_JOB.info(output) if re.search("Return code", output): test.fail("qxl was not installed properly") @@ -134,24 +136,29 @@ def build_install_virtviewer(test, vm_root_session, vm_script_path, params): LOG_JOB.error("virt-viewer package couldn't be removed! 
%s", err.output) if "release 7" in vm_root_session.cmd("cat /etc/redhat-release"): - pkgsRequired = ["libogg-devel", "celt051-devel", - "spice-glib-devel", "spice-gtk3-devel"] + pkgsRequired = [ + "libogg-devel", + "celt051-devel", + "spice-glib-devel", + "spice-gtk3-devel", + ] else: pkgsRequired = ["libogg-devel", "celt051-devel"] install_req_pkgs(pkgsRequired, vm_root_session, params) - output = vm_root_session.cmd("%s -p virt-viewer" % (vm_script_path), - timeout=600) + output = vm_root_session.cmd(f"{vm_script_path} -p virt-viewer", timeout=600) LOG_JOB.info(output) if re.search("Return code", output): test.fail("virt-viewer was not installed properly") # Get version of remote-viewer after install try: - output = vm_root_session.cmd("which remote-viewer;" - "LD_LIBRARY_PATH=/usr/local/lib" - " remote-viewer --version") + output = vm_root_session.cmd( + "which remote-viewer;" + "LD_LIBRARY_PATH=/usr/local/lib" + " remote-viewer --version" + ) LOG_JOB.info(output) except ShellCmdError as err: LOG_JOB.error("Can't get version number! %s", err.output) @@ -168,23 +175,29 @@ def build_install_spicegtk(test, vm_root_session, vm_script_path, params): # Get version of spice-gtk before install try: - output = vm_root_session.cmd("LD_LIBRARY_PATH=/usr/local/lib" - " remote-viewer --spice-gtk-version") + output = vm_root_session.cmd( + "LD_LIBRARY_PATH=/usr/local/lib" " remote-viewer --spice-gtk-version" + ) LOG_JOB.info(output) except: LOG_JOB.error(output) if "release 7" in vm_root_session.cmd("cat /etc/redhat-release"): - pkgsRequired = ["libogg-devel", "celt051-devel", "libcacard-devel", - "source-highlight", "gtk-doc", "libepoxy-devel"] + pkgsRequired = [ + "libogg-devel", + "celt051-devel", + "libcacard-devel", + "source-highlight", + "gtk-doc", + "libepoxy-devel", + ] else: pkgsRequired = ["libogg-devel", "celt051-devel", "libcacard-devel"] install_req_pkgs(pkgsRequired, vm_root_session, params) try: - cmd = "yum --disablerepo=\"*\" " + \ - "--enablerepo=\"epel\" -y install perl-Text-CSV" + cmd = 'yum --disablerepo="*" ' + '--enablerepo="epel" -y install perl-Text-CSV' output = vm_root_session.cmd(cmd, timeout=300) LOG_JOB.info(output) except: @@ -193,8 +206,10 @@ def build_install_spicegtk(test, vm_root_session, vm_script_path, params): # spice-gtk needs to built from tarball before building virt-viewer on RHEL6 pkgName = params.get("build_install_pkg") if pkgName != "spice-gtk": - tarballLocation = "http://www.spice-space.org/download/gtk/spice-gtk-0.30.tar.bz2" - cmd = "%s -p spice-gtk --tarball %s" % (vm_script_path, tarballLocation) + tarballLocation = ( + "http://www.spice-space.org/download/gtk/spice-gtk-0.30.tar.bz2" + ) + cmd = f"{vm_script_path} -p spice-gtk --tarball {tarballLocation}" output = vm_root_session.cmd(cmd, timeout=600) LOG_JOB.info(output) if re.search("Return code", output): @@ -203,16 +218,16 @@ def build_install_spicegtk(test, vm_root_session, vm_script_path, params): LOG_JOB.info("spice-gtk was installed") else: - output = vm_root_session.cmd("%s -p spice-gtk" % (vm_script_path), - timeout=600) + output = vm_root_session.cmd(f"{vm_script_path} -p spice-gtk", timeout=600) LOG_JOB.info(output) if re.search("Return code", output): test.fail("spice-gtk was not installed properly") # Get version of spice-gtk after install try: - output = vm_root_session.cmd("LD_LIBRARY_PATH=/usr/local/lib" - " remote-viewer --spice-gtk-version") + output = vm_root_session.cmd( + "LD_LIBRARY_PATH=/usr/local/lib" " remote-viewer --spice-gtk-version" + ) LOG_JOB.info(output) except: 
LOG_JOB.error(output) @@ -237,8 +252,7 @@ def build_install_vdagent(test, vm_root_session, vm_script_path, params): pkgsRequired = ["libpciaccess-devel"] install_req_pkgs(pkgsRequired, vm_root_session, params) - output = vm_root_session.cmd("%s -p spice-vd-agent" % (vm_script_path), - timeout=600) + output = vm_root_session.cmd(f"{vm_script_path} -p spice-vd-agent", timeout=600) LOG_JOB.info(output) if re.search("Return code", output): test.fail("spice-vd-agent was not installed properly") @@ -287,9 +301,13 @@ def run(test, params, env): # location of the script on the host host_script_path = os.path.join(data_dir.get_deps_dir(), "spice", script) - test.log.info("Transferring the script to %s," - "destination directory: %s, source script location: %s", - vm_name, vm_script_path, host_script_path) + test.log.info( + "Transferring the script to %s," + "destination directory: %s, source script location: %s", + vm_name, + vm_script_path, + host_script_path, + ) vm.copy_files_to(host_script_path, vm_script_path, timeout=60) time.sleep(5) diff --git a/qemu/tests/rv_connect.py b/qemu/tests/rv_connect.py index b6a38a7da5..3537083a03 100644 --- a/qemu/tests/rv_connect.py +++ b/qemu/tests/rv_connect.py @@ -5,19 +5,14 @@ Use example kickstart RHEL-6-spice.ks """ + import logging import socket -from aexpect import ShellStatusError -from aexpect import ShellCmdError -from aexpect import ShellProcessTerminatedError - -from virttest import utils_net -from virttest import utils_spice -from virttest import remote -from virttest import utils_misc +from aexpect import ShellCmdError, ShellProcessTerminatedError, ShellStatusError +from virttest import remote, utils_misc, utils_net, utils_spice -LOG_JOB = logging.getLogger('avocado.test') +LOG_JOB = logging.getLogger("avocado.test") def str_input(client_vm, ticket): @@ -27,12 +22,14 @@ def str_input(client_vm, ticket): :param ticket - use params.get("spice_password") """ LOG_JOB.info("Passing ticket '%s' to the remote-viewer.", ticket) - char_mapping = {":": "shift-semicolon", - ",": "comma", - ".": "dot", - "/": "slash", - "?": "shift-slash", - "=": "equal"} + char_mapping = { + ":": "shift-semicolon", + ",": "comma", + ".": "dot", + "/": "slash", + "?": "shift-slash", + "=": "equal", + } for character in ticket: if character in char_mapping: character = char_mapping[character] @@ -47,10 +44,10 @@ def print_rv_version(client_session, rv_binary): :param client_session - vm.wait_for_login() :param rv_binary - remote-viewer binary """ - LOG_JOB.info("remote-viewer version: %s", - client_session.cmd(rv_binary + " -V")) - LOG_JOB.info("spice-gtk version: %s", - client_session.cmd(rv_binary + " --spice-gtk-version")) + LOG_JOB.info("remote-viewer version: %s", client_session.cmd(rv_binary + " -V")) + LOG_JOB.info( + "spice-gtk version: %s", client_session.cmd(rv_binary + " --spice-gtk-version") + ) def launch_rv(test, client_vm, guest_vm, params): @@ -71,14 +68,13 @@ def launch_rv(test, client_vm, guest_vm, params): if proxy: try: socket.inet_aton(params.get("proxy_ip", None)) - except socket.error: - test.cancel('Parameter proxy_ip not changed from default values') + except OSError: + test.cancel("Parameter proxy_ip not changed from default values") host_ip = utils_net.get_host_ip_address(params) host_port = None if guest_vm.get_spice_var("listening_addr") == "ipv6": - host_ip = ("[" + utils_misc.convert_ipv4_to_ipv6(host_ip) + - "]") + host_ip = "[" + utils_misc.convert_ipv4_to_ipv6(host_ip) + "]" host_tls_port = None disable_audio = params.get("disable_audio", 
"no") @@ -100,9 +96,8 @@ def launch_rv(test, client_vm, guest_vm, params): ticket_send = params.get("spice_password_send") qemu_ticket = params.get("qemu_password") if qemu_ticket: - guest_vm.monitor.cmd("set_password spice %s" % qemu_ticket) - LOG_JOB.info("Sending to qemu monitor: set_password spice %s", - qemu_ticket) + guest_vm.monitor.cmd(f"set_password spice {qemu_ticket}") + LOG_JOB.info("Sending to qemu monitor: set_password spice %s", qemu_ticket) gencerts = params.get("gencerts") certdb = params.get("certdb") @@ -111,29 +106,37 @@ def launch_rv(test, client_vm, guest_vm, params): cacert = None rv_parameters_from = params.get("rv_parameters_from", "cmd") - if rv_parameters_from == 'file': + if rv_parameters_from == "file": cmd += " ~/rv_file.vv" client_session = client_vm.wait_for_login( - timeout=int(params.get("login_timeout", 360))) + timeout=int(params.get("login_timeout", 360)) + ) if display == "spice": - ticket = guest_vm.get_spice_var("spice_password") if guest_vm.get_spice_var("spice_ssl") == "yes": - # client needs cacert file - cacert = "%s/%s" % (guest_vm.get_spice_var("spice_x509_prefix"), - guest_vm.get_spice_var("spice_x509_cacert_file")) - client_session.cmd("rm -rf %s && mkdir -p %s" % ( - guest_vm.get_spice_var("spice_x509_prefix"), - guest_vm.get_spice_var("spice_x509_prefix"))) - remote.copy_files_to(client_vm.get_address(), 'scp', - params.get("username"), - params.get("password"), - params.get("shell_port"), - cacert, cacert) + cacert = "{}/{}".format( + guest_vm.get_spice_var("spice_x509_prefix"), + guest_vm.get_spice_var("spice_x509_cacert_file"), + ) + client_session.cmd( + "rm -rf {} && mkdir -p {}".format( + guest_vm.get_spice_var("spice_x509_prefix"), + guest_vm.get_spice_var("spice_x509_prefix"), + ) + ) + remote.copy_files_to( + client_vm.get_address(), + "scp", + params.get("username"), + params.get("password"), + params.get("shell_port"), + cacert, + cacert, + ) host_tls_port = guest_vm.get_spice_var("spice_tls_port") host_port = guest_vm.get_spice_var("spice_port") @@ -142,7 +145,7 @@ def launch_rv(test, client_vm, guest_vm, params): # remote-viewer needs ',' delimiter. 
And also is needed to remove # first character (it's '/') host_subj = guest_vm.get_spice_var("spice_x509_server_subj") - host_subj = host_subj.replace('/', ',')[1:] + host_subj = host_subj.replace("/", ",")[1:] if ssltype == "invalid_explicit_hs": host_subj = "Invalid Explicit HS" else: @@ -153,11 +156,13 @@ def launch_rv(test, client_vm, guest_vm, params): # generated with the ip address hostname = socket.gethostname() if ssltype == "invalid_implicit_hs": - spice_url = r" spice://%s?tls-port=%s\&port=%s" % ( - hostname, host_tls_port, host_port) + spice_url = ( + rf" spice://{hostname}?tls-port={host_tls_port}\&port={host_port}" + ) else: - spice_url = r" spice://%s?tls-port=%s\&port=%s" % ( - host_ip, host_tls_port, host_port) + spice_url = ( + rf" spice://{host_ip}?tls-port={host_tls_port}\&port={host_port}" + ) if rv_parameters_from == "menu": line = spice_url @@ -167,22 +172,24 @@ def launch_rv(test, client_vm, guest_vm, params): cmd += spice_url if not rv_parameters_from == "file": - cmd += " --spice-ca-file=%s" % cacert + cmd += f" --spice-ca-file={cacert}" - if (params.get("spice_client_host_subject") == "yes" and not - rv_parameters_from == "file"): - cmd += " --spice-host-subject=\"%s\"" % host_subj + if ( + params.get("spice_client_host_subject") == "yes" + and not rv_parameters_from == "file" + ): + cmd += f' --spice-host-subject="{host_subj}"' else: host_port = guest_vm.get_spice_var("spice_port") if rv_parameters_from == "menu": # line to be sent through monitor once r-v is started # without spice url - line = "spice://%s?port=%s" % (host_ip, host_port) + line = f"spice://{host_ip}?port={host_port}" elif rv_parameters_from == "file": pass else: - cmd += " spice://%s?port=%s" % (host_ip, host_port) + cmd += f" spice://{host_ip}?port={host_port}" elif display == "vnc": raise NotImplementedError("remote-viewer vnc") @@ -206,13 +213,14 @@ def launch_rv(test, client_vm, guest_vm, params): cmd += " --spice-smartcard" if certdb is not None: - LOG_JOB.debug("Remote Viewer set to use the following certificate" - " database: %s", certdb) + LOG_JOB.debug( + "Remote Viewer set to use the following certificate" " database: %s", + certdb, + ) cmd += " --spice-smartcard-db " + certdb if gencerts is not None: - LOG_JOB.debug("Remote Viewer set to use the following certs: %s", - gencerts) + LOG_JOB.debug("Remote Viewer set to use the following certs: %s", gencerts) cmd += " --spice-smartcard-certificates " + gencerts if client_vm.params.get("os_type") == "linux": @@ -229,37 +237,43 @@ def launch_rv(test, client_vm, guest_vm, params): # Launching the actual set of commands try: if rv_ld_library_path: - print_rv_version(client_session, "LD_LIBRARY_PATH=/usr/local/lib " + rv_binary) + print_rv_version( + client_session, "LD_LIBRARY_PATH=/usr/local/lib " + rv_binary + ) else: print_rv_version(client_session, rv_binary) except (ShellStatusError, ShellProcessTerminatedError): # Sometimes It fails with Status error, ingore it and continue. # It's not that important to have printed versions in the log. 
- LOG_JOB.debug("Ignoring a Status Exception that occurs from calling " - "print versions of remote-viewer or spice-gtk") + LOG_JOB.debug( + "Ignoring a Status Exception that occurs from calling " + "print versions of remote-viewer or spice-gtk" + ) LOG_JOB.info("Launching %s on the client (virtual)", cmd) if proxy: if "http" in proxy: - split = proxy.split('//')[1].split(':') + split = proxy.split("//")[1].split(":") else: - split = proxy.split(':') + split = proxy.split(":") host_ip = split[0] if len(split) > 1: host_port = split[1] else: host_port = "3128" if rv_parameters_from != "file": - client_session.cmd("export SPICE_PROXY=%s" % proxy) + client_session.cmd(f"export SPICE_PROXY={proxy}") if not params.get("rv_verify") == "only": try: client_session.cmd(cmd) except ShellStatusError: - LOG_JOB.debug("Ignoring a status exception, will check connection" - "of remote-viewer later") + LOG_JOB.debug( + "Ignoring a status exception, will check connection" + "of remote-viewer later" + ) # Send command line through monitor since url was not provided if rv_parameters_from == "menu": @@ -284,11 +298,14 @@ def launch_rv(test, client_vm, guest_vm, params): is_rv_connected = True try: - utils_spice.verify_established(client_vm, host_ip, - host_port, rv_binary, - host_tls_port, - params.get("spice_secure_channels", - None)) + utils_spice.verify_established( + client_vm, + host_ip, + host_port, + rv_binary, + host_tls_port, + params.get("spice_secure_channels", None), + ) except utils_spice.RVConnectError: if test_type == "negative": LOG_JOB.info("remote-viewer connection failed as expected") @@ -298,15 +315,18 @@ def launch_rv(test, client_vm, guest_vm, params): if "SSL_accept failed" in qemulog: return else: - test.fail("SSL_accept failed not shown in qemu" - "process as expected.") + test.fail( + "SSL_accept failed not shown in qemu" "process as expected." 
+ ) is_rv_connected = False else: test.fail("remote-viewer connection failed") if test_type == "negative" and is_rv_connected: - test.fail("remote-viewer connection was established when" - " it was supposed to be unsuccessful") + test.fail( + "remote-viewer connection was established when" + " it was supposed to be unsuccessful" + ) # Get spice info output = guest_vm.monitor.cmd("info spice") @@ -314,19 +334,19 @@ def launch_rv(test, client_vm, guest_vm, params): LOG_JOB.debug(output) # Check to see if ipv6 address is reported back from qemu monitor - if (check_spice_info == "ipv6"): - LOG_JOB.info("Test to check if ipv6 address is reported" - " back from the qemu monitor") + if check_spice_info == "ipv6": + LOG_JOB.info( + "Test to check if ipv6 address is reported" " back from the qemu monitor" + ) # Remove brackets from ipv6 host ip - if (host_ip[1:len(host_ip) - 1] in output): - LOG_JOB.info("Reported ipv6 address found in output from" - " 'info spice'") + if host_ip[1 : len(host_ip) - 1] in output: + LOG_JOB.info("Reported ipv6 address found in output from" " 'info spice'") else: - test.fail("ipv6 address not found from qemu monitor" - " command: 'info spice'") + test.fail( + "ipv6 address not found from qemu monitor" " command: 'info spice'" + ) else: - LOG_JOB.info("Not checking the value of 'info spice'" - " from the qemu monitor") + LOG_JOB.info("Not checking the value of 'info spice'" " from the qemu monitor") # prevent from kill remote-viewer after test finish if client_vm.params.get("os_type") == "linux": @@ -350,15 +370,18 @@ def run(test, params, env): guest_vm.verify_alive() guest_session = guest_vm.wait_for_login( - timeout=int(params.get("login_timeout", 360))) + timeout=int(params.get("login_timeout", 360)) + ) client_vm = env.get_vm(params["client_vm"]) client_vm.verify_alive() client_session = client_vm.wait_for_login( - timeout=int(params.get("login_timeout", 360))) + timeout=int(params.get("login_timeout", 360)) + ) - if (client_vm.params.get("os_type") == "windows" and - client_vm.params.get("rv_installer", None)): + if client_vm.params.get("os_type") == "windows" and client_vm.params.get( + "rv_installer", None + ): utils_spice.install_rv_win(client_vm, params.get("rv_installer")) return @@ -366,17 +389,20 @@ def run(test, params, env): for vm in params.get("vms").split(): try: session = env.get_vm(vm).wait_for_login(timeout=360) - output = session.cmd('cat /etc/redhat-release') + output = session.cmd("cat /etc/redhat-release") test.log.info(output) except ShellCmdError: - test.cancel("Test is only currently supported on " - "RHEL and Fedora operating systems") + test.cancel( + "Test is only currently supported on " + "RHEL and Fedora operating systems" + ) if "release 6." 
in output: waittime = 15 else: waittime = 60 - utils_spice.clear_interface(env.get_vm(vm), - int(params.get("login_timeout", "360"))) + utils_spice.clear_interface( + env.get_vm(vm), int(params.get("login_timeout", "360")) + ) utils_spice.wait_timeout(waittime) diff --git a/qemu/tests/rv_copyandpaste.py b/qemu/tests/rv_copyandpaste.py index fa67f4007e..66d38b14d4 100644 --- a/qemu/tests/rv_copyandpaste.py +++ b/qemu/tests/rv_copyandpaste.py @@ -5,17 +5,15 @@ Requires: connected binaries remote-viewer, Xorg, gnome session """ + import logging import os import time import aexpect +from virttest import data_dir, utils_misc, utils_spice -from virttest import utils_misc -from virttest import utils_spice -from virttest import data_dir - -LOG_JOB = logging.getLogger('avocado.test') +LOG_JOB = logging.getLogger("avocado.test") def wait_timeout(timeout=10): @@ -43,7 +41,7 @@ def clear_cb(session, params): script_call = os.path.join(dst_path, script) # Clear the clipboard from the client and guest - clear_cmd = "%s %s %s" % (interpreter, script_call, script_clear_params) + clear_cmd = f"{interpreter} {script_call} {script_clear_params}" try: LOG_JOB.info("Clearing the clipboard") @@ -54,9 +52,15 @@ def clear_cb(session, params): LOG_JOB.info("Clipboard has been cleared.") -def place_img_in_clipboard(test, session_to_copy_from, interpreter, - script_call, script_params, dst_image_path, - test_timeout): +def place_img_in_clipboard( + test, + session_to_copy_from, + interpreter, + script_call, + script_params, + dst_image_path, + test_timeout, +): """ Use the clipboard script to copy an image into the clipboard. @@ -67,26 +71,34 @@ def place_img_in_clipboard(test, session_to_copy_from, interpreter, :param dst_image_path: location of the image to be copied :param test_timeout: timeout time for the cmd """ - cmd = "%s %s %s %s" % (interpreter, script_call, - script_params, dst_image_path) + cmd = f"{interpreter} {script_call} {script_params} {dst_image_path}" try: LOG_JOB.debug("------------ Script output ------------") - output = session_to_copy_from.cmd(cmd, print_func=LOG_JOB.info, - timeout=test_timeout) + output = session_to_copy_from.cmd( + cmd, print_func=LOG_JOB.info, timeout=test_timeout + ) if "The image has been placed into the clipboard." in output: LOG_JOB.info("Copying of the image was successful") else: - test.fail("Copying to the clipboard failed. %s" % output) + test.fail(f"Copying to the clipboard failed. {output}") except aexpect.ShellCmdError: test.fail("Copying to the clipboard failed") - LOG_JOB.debug("------------ End of script output of the Copying" - " Session ------------") + LOG_JOB.debug( + "------------ End of script output of the Copying" " Session ------------" + ) -def verify_img_paste(test, session_to_copy_from, interpreter, script_call, - script_params, final_image_path, test_timeout): +def verify_img_paste( + test, + session_to_copy_from, + interpreter, + script_call, + script_params, + final_image_path, + test_timeout, +): """ Use the clipboard script to paste an image from the clipboard. 
@@ -97,42 +109,52 @@ def verify_img_paste(test, session_to_copy_from, interpreter, script_call, :param final_image_path: location of where the image should be pasted :param test_timeout: timeout time for the cmd """ - cmd = "%s %s %s %s" % (interpreter, script_call, - script_params, final_image_path) + cmd = f"{interpreter} {script_call} {script_params} {final_image_path}" try: LOG_JOB.debug("------------ Script output ------------") - output = session_to_copy_from.cmd(cmd, print_func=LOG_JOB.info, - timeout=test_timeout) + output = session_to_copy_from.cmd( + cmd, print_func=LOG_JOB.info, timeout=test_timeout + ) if "Cb Image stored and saved to:" in output: LOG_JOB.info("Copying of the image was successful") else: - test.fail("Copying to the clipboard failed. %s" % output) + test.fail(f"Copying to the clipboard failed. {output}") except aexpect.ShellCmdError: test.fail("Copying to the clipboard failed") - LOG_JOB.debug("------------ End of script output of the Copying" - " Session ------------") + LOG_JOB.debug( + "------------ End of script output of the Copying" " Session ------------" + ) # Get the checksum of the file - cmd = "md5sum %s" % (final_image_path) + cmd = f"md5sum {final_image_path}" try: LOG_JOB.debug("------------ Script output ------------") - output = session_to_copy_from.cmd(cmd, print_func=LOG_JOB.info, - timeout=test_timeout) + output = session_to_copy_from.cmd( + cmd, print_func=LOG_JOB.info, timeout=test_timeout + ) except aexpect.ShellCmdError: test.fail("Couldn't get the size of the file") - LOG_JOB.debug("------------ End of script output of the Copying" - " Session ------------") + LOG_JOB.debug( + "------------ End of script output of the Copying" " Session ------------" + ) # Get the size of the copied image, this will be used for # verification on the other session that the paste was successful file_checksum = output.split()[0] return file_checksum -def verify_img_paste_success(test, session_to_copy_from, interpreter, - script_call, script_params, final_image_path, - expected_checksum, test_timeout): +def verify_img_paste_success( + test, + session_to_copy_from, + interpreter, + script_call, + script_params, + final_image_path, + expected_checksum, + test_timeout, +): """ Verify an image paste is successful by pasting an image to a file and verify the checksum matches the expected value. @@ -145,30 +167,33 @@ def verify_img_paste_success(test, session_to_copy_from, interpreter, :param expected_checksum: the checksum value of the image to be verified :param test_timeout: timeout time for the cmd """ - cmd = "%s %s %s %s" % (interpreter, script_call, - script_params, final_image_path) + cmd = f"{interpreter} {script_call} {script_params} {final_image_path}" try: LOG_JOB.debug("------------ Script output ------------") - output = session_to_copy_from.cmd(cmd, print_func=LOG_JOB.info, - timeout=test_timeout) + output = session_to_copy_from.cmd( + cmd, print_func=LOG_JOB.info, timeout=test_timeout + ) if "Cb Image stored and saved to:" in output: LOG_JOB.info("Copying of the image was successful") else: - test.fail("Copying to the clipboard failed. %s" % output) + test.fail(f"Copying to the clipboard failed. 
{output}") finally: - LOG_JOB.info("------------ End of script output of the Pasting" - " Session ------------") + LOG_JOB.info( + "------------ End of script output of the Pasting" " Session ------------" + ) # Get the checksum of the file - cmd = "md5sum %s" % (final_image_path) + cmd = f"md5sum {final_image_path}" try: LOG_JOB.debug("------------ Script output ------------") - output = session_to_copy_from.cmd(cmd, print_func=LOG_JOB.info, - timeout=test_timeout) + output = session_to_copy_from.cmd( + cmd, print_func=LOG_JOB.info, timeout=test_timeout + ) except aexpect.ShellCmdError: test.fail("Copying to the clipboard failed.") - LOG_JOB.info("------------ End of script output of the Pasting" - " Session ------------") + LOG_JOB.info( + "------------ End of script output of the Pasting" " Session ------------" + ) img_checksum = output.split()[0] if img_checksum == expected_checksum: LOG_JOB.info("PASS: The image was successfully pasted") @@ -176,9 +201,15 @@ def verify_img_paste_success(test, session_to_copy_from, interpreter, test.fail("The pasting of the image failed") -def verify_img_paste_fails(test, session_to_copy_from, interpreter, - script_call, script_params, final_image_path, - test_timeout): +def verify_img_paste_fails( + test, + session_to_copy_from, + interpreter, + script_call, + script_params, + final_image_path, + test_timeout, +): """ Verify that pasting an image fails. @@ -189,13 +220,13 @@ def verify_img_paste_fails(test, session_to_copy_from, interpreter, :param final_image_path: location of where the image should be pasted :param test_timeout: timeout time for the cmd """ - cmd = "%s %s %s %s" % (interpreter, script_call, - script_params, final_image_path) + cmd = f"{interpreter} {script_call} {script_params} {final_image_path}" try: LOG_JOB.debug("------------ Script output ------------") - output = session_to_copy_from.cmd(cmd, print_func=LOG_JOB.info, - timeout=test_timeout) + output = session_to_copy_from.cmd( + cmd, print_func=LOG_JOB.info, timeout=test_timeout + ) if "No image stored" in output: LOG_JOB.info("PASS: Pasting the image failed as expected.") else: @@ -203,13 +234,21 @@ def verify_img_paste_fails(test, session_to_copy_from, interpreter, except aexpect.ShellCmdError: test.fail("Copying to the clipboard failed") - LOG_JOB.debug("------------ End of script output of the Pasting" - " Session ------------") - - -def verify_text_copy(test, session_to_copy_from, interpreter, script_call, - script_params, string_length, final_text_path, - test_timeout): + LOG_JOB.debug( + "------------ End of script output of the Pasting" " Session ------------" + ) + + +def verify_text_copy( + test, + session_to_copy_from, + interpreter, + script_call, + script_params, + string_length, + final_text_path, + test_timeout, +): """ Verify copying a large amount of textual data to the clipboard and to a file is successful, and return the checksum of the file. @@ -223,41 +262,51 @@ def verify_text_copy(test, session_to_copy_from, interpreter, script_call, :return: file_checksum: checksum of the textfile that was created. 
""" - cmd = "%s %s %s %s" % (interpreter, script_call, - script_params, string_length) + cmd = f"{interpreter} {script_call} {script_params} {string_length}" try: LOG_JOB.debug("------------ Script output ------------") - output = session_to_copy_from.cmd(cmd, print_func=LOG_JOB.info, - timeout=test_timeout) + output = session_to_copy_from.cmd( + cmd, print_func=LOG_JOB.info, timeout=test_timeout + ) if "The string has also been placed in the clipboard" in output: LOG_JOB.info("Copying of the large text file was successful") else: - test.fail("Copying to the clipboard failed. %s" % output) + test.fail(f"Copying to the clipboard failed. {output}") except aexpect.ShellCmdError: test.fail("Copying to the clipboard failed") - LOG_JOB.debug("------------ End of script output of the Copying" - " Session ------------") + LOG_JOB.debug( + "------------ End of script output of the Copying" " Session ------------" + ) # Get the checksum of the file - cmd = "md5sum %s" % (final_text_path) + cmd = f"md5sum {final_text_path}" try: LOG_JOB.debug("------------ Script output ------------") - output = session_to_copy_from.cmd(cmd, print_func=LOG_JOB.info, - timeout=test_timeout) + output = session_to_copy_from.cmd( + cmd, print_func=LOG_JOB.info, timeout=test_timeout + ) except aexpect.ShellCmdError: test.fail("Couldn't get the size of the file") - LOG_JOB.debug("------------ End of script output of the Copying" - " Session ------------") + LOG_JOB.debug( + "------------ End of script output of the Copying" " Session ------------" + ) # Get the size of the copied image, this will be used for # verification on the other session that the paste was successful file_checksum = output.split()[0] return file_checksum -def verify_txt_paste_success(test, session_to_paste_to, interpreter, - script_call, script_params, - final_text_path, textfile_checksum, test_timeout): +def verify_txt_paste_success( + test, + session_to_paste_to, + interpreter, + script_call, + script_params, + final_text_path, + textfile_checksum, + test_timeout, +): """ Use the clipboard script to copy text into the clipboard. @@ -269,30 +318,33 @@ def verify_txt_paste_success(test, session_to_paste_to, interpreter, :param image_size: the size of the image to be verified :param test_timeout: timeout time for the cmd """ - cmd = "%s %s %s %s" % (interpreter, script_call, - script_params, final_text_path) + cmd = f"{interpreter} {script_call} {script_params} {final_text_path}" try: LOG_JOB.debug("------------ Script output ------------") - output = session_to_paste_to.cmd(cmd, print_func=LOG_JOB.info, - timeout=test_timeout) + output = session_to_paste_to.cmd( + cmd, print_func=LOG_JOB.info, timeout=test_timeout + ) if "Writing of the clipboard text is complete" in output: LOG_JOB.info("Copying of the large text file was successful") else: - test.fail("Copying to the clipboard failed. %s" % output) + test.fail(f"Copying to the clipboard failed. 
{output}") finally: - LOG_JOB.info("------------ End of script output of the Pasting" - " Session ------------") + LOG_JOB.info( + "------------ End of script output of the Pasting" " Session ------------" + ) # Get the checksum of the file - cmd = "md5sum %s" % (final_text_path) + cmd = f"md5sum {final_text_path}" try: LOG_JOB.debug("------------ Script output ------------") - output = session_to_paste_to.cmd(cmd, print_func=LOG_JOB.info, - timeout=test_timeout) + output = session_to_paste_to.cmd( + cmd, print_func=LOG_JOB.info, timeout=test_timeout + ) except aexpect.ShellCmdError: test.fail("Copying to the clipboard failed.") - LOG_JOB.info("------------ End of script output of the Pasting" - " Session ------------") + LOG_JOB.info( + "------------ End of script output of the Pasting" " Session ------------" + ) file_checksum = output.split()[0] if file_checksum == textfile_checksum: LOG_JOB.info("PASS: The large text file was successfully pasted") @@ -300,9 +352,15 @@ def verify_txt_paste_success(test, session_to_paste_to, interpreter, test.fail("The pasting of the large text file failed") -def place_text_in_clipboard(test, session_to_copy_from, interpreter, - script_call, script_params, testing_text, - test_timeout): +def place_text_in_clipboard( + test, + session_to_copy_from, + interpreter, + script_call, + script_params, + testing_text, + test_timeout, +): """ Use the clipboard script to copy text into the clipboard. @@ -313,42 +371,45 @@ def place_text_in_clipboard(test, session_to_copy_from, interpreter, :param testing_text: text to be pasted :param test_timeout: timeout time for the cmd """ - cmd = "%s %s %s %s" % (interpreter, script_call, - script_params, testing_text) + cmd = f"{interpreter} {script_call} {script_params} {testing_text}" try: LOG_JOB.debug("------------ Script output ------------") - output = session_to_copy_from.cmd(cmd, print_func=LOG_JOB.info, - timeout=test_timeout) + output = session_to_copy_from.cmd( + cmd, print_func=LOG_JOB.info, timeout=test_timeout + ) if "The text has been placed into the clipboard." in output: LOG_JOB.info("Copying of text was successful") else: - test.fail("Copying to the clipboard failed. %s" % output) + test.fail(f"Copying to the clipboard failed. {output}") except aexpect.ShellCmdError: test.fail("Copying to the clipboard failed") - LOG_JOB.debug("------------ End of script output of the Copying" - " Session ------------") + LOG_JOB.debug( + "------------ End of script output of the Copying" " Session ------------" + ) # Verify the clipboard of the session that is being copied from, # before continuing the test - cmd = "%s %s" % (interpreter, script_call) + cmd = f"{interpreter} {script_call}" try: LOG_JOB.debug("------------ Script output ------------") - output = session_to_copy_from.cmd(cmd, print_func=LOG_JOB.info, - timeout=test_timeout) + output = session_to_copy_from.cmd( + cmd, print_func=LOG_JOB.info, timeout=test_timeout + ) if testing_text in output: LOG_JOB.info("Text was successfully copied to the clipboard") else: - test.fail("Copying to the clipboard Failed. %s" % output) + test.fail(f"Copying to the clipboard Failed. 
{output}") except aexpect.ShellCmdError: test.fail("Copying to the clipboard failed") LOG_JOB.debug("------------ End of script output ------------") -def verify_paste_fails(test, session_to_paste_to, testing_text, interpreter, - script_call, test_timeout): +def verify_paste_fails( + test, session_to_paste_to, testing_text, interpreter, script_call, test_timeout +): """ Test that pasting to the other session fails (negative testing: spice-vdagentd stopped or copy-paste-disabled is set on the VM @@ -360,26 +421,32 @@ def verify_paste_fails(test, session_to_paste_to, testing_text, interpreter, :param testing_text: text to be pasted :param test_timeout: timeout time for the cmd """ - cmd = "%s %s" % (interpreter, script_call) + cmd = f"{interpreter} {script_call}" try: LOG_JOB.debug("------------ Script output ------------") - output = session_to_paste_to.cmd(cmd, print_func=LOG_JOB.info, - timeout=test_timeout) + output = session_to_paste_to.cmd( + cmd, print_func=LOG_JOB.info, timeout=test_timeout + ) if testing_text in output: - test.fail("Pasting from the clipboard was" - " successful, text was copied from the other" - " session with vdagent stopped!!", output) + test.fail( + "Pasting from the clipboard was" + " successful, text was copied from the other" + " session with vdagent stopped!!", + output, + ) else: - LOG_JOB.info("PASS: Pasting from the clipboard was not" - " successful, as EXPECTED") + LOG_JOB.info( + "PASS: Pasting from the clipboard was not" " successful, as EXPECTED" + ) except aexpect.ShellCmdError: test.fail("Pasting from the clipboard failed.") LOG_JOB.debug("------------ End of script output ------------") -def verify_paste_successful(test, session_to_paste_to, testing_text, - interpreter, script_call, test_timeout): +def verify_paste_successful( + test, session_to_paste_to, testing_text, interpreter, script_call, test_timeout +): """ Test that pasting to the other session fails (negative testing - spice-vdagentd stopped or copy-paste-disabled is set on the VM @@ -391,24 +458,29 @@ def verify_paste_successful(test, session_to_paste_to, testing_text, :param testing_text: text to be pasted :param test_timeout: timeout time for the cmd """ - cmd = "%s %s" % (interpreter, script_call) + cmd = f"{interpreter} {script_call}" try: LOG_JOB.debug("------------ Script output ------------") - output = session_to_paste_to.cmd(cmd, print_func=LOG_JOB.info, - timeout=test_timeout) + output = session_to_paste_to.cmd( + cmd, print_func=LOG_JOB.info, timeout=test_timeout + ) if testing_text in output: LOG_JOB.info("Pasting from the clipboard is successful") else: - test.fail("Pasting from the clipboard failed, " - "nothing copied from other session", output) + test.fail( + "Pasting from the clipboard failed, " + "nothing copied from other session", + output, + ) except aexpect.ShellCmdError: test.fail("Pasting from the clipboard failed.") LOG_JOB.debug("------------ End of script output ------------") -def copy_and_paste_neg(test, session_to_copy_from, session_to_paste_to, - guest_session, params): +def copy_and_paste_neg( + test, session_to_copy_from, session_to_paste_to, guest_session, params +): """ Negative Test: Sending the commands to copy from one session to another, and make sure it does not work, because spice vdagent is off @@ -435,17 +507,25 @@ def copy_and_paste_neg(test, session_to_copy_from, session_to_paste_to, # Make sure virtio driver is running utils_spice.verify_virtio(guest_session, test_timeout) # Command to copy text and put it in the keyboard, copy on the client 
- place_text_in_clipboard(test, session_to_copy_from, interpreter, - script_call, script_params, testing_text, - test_timeout) + place_text_in_clipboard( + test, + session_to_copy_from, + interpreter, + script_call, + script_params, + testing_text, + test_timeout, + ) # Now test to see if the copied text from the one session can # be pasted on the other - verify_paste_fails(test, session_to_paste_to, testing_text, interpreter, - script_call, test_timeout) + verify_paste_fails( + test, session_to_paste_to, testing_text, interpreter, script_call, test_timeout + ) -def copy_and_paste_pos(test, session_to_copy_from, session_to_paste_to, - guest_session, params): +def copy_and_paste_pos( + test, session_to_copy_from, session_to_paste_to, guest_session, params +): """ Sending the commands to copy from one session to another, and make sure it works correctly @@ -470,17 +550,25 @@ def copy_and_paste_pos(test, session_to_copy_from, session_to_paste_to, # Make sure virtio driver is running utils_spice.verify_virtio(guest_session, test_timeout) # Command to copy text and put it in the keyboard, copy on the client - place_text_in_clipboard(test, session_to_copy_from, interpreter, - script_call, script_params, testing_text, - test_timeout) + place_text_in_clipboard( + test, + session_to_copy_from, + interpreter, + script_call, + script_params, + testing_text, + test_timeout, + ) # Now test to see if the copied text from the one session can be # pasted on the other - verify_paste_successful(test, session_to_paste_to, testing_text, - interpreter, script_call, test_timeout) + verify_paste_successful( + test, session_to_paste_to, testing_text, interpreter, script_call, test_timeout + ) -def restart_cppaste(test, session_to_copy_from, session_to_paste_to, - guest_session, params): +def restart_cppaste( + test, session_to_copy_from, session_to_paste_to, guest_session, params +): """ Sending the commands to copy from one session to another, and make sure it works correctly after Restarting vdagent @@ -505,13 +593,20 @@ def restart_cppaste(test, session_to_copy_from, session_to_paste_to, # Make sure virtio driver is running utils_spice.verify_virtio(guest_session, test_timeout) # Command to copy text and put it in the keyboard, copy on the client - place_text_in_clipboard(test, session_to_copy_from, interpreter, - script_call, script_params, testing_text, - test_timeout) + place_text_in_clipboard( + test, + session_to_copy_from, + interpreter, + script_call, + script_params, + testing_text, + test_timeout, + ) # Now test to see if the copied text from the one session can be # pasted on the other - verify_paste_successful(test, session_to_paste_to, testing_text, - interpreter, script_call, test_timeout) + verify_paste_successful( + test, session_to_paste_to, testing_text, interpreter, script_call, test_timeout + ) # Restart vdagent, clear the clipboard, verify cp and paste still works utils_spice.restart_vdagent(guest_session, test_timeout) @@ -520,19 +615,27 @@ def restart_cppaste(test, session_to_copy_from, session_to_paste_to, wait_timeout(5) # Command to copy text and put it in the keyboard, copy on the client - place_text_in_clipboard(test, session_to_copy_from, interpreter, - script_call, script_params, testing_text, - test_timeout) + place_text_in_clipboard( + test, + session_to_copy_from, + interpreter, + script_call, + script_params, + testing_text, + test_timeout, + ) wait_timeout(5) # Now test to see if the copied text from the one session can be # pasted on the other - verify_paste_successful(test, 
session_to_paste_to, testing_text, - interpreter, script_call, test_timeout) + verify_paste_successful( + test, session_to_paste_to, testing_text, interpreter, script_call, test_timeout + ) -def copy_and_paste_cpdisabled_neg(test, session_to_copy_from, - session_to_paste_to, guest_session, params): +def copy_and_paste_cpdisabled_neg( + test, session_to_copy_from, session_to_paste_to, guest_session, params +): """ Negative Test: Sending the commands to copy from one session to another, for this test cp/paste will be disabled from qemu-kvm, Verify with vdagent @@ -558,17 +661,25 @@ def copy_and_paste_cpdisabled_neg(test, session_to_copy_from, # Make sure virtio driver is running utils_spice.verify_virtio(guest_session, test_timeout) # Command to copy text and put it in the keyboard, copy on the client - place_text_in_clipboard(test, session_to_copy_from, interpreter, - script_call, script_params, testing_text, - test_timeout) + place_text_in_clipboard( + test, + session_to_copy_from, + interpreter, + script_call, + script_params, + testing_text, + test_timeout, + ) # Now test to see if the copied text from the one session can be pasted # on the other session - verify_paste_fails(test, session_to_paste_to, testing_text, interpreter, - script_call, test_timeout) + verify_paste_fails( + test, session_to_paste_to, testing_text, interpreter, script_call, test_timeout + ) -def copy_and_paste_largetext(test, session_to_copy_from, session_to_paste_to, - guest_session, params): +def copy_and_paste_largetext( + test, session_to_copy_from, session_to_paste_to, guest_session, params +): """ Sending the commands to copy large text from one session to another, and make sure the data is still correct. @@ -585,8 +696,7 @@ def copy_and_paste_largetext(test, session_to_copy_from, session_to_paste_to, script_write_params = params.get("script_params_writef") script_create_params = params.get("script_params_createf") dst_path = params.get("dst_dir", "guest_script") - final_text_path = os.path.join(params.get("dst_dir"), - params.get("final_textfile")) + final_text_path = os.path.join(params.get("dst_dir"), params.get("final_textfile")) script_call = os.path.join(dst_path, script) string_length = params.get("text_to_test") @@ -597,20 +707,34 @@ def copy_and_paste_largetext(test, session_to_copy_from, session_to_paste_to, utils_spice.verify_virtio(guest_session, test_timeout) # Command to copy text and put it in the clipboard - textfile_checksum = verify_text_copy(test, session_to_copy_from, - interpreter, script_call, - script_create_params, string_length, - final_text_path, test_timeout) + textfile_checksum = verify_text_copy( + test, + session_to_copy_from, + interpreter, + script_call, + script_create_params, + string_length, + final_text_path, + test_timeout, + ) wait_timeout(30) # Verify the paste on the session to paste to - verify_txt_paste_success(test, session_to_paste_to, interpreter, - script_call, script_write_params, - final_text_path, textfile_checksum, test_timeout) - - -def restart_cppaste_lrgtext(test, session_to_copy_from, session_to_paste_to, - guest_session, params): + verify_txt_paste_success( + test, + session_to_paste_to, + interpreter, + script_call, + script_write_params, + final_text_path, + textfile_checksum, + test_timeout, + ) + + +def restart_cppaste_lrgtext( + test, session_to_copy_from, session_to_paste_to, guest_session, params +): """ Sending the commands to copy large text from one session to another, and make sure the data is still correct after restarting vdagent. 
@@ -627,8 +751,7 @@ def restart_cppaste_lrgtext(test, session_to_copy_from, session_to_paste_to, script_write_params = params.get("script_params_writef") script_create_params = params.get("script_params_createf") dst_path = params.get("dst_dir", "guest_script") - final_text_path = os.path.join(params.get("dst_dir"), - params.get("final_textfile")) + final_text_path = os.path.join(params.get("dst_dir"), params.get("final_textfile")) script_call = os.path.join(dst_path, script) string_length = params.get("text_to_test") @@ -639,16 +762,29 @@ def restart_cppaste_lrgtext(test, session_to_copy_from, session_to_paste_to, utils_spice.verify_virtio(guest_session, test_timeout) # Command to copy text and put it in the clipboard - textfile_checksum = verify_text_copy(test, session_to_copy_from, - interpreter, script_call, - script_create_params, string_length, - final_text_path, test_timeout) + textfile_checksum = verify_text_copy( + test, + session_to_copy_from, + interpreter, + script_call, + script_create_params, + string_length, + final_text_path, + test_timeout, + ) wait_timeout(30) # Verify the paste on the session to paste to - verify_txt_paste_success(test, session_to_paste_to, interpreter, - script_call, script_write_params, - final_text_path, textfile_checksum, test_timeout) + verify_txt_paste_success( + test, + session_to_paste_to, + interpreter, + script_call, + script_write_params, + final_text_path, + textfile_checksum, + test_timeout, + ) # Restart vdagent & clear the clipboards. utils_spice.restart_vdagent(guest_session, test_timeout) clear_cb(session_to_paste_to, params) @@ -656,20 +792,34 @@ def restart_cppaste_lrgtext(test, session_to_copy_from, session_to_paste_to, wait_timeout(5) # Command to copy text and put it in the clipboard - textfile_checksum = verify_text_copy(test, session_to_copy_from, - interpreter, script_call, - script_create_params, string_length, - final_text_path, test_timeout) + textfile_checksum = verify_text_copy( + test, + session_to_copy_from, + interpreter, + script_call, + script_create_params, + string_length, + final_text_path, + test_timeout, + ) wait_timeout(30) # Verify the paste on the session to paste to - verify_txt_paste_success(test, session_to_paste_to, interpreter, - script_call, script_write_params, - final_text_path, textfile_checksum, test_timeout) - - -def copy_and_paste_image_pos(test, session_to_copy_from, session_to_paste_to, - guest_session, params): + verify_txt_paste_success( + test, + session_to_paste_to, + interpreter, + script_call, + script_write_params, + final_text_path, + textfile_checksum, + test_timeout, + ) + + +def copy_and_paste_image_pos( + test, session_to_copy_from, session_to_paste_to, guest_session, params +): """ Sending the commands to copy an image from one session to another. 
@@ -686,14 +836,16 @@ def copy_and_paste_image_pos(test, session_to_copy_from, session_to_paste_to, script_set_params = params.get("script_params_img_set") script_save_params = params.get("script_params_img_save") dst_path = params.get("dst_dir", "guest_script") - dst_image_path = os.path.join(params.get("dst_dir"), - params.get("image_tocopy_name")) - dst_image_path_bmp = os.path.join(params.get("dst_dir"), - params.get("image_tocopy_name_bmp")) - final_image_path = os.path.join(params.get("dst_dir"), - params.get("final_image")) - final_image_path_bmp = os.path.join(params.get("dst_dir"), - params.get("final_image_bmp")) + dst_image_path = os.path.join( + params.get("dst_dir"), params.get("image_tocopy_name") + ) + dst_image_path_bmp = os.path.join( + params.get("dst_dir"), params.get("image_tocopy_name_bmp") + ) + final_image_path = os.path.join(params.get("dst_dir"), params.get("final_image")) + final_image_path_bmp = os.path.join( + params.get("dst_dir"), params.get("final_image_bmp") + ) script_call = os.path.join(dst_path, script) # Before doing the copy and paste, verify vdagent is @@ -704,41 +856,80 @@ def copy_and_paste_image_pos(test, session_to_copy_from, session_to_paste_to, if "png" in image_type: # Command to copy text and put it in the keyboard, copy on the client - place_img_in_clipboard(test, session_to_copy_from, interpreter, - script_call, script_set_params, dst_image_path, - test_timeout) + place_img_in_clipboard( + test, + session_to_copy_from, + interpreter, + script_call, + script_set_params, + dst_image_path, + test_timeout, + ) # Now test to see if the copied text from the one session can be # pasted on the other - image_size = verify_img_paste(test, session_to_copy_from, interpreter, - script_call, script_save_params, - final_image_path, test_timeout) + image_size = verify_img_paste( + test, + session_to_copy_from, + interpreter, + script_call, + script_save_params, + final_image_path, + test_timeout, + ) wait_timeout(30) # Verify the paste on the session to paste to - verify_img_paste_success(test, session_to_paste_to, interpreter, - script_call, script_save_params, - final_image_path, image_size, test_timeout) + verify_img_paste_success( + test, + session_to_paste_to, + interpreter, + script_call, + script_save_params, + final_image_path, + image_size, + test_timeout, + ) else: # Testing bmp - place_img_in_clipboard(test, session_to_copy_from, interpreter, - script_call, script_set_params, - dst_image_path_bmp, test_timeout) + place_img_in_clipboard( + test, + session_to_copy_from, + interpreter, + script_call, + script_set_params, + dst_image_path_bmp, + test_timeout, + ) # Now test to see if the copied text from the one session can be # pasted on the other - image_size = verify_img_paste(test, session_to_copy_from, interpreter, - script_call, script_save_params, - final_image_path_bmp, test_timeout) + image_size = verify_img_paste( + test, + session_to_copy_from, + interpreter, + script_call, + script_save_params, + final_image_path_bmp, + test_timeout, + ) wait_timeout(30) # Verify the paste on the session to paste to - verify_img_paste_success(test, session_to_paste_to, interpreter, - script_call, script_save_params, - final_image_path_bmp, image_size, test_timeout) - - -def restart_cppaste_image(test, session_to_copy_from, session_to_paste_to, - guest_session, params): + verify_img_paste_success( + test, + session_to_paste_to, + interpreter, + script_call, + script_save_params, + final_image_path_bmp, + image_size, + test_timeout, + ) + + +def 
restart_cppaste_image( + test, session_to_copy_from, session_to_paste_to, guest_session, params +): """ Sending the commands to copy an image from one session to another. @@ -755,14 +946,16 @@ def restart_cppaste_image(test, session_to_copy_from, session_to_paste_to, script_set_params = params.get("script_params_img_set") script_save_params = params.get("script_params_img_save") dst_path = params.get("dst_dir", "guest_script") - dst_image_path = os.path.join(params.get("dst_dir"), - params.get("image_tocopy_name")) - dst_image_path_bmp = os.path.join(params.get("dst_dir"), - params.get("image_tocopy_name_bmp")) - final_image_path = os.path.join(params.get("dst_dir"), - params.get("final_image")) - final_image_path_bmp = os.path.join(params.get("dst_dir"), - params.get("final_image_bmp")) + dst_image_path = os.path.join( + params.get("dst_dir"), params.get("image_tocopy_name") + ) + dst_image_path_bmp = os.path.join( + params.get("dst_dir"), params.get("image_tocopy_name_bmp") + ) + final_image_path = os.path.join(params.get("dst_dir"), params.get("final_image")) + final_image_path_bmp = os.path.join( + params.get("dst_dir"), params.get("final_image_bmp") + ) script_call = os.path.join(dst_path, script) # Before doing the copy and paste, verify vdagent is @@ -773,37 +966,75 @@ def restart_cppaste_image(test, session_to_copy_from, session_to_paste_to, if "png" in image_type: # Command to copy text and put it in the keyboard, copy on the client - place_img_in_clipboard(test, session_to_copy_from, interpreter, - script_call, script_set_params, dst_image_path, - test_timeout) + place_img_in_clipboard( + test, + session_to_copy_from, + interpreter, + script_call, + script_set_params, + dst_image_path, + test_timeout, + ) # Now test to see if the copied text from the one session can be # pasted on the other - image_size = verify_img_paste(test, session_to_copy_from, interpreter, - script_call, script_save_params, - final_image_path, test_timeout) + image_size = verify_img_paste( + test, + session_to_copy_from, + interpreter, + script_call, + script_save_params, + final_image_path, + test_timeout, + ) wait_timeout(30) # Verify the paste on the session to paste to - verify_img_paste_success(test, session_to_paste_to, interpreter, - script_call, script_save_params, - final_image_path, image_size, test_timeout) + verify_img_paste_success( + test, + session_to_paste_to, + interpreter, + script_call, + script_save_params, + final_image_path, + image_size, + test_timeout, + ) else: # Testing bmp - place_img_in_clipboard(test, session_to_copy_from, interpreter, - script_call, script_set_params, - dst_image_path_bmp, test_timeout) + place_img_in_clipboard( + test, + session_to_copy_from, + interpreter, + script_call, + script_set_params, + dst_image_path_bmp, + test_timeout, + ) # Now test to see if the copied text from the one session can be # pasted on the other - image_size = verify_img_paste(test, session_to_copy_from, interpreter, - script_call, script_save_params, - final_image_path_bmp, test_timeout) + image_size = verify_img_paste( + test, + session_to_copy_from, + interpreter, + script_call, + script_save_params, + final_image_path_bmp, + test_timeout, + ) wait_timeout(30) # Verify the paste on the session to paste to - verify_img_paste_success(test, session_to_paste_to, interpreter, - script_call, script_save_params, - final_image_path_bmp, image_size, test_timeout) + verify_img_paste_success( + test, + session_to_paste_to, + interpreter, + script_call, + script_save_params, + 
final_image_path_bmp, + image_size, + test_timeout, + ) # Restart vdagent & clear the clipboards. utils_spice.restart_vdagent(guest_session, test_timeout) clear_cb(session_to_paste_to, params) @@ -812,41 +1043,80 @@ def restart_cppaste_image(test, session_to_copy_from, session_to_paste_to, if "png" in image_type: # Command to copy text and put it in the keyboard, copy on the client - place_img_in_clipboard(test, session_to_copy_from, interpreter, - script_call, script_set_params, dst_image_path, - test_timeout) + place_img_in_clipboard( + test, + session_to_copy_from, + interpreter, + script_call, + script_set_params, + dst_image_path, + test_timeout, + ) # Now test to see if the copied text from the one session can be # pasted on the other - image_size = verify_img_paste(test, session_to_copy_from, interpreter, - script_call, script_save_params, - final_image_path, test_timeout) + image_size = verify_img_paste( + test, + session_to_copy_from, + interpreter, + script_call, + script_save_params, + final_image_path, + test_timeout, + ) wait_timeout(30) # Verify the paste on the session to paste to - verify_img_paste_success(test, session_to_paste_to, interpreter, - script_call, script_save_params, - final_image_path, image_size, test_timeout) + verify_img_paste_success( + test, + session_to_paste_to, + interpreter, + script_call, + script_save_params, + final_image_path, + image_size, + test_timeout, + ) else: # Testing bmp - place_img_in_clipboard(test, session_to_copy_from, interpreter, - script_call, script_set_params, - dst_image_path_bmp, test_timeout) + place_img_in_clipboard( + test, + session_to_copy_from, + interpreter, + script_call, + script_set_params, + dst_image_path_bmp, + test_timeout, + ) # Now test to see if the copied text from the one session can be # pasted on the other - image_size = verify_img_paste(test, session_to_copy_from, interpreter, - script_call, script_save_params, - final_image_path_bmp, test_timeout) + image_size = verify_img_paste( + test, + session_to_copy_from, + interpreter, + script_call, + script_save_params, + final_image_path_bmp, + test_timeout, + ) wait_timeout(30) # Verify the paste on the session to paste to - verify_img_paste_success(test, session_to_paste_to, interpreter, - script_call, script_save_params, - final_image_path_bmp, image_size, test_timeout) - - -def copy_and_paste_image_neg(test, session_to_copy_from, session_to_paste_to, - guest_session, params): + verify_img_paste_success( + test, + session_to_paste_to, + interpreter, + script_call, + script_save_params, + final_image_path_bmp, + image_size, + test_timeout, + ) + + +def copy_and_paste_image_neg( + test, session_to_copy_from, session_to_paste_to, guest_session, params +): """ Negative Test: Sending the commands to copy an image from one session to another, with spice-vdagentd off, so copy and pasting @@ -864,10 +1134,10 @@ def copy_and_paste_image_neg(test, session_to_copy_from, session_to_paste_to, script_set_params = params.get("script_params_img_set") script_save_params = params.get("script_params_img_save") dst_path = params.get("dst_dir", "guest_script") - dst_image_path = os.path.join(params.get("dst_dir"), - params.get("image_tocopy_name")) - final_image_path = os.path.join(params.get("dst_dir"), - params.get("final_image")) + dst_image_path = os.path.join( + params.get("dst_dir"), params.get("image_tocopy_name") + ) + final_image_path = os.path.join(params.get("dst_dir"), params.get("final_image")) script_call = os.path.join(dst_path, script) # Before doing the copy and 
paste, verify vdagent is @@ -878,22 +1148,41 @@ def copy_and_paste_image_neg(test, session_to_copy_from, session_to_paste_to, # Make sure virtio driver is running utils_spice.verify_virtio(guest_session, test_timeout) # Command to copy text and put it in the keyboard, copy on the client - place_img_in_clipboard(test, session_to_copy_from, interpreter, - script_call, script_set_params, dst_image_path, - test_timeout) + place_img_in_clipboard( + test, + session_to_copy_from, + interpreter, + script_call, + script_set_params, + dst_image_path, + test_timeout, + ) # Now test to see if the copied text from the one session can be # pasted on the other - verify_img_paste(test, session_to_copy_from, interpreter, - script_call, script_save_params, - final_image_path, test_timeout) + verify_img_paste( + test, + session_to_copy_from, + interpreter, + script_call, + script_save_params, + final_image_path, + test_timeout, + ) # Verify the paste on the session to paste to - verify_img_paste_fails(test, session_to_paste_to, interpreter, - script_call, script_save_params, - final_image_path, test_timeout) - - -def copyandpasteimg_cpdisabled_neg(test, session_to_copy_from, - session_to_paste_to, guest_session, params): + verify_img_paste_fails( + test, + session_to_paste_to, + interpreter, + script_call, + script_save_params, + final_image_path, + test_timeout, + ) + + +def copyandpasteimg_cpdisabled_neg( + test, session_to_copy_from, session_to_paste_to, guest_session, params +): """ Negative Tests Sending the commands to copy an image from one session to another; however, copy-paste will be disabled on the VM @@ -911,10 +1200,10 @@ def copyandpasteimg_cpdisabled_neg(test, session_to_copy_from, script_set_params = params.get("script_params_img_set") script_save_params = params.get("script_params_img_save") dst_path = params.get("dst_dir", "guest_script") - dst_image_path = os.path.join(params.get("dst_dir"), - params.get("image_tocopy_name")) - final_image_path = os.path.join(params.get("dst_dir"), - params.get("final_image")) + dst_image_path = os.path.join( + params.get("dst_dir"), params.get("image_tocopy_name") + ) + final_image_path = os.path.join(params.get("dst_dir"), params.get("final_image")) script_call = os.path.join(dst_path, script) # Before doing the copy and paste, verify vdagent is @@ -923,20 +1212,38 @@ def copyandpasteimg_cpdisabled_neg(test, session_to_copy_from, # Make sure virtio driver is running utils_spice.verify_virtio(guest_session, test_timeout) # Command to copy text and put it in the keyboard, copy on the client - place_img_in_clipboard(test, session_to_copy_from, interpreter, - script_call, script_set_params, dst_image_path, - test_timeout) + place_img_in_clipboard( + test, + session_to_copy_from, + interpreter, + script_call, + script_set_params, + dst_image_path, + test_timeout, + ) # Now test to see if the copied text from the one session can be # pasted on the other - verify_img_paste(test, session_to_copy_from, interpreter, - script_call, script_save_params, - final_image_path, test_timeout) + verify_img_paste( + test, + session_to_copy_from, + interpreter, + script_call, + script_save_params, + final_image_path, + test_timeout, + ) wait_timeout(30) # Verify the paste on the session to paste to - verify_img_paste_fails(test, session_to_paste_to, interpreter, - script_call, script_save_params, - final_image_path, test_timeout) + verify_img_paste_fails( + test, + session_to_paste_to, + interpreter, + script_call, + script_save_params, + final_image_path, + test_timeout, + 
) def run(test, params, env): @@ -966,14 +1273,18 @@ def run(test, params, env): client_vm = env.get_vm(params["client_vm"]) client_vm.verify_alive() client_session = client_vm.wait_for_login( - timeout=int(params.get("login_timeout", 360))) + timeout=int(params.get("login_timeout", 360)) + ) guest_vm = env.get_vm(params["guest_vm"]) guest_session = guest_vm.wait_for_login( - timeout=int(params.get("login_timeout", 360))) + timeout=int(params.get("login_timeout", 360)) + ) guest_root_session = guest_vm.wait_for_login( timeout=int(params.get("login_timeout", 360)), - username="root", password="123456") + username="root", + password="123456", + ) test.log.info("Get PID of remote-viewer") client_session.cmd("pgrep remote-viewer") @@ -985,12 +1296,15 @@ def run(test, params, env): # The following is to copy the test image to either the client or guest # if the test deals with images. - image_path = os.path.join(data_dir.get_deps_dir(), 'spice', image_name) - image_path_bmp = os.path.join(data_dir.get_deps_dir(), 'spice', image_name_bmp) + image_path = os.path.join(data_dir.get_deps_dir(), "spice", image_name) + image_path_bmp = os.path.join(data_dir.get_deps_dir(), "spice", image_name_bmp) - test.log.info("Transferring the clipboard script to client & guest," - "destination directory: %s, source script location: %s", - dst_path, script_path) + test.log.info( + "Transferring the clipboard script to client & guest," + "destination directory: %s, source script location: %s", + dst_path, + script_path, + ) client_vm.copy_files_to(script_path, dst_path, timeout=60) guest_vm.copy_files_to(script_path, dst_path, timeout=60) @@ -998,28 +1312,38 @@ def run(test, params, env): if "image" in test_type: if "client_to_guest" in test_type: if "png" in image_type: - test.log.info("Transferring the image to client" - "destination directory: %s, source image: %s", - dst_image_path, image_path) + test.log.info( + "Transferring the image to client" + "destination directory: %s, source image: %s", + dst_image_path, + image_path, + ) client_vm.copy_files_to(image_path, dst_image_path, timeout=60) else: - test.log.info("Transferring a bmp image to client" - "destination directory: %s, source image: %s", - dst_image_path_bmp, image_path_bmp) - client_vm.copy_files_to(image_path_bmp, dst_image_path_bmp, - timeout=60) + test.log.info( + "Transferring a bmp image to client" + "destination directory: %s, source image: %s", + dst_image_path_bmp, + image_path_bmp, + ) + client_vm.copy_files_to(image_path_bmp, dst_image_path_bmp, timeout=60) elif "guest_to_client" in test_type: if "png" in image_type: - test.log.info("Transferring the image to client" - "destination directory: %s, source image: %s", - dst_image_path, image_path) + test.log.info( + "Transferring the image to client" + "destination directory: %s, source image: %s", + dst_image_path, + image_path, + ) guest_vm.copy_files_to(image_path, dst_image_path, timeout=60) else: - test.log.info("Transferring a bmp image to client" - "destination directory: %s, source image: %s", - dst_image_path_bmp, image_path_bmp) - guest_vm.copy_files_to(image_path_bmp, dst_image_path_bmp, - timeout=60) + test.log.info( + "Transferring a bmp image to client" + "destination directory: %s, source image: %s", + dst_image_path_bmp, + image_path_bmp, + ) + guest_vm.copy_files_to(image_path_bmp, dst_image_path_bmp, timeout=60) else: test.fail("Incorrect Test_Setup") @@ -1039,35 +1363,43 @@ def run(test, params, env): wait_timeout(5) # Figure out which test needs to be run - if 
(cp_disabled_test == "yes"): + if cp_disabled_test == "yes": # These are negative tests, clipboards are not synced because the VM # is set to disable copy and paste. if "client_to_guest" in test_type: if "image" in test_type: - test.log.info("Negative Test Case: Copy/Paste Disabled, Copying" - "Image from the Client to Guest Should Not Work\n") - copyandpasteimg_cpdisabled_neg(test, client_session, - guest_session, - guest_root_session, params) + test.log.info( + "Negative Test Case: Copy/Paste Disabled, Copying" + "Image from the Client to Guest Should Not Work\n" + ) + copyandpasteimg_cpdisabled_neg( + test, client_session, guest_session, guest_root_session, params + ) else: - test.log.info("Negative Test Case: Copy/Paste Disabled, Copying" - " from the Client to Guest Should Not Work\n") - copy_and_paste_cpdisabled_neg(test, client_session, - guest_session, - guest_root_session, params) + test.log.info( + "Negative Test Case: Copy/Paste Disabled, Copying" + " from the Client to Guest Should Not Work\n" + ) + copy_and_paste_cpdisabled_neg( + test, client_session, guest_session, guest_root_session, params + ) if "guest_to_client" in test_type: if "image" in test_type: - test.log.info("Negative Test Case: Copy/Paste Disabled, Copying" - "Image from the Guest to Client Should Not Work\n") - copyandpasteimg_cpdisabled_neg(test, guest_session, - client_session, - guest_root_session, params) + test.log.info( + "Negative Test Case: Copy/Paste Disabled, Copying" + "Image from the Guest to Client Should Not Work\n" + ) + copyandpasteimg_cpdisabled_neg( + test, guest_session, client_session, guest_root_session, params + ) else: - test.log.info("Negative Test Case: Copy/Paste Disabled, Copying" - " from the Guest to Client Should Not Work\n") - copy_and_paste_cpdisabled_neg(test, guest_session, - client_session, - guest_root_session, params) + test.log.info( + "Negative Test Case: Copy/Paste Disabled, Copying" + " from the Guest to Client Should Not Work\n" + ) + copy_and_paste_cpdisabled_neg( + test, guest_session, client_session, guest_root_session, params + ) elif "positive" in test_type: # These are positive tests, where the clipboards are synced because @@ -1076,94 +1408,119 @@ def run(test, params, env): if "image" in test_type: if "restart" in test_type: test.log.info("Restart Vdagent, Cp Img Client to Guest") - restart_cppaste_image(test, client_session, guest_session, - guest_root_session, params) + restart_cppaste_image( + test, client_session, guest_session, guest_root_session, params + ) else: test.log.info("Copying an Image from the Client to Guest") - copy_and_paste_image_pos(test, client_session, - guest_session, - guest_root_session, params) + copy_and_paste_image_pos( + test, client_session, guest_session, guest_root_session, params + ) elif testing_text.isdigit(): if "restart" in test_type: - test.log.info("Restart Vdagent, Copying a String of size " - "%s from the Client to Guest", testing_text) - restart_cppaste_lrgtext(test, client_session, - guest_session, - guest_root_session, params) + test.log.info( + "Restart Vdagent, Copying a String of size " + "%s from the Client to Guest", + testing_text, + ) + restart_cppaste_lrgtext( + test, client_session, guest_session, guest_root_session, params + ) else: - test.log.info("Copying a String of size %s" - " from the Client to Guest", testing_text) - copy_and_paste_largetext(test, client_session, - guest_session, - guest_root_session, params) + test.log.info( + "Copying a String of size %s" " from the Client to Guest", + testing_text, 
+ ) + copy_and_paste_largetext( + test, client_session, guest_session, guest_root_session, params + ) else: if "restart" in test_type: - test.log.info( - "Restart Vdagent, Copying from Client to Guest\n") - restart_cppaste(test, client_session, guest_session, - guest_root_session, params) + test.log.info("Restart Vdagent, Copying from Client to Guest\n") + restart_cppaste( + test, client_session, guest_session, guest_root_session, params + ) else: test.log.info("Copying from the Client to Guest\n") - copy_and_paste_pos(test, client_session, guest_session, - guest_root_session, params) + copy_and_paste_pos( + test, client_session, guest_session, guest_root_session, params + ) if "guest_to_client" in test_type: if "image" in test_type: if "restart" in test_type: test.log.info("Restart Vdagent, Copy Img Guest to Client") - restart_cppaste_image(test, guest_session, client_session, - guest_root_session, params) + restart_cppaste_image( + test, guest_session, client_session, guest_root_session, params + ) else: test.log.info("Copying an Image from the Guest to Client") - copy_and_paste_image_pos(test, guest_session, - client_session, - guest_root_session, params) + copy_and_paste_image_pos( + test, guest_session, client_session, guest_root_session, params + ) elif testing_text.isdigit(): if "restart" in test_type: - test.log.info("Restart Vdagent, Copying a String of size " - "%s from the Guest to Client", testing_text) - restart_cppaste_lrgtext(test, guest_session, - client_session, - guest_root_session, params) + test.log.info( + "Restart Vdagent, Copying a String of size " + "%s from the Guest to Client", + testing_text, + ) + restart_cppaste_lrgtext( + test, guest_session, client_session, guest_root_session, params + ) else: - test.log.info("Copying a String of size %s" - " from the Guest to Client", testing_text) - copy_and_paste_largetext(test, guest_session, - client_session, - guest_root_session, params) + test.log.info( + "Copying a String of size %s" " from the Guest to Client", + testing_text, + ) + copy_and_paste_largetext( + test, guest_session, client_session, guest_root_session, params + ) else: if "restart" in test_type: test.log.info("Restart Vdagent, Copying: Client to Guest\n") - restart_cppaste(test, guest_session, client_session, - guest_root_session, params) + restart_cppaste( + test, guest_session, client_session, guest_root_session, params + ) else: test.log.info("Copying from the Guest to Client\n") - copy_and_paste_pos(test, guest_session, client_session, - guest_root_session, params) + copy_and_paste_pos( + test, guest_session, client_session, guest_root_session, params + ) elif "negative" in test_type: # These are negative tests, where the clipboards are not synced because # the spice-vdagent service will not be running on the guest. 
if "client_to_guest" in test_type: if "image" in test_type: - test.log.info("Negative Test Case: Copying an Image from the " - "Client to Guest") - copy_and_paste_image_neg(test, client_session, guest_session, - guest_root_session, params) + test.log.info( + "Negative Test Case: Copying an Image from the " "Client to Guest" + ) + copy_and_paste_image_neg( + test, client_session, guest_session, guest_root_session, params + ) else: - test.log.info("Negative Test Case: Copying from the Client to" - "Guest Should Not Work\n") - copy_and_paste_neg(test, client_session, guest_session, - guest_root_session, params) + test.log.info( + "Negative Test Case: Copying from the Client to" + "Guest Should Not Work\n" + ) + copy_and_paste_neg( + test, client_session, guest_session, guest_root_session, params + ) if "guest_to_client" in test_type: if "image" in test_type: - test.log.info("Negative Test Case: Copying an Image from the " - "Guest to Client") - copy_and_paste_image_neg(test, guest_session, client_session, - guest_root_session, params) + test.log.info( + "Negative Test Case: Copying an Image from the " "Guest to Client" + ) + copy_and_paste_image_neg( + test, guest_session, client_session, guest_root_session, params + ) else: - test.log.info("Negative Test Case: Copying from the Guest to" - " Client Should Not Work\n") - copy_and_paste_neg(test, guest_session, client_session, - guest_root_session, params) + test.log.info( + "Negative Test Case: Copying from the Guest to" + " Client Should Not Work\n" + ) + copy_and_paste_neg( + test, guest_session, client_session, guest_root_session, params + ) else: # The test is not supported, verify what is a supported test. test.fail("Couldn't Find the Correct Test To Run") diff --git a/qemu/tests/rv_fullscreen.py b/qemu/tests/rv_fullscreen.py index dcc81d81c8..be518682d4 100644 --- a/qemu/tests/rv_fullscreen.py +++ b/qemu/tests/rv_fullscreen.py @@ -28,11 +28,13 @@ def run(test, params, env): guest_vm.verify_alive() guest_session = guest_vm.wait_for_login( - timeout=int(params.get("login_timeout", 360))) + timeout=int(params.get("login_timeout", 360)) + ) client_vm.verify_alive() client_session = client_vm.wait_for_login( - timeout=int(params.get("login_timeout", 360))) + timeout=int(params.get("login_timeout", 360)) + ) # Get the resolution of the client & guest test.log.info("Getting the Resolution on the client") @@ -43,11 +45,9 @@ def run(test, params, env): client_res_raw = client_session.cmd("cat /tmp/res|awk '{print $1}'") client_res = client_res_raw.split()[0] except ShellCmdError: - test.fail("Could not get guest resolution, xrandr output:" - " %s" % client_res_raw) + test.fail("Could not get guest resolution, xrandr output:" f" {client_res_raw}") except IndexError: - test.fail("Could not get guest resolution, xrandr output:" - " %s" % client_res_raw) + test.fail("Could not get guest resolution, xrandr output:" f" {client_res_raw}") test.log.info("Getting the Resolution on the guest") guest_session.cmd("export DISPLAY=:0.0") @@ -57,11 +57,9 @@ def run(test, params, env): guest_res_raw = guest_session.cmd("cat /tmp/res|awk '{print $1}'") guest_res = guest_res_raw.split()[0] except ShellCmdError: - test.fail("Could not get guest resolution, xrandr output:" - " %s" % guest_res_raw) + test.fail("Could not get guest resolution, xrandr output:" f" {guest_res_raw}") except IndexError: - test.fail("Could not get guest resolution, xrandr output:" - " %s" % guest_res_raw) + test.fail("Could not get guest resolution, xrandr output:" f" {guest_res_raw}") 
test.log.info("Here's the information I have: ") test.log.info("\nClient Resolution: %s", client_res) diff --git a/qemu/tests/rv_input.py b/qemu/tests/rv_input.py index 694e79db55..8d74019426 100644 --- a/qemu/tests/rv_input.py +++ b/qemu/tests/rv_input.py @@ -12,11 +12,9 @@ import os from aexpect import ShellCmdError +from virttest import data_dir, utils_spice -from virttest import utils_spice -from virttest import data_dir - -LOG_JOB = logging.getLogger('avocado.test') +LOG_JOB = logging.getLogger("avocado.test") def install_pygtk(guest_session, params): @@ -32,8 +30,7 @@ def install_pygtk(guest_session, params): guest_session.cmd(cmd) except ShellCmdError: cmd = "yum -y install pygtk2 --nogpgcheck > /dev/null" - LOG_JOB.info("Installing pygtk2 package to %s", - params.get("guest_vm")) + LOG_JOB.info("Installing pygtk2 package to %s", params.get("guest_vm")) guest_session.cmd(cmd, timeout=60) @@ -49,8 +46,9 @@ def deploy_test_form(test, guest_vm, params): script = params.get("guest_script") script_path = os.path.join(data_dir.get_deps_dir(), "spice", script) - guest_vm.copy_files_to(script_path, "/tmp/%s" % params.get("guest_script"), - timeout=60) + guest_vm.copy_files_to( + script_path, "/tmp/{}".format(params.get("guest_script")), timeout=60 + ) def run_test_form(guest_session, params): @@ -63,7 +61,7 @@ def run_test_form(guest_session, params): """ LOG_JOB.info("Starting test form for catching key events on guest") - cmd = "python /tmp/%s &> /dev/null &" % params.get("guest_script") + cmd = "python /tmp/{} &> /dev/null &".format(params.get("guest_script")) guest_session.cmd(cmd) cmd = "disown -ar" guest_session.cmd(cmd) @@ -97,7 +95,7 @@ def test_type_and_func_keys(client_vm, guest_session, params): LOG_JOB.info("Sending typewriter and functional keys to client machine") for i in range(1, 69): # Avoid Ctrl, RSH, LSH, PtScr, Alt, CpsLk - if (i not in [29, 42, 54, 55, 56, 58]): + if i not in [29, 42, 54, 55, 56, 58]: client_vm.send_key(str(hex(i))) utils_spice.wait_timeout(0.3) @@ -117,12 +115,20 @@ def test_leds_and_esc_keys(client_vm, guest_session, params): utils_spice.wait_timeout(3) # Prepare lists with the keys to be sent to client machine - leds = ['a', 'caps_lock', 'a', 'caps_lock', 'num_lock', 'kp_1', 'num_lock', - 'kp_1'] - shortcuts = ['a', 'shift-a', 'shift_r-a', 'ctrl-a', 'ctrl-c', 'ctrl-v', - 'alt-x'] - escaped = ['insert', 'delete', 'home', 'end', 'pgup', 'pgdn', 'up', - 'down', 'right', 'left'] + leds = ["a", "caps_lock", "a", "caps_lock", "num_lock", "kp_1", "num_lock", "kp_1"] + shortcuts = ["a", "shift-a", "shift_r-a", "ctrl-a", "ctrl-c", "ctrl-v", "alt-x"] + escaped = [ + "insert", + "delete", + "home", + "end", + "pgup", + "pgdn", + "up", + "down", + "right", + "left", + ] test_keys = leds + shortcuts + escaped @@ -150,7 +156,7 @@ def test_nonus_layout(client_vm, guest_session, params): # Czech layout - test some special keys cmd = "setxkbmap cz" guest_session.cmd(cmd) - test_keys = ['7', '8', '9', '0', 'alt_r-x', 'alt_r-c', 'alt_r-v'] + test_keys = ["7", "8", "9", "0", "alt_r-x", "alt_r-c", "alt_r-v"] LOG_JOB.info("Sending czech keys to client machine") for key in test_keys: client_vm.send_key(key) @@ -159,7 +165,7 @@ def test_nonus_layout(client_vm, guest_session, params): # German layout - test some special keys cmd = "setxkbmap de" guest_session.cmd(cmd) - test_keys = ['minus', '0x1a', 'alt_r-q', 'alt_r-m'] + test_keys = ["minus", "0x1a", "alt_r-q", "alt_r-m"] LOG_JOB.info("Sending german keys to client machine") for key in test_keys: client_vm.send_key(key) @@ 
-185,17 +191,17 @@ def test_leds_migration(client_vm, guest_vm, guest_session, params): grep_ver_cmd = "grep -o 'release [[:digit:]]' /etc/redhat-release" rhel_ver = guest_session.cmd(grep_ver_cmd).strip() - LOG_JOB.info("RHEL version: #{0}#".format(rhel_ver)) + LOG_JOB.info("RHEL version: #%s#", rhel_ver) if rhel_ver == "release 6": - client_vm.send_key('num_lock') + client_vm.send_key("num_lock") # Run PyGTK form catching KeyEvents on guest run_test_form(guest_session, params) utils_spice.wait_timeout(3) # Tested keys before migration - test_keys = ['a', 'kp_1', 'caps_lock', 'num_lock', 'a', 'kp_1'] + test_keys = ["a", "kp_1", "caps_lock", "num_lock", "a", "kp_1"] LOG_JOB.info("Sending leds keys to client machine before migration") for key in test_keys: client_vm.send_key(key) @@ -205,7 +211,7 @@ def test_leds_migration(client_vm, guest_vm, guest_session, params): utils_spice.wait_timeout(8) # Tested keys after migration - test_keys = ['a', 'kp_1', 'caps_lock', 'num_lock'] + test_keys = ["a", "kp_1", "caps_lock", "num_lock"] LOG_JOB.info("Sending leds keys to client machine after migration") for key in test_keys: client_vm.send_key(key) @@ -223,29 +229,136 @@ def analyze_results(file_path, test_type): if test_type == "type_and_func_keys": # List of expected keycodes from guest machine - correct_keycodes = ['65307', '49', '50', '51', '52', '53', '54', '55', - '56', '57', '48', '45', '61', '65288', '65289', - '113', '119', '101', '114', '116', '121', '117', - '105', '111', '112', '91', '93', '65293', '97', - '115', '100', '102', '103', '104', '106', '107', - '108', '59', '39', '96', '92', '122', '120', '99', - '118', '98', '110', '109', '44', '46', '47', '32', - '65470', '65471', '65472', '65473', '65474', - '65475', '65476', '65477', '65478', '65479'] + correct_keycodes = [ + "65307", + "49", + "50", + "51", + "52", + "53", + "54", + "55", + "56", + "57", + "48", + "45", + "61", + "65288", + "65289", + "113", + "119", + "101", + "114", + "116", + "121", + "117", + "105", + "111", + "112", + "91", + "93", + "65293", + "97", + "115", + "100", + "102", + "103", + "104", + "106", + "107", + "108", + "59", + "39", + "96", + "92", + "122", + "120", + "99", + "118", + "98", + "110", + "109", + "44", + "46", + "47", + "32", + "65470", + "65471", + "65472", + "65473", + "65474", + "65475", + "65476", + "65477", + "65478", + "65479", + ] elif test_type == "leds_and_esc_keys": - correct_keycodes = ['97', '65509', '65', '65509', '65407', '65457', - '65407', '65436', '97', '65505', '65', '65506', - '65', '65507', '97', '65507', '99', '65507', '118', - '65513', '120', '65379', '65535', '65360', '65367', - '65365', '65366', '65362', '65364', '65363', - '65361'] + correct_keycodes = [ + "97", + "65509", + "65", + "65509", + "65407", + "65457", + "65407", + "65436", + "97", + "65505", + "65", + "65506", + "65", + "65507", + "97", + "65507", + "99", + "65507", + "118", + "65513", + "120", + "65379", + "65535", + "65360", + "65367", + "65365", + "65366", + "65362", + "65364", + "65363", + "65361", + ] elif test_type == "nonus_layout": - correct_keycodes = ['253', '225', '237', '233', '65027', '35', '65027', - '38', '65027', '64', '223', '252', '65027', '64', - '65027', '181'] + correct_keycodes = [ + "253", + "225", + "237", + "233", + "65027", + "35", + "65027", + "38", + "65027", + "64", + "223", + "252", + "65027", + "64", + "65027", + "181", + ] elif test_type == "leds_migration": - correct_keycodes = ['97', '65457', '65509', '65407', '65', '65436', - '65', '65436', '65509', '65407'] + correct_keycodes 
= [ + "97", + "65457", + "65509", + "65407", + "65", + "65436", + "65", + "65436", + "65509", + "65407", + ] else: raise ValueError(f"unexpected test type: {test_type}") @@ -281,10 +394,13 @@ def run(test, params, env): client_vm.verify_alive() guest_session = guest_vm.wait_for_login( - timeout=int(params.get("login_timeout", 360))) + timeout=int(params.get("login_timeout", 360)) + ) guest_root_session = guest_vm.wait_for_login( timeout=int(params.get("login_timeout", 360)), - username="root", password="123456") + username="root", + password="123456", + ) # Verify that gnome is now running on the guest try: @@ -300,15 +416,18 @@ def run(test, params, env): # Get test type and perform proper test test_type = params.get("config_test") - test_mapping = {'type_and_func_keys': test_type_and_func_keys, - 'leds_and_esc_keys': test_leds_and_esc_keys, - 'nonus_layout': test_nonus_layout, - 'leds_migration': test_leds_migration} + test_mapping = { + "type_and_func_keys": test_type_and_func_keys, + "leds_and_esc_keys": test_leds_and_esc_keys, + "nonus_layout": test_nonus_layout, + "leds_migration": test_leds_migration, + } test_parameters = { - 'type_and_func_keys': (client_vm, guest_session, params), - 'leds_and_esc_keys': (client_vm, guest_session, params), - 'nonus_layout': (client_vm, guest_session, params), - 'leds_migration': (client_vm, guest_vm, guest_session, params)} + "type_and_func_keys": (client_vm, guest_session, params), + "leds_and_esc_keys": (client_vm, guest_session, params), + "nonus_layout": (client_vm, guest_session, params), + "leds_migration": (client_vm, guest_vm, guest_session, params), + } try: func = test_mapping[test_type] @@ -324,7 +443,6 @@ def run(test, params, env): # do not match with expected keycodes result = analyze_results(result_path, test_type) if result is not None: - test.fail("Testing of sending keys failed:" - " Expected keycode = %s" % result) + test.fail("Testing of sending keys failed:" f" Expected keycode = {result}") guest_session.close() diff --git a/qemu/tests/rv_logging.py b/qemu/tests/rv_logging.py index 7de44ef423..2a5774d731 100644 --- a/qemu/tests/rv_logging.py +++ b/qemu/tests/rv_logging.py @@ -6,10 +6,10 @@ Requires: connected binaries remote-viewer, Xorg, gnome session """ + import os -from virttest import utils_misc -from virttest import utils_spice +from virttest import utils_misc, utils_spice def run(test, params, env): @@ -35,48 +35,56 @@ def run(test, params, env): guest_vm = env.get_vm(params["guest_vm"]) guest_vm.verify_alive() guest_session = guest_vm.wait_for_login( - timeout=int(params.get("login_timeout", 360))) + timeout=int(params.get("login_timeout", 360)) + ) guest_root_session = guest_vm.wait_for_login( timeout=int(params.get("login_timeout", 360)), - username="root", password="123456") + username="root", + password="123456", + ) scriptdir = os.path.join("scripts", script) script_path = utils_misc.get_path(test.virtdir, scriptdir) # Copying the clipboard script to the guest to test spice vdagent - test.log.info("Transferring the clipboard script to the guest," - "destination directory: %s, source script location: %s", - dst_path, script_path) + test.log.info( + "Transferring the clipboard script to the guest," + "destination directory: %s, source script location: %s", + dst_path, + script_path, + ) guest_vm.copy_files_to(script_path, dst_path, timeout=60) # Some logging tests need the full desktop environment guest_session.cmd("export DISPLAY=:0.0") # Logging test for the qxl driver - if log_test == 'qxl': + if log_test == 
"qxl": test.log.info("Running the logging test for the qxl driver") guest_root_session.cmd("grep -i qxl " + qxl_logfile) # Logging test for spice-vdagent - elif log_test == 'spice-vdagent': - + elif log_test == "spice-vdagent": # Check for RHEL6 or RHEL7 # RHEL7 uses gsettings and RHEL6 uses gconftool-2 try: release = guest_session.cmd("cat /etc/redhat-release") test.log.info("Redhat Release: %s", release) except: - test.cancel("Test is only currently supported on " - "RHEL and Fedora operating systems") + test.cancel( + "Test is only currently supported on " + "RHEL and Fedora operating systems" + ) if "release 7." in release: - spice_vdagent_loginfo_cmd = "journalctl" \ - " SYSLOG_IDENTIFIER=spice-vdagent" \ - " SYSLOG_IDENTIFIER=spice-vdagentd" + spice_vdagent_loginfo_cmd = ( + "journalctl" + " SYSLOG_IDENTIFIER=spice-vdagent" + " SYSLOG_IDENTIFIER=spice-vdagentd" + ) else: spice_vdagent_loginfo_cmd = "tail -n 10 " + spicevdagent_logfile - cmd = ("echo \"SPICE_VDAGENTD_EXTRA_ARGS=-dd\">" - "/etc/sysconfig/spice-vdagentd") + cmd = 'echo "SPICE_VDAGENTD_EXTRA_ARGS=-dd">' "/etc/sysconfig/spice-vdagentd" guest_root_session.cmd(cmd) test.log.info("Running the logging test for spice-vdagent daemon") @@ -84,25 +92,30 @@ def run(test, params, env): # Testing the log after stopping spice-vdagentd utils_spice.stop_vdagent(guest_root_session, test_timeout=15) - cmd = spice_vdagent_loginfo_cmd + " | tail -n 3 | grep \"vdagentd quitting\"" + cmd = spice_vdagent_loginfo_cmd + ' | tail -n 3 | grep "vdagentd quitting"' output = guest_root_session.cmd(cmd) test.log.debug(output) # Testing the log after starting spice-vdagentd utils_spice.start_vdagent(guest_root_session, test_timeout=15) - cmd = spice_vdagent_loginfo_cmd + "| tail -n 7 | grep \"opening vdagent virtio channel\"" + cmd = ( + spice_vdagent_loginfo_cmd + + '| tail -n 7 | grep "opening vdagent virtio channel"' + ) output = guest_root_session.cmd(cmd) test.log.debug(output) # Testing the log after restart spice-vdagentd utils_spice.restart_vdagent(guest_root_session, test_timeout=10) - cmd = spice_vdagent_loginfo_cmd + "| tail -n 7 | grep 'opening vdagent virtio channel'" + cmd = ( + spice_vdagent_loginfo_cmd + + "| tail -n 7 | grep 'opening vdagent virtio channel'" + ) output = guest_root_session.cmd(cmd) test.log.debug(output) # Finally test copying text within the guest - cmd = "%s %s %s %s" % (interpreter, script_call, - script_params, testing_text) + cmd = f"{interpreter} {script_call} {script_params} {testing_text}" test.log.info("This command here: %s", cmd) try: @@ -112,15 +125,17 @@ def run(test, params, env): if "The text has been placed into the clipboard." in output: test.log.info("Copying of text was successful") else: - test.fail("Copying to the clipboard failed. %s" % output) + test.fail(f"Copying to the clipboard failed. 
{output}") except: test.fail("Copying to the clipboard failed try block failed") - test.log.debug("------------ End of script output of the Copying" - " Session ------------") + test.log.debug( + "------------ End of script output of the Copying" " Session ------------" + ) - output = guest_root_session.cmd(spice_vdagent_loginfo_cmd + "| tail -n 2" + - " | grep 'clipboard grab'") + output = guest_root_session.cmd( + spice_vdagent_loginfo_cmd + "| tail -n 2" + " | grep 'clipboard grab'" + ) else: # Couldn't find the right test to run diff --git a/qemu/tests/rv_smartcard.py b/qemu/tests/rv_smartcard.py index 16bbad5992..a15d451b92 100644 --- a/qemu/tests/rv_smartcard.py +++ b/qemu/tests/rv_smartcard.py @@ -32,14 +32,18 @@ def run(test, params, env): guest_vm.verify_alive() guest_session = guest_vm.wait_for_login( timeout=int(params.get("login_timeout", 360)), - username="root", password="123456") + username="root", + password="123456", + ) client_vm = env.get_vm(params["client_vm"]) client_vm.verify_alive() client_session = client_vm.wait_for_login( timeout=int(params.get("login_timeout", 360)), - username="root", password="123456") + username="root", + password="123456", + ) # Verify remote-viewer is running try: pid = client_session.cmd("pgrep remote-viewer") @@ -66,20 +70,21 @@ def run(test, params, env): # Send a carriage return for PIN for token listcerts_output = guest_session.cmd("") except: - test.fail("Test failed trying to get the output" - " of pkcs11_listcerts") + test.fail("Test failed trying to get the output" " of pkcs11_listcerts") - test.log.info("Listing Certs available on the guest: %s", - listcerts_output) + test.log.info("Listing Certs available on the guest: %s", listcerts_output) for cert in cert_list: subj_string = "CN=" + cert if subj_string in listcerts_output: - test.log.debug("%s has been found as a listed cert in the guest", - subj_string) + test.log.debug( + "%s has been found as a listed cert in the guest", subj_string + ) else: - test.fail("Certificate %s was not found as a listed" - " cert in the guest" % subj_string) + test.fail( + f"Certificate {subj_string} was not found as a listed" + " cert in the guest" + ) elif smartcard_testtype == "pklogin_finder": # pkcs11_listcerts not installed until # Smart Card Support is installed @@ -92,8 +97,7 @@ def run(test, params, env): # Send a carriage return for PIN for token certsinfo_output = guest_session.cmd("", ok_status=[0, 1]) except: - test.fail("Test failed trying to get the output" - " of pklogin_finder") + test.fail("Test failed trying to get the output" " of pklogin_finder") testindex = certsinfo_output.find(searchstr) if testindex >= 0: string_aftercheck = certsinfo_output[testindex:] @@ -109,55 +113,58 @@ def run(test, params, env): string_aftercheck = string_aftercheck[testindex:] testindex2 = string_aftercheck.find(subj_string) if testindex >= 0: - test.log.debug("Found %s in output of pklogin", - subj_string) + test.log.debug("Found %s in output of pklogin", subj_string) string_aftercheck = string_aftercheck[testindex2:] testindex3 = string_aftercheck.find(certcheck1) if testindex3 >= 0: - test.log.debug("Found %s in output of pklogin", - certcheck1) + test.log.debug("Found %s in output of pklogin", certcheck1) string_aftercheck = string_aftercheck[testindex3:] testindex4 = string_aftercheck.find(certcheck2) if testindex4 >= 0: - test.log.debug("Found %s in output of pklogin", - certcheck2) + test.log.debug( + "Found %s in output of pklogin", certcheck2 + ) else: - test.fail(certcheck2 + " not found" - 
" in output of pklogin " - "on the guest") + test.fail( + certcheck2 + " not found" + " in output of pklogin " + "on the guest" + ) else: - test.fail(certcheck1 + " not found in " - "output of pklogin on the guest") + test.fail( + certcheck1 + " not found in " + "output of pklogin on the guest" + ) else: - test.fail("Common name %s, not found " - "in pkogin_finder after software " - "smartcard was inserted into the " - "guest" % subj_string) + test.fail( + f"Common name {subj_string}, not found " + "in pkogin_finder after software " + "smartcard was inserted into the " + "guest" + ) else: - test.fail(checkstr + " not found in output of " - "pklogin on the guest") + test.fail( + checkstr + " not found in output of " "pklogin on the guest" + ) else: - test.fail(searchstr + " not found in output of pklogin" - " on the guest") + test.fail(searchstr + " not found in output of pklogin" " on the guest") test.log.info("Certs Info on the guest: %s", certsinfo_output) else: test.fail("Please specify a valid smartcard testype") # Do some cleanup, remove the certs on the client - # for each cert listed by the test, create it on the client + # for each cert listed by the test, create it on the client for cert in cert_list: cmd = "certutil " cmd += "-D -n '" + cert + "' -d " + cert_db try: output = client_session.cmd(cmd) except: - test.log.warning( - "Deleting of %s certificate from the client failed", - cert) - test.log.debug("Output of " + cmd + ": " + output) + test.log.warning("Deleting of %s certificate from the client failed", cert) + test.log.debug("Output of %s: %s", cmd, output) client_session.close() guest_session.close() diff --git a/qemu/tests/rv_vdagent.py b/qemu/tests/rv_vdagent.py index 8861ee8463..7c2022139d 100644 --- a/qemu/tests/rv_vdagent.py +++ b/qemu/tests/rv_vdagent.py @@ -25,15 +25,17 @@ def run(test, params, env): guest_vm.verify_alive() guest_root_session = guest_vm.wait_for_login( timeout=int(params.get("login_timeout", 360)), - username="root", password="123456") + username="root", + password="123456", + ) client_vm = env.get_vm(params["client_vm"]) client_vm.verify_alive() client_session = client_vm.wait_for_login( - timeout=int(params.get("login_timeout", 360))) + timeout=int(params.get("login_timeout", 360)) + ) - vdagent_status = utils_spice.get_vdagent_status( - guest_root_session, test_timeout) + vdagent_status = utils_spice.get_vdagent_status(guest_root_session, test_timeout) # start test if vdagent_test == "start": @@ -44,8 +46,7 @@ def run(test, params, env): else: utils_spice.start_vdagent(guest_root_session, test_timeout) # Verify the status of vdagent is running - status = utils_spice.get_vdagent_status( - guest_root_session, test_timeout) + status = utils_spice.get_vdagent_status(guest_root_session, test_timeout) if "running" in status: pass else: @@ -59,8 +60,7 @@ def run(test, params, env): else: utils_spice.stop_vdagent(guest_root_session, test_timeout) # Verify the status of vdagent is stopped - status = utils_spice.get_vdagent_status( - guest_root_session, test_timeout) + status = utils_spice.get_vdagent_status(guest_root_session, test_timeout) if "stopped" in status: pass else: @@ -75,8 +75,7 @@ def run(test, params, env): else: utils_spice.restart_vdagent(guest_root_session, test_timeout) # Verify the status of vdagent is started - status = utils_spice.get_vdagent_status( - guest_root_session, test_timeout) + status = utils_spice.get_vdagent_status(guest_root_session, test_timeout) if "running" in status: pass else: @@ -90,8 +89,7 @@ def run(test, params, 
env): else: utils_spice.restart_vdagent(guest_root_session, test_timeout) # Verify the status of vdagent is started - status = utils_spice.get_vdagent_status( - guest_root_session, test_timeout) + status = utils_spice.get_vdagent_status(guest_root_session, test_timeout) if "running" in status: pass else: diff --git a/qemu/tests/rv_video.py b/qemu/tests/rv_video.py index f3064b9c9b..3201b807ac 100644 --- a/qemu/tests/rv_video.py +++ b/qemu/tests/rv_video.py @@ -7,15 +7,15 @@ Test starts video player """ + import logging import os -import time import re +import time -from virttest import utils_misc -from virttest import remote +from virttest import remote, utils_misc -LOG_JOB = logging.getLogger('avocado.test') +LOG_JOB = logging.getLogger("avocado.test") def launch_totem(test, guest_session, params): @@ -29,8 +29,9 @@ def launch_totem(test, guest_session, params): LOG_JOB.info("Totem version: %s", totem_version) # repeat parameters for totem - LOG_JOB.info("Set up video repeat to '%s' to the Totem.", - params.get("repeat_video")) + LOG_JOB.info( + "Set up video repeat to '%s' to the Totem.", params.get("repeat_video") + ) # Check for RHEL6 or RHEL7 # RHEL7 uses gsettings and RHEL6 uses gconftool-2 @@ -38,18 +39,17 @@ def launch_totem(test, guest_session, params): release = guest_session.cmd("cat /etc/redhat-release") LOG_JOB.info("Redhat Release: %s", release) except: - test.cancel("Test is only currently supported on " - "RHEL and Fedora operating systems") + test.cancel( + "Test is only currently supported on " "RHEL and Fedora operating systems" + ) cmd = "export DISPLAY=:0.0" guest_session.cmd(cmd) if "release 6." in release: cmd = "gconftool-2 --set /apps/totem/repeat -t bool" - totem_params = "--display=:0.0 --play" else: cmd = "gsettings set org.gnome.totem repeat" - totem_params = "" if params.get("repeat_video", "no") == "yes": cmd += " true" @@ -66,8 +66,10 @@ def launch_totem(test, guest_session, params): else: fullscreen = "" - cmd = "nohup totem %s %s --display=:0.0 &> /dev/null &" \ - % (fullscreen, params.get("destination_video_file_path")) + cmd = "nohup totem {} {} --display=:0.0 &> /dev/null &".format( + fullscreen, + params.get("destination_video_file_path"), + ) guest_session.cmd(cmd) time.sleep(10) @@ -80,8 +82,10 @@ def launch_totem(test, guest_session, params): if not re.search(r"^(\d+)", pid): LOG_JOB.info("Could not find Totem running! 
Try starting again!") # Sometimes totem doesn't start properly; try again - cmd = "nohup totem %s %s --display=:0.0 &> /dev/null &" \ - % (fullscreen, params.get("destination_video_file_path")) + cmd = "nohup totem {} {} --display=:0.0 &> /dev/null &".format( + fullscreen, + params.get("destination_video_file_path"), + ) guest_session.cmd(cmd) cmd = "pgrep totem" pid = guest_session.cmd_output(cmd) @@ -99,12 +103,15 @@ def deploy_video_file(test, vm_obj, params): video_dir = os.path.join("deps", source_video_file) video_path = utils_misc.get_path(test.virtdir, video_dir) - remote.copy_files_to(vm_obj.get_address(), 'scp', - params.get("username"), - params.get("password"), - params.get("shell_port"), - video_path, - params.get("destination_video_file_path")) + remote.copy_files_to( + vm_obj.get_address(), + "scp", + params.get("username"), + params.get("password"), + params.get("shell_port"), + video_path, + params.get("destination_video_file_path"), + ) def run(test, params, env): @@ -118,7 +125,8 @@ def run(test, params, env): guest_vm = env.get_vm(params["guest_vm"]) guest_vm.verify_alive() guest_session = guest_vm.wait_for_login( - timeout=int(params.get("login_timeout", 360))) + timeout=int(params.get("login_timeout", 360)) + ) deploy_video_file(test, guest_vm, params) launch_totem(test, guest_session, params) diff --git a/qemu/tests/rv_vmshutdown.py b/qemu/tests/rv_vmshutdown.py index f9b4aa16a4..69c5522cbf 100644 --- a/qemu/tests/rv_vmshutdown.py +++ b/qemu/tests/rv_vmshutdown.py @@ -6,11 +6,8 @@ """ from aexpect import ShellCmdError - +from virttest import utils_misc, utils_net, utils_spice from virttest.virt_vm import VMDeadError -from virttest import utils_spice -from virttest import utils_misc -from virttest import utils_net def run(test, params, env): @@ -42,13 +39,17 @@ def run(test, params, env): guest_vm.verify_alive() guest_session = guest_vm.wait_for_login( timeout=int(params.get("login_timeout", 360)), - username="root", password="123456") + username="root", + password="123456", + ) client_vm = env.get_vm(params["client_vm"]) client_vm.verify_alive() client_session = client_vm.wait_for_login( timeout=int(params.get("login_timeout", 360)), - username="root", password="123456") + username="root", + password="123456", + ) if guest_vm.get_spice_var("spice_ssl") == "yes": host_port = guest_vm.get_spice_var("spice_tls_port") @@ -57,8 +58,9 @@ def run(test, params, env): # Determine if the test is to shutdown from cli or qemu monitor if shutdownfrom == "cmd": - test.log.info("Shutting down guest from command line:" - " %s\n", cmd_cli_shutdown) + test.log.info( + "Shutting down guest from command line:" " %s\n", cmd_cli_shutdown + ) output = guest_session.cmd(cmd_cli_shutdown) test.log.debug("Guest is being shutdown: %s", output) elif shutdownfrom == "qemu_monitor": @@ -66,8 +68,7 @@ def run(test, params, env): output = guest_vm.monitor.cmd(cmd_qemu_shutdown) test.log.debug("Output of %s: %s", cmd_qemu_shutdown, output) else: - test.fail("shutdownfrom var not set, valid values are" - " cmd or qemu_monitor") + test.fail("shutdownfrom var not set, valid values are" " cmd or qemu_monitor") # wait for the guest vm to be shutoff test.log.info("Waiting for the guest VM to be shutoff") @@ -85,20 +86,23 @@ def run(test, params, env): test.log.info("Guest VM is verified to be shutdown") try: - utils_spice.verify_established( - client_vm, host_ip, host_port, rv_binary) + utils_spice.verify_established(client_vm, host_ip, host_port, rv_binary) test.fail("Remote-Viewer connection to guest is 
still established.") except utils_spice.RVConnectError: test.log.info("There is no remote-viewer connection as expected") else: - test.fail("Unexpected error while trying to see if there" - " was no spice connection to the guest") + test.fail( + "Unexpected error while trying to see if there" + " was no spice connection to the guest" + ) # Verify the remote-viewer process is not running - test.log.info("Checking to see if remote-viewer process is still running on" - " client after VM has been shutdown") + test.log.info( + "Checking to see if remote-viewer process is still running on" + " client after VM has been shutdown" + ) try: - pidoutput = str(client_session.cmd("pgrep remote-viewer")) + str(client_session.cmd("pgrep remote-viewer")) test.fail("Remote-viewer is still running on the client.") except ShellCmdError: test.log.info("Remote-viewer process is not running as expected.") diff --git a/qemu/tests/s390x_cpu_model_baseline.py b/qemu/tests/s390x_cpu_model_baseline.py index 53209cb002..9bc1036a7a 100644 --- a/qemu/tests/s390x_cpu_model_baseline.py +++ b/qemu/tests/s390x_cpu_model_baseline.py @@ -42,13 +42,13 @@ def run(test, params, env): :param env: Dictionary with the test environment. """ vm = env.get_vm(params["main_vm"]) - test.log.info('Start query cpu model supported by qmp') + test.log.info("Start query cpu model supported by qmp") # get cpu models for test - cpu_models = params.objects('cpu_models') - props1 = props_dict(params.get_dict('props1')) - props2 = props_dict(params.get_dict('props2')) - expected_props = params.objects('expected_props') - not_expected_props = params.objects('not_expected_props') + cpu_models = params.objects("cpu_models") + props1 = props_dict(params.get_dict("props1")) + props2 = props_dict(params.get_dict("props2")) + expected_props = params.objects("expected_props") + not_expected_props = params.objects("not_expected_props") test_failures = [] for i in range(len(cpu_models)): newer_model = cpu_models[i] @@ -56,36 +56,35 @@ def run(test, params, env): older_model = cpu_models[j] args = { - 'modela': {'name': older_model, 'props': props1}, - 'modelb': {'name': newer_model, 'props': props2} - } - test.log.debug("Test with args: %s" % args) - output = vm.monitor.cmd('query-cpu-model-baseline', args) + "modela": {"name": older_model, "props": props1}, + "modelb": {"name": newer_model, "props": props2}, + } + test.log.debug("Test with args: %s", args) + output = vm.monitor.cmd("query-cpu-model-baseline", args) - obtained_model = output.get('model').get('name') + obtained_model = output.get("model").get("name") expected_model = older_model + "-base" if obtained_model != expected_model: - msg = ("Expected to get older model but newer one" - " was chosen:" - " %s instead of expected %s." - " Input model names: %s and %s" % (obtained_model, - expected_model, - older_model, - newer_model)) + msg = ( + "Expected to get older model but newer one" + " was chosen:" + f" {obtained_model} instead of expected {expected_model}." 
+ f" Input model names: {older_model} and {newer_model}" + ) test_failures.append(msg) - props = output.get('model').get('props') + props = output.get("model").get("props") found_not_expected = found_unexpected_props(not_expected_props, props) not_found_expected = not_found_expected_props(expected_props, props) if not_found_expected or found_not_expected: - msg = ("Expected to get intersection of props '%s'" - " and '%s': '%s';" - " but got '%s'" % (props1, - props2, - expected_props, - props)) + msg = ( + f"Expected to get intersection of props '{props1}'" + f" and '{props2}': '{expected_props}';" + f" but got '{props}'" + ) test_failures.append(msg) if test_failures: - test.fail("Some baselines didn't return as expected." - " Details: %s" % test_failures) + test.fail( + "Some baselines didn't return as expected." f" Details: {test_failures}" + ) diff --git a/qemu/tests/s390x_cpu_model_boot.py b/qemu/tests/s390x_cpu_model_boot.py index d6e5ebef48..b33429c78c 100644 --- a/qemu/tests/s390x_cpu_model_boot.py +++ b/qemu/tests/s390x_cpu_model_boot.py @@ -18,17 +18,15 @@ def run(test, params, env): :param cpu_model_check_args: arguments of checking cpu models """ boot_cpu_models = params.get_list("boot_cpu_models", delimiter=";") - cpu_model_check_cmd = params.get('cpu_model_check_cmd') - cpu_model_check_args = json.loads(params.get('cpu_model_check_args')) - vm_name = params['main_vm'] + cpu_model_check_cmd = params.get("cpu_model_check_cmd") + cpu_model_check_args = json.loads(params.get("cpu_model_check_args")) + vm_name = params["main_vm"] vm = env.get_vm(vm_name) - host_model = vm.monitor.cmd(cpu_model_check_cmd, - cpu_model_check_args).get('model') - host_model_name = host_model.get('name')[:-5] + host_model = vm.monitor.cmd(cpu_model_check_cmd, cpu_model_check_args).get("model") + host_model_name = host_model.get("name")[:-5] for boot_models in boot_cpu_models: if host_model_name in boot_models: - boot_cpu_models = boot_cpu_models[:boot_cpu_models.index( - boot_models) + 1] + boot_cpu_models = boot_cpu_models[: boot_cpu_models.index(boot_models) + 1] break vm.destroy() for boot_models in boot_cpu_models: @@ -37,12 +35,11 @@ def run(test, params, env): for boot_model in boot_models: params["cpu_model"] = boot_model try: - test.log.info("Start boot guest with cpu model: %s.", - boot_model) + test.log.info("Start boot guest with cpu model: %s.", boot_model) vm.create(params=params) vm.verify_alive() vm.wait_for_serial_login() vm.destroy() except Exception as info: - test.log.error("Guest failed to boot up with: %s" % boot_model) + test.log.error("Guest failed to boot up with: %s", boot_model) test.fail(info) diff --git a/qemu/tests/s390x_cpu_model_expansion.py b/qemu/tests/s390x_cpu_model_expansion.py index cade84d990..68f22a1e43 100644 --- a/qemu/tests/s390x_cpu_model_expansion.py +++ b/qemu/tests/s390x_cpu_model_expansion.py @@ -13,36 +13,42 @@ def run(test, params, env): :params props: expected cpu model properties """ vm = env.get_vm(params["main_vm"]) - test.log.info('Start query cpu model supported by qmp') + test.log.info("Start query cpu model supported by qmp") # get cpu models for test - cpu_models = params.objects('cpu_models') + cpu_models = params.objects("cpu_models") for cpu_model in cpu_models: - args = {'type': 'static', 'model': {'name': cpu_model}} - output = vm.monitor.cmd('query-cpu-model-expansion', args) + args = {"type": "static", "model": {"name": cpu_model}} + output = vm.monitor.cmd("query-cpu-model-expansion", args) try: - model = output.get('model') - model_name 
= model.get('name') - model_props = model.get('props') - if model_name != cpu_model+'-base': - test.fail('Command query-cpu-model-expansion return' - ' wrong model: %s with %s' % (cpu_model+'-base', - model_name)) + model = output.get("model") + model_name = model.get("name") + model_props = model.get("props") + if model_name != cpu_model + "-base": + test.fail( + "Command query-cpu-model-expansion return" + " wrong model: {} with {}".format(cpu_model + "-base", model_name) + ) if model_name[:-5] in cpu_models: - props = params.get_dict('props') + props = params.get_dict("props") keys = props.keys() for key in keys: - if props[key] == 'True': + if props[key] == "True": props[key] = True - elif props[key] == 'False': + elif props[key] == "False": props[key] = False else: - test.fail('unexpected values in configuration,' - 'key: %s, value:%s' % (key, props[key])) + test.fail( + "unexpected values in configuration," + f"key: {key}, value:{props[key]}" + ) if model_props != props: - test.fail('Properties %s was not same as expected,%s' % - (model_props, props)) + test.fail( + f"Properties {model_props} was not same as expected,{props}" + ) else: - test.fail('There is no suitable cpu model searched by expansion' - 'guest: %s, expected: %s' % (model_name, cpu_models)) + test.fail( + "There is no suitable cpu model searched by expansion" + f"guest: {model_name}, expected: {cpu_models}" + ) except Exception as info: test.fail(info) diff --git a/qemu/tests/same_mac_address.py b/qemu/tests/same_mac_address.py index 86c4010766..159577f3b6 100644 --- a/qemu/tests/same_mac_address.py +++ b/qemu/tests/same_mac_address.py @@ -1,9 +1,7 @@ import re import time -from virttest import error_context -from virttest import utils_test -from virttest import utils_net +from virttest import error_context, utils_net, utils_test @error_context.context_aware @@ -21,25 +19,26 @@ def run(test, params, env): :param env: Dictionary with test environment. """ timeout = int(params.get_numeric("timeout", 360)) - error_context.context("Boot guest with 2 virtio-net with the same mac", - test.log.info) + error_context.context( + "Boot guest with 2 virtio-net with the same mac", test.log.info + ) vm = env.get_vm(params["main_vm"]) session = vm.wait_for_login(timeout=timeout) - error_context.context("Check if the driver is installed and " - "verified", test.log.info) + error_context.context( + "Check if the driver is installed and " "verified", test.log.info + ) driver_verifier = params["driver_verifier"] - session = utils_test.qemu.windrv_check_running_verifier(session, vm, - test, - driver_verifier, - timeout) + session = utils_test.qemu.windrv_check_running_verifier( + session, vm, test, driver_verifier, timeout + ) # wait for getting the 169.254.xx.xx, it gets slower than valid ip. 
time.sleep(60) error_context.context("Check the ip of guest", test.log.info) mac = vm.virtnet[0].mac - cmd = 'wmic nicconfig where macaddress="%s" get ipaddress' % mac + cmd = f'wmic nicconfig where macaddress="{mac}" get ipaddress' status, output = session.cmd_status_output(cmd, timeout) if status: - test.error("Check ip error, output=%s" % output) + test.error(f"Check ip error, output={output}") lines = [l.strip() for l in output.splitlines() if l.strip()] test.log.info(lines) @@ -53,7 +52,6 @@ def run(test, params, env): error_context.context("Ping out from guest", test.log.info) host_ip = utils_net.get_host_ip_address(params) - status, output = utils_net.ping(host_ip, count=10, timeout=60, - session=session) + status, output = utils_net.ping(host_ip, count=10, timeout=60, session=session) if status: - test.fail("Ping %s failed, output=%s" % (host_ip, output)) + test.fail(f"Ping {host_ip} failed, output={output}") diff --git a/qemu/tests/save_restore_vm.py b/qemu/tests/save_restore_vm.py index 4dd0a92dae..ee3ebd7146 100644 --- a/qemu/tests/save_restore_vm.py +++ b/qemu/tests/save_restore_vm.py @@ -1,8 +1,7 @@ -import time import os +import time -from virttest import error_context -from virttest import utils_misc +from virttest import error_context, utils_misc from virttest.staging import utils_memory @@ -27,8 +26,9 @@ def run(test, params, env): expect_time = int(params.get("expect_restore_time", 25)) session = vm.wait_for_login(timeout=timeout) - save_file = params.get("save_file", os.path.join("/tmp", - utils_misc.generate_random_string(8))) + save_file = params.get( + "save_file", os.path.join("/tmp", utils_misc.generate_random_string(8)) + ) try: error_context.context("Pause VM", test.log.info) @@ -44,7 +44,7 @@ def run(test, params, env): vm.resume() session = vm.wait_for_login(timeout=timeout) restore_time = utils_misc.monotonic_time() - vm.start_monotonic_time - test.write_test_keyval({'result': "%ss" % restore_time}) + test.write_test_keyval({"result": f"{restore_time}s"}) test.log.info("Restore time: %ss", restore_time) finally: @@ -60,6 +60,6 @@ def run(test, params, env): params["restore_image_after_testing"] = "yes" if restore_time > expect_time: - test.fail("Guest restoration took too long: %ss" % restore_time) + test.fail(f"Guest restoration took too long: {restore_time}s") session.close() diff --git a/qemu/tests/savevm_loadvm.py b/qemu/tests/savevm_loadvm.py index 7422a63339..34094a8248 100644 --- a/qemu/tests/savevm_loadvm.py +++ b/qemu/tests/savevm_loadvm.py @@ -1,5 +1,4 @@ -from virttest import utils_misc -from virttest import error_context +from virttest import error_context, utils_misc @error_context.context_aware @@ -25,18 +24,16 @@ def run(test, params, env): vm.verify_alive() vm.wait_for_login().close() try: - error_context.base_context("Saving VM to %s" % snapshot_tag, - test.log.info) - vm.monitor.human_monitor_cmd("savevm %s" % snapshot_tag) + error_context.base_context(f"Saving VM to {snapshot_tag}", test.log.info) + vm.monitor.human_monitor_cmd(f"savevm {snapshot_tag}") vm_snapshots = vm.monitor.info("snapshots") if snapshot_tag not in vm_snapshots: - test.fail("Failed to save VM to %s" % snapshot_tag) - error_context.context("Loading VM from %s" % snapshot_tag, - test.log.info) - vm.monitor.human_monitor_cmd("loadvm %s" % snapshot_tag) + test.fail(f"Failed to save VM to {snapshot_tag}") + error_context.context(f"Loading VM from {snapshot_tag}", test.log.info) + vm.monitor.human_monitor_cmd(f"loadvm {snapshot_tag}") if os_type == "linux": 
vm.verify_kernel_crash() vm.verify_dmesg() finally: if snapshot_tag in vm.monitor.info("snapshots"): - vm.monitor.human_monitor_cmd("delvm %s" % snapshot_tag) + vm.monitor.human_monitor_cmd(f"delvm {snapshot_tag}") diff --git a/qemu/tests/seabios.py b/qemu/tests/seabios.py index 02a4e46777..3af428de91 100644 --- a/qemu/tests/seabios.py +++ b/qemu/tests/seabios.py @@ -1,7 +1,6 @@ import re -from virttest import error_context -from virttest import utils_misc +from virttest import error_context, utils_misc @error_context.context_aware @@ -19,6 +18,7 @@ def run(test, params, env): :param params: Dictionary with the test parameters :param env: Dictionary with test environment. """ + def get_output(session_obj): """ Use the function to short the lines in the scripts @@ -33,8 +33,7 @@ def boot_menu(): return re.search(boot_menu_hint, get_output(seabios_session)) def boot_menu_check(): - return (len(re.findall(boot_menu_hint, - get_output(seabios_session))) > 1) + return len(re.findall(boot_menu_hint, get_output(seabios_session))) > 1 error_context.context("Start guest with sga bios", test.log.info) vm = env.get_vm(params["main_vm"]) @@ -44,24 +43,23 @@ def boot_menu_check(): vm.create() timeout = float(params.get("login_timeout", 240)) - boot_menu_key = params.get("boot_menu_key", 'esc') + boot_menu_key = params.get("boot_menu_key", "esc") restart_key = params.get("restart_key") boot_menu_hint = params.get("boot_menu_hint") boot_device = params.get("boot_device", "") sgabios_info = params.get("sgabios_info") - seabios_session = vm.logsessions['seabios'] + seabios_session = vm.logsessions["seabios"] if sgabios_info: error_context.context("Display and check the SGABIOS info", test.log.info) def info_check(): - return re.search(sgabios_info, - get_output(vm.serial_console)) + return re.search(sgabios_info, get_output(vm.serial_console)) if not utils_misc.wait_for(info_check, timeout, 1): err_msg = "Cound not get sgabios message. 
The output" - err_msg += " is %s" % get_output(vm.serial_console) + err_msg += f" is {get_output(vm.serial_console)}" test.fail(err_msg) if not (boot_menu_hint and utils_misc.wait_for(boot_menu, timeout, 1)): @@ -100,13 +98,11 @@ def get_list(): test.log.info("Got boot menu entries: '%s'", boot_list) for i, v in enumerate(boot_list, start=1): if re.search(boot_device, v, re.I): - error_context.context("Start guest from boot entry '%s'" % v, - test.log.info) + error_context.context(f"Start guest from boot entry '{v}'", test.log.info) vm.send_key(str(i)) break else: - test.fail("Could not get any boot entry match " - "pattern '%s'" % boot_device) + test.fail("Could not get any boot entry match " f"pattern '{boot_device}'") error_context.context("Log into the guest to verify it's up") session = vm.wait_for_login(timeout=timeout) diff --git a/qemu/tests/seabios_bin.py b/qemu/tests/seabios_bin.py index fe3a17e6ca..2809a559ec 100644 --- a/qemu/tests/seabios_bin.py +++ b/qemu/tests/seabios_bin.py @@ -1,8 +1,5 @@ -from virttest import error_context -from virttest import env_process -from virttest import utils_misc - from avocado.utils import process +from virttest import env_process, error_context, utils_misc @error_context.context_aware @@ -19,48 +16,47 @@ def run(test, params, env): """ bin_dict = { - 'rhel6': 'bios.bin', - 'rhel7': 'bios-256k.bin', - 'rhel8': 'bios-256k.bin', - 'rhel9': 'bios-256k.bin', - '4.2': 'bios-256k.bin', - '2.11': 'bios-256k.bin' - } + "rhel6": "bios.bin", + "rhel7": "bios-256k.bin", + "rhel8": "bios-256k.bin", + "rhel9": "bios-256k.bin", + "4.2": "bios-256k.bin", + "2.11": "bios-256k.bin", + } error_context.context("Get available bin files", test.log.info) - output = process.system_output('ls /usr/share/seabios', shell=True).decode() + output = process.system_output("ls /usr/share/seabios", shell=True).decode() bin_file_skip = params.get("bin_file_skip", "") for value in bin_dict.values(): if value not in output and value != bin_file_skip: - test.fail("%s is not available" % value) + test.fail(f"{value} is not available") error_context.context("Get supported machine types", test.log.info) qemu_binary = utils_misc.get_qemu_binary(params) machine_type_cmd = qemu_binary + " -machine help | awk '{ print $1 }'" output = process.system_output(machine_type_cmd, shell=True).decode() machine_types = output.splitlines() - machine_type_remove = params['machine_type_remove'].split() + machine_type_remove = params["machine_type_remove"].split() for i in machine_type_remove: machine_types.remove(i) test.log.info(machine_types) for machine in machine_types: - error_context.context("Check bin file with machine type: %s" % machine, - test.log.info) + error_context.context( + f"Check bin file with machine type: {machine}", test.log.info + ) for key in bin_dict: if key in machine: bin_file = bin_dict[key] break else: - test.error("Uncertain which bin file in use for machine type: %s" - % machine) + test.error(f"Uncertain which bin file in use for machine type: {machine}") - params['machine_type'] = machine - params['start_vm'] = 'yes' + params["machine_type"] = machine + params["start_vm"] = "yes" env_process.preprocess_vm(test, params, env, params.get("main_vm")) vm = env.get_vm(params["main_vm"]) info_roms = vm.monitor.info("roms") if bin_file not in info_roms: - test.fail("Checking bin file fails with %s, info roms: %s" - % (machine, info_roms)) + test.fail(f"Checking bin file fails with {machine}, info roms: {info_roms}") vm.destroy() diff --git 
a/qemu/tests/seabios_bootmenu_prompt.py b/qemu/tests/seabios_bootmenu_prompt.py index 1b71deedac..75774ee45e 100644 --- a/qemu/tests/seabios_bootmenu_prompt.py +++ b/qemu/tests/seabios_bootmenu_prompt.py @@ -1,8 +1,6 @@ import re -from virttest import error_context -from virttest import env_process -from virttest import utils_misc +from virttest import env_process, error_context, utils_misc @error_context.context_aware @@ -18,17 +16,18 @@ def run(test, params, env): :param params: Dictionary with the test parameters :param env: Dictionary with test environment. """ + def prepare_images(img_num): """ prepare extra images """ for i in range(img_num): - img = "stg%s" % i - params["images"] = ' '.join([params["images"], img]) - params["image_name_%s" % img] = "images/%s" % img - params["image_size_%s" % img] = params["extra_img_size"] - params["force_create_image_%s" % img] = "yes" - params["remove_image_%s" % img] = "yes" + img = f"stg{i}" + params["images"] = " ".join([params["images"], img]) + params[f"image_name_{img}"] = f"images/{img}" + params[f"image_size_{img}"] = params["extra_img_size"] + params[f"force_create_image_{img}"] = "yes" + params[f"remove_image_{img}"] = "yes" def get_output(session_obj): """ @@ -40,12 +39,11 @@ def boot_menu(): return re.search(boot_menu_hint, get_output(seabios_session)) def get_boot_menu_list(): - return re.findall(r"^([1-9a-z])\. (.*)\s", - get_output(seabios_session), re.M) + return re.findall(r"^([1-9a-z])\. (.*)\s", get_output(seabios_session), re.M) timeout = float(params.get("timeout", 60)) boot_menu_hint = params["boot_menu_hint"] - boot_menu_key = params.get("boot_menu_key", 'esc') + boot_menu_key = params.get("boot_menu_key", "esc") boot_device = str(int(params["bootindex_image1"]) + 1) extra_img_num = int(params["extra_img_num"]) @@ -58,7 +56,7 @@ def get_boot_menu_list(): error_context.context("Start guest with sga bios", test.log.info) vm = env.get_vm(params["main_vm"]) vm.verify_alive() - seabios_session = vm.logsessions['seabios'] + seabios_session = vm.logsessions["seabios"] error_context.context("Get boot menu list", test.log.info) if not utils_misc.wait_for(boot_menu, timeout, 1): diff --git a/qemu/tests/seabios_hotplug_unplug.py b/qemu/tests/seabios_hotplug_unplug.py index c1afdfd320..2947a04f30 100644 --- a/qemu/tests/seabios_hotplug_unplug.py +++ b/qemu/tests/seabios_hotplug_unplug.py @@ -1,9 +1,6 @@ import re -from virttest import error_context -from virttest import utils_test -from virttest import utils_misc -from virttest import utils_net +from virttest import error_context, utils_misc, utils_net, utils_test @error_context.context_aware @@ -38,8 +35,9 @@ def sga_info_check(): return re.search(sgabios_info, get_output(vm.serial_console)) def menu_hint_check(): - return (len(re.findall(boot_menu_hint, - get_output(seabios_session))) > reboot_times) + return ( + len(re.findall(boot_menu_hint, get_output(seabios_session))) > reboot_times + ) def get_list(): return re.findall(r"^\d+\. 
.*\s", get_output(seabios_session), re.M) @@ -49,14 +47,13 @@ def get_boot_menu(): test.fail("Could not get boot menu message") vm.send_key(boot_menu_key) boot_list = utils_misc.wait_for(get_list, timeout, 1) - boot_menu = boot_list[len(boot_list_record):] + boot_menu = boot_list[len(boot_list_record) :] if not boot_menu: test.fail("Could not get boot menu list") return (boot_list, boot_menu) def check_in_guest(): - session = vm.wait_for_serial_login(timeout=timeout, - restart_network=True) + session = vm.wait_for_serial_login(timeout=timeout, restart_network=True) error_context.context("Check kernel crash message!", test.log.info) vm.verify_kernel_crash() error_context.context("Ping guest!", test.log.info) @@ -65,7 +62,7 @@ def check_in_guest(): if status: test.fail("Ping guest failed!") elif utils_test.get_loss_ratio(output) == 100: - test.fail("All packets lost during ping guest %s." % guest_ip) + test.fail(f"All packets lost during ping guest {guest_ip}.") session.close() def get_diff(menu_before, menu_after, plug=False): @@ -73,8 +70,8 @@ def get_diff(menu_before, menu_after, plug=False): (menu_short, menu_long) = (menu_before, menu_after) else: (menu_short, menu_long) = (menu_after, menu_before) - menu_short = re.findall(r"^\d+\. (.*)\s", ''.join(menu_short), re.M) - menu_long = re.findall(r"^\d+\. (.*)\s", ''.join(menu_long), re.M) + menu_short = re.findall(r"^\d+\. (.*)\s", "".join(menu_short), re.M) + menu_long = re.findall(r"^\d+\. (.*)\s", "".join(menu_long), re.M) for dev in menu_short: if dev in menu_long: menu_long.remove(dev) @@ -109,13 +106,13 @@ def get_diff(menu_before, menu_after, plug=False): image_params = params.object_params(image_name) image_params["drive_format"] = "virtio" image_hint = "Virtio disk" - devices = vm.devices.images_define_by_params(image_name, image_params, - 'disk', None, False, None) + devices = vm.devices.images_define_by_params( + image_name, image_params, "disk", None, False, None + ) for dev in devices: ret = vm.devices.simple_hotplug(dev, vm.monitor) if ret[1] is False: - test.fail("Failed to hotplug device '%s'. Output:\n%s" - % (dev, ret[0])) + test.fail(f"Failed to hotplug device '{dev}'. 
Output:\n{ret[0]}") disk_hotplugged.append(devices[-1]) error_context.context("Hotplugging virtio nic", test.log.info) @@ -128,13 +125,13 @@ def get_diff(menu_before, menu_after, plug=False): nic_hint = "iPXE" test.log.info("Disable other link(s) in guest") - guest_is_linux = ("linux" == params.get("os_type")) + guest_is_linux = "linux" == params.get("os_type") s_session = vm.wait_for_serial_login(timeout=timeout) primary_nics = [nic for nic in vm.virtnet] for nic in primary_nics: if guest_is_linux: ifname = utils_net.get_linux_ifname(s_session, nic["mac"]) - s_session.cmd_output_safe("ifconfig %s 0.0.0.0" % ifname) + s_session.cmd_output_safe(f"ifconfig {ifname} 0.0.0.0") else: s_session.cmd_output_safe("ipconfig /release all") vm.set_link(nic.device_id, up=False) @@ -154,8 +151,7 @@ def get_diff(menu_before, menu_after, plug=False): test.log.info("Got boot menu after hotplug: '%s'", boot_menu_after_plug) if not len(boot_menu_after_plug) > len(boot_menu_before_plug): test.fail("The boot menu is incorrect after hotplug.") - menu_diff = get_diff(boot_menu_before_plug, - boot_menu_after_plug, plug=True) + menu_diff = get_diff(boot_menu_before_plug, boot_menu_after_plug, plug=True) if image_hint not in str(menu_diff): test.fail("Hotplugged virtio disk is not in boot menu list") if nic_hint not in str(menu_diff): @@ -168,8 +164,7 @@ def get_diff(menu_before, menu_after, plug=False): for dev in disk_hotplugged: ret = vm.devices.simple_unplug(dev, vm.monitor) if ret[1] is False: - test.fail("Failed to unplug device '%s'. Output:\n%s" - % (dev, ret[0])) + test.fail(f"Failed to unplug device '{dev}'. Output:\n{ret[0]}") vm.hotunplug_nic(hotplug_nic.nic_name) for nic in primary_nics: @@ -184,8 +179,7 @@ def get_diff(menu_before, menu_after, plug=False): test.log.info("Got boot menu after hotunplug: '%s'", boot_menu_after_unplug) if not len(boot_menu_after_plug) > len(boot_menu_after_unplug): test.fail("The boot menu is incorrect after hotunplug.") - menu_diff = get_diff(boot_menu_after_plug, - boot_menu_after_unplug, plug=False) + menu_diff = get_diff(boot_menu_after_plug, boot_menu_after_unplug, plug=False) if image_hint not in str(menu_diff): test.fail("Hotunplugged virtio disk is still in boot menu list") if nic_hint not in str(menu_diff): diff --git a/qemu/tests/seabios_order_once.py b/qemu/tests/seabios_order_once.py index 223c15d166..5587c2749c 100644 --- a/qemu/tests/seabios_order_once.py +++ b/qemu/tests/seabios_order_once.py @@ -2,10 +2,7 @@ import re from avocado.utils import process - -from virttest import error_context -from virttest import env_process -from virttest import utils_misc +from virttest import env_process, error_context, utils_misc @error_context.context_aware @@ -29,7 +26,7 @@ def create_cdroms(cdrom_test): """ test.log.info("creating test cdrom") process.run("dd if=/dev/urandom of=test bs=10M count=1") - process.run("mkisofs -o %s test" % cdrom_test) + process.run(f"mkisofs -o {cdrom_test} test") process.run("rm -f test") def cleanup_cdroms(cdrom_test): @@ -60,7 +57,7 @@ def boot_check(info): vm.pause() # Disable nic device, boot fail from nic device except user model - if params['nettype'] != 'user': + if params["nettype"] != "user": for nic in vm.virtnet: vm.set_link(nic.device_id, up=False) vm.resume() @@ -75,8 +72,9 @@ def boot_check(info): try: error_context.context("Check boot order before reboot", test.log.info) if not utils_misc.wait_for(lambda: boot_check(boot_info1), timeout, 1): - test.fail("Guest isn't booted as expected order before reboot: %s" - % 
bootorder_before) + test.fail( + f"Guest isn't booted as expected order before reboot: {bootorder_before}" + ) error_context.context("Reboot", test.log.info) vm.send_key(restart_key) @@ -84,7 +82,8 @@ def boot_check(info): error_context.context("Check boot order after reboot", test.log.info) boot_info = boot_info1 + boot_info2 if not utils_misc.wait_for(lambda: boot_check(boot_info), timeout, 1): - test.fail("Guest isn't booted as expected order after reboot: %s" - % bootorder_after) + test.fail( + f"Guest isn't booted as expected order after reboot: {bootorder_after}" + ) finally: cleanup_cdroms(cdrom_test) diff --git a/qemu/tests/seabios_reboot_timeout.py b/qemu/tests/seabios_reboot_timeout.py index 71d1a246b3..130232101b 100644 --- a/qemu/tests/seabios_reboot_timeout.py +++ b/qemu/tests/seabios_reboot_timeout.py @@ -1,7 +1,6 @@ import re -from virttest import error_context -from virttest import utils_misc +from virttest import error_context, utils_misc @error_context.context_aware @@ -33,14 +32,14 @@ def reboot_timeout_check(): vm.verify_alive() timeout = float(params.get("login_timeout", 360)) - seabios_session = vm.logsessions['seabios'] + seabios_session = vm.logsessions["seabios"] rb_timeout = int(params["boot_reboot_timeout"]) if rb_timeout < 0: - test.cancel("Do not support rb_timeout = %s" % rb_timeout) + test.cancel(f"Do not support rb_timeout = {rb_timeout}") elif rb_timeout > 65535: rb_timeout = 65535 - rb_timeout = rb_timeout//1000 + rb_timeout = rb_timeout // 1000 pattern = "No bootable device.*Retrying in %d seconds" % rb_timeout error_context.context("Check reboot-timeout option", test.log.info) diff --git a/qemu/tests/seabios_scsi_lun.py b/qemu/tests/seabios_scsi_lun.py index 9773b55b03..396ba799c2 100644 --- a/qemu/tests/seabios_scsi_lun.py +++ b/qemu/tests/seabios_scsi_lun.py @@ -1,7 +1,6 @@ import re -from virttest import error_context -from virttest import utils_misc +from virttest import error_context, utils_misc @error_context.context_aware @@ -31,16 +30,16 @@ def get_list(): return re.findall(r"^\d+\. 
(.*)\s", get_output(seabios_session), re.M) timeout = float(params.get("boot_timeout", 60)) - boot_menu_key = params.get("boot_menu_key", 'esc') + boot_menu_key = params.get("boot_menu_key", "esc") boot_menu_hint = params.get("boot_menu_hint") check_pattern = params.get("check_pattern", "virtio-scsi Drive") img = params.objects("images")[0] - lun = params["drive_port_%s" % img] + lun = params[f"drive_port_{img}"] vm = env.get_vm(params["main_vm"]) vm.verify_alive() - seabios_session = vm.logsessions['seabios'] + seabios_session = vm.logsessions["seabios"] if not (boot_menu_hint and utils_misc.wait_for(boot_menu, timeout, 1)): test.fail("Could not get boot menu message.") @@ -52,6 +51,6 @@ def get_list(): test.fail("Could not get boot entries list.") if check_pattern not in str(boot_list): - test.fail("SCSI disk with lun %s cannot be found in boot menu" % lun) + test.fail(f"SCSI disk with lun {lun} cannot be found in boot menu") vm.destroy(gracefully=False) diff --git a/qemu/tests/seabios_strict.py b/qemu/tests/seabios_strict.py index 15d1a0c4b1..d40809c287 100644 --- a/qemu/tests/seabios_strict.py +++ b/qemu/tests/seabios_strict.py @@ -1,12 +1,8 @@ import os import re -from virttest import error_context -from virttest import utils_misc -from virttest import data_dir -from virttest import env_process - from avocado.utils import process +from virttest import data_dir, env_process, error_context, utils_misc @error_context.context_aware @@ -28,10 +24,10 @@ def create_cdrom(): Create 'test' cdrom """ test.log.info("creating test cdrom") - cdrom_test = params.get("cdrom_test", '/tmp/test.iso') + cdrom_test = params.get("cdrom_test", "/tmp/test.iso") cdrom_test = utils_misc.get_path(data_dir.get_data_dir(), cdrom_test) process.run("dd if=/dev/urandom of=test bs=10M count=1") - process.run("mkisofs -o %s test" % cdrom_test) + process.run(f"mkisofs -o {cdrom_test} test") process.run("rm -f test") def cleanup_cdrom(): @@ -39,8 +35,9 @@ def cleanup_cdrom(): Remove 'test' cdrom """ test.log.info("cleaning up test cdrom") - cdrom_test = utils_misc.get_path(data_dir.get_data_dir(), - params.get("cdrom_test")) + cdrom_test = utils_misc.get_path( + data_dir.get_data_dir(), params.get("cdrom_test") + ) os.remove(cdrom_test) def boot_check(info): @@ -63,15 +60,15 @@ def boot_check(info): vm.pause() # Disable nic device, boot fail from nic device except user model - if params['nettype'] != 'user': + if params["nettype"] != "user": for nic in vm.virtnet: - process.system("ifconfig %s down" % nic.ifname) + process.system(f"ifconfig {nic.ifname} down") vm.resume() timeout = float(params.get("login_timeout", 240)) - fail_infos = params['boot_fail_infos'] - fail_infos_ex = params['boot_fail_infos_extra'] - boot_strict = (params['boot_strict'] == 'on') + fail_infos = params["boot_fail_infos"] + fail_infos_ex = params["boot_fail_infos_extra"] + boot_strict = params["boot_strict"] == "on" try: error_context.context("Check guest boot result", test.log.info) diff --git a/qemu/tests/secure_execution.py b/qemu/tests/secure_execution.py index 6e6c46bfdc..0d78d15759 100644 --- a/qemu/tests/secure_execution.py +++ b/qemu/tests/secure_execution.py @@ -1,5 +1,4 @@ -from virttest import error_context -from virttest import virt_vm +from virttest import error_context, virt_vm @error_context.context_aware @@ -15,8 +14,7 @@ def run(test, params, env): """ vm = env.get_vm(params["main_vm"]) - error_context.context("Try to log into guest '%s'." 
% vm.name, - test.log.info) + error_context.context(f"Try to log into guest '{vm.name}'.", test.log.info) if params.get("start_vm") == "yes": session = vm.wait_for_serial_login() session.close() @@ -31,6 +29,7 @@ def run(test, params, env): except virt_vm.VMCreateError as detail: output = detail.output if error_msg not in output: - test.fail("Error message is not expected! " - "Expected: {} Actual: {}" - .format(error_msg, output)) + test.fail( + "Error message is not expected! " + f"Expected: {error_msg} Actual: {output}" + ) diff --git a/qemu/tests/secure_img.py b/qemu/tests/secure_img.py index 1892da8e3a..ba4af1a1bc 100644 --- a/qemu/tests/secure_img.py +++ b/qemu/tests/secure_img.py @@ -1,5 +1,5 @@ -from virttest import error_context from avocado.utils import cpu +from virttest import error_context @error_context.context_aware @@ -38,40 +38,40 @@ def run_cmd_in_guest(session, cmd, test): session = vm.wait_for_login(timeout=60) # create the secure boot params - secure_params_cmd = params.get('secure_params_cmd') + secure_params_cmd = params.get("secure_params_cmd") run_cmd_in_guest(session, secure_params_cmd, test) # check LPAR type(Z15/Z16) and download HKD cpu_family = cpu.get_family() if cpu_family: - download_hkd = params.get('download_hkd_%s' % cpu_family) + download_hkd = params.get(f"download_hkd_{cpu_family}") else: test.fail("Failed to retrieve CPU family.") - run_cmd_in_guest(session, download_hkd, test) # pylint: disable=E0606 + run_cmd_in_guest(session, download_hkd, test) # pylint: disable=E0606 # Create the boot image file - kernel_version = run_cmd_in_guest(session, 'uname -r', test) + kernel_version = run_cmd_in_guest(session, "uname -r", test) kernel_version = kernel_version.strip() - boot_kernel = '/boot/vmlinuz-%s' % kernel_version - boot_initrd = '/boot/initramfs-%s%s' % (kernel_version, '.img') - boot_img_cmd = params.get('boot_img_cmd') % (boot_kernel, boot_initrd) + boot_kernel = f"/boot/vmlinuz-{kernel_version}" + boot_initrd = "/boot/initramfs-{}{}".format(kernel_version, ".img") + boot_img_cmd = params.get("boot_img_cmd") % (boot_kernel, boot_initrd) run_cmd_in_guest(session, boot_img_cmd, test) # update the zipl config - zipl_config_cmd = params.get('zipl_config_cmd') + zipl_config_cmd = params.get("zipl_config_cmd") run_cmd_in_guest(session, zipl_config_cmd, test) # update the kernel command and reboot the guest - zipl_cmd = params.get('zipl_cmd') + zipl_cmd = params.get("zipl_cmd") run_cmd_in_guest(session, zipl_cmd, test) session.close() vm.reboot() # Check if the vm is secured session = vm.wait_for_login(timeout=60) - check_se_cmd = params.get('check_se_cmd') + check_se_cmd = params.get("check_se_cmd") se_output = run_cmd_in_guest(session, check_se_cmd, test).strip() - if '1' == se_output: + if "1" == se_output: test.log.info("Image is secured") else: test.fail("Image failed to secured") diff --git a/qemu/tests/serial_no_listening.py b/qemu/tests/serial_no_listening.py index 21d11650f2..b4634865a5 100644 --- a/qemu/tests/serial_no_listening.py +++ b/qemu/tests/serial_no_listening.py @@ -1,11 +1,9 @@ import aexpect - from avocado.utils import process +from virttest import error_context, utils_test -from virttest import error_context -from virttest import utils_test -from qemu.tests.virtio_serial_file_transfer import get_virtio_port_property from provider import win_driver_utils +from qemu.tests.virtio_serial_file_transfer import get_virtio_port_property @error_context.context_aware @@ -29,13 +27,13 @@ def run(test, params, env): session = 
vm.wait_for_login() if os_type == "windows": - session = utils_test.qemu.windrv_check_running_verifier(session, vm, - test, driver_name) - port_path = get_virtio_port_property(vm, - params["file_transfer_serial_port"])[1] + session = utils_test.qemu.windrv_check_running_verifier( + session, vm, test, driver_name + ) + port_path = get_virtio_port_property(vm, params["file_transfer_serial_port"])[1] error_context.context("host send while no listening side", test.log.info) - host_send_cmd = 'echo "hi" | nc -U %s' % port_path + host_send_cmd = f'echo "hi" | nc -U {port_path}' try: process.system(host_send_cmd, shell=True, timeout=timeout) except process.CmdError: @@ -48,10 +46,12 @@ def run(test, params, env): output = session.cmd_output(guest_send_cmd) except aexpect.ShellTimeoutError: if os_type != "linux": - test.error("timeout when guest send command: %s" % guest_send_cmd) + test.error(f"timeout when guest send command: {guest_send_cmd}") else: - if not (os_type == "windows" and - ("The system cannot write to the specified device" in output)): + if not ( + os_type == "windows" + and ("The system cannot write to the specified device" in output) + ): test.fail("Guest send should fail while no listening side") vm.verify_kernel_crash() diff --git a/qemu/tests/set_link.py b/qemu/tests/set_link.py index d009c0b3e0..a5aa17eb94 100644 --- a/qemu/tests/set_link.py +++ b/qemu/tests/set_link.py @@ -1,13 +1,7 @@ import time import aexpect - -from virttest import error_context -from virttest import remote -from virttest import utils_test -from virttest import utils_net -from virttest import utils_misc -from virttest import virt_vm +from virttest import error_context, remote, utils_misc, utils_net, utils_test, virt_vm @error_context.context_aware @@ -34,16 +28,17 @@ def run(test, params, env): :param params: Dictionary with the test parameters :param env: Dictionary with test environment. 
""" + def change_queues_number(session, ifname, q_number): """ Change queues number, only used for mq test """ - mq_set_cmd = "ethtool -L %s combined %s" % (ifname, q_number) + mq_set_cmd = f"ethtool -L {ifname} combined {q_number}" try: session.cmd_output_safe(mq_set_cmd) except aexpect.ShellError as err: err_msg = "Change queues number failed" - err_msg += "Error info: '%s'" % err + err_msg += f"Error info: '{err}'" test.fail(err_msg) def change_queues_number_repeatly(guest_ifname): @@ -81,14 +76,17 @@ def guest_netwok_connecting_check(guest_ip, link_up, change_queues=False): if change_queues: env["run_change_queues"] = False bg_thread = utils_misc.InterruptedThread( - change_queues_number_repeatly, (guest_ifname,)) + change_queues_number_repeatly, (guest_ifname,) + ) bg_thread.start() - utils_misc.wait_for(lambda: env["run_change_queues"], 30, 0, 2, - "wait queues change start") + utils_misc.wait_for( + lambda: env["run_change_queues"], 30, 0, 2, "wait queues change start" + ) time.sleep(0.5) - output = utils_test.ping(guest_ip, 10, interface=host_interface, - timeout=20, session=None)[1] + output = utils_test.ping( + guest_ip, 10, interface=host_interface, timeout=20, session=None + )[1] if not link_up and utils_test.get_loss_ratio(output) < 80: err_msg = "guest network still connecting after down the link" test.fail(err_msg) @@ -105,24 +103,26 @@ def operstate_check(session, expect_status, guest_ifname=""): Check Guest interface operstate """ if params.get("os_type") == "linux": - if_operstate = utils_net.get_net_if_operstate(guest_ifname, - session.cmd_output_safe) + if_operstate = utils_net.get_net_if_operstate( + guest_ifname, session.cmd_output_safe + ) else: - if_operstate = utils_net.get_windows_nic_attribute(session, - "macaddress", - vm.get_mac_address(), - "netconnectionstatus") + if_operstate = utils_net.get_windows_nic_attribute( + session, "macaddress", vm.get_mac_address(), "netconnectionstatus" + ) if if_operstate != expect_status: - err_msg = "Guest interface %s status error, " % guest_ifname - err_msg = "currently interface status is '%s', " % if_operstate - err_msg += "but expect status is '%s'" % expect_status + err_msg = f"Guest interface {guest_ifname} status error, " + err_msg = f"currently interface status is '{if_operstate}', " + err_msg += f"but expect status is '{expect_status}'" test.fail(err_msg) - test.log.info("Guest interface operstate '%s' is exactly as expected", - if_operstate) + test.log.info( + "Guest interface operstate '%s' is exactly as expected", if_operstate + ) - def guest_interface_operstate_check(expect_status, guest_ifname="", - change_queues=False): + def guest_interface_operstate_check( + expect_status, guest_ifname="", change_queues=False + ): """ Check guest interface operstate, support mq """ @@ -168,16 +168,16 @@ def check_interface_ip(timeout=600): mac_addr = vm.get_mac_address() try: if not utils_misc.wait_for( - lambda: check_interface_ip_routine(session, mac_addr), - timeout, - step=5.0): - err_msg = "Can't get valid ip in %s seconds" % timeout + lambda: check_interface_ip_routine(session, mac_addr), timeout, step=5.0 + ): + err_msg = f"Can't get valid ip in {timeout} seconds" test.fail(err_msg) finally: session.close() - def set_link_test(linkid, link_up, expect_status, change_queues=False, - operstate_always_up=False): + def set_link_test( + linkid, link_up, expect_status, change_queues=False, operstate_always_up=False + ): """ Issue set_link commands and test its function @@ -199,25 +199,26 @@ def set_link_test(linkid, link_up, 
expect_status, change_queues=False, expect_status = "up" if expect_status == win_media_disconnected: expect_status = win_media_connected - guest_interface_operstate_check(expect_status, guest_ifname, - change_queues) + guest_interface_operstate_check(expect_status, guest_ifname, change_queues) if params.get("os_type") == "windows" and expect_status == win_media_connected: check_interface_ip() - error_context.context("Check if guest network connective", - test.log.info) + error_context.context("Check if guest network connective", test.log.info) guest_netwok_connecting_check(guest_ip, link_up, change_queues) reboot_method = params.get("reboot_method", "shell") - error_context.context("Reboot guest by '%s' and recheck interface " - "operstate" % reboot_method, test.log.info) + error_context.context( + f"Reboot guest by '{reboot_method}' and recheck interface " "operstate", + test.log.info, + ) guest_reboot(reboot_method, link_up) - guest_interface_operstate_check(expect_status, guest_ifname, - change_queues) + guest_interface_operstate_check(expect_status, guest_ifname, change_queues) - error_context.context("Check guest network connecting after reboot " - "by '%s'" % reboot_method, test.log.info) + error_context.context( + "Check guest network connecting after reboot " f"by '{reboot_method}'", + test.log.info, + ) guest_netwok_connecting_check(guest_ip, link_up, change_queues) vm = env.get_vm(params["main_vm"]) @@ -240,8 +241,7 @@ def set_link_test(linkid, link_up, expect_status, change_queues=False, win_media_disconnected = params.get("win_media_disconnected", "7") if os_type == "linux": - guest_ifname = utils_net.get_linux_ifname(session, - vm.get_mac_address()) + guest_ifname = utils_net.get_linux_ifname(session, vm.get_mac_address()) queues = int(params.get("queues", 1)) if queues != 1 and vm.virtnet[0].nic_model == "virtio": change_queues = True @@ -252,24 +252,27 @@ def set_link_test(linkid, link_up, expect_status, change_queues=False, expect_up_status = params.get("up-status", "up") operstate_always_up = params.get("operstate_always_up", "no") == "yes" - error_context.context("Disable guest netdev link '%s' by set_link" - % netdev_id, test.log.info) - set_link_test(netdev_id, False, expect_down_status, change_queues, - operstate_always_up) - - error_context.context("Re-enable guest netdev link '%s' by set_link" - % netdev_id, test.log.info) - set_link_test(netdev_id, True, expect_up_status, change_queues, - operstate_always_up) - - error_context.context("Disable guest nic device '%s' by set_link" - % device_id, test.log.info) + error_context.context( + f"Disable guest netdev link '{netdev_id}' by set_link", test.log.info + ) + set_link_test( + netdev_id, False, expect_down_status, change_queues, operstate_always_up + ) + + error_context.context( + f"Re-enable guest netdev link '{netdev_id}' by set_link", test.log.info + ) + set_link_test(netdev_id, True, expect_up_status, change_queues, operstate_always_up) + + error_context.context( + f"Disable guest nic device '{device_id}' by set_link", test.log.info + ) set_link_test(device_id, False, expect_down_status, change_queues) - error_context.context("Re-enable guest nic device '%s' by set_link" - % device_id, test.log.info) + error_context.context( + f"Re-enable guest nic device '{device_id}' by set_link", test.log.info + ) set_link_test(device_id, True, expect_up_status, change_queues) - error_context.context("Do file transfer after setlink on and off", - test.log.info) + error_context.context("Do file transfer after setlink on and 
off", test.log.info) utils_test.run_file_transfer(test, params, env) diff --git a/qemu/tests/sev_basic_config.py b/qemu/tests/sev_basic_config.py index 6d9c60abaf..58140f6408 100644 --- a/qemu/tests/sev_basic_config.py +++ b/qemu/tests/sev_basic_config.py @@ -25,7 +25,7 @@ def run(test, params, env): f = open(sev_module_path, "r") output = f.read().strip() f.close() - if output not in params.objects('module_status'): + if output not in params.objects("module_status"): test.cancel("Host sev-es support check fail.") else: test.cancel("Host sev-es support check fail.") @@ -42,11 +42,12 @@ def run(test, params, env): else: policy_keyword = "sev-es" guest_check_cmd = params["sev_guest_check"].format( - policy_keyword=policy_keyword) + policy_keyword=policy_keyword + ) try: session.cmd_output(guest_check_cmd, timeout=240) except Exception as e: - test.fail("Guest sev verify fail: %s" % str(e)) + test.fail(f"Guest sev verify fail: {str(e)}") sev_guest_info = vm.monitor.query_sev() if sev_guest_info["policy"] != vm_policy: test.fail("QMP sev policy doesn't match.") diff --git a/qemu/tests/sev_dhcert_boot.py b/qemu/tests/sev_dhcert_boot.py index b66c49f6b3..2c81c1c044 100644 --- a/qemu/tests/sev_dhcert_boot.py +++ b/qemu/tests/sev_dhcert_boot.py @@ -1,9 +1,7 @@ import os from avocado.utils import process -from virttest import error_context -from virttest import env_process -from virttest import utils_package +from virttest import env_process, error_context, utils_package @error_context.context_aware @@ -27,35 +25,33 @@ def run(test, params, env): f = open(sev_module_path, "r") output = f.read().strip() f.close() - if output not in params.objects('module_status'): + if output not in params.objects("module_status"): test.cancel("Host sev-es support check fail.") else: test.cancel("Host sev-es support check fail.") sev_tool_pkg = params.get("sev_tool_pkg") - s, o = process.getstatusoutput("rpm -qa | grep %s" % sev_tool_pkg, - shell=True) + s, o = process.getstatusoutput(f"rpm -qa | grep {sev_tool_pkg}", shell=True) if s != 0: install_status = utils_package.package_install(sev_tool_pkg) if not install_status: - test.cancel("Failed to install %s." 
% sev_tool_pkg) + test.cancel(f"Failed to install {sev_tool_pkg}.") vm_name = params["main_vm"] files_remove = [] try: process.system_output("sevctl export --full vm.chain", shell=True) - files_remove.append('vm.chain') - process.system_output("sevctl session --name " + vm_name + - " vm.chain " + params["vm_sev_policy"], - shell=True) - session_files = ['godh.b64', 'session.b64', 'tek.bin', 'tik.bin'] - files_remove.extend([f'{vm_name}_{name}' for name in session_files]) - params["vm_sev_dh_cert_file"] = os.path.abspath("%s_godh.b64" - % vm_name) - params["vm_sev_session_file"] = os.path.abspath("%s_session.b64" - % vm_name) + files_remove.append("vm.chain") + process.system_output( + "sevctl session --name " + vm_name + " vm.chain " + params["vm_sev_policy"], + shell=True, + ) + session_files = ["godh.b64", "session.b64", "tek.bin", "tik.bin"] + files_remove.extend([f"{vm_name}_{name}" for name in session_files]) + params["vm_sev_dh_cert_file"] = os.path.abspath(f"{vm_name}_godh.b64") + params["vm_sev_session_file"] = os.path.abspath(f"{vm_name}_session.b64") except Exception as e: - test.fail("Insert guest dhcert and session blob failed, %s" % str(e)) + test.fail(f"Insert guest dhcert and session blob failed, {str(e)}") env_process.preprocess_vm(test, params, env, vm_name) vm = env.get_vm(vm_name) @@ -66,7 +62,7 @@ def run(test, params, env): try: session.cmd_output(params["sev_guest_check"], timeout=240) except Exception as e: - test.fail("Guest sev verify fail: %s" % str(e)) + test.fail(f"Guest sev verify fail: {str(e)}") finally: session.close() vm.destroy() diff --git a/qemu/tests/sev_hotplug_mem.py b/qemu/tests/sev_hotplug_mem.py index eccec85170..b5079f3e44 100644 --- a/qemu/tests/sev_hotplug_mem.py +++ b/qemu/tests/sev_hotplug_mem.py @@ -5,12 +5,10 @@ from virttest import error_context from virttest.utils_test.qemu import MemoryHotplugTest - -LOG_JOB = logging.getLogger('avocado.test') +LOG_JOB = logging.getLogger("avocado.test") class MemoryHotplugSimple(MemoryHotplugTest): - def check_memory(self, vm, wait_time=0): """ Check is guest memory is really match assigned to VM. 
@@ -24,11 +22,14 @@ def check_memory(self, vm, wait_time=0): vm_mem_size = self.get_guest_total_mem(vm) assigned_vm_mem_size = self.get_vm_mem(vm) sev_rom_size = self.params.get_numeric("sev_rom_size", 0) - if abs(vm_mem_size + sev_rom_size - assigned_vm_mem_size) > \ - assigned_vm_mem_size * threshold: - msg = ("Assigned '%s MB' memory to '%s'" - "but, '%s MB' memory detect by OS" % - (assigned_vm_mem_size, vm.name, vm_mem_size)) + if ( + abs(vm_mem_size + sev_rom_size - assigned_vm_mem_size) + > assigned_vm_mem_size * threshold + ): + msg = ( + f"Assigned '{assigned_vm_mem_size} MB' memory to '{vm.name}'" + f"but, '{vm_mem_size} MB' memory detect by OS" + ) raise TestError(msg) @@ -54,7 +55,7 @@ def run(test, params, env): plugged = [] wait_time = params.get_numeric("wait_time", 0) for target_mem in params.objects("target_mems"): - if target_mem in vm.params.objects('mem_devs'): + if target_mem in vm.params.objects("mem_devs"): hotplug_test.unplug_memory(vm, target_mem) else: hotplug_test.hotplug_memory(vm, target_mem) diff --git a/qemu/tests/sgx_basic.py b/qemu/tests/sgx_basic.py index daa923225f..30b3b31a6d 100644 --- a/qemu/tests/sgx_basic.py +++ b/qemu/tests/sgx_basic.py @@ -1,8 +1,7 @@ from virttest import error_context from virttest.utils_misc import verify_dmesg -from provider.sgx import SGXHostCapability -from provider.sgx import SGXChecker +from provider.sgx import SGXChecker, SGXHostCapability @error_context.context_aware @@ -33,8 +32,7 @@ def run(test, params, env): vm.verify_alive() session = vm.wait_for_login(timeout=timeout) verify_dmesg() - dmesg_output = session.cmd_output(params["guest_sgx_check"], - timeout=240).strip() + dmesg_output = session.cmd_output(params["guest_sgx_check"], timeout=240).strip() session.close() test_check = SGXChecker(test, params, vm) diff --git a/qemu/tests/sgx_cpu.py b/qemu/tests/sgx_cpu.py index 3a8fe127a5..72a32d3e02 100644 --- a/qemu/tests/sgx_cpu.py +++ b/qemu/tests/sgx_cpu.py @@ -25,13 +25,14 @@ def run(test, params, env): # Set up guest environment cpuid_pkg = params.get("cpuid_pkg") - if session.cmd_status("rpm -qa|grep %s" % cpuid_pkg): + if session.cmd_status(f"rpm -qa|grep {cpuid_pkg}"): try: session.cmd_output_safe(params.get("repo_install_cmd")) - session.cmd_status("yum -y install %s" % cpuid_pkg) + session.cmd_status(f"yum -y install {cpuid_pkg}") except Exception: - test.cancel("Fail to install package cpuid, please retest" - "this case again.") + test.cancel( + "Fail to install package cpuid, please retest" "this case again." 
+ ) error_context.context("Check the sgx CPUID features", test.log.info) check_cpuid_entry_cmd = params.get("cpuid_entry_cmd") @@ -39,15 +40,17 @@ def run(test, params, env): for i in sgx_features_list: cmd = params.get("check_cpuid_sgx_cmd").format(sgx_cpu_features=i) if session.cmd_status(cmd): - test.fail("Fail to verify sgx feature %s " % i) + test.fail(f"Fail to verify sgx feature {i} ") if params.get("cpuid_entry_cmd"): - error_context.context("Check the corresponding CPUID entries with" - "sgx cpu flags", test.log.info) + error_context.context( + "Check the corresponding CPUID entries with" "sgx cpu flags", + test.log.info, + ) output = session.cmd_output(check_cpuid_entry_cmd) - eax_value = output.splitlines()[-1].split()[2].split('0x')[-1] - eax_value = bin(int(eax_value, 16)).split('0b')[-1] - if eax_value[-5] != '1': - test.fail('CPUID 0x12.0x1.EAX bit 4 is 0') + eax_value = output.splitlines()[-1].split()[2].split("0x")[-1] + eax_value = bin(int(eax_value, 16)).split("0b")[-1] + if eax_value[-5] != "1": + test.fail("CPUID 0x12.0x1.EAX bit 4 is 0") finally: session.close() vm.destroy() diff --git a/qemu/tests/sgx_multi_vms.py b/qemu/tests/sgx_multi_vms.py index d19fe4eadc..847d015eb7 100644 --- a/qemu/tests/sgx_multi_vms.py +++ b/qemu/tests/sgx_multi_vms.py @@ -1,9 +1,7 @@ -from virttest import error_context -from virttest import env_process +from virttest import env_process, error_context from virttest.utils_misc import verify_dmesg -from provider.sgx import SGXHostCapability -from provider.sgx import SGXChecker +from provider.sgx import SGXChecker, SGXHostCapability @error_context.context_aware @@ -29,7 +27,7 @@ def run(test, params, env): if params.get("monitor_expect_nodes"): sgx_cap.validate_numa_node_count() - params['start_vm'] = 'yes' + params["start_vm"] = "yes" vms = params.objects("vms") for vm_name in vms: env_process.preprocess_vm(test, params, env, vm_name) @@ -37,8 +35,9 @@ def run(test, params, env): vm.verify_alive() session = vm.wait_for_login(timeout=timeout) verify_dmesg() - dmesg_output = session.cmd_output(params["guest_sgx_check"], - timeout=240).strip() + dmesg_output = session.cmd_output( + params["guest_sgx_check"], timeout=240 + ).strip() session.close() test_check = SGXChecker(test, params, vm) diff --git a/qemu/tests/single_driver_install.py b/qemu/tests/single_driver_install.py index 2549c0d2ed..e765573b5e 100644 --- a/qemu/tests/single_driver_install.py +++ b/qemu/tests/single_driver_install.py @@ -1,14 +1,12 @@ -import re import logging +import re from aexpect import ShellTimeoutError - -from virttest import error_context -from virttest import utils_misc -from virttest.utils_windows import virtio_win, wmic +from virttest import error_context, utils_misc from virttest.utils_test.qemu import windrv_verify_running +from virttest.utils_windows import virtio_win, wmic -LOG_JOB = logging.getLogger('avocado.test') +LOG_JOB = logging.getLogger("avocado.test") QUERY_TIMEOUT = 360 @@ -34,9 +32,12 @@ def _add_cert(session, cert_path, store): def _pnpdrv_info(session, name_pattern, props=None): - cmd = wmic.make_query("path win32_pnpsigneddriver", - "DeviceName like '%s'" % name_pattern, - props=props, get_swch=wmic.FMT_TYPE_LIST) + cmd = wmic.make_query( + "path win32_pnpsigneddriver", + f"DeviceName like '{name_pattern}'", + props=props, + get_swch=wmic.FMT_TYPE_LIST, + ) return wmic.parse_list(session.cmd(cmd, timeout=QUERY_TIMEOUT)) @@ -74,22 +75,22 @@ def run(test, params, env): # wait for cdroms having driver installed in case that # they are new 
appeared in this test - utils_misc.wait_for(lambda: utils_misc.get_winutils_vol(session), - timeout=OPERATION_TIMEOUT, step=10) - - devcon_path = utils_misc.set_winutils_letter(session, - params["devcon_path"]) - status, output = session.cmd_status_output("dir %s" % devcon_path, - timeout=OPERATION_TIMEOUT) + utils_misc.wait_for( + lambda: utils_misc.get_winutils_vol(session), timeout=OPERATION_TIMEOUT, step=10 + ) + + devcon_path = utils_misc.set_winutils_letter(session, params["devcon_path"]) + status, output = session.cmd_status_output( + f"dir {devcon_path}", timeout=OPERATION_TIMEOUT + ) if status: - test.error("Not found devcon.exe, details: %s" % output) + test.error(f"Not found devcon.exe, details: {output}") media_type = params["virtio_win_media_type"] try: - get_drive_letter = getattr(virtio_win, "drive_letter_%s" % media_type) - get_product_dirname = getattr(virtio_win, - "product_dirname_%s" % media_type) - get_arch_dirname = getattr(virtio_win, "arch_dirname_%s" % media_type) + get_drive_letter = getattr(virtio_win, f"drive_letter_{media_type}") + get_product_dirname = getattr(virtio_win, f"product_dirname_{media_type}") + get_arch_dirname = getattr(virtio_win, f"arch_dirname_{media_type}") except AttributeError: test.error("Not supported virtio win media type '%s'", media_type) viowin_ltr = get_drive_letter(session) @@ -102,45 +103,46 @@ def run(test, params, env): if not guest_arch: test.error("Could not get architecture dirname of the vm") - inf_middle_path = ("{name}\\{arch}" if media_type == "iso" - else "{arch}\\{name}").format(name=guest_name, - arch=guest_arch) + inf_middle_path = ( + "{name}\\{arch}" if media_type == "iso" else "{arch}\\{name}" + ).format(name=guest_name, arch=guest_arch) inf_find_cmd = 'dir /b /s %s\\%s.inf | findstr "\\%s\\\\"' inf_find_cmd %= (viowin_ltr, driver_name, inf_middle_path) inf_path = session.cmd(inf_find_cmd, timeout=OPERATION_TIMEOUT).strip() test.log.info("Found inf file '%s'", inf_path) # `findstr` cannot handle unicode so calling `type` makes it work - expected_ver = session.cmd("type %s | findstr /i /r DriverVer.*=" % - inf_path, timeout=OPERATION_TIMEOUT) + expected_ver = session.cmd( + f"type {inf_path} | findstr /i /r DriverVer.*=", timeout=OPERATION_TIMEOUT + ) expected_ver = expected_ver.strip().split(",", 1)[-1] if not expected_ver: test.error("Failed to find driver version from inf file") test.log.info("Target version is '%s'", expected_ver) if params.get("need_uninstall", "no") == "yes": - error_context.context("Uninstalling previous installed driver", - test.log.info) + error_context.context("Uninstalling previous installed driver", test.log.info) for inf_name in _pnpdrv_info(session, device_name, ["InfName"]): pnp_cmd = "pnputil /delete-driver %s /uninstall /force" - uninst_store_cmd = params.get("uninst_store_cmd", - pnp_cmd) % inf_name - status, output = session.cmd_status_output(uninst_store_cmd, - inst_timeout) + uninst_store_cmd = params.get("uninst_store_cmd", pnp_cmd) % inf_name + status, output = session.cmd_status_output(uninst_store_cmd, inst_timeout) if status not in (0, 3010): # for viostor and vioscsi, they need system reboot # acceptable status: OK(0), REBOOT(3010) - test.error("Failed to uninstall driver '%s' from store, " - "details:\n%s" % (driver_name, output)) + test.error( + f"Failed to uninstall driver '{driver_name}' from store, " + f"details:\n{output}" + ) - uninst_cmd = "%s remove %s" % (devcon_path, device_hwid) + uninst_cmd = f"{devcon_path} remove {device_hwid}" status, output = 
session.cmd_status_output(uninst_cmd, inst_timeout) # acceptable status: OK(0), REBOOT(1) if status > 1: - test.error("Failed to uninstall driver '%s', details:\n" - "%s" % (driver_name, output)) + test.error( + f"Failed to uninstall driver '{driver_name}', details:\n" f"{output}" + ) - if params.get_boolean('need_destroy'): + if params.get_boolean("need_destroy"): vm.destroy() vm.create() vm = env.get_vm(params["main_vm"]) @@ -149,8 +151,7 @@ def run(test, params, env): session = vm.reboot(session) error_context.context("Installing certificates", test.log.info) - cert_files = utils_misc.set_winutils_letter(session, - params.get("cert_files", "")) + cert_files = utils_misc.set_winutils_letter(session, params.get("cert_files", "")) cert_files = [cert.split("=", 1) for cert in cert_files.split()] for store, cert in cert_files: _chk_cert(session, cert) @@ -159,24 +160,24 @@ def run(test, params, env): error_context.context("Installing target driver", test.log.info) installed_any = False for hwid in device_hwid.split(): - output = session.cmd_output("%s find %s" % (devcon_path, hwid)) + output = session.cmd_output(f"{devcon_path} find {hwid}") if re.search("No matching devices found", output, re.I): continue # workaround for install driver without signture - inst_cmd = "%s update %s %s" % (devcon_path, inf_path, hwid) + inst_cmd = f"{devcon_path} update {inf_path} {hwid}" key_to_install_driver = params.get("key_to_install_driver").split(";") try: session.cmd_status_output(inst_cmd, timeout=30) except ShellTimeoutError: send_key(vm, key_to_install_driver) - if not utils_misc.wait_for(lambda: not session.cmd_status(chk_cmd), - 600, 60, 10): - test.fail("Failed to install driver '%s'" % driver_name) + if not utils_misc.wait_for( + lambda: not session.cmd_status(chk_cmd), 600, 60, 10 + ): + test.fail(f"Failed to install driver '{driver_name}'") installed_any |= True if not installed_any: - test.error("Failed to find target devices " - "by hwids: '%s'" % device_hwid) + test.error("Failed to find target devices " f"by hwids: '{device_hwid}'") error_context.context("Verifying target driver", test.log.info) session = vm.reboot(session) @@ -184,6 +185,8 @@ def run(test, params, env): ver_list = _pnpdrv_info(session, device_name, ["DriverVersion"]) if expected_ver not in ver_list: - test.fail("The expected driver version is '%s', but " - "found '%s'" % (expected_ver, ver_list)) + test.fail( + f"The expected driver version is '{expected_ver}', but " + f"found '{ver_list}'" + ) session.close() diff --git a/qemu/tests/slof_balloon.py b/qemu/tests/slof_balloon.py index ef808e713b..8bd670394b 100644 --- a/qemu/tests/slof_balloon.py +++ b/qemu/tests/slof_balloon.py @@ -3,10 +3,9 @@ 1. virtio balloon can work with pci-bridge. """ -from virttest import error_context +from virttest import error_context, utils_misc, utils_net + from provider import slof -from virttest import utils_net -from virttest import utils_misc @error_context.context_aware @@ -26,45 +25,44 @@ def run(test, params, env): :param params: Dictionary with the test . :param env: Dictionary with test environment. """ + def _get_qmp_port(): - """ Get the qmp monitor port. """ - qmp_ports = vm.get_monitors_by_type('qmp') + """Get the qmp monitor port.""" + qmp_ports = vm.get_monitors_by_type("qmp") if not qmp_ports: test.error("Incorrect configuration, no QMP monitor found.") return qmp_ports[0] def _check_balloon_info(): - """ Check virtio balloon device info. 
""" - error_context.context('Check virtio balloon device info.') - balloon_size = qmp.query('balloon')['actual'] - test.log.debug('The balloon size is %s', balloon_size) - mem = int(params["mem"]) * 1024 ** 2 + """Check virtio balloon device info.""" + error_context.context("Check virtio balloon device info.") + balloon_size = qmp.query("balloon")["actual"] + test.log.debug("The balloon size is %s", balloon_size) + mem = int(params["mem"]) * 1024**2 if int(balloon_size) != mem: - test.error( - 'The balloon size is not equal to %d' % mem) + test.error("The balloon size is not equal to %d" % mem) def _change_balloon_size(): - """ Change the ballloon size. """ - changed_ballon_size = int(params['balloon_size']) - balloon_timeout = int(params['balloon_timeout']) - error_context.context( - 'Change the balloon size to %s' % changed_ballon_size) + """Change the ballloon size.""" + changed_ballon_size = int(params["balloon_size"]) + balloon_timeout = int(params["balloon_timeout"]) + error_context.context(f"Change the balloon size to {changed_ballon_size}") qmp.balloon(changed_ballon_size) - error_context.context('Check balloon size after changed.') + error_context.context("Check balloon size after changed.") if not utils_misc.wait_for( - lambda: bool( - changed_ballon_size == int( - qmp.query('balloon')['actual'])), balloon_timeout): - test.fail('The balloon size is not changed to %s in %s sec.' - % (changed_ballon_size, balloon_timeout)) - test.log.debug( - 'The balloon size is %s after changed.', changed_ballon_size) + lambda: bool(changed_ballon_size == int(qmp.query("balloon")["actual"])), + balloon_timeout, + ): + test.fail( + f"The balloon size is not changed to {changed_ballon_size} in {balloon_timeout} sec." + ) + test.log.debug("The balloon size is %s after changed.", changed_ballon_size) def _ping_host(): - """ Ping host from guest. """ + """Ping host from guest.""" error_context.context("Try to ping external host.", test.log.info) extra_host_ip = utils_net.get_host_ip_address(params) - session.cmd('ping %s -c 5' % extra_host_ip) + session.cmd(f"ping {extra_host_ip} -c 5") test.log.info("Ping host(%s) successfully.", extra_host_ip) vm = env.get_vm(params["main_vm"]) @@ -74,7 +72,7 @@ def _ping_host(): error_context.context("Check the output of SLOF.", test.log.info) slof.check_error(test, content) - error_context.context("Try to log into guest '%s'." % vm.name, test.log.info) + error_context.context(f"Try to log into guest '{vm.name}'.", test.log.info) session = vm.wait_for_login(timeout=float(params.get("login_timeout", 240))) test.log.info("log into guest '%s' successfully.", vm.name) diff --git a/qemu/tests/slof_boot.py b/qemu/tests/slof_boot.py index b29a6b72fb..26c1189292 100644 --- a/qemu/tests/slof_boot.py +++ b/qemu/tests/slof_boot.py @@ -14,12 +14,13 @@ 8. Test supported block size of boot disk for virtio-blk-pci. 9. Test supported block size of boot disk for virtio-scsi. 
""" + import re from avocado.utils import process -from virttest import error_context +from virttest import error_context, utils_net + from provider import slof -from virttest import utils_net @error_context.context_aware @@ -52,57 +53,52 @@ def run(test, params, env): """ def _get_pci_bridge_addr(id): - dev_info_list = re.findall(r'\s+-device pci-bridge,\S+\s+', - vm.qemu_command) + dev_info_list = re.findall(r"\s+-device pci-bridge,\S+\s+", vm.qemu_command) for dev_info in dev_info_list: - if ('id=' + id) in dev_info: - return re.search(r'(addr)=(\w+)', dev_info).group(2) + if ("id=" + id) in dev_info: + return re.search(r"(addr)=(\w+)", dev_info).group(2) def _verify_boot_status(boot_dev, content): dev_params = params.object_params(boot_dev) - child_addr = dev_params.get('child_addr') - sub_child_addr = dev_params.get('sub_child_addr', None) - parent_bus = dev_params.get('parent_bus') - child_bus = dev_params.get('child_bus') - if child_bus == 'pci-bridge': - pci_bus_id = params.get('pci_bus_image1', None) + child_addr = dev_params.get("child_addr") + sub_child_addr = dev_params.get("sub_child_addr", None) + parent_bus = dev_params.get("parent_bus") + child_bus = dev_params.get("child_bus") + if child_bus == "pci-bridge": + pci_bus_id = params.get("pci_bus_image1", None) child_addr = _get_pci_bridge_addr(pci_bus_id) if sub_child_addr: - fail_info = ('Failed to boot from %s device (%s@%s).' - % (boot_dev, child_addr, sub_child_addr)) - ret_info = ('Booted from %s device (%s@%s) successfully.' - % (boot_dev, child_addr, sub_child_addr)) + fail_info = f"Failed to boot from {boot_dev} device ({child_addr}@{sub_child_addr})." + ret_info = f"Booted from {boot_dev} device ({child_addr}@{sub_child_addr}) successfully." else: - fail_info = ('Failed to boot from %s device(@%s).' % - (boot_dev, child_addr)) - ret_info = ('Booted from %s device(@%s) successfully.' % - (boot_dev, child_addr)) - if not slof.verify_boot_device(content, parent_bus, child_bus, - child_addr, sub_child_addr): + fail_info = f"Failed to boot from {boot_dev} device(@{child_addr})." + ret_info = f"Booted from {boot_dev} device(@{child_addr}) successfully." + if not slof.verify_boot_device( + content, parent_bus, child_bus, child_addr, sub_child_addr + ): test.fail(fail_info) test.log.info(ret_info) o = process.getoutput(params.get("check_slof_version")).strip() test.log.info("Check the version of SLOF: '%s'", o) - vm = env.get_vm(params['main_vm']) + vm = env.get_vm(params["main_vm"]) vm.verify_alive() content, _ = slof.wait_for_loaded(vm, test) error_context.context("Check the output of SLOF.", test.log.info) slof.check_error(test, content) - _verify_boot_status(params['boot_dev_type'], content) + _verify_boot_status(params["boot_dev_type"], content) - error_context.context("Try to log into guest '%s'." 
% vm.name, - test.log.info) + error_context.context(f"Try to log into guest '{vm.name}'.", test.log.info) timeout = float(params.get("login_timeout", 240)) session = vm.wait_for_login(timeout=timeout) test.log.info("log into guest '%s' successfully.", vm.name) error_context.context("Try to ping external host.", test.log.info) extra_host_ip = utils_net.get_host_ip_address(params) - s, o = session.cmd_status_output('ping %s -c 5' % extra_host_ip) + s, o = session.cmd_status_output(f"ping {extra_host_ip} -c 5") test.log.debug(o) if s: test.fail("Failed to ping external host.") diff --git a/qemu/tests/slof_device_tree.py b/qemu/tests/slof_device_tree.py index d1259f5eac..509a4150b6 100644 --- a/qemu/tests/slof_device_tree.py +++ b/qemu/tests/slof_device_tree.py @@ -22,8 +22,7 @@ def get_info(vm_session, guest_info): :param vm_session: session to checked vm. :return: if file does not exist return None, or not, return it's value """ - output = vm_session.cmd_output( - "echo `cat /proc/device-tree/%s`" % guest_info) + output = vm_session.cmd_output(f"echo `cat /proc/device-tree/{guest_info}`") if match_str in output: test.log.info(output) return None @@ -32,7 +31,7 @@ def get_info(vm_session, guest_info): def compare_dev_tree(keyword, src): dst = get_info(session, keyword) if src != dst: - test.fail("%s does not match to %s" % (src, dst)) + test.fail(f"{src} does not match to {dst}") def check_nonexist_aliases(vm_session): """ @@ -41,10 +40,10 @@ def check_nonexist_aliases(vm_session): :param vm_session: session to checked vm. """ - status = vm_session.cmd_status( - "test -f /proc/device-tree/aliases/cdrom") + status = vm_session.cmd_status("test -f /proc/device-tree/aliases/cdrom") error_context.context( - "Checking whether aliases file is indeed nonexisting", test.log.info) + "Checking whether aliases file is indeed nonexisting", test.log.info + ) if status == 0: test.fail("Nonexist cdrom aliases check failed.") @@ -53,7 +52,7 @@ def check_nonexist_aliases(vm_session): timeout = int(params.get("login_timeout", 600)) session = vm.wait_for_login(timeout=timeout) - match_str = params['match_str'] + match_str = params["match_str"] try: uuid = vm.get_uuid() @@ -61,7 +60,8 @@ def check_nonexist_aliases(vm_session): compare_dev_tree("vm,uuid", uuid) if get_info(session, "host-serial"): host_system_id = process.getoutput( - "cat /proc/device-tree/system-id", verbose=True).strip("\x00") + "cat /proc/device-tree/system-id", verbose=True + ).strip("\x00") compare_dev_tree("host-serial", host_system_id) compare_dev_tree("ibm,partition-name", params["main_vm"]) diff --git a/qemu/tests/slof_greater_lun_id.py b/qemu/tests/slof_greater_lun_id.py index 318b7fffde..ecc0cb0871 100644 --- a/qemu/tests/slof_greater_lun_id.py +++ b/qemu/tests/slof_greater_lun_id.py @@ -3,11 +3,9 @@ 1.SLOF could support LUN ID greater than 255. """ +from virttest import env_process, error_context, utils_net -from virttest import error_context from provider import slof -from virttest import utils_net -from virttest import env_process @error_context.context_aware @@ -29,9 +27,9 @@ def run(test, params, env): :param params: Dictionary with the test parameters. :param env: Dictionary with test environment. 
""" - params['start_vm'] = 'yes' + params["start_vm"] = "yes" start_pos = 0 - for params['drive_lun_image1'] in params['lun_ids'].split(): + for params["drive_lun_image1"] in params["lun_ids"].split(): env_process.preprocess_vm(test, params, env, params["main_vm"]) vm = env.get_vm(params["main_vm"]) vm.verify_alive() @@ -40,15 +38,14 @@ def run(test, params, env): error_context.context("Check the output of SLOF.", test.log.info) slof.check_error(test, content) - error_context.context("Try to log into guest '%s'." % vm.name, - test.log.info) + error_context.context(f"Try to log into guest '{vm.name}'.", test.log.info) timeout = float(params.get("login_timeout", 240)) session = vm.wait_for_login(timeout=timeout) test.log.info("log into guest '%s' successfully.", vm.name) error_context.context("Try to ping external host.", test.log.info) extra_host_ip = utils_net.get_host_ip_address(params) - session.cmd('ping %s -c 5' % extra_host_ip) + session.cmd(f"ping {extra_host_ip} -c 5") test.log.info("Ping host(%s) successfully.", extra_host_ip) session.close() vm.destroy(gracefully=True) diff --git a/qemu/tests/slof_hugepage.py b/qemu/tests/slof_hugepage.py index dd4064b2e2..fd597547ce 100644 --- a/qemu/tests/slof_hugepage.py +++ b/qemu/tests/slof_hugepage.py @@ -3,18 +3,22 @@ 1. Boot guest with hugepage backing file then hotplug hugepage. 2. Boot guest without hugepage backing file then hotplug hugepage. """ + import logging -from virttest import error_context -from provider import slof -from virttest import utils_net -from virttest import env_process -from virttest import test_setup +from virttest import ( + env_process, + error_context, + test_setup, + utils_misc, + utils_net, + utils_numeric, +) from virttest.utils_test.qemu import MemoryHotplugTest -from virttest import utils_numeric -from virttest import utils_misc -LOG_JOB = logging.getLogger('avocado.test') +from provider import slof + +LOG_JOB = logging.getLogger("avocado.test") def _setup_hugepage(params): @@ -26,35 +30,32 @@ def _setup_hugepage(params): 3. Set hugepage by executing "echo $num > /proc/sys/vm/nr_hugepages". 4. Mount this hugepage to /mnt/kvm_hugepage. """ - size = params['total_hugepage_size'] + size = params["total_hugepage_size"] huge_page = test_setup.HugePageConfig(params) - error_context.context('Assign %sMB hugepages in host.' 
% size, LOG_JOB.info) + error_context.context(f"Assign {size}MB hugepages in host.", LOG_JOB.info) hugepage_size = huge_page.get_hugepage_size() - LOG_JOB.debug('Hugepage size is %skB in host.', hugepage_size) + LOG_JOB.debug("Hugepage size is %skB in host.", hugepage_size) huge_page.target_hugepages = int((int(size) * 1024) // hugepage_size) - LOG_JOB.debug('Set hugepages to %d pages in host.', - huge_page.target_hugepages) - huge_page.set_node_num_huge_pages(huge_page.target_hugepages, - 0, hugepage_size) + LOG_JOB.debug("Set hugepages to %d pages in host.", huge_page.target_hugepages) + huge_page.set_node_num_huge_pages(huge_page.target_hugepages, 0, hugepage_size) - error_context.context('mount hugepages to %s' - % huge_page.hugepage_path, LOG_JOB.info) + error_context.context(f"mount hugepages to {huge_page.hugepage_path}", LOG_JOB.info) huge_page.mount_hugepage_fs() params["hugepage_path"] = huge_page.hugepage_path def _check_mem_increase(session, params, orig_mem): """Check the size of memory increased.""" - increase_mem = int( - utils_numeric.normalize_data_size(params['size_mem_plug'], 'B')) - new_mem = int(session.cmd_output(cmd=params['free_mem_cmd'])) + increase_mem = int(utils_numeric.normalize_data_size(params["size_mem_plug"], "B")) + new_mem = int(session.cmd_output(cmd=params["free_mem_cmd"])) if (new_mem - orig_mem) == increase_mem: error_context.context( - 'Get guest free memory size after hotplug pc-dimm.', LOG_JOB.info) - LOG_JOB.debug('Guest free memory size is %d bytes', new_mem) - LOG_JOB.info("Guest memory size is increased %s.", params['size_mem_plug']) + "Get guest free memory size after hotplug pc-dimm.", LOG_JOB.info + ) + LOG_JOB.debug("Guest free memory size is %d bytes", new_mem) + LOG_JOB.info("Guest memory size is increased %s.", params["size_mem_plug"]) return True return False @@ -82,14 +83,14 @@ def run(test, params, env): :param params: Dictionary with the test parameters. :param env: Dictionary with test environment. """ + def _wait_for_login(cur_pos=0): """Wait for login guest.""" content, next_pos = slof.wait_for_loaded(vm, test, cur_pos) error_context.context("Check the output of SLOF.", test.log.info) slof.check_error(test, content) - error_context.context("Try to log into guest '%s'." 
% vm.name, - test.log.info) + error_context.context(f"Try to log into guest '{vm.name}'.", test.log.info) timeout = float(params.get("login_timeout", 240)) session = vm.wait_for_login(timeout=timeout) test.log.info("log into guest '%s' successfully.", vm.name) @@ -97,37 +98,41 @@ def _wait_for_login(cur_pos=0): _setup_hugepage(params) - params['start_vm'] = 'yes' + params["start_vm"] = "yes" env_process.preprocess_vm(test, params, env, params["main_vm"]) vm = env.get_vm(params["main_vm"]) vm.verify_alive() session, next_pos = _wait_for_login() - error_context.context('Get guest free memory size before hotplug pc-dimm.', - test.log.info) - orig_mem = int(session.cmd_output(cmd=params['free_mem_cmd'])) - test.log.debug('Guest free memory size is %d bytes', orig_mem) + error_context.context( + "Get guest free memory size before hotplug pc-dimm.", test.log.info + ) + orig_mem = int(session.cmd_output(cmd=params["free_mem_cmd"])) + test.log.debug("Guest free memory size is %d bytes", orig_mem) - error_context.context('Hotplug pc-dimm for guest.', test.log.info) + error_context.context("Hotplug pc-dimm for guest.", test.log.info) htp_mem = MemoryHotplugTest(test, params, env) - htp_mem.hotplug_memory(vm, params['plug_mem_name']) + htp_mem.hotplug_memory(vm, params["plug_mem_name"]) - plug_timeout = float(params.get('plug_timeout', 5)) + plug_timeout = float(params.get("plug_timeout", 5)) if not utils_misc.wait_for( - lambda: _check_mem_increase(session, params, orig_mem), - plug_timeout): - test.fail("Guest memory size is not increased %s in %s sec." - % (params['size_mem_plug'], params.get('plug_timeout', 5))) - - error_context.context('Reboot guest', test.log.info) + lambda: _check_mem_increase(session, params, orig_mem), plug_timeout + ): + test.fail( + "Guest memory size is not increased {} in {} sec.".format( + params["size_mem_plug"], params.get("plug_timeout", 5) + ) + ) + + error_context.context("Reboot guest", test.log.info) session.close() vm.reboot() session, _ = _wait_for_login(next_pos) error_context.context("Try to ping external host.", test.log.info) extra_host_ip = utils_net.get_host_ip_address(params) - session.cmd('ping %s -c 5' % extra_host_ip) + session.cmd(f"ping {extra_host_ip} -c 5") test.log.info("Ping host(%s) successfully.", extra_host_ip) session.close() diff --git a/qemu/tests/slof_memory.py b/qemu/tests/slof_memory.py index 1575d82544..f6a904257c 100644 --- a/qemu/tests/slof_memory.py +++ b/qemu/tests/slof_memory.py @@ -3,10 +3,9 @@ 1. CAS(client-architecture-support) response with large maxmem. """ -from virttest import error_context +from virttest import env_process, error_context, utils_net + from provider import slof -from virttest import env_process -from virttest import utils_net @error_context.context_aware @@ -29,8 +28,8 @@ def run(test, params, env): :param env: Dictionary with test environment. """ start_pos = 0 - for mem in params['maxmem_mem_list'].split(): - params['maxmem_mem'] = mem + for mem in params["maxmem_mem_list"].split(): + params["maxmem_mem"] = mem env_process.preprocess_vm(test, params, env, params["main_vm"]) vm = env.get_vm(params["main_vm"]) @@ -40,15 +39,14 @@ def run(test, params, env): error_context.context("Check the output of SLOF.", test.log.info) slof.check_error(test, content) - error_context.context("Try to log into guest '%s'." 
% vm.name, - test.log.info) + error_context.context(f"Try to log into guest '{vm.name}'.", test.log.info) timeout = float(params.get("login_timeout", 240)) session = vm.wait_for_login(timeout=timeout) test.log.info("log into guest '%s' successfully.", vm.name) error_context.context("Try to ping external host.", test.log.info) extra_host_ip = utils_net.get_host_ip_address(params) - session.cmd('ping %s -c 5' % extra_host_ip) + session.cmd(f"ping {extra_host_ip} -c 5") test.log.info("Ping host(%s) successfully.", extra_host_ip) session.close() diff --git a/qemu/tests/slof_multi_devices.py b/qemu/tests/slof_multi_devices.py index 247e67fe7e..282e9d5f1c 100644 --- a/qemu/tests/slof_multi_devices.py +++ b/qemu/tests/slof_multi_devices.py @@ -4,10 +4,9 @@ 2. VM boot successfully with lots of virtio-net-pci devices. """ -from virttest import error_context +from virttest import env_process, error_context, utils_net + from provider import slof -from virttest import utils_net -from virttest import env_process @error_context.context_aware @@ -30,23 +29,23 @@ def run(test, params, env): :param params: Dictionary with the test parameters. :param env: Dictionary with test environment. """ - if params['device_type'] == 'pci-bridge': - for id in range(1, int(params['pci_bridge_num'])): - params['pci_controllers'] += ' pci_bridge%d' % id - params['type_pci_bridge%d' % id] = 'pci-bridge' - elif params['device_type'] == 'virtio-net-pci': - pci_num = int(params['pci_bridge_num']) + if params["device_type"] == "pci-bridge": + for id in range(1, int(params["pci_bridge_num"])): + params["pci_controllers"] += " pci_bridge%d" % id + params["type_pci_bridge%d" % id] = "pci-bridge" + elif params["device_type"] == "virtio-net-pci": + pci_num = int(params["pci_bridge_num"]) nic_id = 0 for pci_id in range(pci_num): - params['pci_controllers'] += ' pci_bridge%d' % pci_id - params['type_pci_bridge%d' % pci_id] = 'pci-bridge' - nic_num_per_pci = int(params['nic_num_per_pci_bridge']) + params["pci_controllers"] += " pci_bridge%d" % pci_id + params["type_pci_bridge%d" % pci_id] = "pci-bridge" + nic_num_per_pci = int(params["nic_num_per_pci_bridge"]) for i in range(nic_num_per_pci): - params["nics"] = ' '.join([params["nics"], 'nic%d' % nic_id]) - params["nic_pci_bus_nic%d" % nic_id] = 'pci_bridge%d' % pci_id + params["nics"] = " ".join([params["nics"], "nic%d" % nic_id]) + params["nic_pci_bus_nic%d" % nic_id] = "pci_bridge%d" % pci_id nic_id += 1 - params['start_vm'] = 'yes' + params["start_vm"] = "yes" env_process.preprocess_vm(test, params, env, params["main_vm"]) vm = env.get_vm(params["main_vm"]) vm.verify_alive() @@ -55,28 +54,32 @@ def run(test, params, env): error_context.context("Check the output of SLOF.", test.log.info) slof.check_error(test, content) - error_context.context("Try to log into guest '%s'." % vm.name, - test.log.info) + error_context.context(f"Try to log into guest '{vm.name}'.", test.log.info) timeout = float(params.get("login_timeout", 240)) session = vm.wait_for_login(timeout=timeout) test.log.info("log into guest '%s' successfully.", vm.name) error_context.context("Try to ping external host.", test.log.info) extra_host_ip = utils_net.get_host_ip_address(params) - session.cmd('ping %s -c 5' % extra_host_ip) + session.cmd(f"ping {extra_host_ip} -c 5") test.log.info("Ping host(%s) successfully.", extra_host_ip) - if params['device_type'] == 'virtio-net-pci': - nic_num = int(str(session.cmd_output(params['nic_check_cmd']))) - error_context.context('Found %d ehternet controllers inside guest.' 
-                              % nic_num, test.log.info)
+        error_context.context(
+            "Found %d ethernet controllers inside guest." % nic_num, test.log.info
+        )
         if (pci_num * nic_num_per_pci) != nic_num:
             test.fail(
                 "The number of ethernet controllers is not equal to %s "
-                "inside guest." % (pci_num * nic_num_per_pci))
+                "inside guest." % (pci_num * nic_num_per_pci)
+            )
         test.log.info(
-            'The number of ehternet controllers inside guest is equal to '
-            'qemu command line(%d * %d).', pci_num, nic_num_per_pci)
+            "The number of ethernet controllers inside guest is equal to "
+            "qemu command line(%d * %d).",
+            pci_num,
+            nic_num_per_pci,
+        )
     session.close()
     vm.destroy(gracefully=True)
diff --git a/qemu/tests/slof_next_entry.py b/qemu/tests/slof_next_entry.py
index b76be92ad1..bb6ce0582f 100644
--- a/qemu/tests/slof_next_entry.py
+++ b/qemu/tests/slof_next_entry.py
@@ -21,18 +21,20 @@ def run(test, params, env):
     """
     def get_kernels_info():
-        """ Get detailed information about each kernel version in the guest. """
+        """Get detailed information about each kernel version in the guest."""
         kernels_info = {}
         for kernel in kernel_list:
-            grubby_info = session.cmd_output("grubby --info=%s" % kernel,
-                                             print_func=test.log.info)
-            entry_dict = dict((item.replace('"', '').split("=", 1)
-                               for item in grubby_info.splitlines()))
+            grubby_info = session.cmd_output(
+                f"grubby --info={kernel}", print_func=test.log.info
+            )
+            entry_dict = dict(
+                item.replace('"', "").split("=", 1) for item in grubby_info.splitlines()
+            )
             kernels_info[int(entry_dict.pop("index"))] = entry_dict
         return kernels_info
     def check_kernel_version(k_index):
-        """ Check whether the kernel version matches the kernel index.
""" + """Check whether the kernel version matches the kernel index.""" current_kernel = session.cmd_output("uname -r").strip() if guest_kernels[k_index]["kernel"].split("-", 1)[1] != current_kernel: test.log.debug("The current kernel version is: %s", current_kernel) @@ -51,30 +53,36 @@ def check_kernel_version(k_index): test.log.info("Ensure the guest has at least two kernel versions") kernel_list = session.cmd_output(get_kernel_list_cmd).splitlines() if len(kernel_list) < 2: - test.cancel("This test requires at least two kernel versions in the " - "guest") + test.cancel("This test requires at least two kernel versions in the " "guest") if session.cmd_output("grubby --default-index").strip() != "0": test.log.info("Ensure that the default kernel index of the guest is 0.") session.cmd("grubby --set-default-index=0") session = vm.reboot() guest_kernels = get_kernels_info() - error_context.context("Set a next boot entry other than the default one and" - " check it", test.log.info) + error_context.context( + "Set a next boot entry other than the default one and" " check it", + test.log.info, + ) next_entry = guest_kernels[1]["title"] - session.cmd("grub2-reboot '%s'" % next_entry) - grub_env = dict((item.split("=", 1) for item in - session.cmd_output("grub2-editenv list").splitlines())) + session.cmd(f"grub2-reboot '{next_entry}'") + grub_env = dict( + item.split("=", 1) + for item in session.cmd_output("grub2-editenv list").splitlines() + ) grub_next_entry = grub_env["next_entry"] if grub_next_entry != next_entry: test.log.debug("The 'next_entry' is: %s", grub_next_entry) test.fail("The next boot entry is not expected as we set") - error_context.base_context("Reboot guest, check the kernel version and " - "'next_entry'", test.log.info) + error_context.base_context( + "Reboot guest, check the kernel version and " "'next_entry'", test.log.info + ) session = vm.reboot(session) - grub_env = dict((item.split("=", 1) for item in - session.cmd_output("grub2-editenv list").splitlines())) + grub_env = dict( + item.split("=", 1) + for item in session.cmd_output("grub2-editenv list").splitlines() + ) check_kernel_version(1) grub_next_entry = grub_env["next_entry"] if grub_next_entry: diff --git a/qemu/tests/slof_open_bios.py b/qemu/tests/slof_open_bios.py index 5c989efc74..5011970e78 100644 --- a/qemu/tests/slof_open_bios.py +++ b/qemu/tests/slof_open_bios.py @@ -3,9 +3,9 @@ 1. Disable the auto-boot feature with qemu cli "-prom-env 'auto-boot?=false'". """ -from virttest import error_context +from virttest import error_context, utils_net + from provider import slof -from virttest import utils_net @error_context.context_aware @@ -26,33 +26,33 @@ def run(test, params, env): :param params: Dictionary with the test. :param env: Dictionary with test environment. """ + def _send_custom_key(): - """ Send custom keyword to SLOF's user interface. 
""" - test.log.info('Sending \"%s\" to SLOF user interface.', send_key) + """Send custom keyword to SLOF's user interface.""" + test.log.info('Sending "%s" to SLOF user interface.', send_key) for key in send_key: - key = 'minus' if key == '-' else key + key = "minus" if key == "-" else key vm.send_key(key) - vm.send_key('ret') + vm.send_key("ret") vm = env.get_vm(params["main_vm"]) - send_key = params.get('send_key') - end_str = params.get('slof_end_str', '0 >') + send_key = params.get("send_key") + end_str = params.get("slof_end_str", "0 >") vm.verify_alive() content, next_pos = slof.wait_for_loaded(vm, test, end_str=end_str) - test.log.info('SLOF stop at \'%s\'.', end_str) + test.log.info("SLOF stop at '%s'.", end_str) - error_context.context( - "Enter to menu by sending \'%s\'." % send_key, test.log.info) + error_context.context(f"Enter to menu by sending '{send_key}'.", test.log.info) _send_custom_key() - content, _ = slof.wait_for_loaded(vm, test, next_pos, 'Trying to load') + content, _ = slof.wait_for_loaded(vm, test, next_pos, "Trying to load") - error_context.context("Try to log into guest '%s'." % vm.name, test.log.info) + error_context.context(f"Try to log into guest '{vm.name}'.", test.log.info) session = vm.wait_for_login(timeout=float(params["login_timeout"])) test.log.info("log into guest '%s' successfully.", vm.name) error_context.context("Try to ping external host.", test.log.info) extra_host_ip = utils_net.get_host_ip_address(params) - session.cmd('ping %s -c 5' % extra_host_ip) + session.cmd(f"ping {extra_host_ip} -c 5") test.log.info("Ping host(%s) successfully.", extra_host_ip) session.close() diff --git a/qemu/tests/slof_order.py b/qemu/tests/slof_order.py index a2ca0ea813..6a14c70d57 100644 --- a/qemu/tests/slof_order.py +++ b/qemu/tests/slof_order.py @@ -7,9 +7,9 @@ are both not bootable. """ -from virttest import error_context +from virttest import env_process, error_context + from provider import slof -from virttest import env_process @error_context.context_aware @@ -43,42 +43,48 @@ def run(test, params, env): :param params: Dictionary with the test . :param env: Dictionary with test environment. """ + def _send_custom_key(): - """ Send custom keyword to SLOF's user interface. """ - test.log.info('Sending \"%s\" to SLOF user interface.', send_key) + """Send custom keyword to SLOF's user interface.""" + test.log.info('Sending "%s" to SLOF user interface.', send_key) for key in send_key: - key = 'minus' if key == '-' else key + key = "minus" if key == "-" else key vm.send_key(key) - vm.send_key('ret') + vm.send_key("ret") def _verify_boot_order(order): - """ Verify the order of booted devices. """ + """Verify the order of booted devices.""" for index, dev in enumerate(order.split()): args = device_map[dev] - details = 'The device({}@{}) is not the {} bootable device.'.format( - args[1], args[2], index) + details = ( + f"The device({args[1]}@{args[2]}) is not the {index} bootable device." 
+ ) if not slof.verify_boot_device( - content, args[0], args[1], args[2], position=index): - test.fail('Fail: ' + details) - test.log.info('Pass: %s', details) + content, args[0], args[1], args[2], position=index + ): + test.fail("Fail: " + details) + test.log.info("Pass: %s", details) - parent_bus = params.get('parent_bus') - child_bus = params.get('child_bus') - parent_bus_nic = params.get('parent_bus_nic') - child_bus_nic = params.get('child_bus_nic') - send_key = params.get('send_key') - device_map = {'c': (parent_bus, child_bus, params.get('disk_addr')), - 'd': (parent_bus, child_bus, params.get('cdrom_addr')), - 'n': (parent_bus_nic, child_bus_nic, params.get('nic_addr'))} + parent_bus = params.get("parent_bus") + child_bus = params.get("child_bus") + parent_bus_nic = params.get("parent_bus_nic") + child_bus_nic = params.get("child_bus_nic") + send_key = params.get("send_key") + device_map = { + "c": (parent_bus, child_bus, params.get("disk_addr")), + "d": (parent_bus, child_bus, params.get("cdrom_addr")), + "n": (parent_bus_nic, child_bus_nic, params.get("nic_addr")), + } env_process.process( - test, params, env, env_process.preprocess_image, env_process.preprocess_vm) + test, params, env, env_process.preprocess_image, env_process.preprocess_vm + ) vm = env.get_vm(params["main_vm"]) vm.verify_alive() - content, next_pos = slof.wait_for_loaded(vm, test, end_str='0 >') - _verify_boot_order(params['order_before_send_key']) - if send_key in ('reset-all', 'boot'): + content, next_pos = slof.wait_for_loaded(vm, test, end_str="0 >") + _verify_boot_order(params["order_before_send_key"]) + if send_key in ("reset-all", "boot"): error_context.context("Reboot guest by sending key.", test.log.info) _send_custom_key() - content, _ = slof.wait_for_loaded(vm, test, next_pos, end_str='0 >') - _verify_boot_order(params['order_after_send_key']) + content, _ = slof.wait_for_loaded(vm, test, next_pos, end_str="0 >") + _verify_boot_order(params["order_after_send_key"]) diff --git a/qemu/tests/slof_user_interface.py b/qemu/tests/slof_user_interface.py index 8b728226ff..527cc7b06c 100644 --- a/qemu/tests/slof_user_interface.py +++ b/qemu/tests/slof_user_interface.py @@ -4,15 +4,14 @@ 2. SLOF user interface testing - boot. 3. SLOF user interface testing - reset-all. """ + import os import re import time -from virttest import error_context +from virttest import env_process, error_context, utils_misc, utils_net + from provider import slof -from virttest import env_process -from virttest import utils_misc -from virttest import utils_net @error_context.context_aware @@ -44,110 +43,120 @@ def run(test, params, env): :param env: Dictionary with test environment. """ STOP, F12 = range(2) - enter_key = {STOP: 's', F12: 'f12'} + enter_key = {STOP: "s", F12: "f12"} def _send_custom_key(keystr): - """ Send custom keyword to SLOF's user interface. """ - test.log.info('Sending \"%s\" to SLOF user interface.', keystr) + """Send custom keyword to SLOF's user interface.""" + test.log.info('Sending "%s" to SLOF user interface.', keystr) for key in keystr: - key = 'minus' if key == '-' else key + key = "minus" if key == "-" else key vm.send_key(key) - vm.send_key('ret') + vm.send_key("ret") def _send_key(key, custom=True, sleep=0.0): - """ Send keywords to SLOF's user interface. 
""" - obj_name = 'select' if re.search(r'^\d+$', key) else key - k_params = params.object_params(obj_name.replace('-', '_')) + """Send keywords to SLOF's user interface.""" + obj_name = "select" if re.search(r"^\d+$", key) else key + k_params = params.object_params(obj_name.replace("-", "_")) if custom: _send_custom_key(key) else: vm.send_key(key) time.sleep(sleep) - content, _ = slof.get_boot_content(vm, 0, k_params['start'], k_params['end']) + content, _ = slof.get_boot_content(vm, 0, k_params["start"], k_params["end"]) if content: - test.log.info('Output of SLOF:\n%s', ''.join(content)) - return ''.join(content) + test.log.info("Output of SLOF:\n%s", "".join(content)) + return "".join(content) return None def _check_menu_info(menu_info): - """ Check the menu info by each items. """ - bootable_num = '' - for i in range(1, int(params['boot_dev_num']) + 1): - option = params['menu_option%d' % i] - test.log.info('Checking the device(%s) if is included in menu list.', - '->'.join(option.split())) + """Check the menu info by each items.""" + bootable_num = "" + for i in range(1, int(params["boot_dev_num"]) + 1): + option = params["menu_option%d" % i] + test.log.info( + "Checking the device(%s) if is included in menu list.", + "->".join(option.split()), + ) dev_type, hba_type, child_bus, addr = option.split() - addr = re.sub(r'^0x0?', '', addr) - pattern = re.compile(r'(\d+)\)\s+%s(\d+)?\s+:\s+/%s(\S+)?/%s@%s' % - (dev_type, hba_type, child_bus, addr), re.M) + addr = re.sub(r"^0x0?", "", addr) + pattern = re.compile( + rf"(\d+)\)\s+{dev_type}(\d+)?\s+:\s+/{hba_type}(\S+)?/{child_bus}@{addr}", + re.M, + ) searched = pattern.search(menu_info) if not searched: - test.fail('No such item(%s) in boot menu list.' % - '->'.join(option.split())) - if i == int(params['bootable_index']): + test.fail( + "No such item({}) in boot menu list.".format( + "->".join(option.split()) + ) + ) + if i == int(params["bootable_index"]): bootable_num = searched.group(1) return bootable_num def _enter_user_interface(mode): - """ Enter user interface. """ + """Enter user interface.""" o = utils_misc.wait_for( - lambda: _send_key(enter_key[mode], False), ack_timeout, step=0.0) + lambda: _send_key(enter_key[mode], False), ack_timeout, step=0.0 + ) if not o: - test.fail('Failed to enter user interface in %s sec.' % ack_timeout) + test.fail(f"Failed to enter user interface in {ack_timeout} sec.") return o def _f12_user_interface_test(): - """ Test f12 user interface. """ + """Test f12 user interface.""" menu_list = _enter_user_interface(F12) - actual_num = len(re.findall(r'\d+\)', menu_list)) - dev_num = params['boot_dev_num'] + actual_num = len(re.findall(r"\d+\)", menu_list)) + dev_num = params["boot_dev_num"] if actual_num != int(dev_num): - test.fail( - 'The number of boot devices is not %s in menu list.' % dev_num) + test.fail(f"The number of boot devices is not {dev_num} in menu list.") if not utils_misc.wait_for( - lambda: _send_key( - _check_menu_info(menu_list), False), ack_timeout, step=0.0): - test.fail('Failed to load after selecting boot device ' - 'in %s sec.' % ack_timeout) + lambda: _send_key(_check_menu_info(menu_list), False), ack_timeout, step=0.0 + ): + test.fail( + "Failed to load after selecting boot device " f"in {ack_timeout} sec." + ) def _load_user_interface_test(): - """ Test boot/reset-all user interface. 
""" + """Test boot/reset-all user interface.""" _enter_user_interface(STOP) if not utils_misc.wait_for( - lambda: _send_key(keys, True, 3), ack_timeout, step=0.0): - test.fail( - 'Failed to load after \'%s\' in %s sec.' % (keys, ack_timeout)) + lambda: _send_key(keys, True, 3), ack_timeout, step=0.0 + ): + test.fail(f"Failed to load after '{keys}' in {ack_timeout} sec.") def _check_serial_log_status(): - """ Check the status of serial log. """ + """Check the status of serial log.""" file_timeout = 30 if not utils_misc.wait_for( - lambda: os.path.isfile(vm.serial_console_log), - file_timeout): - test.error('No found serial log during %s sec.' % file_timeout) - - main_tests = {'f12': _f12_user_interface_test, - 'boot': _load_user_interface_test, - 'reset-all': _load_user_interface_test} - - ack_timeout = params['ack_timeout'] - keys = params['send_keys'] + lambda: os.path.isfile(vm.serial_console_log), file_timeout + ): + test.error(f"No found serial log during {file_timeout} sec.") + + main_tests = { + "f12": _f12_user_interface_test, + "boot": _load_user_interface_test, + "reset-all": _load_user_interface_test, + } + + ack_timeout = params["ack_timeout"] + keys = params["send_keys"] env_process.process( - test, params, env, env_process.preprocess_image, env_process.preprocess_vm) + test, params, env, env_process.preprocess_image, env_process.preprocess_vm + ) vm = env.get_vm(params["main_vm"]) vm.verify_alive() _check_serial_log_status() main_tests[keys]() - error_context.context("Try to log into guest '%s'." % vm.name, - test.log.info) - session = vm.wait_for_login(timeout=float(params['login_timeout'])) + error_context.context(f"Try to log into guest '{vm.name}'.", test.log.info) + session = vm.wait_for_login(timeout=float(params["login_timeout"])) test.log.info("log into guest '%s' successfully.", vm.name) error_context.context("Try to ping external host.", test.log.info) extra_host_ip = utils_net.get_host_ip_address(params) - session.cmd('ping %s -c 5' % extra_host_ip) + session.cmd(f"ping {extra_host_ip} -c 5") test.log.info("Ping host(%s) successfully.", extra_host_ip) vm.destroy(gracefully=True) diff --git a/qemu/tests/smartcard_setup.py b/qemu/tests/smartcard_setup.py index a05450dffa..b267d96aee 100644 --- a/qemu/tests/smartcard_setup.py +++ b/qemu/tests/smartcard_setup.py @@ -9,8 +9,7 @@ """ -from virttest import utils_misc -from virttest import utils_spice +from virttest import utils_misc, utils_spice def run(test, params, env): @@ -40,17 +39,20 @@ def run(test, params, env): client_session = client_vm.wait_for_login( timeout=int(params.get("login_timeout", 360)), - username="root", password="123456") + username="root", + password="123456", + ) for vm in params.get("vms").split(): - utils_spice.clear_interface(env.get_vm(vm), - int(params.get("login_timeout", "360"))) + utils_spice.clear_interface( + env.get_vm(vm), int(params.get("login_timeout", "360")) + ) # generate a random string, used to create a random key for the certs randomstring = utils_misc.generate_random_string(2048) cmd = "echo '" + randomstring + "' > /tmp/randomtext.txt" output = client_session.cmd(cmd) - #output2 = client_session.cmd("cat /tmp/randomtext.txt") + # output2 = client_session.cmd("cat /tmp/randomtext.txt") utils_spice.wait_timeout(5) # for each cert listed by the test, create it on the client @@ -71,7 +73,7 @@ def run(test, params, env): # Verify that all the certs have been generated on the client for cert in cert_list: - if not (cert in output): - test.fail("Certificate %s not found" % cert) + 
if cert not in output: + test.fail(f"Certificate {cert} not found") client_session.close() diff --git a/qemu/tests/smbios_default_check.py b/qemu/tests/smbios_default_check.py index 775d238a48..8e3db64c7e 100644 --- a/qemu/tests/smbios_default_check.py +++ b/qemu/tests/smbios_default_check.py @@ -19,13 +19,17 @@ def check_info(cmd, template): msg_log = "Check " + template + " info" error_context.context(msg_log, test.log.info) cmd_output = session.cmd_output(cmd) - cmd_output_re = re.split('\n', cmd_output.strip('\n'))[-1].strip(' ') + cmd_output_re = re.split("\n", cmd_output.strip("\n"))[-1].strip(" ") template = params[template] if not re.match(template, cmd_output_re): return cmd_output_re - re_template = ["System_Manufacturer", "System_SKU_Number", - "Baseboard_Manufacturer", "Baseboard_Product_Name"] + re_template = [ + "System_Manufacturer", + "System_SKU_Number", + "Baseboard_Manufacturer", + "Baseboard_Product_Name", + ] vm = env.get_vm(params["main_vm"]) vm.verify_alive() @@ -41,10 +45,13 @@ def check_info(cmd, template): for cmd, template in zip(check_info_cmd, re_template): output = check_info(cmd, template) if output: - e_msg = ("%s mismatch, out: %s" % (template, output)) + e_msg = f"{template} mismatch, out: {output}" failures.append(e_msg) session.close() if failures: - test.fail("Smbios default check test reported %s failures:\n%s" - % (len(failures), "\n".join(failures))) + test.fail( + "Smbios default check test reported {} failures:\n{}".format( + len(failures), "\n".join(failures) + ) + ) diff --git a/qemu/tests/smbios_table.py b/qemu/tests/smbios_table.py index 519e5a8202..986308a14f 100644 --- a/qemu/tests/smbios_table.py +++ b/qemu/tests/smbios_table.py @@ -1,10 +1,7 @@ import re from avocado.utils import process - -from virttest import error_context -from virttest import env_process -from virttest import utils_misc +from virttest import env_process, error_context, utils_misc @error_context.context_aware @@ -34,16 +31,16 @@ def run(test, params, env): smbios_type_number = 0 elif sm_type == "System": smbios_type_number = 1 - smbios += " -smbios type=%s" % smbios_type_number # pylint: disable=E0606 + smbios += f" -smbios type={smbios_type_number}" # pylint: disable=E0606 dmidecode_key = params.object_params(sm_type).get("dmikeyword") dmidecode_key = dmidecode_key.split() for key in dmidecode_key: - cmd = (dmidecode_exp % (smbios_type_number, key)) - default_key_para = process.run( - cmd, shell=True).stdout_text.strip() - smbios_key_para_set = params.object_params(sm_type).get(key, - default_key_para) - smbios += ",%s='%s'" % (key.lower(), smbios_key_para_set) + cmd = dmidecode_exp % (smbios_type_number, key) + default_key_para = process.run(cmd, shell=True).stdout_text.strip() + smbios_key_para_set = params.object_params(sm_type).get( + key, default_key_para + ) + smbios += f",{key.lower()}='{smbios_key_para_set}'" if params.get("extra_params"): params["extra_params"] += smbios @@ -58,31 +55,34 @@ def run(test, params, env): tmp = utils_misc.get_support_machine_type(qemu_binary, remove_alias=True)[:2] (support_machine_types, expect_system_versions) = tmp machine_type = params.get("machine_type", "") - if ':' in machine_type: - prefix = machine_type.split(':', 1)[0] - support_machine_types = ["%s:%s" % (prefix, m_type) - for m_type in support_machine_types] + if ":" in machine_type: + prefix = machine_type.split(":", 1)[0] + support_machine_types = [ + f"{prefix}:{m_type}" for m_type in support_machine_types + ] failures = [] - rhel_system_version = 
params.get('smbios_system_version') == 'rhel' + rhel_system_version = params.get("smbios_system_version") == "rhel" if not rhel_system_version: - re_pc_lt_2 = re.compile(r'^pc-(i440fx-)?[01].\d+$') + re_pc_lt_2 = re.compile(r"^pc-(i440fx-)?[01].\d+$") host_dmidecode_system_version = process.run( - "dmidecode -s system-version").stdout_text + "dmidecode -s system-version" + ).stdout_text for m_type in support_machine_types: if m_type in ("isapc", "xenfv", "xenpv"): continue params["machine_type"] = m_type params["start_vm"] = "yes" - error_context.context("Boot the vm using -M option:'-M %s', smbios " - "para: '%s'" % (m_type, smbios), test.log.info) + error_context.context( + f"Boot the vm using -M option:'-M {m_type}', smbios " f"para: '{smbios}'", + test.log.info, + ) env_process.preprocess_vm(test, params, env, params.get("main_vm")) vm1 = env.get_vm(params["main_vm"]) session = vm1.wait_for_login(timeout=login_timeout) - error_context.context("Check smbios info on guest " - "is setted as expected") + error_context.context("Check smbios info on guest " "is setted as expected") for sm_type in smbios_type.split(): if sm_type == "Bios": @@ -92,17 +92,16 @@ def run(test, params, env): dmidecode_key = params.object_params(sm_type).get("dmikeyword") dmidecode_key = dmidecode_key.split() for key in dmidecode_key: - cmd = (dmidecode_exp % (smbios_type_number, key)) + cmd = dmidecode_exp % (smbios_type_number, key) smbios_get_para = session.cmd(cmd).strip() - default_key_para = process.run( - cmd, shell=True).stdout_text.strip() + default_key_para = process.run(cmd, shell=True).stdout_text.strip() if params.get("smbios_type_disable", "no") == "no": - smbios_set_para = params.object_params(sm_type).get(key, - default_key_para) + smbios_set_para = params.object_params(sm_type).get( + key, default_key_para + ) else: # The System.Version is different on RHEL and upstream - if (rhel_system_version or sm_type != 'System' or - key != 'Version'): + if rhel_system_version or sm_type != "System" or key != "Version": key_index = support_machine_types.index(m_type) smbios_set_para = expect_system_versions[key_index] # pylint: disable=E0606 elif re_pc_lt_2.match(m_type): # pylint: disable=E0606 @@ -120,10 +119,8 @@ def run(test, params, env): smbios_set_para = smbios_set_para.lower() smbios_get_para = smbios_get_para.lower() - if (smbios_set_para not in smbios_get_para): - e_msg = ("%s.%s mismatch, Set '%s' but guest is : '%s'" - % (sm_type, key, smbios_set_para, - smbios_get_para)) + if smbios_set_para not in smbios_get_para: + e_msg = f"{sm_type}.{key} mismatch, Set '{smbios_set_para}' but guest is : '{smbios_get_para}'" failures.append(e_msg) session.close() @@ -132,5 +129,8 @@ def run(test, params, env): error_context.context("") if failures: - test.fail("smbios table test reported %s failures:\n%s" % - (len(failures), "\n".join(failures))) + test.fail( + "smbios table test reported {} failures:\n{}".format( + len(failures), "\n".join(failures) + ) + ) diff --git a/qemu/tests/smt_test.py b/qemu/tests/smt_test.py index fcc4f23e26..eee2d1de3c 100644 --- a/qemu/tests/smt_test.py +++ b/qemu/tests/smt_test.py @@ -1,12 +1,9 @@ import re import time -from virttest import env_process -from virttest import error_context -from virttest import utils_misc -from virttest import utils_test - +from virttest import env_process, error_context, utils_misc, utils_test from virttest.utils_test import BackgroundTest + from provider.cpu_utils import check_cpu_flags @@ -22,6 +19,7 @@ def run(test, params, env): :params params: 
Dictionary with the test parameters. :params env: Dictionary with test environment. """ + def run_guest_cmd(cmd, retry=False): """ Run cmd inside guest @@ -30,7 +28,7 @@ def run_guest_cmd(cmd, retry=False): if retry and not output: output = session.cmd_output_safe(cmd) if not output: - test.error("Get empty output after run cmd %s" % cmd) + test.error(f"Get empty output after run cmd {cmd}") return output def get_guest_threads(): @@ -48,17 +46,19 @@ def get_guest_threads(): cmd = params["get_sockets_cmd"] output = run_guest_cmd(cmd) sockets = len(re.findall(r"SocketDesignation=", output)) - threads = int(vm.cpuinfo.smp/sockets/cores) + threads = int(vm.cpuinfo.smp / sockets / cores) return threads def heavyload_install(install_path): """ Install heavyload in windows guest """ - test_installed_cmd = 'dir "%s" | findstr /I heavyload' % install_path + test_installed_cmd = f'dir "{install_path}" | findstr /I heavyload' if session.cmd_status(test_installed_cmd) != 0: - test.log.warning("Could not find installed heavyload in guest, will" - " install it via winutils.iso ") + test.log.warning( + "Could not find installed heavyload in guest, will" + " install it via winutils.iso " + ) winutil_drive = utils_misc.get_winutils_vol(session) if not winutil_drive: test.cancel("WIN_UTILS CDROM not found.") @@ -69,12 +69,12 @@ def run_stress(): """ Run stress inside guest, return guest cpu usage """ - error_context.context("Run stress in guest and get cpu usage", - test.log.info) + error_context.context("Run stress in guest and get cpu usage", test.log.info) if os_type == "linux": stress_args = params["stress_args"] - stress_test = utils_test.VMStress(vm, "stress", - params, stress_args=stress_args) + stress_test = utils_test.VMStress( + vm, "stress", params, stress_args=stress_args + ) try: stress_test.load_stress_tool() time.sleep(stress_duration / 2) @@ -93,15 +93,17 @@ def run_stress(): install_path = params["install_path"] heavyload_install(install_path) error_context.context("Run heavyload inside guest.", test.log.info) - heavyload_bin = r'"%s\heavyload.exe" ' % install_path - heavyload_options = ["/CPU %d" % vm.cpuinfo.smp, - "/DURATION %d" % (stress_duration // 60), - "/AUTOEXIT", - "/START"] + heavyload_bin = rf'"{install_path}\heavyload.exe" ' + heavyload_options = [ + "/CPU %d" % vm.cpuinfo.smp, + "/DURATION %d" % (stress_duration // 60), + "/AUTOEXIT", + "/START", + ] start_cmd = heavyload_bin + " ".join(heavyload_options) - stress_tool = BackgroundTest(session.cmd, (start_cmd, - stress_duration, - stress_duration)) + stress_tool = BackgroundTest( + session.cmd, (start_cmd, stress_duration, stress_duration) + ) stress_tool.start() if not utils_misc.wait_for(stress_tool.is_alive, stress_duration): test.error("Failed to start heavyload process.") diff --git a/qemu/tests/snapshot_negative_test.py b/qemu/tests/snapshot_negative_test.py index 96fd0e18ef..dadaac915d 100755 --- a/qemu/tests/snapshot_negative_test.py +++ b/qemu/tests/snapshot_negative_test.py @@ -1,7 +1,6 @@ import re from avocado.utils import process - from virttest import data_dir from virttest.qemu_storage import QemuImg @@ -30,11 +29,12 @@ def _check_command(cmds): for qemu_img_cmd in cmds: if qemu_img_cmd_agrs: qemu_img_cmd %= qemu_img_cmd_agrs - cmd_result = process.run(qemu_img_cmd, ignore_status=True, - shell=True) + cmd_result = process.run(qemu_img_cmd, ignore_status=True, shell=True) if not re.search(err_info, cmd_result.stderr.decode(), re.I | re.M): - test.fail("Failed to get error information. 
The actual error "
-                      "information is %s." % cmd_result.stderr.decode())
+            test.fail(
+                "Failed to get error information. The actual error "
+                f"information is {cmd_result.stderr.decode()}."
+            )
 
     def run_cmd_with_incorrect_format():
         cmds = params.get("cmd_with_incorrect_format")
diff --git a/qemu/tests/softlockup.py b/qemu/tests/softlockup.py
index 0ab050255f..8fde2b62ce 100644
--- a/qemu/tests/softlockup.py
+++ b/qemu/tests/softlockup.py
@@ -2,9 +2,7 @@
 import socket
 import time
 
-from avocado.utils import cpu
-from avocado.utils import process
-
+from avocado.utils import cpu, process
 from virttest import data_dir
 
 
@@ -38,8 +36,7 @@ def run(test, params, env):
     vm = env.get_vm(params["main_vm"])
     login_timeout = int(params.get("login_timeout", 360))
    stress_dir = data_dir.get_deps_dir("stress")
-    monitor_dir = params.get("monitor_dir",
-                             data_dir.get_deps_dir("softlockup"))
+    monitor_dir = params.get("monitor_dir", data_dir.get_deps_dir("softlockup"))
 
     def _kill_guest_programs(session, kill_stress_cmd, kill_monitor_cmd):
         test.log.info("Kill stress and monitor on guest")
@@ -71,7 +68,8 @@ def host():
         process.run(
             server_setup_cmd
             % (monitor_dir, threshold, monitor_log_file_server, monitor_port),
-            shell=True)
+            shell=True,
+        )
 
         if stress_setup_cmd is not None:
             test.log.info("Build stress on host")
@@ -82,12 +80,16 @@ def host():
             # stress_threads = 2 * n_cpus
             threads_host = 2 * cpu.online_count()
             # Run stress test on host
-            process.run(stress_cmd % (stress_dir, threads_host), ignore_bg_processes=True, shell=True)
+            process.run(
+                stress_cmd % (stress_dir, threads_host),
+                ignore_bg_processes=True,
+                shell=True,
+            )
 
     def guest():
         try:
             host_ip = socket.gethostbyname(socket.gethostname())
-        except socket.error:
+        except OSError:
             try:
                 # Hackish, but works well on stand alone (laptop) setups
                 # with access to the internet. If this fails, well, then
@@ -95,8 +97,8 @@ def guest():
                 s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
                 s.connect(("redhat.com", 80))
                 host_ip = s.getsockname()[0]
-            except socket.error as e:
-                test.error("Could not determine host IP: %s" % e)
+            except OSError as e:
+                test.error(f"Could not determine host IP: {e}")
 
         # Now, starting the guest
         vm.verify_alive()
@@ -105,7 +107,7 @@ def guest():
         # Kill previous instances of the load programs, if any
         _kill_guest_programs(session, kill_stress_cmd, kill_monitor_cmd)
         # Clean up previous log instances
-        session.cmd("rm -f %s" % monitor_log_file_client)
+        session.cmd(f"rm -f {monitor_log_file_client}")
 
         # Opening firewall ports on guest
         try:
@@ -114,14 +116,17 @@ def guest():
             pass
 
         # Get monitor files and copy them from host to guest
-        monitor_path = os.path.join(data_dir.get_deps_dir(), 'softlockup',
-                                    'heartbeat_slu.py')
+        monitor_path = os.path.join(
+            data_dir.get_deps_dir(), "softlockup", "heartbeat_slu.py"
+        )
         vm.copy_files_to(monitor_path, "/tmp")
 
         test.log.info("Setup monitor client on guest")
         # Start heartbeat on guest
-        session.cmd(params.get("client_setup_cmd") %
-                    ("/tmp", host_ip, monitor_log_file_client, monitor_port))
+        session.cmd(
+            params.get("client_setup_cmd")
+            % ("/tmp", host_ip, monitor_log_file_client, monitor_port)
+        )
 
         if stress_setup_cmd is not None:
             # Copy, uncompress and build stress on guest
@@ -146,8 +151,7 @@ def guest():
     _kill_host_programs(kill_stress_cmd, kill_monitor_cmd)
 
     # Collect drift
-    drift = process.system_output(drift_cmd % monitor_log_file_server,
-                                  shell=True)
+    drift = process.system_output(drift_cmd % monitor_log_file_server, shell=True)
     test.log.info("Drift noticed: %s", drift)
 
     host()
diff --git a/qemu/tests/spapr_vty_multi_backends.py b/qemu/tests/spapr_vty_multi_backends.py
index 71581da297..6f4de5c1ad 100644
--- a/qemu/tests/spapr_vty_multi_backends.py
+++ b/qemu/tests/spapr_vty_multi_backends.py
@@ -1,9 +1,7 @@
 import os
 import re
 
-from virttest import error_context
-from virttest import remote
-from virttest import utils_test
+from virttest import error_context, remote, utils_test
 
 
 @error_context.context_aware
@@ -27,7 +25,7 @@ def run(test, params, env):
     vm = env.get_vm(params["main_vm"])
     vm.wait_for_login()
     # do migration
-    if params.get('sub_type') == 'migration_all_type':
+    if params.get("sub_type") == "migration_all_type":
         mig_timeout = float(params.get("mig_timeout", "3600"))
         mig_protocol = params.get("migration_protocol", "tcp")
         vm.migrate(mig_timeout, mig_protocol, env=env)
@@ -37,8 +35,8 @@ def run(test, params, env):
         # where the 9th or larger number spapr-vty devices could not be used as serial
         # since the maximum available console/serial devices is 8 inside the guest,
         # i.e. from /dev/hvc0 to /dev/hvc7
-        hvc_id = int(serial_id.replace('vs', '')) - 1
-        kernel_params = "console=hvc%s,115200" % hvc_id
+        hvc_id = int(serial_id.replace("vs", "")) - 1
+        kernel_params = f"console=hvc{hvc_id},115200"
         utils_test.update_boot_option(vm, args_added=kernel_params)
 
     backend = params.object_params(serial_id)["chardev_backend"]
@@ -47,35 +45,37 @@ def run(test, params, env):
     chardev_device = vm.devices.get_by_qid(chardev_qid)[0]
     test.log.info("The currently tested backend is %s.", backend)
 
-    if backend == 'unix_socket':
+    if backend == "unix_socket":
         session = vm.wait_for_serial_login(timeout=60)
         session.cmd(create_delete_file)
         session.close()
-    elif backend == 'tcp_socket':
-        session = remote.remote_login(client='nc',
-                                      host=chardev_device.params['host'],
-                                      port=chardev_device.params['port'],
-                                      username=params['username'],
-                                      password=params['password'],
-                                      prompt=prompt,
-                                      timeout=240)
+    elif backend == "tcp_socket":
+        session = remote.remote_login(
+            client="nc",
+            host=chardev_device.params["host"],
+            port=chardev_device.params["port"],
+            username=params["username"],
+            password=params["password"],
+            prompt=prompt,
+            timeout=240,
+        )
         session.cmd(create_delete_file)
         session.close()
-    elif backend == 'pty':
-        chardev_info = vm.monitor.human_monitor_cmd('info chardev')
-        hostfile = re.findall('%s: filename=pty:(/dev/pts/\\d)?' %
-                              serial_id, chardev_info)
+    elif backend == "pty":
+        chardev_info = vm.monitor.human_monitor_cmd("info chardev")
+        hostfile = re.findall(
+            f"{serial_id}: filename=pty:(/dev/pts/\\d)?", chardev_info
+        )
         if not hostfile:
-            test.fail("Can't find the corresponding pty backend: %s" %
-                      chardev_info)
+            test.fail(f"Can't find the corresponding pty backend: {chardev_info}")
         fd_pty = os.open(hostfile[0], os.O_RDWR | os.O_NONBLOCK)
         os.close(fd_pty)
-    elif backend == 'file':
-        filename = chardev_device.params['path']
+    elif backend == "file":
+        filename = chardev_device.params["path"]
         with open(filename) as f:
-            if 'Linux' not in f.read():
+            if "Linux" not in f.read():
                 test.fail("Guest boot fail with file backend.")
-    elif backend == 'null':
+    elif backend == "null":
         session = vm.wait_for_login()
         session.cmd(create_delete_file)
diff --git a/qemu/tests/sr_iov_boot_negative.py b/qemu/tests/sr_iov_boot_negative.py
index c5aed32b2a..2698d699e5 100644
--- a/qemu/tests/sr_iov_boot_negative.py
+++ b/qemu/tests/sr_iov_boot_negative.py
@@ -1,5 +1,4 @@
-from virttest import error_context
-from virttest import env_process
+from virttest import env_process, error_context
 
 
 @error_context.context_aware
@@ -20,18 +19,15 @@ def run(test, params, env):
         test.error("Please set start_vm to no")
     params["start_vm"] = "yes"
     try:
-        error_context.context("Try to boot VM with negative parameters",
-                              test.log.info)
+        error_context.context("Try to boot VM with negative parameters", test.log.info)
         case_fail = False
         env_process.preprocess_vm(test, params, env, params.get("main_vm"))
         case_fail = True
     except Exception as e:
         if neg_msg:
-            error_context.context("Check qemu-qemu error message",
-                                  test.log.info)
+            error_context.context("Check qemu-qemu error message", test.log.info)
             if neg_msg not in str(e):
-                msg = "Could not find '%s' in error message '%s'" % (
-                    neg_msg, e)
+                msg = f"Could not find '{neg_msg}' in error message '{e}'"
                 test.fail(msg)
         test.log.debug("Could not boot up vm, %s", e)
     if case_fail:
diff --git a/qemu/tests/sr_iov_hotplug.py b/qemu/tests/sr_iov_hotplug.py
index 2520cb258c..4ee534c3b8 100644
--- a/qemu/tests/sr_iov_hotplug.py
+++ b/qemu/tests/sr_iov_hotplug.py
@@ -1,12 +1,7
@@ import re import aexpect - -from virttest import utils_misc -from virttest import utils_test -from virttest import utils_net -from virttest import test_setup -from virttest import error_context +from virttest import error_context, test_setup, utils_misc, utils_net, utils_test iface_scripts = [] @@ -34,11 +29,11 @@ def run(test, params, env): """ def check_interface(iface, nic_filter): - cmd = "ifconfig %s" % str(iface) + cmd = f"ifconfig {str(iface)}" session = vm.wait_for_serial_login(timeout=timeout) status, output = session.cmd_status_output(cmd) if status: - test.error("Guest command '%s' fail with output: %s." % (cmd, output)) + test.error(f"Guest command '{cmd}' fail with output: {output}.") if re.findall(nic_filter, output, re.MULTILINE | re.DOTALL): return True return False @@ -49,7 +44,7 @@ def get_active_network_device(session, nic_filter): nic_reg = r"\w+(?=: flags)|\w+(?=\s*Link)" status, output = session.cmd_status_output(cmd) if status: - test.error("Guest command '%s' fail with output: %s." % (cmd, output)) + test.error(f"Guest command '{cmd}' fail with output: {output}.") ifaces = re.findall(nic_reg, output) for iface in ifaces: if check_interface(str(iface), nic_filter): @@ -57,53 +52,55 @@ def get_active_network_device(session, nic_filter): return devnames def pci_add_iov(pci_num): - pci_add_cmd = ("pci_add pci_addr=auto host host=%s,if=%s" % - (pa_pci_ids[pci_num], pci_model)) + pci_add_cmd = ( + f"pci_add pci_addr=auto host host={pa_pci_ids[pci_num]},if={pci_model}" + ) if params.get("hotplug_params"): assign_param = params.get("hotplug_params").split() for param in assign_param: value = params.get(param) if value: - pci_add_cmd += ",%s=%s" % (param, value) + pci_add_cmd += f",{param}={value}" return pci_add(pci_add_cmd) def pci_add(pci_add_cmd): error_context.context("Adding pci device with command 'pci_add'") add_output = vm.monitor.send_args_cmd(pci_add_cmd, convert=False) - pci_info.append(['', add_output]) + pci_info.append(["", add_output]) if "OK domain" not in add_output: - test.fail("Add PCI device failed. Monitor command is: %s, " - "Output: %r" % (pci_add_cmd, add_output)) + test.fail( + f"Add PCI device failed. Monitor command is: {pci_add_cmd}, " + f"Output: {add_output!r}" + ) return vm.monitor.info("pci") def check_support_device(dev): - if vm.monitor.protocol == 'qmp': - devices_supported = vm.monitor.human_monitor_cmd("%s ?" % cmd_type) + if vm.monitor.protocol == "qmp": + devices_supported = vm.monitor.human_monitor_cmd(f"{cmd_type} ?") else: - devices_supported = vm.monitor.send_args_cmd("%s ?" 
% cmd_type) + devices_supported = vm.monitor.send_args_cmd(f"{cmd_type} ?") # Check if the device is support in qemu is_support = utils_misc.find_substring(devices_supported, dev) if not is_support: - test.error("%s doesn't support device: %s" % (cmd_type, dev)) + test.error(f"{cmd_type} doesn't support device: {dev}") def device_add_iov(pci_num): - device_id = "%s" % pci_model + "-" + utils_misc.generate_random_id() + device_id = f"{pci_model}" + "-" + utils_misc.generate_random_id() pci_info.append([device_id]) driver = params.get("device_driver", "pci-assign") check_support_device(driver) - pci_add_cmd = ("device_add id=%s,driver=%s,host=%s" % - (pci_info[pci_num][0], driver, pa_pci_ids[pci_num])) + pci_add_cmd = f"device_add id={pci_info[pci_num][0]},driver={driver},host={pa_pci_ids[pci_num]}" if params.get("hotplug_params"): assign_param = params.get("hotplug_params").split() for param in assign_param: value = params.get(param) if value: - pci_add_cmd += ",%s=%s" % (param, value) + pci_add_cmd += f",{param}={value}" return device_add(pci_num, pci_add_cmd) def device_add(pci_num, pci_add_cmd): error_context.context("Adding pci device with command 'device_add'") - if vm.monitor.protocol == 'qmp': + if vm.monitor.protocol == "qmp": add_output = vm.monitor.send_args_cmd(pci_add_cmd) else: add_output = vm.monitor.send_args_cmd(pci_add_cmd, convert=False) @@ -111,8 +108,10 @@ def device_add(pci_num, pci_add_cmd): after_add = vm.monitor.info("pci") if pci_info[pci_num][0] not in str(after_add): test.log.debug("Print info pci after add the block: %s", after_add) - test.fail("Add device failed. Monitor command is: %s" - ". Output: %r" % (pci_add_cmd, add_output)) + test.fail( + f"Add device failed. Monitor command is: {pci_add_cmd}" + f". Output: {add_output!r}" + ) return after_add def clean_network_scripts(): @@ -120,17 +119,18 @@ def clean_network_scripts(): session = vm.wait_for_serial_login(timeout=timeout) if "ubuntu" in vm.get_distro().lower(): iface_script = "/etc/network/interfaces" - cmd = "cat %s.BACKUP" % iface_script + cmd = f"cat {iface_script}.BACKUP" if not session.cmd_status(cmd): - cmd = "mv %s.BACKUP %s" % (iface_script, iface_script) + cmd = f"mv {iface_script}.BACKUP {iface_script}" status, output = session.cmd_status_output(cmd) if status: - test.error("Failed to cleanup network script in guest: " - "%s" % output) + test.error( + "Failed to cleanup network script in guest: " f"{output}" + ) else: global iface_scripts for iface_script in iface_scripts: - cmd = "rm -f %s" % iface_script + cmd = f"rm -f {iface_script}" status, output = session.cmd_status_output(cmd) if status: test.error("Failed to delete iface_script") @@ -152,10 +152,9 @@ def add_device(pci_num): vm.pause() try: # get function for adding device. - add_function = local_functions["%s_iov" % cmd_type] + add_function = local_functions[f"{cmd_type}_iov"] except Exception: - test.error("No function for adding sr-iov dev with '%s'" % - cmd_type) + test.error(f"No function for adding sr-iov dev with '{cmd_type}'") after_add = None if add_function: # Do add pci device. 
@@ -171,8 +170,7 @@ def _new_shown(): def _check_ip(): post_nics = get_active_network_device(session, nic_filter) test.log.debug("Active nics after hotplug - %s", post_nics) - return (len(active_nics) <= len(post_nics) and - active_nics != post_nics) + return len(active_nics) <= len(post_nics) and active_nics != post_nics # Define a helper function to catch PCI device string def _find_pci(): @@ -196,34 +194,41 @@ def _find_pci(): error_context.context("Start checking new added device") # Compare the output of 'info pci' if after_add == info_pci_ref: - test.fail("No new PCI device shown after executing " - "monitor command: 'info pci'") + test.fail( + "No new PCI device shown after executing " + "monitor command: 'info pci'" + ) secs = int(params["wait_secs_for_hook_up"]) if not utils_misc.wait_for(_new_shown, test_timeout, secs, 3): - test.fail("No new device shown in output of command " - "executed inside the guest: %s" % reference_cmd) + test.fail( + "No new device shown in output of command " + f"executed inside the guest: {reference_cmd}" + ) if not utils_misc.wait_for(_find_pci, test_timeout, 3, 3): - test.fail("New add device not found in guest. " - "Command was: lspci -nn") + test.fail( + "New add device not found in guest. " "Command was: lspci -nn" + ) # Assign static IP to the hotplugged interface if params.get("assign_static_ip", "no") == "yes": cmd = "service networking restart" static_ip = next(ip_gen) net_mask = params.get("static_net_mask", "255.255.255.0") - broadcast = params.get("static_broadcast", "10.10.10.255") - pci_id = utils_misc.get_pci_id_using_filter(vf_filter, - session) - test.log.debug("PCIs associated with %s - %s", vf_filter, - ', '.join(map(str, pci_id))) + params.get("static_broadcast", "10.10.10.255") + pci_id = utils_misc.get_pci_id_using_filter(vf_filter, session) + test.log.debug( + "PCIs associated with %s - %s", + vf_filter, + ", ".join(map(str, pci_id)), + ) for each_pci in pci_id: - iface_name = utils_misc.get_interface_from_pci_id(each_pci, - session) - test.log.debug("Interface associated with PCI %s - %s", - each_pci, iface_name) - mac = session.cmd_output("ethtool -P %s" % iface_name) + iface_name = utils_misc.get_interface_from_pci_id(each_pci, session) + test.log.debug( + "Interface associated with PCI %s - %s", each_pci, iface_name + ) + mac = session.cmd_output(f"ethtool -P {iface_name}") mac = mac.split("Permanent address:")[-1].strip() test.log.debug("mac address of %s: %s", iface_name, mac) # backup the network script for other distros @@ -231,26 +236,31 @@ def _find_pci(): cmd = "service network restart" iface_scripts.append(utils_net.get_network_cfg_file(iface_name)) if not check_interface(str(iface_name), nic_filter): - utils_net.create_network_script(iface_name, mac, - boot_proto="static", - net_mask=net_mask, - vm=vm, - ip_addr=static_ip) + utils_net.create_network_script( + iface_name, + mac, + boot_proto="static", + net_mask=net_mask, + vm=vm, + ip_addr=static_ip, + ) status, output = session.cmd_status_output(cmd) if status: - test.error("Failed to set static ip in guest: " - "%s" % output) + test.error("Failed to set static ip in guest: " f"{output}") # Test the newly added device if not utils_misc.wait_for(_check_ip, 120, 3, 3): ifconfig = session.cmd_output("ifconfig -a") - test.fail("New hotpluged device could not get ip " - "after 120s in guest. guest ifconfig " - "output: \n%s" % ifconfig) + test.fail( + "New hotpluged device could not get ip " + "after 120s in guest. 
guest ifconfig " + f"output: \n{ifconfig}" + ) try: session.cmd(params["pci_test_cmd"] % (pci_num + 1)) except aexpect.ShellError as e: - test.fail("Check device failed after PCI " - "hotplug. Output: %r" % e.output) + test.fail( + "Check device failed after PCI " f"hotplug. Output: {e.output!r}" + ) except Exception: pci_del(pci_num, ignore_failure=True) @@ -265,16 +275,20 @@ def _device_removed(): before_del = vm.monitor.info("pci") if cmd_type == "pci_add": slot_id = "0" + pci_info[pci_num][1].split(",")[2].split()[1] - cmd = "pci_del pci_addr=%s" % slot_id + cmd = f"pci_del pci_addr={slot_id}" vm.monitor.send_args_cmd(cmd, convert=False) elif cmd_type == "device_add": - cmd = "device_del id=%s" % pci_info[pci_num][0] + cmd = f"device_del id={pci_info[pci_num][0]}" vm.monitor.send_args_cmd(cmd) - if (not utils_misc.wait_for(_device_removed, test_timeout, 0, 1) and - not ignore_failure): - test.fail("Failed to hot remove PCI device: %s. " - "Monitor command: %s" % (pci_model, cmd)) + if ( + not utils_misc.wait_for(_device_removed, test_timeout, 0, 1) + and not ignore_failure + ): + test.fail( + f"Failed to hot remove PCI device: {pci_model}. " + f"Monitor command: {cmd}" + ) vm = env.get_vm(params["main_vm"]) vm.verify_alive() @@ -295,7 +309,7 @@ def _device_removed(): device = {} device["type"] = device_type if generate_mac == "yes": - device['mac'] = utils_net.generate_mac_address_simple() + device["mac"] = utils_net.generate_mac_address_simple() if params.get("device_name"): device["name"] = params.get("device_name") devices.append(device) @@ -309,17 +323,18 @@ def _device_removed(): vf_filter_re=vf_filter, pf_filter_re=params.get("pf_filter_re"), device_driver=device_driver, - pa_type=params.get("pci_assignable")) + pa_type=params.get("pci_assignable"), + ) pa_pci_ids = vm.pci_assignable.request_devs(devices) # Modprobe the module if specified in config file module = params.get("modprobe_module") if module: - error_context.context("modprobe the module %s" % module, test.log.info) - session.cmd("modprobe %s" % module) + error_context.context(f"modprobe the module {module}", test.log.info) + session.cmd(f"modprobe {module}") # Probe qemu to verify what is the supported syntax for PCI hotplug - if vm.monitor.protocol == 'qmp': + if vm.monitor.protocol == "qmp": cmd_o = vm.monitor.info("commands") else: cmd_o = vm.monitor.send_args_cmd("help") @@ -348,15 +363,15 @@ def _device_removed(): if "ubuntu" in vm.get_distro().lower(): session = vm.wait_for_serial_login(timeout=timeout) iface_script = "/etc/network/interfaces" - cmd = "cat %s" % iface_script + cmd = f"cat {iface_script}" if not session.cmd_status(cmd): - test.log.debug("Backup network script in guest - %s", - iface_script) - cmd = "cp %s %s.BACKUP" % (iface_script, iface_script) + test.log.debug( + "Backup network script in guest - %s", iface_script + ) + cmd = f"cp {iface_script} {iface_script}.BACKUP" status, output = session.cmd_status_output(cmd) if status: - test.error("Failed to backup in guest: %s" % - output) + test.error(f"Failed to backup in guest: {output}") for pci_num in range(pci_num_range): msg = "Start hot-adding %sth pci device," % (pci_num + 1) msg += " repeat %d" % (j + 1) @@ -364,8 +379,9 @@ def _device_removed(): add_device(pci_num) sub_type = params.get("sub_type_after_plug") if sub_type: - error_context.context("Running sub test '%s' after hotplug" % - sub_type, test.log.info) + error_context.context( + f"Running sub test '{sub_type}' after hotplug", test.log.info + ) utils_test.run_virt_sub_test(test, 
params, env, sub_type) if "guest_suspend" == sub_type: # Hotpluged device have been released after guest suspend, @@ -383,8 +399,9 @@ def _device_removed(): # clean network scripts on error clean_network_scripts() if params.get("enable_set_link", "yes") == "yes": - error_context.context("Re-enabling the primary link(s) of guest", - test.log.info) + error_context.context( + "Re-enabling the primary link(s) of guest", test.log.info + ) for nic in vm.virtnet: vm.set_link(nic.device_id, up=True) if session: diff --git a/qemu/tests/sr_iov_hotplug_negative.py b/qemu/tests/sr_iov_hotplug_negative.py index 837854a8c6..46d44eaa53 100644 --- a/qemu/tests/sr_iov_hotplug_negative.py +++ b/qemu/tests/sr_iov_hotplug_negative.py @@ -1,8 +1,5 @@ from avocado.utils import process - -from virttest import error_context -from virttest import utils_misc -from virttest import utils_net +from virttest import error_context, utils_misc, utils_net @error_context.context_aware @@ -21,28 +18,28 @@ def run(test, params, env): """ def make_pci_add_cmd(pa_pci_id, pci_addr="auto"): - pci_add_cmd = ("pci_add pci_addr=%s host host=%s,if=%s" % - (pci_addr, pa_pci_id, pci_model)) + pci_add_cmd = ( + f"pci_add pci_addr={pci_addr} host host={pa_pci_id},if={pci_model}" + ) if params.get("hotplug_params"): assign_param = params.get("hotplug_params").split() for param in assign_param: value = params.get(param) if value: - pci_add_cmd += ",%s=%s" % (param, value) + pci_add_cmd += f",{param}={value}" return pci_add_cmd def make_device_add_cmd(pa_pci_id, pci_addr=None): - device_id = "%s" % pci_model + "-" + utils_misc.generate_random_id() - pci_add_cmd = ("device_add id=%s,driver=pci-assign,host=%s" % - (device_id, pa_pci_id)) + device_id = f"{pci_model}" + "-" + utils_misc.generate_random_id() + pci_add_cmd = f"device_add id={device_id},driver=pci-assign,host={pa_pci_id}" if pci_addr is not None: - pci_add_cmd += ",addr=%s" % pci_addr + pci_add_cmd += f",addr={pci_addr}" if params.get("hotplug_params"): assign_param = params.get("hotplug_params").split() for param in assign_param: value = params.get(param) if value: - pci_add_cmd += ",%s=%s" % (param, value) + pci_add_cmd += f",{param}={value}" return pci_add_cmd neg_msg = params.get("negative_msg") @@ -55,14 +52,14 @@ def make_device_add_cmd(pa_pci_id, pci_addr=None): device = {} device["type"] = params.get("hotplug_device_type", "vf") - device['mac'] = utils_net.generate_mac_address_simple() + device["mac"] = utils_net.generate_mac_address_simple() if params.get("device_name"): device["name"] = params.get("device_name") if vm.pci_assignable is not None: pa_pci_ids = vm.pci_assignable.request_devs(device) # Probe qemu to verify what is the supported syntax for PCI hotplug - if vm.monitor.protocol == 'qmp': + if vm.monitor.protocol == "qmp": cmd_output = vm.monitor.info("commands") else: cmd_output = vm.monitor.send_args_cmd("help") @@ -70,15 +67,14 @@ def make_device_add_cmd(pa_pci_id, pci_addr=None): if not cmd_output: test.error("Unknown version of qemu") - cmd_type = utils_misc.find_substring(str(cmd_output), "pci_add", - "device_add") + cmd_type = utils_misc.find_substring(str(cmd_output), "pci_add", "device_add") for j in range(rp_times): if cmd_type == "pci_add": pci_add_cmd = make_pci_add_cmd(pa_pci_ids[0], pci_invaild_addr) # pylint: disable=E0606 elif cmd_type == "device_add": pci_add_cmd = make_device_add_cmd(pa_pci_ids[0], pci_invaild_addr) try: - msg = "Adding pci device with command '%s'" % pci_add_cmd # pylint: disable=E0606 + msg = f"Adding pci device with command 
'{pci_add_cmd}'" # pylint: disable=E0606 error_context.context(msg, test.log.info) case_fail = False add_output = vm.monitor.send_args_cmd(pci_add_cmd, convert=False) @@ -88,8 +84,8 @@ def make_device_add_cmd(pa_pci_id, pci_addr=None): msg = "Check negative hotplug error message" error_context.context(msg, test.log.info) if neg_msg not in str(err): - msg = "Could not find '%s' in" % neg_msg - msg += " command output '%s'" % add_output + msg = f"Could not find '{neg_msg}' in" + msg += f" command output '{add_output}'" test.fail(msg) test.log.debug("Could not boot up vm, %s", err) if case_fail: @@ -97,8 +93,8 @@ def make_device_add_cmd(pa_pci_id, pci_addr=None): msg = "Check negative hotplug error message" error_context.context(msg, test.log.info) if neg_msg not in str(add_output): - msg = "Could not find '%s' in" % neg_msg - msg += " command output '%s'" % add_output + msg = f"Could not find '{neg_msg}' in" + msg += f" command output '{add_output}'" test.fail(msg) test.log.debug("Could not boot up vm, %s", add_output) @@ -109,7 +105,6 @@ def make_device_add_cmd(pa_pci_id, pci_addr=None): driver = params.get("driver", "igb") modprobe_cmd = modprobe_cmd % driver try: - process.system(modprobe_cmd, timeout=120, ignore_status=True, - shell=True) + process.system(modprobe_cmd, timeout=120, ignore_status=True, shell=True) except process.CmdError as err: test.log.error(err) diff --git a/qemu/tests/sr_iov_irqbalance.py b/qemu/tests/sr_iov_irqbalance.py index 4581dc4036..1f4d96d67b 100644 --- a/qemu/tests/sr_iov_irqbalance.py +++ b/qemu/tests/sr_iov_irqbalance.py @@ -1,13 +1,10 @@ +import logging import re import time -import logging -from virttest import error_context -from virttest import utils_test -from virttest import utils_net -from virttest import utils_misc +from virttest import error_context, utils_misc, utils_net, utils_test -LOG_JOB = logging.getLogger('avocado.test') +LOG_JOB = logging.getLogger("avocado.test") @error_context.context_aware @@ -26,12 +23,12 @@ def get_first_network_devname(test, session, nic_interface_filter): cmd = "ifconfig -a" status, output = session.cmd_status_output(cmd) if status: - msg = "Guest command '%s' fail with output: %s." % (cmd, output) + msg = f"Guest command '{cmd}' fail with output: {output}." test.error(msg) devnames = re.findall(nic_interface_filter, output, re.S) if not devnames: msg = "Fail to get network interface name in guest." - msg += "ifconfig output in guest: %s" % output + msg += f"ifconfig output in guest: {output}" test.error(msg) return devnames[0] @@ -48,10 +45,10 @@ def get_irq_smp_affinity(test, session, irq): :return: Cpu list the irq affinity to. 
:rtype: List """ - cmd = "cat /proc/irq/%s/smp_affinity" % irq + cmd = f"cat /proc/irq/{irq}/smp_affinity" status, output = session.cmd_status_output(cmd) if status: - msg = "Fail to get affinity cpu for IRQ '%s'" % irq + msg = f"Fail to get affinity cpu for IRQ '{irq}'" test.error(msg) cpus = [] bit_list = list(bin(int(output.strip(), 16))) @@ -61,7 +58,7 @@ def get_irq_smp_affinity(test, session, irq): bit_list[index] = "0" cpus.append(index) if cpus: - msg = "IRQ '%s' has affinity cpu %s" % (irq, cpus) + msg = f"IRQ '{irq}' has affinity cpu {cpus}" LOG_JOB.info(msg) return cpus @@ -80,13 +77,13 @@ def set_irq_smp_affinity(test, session, irq, cpus): """ num = 0 for cpu in cpus: - num += 2 ** cpu + num += 2**cpu if num == 0: test.error("Please set available cpus") - cmd = "echo %s > /proc/irq/%s/smp_affinity" % (num, irq) + cmd = f"echo {num} > /proc/irq/{irq}/smp_affinity" status = session.cmd_status(cmd) if status: - msg = "Fail to set affinity cpu to %s for IRQ '%s'" % (cpus, irq) + msg = f"Fail to set affinity cpu to {cpus} for IRQ '{irq}'" test.fail(msg) @@ -107,16 +104,16 @@ def get_guest_irq_info(test, session, devname, cpu_count): """ irq_num_dict = {} - cmd = "cat /proc/interrupts | grep %s-" % devname + cmd = f"cat /proc/interrupts | grep {devname}-" status, output = session.cmd_status_output(cmd) if status: - msg = "Command '%s' fail in guest with output:%s" % (cmd, output) + msg = f"Command '{cmd}' fail in guest with output:{output}" test.error(msg) irq_info_filter = r"([0-9]*):" + r"\s*([0-9]*)" * cpu_count irq_infos = re.findall(irq_info_filter, output) if not irq_infos: - msg = "Fail to get irq information for device %s. " % devname - msg += "Command output: %s" % output + msg = f"Fail to get irq information for device {devname}. " + msg += f"Command output: {output}" test.error(msg) for irq_info in irq_infos: irq_info = list(irq_info) @@ -126,8 +123,7 @@ def get_guest_irq_info(test, session, devname, cpu_count): @error_context.context_aware -def check_irqbalance(test, session, devname, cpu_count, irqs, count=6, - interval=10): +def check_irqbalance(test, session, devname, cpu_count, irqs, count=6, interval=10): """ Check that whether irqbalance works. Make sure specified irqs is handled in specified cpu. Raise error.TestFail if specified irqs count is not grow in @@ -157,14 +153,12 @@ def check_irqbalance(test, session, devname, cpu_count, irqs, count=6, irq_num_dict = get_guest_irq_info(test, session, devname, cpu_count) for irq in irqs: for cpu in irq_cpus_dict[irq]: - if (int(pre_irq_num_dict[irq][cpu]) >= - int(irq_num_dict[irq][cpu])): - msg = "'Cpu%s' did not handle more interrupt" % cpu - msg += "for irq '%s'." % irq - msg += "IRQ balance information for IRQ '%s'\n" % irq - msg += "%s second ago: %s\n" % (interval, - pre_irq_num_dict[irq]) - msg += "Just now: %s" % irq_num_dict[irq] + if int(pre_irq_num_dict[irq][cpu]) >= int(irq_num_dict[irq][cpu]): + msg = f"'Cpu{cpu}' did not handle more interrupt" + msg += f"for irq '{irq}'." 
+ msg += f"IRQ balance information for IRQ '{irq}'\n" + msg += f"{interval} second ago: {pre_irq_num_dict[irq]}\n" + msg += f"Just now: {irq_num_dict[irq]}" test.fail(msg) num += 1 pre_irq_num_dict = irq_num_dict @@ -201,8 +195,7 @@ def run(test, params, env): irqbalance_check_count = int(params.get("irqbalance_check_count", 36)) nic_interface_filter = params["nic_interface_filter"] - error_context.context("Make sure that guest have at least 2 vCPUs.", - test.log.info) + error_context.context("Make sure that guest have at least 2 vCPUs.", test.log.info) cpu_count = vm.get_cpu_count() if cpu_count < 2: test.cancel("Test requires at least 2 vCPUs.") @@ -210,8 +203,7 @@ def run(test, params, env): msg = "Update irqbalance service status in guest if not match request." error_context.context(msg, test.log.info) irqbalance_status = params.get("irqbalance_status", "active") - status = utils_misc.get_guest_service_status(session=session, - service="irqbalance") + status = utils_misc.get_guest_service_status(session=session, service="irqbalance") service_cmd = "" if status == "active" and irqbalance_status == "inactive": service_cmd = "service irqbalance stop" @@ -221,28 +213,31 @@ def run(test, params, env): status, output = session.cmd_status_output(service_cmd) if status: msg = "Fail to update irqbalance service status in guest." - msg += " Command output in guest: %s" % output + msg += f" Command output in guest: {output}" test.error(msg) - error_context.context("Get first network interface name in guest.", - test.log.info) + error_context.context("Get first network interface name in guest.", test.log.info) devname = get_first_network_devname(test, session, nic_interface_filter) - error_context.context("Start background network stress in guest.", - test.log.info) - host_ip = utils_net.get_ip_address_by_interface(params.get('netdst')) - ping_cmd = "ping %s -f -q" % host_ip + error_context.context("Start background network stress in guest.", test.log.info) + host_ip = utils_net.get_ip_address_by_interface(params.get("netdst")) + ping_cmd = f"ping {host_ip} -f -q" ping_timeout = irqbalance_check_count * 10 + 100 ping_session = vm.wait_for_login(timeout=timeout) - bg_stress = utils_misc.InterruptedThread(utils_test.raw_ping, - kwargs={'command': ping_cmd, - 'timeout': ping_timeout, - 'session': ping_session, - 'output_func': None}) + bg_stress = utils_misc.InterruptedThread( + utils_test.raw_ping, + kwargs={ + "command": ping_cmd, + "timeout": ping_timeout, + "session": ping_session, + "output_func": None, + }, + ) bg_stress.start() try: - error_context.context("Get irq number assigned to attached " - "VF/PF in guest", test.log.info) + error_context.context( + "Get irq number assigned to attached " "VF/PF in guest", test.log.info + ) irq_nums_dict = get_guest_irq_info(test, session, devname, cpu_count) irqs = [] if irq_nums_dict: @@ -279,33 +274,32 @@ def run(test, params, env): msg = "Check specified IRQ count grow on specified cpu." error_context.context(msg, test.log.info) - check_irqbalance(test, session, devname, - cpu_count, irqs, - count=irqbalance_check_count) + check_irqbalance( + test, session, devname, cpu_count, irqs, count=irqbalance_check_count + ) if irqbalance_status == "active": msg = "Check that specified IRQ count grow on every cpu." 
error_context.context(msg, test.log.info) - post_irq_nums_dict = get_guest_irq_info(test, session, devname, - cpu_count) + post_irq_nums_dict = get_guest_irq_info(test, session, devname, cpu_count) for irq in irqs: if irq not in post_irq_nums_dict.keys(): post_irqs = post_irq_nums_dict.keys() - msg = "Different irq detected: '%s' and '%s'." % (irqs, - post_irqs) + msg = f"Different irq detected: '{irqs}' and '{post_irqs}'." test.error(msg) for cpu in range(cpu_count): - if (int(irq_nums_dict[irq][cpu]) >= - int(post_irq_nums_dict[irq][cpu])): - msg = "'Cpu%s' did not handle more interrupt" % cpu - msg += "for irq '%s'." % irq - msg += "IRQ balance information for IRQ '%s'\n" % irq - msg += "First time: %s\n" % irq_nums_dict - msg += "Just now: %s" % post_irq_nums_dict + if int(irq_nums_dict[irq][cpu]) >= int( + post_irq_nums_dict[irq][cpu] + ): + msg = f"'Cpu{cpu}' did not handle more interrupt" + msg += f"for irq '{irq}'." + msg += f"IRQ balance information for IRQ '{irq}'\n" + msg += f"First time: {irq_nums_dict}\n" + msg += f"Just now: {post_irq_nums_dict}" test.fail(msg) finally: if bg_stress.is_alive(): bg_stress.join(suppress_exception=True) else: - test.log.warn("Background stress test already finished") + test.log.warning("Background stress test already finished") diff --git a/qemu/tests/sr_iov_sanity.py b/qemu/tests/sr_iov_sanity.py index 0a08b30950..6c6c070c4a 100644 --- a/qemu/tests/sr_iov_sanity.py +++ b/qemu/tests/sr_iov_sanity.py @@ -1,19 +1,14 @@ +import random import re import time -import random from ipaddress import ip_address from avocado.utils import process - -from virttest import utils_test -from virttest import test_setup -from virttest import utils_net -from virttest import error_context -from virttest import utils_misc +from virttest import error_context, test_setup, utils_misc, utils_net, utils_test def check_network_interface_ip(interface, ipv6="no"): - check_cmd = "ifconfig %s" % interface + check_cmd = f"ifconfig {interface}" output = process.system_output(check_cmd) ip_re = r"inet (\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})" if ipv6 == "yes": @@ -26,21 +21,18 @@ def check_network_interface_ip(interface, ipv6="no"): def ifup_down_interface(test, interface, action="up"): - check_cmd = "ifconfig %s" % interface + check_cmd = f"ifconfig {interface}" output = process.system_output(check_cmd) if action == "up": if not check_network_interface_ip(interface): if "UP" in output.splitlines()[0]: - process.system("ifdown %s" % interface, timeout=120, - ignore_status=True) - process.system("ifup %s" % interface, - timeout=120, ignore_status=True) + process.system(f"ifdown {interface}", timeout=120, ignore_status=True) + process.system(f"ifup {interface}", timeout=120, ignore_status=True) elif action == "down": if "UP" in output.splitlines()[0]: - process.system("ifdown %s" % interface, timeout=120, - ignore_status=True) + process.system(f"ifdown {interface}", timeout=120, ignore_status=True) else: - msg = "Unsupport action '%s' on network interface." % action + msg = f"Unsupport action '{action}' on network interface." 
test.error(msg) @@ -77,7 +69,8 @@ def run(test, params, env): pa_type=params.get("pci_assignable"), static_ip=static_ip, net_mask=params.get("net_mask"), - start_addr_PF=params.get("start_addr_PF")) + start_addr_PF=params.get("start_addr_PF"), + ) devices = [] device_type = params.get("device_type", "vf") @@ -89,7 +82,7 @@ def run(test, params, env): elif device_type == "pf": device_num = len(pci_assignable.get_pf_vf_info()) else: - msg = "Unsupport device type '%s'." % device_type + msg = f"Unsupport device type '{device_type}'." msg += " Please set device_type to 'vf' or 'pf'." test.error(msg) @@ -97,7 +90,7 @@ def run(test, params, env): device = {} device["type"] = device_type if device_type == "vf": - device['mac'] = utils_net.generate_mac_address_simple() + device["mac"] = utils_net.generate_mac_address_simple() if params.get("device_name"): device["name"] = params.get("device_name") devices.append(device) @@ -123,10 +116,11 @@ def run(test, params, env): ethname_dict.append(ethname) # TODO:cleanup of the network scripts try: - utils_net.create_network_script(ethname, mac, "dhcp", - "255.255.255.0", on_boot="yes") + utils_net.create_network_script( + ethname, mac, "dhcp", "255.255.255.0", on_boot="yes" + ) except Exception as info: - test.error("Network script creation failed - %s" % info) + test.error(f"Network script creation failed - {info}") msg = "Check whether VFs could get ip in host." error_context.context(msg, test.log.info) @@ -134,15 +128,14 @@ def run(test, params, env): utils_net.bring_down_ifname(ethname) _ip = check_network_interface_ip(ethname) if not _ip: - msg = "Interface '%s' could not get IP." % ethname + msg = f"Interface '{ethname}' could not get IP." test.log.error(msg) else: ips[ethname] = _ip test.log.info("Interface '%s' get IP '%s'", ethname, _ip) for i in range(repeat_time): - msg = "Bind/unbind device from host. Repeat %s/%s" % (i + 1, - repeat_time) + msg = f"Bind/unbind device from host. Repeat {i + 1}/{repeat_time}" error_context.context(msg, test.log.info) bind_device_num = random.randint(1, device_num) pci_assignable.request_devs(devices[:bind_device_num]) @@ -157,8 +150,8 @@ def run(test, params, env): post_device_num = len(pci_assignable.get_pf_vf_info()) if post_device_num != device_num: msg = "lspci cannot report the correct PF/VF number." - msg += " Correct number is '%s'" % device_num - msg += " lspci report '%s'" % post_device_num + msg += f" Correct number is '{device_num}'" + msg += f" lspci report '{post_device_num}'" test.fail(msg) dmesg = process.system_output("dmesg") file_name = "host_dmesg_after_unbind_device.txt" @@ -170,8 +163,8 @@ def run(test, params, env): utils_net.bring_up_ifname(ethname) _ip = utils_net.get_ip_address_by_interface(ethname, ip_ver="ipv4") if not _ip: - msg = "Interface '%s' could not get IP." % ethname - msg += "Before bind/unbind it have IP '%s'." % ips[ethname] + msg = f"Interface '{ethname}' could not get IP." + msg += f"Before bind/unbind it have IP '{ips[ethname]}'." 
test.log.error(msg) else: test.log.info("Interface '%s' get IP '%s'", ethname, _ip) @@ -189,34 +182,37 @@ def run(test, params, env): # below if static_ip: IP_addr_VF = None - if 'IP_addr_VF' not in locals(): + if "IP_addr_VF" not in locals(): IP_addr_VF = ip_address(params.get("start_addr_VF")) net_mask = params.get("net_mask") if not IP_addr_VF: - test.fail("No IP address found, please" - "populate starting IP address in " - "configuration file") + test.fail( + "No IP address found, please" + "populate starting IP address in " + "configuration file" + ) session = vm.wait_for_serial_login( - timeout=int(params.get("login_timeout", 720))) + timeout=int(params.get("login_timeout", 720)) + ) rc, output = session.cmd_status_output( - "ip li| grep -i 'BROADCAST'|awk '{print $2}'| sed 's/://'") + "ip li| grep -i 'BROADCAST'|awk '{print $2}'| sed 's/://'" + ) if not rc: iface_probed = output.splitlines() - test.log.info("probed VF Interface(s) in guest: %s", - iface_probed) + test.log.info("probed VF Interface(s) in guest: %s", iface_probed) for iface in iface_probed: mac = utils_net.get_linux_mac(session, iface) utils_net.set_guest_ip_addr(session, mac, IP_addr_VF) - rc, output = utils_test.ping( - str(IP_addr_VF), 30, timeout=60) + rc, output = utils_test.ping(str(IP_addr_VF), 30, timeout=60) if rc != 0: - test.fail("New nic failed ping test" - "with output:\n %s" % output) + test.fail("New nic failed ping test" f"with output:\n {output}") IP_addr_VF = IP_addr_VF + 1 else: - test.fail("Fail to locate probed interfaces" - "for VFs, please check on respective" - "drivers in guest image") + test.fail( + "Fail to locate probed interfaces" + "for VFs, please check on respective" + "drivers in guest image" + ) else: # User has opted for DHCP IP inside guest vm.verify_alive() diff --git a/qemu/tests/steal_time.py b/qemu/tests/steal_time.py index 28e3ea21dc..f338c0f554 100644 --- a/qemu/tests/steal_time.py +++ b/qemu/tests/steal_time.py @@ -1,9 +1,8 @@ -import time import re +import time from avocado.utils import process -from virttest import error_context -from virttest import utils_test +from virttest import error_context, utils_test @error_context.context_aware @@ -21,6 +20,7 @@ def run(test, params, env): :params params: Dictionary with the test parameters. :params env: Dictionary with test environment. 
""" + def get_stat_val(): """ Get steal time value in /proc/stat @@ -40,8 +40,7 @@ def get_stat_val(): for vm in vms: session = vm.wait_for_login() sessions.append(session) - stress_test = utils_test.VMStress(vm, "stress", - params, stress_args=stress_args) + stress_test = utils_test.VMStress(vm, "stress", params, stress_args=stress_args) stress_test.load_stress_tool() stress_tests.append(stress_test) @@ -58,8 +57,7 @@ def get_stat_val(): test.fail("Guest steal time is not around 50") error_context.context("Check two qemu process cpu usage", test.log.info) - cmd = "top -n1 -b -p %s -p %s | grep qemu-kvm | awk '{print $9}'" \ - % (vms[0].get_pid(), vms[1].get_pid()) + cmd = f"top -n1 -b -p {vms[0].get_pid()} -p {vms[1].get_pid()} | grep qemu-kvm | awk '{{print $9}}'" cpu_usage = process.getoutput(cmd, shell=True).split() test.log.info("QEMU cpu usage are %s", cpu_usage) cpu_usage = sorted([float(x) for x in cpu_usage]) @@ -71,11 +69,10 @@ def get_stat_val(): test.log.info("Steal time value in /proc/stat is %s", stat_val_pre) time.sleep(60) stat_val_post = get_stat_val() - test.log.info("After 60s, steal time value in /proc/stat is %s", - stat_val_post) + test.log.info("After 60s, steal time value in /proc/stat is %s", stat_val_post) delta = list(map(lambda x, y: y - x, stat_val_pre, stat_val_post)) - if abs(delta[0] - delta[1]) > sum(delta)/2*0.1: + if abs(delta[0] - delta[1]) > sum(delta) / 2 * 0.1: test.fail("Guest steal time change in /proc/stat is not close") finally: diff --git a/qemu/tests/stepmaker.py b/qemu/tests/stepmaker.py index b7effe5533..5b9f5b82a2 100755 --- a/qemu/tests/stepmaker.py +++ b/qemu/tests/stepmaker.py @@ -7,9 +7,9 @@ @version: "20090401" """ -import time -import os import logging +import os +import time try: from gi import pygtkcompat as pygtk @@ -17,30 +17,26 @@ pygtk = None if pygtk is not None: pygtk.enable() - pygtk.enable_gtk(version='3.0') + pygtk.enable_gtk(version="3.0") else: import pygtk - pygtk.require('2.0') -import gtk -import gobject + pygtk.require("2.0") +import gobject +import gtk from avocado.utils import process +from virttest import ppm_utils, qemu_monitor, step_editor, utils_misc -from virttest import utils_misc -from virttest import ppm_utils -from virttest import step_editor -from virttest import qemu_monitor - -LOG_JOB = logging.getLogger('avocado.test') +LOG_JOB = logging.getLogger("avocado.test") class StepMaker(step_editor.StepMakerWindow): - """ Application used to create a step file. It will grab your input to the virtual machine and record it on a 'step file', that can be played making it possible to do unattended installs. 
""" + # Constructor def __init__(self, vm, steps_filename, tempdir, params): @@ -71,13 +67,15 @@ def __init__(self, vm, steps_filename, tempdir, params): self.time_when_actions_completed = time.time() self.steps_file.write("# Generated by Step Maker\n") - self.steps_file.write("# Generated on %s\n" % time.asctime()) - self.steps_file.write("# uname -a: %s\n" % - process.system_output("uname -a", verbose=False)) + self.steps_file.write(f"# Generated on {time.asctime()}\n") + self.steps_file.write( + "# uname -a: {}\n".format(process.system_output("uname -a", verbose=False)) + ) self.steps_file.flush() - self.vars_file.write("# This file lists the vars used during recording" - " with Step Maker\n") + self.vars_file.write( + "# This file lists the vars used during recording" " with Step Maker\n" + ) self.vars_file.flush() # Done/Break HBox @@ -116,8 +114,9 @@ def redirect_timer(self, delay=0, func=None): gobject.source_remove(self.timer_id) self.timer_id = None if func is not None: - self.timer_id = gobject.timeout_add(delay, func, - priority=gobject.PRIORITY_LOW) + self.timer_id = gobject.timeout_add( + delay, func, priority=gobject.PRIORITY_LOW + ) def switch_to_run_mode(self): # Set all widgets to their default states @@ -157,7 +156,7 @@ def update(self): try: self.vm.monitor.screendump(self.screendump_filename, debug=False) except qemu_monitor.MonitorError as e: - LOG_JOB.warn(e) + LOG_JOB.warning(e) else: self.set_image_from_file(self.screendump_filename) @@ -174,7 +173,7 @@ def event_break_clicked(self, widget): # to self.run_time self.run_time += time.time() - self.time_when_done_clicked # Set recording time widget - self.entry_time.set_text("%.2f" % self.run_time) + self.entry_time.set_text(f"{self.run_time:.2f}") # Update screendump ID self.update_screendump_id(self.steps_data_dir) # By default, check the barrier checkbox @@ -186,8 +185,9 @@ def event_break_clicked(self, widget): self.spin_sleep.set_value(round(time_delta)) self.spin_barrier_timeout.set_value(round(time_delta * 5)) # Set window title - self.window.set_title("Step Maker -- step %d at time %.2f" % - (self.step_num, self.run_time)) + self.window.set_title( + "Step Maker -- step %d at time %.2f" % (self.step_num, self.run_time) + ) def event_done_clicked(self, widget): # Get step lines and screendump @@ -209,12 +209,12 @@ def event_done_clicked(self, widget): val = self.params[varname] var_dict[varname] = val else: - val = self.inputdialog("$%s =" % varname, "Variable") + val = self.inputdialog(f"${varname} =", "Variable") if val is None: return var_dict[varname] = val for varname in var_dict.keys(): - self.vars_file.write("%s=%s\n" % (varname, var_dict[varname])) + self.vars_file.write(f"{varname}={var_dict[varname]}\n") self.vars.update(var_dict) # Write step lines to file @@ -267,21 +267,24 @@ def event_capture_clicked(self, widget): self.image_height_backup = self.image_height self.image_data_backup = self.image_data - gtk.gdk.pointer_grab(self.event_box.window, False, - gtk.gdk.BUTTON_PRESS_MASK | - gtk.gdk.BUTTON_RELEASE_MASK) + gtk.gdk.pointer_grab( + self.event_box.window, + False, + gtk.gdk.BUTTON_PRESS_MASK | gtk.gdk.BUTTON_RELEASE_MASK, + ) # Create empty cursor pix = gtk.gdk.Pixmap(self.event_box.window, 1, 1, 1) color = gtk.gdk.Color() cursor = gtk.gdk.Cursor(pix, pix, color, color, 0, 0) self.event_box.window.set_cursor(cursor) - gtk.gdk.display_get_default( - ).warp_pointer(gtk.gdk.screen_get_default(), - self.prev_x, self.prev_y) + gtk.gdk.display_get_default().warp_pointer( + 
gtk.gdk.screen_get_default(), self.prev_x, self.prev_y + ) self.redirect_event_box_input( self.event_capture_button_press, self.event_capture_button_release, - self.event_capture_scroll) + self.event_capture_scroll, + ) self.redirect_timer(10, self.update_capture) self.vm.resume() @@ -298,8 +301,9 @@ def update_capture(self): if (x, y) != (self.prev_x, self.prev_y): self.vm.monitor.mouse_move(-8000, -8000) time.sleep(delay) - self.vm.monitor.mouse_move(self.mouse_click_coords[0], - self.mouse_click_coords[1]) + self.vm.monitor.mouse_move( + self.mouse_click_coords[0], self.mouse_click_coords[1] + ) time.sleep(delay) self.prev_x = x @@ -311,12 +315,11 @@ def update_capture(self): try: self.vm.monitor.screendump(self.screendump_filename, debug=False) except qemu_monitor.MonitorError as e: - LOG_JOB.warn(e) + LOG_JOB.warning(e) else: self.set_image_from_file(self.screendump_filename) - self.redirect_timer(int(self.spin_latency.get_value()), - self.update_capture) + self.redirect_timer(int(self.spin_latency.get_value()), self.update_capture) return True def event_capture_button_press(self, widget, event): @@ -330,13 +333,15 @@ def event_capture_button_release(self, widget, event): self.event_button_release, None, None, - self.event_expose) + self.event_expose, + ) self.redirect_timer() self.vm.pause() self.mouse_click_captured = True self.mouse_click_button = event.button - self.set_image(self.image_width_backup, self.image_height_backup, - self.image_data_backup) + self.set_image( + self.image_width_backup, self.image_height_backup, self.image_data_backup + ) self.check_mousemove.set_sensitive(True) self.check_mouseclick.set_sensitive(True) self.check_mousemove.set_active(True) @@ -348,8 +353,7 @@ def event_capture_scroll(self, widget, event): direction = 1 else: direction = -1 - self.spin_sensitivity.set_value(self.spin_sensitivity.get_value() + - direction) + self.spin_sensitivity.set_value(self.spin_sensitivity.get_value() + direction) pass @@ -360,7 +364,7 @@ def run(test, params, env): steps_filename = params.get("steps") if not steps_filename: image_name = os.path.basename(params["image_name"]) - steps_filename = 'steps/%s.steps' % image_name + steps_filename = f"steps/{image_name}.steps" steps_filename = utils_misc.get_path(test.virtdir, steps_filename) diff --git a/qemu/tests/steps.py b/qemu/tests/steps.py index 8b77e9d24e..4a69b25d9b 100644 --- a/qemu/tests/steps.py +++ b/qemu/tests/steps.py @@ -4,24 +4,24 @@ :copyright: Red Hat 2008-2009 """ +import logging import os -import time import shutil -import logging +import time -from virttest import utils_misc -from virttest import ppm_utils -from virttest import qemu_monitor +from virttest import ppm_utils, qemu_monitor, utils_misc -LOG_JOB = logging.getLogger('avocado.test') +LOG_JOB = logging.getLogger("avocado.test") try: import PIL.Image except ImportError: - LOG_JOB.warning('No python imaging library installed. PPM image ' - 'conversion to JPEG disabled. In order to enable it, ' - 'please install python-imaging or the equivalent for your ' - 'distro.') + LOG_JOB.warning( + "No python imaging library installed. PPM image " + "conversion to JPEG disabled. In order to enable it, " + "please install python-imaging or the equivalent for your " + "distro." 
+ ) def handle_var(vm, params, varname): @@ -32,8 +32,9 @@ def handle_var(vm, params, varname): return True -def barrier_2(test, vm, words, params, debug_dir, data_scrdump_filename, - current_step_num): +def barrier_2( + test, vm, words, params, debug_dir, data_scrdump_filename, current_step_num +): if len(words) < 7: LOG_JOB.error("Bad barrier_2 command line") return False @@ -46,8 +47,9 @@ def barrier_2(test, vm, words, params, debug_dir, data_scrdump_filename, scrdump_filename = os.path.join(debug_dir, "scrdump.ppm") cropped_scrdump_filename = os.path.join(debug_dir, "cropped_scrdump.ppm") expected_scrdump_filename = os.path.join(debug_dir, "scrdump_expected.ppm") - expected_cropped_scrdump_filename = os.path.join(debug_dir, - "cropped_scrdump_expected.ppm") + expected_cropped_scrdump_filename = os.path.join( + debug_dir, "cropped_scrdump_expected.ppm" + ) comparison_filename = os.path.join(debug_dir, "comparison.ppm") history_dir = os.path.join(debug_dir, "barrier_history") @@ -98,20 +100,24 @@ def barrier_2(test, vm, words, params, debug_dir, data_scrdump_filename, try: vm.monitor.screendump(scrdump_filename, debug=False) except qemu_monitor.MonitorError as e: - LOG_JOB.warn(e) + LOG_JOB.warning(e) continue # Read image file try: (w, h, data) = ppm_utils.image_read_from_ppm_file(scrdump_filename) - except IOError as e: - LOG_JOB.warn(e) + except OSError as e: + LOG_JOB.warning(e) continue # Make sure image is valid if not ppm_utils.image_verify_ppm_file(scrdump_filename): - LOG_JOB.warn("Got invalid screendump: dimensions: %dx%d, " - "data size: %d", w, h, len(data)) + LOG_JOB.warning( + "Got invalid screendump: dimensions: %dx%d, " "data size: %d", + w, + h, + len(data), + ) continue # Compute md5sum of whole image @@ -119,25 +125,30 @@ def barrier_2(test, vm, words, params, debug_dir, data_scrdump_filename, # Write screendump to history_dir (as JPG) if requested # and if the screendump differs from the previous one - if (keep_screendump_history and - whole_image_md5sum not in prev_whole_image_md5sums[:1]): + if ( + keep_screendump_history + and whole_image_md5sum not in prev_whole_image_md5sums[:1] + ): try: os.makedirs(history_dir) except Exception: pass - history_scrdump_filename = os.path.join(history_dir, - "scrdump-step_%s-%s.jpg" % (current_step_num, - time.strftime("%Y%m%d-%H%M%S"))) + history_scrdump_filename = os.path.join( + history_dir, + "scrdump-step_{}-{}.jpg".format( + current_step_num, time.strftime("%Y%m%d-%H%M%S") + ), + ) try: image = PIL.Image.open(scrdump_filename) - image.save(history_scrdump_filename, format='JPEG', - quality=30) + image.save(history_scrdump_filename, format="JPEG", quality=30) except NameError: pass # Compare md5sum of barrier region with the expected md5sum - calced_md5sum = ppm_utils.get_region_md5sum(w, h, data, x1, y1, dx, dy, - cropped_scrdump_filename) + calced_md5sum = ppm_utils.get_region_md5sum( + w, h, data, x1, y1, dx, dy, cropped_scrdump_filename + ) if calced_md5sum == md5sum: # Success -- remove screendump history unless requested not to if keep_screendump_history and not keep_all_history: @@ -156,15 +167,13 @@ def barrier_2(test, vm, words, params, debug_dir, data_scrdump_filename, # Insert md5sum at beginning of queue prev_whole_image_md5sums.insert(0, whole_image_md5sum) # Limit queue length to stuck_detection_history - prev_whole_image_md5sums = \ - prev_whole_image_md5sums[:stuck_detection_history] + prev_whole_image_md5sums = prev_whole_image_md5sums[:stuck_detection_history] # Sleep for a while time.sleep(sleep_duration) 
# Failure - message = ("Barrier failed at step %s after %.2f seconds (%s)" % - (current_step_num, time.time() - start_time, failure_message)) + message = f"Barrier failed at step {current_step_num} after {time.time() - start_time:.2f} seconds ({failure_message})" # What should we do with this failure? if words[-1] == "optional": @@ -174,22 +183,20 @@ def barrier_2(test, vm, words, params, debug_dir, data_scrdump_filename, # Collect information and put it in debug_dir if data_scrdump_filename and os.path.exists(data_scrdump_filename): # Read expected screendump image - (ew, eh, edata) = \ - ppm_utils.image_read_from_ppm_file(data_scrdump_filename) + (ew, eh, edata) = ppm_utils.image_read_from_ppm_file(data_scrdump_filename) # Write it in debug_dir - ppm_utils.image_write_to_ppm_file(expected_scrdump_filename, - ew, eh, edata) + ppm_utils.image_write_to_ppm_file(expected_scrdump_filename, ew, eh, edata) # Write the cropped version as well - ppm_utils.get_region_md5sum(ew, eh, edata, x1, y1, dx, dy, - expected_cropped_scrdump_filename) + ppm_utils.get_region_md5sum( + ew, eh, edata, x1, y1, dx, dy, expected_cropped_scrdump_filename + ) # Perform comparison (w, h, data) = ppm_utils.image_read_from_ppm_file(scrdump_filename) if w == ew and h == eh: (w, h, data) = ppm_utils.image_comparison(w, h, data, edata) - ppm_utils.image_write_to_ppm_file(comparison_filename, w, h, - data) + ppm_utils.image_write_to_ppm_file(comparison_filename, w, h, data) # Print error messages and fail the test - long_message = message + "\n(see analysis at %s)" % debug_dir + long_message = message + f"\n(see analysis at {debug_dir})" LOG_JOB.error(long_message) test.fail(message) @@ -201,11 +208,11 @@ def run(test, params, env): steps_filename = params.get("steps") if not steps_filename: image_name = os.path.basename(params["image_name"]) - steps_filename = 'steps/%s.steps' % image_name + steps_filename = f"steps/{image_name}.steps" steps_filename = utils_misc.get_path(test.virtdir, steps_filename) if not os.path.exists(steps_filename): - test.error("Steps file not found: %s" % steps_filename) + test.error(f"Steps file not found: {steps_filename}") sf = open(steps_filename, "r") lines = sf.readlines() @@ -247,12 +254,19 @@ def run(test, params, env): elif words[0] == "barrier_2": if current_screendump: scrdump_filename = os.path.join( - ppm_utils.get_data_dir(steps_filename), - current_screendump) + ppm_utils.get_data_dir(steps_filename), current_screendump + ) else: scrdump_filename = None - if not barrier_2(test, vm, words, params, test.debugdir, - scrdump_filename, current_step_num): + if not barrier_2( + test, + vm, + words, + params, + test.debugdir, + scrdump_filename, + current_step_num, + ): skip_current_step = True else: vm.send_key(words[0]) diff --git a/qemu/tests/stop_continue.py b/qemu/tests/stop_continue.py index fe899d3af8..d7e035fadf 100644 --- a/qemu/tests/stop_continue.py +++ b/qemu/tests/stop_continue.py @@ -1,7 +1,6 @@ import time -from virttest import error_context -from virttest import utils_test +from virttest import error_context, utils_test @error_context.context_aware @@ -35,15 +34,17 @@ def run(test, params, env): try: prepare_op = params.get("prepare_op") if prepare_op: - error_context.context("Do preparation operation: '%s'" - % prepare_op, test.log.info) + error_context.context( + f"Do preparation operation: '{prepare_op}'", test.log.info + ) op_timeout = float(params.get("prepare_op_timeout", 60)) session.cmd(prepare_op, timeout=op_timeout) if start_bg_process: bg_cmd = 
params.get("bg_cmd") - error_context.context("Start a background process: '%s'" % bg_cmd, - test.log.info) + error_context.context( + f"Start a background process: '{bg_cmd}'", test.log.info + ) session_bg = vm.wait_for_login(timeout=login_timeout) bg_cmd_timeout = float(params.get("bg_cmd_timeout", 240)) args = (bg_cmd, bg_cmd_timeout) @@ -53,12 +54,10 @@ def run(test, params, env): error_context.base_context("Stop the VM", test.log.info) vm.pause() - error_context.context("Verify the status of VM is 'paused'", - test.log.info) + error_context.context("Verify the status of VM is 'paused'", test.log.info) vm.verify_status("paused") - error_context.context("Verify the session has no response", - test.log.info) + error_context.context("Verify the session has no response", test.log.info) if session.is_responsive(): msg = "Session is still responsive after stop" test.log.error(msg) @@ -67,8 +66,7 @@ def run(test, params, env): time.sleep(float(params.get("pause_time", 0))) error_context.base_context("Resume the VM", test.log.info) vm.resume() - error_context.context("Verify the status of VM is 'running'", - test.log.info) + error_context.context("Verify the status of VM is 'running'", test.log.info) vm.verify_status("running") error_context.context("Re-login the guest", test.log.info) @@ -80,30 +78,27 @@ def run(test, params, env): check_op = params.get("check_op") if check_op: - error_context.context("Do check operation: '%s'" % check_op, - test.log.info) + error_context.context(f"Do check operation: '{check_op}'", test.log.info) op_timeout = float(params.get("check_op_timeout", 60)) s, o = session.cmd_status_output(check_op, timeout=op_timeout) if s != 0: - test.fail("Something wrong after stop continue, " - "check command report: %s" % o) + test.fail( + "Something wrong after stop continue, " f"check command report: {o}" + ) finally: try: clean_op = params.get("clean_op") if clean_op: error_context.context( - "Do clean operation: '%s'" % - clean_op, test.log.info) + f"Do clean operation: '{clean_op}'", test.log.info + ) # session close if exception raised, so get renew a session # to do cleanup step. session = vm.wait_for_login(timeout=login_timeout) op_timeout = float(params.get("clean_op_timeout", 60)) - session.cmd(clean_op, timeout=op_timeout, - ignore_all_errors=True) + session.cmd(clean_op, timeout=op_timeout, ignore_all_errors=True) session.close() if session_bg: session_bg.close() except Exception as details: - test.log.warn( - "Exception occur when clean test environment: %s", - details) + test.log.warning("Exception occur when clean test environment: %s", details) diff --git a/qemu/tests/stress_kernel_compile.py b/qemu/tests/stress_kernel_compile.py index 44431af80c..b6b022ea2d 100644 --- a/qemu/tests/stress_kernel_compile.py +++ b/qemu/tests/stress_kernel_compile.py @@ -1,6 +1,5 @@ -from virttest import utils_test, env_process +from virttest import env_process, utils_package, utils_test from virttest.staging import utils_memory -from virttest import utils_package def run(test, params, env): @@ -16,25 +15,25 @@ def run(test, params, env): :param params: Dictionary with the test parameters. :param env: Dictionary with test environment. 
""" + def kernelcompile(session, vm_name): vm = env.get_vm(vm_name) ip = vm.get_address() path = params.get("download_url") test.log.info("kernel path = %s", path) - get_kernel_cmd = "wget %s --progress=none" % path - install_status = utils_package.package_install("wget", session, - timeout=60) + get_kernel_cmd = f"wget {path} --progress=none" + install_status = utils_package.package_install("wget", session, timeout=60) if not install_status: test.error("Failed to install wget.") try: - status, output = session.cmd_status_output(get_kernel_cmd, - timeout=2400, safe=True) + status, output = session.cmd_status_output( + get_kernel_cmd, timeout=2400, safe=True + ) if status != 0: test.log.error(output) - test.fail("Fail to download the kernel in %s" % vm_name) + test.fail(f"Fail to download the kernel in {vm_name}") else: - test.log.info("Completed download the kernel src" - " in %s", vm_name) + test.log.info("Completed download the kernel src" " in %s", vm_name) test_cmd = params.get("test_cmd") status, output = session.cmd_status_output(test_cmd, timeout=1200) if status != 0: @@ -48,20 +47,23 @@ def kernelcompile(session, vm_name): guest_number = int(params.get("guest_number", "1")) if guest_number < 1: - test.log.warn("At least boot up one guest for this test," - " set up guest number to 1") + test.log.warning( + "At least boot up one guest for this test," " set up guest number to 1" + ) guest_number = 1 for tag in range(1, guest_number): - params["vms"] += " stress_guest_%s" % tag + params["vms"] += f" stress_guest_{tag}" mem_host = utils_memory.memtotal() / 1024 vmem = int(mem_host * over_c / guest_number) if vmem < 256: - test.cancel("The memory size set for guest is too small." - " Please try less than %s guests" - " in this host." % guest_number) + test.cancel( + "The memory size set for guest is too small." + f" Please try less than {guest_number} guests" + " in this host." 
+ ) params["mem"] = vmem params["start_vm"] = "yes" login_timeout = int(params.get("login_timeout", 360)) @@ -74,7 +76,7 @@ def kernelcompile(session, vm_name): vm.verify_alive() session = vm.wait_for_login(timeout=login_timeout) if not session: - test.fail("Could not log into guest %s" % vm_name) + test.fail(f"Could not log into guest {vm_name}") sessions_info.append([session, vm_name]) @@ -85,8 +87,7 @@ def kernelcompile(session, vm_name): for session_info in sessions_info: session = session_info[0] vm_name = session_info[1] - bg_thread = utils_test.BackgroundTest(kernelcompile, - (session, vm_name)) + bg_thread = utils_test.BackgroundTest(kernelcompile, (session, vm_name)) bg_thread.start() bg_threads.append(bg_thread) diff --git a/qemu/tests/suspend_under_stress.py b/qemu/tests/suspend_under_stress.py index e700f78e6a..6c03665b24 100644 --- a/qemu/tests/suspend_under_stress.py +++ b/qemu/tests/suspend_under_stress.py @@ -1,6 +1,4 @@ -from virttest import error_context -from virttest import utils_test -from virttest import utils_misc +from virttest import error_context, utils_misc, utils_test from generic.tests import guest_suspend @@ -27,24 +25,32 @@ def run(test, params, env): bg_stress_test = params.get("run_bgstress") try: if bg_stress_test: - error_context.context("Run test %s background" % bg_stress_test, - test.log.info) + error_context.context( + f"Run test {bg_stress_test} background", test.log.info + ) stress_thread = "" wait_time = float(params.get("wait_bg_time", 60)) bg_stress_run_flag = params.get("bg_stress_run_flag") env[bg_stress_run_flag] = False stress_thread = utils_misc.InterruptedThread( - utils_test.run_virt_sub_test, (test, params, env), - {"sub_type": bg_stress_test}) + utils_test.run_virt_sub_test, + (test, params, env), + {"sub_type": bg_stress_test}, + ) stress_thread.start() - if not utils_misc.wait_for(lambda: env.get(bg_stress_run_flag), - wait_time, 0, 5, - "Wait %s test start" % bg_stress_test): + if not utils_misc.wait_for( + lambda: env.get(bg_stress_run_flag), + wait_time, + 0, + 5, + f"Wait {bg_stress_test} test start", + ): test.error("Run stress test error") suspend_type = params.get("guest_suspend_type") - error_context.context("Run suspend '%s' test under stress" - % suspend_type, test.log.info) + error_context.context( + f"Run suspend '{suspend_type}' test under stress", test.log.info + ) bg_cmd = guest_suspend.run args = (test, params, env) bg = utils_test.BackgroundTest(bg_cmd, args) @@ -54,8 +60,8 @@ def run(test, params, env): env[bg_stress_run_flag] = False bg.join() except Exception as e: - err_msg = "Run guest suspend: '%s' error!\n" % suspend_type - err_msg += "Error info: '%s'" % e + err_msg = f"Run guest suspend: '{suspend_type}' error!\n" + err_msg += f"Error info: '{e}'" test.fail(err_msg) finally: diff --git a/qemu/tests/sve_basic.py b/qemu/tests/sve_basic.py index ad99eadfb6..6ef172432e 100644 --- a/qemu/tests/sve_basic.py +++ b/qemu/tests/sve_basic.py @@ -12,9 +12,10 @@ def get_sve_supported_lengths(): Get supported SVE lengths of host. """ output = vm.monitor.query_cpu_model_expansion(vm.cpuinfo.model) - output.pop('sve') - sve_list = [sve for sve in output if output[sve] is True and - sve.startswith('sve')] + output.pop("sve") + sve_list = [ + sve for sve in output if output[sve] is True and sve.startswith("sve") + ] sve_list.sort(key=lambda x: int(x[3:])) return sve_list @@ -25,36 +26,38 @@ def launch_sve_guest(sve_opts, check_length): :param sve_opts: List of SVE options to be used. 
:param check_length: SVE length to be checked in dmesg. """ - test.log.info('Launch a guest with %s', sve_opts) - params['cpu_model_flags'] = 'sve=on,' + ','.join(sve_opts) + test.log.info("Launch a guest with %s", sve_opts) + params["cpu_model_flags"] = "sve=on," + ",".join(sve_opts) vm.create(params=params) vm.verify_alive() session = vm.wait_for_login() - sve_output = session.cmd_output('dmesg | grep SVE').strip() - if re.findall('vector length {} bytes'.format(check_length * 8), - sve_output, re.M): - test.fail('SVE length is incorrect, output:\n{}'.format(sve_output)) + sve_output = session.cmd_output("dmesg | grep SVE").strip() + if re.findall(f"vector length {check_length * 8} bytes", sve_output, re.M): + test.fail(f"SVE length is incorrect, output:\n{sve_output}") session.close() vm.destroy() - cpu_utils.check_cpu_flags(params, 'sve', test) + cpu_utils.check_cpu_flags(params, "sve", test) vm = env.get_vm(params["main_vm"]) sve_lengths = get_sve_supported_lengths() vm.destroy() - error_context.context('Launch a guest with sve=on', test.log.info) + error_context.context("Launch a guest with sve=on", test.log.info) for length in sve_lengths: - opts = ('{}={}'.format( - sve, 'on' if sve_lengths.index(sve) <= sve_lengths.index(length) - else 'off' - ) for sve in sve_lengths) + opts = ( + "{}={}".format( + sve, + "on" if sve_lengths.index(sve) <= sve_lengths.index(length) else "off", + ) + for sve in sve_lengths + ) launch_sve_guest(opts, length) - error_context.context('Launch a guest with sve=off', test.log.info) - opts = ('{}={}'.format(sve, 'off') for sve in sve_lengths) - params['cpu_model_flags'] = 'sve=off,' + ','.join(opts) + error_context.context("Launch a guest with sve=off", test.log.info) + opts = ("{}={}".format(sve, "off") for sve in sve_lengths) + params["cpu_model_flags"] = "sve=off," + ",".join(opts) vm.create(params=params) vm.verify_alive() session = vm.wait_for_login() - if session.cmd_output('dmesg | grep SVE'): - test.fail('The guest gets the SVE feature without using SVE to start') + if session.cmd_output("dmesg | grep SVE"): + test.fail("The guest gets the SVE feature without using SVE to start") diff --git a/qemu/tests/sve_guest_suite.py b/qemu/tests/sve_guest_suite.py index 2bb7ad2b8e..20f5d30bef 100644 --- a/qemu/tests/sve_guest_suite.py +++ b/qemu/tests/sve_guest_suite.py @@ -1,7 +1,6 @@ import re -from virttest import error_context -from virttest import utils_package +from virttest import error_context, utils_package from provider import cpu_utils @@ -13,75 +12,80 @@ def get_sve_supports_lengths(): Get supported SVE lengths of host. 
""" output = vm.monitor.query_cpu_model_expansion(vm.cpuinfo.model) - output.pop('sve') - sve_list = [sve for sve in output if output[sve] is True and - sve.startswith('sve')] + output.pop("sve") + sve_list = [ + sve for sve in output if output[sve] is True and sve.startswith("sve") + ] sve_list.sort(key=lambda x: int(x[3:])) return sve_list def compile_test_suite(): session.cmd(get_suite_cmd, timeout=180) - if suite_type == 'sve_stress': - session.cmd(params['uncompress_cmd'].format(tmp_dir, linux_name)) - error_context.context('Compile the test suite......', test.log.info) + if suite_type == "sve_stress": + session.cmd(params["uncompress_cmd"].format(tmp_dir, linux_name)) + error_context.context("Compile the test suite......", test.log.info) s, o = session.cmd_status_output(compile_cmd, timeout=180) if s: - test.log.error('Compile output: %s', o) - test.error('Failed to compile the test suite.') + test.log.error("Compile output: %s", o) + test.error("Failed to compile the test suite.") def sve_stress(): - s, o = session.cmd_status_output(f'{suite_dir}/sve-probe-vls') - test_lengths = re.findall(r'# (\d+)$', o, re.M) + s, o = session.cmd_status_output(f"{suite_dir}/sve-probe-vls") + test_lengths = re.findall(r"# (\d+)$", o, re.M) if s or not test_lengths: test.error('Could not get supported SVE lengths by "sve-probe-vls"') - test.log.info('The lengths of SVE used for testing are: %s', test_lengths) + test.log.info("The lengths of SVE used for testing are: %s", test_lengths) for sve_length in test_lengths: - out = session.cmd_output(execute_suite_cmd.format(sve_length), - timeout=(suite_timeout + 10)) - results_lines = [result for result in out.splitlines() if - result.startswith('Terminated by')] - if len(re.findall(r'no error', out, re.M)) != len(results_lines): - test.log.debug('Test results: %s', results_lines) - test.fail('SVE stress test failed') + out = session.cmd_output( + execute_suite_cmd.format(sve_length), timeout=(suite_timeout + 10) + ) + results_lines = [ + result + for result in out.splitlines() + if result.startswith("Terminated by") + ] + if len(re.findall(r"no error", out, re.M)) != len(results_lines): + test.log.debug("Test results: %s", results_lines) + test.fail("SVE stress test failed") def optimized_routines(): out = session.cmd_output(execute_suite_cmd, timeout=suite_timeout) - results = re.findall(r'^(\w+) \w+sve$', out, re.M) + results = re.findall(r"^(\w+) \w+sve$", out, re.M) if not all([result == "PASS" for result in results]): - test.log.debug('Test results: %s', results) - test.fail('optimized routines suite test failed') + test.log.debug("Test results: %s", results) + test.fail("optimized routines suite test failed") - cpu_utils.check_cpu_flags(params, 'sve', test) + cpu_utils.check_cpu_flags(params, "sve", test) vm = env.get_vm(params["main_vm"]) sve_lengths = get_sve_supports_lengths() vm.destroy() - compile_cmd = params['compile_cmd'] - dst_dir = params['dst_dir'] - execute_suite_cmd = params['execute_suite_cmd'] - get_suite_cmd = params['get_suite_cmd'] - suite_dir = params['suite_dir'] - suite_timeout = params.get_numeric('suite_timeout') - suite_type = params['suite_type'] - required_pkgs = params.objects('required_pkgs') - tmp_dir = params['tmp_dir'] + compile_cmd = params["compile_cmd"] + dst_dir = params["dst_dir"] + execute_suite_cmd = params["execute_suite_cmd"] + get_suite_cmd = params["get_suite_cmd"] + suite_dir = params["suite_dir"] + suite_timeout = params.get_numeric("suite_timeout") + suite_type = params["suite_type"] + required_pkgs = 
params.objects("required_pkgs") + tmp_dir = params["tmp_dir"] - error_context.context('Launch a guest with sve=on', test.log.info) - sve_opts = ('{}={}'.format(sve, 'on') for sve in sve_lengths) - params['cpu_model_flags'] = 'sve=on,' + ','.join(sve_opts) + error_context.context("Launch a guest with sve=on", test.log.info) + sve_opts = ("{}={}".format(sve, "on") for sve in sve_lengths) + params["cpu_model_flags"] = "sve=on," + ",".join(sve_opts) vm.create(params=params) vm.verify_alive() session = vm.wait_for_login() - cpu_utils.check_cpu_flags(params, 'sve', test, session) + cpu_utils.check_cpu_flags(params, "sve", test, session) - kernel_version = session.cmd_output('uname -r').rsplit('.', 1)[0] + kernel_version = session.cmd_output("uname -r").rsplit(".", 1)[0] srpm = f"kernel-{kernel_version}.src.rpm" linux_name = f"linux-{kernel_version}" get_suite_cmd = get_suite_cmd.format(tmp_dir, srpm) - session.cmd(f'mkdir {dst_dir}') + session.cmd(f"mkdir {dst_dir}") if not utils_package.package_install(required_pkgs, session): test.error("Failed to install required packages in guest") compile_test_suite() - error_context.context('Execute the test suite......', test.log.info) + error_context.context("Execute the test suite......", test.log.info) locals()[suite_type]() diff --git a/qemu/tests/sve_host_suite.py b/qemu/tests/sve_host_suite.py index e55000e7f5..1b2af3f3a8 100644 --- a/qemu/tests/sve_host_suite.py +++ b/qemu/tests/sve_host_suite.py @@ -3,9 +3,7 @@ import shutil from avocado.utils import process - -from virttest import error_context -from virttest import utils_package +from virttest import error_context, utils_package from provider import cpu_utils @@ -14,36 +12,35 @@ def run(test, params, env): def compile_kernel_selftests(): error_context.context( - 'Download the kernel src.rpm package and uncompress it', - test.log.info) + "Download the kernel src.rpm package and uncompress it", test.log.info + ) process.run(get_suite_cmd, shell=True) if os.path.exists(dst_dir): shutil.rmtree(dst_dir) os.mkdir(dst_dir) process.run(uncompress_cmd.format(tmp_dir, linux_name), shell=True) - error_context.context('Compile kernel selftests', test.log.info) + error_context.context("Compile kernel selftests", test.log.info) s, o = process.getstatusoutput(compile_cmd, timeout=180) if s: - test.log.error('Compile output: %s', o) - test.error('Failed to compile the test suite.') - - compile_cmd = params['compile_cmd'] - dst_dir = params['dst_dir'] - execute_suite_cmd = params['execute_suite_cmd'] - get_suite_cmd = params['get_suite_cmd'] - required_pkgs = params.objects('required_pkgs') - suite_timeout = params.get_numeric('suite_timeout') - uncompress_cmd = params['uncompress_cmd'] + test.log.error("Compile output: %s", o) + test.error("Failed to compile the test suite.") + + compile_cmd = params["compile_cmd"] + dst_dir = params["dst_dir"] + execute_suite_cmd = params["execute_suite_cmd"] + get_suite_cmd = params["get_suite_cmd"] + required_pkgs = params.objects("required_pkgs") + suite_timeout = params.get_numeric("suite_timeout") + uncompress_cmd = params["uncompress_cmd"] tmp_dir = test.tmpdir if not utils_package.package_install(required_pkgs): test.error("Failed to install required packages in host") - error_context.base_context('Check if the CPU of host supports SVE', - test.log.info) - cpu_utils.check_cpu_flags(params, 'sve', test) + error_context.base_context("Check if the CPU of host supports SVE", test.log.info) + cpu_utils.check_cpu_flags(params, "sve", test) - kernel_version = 
os.uname()[2].rsplit('.', 1)[0] + kernel_version = os.uname()[2].rsplit(".", 1)[0] srpm = f"kernel-{kernel_version}.src.rpm" linux_name = f"linux-{kernel_version}" get_suite_cmd = get_suite_cmd.format(tmp_dir, srpm) @@ -53,10 +50,11 @@ def compile_kernel_selftests(): s, o = process.getstatusoutput(execute_suite_cmd, timeout=suite_timeout) if s: test.fail('The exit code of "get-reg-list" test suite is not 0.') - elif not all([result == "PASS" for result in - re.findall(r'^sve\S*: (\w+)$', o, re.M)]): - test.log.error('Test result: %s', o) + elif not all( + [result == "PASS" for result in re.findall(r"^sve\S*: (\w+)$", o, re.M)] + ): + test.log.error("Test result: %s", o) test.fail('The sve part of the "get-reg-list" test failed') - test.log.info('get-reg-list test passed') + test.log.info("get-reg-list test passed") finally: shutil.rmtree(dst_dir, ignore_errors=True) diff --git a/qemu/tests/sve_invalid.py b/qemu/tests/sve_invalid.py index 6938a27697..c213875279 100644 --- a/qemu/tests/sve_invalid.py +++ b/qemu/tests/sve_invalid.py @@ -1,7 +1,6 @@ import re from avocado.utils import cpu - from virttest import error_context from virttest.virt_vm import VMCreateError @@ -13,54 +12,57 @@ def get_sve_lengths(supported=True): Get unsupported SVE lengths of host. """ output = vm.monitor.query_cpu_model_expansion(vm.cpuinfo.model) - output.pop('sve') - sve_list = [sve for sve in output if output[sve] is supported and - sve.startswith('sve')] + output.pop("sve") + sve_list = [ + sve for sve in output if output[sve] is supported and sve.startswith("sve") + ] sve_list.sort(key=lambda x: int(x[3:])) return sve_list - error_msg = params['error_msg'] - invalid_length = params.get('invalid_length') - invalid_type = params.get('sve_invalid') + error_msg = params["error_msg"] + invalid_length = params.get("invalid_length") + invalid_type = params.get("sve_invalid") vm = env.get_vm(params["main_vm"]) - sve_flag = cpu.cpu_has_flags('sve') - if invalid_type != 'non_sve_host': + sve_flag = cpu.cpu_has_flags("sve") + if invalid_type != "non_sve_host": if not sve_flag: test.cancel("The host doesn't support SVE feature") if not invalid_length: - active_length = params.get_boolean('active_length') + active_length = params.get_boolean("active_length") sve_lengths = get_sve_lengths(active_length) if active_length: if len(sve_lengths) == 1: test.cancel("The host only supports one sve length") disabled_length = sve_lengths[-2] - flags = ('{}={}'.format( - sve, 'on' if sve != disabled_length else 'off') - for sve in sve_lengths) - flags = ','.join(flags) + flags = ( + "{}={}".format(sve, "on" if sve != disabled_length else "off") + for sve in sve_lengths + ) + flags = ",".join(flags) error_msg = error_msg.format(sve_lengths[-1][3:]) else: invalid_length = sve_lengths[-1] error_msg = error_msg.format(invalid_length[3:]) - flags = '{}=on'.format(invalid_length) + flags = f"{invalid_length}=on" vm.destroy() - params['cpu_model_flags'] = 'sve=on,' + flags + params["cpu_model_flags"] = "sve=on," + flags else: if sve_flag: - test.cancel('The host supports SVE feature, cancel the test...') - params['cpu_model_flags'] = 'sve=on' + test.cancel("The host supports SVE feature, cancel the test...") + params["cpu_model_flags"] = "sve=on" - params['start_vm'] = 'yes' + params["start_vm"] = "yes" try: - error_context.context('Launch a guest with invalid SVE scenario', - test.log.info) + error_context.context("Launch a guest with invalid SVE scenario", test.log.info) vm.create(params=params) except VMCreateError as err: if not 
re.search(error_msg, err.output, re.M): - test.error('The guest failed to be launched but did not get the ' - 'expected error message.') - test.log.info('The qemu process terminated as expected.') + test.error( + "The guest failed to be launched but did not get the " + "expected error message." + ) + test.log.info("The qemu process terminated as expected.") else: - test.fail('The guest should not be launched.') + test.fail("The guest should not be launched.") diff --git a/qemu/tests/sysprep.py b/qemu/tests/sysprep.py index a4c49da14e..a46db8331a 100644 --- a/qemu/tests/sysprep.py +++ b/qemu/tests/sysprep.py @@ -1,9 +1,7 @@ import os import re -from virttest import utils_misc -from virttest import env_process -from virttest import error_context +from virttest import env_process, error_context, utils_misc @error_context.context_aware @@ -26,8 +24,7 @@ def run(test, params, env): timeout = float(params.get("login_timeout", 240)) session = vm.wait_for_login(timeout=timeout) unattended_file = params.get("unattended_file") - unattended_file_link = os.path.join(test.virtdir, - unattended_file) + unattended_file_link = os.path.join(test.virtdir, unattended_file) tmp_path = params.get("tmp_path", "c:\\") vm.copy_files_to(unattended_file_link, tmp_path, verbose=True) sysprep_cmd = params.get("sysprep_cmd") @@ -44,23 +41,22 @@ def run(test, params, env): sid = re.findall(re_sid, output)[0] except IndexError: msg = "Fail to get guest's System ID. " - msg += "Output from check System ID command: %s" % output + msg += f"Output from check System ID command: {output}" test.fail(msg) test.log.info("VM guest System ID is: %s", sid) - sids[sid] = ["pre_%s" % vm.name] + sids[sid] = [f"pre_{vm.name}"] file_dir = tmp_path + unattended_file sysprep_cmd = sysprep_cmd % file_dir - error_context.context("Run sysprep command in guest. %s" % sysprep_cmd, - test.log.info) + error_context.context(f"Run sysprep command in guest. 
{sysprep_cmd}", test.log.info) session.sendline(sysprep_cmd) error_context.context("Waiting guest power down.....", test.log.info) status = utils_misc.wait_for(vm.is_dead, timeout * 3, 3) if not status: test.fail("VM did not shutdown after sysprep command") - params['image_snapshot'] = "yes" - params['vms'] += extend_vm - restart_timeout = timeout * len(params['vms'].split()) * 2 - for vm_i in params['vms'].split(): + params["image_snapshot"] = "yes" + params["vms"] += extend_vm + restart_timeout = timeout * len(params["vms"].split()) * 2 + for vm_i in params["vms"].split(): vm_params = params.object_params(vm_i) env_process.preprocess_vm(test, vm_params, env, vm_i) vm = env.get_vm(vm_i) @@ -75,14 +71,12 @@ def run(test, params, env): try: sid = re.findall(re_sid, output)[0] except IndexError: - msg = "Fail to get System ID of %s" % vm_i.name - msg += "Output from check System ID command: %s" % output + msg = f"Fail to get System ID of {vm_i.name}" + msg += f"Output from check System ID command: {output}" test.error(msg) test.log.info("VM:%s System ID is: %s", vm_i.name, sid) if sid in sids.keys(): - test.log.error("VM: %s have duplicate System ID: %s", - vm_i.name, - sid) + test.log.error("VM: %s have duplicate System ID: %s", vm_i.name, sid) sid_same.append(sid) sids[sid].append(vm_i.name) else: @@ -91,6 +85,8 @@ def run(test, params, env): if sid_same: msg = "" for sid in sid_same: - msg += "VM(s): %s have duplicate System ID: %s\n" % \ - (" ".join(sids[sid]), sid) + msg += "VM(s): {} have duplicate System ID: {}\n".format( + " ".join(sids[sid]), + sid, + ) test.fail(msg) diff --git a/qemu/tests/system_reset_bootable.py b/qemu/tests/system_reset_bootable.py index bab535b8e3..57f9653b6e 100644 --- a/qemu/tests/system_reset_bootable.py +++ b/qemu/tests/system_reset_bootable.py @@ -1,8 +1,7 @@ -import time import random +import time -from virttest import env_process -from virttest import error_context +from virttest import env_process, error_context @error_context.context_aware @@ -44,8 +43,7 @@ def run(test, params, env): time.sleep(wait_time) for i in range(1, reset_times + 1): - error_context.context("Reset guest system for %s times" % i, - test.log.info) + error_context.context(f"Reset guest system for {i} times", test.log.info) vm.monitor.cmd("system_reset") @@ -53,8 +51,7 @@ def run(test, params, env): if params.get("fixed_interval", "yes") != "yes": interval_tmp = random.randint(0, interval * 1000) / 1000.0 - test.log.debug("Reset the system by monitor cmd" - " after %ssecs", interval_tmp) + test.log.debug("Reset the system by monitor cmd" " after %ssecs", interval_tmp) time.sleep(interval_tmp) error_context.context("Try to login guest after reset", test.log.info) diff --git a/qemu/tests/systemtap_tracing.py b/qemu/tests/systemtap_tracing.py index 456970d25a..08dd5e2443 100644 --- a/qemu/tests/systemtap_tracing.py +++ b/qemu/tests/systemtap_tracing.py @@ -1,13 +1,9 @@ -import re import os +import re import time from avocado.utils import process - -from virttest import error_context -from virttest import utils_misc -from virttest import env_process -from virttest import data_dir +from virttest import data_dir, env_process, error_context, utils_misc @error_context.context_aware @@ -30,7 +26,7 @@ def create_patterns_reg(trace_key): """ pattern_reg = r"" for tracing_key in trace_key.split(): - pattern_reg += r"%s=\d+," % tracing_key + pattern_reg += rf"{tracing_key}=\d+," return pattern_reg.rstrip(",") error_context.base_context("Qemu_Tracing Test") @@ -45,9 +41,9 @@ def 
create_patterns_reg(trace_key): if params.get("extra_params"): params["extra_params"] = params.get("extra_params") - if params.get("boot_with_cdrom") == 'yes': - iso_path = "%s/test.iso" % data_dir.get_tmp_dir() - create_cmd = "dd if=/dev/zero of=%s bs=1M count=10" % iso_path + if params.get("boot_with_cdrom") == "yes": + iso_path = f"{data_dir.get_tmp_dir()}/test.iso" + create_cmd = f"dd if=/dev/zero of={iso_path} bs=1M count=10" if process.system(create_cmd, ignore_status=True) != 0: test.cancel("Create test iso failed") params["cdrom_cd1"] = iso_path @@ -68,7 +64,7 @@ def create_patterns_reg(trace_key): cmd_type = "bash" exec_cmds = cmd for cmd_exec in exec_cmds.split(";"): - msg = "Execute %s cmd '%s'" % (cmd_type, cmd_exec) + msg = f"Execute {cmd_type} cmd '{cmd_exec}'" error_context.context(msg, test.log.info) if cmd_type == "monitor": vm.monitor.send_args_cmd(cmd_exec) @@ -82,7 +78,7 @@ def create_patterns_reg(trace_key): start_time = time.time() while (time.time() - start_time) < capdata_timeout: if os.path.isfile(stap_log_file): - fd = open(stap_log_file, 'r') + fd = open(stap_log_file, "r") data = fd.read() if (not data) or (not re.findall(checking_pattern_re, data)): time.sleep(time_inter) @@ -90,8 +86,10 @@ def create_patterns_reg(trace_key): continue elif data and re.findall(checking_pattern_re, data): test.log.info("Capture the data successfully") - test.log.info("The capture data is like: %s", - re.findall(checking_pattern_re, data)[-1]) + test.log.info( + "The capture data is like: %s", + re.findall(checking_pattern_re, data)[-1], + ) fd.close() break else: diff --git a/qemu/tests/tcpreplay.py b/qemu/tests/tcpreplay.py index 25e9a4aae0..abd592a04f 100644 --- a/qemu/tests/tcpreplay.py +++ b/qemu/tests/tcpreplay.py @@ -1,11 +1,9 @@ -import re import os +import re import shutil -from virttest import error_context -from virttest import data_dir -from avocado.utils import process -from avocado.utils import archive +from avocado.utils import archive, process +from virttest import data_dir, error_context @error_context.context_aware @@ -44,8 +42,7 @@ def copy_file_from_deps(file_name, sub_dir, dst_dir="/tmp"): param sub_dir: sub directory that contain the file param dst_dir: the target directory the file copied to """ - src_full_path = os.path.join( - data_dir.get_deps_dir(sub_dir), file_name) + src_full_path = os.path.join(data_dir.get_deps_dir(sub_dir), file_name) dst_full_path = os.path.join(dst_dir, file_name) shutil.copyfile(src_full_path, dst_full_path) return dst_full_path @@ -61,10 +58,10 @@ def copy_file_from_deps(file_name, sub_dir, dst_dir="/tmp"): uncompress_dir = params.get("uncompress_dir") timeout = params.get_numeric("timeout", 60) - error_context.context("Copy %s to %s" % (tcpreplay_file_name, tmp_dir), - test.log.info) - tcpreplay_full_path = copy_file_from_deps(tcpreplay_file_name, - tcpreplay_dir, tmp_dir) + error_context.context(f"Copy {tcpreplay_file_name} to {tmp_dir}", test.log.info) + tcpreplay_full_path = copy_file_from_deps( + tcpreplay_file_name, tcpreplay_dir, tmp_dir + ) error_context.context("Compile tcpreplay", test.log.info) uncompress_full_path = os.path.join(tmp_dir, uncompress_dir) @@ -72,27 +69,24 @@ def copy_file_from_deps(file_name, sub_dir, dst_dir="/tmp"): test.log.info("Remove old uncompress directory") shutil.rmtree(uncompress_full_path, ignore_errors=True) - test.log.info( - "Uncompress %s to %s", tcpreplay_full_path, uncompress_full_path) - uncompress_dir = archive.uncompress( - tcpreplay_full_path, tmp_dir) + test.log.info("Uncompress 
%s to %s", tcpreplay_full_path, uncompress_full_path) + uncompress_dir = archive.uncompress(tcpreplay_full_path, tmp_dir) if not uncompress_dir: - test.error("Can't uncompress %s" % tcpreplay_full_path) + test.error(f"Can't uncompress {tcpreplay_full_path}") test.log.info("Compile files at %s", uncompress_full_path) execute_host_cmd(tcpreplay_compile_cmd % uncompress_full_path, timeout) - error_context.context("Copy %s to %s" % (pcap_file_name, tmp_dir), - test.log.info) + error_context.context(f"Copy {pcap_file_name} to {tmp_dir}", test.log.info) copy_file_from_deps(pcap_file_name, tcpreplay_dir, tmp_dir) error_context.context("Run tcpreplay with pcap file", test.log.info) output = execute_host_cmd(run_tcpreplay_cmd) - result = re.search(r'Successful packets:\s+(\d+)', output) + result = re.search(r"Successful packets:\s+(\d+)", output) success_packet = 0 if result: success_packet = int(result.group(1)) if success_packet != 1: - test.fail("tcpreplay result error with output: %s" % output) + test.fail(f"tcpreplay result error with output: {output}") vm.verify_alive() diff --git a/qemu/tests/test_SMM_enabled.py b/qemu/tests/test_SMM_enabled.py index c8b1722f1f..b4229479d5 100644 --- a/qemu/tests/test_SMM_enabled.py +++ b/qemu/tests/test_SMM_enabled.py @@ -1,9 +1,7 @@ import os import time -from virttest import data_dir -from virttest import error_context -from virttest import env_process +from virttest import data_dir, env_process, error_context @error_context.context_aware @@ -23,7 +21,7 @@ def run(test, params, env): params["kernel"] = src_test_binary params["start_vm"] = "yes" - vm_name = params['main_vm'] + vm_name = params["main_vm"] env_process.preprocess_vm(test, params, env, vm_name) vm = env.get_vm(vm_name) time.sleep(10) diff --git a/qemu/tests/test_vdpa_control_virtqueue.py b/qemu/tests/test_vdpa_control_virtqueue.py index b607c5843f..2ddd36c127 100644 --- a/qemu/tests/test_vdpa_control_virtqueue.py +++ b/qemu/tests/test_vdpa_control_virtqueue.py @@ -1,5 +1,4 @@ -from virttest import error_context -from virttest import utils_net +from virttest import error_context, utils_net from virttest.utils_test.qemu import MemoryHotplugTest @@ -33,17 +32,17 @@ def run(test, params, env): for i in range(change_times): output = session_serial.cmd_output_safe(change_cmd % (new_mac, interface)) if output: - test.fail("Mac address changed failed,print error info: %s" % output) + test.fail(f"Mac address changed failed,print error info: {output}") output = vm.process.get_output() if output: - test.error("Qemu output error info: %s" % output) + test.error(f"Qemu output error info: {output}") test.log.info("Finished change mac address 2^16-1 times") - test.log.info("Hotplug %s memory to update device" % size_mem) + test.log.info("Hotplug %s memory to update device", size_mem) hotplug_mem = MemoryHotplugTest(test, params, env) hotplug_mem.hotplug_memory(vm=vm, name=target_mem) test.log.info("Try to update the mac again") session_serial.cmd_output_safe(change_cmd % (old_mac, interface)) - output = session_serial.cmd_output_safe("ifconfig | grep -i %s" % old_mac) + output = session_serial.cmd_output_safe(f"ifconfig | grep -i {old_mac}") if old_mac in output: test.log.info("Mac address change successfully, net restart...") else: diff --git a/qemu/tests/thin_provision_check_mode.py b/qemu/tests/thin_provision_check_mode.py index 836c75be71..755381b421 100644 --- a/qemu/tests/thin_provision_check_mode.py +++ b/qemu/tests/thin_provision_check_mode.py @@ -1,12 +1,8 @@ import os -from avocado.utils import 
genio +from avocado.utils import genio, process from avocado.utils import path as utils_path -from avocado.utils import process - -from virttest import env_process -from virttest import error_context - +from virttest import env_process, error_context from virttest.utils_misc import get_linux_drive_path @@ -42,28 +38,29 @@ def get_provisioning_mode(device, host_id): depends on params for scsi_debug module. """ device_name = os.path.basename(device) - path = "/sys/block/%s/device/scsi_disk" % device_name - path += "/%s/provisioning_mode" % host_id + path = f"/sys/block/{device_name}/device/scsi_disk" + path += f"/{host_id}/provisioning_mode" return genio.read_one_line(path).strip() def get_guest_provisioning_mode(device): """ Get disk provisioning_mode in guest """ - cmd = "lsblk -S -n %s" % device + cmd = f"lsblk -S -n {device}" status, output = session.cmd_status_output(cmd) if status != 0: - test.fail("Can not find device %s in guest" % device) + test.fail(f"Can not find device {device} in guest") host_id = output.split()[1] - cmd = "cat /sys/bus/scsi/devices/{0}/scsi_disk/{0}/provisioning_mode".format( - host_id) + cmd = ( + f"cat /sys/bus/scsi/devices/{host_id}/scsi_disk/{host_id}/provisioning_mode" + ) status, output = session.cmd_status_output(cmd) if status == 0: return output.strip() - test.fail("Can not get provisioning mode %s in guest" % host_id) + test.fail(f"Can not get provisioning mode {host_id} in guest") utils_path.find_command("lsblk") host_scsi_id, disk_name = get_host_scsi_disk() @@ -76,10 +73,9 @@ def get_guest_provisioning_mode(device): target_mode = params["target_mode"] disk_serial = params["disk_serial"] params["start_vm"] = "yes" - params["image_name_%s" % data_tag] = disk_name + params[f"image_name_{data_tag}"] = disk_name - error_context.context("boot guest with disk '%s'" % disk_name, - test.log.info) + error_context.context(f"boot guest with disk '{disk_name}'", test.log.info) # boot guest with scsi_debug disk env_process.preprocess_vm(test, params, env, vm_name) vm = env.get_vm(vm_name) @@ -93,6 +89,6 @@ def get_guest_provisioning_mode(device): test.fail("Can not get output file path in guest.") mode = get_guest_provisioning_mode(output_path) - error_context.context("Checking provision mode %s" % mode, test.log.info) + error_context.context(f"Checking provision mode {mode}", test.log.info) if mode != target_mode: test.fail("Got unexpected mode:%s", mode) diff --git a/qemu/tests/thin_provision_guest_fstrim.py b/qemu/tests/thin_provision_guest_fstrim.py index c426fd3a80..1073267033 100644 --- a/qemu/tests/thin_provision_guest_fstrim.py +++ b/qemu/tests/thin_provision_guest_fstrim.py @@ -1,9 +1,6 @@ from avocado.utils import path as utils_path from avocado.utils import process - -from virttest import env_process -from virttest import error_context -from virttest import guest_agent +from virttest import env_process, error_context, guest_agent from virttest.utils_misc import get_linux_drive_path @@ -28,7 +25,7 @@ def run(test, params, env): """ def get_scsi_debug_disk(guest_session=None): - """" + """ " Get scsi debug disk on host or on guest which created as scsi-block. """ cmd = "lsblk -S -n -p|grep scsi_debug" @@ -47,7 +44,7 @@ def get_guest_discard_disk(): """ Get discard disk on guest. 
""" - if params["drive_format_%s" % data_tag] == "scsi-block": + if params[f"drive_format_{data_tag}"] == "scsi-block": return get_scsi_debug_disk(session) disk_serial = params["disk_serial"] @@ -62,10 +59,13 @@ def create_guest_agent_session(): filename = vm.get_serial_console_filename(guest_agent_name) guest_agent_params = params.object_params(guest_agent_name) guest_agent_params["monitor_filename"] = filename - return guest_agent.QemuAgent(vm, guest_agent_name, - guest_agent_serial_type, - guest_agent_params, - get_supported_cmds=True) + return guest_agent.QemuAgent( + vm, + guest_agent_name, + guest_agent_serial_type, + guest_agent_params, + get_supported_cmds=True, + ) def get_blocks(): """ @@ -82,10 +82,9 @@ def get_blocks(): vm_name = params["main_vm"] data_tag = params["data_tag"] params["start_vm"] = "yes" - params["image_name_%s" % data_tag] = disk_name + params[f"image_name_{data_tag}"] = disk_name - error_context.context("Boot guest with disk '%s'" % disk_name, - test.log.info) + error_context.context(f"Boot guest with disk '{disk_name}'", test.log.info) # boot guest with scsi_debug disk env_process.preprocess_vm(test, params, env, vm_name) vm = env.get_vm(vm_name) @@ -121,4 +120,4 @@ def get_blocks(): error_context.context("Compare blocks.", test.log.info) if new_count >= old_count: - test.fail("Got unexpected result:%s %s" % (old_count, new_count)) + test.fail(f"Got unexpected result:{old_count} {new_count}") diff --git a/qemu/tests/thin_write_in_qemu_img_commit.py b/qemu/tests/thin_write_in_qemu_img_commit.py index f25349c48c..dfefc0fdea 100644 --- a/qemu/tests/thin_write_in_qemu_img_commit.py +++ b/qemu/tests/thin_write_in_qemu_img_commit.py @@ -1,17 +1,12 @@ import json import re -from virttest import data_dir -from virttest import utils_numeric +from virttest import data_dir, env_process, utils_misc, utils_numeric from virttest.qemu_io import QemuIOSystem from virttest.qemu_storage import QemuImg -from virttest import env_process -from virttest import utils_misc - from virttest.utils_version import VersionInterval -from qemu.tests.qemu_disk_img import QemuImgTest -from qemu.tests.qemu_disk_img import generate_base_snapshot_pair +from qemu.tests.qemu_disk_img import QemuImgTest, generate_base_snapshot_pair def run(test, params, env): @@ -28,6 +23,7 @@ def run(test, params, env): :param params: Dictionary with the test parameters :param env: Dictionary with test environment. 
""" + def _get_image_object(tag): """Get QemuImg object by tag.""" img_param = params.object_params(tag) @@ -46,33 +42,39 @@ def _qemu_io(img, cmd): q.cmd_output(cmd, 120) def _verify_map_output(output): - """"Verify qemu map output.""" + """ "Verify qemu map output.""" qemu_path = utils_misc.get_qemu_binary(params) qemu_version = env_process._get_qemu_version(qemu_path) - match = re.search(r'[0-9]+\.[0-9]+\.[0-9]+(\-[0-9]+)?', qemu_version) + match = re.search(r"[0-9]+\.[0-9]+\.[0-9]+(\-[0-9]+)?", qemu_version) host_qemu = match.group(0) expected = { - "length": int(utils_numeric.normalize_data_size( - params["write_size"], "B")), - "start": 0, "depth": 0, "zero": True, "data": False} - if host_qemu in VersionInterval('[6.1.0,)'): + "length": int(utils_numeric.normalize_data_size(params["write_size"], "B")), + "start": 0, + "depth": 0, + "zero": True, + "data": False, + } + if host_qemu in VersionInterval("[6.1.0,)"): expected["present"] = True - if host_qemu in VersionInterval('[8.2.0,)'): + if host_qemu in VersionInterval("[8.2.0,)"): expected["compressed"] = False if expected not in json.loads(output.stdout_text): - test.fail("Commit failed, data from 0 to %s are not zero" % - params["write_size"]) + test.fail( + "Commit failed, data from 0 to {} are not zero".format( + params["write_size"] + ) + ) gen = generate_base_snapshot_pair(params["image_chain"]) img_root_dir = data_dir.get_data_dir() base, snapshot = next(gen) img_base = _get_image_object(base) - _qemu_io(img_base, 'write -P 1 0 %s' % params["image_size_base"]) + _qemu_io(img_base, "write -P 1 0 {}".format(params["image_size_base"])) _create_external_snapshot(snapshot) img_sn = _get_image_object(snapshot) - _qemu_io(img_sn, 'write -z 0 %s' % params["write_size"]) + _qemu_io(img_sn, "write -z 0 {}".format(params["write_size"])) img_sn.commit() _verify_map_output(img_base.map(output="json")) diff --git a/qemu/tests/throttle_block_set_io_throttle.py b/qemu/tests/throttle_block_set_io_throttle.py index daec119c58..0fc6dd4f53 100644 --- a/qemu/tests/throttle_block_set_io_throttle.py +++ b/qemu/tests/throttle_block_set_io_throttle.py @@ -1,11 +1,10 @@ """block_set_io_throttle testing on iothread enabled disk""" -import time + import json +import time -from virttest import utils_misc -from virttest import utils_disk +from virttest import error_context, utils_disk, utils_misc from virttest.utils_misc import get_linux_drive_path -from virttest import error_context # This decorator makes the test function aware of context strings @@ -38,8 +37,8 @@ def _execute_set_io_throttle(monitor): logger.info("Start %s block_set_io_throttle %s", repeat, value) for img in images: dev = img - if params["drive_format_%s" % img] == "virtio": - dev = "/machine/peripheral/%s/virtio-backend" % img + if params[f"drive_format_{img}"] == "virtio": + dev = f"/machine/peripheral/{img}/virtio-backend" cmd = cmd_qmp % (dev, value) logger.info(cmd) monitor.cmd_qmp("block_set_io_throttle", json.loads(cmd)) @@ -58,17 +57,16 @@ def _execute_set_io_throttle(monitor): if qmp_monitor: qmp_monitor = qmp_monitor[0] else: - test.error('Could not find a QMP monitor, aborting test') + test.error("Could not find a QMP monitor, aborting test") logger.info("Execute io in guest...") - if os_type == 'windows': + if os_type == "windows": img_size = params.get("image_size_stg1") guest_cmd = utils_misc.set_winutils_letter(session, guest_cmd) disk = _get_window_disk_index_by_serail(disk_serial) utils_disk.update_windows_disk_attributes(session, disk) test.log.info("Formatting 
disk:%s", disk) - driver = utils_disk.configure_empty_disk(session, disk, img_size, - os_type)[0] + driver = utils_disk.configure_empty_disk(session, disk, img_size, os_type)[0] output_path = driver + ":\\test.dat" else: output_path = get_linux_drive_path(session, disk_serial) diff --git a/qemu/tests/throttle_cdrom_test.py b/qemu/tests/throttle_cdrom_test.py index 5e618214bf..24e6abc728 100644 --- a/qemu/tests/throttle_cdrom_test.py +++ b/qemu/tests/throttle_cdrom_test.py @@ -1,21 +1,21 @@ """IO-Throttling cdrom relevant testing""" - from virttest import error_context from virttest.qemu_capabilities import Flags -from provider.cdrom import QMPEventCheckCDEject, QMPEventCheckCDChange + +from provider.cdrom import QMPEventCheckCDChange, QMPEventCheckCDEject # This decorator makes the test function aware of context strings @error_context.context_aware def run(test, params, env): """ - Test cdrom operation with throttle feature. - 1) Boot up guest with cdrom device in throttle groups. - 2) Query cdrom device. - 3) Execute change media operation - 4) Query cdrom device - 5) Execute eject media operation + Test cdrom operation with throttle feature. + 1) Boot up guest with cdrom device in throttle groups. + 2) Query cdrom device. + 3) Execute change media operation + 4) Query cdrom device + 5) Execute eject media operation """ error_context.context("Get the main VM", test.log.info) @@ -35,7 +35,7 @@ def run(test, params, env): eject_check = QMPEventCheckCDEject(vm, device_name) change_check = QMPEventCheckCDChange(vm, device_name) - monitor = vm.get_monitors_by_type('qmp')[0] + monitor = vm.get_monitors_by_type("qmp")[0] if vm.check_capability(Flags.BLOCKDEV): qdev = vm.devices.get_qdev_by_drive(device_name) monitor.blockdev_open_tray(qdev, force=True) diff --git a/qemu/tests/throttle_multi_guests_parameter_test.py b/qemu/tests/throttle_multi_guests_parameter_test.py index cdab45327b..9de55405ba 100644 --- a/qemu/tests/throttle_multi_guests_parameter_test.py +++ b/qemu/tests/throttle_multi_guests_parameter_test.py @@ -1,42 +1,41 @@ import os import shutil -from virttest import error_context -from virttest import qemu_storage -from virttest import data_dir -from virttest import env_process -from virttest import utils_misc +from virttest import data_dir, env_process, error_context, qemu_storage, utils_misc from provider.storage_benchmark import generate_instance -from provider.throttle_utils import ThrottleGroupManager, ThrottleTester, \ - ThrottleGroupsTester +from provider.throttle_utils import ( + ThrottleGroupManager, + ThrottleGroupsTester, + ThrottleTester, +) # This decorator makes the test function aware of context strings @error_context.context_aware def run(test, params, env): """ - Test throttle relevant properties feature. + Test throttle relevant properties feature. - 1) Boot up guest with throttle groups. - There are two throttle groups and each have two disk - 2) Build fio operation options and expected result - according to throttle properties. - 3) Execute one disk or all disks testing on groups parallel. + 1) Boot up guest with throttle groups. + There are two throttle groups and each have two disk + 2) Build fio operation options and expected result + according to throttle properties. + 3) Execute one disk or all disks testing on groups parallel. 
""" def copy_base_vm_image(): """Copy the base vm image for VMs.""" src_img = qemu_storage.QemuImg( - params, data_dir.get_data_dir(), params['images']) + params, data_dir.get_data_dir(), params["images"] + ) src_filename = src_img.image_filename src_format = src_img.image_format dst_dir = os.path.dirname(src_filename) for vm_name in vms_list: - dst_filename = os.path.join( - dst_dir, '%s.%s' % (vm_name, src_format)) + dst_filename = os.path.join(dst_dir, f"{vm_name}.{src_format}") if not os.path.exists(dst_filename): - test.log.info('Copying %s to %s.', src_filename, dst_filename) + test.log.info("Copying %s to %s.", src_filename, dst_filename) shutil.copy(src_filename, dst_filename) def wait_for_login_all_vms(): @@ -46,7 +45,7 @@ def wait_for_login_all_vms(): @error_context.context_aware def fio_on_vm(vm_t, session_t): error_context.context("Deploy fio", test.log.info) - fio = generate_instance(params, vm_t, 'fio') + fio = generate_instance(params, vm_t, "fio") test.log.info("fio: %s", fio) tgm = ThrottleGroupManager(vm_t) test.log.info("tgm: %s", tgm) @@ -54,16 +53,16 @@ def fio_on_vm(vm_t, session_t): testers = [] for group in groups: tgm.get_throttle_group_props(group) - images = params["throttle_group_member_%s" % group].split() - tester = ThrottleTester(test, params, vm_t, session_t, - group, images) - error_context.context("Build test stuff for %s:%s" - % (group, images), test.log.info) + images = params[f"throttle_group_member_{group}"].split() + tester = ThrottleTester(test, params, vm_t, session_t, group, images) + error_context.context( + f"Build test stuff for {group}:{images}", test.log.info + ) tester.build_default_option() tester.build_images_fio_option() tester.set_fio(fio) testers.append(tester) - error_context.context("Start groups testing:%s" % groups, test.log.info) + error_context.context(f"Start groups testing:{groups}", test.log.info) groups_tester = ThrottleGroupsTester(testers) groups_tester.start() @@ -76,15 +75,15 @@ def fio_on_vms(): utils_misc.parallel(fio_parallel_params) test.log.info("Done fio on multi-vms.") - vms_list = params['vms'].split() + vms_list = params["vms"].split() copy_base_vm_image() - vms_default = params['vms'].split()[0] - vms_post = params['vms'].split(vms_default)[1].strip() - params['vms'] = str(vms_post) - params['start_vm'] = 'yes' - env_process.process(test, params, env, - env_process.preprocess_image, - env_process.preprocess_vm) + vms_default = params["vms"].split()[0] + vms_post = params["vms"].split(vms_default)[1].strip() + params["vms"] = str(vms_post) + params["start_vm"] = "yes" + env_process.process( + test, params, env, env_process.preprocess_image, env_process.preprocess_vm + ) vms = env.get_all_vms() for vm_verify in vms: vm_verify.verify_alive() diff --git a/qemu/tests/throttle_operation_test.py b/qemu/tests/throttle_operation_test.py index 8a8ba5155f..6411cd1390 100644 --- a/qemu/tests/throttle_operation_test.py +++ b/qemu/tests/throttle_operation_test.py @@ -1,32 +1,37 @@ """IO-Throttling group and other operation relevant testing""" + import json from virttest import error_context from virttest.qemu_monitor import QMPCmdError -from provider.storage_benchmark import generate_instance -from provider.throttle_utils import ThrottleGroupManager, ThrottleTester, \ - ThrottleGroupsTester + from provider.block_devices_plug import BlockDevicesPlug from provider.blockdev_snapshot_base import BlockDevSnapshotTest +from provider.storage_benchmark import generate_instance +from provider.throttle_utils import ( + 
ThrottleGroupManager, + ThrottleGroupsTester, + ThrottleTester, +) # This decorator makes the test function aware of context strings @error_context.context_aware def run(test, params, env): """ - Test throttle relevant properties feature. - - 1) Boot up guest with throttle groups. - There are two throttle groups. One have two disks,other is empty. - 2) Build fio operation options and expected result - according to throttle properties. - 3) Execute single disk throttle testing on first group. - 4) Execute group relevant testing for example: - Change throttle group attribute or move disk to other group - 5) Or Execute other operation testing for example: - Reboot guest or stop-resume guest - or add snapshot on throttle node - 6) Execute throttle testing on all groups. + Test throttle relevant properties feature. + + 1) Boot up guest with throttle groups. + There are two throttle groups. One have two disks,other is empty. + 2) Build fio operation options and expected result + according to throttle properties. + 3) Execute single disk throttle testing on first group. + 4) Execute group relevant testing for example: + Change throttle group attribute or move disk to other group + 5) Or Execute other operation testing for example: + Reboot guest or stop-resume guest + or add snapshot on throttle node + 6) Execute throttle testing on all groups. """ def negative_test(): @@ -44,7 +49,10 @@ def negative_test(): continue test.log.error( "Cannot got expected wrong result on %s: %s in %s", - name, err_msg, qmp_desc) + name, + err_msg, + qmp_desc, + ) raise err else: test.fail("Can not got expected wrong result") @@ -99,7 +107,7 @@ def operation_snapshot(): session = vm.wait_for_login(timeout=360) error_context.context("Deploy fio", test.log.info) - fio = generate_instance(params, vm, 'fio') + fio = generate_instance(params, vm, "fio") tgm = ThrottleGroupManager(vm) groups = params["throttle_groups"].split() @@ -114,7 +122,7 @@ def operation_snapshot(): tester.start() # execute relevant operation - error_context.context("Execute operation %s" % operation, test.log.info) + error_context.context(f"Execute operation {operation}", test.log.info) locals_var = locals() locals_var[operation]() # test after operation @@ -123,19 +131,18 @@ def operation_snapshot(): session = vm.wait_for_login(timeout=360) for group in groups: tgm.get_throttle_group_props(group) - images = params.get("throttle_group_member_%s" % group, "").split() + images = params.get(f"throttle_group_member_{group}", "").split() if len(images) == 0: test.log.info("No images in group %s", group) continue tester = ThrottleTester(test, params, vm, session, group, images) - error_context.context("Build test stuff for %s:%s" % (group, images), - test.log.info) + error_context.context(f"Build test stuff for {group}:{images}", test.log.info) tester.build_default_option() tester.build_images_fio_option() tester.set_fio(fio) testers.append(tester) - error_context.context("Start groups testing:%s" % groups, test.log.info) + error_context.context(f"Start groups testing:{groups}", test.log.info) groups_tester = ThrottleGroupsTester(testers) repeat_test = params.get_numeric("repeat_test", 1) diff --git a/qemu/tests/throttle_parameter_test.py b/qemu/tests/throttle_parameter_test.py index ed72f4b685..fc866b5551 100644 --- a/qemu/tests/throttle_parameter_test.py +++ b/qemu/tests/throttle_parameter_test.py @@ -1,21 +1,24 @@ from virttest import error_context from provider.storage_benchmark import generate_instance -from provider.throttle_utils import 
ThrottleGroupManager, ThrottleTester, \ - ThrottleGroupsTester +from provider.throttle_utils import ( + ThrottleGroupManager, + ThrottleGroupsTester, + ThrottleTester, +) # This decorator makes the test function aware of context strings @error_context.context_aware def run(test, params, env): """ - Test throttle relevant properties feature. + Test throttle relevant properties feature. - 1) Boot up guest with throttle groups. - There are two throttle groups and each have two disk - 2) Build fio operation options and expected result - according to throttle properties. - 3) Execute one disk or all disks testing on groups parallel. + 1) Boot up guest with throttle groups. + There are two throttle groups and each have two disk + 2) Build fio operation options and expected result + according to throttle properties. + 3) Execute one disk or all disks testing on groups parallel. """ error_context.context("Get the main VM", test.log.info) @@ -25,23 +28,22 @@ def run(test, params, env): session = vm.wait_for_login(timeout=360) error_context.context("Deploy fio", test.log.info) - fio = generate_instance(params, vm, 'fio') + fio = generate_instance(params, vm, "fio") tgm = ThrottleGroupManager(vm) groups = params["throttle_groups"].split() testers = [] for group in groups: tgm.get_throttle_group_props(group) - images = params["throttle_group_member_%s" % group].split() + images = params[f"throttle_group_member_{group}"].split() tester = ThrottleTester(test, params, vm, session, group, images) - error_context.context("Build test stuff for %s:%s" % (group, images), - test.log.info) + error_context.context(f"Build test stuff for {group}:{images}", test.log.info) tester.build_default_option() tester.build_images_fio_option() tester.set_fio(fio) testers.append(tester) - error_context.context("Start groups testing:%s" % groups, test.log.info) + error_context.context(f"Start groups testing:{groups}", test.log.info) groups_tester = ThrottleGroupsTester(testers) groups_tester.start() diff --git a/qemu/tests/time_conv.py b/qemu/tests/time_conv.py index a4795d7704..eb63c70fc7 100755 --- a/qemu/tests/time_conv.py +++ b/qemu/tests/time_conv.py @@ -2,9 +2,7 @@ import time from avocado.utils import process - -from virttest import data_dir -from virttest import storage +from virttest import data_dir, storage def run(test, params, env): @@ -20,30 +18,36 @@ def run(test, params, env): """ image_stg = params["images"] root_dir = data_dir.get_data_dir() - image_stg_name = storage.get_image_filename(params.object_params(image_stg), - root_dir) + image_stg_name = storage.get_image_filename( + params.object_params(image_stg), root_dir + ) image_secret = params.get("image_secret") timeout = float(params.get("timeout", 1800)) qemu_img_bench_cmd = params["qemu_img_bench_cmd"] image_format = params["image_format"] if image_format == "qcow2" or image_format == "raw": - process.run(qemu_img_bench_cmd % (image_format, image_stg_name), - timeout=timeout, shell=True) + process.run( + qemu_img_bench_cmd % (image_format, image_stg_name), + timeout=timeout, + shell=True, + ) time_list = [] qemu_img_conv_cmd = params["qemu_img_conv_cmd"] conv_img = os.path.join(os.path.dirname(image_stg_name), "convert.img") for i in range(5): start_time = time.time() if image_format == "qcow2" or image_format == "raw": - process.run(qemu_img_conv_cmd % (image_format, image_format, - image_stg_name, conv_img)) + process.run( + qemu_img_conv_cmd + % (image_format, image_format, image_stg_name, conv_img) + ) elif image_format == "luks": 
process.run(qemu_img_conv_cmd % (image_secret, image_stg_name, conv_img)) time_conv = time.time() - start_time time_list.append(time_conv) - process.run("rm -f %s" % conv_img) + process.run(f"rm -f {conv_img}") test.log.info("The time list is: %s", time_list) max_time = params["max_time"] unexpected_time = [_ for _ in time_list if float(_) > float(max_time)] if unexpected_time: - test.fail("Unexpected time: %s" % unexpected_time) + test.fail(f"Unexpected time: {unexpected_time}") diff --git a/qemu/tests/time_manage.py b/qemu/tests/time_manage.py index 316a979f8c..ce65428447 100644 --- a/qemu/tests/time_manage.py +++ b/qemu/tests/time_manage.py @@ -1,9 +1,7 @@ import time import aexpect -from virttest import utils_test -from virttest import env_process -from virttest import error_context +from virttest import env_process, error_context, utils_test @error_context.context_aware @@ -55,10 +53,14 @@ def run(test, params, env): # Run some load on the host test.log.info("Starting load on host.") - host_load_sessions.append(aexpect.run_bg(host_load_command, - output_func=test.log.debug, - output_prefix="host load ", - timeout=0.5)) + host_load_sessions.append( + aexpect.run_bg( + host_load_command, + output_func=test.log.debug, + output_prefix="host load ", + timeout=0.5, + ) + ) # Boot the VMs try: while num <= int(params["max_vms"]): @@ -75,8 +77,7 @@ def run(test, params, env): test.log.info("Guest #%d booted up successfully", num) # Check whether all previous shell sessions are responsive - error_context.context("checking responsiveness of the booted" - " guest") + error_context.context("checking responsiveness of the booted" " guest") for se in sessions: se.cmd(params["alive_test_cmd"]) num += 1 @@ -86,20 +87,21 @@ def run(test, params, env): # Get the respective vm object vm = env.get_vm(vmnames[vmid]) # Run current iteration - test.log.info( - "Rebooting:vm%d iteration %d ", (vmid + 1), itr) + test.log.info("Rebooting:vm%d iteration %d ", (vmid + 1), itr) se = vm.reboot(se, timeout=timeout) # Remember the current changed session sessions[vmid] = se error_context.context("checking responsiveness of guest") se.cmd(params["alive_test_cmd"]) if itr == 0: - (ht0, gt0) = utils_test.get_time(se, time_command, - time_filter_re, time_format) + (ht0, gt0) = utils_test.get_time( + se, time_command, time_filter_re, time_format + ) prev_time.append((ht0, gt0)) else: - (ht1, gt1) = utils_test.get_time(se, time_command, - time_filter_re, time_format) + (ht1, gt1) = utils_test.get_time( + se, time_command, time_filter_re, time_format + ) curr_time.append((ht1, gt1)) if itr != 0: for i in range(int(params["max_vms"])): @@ -125,7 +127,11 @@ def run(test, params, env): # Closing all the sessions. 
se.close() test.log.info("killing load on host.") - host_load_sessions.append(aexpect.run_bg(host_load_kill_command, - output_func=test.log.debug, - output_prefix="host load kill", - timeout=0.5)) + host_load_sessions.append( + aexpect.run_bg( + host_load_kill_command, + output_func=test.log.debug, + output_prefix="host load kill", + timeout=0.5, + ) + ) diff --git a/qemu/tests/timedrift.py b/qemu/tests/timedrift.py index e99d6aada2..e3014ba482 100644 --- a/qemu/tests/timedrift.py +++ b/qemu/tests/timedrift.py @@ -1,13 +1,8 @@ -import sys import time import aexpect - -from avocado.utils import process -from avocado.utils import cpu - -from virttest import utils_test -from virttest import utils_time +from avocado.utils import cpu, process +from virttest import utils_test, utils_time def run(test, params, env): @@ -29,6 +24,7 @@ def run(test, params, env): :param params: Dictionary with test parameters. :param env: Dictionary with the test environment. """ + # Helper functions def set_cpu_affinity(pid, mask): """ @@ -39,18 +35,19 @@ def set_cpu_affinity(pid, mask): :param mask: The CPU affinity mask. :return: A dict containing the previous mask for each thread. """ - tids = process.run("ps -L --pid=%s -o lwp=" % pid, - verbose=False, - ignore_status=True).stdout_text.split() + tids = process.run( + f"ps -L --pid={pid} -o lwp=", verbose=False, ignore_status=True + ).stdout_text.split() prev_masks = {} for tid in tids: prev_mask = process.run( - "taskset -p %s" % tid, verbose=False).stdout_text.split()[-1] + f"taskset -p {tid}", verbose=False + ).stdout_text.split()[-1] prev_masks[tid] = prev_mask - process.system("taskset -p %s %s" % (mask, tid), verbose=False) - children = process.run("ps --ppid=%s -o pid=" % pid, - verbose=False, - ignore_status=True).stdout_text.split() + process.system(f"taskset -p {mask} {tid}", verbose=False) + children = process.run( + f"ps --ppid={pid} -o pid=", verbose=False, ignore_status=True + ).stdout_text.split() for child in children: prev_masks.update(set_cpu_affinity(child, mask)) return prev_masks @@ -62,17 +59,14 @@ def restore_cpu_affinity(prev_masks): :param prev_masks: A dict containing TIDs as keys and masks as values. """ for tid, mask in prev_masks.items(): - process.system("taskset -p %s %s" % (mask, tid), verbose=False, - ignore_status=True) + process.system( + f"taskset -p {mask} {tid}", verbose=False, ignore_status=True + ) # Taking this as a workaround to avoid getting errors during # pickling with Python versions prior to 3.7. 
global _picklable_logger - if sys.version_info < (3, 7): - def _picklable_logger(*args, **kwargs): - return test.log.debug(*args, **kwargs) - else: - _picklable_logger = test.log.debug + _picklable_logger = test.log.debug vm = env.get_vm(params["main_vm"]) vm.verify_alive() @@ -80,9 +74,9 @@ def _picklable_logger(*args, **kwargs): boot_option_added = params.get("boot_option_added") boot_option_removed = params.get("boot_option_removed") if boot_option_added or boot_option_removed: - utils_test.update_boot_option(vm, - args_removed=boot_option_removed, - args_added=boot_option_added) + utils_test.update_boot_option( + vm, args_removed=boot_option_removed, args_added=boot_option_added + ) if params["os_type"] == "windows": utils_time.sync_timezone_win(vm) @@ -113,8 +107,7 @@ def _picklable_logger(*args, **kwargs): load_duration = float(params.get("load_duration", "30")) rest_duration = float(params.get("rest_duration", "10")) drift_threshold = float(params.get("drift_threshold", "200")) - drift_threshold_after_rest = float(params.get("drift_threshold_after_rest", - "200")) + drift_threshold_after_rest = float(params.get("drift_threshold_after_rest", "200")) test_duration = float(params.get("test_duration", "60")) interval_gettime = float(params.get("interval_gettime", "20")) guest_load_sessions = [] @@ -140,10 +133,9 @@ def _picklable_logger(*args, **kwargs): # Get time before load # (ht stands for host time, gt stands for guest time) - (ht0, gt0) = utils_test.get_time(session, - time_command, - time_filter_re, - time_format) + (ht0, gt0) = utils_test.get_time( + session, time_command, time_filter_re, time_format + ) # Run some load on the guest if params["os_type"] == "linux": @@ -156,10 +148,12 @@ def _picklable_logger(*args, **kwargs): # Run some load on the host test.log.info("Starting load on host...") for i in range(host_load_instances): - load_cmd = aexpect.run_bg(host_load_command, - output_func=_picklable_logger, - output_prefix="(host load %d) " % i, - timeout=0.5) + load_cmd = aexpect.run_bg( + host_load_command, + output_func=_picklable_logger, + output_prefix="(host load %d) " % i, + timeout=0.5, + ) host_load_sessions.append(load_cmd) # Set the CPU affinity of the load process pid = load_cmd.get_pid() @@ -172,10 +166,9 @@ def _picklable_logger(*args, **kwargs): start_time = time.time() while (time.time() - start_time) < test_duration: # Get time delta after load - (ht1, gt1) = utils_test.get_time(session, - time_command, - time_filter_re, - time_format) + (ht1, gt1) = utils_test.get_time( + session, time_command, time_filter_re, time_format + ) # Report results host_delta = ht1 - ht0 @@ -204,31 +197,28 @@ def _picklable_logger(*args, **kwargs): time.sleep(rest_duration) # Get time after rest - (ht2, gt2) = utils_test.get_time(session, - time_command, - time_filter_re, - time_format) + (ht2, gt2) = utils_test.get_time( + session, time_command, time_filter_re, time_format + ) finally: session.close() # remove flags add for this test. 
if boot_option_added or boot_option_removed: - utils_test.update_boot_option(vm, - args_removed=boot_option_added, - args_added=boot_option_removed) + utils_test.update_boot_option( + vm, args_removed=boot_option_added, args_added=boot_option_removed + ) # Report results host_delta_total = ht2 - ht0 guest_delta_total = gt2 - gt0 drift_total = 100.0 * (host_delta_total - guest_delta_total) / host_delta test.log.info("Total host duration including rest: %.2f", host_delta_total) - test.log.info( - "Total guest duration including rest: %.2f", guest_delta_total) + test.log.info("Total guest duration including rest: %.2f", guest_delta_total) test.log.info("Total drift after rest: %.2f%%", drift_total) # Fail the test if necessary if abs(drift) > drift_threshold: - test.fail("Time drift too large: %.2f%%" % drift) + test.fail(f"Time drift too large: {drift:.2f}%") if abs(drift_total) > drift_threshold_after_rest: - test.fail("Time drift too large after rest period: %.2f%%" - % drift_total) + test.fail(f"Time drift too large after rest period: {drift_total:.2f}%") diff --git a/qemu/tests/timedrift_adjust_time.py b/qemu/tests/timedrift_adjust_time.py index adb548e3dd..4d01fc3656 100644 --- a/qemu/tests/timedrift_adjust_time.py +++ b/qemu/tests/timedrift_adjust_time.py @@ -1,20 +1,16 @@ +import logging import re import time -import logging from avocado.utils import process -from virttest import env_process -from virttest import test_setup -from virttest import error_context -from virttest import utils_time +from virttest import env_process, error_context, test_setup, utils_time from generic.tests.guest_suspend import GuestSuspendBaseTest -LOG_JOB = logging.getLogger('avocado.test') +LOG_JOB = logging.getLogger("avocado.test") -class TimedriftTest(object): - +class TimedriftTest: """ Base class for time drift test, include common steps for time drift test; """ @@ -119,17 +115,15 @@ def get_epoch_seconds(self, session): host_epoch_time_cmd = self.params["host_epoch_time_cmd"] guest_epoch_time_cmd = self.params["guest_epoch_time_cmd"] try: - guest_timestr = session.cmd_output( - guest_epoch_time_cmd, - timeout=240) - host_timestr = process.system_output(host_epoch_time_cmd, - shell=True).decode() + guest_timestr = session.cmd_output(guest_epoch_time_cmd, timeout=240) + host_timestr = process.system_output( + host_epoch_time_cmd, shell=True + ).decode() epoch_host, epoch_guest = list( - map(lambda x: re.findall(regex, x)[0], - [host_timestr, guest_timestr])) + map(lambda x: re.findall(regex, x)[0], [host_timestr, guest_timestr]) + ) except IndexError: - LOG_JOB.debug("Host Time: %s," % guest_timestr + - "Guest Time: %s" % guest_timestr) + LOG_JOB.debug("Host Time: %s, Guest Time: %s", host_timestr, guest_timestr) return list(map(float, [epoch_host, epoch_guest])) def get_hwtime(self, session): @@ -138,19 +132,21 @@ def get_hwtime(self, session): :param session: VM session. 
""" - hwclock_time_command = self.params.get("hwclock_time_command", - "hwclock -u") - hwclock_time_filter_re = self.params.get("hwclock_time_filter_re", - r"(\d+-\d+-\d+ \d+:\d+:\d+).*") - hwclock_time_format = self.params.get("hwclock_time_format", - "%Y-%m-%d %H:%M:%S") + hwclock_time_command = self.params.get("hwclock_time_command", "hwclock -u") + hwclock_time_filter_re = self.params.get( + "hwclock_time_filter_re", r"(\d+-\d+-\d+ \d+:\d+:\d+).*" + ) + hwclock_time_format = self.params.get( + "hwclock_time_format", "%Y-%m-%d %H:%M:%S" + ) output = session.cmd_output_safe(hwclock_time_command) try: str_time = re.findall(hwclock_time_filter_re, output)[0] guest_time = time.mktime(time.strptime(str_time, hwclock_time_format)) except Exception as err: LOG_JOB.debug( - "(time_format, output): (%s, %s)", hwclock_time_format, output) + "(time_format, output): (%s, %s)", hwclock_time_format, output + ) raise err return guest_time @@ -167,9 +163,11 @@ def verify_clock_source(self, session): real_clock_source = session.cmd_output(read_clock_source_cmd) expect_clock_source = self.params["clock_source"] if expect_clock_source not in real_clock_source: - self.test.fail("Expect clock source: " + - expect_clock_source + - "Real clock source: %s" % real_clock_source) + self.test.fail( + "Expect clock source: " + + expect_clock_source + + f"Real clock source: {real_clock_source}" + ) @error_context.context_aware def cleanup(self): @@ -179,13 +177,12 @@ def cleanup(self): class BackwardtimeTest(TimedriftTest): - """ Base class for test time drift after backward host/guest system clock; """ def __init__(self, test, params, env): - super(BackwardtimeTest, self).__init__(test, params, env) + super().__init__(test, params, env) @error_context.context_aware def set_time(self, nsec, session=None): @@ -197,9 +194,9 @@ def set_time(self, nsec, session=None): :param session: ShellSession object; """ target = session and "guest" or "host" - step = "Forward %s time %s seconds" % (target, nsec) + step = f"Forward {target} time {nsec} seconds" error_context.context(step, LOG_JOB.info) - cmd = self.params.get("set_%s_time_cmd" % target) + cmd = self.params.get(f"set_{target}_time_cmd") return self.execute(cmd, session) @error_context.context_aware @@ -213,7 +210,7 @@ def check_drift_after_adjust_time(self, session): """ target = self.params.get("set_host_time_cmd") and "host" or "guest" step_info = "Check time difference between host and guest" - step_info += " after forward %s time" % target + step_info += f" after forward {target} time" error_context.context(step_info, LOG_JOB.info) tolerance = float(self.params["tolerance"]) timeout = float(self.params.get("workaround_timeout", 1.0)) @@ -222,27 +219,30 @@ def check_drift_after_adjust_time(self, session): while time.time() < start_time + timeout: host_epoch_time, guest_epoch_time = self.get_epoch_seconds(session) real_difference = abs(host_epoch_time - guest_epoch_time) - if self.params["os_type"] == 'linux': - expect_difference_hwclock = float(self.params["time_difference_hwclock"]) + if self.params["os_type"] == "linux": + expect_difference_hwclock = float( + self.params["time_difference_hwclock"] + ) guest_hwtime = self.get_hwtime(session) real_difference_hw = abs(host_epoch_time - guest_hwtime) - if abs(real_difference - expect_difference) < tolerance and \ - abs(real_difference_hw - expect_difference_hwclock) < tolerance: + if ( + abs(real_difference - expect_difference) < tolerance + and abs(real_difference_hw - expect_difference_hwclock) < tolerance + ): 
return else: if abs(real_difference - expect_difference) < tolerance: return LOG_JOB.info("Host epoch time: %s", host_epoch_time) LOG_JOB.info("Guest epoch time: %s", guest_epoch_time) - if self.params["os_type"] == 'linux': + if self.params["os_type"] == "linux": LOG_JOB.info("Guest hardware time: %s", guest_hwtime) - err_msg = "Unexpected sys and hardware time difference (%s %s)\ - between host and guest after adjusting time." \ - % (real_difference, real_difference_hw) + err_msg = f"Unexpected sys and hardware time difference ({real_difference} {real_difference_hw})\ + between host and guest after adjusting time." else: err_msg = "Unexpected time difference between host and guest after" - err_msg += " testing.(actual difference: %s)" % real_difference - err_msg += " expected difference: %s)" % expect_difference + err_msg += f" testing.(actual difference: {real_difference})" + err_msg += f" expected difference: {expect_difference})" self.test.fail(err_msg) @error_context.context_aware @@ -256,27 +256,26 @@ def check_dirft_before_adjust_time(self, session): """ target = self.params.get("set_host_time_cmd") and "host" or "guest" step_info = "Check time difference between host and guest" - step_info += " before forward %s time" % target + step_info += f" before forward {target} time" error_context.context(step_info, LOG_JOB.info) tolerance = float(self.params.get("tolerance", 6)) host_epoch_time, guest_epoch_time = self.get_epoch_seconds(session) real_difference = abs(host_epoch_time - guest_epoch_time) - if self.params["os_type"] == 'linux': + if self.params["os_type"] == "linux": guest_hwtime = self.get_hwtime(session) real_difference_hw = abs(host_epoch_time - guest_hwtime) if real_difference > tolerance or real_difference_hw > tolerance: LOG_JOB.info("Host epoch time: %s", host_epoch_time) LOG_JOB.info("Guest epoch time: %s", guest_epoch_time) LOG_JOB.info("Guest hardware time: %s", guest_hwtime) - err_msg = "Unexpected sys and hardware time difference (%s %s) \ - between host and guest before testing."\ - % (real_difference, real_difference_hw) + err_msg = f"Unexpected sys and hardware time difference ({real_difference} {real_difference_hw}) \ + between host and guest before testing." self.test.fail(err_msg) else: if real_difference > tolerance: LOG_JOB.info("Host epoch time: %s", host_epoch_time) LOG_JOB.info("Guest epoch time: %s", guest_epoch_time) - err_msg = "Unexcept time difference (%s) " % real_difference + err_msg = f"Unexpected time difference ({real_difference})" err_msg += " between host and guest before testing." self.test.fail(err_msg) @@ -291,7 +290,7 @@ def pre_test(self): self.setup_private_network() self.sync_host_time() vm = self.get_vm(create=True) - if self.params["os_type"] == 'windows': + if self.params["os_type"] == "windows": utils_time.sync_timezone_win(vm) else: utils_time.sync_timezone_linux(vm) @@ -337,8 +336,8 @@ def run(test, params, env): :param params: Dictionary with test parameters. :param env: Dictionary with the test environment.
""" - class TestReboot(BackwardtimeTest): + class TestReboot(BackwardtimeTest): """ Test Steps: 5) Forward host/guest system time 30 mins @@ -346,7 +345,7 @@ class TestReboot(BackwardtimeTest): """ def __init__(self, test, params, env): - super(TestReboot, self).__init__(test, params, env) + super().__init__(test, params, env) @error_context.context_aware def reboot(self): @@ -362,10 +361,9 @@ def reboot(self): def run(self): fuc = self.reboot - return super(TestReboot, self).run(fuc) + return super().run(fuc) class TestPauseresume(BackwardtimeTest): - """ Test Steps: 5) Forward host system time 30mins @@ -373,14 +371,13 @@ class TestPauseresume(BackwardtimeTest): """ def __init__(self, test, params, env): - super(TestPauseresume, self).__init__(test, params, env) + super().__init__(test, params, env) @error_context.context_aware def pause_resume(self): vm = self.get_vm() sleep_seconds = float(params.get("sleep_seconds", 1800)) - error_context.context("Pause guest %s seconds" % sleep_seconds, - test.log.info) + error_context.context(f"Pause guest {sleep_seconds} seconds", test.log.info) vm.pause() seconds_to_forward = int(self.params.get("seconds_to_forward", 0)) if seconds_to_forward: @@ -391,10 +388,9 @@ def pause_resume(self): def run(self): fuc = self.pause_resume - return super(TestPauseresume, self).run(fuc) + return super().run(fuc) class TestSuspendresume(BackwardtimeTest, GuestSuspendBaseTest): - """ Test Steps: 5) Suspend guest 30 mins, then resume it; @@ -423,8 +419,9 @@ def _get_session(self): @error_context.context_aware def action_during_suspend(self, **args): sleep_seconds = float(self.params.get("sleep_seconds", 1800)) - error_context.context("Sleep %s seconds before resume" % - sleep_seconds, test.log.info) + error_context.context( + f"Sleep {sleep_seconds} seconds before resume", test.log.info + ) seconds_to_forward = int(self.params.get("seconds_to_forward", 0)) if seconds_to_forward: self.set_time(seconds_to_forward) @@ -440,11 +437,11 @@ def suspend_resume(self): def run(self): fuc = self.suspend_resume - return super(TestSuspendresume, self).run(fuc) + return super().run(fuc) vm_action = params["vm_action"].replace("_", "") vm_action = vm_action.capitalize() - test_name = "Test%s" % vm_action + test_name = f"Test{vm_action}" SubTest = locals().get(test_name) if issubclass(SubTest, BackwardtimeTest): timedrift_test = SubTest(test, params, env) diff --git a/qemu/tests/timedrift_check_after_load_vm.py b/qemu/tests/timedrift_check_after_load_vm.py index 4ad8d8ae21..92b2ee5716 100644 --- a/qemu/tests/timedrift_check_after_load_vm.py +++ b/qemu/tests/timedrift_check_after_load_vm.py @@ -2,9 +2,7 @@ import time from avocado.utils import process -from virttest import env_process -from virttest import error_context -from virttest import arch +from virttest import arch, env_process, error_context from qemu.tests.qemu_guest_agent import QemuGuestAgentTest @@ -28,37 +26,41 @@ def run(test, params, env): :param params: Dictionary with the test parameters :param env: Dictionary with test environment. 
""" + def _load_kvm_module_with_kvmclock_periodic_sync(module_param): """ - Load kvm module with kvmclock_periodic_sync=N/Y + Load kvm module with kvmclock_periodic_sync=N/Y - :params module_param: the value of kvmclock_periodic_sync - """ - error_context.context("Load kvm module with kvmclock_periodic_sync=%s" - % module_param, test.log.info) + :params module_param: the value of kvmclock_periodic_sync + """ + error_context.context( + f"Load kvm module with kvmclock_periodic_sync={module_param}", + test.log.info, + ) check_modules = arch.get_kvm_module_list() - error_context.context("check_module: '%s'" % check_modules, test.log.info) + error_context.context(f"check_module: '{check_modules}'", test.log.info) check_modules.reverse() for module in check_modules: - rm_mod_cmd = "modprobe -r %s" % module + rm_mod_cmd = f"modprobe -r {module}" process.system(rm_mod_cmd, shell=True) check_modules.reverse() for module in check_modules: - load_mod_cmd = "modprobe %s" % module + load_mod_cmd = f"modprobe {module}" if module == "kvm": - load_mod_cmd = "%s kvmclock_periodic_sync=%s" % (load_mod_cmd, module_param) + load_mod_cmd = f"{load_mod_cmd} kvmclock_periodic_sync={module_param}" process.system(load_mod_cmd, shell=True) check_mod_cmd = params["check_mod_cmd"] if process.system_output(check_mod_cmd).decode() != module_param: - test.error("Cannot load kvm module with kvmclock_periodic_sync=%s" - % module_param) + test.error( + f"Cannot load kvm module with kvmclock_periodic_sync={module_param}" + ) def setup(): """ On host, load kvm module with "kvmclock_periodic_sync=N" sync time with ntp server and boot the guest """ - if arch.ARCH not in ('ppc64', 'ppc64le'): + if arch.ARCH not in ("ppc64", "ppc64le"): _load_kvm_module_with_kvmclock_periodic_sync("N") error_context.context("Sync host time with ntp server", test.log.info) ntp_cmd = params.get("ntp_cmd") @@ -81,7 +83,7 @@ def cleanup(): session.close() env.unregister_vm(vm.name) vm.destroy(gracefully=False, free_mac_addresses=True) - if arch.ARCH not in ('ppc64', 'ppc64le'): + if arch.ARCH not in ("ppc64", "ppc64le"): _load_kvm_module_with_kvmclock_periodic_sync("Y") def setup_gagent(): @@ -111,7 +113,7 @@ def query_ntp_time(): output = session.cmd_output_safe(ntp_query_cmd) error_context.context("Verify guest time offset", test.log.info) offset = float(re.findall(r"[+|-]*\s*(\d+\.\d+)\s*sec", output)[-1]) - error_context.context("offset: '%.2f'" % offset, test.log.info) + error_context.context(f"offset: '{offset:.2f}'", test.log.info) exptected_time_drift = params.get("expected_time_drift", 3) if offset > float(exptected_time_drift): test.fail("After loadvm, the time drift of guest is too large.") @@ -119,17 +121,16 @@ def query_ntp_time(): vm = setup() session = vm.wait_for_login() try: - error_context.context("Check the clocksource currently in use", - test.log.info) + error_context.context("Check the clocksource currently in use", test.log.info) clocksource = params.get("clocksource", "kvm-clock") clocksource_cmd = "cat /sys/devices/system/clocksource/clocksource0" clocksource_cmd += "/current_clocksource" currentsource = session.cmd_output_safe(clocksource_cmd) if clocksource not in currentsource: - test.cancel("Mismatch clocksource, current clocksource: %s", - currentsource) - error_context.context("Stop chronyd and sync guest time with ntp server", - test.log.info) + test.cancel("Mismatch clocksource, current clocksource: %s", currentsource) + error_context.context( + "Stop chronyd and sync guest time with ntp server", test.log.info + ) 
ntp_cmd = params.get("ntp_cmd") status, output = session.cmd_status_output(ntp_cmd) if status != 0: @@ -137,7 +138,7 @@ def query_ntp_time(): error_context.context("Setup qemu-guest-agent in guest", test.log.info) gagent = setup_gagent() - qmp_ports = vm.get_monitors_by_type('qmp') + qmp_ports = vm.get_monitors_by_type("qmp") qmp_port = None if qmp_ports: qmp_port = qmp_ports[0] @@ -152,9 +153,11 @@ def query_ntp_time(): error_context.context("Execute 'guest-set-time' in qmp monitor") gagent.gagent.set_time() - error_context.context("Execute 'rtc-reset-reinjection' in qmp" - " monitor, not for power platform", test.log.info) - if arch.ARCH not in ('ppc64', 'ppc64le'): + error_context.context( + "Execute 'rtc-reset-reinjection' in qmp" " monitor, not for power platform", + test.log.info, + ) + if arch.ARCH not in ("ppc64", "ppc64le"): qmp_rtc_reset_cmd = params["qmp_rtc_reset_cmd"] run_qmp_cmd(qmp_port, qmp_rtc_reset_cmd) diff --git a/qemu/tests/timedrift_check_non_event.py b/qemu/tests/timedrift_check_non_event.py index 4832dbedcf..990c02c9af 100644 --- a/qemu/tests/timedrift_check_non_event.py +++ b/qemu/tests/timedrift_check_non_event.py @@ -2,7 +2,6 @@ import time from avocado.utils import process - from virttest import error_context @@ -22,25 +21,26 @@ def run(test, params, env): :param params: Dictionary with test parameters. :param env: Dictionary with the test environment. """ + def get_hwtime(session): """ Get guest's hardware clock in epoch. :param session: VM session. """ - hwclock_time_command = params.get("hwclock_time_command", - "hwclock -u") - hwclock_time_filter_re = params.get("hwclock_time_filter_re", - r"(\d+-\d+-\d+ \d+:\d+:\d+).*") - hwclock_time_format = params.get("hwclock_time_format", - "%Y-%m-%d %H:%M:%S") + hwclock_time_command = params.get("hwclock_time_command", "hwclock -u") + hwclock_time_filter_re = params.get( + "hwclock_time_filter_re", r"(\d+-\d+-\d+ \d+:\d+:\d+).*" + ) + hwclock_time_format = params.get("hwclock_time_format", "%Y-%m-%d %H:%M:%S") output = session.cmd_output_safe(hwclock_time_command) try: str_time = re.findall(hwclock_time_filter_re, output)[0] guest_time = time.mktime(time.strptime(str_time, hwclock_time_format)) except Exception as err: test.log.debug( - "(time_format, output): (%s, %s)", hwclock_time_format, output) + "(time_format, output): (%s, %s)", hwclock_time_format, output + ) raise err return guest_time @@ -71,15 +71,17 @@ def get_hwtime(session): qom_gap = int(qom_st2["tm_hour"]) - int(qom_st1["tm_hour"]) if (qom_gap < 1) or (qom_gap > 2): - test.fail("Unexpected offset in qom-get, " - "qom-get result before change guest's RTC time: %s, " - "qom-get result after change guest's RTC time: %s" - % (qom_st1, qom_st2)) + test.fail( + "Unexpected offset in qom-get, " + f"qom-get result before change guest's RTC time: {qom_st1}, " + f"qom-get result after change guest's RTC time: {qom_st2}" + ) error_context.context("Verify guest hardware time", test.log.info) hwclock_st2 = get_hwtime(session) test.log.debug("hwclock: guest time=%ss", hwclock_st2) session.close() if (hwclock_st1 - hwclock_st2 - float(time_forward)) > float(drift_threshold): - test.fail("Unexpected hwclock drift, " - "hwclock: current guest time=%ss" % hwclock_st2) + test.fail( + "Unexpected hwclock drift, " f"hwclock: current guest time={hwclock_st2}s" + ) diff --git a/qemu/tests/timedrift_check_when_crash.py b/qemu/tests/timedrift_check_when_crash.py index 591f3982f8..64f77af5b4 100644 --- a/qemu/tests/timedrift_check_when_crash.py +++ 
b/qemu/tests/timedrift_check_when_crash.py @@ -1,12 +1,10 @@ -import time import re +import time from avocado.utils import process +from virttest import error_context, utils_test, utils_time from virttest.env_process import preprocess from virttest.virt_vm import VMDeadKernelCrashError -from virttest import error_context -from virttest import utils_test -from virttest import utils_time @error_context.context_aware @@ -41,7 +39,7 @@ def run(test, params, env): vm = env.get_vm(params["main_vm"]) vm.verify_alive() - if params["os_type"] == 'windows': + if params["os_type"] == "windows": utils_time.sync_timezone_win(vm) timeout = int(params.get("login_timeout", 360)) @@ -67,7 +65,7 @@ def run(test, params, env): else: test.fail("Guest OS still alive ...") - error_context.context("sleep %s seconds" % sleep_time, test.log.info) + error_context.context(f"sleep {sleep_time} seconds", test.log.info) time.sleep(sleep_time) # Autotest parses serial output and could raise VMDeadKernelCrash # we generated using sysrq. Ignore one "BUG:" line @@ -76,24 +74,32 @@ def run(test, params, env): session = vm.reboot(method="system_reset") except VMDeadKernelCrashError as details: details = str(details) - if (re.findall(r"Trigger a crash\s.*BUG:", details, re.M) and - details.count("BUG:") != 1): - test.fail("Got multiple kernel crashes. Please " - "note that one of them was " - "intentionally generated by sysrq in " - "this test.\n%s" % details) + if ( + re.findall(r"Trigger a crash\s.*BUG:", details, re.M) + and details.count("BUG:") != 1 + ): + test.fail( + "Got multiple kernel crashes. Please " + "note that one of them was " + "intentionally generated by sysrq in " + f"this test.\n{details}" + ) end_time = time.time() + timeout while time.time() < end_time: try: session = vm.wait_for_login(timeout=timeout) except VMDeadKernelCrashError as details: details = str(details) - if (re.findall(r"Trigger a crash\s.*BUG:", details, - re.M) and details.count("BUG:") != 1): - test.fail("Got multiple kernel crashes. " - "Please note that one of them was " - "intentionally generated by sysrq " - "in this test.\n%s" % details) + if ( + re.findall(r"Trigger a crash\s.*BUG:", details, re.M) + and details.count("BUG:") != 1 + ): + test.fail( + "Got multiple kernel crashes. 
" + "Please note that one of them was " + "intentionally generated by sysrq " + f"in this test.\n{details}" + ) else: break @@ -104,5 +110,4 @@ def run(test, params, env): except IndexError: offset = 0.0 if float(offset) > deviation: - test.fail("Unacceptable offset '%s', " % offset + - "deviation '%s'" % deviation) + test.fail(f"Unacceptable offset '{offset}', " + f"deviation '{deviation}'") diff --git a/qemu/tests/timedrift_check_when_hotplug_vcpu.py b/qemu/tests/timedrift_check_when_hotplug_vcpu.py index ab4878a1ac..d42dd79211 100644 --- a/qemu/tests/timedrift_check_when_hotplug_vcpu.py +++ b/qemu/tests/timedrift_check_when_hotplug_vcpu.py @@ -2,10 +2,7 @@ import time from avocado.utils import process - -from virttest import error_context -from virttest import utils_test -from virttest import utils_time +from virttest import error_context, utils_test, utils_time @error_context.context_aware @@ -32,7 +29,7 @@ def run(test, params, env): process.system(ntp_host_cmd, shell=True) vm = env.get_vm(params["main_vm"]) - if params["os_type"] == 'windows': + if params["os_type"] == "windows": utils_time.sync_timezone_win(vm) session = vm.wait_for_login() @@ -53,8 +50,10 @@ def run(test, params, env): vm.hotplug_vcpu_device(params["vcpu_devices"]) time.sleep(1) else: - test.error("Invalid operation, valid index range 0:%d, used range 0:%d" - % (int(params["vcpus_maxcpus"])-1, int(params["smp"]) - 1)) + test.error( + "Invalid operation, valid index range 0:%d, used range 0:%d" + % (int(params["vcpus_maxcpus"]) - 1, int(params["smp"]) - 1) + ) error_context.context("Check time offset via ntp server", test.log.info) for query in range(query_times): @@ -64,7 +63,8 @@ def run(test, params, env): except IndexError: test.error("Failed to get time offset") if float(offset) >= drift_threshold: - test.fail("Uacceptable offset '%s', " % offset + - "threshold '%s'" % drift_threshold) + test.fail( + f"Uacceptable offset '{offset}', " + f"threshold '{drift_threshold}'" + ) time.sleep(query_internal) session.close() diff --git a/qemu/tests/timedrift_check_with_syscall.py b/qemu/tests/timedrift_check_with_syscall.py index 5af06ccde4..37f5eac6b3 100644 --- a/qemu/tests/timedrift_check_with_syscall.py +++ b/qemu/tests/timedrift_check_with_syscall.py @@ -1,8 +1,7 @@ import os import aexpect -from virttest import data_dir -from virttest import error_context +from virttest import data_dir, error_context @error_context.context_aware @@ -27,12 +26,13 @@ def run(test, params, env): session = vm.wait_for_login(timeout=timeout) test_cmd = params.get("test_cmd", "./clktest") - if session.cmd_status("test -x %s" % test_cmd): - src_dir = os.path.join(data_dir.get_deps_dir(), 'timedrift') + if session.cmd_status(f"test -x {test_cmd}"): + src_dir = os.path.join(data_dir.get_deps_dir(), "timedrift") src_file = os.path.join(src_dir, "clktest.c") dst_file = os.path.join(tmp_dir, "clktest.c") - error_context.context("transfer '%s' to guest('%s')" % - (src_file, dst_file), test.log.info) + error_context.context( + f"transfer '{src_file}' to guest('{dst_file}')", test.log.info + ) vm.copy_files_to(src_file, tmp_dir, timeout=120) build_cmd = params.get("build_cmd", "gcc -lrt clktest.c -o clktest") @@ -44,6 +44,6 @@ def run(test, params, env): try: session.cmd_output(test_cmd, timeout=check_timeout) except aexpect.ShellTimeoutError as msg: - if 'Interval is' in msg.output: + if "Interval is" in msg.output: test.fail(msg.output) pass diff --git a/qemu/tests/timedrift_monotonicity.py b/qemu/tests/timedrift_monotonicity.py index 
9d27ece790..dcad86cd8e 100644 --- a/qemu/tests/timedrift_monotonicity.py +++ b/qemu/tests/timedrift_monotonicity.py @@ -1,11 +1,9 @@ import os -import time import re import shutil +import time -from virttest import utils_test -from virttest import utils_misc -from virttest import utils_time +from virttest import utils_misc, utils_test, utils_time def run(test, params, env): @@ -23,6 +21,7 @@ def run(test, params, env): :param params: Dictionary with test parameters. :param env: Dictionary with the test environment. """ + def get_time(cmd, test_time, session): if os.path.isfile(host_path): os.remove(host_path) @@ -31,14 +30,13 @@ def get_time(cmd, test_time, session): start_time = time.time() while (time.time() - start_time) < test_time: tv = session.cmd_output(cmd, timeout=cmd_timeout) - if params.get("os_type") == 'windows': - list = re.split('[:]', tv) - tv = str(int(list[0]) * 3600 + int( - list[1]) * 60 + float(list[2])) + if params.get("os_type") == "windows": + list = re.split("[:]", tv) + tv = str(int(list[0]) * 3600 + int(list[1]) * 60 + float(list[2])) if float(tv) < float(lasttv): p_tv = "time value = " + tv + "\n" p_lasttv = "last time value = " + lasttv + "\n" - with open(host_path, 'a') as time_log: + with open(host_path, "a") as time_log: time_log.write("time went backwards:\n" + p_tv + p_lasttv) lasttv = tv time.sleep(0.1) @@ -49,11 +47,11 @@ def get_time(cmd, test_time, session): boot_option_added = params.get("boot_option_added") boot_option_removed = params.get("boot_option_removed") if boot_option_added or boot_option_removed: - utils_test.update_boot_option(vm, - args_removed=boot_option_removed, - args_added=boot_option_added) + utils_test.update_boot_option( + vm, args_removed=boot_option_removed, args_added=boot_option_added + ) - if params["os_type"] == 'windows': + if params["os_type"] == "windows": utils_time.sync_timezone_win(vm) timeout = int(params.get("login_timeout", 360)) @@ -85,18 +83,19 @@ def get_time(cmd, test_time, session): # analyse the result if os.path.isfile(host_path): - log_dir = os.path.join(test.outputdir, - "timedrift-monotonicity-result.txt") + log_dir = os.path.join(test.outputdir, "timedrift-monotonicity-result.txt") shutil.copyfile(host_path, log_dir) - with open(host_path, 'r') as myfile: + with open(host_path, "r") as myfile: for line in myfile: if "time went backwards" in line: - test.fail("Failed Time Monotonicity testing, " - "Please check log %s" % host_path) + test.fail( + "Failed Time Monotonicity testing, " + f"Please check log {host_path}" + ) finally: session1.close() # remove flags add for this test. 
if boot_option_added or boot_option_removed: - utils_test.update_boot_option(vm, - args_removed=boot_option_added, - args_added=boot_option_removed) + utils_test.update_boot_option( + vm, args_removed=boot_option_added, args_added=boot_option_removed + ) diff --git a/qemu/tests/timedrift_no_net.py b/qemu/tests/timedrift_no_net.py index 494e06107c..ff876e0b9a 100644 --- a/qemu/tests/timedrift_no_net.py +++ b/qemu/tests/timedrift_no_net.py @@ -1,16 +1,14 @@ import time from avocado.utils import process -from virttest import utils_test -from virttest import error_context +from virttest import error_context, utils_test from generic.tests.guest_suspend import GuestSuspendBaseTest class GuestSuspendSerialConsole(GuestSuspendBaseTest): - def __init__(self, test, params, vm, session): - super(GuestSuspendSerialConsole, self).__init__(test, params, vm) + super().__init__(test, params, vm) def _get_session(self): self.vm.verify_alive() @@ -19,8 +17,7 @@ def _get_session(self): @error_context.context_aware def action_during_suspend(self, **args): - error_context.context("Sleep a while before resuming guest", - self.test.log.info) + error_context.context("Sleep a while before resuming guest", self.test.log.info) time.sleep(float(self.params.get("wait_timeout", "1800"))) if self.os_type == "windows": @@ -41,24 +38,22 @@ def subw_guest_suspend(test, params, vm, session): error_context.context("Suspend vm to disk", test.log.info) gs.guest_suspend_disk(params) else: - test.error("Unknown guest suspend type, Check your" - " 'guest_suspend_type' config.") + test.error( + "Unknown guest suspend type, Check your" " 'guest_suspend_type' config." + ) def subw_guest_pause_resume(test, params, vm, session): vm.monitor.cmd("stop") if not vm.monitor.verify_status("paused"): - test.error("VM is not paused Current status: %s" % - vm.monitor.get_status()) + test.error(f"VM is not paused Current status: {vm.monitor.get_status()}") time.sleep(float(params.get("wait_timeout", "1800"))) vm.monitor.cmd("cont") if not vm.monitor.verify_status("running"): - test.error("VM is not running. Current status: %s" % - vm.monitor.get_status()) + test.error(f"VM is not running. Current status: {vm.monitor.get_status()}") -def time_diff(host_guest_time_before, - host_guest_time_after): +def time_diff(host_guest_time_before, host_guest_time_after): """ Function compares diff of host and guest time before and after. It allows compare time in different timezones. 
@@ -87,17 +82,16 @@ def run(test, params, env): """ login_timeout = int(params.get("login_timeout", "240")) guest_clock_source = params.get("guest_clock_source", "kvm-clock") - date_time_command = params.get("date_time_command", - "date -u +'TIME: %a %m/%d/%Y %H:%M:%S.%N'") - date_time_filter_re = params.get("date_time_filter_re", - r"(?:TIME: \w\w\w )(.{19})(.+)") - date_time_format = params.get("date_time_format", - "%m/%d/%Y %H:%M:%S") + date_time_command = params.get( + "date_time_command", "date -u +'TIME: %a %m/%d/%Y %H:%M:%S.%N'" + ) + date_time_filter_re = params.get( + "date_time_filter_re", r"(?:TIME: \w\w\w )(.{19})(.+)" + ) + date_time_format = params.get("date_time_format", "%m/%d/%Y %H:%M:%S") hwclock_time_command = params.get("hwclock_time_command") - hwclock_time_filter_re = params.get("hwclock_time_filter_re", - r"(.+)") - hwclock_time_format = params.get("hwclock_time_format", - "%a %b %d %H:%M:%S %Y") + hwclock_time_filter_re = params.get("hwclock_time_filter_re", r"(.+)") + hwclock_time_format = params.get("hwclock_time_format", "%a %b %d %H:%M:%S %Y") tolerance = float(params.get("time_diff_tolerance", "0.5")) sub_work = params["sub_work"] @@ -112,80 +106,81 @@ def run(test, params, env): error_context.context("Check clock source on guest VM", test.log.info) session = vm.wait_for_serial_login(timeout=login_timeout) - out = session.cmd_output("cat /sys/devices/system/clocksource/" - "clocksource0/current_clocksource") + out = session.cmd_output( + "cat /sys/devices/system/clocksource/" "clocksource0/current_clocksource" + ) if guest_clock_source not in out: - test.fail("Clock source %s missing in guest clock " - "sources %s." % (guest_clock_source, out)) - - error_context.context("Get clock from host and guest VM using `date`", - test.log.info) - before_date = utils_test.get_time(session, - date_time_command, - date_time_filter_re, - date_time_format) - test.log.debug("date: host time=%ss guest time=%ss", - *before_date) - - error_context.context("Get clock from host and guest VM using `hwclock`", - test.log.info) - before_hwclock = utils_test.get_time(session, - hwclock_time_command, - hwclock_time_filter_re, - hwclock_time_format) - test.log.debug("hwclock: host time=%ss guest time=%ss", - *before_hwclock) + test.fail( + f"Clock source {guest_clock_source} missing in guest clock " + f"sources {out}." + ) + + error_context.context( + "Get clock from host and guest VM using `date`", test.log.info + ) + before_date = utils_test.get_time( + session, date_time_command, date_time_filter_re, date_time_format + ) + test.log.debug("date: host time=%ss guest time=%ss", *before_date) + + error_context.context( + "Get clock from host and guest VM using `hwclock`", test.log.info + ) + before_hwclock = utils_test.get_time( + session, hwclock_time_command, hwclock_time_filter_re, hwclock_time_format + ) + test.log.debug("hwclock: host time=%ss guest time=%ss", *before_hwclock) session.close() if sub_work in globals(): # Try to find sub work function. globals()[sub_work](test, params, vm, session) else: - test.cancel("Unable to found subwork %s in %s test file." 
% - (sub_work, __file__)) + test.cancel(f"Unable to find subwork {sub_work} in {__file__} test file.") session = vm.wait_for_serial_login(timeout=login_timeout) - error_context.context("Get clock from host and guest VM using `date`", - test.log.info) - after_date = utils_test.get_time(session, - date_time_command, - date_time_filter_re, - date_time_format) - test.log.debug("date: host time=%ss guest time=%ss", - *after_date) - - error_context.context("Get clock from host and guest VM using `hwclock`", - test.log.info) - after_hwclock = utils_test.get_time(session, - hwclock_time_command, - hwclock_time_filter_re, - hwclock_time_format) - test.log.debug("hwclock: host time=%ss guest time=%ss", - *after_hwclock) - - if test_type == 'guest_suspend': + error_context.context( + "Get clock from host and guest VM using `date`", test.log.info + ) + after_date = utils_test.get_time( + session, date_time_command, date_time_filter_re, date_time_format + ) + test.log.debug("date: host time=%ss guest time=%ss", *after_date) + + error_context.context( + "Get clock from host and guest VM using `hwclock`", test.log.info + ) + after_hwclock = utils_test.get_time( + session, hwclock_time_command, hwclock_time_filter_re, hwclock_time_format + ) + test.log.debug("hwclock: host time=%ss guest time=%ss", *after_hwclock) + + if test_type == "guest_suspend": date_diff = time_diff(before_date, after_date) hwclock_diff = time_diff(before_hwclock, after_hwclock) if date_diff > tolerance and hwclock_diff > tolerance: - test.fail("hwclock %ss and date %ss difference is " - "'guest_diff_time != host_diff_time'" - " out of tolerance %ss" % (hwclock_diff, - date_diff, - tolerance)) + test.fail( + f"hwclock {hwclock_diff}s and date {date_diff}s difference is " + "'guest_diff_time != host_diff_time'" + f" out of tolerance {tolerance}s" + ) elif date_diff > tolerance: - test.fail("date %ss difference is " - "'guest_diff_time != host_diff_time'" - " out of tolerance %ss" % (date_diff, - tolerance)) + test.fail( + f"date {date_diff}s difference is " + "'guest_diff_time != host_diff_time'" + f" out of tolerance {tolerance}s" + ) elif hwclock_diff > tolerance: - test.fail("hwclock %ss difference is " - "'guest_diff_time != host_diff_time'" - " out of tolerance %ss" % (hwclock_diff, - tolerance)) + test.fail( + f"hwclock {hwclock_diff}s difference is " + "'guest_diff_time != host_diff_time'" + f" out of tolerance {tolerance}s" + ) elif test_type == "guest_pause_resume": date_diff = time_diff(before_date, after_date) if date_diff > tolerance: - test.fail("date %ss difference is" - "'guest_time_after-guest_time_before'" - " out of tolerance %ss" % (date_diff, - tolerance)) + test.fail( + f"date {date_diff}s difference is" + "'guest_time_after-guest_time_before'" + f" out of tolerance {tolerance}s" + ) diff --git a/qemu/tests/timedrift_no_net_win.py b/qemu/tests/timedrift_no_net_win.py index abfef7bacc..c361f70a52 100644 --- a/qemu/tests/timedrift_no_net_win.py +++ b/qemu/tests/timedrift_no_net_win.py @@ -1,21 +1,18 @@ import time from avocado.utils import process -from virttest import utils_test -from virttest import error_context +from virttest import error_context, utils_test from generic.tests.guest_suspend import GuestSuspendBaseTest class GuestSuspendSerialConsole(GuestSuspendBaseTest): - def __init__(self, test, params, vm, session): - super(GuestSuspendSerialConsole, self).__init__(test, params, vm) + super().__init__(test, params, vm) @error_context.context_aware def action_during_suspend(self, **args): -
error_context.context("Sleep a while before resuming guest", - self.test.log.info) + error_context.context("Sleep a while before resuming guest", self.test.log.info) time.sleep(float(self.params.get("wait_timeout", "1800"))) if self.os_type == "windows": @@ -36,24 +33,22 @@ def subw_guest_suspend(test, params, vm, session): error_context.context("Suspend vm to disk", test.log.info) gs.guest_suspend_disk(params) else: - test.error("Unknown guest suspend type, Check your" - " 'guest_suspend_type' config.") + test.error( + "Unknown guest suspend type, Check your" " 'guest_suspend_type' config." + ) def subw_guest_pause_resume(test, params, vm, session): vm.monitor.cmd("stop") if not vm.monitor.verify_status("paused"): - test.error("VM is not paused Current status: %s" % - vm.monitor.get_status()) + test.error(f"VM is not paused Current status: {vm.monitor.get_status()}") time.sleep(float(params.get("wait_timeout", "1800"))) vm.monitor.cmd("cont") if not vm.monitor.verify_status("running"): - test.error("VM is not running. Current status: %s" % - vm.monitor.get_status()) + test.error(f"VM is not running. Current status: {vm.monitor.get_status()}") -def time_diff(host_guest_time_before, - host_guest_time_after): +def time_diff(host_guest_time_before, host_guest_time_after): """ Function compares diff of host and guest time before and after. It allows compare time in different timezones. @@ -71,8 +66,7 @@ def time_diff(host_guest_time_before, return before_diff - after_diff -def time_diff_host_guest(host_guest_time_before, - host_guest_time_after): +def time_diff_host_guest(host_guest_time_before, host_guest_time_after): """ Function compares diff of host and guest time before and after. It allows compare time in different timezones. @@ -101,12 +95,13 @@ def run(test, params, env): """ clock_sync_command = params["clock_sync_command"] login_timeout = int(params.get("login_timeout", "240")) - date_time_command = params.get("date_time_command", - r"date -u +'TIME: %a %m/%d/%Y %H:%M:%S.%N'") - date_time_filter_re = params.get("date_time_filter_re", - r"(?:TIME: \w\w\w )(.{19})(.+)") - date_time_format = params.get("date_time_format", - "%m/%d/%Y %H:%M:%S") + date_time_command = params.get( + "date_time_command", r"date -u +'TIME: %a %m/%d/%Y %H:%M:%S.%N'" + ) + date_time_filter_re = params.get( + "date_time_filter_re", r"(?:TIME: \w\w\w )(.{19})(.+)" + ) + date_time_format = params.get("date_time_format", "%m/%d/%Y %H:%M:%S") tolerance = float(params.get("time_diff_tolerance", "0.5")) @@ -119,46 +114,45 @@ def run(test, params, env): process.system(clock_sync_command, shell=True) session = vm.wait_for_login(timeout=login_timeout) - error_context.context("Get clock from host and guest VM using `date`", - test.log.info) + error_context.context( + "Get clock from host and guest VM using `date`", test.log.info + ) - before_date = utils_test.get_time(session, - date_time_command, - date_time_filter_re, - date_time_format) - test.log.debug("date: host time=%ss guest time=%ss", - *before_date) + before_date = utils_test.get_time( + session, date_time_command, date_time_filter_re, date_time_format + ) + test.log.debug("date: host time=%ss guest time=%ss", *before_date) session.close() if sub_work in globals(): # Try to find sub work function. globals()[sub_work](test, params, vm, session) else: - test.cancel("Unable to found subwork %s in %s test file." 
% - (sub_work, __file__)) + test.cancel(f"Unable to find subwork {sub_work} in {__file__} test file.") vm = env.get_vm(vm_name) session = vm.wait_for_login(timeout=login_timeout) - error_context.context("Get clock from host and guest VM using `date`", - test.log.info) - after_date = utils_test.get_time(session, - date_time_command, - date_time_filter_re, - date_time_format) - test.log.debug("date: host time=%ss guest time=%ss", - *after_date) - - if test_type == 'guest_suspend': + error_context.context( + "Get clock from host and guest VM using `date`", test.log.info + ) + after_date = utils_test.get_time( + session, date_time_command, date_time_filter_re, date_time_format + ) + test.log.debug("date: host time=%ss guest time=%ss", *after_date) + + if test_type == "guest_suspend": date_diff = time_diff(before_date, after_date) if date_diff > tolerance: - test.fail("date %ss difference is" - "'guest_diff_time != host_diff_time'" - " out of tolerance %ss" % (date_diff[1], - tolerance)) + test.fail( + f"date {date_diff[1]}s difference is" + "'guest_diff_time != host_diff_time'" + f" out of tolerance {tolerance}s" + ) elif test_type == "guest_pause_resume": date_diff = time_diff_host_guest(before_date, after_date) if date_diff[1] > tolerance: - test.fail("date %ss difference is " - "'guest_time_after-guest_time_before'" - " out of tolerance %ss" % (date_diff[1], - tolerance)) + test.fail( + f"date {date_diff[1]}s difference is " + "'guest_time_after-guest_time_before'" + f" out of tolerance {tolerance}s" + ) diff --git a/qemu/tests/timedrift_with_cpu_offline.py b/qemu/tests/timedrift_with_cpu_offline.py index 0bcb2b5da0..827c8cbc3a 100644 --- a/qemu/tests/timedrift_with_cpu_offline.py +++ b/qemu/tests/timedrift_with_cpu_offline.py @@ -1,7 +1,6 @@ import time -from virttest import utils_test -from virttest import error_context +from virttest import error_context, utils_test @error_context.context_aware @@ -28,9 +27,9 @@ def run(test, params, env): boot_option_added = params.get("boot_option_added") boot_option_removed = params.get("boot_option_removed") if boot_option_added or boot_option_removed: - utils_test.update_boot_option(vm, - args_removed=boot_option_removed, - args_added=boot_option_added) + utils_test.update_boot_option( + vm, args_removed=boot_option_removed, args_added=boot_option_added + ) session = vm.wait_for_login(timeout=login_timeout) @@ -51,14 +50,14 @@ def run(test, params, env): # Get time before set cpu offline # (ht stands for host time, gt stands for guest time) error_context.context("get time before set cpu offline") - (ht0, gt0) = utils_test.get_time(session, time_command, - time_filter_re, time_format) + (ht0, gt0) = utils_test.get_time( + session, time_command, time_filter_re, time_format + ) # Check cpu number error_context.context("check guest cpu number") smp = int(params.get("smp")) if smp < 2: - test.error("The guest only has %d vcpu," - "unsupport cpu offline" % smp) + test.error("The guest only has %d vcpu, cpu offline is not supported" % smp) # Set cpu offline error_context.context("set cpu offline ") @@ -73,8 +72,9 @@ def run(test, params, env): # Get time after set cpu offline error_context.context("get time after set cpu offline") - (ht1, gt1) = utils_test.get_time(session, time_command, - time_filter_re, time_format) + (ht1, gt1) = utils_test.get_time( + session, time_command, time_filter_re, time_format + ) # Report results host_delta = ht1 - ht0 guest_delta = gt1 - gt0 @@ -83,7 +83,7 @@ def run(test, params, env): test.log.info("Guest duration: %.2f",
guest_delta) test.log.info("Drift: %.2f%%", drift) if abs(drift) > drift_threshold: - test.fail("Time drift too large: %.2f%%" % drift) + test.fail(f"Time drift too large: {drift:.2f}%") # Set cpu online again error_context.context("set cpu online") @@ -97,8 +97,9 @@ def run(test, params, env): start_time = time.time() while (time.time() - start_time) < test_duration: # Get time delta after set cpu online - (ht1, gt1) = utils_test.get_time(session, time_command, - time_filter_re, time_format) + (ht1, gt1) = utils_test.get_time( + session, time_command, time_filter_re, time_format + ) # Report results host_delta = ht1 - ht0 @@ -109,11 +110,11 @@ def run(test, params, env): test.log.info("Drift: %.2f%%", drift) time.sleep(interval_gettime) if abs(drift) > drift_threshold: - test.fail("Time drift too large: %.2f%%" % drift) + test.fail(f"Time drift too large: {drift:.2f}%") finally: session.close() # remove flags add for this test. if boot_option_added or boot_option_removed: - utils_test.update_boot_option(vm, - args_removed=boot_option_added, - args_added=boot_option_removed) + utils_test.update_boot_option( + vm, args_removed=boot_option_added, args_added=boot_option_removed + ) diff --git a/qemu/tests/timedrift_with_migration.py b/qemu/tests/timedrift_with_migration.py index fc7dabc70e..4b4a328f1a 100644 --- a/qemu/tests/timedrift_with_migration.py +++ b/qemu/tests/timedrift_with_migration.py @@ -1,5 +1,4 @@ -from virttest import utils_test -from virttest import utils_time +from virttest import utils_test, utils_time def run(test, params, env): @@ -22,11 +21,11 @@ def run(test, params, env): boot_option_added = params.get("boot_option_added") boot_option_removed = params.get("boot_option_removed") if boot_option_added or boot_option_removed: - utils_test.update_boot_option(vm, - args_removed=boot_option_removed, - args_added=boot_option_added) + utils_test.update_boot_option( + vm, args_removed=boot_option_removed, args_added=boot_option_added + ) - if params["os_type"] == 'windows': + if params["os_type"] == "windows": utils_time.sync_timezone_win(vm) timeout = int(params.get("login_timeout", 360)) @@ -46,66 +45,75 @@ def run(test, params, env): try: # Get initial time # (ht stands for host time, gt stands for guest time) - (ht0, gt0) = utils_test.get_time(session, time_command, - time_filter_re, time_format) + (ht0, gt0) = utils_test.get_time( + session, time_command, time_filter_re, time_format + ) # Migrate for i in range(migration_iterations): # Get time before current iteration - (ht0_, gt0_) = utils_test.get_time(session, time_command, - time_filter_re, time_format) + (ht0_, gt0_) = utils_test.get_time( + session, time_command, time_filter_re, time_format + ) session.close() # Run current iteration - test.log.info("Migrating: iteration %d of %d...", - (i + 1), migration_iterations) + test.log.info( + "Migrating: iteration %d of %d...", (i + 1), migration_iterations + ) vm.migrate() # Log in test.log.info("Logging in after migration...") session = vm.wait_for_login(timeout=30) test.log.info("Logged in after migration") # Get time after current iteration - (ht1_, gt1_) = utils_test.get_time(session, time_command, - time_filter_re, time_format) + (ht1_, gt1_) = utils_test.get_time( + session, time_command, time_filter_re, time_format + ) # Report iteration results host_delta = ht1_ - ht0_ guest_delta = gt1_ - gt0_ drift = abs(host_delta - guest_delta) - test.log.info("Host duration (iteration %d): %.2f", - (i + 1), host_delta) - test.log.info("Guest duration (iteration %d): %.2f", - 
(i + 1), guest_delta) - test.log.info("Drift at iteration %d: %.2f seconds", - (i + 1), drift) + test.log.info("Host duration (iteration %d): %.2f", (i + 1), host_delta) + test.log.info("Guest duration (iteration %d): %.2f", (i + 1), guest_delta) + test.log.info("Drift at iteration %d: %.2f seconds", (i + 1), drift) # Fail if necessary if drift > drift_threshold_single: - test.fail("Time drift too large at iteration %d: " - "%.2f seconds" % (i + 1, drift)) + test.fail( + "Time drift too large at iteration %d: " + "%.2f seconds" % (i + 1, drift) + ) # Get final time - (ht1, gt1) = utils_test.get_time(session, time_command, - time_filter_re, time_format) + (ht1, gt1) = utils_test.get_time( + session, time_command, time_filter_re, time_format + ) finally: if session: session.close() # remove flags add for this test. if boot_option_added or boot_option_removed: - utils_test.update_boot_option(vm, - args_removed=boot_option_added, - args_added=boot_option_removed) + utils_test.update_boot_option( + vm, args_removed=boot_option_added, args_added=boot_option_removed + ) # Report results host_delta = ht1 - ht0 guest_delta = gt1 - gt0 drift = abs(host_delta - guest_delta) - test.log.info("Host duration (%d migrations): %.2f", - migration_iterations, host_delta) - test.log.info("Guest duration (%d migrations): %.2f", - migration_iterations, guest_delta) - test.log.info("Drift after %d migrations: %.2f seconds", - migration_iterations, drift) + test.log.info( + "Host duration (%d migrations): %.2f", migration_iterations, host_delta + ) + test.log.info( + "Guest duration (%d migrations): %.2f", migration_iterations, guest_delta + ) + test.log.info( + "Drift after %d migrations: %.2f seconds", migration_iterations, drift + ) # Fail if necessary if drift > drift_threshold: - test.fail("Time drift too large after %d migrations: " - "%.2f seconds" % (migration_iterations, drift)) + test.fail( + "Time drift too large after %d migrations: " + "%.2f seconds" % (migration_iterations, drift) + ) diff --git a/qemu/tests/timedrift_with_multi_vms.py b/qemu/tests/timedrift_with_multi_vms.py index 634ee27bdf..934a618324 100644 --- a/qemu/tests/timedrift_with_multi_vms.py +++ b/qemu/tests/timedrift_with_multi_vms.py @@ -1,13 +1,8 @@ -import re import random +import re -from avocado.utils import process -from avocado.utils import service -from avocado.utils import cpu - -from virttest import utils_time -from virttest import env_process -from virttest import error_context +from avocado.utils import cpu, process, service +from virttest import env_process, error_context, utils_time @error_context.context_aware @@ -24,13 +19,14 @@ def run(test, params, env): :param params: Dictionary with test parameters. :param env: Dictionary with the test environment. 
""" + def verify_guest_clock_source(session, expected): """ :param session: VM session :param expected: expected clocksource """ if expected not in session.cmd(clocksource_cmd): - test.fail("Guest doesn't use '%s' clocksource" % expected) + test.fail(f"Guest doesn't use '{expected}' clocksource") ntp_cmd = params["ntp_cmd"] ntp_stop_cmd = params["ntp_stop_cmd"] @@ -66,20 +62,22 @@ def verify_guest_clock_source(session, expected): for vmid, se in enumerate(sessions): # Get the respective vm object cpu_id = vmid if same_cpu == "no" else 0 - process.system("taskset -cp %s %s" % - (host_cpu_list[cpu_id], vm_obj[vmid].get_pid()), - shell=True) + process.system( + f"taskset -cp {host_cpu_list[cpu_id]} {vm_obj[vmid].get_pid()}", + shell=True, + ) error_context.context("Check the current clocksource", test.log.info) currentsource = se.cmd_output_safe(clocksource_cmd) if clocksource not in currentsource: - error_context.context("Update guest kernel cli to %s" % clocksource, - test.log.info) + error_context.context( + f"Update guest kernel cli to {clocksource}", test.log.info + ) utils_time.update_clksrc(vm_obj[vmid], clksrc=clocksource) verify_guest_clock_source(se, clocksource) error_context.context("Stop ntp service in guest", test.log.info) status, output = se.cmd_status_output(ntp_stop_cmd) - vmid_test = random.randint(0, len(vms)-1) + vmid_test = random.randint(0, len(vms) - 1) vm = vm_obj[vmid_test] se = sessions[vmid_test] if same_cpu == "no": @@ -103,5 +101,6 @@ def verify_guest_clock_source(session, expected): if offset > float(expected_time_drift): fail_offset.append((vmid, offset)) if fail_offset: - test.fail("The time drift of following guests %s are larger than 5s." - % fail_offset) + test.fail( + f"The time drift of following guests {fail_offset} are larger than 5s." 
+ ) diff --git a/qemu/tests/timedrift_with_reboot.py b/qemu/tests/timedrift_with_reboot.py index 22ad17bc92..0228fd9ced 100644 --- a/qemu/tests/timedrift_with_reboot.py +++ b/qemu/tests/timedrift_with_reboot.py @@ -1,5 +1,4 @@ -from virttest import utils_test -from virttest import utils_time +from virttest import utils_test, utils_time def run(test, params, env): @@ -22,11 +21,11 @@ def run(test, params, env): boot_option_added = params.get("boot_option_added") boot_option_removed = params.get("boot_option_removed") if boot_option_added or boot_option_removed: - utils_test.update_boot_option(vm, - args_removed=boot_option_removed, - args_added=boot_option_added) + utils_test.update_boot_option( + vm, args_removed=boot_option_removed, args_added=boot_option_added + ) - if params.get("os_type") == 'linux': + if params.get("os_type") == "linux": utils_time.sync_timezone_linux(vm) else: utils_time.sync_timezone_win(vm) @@ -48,61 +47,64 @@ def run(test, params, env): try: # Get initial time # (ht stands for host time, gt stands for guest time) - (ht0, gt0) = utils_test.get_time(session, time_command, - time_filter_re, time_format) + (ht0, gt0) = utils_test.get_time( + session, time_command, time_filter_re, time_format + ) # Reboot for i in range(reboot_iterations): # Get time before current iteration - (ht0_, gt0_) = utils_test.get_time(session, time_command, - time_filter_re, time_format) + (ht0_, gt0_) = utils_test.get_time( + session, time_command, time_filter_re, time_format + ) # Run current iteration - test.log.info("Rebooting: iteration %d of %d...", - (i + 1), reboot_iterations) + test.log.info( + "Rebooting: iteration %d of %d...", (i + 1), reboot_iterations + ) session = vm.reboot(session, timeout=timeout) # Get time after current iteration - (ht1_, gt1_) = utils_test.get_time(session, time_command, - time_filter_re, time_format) + (ht1_, gt1_) = utils_test.get_time( + session, time_command, time_filter_re, time_format + ) # Report iteration results host_delta = ht1_ - ht0_ guest_delta = gt1_ - gt0_ drift = abs(host_delta - guest_delta) - test.log.info("Host duration (iteration %d): %.2f", - (i + 1), host_delta) - test.log.info("Guest duration (iteration %d): %.2f", - (i + 1), guest_delta) - test.log.info("Drift at iteration %d: %.2f seconds", - (i + 1), drift) + test.log.info("Host duration (iteration %d): %.2f", (i + 1), host_delta) + test.log.info("Guest duration (iteration %d): %.2f", (i + 1), guest_delta) + test.log.info("Drift at iteration %d: %.2f seconds", (i + 1), drift) # Fail if necessary if drift > drift_threshold_single: - test.fail("Time drift too large at iteration %d: " - "%.2f seconds" % (i + 1, drift)) + test.fail( + "Time drift too large at iteration %d: " + "%.2f seconds" % (i + 1, drift) + ) # Get final time - (ht1, gt1) = utils_test.get_time(session, time_command, - time_filter_re, time_format) + (ht1, gt1) = utils_test.get_time( + session, time_command, time_filter_re, time_format + ) finally: if session: session.close() # remove flags add for this test. 
if boot_option_added or boot_option_removed: - utils_test.update_boot_option(vm, - args_removed=boot_option_added, - args_added=boot_option_removed) + utils_test.update_boot_option( + vm, args_removed=boot_option_added, args_added=boot_option_removed + ) # Report results host_delta = ht1 - ht0 guest_delta = gt1 - gt0 drift = abs(host_delta - guest_delta) - test.log.info("Host duration (%d reboots): %.2f", - reboot_iterations, host_delta) - test.log.info("Guest duration (%d reboots): %.2f", - reboot_iterations, guest_delta) - test.log.info("Drift after %d reboots: %.2f seconds", - reboot_iterations, drift) + test.log.info("Host duration (%d reboots): %.2f", reboot_iterations, host_delta) + test.log.info("Guest duration (%d reboots): %.2f", reboot_iterations, guest_delta) + test.log.info("Drift after %d reboots: %.2f seconds", reboot_iterations, drift) # Fail if necessary if drift > drift_threshold: - test.fail("Time drift too large after %d reboots: " - "%.2f seconds" % (reboot_iterations, drift)) + test.fail( + "Time drift too large after %d reboots: " + "%.2f seconds" % (reboot_iterations, drift) + ) diff --git a/qemu/tests/timedrift_with_stop.py b/qemu/tests/timedrift_with_stop.py index 8588ce1b2d..a723973d36 100644 --- a/qemu/tests/timedrift_with_stop.py +++ b/qemu/tests/timedrift_with_stop.py @@ -1,9 +1,8 @@ -import time import os import signal +import time -from virttest import utils_test -from virttest import utils_time +from virttest import utils_test, utils_time def run(test, params, env): @@ -30,11 +29,11 @@ def run(test, params, env): boot_option_added = params.get("boot_option_added") boot_option_removed = params.get("boot_option_removed") if boot_option_added or boot_option_removed: - utils_test.update_boot_option(vm, - args_removed=boot_option_removed, - args_added=boot_option_added) + utils_test.update_boot_option( + vm, args_removed=boot_option_removed, args_added=boot_option_added + ) - if params["os_type"] == 'windows': + if params["os_type"] == "windows": utils_time.sync_timezone_win(vm) session = vm.wait_for_login(timeout=login_timeout) @@ -59,17 +58,23 @@ def run(test, params, env): try: # Get initial time # (ht stands for host time, gt stands for guest time) - (ht0, gt0) = utils_test.get_time(session, time_command, - time_filter_re, time_format) + (ht0, gt0) = utils_test.get_time( + session, time_command, time_filter_re, time_format + ) # Stop the guest for i in range(stop_iterations): # Get time before current iteration - (ht0_, gt0_) = utils_test.get_time(session, time_command, - time_filter_re, time_format) + (ht0_, gt0_) = utils_test.get_time( + session, time_command, time_filter_re, time_format + ) # Run current iteration - test.log.info("Stop %s second: iteration %d of %d...", - stop_time, (i + 1), stop_iterations) + test.log.info( + "Stop %s second: iteration %d of %d...", + stop_time, + (i + 1), + stop_iterations, + ) if stop_with_signal: test.log.debug("Stop guest") os.kill(pid, signal.SIGSTOP) @@ -86,8 +91,9 @@ def run(test, params, env): time.sleep(sleep_time) # Get time after current iteration - (ht1_, gt1_) = utils_test.get_time(session, time_command, - time_filter_re, time_format) + (ht1_, gt1_) = utils_test.get_time( + session, time_command, time_filter_re, time_format + ) # Report iteration results host_delta = ht1_ - ht0_ guest_delta = gt1_ - gt0_ @@ -98,29 +104,29 @@ def run(test, params, env): drift = abs(drift - stop_time) if params.get("os_type") == "windows" and rtc_clock == "host": drift = abs(host_delta - guest_delta) - test.log.info("Host 
duration (iteration %d): %.2f", - (i + 1), host_delta) - test.log.info("Guest duration (iteration %d): %.2f", - (i + 1), guest_delta) - test.log.info("Drift at iteration %d: %.2f seconds", - (i + 1), drift) + test.log.info("Host duration (iteration %d): %.2f", (i + 1), host_delta) + test.log.info("Guest duration (iteration %d): %.2f", (i + 1), guest_delta) + test.log.info("Drift at iteration %d: %.2f seconds", (i + 1), drift) # Fail if necessary if drift > drift_threshold_single: - test.fail("Time drift too large at iteration %d: " - "%.2f seconds" % (i + 1, drift)) + test.fail( + "Time drift too large at iteration %d: " + "%.2f seconds" % (i + 1, drift) + ) # Get final time - (ht1, gt1) = utils_test.get_time(session, time_command, - time_filter_re, time_format) + (ht1, gt1) = utils_test.get_time( + session, time_command, time_filter_re, time_format + ) finally: if session: session.close() # remove flags add for this test. if boot_option_added or boot_option_removed: - utils_test.update_boot_option(vm, - args_removed=boot_option_added, - args_added=boot_option_removed) + utils_test.update_boot_option( + vm, args_removed=boot_option_added, args_added=boot_option_removed + ) # Report results host_delta = ht1 - ht0 @@ -132,14 +138,13 @@ def run(test, params, env): drift = abs(drift - stop_time) if params.get("os_type") == "windows" and rtc_clock == "host": drift = abs(host_delta - guest_delta) - test.log.info("Host duration (%d stops): %.2f", - stop_iterations, host_delta) - test.log.info("Guest duration (%d stops): %.2f", - stop_iterations, guest_delta) - test.log.info("Drift after %d stops: %.2f seconds", - stop_iterations, drift) + test.log.info("Host duration (%d stops): %.2f", stop_iterations, host_delta) + test.log.info("Guest duration (%d stops): %.2f", stop_iterations, guest_delta) + test.log.info("Drift after %d stops: %.2f seconds", stop_iterations, drift) # Fail if necessary if drift > drift_threshold: - test.fail("Time drift too large after %d stops: " - "%.2f seconds" % (stop_iterations, drift)) + test.fail( + "Time drift too large after %d stops: " + "%.2f seconds" % (stop_iterations, drift) + ) diff --git a/qemu/tests/timer_rtc_sync.py b/qemu/tests/timer_rtc_sync.py index c15f515993..f704bae9dd 100644 --- a/qemu/tests/timer_rtc_sync.py +++ b/qemu/tests/timer_rtc_sync.py @@ -1,11 +1,8 @@ -import time import re +import time from avocado.utils import process -from virttest import utils_test -from virttest import utils_time -from virttest import env_process -from virttest import error_context +from virttest import env_process, error_context, utils_test, utils_time @error_context.context_aware @@ -31,19 +28,19 @@ def get_hwtime(session): :param session: VM session. 
""" - hwclock_time_command = params.get("hwclock_time_command", - "hwclock -u") - hwclock_time_filter_re = params.get("hwclock_time_filter_re", - r"(\d+-\d+-\d+ \d+:\d+:\d+)") - hwclock_time_format = params.get("hwclock_time_format", - "%Y-%m-%d %H:%M:%S") + hwclock_time_command = params.get("hwclock_time_command", "hwclock -u") + hwclock_time_filter_re = params.get( + "hwclock_time_filter_re", r"(\d+-\d+-\d+ \d+:\d+:\d+)" + ) + hwclock_time_format = params.get("hwclock_time_format", "%Y-%m-%d %H:%M:%S") output = session.cmd_output_safe(hwclock_time_command) try: str_time = re.findall(hwclock_time_filter_re, output)[0] guest_time = time.mktime(time.strptime(str_time, hwclock_time_format)) except Exception as err: test.log.debug( - "(time_format, time_string): (%s, %s)", hwclock_time_format, str_time) + "(time_format, time_string): (%s, %s)", hwclock_time_format, str_time + ) raise err return guest_time @@ -60,25 +57,26 @@ def verify_timedrift(session, is_hardware=False): time_filter_re = params["time_filter_re"] # Time format for time.strptime() time_format = params["time_format"] - timerdevice_drift_threshold = float(params.get( - "timerdevice_drift_threshold", 3)) + timerdevice_drift_threshold = float( + params.get("timerdevice_drift_threshold", 3) + ) time_type = "system" if not is_hardware else "harware" - error_context.context("Check the %s time on guest" % time_type, - test.log.info) - host_time, guest_time = utils_test.get_time(session, time_command, - time_filter_re, - time_format) + error_context.context(f"Check the {time_type} time on guest", test.log.info) + host_time, guest_time = utils_test.get_time( + session, time_command, time_filter_re, time_format + ) if is_hardware: guest_time = get_hwtime(session) drift = abs(float(host_time) - float(guest_time)) if drift > timerdevice_drift_threshold: - test.fail("The guest's %s time is different with" - " host's system time. Host time: '%s', guest time:" - " '%s'" % (time_type, host_time, guest_time)) + test.fail( + f"The guest's {time_type} time is different with" + f" host's system time. 
Host time: '{host_time}', guest time:" + f" '{guest_time}'" + ) - error_context.context("sync host time with NTP server", - test.log.info) + error_context.context("sync host time with NTP server", test.log.info) clock_sync_command = params["clock_sync_command"] process.system(clock_sync_command, shell=True) @@ -94,8 +92,7 @@ def verify_timedrift(session, is_hardware=False): session = vm.wait_for_login(timeout=timeout) - error_context.context("check timedrift between guest and host.", - test.log.info) + error_context.context("check timedrift between guest and host.", test.log.info) verify_timedrift(session) verify_timedrift(session, is_hardware=True) @@ -106,7 +103,9 @@ def verify_timedrift(session, is_hardware=False): error_context.context("Waiting for 11 mins.", test.log.info) time.sleep(660) - error_context.context("check timedrift between guest and host after " - "changing RTC time.", test.log.info) + error_context.context( + "check timedrift between guest and host after " "changing RTC time.", + test.log.info, + ) verify_timedrift(session) verify_timedrift(session, is_hardware=True) diff --git a/qemu/tests/timerdevice_boot.py b/qemu/tests/timerdevice_boot.py index 658eabd454..3dac16d566 100644 --- a/qemu/tests/timerdevice_boot.py +++ b/qemu/tests/timerdevice_boot.py @@ -1,12 +1,8 @@ -import time import re +import time from avocado.utils import process -from virttest import utils_test -from virttest import utils_time -from virttest import env_process -from virttest import funcatexit -from virttest import error_context +from virttest import env_process, error_context, funcatexit, utils_test, utils_time def _system(*args, **kwargs): @@ -43,19 +39,19 @@ def get_hwtime(session): :param session: VM session. """ - hwclock_time_command = params.get("hwclock_time_command", - "hwclock -u") - hwclock_time_filter_re = params.get("hwclock_time_filter_re", - r"(\d+-\d+-\d+ \d+:\d+:\d+)") - hwclock_time_format = params.get("hwclock_time_format", - "%Y-%m-%d %H:%M:%S") + hwclock_time_command = params.get("hwclock_time_command", "hwclock -u") + hwclock_time_filter_re = params.get( + "hwclock_time_filter_re", r"(\d+-\d+-\d+ \d+:\d+:\d+)" + ) + hwclock_time_format = params.get("hwclock_time_format", "%Y-%m-%d %H:%M:%S") output = session.cmd_output_safe(hwclock_time_command) try: str_time = re.findall(hwclock_time_filter_re, output)[0] guest_time = time.mktime(time.strptime(str_time, hwclock_time_format)) except Exception as err: test.log.debug( - "(time_format, time_string): (%s, %s)", hwclock_time_format, str_time) + "(time_format, time_string): (%s, %s)", hwclock_time_format, str_time + ) raise err return guest_time @@ -72,22 +68,24 @@ def verify_timedrift(session, is_hardware=False): time_filter_re = params["time_filter_re"] # Time format for time.strptime() time_format = params["time_format"] - timerdevice_drift_threshold = float(params.get( - "timerdevice_drift_threshold", 3)) + timerdevice_drift_threshold = float( + params.get("timerdevice_drift_threshold", 3) + ) time_type = "system" if not is_hardware else "harware" - error_context.context("Check the %s time on guest" % time_type, - test.log.info) - host_time, guest_time = utils_test.get_time(session, time_command, - time_filter_re, - time_format) + error_context.context(f"Check the {time_type} time on guest", test.log.info) + host_time, guest_time = utils_test.get_time( + session, time_command, time_filter_re, time_format + ) if is_hardware: guest_time = get_hwtime(session) drift = abs(float(host_time) - float(guest_time)) if drift > 
timerdevice_drift_threshold: - test.fail("The guest's %s time is different with" - " host's system time. Host time: '%s', guest time:" - " '%s'" % (time_type, host_time, guest_time)) + test.fail( + f"The guest's {time_type} time is different with" + f" host's system time. Host time: '{host_time}', guest time:" + f" '{guest_time}'" + ) def get_current_clksrc(session): cmd = "cat /sys/devices/system/clocksource/" @@ -115,19 +113,24 @@ def update_clksrc(session, clksrc): avail_cmd += "available_clocksource" avail_clksrc = session.cmd_output_safe(avail_cmd) if clksrc in avail_clksrc: - clksrc_cmd = "echo %s > /sys/devices/system/clocksource/" % clksrc + clksrc_cmd = f"echo {clksrc} > /sys/devices/system/clocksource/" clksrc_cmd += "clocksource0/current_clocksource" status, output = session.cmd_status_output(clksrc_cmd, safe=True) if status: - test.fail("fail to update guest's clocksource to %s," - "details: %s" % clksrc, output) + test.fail( + "fail to update guest's clocksource to {}, " + "details: {}".format( + clksrc, output + ) + ) else: - test.error("please check the clocksource you want to set, " - "it's not supported by current guest, current " - "available clocksources: %s" % avail_clksrc) + test.error( + "please check the clocksource you want to set, " + "it's not supported by current guest, current " + f"available clocksources: {avail_clksrc}" + ) - error_context.context("sync host time with NTP server", - test.log.info) + error_context.context("sync host time with NTP server", test.log.info) clock_sync_command = params["clock_sync_command"] process.system(clock_sync_command, shell=True) @@ -136,18 +139,20 @@ def update_clksrc(session, clksrc): error_context.context("Add some load on host", test.log.info) host_cpu_cnt_cmd = params["host_cpu_cnt_cmd"] host_cpu_cnt = int(process.system_output(host_cpu_cnt_cmd, shell=True).strip()) - timerdevice_host_load_cmd = timerdevice_host_load_cmd % int(host_cpu_cnt/2) + timerdevice_host_load_cmd = timerdevice_host_load_cmd % int(host_cpu_cnt / 2) if params["os_type"] == "linux": - process.system(timerdevice_host_load_cmd, shell=True, - ignore_bg_processes=True) + process.system( + timerdevice_host_load_cmd, shell=True, ignore_bg_processes=True + ) else: - stress_bg = utils_test.HostStress("stress", params, - stress_args=timerdevice_host_load_cmd) + stress_bg = utils_test.HostStress( + "stress", params, stress_args=timerdevice_host_load_cmd + ) stress_bg.load_stress_tool() - host_load_stop_cmd = params.get("timerdevice_host_load_stop_cmd", - "pkill -f 'do X=1'") - funcatexit.register(env, params["type"], _system, - host_load_stop_cmd) + host_load_stop_cmd = params.get( + "timerdevice_host_load_stop_cmd", "pkill -f 'do X=1'" + ) + funcatexit.register(env, params["type"], _system, host_load_stop_cmd) params["start_vm"] = "yes" env_process.preprocess_vm(test, params, env, params.get("main_vm")) @@ -157,7 +162,7 @@ def update_clksrc(session, clksrc): error_context.context("Sync guest timezone before test", test.log.info) timeout = int(params.get("login_timeout", 360)) - if params["os_type"] == 'linux': + if params["os_type"] == "linux": utils_time.sync_timezone_linux(vm, timeout) else: utils_time.sync_timezone_win(vm, timeout) @@ -174,8 +179,7 @@ def update_clksrc(session, clksrc): update_clksrc(session, timerdevice_clksource) need_restore_clksrc = True - error_context.context("check timedrift between guest and host.", - test.log.info) + error_context.context("check timedrift between guest and host.", test.log.info) verify_timedrift(session) if 
params["os_type"] == "linux": verify_timedrift(session, is_hardware=True) @@ -192,13 +196,15 @@ def update_clksrc(session, clksrc): if params.get("timerdevice_reboot_test") == "yes": sleep_time = params.get("timerdevice_sleep_time") if sleep_time: - error_context.context("Sleep '%s' secs before reboot" % sleep_time, - test.log.info) + error_context.context( + f"Sleep '{sleep_time}' secs before reboot", test.log.info + ) sleep_time = int(sleep_time) time.sleep(sleep_time) - error_context.context("Check timedrift between guest and host " - "after reboot.", test.log.info) + error_context.context( + "Check timedrift between guest and host " "after reboot.", test.log.info + ) vm.reboot(timeout=timeout, serial=True) verify_timedrift(session) if params["os_type"] == "linux": diff --git a/qemu/tests/timerdevice_change_guest_clksource.py b/qemu/tests/timerdevice_change_guest_clksource.py index ceec3ec951..e768f56c54 100644 --- a/qemu/tests/timerdevice_change_guest_clksource.py +++ b/qemu/tests/timerdevice_change_guest_clksource.py @@ -1,8 +1,6 @@ import re -from virttest import error_context -from virttest import utils_time -from virttest import utils_test +from virttest import error_context, utils_test, utils_time @error_context.context_aware @@ -21,9 +19,10 @@ def run(test, params, env): :param params: Dictionary with test parameters. :param env: Dictionary with the test environment. """ + def verify_guest_clock_source(session, expected): if expected not in session.cmd(cur_clk): - test.fail("Guest didn't use '%s' clocksource" % expected) + test.fail(f"Guest didn't use '{expected}' clocksource") error_context.context("Boot a guest with kvm-clock", test.log.info) vm = env.get_vm(params["main_vm"]) @@ -32,34 +31,32 @@ def verify_guest_clock_source(session, expected): timeout = int(params.get("login_timeout", 360)) session = vm.wait_for_login(timeout=timeout) - error_context.context("Check the current clocksource in guest", - test.log.info) + error_context.context("Check the current clocksource in guest", test.log.info) cur_clk = params["cur_clk"] if "kvm-clock" not in session.cmd(cur_clk): - error_context.context("Update guest kernel cli to kvm-clock", - test.log.info) + error_context.context("Update guest kernel cli to kvm-clock", test.log.info) utils_time.update_clksrc(vm, clksrc="kvm-clock") session = vm.wait_for_login(timeout=timeout) verify_guest_clock_source(session, "kvm-clock") - error_context.context("Check the available clocksource in guest", - test.log.info) + error_context.context("Check the available clocksource in guest", test.log.info) avl_clk = params["avl_clk"] try: available_clksrc_list = session.cmd(avl_clk).split() except Exception as detail: - test.fail("Couldn't get guest available clock source." - " Detail: '%s'" % detail) + test.fail("Couldn't get guest available clock source." 
f" Detail: '{detail}'") try: for avl_clksrc in available_clksrc_list: if avl_clksrc == "kvm-clock": continue - error_context.context("Update guest kernel cli to '%s'" % avl_clksrc, - test.log.info) + error_context.context( + f"Update guest kernel cli to '{avl_clksrc}'", test.log.info + ) utils_time.update_clksrc(vm, clksrc=avl_clksrc) session = vm.wait_for_login(timeout=timeout) - error_context.context("Check the current clocksource in guest", - test.log.info) + error_context.context( + "Check the current clocksource in guest", test.log.info + ) verify_guest_clock_source(session, avl_clksrc) finally: error_context.context("Restore guest kernel cli", test.log.info) diff --git a/qemu/tests/timerdevice_check_ntp_offset.py b/qemu/tests/timerdevice_check_ntp_offset.py index d3f999051f..f161d21abe 100644 --- a/qemu/tests/timerdevice_check_ntp_offset.py +++ b/qemu/tests/timerdevice_check_ntp_offset.py @@ -2,10 +2,7 @@ import time from avocado.utils import process -from virttest import utils_test -from virttest import data_dir -from virttest import env_process -from virttest import error_context +from virttest import data_dir, env_process, error_context, utils_test @error_context.context_aware @@ -26,11 +23,11 @@ def run(test, params, env): """ def clean_tmp_file(): - if not session.cmd_status("dir %s" % ntp_dst_path): - session.cmd("rd /s /q %s" % ntp_dst_path) + if not session.cmd_status(f"dir {ntp_dst_path}"): + session.cmd(f"rd /s /q {ntp_dst_path}") ntp_install_path = params["ntp_install_path"] ntp_uninstall_cmd = params["ntp_uninstall_cmd"] - if not session.cmd_status("dir %s" % ntp_install_path): + if not session.cmd_status(f"dir {ntp_install_path}"): session.cmd(ntp_uninstall_cmd) diskspd_check_cmd = params["diskspd_check_cmd"] diskspd_end_cmd = params["diskspd_end_cmd"] @@ -38,6 +35,7 @@ def clean_tmp_file(): if not session.cmd_status(diskspd_check_cmd): session.cmd(diskspd_end_cmd) session.cmd("del %s" % (dst_path + diskspd_name)) + ntp_cmd = params["ntp_cmd"] error_context.context("Sync host system time with ntpserver", test.log.info) process.system(ntp_cmd, shell=True) @@ -62,12 +60,12 @@ def clean_tmp_file(): ntp_dst_path = params["ntp_dst_path"] install_ntp_cmd = params["install_ntp_cmd"] vm.copy_files_to(data_dir.get_deps_dir(ntp_dir), dst_path) - session.cmd("cd %s" % ntp_dst_path) + session.cmd(f"cd {ntp_dst_path}") session.cmd(install_ntp_cmd % (ntp_name, ntp_unattend_file)) error_context.context("Run diskspd on guest", test.log.info) diskspd_run_cmd = params["diskspd_run_cmd"] - session.cmd("cd %s" % dst_path) + session.cmd(f"cd {dst_path}") session.cmd(diskspd_run_cmd) error_context.context("Play a video on guest", test.log.info) @@ -81,9 +79,10 @@ def clean_tmp_file(): for _ in range(params.get_numeric("nums")): time.sleep(int(sleep_time)) ntp_offset = session.cmd_output(check_offset_cmd) - ntp_offset = float(ntp_offset.strip().split("\n")[-1].split()[-2]. 
- strip('-+')) + ntp_offset = float( + ntp_offset.strip().split("\n")[-1].split()[-2].strip("-+") + ) if ntp_offset > 100: - test.fail("The ntp offset %s is larger than 100ms" % ntp_offset) + test.fail(f"The ntp offset {ntp_offset} is larger than 100ms") finally: clean_tmp_file() diff --git a/qemu/tests/timerdevice_clock_drift_with_ntp.py b/qemu/tests/timerdevice_clock_drift_with_ntp.py index 0058da7002..d2079e1fd5 100644 --- a/qemu/tests/timerdevice_clock_drift_with_ntp.py +++ b/qemu/tests/timerdevice_clock_drift_with_ntp.py @@ -2,9 +2,7 @@ import aexpect from avocado.utils import process -from virttest import data_dir -from virttest import utils_misc -from virttest import error_context +from virttest import data_dir, error_context, utils_misc @error_context.context_aware @@ -28,6 +26,7 @@ def run(test, params, env): :param params: Dictionary with test parameters. :param env: Dictionary with the test environment. """ + def _drift_file_exist(): try: session.cmd("test -f /var/lib/chrony/drift") @@ -35,8 +34,7 @@ def _drift_file_exist(): except Exception: return False - error_context.context("Check for an appropriate clocksource on host", - test.log.info) + error_context.context("Check for an appropriate clocksource on host", test.log.info) host_cmd = "cat /sys/devices/system/clocksource/" host_cmd += "clocksource0/current_clocksource" if "tsc" not in process.getoutput(host_cmd): @@ -50,8 +48,9 @@ def _drift_file_exist(): sess_guest_load = vm.wait_for_login(timeout=timeout) error_context.context("Copy time-warp-test.c to guest", test.log.info) - src_file_name = os.path.join(data_dir.get_deps_dir(), "tsc_sync", - "time-warp-test.c") + src_file_name = os.path.join( + data_dir.get_deps_dir(), "tsc_sync", "time-warp-test.c" + ) vm.copy_files_to(src_file_name, "/tmp") error_context.context("Compile the time-warp-test.c", test.log.info) @@ -76,7 +75,7 @@ def _drift_file_exist(): if len(cpu_pin_list) < len(vm.vcpu_threads): test.cancel("There isn't enough physical cpu to pin all the vcpus") for vcpu, pcpu in cpu_pin_list: - process.system("taskset -p %s %s" % (1 << pcpu, vcpu)) + process.system(f"taskset -p {1 << pcpu} {vcpu}") error_context.context("Verify each vcpu is pinned on host", test.log.info) @@ -89,17 +88,17 @@ def _drift_file_exist(): cmd = "systemctl start chronyd; sleep 1; echo" session.cmd(cmd) - error_context.context("Check if the drift file exists on guest", - test.log.info) + error_context.context("Check if the drift file exists on guest", test.log.info) test_run_timeout = float(params["test_run_timeout"]) try: utils_misc.wait_for(_drift_file_exist, test_run_timeout, step=5) except aexpect.ShellCmdError as detail: - test.error("Failed to wait for the creation of" - " /var/lib/chronyd/drift file. Detail: '%s'" % detail) + test.error( + "Failed to wait for the creation of" + f" /var/lib/chronyd/drift file. Detail: '{detail}'" + ) - error_context.context("Verify the drift file content on guest", - test.log.info) + error_context.context("Verify the drift file content on guest", test.log.info) output = session.cmd("cat /var/lib/chrony/drift").strip().split()[0] if int(abs(float(output))) > 30: - test.fail("Failed to check the chrony drift. Output: '%s'" % output) + test.fail(f"Failed to check the chrony drift. 
Output: '{output}'") diff --git a/qemu/tests/timerdevice_clock_drift_with_sleep.py b/qemu/tests/timerdevice_clock_drift_with_sleep.py index fc9742f04a..f11a5f916b 100644 --- a/qemu/tests/timerdevice_clock_drift_with_sleep.py +++ b/qemu/tests/timerdevice_clock_drift_with_sleep.py @@ -1,11 +1,7 @@ import re from avocado.utils import process -from virttest import data_dir -from virttest import storage -from virttest import utils_disk -from virttest import env_process -from virttest import error_context +from virttest import data_dir, env_process, error_context, storage, utils_disk @error_context.context_aware @@ -25,16 +21,16 @@ def run(test, params, env): :param params: Dictionary with test parameters. :param env: Dictionary with the test environment. """ + def verify_elapsed_time(): sleep_cmd = r'echo "for n in \$(seq 1000);' - sleep_cmd += ' do sleep 0.01; done"'' > /tmp/sleep.sh' + sleep_cmd += ' do sleep 0.01; done"' " > /tmp/sleep.sh" session.cmd(sleep_cmd) - guest_cpu = session.cmd_output("grep 'processor' " - "/proc/cpuinfo | wc -l") - get_time_cmd = 'for (( i=0; i<%s; i+=1 ));' % guest_cpu + guest_cpu = session.cmd_output("grep 'processor' " "/proc/cpuinfo | wc -l") + get_time_cmd = f"for (( i=0; i<{guest_cpu}; i+=1 ));" get_time_cmd += ' do /usr/bin/time -f"%e"' - get_time_cmd += ' taskset -c $i sh /tmp/sleep.sh; done' + get_time_cmd += " taskset -c $i sh /tmp/sleep.sh; done" timeout_sleep = int(guest_cpu) * 14 output = session.cmd_output(get_time_cmd, timeout=timeout_sleep) @@ -42,11 +38,11 @@ def verify_elapsed_time(): times_list = [_ for _ in times_list if float(_) < 10.0 or float(_) > 14.0] if times_list: - test.fail("Unexpected time drift found: Detail: '%s' \n timeslist: %s" - % (output, times_list)) + test.fail( + f"Unexpected time drift found: Detail: '{output}' \n timeslist: {times_list}" + ) - error_context.context("Sync the host system time with ntp server", - test.log.info) + error_context.context("Sync the host system time with ntp server", test.log.info) ntp_sync_cmd = params.get("ntp_sync_cmd") process.system(ntp_sync_cmd, shell=True) @@ -57,40 +53,37 @@ def verify_elapsed_time(): timeout = int(params.get("login_timeout", 360)) session = vm.wait_for_login(timeout=timeout) - error_context.context("Check the clock source currently used on guest", - test.log.info) + error_context.context( + "Check the clock source currently used on guest", test.log.info + ) cmd = "cat /sys/devices/system/clocksource/" cmd += "clocksource0/current_clocksource" if "kvm-clock" not in session.cmd(cmd): grub_file = params.get("grub_file", "/boot/grub2/grub.cfg") - if "clocksource=" not in session.cmd("cat %s" % grub_file): + if "clocksource=" not in session.cmd(f"cat {grub_file}"): test.fail("Guest didn't use 'kvm-clock' clocksource") error_context.context("Shutdown guest") vm.destroy() env.unregister_vm(vm.name) - error_context.context("Update guest kernel cli to kvm-clock", - test.log.info) - image_filename = storage.get_image_filename(params, - data_dir.get_data_dir()) - kernel_cfg_pattern = params.get("kernel_cfg_pos_reg", - r".*vmlinuz-\d+.*") + error_context.context("Update guest kernel cli to kvm-clock", test.log.info) + image_filename = storage.get_image_filename(params, data_dir.get_data_dir()) + kernel_cfg_pattern = params.get("kernel_cfg_pos_reg", r".*vmlinuz-\d+.*") disk_obj = utils_disk.GuestFSModiDisk(image_filename) kernel_cfg_original = disk_obj.read_file(grub_file) try: - test.log.warn("Update the first kernel entry to kvm-clock only") - kernel_cfg = 
re.findall(kernel_cfg_pattern, - kernel_cfg_original)[0] + test.log.warning("Update the first kernel entry to kvm-clock only") + kernel_cfg = re.findall(kernel_cfg_pattern, kernel_cfg_original)[0] except IndexError as detail: - test.error("Couldn't find the kernel config, regex" - " pattern is '%s', detail: '%s'" % - (kernel_cfg_pattern, detail)) + test.error( + "Couldn't find the kernel config, regex" + f" pattern is '{kernel_cfg_pattern}', detail: '{detail}'" + ) if "clocksource=" in kernel_cfg: kernel_cfg_new = re.sub(r"clocksource=[a-z\- ]+", " ", kernel_cfg) - disk_obj.replace_image_file_content(grub_file, kernel_cfg, - kernel_cfg_new) + disk_obj.replace_image_file_content(grub_file, kernel_cfg, kernel_cfg_new) error_context.context("Boot the guest", test.log.info) vm_name = params["main_vm"] @@ -104,8 +97,10 @@ def verify_elapsed_time(): error_context.context("Sync time from guest to ntpserver", test.log.info) session.cmd(ntp_sync_cmd, timeout=timeout) - error_context.context("Sleep a while and check the time drift on guest" - " (without any pinned vcpu)", test.log.info) + error_context.context( + "Sleep a while and check the time drift on guest" " (without any pinned vcpu)", + test.log.info, + ) verify_elapsed_time() error_context.context("Pin every vcpu to physical cpu", test.log.info) @@ -117,13 +112,18 @@ def verify_elapsed_time(): test.cancel("There isn't enough physical cpu to pin all the vcpus") check_one_cpu_pinned = False for vcpu, pcpu in cpu_pin_list: - process.system("taskset -p -c %s %s" % (pcpu, vcpu)) + process.system(f"taskset -p -c {pcpu} {vcpu}") if not check_one_cpu_pinned: - error_context.context("Sleep a while and check the time drift on" - "guest (with one pinned vcpu)", test.log.info) + error_context.context( + "Sleep a while and check the time drift on" + "guest (with one pinned vcpu)", + test.log.info, + ) verify_elapsed_time() check_one_cpu_pinned = True - error_context.context("Sleep a while and check the time drift on" - "guest (with all pinned vcpus)", test.log.info) + error_context.context( + "Sleep a while and check the time drift on" "guest (with all pinned vcpus)", + test.log.info, + ) verify_elapsed_time() diff --git a/qemu/tests/timerdevice_host_time_back.py b/qemu/tests/timerdevice_host_time_back.py index 2a14a0c449..44ded56f68 100644 --- a/qemu/tests/timerdevice_host_time_back.py +++ b/qemu/tests/timerdevice_host_time_back.py @@ -2,7 +2,6 @@ import time from avocado.utils import process - from virttest import error_context @@ -36,14 +35,17 @@ def run(test, params, env): error_context.context("Check time difference between host and guest", test.log.info) guest_timestr_ = session.cmd_output(epoch_time_cmd, timeout=120) host_timestr_ = process.run(epoch_time_cmd, shell=True).stdout_text - host_epoch_time_, guest_epoch_time_ = map(lambda x: re.findall(r"epoch:\s+(\d+)", x)[0], - [host_timestr_, guest_timestr_]) + host_epoch_time_, guest_epoch_time_ = map( + lambda x: re.findall(r"epoch:\s+(\d+)", x)[0], [host_timestr_, guest_timestr_] + ) real_difference_ = abs(int(host_epoch_time_) - int(guest_epoch_time_)) if real_difference_ > tolerance: - test.error("Unexpected timedrift between host and guest, host time: %s," - "guest time: %s" % (host_epoch_time_, guest_epoch_time_)) + test.error( + f"Unexpected timedrift between host and guest, host time: {host_epoch_time_}," + f"guest time: {guest_epoch_time_}" + ) - error_context.context("Set host system time back %s s" % seconds_to_back) + error_context.context(f"Set host system time back {seconds_to_back} 
s") process.system_output(set_host_time_back_cmd) time.sleep(10) @@ -51,20 +53,27 @@ def run(test, params, env): vm.reboot(serial=True) session = vm.wait_for_serial_login() - error_context.context("Check time difference between host and guest", test.log.info) + error_context.context( + "Check time difference between host and guest", test.log.info + ) try: guest_timestr = session.cmd_output(epoch_time_cmd, timeout=120) session.close() except Exception: test.error("Guest error after set host system time back") host_timestr = process.run(epoch_time_cmd, shell=True).stdout_text - host_epoch_time, guest_epoch_time = map(lambda x: re.findall(r"epoch:\s+(\d+)", x)[0], - [host_timestr, guest_timestr]) + host_epoch_time, guest_epoch_time = map( + lambda x: re.findall(r"epoch:\s+(\d+)", x)[0], [host_timestr, guest_timestr] + ) real_difference = abs(int(host_epoch_time) - int(guest_epoch_time)) if abs(real_difference - time_difference) >= tolerance: - test.fail("Unexpected timedrift between host and guest, host time: %s," - "guest time: %s" % (host_epoch_time, guest_epoch_time)) + test.fail( + f"Unexpected timedrift between host and guest, host time: {host_epoch_time}," + f"guest time: {guest_epoch_time}" + ) finally: time.sleep(10) - error_context.context("Sync host system time with ntpserver finally", test.log.info) + error_context.context( + "Sync host system time with ntpserver finally", test.log.info + ) process.system(clock_sync_command, shell=True) diff --git a/qemu/tests/timerdevice_kvmclock_newer_msrs_support.py b/qemu/tests/timerdevice_kvmclock_newer_msrs_support.py index a5e8e94fac..74afb4e96a 100644 --- a/qemu/tests/timerdevice_kvmclock_newer_msrs_support.py +++ b/qemu/tests/timerdevice_kvmclock_newer_msrs_support.py @@ -19,14 +19,14 @@ def run(test, params, env): msrs = str(params["msrs"]).split() dmesg = str(session.cmd_output("dmesg")) - msrs_catch_re = params.get("msrs_catch_re", - r"kvm-clock: Using msrs (\w+) and (\w+)") - current_msrs = re.search(r"%s" % msrs_catch_re, dmesg, re.M | re.I) + msrs_catch_re = params.get( + "msrs_catch_re", r"kvm-clock: Using msrs (\w+) and (\w+)" + ) + current_msrs = re.search(rf"{msrs_catch_re}", dmesg, re.M | re.I) if current_msrs: current_msrs = set(current_msrs.groups()) if current_msrs != set(msrs): - test.fail("Except msrs (%s), " % msrs + - "got (%s)" % current_msrs) + test.fail(f"Except msrs ({msrs}), " + f"got ({current_msrs})") else: test.log.debug(dmesg) test.fail("No newer msr available for kvm-clock") diff --git a/qemu/tests/timerdevice_time_jump_check.py b/qemu/tests/timerdevice_time_jump_check.py index 317bf4dd7e..98a993416f 100644 --- a/qemu/tests/timerdevice_time_jump_check.py +++ b/qemu/tests/timerdevice_time_jump_check.py @@ -1,10 +1,8 @@ import time +from avocado.utils import cpu, process from virttest import error_context -from avocado.utils import process -from avocado.utils import cpu - @error_context.context_aware def run(test, params, env): @@ -23,8 +21,9 @@ def run(test, params, env): vm = env.get_vm(params["main_vm"]) session = vm.wait_for_login() - error_context.context("Check the clock source currently used on guest", - test.log.info) + error_context.context( + "Check the clock source currently used on guest", test.log.info + ) cmd = "cat /sys/devices/system/clocksource/" cmd += "clocksource0/current_clocksource" test.log.info("%s is current clocksource.", session.cmd_output(cmd)) @@ -39,7 +38,7 @@ def run(test, params, env): cpu_pin_list = list(zip(vm.vcpu_threads, host_cpu_list)) for vcpu, pcpu in cpu_pin_list: - 
process.system("taskset -p -c %s %s" % (pcpu, vcpu)) + process.system(f"taskset -p -c {pcpu} {vcpu}") check_cmd = params["check_cmd"] output = str(session.cmd_output(check_cmd)).splitlines() @@ -52,7 +51,7 @@ def run(test, params, env): time_list.append(etime) for idx, _ in enumerate(time_list): if idx < len(time_list) - 1: - if _ == time_list[idx+1] or (_ + 1) == time_list[idx+1]: + if _ == time_list[idx + 1] or (_ + 1) == time_list[idx + 1]: continue else: test.fail("Test fail, time jumps backward or forward on guest") diff --git a/qemu/tests/timerdevice_tsc_enable.py b/qemu/tests/timerdevice_tsc_enable.py index c83ec67ec6..8f82a0b9ab 100644 --- a/qemu/tests/timerdevice_tsc_enable.py +++ b/qemu/tests/timerdevice_tsc_enable.py @@ -30,11 +30,13 @@ def run(test, params, env): expect_tsc_flag = params["expect_tsc_flag"] if expect_cur_clk not in current_clksrc: - test.fail("Current clocksource is %s, the expected is %s." % - (current_clksrc, expect_cur_clk)) + test.fail( + f"Current clocksource is {current_clksrc}, the expected is {expect_cur_clk}." + ) if tsc_flag: - test.fail("Can not get expected flag: %s." % expect_tsc_flag) + test.fail(f"Can not get expected flag: {expect_tsc_flag}.") if expect_avl_clk not in avl_clksrc: - test.fail("Available clocksources are %s, the exected are %s." - % (avl_clksrc, expect_avl_clk)) + test.fail( + f"Available clocksources are {avl_clksrc}, the exected are {expect_avl_clk}." + ) diff --git a/qemu/tests/timerdevice_tscsync_change_host_clksource.py b/qemu/tests/timerdevice_tscsync_change_host_clksource.py index 3fdb2a8415..325da66898 100644 --- a/qemu/tests/timerdevice_tscsync_change_host_clksource.py +++ b/qemu/tests/timerdevice_tscsync_change_host_clksource.py @@ -2,8 +2,7 @@ import re from avocado.utils import process -from virttest import data_dir -from virttest import error_context +from virttest import data_dir, error_context @error_context.context_aware @@ -24,8 +23,7 @@ def run(test, params, env): :param params: Dictionary with test parameters. :param env: Dictionary with the test environment. """ - error_context.context("Check for an appropriate clocksource on host", - test.log.info) + error_context.context("Check for an appropriate clocksource on host", test.log.info) host_cmd = "cat /sys/devices/system/clocksource/" host_cmd += "clocksource0/current_clocksource" if "tsc" not in process.getoutput(host_cmd): @@ -41,12 +39,13 @@ def run(test, params, env): error_context.context("Check the guest is using vsyscall", test.log.info) date_cmd = "strace date 2>&1|egrep 'clock_gettime|gettimeofday'|wc -l" output = session.cmd(date_cmd) - if '0' not in output: - test.fail("Failed to check vsyscall. Output: '%s'" % output) + if "0" not in output: + test.fail(f"Failed to check vsyscall. Output: '{output}'") error_context.context("Copy time-warp-test.c to guest", test.log.info) - src_file_name = os.path.join(data_dir.get_deps_dir(), "tsc_sync", - "time-warp-test.c") + src_file_name = os.path.join( + data_dir.get_deps_dir(), "tsc_sync", "time-warp-test.c" + ) vm.copy_files_to(src_file_name, "/tmp") error_context.context("Compile the time-warp-test.c", test.log.info) @@ -65,13 +64,14 @@ def run(test, params, env): re_str = r"fail:(\d+).*?fail:(\d+).*fail:(\d+)" fail_cnt = re.findall(re_str, output) if not fail_cnt: - test.error("Could not get correct test output. Output: '%s'" % output) + test.error(f"Could not get correct test output. 
Output: '{output}'") - tsc_cnt, tod_cnt, clk_cnt = [int(_) for _ in fail_cnt[-1]] + tsc_cnt, tod_cnt, clk_cnt = (int(_) for _ in fail_cnt[-1]) if tsc_cnt or tod_cnt or clk_cnt: msg = output.splitlines()[-5:] - test.fail("Get error when running time-warp-test." - " Output (last 5 lines): '%s'" % msg) + test.fail( + "Get error when running time-warp-test." f" Output (last 5 lines): '{msg}'" + ) try: error_context.context("Switch host to hpet clocksource", test.log.info) @@ -79,28 +79,29 @@ def run(test, params, env): cmd += "clocksource0/current_clocksource" process.system(cmd, shell=True) - error_context.context("Run time-warp-test after change the host" - " clock source", test.log.info) + error_context.context( + "Run time-warp-test after change the host" " clock source", test.log.info + ) cmd = "$(sleep %d; pkill time-warp-test) &" session.sendline(cmd % test_run_timeout) cmd = "/tmp/time-warp-test" - output = session.cmd_status_output(cmd, - timeout=(test_run_timeout + 60))[1] + output = session.cmd_status_output(cmd, timeout=(test_run_timeout + 60))[1] fail_cnt = re.findall(re_str, output) if not fail_cnt: - test.error("Could not get correct test output." - " Output: '%s'" % output) + test.error("Could not get correct test output." f" Output: '{output}'") - tsc_cnt, tod_cnt, clk_cnt = [int(_) for _ in fail_cnt[-1]] + tsc_cnt, tod_cnt, clk_cnt = (int(_) for _ in fail_cnt[-1]) if tsc_cnt or tod_cnt or clk_cnt: msg = output.splitlines()[-5:] - test.fail("Get error when running time-warp-test." - " Output (last 5 lines): '%s'" % msg) + test.fail( + "Get error when running time-warp-test." + f" Output (last 5 lines): '{msg}'" + ) output = session.cmd(date_cmd) if "1" not in output: - test.fail("Failed to check vsyscall. Output: '%s'" % output) + test.fail(f"Failed to check vsyscall. Output: '{output}'") finally: error_context.context("Restore host to tsc clocksource", test.log.info) cmd = "echo tsc > /sys/devices/system/clocksource/" @@ -108,5 +109,4 @@ def run(test, params, env): try: process.system(cmd, shell=True) except Exception as detail: - test.log.error("Failed to restore host clocksource." - "Detail: %s", detail) + test.log.error("Failed to restore host clocksource." "Detail: %s", detail) diff --git a/qemu/tests/timerdevice_tscsync_longtime.py b/qemu/tests/timerdevice_tscsync_longtime.py index 970bed1b98..d0989eb34c 100644 --- a/qemu/tests/timerdevice_tscsync_longtime.py +++ b/qemu/tests/timerdevice_tscsync_longtime.py @@ -2,8 +2,7 @@ import re from avocado.utils import process -from virttest import data_dir -from virttest import error_context +from virttest import data_dir, error_context @error_context.context_aware @@ -22,15 +21,13 @@ def run(test, params, env): :param params: Dictionary with test parameters. :param env: Dictionary with the test environment. 
""" - error_context.context("Check for an appropriate clocksource on host", - test.log.info) + error_context.context("Check for an appropriate clocksource on host", test.log.info) host_cmd = "cat /sys/devices/system/clocksource/" host_cmd += "clocksource0/current_clocksource" if "tsc" not in process.getoutput(host_cmd): test.cancel("Host must use 'tsc' clocksource") - error_context.context("Check host has more than one cpu socket", - test.log.info) + error_context.context("Check host has more than one cpu socket", test.log.info) host_socket_cnt_cmd = params["host_socket_cnt_cmd"] if process.system_output(host_socket_cnt_cmd, shell=True).strip() == "1": test.cancel("Host must have more than 1 socket") @@ -43,8 +40,9 @@ def run(test, params, env): session = vm.wait_for_login(timeout=timeout) error_context.context("Copy time-warp-test.c to guest", test.log.info) - src_file_name = os.path.join(data_dir.get_deps_dir(), "tsc_sync", - "time-warp-test.c") + src_file_name = os.path.join( + data_dir.get_deps_dir(), "tsc_sync", "time-warp-test.c" + ) vm.copy_files_to(src_file_name, "/tmp") error_context.context("Compile the time-warp-test.c", test.log.info) @@ -54,8 +52,7 @@ def run(test, params, env): cmd += " gcc -Wall -o time-warp-test time-warp-test.c -lrt" session.cmd(cmd) - error_context.context("Run time-warp-test for minimum 4 hours", - test.log.info) + error_context.context("Run time-warp-test for minimum 4 hours", test.log.info) test_run_timeout = int(params.get("test_run_timeout", 14400)) session.sendline("$(sleep %d; pkill time-warp-test) &" % test_run_timeout) cmd = "/tmp/time-warp-test" @@ -64,10 +61,11 @@ def run(test, params, env): re_str = r"fail:(\d+).*?fail:(\d+).*fail:(\d+)" fail_cnt = re.findall(re_str, output) if not fail_cnt: - test.error("Could not get correct test output. Output: '%s'" % output) + test.error(f"Could not get correct test output. Output: '{output}'") - tsc_cnt, tod_cnt, clk_cnt = [int(_) for _ in fail_cnt[-1]] + tsc_cnt, tod_cnt, clk_cnt = (int(_) for _ in fail_cnt[-1]) if tsc_cnt or tod_cnt or clk_cnt: msg = output.splitlines()[-5:] - test.fail("Get error when running time-warp-test." - " Output (last 5 lines): '%s'" % msg) + test.fail( + "Get error when running time-warp-test." f" Output (last 5 lines): '{msg}'" + ) diff --git a/qemu/tests/timerdevice_tscwrite.py b/qemu/tests/timerdevice_tscwrite.py index 676ad05e5a..04b5ba3298 100644 --- a/qemu/tests/timerdevice_tscwrite.py +++ b/qemu/tests/timerdevice_tscwrite.py @@ -1,8 +1,7 @@ import os from avocado.utils import process -from virttest import error_context -from virttest import data_dir +from virttest import data_dir, error_context @error_context.context_aware @@ -19,8 +18,7 @@ def run(test, params, env): :param params: Dictionary with test parameters. :param env: Dictionary with the test environment. 
""" - error_context.context("Check for an appropriate clocksource on host", - test.log.info) + error_context.context("Check for an appropriate clocksource on host", test.log.info) host_cmd = "cat /sys/devices/system/clocksource/" host_cmd += "clocksource0/current_clocksource" if "tsc" not in process.getoutput(host_cmd): @@ -33,16 +31,15 @@ def run(test, params, env): timeout = int(params.get("login_timeout", 360)) session = vm.wait_for_login(timeout=timeout) - error_context.context("Download and compile the newest msr-tools", - test.log.info) + error_context.context("Download and compile the newest msr-tools", test.log.info) tarball = params["tarball"] compile_cmd = params["compile_cmd"] msr_name = params["msr_name"] tarball = os.path.join(data_dir.get_deps_dir(), tarball) msr_dir = "/tmp/" vm.copy_files_to(tarball, msr_dir) - session.cmd("cd %s && tar -zxvf %s" % (msr_dir, os.path.basename(tarball))) - session.cmd("cd %s && %s" % (msr_name, compile_cmd)) + session.cmd(f"cd {msr_dir} && tar -zxvf {os.path.basename(tarball)}") + session.cmd(f"cd {msr_name} && {compile_cmd}") error_context.context("Execute cmd in guest", test.log.info) cmd = "dmesg -c > /dev/null" @@ -50,8 +47,8 @@ def run(test, params, env): date_cmd = "strace date 2>&1 | egrep 'clock_gettime|gettimeofday' | wc -l" output = session.cmd(date_cmd) - if '0' not in output: - test.fail("Test failed before run msr tools. Output: '%s'" % output) + if "0" not in output: + test.fail(f"Test failed before run msr tools. Output: '{output}'") msr_tools_cmd = params["msr_tools_cmd"] session.cmd(msr_tools_cmd) @@ -61,4 +58,4 @@ def run(test, params, env): output = session.cmd(date_cmd) if "1" not in output: - test.fail("Test failed after run msr tools. Output: '%s'" % output) + test.fail(f"Test failed after run msr tools. 
Output: '{output}'") diff --git a/qemu/tests/tpm_bind_luks.py b/qemu/tests/tpm_bind_luks.py index bc45b1dfcb..7e2f3b5433 100644 --- a/qemu/tests/tpm_bind_luks.py +++ b/qemu/tests/tpm_bind_luks.py @@ -1,8 +1,6 @@ import re -from virttest import error_context -from virttest import utils_disk -from virttest import utils_package +from virttest import error_context, utils_disk, utils_package @error_context.context_aware @@ -34,13 +32,10 @@ def mount_disk(file_func): """ def wrapper(*args, **kwargs): - if not utils_disk.is_mount( - mapper_dev, mount_path, session=session): + if not utils_disk.is_mount(mapper_dev, mount_path, session=session): test.log.info("Mount the LUKs FS...") - if not utils_disk.mount(mapper_dev, mount_path, - session=session): - test.error("Cannot mount %s to %s" % (mapper_dev, - mount_path)) + if not utils_disk.mount(mapper_dev, mount_path, session=session): + test.error(f"Cannot mount {mapper_dev} to {mount_path}") out = file_func(*args, **kwargs) test.log.info("Umount the LUKs FS...") if not utils_disk.umount(mapper_dev, mount_path, session=session): @@ -54,7 +49,8 @@ def get_md5sum(): Return the file's MD5 """ status, md5 = session.cmd_status_output( - "md5sum " + dd_file, print_func=test.log.info) + "md5sum " + dd_file, print_func=test.log.info + ) if status: test.error("Failed to get file's MD5") return md5.split()[0] @@ -76,9 +72,9 @@ def compare_md5sum(): md5_current = get_md5sum() if md5_current != md5_original: test.fail( - "File %s changed, the current md5(%s) is mismatch of the" - " original md5(%s)" % - (dd_file, md5_current, md5_original)) + f"File {dd_file} changed, the current md5({md5_current}) is mismatch of the" + f" original md5({md5_original})" + ) test.log.info("File's md5 matched, md5: %s", md5_current) @mount_disk @@ -86,16 +82,15 @@ def auto_boot_unlocking(): """ Steps to configure automatic unlocking at late boot stage """ - disk_uuid = session.cmd_output( - "blkid -s UUID -o value %s" % extra_disk).strip() - session.cmd('echo "%s UUID=%s none tpm2-device=auto" >> /etc/crypttab' - % (mapper_name, disk_uuid)) - session.cmd('echo "%s %s xfs defaults 0 0" >> /etc/fstab' % - (mapper_dev, mount_path)) - session.cmd('restorecon -Rv ' + mount_path) + disk_uuid = session.cmd_output(f"blkid -s UUID -o value {extra_disk}").strip() + session.cmd( + f'echo "{mapper_name} UUID={disk_uuid} none tpm2-device=auto" >> /etc/crypttab' + ) + session.cmd(f'echo "{mapper_dev} {mount_path} xfs defaults 0 0" >> /etc/fstab') + session.cmd("restorecon -Rv " + mount_path) s, o = session.cmd_status_output("mount -av") if s != 0: - test.fail("Mount format is incorrect:\n%s" % o) + test.fail(f"Mount format is incorrect:\n{o}") test.log.debug("The full mount list is:\n%s", o) session.cmd("systemctl enable clevis-luks-askpass.path") @@ -128,16 +123,17 @@ def auto_boot_unlocking(): test.log.info("Check if the extra disk is LUKs format") if session.cmd_status(cryptsetup_check_cmd + extra_disk) != 0: test.fail("The extra disk cannot be formatted to LUKs") - test.log.debug("The extra disk is formatted to LUKs:\n%s", - session.cmd_output("cryptsetup luksDump " + extra_disk)) + test.log.debug( + "The extra disk is formatted to LUKs:\n%s", + session.cmd_output("cryptsetup luksDump " + extra_disk), + ) error_context.context("Open the LUKs disk", test.log.info) session.cmd(cryptsetup_open_cmd % extra_disk) session.cmd("mkfs.xfs -f " + mapper_dev) test.log.info("A new xfs file system is created for %s", mapper_dev) - error_context.context("Mount the FS and create a random file", - 
test.log.info) + error_context.context("Mount the FS and create a random file", test.log.info) if session.cmd_status("test -d " + mount_path) != 0: session.cmd("mkdir -p " + mount_path) md5_original = create_random_file() @@ -145,25 +141,31 @@ def auto_boot_unlocking(): test.log.info("Reset TPM DA lockout counter before binding") session.cmd("tpm2_dictionarylockout --clear-lockout") - error_context.base_context("Bind %s using the TPM2 policy" % extra_disk, - test.log.info) + error_context.base_context( + f"Bind {extra_disk} using the TPM2 policy", test.log.info + ) session.cmd(clevis_bind_cmd % extra_disk) clevis_list = session.cmd_output(clevis_list_cmd + extra_disk) - if not re.search(r'tpm2 \S+%s' % pcr_policy, clevis_list, re.M): + if not re.search(rf"tpm2 \S+{pcr_policy}", clevis_list, re.M): test.fail("Failed to bind the disk with TPM2 policy via clevis") test.log.info("The LUKs device is bound to TPM:\n%s", clevis_list) - error_context.context("Open the LUKs device using clevis and check the md5" - " of the file", test.log.info) + error_context.context( + "Open the LUKs device using clevis and check the md5" " of the file", + test.log.info, + ) session.cmd(clevis_unlock_cmd % extra_disk) compare_md5sum() - error_context.context("Modify crypttab and fstab to enable automatic boot " - "unlocking", test.log.info) + error_context.context( + "Modify crypttab and fstab to enable automatic boot " "unlocking", test.log.info + ) auto_boot_unlocking() session.cmd(cryptsetup_close_cmd) - error_context.context("Reboot the guest to check if the operating system " - "can unlock the LUKs FS automatically") + error_context.context( + "Reboot the guest to check if the operating system " + "can unlock the LUKs FS automatically" + ) session = vm.reboot(session) compare_md5sum() diff --git a/qemu/tests/tpm_check_buffer_size.py b/qemu/tests/tpm_check_buffer_size.py index aca92dbd92..cc23d6f4b0 100644 --- a/qemu/tests/tpm_check_buffer_size.py +++ b/qemu/tests/tpm_check_buffer_size.py @@ -1,9 +1,7 @@ import os from shutil import rmtree -from avocado.utils import git -from avocado.utils import process - +from avocado.utils import git, process from virttest import error_context @@ -25,31 +23,34 @@ def run(test, params, env): libtpms_rpm = process.getoutput("rpm -q libtpms", shell=True).strip() libtpms_devel_rpm = libtpms_rpm.replace("libtpms", "libtpms-devel") - if process.system("rpm -q libtpms-devel", shell=True, - ignore_status=True) != 0: + if process.system("rpm -q libtpms-devel", shell=True, ignore_status=True) != 0: try: process.system( - "cd /home/ && brew download-build --rpm %s" % - libtpms_devel_rpm, shell=True) + f"cd /home/ && brew download-build --rpm {libtpms_devel_rpm}", + shell=True, + ) libtpms_devel_rpm = libtpms_devel_rpm + ".rpm" - process.system("rpm -i /home/%s" % libtpms_devel_rpm, shell=True) + process.system(f"rpm -i /home/{libtpms_devel_rpm}", shell=True) except Exception: test.cancel("libtpms-devel package installation failed.") repo_url = params["repo_url"] repo_dir = git.get_repo( - repo_url, destination_dir=os.path.join(test.tmpdir, "libtpms")) + repo_url, destination_dir=os.path.join(test.tmpdir, "libtpms") + ) try: - error_context.base_context( - "Build and execute test cases", test.log.info) + error_context.base_context("Build and execute test cases", test.log.info) for test_case in params.get("test_case").split(";"): test_case_o = test_case.split(".")[0] build_execute_cmd = params["build_execute_cmd"] % ( - test_case, test_case_o, test_case_o) - process.system("cd %s && 
%s" % - (repo_dir, build_execute_cmd), shell=True) + test_case, + test_case_o, + test_case_o, + ) + process.system(f"cd {repo_dir} && {build_execute_cmd}", shell=True) finally: - process.system("rm -f /home/%s*" % libtpms_devel_rpm, - shell=True, ignore_status=True) + process.system( + f"rm -f /home/{libtpms_devel_rpm}*", shell=True, ignore_status=True + ) rmtree(repo_dir, ignore_errors=True) diff --git a/qemu/tests/tpm_unattended_install.py b/qemu/tests/tpm_unattended_install.py index 984f9d8bbc..10152d87da 100644 --- a/qemu/tests/tpm_unattended_install.py +++ b/qemu/tests/tpm_unattended_install.py @@ -1,7 +1,6 @@ import re -from virttest import env_process -from virttest import error_context +from virttest import env_process, error_context from virttest.tests import unattended_install @@ -17,13 +16,14 @@ def run(test, params, env): :param params: Dictionary with the test parameters. :param env: Dictionary with test environment. """ - def search_keywords(patterns, string, flags=re.M, split_string=';'): + + def search_keywords(patterns, string, flags=re.M, split_string=";"): test.log.info(string) for pattern in patterns.split(split_string): - if not re.search(r'%s' % pattern, string, flags): - test.fail('No Found pattern "%s" from "%s".' % (pattern, string)) - if re.search(r'error', string, re.M | re.A): - test.error('Found errors from "%s".' % string) + if not re.search(rf"{pattern}", string, flags): + test.fail(f'No Found pattern "{pattern}" from "{string}".') + if re.search(r"error", string, re.M | re.A): + test.error(f'Found errors from "{string}".') unattended_install.run(test, params, env) @@ -31,17 +31,17 @@ def search_keywords(patterns, string, flags=re.M, split_string=';'): if vm: vm.destroy() - ovmf_vars_secboot_fd = params.get('ovmf_vars_secboot_fd') + ovmf_vars_secboot_fd = params.get("ovmf_vars_secboot_fd") if ovmf_vars_secboot_fd: - params['ovmf_vars_filename'] = ovmf_vars_secboot_fd + params["ovmf_vars_filename"] = ovmf_vars_secboot_fd - params['start_vm'] = 'yes' - params['cdroms'] = params.get('default_cdrom', '') - params['force_create_image'] = 'no' - params['kernel'] = '' - params['initrd'] = '' - params['kernel_params'] = '' - params['boot_once'] = 'c' + params["start_vm"] = "yes" + params["cdroms"] = params.get("default_cdrom", "") + params["force_create_image"] = "no" + params["kernel"] = "" + params["initrd"] = "" + params["kernel_params"] = "" + params["boot_once"] = "c" env_process.preprocess_vm(test, params, env, params["main_vm"]) vm = env.get_vm(params["main_vm"]) @@ -49,16 +49,17 @@ def search_keywords(patterns, string, flags=re.M, split_string=';'): session = vm.wait_for_login() error_context.context("Check TPM info inside guest.", test.log.info) - for name in params.get('check_cmd_names').split(): + for name in params.get("check_cmd_names").split(): if name: - pattern = params.get('pattern_output_%s' % name) - cmd = params.get('cmd_%s' % name) + pattern = params.get(f"pattern_output_{name}") + cmd = params.get(f"cmd_{name}") search_keywords(pattern, session.cmd(cmd)) - cmd_check_secure_boot_enabled = params.get('cmd_check_secure_boot_enabled') + cmd_check_secure_boot_enabled = params.get("cmd_check_secure_boot_enabled") if cmd_check_secure_boot_enabled: - error_context.context("Check whether secure boot enabled inside guest.", - test.log.info) + error_context.context( + "Check whether secure boot enabled inside guest.", test.log.info + ) status, output = session.cmd_status_output(cmd_check_secure_boot_enabled) if status: - test.fail('Secure boot is not 
enabled, output: %s' % output) + test.fail(f"Secure boot is not enabled, output: {output}") diff --git a/qemu/tests/tpm_verify_device.py b/qemu/tests/tpm_verify_device.py index dfd85a423e..2bd6f752ac 100644 --- a/qemu/tests/tpm_verify_device.py +++ b/qemu/tests/tpm_verify_device.py @@ -1,9 +1,7 @@ import re from avocado.utils import process - -from virttest import error_context -from virttest import env_process +from virttest import env_process, error_context @error_context.context_aware @@ -20,57 +18,61 @@ def run(test, params, env): :param env: Dictionary with test environment. """ - def search_keywords(patterns, string, flags=re.M, split_string=';'): + def search_keywords(patterns, string, flags=re.M, split_string=";"): test.log.info(string) for pattern in patterns.split(split_string): - if not re.search(r'%s' % pattern, string, flags): - test.fail('No Found pattern "%s" from "%s".' % (pattern, string)) - if re.search(r'error', string, re.M | re.A): - test.error('Found errors from "%s".' % string) + if not re.search(rf"{pattern}", string, flags): + test.fail(f'No Found pattern "{pattern}" from "{string}".') + if re.search(r"error", string, re.M | re.A): + test.error(f'Found errors from "{string}".') - cmd_get_tpm_ver = params.get('cmd_get_tpm_version') - cmd_check_tpm_dev = params.get('cmd_check_tpm_device') + cmd_get_tpm_ver = params.get("cmd_get_tpm_version") + cmd_check_tpm_dev = params.get("cmd_check_tpm_device") if cmd_check_tpm_dev: status, output = process.getstatusoutput(cmd_check_tpm_dev) if status: - test.cancel('No found TPM device on host, output: %s' % output) + test.cancel(f"No found TPM device on host, output: {output}") if cmd_get_tpm_ver: - actual_tpm_ver = process.system_output(cmd_get_tpm_ver, - shell=True).decode().strip() - test.log.info('The TPM device version is %s.', actual_tpm_ver) - required_tmp_ver = params.get('required_tmp_version') + actual_tpm_ver = ( + process.system_output(cmd_get_tpm_ver, shell=True).decode().strip() + ) + test.log.info("The TPM device version is %s.", actual_tpm_ver) + required_tmp_ver = params.get("required_tmp_version") if actual_tpm_ver != required_tmp_ver: - test.cancel('Cancel to test due to require TPM device version %s, ' - 'actual version: %s' % (required_tmp_ver, actual_tpm_ver)) + test.cancel( + f"Cancel to test due to require TPM device version {required_tmp_ver}, " + f"actual version: {actual_tpm_ver}" + ) params["start_vm"] = "yes" env_process.preprocess_vm(test, params, env, params["main_vm"]) - for _ in range(params.get_numeric('repeat_times', 1)): + for _ in range(params.get_numeric("repeat_times", 1)): sessions = [] vms = env.get_all_vms() for vm in vms: vm.verify_alive() sessions.append(vm.wait_for_login()) for vm, session in zip(vms, sessions): - error_context.context("%s: Check TPM info inside guest." % vm.name, - test.log.info) - for name in params.get('check_cmd_names').split(): + error_context.context( + f"{vm.name}: Check TPM info inside guest.", test.log.info + ) + for name in params.get("check_cmd_names").split(): if name: - pattern = params.get('pattern_output_%s' % name) - cmd = params.get('cmd_%s' % name) + pattern = params.get(f"pattern_output_{name}") + cmd = params.get(f"cmd_{name}") search_keywords(pattern, session.cmd(cmd)) reboot_method = params.get("reboot_method") if reboot_method: - error_context.context("Reboot guest '%s'." 
% vm.name, test.log.info) + error_context.context(f"Reboot guest '{vm.name}'.", test.log.info) vm.reboot(session, reboot_method).close() continue error_context.context("Check TPM info on host.", test.log.info) - cmd_check_log = params.get('cmd_check_log') + cmd_check_log = params.get("cmd_check_log") if cmd_check_log: output = process.system_output(cmd_check_log).decode() - pattern = params.get('pattern_check_log') + pattern = params.get("pattern_check_log") search_keywords(pattern, output) session.close() diff --git a/qemu/tests/tpm_with_bitlocker.py b/qemu/tests/tpm_with_bitlocker.py index 61724e3f05..45180d8af8 100644 --- a/qemu/tests/tpm_with_bitlocker.py +++ b/qemu/tests/tpm_with_bitlocker.py @@ -1,7 +1,6 @@ import re -from virttest import error_context -from virttest import utils_misc +from virttest import error_context, utils_misc @error_context.context_aware @@ -23,27 +22,33 @@ def run(test, params, env): vm.verify_alive() session = vm.wait_for_login() - cmd_install_bitlocker = params.get('cmd_install_bitlocker') + cmd_install_bitlocker = params.get("cmd_install_bitlocker") if cmd_install_bitlocker: error_context.context("Install BitLocker inside guest", test.log.info) session.cmd(cmd_install_bitlocker, 360) session = vm.reboot(session, timeout=480) - error_context.context("Prepares hard drive for BitLocker Drive " - "Encryption inside guest", test.log.info) - cmd_bdehdcfg = session.cmd_output(params.get('cmd_bdehdcfg')) - if re.search(r'error', cmd_bdehdcfg, re.M | re.A): - test.fail('Found error message.') - - error_context.context("Encrypts the volume and turns BitLocker " - "protection on inside guest", test.log.info) - session.cmd(params.get('cmd_manage_bde_on')) + error_context.context( + "Prepares hard drive for BitLocker Drive " "Encryption inside guest", + test.log.info, + ) + cmd_bdehdcfg = session.cmd_output(params.get("cmd_bdehdcfg")) + if re.search(r"error", cmd_bdehdcfg, re.M | re.A): + test.fail("Found error message.") + + error_context.context( + "Encrypts the volume and turns BitLocker " "protection on inside guest", + test.log.info, + ) + session.cmd(params.get("cmd_manage_bde_on")) session = vm.reboot(session, timeout=480) - error_context.context("Wait until Percentage Encrypted finished", - test.log.info) - finished_keywords = params.get('finished_keywords') - cmd_manage_bde_status = params.get('cmd_manage_bde_status') - if not utils_misc.wait_for(lambda: finished_keywords in session.cmd( - cmd_manage_bde_status, 300), step=5, timeout=600): - test.fail('Failed to encrypt the volume.') + error_context.context("Wait until Percentage Encrypted finished", test.log.info) + finished_keywords = params.get("finished_keywords") + cmd_manage_bde_status = params.get("cmd_manage_bde_status") + if not utils_misc.wait_for( + lambda: finished_keywords in session.cmd(cmd_manage_bde_status, 300), + step=5, + timeout=600, + ): + test.fail("Failed to encrypt the volume.") diff --git a/qemu/tests/tpm_with_check_aavmf.py b/qemu/tests/tpm_with_check_aavmf.py index 92e3d0a0ec..10cdc39f12 100644 --- a/qemu/tests/tpm_with_check_aavmf.py +++ b/qemu/tests/tpm_with_check_aavmf.py @@ -1,8 +1,5 @@ -from aexpect.exceptions import ExpectProcessTerminatedError -from aexpect.exceptions import ExpectTimeoutError - -from virttest import error_context -from virttest import utils_package +from aexpect.exceptions import ExpectProcessTerminatedError, ExpectTimeoutError +from virttest import error_context, utils_package @error_context.context_aware @@ -26,16 +23,16 @@ def run(test, params, env): vm = 
env.get_vm(params["main_vm"]) vm.verify_alive() - error_context.context("Check TPM pattern in the serial output", - test.log.info) + error_context.context("Check TPM pattern in the serial output", test.log.info) try: vm.serial_console.read_until_output_matches(tpm_pattern) except (ExpectProcessTerminatedError, ExpectTimeoutError) as err: test.log.error(err) test.fail("Failed to get the expected tpm pattern.") - error_context.context("Execute tpm2_selftest command for a basic check", - test.log.info) + error_context.context( + "Execute tpm2_selftest command for a basic check", test.log.info + ) session = vm.wait_for_login() if not utils_package.package_install("tpm2-tools", session): test.error("Cannot install tpm2-tools to execute tpm2_selftest") diff --git a/qemu/tests/tpm_with_hlk.py b/qemu/tests/tpm_with_hlk.py index 0f57a4c0b5..41e06be6c2 100644 --- a/qemu/tests/tpm_with_hlk.py +++ b/qemu/tests/tpm_with_hlk.py @@ -1,8 +1,10 @@ from virttest import env_process - -from provider.win_hlk_suite import HLKServer -from provider.win_hlk_suite import install_hlk_client, download_hlk_server_image +from provider.win_hlk_suite import ( + HLKServer, + download_hlk_server_image, + install_hlk_client, +) def run(test, params, env): @@ -19,13 +21,13 @@ def run(test, params, env): :param params: Dictionary with the test parameters. :param env: Dictionary with test environment. """ - server_img = download_hlk_server_image(params, params.get('hlk_server_image_uri')) - vm_name_hlk_server = params.get('vm_name_hlk_server') + server_img = download_hlk_server_image(params, params.get("hlk_server_image_uri")) + vm_name_hlk_server = params.get("vm_name_hlk_server") - params["images_%s" % vm_name_hlk_server] = "image0" - params["image_name_image0_%s" % vm_name_hlk_server] = server_img['image_name'] - params["image_size_image0_%s" % vm_name_hlk_server] = server_img['image_size'] - params["image_format_image0_%s" % vm_name_hlk_server] = server_img['image_format'] + params[f"images_{vm_name_hlk_server}"] = "image0" + params[f"image_name_image0_{vm_name_hlk_server}"] = server_img["image_name"] + params[f"image_size_image0_{vm_name_hlk_server}"] = server_img["image_size"] + params[f"image_format_image0_{vm_name_hlk_server}"] = server_img["image_format"] params["start_vm"] = "yes" params["not_preprocess"] = "no" @@ -39,13 +41,14 @@ def run(test, params, env): else: vm_client = vm - install_hlk_client(vm_client, vm_server) # pylint: disable=E0606 + install_hlk_client(vm_client, vm_server) # pylint: disable=E0606 - pool_name = params.get('hlk_pool_name') - project_name = params.get('hlk_project_name') - target_name = params.get('hlk_target_name') - tests_name = [name for name in params.get('hlk_target_tests_name').split(';')] + pool_name = params.get("hlk_pool_name") + project_name = params.get("hlk_project_name") + target_name = params.get("hlk_target_name") + tests_name = [name for name in params.get("hlk_target_tests_name").split(";")] hlk_server = HLKServer(test, vm_server) - hlk_server.simple_run_test(pool_name, project_name, target_name, - tests_name, timeout=24000, step=600) + hlk_server.simple_run_test( + pool_name, project_name, target_name, tests_name, timeout=24000, step=600 + ) hlk_server.close() diff --git a/qemu/tests/tpm_with_tss.py b/qemu/tests/tpm_with_tss.py index 81252e6950..7560acf55e 100644 --- a/qemu/tests/tpm_with_tss.py +++ b/qemu/tests/tpm_with_tss.py @@ -1,7 +1,6 @@ import re -from virttest import error_context -from virttest import utils_package +from virttest import error_context, 
utils_package @error_context.context_aware @@ -34,7 +33,7 @@ def run(test, params, env): session = vm.wait_for_login() error_context.context("Check if TPM2 device exists", test.log.info) - if session.cmd_status("test -c %s" % tpm_device) != 0: + if session.cmd_status(f"test -c {tpm_device}") != 0: test.error("Cannot find the TPM2 device inside guest") test.log.info("Install required packages in VM") @@ -44,21 +43,18 @@ def run(test, params, env): try: error_context.context("Compile the tpm2-tss test suite", test.log.info) test.log.info("Clone the tpm2-tss repo...") - session.cmd("git clone --depth=1 %s %s" % (tpm2_tss_repo, - tpm2_tss_path)) + session.cmd(f"git clone --depth=1 {tpm2_tss_repo} {tpm2_tss_path}") test.log.info("Configure tpm2-tss...") session.cmd(configure_cmd, timeout=180) error_context.context("Check test result of tpm2-tss", test.log.info) status, output = session.cmd_status_output(make_check_cmd, timeout=600) if status != 0: - test.fail("tpm2-tss test suite execution failed, output is:\n%s" - % output) + test.fail(f"tpm2-tss test suite execution failed, output is:\n{output}") result = session.cmd_output(check_log_cmd) - t_total, t_pass = re.findall(r"^# (?:TOTAL|PASS): +(\d+)$", result, - re.M) + t_total, t_pass = re.findall(r"^# (?:TOTAL|PASS): +(\d+)$", result, re.M) if t_total != t_pass: test.fail("The count of TOTAL and PASS do not match") finally: - session.cmd("rm -rf %s" % tpm2_tss_path, ignore_all_errors=True) + session.cmd(f"rm -rf {tpm2_tss_path}", ignore_all_errors=True) session.close() diff --git a/qemu/tests/trace_cmd_boot.py b/qemu/tests/trace_cmd_boot.py index 5fa9bb772f..63eb41f885 100644 --- a/qemu/tests/trace_cmd_boot.py +++ b/qemu/tests/trace_cmd_boot.py @@ -1,10 +1,9 @@ -import re import os +import re import signal from avocado.utils import process -from virttest import utils_misc -from virttest import error_context +from virttest import error_context, utils_misc @error_context.context_aware @@ -28,8 +27,7 @@ def run(test, params, env): """ def find_trace_cmd(): - if process.system("ps -a | grep trace-cmd", ignore_status=True, - shell=True): + if process.system("ps -a | grep trace-cmd", ignore_status=True, shell=True): return False else: return True @@ -46,32 +44,29 @@ def find_trace_cmd(): stress_cmd = params.get("stress_cmd", "stress --vm 4 --vm-bytes 1000M") trace_o = os.path.join(test.debugdir, "trace.dat") - trace_cmd = "trace-cmd record -b 20000 -e kvm -o %s" % trace_o + trace_cmd = f"trace-cmd record -b 20000 -e kvm -o {trace_o}" trace_cmd = params.get("trace_cmd", trace_cmd) re_trace = params.get("re_trace", "kvm_inj_exception: #GP") report_file = os.path.join(test.debugdir, "trace.txt") - trace_report_cmd = "trace-cmd report -i %s > %s " % (trace_o, report_file) + trace_report_cmd = f"trace-cmd report -i {trace_o} > {report_file} " try: error_context.context("Run stress tool on host.", test.log.info) stress_job = utils_misc.BgJob(stress_cmd) # Reboot the VM for num in range(int(params.get("reboot_count", 1))): - error_context.context("Reboot guest '%s'. Repeat %d" % - (vm.name, num + 1), test.log.info) + error_context.context( + "Reboot guest '%s'. Repeat %d" % (vm.name, num + 1), test.log.info + ) trace_job = utils_misc.BgJob(trace_cmd) try: - session = vm.reboot(session, - reboot_method, - 0, - timeout) + session = vm.reboot(session, reboot_method, 0, timeout) except Exception: txt = "stop the trace-cmd and generate the readable report." 
error_context.context(txt, test.log.info) os.kill(trace_job.sp.pid, signal.SIGINT) - if not utils_misc.wait_for(lambda: not find_trace_cmd(), - 120, 60, 3): - test.log.warn("trace-cmd could not finish after 120s.") + if not utils_misc.wait_for(lambda: not find_trace_cmd(), 120, 60, 3): + test.log.warning("trace-cmd could not finish after 120s.") trace_job = None process.system(trace_report_cmd, shell=True) with open(report_file) as report_f: @@ -79,17 +74,16 @@ def find_trace_cmd(): txt = "Check whether the trace.txt includes the error log." error_context.context(txt, test.log.info) if re.findall(re_trace, report_txt, re.S): - msg = "Found %s in trace log %s" % (re_trace, report_file) + msg = f"Found {re_trace} in trace log {report_file}" test.fail(msg) else: txt = "stop the trace-cmd and remove the trace.dat file." error_context.context(txt, test.log.info) os.kill(trace_job.sp.pid, signal.SIGINT) - if not utils_misc.wait_for(lambda: not find_trace_cmd(), - 120, 60, 3): - test.log.warn("trace-cmd could not finish after 120s.") + if not utils_misc.wait_for(lambda: not find_trace_cmd(), 120, 60, 3): + test.log.warning("trace-cmd could not finish after 120s.") trace_job = None - process.system("rm -rf %s" % trace_o, timeout=60) + process.system(f"rm -rf {trace_o}", timeout=60) finally: if session: session.close() diff --git a/qemu/tests/tracing_exception_injection.py b/qemu/tests/tracing_exception_injection.py index 8261ab5f4a..1df4429977 100644 --- a/qemu/tests/tracing_exception_injection.py +++ b/qemu/tests/tracing_exception_injection.py @@ -1,4 +1,4 @@ -from avocado.utils import process, path +from avocado.utils import path, process from virttest import error_context @@ -21,18 +21,19 @@ def run(test, params, env): error_context.context("Check that kvm_stat works in host", test.log.info) kvm_stat_bin = path.find_command("kvm_stat") - check_cmd = "%s -1 -f exits" % kvm_stat_bin + check_cmd = f"{kvm_stat_bin} -1 -f exits" host_cmd_output = process.system_output(check_cmd) if host_cmd_output: - if host_cmd_output.split()[1] == '0': - test.fail("kvm_stat did not provide the expected " - "output: %s" % host_cmd_output) + if host_cmd_output.split()[1] == "0": + test.fail( + "kvm_stat did not provide the expected " f"output: {host_cmd_output}" + ) test.log.info("kvm_stat provided the expected output") test.log.info("Host cmd output '%s'", host_cmd_output) error_context.context( - "Check that host allows tracing of exception injection in KVM", - test.log.info) + "Check that host allows tracing of exception injection in KVM", test.log.info + ) exec_cmd = "grep kvm:kvm_inj_exception " exec_cmd += " /sys/kernel/debug/tracing/available_events" inj_check_cmd = params.get("injection_check_cmd", exec_cmd) diff --git a/qemu/tests/transfer_file_over_ipv6.py b/qemu/tests/transfer_file_over_ipv6.py index d28bf33472..9995673030 100644 --- a/qemu/tests/transfer_file_over_ipv6.py +++ b/qemu/tests/transfer_file_over_ipv6.py @@ -1,13 +1,9 @@ import os -import re import random - +import re from avocado.utils import crypto, process -from virttest import remote -from virttest import utils_misc -from virttest import utils_net -from virttest import error_context +from virttest import error_context, remote, utils_misc, utils_net @error_context.context_aware @@ -25,16 +21,16 @@ def run(test, params, env): :param params: Dictionary with the test parameters :param env: Dictionary with test environment. 
""" - timeout = int(params.get("login_timeout", '360')) + timeout = int(params.get("login_timeout", "360")) client = params.get("file_transfer_client") port = params.get("file_transfer_port") password = params.get("password") username = params.get("username") tmp_dir = params["tmp_dir"] - filesize = int(params.get("filesize", '4096')) + filesize = int(params.get("filesize", "4096")) dd_cmd = params["dd_cmd"] - file_trans_timeout = int(params.get("file_trans_timeout", '1200')) - file_md5_check_timeout = int(params.get("file_md5_check_timeout", '600')) + file_trans_timeout = int(params.get("file_trans_timeout", "1200")) + file_md5_check_timeout = int(params.get("file_md5_check_timeout", "600")) link_local_ipv6_addr = params.get_boolean("link_local_ipv6_addr") netid = params.get("netid", "2620:2023:09:12") @@ -43,9 +39,9 @@ def get_file_md5sum(file_name, session, timeout): Get file md5sum from guest. """ test.log.info("Get md5sum of the file:'%s'", file_name) - s, o = session.cmd_status_output("md5sum %s" % file_name, timeout=timeout) + s, o = session.cmd_status_output(f"md5sum {file_name}", timeout=timeout) if s != 0: - test.error("Get file md5sum failed as %s" % o) + test.error(f"Get file md5sum failed as {o}") return re.findall(r"\w{32}", o)[0] sessions = {} @@ -62,126 +58,165 @@ def get_file_md5sum(file_name, session, timeout): if nettype == "vdpa": host_ifname = params.get("netdst") hostid = random.randint(31, 50) - process.run("ip addr add %s::%s/64 dev %s" % (netid, hostid, host_ifname), - ignore_status=True) + process.run( + f"ip addr add {netid}::{hostid}/64 dev {host_ifname}", + ignore_status=True, + ) else: host_ifname = params.get("netdst") if link_local_ipv6_addr else None host_address = utils_net.get_host_ip_address( - params, ip_ver="ipv6", linklocal=link_local_ipv6_addr) + params, ip_ver="ipv6", linklocal=link_local_ipv6_addr + ) - error_context.context("Get ipv6 address of host: %s" % host_address, - test.log.info) + error_context.context(f"Get ipv6 address of host: {host_address}", test.log.info) for vm in vms: vm.verify_alive() sessions[vm] = vm.wait_for_login(timeout=timeout) if params.get("os_type") == "linux": - inet_name[vm] = utils_net.get_linux_ifname(sessions[vm], - vm.get_mac_address()) + inet_name[vm] = utils_net.get_linux_ifname( + sessions[vm], vm.get_mac_address() + ) if nettype == "vdpa": guestid = random.randint(1, 30) - sessions[vm].cmd("ip addr add %s::%s/64 dev %s" % (netid, guestid, - inet_name[vm]), - timeout=timeout, ignore_all_errors=True) - addresses[vm] = utils_net.get_guest_ip_addr(sessions[vm], - vm.get_mac_address(), - params.get("os_type"), - ip_version="ipv6", - linklocal=link_local_ipv6_addr) + sessions[vm].cmd( + f"ip addr add {netid}::{guestid}/64 dev {inet_name[vm]}", + timeout=timeout, + ignore_all_errors=True, + ) + addresses[vm] = utils_net.get_guest_ip_addr( + sessions[vm], + vm.get_mac_address(), + params.get("os_type"), + ip_version="ipv6", + linklocal=link_local_ipv6_addr, + ) else: addresses[vm] = utils_net.get_guest_ip_addr( sessions[vm], vm.get_mac_address(), params.get("os_type"), ip_version="ipv6", - linklocal=link_local_ipv6_addr) + linklocal=link_local_ipv6_addr, + ) if link_local_ipv6_addr is False and addresses[vm] is None: test.cancel("Your guest can not get remote IPv6 address.") - error_context.context("Get ipv6 address of %s: %s" % (vm.name, addresses[vm]), - test.log.info) + error_context.context( + f"Get ipv6 address of {vm.name}: {addresses[vm]}", test.log.info + ) # prepare test data - guest_path = (tmp_dir + "src-%s" 
% utils_misc.generate_random_string(8)) - dest_path = (tmp_dir + "dst-%s" % utils_misc.generate_random_string(8)) - host_path = os.path.join(test.tmpdir, "tmp-%s" % - utils_misc.generate_random_string(8)) + guest_path = tmp_dir + f"src-{utils_misc.generate_random_string(8)}" + dest_path = tmp_dir + f"dst-{utils_misc.generate_random_string(8)}" + host_path = os.path.join(test.tmpdir, f"tmp-{utils_misc.generate_random_string(8)}") test.log.info("Test setup: Creating %dMB file on host", filesize) process.run(dd_cmd % (host_path, filesize), shell=True) try: - src_md5 = (crypto.hash_file(host_path, algorithm="md5")) - error_context.context("md5 value of data from src: %s" % src_md5, - test.log.info) + src_md5 = crypto.hash_file(host_path, algorithm="md5") + error_context.context(f"md5 value of data from src: {src_md5}", test.log.info) # transfer data for vm in vms: - error_context.context("Transfer data from host to %s" % vm.name, - test.log.info) - remote.copy_files_to(addresses[vm], - client, username, password, port, - host_path, guest_path, - timeout=file_trans_timeout, - interface=host_ifname) - dst_md5 = get_file_md5sum(guest_path, sessions[vm], - timeout=file_md5_check_timeout) - error_context.context("md5 value of data in %s: %s" % (vm.name, dst_md5), - test.log.info) + error_context.context( + f"Transfer data from host to {vm.name}", test.log.info + ) + remote.copy_files_to( + addresses[vm], + client, + username, + password, + port, + host_path, + guest_path, + timeout=file_trans_timeout, + interface=host_ifname, + ) + dst_md5 = get_file_md5sum( + guest_path, sessions[vm], timeout=file_md5_check_timeout + ) + error_context.context( + f"md5 value of data in {vm.name}: {dst_md5}", test.log.info + ) if dst_md5 != src_md5: - test.fail("File changed after transfer host -> %s" % vm.name) + test.fail(f"File changed after transfer host -> {vm.name}") if params.get("os_type") == "linux": for vm_src in addresses: for vm_dst in addresses: if vm_src != vm_dst: - error_context.context("Transferring data from %s to %s" % - (vm_src.name, vm_dst.name), - test.log.info) + error_context.context( + f"Transferring data from {vm_src.name} to {vm_dst.name}", + test.log.info, + ) if params.get_boolean("using_guest_interface"): dst_interface = inet_name[vm_src] else: dst_interface = host_ifname - remote.scp_between_remotes(addresses[vm_src], - addresses[vm_dst], - port, password, password, - username, username, - guest_path, dest_path, - timeout=file_trans_timeout, - src_inter=host_ifname, - dst_inter=dst_interface) - dst_md5 = get_file_md5sum(dest_path, sessions[vm_dst], - timeout=file_md5_check_timeout) - error_context.context("md5 value of data in %s: %s" % (vm.name, dst_md5), - test.log.info) + remote.scp_between_remotes( + addresses[vm_src], + addresses[vm_dst], + port, + password, + password, + username, + username, + guest_path, + dest_path, + timeout=file_trans_timeout, + src_inter=host_ifname, + dst_inter=dst_interface, + ) + dst_md5 = get_file_md5sum( + dest_path, sessions[vm_dst], timeout=file_md5_check_timeout + ) + error_context.context( + f"md5 value of data in {vm.name}: {dst_md5}", + test.log.info, + ) if dst_md5 != src_md5: - test.fail("File changed transfer %s -> %s" - % (vm_src.name, vm_dst.name)) + test.fail( + f"File changed transfer {vm_src.name} -> {vm_dst.name}" + ) for vm in vms: - error_context.context("Transfer data from %s to host" % vm.name, - test.log.info) - remote.copy_files_from(addresses[vm], - client, username, password, port, - guest_path, host_path, - 
timeout=file_trans_timeout, - interface=host_ifname) - error_context.context("Check whether the file changed after trans", - test.log.info) - dst_md5 = (crypto.hash_file(host_path, algorithm="md5")) - error_context.context("md5 value of data after copying to host: %s" % dst_md5, - test.log.info) + error_context.context( + f"Transfer data from {vm.name} to host", test.log.info + ) + remote.copy_files_from( + addresses[vm], + client, + username, + password, + port, + guest_path, + host_path, + timeout=file_trans_timeout, + interface=host_ifname, + ) + error_context.context( + "Check whether the file changed after trans", test.log.info + ) + dst_md5 = crypto.hash_file(host_path, algorithm="md5") + error_context.context( + f"md5 value of data after copying to host: {dst_md5}", test.log.info + ) if dst_md5 != src_md5: test.fail("File changed after transfer (md5sum mismatch)") - process.system_output("rm -rf %s" % host_path, timeout=timeout) + process.system_output(f"rm -rf {host_path}", timeout=timeout) finally: - process.system("rm -rf %s" % host_path, timeout=timeout, - ignore_status=True) + process.system(f"rm -rf {host_path}", timeout=timeout, ignore_status=True) if nettype == "vdpa": - process.run("ip addr del %s/64 dev %s" % (host_address, host_ifname)) + process.run(f"ip addr del {host_address}/64 dev {host_ifname}") for vm in vms: if params.get("os_type") == "linux": - sessions[vm].cmd("rm -rf %s %s || true" % (guest_path, dest_path), - timeout=timeout, ignore_all_errors=True) + sessions[vm].cmd( + f"rm -rf {guest_path} {dest_path} || true", + timeout=timeout, + ignore_all_errors=True, + ) else: - sessions[vm].cmd("del /f %s" % guest_path, - timeout=timeout, ignore_all_errors=True) + sessions[vm].cmd( + f"del /f {guest_path}", timeout=timeout, ignore_all_errors=True + ) sessions[vm].close() diff --git a/qemu/tests/trim_support_test.py b/qemu/tests/trim_support_test.py index b8e6bc1e4c..5dbfbd8a69 100644 --- a/qemu/tests/trim_support_test.py +++ b/qemu/tests/trim_support_test.py @@ -2,11 +2,8 @@ import time from avocado.utils import process -from virttest import utils_test -from virttest import error_context -from virttest import data_dir -from virttest import utils_disk -from virttest import utils_misc +from virttest import data_dir, error_context, utils_disk, utils_misc, utils_test + from provider import win_driver_utils @@ -56,7 +53,7 @@ def query_system_events(filter_options): return params.get("searched_keywords") in session.cmd(cmd).strip() host_check_cmd = params.get("host_check_cmd") - image_dir = os.path.join(data_dir.get_data_dir(), 'images') + image_dir = os.path.join(data_dir.get_data_dir(), "images") host_check_cmd = host_check_cmd % (image_dir, params["image_format"]) image_name = params["stg_name"] stg_param = params.object_params(image_name) @@ -71,21 +68,24 @@ def query_system_events(filter_options): vm.verify_alive() session = vm.wait_for_login(timeout=timeout) - error_context.context("Check if the driver is installed and verified", - test.log.info) + error_context.context( + "Check if the driver is installed and verified", test.log.info + ) session = utils_test.qemu.windrv_check_running_verifier( - session, vm, test, driver_verifier, timeout) + session, vm, test, driver_verifier, timeout + ) error_context.context("Format data disk", test.log.info) disk_index = utils_misc.wait_for( - lambda: utils_disk.get_windows_disks_index(session, image_size_str), - 120) + lambda: utils_disk.get_windows_disks_index(session, image_size_str), 120 + ) if not disk_index: - 
test.error("Failed to get the disk index of size %s" % image_size_str) + test.error(f"Failed to get the disk index of size {image_size_str}") if not utils_disk.update_windows_disk_attributes(session, disk_index): - test.error("Failed to enable data disk %s" % disk_index) + test.error(f"Failed to enable data disk {disk_index}") drive_letter_list = utils_disk.configure_empty_windows_disk( - session, disk_index[0], image_size_str, quick_format=False) + session, disk_index[0], image_size_str, quick_format=False + ) if not drive_letter_list: test.error("Failed to format the data disk") drive_letter = drive_letter_list[0] @@ -96,25 +96,25 @@ def query_system_events(filter_options): test.log.info("Data disk size: %sMB", ori_size) error_context.context("Trim data disk in guest") - status, output = session.cmd_status_output(guest_trim_cmd % drive_letter, - timeout=defrag_timeout) + status, output = session.cmd_status_output( + guest_trim_cmd % drive_letter, timeout=defrag_timeout + ) if status: - test.error("Error when trim the volume, status=%s, output=%s" % - (status, output)) + test.error(f"Error when trim the volume, status={status}, output={output}") if event_id: time.sleep(10) session = vm.reboot(session) - if query_system_events(params['filter_options']): - test.fail("Disk corruption after trim for %s" - % params.get("block_size")) + if query_system_events(params["filter_options"]): + test.fail( + "Disk corruption after trim for {}".format(params.get("block_size")) + ) if params["retrim_size_check"] == "yes": error_context.context("Check size from host after disk trimming") - new_size = utils_misc.wait_for( - lambda: _disk_size_smaller(ori_size), 20, 10, 1) + new_size = utils_misc.wait_for(lambda: _disk_size_smaller(ori_size), 20, 10, 1) if new_size is None: - test.error("Data disk size is not smaller than: %sMB" % ori_size) + test.error(f"Data disk size is not smaller than: {ori_size}MB") # for windows guest, disable/uninstall driver to get memory leak based on # driver verifier is enabled diff --git a/qemu/tests/tsc_drift.py b/qemu/tests/tsc_drift.py index f902a50fe7..b7d62de5fc 100644 --- a/qemu/tests/tsc_drift.py +++ b/qemu/tests/tsc_drift.py @@ -1,10 +1,8 @@ -import time import os import re +import time -from avocado.utils import cpu -from avocado.utils import process - +from avocado.utils import cpu, process from virttest import data_dir @@ -30,15 +28,14 @@ def run(test, params, env): cpu_chk_cmd = params.get("cpu_chk_cmd") tsc_cmd_guest = params.get("tsc_cmd_guest", "./a.out") tsc_cmd_host = params.get("tsc_cmd_host", "./a.out") - tsc_freq_path = os.path.join(data_dir.get_deps_dir(), - 'timedrift/get_tsc.c') + tsc_freq_path = os.path.join(data_dir.get_deps_dir(), "timedrift/get_tsc.c") host_freq = 0 def get_tsc(machine="host", i=0): tsc_cmd = tsc_cmd_guest if tsc_cmd == "host": tsc_cmd = tsc_cmd_host - cmd = "taskset %s %s" % (1 << i, tsc_cmd) + cmd = f"taskset {1 << i} {tsc_cmd}" if machine == "host": result = process.run(cmd, ignore_status=True) s, o = result.exit_status, result.stdout @@ -55,7 +52,7 @@ def get_tsc(machine="host", i=0): session = vm.wait_for_login(timeout=int(params.get("login_timeout", 360))) if not os.path.exists(tsc_cmd_guest): - process.run("gcc %s" % tsc_freq_path) + process.run(f"gcc {tsc_freq_path}") ncpu = cpu.online_count() @@ -69,13 +66,13 @@ def get_tsc(machine="host", i=0): delta = tsc2 - tsc1 test.log.info("Host TSC delta for cpu %s is %s", i, delta) if delta < 0: - test.error("Host TSC for cpu %s warps %s" % (i, delta)) + test.error(f"Host TSC for cpu 
{i} warps {delta}") host_freq += delta / ncpu test.log.info("Average frequency of host's cpus: %s", host_freq) - if session.cmd_status("test -x %s" % tsc_cmd_guest): - vm.copy_files_to(tsc_freq_path, '/tmp/get_tsc.c') + if session.cmd_status(f"test -x {tsc_cmd_guest}"): + vm.copy_files_to(tsc_freq_path, "/tmp/get_tsc.c") if session.cmd_status("gcc /tmp/get_tsc.c") != 0: test.error("Fail to compile program on guest") @@ -101,7 +98,6 @@ def get_tsc(machine="host", i=0): success = False if not success: - test.fail("TSC drift found for the guest, please check the " - "log for details") + test.fail("TSC drift found for the guest, please check the " "log for details") session.close() diff --git a/qemu/tests/uefi_boot_from_device.py b/qemu/tests/uefi_boot_from_device.py index afb2eba845..c3f48252c9 100644 --- a/qemu/tests/uefi_boot_from_device.py +++ b/qemu/tests/uefi_boot_from_device.py @@ -2,10 +2,7 @@ import re from avocado.utils import process - -from virttest import error_context -from virttest import utils_misc -from virttest import env_process +from virttest import env_process, error_context, utils_misc @error_context.context_aware @@ -28,7 +25,7 @@ def create_cdroms(cdrom_test): """ test.log.info("creating test cdrom") process.run("dd if=/dev/urandom of=test bs=10M count=1") - process.run("mkisofs -o %s test" % cdrom_test) + process.run(f"mkisofs -o {cdrom_test} test") process.run("rm -f test") def cleanup_cdroms(cdrom_test): @@ -42,13 +39,13 @@ def boot_check(info): """ boot info check """ - return re.search(info, vm.logsessions['seabios'].get_output(), re.S) + return re.search(info, vm.logsessions["seabios"].get_output(), re.S) def count_of_move_step(boot_dev): """ get the number of steps to move """ - logs = vm.logsessions['seabios'].get_output() + logs = vm.logsessions["seabios"].get_output() boot_dev = re.findall(boot_dev, logs, re.S)[0] return len(re.findall(r"Boot\d+:\sUEFI", logs.split(boot_dev)[0], re.S)) @@ -70,8 +67,7 @@ def count_of_move_step(boot_dev): try: if boot_dev: - if not utils_misc.wait_for(lambda: boot_check(boot_menu_hint), - timeout, 1): + if not utils_misc.wait_for(lambda: boot_check(boot_menu_hint), timeout, 1): test.fail("Could not get boot menu message") # Navigate to boot manager menu @@ -83,9 +79,8 @@ def count_of_move_step(boot_dev): vm.send_key("kp_enter") error_context.context("Check boot result", test.log.info) - if not utils_misc.wait_for(lambda: boot_check(boot_entry_info), - timeout, 1): - test.fail("Could not boot from '%s'" % dev_name) + if not utils_misc.wait_for(lambda: boot_check(boot_entry_info), timeout, 1): + test.fail(f"Could not boot from '{dev_name}'") finally: if dev_name == "cdrom": cleanup_cdroms(cdrom_test) diff --git a/qemu/tests/uefi_check_debugcon.py b/qemu/tests/uefi_check_debugcon.py index dc510d10eb..a4cd4f573b 100644 --- a/qemu/tests/uefi_check_debugcon.py +++ b/qemu/tests/uefi_check_debugcon.py @@ -1,9 +1,5 @@ -from virttest import utils_test -from virttest import utils_misc -from virttest import utils_package -from virttest import env_process -from virttest import error_context from avocado.utils import process +from virttest import env_process, error_context, utils_misc, utils_package, utils_test @error_context.context_aware @@ -35,8 +31,7 @@ def check_trace_process(): """ check whether trace process is existing """ - if process.system( - params["grep_trace_cmd"], ignore_status=True, shell=True): + if process.system(params["grep_trace_cmd"], ignore_status=True, shell=True): return False else: return True @@ -60,16 +55,16 @@ def 
trace_kvm_pio(): # install trace-cmd in host utils_package.package_install("trace-cmd") if params.get("ovmf_log"): - error_context.context("Append debugcon parameter to " - "qemu command lines.", test.log.info) + error_context.context( + "Append debugcon parameter to " "qemu command lines.", test.log.info + ) ovmf_log = utils_misc.get_path(test.debugdir, params["ovmf_log"]) params["extra_params"] %= ovmf_log params["start_vm"] = "yes" - env_process.process(test, params, env, - env_process.preprocess_image, - env_process.preprocess_vm) - trace_output_file = utils_misc.get_path(test.debugdir, - params["trace_output"]) + env_process.process( + test, params, env, env_process.preprocess_image, env_process.preprocess_vm + ) + trace_output_file = utils_misc.get_path(test.debugdir, params["trace_output"]) trace_record_cmd = params["trace_record_cmd"] % trace_output_file check_pio_read = params["check_pio_read"] % trace_output_file check_pio_write = params["check_pio_write"] % trace_output_file @@ -80,35 +75,36 @@ def trace_kvm_pio(): error_context.context("Remove the existing isa-log device.", test.log.info) remove_isa_debugcon(vm) vm.destroy() - error_context.context("Run trace record command on host.", - test.log.info) + error_context.context("Run trace record command on host.", test.log.info) bg = utils_test.BackgroundTest(trace_kvm_pio, ()) bg.start() if not utils_misc.wait_for(lambda: bg.is_alive, timeout): - test.fail("Failed to start command: '%s'" % trace_record_cmd) + test.fail(f"Failed to start command: '{trace_record_cmd}'") try: vm.create() vm.verify_alive() vm.destroy() process.system(stop_trace_record, ignore_status=True, shell=True) - if not utils_misc.wait_for( - lambda: not check_trace_process(), timeout, 30, 3): - test.fail("Failed to stop command: '%s' after %s seconds." - % (stop_trace_record, timeout)) - pio_read_counts = int(process.run( - check_pio_read, shell=True).stdout.decode().strip()) + if not utils_misc.wait_for(lambda: not check_trace_process(), timeout, 30, 3): + test.fail( + f"Failed to stop command: '{stop_trace_record}' after {timeout} seconds." + ) + pio_read_counts = int( + process.run(check_pio_read, shell=True).stdout.decode().strip() + ) err_str = "pio_read counts should be greater than 0. " - err_str += "But the actual counts are %s." % pio_read_counts + err_str += f"But the actual counts are {pio_read_counts}." test.assertGreater(pio_read_counts, 0, err_str) - pio_write_counts = int(process.run( - check_pio_write, shell=True).stdout.decode().strip()) + pio_write_counts = int( + process.run(check_pio_write, shell=True).stdout.decode().strip() + ) if params.get("ovmf_log"): err_str = "pio_write counts should be greater than 0. " - err_str += "But the actual counts are %s." % pio_write_counts + err_str += f"But the actual counts are {pio_write_counts}." test.assertGreater(pio_write_counts, 0, err_str) else: err_str = "pio_write counts should be equal to 0. " - err_str += "But the actual counts are %s." % pio_write_counts + err_str += f"But the actual counts are {pio_write_counts}." 
test.assertEqual(pio_write_counts, 0, err_str) finally: if check_trace_process(): diff --git a/qemu/tests/uefi_check_log_info.py b/qemu/tests/uefi_check_log_info.py index 422ed40870..6eaa1bd42b 100644 --- a/qemu/tests/uefi_check_log_info.py +++ b/qemu/tests/uefi_check_log_info.py @@ -1,11 +1,8 @@ -import re import os +import re from avocado.utils import process - -from virttest import utils_misc -from virttest import env_process -from virttest import error_context +from virttest import env_process, error_context, utils_misc @error_context.context_aware @@ -24,7 +21,7 @@ def info_check(info): """ Check log info """ - logs = vm.logsessions['seabios'].get_output() + logs = vm.logsessions["seabios"].get_output() result = re.search(info, logs, re.S) return result @@ -34,7 +31,7 @@ def create_cdroms(cdrom_test): """ test.log.info("creating test cdrom") process.run("dd if=/dev/urandom of=test bs=10M count=1") - process.run("mkisofs -o %s test" % cdrom_test) + process.run(f"mkisofs -o {cdrom_test} test") process.run("rm -f test") boot_splash_time = params.get("boot_splash_time") @@ -44,9 +41,9 @@ def create_cdroms(cdrom_test): if cdrom_test: create_cdroms(cdrom_test) params["start_vm"] = "yes" - env_process.process(test, params, env, - env_process.preprocess_image, - env_process.preprocess_vm) + env_process.process( + test, params, env, env_process.preprocess_image, env_process.preprocess_vm + ) vm = env.get_vm(params["main_vm"]) vm.verify_alive() @@ -55,11 +52,9 @@ def create_cdroms(cdrom_test): expect_result = check_info_pattern elif boot_splash_time: splash_time_pattern = params.get("splash_time_pattern") - expect_result = (splash_time_pattern % - (int(boot_splash_time) // 1000)) + expect_result = splash_time_pattern % (int(boot_splash_time) // 1000) if not utils_misc.wait_for(lambda: info_check(expect_result), timeout): # pylint: disable=E0606 - test.fail("Does not get expected result from bios log: %s" - % expect_result) + test.fail(f"Does not get expected result from bios log: {expect_result}") finally: if params.get("cdroms") == "test": test.log.info("cleaning up temp cdrom images") diff --git a/qemu/tests/uefi_check_resolution.py b/qemu/tests/uefi_check_resolution.py index d888e9bfab..463fbe3b7e 100644 --- a/qemu/tests/uefi_check_resolution.py +++ b/qemu/tests/uefi_check_resolution.py @@ -1,8 +1,7 @@ -import re import random +import re -from virttest import utils_misc -from virttest import error_context +from virttest import error_context, utils_misc @error_context.context_aware @@ -20,21 +19,44 @@ def run(test, params, env): :param env: Dictionary with test environment. 
""" - change_prefered = ["640 x 480", "800 x 480", "800 x 600", "832 x 624", - "960 x 640", "1024 x 600", "1024 x 768", "1152 x 864", - "1152 x 870", "1280 x 720", "1280 x 760", "1280 x 768", - "1280 x 800", "1280 x 960", "1280 x 1024", "1360 x 768", - "1366 x 768", "1400 x 1050", "1440 x 900", "1600 x 900", - "1600 x 1200", "1680 x 1050", "1920 x 1080", - "1920 x 1200", "1920 x 1440", "2000 x 2000", - "2048 x 1536", "2048 x 2048", "2560 x 1440", - "2560 x 1600"] + change_prefered = [ + "640 x 480", + "800 x 480", + "800 x 600", + "832 x 624", + "960 x 640", + "1024 x 600", + "1024 x 768", + "1152 x 864", + "1152 x 870", + "1280 x 720", + "1280 x 760", + "1280 x 768", + "1280 x 800", + "1280 x 960", + "1280 x 1024", + "1360 x 768", + "1366 x 768", + "1400 x 1050", + "1440 x 900", + "1600 x 900", + "1600 x 1200", + "1680 x 1050", + "1920 x 1080", + "1920 x 1200", + "1920 x 1440", + "2000 x 2000", + "2048 x 1536", + "2048 x 2048", + "2560 x 1440", + "2560 x 1600", + ] def boot_check(info): """ boot info check """ - logs = vm.logsessions['seabios'].get_output() + logs = vm.logsessions["seabios"].get_output() result = re.search(info, logs, re.S) return result @@ -64,8 +86,7 @@ def choose_resolution(): del change_prefered[index] change_prefered = [default_resolution] + change_prefered change_resolution_key, check_info, resolution = choose_resolution() - if not utils_misc.wait_for(lambda: boot_check(boot_menu_hint), - timeout, 1): + if not utils_misc.wait_for(lambda: boot_check(boot_menu_hint), timeout, 1): test.fail("Could not get boot menu message") key = [] key += enter_change_preferred @@ -77,4 +98,4 @@ def choose_resolution(): vm.reboot(timeout=timeout) if not boot_check(check_info): - test.fail("Change to resolution {'%s'} fail" % resolution) + test.fail(f"Change to resolution {{'{resolution}'}} fail") diff --git a/qemu/tests/uefi_check_secure_mor.py b/qemu/tests/uefi_check_secure_mor.py index 588941cac5..758eb355be 100644 --- a/qemu/tests/uefi_check_secure_mor.py +++ b/qemu/tests/uefi_check_secure_mor.py @@ -1,5 +1,4 @@ -from virttest import env_process -from virttest import data_dir +from virttest import data_dir, env_process def run(test, params, env): @@ -22,55 +21,54 @@ def run(test, params, env): def execute_powershell_command(command, timeout=60): status, output = session.cmd_status_output(command, timeout) if status != 0: - test.fail("execute command fail: %s" % output) + test.fail(f"execute command fail: {output}") return output login_timeout = int(params.get("login_timeout", 360)) - params["ovmf_vars_filename"] = 'OVMF_VARS.secboot.fd' - params["cpu_model_flags"] = ',hv-passthrough' - params["start_vm"] = 'yes' - env_process.preprocess_vm(test, params, env, params['main_vm']) + params["ovmf_vars_filename"] = "OVMF_VARS.secboot.fd" + params["cpu_model_flags"] = ",hv-passthrough" + params["start_vm"] = "yes" + env_process.preprocess_vm(test, params, env, params["main_vm"]) vm = env.get_vm(params["main_vm"]) session = vm.wait_for_serial_login(timeout=login_timeout) - check_cmd = params['check_secure_boot_enabled_cmd'] - dgreadiness_path_command = params['dgreadiness_path_command'] - executionPolicy_command = params['executionPolicy_command'] - enable_command = params['enable_command'] - ready_command = params['ready_command'] + check_cmd = params["check_secure_boot_enabled_cmd"] + dgreadiness_path_command = params["dgreadiness_path_command"] + executionPolicy_command = params["executionPolicy_command"] + enable_command = params["enable_command"] + ready_command = 
params["ready_command"] try: output = session.cmd_output(check_cmd) - if 'False' in output: - test.fail('Secure boot is not enabled. The actual output is %s' - % output) + if "False" in output: + test.fail(f"Secure boot is not enabled. The actual output is {output}") # Copy Device Guard to guest dgreadiness_host_path = data_dir.get_deps_dir("dgreadiness") dst_path = params["dst_path"] test.log.info("Copy Device Guuard to guest.") - s, o = session.cmd_status_output("mkdir %s" % dst_path) + s, o = session.cmd_status_output(f"mkdir {dst_path}") if s and "already exists" not in o: - test.error("Could not create Device Guard directory in " - "VM '%s', detail: '%s'" % (vm.name, o)) + test.error( + "Could not create Device Guard directory in " + f"VM '{vm.name}', detail: '{o}'" + ) vm.copy_files_to(dgreadiness_host_path, dst_path) execute_powershell_command(dgreadiness_path_command) execute_powershell_command(executionPolicy_command) output = execute_powershell_command(enable_command) - check_enable_info = params['check_enable_info'] + check_enable_info = params["check_enable_info"] if check_enable_info not in output: - test.fail("Device Guard enable failed. The actual output is %s" - % output) + test.fail(f"Device Guard enable failed. The actual output is {output}") # Reboot guest and run Device Guard session = vm.reboot(session) execute_powershell_command(dgreadiness_path_command) execute_powershell_command(executionPolicy_command) output = execute_powershell_command(ready_command) - check_ready_info = params['check_ready_info'] + check_ready_info = params["check_ready_info"] if check_ready_info not in output: - test.fail("Device Guard running failed. The actual output is %s" - % output) + test.fail(f"Device Guard running failed. The actual output is {output}") finally: session.close() diff --git a/qemu/tests/uefi_pkg.py b/qemu/tests/uefi_pkg.py index a1df57db23..834e52bcc0 100644 --- a/qemu/tests/uefi_pkg.py +++ b/qemu/tests/uefi_pkg.py @@ -1,5 +1,5 @@ -import re import json +import re from avocado.utils import process from virttest import error_context @@ -42,7 +42,7 @@ def check_element_filename(filename, file_list): :param file_list: query files command output """ err_str = "The 'filename' element in meta-file point to an " - err_str += "invalid file. The invalid file is '%s'" % filename + err_str += f"invalid file. The invalid file is '{filename}'" test.assertIn(filename, file_list, err_str) def check_element_mode(mode, expected_mode): @@ -58,18 +58,21 @@ def check_element_mode(mode, expected_mode): test.assertTrue(mode == expected_mode, err_str % (expected_mode, mode)) query_package = params["query_package"] - error_context.context("Check edk2-ovmf package has been " - "installed already", test.log.info) - status, output = process.getstatusoutput(query_package, - ignore_status=True, - shell=True) + error_context.context( + "Check edk2-ovmf package has been " "installed already", test.log.info + ) + status, output = process.getstatusoutput( + query_package, ignore_status=True, shell=True + ) if status: test.error("Please install edk2-ovmf package on host.") package_name = params["ovmf_package_name"] ovmf_package = re.findall(package_name, output, re.S) if not ovmf_package: - test.error("Not found right edk2-ovmf package on host. " - "The actual output is '%s'" % output) + test.error( + "Not found right edk2-ovmf package on host. 
" + f"The actual output is '{output}'" + ) query_files = params["query_files"] % ovmf_package[0] file_suffix = params["file_suffix"] meta_files = [] @@ -78,13 +81,18 @@ def check_element_mode(mode, expected_mode): if line.endswith(file_suffix): meta_files.append(line) if len(meta_files) > int(params["number_of_files"]): - test.fail("The number of JSON files should be less than or " - "equal to %s. The actual file list is %s", - params["number_of_files"], meta_files) - error_context.context("Check the 'filename' elements in both json" - " files point to valid files.", test.log.info) + test.fail( + "The number of JSON files should be less than or " + "equal to %s. The actual file list is %s", + params["number_of_files"], + meta_files, + ) + error_context.context( + "Check the 'filename' elements in both json" " files point to valid files.", + test.log.info, + ) for meta_file in meta_files: - test.log.info("Checking the meta file '%s'" % meta_file) + test.log.info("Checking the meta file '%s'", meta_file) with open(meta_file, "r") as f: content = json.load(f) filename = content["mapping"]["executable"]["filename"] diff --git a/qemu/tests/uefi_secureboot.py b/qemu/tests/uefi_secureboot.py index 9cff471d8d..e61114b419 100755 --- a/qemu/tests/uefi_secureboot.py +++ b/qemu/tests/uefi_secureboot.py @@ -1,9 +1,6 @@ import re -from virttest import error_context -from virttest import utils_misc -from virttest import remote -from virttest import env_process +from virttest import env_process, error_context, remote, utils_misc from virttest.tests import unattended_install @@ -24,8 +21,8 @@ def run(test, params, env): """ def _check_signed(): - """ Check and return if guest is signed """ - if os_type == 'linux': + """Check and return if guest is signed""" + if os_type == "linux": return True if re.search(sign_keyword, sign_info) else False for device_line in sign_info.strip().splitlines()[2:]: if re.match(sign_keyword, device_line): @@ -33,58 +30,61 @@ def _check_signed(): return True unattended_install.run(test, params, env) - os_type = params['os_type'] - params['cdroms'] = '' - params['boot_once'] = '' - params['force_create_image'] = 'no' - params['start_vm'] = 'yes' - params['kernel'] = '' - params['initrd'] = '' - params['kernel_params'] = '' - params['image_boot'] = 'yes' - vm = env.get_vm(params['main_vm']) + os_type = params["os_type"] + params["cdroms"] = "" + params["boot_once"] = "" + params["force_create_image"] = "no" + params["start_vm"] = "yes" + params["kernel"] = "" + params["initrd"] = "" + params["kernel_params"] = "" + params["image_boot"] = "yes" + vm = env.get_vm(params["main_vm"]) if vm: vm.destroy() - env_process.preprocess_vm(test, params, env, params['main_vm']) - vm = env.get_vm(params['main_vm']) + env_process.preprocess_vm(test, params, env, params["main_vm"]) + vm = env.get_vm(params["main_vm"]) session = vm.wait_for_login() - check_sign_cmd = params['check_sign_cmd'] - sign_keyword = params['sign_keyword'] - os_type = params['os_type'] - if os_type == 'linux': - check_pesign_cmd = 'which pesign' + check_sign_cmd = params["check_sign_cmd"] + sign_keyword = params["sign_keyword"] + os_type = params["os_type"] + if os_type == "linux": + check_pesign_cmd = "which pesign" if session.cmd_status(check_pesign_cmd) != 0: - install_cmd = params['pesign_install_cmd'] + install_cmd = params["pesign_install_cmd"] s, o = session.cmd_status_output(install_cmd) if s != 0: - test.cancel('Install pesign failed with output: "%s". 
' - 'Please define proper source for guest' % o) - vmlinuz = '/boot/vmlinuz-%s' % session.cmd_output('uname -r') + test.cancel( + f'Install pesign failed with output: "{o}". ' + "Please define proper source for guest" + ) + vmlinuz = "/boot/vmlinuz-{}".format(session.cmd_output("uname -r")) check_sign_cmd = check_sign_cmd % vmlinuz sign_info = session.cmd_output(check_sign_cmd) signed = _check_signed() - error_context.context('Guest signed status is %s, shutdown and reboot ' - 'guest with secure boot' % signed, test.log.info) + error_context.context( + f"Guest signed status is {signed}, shutdown and reboot " + "guest with secure boot", + test.log.info, + ) session.close() vm.destroy() if utils_misc.wait_for(vm.is_dead, 180, 1, 1): test.log.info("Guest managed to shutdown cleanly") - params['ovmf_vars_filename'] = 'OVMF_VARS.secboot.fd' - env_process.preprocess_vm(test, params, env, params['main_vm']) - vm = env.get_vm(params['main_vm']) + params["ovmf_vars_filename"] = "OVMF_VARS.secboot.fd" + env_process.preprocess_vm(test, params, env, params["main_vm"]) + vm = env.get_vm(params["main_vm"]) try: session = vm.wait_for_serial_login() except remote.LoginTimeoutError: if signed: - test.fail('The guest is signed,' - ' but boot failed under secure mode.') + test.fail("The guest is signed," " but boot failed under secure mode.") else: - check_cmd = params['check_secure_boot_enabled_cmd'] + check_cmd = params["check_secure_boot_enabled_cmd"] status, output = session.cmd_status_output(check_cmd) if status != 0: - test.fail('Secure boot is not enabled') + test.fail("Secure boot is not enabled") if not signed: - test.fail('The guest is not signed,' - ' but boot succeed under secure mode.') + test.fail("The guest is not signed," " but boot succeed under secure mode.") finally: vm.destroy() diff --git a/qemu/tests/uefishell.py b/qemu/tests/uefishell.py index fab12e0c78..4cfff3ce37 100644 --- a/qemu/tests/uefishell.py +++ b/qemu/tests/uefishell.py @@ -1,19 +1,15 @@ +import logging import os import re import time -import logging from avocado.utils import process +from virttest import data_dir, env_process, utils_misc, utils_net -from virttest import data_dir -from virttest import utils_net -from virttest import utils_misc -from virttest import env_process - -LOG_JOB = logging.getLogger('avocado.test') +LOG_JOB = logging.getLogger("avocado.test") -class UEFIShellTest(object): +class UEFIShellTest: """ Provide basic functions for uefishell test. To use UefiShell.iso which is provided by ovmf package. 
Boot your VM with this as @@ -41,24 +37,29 @@ def setup(self, under_fs0): """ params = self.params for cdrom in params.objects("cdroms"): - boot_index = params.get("boot_index_%s" % cdrom) + boot_index = params.get(f"boot_index_{cdrom}") if boot_index is not None: - params["boot_index_%s" % cdrom] = int(boot_index) + 1 + params[f"boot_index_{cdrom}"] = int(boot_index) + 1 for image in params.objects("images"): - params["image_boot_%s" % image] = "no" - params["cdroms"] = "%s %s" % ("uefishell", params["cdroms"]) + params[f"image_boot_{image}"] = "no" + params["cdroms"] = "{} {}".format("uefishell", params["cdroms"]) params["cdrom_uefishell"] = self.copy_uefishell() params["bootindex_uefishell"] = "0" if params.get("secureboot_pk_kek"): params["secureboot_pk_kek"] %= self.copy_secureboot_pk_kek( - params["pk_kek_filename"]) + params["pk_kek_filename"] + ) params["extra_params"] %= params["secureboot_pk_kek"] params["start_vm"] = "yes" params["shell_prompt"] = r"(Shell|FS\d:\\.*)>" params["shell_linesep"] = r"\r\n" - env_process.process(self.test, params, self.env, - env_process.preprocess_image, - env_process.preprocess_vm) + env_process.process( + self.test, + params, + self.env, + env_process.preprocess_image, + env_process.preprocess_vm, + ) vm = self.env.get_vm(params["main_vm"]) self.session = vm.wait_for_serial_login() if under_fs0 == "yes": @@ -71,14 +72,13 @@ def copy_uefishell(self): """ ovmf_path = self.params["ovmf_path"] uefishell_filename = "UefiShell.iso" - uefishell_dst_path = "images/%s" % uefishell_filename - uefishell_src_path = utils_misc.get_path( - ovmf_path, uefishell_filename) + uefishell_dst_path = f"images/{uefishell_filename}" + uefishell_src_path = utils_misc.get_path(ovmf_path, uefishell_filename) uefishell_dst_path = utils_misc.get_path( - data_dir.get_data_dir(), uefishell_dst_path) + data_dir.get_data_dir(), uefishell_dst_path + ) if not os.path.exists(uefishell_dst_path): - cp_command = "cp -f %s %s" % ( - uefishell_src_path, uefishell_dst_path) + cp_command = f"cp -f {uefishell_src_path} {uefishell_dst_path}" process.system(cp_command) return uefishell_dst_path @@ -88,12 +88,10 @@ def copy_secureboot_pk_kek(self, pk_kek_filename): :return SecureBootPkKek1.oemstr path """ pk_kek_filepath = data_dir.get_deps_dir("edk2") - pk_kek_src_path = utils_misc.get_path(pk_kek_filepath, - pk_kek_filename) - pk_kek_dst_path = "images/%s" % pk_kek_filename - pk_kek_dst_path = utils_misc.get_path(data_dir.get_data_dir(), - pk_kek_dst_path) - cp_command = "cp -f %s %s" % (pk_kek_src_path, pk_kek_dst_path) + pk_kek_src_path = utils_misc.get_path(pk_kek_filepath, pk_kek_filename) + pk_kek_dst_path = f"images/{pk_kek_filename}" + pk_kek_dst_path = utils_misc.get_path(data_dir.get_data_dir(), pk_kek_dst_path) + cp_command = f"cp -f {pk_kek_src_path} {pk_kek_dst_path}" process.system(cp_command) return pk_kek_dst_path @@ -115,15 +113,17 @@ def send_command(self, command, check_result=None, interval=0.5): last_error = self.params["last_error"] env_var = self.session.cmd_output_safe("set") if not re.search(last_error, env_var, re.S): - self.test.fail("Following errors appear %s when running command %s" - % (output, command)) + self.test.fail( + f"Following errors appear {output} when running command {command}" + ) if check_result: value = [] for result in check_result.split(", "): if not re.findall(result, output, re.S): - self.test.fail("The command result is: %s, which does not" - " match the expectation: %s" - % (output, result)) + self.test.fail( + f"The command result is: 
{output}, which does not" + f" match the expectation: {result}" + ) else: result = re.findall(result, output, re.S)[0] value.append(result) @@ -175,11 +175,10 @@ def form_ping6_args(): """ get source ip address and target ip address ver6 """ - src_ipv6 = uefishell_test.send_command(params["command_show6"], - params["check_result_show6"], - time_interval) - target_ipv6 = utils_net.get_host_ip_address( - params, "ipv6", True).split("%")[0] + src_ipv6 = uefishell_test.send_command( + params["command_show6"], params["check_result_show6"], time_interval + ) + target_ipv6 = utils_net.get_host_ip_address(params, "ipv6", True).split("%")[0] return " ".join([src_ipv6[0], target_ipv6]) def handle_memory_map(output): @@ -191,13 +190,18 @@ def handle_memory_map(output): attribute_values = re.findall(params["rt_code_lines"], output[0], re.M) for value in attribute_values: if params["attribute_value"] != value: - test.fail("The last column should be '%s' for all " - "RT_Code entries. The actual value is '%s'" - % (params["attribute_value"], value)) + test.fail( + "The last column should be '{}' for all " + "RT_Code entries. The actual value is '{}'".format( + params["attribute_value"], value + ) + ) if re.search(params["adjacent_rt_code_lines"], output[0], re.M): - test.fail("Found two adjacent RT_Code lines in command output. " - "The range of 'RT_Code' should be covered by just one" - " entry. The command output is %s" % output[0]) + test.fail( + "Found two adjacent RT_Code lines in command output. " + "The range of 'RT_Code' should be covered by just one" + f" entry. The command output is {output[0]}" + ) def handle_smbiosview(output): """ @@ -214,26 +218,34 @@ def handle_smbiosview(output): """ smbios_version = re.findall(params["smbios_version"], output[0], re.S) if not smbios_version: - test.fail("Failed to find smbios version. " - "The command output is %s" % output[0]) + test.fail( + "Failed to find smbios version. " f"The command output is {output[0]}" + ) bios_version = re.findall(params["bios_version"], output[0], re.S) if not bios_version: - test.fail("Failed to find bios version. " - "The command output is %s" % output[0]) + test.fail( + "Failed to find bios version. " f"The command output is {output[0]}" + ) bios_release_date = re.search(params["bios_release_date"], output[0], re.S) if not bios_release_date: - test.fail("Failed to find bios_release_date. " - "The command output is %s" % output[0]) + test.fail( + "Failed to find bios_release_date. " + f"The command output is {output[0]}" + ) date_year = bios_version[0][:4] date_month = bios_version[0][4:6] date_day = bios_version[0][6:] - if date_year != bios_release_date.group("year") or date_month != \ - bios_release_date.group("month") or date_day != \ - bios_release_date.group("day"): - test.fail("The bios release dates are not equal between " - "bios_version and bios_release_date. The date from " - "bios_version is %s, the date from bios_release_date " - "is %s." % (bios_version[0], bios_release_date[0])) + if ( + date_year != bios_release_date.group("year") + or date_month != bios_release_date.group("month") + or date_day != bios_release_date.group("day") + ): + test.fail( + "The bios release dates are not equal between " + "bios_version and bios_release_date. The date from " + f"bios_version is {bios_version[0]}, the date from bios_release_date " + f"is {bios_release_date[0]}." 
+ ) uefishell_test = UEFIShellTest(test, params, env) time_interval = float(params["time_interval"]) @@ -241,16 +253,14 @@ def handle_smbiosview(output): uefishell_test.setup(under_fs0) test_scenarios = params["test_scenarios"] for scenario in test_scenarios.split(): - command = params["command_%s" % scenario] - if params.get("command_%s_%s" % (scenario, "args")): - func_name = params["command_%s_%s" % (scenario, "args")] + command = params[f"command_{scenario}"] + if params.get("command_{}_{}".format(scenario, "args")): + func_name = params["command_{}_{}".format(scenario, "args")] command += eval(func_name) - check_result = params.get("check_result_%s" % scenario) - output = uefishell_test.send_command(command, - check_result, - time_interval) - if params.get("%s_output_handler" % scenario): - func_name = params["%s_output_handler" % scenario] + check_result = params.get(f"check_result_{scenario}") + output = uefishell_test.send_command(command, check_result, time_interval) + if params.get(f"{scenario}_output_handler"): + func_name = params[f"{scenario}_output_handler"] eval(func_name)(output) uefishell_test.post_test() uefishell_test.clean() diff --git a/qemu/tests/unattended_install_reboot_driftfix.py b/qemu/tests/unattended_install_reboot_driftfix.py index ba48b83cb0..37805f5ff1 100644 --- a/qemu/tests/unattended_install_reboot_driftfix.py +++ b/qemu/tests/unattended_install_reboot_driftfix.py @@ -1,5 +1,4 @@ from virttest import env_process - from virttest.tests import unattended_install diff --git a/qemu/tests/unittest_kvmctl.py b/qemu/tests/unittest_kvmctl.py index 231f775765..42ddb791aa 100644 --- a/qemu/tests/unittest_kvmctl.py +++ b/qemu/tests/unittest_kvmctl.py @@ -20,12 +20,12 @@ def run(test, params, env): os.makedirs(unit_dir) os.chdir(unit_dir) - cmd = "./kvmctl test/x86/bootstrap test/x86/%s.flat" % case + cmd = f"./kvmctl test/x86/bootstrap test/x86/{case}.flat" try: results = process.system_output(cmd, shell=True) except process.CmdError: - test.fail("Unit test %s failed" % case) + test.fail(f"Unit test {case} failed") result_file = os.path.join(test.resultsdir, case) - with open(result_file, 'w') as file: + with open(result_file, "w") as file: file.write(results) diff --git a/qemu/tests/unplug_block_during_io_reboot.py b/qemu/tests/unplug_block_during_io_reboot.py index 6c23916f68..8fd3394393 100644 --- a/qemu/tests/unplug_block_during_io_reboot.py +++ b/qemu/tests/unplug_block_during_io_reboot.py @@ -1,9 +1,7 @@ -import time import re +import time -from virttest import error_context -from virttest import utils_misc -from virttest import utils_disk +from virttest import error_context, utils_disk, utils_misc from provider.block_devices_plug import BlockDevicesPlug from provider.storage_benchmark import generate_instance @@ -23,39 +21,47 @@ def run(test, params, env): :param params: Dictionary with the test parameters. :param env: Dictionary with test environment. """ + def _check_stress_status(): - """ Check the status of stress. """ + """Check the status of stress.""" ck_session = vm.wait_for_login(timeout=360) proc_name = check_cmd[os_type].split()[-1] cmd = check_cmd[os_type] if not utils_misc.wait_for( - lambda: proc_name.lower() in ck_session.cmd_output(cmd).lower(), - 180, step=3.0): - test.fail("%s is not alive!" % proc_name) + lambda: proc_name.lower() in ck_session.cmd_output(cmd).lower(), + 180, + step=3.0, + ): + test.fail(f"{proc_name} is not alive!") ck_session.close() def _get_data_disk(): - """ Get the data disk. 
""" - extra_params = params["blk_extra_params_%s" % params['images'].split()[-1]] + """Get the data disk.""" + extra_params = params[ + "blk_extra_params_{}".format(params["images"].split()[-1]) + ] if windows: - return sorted(session.cmd('wmic diskdrive get index').split()[1:])[-1] + return sorted(session.cmd("wmic diskdrive get index").split()[1:])[-1] drive_id = re.search(r"(serial|wwn)=(\w+)", extra_params, re.M).group(2) return utils_misc.get_linux_drive_path(session, drive_id) def _run_stress_background(): - """ Run stress under background. """ + """Run stress under background.""" test.log.info("Start io stress under background.") thread = utils_misc.InterruptedThread( - target[os_type]['name'], (target[os_type]['args'],)) + target[os_type]["name"], (target[os_type]["args"],) + ) thread.start() _check_stress_status() return thread - check_cmd = {'linux': 'pgrep -lx dd', - 'windows': 'TASKLIST /FI "IMAGENAME eq IOZONE.EXE'} - os_type = params['os_type'] - args = params['stress_args'] - windows = os_type == 'windows' + check_cmd = { + "linux": "pgrep -lx dd", + "windows": 'TASKLIST /FI "IMAGENAME eq IOZONE.EXE', + } + os_type = params["os_type"] + args = params["stress_args"] + windows = os_type == "windows" target = {} vm = env.get_vm(params["main_vm"]) vm.verify_alive() @@ -63,20 +69,21 @@ def _run_stress_background(): session = vm.wait_for_login(timeout=360) disk = _get_data_disk() if windows: - iozone = generate_instance(params, vm, 'iozone') + iozone = generate_instance(params, vm, "iozone") utils_disk.update_windows_disk_attributes(session, disk) disk_letter = utils_disk.configure_empty_disk( - session, disk, params['image_size_stg0'], os_type)[0] - target[os_type] = {'name': iozone.run, 'args': args.format(disk_letter)} + session, disk, params["image_size_stg0"], os_type + )[0] + target[os_type] = {"name": iozone.run, "args": args.format(disk_letter)} else: - target[os_type] = {'name': session.cmd, 'args': args.format(disk)} + target[os_type] = {"name": session.cmd, "args": args.format(disk)} stress_thread = _run_stress_background() - time.sleep(float(params['sleep_time'])) + time.sleep(float(params["sleep_time"])) _check_stress_status() BlockDevicesPlug(vm).unplug_devs_serial() stress_thread.join(suppress_exception=True) session.close() vm.monitor.system_reset() - test.log.info('Login guest after reboot.') + test.log.info("Login guest after reboot.") session = vm.wait_for_login(timeout=360) diff --git a/qemu/tests/unsafe_rebase_to_none_existing_backing_file.py b/qemu/tests/unsafe_rebase_to_none_existing_backing_file.py index 64880fc019..1f7805264c 100644 --- a/qemu/tests/unsafe_rebase_to_none_existing_backing_file.py +++ b/qemu/tests/unsafe_rebase_to_none_existing_backing_file.py @@ -4,8 +4,7 @@ from virttest import data_dir from virttest.qemu_storage import QemuImg -from qemu.tests.qemu_disk_img import QemuImgTest -from qemu.tests.qemu_disk_img import generate_base_snapshot_pair +from qemu.tests.qemu_disk_img import QemuImgTest, generate_base_snapshot_pair def run(test, params, env): @@ -20,6 +19,7 @@ def run(test, params, env): :param params: Dictionary with the test parameters :param env: Dictionary with test environment """ + def _get_img_obj_and_params(tag): """Get an QemuImg object and its params based on the tag.""" img_param = params.object_params(tag) @@ -37,26 +37,30 @@ def _verify_qemu_img_info(output, b_fmt, b_name): """Verify qemu-img info output for this case.""" test.log.info("Verify snapshot's backing file information.") res = json.loads(output) - if 
(res["backing-filename-format"] != b_fmt or - res["backing-filename"] != b_name): - test.fail("Backing file information is not correct," - " got %s." % b_name) + if res["backing-filename-format"] != b_fmt or res["backing-filename"] != b_name: + test.fail("Backing file information is not correct," f" got {b_name}.") compat = res["format-specific"]["data"]["compat"] expected = _get_compat_version() - if (compat != expected): - test.fail("Snapshot's compat mode is not correct," - " got %s, expected %s." % (compat, expected)) + if compat != expected: + test.fail( + "Snapshot's compat mode is not correct," + f" got {compat}, expected {expected}." + ) def _verify_unsafe_rebase(img): """Verify qemu-img check output for this case.""" test.log.info("Verify snapshot's unsafe check information.") - res = process.run("%s check %s" % (img.image_cmd, img.image_filename), - ignore_status=True) - expected = ["Could not open backing file", img.base_image_filename, - "No such file or directory"] + res = process.run( + f"{img.image_cmd} check {img.image_filename}", ignore_status=True + ) + expected = [ + "Could not open backing file", + img.base_image_filename, + "No such file or directory", + ] for msg in expected: if msg not in res.stderr_text: - test.fail("The %s should not exist." % img.base_image_filename) + test.fail(f"The {img.base_image_filename} should not exist.") gen = generate_base_snapshot_pair(params["image_chain"]) base, snapshot = next(gen) @@ -67,12 +71,14 @@ def _verify_unsafe_rebase(img): # workaround to assign system disk's image_name to image_name_image1 params["image_name_image1"] = params["image_name"] QemuImgTest(test, params, env, snapshot).create_snapshot() - _verify_qemu_img_info(sn_img.info(output="json"), - base_img.image_format, base_img.image_filename) + _verify_qemu_img_info( + sn_img.info(output="json"), base_img.image_format, base_img.image_filename + ) sn_img.base_tag = params["none_existing_image"] sn_img.rebase(sn_img_params) - _verify_qemu_img_info(sn_img.info(output="json"), - sn_img.base_format, sn_img.base_image_filename) + _verify_qemu_img_info( + sn_img.info(output="json"), sn_img.base_format, sn_img.base_image_filename + ) _verify_unsafe_rebase(sn_img) diff --git a/qemu/tests/usb_basic_check.py b/qemu/tests/usb_basic_check.py index 1726ce2bb0..2ec33318c4 100644 --- a/qemu/tests/usb_basic_check.py +++ b/qemu/tests/usb_basic_check.py @@ -1,9 +1,8 @@ -import time import re +import time import aexpect -from virttest import utils_misc -from virttest import error_context +from virttest import error_context, utils_misc @error_context.context_aware @@ -17,8 +16,11 @@ def check_usb_device_monitor(test, params, env): o = o.get("return") info_usb_name = params.get("info_usb_name") if info_usb_name and (info_usb_name not in o): - test.fail("Could not find '%s' device, monitor " - "returns: \n%s" % (params.get("product"), o)) + test.fail( + "Could not find '{}' device, monitor " "returns: \n{}".format( + params.get("product"), o + ) + ) def check_usb_device_guest(session, item, cmd, timeout): @@ -26,8 +28,8 @@ def __check(session, item, cmd): output = session.cmd_output(cmd) devices = re.findall(item, output, re.I | re.M) if not devices: - msg = "Could not find item '%s' in guest, " % item - msg += "Output('%s') in guest: %s" % (cmd, output) + msg = f"Could not find item '{item}' in guest, " + msg += f"Output('{cmd}') in guest: {output}" msgbox.append(msg) return devices diff --git a/qemu/tests/usb_common.py b/qemu/tests/usb_common.py index 616e4f04f6..5186705a29 100644 --- 
a/qemu/tests/usb_common.py +++ b/qemu/tests/usb_common.py @@ -3,12 +3,11 @@ except ImportError: import json import logging - from collections import OrderedDict from virttest import utils_misc -LOG_JOB = logging.getLogger('avocado.test') +LOG_JOB = logging.getLogger("avocado.test") def parse_usb_topology(params): @@ -21,13 +20,12 @@ def parse_usb_topology(params): """ params["usb_devices"] = "" # usb topology - usb_topology = json.loads(params["usb_topology"], - object_pairs_hook=OrderedDict) + usb_topology = json.loads(params["usb_topology"], object_pairs_hook=OrderedDict) parsed_devs = [] for key, value in usb_topology.items(): for i in range(value): - params["usb_devices"] += " d%s" % len(parsed_devs) - usb_type = '{"usbdev_type_d%s": "%s"}' % (len(parsed_devs), key) + params["usb_devices"] += f" d{len(parsed_devs)}" + usb_type = f'{{"usbdev_type_d{len(parsed_devs)}": "{key}"}}' params.update(json.loads(usb_type)) parsed_devs.append(json.loads(usb_type)) return parsed_devs @@ -46,6 +44,7 @@ def collect_usb_dev(params, vm, parsed_devs, suffix): verification,[id(eg.d0), info(eg.usb-hub), port(eg.1)] the info will change based on suffix. """ + def _change_dev_info_key(parsed_type, suffix): info_key = parsed_type.replace("-", "_") return "_".join([info_key, suffix]) @@ -53,7 +52,7 @@ def _change_dev_info_key(parsed_type, suffix): devs = [] for parsed_dev in parsed_devs: key = list(parsed_dev.keys())[0] - usb_dev_id = "usb-%s" % key[12:] + usb_dev_id = f"usb-{key[12:]}" usb_dev_info = params[_change_dev_info_key(parsed_dev[key], suffix)] usb_dev_port = str(vm.devices.get(usb_dev_id).get_param("port")) devs.append([usb_dev_id, usb_dev_info, usb_dev_port]) @@ -76,10 +75,9 @@ def verify_usb_device_in_monitor(vm, devs): output = str(vm.monitor.info("usb")).splitlines() for dev in devs: for chk_str in dev: - result = next((True for info in output if chk_str in info), - False) + result = next((True for info in output if chk_str in info), False) if result is False: - return (False, "[%s] is not in the monitor info" % chk_str) + return (False, f"[{chk_str}] is not in the monitor info") return (True, "all given devices in the monitor info") @@ -98,7 +96,7 @@ def verify_usb_device_in_monitor_qtree(vm, devs): for dev in devs: for chk_str in dev: if chk_str not in output: - return (False, "[%s] is not in the monitor info" % chk_str) + return (False, f"[{chk_str}] is not in the monitor info") return (True, "all given devices are verified in the monitor info") @@ -111,16 +109,17 @@ def verify_usb_device_in_guest(params, session, devs): :return: A tuple (status, output) where status is the verification result and output is the detail information """ + def _verify_guest_usb(): - output = session.cmd_output(params["chk_usb_info_cmd"], - float(params["cmd_timeout"])) + output = session.cmd_output( + params["chk_usb_info_cmd"], float(params["cmd_timeout"]) + ) # For usb-hub, '-v' must be used to get the expected usb info. # For non usb-hub, refer to 'chk_usb_info_cmd', two situations: # '-v' must be used to get expected info # '-v' must not be used to avoid duplicate info in output and affect the device count. 
if "Hub" in str(devs) and os_type == "linux": - hub_output = session.cmd_output("lsusb -v", - float(params["cmd_timeout"])) + hub_output = session.cmd_output("lsusb -v", float(params["cmd_timeout"])) # each dev must in the output for dev in devs: if "Hub" in dev[1] and os_type == "linux": @@ -141,25 +140,24 @@ def _verify_guest_usb(): o = output count = o.count(k) if count != v: - LOG_JOB.info("expected %s %s, got %s in the guest", - v, k, count) + LOG_JOB.info("expected %s %s, got %s in the guest", v, k, count) return False return True os_type = params.get("os_type") if os_type == "linux": LOG_JOB.info("checking if there is I/O error in dmesg") - output = session.cmd_output("dmesg | grep -i usb", - float(params["cmd_timeout"])) + output = session.cmd_output("dmesg | grep -i usb", float(params["cmd_timeout"])) for line in output.splitlines(): if "error" in line or "ERROR" in line: - return (False, "error found in guest's dmesg: %s " % line) - - res = utils_misc.wait_for(_verify_guest_usb, - float(params["cmd_timeout"]), - step=5.0, - text="wait for getting guest usb devices info" - ) + return (False, f"error found in guest's dmesg: {line} ") + + res = utils_misc.wait_for( + _verify_guest_usb, + float(params["cmd_timeout"]), + step=5.0, + text="wait for getting guest usb devices info", + ) if res: return (True, "all given devices are verified in the guest") diff --git a/qemu/tests/usb_device_check.py b/qemu/tests/usb_device_check.py index 7c86459152..e5ee93d1d5 100644 --- a/qemu/tests/usb_device_check.py +++ b/qemu/tests/usb_device_check.py @@ -1,9 +1,11 @@ -from virttest import env_process -from virttest import error_context -from qemu.tests.usb_common import (parse_usb_topology, - collect_usb_dev, - verify_usb_device_in_monitor_qtree, - verify_usb_device_in_guest) +from virttest import env_process, error_context + +from qemu.tests.usb_common import ( + collect_usb_dev, + parse_usb_topology, + verify_usb_device_in_guest, + verify_usb_device_in_monitor_qtree, +) @error_context.context_aware @@ -19,6 +21,7 @@ def run(test, params, env): :param params: Dictionary with the test parameters. :param env: Dictionary with test environment. 
""" + def _check_test_step_result(result, output): if result: test.log.info(output) @@ -29,17 +32,16 @@ def _check_test_step_result(result, output): parsed_devs = parse_usb_topology(params) test.log.info("starting vm according to the usb topology") - env_process.process(test, params, env, - env_process.preprocess_image, - env_process.preprocess_vm) + env_process.process( + test, params, env, env_process.preprocess_image, env_process.preprocess_vm + ) vm = env.get_vm(params["main_vm"]) vm.verify_alive() # collect usb dev information for qemu check devs = collect_usb_dev(params, vm, parsed_devs, "for_qemu") - error_context.context("verify usb devices information in qemu...", - test.log.info) + error_context.context("verify usb devices information in qemu...", test.log.info) result, output = verify_usb_device_in_monitor_qtree(vm, devs) _check_test_step_result(result, output) @@ -48,8 +50,7 @@ def _check_test_step_result(result, output): login_timeout = int(params.get("login_timeout", 360)) session = vm.wait_for_login(timeout=login_timeout) - error_context.context("verify usb devices information in guest...", - test.log.info) + error_context.context("verify usb devices information in guest...", test.log.info) result, output = verify_usb_device_in_guest(params, session, devs) _check_test_step_result(result, output) diff --git a/qemu/tests/usb_device_check_negative.py b/qemu/tests/usb_device_check_negative.py index 65f3d0b4f2..202312edb6 100644 --- a/qemu/tests/usb_device_check_negative.py +++ b/qemu/tests/usb_device_check_negative.py @@ -1,6 +1,5 @@ -from virttest import (env_process, - error_context, - virt_vm) +from virttest import env_process, error_context, virt_vm + from qemu.tests.usb_common import parse_usb_topology @@ -20,12 +19,13 @@ def run(test, params, env): parse_usb_topology(params) test.log.info("starting vm according to the usb topology") error_info = params["error_info"] - error_context.context(("verify [%s] is reported by QEMU..." % - error_info), test.log.info) + error_context.context( + (f"verify [{error_info}] is reported by QEMU..."), test.log.info + ) try: - env_process.process(test, params, env, - env_process.preprocess_image, - env_process.preprocess_vm) + env_process.process( + test, params, env, env_process.preprocess_image, env_process.preprocess_vm + ) except virt_vm.VMCreateError as e: if error_info not in e.output: - test.fail("%s is not reported by QEMU" % error_info) + test.fail(f"{error_info} is not reported by QEMU") diff --git a/qemu/tests/usb_host.py b/qemu/tests/usb_host.py index 861bcde737..5ea3495caf 100644 --- a/qemu/tests/usb_host.py +++ b/qemu/tests/usb_host.py @@ -1,11 +1,9 @@ -import re import os +import re import time from avocado.utils import process - -from virttest import error_context -from virttest import utils_misc +from virttest import error_context, utils_misc from virttest.qemu_devices import qdevices from virttest.qemu_monitor import QMPCmdError from virttest.utils_test import BackgroundTest @@ -22,6 +20,7 @@ def run(test, params, env): :param params: Dictionary with the test parameters :param env: Dictionary with test environment. 
""" + def get_usb_host_dev(): device_list = [] for device in vm.devices: @@ -31,7 +30,7 @@ def get_usb_host_dev(): return device_list def get_vendorid_productid(bus, addr): - out = process.getoutput("lsusb -v -s %s:%s" % (bus, addr)) + out = process.getoutput(f"lsusb -v -s {bus}:{addr}") res = re.search(r"idVendor\s+0x(\w+).*idProduct\s+0x(\w+)", out, re.S) return (res.group(1), res.group(2)) @@ -66,26 +65,26 @@ def usb_dev_unplug(dev): test.fail("kernel didn't detect unplug") def _get_usb_mount_point(): - """ Get passthrough usb stick mount point """ + """Get passthrough usb stick mount point""" dmesg_cmd = "dmesg | grep 'Attached SCSI removable disk'" s, o = session.cmd_status_output(dmesg_cmd) if s: test.error("Fail to get passthrough usb stick in guest.") - dev = re.findall(r'\[(sd\w+)\]', o)[0] - mounts_cmd = "cat /proc/mounts | grep /dev/%s" % dev + dev = re.findall(r"\[(sd\w+)\]", o)[0] + mounts_cmd = f"cat /proc/mounts | grep /dev/{dev}" s, o = session.cmd_status_output(mounts_cmd) if not s: - s, o = session.cmd_status_output('umount /dev/%s' % dev) + s, o = session.cmd_status_output(f"umount /dev/{dev}") if s: - test.error("Fail to umount /dev/%s, output: %s" % (s, o)) - mkfs_cmd = "mkfs.vfat /dev/%s" % dev + test.error(f"Fail to umount /dev/{s}, output: {o}") + mkfs_cmd = f"mkfs.vfat /dev/{dev}" s, o = session.cmd_status_output(mkfs_cmd) if s: - test.error("Fail to build filesystem on usb stick: %s" % o) + test.error(f"Fail to build filesystem on usb stick: {o}") mount_point = "/mnt" - s, o = session.cmd_status_output("mount /dev/%s %s" % (dev, mount_point)) + s, o = session.cmd_status_output(f"mount /dev/{dev} {mount_point}") if s: - test.error("Fail to mount /dev/%s, output: %s" % (s, o)) + test.error(f"Fail to mount /dev/{s}, output: {o}") return mount_point def _usb_stick_io(mount_point, bg=False): @@ -93,18 +92,19 @@ def _usb_stick_io(mount_point, bg=False): Do I/O operations on passthrough usb stick """ error_context.context("Read and write on usb stick ", test.log.info) - testfile = os.path.join(mount_point, 'testfile') + testfile = os.path.join(mount_point, "testfile") if bg: iozone_cmd = params.get("iozone_cmd_bg", " -az -I -g 1g -f %s") - iozone_thread = BackgroundTest(iozone_test.run, # pylint: disable=E0606 - (iozone_cmd % testfile,)) + iozone_thread = BackgroundTest( + iozone_test.run, # pylint: disable=E0606 + (iozone_cmd % testfile,), + ) iozone_thread.start() if not utils_misc.wait_for(iozone_thread.is_alive, timeout=10): test.fail("Fail to start the iozone background test.") time.sleep(10) else: - iozone_cmd = params.get("iozone_cmd", - " -a -I -r 64k -s 1m -i 0 -i 1 -f %s") + iozone_cmd = params.get("iozone_cmd", " -a -I -r 64k -s 1m -i 0 -i 1 -f %s") iozone_test.run(iozone_cmd % testfile) # pylint: disable=E0606 usb_params = {} @@ -124,36 +124,37 @@ def _usb_stick_io(mount_point, bg=False): try: vm.devices.simple_hotplug(dev, vm.monitor) except QMPCmdError as detail: - test.log.warn(detail) + test.log.warning(detail) for msg in usb_reply_msg_list: - if msg in detail.data['desc']: + if msg in detail.data["desc"]: break else: - test.fail("Could not get expected warning" - " msg in negative test, monitor" - " returns: '%s'" % detail) + test.fail( + "Could not get expected warning" + " msg in negative test, monitor" + f" returns: '{detail}'" + ) else: - test.fail("Hotplug operation in negative test" - " should not succeed.") + test.fail("Hotplug operation in negative test" " should not succeed.") return usb_hostdev = params["usb_devices"].split()[-1] usb_options = 
params.get("options") if usb_options == "with_vendorid_productid": - vendorid = params["usbdev_option_vendorid_%s" % usb_hostdev] - productid = params["usbdev_option_productid_%s" % usb_hostdev] - usb_params["vendorid"] = "0x%s" % vendorid - usb_params["productid"] = "0x%s" % productid + vendorid = params[f"usbdev_option_vendorid_{usb_hostdev}"] + productid = params[f"usbdev_option_productid_{usb_hostdev}"] + usb_params["vendorid"] = f"0x{vendorid}" + usb_params["productid"] = f"0x{productid}" elif usb_options == "with_hostbus_hostaddr": - hostbus = params["usbdev_option_hostbus_%s" % usb_hostdev] - hostaddr = params["usbdev_option_hostaddr_%s" % usb_hostdev] + hostbus = params[f"usbdev_option_hostbus_{usb_hostdev}"] + hostaddr = params[f"usbdev_option_hostaddr_{usb_hostdev}"] usb_params["hostbus"] = hostbus usb_params["hostaddr"] = hostaddr (vendorid, productid) = get_vendorid_productid(hostbus, hostaddr) - lsusb_cmd = "lsusb -v -d %s:%s" % (vendorid, productid) + lsusb_cmd = f"lsusb -v -d {vendorid}:{productid}" match_add = "New USB device found, " - match_add += "idVendor=%s, idProduct=%s" % (vendorid, productid) + match_add += f"idVendor={vendorid}, idProduct={productid}" match_del = "USB disconnect" usb_stick = "Mass Storage" in process.getoutput(lsusb_cmd) @@ -167,7 +168,7 @@ def _usb_stick_io(mount_point, bg=False): if usb_stick: iozone_test = None mount_point = _get_usb_mount_point() - iozone_test = generate_instance(params, vm, 'iozone') + iozone_test = generate_instance(params, vm, "iozone") _usb_stick_io(mount_point) usb_devs = get_usb_host_dev() for dev in usb_devs: @@ -175,11 +176,11 @@ def _usb_stick_io(mount_point, bg=False): repeat_times = int(params.get("usb_repeat_times", "1")) for i in range(repeat_times): - msg = "Hotplug (iteration %d)" % (i+1) - usb_params["id"] = "usbhostdev%s" % i + msg = "Hotplug (iteration %d)" % (i + 1) + usb_params["id"] = f"usbhostdev{i}" if params.get("usb_check_isobufs", "no") == "yes": # The value of isobufs could only be in '4, 8, 16' - isobufs = (2 << (i % 3 + 1)) + isobufs = 2 << (i % 3 + 1) usb_params["isobufs"] = isobufs msg += ", with 'isobufs' option set to %d." % isobufs error_context.context(msg, test.log.info) diff --git a/qemu/tests/usb_hotplug.py b/qemu/tests/usb_hotplug.py index 2e496c18af..5728c57b8b 100644 --- a/qemu/tests/usb_hotplug.py +++ b/qemu/tests/usb_hotplug.py @@ -1,5 +1,5 @@ -import time import re +import time from virttest import error_context @@ -13,6 +13,7 @@ def run(test, params, env): :param params: Dictionary with the test parameters :param env: Dictionary with test environment. 
""" + @error_context.context_aware def usb_dev_hotplug(): error_context.context("Plugin usb device", test.log.info) @@ -20,19 +21,20 @@ def usb_dev_hotplug(): reply = vm.monitor.cmd(monitor_add) if params.get("usb_negative_test") == "yes": if params["usb_reply_msg"] not in reply: - test.fail("Could not get expected warning" - " msg in negative test, monitor" - " returns: '%s'" % reply) + test.fail( + "Could not get expected warning" + " msg in negative test, monitor" + f" returns: '{reply}'" + ) return monitor_pattern = "Parameter 'driver' expects a driver name" if reply.find(monitor_pattern) != -1: - test.cancel("usb device %s not available" % device) + test.cancel(f"usb device {device} not available") @error_context.context_aware def usb_dev_verify(): - error_context.context("Verify usb device is pluged on guest", - test.log.info) + error_context.context("Verify usb device is pluged on guest", test.log.info) time.sleep(sleep_time) session.cmd(udev_refresh_cmd) messages_add = session.cmd(query_syslog_cmd) @@ -57,7 +59,7 @@ def usb_dev_unplug(): product_id = params["product_id"] # compose strings - monitor_add = "device_add %s" % device + monitor_add = f"device_add {device}" monitor_add += ",bus=usbtest.0,id=usbplugdev" monitor_del = "device_del usbplugdev" match_add = params.get("usb_match_add", "idVendor=%s, idProduct=%s") diff --git a/qemu/tests/usb_redir.py b/qemu/tests/usb_redir.py index 8dd3808094..25d37e4a29 100644 --- a/qemu/tests/usb_redir.py +++ b/qemu/tests/usb_redir.py @@ -1,17 +1,13 @@ -import re import os +import re -from virttest import error_context -from virttest import utils_misc -from virttest import utils_package -from virttest import env_process +from avocado.utils import process +from virttest import env_process, error_context, utils_misc, utils_package from virttest.qemu_devices import qdevices from virttest.utils_params import Params from provider.storage_benchmark import generate_instance -from avocado.utils import process - @error_context.context_aware def run(test, params, env): @@ -29,31 +25,31 @@ def run(test, params, env): :param params: Dictionary with test parameters :param env: Dictionary with test environment. 
""" + def _start_usbredir_server(port): process.getoutput("killall usbredirserver") - usbredir_server = utils_misc.get_binary('usbredirserver', params) - usbredirserver_args = usbredir_server + " -p %s " % port - usbredirserver_args += " %s:%s" % (vendorid, productid) + usbredir_server = utils_misc.get_binary("usbredirserver", params) + usbredirserver_args = usbredir_server + f" -p {port} " + usbredirserver_args += f" {vendorid}:{productid}" usbredirserver_args += " > /dev/null 2>&1" - rv_thread = utils_misc.InterruptedThread(os.system, - (usbredirserver_args,)) + rv_thread = utils_misc.InterruptedThread(os.system, (usbredirserver_args,)) rv_thread.start() def create_repo(): test.log.info("Create temp repository") version_cmd = 'grep "^VERSION_ID=" /etc/os-release | cut -d = -f2' version_id = process.getoutput(version_cmd).strip('"') - major, minor = version_id.split('.') + major, minor = version_id.split(".") baseurl = params["temprepo_url"] baseurl = baseurl.replace("MAJOR", major) - content = "[temp]\nname=temp\nbaseurl=%s\nenable=1\n" % baseurl + content = f"[temp]\nname=temp\nbaseurl={baseurl}\nenable=1\n" content += "gpgcheck=0\nskip_if_unavailable=1" - create_cmd = r'echo -e "%s" > /etc/yum.repos.d/temp.repo' % content + create_cmd = rf'echo -e "{content}" > /etc/yum.repos.d/temp.repo' process.system(create_cmd, shell=True) def _host_config_check(): status = True - err_msg = '' + err_msg = "" if option == "with_negative_config": out = process.getoutput("dmesg") pattern = r"usb (\d-\d+(?:.\d)?):.*idVendor=%s, idProduct=%s" @@ -71,30 +67,29 @@ def _host_config_check(): s, o = process.getstatusoutput(cmd) if s: status = False - err_msg = "Fail to unconfig the USB device, output: %s" % o + err_msg = f"Fail to unconfig the USB device, output: {o}" return (status, err_msg) - if backend == 'spicevmc': + if backend == "spicevmc": gui_group = "Server with GUI" - out = process.getoutput('yum group list --installed', shell=True) - obj = re.search(r"(Installed Environment Groups:.*?)^\S", - out, re.S | re.M) + out = process.getoutput("yum group list --installed", shell=True) + obj = re.search(r"(Installed Environment Groups:.*?)^\S", out, re.S | re.M) if not obj or gui_group not in obj.group(1): - gui_groupinstall_cmd = "yum groupinstall -y '%s'" % gui_group + gui_groupinstall_cmd = f"yum groupinstall -y '{gui_group}'" s, o = process.getstatusoutput(gui_groupinstall_cmd, shell=True) if s: status = False - err_msg = "Fail to install '%s' on host, " % gui_group - err_msg += "output: %s" % o + err_msg = f"Fail to install '{gui_group}' on host, " + err_msg += f"output: {o}" return (status, err_msg) virt_viewer_cmd = "rpm -q virt-viewer || yum install -y virt-viewer" s, o = process.getstatusoutput(virt_viewer_cmd, shell=True) if s: status = False err_msg = "Fail to install 'virt-viewer' on host, " - err_msg += "output: %s" % o + err_msg += f"output: {o}" return (status, err_msg) - elif backend == 'tcp_socket': + elif backend == "tcp_socket": create_repo() if not utils_package.package_install("usbredir-server"): status = False @@ -104,36 +99,36 @@ def _host_config_check(): def _usbredir_preprocess(): def _generate_usb_redir_cmdline(): - extra_params = '' - _backend = 'socket' if 'socket' in backend else backend - chardev_id = usbredir_params.get("chardev_id", - "chardev_%s" % usbredirdev_name) - chardev_params = Params({'backend': _backend, 'id': chardev_id}) - if backend == 'spicevmc': - chardev_params['debug'] = usbredir_params.get('chardev_debug') - chardev_params['name'] = 
usbredir_params.get('chardev_name') + extra_params = "" + _backend = "socket" if "socket" in backend else backend + chardev_id = usbredir_params.get( + "chardev_id", f"chardev_{usbredirdev_name}" + ) + chardev_params = Params({"backend": _backend, "id": chardev_id}) + if backend == "spicevmc": + chardev_params["debug"] = usbredir_params.get("chardev_debug") + chardev_params["name"] = usbredir_params.get("chardev_name") else: - chardev_params['host'] = usbredir_params['chardev_host'] - chardev_params['port'] = free_port # pylint: disable=E0606 - chardev_params['server'] = usbredir_params.get('chardev_server') - chardev_params['wait'] = usbredir_params.get('chardev_wait') + chardev_params["host"] = usbredir_params["chardev_host"] + chardev_params["port"] = free_port # pylint: disable=E0606 + chardev_params["server"] = usbredir_params.get("chardev_server") + chardev_params["wait"] = usbredir_params.get("chardev_wait") chardev = qdevices.CharDevice(chardev_params, chardev_id) - usbredir_dev = qdevices.QDevice('usb-redir', - aobject=usbredirdev_name) + usbredir_dev = qdevices.QDevice("usb-redir", aobject=usbredirdev_name) usbredir_filter = usbredir_params.get("usbdev_option_filter") usbredir_bootindex = usbredir_params.get("usbdev_option_bootindex") usbredir_bus = usbredir_params.get("usbdev_bus") - usbredir_dev.set_param('id', 'usb-%s' % usbredirdev_name) - usbredir_dev.set_param('chardev', chardev_id) - usbredir_dev.set_param('filter', usbredir_filter) - usbredir_dev.set_param('bootindex', usbredir_bootindex) - usbredir_dev.set_param('bus', usbredir_bus) - extra_params += ' '.join([chardev.cmdline(), - usbredir_dev.cmdline()]) + usbredir_dev.set_param("id", f"usb-{usbredirdev_name}") + usbredir_dev.set_param("chardev", chardev_id) + usbredir_dev.set_param("filter", usbredir_filter) + usbredir_dev.set_param("bootindex", usbredir_bootindex) + usbredir_dev.set_param("bus", usbredir_bus) + extra_params += " ".join([chardev.cmdline(), usbredir_dev.cmdline()]) return extra_params + extra_params = _generate_usb_redir_cmdline() params["extra_params"] = extra_params - if backend == 'spicevmc': + if backend == "spicevmc": params["paused_after_start_vm"] = "yes" del params["spice_password"] del params["spice_addr"] @@ -148,8 +143,8 @@ def _generate_usb_redir_cmdline(): def _start_spice_redirection(): def _rv_connection_check(): - rv_pid = process.getoutput("pidof %s" % rv_binary) - spice_port = vm.get_spice_var('spice_port') + rv_pid = process.getoutput(f"pidof {rv_binary}") + spice_port = vm.get_spice_var("spice_port") cmd = 'netstat -ptn | grep "^tcp.*127.0.0.1:%s.*ESTABLISHED %s.*"' cmd = cmd % (spice_port, rv_pid) s, o = process.getstatusoutput(cmd) @@ -157,19 +152,20 @@ def _rv_connection_check(): return False test.log.info("netstat output:\n%s", o) return True + status = True - err_msg = '' + err_msg = "" rv_binary_path = utils_misc.get_binary(rv_binary, params) - spice_port = vm.get_spice_var('spice_port') - rv_args = rv_binary_path + " spice://localhost:%s " % spice_port + spice_port = vm.get_spice_var("spice_port") + rv_args = rv_binary_path + f" spice://localhost:{spice_port} " rv_args += "--spice-usbredir-redirect-on-connect=" - rv_args += "'-1,0x%s,0x%s,-1,1'" % (vendorid, productid) + rv_args += f"'-1,0x{vendorid},0x{productid},-1,1'" rv_args += " > /dev/null 2>&1" rv_thread = utils_misc.InterruptedThread(os.system, (rv_args,)) rv_thread.start() if not utils_misc.wait_for(_rv_connection_check, timeout, 60): status = False - err_msg = "Fail to establish %s connection" % rv_binary + err_msg = 
f"Fail to establish {rv_binary} connection" return (status, err_msg) def boot_check(info): @@ -185,26 +181,26 @@ def _usb_dev_verify(): return True def _kill_rv_proc(): - s, o = process.getstatusoutput("pidof %s" % rv_binary) + s, o = process.getstatusoutput(f"pidof {rv_binary}") if not s: - process.getoutput("killall %s" % rv_binary) + process.getoutput(f"killall {rv_binary}") def _get_usb_mount_point(): - """ Get redirected USB stick mount point """ + """Get redirected USB stick mount point""" dmesg_cmd = "dmesg | grep 'Attached SCSI removable disk'" s, o = session.cmd_status_output(dmesg_cmd) if s: test.error("Fail to get redirected USB stick in guest.") - dev = re.findall(r'\[(sd\w+)\]', o)[0] - mounts_cmd = "cat /proc/mounts | grep /dev/%s" % dev + dev = re.findall(r"\[(sd\w+)\]", o)[0] + mounts_cmd = f"cat /proc/mounts | grep /dev/{dev}" s, o = session.cmd_status_output(mounts_cmd) if s: - s, o = session.cmd_status_output('mount /dev/%s /mnt' % dev) + s, o = session.cmd_status_output(f"mount /dev/{dev} /mnt") if s: - test.error("Fail to mount /dev/%s, output: %s" % (dev, o)) + test.error(f"Fail to mount /dev/{dev}, output: {o}") mp = "/mnt" else: - mp = re.findall(r'/dev/%s\d*\s+(\S+)\s+' % dev, o)[0] + mp = re.findall(rf"/dev/{dev}\d*\s+(\S+)\s+", o)[0] return mp def _usb_stick_io(mount_point): @@ -212,34 +208,33 @@ def _usb_stick_io(mount_point): Do I/O operations on passthrough USB stick """ error_context.context("Read and write on USB stick ", test.log.info) - testfile = os.path.join(mount_point, 'testfile') - iozone_cmd = params.get("iozone_cmd", - " -a -I -r 64k -s 1m -i 0 -i 1 -f %s") + testfile = os.path.join(mount_point, "testfile") + iozone_cmd = params.get("iozone_cmd", " -a -I -r 64k -s 1m -i 0 -i 1 -f %s") iozone_test.run(iozone_cmd % testfile) # pylint: disable=E0606 usbredirdev_name = params["usbredirdev_name"] usbredir_params = params.object_params(usbredirdev_name) - backend = usbredir_params.get('chardev_backend', 'spicevmc') - if backend not in ('spicevmc', 'tcp_socket'): - test.error("Unsupported char device backend type: %s" % backend) + backend = usbredir_params.get("chardev_backend", "spicevmc") + if backend not in ("spicevmc", "tcp_socket"): + test.error(f"Unsupported char device backend type: {backend}") - if backend == 'spicevmc' and params.get('display') != 'spice': + if backend == "spicevmc" and params.get("display") != "spice": test.cancel("Only support spice connection") option = params.get("option") vendorid = params["usbredir_vendorid"] productid = params["usbredir_productid"] timeout = params.get("wait_timeout", 600) - lsusb_cmd = "lsusb -v -d %s:%s" % (vendorid, productid) + lsusb_cmd = f"lsusb -v -d {vendorid}:{productid}" usb_stick = "Mass Storage" in process.getoutput(lsusb_cmd) - rv_binary = params.get('rv_binary', 'remote-viewer') + rv_binary = params.get("rv_binary", "remote-viewer") error_context.context("Check host configurations", test.log.info) s, o = _host_config_check() if not s: test.error(o) - if backend == 'tcp_socket': + if backend == "tcp_socket": free_port = utils_misc.find_free_port() _start_usbredir_server(free_port) @@ -248,7 +243,7 @@ def _usb_stick_io(mount_point): vm = env.get_vm(params["main_vm"]) vm.verify_alive() - if backend == 'spicevmc': + if backend == "spicevmc": error_context.context("Start USB redirection via spice", test.log.info) s, o = _start_spice_redirection() if not s: @@ -259,8 +254,7 @@ def _usb_stick_io(mount_point): error_context.context("Check 'bootindex' option", test.log.info) boot_menu_hint = 
params["boot_menu_hint"] boot_menu_key = params["boot_menu_key"] - if not utils_misc.wait_for(lambda: boot_check(boot_menu_hint), - timeout, 1): + if not utils_misc.wait_for(lambda: boot_check(boot_menu_hint), timeout, 1): test.fail("Could not get boot menu message") # Send boot menu key in monitor. @@ -272,17 +266,15 @@ def _usb_stick_io(mount_point): test.fail("Could not get boot entries list") test.log.info("Got boot menu entries: '%s'", boot_list) - bootindex = int(params["usbdev_option_bootindex_%s" % usbredirdev_name]) + bootindex = int(params[f"usbdev_option_bootindex_{usbredirdev_name}"]) if "USB" not in boot_list[bootindex]: test.fail("'bootindex' option of usb-redir doesn't take effect") if usb_stick: - error_context.context("Boot from redirected USB stick", - test.log.info) + error_context.context("Boot from redirected USB stick", test.log.info) boot_entry_info = params["boot_entry_info"] vm.send_key(str(bootindex + 1)) - if not utils_misc.wait_for(lambda: boot_check(boot_entry_info), - timeout, 1): + if not utils_misc.wait_for(lambda: boot_check(boot_entry_info), timeout, 1): test.fail("Could not boot from redirected USB stick") return @@ -294,12 +286,12 @@ def _usb_stick_io(mount_point): error_msg = "Redirected USB device can be found in guest" error_msg += " while policy is deny" test.fail(error_msg) - if backend == 'spicevmc': + if backend == "spicevmc": _kill_rv_proc() return if not _usb_dev_verify(): - if backend == 'tcp_socket': + if backend == "tcp_socket": process.system("killall usbredirserver", ignore_status=True) test.fail("Can not find the redirected USB device in guest") @@ -307,13 +299,13 @@ def _usb_stick_io(mount_point): iozone_test = None try: mount_point = _get_usb_mount_point() - iozone_test = generate_instance(params, vm, 'iozone') + iozone_test = generate_instance(params, vm, "iozone") _usb_stick_io(mount_point) finally: if iozone_test: iozone_test.clean() - if backend == 'tcp_socket': + if backend == "tcp_socket": process.system("killall usbredirserver", ignore_status=True) session.close() diff --git a/qemu/tests/usb_smartcard_sharing.py b/qemu/tests/usb_smartcard_sharing.py index 85083d4dcd..588edbf6ac 100644 --- a/qemu/tests/usb_smartcard_sharing.py +++ b/qemu/tests/usb_smartcard_sharing.py @@ -1,11 +1,8 @@ -import re import os - -from virttest import error_context -from virttest import data_dir -from virttest import utils_misc +import re from avocado.utils import process +from virttest import data_dir, error_context, utils_misc @error_context.context_aware @@ -24,20 +21,20 @@ def run(test, params, env): :param params: Dictionary with test parameters :param env: Dictionary with test environment. 
""" + def _client_config_check(): status = True - err_msg = '' + err_msg = "" gui_group = "Server with GUI" - out = process.getoutput('yum group list --installed', shell=True) - obj = re.search(r"(Installed Environment Groups:.*?)^\S", - out, re.S | re.M) + out = process.getoutput("yum group list --installed", shell=True) + obj = re.search(r"(Installed Environment Groups:.*?)^\S", out, re.S | re.M) if not obj or gui_group not in obj.group(1): - gui_groupinstall_cmd = "yum groupinstall -y '%s'" % gui_group + gui_groupinstall_cmd = f"yum groupinstall -y '{gui_group}'" s, o = process.getstatusoutput(gui_groupinstall_cmd, shell=True) if s: status = False - err_msg = "Fail to install '%s' on client, " % gui_group - err_msg += "output: %s" % o + err_msg = f"Fail to install '{gui_group}' on client, " + err_msg += f"output: {o}" return (status, err_msg) virt_viewer_cmd = "rpm -q virt-viewer || yum install -y virt-viewer" @@ -45,76 +42,75 @@ def _client_config_check(): if s: status = False err_msg = "Fail to install 'virt-viewer' on client, " - err_msg += "output: %s" % o + err_msg += f"output: {o}" return (status, err_msg) # unpack fake-smartcard database - sc_db = params.get('sc_db_tar', 'fake-smartcard.tar.gz') - sc_db_src = os.path.join(data_dir.get_deps_dir('smartcard'), sc_db) - unpack_sc_db = 'mkdir -p {0} && tar -zxvf {1} -C {0}' + sc_db = params.get("sc_db_tar", "fake-smartcard.tar.gz") + sc_db_src = os.path.join(data_dir.get_deps_dir("smartcard"), sc_db) + unpack_sc_db = "mkdir -p {0} && tar -zxvf {1} -C {0}" unpack_sc_db = unpack_sc_db.format(sc_db_dst, sc_db_src) s, o = process.getstatusoutput(unpack_sc_db, shell=True) if s: status = False err_msg = "Fail to unpack smartcard database on client, " - err_msg += "output: %s" % o + err_msg += f"output: {o}" return (status, err_msg) return (status, err_msg) def _guest_config_check(): status = True - err_msg = '' - required_groups = ('Server with GUI', 'Smart Card Support') - s, out = session.cmd_status_output('yum group list --installed') + err_msg = "" + required_groups = ("Server with GUI", "Smart Card Support") + s, out = session.cmd_status_output("yum group list --installed") test.log.info(out) if s: status = False - err_msg = 'Fail to get installed group list in guest, ' - err_msg += 'output: %s' % out + err_msg = "Fail to get installed group list in guest, " + err_msg += f"output: {out}" return (status, err_msg) for group in required_groups: if group not in out: - groupinstall_cmd = "yum groupinstall -y '%s'" % group - s, o = session.cmd_status_output(groupinstall_cmd, - timeout=timeout) + groupinstall_cmd = f"yum groupinstall -y '{group}'" + s, o = session.cmd_status_output(groupinstall_cmd, timeout=timeout) if s: status = False - err_msg = "Fail to install group '%s' in guest, " % group - err_msg += "output: %s" % o + err_msg = f"Fail to install group '{group}' in guest, " + err_msg += f"output: {o}" return (status, err_msg) - o = session.cmd_output('systemctl status pcscd') + o = session.cmd_output("systemctl status pcscd") test.log.info(o) - if 'running' not in o: - s, o = session.cmd_status_output('sytemctl restart pcscd') + if "running" not in o: + s, o = session.cmd_status_output("sytemctl restart pcscd") if s: status = False - err_msg = 'Fail to start pcscd in guest, ' - err_msg += 'output: %s' % out + err_msg = "Fail to start pcscd in guest, " + err_msg += f"output: {out}" return (status, err_msg) return (status, err_msg) def _check_sc_in_guest(): status = True - err_msg = '' - o = session.cmd_output('lsusb') + err_msg = "" + 
o = session.cmd_output("lsusb") test.log.info(o) if ccid_info not in o: status = False - err_msg = 'USB CCID device is not present in guest.' + err_msg = "USB CCID device is not present in guest." return (status, err_msg) - list_certs_cmd = 'pkcs11-tool --list-objects --type cert' + list_certs_cmd = "pkcs11-tool --list-objects --type cert" s, o = session.cmd_status_output(list_certs_cmd) test.log.info(o) if s: status = False - err_msg = 'Fail to list certificates on the smartcard.' + err_msg = "Fail to list certificates on the smartcard." return (status, err_msg) return (status, err_msg) def _start_rv_smartcard(): def _rv_connection_check(): - rv_pid = process.getoutput("pidof %s" % rv_binary) + rv_pid = process.getoutput(f"pidof {rv_binary}") cmd = 'netstat -ptn | grep "^tcp.*127.0.0.1:%s.*ESTABLISHED %s.*"' cmd = cmd % (spice_port, rv_pid) s, o = process.getstatusoutput(cmd) @@ -122,29 +118,30 @@ def _rv_connection_check(): return False test.log.info("netstat output:\n%s", o) return True + status = True - err_msg = '' + err_msg = "" rv_binary_path = utils_misc.get_binary(rv_binary, params) - spice_port = vm.get_spice_var('spice_port') - rv_args = rv_binary_path + " spice://localhost:%s " % spice_port - rv_args += "--spice-smartcard --spice-smartcard-db %s " % sc_db_dst + spice_port = vm.get_spice_var("spice_port") + rv_args = rv_binary_path + f" spice://localhost:{spice_port} " + rv_args += f"--spice-smartcard --spice-smartcard-db {sc_db_dst} " rv_args += "--spice-smartcard-certificates cert1,cert2,cert3" rv_args += " > /dev/null 2>&1" rv_thread = utils_misc.InterruptedThread(os.system, (rv_args,)) rv_thread.start() if not utils_misc.wait_for(_rv_connection_check, timeout, 60): status = False - err_msg = "Fail to establish %s connection" % rv_binary + err_msg = f"Fail to establish {rv_binary} connection" return (status, err_msg) - if params.get('display') != 'spice': + if params.get("display") != "spice": test.cancel("Only support spice connection") - usbscdev_name = params["usbscdev_name"] + params["usbscdev_name"] timeout = params.get("wait_timeout", 600) - rv_binary = params.get('rv_binary', 'remote-viewer') - sc_db_dst = params.get('sc_db_dst', '/home/fake_smartcard') - ccid_info = params.get('ccid_info', 'Gemalto') + rv_binary = params.get("rv_binary", "remote-viewer") + sc_db_dst = params.get("sc_db_dst", "/home/fake_smartcard") + ccid_info = params.get("ccid_info", "Gemalto") vm = env.get_vm(params["main_vm"]) vm.verify_alive() diff --git a/qemu/tests/usb_storage.py b/qemu/tests/usb_storage.py index 7512d120bc..8ed3e79e96 100644 --- a/qemu/tests/usb_storage.py +++ b/qemu/tests/usb_storage.py @@ -2,9 +2,7 @@ import uuid import aexpect -from virttest import (utils_test, - utils_misc, - error_context) +from virttest import error_context, utils_misc, utils_test @error_context.context_aware @@ -26,6 +24,7 @@ def run(test, params, env): :param params: Dictionary with the test parameters :param env: Dictionary with test environment. """ + @error_context.context_aware def _verify_string(regex_str, string, expect_result, search_opt=0): """ @@ -36,6 +35,7 @@ def _verify_string(regex_str, string, expect_result, search_opt=0): :param expect_result: The expected string :param search_opt: Search option for re module. 
""" + def _compare_str(act, exp, ignore_case): def str_func_1(x): return x @@ -48,23 +48,25 @@ def str_func_2(x): str_func = str_func_2 if str_func(act) != str_func(exp): - return ("Expected: '%s', Actual: '%s'" % - (str_func(exp), str_func(act))) + return f"Expected: '{str_func(exp)}', Actual: '{str_func(act)}'" return "" ignore_case = False if search_opt & re.I == re.I: ignore_case = True - error_context.context("Finding matched sub-string with regex" - " pattern '%s'" % regex_str, test.log.info) + error_context.context( + "Finding matched sub-string with regex" f" pattern '{regex_str}'", + test.log.info, + ) m = re.findall(regex_str, string, search_opt) if not m: test.log.debug(string) test.error("Could not find matched sub-string") - error_context.context("Verify matched string is same as expected", - test.log.info) + error_context.context( + "Verify matched string is same as expected", test.log.info + ) actual_result = m[0] if "removable" in regex_str: if actual_result in ["on", "yes", "true"]: @@ -85,8 +87,9 @@ def str_func_2(x): if fail_log: test.log.debug(string) - test.fail("Could not find expected string:\n %s" % - ("\n".join(fail_log))) + test.fail( + "Could not find expected string:\n {}".format("\n".join(fail_log)) + ) def _do_io_test_guest(): utils_test.run_virt_sub_test(test, params, env, "format_disk") @@ -113,8 +116,9 @@ def _get_output(): except aexpect.ShellCmdError: return "" - output = utils_misc.wait_for(_get_output, login_timeout, step=5, - text="Wait for getting USB disk name") + output = utils_misc.wait_for( + _get_output, login_timeout, step=5, text="Wait for getting USB disk name" + ) devname = re.findall(r"sd\w", output) if devname: return devname[0] @@ -122,8 +126,7 @@ def _get_output(): @error_context.context_aware def _check_serial_option(serial, regex_str, expect_str): - error_context.context("Set serial option to '%s'" % serial, - test.log.info) + error_context.context(f"Set serial option to '{serial}'", test.log.info) _restart_vm({"blk_extra_params_stg": "serial=" + serial}) error_context.context("Check serial option in monitor", test.log.info) @@ -140,44 +143,42 @@ def _check_serial_option(serial, regex_str, expect_str): @error_context.context_aware def _check_removable_option(removable, expect_str): - error_context.context("Set removable option to '%s'" % removable, - test.log.info) + error_context.context(f"Set removable option to '{removable}'", test.log.info) _restart_vm({"removable_stg": removable}) - error_context.context("Check removable option in monitor", - test.log.info) + error_context.context("Check removable option in monitor", test.log.info) output = str(vm.monitor.info("qtree")) - regex_str = r'usb-storage.*?removable = (.*?)\s' + regex_str = r"usb-storage.*?removable = (.*?)\s" _verify_string(regex_str, output, [removable], re.S) error_context.context("Check removable option in guest", test.log.info) session = _login() - cmd = "dmesg | grep %s" % _get_usb_disk_name_in_guest(session) + cmd = f"dmesg | grep {_get_usb_disk_name_in_guest(session)}" output = session.cmd(cmd) _verify_string(expect_str, output, [expect_str], re.I) session.close() @error_context.context_aware def _check_io_size_option(min_io_size="512", opt_io_size="0"): - error_context.context("Set min_io_size to %s, opt_io_size to %s" % - (min_io_size, opt_io_size), test.log.info) + error_context.context( + f"Set min_io_size to {min_io_size}, opt_io_size to {opt_io_size}", + test.log.info, + ) opt = {} opt["min_io_size_stg"] = min_io_size opt["opt_io_size_stg"] = opt_io_size 
_restart_vm(opt) - error_context.context("Check min/opt io_size option in monitor", - test.log.info) + error_context.context("Check min/opt io_size option in monitor", test.log.info) output = str(vm.monitor.info("qtree")) regex_str = r"usb-storage.*?min_io_size = (\d+).*?opt_io_size = (\d+)" _verify_string(regex_str, output, [min_io_size, opt_io_size], re.S) - error_context.context("Check min/opt io_size option in guest", - test.log.info) + error_context.context("Check min/opt io_size option in guest", test.log.info) session = _login() d = _get_usb_disk_name_in_guest(session) - cmd = ("cat /sys/block/%s/queue/{minimum,optimal}_io_size" % d) + cmd = f"cat /sys/block/{d}/queue/{{minimum,optimal}}_io_size" output = session.cmd(cmd) # Note: If set min_io_size = 0, guest min_io_size would be set to @@ -186,42 +187,37 @@ def _check_io_size_option(min_io_size="512", opt_io_size="0"): expected_min_size = min_io_size else: expected_min_size = "512" - _verify_string(r"(\d+)\n(\d+)", output, - [expected_min_size, opt_io_size]) + _verify_string(r"(\d+)\n(\d+)", output, [expected_min_size, opt_io_size]) session.close() vm = env.get_vm(params["main_vm"]) vm.verify_alive() login_timeout = int(params.get("login_timeout", 360)) - hotplug_unplug = (params["with_hotplug_unplug"] == "yes") + hotplug_unplug = params["with_hotplug_unplug"] == "yes" repeat_times = int(params.get("usb_repeat_times", "1")) - for rt in range(1, repeat_times+1): + for rt in range(1, repeat_times + 1): disk_hotplugged = [] if hotplug_unplug: - error_context.context("Hotplug the %s times." % rt, test.log.info) + error_context.context(f"Hotplug the {rt} times.", test.log.info) image_name = params.objects("images")[-1] image_params = params.object_params(image_name) - devices = vm.devices.images_define_by_params(image_name, - image_params, - 'disk', None, - False, None) + devices = vm.devices.images_define_by_params( + image_name, image_params, "disk", None, False, None + ) for dev in devices: ret = vm.devices.simple_hotplug(dev, vm.monitor) if ret[1] is False: - test.fail("Failed to hotplug device '%s'. Output:\n%s" - % (dev, ret[0])) + test.fail(f"Failed to hotplug device '{dev}'. Output:\n{ret[0]}") disk_hotplugged.append(devices[-1]) - error_context.context("Check usb device information in monitor", - test.log.info) + error_context.context("Check usb device information in monitor", test.log.info) output = str(vm.monitor.info("usb")) if "Product QEMU USB MSD" not in output: test.log.debug(output) test.fail("Could not find mass storage device") - error_context.context("Check usb device information in guest", - test.log.info) + error_context.context("Check usb device information in guest", test.log.info) session = _login() output = session.cmd(params["chk_usb_info_cmd"]) # No bus specified, default using "usb.0" for "usb-storage" @@ -241,7 +237,7 @@ def _check_io_size_option(min_io_size="512", opt_io_size="0"): test.log.info("Set usb serial to a empty string") # An empty string, "" serial = "EMPTY_STRING" - regex_str = r'usb-storage.*?serial = (.*?)\s' + regex_str = r"usb-storage.*?serial = (.*?)\s" _check_serial_option(serial, regex_str, '""') test.log.info("Leave usb serial option blank") @@ -267,9 +263,8 @@ def _check_io_size_option(min_io_size="512", opt_io_size="0"): # _check_io_size_option("4096", "4096") if hotplug_unplug: - error_context.context("Hotunplug the %s times." 
% rt, test.log.info) + error_context.context(f"Hotunplug the {rt} times.", test.log.info) for dev in disk_hotplugged: ret = vm.devices.simple_unplug(dev, vm.monitor) if ret[1] is False: - test.fail("Failed to unplug device '%s'. Output:\n%s" - % (dev, ret[0])) + test.fail(f"Failed to unplug device '{dev}'. Output:\n{ret[0]}") diff --git a/qemu/tests/valgrind_memalign.py b/qemu/tests/valgrind_memalign.py index 788f4847d8..edebf09f0a 100644 --- a/qemu/tests/valgrind_memalign.py +++ b/qemu/tests/valgrind_memalign.py @@ -1,8 +1,7 @@ import time from avocado.utils import process -from virttest import env_process -from virttest import error_context +from virttest import env_process, error_context @error_context.context_aware @@ -33,27 +32,26 @@ def valgrind_intall(): valgring_support_check_cmd = params.get("valgring_support_check_cmd") error_context.context("Check valgrind installed in host", test.log.info) try: - process.system(valgring_support_check_cmd, timeout=interval, - shell=True) + process.system(valgring_support_check_cmd, timeout=interval, shell=True) except Exception: valgrind_intall() - params['mem'] = 384 + params["mem"] = 384 params["start_vm"] = "yes" error_context.context("Start guest with specific parameters", test.log.info) env_process.preprocess_vm(test, params, env, params.get("main_vm")) vm = env.get_vm(params["main_vm"]) time.sleep(interval) - error_context.context("Verify guest status is running after cont", - test.log.info) - if params.get('machine_type').startswith("s390"): + error_context.context("Verify guest status is running after cont", test.log.info) + if params.get("machine_type").startswith("s390"): vm.monitor.cmd("cont") if not vm.wait_for_status(params.get("expected_status", "running"), 30): test.fail("VM is not in expected status") - error_context.context("Quit guest and check the process quit normally", - test.log.info) + error_context.context( + "Quit guest and check the process quit normally", test.log.info + ) vm.monitor.quit() vm.wait_until_dead(5, 0.5, 0.5) vm.verify_userspace_crash() diff --git a/qemu/tests/vdi_image_convert.py b/qemu/tests/vdi_image_convert.py index 5977b2d866..208133f08a 100644 --- a/qemu/tests/vdi_image_convert.py +++ b/qemu/tests/vdi_image_convert.py @@ -1,8 +1,7 @@ from avocado import fail_on from avocado.utils import process - -from virttest.qemu_io import QemuIOSystem from virttest import data_dir +from virttest.qemu_io import QemuIOSystem from virttest.qemu_storage import QemuImg @@ -19,14 +18,14 @@ def _qemu_io(img, cmd): try: QemuIOSystem(test, params, img.image_filename).cmd_output(cmd, 120) except process.CmdError: - test.error("qemu-io to '%s' failed." 
% img.image_filename) + test.error(f"qemu-io to '{img.image_filename}' failed.") src_image = params["images"] tgt_image = params["convert_target"] img_dir = data_dir.get_data_dir() source = QemuImg(params.object_params(src_image), img_dir, src_image) - _qemu_io(source, 'write -P 1 0 %s' % params["write_size"]) + _qemu_io(source, "write -P 1 0 {}".format(params["write_size"])) fail_on((process.CmdError,))(source.convert)(source.params, img_dir) diff --git a/qemu/tests/vdi_image_create.py b/qemu/tests/vdi_image_create.py index 4761475f37..0567e61554 100644 --- a/qemu/tests/vdi_image_create.py +++ b/qemu/tests/vdi_image_create.py @@ -1,5 +1,4 @@ from avocado import TestError - from virttest import data_dir from virttest.qemu_storage import QemuImg @@ -18,5 +17,4 @@ def run(test, params, env): try: test_image.create(test_image.params) except TestError as err: - test.fail( - "Create the vdi image failed with unexpected output: %s" % err) + test.fail(f"Create the vdi image failed with unexpected output: {err}") diff --git a/qemu/tests/vdpa_dpdk.py b/qemu/tests/vdpa_dpdk.py index d44031e2f9..dc9e3e649e 100644 --- a/qemu/tests/vdpa_dpdk.py +++ b/qemu/tests/vdpa_dpdk.py @@ -1,17 +1,14 @@ import logging import os -import six import time +import six from avocado.utils import process +from virttest import remote, utils_misc, utils_net, utils_sriov -from virttest import remote -from virttest import utils_net -from virttest import utils_misc -from virttest import utils_sriov from provider import dpdk_utils -LOG_JOB = logging.getLogger('avocado.test') +LOG_JOB = logging.getLogger("avocado.test") def format_result(result, base, fbase): @@ -55,8 +52,8 @@ def run(test, params, env): # get parameter from dictionary login_timeout = int(params.get("login_timeout", 360)) forward_mode = params.get("forward_mode") - device_type_guest = params.get("device_type_guest") - device_type_host = params.get("device_type_host") + params.get("device_type_guest") + params.get("device_type_host") dpdk_pkts = params.get("dpdk_pkts") dpdk_queues = params.get("dpdk_queues") dpdk_tool_path = params.get("dpdk_tool_path") @@ -74,19 +71,19 @@ def run(test, params, env): kvm_ver = process.system_output(kvm_ver_chk_cmd, shell=True).decode() host_ver = os.uname()[2] guest_ver = session.cmd_output(guest_ver_cmd) - result_file.write("### kvm-userspace-ver : %s\n" % kvm_ver) - result_file.write("### kvm_version : %s\n" % host_ver) - result_file.write("### guest-kernel-ver :%s" % guest_ver) + result_file.write(f"### kvm-userspace-ver : {kvm_ver}\n") + result_file.write(f"### kvm_version : {host_ver}\n") + result_file.write(f"### guest-kernel-ver :{guest_ver}") dpdk_utils.install_dpdk(params, session) - dpdk_ver = session.cmd_output('rpm -qa |grep dpdk | head -n 1') - result_file.write("### guest-dpdk-ver :%s" % dpdk_ver) + dpdk_ver = session.cmd_output("rpm -qa |grep dpdk | head -n 1") + result_file.write(f"### guest-dpdk-ver :{dpdk_ver}") dpdk_utils.load_vfio_modules(session) # get record_list record_line = "" for record in record_list.split(): - record_line += "%s|" % format_result(record, base, fbase) + record_line += f"{format_result(record, base, fbase)}|" for nic_index, nic in enumerate(vm.virtnet): if nic.nettype == "vdpa": @@ -95,46 +92,63 @@ def run(test, params, env): pci_id = utils_sriov.get_pci_from_iface(ethname, session).strip() dpdk_utils.bind_pci_device_to_vfio(session, pci_id) # pylint: disable=E0606 - guest = {"host": vm.get_address(), - "username": params.get("username"), - "password": params.get("password"), - "cpu": 
session.cmd_output("nproc").strip(), - "pci": pci_id} + guest = { + "host": vm.get_address(), + "username": params.get("username"), + "password": params.get("password"), + "cpu": session.cmd_output("nproc").strip(), + "pci": pci_id, + } host = None if "rxonly" in forward_mode.split(): dsthost = params.get("dsthost") params_host = params.object_params("dsthost") - dst_ses = remote.wait_for_login(params_host.get("shell_client"), - dsthost, - params_host.get("shell_port"), - params_host.get("username"), - params_host.get("password"), - params_host.get("shell_prompt"), - timeout=login_timeout) - host = {"host": dsthost, - "username": params_host.get("unsername"), - "password": params_host.get("password"), - "cpu": dst_ses.cmd_output(("nproc").strip()), - "pci": params_host.get("dsthost_pci")} + dst_ses = remote.wait_for_login( + params_host.get("shell_client"), + dsthost, + params_host.get("shell_port"), + params_host.get("username"), + params_host.get("password"), + params_host.get("shell_prompt"), + timeout=login_timeout, + ) + host = { + "host": dsthost, + "username": params_host.get("unsername"), + "password": params_host.get("password"), + "cpu": dst_ses.cmd_output(("nproc").strip()), + "pci": params_host.get("dsthost_pci"), + } dpdk_utils.install_dpdk(params, dst_ses) for forward in forward_mode.split(): - result_file.write("Category:%s\n" % forward) - result_file.write("%s\n" % record_line.rstrip("|")) + result_file.write(f"Category:{forward}\n") + result_file.write("{}\n".format(record_line.rstrip("|"))) for pkts in dpdk_pkts.split(): for queue in dpdk_queues.split(): LOG_JOB.info( - 'Processing dpdk test with forward mode: %s, pkts: %s, queue: %s', - forward, pkts, queue) - pps = run_test(forward, guest, host if forward == "rxonly" else None, dpdk_tool_path, queue, pkts, mac if forward == "rxonly" else None) # pylint: disable=E0606 + "Processing dpdk test with forward mode: %s, pkts: %s, queue: %s", + forward, + pkts, + queue, + ) + pps = run_test( + forward, + guest, + host if forward == "rxonly" else None, + dpdk_tool_path, + queue, + pkts, + mac if forward == "rxonly" else None, + ) # pylint: disable=E0606 time.sleep(2) mpps = "%.2f" % (float(pps) / (10**6)) - line = "%s|" % format_result(pkts, base, fbase) - line += "%s|" % format_result(queue, base, fbase) - line += "%s|" % format_result(pps, base, fbase) - line += "%s|" % format_result(mpps, base, fbase) - result_file.write(("%s\n" % line)) + line = f"{format_result(pkts, base, fbase)}|" + line += f"{format_result(queue, base, fbase)}|" + line += f"{format_result(pps, base, fbase)}|" + line += f"{format_result(mpps, base, fbase)}|" + result_file.write(f"{line}\n") result_file.close() session.close() @@ -155,20 +169,30 @@ def run_test(forward_mode, guest, host, dpdk_tool_path, queue, pkts, mac=None): """ if forward_mode == "txonly": - testpmd_guest = dpdk_utils.TestPMD(guest["host"], guest["username"], guest["password"]) + testpmd_guest = dpdk_utils.TestPMD( + guest["host"], guest["username"], guest["password"] + ) elif forward_mode == "rxonly": - testpmd_host = dpdk_utils.TestPMD(host["host"], host["username"], host["password"]) + testpmd_host = dpdk_utils.TestPMD( + host["host"], host["username"], host["password"] + ) testpmd_host.login() - testpmd_host.launch_testpmd(dpdk_tool_path, host["cpu"], host["pci"], "txonly", 16, pkts, mac=mac) + testpmd_host.launch_testpmd( + dpdk_tool_path, host["cpu"], host["pci"], "txonly", 16, pkts, mac=mac + ) testpmd_host.show_port_stats_all() testpmd_host.show_port_stats_all() time.sleep(2) - 
testpmd_guest = dpdk_utils.TestPMD(guest["host"], guest["username"], guest["password"]) + testpmd_guest = dpdk_utils.TestPMD( + guest["host"], guest["username"], guest["password"] + ) testpmd_guest.login() - testpmd_guest.launch_testpmd(dpdk_tool_path, guest["cpu"], guest["pci"], forward_mode, queue, pkts) + testpmd_guest.launch_testpmd( + dpdk_tool_path, guest["cpu"], guest["pci"], forward_mode, queue, pkts + ) testpmd_guest.show_port_stats_all() output = testpmd_guest.show_port_stats_all() pps_value = testpmd_guest.extract_pps_value(output, forward_mode) diff --git a/qemu/tests/vdpa_pxe_boot.py b/qemu/tests/vdpa_pxe_boot.py index 3d1a947500..adf39899f0 100644 --- a/qemu/tests/vdpa_pxe_boot.py +++ b/qemu/tests/vdpa_pxe_boot.py @@ -19,7 +19,7 @@ def run(test, params, env): error_context.context("Try to boot from vdpa NIC", test.log.info) vm = env.get_vm(params["main_vm"]) timeout = params.get_numeric("pxe_timeout") - test.log.info("Waiting %ss" % timeout) + test.log.info("Waiting %ss", timeout) time.sleep(timeout) vm.verify_status("running") match_str = params["match_string"] diff --git a/qemu/tests/vdpa_sim_blk_test.py b/qemu/tests/vdpa_sim_blk_test.py index 268494a684..3734e22d48 100644 --- a/qemu/tests/vdpa_sim_blk_test.py +++ b/qemu/tests/vdpa_sim_blk_test.py @@ -1,13 +1,13 @@ """VDPA simulator blk test""" from avocado.utils import process - -from provider.block_devices_plug import BlockDevicesPlug -from provider.vdpa_sim_utils import VirtioVdpaBlkSimulatorTest from virttest import env_process, utils_disk, utils_misc from virttest.utils_misc import get_linux_drive_path from virttest.utils_windows.drive import get_disk_props_by_serial_number +from provider.block_devices_plug import BlockDevicesPlug +from provider.vdpa_sim_utils import VirtioVdpaBlkSimulatorTest + def run(test, params, env): """ @@ -42,8 +42,8 @@ def run(test, params, env): def _setup_vdpa_disks(): for img in vdpa_blk_images: dev = vdpa_blk_test.add_dev(img) - vdpa_blk_info[img] = "/dev/%s" % dev - params["image_name_%s" % img] = vdpa_blk_info[img] + vdpa_blk_info[img] = f"/dev/{dev}" + params[f"image_name_{img}"] = vdpa_blk_info[img] cmd = host_cmd.format(dev) process.run(cmd, shell=True) @@ -55,20 +55,20 @@ def _get_window_disk_index_by_serial(serial): idx_info = get_disk_props_by_serial_number(session, serial, ["Index"]) if idx_info: return idx_info["Index"] - test.fail("Not find expected disk %s" % serial) + test.fail(f"Not find expected disk {serial}") def _check_disk_in_guest(img): os_type = params["os_type"] - logger.debug("Check disk %s in guest" % img) - if os_type == 'windows': - img_size = params.get("image_size_%s" % img) + logger.debug("Check disk %s in guest", img) + if os_type == "windows": + img_size = params.get(f"image_size_{img}") cmd = utils_misc.set_winutils_letter(session, guest_cmd) disk = _get_window_disk_index_by_serial(img) utils_disk.update_windows_disk_attributes(session, disk) logger.info("Formatting disk:%s", disk) - driver = \ - utils_disk.configure_empty_disk(session, disk, img_size, - os_type)[0] + driver = utils_disk.configure_empty_disk(session, disk, img_size, os_type)[ + 0 + ] output_path = driver + ":\\test.dat" cmd = cmd.format(output_path) else: @@ -91,7 +91,7 @@ def hotplug_unplug_test(): def discard_test(): for img in vdpa_blk_images: - cmd = "blkdiscard -f %s && echo 'it works!' " % vdpa_blk_info[img] + cmd = f"blkdiscard -f {vdpa_blk_info[img]} && echo 'it works!' 
" process.run(cmd, shell=True) logger = test.log @@ -116,12 +116,12 @@ def discard_test(): locals_var = locals() if host_operation: - logger.debug("Execute operation %s" % host_operation) + logger.debug("Execute operation %s", host_operation) locals_var[host_operation]() if test_vm == "yes": logger.debug("Ready boot VM...") - params["start_vm"] = 'yes' + params["start_vm"] = "yes" login_timeout = params.get_numeric("login_timeout", 360) env_process.preprocess_vm(test, params, env, params.get("main_vm")) vm = env.get_vm(params["main_vm"]) @@ -129,7 +129,7 @@ def discard_test(): session = vm.wait_for_login(timeout=login_timeout) if guest_operation: - logger.debug("Execute guest operation %s" % guest_operation) + logger.debug("Execute guest operation %s", guest_operation) locals_var[guest_operation]() if test_vm == "yes": diff --git a/qemu/tests/verify_panic_status_with_pvpanic.py b/qemu/tests/verify_panic_status_with_pvpanic.py index e25e6a5b18..6529a36797 100644 --- a/qemu/tests/verify_panic_status_with_pvpanic.py +++ b/qemu/tests/verify_panic_status_with_pvpanic.py @@ -1,8 +1,7 @@ -import aexpect import os -from virttest import data_dir -from virttest import utils_package +import aexpect +from virttest import data_dir, utils_package from virttest.remote import scp_to_remote @@ -30,13 +29,13 @@ def run(test, params, env): # trigger kernel panic config trigger_kernel_panic = params.get("trigger_kernel_panic") - username = params.get('username') - password = params.get('password') - port = params.get('file_transfer_port') - guest_path = params.get('guest_path') - depends_pkgs = params.get('depends_pkgs') - cmd_make = params.get('cmd_make') - io_timeout = params.get_numeric('io_timeout') + username = params.get("username") + password = params.get("password") + port = params.get("file_transfer_port") + guest_path = params.get("guest_path") + depends_pkgs = params.get("depends_pkgs") + cmd_make = params.get("cmd_make") + io_timeout = params.get_numeric("io_timeout") vm = env.get_vm(params["main_vm"]) session = vm.wait_for_login() @@ -48,7 +47,7 @@ def run(test, params, env): test.fail("Not find pvpanic device in guest") if trigger_kernel_panic: - host_path = os.path.join(data_dir.get_deps_dir(), 'trigger_panic_drive') + host_path = os.path.join(data_dir.get_deps_dir(), "trigger_panic_drive") scp_to_remote(guest_addr, port, username, password, host_path, guest_path) if not utils_package.package_install(depends_pkgs, session): test.cancel("Please install %s inside guest to proceed", depends_pkgs) diff --git a/qemu/tests/vfio_net_lifecycle.py b/qemu/tests/vfio_net_lifecycle.py index 7915e3fdee..7e1d01231a 100644 --- a/qemu/tests/vfio_net_lifecycle.py +++ b/qemu/tests/vfio_net_lifecycle.py @@ -1,7 +1,6 @@ import json -from virttest import error_context -from virttest import utils_net +from virttest import error_context, utils_net from provider.hostdev import utils as hostdev_utils from provider.hostdev.dev_setup import hostdev_setup diff --git a/qemu/tests/vhost_with_cgroup.py b/qemu/tests/vhost_with_cgroup.py index 793b57a996..35b3bc4f2b 100644 --- a/qemu/tests/vhost_with_cgroup.py +++ b/qemu/tests/vhost_with_cgroup.py @@ -1,7 +1,7 @@ from avocado.utils import process +from virttest import error_context from virttest.env_process import preprocess from virttest.staging.utils_cgroup import Cgroup, CgroupModules -from virttest import error_context @error_context.context_aware @@ -16,6 +16,7 @@ def run(test, params, env): :param params: Dictionary with the test parameters :param env: Dictionary with 
test environment. """ + def assign_vm_into_cgroup(vm, cgroup, pwd=None): """ Assigns all threads of VM into cgroup @@ -27,20 +28,20 @@ def assign_vm_into_cgroup(vm, cgroup, pwd=None): for pid in process.get_children_pids(vm.get_shell_pid()): try: cgroup.set_cgroup(int(pid), pwd) - except Exception: # Process might not already exist + except Exception: # Process might not already exist test.fail("Failed to move all VM threads to cgroup") - error_context.context("Test Setup: Cgroup initialize in host", - test.log.info) + error_context.context("Test Setup: Cgroup initialize in host", test.log.info) modules = CgroupModules() - if (modules.init(['cpu']) != 1): + if modules.init(["cpu"]) != 1: test.fail("Can't mount cpu cgroup modules") - cgroup = Cgroup('cpu', '') + cgroup = Cgroup("cpu", "") cgroup.initialize(modules) - error_context.context("Boot guest and attach vhost to cgroup your" - " setting(cpu)", test.log.info) + error_context.context( + "Boot guest and attach vhost to cgroup your" " setting(cpu)", test.log.info + ) params["start_vm"] = "yes" preprocess(test, params, env) vm = env.get_vm(params["main_vm"]) @@ -58,11 +59,14 @@ def assign_vm_into_cgroup(vm, cgroup, pwd=None): for vhost_pid in vhost_pids.strip().split(): cgroup.set_cgroup(int(vhost_pid)) - error_context.context("Check whether vhost attached to" - " cgroup successfully", test.log.info) + error_context.context( + "Check whether vhost attached to" " cgroup successfully", test.log.info + ) cgroup_tasks = " ".join(cgroup.get_property("tasks")) for vhost_pid in vhost_pids.strip().split(): if vhost_pid not in cgroup_tasks: - test.error("vhost process attach to cgroup FAILED!" - " Tasks in cgroup is:%s" % cgroup_tasks) + test.error( + "vhost process attach to cgroup FAILED!" + f" Tasks in cgroup is:{cgroup_tasks}" + ) test.log.info("Vhost process attach to cgroup successfully") diff --git a/qemu/tests/vioinput_hotplug.py b/qemu/tests/vioinput_hotplug.py index 9aceded0e2..d6fa53475c 100644 --- a/qemu/tests/vioinput_hotplug.py +++ b/qemu/tests/vioinput_hotplug.py @@ -1,6 +1,7 @@ import time from virttest import error_context + from provider import input_tests @@ -19,17 +20,17 @@ def run(test, params, env): """ def hotplug_input_dev(vm, dev): - error_context.context("Hotplug %s" % dev, test.log.info) + error_context.context(f"Hotplug {dev}", test.log.info) out, ver_out = vm.devices.simple_hotplug(dev, vm.monitor) if not ver_out: test.fail("No % device in qtree after hotplug" % dev) test.log.info("%s is hotpluged successfully", dev) def unplug_input_dev(vm, dev): - error_context.context("Unplug %s" % dev, test.log.info) + error_context.context(f"Unplug {dev}", test.log.info) out, ver_out = vm.devices.simple_unplug(dev, vm.monitor) if not ver_out: - test.fail("Still get %s in qtree after unplug" % dev) + test.fail(f"Still get {dev} in qtree after unplug") test.log.info("%s is unpluged successfully", dev) def run_subtest(sub_test): @@ -37,7 +38,7 @@ def run_subtest(sub_test): Run subtest(e.g. 
rng_bat,reboot,shutdown) when it's not None :param sub_test: subtest name """ - error_context.context("Run %s subtest" % sub_test, test.log.info) + error_context.context(f"Run {sub_test} subtest", test.log.info) wait_time = float(params.get("wait_time", 0.2)) if sub_test == "keyboard_test": input_tests.keyboard_test(test, params, vm, wait_time) @@ -51,8 +52,7 @@ def run_subtest(sub_test): sub_test = params["sub_test"] # Hotplug an input device - new_dev = vm.devices.input_define_by_params( - params, params["input_name"])[0] + new_dev = vm.devices.input_define_by_params(params, params["input_name"])[0] hotplug_input_dev(vm, new_dev) # For virtio-mouse/tablet device, after new device added, # the default working device will change from ps/2 mice to new added mice, diff --git a/qemu/tests/vioinput_keyboard.py b/qemu/tests/vioinput_keyboard.py index d6f0aece30..3d38985a9d 100644 --- a/qemu/tests/vioinput_keyboard.py +++ b/qemu/tests/vioinput_keyboard.py @@ -1,5 +1,5 @@ -from virttest import error_context -from virttest import utils_test +from virttest import error_context, utils_test + from provider.vioinput_basic import key_tap_test @@ -26,8 +26,9 @@ def run(test, params, env): error_context.context("Check vioinput driver is running", test.log.info) utils_test.qemu.windrv_verify_running(session, test, driver.split()[0]) - error_context.context("Enable all vioinput related driver verified", - test.log.info) + error_context.context( + "Enable all vioinput related driver verified", test.log.info + ) session = utils_test.qemu.setup_win_driver_verifier(session, driver, vm) session.close() diff --git a/qemu/tests/vioinput_mice.py b/qemu/tests/vioinput_mice.py index 6b5d08d350..9257795666 100644 --- a/qemu/tests/vioinput_mice.py +++ b/qemu/tests/vioinput_mice.py @@ -1,11 +1,9 @@ -from __future__ import division import time - from collections import Counter -from virttest import error_context -from virttest import utils_test + +from virttest import error_context, graphical_console, utils_test + from provider import input_event_proxy -from virttest import graphical_console def query_mice_status(vm, mice_name): @@ -14,7 +12,7 @@ def query_mice_status(vm, mice_name): """ events = vm.monitor.query_mice() for event in events: - if event['name'] == mice_name: + if event["name"] == mice_name: return event @@ -30,14 +28,16 @@ def mouse_btn_test(test, params, console, listener, wait_time): :param listener: listening the mouse button event in guest. :param wait_time: wait event received in listener event queue. 
""" - mouse_btn_map = {'left': 'BTN_LEFT', - 'right': 'BTN_RIGHT', - 'middle': 'BTN_MIDDLE', - 'side': 'BTN_SIDE', - 'extra': 'BTN_EXTRA'} + mouse_btn_map = { + "left": "BTN_LEFT", + "right": "BTN_RIGHT", + "middle": "BTN_MIDDLE", + "side": "BTN_SIDE", + "extra": "BTN_EXTRA", + } btns = params.objects("btns") for btn in btns: - error_context.context("Click mouse %s button" % btn, test.log.info) + error_context.context(f"Click mouse {btn} button", test.log.info) console.btn_click(btn) keycode = mouse_btn_map[btn] @@ -46,8 +46,7 @@ def mouse_btn_test(test, params, console, listener, wait_time): events_queue = listener.events btn_event = list() - error_context.context("Check correct button event is received", - test.log.info) + error_context.context("Check correct button event is received", test.log.info) while not events_queue.empty(): events = events_queue.get() # some windows os will return pointer move event first @@ -57,9 +56,10 @@ def mouse_btn_test(test, params, console, listener, wait_time): btn_event.append((events["keyCode"], events["type"])) if btn_event != exp_events: - test.fail("Received btn events don't match expected events.\n" - "Received btn events as: %s\n Expected events as: %s" - % (btn_event, exp_events)) + test.fail( + "Received btn events don't match expected events.\n" + f"Received btn events as: {btn_event}\n Expected events as: {exp_events}" + ) @error_context.context_aware @@ -76,9 +76,9 @@ def mouse_scroll_test(test, params, console, listener, wait_time, count=1): """ scrolls = params.objects("scrolls") - exp_events = {'wheel-up': ("WHEELFORWARD", 0), 'wheel-down': ('WHEELBACKWARD', 0)} + exp_events = {"wheel-up": ("WHEELFORWARD", 0), "wheel-down": ("WHEELBACKWARD", 0)} for scroll in scrolls: - error_context.context("Scroll mouse %s" % scroll, test.log.info) + error_context.context(f"Scroll mouse {scroll}", test.log.info) if "up" in scroll: console.scroll_forward(count) else: @@ -100,17 +100,19 @@ def mouse_scroll_test(test, params, console, listener, wait_time, count=1): counter = Counter(samples) num = counter.pop(exp_event, 0) if num != count: - test.fail("Received scroll number %s don't match expected" - "scroll count %s" % (num, count)) + test.fail( + f"Received scroll number {num} don't match expected" + f"scroll count {count}" + ) if counter: - test.fail("Received scroll events don't match expected events" - "Received scroll events as: %s\n Expected events as: %s" - % (counter, exp_event)) + test.fail( + "Received scroll events don't match expected events" + f"Received scroll events as: {counter}\n Expected events as: {exp_event}" + ) @error_context.context_aware -def mouse_move_test(test, params, console, listener, - wait_time, end_pos, absolute): +def mouse_move_test(test, params, console, listener, wait_time, end_pos, absolute): """ Mouse move test. @@ -140,7 +142,7 @@ def mouse_move_test(test, params, console, listener, else: vertical = 1 - error_context.context("Moving pointer from %s to %s" % (start_pos, end_pos)) + error_context.context(f"Moving pointer from {start_pos} to {end_pos}") console.pointer_move(end_pos, motion=line, absolute=absolute) time.sleep(wait_time) @@ -155,39 +157,53 @@ def mouse_move_test(test, params, console, listener, xn_guest, yn_guest = event_lst[-1] tolerance = int(params.get("tolerance", 5)) - error_context.context("Compare if pointer move to destination pos (%s, %s)" - "the missed value should in tolerance scope." 
% end_pos) + error_context.context( + "Compare if pointer move to destination pos ({}, {})" + "the missed value should in tolerance scope.".format(*end_pos) + ) if (abs(xn - xn_guest) > tolerance) or (abs(yn - yn_guest) > tolerance): - test.fail("pointer did not move to destination position." - "it move to pos (%s, %s) in guest, but exepected pos is" - "(%s, %s)" % (xn_guest, yn_guest, xn, yn)) - - error_context.context("Compare if pointer move trace nearby destination line," - "the missed value should in tolerance scope.") + test.fail( + "pointer did not move to destination position." + f"it move to pos ({xn_guest}, {yn_guest}) in guest, but exepected pos is" + f"({xn}, {yn})" + ) + + error_context.context( + "Compare if pointer move trace nearby destination line," + "the missed value should in tolerance scope." + ) for i, (x, y) in enumerate(event_lst): if not vertical: - if abs((k * x + b) - y) > tolerance: # pylint: disable=E0606 - test.fail("Received pointer pos beyond line's tolerance scope " - "when move from {0} to {1}. Received pos is ({2}, {3})," - "it didn't nearby the expected line " - "y={4}x+{5}.".format(start_pos, end_pos, x, y, k, b)) + if abs((k * x + b) - y) > tolerance: # pylint: disable=E0606 + test.fail( + "Received pointer pos beyond line's tolerance scope " + f"when move from {start_pos} to {end_pos}. Received pos is ({x}, {y})," + "it didn't nearby the expected line " + f"y={k}x+{b}." + ) elif k == 0: # for horizontal direction line, only x value will change. if i > 0: - dx = [x2 - x1 for x1, x2 in zip(event_lst[i-1], event_lst[i])][0] - if (xn - x0 > 0 and dx <= 0): - test.fail("pointer move direction is wrong when " - "move from {0} to {1}.".format(start_pos, end_pos)) - elif (xn - x0 < 0 and dx >= 0): - test.fail("pointer move direction is wrong when " - "move from {0} to {1}.".format(start_pos, end_pos)) + dx = [x2 - x1 for x1, x2 in zip(event_lst[i - 1], event_lst[i])][0] + if xn - x0 > 0 and dx <= 0: + test.fail( + "pointer move direction is wrong when " + f"move from {start_pos} to {end_pos}." + ) + elif xn - x0 < 0 and dx >= 0: + test.fail( + "pointer move direction is wrong when " + f"move from {start_pos} to {end_pos}." + ) else: # for vertical direction line, only y value will change. if i > 0: - dy = [y2 - y1 for y1, y2 in zip(event_lst[i-1], event_lst[i])][1] + dy = [y2 - y1 for y1, y2 in zip(event_lst[i - 1], event_lst[i])][1] if (yn - y0 > 0 and dy <= 0) or (yn - y0 < 0 and dy >= 0): - test.fail("pointer move to incorrect direction when " - "move from {0} to {1}.".format(start_pos, end_pos)) + test.fail( + "pointer move to incorrect direction when " + f"move from {start_pos} to {end_pos}." 
+ ) @error_context.context_aware @@ -218,16 +234,16 @@ def run(test, params, env): error_context.context("Check vioinput driver is running", test.log.info) utils_test.qemu.windrv_verify_running(session, test, driver.split()[0]) - error_context.context("Enable all vioinput related driver verified", - test.log.info) + error_context.context( + "Enable all vioinput related driver verified", test.log.info + ) session = utils_test.qemu.setup_win_driver_verifier(session, driver, vm) mice_name = params.get("mice_name", "QEMU PS/2 Mouse") mice_info = query_mice_status(vm, mice_name) - error_context.context("Check if %s device is working" % mice_name, - test.log.info) + error_context.context(f"Check if {mice_name} device is working", test.log.info) if not mice_info["current"]: - test.fail("%s does not worked currently" % mice_name) + test.fail(f"{mice_name} does not worked currently") listener = input_event_proxy.EventListener(vm) console = graphical_console.GraphicalConsole(vm) @@ -239,14 +255,13 @@ def run(test, params, env): if not params.get("target_pos", None): width, height = console.screen_size - x_max, y_max = width-1, height-1 + x_max, y_max = width - 1, height - 1 target_pos = [(1, 0), (x_max, 0), (1, y_max), (x_max, y_max)] else: # suggest set target_pos if want to test one target position. target_pos = [tuple([int(i) for i in params.objects("target_pos")])] for end_pos in target_pos: - mouse_move_test(test, params, console, listener, wait_time, - end_pos, absolute) + mouse_move_test(test, params, console, listener, wait_time, end_pos, absolute) listener.clear_events() listener.cleanup() diff --git a/qemu/tests/vioser_in_use.py b/qemu/tests/vioser_in_use.py index 43d22a7574..681dc6752d 100644 --- a/qemu/tests/vioser_in_use.py +++ b/qemu/tests/vioser_in_use.py @@ -1,20 +1,16 @@ -import re +import logging import os +import re import signal -import logging import time from avocado.utils import process -from virttest import utils_misc -from virttest import utils_test -from virttest import error_context -from virttest import qemu_migration -from provider import win_driver_utils +from virttest import error_context, qemu_migration, utils_misc, utils_test +from provider import win_driver_utils from qemu.tests import virtio_serial_file_transfer -from qemu.tests.timedrift_no_net import subw_guest_pause_resume # pylint: disable=W0611 -LOG_JOB = logging.getLogger('avocado.test') +LOG_JOB = logging.getLogger("avocado.test") @error_context.context_aware @@ -56,11 +52,10 @@ def vcpu_hotplug_guest(test, params, vm, session): Vcpu hot plug test. """ - maxcpus = int(params["vcpu_maxcpus"]) + int(params["vcpu_maxcpus"]) vcpu_devices = params.objects("vcpu_devices") for vcpu_device in vcpu_devices: - error_context.context("hot-pluging vCPU %s" % vcpu_device, - test.log.info) + error_context.context(f"hot-pluging vCPU {vcpu_device}", test.log.info) vm.hotplug_vcpu_device(vcpu_id=vcpu_device) # make the cpu hotplug has slot during data transfer time.sleep(2) @@ -75,12 +70,12 @@ def kill_host_serial_pid(params, vm): simultaneously. 
""" port_path = virtio_serial_file_transfer.get_virtio_port_property( - vm, params["file_transfer_serial_port"])[1] + vm, params["file_transfer_serial_port"] + )[1] host_process = 'ps aux | grep "serial_host_send_receive.py"' - host_process = process.system_output(host_process, - shell=True).decode() - host_process = re.findall(r'(.*?)%s(.*?)\n' % port_path, host_process) + host_process = process.system_output(host_process, shell=True).decode() + host_process = re.findall(rf"(.*?){port_path}(.*?)\n", host_process) if host_process: host_process = str(host_process[0]).split()[1] LOG_JOB.info("Kill previous serial process on host") @@ -98,18 +93,17 @@ def run_bg_test(test, params, vm, sender="both"): # import driver_in_use in this function to avoid circular imports from qemu.tests import driver_in_use - error_context.context("Run serial transfer test in background", - test.log.info) + error_context.context("Run serial transfer test in background", test.log.info) stress_thread = utils_misc.InterruptedThread( - virtio_serial_file_transfer.transfer_data, (params, vm), - {"sender": sender}) + virtio_serial_file_transfer.transfer_data, (params, vm), {"sender": sender} + ) stress_thread.daemon = True stress_thread.start() - check_bg_timeout = float(params.get('check_bg_timeout', 120)) - if not utils_misc.wait_for(lambda: driver_in_use.check_bg_running(vm, - params), - check_bg_timeout, 0, 1): + check_bg_timeout = float(params.get("check_bg_timeout", 120)) + if not utils_misc.wait_for( + lambda: driver_in_use.check_bg_running(vm, params), check_bg_timeout, 0, 1 + ): test.fail("Backgroud test is not alive!") return stress_thread @@ -128,16 +122,16 @@ def run(test, params, env): """ driver = params["driver_name"] - sender = params['file_sender'] + sender = params["file_sender"] timeout = int(params.get("login_timeout", 360)) suppress_exception = params.get("suppress_exception", "no") == "yes" vm = env.get_vm(params["main_vm"]) session = vm.wait_for_login() if params["os_type"] == "windows": - session = utils_test.qemu.windrv_check_running_verifier(session, vm, - test, driver, - timeout) + session = utils_test.qemu.windrv_check_running_verifier( + session, vm, test, driver, timeout + ) bg_thread = run_bg_test(test, params, vm, sender) globals().get(params["interrupt_test"])(test, params, vm, session) @@ -152,12 +146,13 @@ def run(test, params, env): return if bg_thread: - bg_thread.join(timeout=timeout, - suppress_exception=suppress_exception) + bg_thread.join(timeout=timeout, suppress_exception=suppress_exception) if vm.is_alive(): kill_host_serial_pid(params, vm) - if (virtio_serial_file_transfer.transfer_data( - params, vm, sender=sender) is not True): + if ( + virtio_serial_file_transfer.transfer_data(params, vm, sender=sender) + is not True + ): test.fail("Serial data transfter test failed.") if params.get("memory_leak_check", "no") == "yes": diff --git a/qemu/tests/virt_firmware_basic_test.py b/qemu/tests/virt_firmware_basic_test.py index a75b77a39f..a152f9e07b 100644 --- a/qemu/tests/virt_firmware_basic_test.py +++ b/qemu/tests/virt_firmware_basic_test.py @@ -1,11 +1,10 @@ import os import re -import six -import sys import shutil +import sys -from avocado.utils import git -from avocado.utils import process +import six +from avocado.utils import git, process from virttest import error_context @@ -40,27 +39,29 @@ def run(test, params, env): """ query_cmd = params["cmd_queried_test_package"] - status = process.getstatusoutput(query_cmd, - ignore_status=True, - shell=True)[0] + status = 
process.getstatusoutput(query_cmd, ignore_status=True, shell=True)[0] if status: - test.log.info("Package 'python3-virt-firmware-tests' " - "has not been installed. " - "Run the test with virt firmware repo.") + test.log.info( + "Package 'python3-virt-firmware-tests' " + "has not been installed. " + "Run the test with virt firmware repo." + ) virt_firmware_dirname = params["virt_firmware_repo_dst_dir"] virt_firmware_repo_addr = params["virt_firmware_repo_addr"] test_file_black_list = params["test_file_black_list"].split() if os.path.exists(virt_firmware_dirname): shutil.rmtree(virt_firmware_dirname, ignore_errors=True) try: - git.get_repo(uri=virt_firmware_repo_addr, - destination_dir=virt_firmware_dirname) + git.get_repo( + uri=virt_firmware_repo_addr, destination_dir=virt_firmware_dirname + ) except Exception as e: - test.error("Failed to clone the virt-firmware repo," - "the error message is '%s'." % six.text_type(e)) + test.error( + "Failed to clone the virt-firmware repo," + f"the error message is '{six.text_type(e)}'." + ) else: - test.log.info("Run the test with package " - "'python3-virt-firmware-tests'.") + test.log.info("Run the test with package " "'python3-virt-firmware-tests'.") virt_firmware_dirname = params["virt_firmware_test_package_dir"] test_file_black_list = [] test_file_pattern = params["test_file_pattern"] @@ -74,16 +75,18 @@ def run(test, params, env): test_cmd = sys.executable + " " + test_file + " 2>&1" else: test_cmd = params["shell_cmd"] % test_file - error_context.context("Test check with command '%s'." - % test_cmd, test.log.info) - status, output = process.getstatusoutput(test_cmd, - ignore_status=True, - shell=True) + error_context.context( + f"Test check with command '{test_cmd}'.", test.log.info + ) + status, output = process.getstatusoutput( + test_cmd, ignore_status=True, shell=True + ) if status: - test.fail("Failed to run '%s', the error message is '%s'" - % (test_cmd, output)) - error_context.context("The output of command '%s':\n%s" - % (test_cmd, output), test.log.info) + test.fail( + f"Failed to run '{test_cmd}', the error message is '{output}'" + ) + error_context.context( + f"The output of command '{test_cmd}':\n{output}", test.log.info + ) if "test_file" not in locals(): - test.error("Not found test file in '%s', please check it." 
- % test_dirname) + test.error(f"Not found test file in '{test_dirname}', please check it.") diff --git a/qemu/tests/virt_firmware_check_phys_bits.py b/qemu/tests/virt_firmware_check_phys_bits.py index 45ee806a59..79b2235839 100644 --- a/qemu/tests/virt_firmware_check_phys_bits.py +++ b/qemu/tests/virt_firmware_check_phys_bits.py @@ -1,9 +1,7 @@ import re + from avocado.utils import process -from virttest import virt_vm -from virttest import env_process -from virttest import error_context -from virttest import utils_package +from virttest import env_process, error_context, utils_package, virt_vm @error_context.context_aware @@ -42,15 +40,18 @@ def run(test, params, env): phys_bits_grep_cmd = params["phys_bits_grep_cmd"] host_phys_bits = process.getoutput(phys_bits_grep_cmd, shell=True).strip() if not host_phys_bits.isdigit(): - test.error("Failed to get host phys-bits, the actual output is '%s'" - % host_phys_bits) + test.error( + f"Failed to get host phys-bits, the actual output is '{host_phys_bits}'" + ) host_phys_bits_limit = params["host_phys_bits_limit"] params["cpu_model_flags"] %= host_phys_bits_limit err_msg = params.get("err_msg") ignored_err_msg = params.get("ignored_err_msg") try: - error_context.context("Start the vm with host-phys-bits-limit=%s." - % host_phys_bits_limit, test.log.info) + error_context.context( + f"Start the vm with host-phys-bits-limit={host_phys_bits_limit}.", + test.log.info, + ) vm_name = params["main_vm"] env_process.preprocess_vm(test, params, env, vm_name) vm = env.get_vm(vm_name) @@ -58,22 +59,26 @@ def run(test, params, env): except (virt_vm.VMCreateError, virt_vm.VMStartError) as e: if err_msg: if err_msg not in str(e): - test.fail("Boot a vm with invalid phys-bits '%s', " - "the error message is not the expected value " - "'%s'. The actual output is '%s'." - % (host_phys_bits_limit, err_msg, str(e))) + test.fail( + f"Boot a vm with invalid phys-bits '{host_phys_bits_limit}', " + "the error message is not the expected value " + f"'{err_msg}'. The actual output is '{str(e)}'." + ) elif ignored_err_msg: if not re.search(ignored_err_msg, str(e), re.S | re.I): - test.fail("Boot a vm with phys-bits '%s', the ignored " - "error message is not the expected value " - "'%s'. The actual output is '%s'." - % (host_phys_bits_limit, ignored_err_msg, str(e))) + test.fail( + f"Boot a vm with phys-bits '{host_phys_bits_limit}', the ignored " + "error message is not the expected value " + f"'{ignored_err_msg}'. The actual output is '{str(e)}'." + ) else: - test.error("Failed to create a vm, the error is '%s'" % str(e)) + test.error(f"Failed to create a vm, the error is '{str(e)}'") else: if err_msg: - test.fail("Start the vm unexpectedly with " - "host-phys-bits-limit=%s." % host_phys_bits_limit) + test.fail( + "Start the vm unexpectedly with " + f"host-phys-bits-limit={host_phys_bits_limit}." 
+ ) error_context.context("Check the phys-bits in guest.", test.log.info) session = vm.wait_for_login() guest_phys_bits = int(session.cmd_output(phys_bits_grep_cmd).strip()) @@ -87,15 +92,18 @@ def run(test, params, env): if output in params["enabled_status"]: sev_es_status = True if sev_status or sev_es_status: - install_status = utils_package.package_install('sevctl') + install_status = utils_package.package_install("sevctl") if not install_status: test.error("Failed to install sevctl.") encryption_bits_grep_cmd = params["encryption_bits_grep_cmd"] host_memory_encryption_bits = process.getoutput( - encryption_bits_grep_cmd, shell=True).strip() + encryption_bits_grep_cmd, shell=True + ).strip() if not host_memory_encryption_bits.isdigit(): - test.error("Failed to get host memory encryption bits, the " - "actual output is '%s'" % host_memory_encryption_bits) + test.error( + "Failed to get host memory encryption bits, the " + f"actual output is '{host_memory_encryption_bits}'" + ) host_phys_bits = int(host_phys_bits) + int(host_memory_encryption_bits) expected_phys_bits = min(int(host_phys_bits), int(host_phys_bits_limit)) session.close() @@ -104,16 +112,19 @@ def run(test, params, env): err_str %= (expected_phys_bits, guest_phys_bits) test.assertEqual(guest_phys_bits, expected_phys_bits, err_str) phys_bits_msg = params["phys_bits_msg"] % expected_phys_bits - logs = vm.logsessions['seabios'].get_output() - error_context.context("Check the phys-bits message in " - "virt firmware log.", test.log.info) + logs = vm.logsessions["seabios"].get_output() + error_context.context( + "Check the phys-bits message in " "virt firmware log.", test.log.info + ) if not re.search(phys_bits_msg, logs, re.S | re.I): - test.fail("Not found phys-bits message '%s' in " - "virt firmware log." % phys_bits_msg) + test.fail( + f"Not found phys-bits message '{phys_bits_msg}' in " + "virt firmware log." + ) limitation = params.get_numeric("limitation_from_ovmf") if limitation and expected_phys_bits > limitation: - error_context.context("Check the limitation message in virt " - "firmware log.", test.log.info) + error_context.context( + "Check the limitation message in virt " "firmware log.", test.log.info + ) if not re.search(params["limitation_msg"], logs, re.S | re.I): - test.fail("Not found the limitation " - "message in virt firmware log.") + test.fail("Not found the limitation " "message in virt firmware log.") diff --git a/qemu/tests/virt_subtest_combine.py b/qemu/tests/virt_subtest_combine.py index 5552763bcf..37149002e4 100644 --- a/qemu/tests/virt_subtest_combine.py +++ b/qemu/tests/virt_subtest_combine.py @@ -1,6 +1,5 @@ from avocado.utils import process -from virttest import utils_test -from virttest import error_context +from virttest import error_context, utils_test @error_context.context_aware @@ -16,31 +15,33 @@ def run(test, params, env): :param params: Dictionary with the test parameters :param env: Dictionary with test environment. """ + def exe_cmd_in_guests(subtest_tag): timeout = int(params.get("login_timeout", 240)) vms = env.get_all_vms() for vm in vms: params_vm = params.object_params(vm.name) params_vm_subtest = params_vm.object_params(subtest_tag) - if params_vm_subtest.get('cmd'): - error_context.context("Try to log into guest '%s'." 
% vm.name, - test.log.info) + if params_vm_subtest.get("cmd"): + error_context.context( + f"Try to log into guest '{vm.name}'.", test.log.info + ) session = vm.wait_for_login(timeout=timeout) cmd_timeout = int(params_vm_subtest.get("cmd_timeout", 240)) - cmd = params_vm_subtest['cmd'] + cmd = params_vm_subtest["cmd"] session.cmd(cmd, timeout=cmd_timeout) def exe_cmd_in_host(subtest_tag): params_subtest = params.object_params(subtest_tag) cmd_timeout = int(params_subtest.get("cmd_timeout", 240)) - cmd = params_subtest['cmd'] + cmd = params_subtest["cmd"] process.system(cmd, timeout=cmd_timeout, shell=True) subtests = params["subtests"].split() for subtest in subtests: params_subtest = params.object_params(subtest) - error_context.context("Run test %s" % subtest, test.log.info) + error_context.context(f"Run test {subtest}", test.log.info) if params_subtest.get("subtest_type") == "guests": exe_cmd_in_guests(subtest) elif params_subtest.get("subtest_type") == "host": @@ -51,8 +52,7 @@ def exe_cmd_in_host(subtest_tag): if params_subtest.get("check_vm_status_after_test", "yes") == "yes": vms = env.get_all_vms() for vm in vms: - error_context.context("Check %s status" % vm.name, - test.log.info) + error_context.context(f"Check {vm.name} status", test.log.info) vm.verify_userspace_crash() vm.verify_kernel_crash() vm.verify_kvm_internal_error() diff --git a/qemu/tests/virtio_aer_opt.py b/qemu/tests/virtio_aer_opt.py index 9708ba29b2..9d7b2ca462 100644 --- a/qemu/tests/virtio_aer_opt.py +++ b/qemu/tests/virtio_aer_opt.py @@ -1,6 +1,7 @@ -from provider.block_devices_plug import BlockDevicesPlug from virttest import error_context +from provider.block_devices_plug import BlockDevicesPlug + @error_context.context_aware def run(test, params, env): @@ -28,27 +29,28 @@ def _get_device_by_devid(devices, dev_id): """ device_found = {} for dev in devices: - if dev['qdev_id'] == dev_id: + if dev["qdev_id"] == dev_id: device_found = dev break - elif dev['class_info'].get('desc') == 'PCI bridge': - pci_bridge_devices = dev['pci_bridge'].get('devices') + elif dev["class_info"].get("desc") == "PCI bridge": + pci_bridge_devices = dev["pci_bridge"].get("devices") if not pci_bridge_devices: continue - device_found = _get_device_by_devid(pci_bridge_devices, - dev_id) + device_found = _get_device_by_devid(pci_bridge_devices, dev_id) if device_found: break return device_found - dev_addr = '' - dev_addr_fmt = '%02d:%02d.%d' - pci_info = vm.monitor.info('pci', debug=False) - device = _get_device_by_devid(pci_info[0]['devices'], dev_id) + dev_addr = "" + dev_addr_fmt = "%02d:%02d.%d" + pci_info = vm.monitor.info("pci", debug=False) + device = _get_device_by_devid(pci_info[0]["devices"], dev_id) if device: - dev_addr = dev_addr_fmt % (device['bus'], - device['slot'], - device['function']) + dev_addr = dev_addr_fmt % ( + device["bus"], + device["slot"], + device["function"], + ) return dev_addr def check_dev_cap_in_guest(dev_id, capbilities): @@ -62,50 +64,53 @@ def check_dev_cap_in_guest(dev_id, capbilities): dev_addr = get_pci_addr_by_devid(dev_id) for cap in capbilities: - check_cmd = "lspci -vvv -s %s | grep '%s'" % (dev_addr, cap) + check_cmd = f"lspci -vvv -s {dev_addr} | grep '{cap}'" if session.cmd_status(check_cmd) != 0: - test.log.error("Failed to get capability '%s' for device %s", - cap, dev_id) + test.log.error( + "Failed to get capability '%s' for device %s", cap, dev_id + ) return False return True - if params.object_params('qmpmonitor1').get('monitor_type') == 'human': + if 
params.object_params("qmpmonitor1").get("monitor_type") == "human": test.cancel("Please run test with qmp monitor") vm = env.get_vm(params["main_vm"]) session = vm.wait_for_login() - capabilities = params['capabilities'].split(',') - images = params.objects('images') + capabilities = params["capabilities"].split(",") + images = params.objects("images") dev_ids = [] blk_image = images[1] blk_dev = vm.devices.get_by_qid(blk_image)[0] - blk_dev_id = blk_dev.params['id'] + blk_dev_id = blk_dev.params["id"] dev_ids.append(blk_dev_id) - scsi_dev = vm.devices.get_by_params({'driver': 'virtio-scsi-pci'})[0] - scsi_dev_id = scsi_dev.params['id'] + scsi_dev = vm.devices.get_by_params({"driver": "virtio-scsi-pci"})[0] + scsi_dev_id = scsi_dev.params["id"] dev_ids.append(scsi_dev_id) nic_id = vm.virtnet[0].device_id nic_dev = vm.devices.get_by_qid(nic_id)[0] - nic_dev_id = nic_dev.params['id'] + nic_dev_id = nic_dev.params["id"] dev_ids.append(nic_dev_id) try: for dev_id in dev_ids: if not check_dev_cap_in_guest(dev_id, capabilities): - test.fail('Check capabilities %s for device %s failed' - % (capabilities, dev_id)) + test.fail( + f"Check capabilities {capabilities} for device {dev_id} failed" + ) plug = BlockDevicesPlug(vm) for img in params.get("hotplug_images", "").split(): plug.unplug_devs_serial(img) plug.hotplug_devs_serial(img) blk_dev = vm.devices.get_by_qid(img)[0] - blk_dev_id = blk_dev.params['id'] + blk_dev_id = blk_dev.params["id"] if not check_dev_cap_in_guest(blk_dev_id, capabilities): - test.fail('Check capabilities %s for device %s failed' - % (capabilities, blk_dev_id)) + test.fail( + f"Check capabilities {capabilities} for device {blk_dev_id} failed" + ) finally: session.close() diff --git a/qemu/tests/virtio_blk_with_discard_write_zeroes.py b/qemu/tests/virtio_blk_with_discard_write_zeroes.py index 66606f60c0..c5dd122789 100644 --- a/qemu/tests/virtio_blk_with_discard_write_zeroes.py +++ b/qemu/tests/virtio_blk_with_discard_write_zeroes.py @@ -1,11 +1,7 @@ import ast import re -from virttest import env_process -from virttest import error_context -from virttest import qemu_qtree -from virttest import utils_misc -from virttest import virt_vm +from virttest import env_process, error_context, qemu_qtree, utils_misc, virt_vm @error_context.context_aware @@ -24,72 +20,77 @@ def run(test, params, env): :param params: Dictionary with the test parameters. :param env: Dictionary with test environment. """ + def check_attribute_in_qtree(dev_id, name, excepted_val): """Check if discard and write-zeroes attribute work.""" - error_context.context('Check if %s attribute works.' % name, test.log.info) + error_context.context(f"Check if {name} attribute works.", test.log.info) qtree = qemu_qtree.QtreeContainer() - qtree.parse_info_qtree(vm.monitor.info('qtree')) + qtree.parse_info_qtree(vm.monitor.info("qtree")) for node in qtree.get_nodes(): - if (isinstance(node, qemu_qtree.QtreeDev) and - node.qtree.get('id') == dev_id): + if isinstance(node, qemu_qtree.QtreeDev) and node.qtree.get("id") == dev_id: _node = node.children[0].children[0] - if _node.qtree.get('drive').endswith('_%s"' % dev_id): + if _node.qtree.get("drive").endswith(f'_{dev_id}"'): if _node.qtree.get(name) is None: - test.fail('The qtree device %s has no property %s.' 
% - (dev_id, name)) + test.fail(f"The qtree device {dev_id} has no property {name}.") elif _node.qtree.get(name) == excepted_val: - test.log.info('The "%s" matches with qtree device "%s"' - '(%s).', name, dev_id, excepted_val) + test.log.info( + 'The "%s" matches with qtree device "%s"' "(%s).", + name, + dev_id, + excepted_val, + ) break else: - test.fail('The "%s" mismatches with qtree device "%s"' - '(%s).' % (name, dev_id, excepted_val)) + test.fail( + f'The "{name}" mismatches with qtree device "{dev_id}"' + f"({excepted_val})." + ) else: - test.error('No such "%s" qtree device.' % dev_id) + test.error(f'No such "{dev_id}" qtree device.') def check_status_inside_guest(session, cmd, excepted_val): """Check if the discard or write-zeroes is enabled or disabled.""" if excepted_val not in session.cmd(cmd, 600): - test.fail('The output should be "%s"' % excepted_val) + test.fail(f'The output should be "{excepted_val}"') def get_data_disk_by_serial(session, image_tag): """Get the data disks by serial options.""" match = re.search( - r"serial=(\w+)", params["blk_extra_params_%s" % image_tag], re.M) + r"serial=(\w+)", params[f"blk_extra_params_{image_tag}"], re.M + ) drive_path = utils_misc.get_linux_drive_path(session, match.group(1)) if not drive_path: - test.error("Failed to get '%s' drive path" % image_tag) + test.error(f"Failed to get '{image_tag}' drive path") return drive_path def dd_test(session, target): """Do dd test on the data disk.""" - error_context.context('Do dd test on the data disk.', test.log.info) - session.cmd(params['cmd_dd'].format(target), 600) + error_context.context("Do dd test on the data disk.", test.log.info) + session.cmd(params["cmd_dd"].format(target), 600) data_tag = params["images"].split()[1] vm = env.get_vm(params["main_vm"]) - if params['start_vm'] == 'no': - params['start_vm'] = 'yes' + if params["start_vm"] == "no": + params["start_vm"] = "yes" try: env_process.preprocess_vm(test, params, env, params["main_vm"]) except virt_vm.VMCreateError as e: - error_msg = params.get('error_msg') + error_msg = params.get("error_msg") if error_msg not in str(e): - test.fail('No found "%s" from the output of qemu:%s.' 
% - (error_msg, str(e))) + test.fail(f'No found "{error_msg}" from the output of qemu:{str(e)}.') return vm.verify_alive() session = vm.wait_for_login() data_disk = get_data_disk_by_serial(session, data_tag) - if params.get('attributes_checked'): - for attr_name, val in ast.literal_eval(params['attributes_checked']).items(): + if params.get("attributes_checked"): + for attr_name, val in ast.literal_eval(params["attributes_checked"]).items(): check_attribute_in_qtree(data_tag, attr_name, val) - if params.get('status_checked'): - for cmd, val in ast.literal_eval(params['status_checked']).items(): + if params.get("status_checked"): + for cmd, val in ast.literal_eval(params["status_checked"]).items(): check_status_inside_guest(session, params[cmd].format(data_disk), val) dd_test(session, data_disk) diff --git a/qemu/tests/virtio_chardev_trace.py b/qemu/tests/virtio_chardev_trace.py index 3be99bafea..af9bce02fe 100644 --- a/qemu/tests/virtio_chardev_trace.py +++ b/qemu/tests/virtio_chardev_trace.py @@ -1,11 +1,9 @@ import os import time -import aexpect +import aexpect from avocado.utils import process -from virttest import error_context -from virttest import env_process -from virttest import data_dir +from virttest import data_dir, env_process, error_context @error_context.context_aware @@ -30,44 +28,46 @@ def run(test, params, env): def get_procs(): procs = [] for x in range(0, int(nums_cpu)): - pipefile = '/tmp/virtio-trace/trace-path-cpu{}.out'.format(x) - proc = aexpect.run_bg('cat %s' % pipefile) + pipefile = f"/tmp/virtio-trace/trace-path-cpu{x}.out" + proc = aexpect.run_bg(f"cat {pipefile}") procs.append(proc) return procs try: nums_cpu = int(params.get("smp", 1)) - serials = params.get("serials", '') + serials = params.get("serials", "") v_path = "/tmp/virtio-trace/" if not os.path.isdir(v_path): - process.run("mkdir {}".format(v_path)) + process.run(f"mkdir {v_path}") for t in ["in", "out"]: - process.run("mkfifo {}agent-ctl-path.{}".format(v_path, t)) + process.run(f"mkfifo {v_path}agent-ctl-path.{t}") for x in range(int(nums_cpu)): - process.run("mkfifo {}trace-path-cpu{}.{}".format(v_path, x, t)) + process.run(f"mkfifo {v_path}trace-path-cpu{x}.{t}") enable_cmd = "echo 1 > /tmp/virtio-trace/agent-ctl-path.in" disable_cmd = "echo 0 > /tmp/virtio-trace/agent-ctl-path.in" for x in range(int(nums_cpu)): - serials += ' vs{} '.format(x) - params['serial_type_vs{}'.format(x)] = 'virtserialport' - params['chardev_backend_vs{}'.format(x)] = 'pipe' - params['serial_name_vs{}'.format(x)] = "trace-path-cpu{}".format(x) - params['chardev_path_vs{}'.format(x)] = "{}trace-path-cpu{}".format(v_path, x) - params['serials'] = serials - params['start_vm'] = "yes" + serials += f" vs{x} " + params[f"serial_type_vs{x}"] = "virtserialport" + params[f"chardev_backend_vs{x}"] = "pipe" + params[f"serial_name_vs{x}"] = f"trace-path-cpu{x}" + params[f"chardev_path_vs{x}"] = f"{v_path}trace-path-cpu{x}" + params["serials"] = serials + params["start_vm"] = "yes" env_process.preprocess(test, params, env) vm = env.get_vm(params["main_vm"]) vm.verify_alive() session = vm.wait_for_login() - status, output = session.cmd_status_output("echo 1 > /sys/kernel/debug/tracing/events/sched/enable") + status, output = session.cmd_status_output( + "echo 1 > /sys/kernel/debug/tracing/events/sched/enable" + ) if status != 0: - test.error("Enable ftrace in the guest failed as %s" % output) + test.error(f"Enable ftrace in the guest failed as {output}") # run trace agnet in vm - 
vm.copy_files_to(data_dir.get_deps_dir("virtio-trace"), '/home/') - session.cmd('cd /home/virtio-trace/ && make') - session.cmd('sudo /home/virtio-trace/trace-agent &') + vm.copy_files_to(data_dir.get_deps_dir("virtio-trace"), "/home/") + session.cmd("cd /home/virtio-trace/ && make") + session.cmd("sudo /home/virtio-trace/trace-agent &") # Host injects read start order to the guest via virtio-serial process.run(enable_cmd, shell=True) @@ -79,14 +79,14 @@ def get_procs(): time.sleep(10) for index, proc in enumerate(procs): if not proc.get_output(): - test.fail("cpu %s do not have output while it is enabled in host" % index) + test.fail(f"cpu {index} do not have output while it is enabled in host") proc.close() procs = get_procs() time.sleep(10) for index, proc in enumerate(procs): if proc.get_output(): - test.fail("cpu %s still have output after disabled in host" % index) + test.fail(f"cpu {index} still have output after disabled in host") proc.close() finally: - process.run("rm -rf {}".format(v_path)) + process.run(f"rm -rf {v_path}") diff --git a/qemu/tests/virtio_console.py b/qemu/tests/virtio_console.py index a923b5fa9b..1ab246186d 100644 --- a/qemu/tests/virtio_console.py +++ b/qemu/tests/virtio_console.py @@ -3,30 +3,32 @@ :copyright: 2010-2012 Red Hat Inc. """ -from collections import deque + import array import logging import os import random +import re import select import socket import threading import time -import re +from collections import deque from subprocess import Popen from avocado.utils import process -from virttest import error_context -from virttest import qemu_virtio_port -from virttest import env_process +from virttest import ( + env_process, + error_context, + funcatexit, + qemu_virtio_port, + utils_misc, +) +from virttest.qemu_devices import qcontainer, qdevices from virttest.utils_test.qemu import migration -from virttest import utils_misc -from virttest import funcatexit -from virttest.qemu_devices import qdevices from virttest.utils_virtio_port import VirtioPortTest -from virttest.qemu_devices import qcontainer -LOG_JOB = logging.getLogger('avocado.test') +LOG_JOB = logging.getLogger("avocado.test") EXIT_EVENT = threading.Event() @@ -37,7 +39,7 @@ def __set_exit_event(): Sets global EXIT_EVENT :note: Used in cleanup by funcatexit in some tests """ - LOG_JOB.warn("Executing __set_exit_event()") + LOG_JOB.warning("Executing __set_exit_event()") EXIT_EVENT.set() @@ -50,26 +52,25 @@ def add_chardev(vm, params): :return list of added CharDevice object """ qemu_binary = utils_misc.get_qemu_binary(params) - qdevices = qcontainer.DevContainer(qemu_binary, vm.name, - params.get('strict_mode'), - params.get( - 'workaround_qemu_qmp_crash'), - params.get('allow_hotplugged_vm')) - char_devices = params['extra_chardevs'].split() - host = params.get('chardev_host', '127.0.0.1') - free_ports = utils_misc.find_free_ports( - 5000, 6000, len(char_devices), host) + qdevices = qcontainer.DevContainer( + qemu_binary, + vm.name, + params.get("strict_mode"), + params.get("workaround_qemu_qmp_crash"), + params.get("allow_hotplugged_vm"), + ) + char_devices = params["extra_chardevs"].split() + host = params.get("chardev_host", "127.0.0.1") + free_ports = utils_misc.find_free_ports(5000, 6000, len(char_devices), host) device_list = [] for index, chardev in enumerate(char_devices): chardev_param = params.object_params(chardev) file_name = vm.get_serial_console_filename(chardev) - backend = chardev_param.get('chardev_backend', - 'unix_socket') - if backend in ['udp', 'tcp_socket']: - 
chardev_param['chardev_host'] = host - chardev_param['chardev_port'] = str(free_ports[index]) - device = qdevices.chardev_define_by_params( - chardev, chardev_param, file_name) + backend = chardev_param.get("chardev_backend", "unix_socket") + if backend in ["udp", "tcp_socket"]: + chardev_param["chardev_host"] = host + chardev_param["chardev_port"] = str(free_ports[index]) + device = qdevices.chardev_define_by_params(chardev, chardev_param, file_name) device_list.append(device) return device_list @@ -83,19 +84,25 @@ def add_virtserial_device(vm, params, serial_id, chardev_id): :return list of added serial devices """ s_params = params.object_params(serial_id) - serial_type = s_params['serial_type'] - machine = params.get('machine_type') - if '-mmio' in machine: - controller_suffix = 'device' + serial_type = s_params["serial_type"] + machine = params.get("machine_type") + if "-mmio" in machine: + controller_suffix = "device" elif machine.startswith("s390"): - controller_suffix = 'ccw' + controller_suffix = "ccw" else: - controller_suffix = 'pci' - bus_type = 'virtio-serial-%s' % controller_suffix + controller_suffix = "pci" + bus_type = f"virtio-serial-{controller_suffix}" return vm.devices.serials_define_by_variables( - serial_id, serial_type, chardev_id, bus_type, - s_params.get('serial_name'), s_params.get('serial_bus'), - s_params.get('serial_nr'), s_params.get('serial_reg')) + serial_id, + serial_type, + chardev_id, + bus_type, + s_params.get("serial_name"), + s_params.get("serial_bus"), + s_params.get("serial_nr"), + s_params.get("serial_reg"), + ) def add_virtio_ports_to_vm(vm, params, serial_device): @@ -107,16 +114,17 @@ def add_virtio_ports_to_vm(vm, params, serial_device): :param serial_device: serial device object """ serial_id = serial_device.get_qid() - chardev_id = serial_device.get_param('chardev') + chardev_id = serial_device.get_param("chardev") chardev = vm.devices.get(chardev_id) - filename = chardev.get_param('path') + filename = chardev.get_param("path") chardev_params = params.object_params(chardev_id) - backend = chardev_params.get('chardev_backend', 'unix_socket') - if backend in ['udp', 'tcp_socket']: - filename = (chardev.get_param('host'), chardev.get_param('port')) - serial_name = serial_device.get_param('name') - vm.virtio_ports.append(qemu_virtio_port.VirtioSerial( - serial_id, serial_name, filename, backend)) + backend = chardev_params.get("chardev_backend", "unix_socket") + if backend in ["udp", "tcp_socket"]: + filename = (chardev.get_param("host"), chardev.get_param("port")) + serial_name = serial_device.get_param("name") + vm.virtio_ports.append( + qemu_virtio_port.VirtioSerial(serial_id, serial_name, filename, backend) + ) @error_context.context_aware @@ -141,7 +149,7 @@ def run(test, params, env): def get_virtio_serial_name(): if params.get("machine_type").startswith("arm64-mmio"): return "virtio-serial-device" - elif params.get('machine_type').startswith("s390"): + elif params.get("machine_type").startswith("s390"): return "virtio-serial-ccw" else: return "virtio-serial-pci" @@ -156,8 +164,9 @@ def test_open(): :param cfg: virtio_port_spread - how many devices per virt pci (0=all) """ (vm, guest_worker, port) = virtio_test.get_vm_with_single_port( - params.get('virtio_console_params')) - guest_worker.cmd("virt.open('%s')" % (port.name)) + params.get("virtio_console_params") + ) + guest_worker.cmd(f"virt.open('{port.name}')") port.open() virtio_test.cleanup(vm, guest_worker) @@ -170,19 +179,24 @@ def test_multi_open(): :param cfg: virtio_port_spread - 
how many devices per virt pci (0=all) """ (vm, guest_worker, port) = virtio_test.get_vm_with_single_port( - params.get('virtio_console_params')) - guest_worker.cmd("virt.close('%s')" % (port.name), 10) - guest_worker.cmd("virt.open('%s')" % (port.name), 10) - (match, data) = guest_worker._cmd("virt.open('%s')" % (port.name), 10) + params.get("virtio_console_params") + ) + guest_worker.cmd(f"virt.close('{port.name}')", 10) + guest_worker.cmd(f"virt.open('{port.name}')", 10) + (match, data) = guest_worker._cmd(f"virt.open('{port.name}')", 10) # Console on linux is permitted to open the device multiple times if port.is_console == "yes" and guest_worker.os_linux: if match != 0: # Multiple open didn't pass - test.fail("Unexpected fail of opening the console" - " device for the 2nd time.\n%s" % data) + test.fail( + "Unexpected fail of opening the console" + f" device for the 2nd time.\n{data}" + ) else: if match != 1: # Multiple open didn't fail: - test.fail("Unexpended pass of opening the" - " serialport device for the 2nd time.") + test.fail( + "Unexpended pass of opening the" + " serialport device for the 2nd time." + ) port.open() virtio_test.cleanup(vm, guest_worker) @@ -193,8 +207,9 @@ def test_close(): :param cfg: virtio_port_spread - how many devices per virt pci (0=all) """ (vm, guest_worker, port) = virtio_test.get_vm_with_single_port( - params.get('virtio_console_params')) - guest_worker.cmd("virt.close('%s')" % (port.name), 10) + params.get("virtio_console_params") + ) + guest_worker.cmd(f"virt.close('{port.name}')", 10) port.close() virtio_test.cleanup(vm, guest_worker) @@ -205,33 +220,31 @@ def test_polling(): :param cfg: virtio_port_spread - how many devices per virt pci (0=all) """ (vm, guest_worker, port) = virtio_test.get_vm_with_single_port( - params.get('virtio_console_params')) + params.get("virtio_console_params") + ) # Poll (OUT) port.open() - guest_worker.cmd("virt.poll('%s', %s)" % (port.name, select.POLLOUT), - 2) + guest_worker.cmd(f"virt.poll('{port.name}', {select.POLLOUT})", 2) # Poll (IN, OUT) port.sock.sendall(b"test") for test in [select.POLLIN, select.POLLOUT]: - guest_worker.cmd("virt.poll('%s', %s)" % (port.name, test), 10) + guest_worker.cmd(f"virt.poll('{port.name}', {test})", 10) # Poll (IN HUP) # I store the socket informations and close the socket port.close() for test in [select.POLLIN, select.POLLHUP]: - guest_worker.cmd("virt.poll('%s', %s)" % (port.name, test), 10) + guest_worker.cmd(f"virt.poll('{port.name}', {test})", 10) # Poll (HUP) - guest_worker.cmd("virt.recv('%s', 4, 1024, False)" % (port.name), 10) - guest_worker.cmd("virt.poll('%s', %s)" % (port.name, select.POLLHUP), - 2) + guest_worker.cmd(f"virt.recv('{port.name}', 4, 1024, False)", 10) + guest_worker.cmd(f"virt.poll('{port.name}', {select.POLLHUP})", 2) # Reconnect the socket port.open() # Redefine socket in consoles - guest_worker.cmd("virt.poll('%s', %s)" % (port.name, select.POLLOUT), - 2) + guest_worker.cmd(f"virt.poll('{port.name}', {select.POLLOUT})", 2) virtio_test.cleanup(vm, guest_worker) def test_sigio(): @@ -241,51 +254,64 @@ def test_sigio(): :param cfg: virtio_port_spread - how many devices per virt pci (0=all) """ (vm, guest_worker, port) = virtio_test.get_vm_with_single_port( - params.get('virtio_console_params')) + params.get("virtio_console_params") + ) if port.is_open(): port.close() - time.sleep(0.5) # wait for SIGHUP to be emitted + time.sleep(0.5) # wait for SIGHUP to be emitted # Enable sigio on specific port - guest_worker.cmd("virt.asynchronous('%s', True, 0)" % 
(port.name), 10) + guest_worker.cmd(f"virt.asynchronous('{port.name}', True, 0)", 10) # Test sigio when port open - guest_worker.cmd("virt.set_pool_want_return('%s', select.POLLOUT)" % - (port.name), 10) + guest_worker.cmd( + f"virt.set_pool_want_return('{port.name}', select.POLLOUT)", 10 + ) port.open() - match, data = guest_worker._cmd("virt.get_sigio_poll_return('%s')" % - (port.name), 10) + match, data = guest_worker._cmd( + f"virt.get_sigio_poll_return('{port.name}')", 10 + ) if match == 1: - test.fail("Problem with HUP on console port:\n%s" % data) + test.fail(f"Problem with HUP on console port:\n{data}") # Test sigio when port receive data - guest_worker.cmd("virt.set_pool_want_return('%s', select.POLLOUT |" - " select.POLLIN)" % (port.name), 10) + guest_worker.cmd( + f"virt.set_pool_want_return('{port.name}', select.POLLOUT |" + " select.POLLIN)", + 10, + ) port.sock.sendall(b"0123456789") - guest_worker.cmd("virt.get_sigio_poll_return('%s')" % (port.name), 10) + guest_worker.cmd(f"virt.get_sigio_poll_return('{port.name}')", 10) # Test sigio port close event - guest_worker.cmd("virt.set_pool_want_return('%s', select.POLLHUP |" - " select.POLLIN)" % (port.name), 10) + guest_worker.cmd( + f"virt.set_pool_want_return('{port.name}', select.POLLHUP |" + " select.POLLIN)", + 10, + ) port.close() - guest_worker.cmd("virt.get_sigio_poll_return('%s')" % (port.name), 10) + guest_worker.cmd(f"virt.get_sigio_poll_return('{port.name}')", 10) # Test sigio port open event and persistence of written data on port. - guest_worker.cmd("virt.set_pool_want_return('%s', select.POLLOUT |" - " select.POLLIN)" % (port.name), 10) + guest_worker.cmd( + f"virt.set_pool_want_return('{port.name}', select.POLLOUT |" + " select.POLLIN)", + 10, + ) port.open() - guest_worker.cmd("virt.get_sigio_poll_return('%s')" % (port.name), 10) + guest_worker.cmd(f"virt.get_sigio_poll_return('{port.name}')", 10) # Test event when erase data. 
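
The hunks above repeat the two mechanical rewrites that make up most of this patch: percent interpolation with single quotes becomes f-strings with double quotes, and the deprecated Logger.warn alias becomes Logger.warning. A minimal, self-contained sketch of both, with a stand-in port name instead of the test's real objects:

import logging

LOG = logging.getLogger("avocado.test")
port_name = "vs1"  # stand-in for port.name

# Before: percent interpolation, single quotes, deprecated .warn()
# LOG.warn("Executing __set_exit_event()")
# cmd = "virt.asynchronous('%s', True, 0)" % (port_name)

# After: f-string, double quotes, and the supported .warning() spelling
LOG.warning("Executing __set_exit_event()")
cmd = f"virt.asynchronous('{port_name}', True, 0)"
assert cmd == "virt.asynchronous('vs1', True, 0)"
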
- guest_worker.cmd("virt.clean_port('%s')" % (port.name), 10) + guest_worker.cmd(f"virt.clean_port('{port.name}')", 10) port.close() - guest_worker.cmd("virt.set_pool_want_return('%s', select.POLLOUT)" - % (port.name), 10) + guest_worker.cmd( + f"virt.set_pool_want_return('{port.name}', select.POLLOUT)", 10 + ) port.open() - guest_worker.cmd("virt.get_sigio_poll_return('%s')" % (port.name), 10) + guest_worker.cmd(f"virt.get_sigio_poll_return('{port.name}')", 10) # Disable sigio on specific port - guest_worker.cmd("virt.asynchronous('%s', False, 0)" % (port.name), 10) + guest_worker.cmd(f"virt.asynchronous('{port.name}', False, 0)", 10) virtio_test.cleanup(vm, guest_worker) def test_lseek(): @@ -297,8 +323,9 @@ def test_lseek(): """ # The virt.lseek returns PASS when the seek fails (vm, guest_worker, port) = virtio_test.get_vm_with_single_port( - params.get('virtio_console_params')) - guest_worker.cmd("virt.lseek('%s', 0, 0)" % (port.name), 10) + params.get("virtio_console_params") + ) + guest_worker.cmd(f"virt.lseek('{port.name}', 0, 0)", 10) virtio_test.cleanup(vm, guest_worker) def test_rw_host_offline(): @@ -308,20 +335,22 @@ def test_rw_host_offline(): :param cfg: virtio_port_spread - how many devices per virt pci (0=all) """ (vm, guest_worker, port) = virtio_test.get_vm_with_single_port( - params.get('virtio_console_params')) + params.get("virtio_console_params") + ) if port.is_open(): port.close() - guest_worker.cmd("virt.recv('%s', 0, 1024, False)" % port.name, 10) - match, tmp = guest_worker._cmd("virt.send('%s', 10, True)" % port.name, - 10) + guest_worker.cmd(f"virt.recv('{port.name}', 0, 1024, False)", 10) + match, tmp = guest_worker._cmd(f"virt.send('{port.name}', 10, True)", 10) if match is not None: - test.fail("Write on guest while host disconnected " - "didn't time out.\nOutput:\n%s" % tmp) + test.fail( + "Write on guest while host disconnected " + f"didn't time out.\nOutput:\n{tmp}" + ) port.open() - if (len(port.sock.recv(1024)) < 10): + if len(port.sock.recv(1024)) < 10: test.fail("Didn't received data from guest") # Now the cmd("virt.send('%s'... 
command should be finished guest_worker.cmd("print('PASS: nothing')", 10) @@ -334,31 +363,36 @@ def test_rw_host_offline_big_data(): :param cfg: virtio_port_spread - how many devices per virt pci (0=all) """ (vm, guest_worker, port) = virtio_test.get_vm_with_single_port( - params.get('virtio_console_params')) + params.get("virtio_console_params") + ) if port.is_open(): port.close() port.clean_port() port.close() - guest_worker.cmd("virt.clean_port('%s'),1024" % port.name, 10) - match, tmp = guest_worker._cmd("virt.send('%s', (1024**3)*3, True, " - "is_static=True)" % port.name, 30) + guest_worker.cmd(f"virt.clean_port('{port.name}'),1024", 10) + match, tmp = guest_worker._cmd( + f"virt.send('{port.name}', (1024**3)*3, True, " "is_static=True)", 30 + ) if match is not None: - test.fail("Write on guest while host disconnected " - "didn't time out.\nOutput:\n%s" % tmp) + test.fail( + "Write on guest while host disconnected " + f"didn't time out.\nOutput:\n{tmp}" + ) time.sleep(20) port.open() rlen = 0 - while rlen < (1024 ** 3 * 3): + while rlen < (1024**3 * 3): ret = select.select([port.sock], [], [], 10.0) - if (ret[0] != []): - rlen += len(port.sock.recv(((4096)))) - elif rlen != (1024 ** 3 * 3): - test.fail("Not all data was received," - "only %d from %d" % (rlen, 1024 ** 3 * 3)) + if ret[0] != []: + rlen += len(port.sock.recv(4096)) + elif rlen != (1024**3 * 3): + test.fail( + "Not all data was received," "only %d from %d" % (rlen, 1024**3 * 3) + ) guest_worker.cmd("print('PASS: nothing')", 10) virtio_test.cleanup(vm, guest_worker) @@ -370,17 +404,16 @@ def test_rw_blocking_mode(): """ # Blocking mode (vm, guest_worker, port) = virtio_test.get_vm_with_single_port( - params.get('virtio_console_params')) + params.get("virtio_console_params") + ) port.open() - guest_worker.cmd("virt.blocking('%s', True)" % port.name, 10) + guest_worker.cmd(f"virt.blocking('{port.name}', True)", 10) # Recv should timed out - match, tmp = guest_worker._cmd("virt.recv('%s', 10, 1024, False)" % - port.name, 10) + match, tmp = guest_worker._cmd(f"virt.recv('{port.name}', 10, 1024, False)", 10) if match == 0: - test.fail("Received data even when none was sent\n" - "Data:\n%s" % tmp) + test.fail("Received data even when none was sent\n" f"Data:\n{tmp}") elif match is not None: - test.fail("Unexpected fail\nMatch: %s\nData:\n%s" % (match, tmp)) + test.fail(f"Unexpected fail\nMatch: {match}\nData:\n{tmp}") port.sock.sendall(b"1234567890") # Now guest received the data end escaped from the recv() guest_worker.cmd("print('PASS: nothing')", 10) @@ -394,31 +427,27 @@ def test_rw_nonblocking_mode(): """ # Non-blocking mode (vm, guest_worker, port) = virtio_test.get_vm_with_single_port( - params.get('virtio_console_params')) + params.get("virtio_console_params") + ) port.open() - guest_worker.cmd("virt.blocking('%s', False)" % port.name, 10) + guest_worker.cmd(f"virt.blocking('{port.name}', False)", 10) # Recv should return FAIL with 0 received data - match, tmp = guest_worker._cmd("virt.recv('%s', 10, 1024, False)" % - port.name, 10) + match, tmp = guest_worker._cmd(f"virt.recv('{port.name}', 10, 1024, False)", 10) if match == 0: - test.fail("Received data even when none was sent\n" - "Data:\n%s" % tmp) + test.fail("Received data even when none was sent\n" f"Data:\n{tmp}") elif match is None: - test.fail("Timed out, probably in blocking mode\n" - "Data:\n%s" % tmp) + test.fail("Timed out, probably in blocking mode\n" f"Data:\n{tmp}") elif match != 1: - test.fail("Unexpected fail\nMatch: %s\nData:\n%s" % (match, tmp)) + 
test.fail(f"Unexpected fail\nMatch: {match}\nData:\n{tmp}") port.sock.sendall(b"1234567890") time.sleep(0.01) try: - guest_worker.cmd("virt.recv('%s', 10, 1024, False)" - % port.name, 10) + guest_worker.cmd(f"virt.recv('{port.name}', 10, 1024, False)", 10) except qemu_virtio_port.VirtioPortException as details: - if '[Errno 11] Resource temporarily unavailable' in str(details): + if "[Errno 11] Resource temporarily unavailable" in str(details): # Give the VM second chance time.sleep(0.01) - guest_worker.cmd("virt.recv('%s', 10, 1024, False)" - % port.name, 10) + guest_worker.cmd(f"virt.recv('{port.name}', 10, 1024, False)", 10) else: raise details virtio_test.cleanup(vm, guest_worker) @@ -429,7 +458,7 @@ def test_basic_loopback(): :param cfg: virtio_console_params - which type of virtio port to test :param cfg: virtio_port_spread - how many devices per virt pci (0=all) """ - if params.get('virtio_console_params') == 'serialport': + if params.get("virtio_console_params") == "serialport": vm, guest_worker = virtio_test.get_vm_with_worker(no_serialports=2) send_port, recv_port = virtio_test.get_virtio_ports(vm)[1][:2] else: @@ -442,8 +471,10 @@ def test_basic_loopback(): # Set nonblocking mode send_port.sock.setblocking(0) recv_port.sock.setblocking(0) - guest_worker.cmd("virt.loopback(['%s'], ['%s'], 1024, virt.LOOP_NONE)" - % (send_port.name, recv_port.name), 10) + guest_worker.cmd( + f"virt.loopback(['{send_port.name}'], ['{recv_port.name}'], 1024, virt.LOOP_NONE)", + 10, + ) send_port.sock.sendall(data) tmp = b"" i = 0 @@ -453,12 +484,12 @@ def test_basic_loopback(): if ret: try: tmp += recv_port.sock.recv(1024) - except IOError as failure_detail: - test.log.warn("Got err while recv: %s", failure_detail) + except OSError as failure_detail: + test.log.warning("Got err while recv: %s", failure_detail) if len(tmp) >= len(data): break if tmp != data: - test.fail("Incorrect data: '%s' != '%s'" % (data, tmp)) + test.fail(f"Incorrect data: '{data}' != '{tmp}'") guest_worker.safe_exit_loopback_threads([send_port], [recv_port]) virtio_test.cleanup(vm, guest_worker) @@ -483,50 +514,50 @@ def test_loopback(): :param cfg: virtio_port_spread - how many devices per virt pci (0=all) """ # PREPARE - test_params = params['virtio_console_params'] - test_time = int(params.get('virtio_console_test_time', 60)) + test_params = params["virtio_console_params"] + test_time = int(params.get("virtio_console_test_time", 60)) no_serialports = 0 no_consoles = 0 - for param in test_params.split(';'): - no_serialports = max(no_serialports, param.count('serialport')) - no_consoles = max(no_consoles, param.count('console')) + for param in test_params.split(";"): + no_serialports = max(no_serialports, param.count("serialport")) + no_consoles = max(no_consoles, param.count("console")) vm, guest_worker = virtio_test.get_vm_with_worker(no_consoles, no_serialports) no_errors = 0 (consoles, serialports) = virtio_test.get_virtio_ports(vm) - for param in test_params.split(';'): + for param in test_params.split(";"): if not param: continue - error_context.context("test_loopback: params %s" % param, test.log.info) + error_context.context(f"test_loopback: params {param}", test.log.info) # Prepare - param = param.split(':') + param = param.split(":") idx_serialport = 0 idx_console = 0 buf_len = [] - if (param[0].startswith('console')): + if param[0].startswith("console"): send_pt = consoles[idx_console] idx_console += 1 else: send_pt = serialports[idx_serialport] idx_serialport += 1 - if (len(param[0].split('@')) == 2): - 
buf_len.append(int(param[0].split('@')[1])) + if len(param[0].split("@")) == 2: + buf_len.append(int(param[0].split("@")[1])) else: buf_len.append(1024) recv_pts = [] for parm in param[1:]: - if (parm.isdigit()): + if parm.isdigit(): buf_len.append(int(parm)) - break # buf_len is the last portion of param - if (parm.startswith('console')): + break # buf_len is the last portion of param + if parm.startswith("console"): recv_pts.append(consoles[idx_console]) idx_console += 1 else: recv_pts.append(serialports[idx_serialport]) idx_serialport += 1 - if (len(parm[0].split('@')) == 2): - buf_len.append(int(parm[0].split('@')[1])) + if len(parm[0].split("@")) == 2: + buf_len.append(int(parm[0].split("@")[1])) else: buf_len.append(1024) # There must be sum(idx_*) consoles + last item as loopback buf_len @@ -547,25 +578,29 @@ def test_loopback(): queues.append(deque()) # Start loopback - tmp = "'%s'" % recv_pts[0].name + tmp = f"'{recv_pts[0].name}'" for recv_pt in recv_pts[1:]: - tmp += ", '%s'" % (recv_pt.name) - guest_worker.cmd("virt.loopback(['%s'], [%s], %d, virt.LOOP_POLL)" - % (send_pt.name, tmp, buf_len[-1]), 10) + tmp += f", '{recv_pt.name}'" + guest_worker.cmd( + "virt.loopback(['%s'], [%s], %d, virt.LOOP_POLL)" + % (send_pt.name, tmp, buf_len[-1]), + 10, + ) global EXIT_EVENT - funcatexit.register(env, params.get('type'), __set_exit_event) + funcatexit.register(env, params.get("type"), __set_exit_event) # TEST - thread = qemu_virtio_port.ThSendCheck(send_pt, EXIT_EVENT, queues, - buf_len[0]) + thread = qemu_virtio_port.ThSendCheck( + send_pt, EXIT_EVENT, queues, buf_len[0] + ) thread.start() threads.append(thread) for i in range(len(recv_pts)): - thread = qemu_virtio_port.ThRecvCheck(recv_pts[i], queues[i], - EXIT_EVENT, - buf_len[i + 1]) + thread = qemu_virtio_port.ThRecvCheck( + recv_pts[i], queues[i], EXIT_EVENT, buf_len[i + 1] + ) thread.start() threads.append(thread) @@ -579,50 +614,52 @@ def test_loopback(): _transfered = [] for i in range(no_threads): if not threads[i].is_alive(): - err += "main(th%s died), " % threads[i] + err += f"main(th{threads[i]} died), " _transfered.append(threads[i].idx) - if (_transfered == transferred and - transferred != [0] * no_threads): + if _transfered == transferred and transferred != [0] * no_threads: err += "main(no_data), " transferred = _transfered if err: - test.log.error("Error occurred while executing loopback " - "(%d out of %ds)", - test_time - int(end_time - time.time()), - test_time) + test.log.error( + "Error occurred while executing loopback " "(%d out of %ds)", + test_time - int(end_time - time.time()), + test_time, + ) break time.sleep(1) EXIT_EVENT.set() - funcatexit.unregister(env, params.get('type'), __set_exit_event) + funcatexit.unregister(env, params.get("type"), __set_exit_event) # TEST END workaround_unfinished_threads = False - test.log.debug('Joining %s', threads[0]) + test.log.debug("Joining %s", threads[0]) threads[0].join(5) if threads[0].is_alive(): - test.log.error('Send thread stuck, destroing the VM and ' - 'stopping loopback test to prevent autotest ' - 'freeze.') + test.log.error( + "Send thread stuck, destroing the VM and " + "stopping loopback test to prevent autotest " + "freeze." 
+ ) vm.destroy() break if threads[0].ret_code: - err += "%s, " % threads[0] + err += f"{threads[0]}, " tmp = "%d data sent; " % threads[0].idx for thread in threads[1:]: - test.log.debug('Joining %s', thread) + test.log.debug("Joining %s", thread) thread.join(5) if thread.is_alive(): workaround_unfinished_threads = True test.log.debug("Unable to destroy the thread %s", thread) tmp += "%d, " % thread.idx if thread.ret_code: - err += "%s, " % thread - test.log.info("test_loopback: %s data received and verified", - tmp[:-2]) + err += f"{thread}, " + test.log.info("test_loopback: %s data received and verified", tmp[:-2]) if err: no_errors += 1 - test.log.error("test_loopback: error occurred in threads: %s.", - err[:-2]) + test.log.error( + "test_loopback: error occurred in threads: %s.", err[:-2] + ) guest_worker.safe_exit_loopback_threads([send_pt], recv_pts) @@ -635,13 +672,17 @@ def test_loopback(): test.log.debug("All threads finished at this point.") del threads[:] if not vm.is_alive(): - test.fail("VM died, can't continue the test loop. " - "Please check the log for details.") + test.fail( + "VM died, can't continue the test loop. " + "Please check the log for details." + ) virtio_test.cleanup(vm, guest_worker) if no_errors: - msg = ("test_loopback: %d errors occurred while executing test, " - "check log for details." % no_errors) + msg = ( + "test_loopback: %d errors occurred while executing test, " + "check log for details." % no_errors + ) test.log.error(msg) test.fail(msg) @@ -651,41 +692,49 @@ def test_interrupted_transfer(): This test creates loopback between 2 ports and interrupts transfer eg. by stopping the machine or by unplugging of the port. """ + def _replug_loop(): - """ Replug ports and pci in a loop """ + """Replug ports and pci in a loop""" + def _port_unplug(port_idx): dev = ports[port_idx] portdev = vm.devices.get_by_params({"name": dev.qemu_id})[0] if not portdev: - test.error("No port named %s" % dev.qemu_id) - port_property = dict(id=portdev.get_param("id"), - name=portdev.get_param("name"), - chardev=portdev.get_param("chardev"), - bus=portdev.get_param("bus"), - nr=portdev.get_param("nr")) + test.error(f"No port named {dev.qemu_id}") + port_property = dict( + id=portdev.get_param("id"), + name=portdev.get_param("name"), + chardev=portdev.get_param("chardev"), + bus=portdev.get_param("bus"), + nr=portdev.get_param("nr"), + ) (out, ver_out) = vm.devices.simple_unplug(portdev, vm.monitor) if not ver_out: - test.error("Error occured when unplug %s" % dev.name) + test.error(f"Error occured when unplug {dev.name}") time.sleep(intr_time) return port_property def _port_plug(device, property): portdev = qdevices.QDevice(device) - for key, value in {'id': property['id'], - 'chardev': property['chardev'], - 'name': property['name'], - 'bus': property['bus'], - 'nr': property['nr']}.items(): + for key, value in { + "id": property["id"], + "chardev": property["chardev"], + "name": property["name"], + "bus": property["bus"], + "nr": property["nr"], + }.items(): portdev.set_param(key, value) (out, ver_out) = vm.devices.simple_hotplug(portdev, vm.monitor) if not ver_out: - test.error("Error occured when plug port %s." % property['name']) + test.error( + "Error occured when plug port {}.".format(property["name"]) + ) time.sleep(intr_time) def _pci_unplug(bus): - device = vm.devices.get_by_params({"id": str(bus).split('.')[0]})[0] + device = vm.devices.get_by_params({"id": str(bus).split(".")[0]})[0] if not device: - test.error("No bus %s in vm." 
% bus) + test.error(f"No bus {bus} in vm.") bus_property = dict(id=device.get_param("id")) (out, ver_out) = vm.devices.simple_unplug(device, vm.monitor) if not ver_out: @@ -695,7 +744,7 @@ def _pci_unplug(bus): def _pci_plug(property): bus = qdevices.QDevice("virtio-serial-pci") - bus.set_param('id', property['id']) + bus.set_param("id", property["id"]) (out, ver_out) = vm.devices.simple_hotplug(bus, vm.monitor) if not ver_out: test.error("Error occured when plug bus. out: %s", out) @@ -703,100 +752,103 @@ def _pci_plug(property): send_prop = _port_unplug(0) recv_prop = _port_unplug(1) - bus_prop = _pci_unplug(send_prop['bus']) + bus_prop = _pci_unplug(send_prop["bus"]) # replug all devices _pci_plug(bus_prop) - _port_plug('virtserialport', send_prop) - _port_plug('virtserialport', recv_prop) + _port_plug("virtserialport", send_prop) + _port_plug("virtserialport", recv_prop) def _stop_cont(): - """ Stop and resume VM """ + """Stop and resume VM""" vm.pause() time.sleep(intr_time) vm.resume() def _disconnect(): - """ Disconnect and reconnect the port """ + """Disconnect and reconnect the port""" _guest = random.choice((tuple(), (0,), (1,), (0, 1))) _host = random.choice((tuple(), (0,), (1,), (0, 1))) - if not _guest and not _host: # Close at least one port + if not _guest and not _host: # Close at least one port _guest = (0,) - test.log.debug('closing ports %s on host, %s on guest', _host, - _guest) + test.log.debug("closing ports %s on host, %s on guest", _host, _guest) for i in _host: threads[i].migrate_event.clear() - test.log.debug('Closing port %s on host', i) + test.log.debug("Closing port %s on host", i) ports[i].close() for i in _guest: - guest_worker.cmd("virt.close('%s')" % (ports[i].name), 10) + guest_worker.cmd(f"virt.close('{ports[i].name}')", 10) time.sleep(intr_time) for i in _host: - test.log.debug('Opening port %s on host', i) + test.log.debug("Opening port %s on host", i) ports[i].open() threads[i].migrate_event.set() for i in _guest: # 50 attemps per 0.1s - guest_worker.cmd("virt.open('%s', attempts=50)" - % (ports[i].name), 10) + guest_worker.cmd(f"virt.open('{ports[i].name}', attempts=50)", 10) def _port_replug(device, port_idx): - """ Unplug and replug port with the same name """ + """Unplug and replug port with the same name""" # FIXME: In Linux vport*p* are used. Those numbers are changing # when replugging port from pci to different pci. 
We should # either use symlinks (as in Windows) or replug with the busname port = ports[port_idx] portdev = vm.devices.get(port.qemu_id) if not portdev: - test.error("No port named %s" % port.qemu_id) + test.error(f"No port named {port.qemu_id}") chardev = portdev.get_param("chardev") out, ver_out = vm.devices.simple_unplug(portdev, vm.monitor) if not ver_out: - test.error("The device %s isn't hotplugged well, " - "result: %s" % (port.qemu_id, out)) + test.error( + f"The device {port.qemu_id} isn't hotplugged well, " + f"result: {out}" + ) time.sleep(intr_time) if not chardev: - test.error("No chardev in guest for port %s" % port.qemu_id) + test.error(f"No chardev in guest for port {port.qemu_id}") new_portdev = qdevices.QDevice(device) - for key, value in {'id': port.qemu_id, 'chardev': chardev, 'name': - port.name}.items(): + for key, value in { + "id": port.qemu_id, + "chardev": chardev, + "name": port.name, + }.items(): new_portdev.set_param(key, value) vm.devices.simple_hotplug(new_portdev, vm.monitor) def _serialport_send_replug(): - """ hepler for executing replug of the sender port """ - _port_replug('virtserialport', 0) + """hepler for executing replug of the sender port""" + _port_replug("virtserialport", 0) def _console_send_replug(): - """ hepler for executing replug of the sender port """ - _port_replug('virtconsole', 0) + """hepler for executing replug of the sender port""" + _port_replug("virtconsole", 0) def _serialport_recv_replug(): - """ hepler for executing replug of the receiver port """ - _port_replug('virtserialport', 1) + """hepler for executing replug of the receiver port""" + _port_replug("virtserialport", 1) def _console_recv_replug(): - """ hepler for executing replug of the receiver port """ - _port_replug('virtconsole', 1) + """hepler for executing replug of the receiver port""" + _port_replug("virtconsole", 1) def _serialport_random_replug(): - """ hepler for executing replug of random port """ - _port_replug('virtserialport', random.choice((0, 1))) + """hepler for executing replug of random port""" + _port_replug("virtserialport", random.choice((0, 1))) def _console_random_replug(): - """ hepler for executing replug of random port """ - _port_replug('virtconsole', random.choice((0, 1))) + """hepler for executing replug of random port""" + _port_replug("virtconsole", random.choice((0, 1))) def _s3(): """ Suspend to mem (S3) and resume the VM. """ - session.sendline(set_s3_cmd) # pylint: disable=E0606 + session.sendline(set_s3_cmd) # pylint: disable=E0606 time.sleep(intr_time) - if not vm.monitor.verify_status('suspended'): - test.log.debug('VM not yet suspended, periodic check started.') - while not vm.monitor.verify_status('suspended'): + if not vm.monitor.verify_status("suspended"): + test.log.debug("VM not yet suspended, periodic check started.") + while not vm.monitor.verify_status("suspended"): pass - vm.monitor.cmd('system_wakeup') + vm.monitor.cmd("system_wakeup") def _s4(): """ @@ -808,7 +860,7 @@ def _s4(): sufficient, we take it as the initial data loss is over. Than we set the allowed loss to 0. """ - set_s4_cmd = params['set_s4_cmd'] + set_s4_cmd = params["set_s4_cmd"] _loss = threads[1].sendidx _count = threads[1].idx # Prepare, hibernate and wake the machine @@ -822,17 +874,21 @@ def _s4(): test.fail("VM refuses to go down. 
Suspend failed.") time.sleep(intr_time) vm.create() - for _ in range(10): # Wait until new ports are created + for _ in range(10): # Wait until new ports are created try: - if (vm.virtio_ports[0] != oldport and - len(vm.virtio_ports) == portslen): + if ( + vm.virtio_ports[0] != oldport + and len(vm.virtio_ports) == portslen + ): break except IndexError: pass time.sleep(1) else: - test.fail("New virtio_ports were not created with" - "the new VM or the VM failed to start.") + test.fail( + "New virtio_ports were not created with" + "the new VM or the VM failed to start." + ) if is_serialport: ports = virtio_test.get_virtio_ports(vm)[1] else: @@ -859,8 +915,10 @@ def _s4(): _loss = loss _count = count else: - test.fail("Initial data loss is not over after 1s " - "or no new data were received.") + test.fail( + "Initial data loss is not over after 1s " + "or no new data were received." + ) # now no loss is allowed threads[1].sendidx = 0 # DEBUG: When using ThRecv debug, you must wake-up the recv thread @@ -868,12 +926,12 @@ def _s4(): # threads[1].migrate_event.set() error_context.context("Preparing loopback", test.log.info) - test_time = float(params.get('virtio_console_test_time', 10)) - intr_time = float(params.get('virtio_console_intr_time', 0)) - no_repeats = int(params.get('virtio_console_no_repeats', 1)) - interruption = params['virtio_console_interruption'] - is_serialport = (params.get('virtio_console_params') == 'serialport') - buflen = int(params.get('virtio_console_buflen', 1)) + test_time = float(params.get("virtio_console_test_time", 10)) + intr_time = float(params.get("virtio_console_intr_time", 0)) + no_repeats = int(params.get("virtio_console_no_repeats", 1)) + interruption = params["virtio_console_interruption"] + is_serialport = params.get("virtio_console_params") == "serialport" + buflen = int(params.get("virtio_console_buflen", 1)) if is_serialport: vm, guest_worker = virtio_test.get_vm_with_worker(no_serialports=2) (_, ports) = virtio_test.get_virtio_ports(vm) @@ -885,53 +943,54 @@ def _s4(): send_resume_ev = None recv_resume_ev = None acceptable_loss = 0 - if interruption == 'stop': + if interruption == "stop": interruption = _stop_cont - elif interruption == 'disconnect': + elif interruption == "disconnect": interruption = _disconnect acceptable_loss = 100000 send_resume_ev = threading.Event() recv_resume_ev = threading.Event() - elif interruption == 'replug_send': + elif interruption == "replug_send": if is_serialport: interruption = _serialport_send_replug else: interruption = _console_send_replug acceptable_loss = max(buflen * 10, 1000) - elif interruption == 'replug_recv': + elif interruption == "replug_recv": if is_serialport: interruption = _serialport_recv_replug else: interruption = _console_recv_replug acceptable_loss = max(buflen * 5, 1000) - elif interruption == 'replug_random': + elif interruption == "replug_random": if is_serialport: interruption = _serialport_random_replug else: interruption = _console_random_replug acceptable_loss = max(buflen * 10, 1000) - elif interruption == 'replug_loop': + elif interruption == "replug_loop": if is_serialport: interruption = _replug_loop acceptable_loss = max(buflen * 15, 1000) - elif interruption == 's3': + elif interruption == "s3": interruption = _s3 acceptable_loss = 2000 session = vm.wait_for_login() - set_s3_cmd = params['set_s3_cmd'] + set_s3_cmd = params["set_s3_cmd"] if session.cmd_status(params["check_s3_support_cmd"]): test.cancel("Suspend to mem (S3) not supported.") - elif interruption == 's4': + elif 
interruption == "s4": interruption = _s4 session = vm.wait_for_login() if session.cmd_status(params["check_s4_support_cmd"]): test.cancel("Suspend to disk (S4) not supported.") - acceptable_loss = 99999999 # loss is set in S4 rutine + acceptable_loss = 99999999 # loss is set in S4 rutine send_resume_ev = threading.Event() recv_resume_ev = threading.Event() else: - test.cancel("virtio_console_interruption = '%s' " - "is unknown." % interruption) + test.cancel( + f"virtio_console_interruption = '{interruption}' " "is unknown." + ) send_pt = ports[0] recv_pt = ports[1] @@ -946,27 +1005,41 @@ def _s4(): error_context.context("Starting loopback", test.log.info) err = "" # TODO: Use normal LOOP_NONE when bz796048 is resolved. - guest_worker.cmd("virt.loopback(['%s'], ['%s'], %s, virt.LOOP_" - "RECONNECT_NONE)" - % (send_pt.name, recv_pt.name, buflen), 10) + guest_worker.cmd( + f"virt.loopback(['{send_pt.name}'], ['{recv_pt.name}'], {buflen}, virt.LOOP_" + "RECONNECT_NONE)", + 10, + ) - funcatexit.register(env, params.get('type'), __set_exit_event) + funcatexit.register(env, params.get("type"), __set_exit_event) threads.append( - qemu_virtio_port.ThSendCheck(send_pt, EXIT_EVENT, queues, - buflen, send_resume_ev)) + qemu_virtio_port.ThSendCheck( + send_pt, EXIT_EVENT, queues, buflen, send_resume_ev + ) + ) threads[-1].start() - _ = params.get('virtio_console_debug') - threads.append(qemu_virtio_port.ThRecvCheck(recv_pt, queues[0], - EXIT_EVENT, buflen, - acceptable_loss, - recv_resume_ev, - debug=_)) + _ = params.get("virtio_console_debug") + threads.append( + qemu_virtio_port.ThRecvCheck( + recv_pt, + queues[0], + EXIT_EVENT, + buflen, + acceptable_loss, + recv_resume_ev, + debug=_, + ) + ) threads[-1].start() - test.log.info('Starting the loop 2+%d*(%d+%d+intr_overhead)+2 >= %ss', - no_repeats, intr_time, test_time, - (4 + no_repeats * (intr_time + test_time))) + test.log.info( + "Starting the loop 2+%d*(%d+%d+intr_overhead)+2 >= %ss", + no_repeats, + intr_time, + test_time, + (4 + no_repeats * (intr_time + test_time)), + ) # Lets transfer some data before the interruption time.sleep(2) if not threads[0].is_alive(): @@ -975,78 +1048,85 @@ def _s4(): test.fail("Receiver thread died before interruption.") # 0s interruption without any measurements - if params.get('virtio_console_micro_repeats'): + if params.get("virtio_console_micro_repeats"): error_context.context("Micro interruptions", test.log.info) threads[1].sendidx = acceptable_loss - for i in range(int(params.get('virtio_console_micro_repeats'))): + for i in range(int(params.get("virtio_console_micro_repeats"))): interruption() error_context.context("Normal interruptions", test.log.info) try: for i in range(no_repeats): - error_context.context("Interruption nr. %s" % i) + error_context.context(f"Interruption nr. 
{i}") threads[1].sendidx = acceptable_loss interruption() count = threads[1].idx - test.log.debug('Transfered data: %s', count) + test.log.debug("Transfered data: %s", count) # Be friendly to very short test_time values for _ in range(10): time.sleep(test_time) - test.log.debug('Transfered data2: %s', threads[1].idx) + test.log.debug("Transfered data2: %s", threads[1].idx) if count == threads[1].idx and threads[1].is_alive(): - test.log.warn('No data received after %ds, extending ' - 'test_time', test_time) + test.log.warning( + "No data received after %ds, extending " "test_time", + test_time, + ) else: break threads[1].reload_loss_idx() if count == threads[1].idx or not threads[1].is_alive(): if not threads[1].is_alive(): - test.log.error('RecvCheck thread stopped unexpectedly.') + test.log.error("RecvCheck thread stopped unexpectedly.") if count == threads[1].idx: - test.log.error( - 'No data transferred after interruption!') - test.log.info('Output from GuestWorker:\n%s', - guest_worker.read_nonblocking()) + test.log.error("No data transferred after interruption!") + test.log.info( + "Output from GuestWorker:\n%s", guest_worker.read_nonblocking() + ) try: session = vm.login() - data = session.cmd_output('dmesg') - if 'WARNING:' in data: - test.log.warning('There are warnings in dmesg:\n%s', - data) + data = session.cmd_output("dmesg") + if "WARNING:" in data: + test.log.warning("There are warnings in dmesg:\n%s", data) except Exception as inst: - test.log.warn("Can't verify dmesg: %s", inst) + test.log.warning("Can't verify dmesg: %s", inst) try: - vm.monitor.info('qtree') + vm.monitor.info("qtree") except Exception as inst: - test.log.warn("Failed to get info from qtree: %s", inst) + test.log.warning("Failed to get info from qtree: %s", inst) EXIT_EVENT.set() vm.verify_kernel_crash() - test.fail('No data transferred after interruption.') + test.fail("No data transferred after interruption.") except Exception as inst: err = "main thread, " - test.log.error('interrupted_loopback failed with exception: %s', - inst) + test.log.error("interrupted_loopback failed with exception: %s", inst) error_context.context("Stopping loopback", test.log.info) EXIT_EVENT.set() - funcatexit.unregister(env, params.get('type'), __set_exit_event) + funcatexit.unregister(env, params.get("type"), __set_exit_event) workaround_unfinished_threads = False threads[0].join(5) if threads[0].is_alive(): workaround_unfinished_threads = True - test.log.error('Send thread stuck, destroing the VM and ' - 'stopping loopback test to prevent autotest freeze.') + test.log.error( + "Send thread stuck, destroing the VM and " + "stopping loopback test to prevent autotest freeze." 
+ ) vm.destroy() for thread in threads[1:]: - test.log.debug('Joining %s', thread) + test.log.debug("Joining %s", thread) thread.join(5) if thread.is_alive(): workaround_unfinished_threads = True test.log.debug("Unable to destroy the thread %s", thread) - if not err: # Show only on success - test.log.info('%d data sent; %d data received and verified; %d ' - 'interruptions %ds each.', threads[0].idx, - threads[1].idx, no_repeats, test_time) + if not err: # Show only on success + test.log.info( + "%d data sent; %d data received and verified; %d " + "interruptions %ds each.", + threads[0].idx, + threads[1].idx, + no_repeats, + test_time, + ) if threads[0].ret_code: err += "sender, " if threads[1].ret_code: @@ -1074,7 +1154,7 @@ def _s4(): virtio_test.cleanup(env.get_vm(params["main_vm"]), guest_worker) if err: - test.fail("%s failed" % err[:-2]) + test.fail(f"{err[:-2]} failed") def _process_stats(stats, scale=1.0): """ @@ -1103,38 +1183,39 @@ def test_perf(): :param cfg: virtio_port_spread - how many devices per virt pci (0=all) """ from autotest.client import utils - test_params = params['virtio_console_params'] - test_time = int(params.get('virtio_console_test_time', 60)) + + test_params = params["virtio_console_params"] + test_time = int(params.get("virtio_console_test_time", 60)) no_serialports = 0 no_consoles = 0 - if test_params.count('serialport'): + if test_params.count("serialport"): no_serialports = 1 - if test_params.count('serialport'): + if test_params.count("serialport"): no_consoles = 1 vm, guest_worker = virtio_test.get_vm_with_worker(no_consoles, no_serialports) (consoles, serialports) = virtio_test.get_virtio_ports(vm) consoles = [consoles, serialports] no_errors = 0 - for param in test_params.split(';'): + for param in test_params.split(";"): if not param: continue - error_context.context("test_perf: params %s" % param, test.log.info) + error_context.context(f"test_perf: params {param}", test.log.info) EXIT_EVENT.clear() # Prepare - param = param.split(':') + param = param.split(":") duration = test_time if len(param) > 1: try: duration = float(param[1]) except ValueError: pass - param = param[0].split('@') + param = param[0].split("@") if len(param) > 1 and param[1].isdigit(): buf_len = int(param[1]) else: buf_len = 1024 - param = (param[0] == 'serialport') + param = param[0] == "serialport" port = consoles[param][0] port.open() @@ -1143,17 +1224,20 @@ def test_perf(): for _ in range(buf_len): data += b"%c" % random.randrange(255) - funcatexit.register(env, params.get('type'), __set_exit_event) + funcatexit.register(env, params.get("type"), __set_exit_event) time_slice = float(duration) / 100 # HOST -> GUEST - guest_worker.cmd('virt.loopback(["%s"], [], %d, virt.LOOP_NONE)' - % (port.name, buf_len), 10) + guest_worker.cmd( + 'virt.loopback(["%s"], [], %d, virt.LOOP_NONE)' % (port.name, buf_len), + 10, + ) thread = qemu_virtio_port.ThSend(port.sock, data, EXIT_EVENT) - stats = array.array('f', []) - loads = utils.SystemLoad([(os.getpid(), 'autotest'), - (vm.get_pid(), 'VM'), 0]) + stats = array.array("f", []) + loads = utils.SystemLoad( + [(os.getpid(), "autotest"), (vm.get_pid(), "VM"), 0] + ) try: loads.start() _time = time.time() @@ -1168,44 +1252,50 @@ def test_perf(): thread.join() if thread.ret_code: no_errors += 1 - test.log.error("test_perf: error occurred in thread %s " - "(H2G)", thread) + test.log.error( + "test_perf: error occurred in thread %s " "(H2G)", thread + ) elif thread.idx == 0: no_errors += 1 test.log.error("test_perf: no data sent (H2G)") # Let the 
guest read-out all the remaining data for _ in range(60): - if guest_worker._cmd("virt.poll('%s', %s)" - % (port.name, select.POLLIN), 10)[0]: + if guest_worker._cmd( + f"virt.poll('{port.name}', {select.POLLIN})", 10 + )[0]: break time.sleep(1) else: - test.fail("Unable to read-out all remaining " - "data in 60s.") + test.fail("Unable to read-out all remaining " "data in 60s.") guest_worker.safe_exit_loopback_threads([port], []) - if (_time > time_slice): - test.log.error("Test ran %fs longer which is more than one " - "time slice", _time) + if _time > time_slice: + test.log.error( + "Test ran %fs longer which is more than one " "time slice", + _time, + ) else: test.log.debug("Test ran %fs longer", _time) stats = _process_stats(stats[1:], time_slice * 1048576) test.log.debug("Stats = %s", stats) - test.log.info("Host -> Guest [MB/s] (min/med/max) = %.3f/%.3f/" - "%.3f", stats[0], stats[len(stats) / 2], - stats[-1]) + test.log.info( + "Host -> Guest [MB/s] (min/med/max) = %.3f/%.3f/" "%.3f", + stats[0], + stats[len(stats) / 2], + stats[-1], + ) del thread # GUEST -> HOST EXIT_EVENT.clear() - stats = array.array('f', []) - guest_worker.cmd("virt.send_loop_init('%s', %d)" - % (port.name, buf_len), 30) - thread = qemu_virtio_port.ThRecv(port.sock, EXIT_EVENT, - buf_len) + stats = array.array("f", []) + guest_worker.cmd( + "virt.send_loop_init('%s', %d)" % (port.name, buf_len), 30 + ) + thread = qemu_virtio_port.ThRecv(port.sock, EXIT_EVENT, buf_len) thread.start() loads.start() guest_worker.cmd("virt.send_loop()", 10) @@ -1221,25 +1311,32 @@ def test_perf(): thread.join() if thread.ret_code: no_errors += 1 - test.log.error("test_perf: error occurred in thread %s" - "(G2H)", thread) + test.log.error( + "test_perf: error occurred in thread %s" "(G2H)", thread + ) elif thread.idx == 0: no_errors += 1 test.log.error("test_perf: No data received (G2H)") # Deviation is higher than single time_slice - if (_time > time_slice): - test.log.error("Test ran %fs longer which is more than one " - "time slice", _time) + if _time > time_slice: + test.log.error( + "Test ran %fs longer which is more than one " "time slice", + _time, + ) else: test.log.debug("Test ran %fs longer", _time) stats = _process_stats(stats[1:], time_slice * 1048576) test.log.debug("Stats = %s", stats) - test.log.info("Guest -> Host [MB/s] (min/med/max) = %.3f/%.3f/" - "%.3f", stats[0], stats[len(stats) / 2], - stats[-1]) + test.log.info( + "Guest -> Host [MB/s] (min/med/max) = %.3f/%.3f/" "%.3f", + stats[0], + stats[len(stats) / 2], + stats[-1], + ) except Exception as inst: - test.log.error("test_perf: Failed with %s, starting virtio_test.cleanup", - inst) + test.log.error( + "test_perf: Failed with %s, starting virtio_test.cleanup", inst + ) loads.stop() try: guest_worker.cmd("virt.exit_threads()", 10) @@ -1247,18 +1344,18 @@ def test_perf(): thread.join() raise inst except Exception as inst: - test.log.error("test_perf: Critical failure, killing VM %s", - inst) + test.log.error("test_perf: Critical failure, killing VM %s", inst) EXIT_EVENT.set() vm.destroy() del thread raise inst - funcatexit.unregister(env, params.get('type'), __set_exit_event) - del thread + funcatexit.unregister(env, params.get("type"), __set_exit_event) virtio_test.cleanup(vm, guest_worker) if no_errors: - msg = ("test_perf: %d errors occurred while executing test, " - "check log for details." % no_errors) + msg = ( + "test_perf: %d errors occurred while executing test, " + "check log for details." 
% no_errors + ) test.log.error(msg) test.fail(msg) @@ -1288,7 +1385,7 @@ def _tmigrate(use_serialport, no_ports, no_migrations, blocklen, offline): # TODO BUG: using SMP the data loss is up to 4 buffers # 2048 = char.dev. socket size, parms[2] = host->guest send buffer size sendlen = 2 * 2 * max(qemu_virtio_port.SOCKET_SIZE, blocklen) - if not offline: # TODO BUG: online migration causes more loses + if not offline: # TODO BUG: online migration causes more loses # TODO: Online migration lose n*buffer. n depends on the console # troughput. FIX or analyse it's cause. sendlen = 1000 * sendlen @@ -1304,27 +1401,34 @@ def _tmigrate(use_serialport, no_ports, no_migrations, blocklen, offline): queues.append(deque()) verified.append(0) - tmp = "'%s'" % ports[1:][0].name + tmp = f"'{ports[1:][0].name}'" for recv_pt in ports[1:][1:]: - tmp += ", '%s'" % (recv_pt.name) - guest_worker.cmd("virt.loopback(['%s'], [%s], %d, virt.LOOP_POLL)" - % (ports[0].name, tmp, blocklen), 10) + tmp += f", '{recv_pt.name}'" + guest_worker.cmd( + "virt.loopback(['%s'], [%s], %d, virt.LOOP_POLL)" + % (ports[0].name, tmp, blocklen), + 10, + ) - funcatexit.register(env, params.get('type'), __set_exit_event) + funcatexit.register(env, params.get("type"), __set_exit_event) # TEST - thread = qemu_virtio_port.ThSendCheck(ports[0], EXIT_EVENT, queues, - blocklen, - migrate_event=threading.Event()) + thread = qemu_virtio_port.ThSendCheck( + ports[0], EXIT_EVENT, queues, blocklen, migrate_event=threading.Event() + ) thread.start() threads.append(thread) for i in range(len(ports[1:])): _ = threading.Event() - thread = qemu_virtio_port.ThRecvCheck(ports[1:][i], queues[i], - EXIT_EVENT, blocklen, - sendlen=sendlen, - migrate_event=_) + thread = qemu_virtio_port.ThRecvCheck( + ports[1:][i], + queues[i], + EXIT_EVENT, + blocklen, + sendlen=sendlen, + migrate_event=_, + ) thread.start() threads.append(thread) @@ -1333,14 +1437,12 @@ def _tmigrate(use_serialport, no_ports, no_migrations, blocklen, offline): tmp = "%d data sent; " % threads[0].idx for thread in threads[1:]: tmp += "%d, " % thread.idx - test.log.debug("test_migrate: %s data received and verified", - tmp[:-2]) + test.log.debug("test_migrate: %s data received and verified", tmp[:-2]) i += 1 time.sleep(2) for j in range(no_migrations): - error_context.context("Performing migration number %s/%s" - % (j, no_migrations)) + error_context.context(f"Performing migration number {j}/{no_migrations}") vm = migration.migrate(vm, env, 3600, "exec", 0, offline) if not vm: test.fail("Migration failed") @@ -1363,31 +1465,35 @@ def _tmigrate(use_serialport, no_ports, no_migrations, blocklen, offline): tmp = "%d data sent; " % threads[0].idx for thread in threads[1:]: tmp += "%d, " % thread.idx - test.log.debug("test_migrate: %s data received and verified", - tmp[:-2]) + test.log.debug("test_migrate: %s data received and verified", tmp[:-2]) i += 1 time.sleep(2) if not threads[0].is_alive(): if EXIT_EVENT.is_set(): - test.fail("Exit event emitted, check the log " - "for send/recv thread failure.") + test.fail( + "Exit event emitted, check the log " + "for send/recv thread failure." 
+ ) else: EXIT_EVENT.set() - test.fail("Send thread died unexpectedly in " - "migration %d" % (j + 1)) + test.fail( + "Send thread died unexpectedly in " "migration %d" % (j + 1) + ) for i in range(0, len(ports[1:])): if not threads[i + 1].is_alive(): EXIT_EVENT.set() - test.fail("Recv thread %d died unexpectedly in " - "migration %d" % (i, (j + 1))) + test.fail( + "Recv thread %d died unexpectedly in " + "migration %d" % (i, (j + 1)) + ) if verified[i] == threads[i + 1].idx: EXIT_EVENT.set() - test.fail("No new data in %d console were " - "transferred after migration %d" - % (i, (j + 1))) + test.fail( + "No new data in %d console were " + "transferred after migration %d" % (i, (j + 1)) + ) verified[i] = threads[i + 1].idx - test.log.info("%d out of %d migration(s) passed", (j + 1), - no_migrations) + test.log.info("%d out of %d migration(s) passed", (j + 1), no_migrations) # If we get to this point let's assume all threads were reconnected for thread in threads: thread.migrate_event.clear() @@ -1395,14 +1501,16 @@ def _tmigrate(use_serialport, no_ports, no_migrations, blocklen, offline): # FINISH EXIT_EVENT.set() - funcatexit.unregister(env, params.get('type'), __set_exit_event) + funcatexit.unregister(env, params.get("type"), __set_exit_event) # Send thread might fail to exit when the guest stucks workaround_unfinished_threads = False threads[0].join(5) if threads[0].is_alive(): workaround_unfinished_threads = True - test.log.error('Send thread stuck, destroing the VM and ' - 'stopping loopback test to prevent autotest freeze.') + test.log.error( + "Send thread stuck, destroing the VM and " + "stopping loopback test to prevent autotest freeze." + ) vm.destroy() tmp = "%d data sent; " % threads[0].idx err = "" @@ -1414,11 +1522,14 @@ def _tmigrate(use_serialport, no_ports, no_migrations, blocklen, offline): test.log.debug("Unable to destroy the thread %s", thread) tmp += "%d, " % thread.idx if thread.ret_code: - err += "%s, " % thread - test.log.info("test_migrate: %s data received and verified during %d " - "migrations", tmp[:-2], no_migrations) + err += f"{thread}, " + test.log.info( + "test_migrate: %s data received and verified during %d " "migrations", + tmp[:-2], + no_migrations, + ) if err: - msg = "test_migrate: error occurred in threads: %s." % err[:-2] + msg = f"test_migrate: error occurred in threads: {err[:-2]}." 
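
The migration hunks above show the split this patch applies consistently: test.log.* calls keep their printf-style lazy arguments (the string is only interpolated if the record is emitted), while messages that are always built, such as the failure text, are the ones rewritten to f-strings. A short sketch of the two idioms side by side, using a hypothetical logger and counters:

import logging

logging.basicConfig(level=logging.INFO)
log = logging.getLogger("avocado.test")

passed, total = 3, 5  # hypothetical migration counters

# printf-style logging arguments survive the ruff pass: the message is only
# interpolated if the record is actually emitted.
log.info("%d out of %d migration(s) passed", passed, total)

# Eagerly constructed messages become f-strings.
err = "ThRecvCheck-1, "
msg = f"test_migrate: error occurred in threads: {err[:-2]}."
print(msg)
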
test.log.error(msg) test.fail(msg) @@ -1443,7 +1554,7 @@ def _test_migrate(offline): no_migrations = int(params.get("virtio_console_no_migrations", 5)) no_ports = int(params.get("virtio_console_no_ports", 2)) blocklen = int(params.get("virtio_console_blocklen", 1024)) - use_serialport = params.get('virtio_console_params') == "serialport" + use_serialport = params.get("virtio_console_params") == "serialport" _tmigrate(use_serialport, no_ports, no_migrations, blocklen, offline) def test_migrate_offline(): @@ -1485,20 +1596,20 @@ def _virtio_dev_add(vm, pci_id, port_id, console="no"): port_type = "virtconsole" port += "%d-%d" % (pci_id, port_id) new_portdev = qdevices.QDevice(port_type) - for key, value in {'id': port, 'name': port, 'bus': "virtio_serial_pci" - "%d.0" % pci_id}.items(): + for key, value in { + "id": port, + "name": port, + "bus": "virtio_serial_pci" "%d.0" % pci_id, + }.items(): new_portdev.set_param(key, value) (result, ver_out) = vm.devices.simple_hotplug(new_portdev, vm.monitor) if console == "no": - vm.virtio_ports.append(qemu_virtio_port.VirtioSerial(port, port, - None)) + vm.virtio_ports.append(qemu_virtio_port.VirtioSerial(port, port, None)) else: - vm.virtio_ports.append(qemu_virtio_port.VirtioConsole(port, port, - None)) + vm.virtio_ports.append(qemu_virtio_port.VirtioConsole(port, port, None)) if not ver_out: - test.error("The virtioserialport isn't hotplugged well, result: %s" - % result) + test.error(f"The virtioserialport isn't hotplugged well, result: {result}") def _virtio_dev_del(vm, pci_id, port_id): """ @@ -1513,11 +1624,15 @@ def _virtio_dev_del(vm, pci_id, port_id): (result, ver_out) = vm.devices.simple_unplug(portdev, vm.monitor) vm.virtio_ports.remove(port) if not ver_out: - test.error("The virtioserialport isn't hotunplugged well, " - "result: %s" % result) + test.error( + "The virtioserialport isn't hotunplugged well, " + f"result: {result}" + ) return - test.fail("Removing port which is not in vm.virtio_ports" - " ...-%d-%d" % (pci_id, port_id)) + test.fail( + "Removing port which is not in vm.virtio_ports" + " ...-%d-%d" % (pci_id, port_id) + ) def test_hotplug(): """ @@ -1552,12 +1667,13 @@ def test_hotplug(): test.log.info("Test correct initialization of hotplug ports") for bus_id in range(1, 5): # count of pci device new_pcidev = qdevices.QDevice(get_virtio_serial_name()) - new_pcidev.set_param('id', 'virtio_serial_pci%d' % bus_id) + new_pcidev.set_param("id", "virtio_serial_pci%d" % bus_id) (result, ver_out) = vm.devices.simple_hotplug(new_pcidev, vm.monitor) if not ver_out: - test.error("The virtio serial pci isn't hotplugged well, log: %s" - % result) - for i in range(bus_id * 5 + 5): # max ports 30 + test.error( + f"The virtio serial pci isn't hotplugged well, log: {result}" + ) + for i in range(bus_id * 5 + 5): # max ports 30 _virtio_dev_add(vm, bus_id, i, console) time.sleep(pause) # Test correct initialization of hotplug ports @@ -1566,15 +1682,17 @@ def test_hotplug(): test.log.info("Delete ports during ports in use") # Delete ports when ports are used. 
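
One spot the formatting pass leaves untouched, in the test_perf hunks earlier in this patch, is the median lookup stats[len(stats) / 2]: on Python 3 that index is a float and raises TypeError once the min/med/max logging line runs. A small sketch of the floor-division lookup, assuming the middle sample is what that log message is after (illustration only, not part of the commit):

from array import array

# Five throughput samples standing in for the collected stats
stats = array("f", [1.0, 2.5, 3.0, 4.5, 9.0])

# stats[len(stats) / 2] raises TypeError on Python 3 (indices must be integers);
# floor division picks the middle element instead.
median = stats[len(stats) // 2]
print("min/med/max [MB/s] = %.3f/%.3f/%.3f" % (stats[0], median, stats[-1]))
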
- guest_worker.cmd("virt.loopback(['%s'], ['%s'], 1024," - "virt.LOOP_POLL)" % (consoles[1][0].name, - consoles[1][1].name), 10) - funcatexit.register(env, params.get('type'), __set_exit_event) - - send = qemu_virtio_port.ThSend(consoles[1][0].sock, "Data", EXIT_EVENT, - quiet=True) - recv = qemu_virtio_port.ThRecv(consoles[1][1].sock, EXIT_EVENT, - quiet=True) + guest_worker.cmd( + f"virt.loopback(['{consoles[1][0].name}'], ['{consoles[1][1].name}'], 1024," + "virt.LOOP_POLL)", + 10, + ) + funcatexit.register(env, params.get("type"), __set_exit_event) + + send = qemu_virtio_port.ThSend( + consoles[1][0].sock, "Data", EXIT_EVENT, quiet=True + ) + recv = qemu_virtio_port.ThRecv(consoles[1][1].sock, EXIT_EVENT, quiet=True) send.start() time.sleep(2) recv.start() @@ -1586,38 +1704,39 @@ def test_hotplug(): (result_, ver_out_) = vm.devices.simple_unplug(portdev_, vm.monitor) vm.virtio_ports = vm.virtio_ports[2:] if not (ver_out and ver_out_): - test.error("The ports aren't hotunplugged well, log: %s\n, %s" - % (result, result_)) + test.error( + f"The ports aren't hotunplugged well, log: {result}\n, {result_}" + ) EXIT_EVENT.set() - funcatexit.unregister(env, params.get('type'), __set_exit_event) + funcatexit.unregister(env, params.get("type"), __set_exit_event) send.join() recv.join() guest_worker.cmd("virt.exit_threads()", 10) - guest_worker.cmd('guest_exit()', 10) + guest_worker.cmd("guest_exit()", 10) test.log.info("Trying to add maximum count of ports to one pci device") # Try to add ports - for i in range(30): # max port 30 + for i in range(30): # max port 30 _virtio_dev_add(vm, 0, i, console) time.sleep(pause) guest_worker = qemu_virtio_port.GuestWorker(vm) - guest_worker.cmd('guest_exit()', 10) + guest_worker.cmd("guest_exit()", 10) test.log.info("Trying delete and add again part of ports") # Try to delete ports - for i in range(25): # max port 30 + for i in range(25): # max port 30 _virtio_dev_del(vm, 0, i) time.sleep(pause) guest_worker = qemu_virtio_port.GuestWorker(vm) - guest_worker.cmd('guest_exit()', 10) + guest_worker.cmd("guest_exit()", 10) # Try to add ports - for i in range(5): # max port 30 + for i in range(5): # max port 30 _virtio_dev_add(vm, 0, i, console) time.sleep(pause) guest_worker = qemu_virtio_port.GuestWorker(vm) - guest_worker.cmd('guest_exit()', 10) + guest_worker.cmd("guest_exit()", 10) test.log.info("Trying to add and delete one port 100 times") # Try 100 times add and delete one port. 
@@ -1646,28 +1765,29 @@ def test_hotplug_virtio_pci(): error_context.context("Hotplug while booting", test.log.info) vio_type = get_virtio_serial_name() if "pci" in vio_type: - vio_parent_bus = {'aobject': 'pci.0'} + vio_parent_bus = {"aobject": "pci.0"} else: vio_parent_bus = None vm.wait_for_login() for i in range(int(params.get("virtio_console_loops", 10))): - error_context.context("Hotpluging virtio_pci (iteration %d)" % i, - test.log.info) - new_dev = qdevices.QDevice(vio_type, - {'id': 'virtio_serial_pci%d' % idx}, - parent_bus=vio_parent_bus) + error_context.context( + "Hotpluging virtio_pci (iteration %d)" % i, test.log.info + ) + new_dev = qdevices.QDevice( + vio_type, {"id": "virtio_serial_pci%d" % idx}, parent_bus=vio_parent_bus + ) # Hotplug out, ver_out = vm.devices.simple_hotplug(new_dev, vm.monitor) if not ver_out: - test.error("The device %s isn't hotplugged well, " - "result: %s" % (new_dev.aid, out)) + test.error( + f"The device {new_dev.aid} isn't hotplugged well, " f"result: {out}" + ) time.sleep(pause) # Unplug out, ver_out = vm.devices.simple_unplug(new_dev, vm.monitor) if ver_out is False: - test.fail("Device not unplugged. Iteration: %s, result: %s" % - (i, out)) + test.fail(f"Device not unplugged. Iteration: {i}, result: {out}") # # Destructive tests @@ -1677,7 +1797,7 @@ def test_rw_notconnect_guest(): Try to send to/read from guest on host while guest not recvs/sends any data. """ - use_serialport = params.get('virtio_console_params') == "serialport" + use_serialport = params.get("virtio_console_params") == "serialport" if use_serialport: vm = virtio_test.get_vm_with_ports(no_serialports=1, strict=True) else: @@ -1716,14 +1836,18 @@ def test_rw_notconnect_guest(): test.log.info("Bytes sent to client: %d", sent2) except Exception as inst: - test.log.error('test_rw_notconnect_guest failed: %s', inst) + test.log.error("test_rw_notconnect_guest failed: %s", inst) port.sock.settimeout(None) guest_worker = qemu_virtio_port.GuestWorker(vm) virtio_test.cleanup(vm, guest_worker) raise inst - if (sent1 != sent2): - test.log.warning("Inconsistent behavior: First sent %d bytes and " - "second sent %d bytes", sent1, sent2) + if sent1 != sent2: + test.log.warning( + "Inconsistent behavior: First sent %d bytes and " + "second sent %d bytes", + sent1, + sent2, + ) port.sock.settimeout(None) guest_worker = qemu_virtio_port.GuestWorker(vm) @@ -1736,16 +1860,19 @@ def test_rmmod(): :param cfg: virtio_port_spread - how many devices per virt pci (0=all) """ (vm, guest_worker, port) = virtio_test.get_vm_with_single_port( - params.get('virtio_console_params')) + params.get("virtio_console_params") + ) guest_worker.cleanup() session = vm.wait_for_login() - if session.cmd_status('lsmod | grep virtio_console'): - test.cancel("virtio_console not loaded, probably " - " not compiled as module. Can't test it.") + if session.cmd_status("lsmod | grep virtio_console"): + test.cancel( + "virtio_console not loaded, probably " + " not compiled as module. Can't test it." 
+ ) session.cmd("rmmod -f virtio_console") session.cmd("modprobe virtio_console") guest_worker = qemu_virtio_port.GuestWorker(vm) - guest_worker.cmd("virt.clean_port('%s'),1024" % port.name, 2) + guest_worker.cmd(f"virt.clean_port('{port.name}'),1024", 2) virtio_test.cleanup(vm, guest_worker) def test_max_ports(): @@ -1755,7 +1882,7 @@ def test_max_ports(): :param cfg: virtio_console_params - which type of virtio port to test """ port_count = 30 - if params.get('virtio_console_params') == "serialport": + if params.get("virtio_console_params") == "serialport": test.log.debug("Count of serialports: %d", port_count) vm = virtio_test.get_vm_with_ports(0, port_count, quiet=True) else: @@ -1784,7 +1911,7 @@ def test_stressed_restart(): :param cfg: virtio_port_spread - how many devices per virt pci (0=all) :param cfg: virtio_console_method - reboot method (shell, system_reset) """ - if params.get('virtio_console_params') == 'serialport': + if params.get("virtio_console_params") == "serialport": vm, guest_worker = virtio_test.get_vm_with_worker(no_serialports=1) _ports, ports = virtio_test.get_virtio_ports(vm) else: @@ -1798,38 +1925,43 @@ def test_stressed_restart(): # If more than one, send data on the other ports process = [] for port in ports[1:]: - guest_worker.cmd("virt.close('%s')" % (port.name), 2) - guest_worker.cmd("virt.open('%s')" % (port.name), 2) + guest_worker.cmd(f"virt.close('{port.name}')", 2) + guest_worker.cmd(f"virt.open('{port.name}')", 2) try: - process.append(Popen("dd if=/dev/random of='%s' bs=4096 " - "&>/dev/null &" % port.path)) + process.append( + Popen( + f"dd if=/dev/random of='{port.path}' bs=4096 " "&>/dev/null &" + ) + ) except Exception: pass # Start sending data, it won't finish anyway... - guest_worker._cmd("virt.send('%s', 1024**3, True, is_static=True)" - % ports[0].name, 1) + guest_worker._cmd( + f"virt.send('{ports[0].name}', 1024**3, True, is_static=True)", 1 + ) # Let the computer transfer some bytes :-) time.sleep(2) # Power off the computer try: - vm.reboot(session=session, - method=params.get('virtio_console_method', 'shell'), - timeout=720) + vm.reboot( + session=session, + method=params.get("virtio_console_method", "shell"), + timeout=720, + ) except Exception as details: for process in process: process.terminate() for port in vm.virtio_ports: port.close() - test.fail("Fail to reboot VM:\n%s" % details) + test.fail(f"Fail to reboot VM:\n{details}") # close the virtio ports and process for process in process: process.terminate() for port in vm.virtio_ports: port.close() - error_context.context("Executing basic loopback after reboot.", - test.log.info) + error_context.context("Executing basic loopback after reboot.", test.log.info) test_basic_loopback() @error_context.context_aware @@ -1841,7 +1973,7 @@ def test_unplugged_restart(): :param cfg: virtio_port_spread - how many devices per virt pci (0=all) :param cfg: virtio_console_method - reboot method (shell, system_reset) """ - if params.get('virtio_console_params') == 'serialport': + if params.get("virtio_console_params") == "serialport": vm = virtio_test.get_vm_with_ports(no_serialports=1) else: vm = virtio_test.get_vm_with_ports(no_consoles=1) @@ -1854,16 +1986,18 @@ def test_unplugged_restart(): portdev = vm.devices.get_by_params({"name": port.qemu_id})[0] (result, ver_out) = vm.devices.simple_unplug(portdev, vm.monitor) if not ver_out: - test.fail("Can't unplug port %s: %s" % (port, result)) + test.fail(f"Can't unplug port {port}: {result}") session = vm.wait_for_login() # Power off the computer 
try: - vm.reboot(session=session, - method=params.get('virtio_console_method', 'shell'), - timeout=720) + vm.reboot( + session=session, + method=params.get("virtio_console_method", "shell"), + timeout=720, + ) except Exception as details: - test.fail("Fail to reboot VM:\n%s" % details) + test.fail(f"Fail to reboot VM:\n{details}") # TODO: Hotplug ports and verify that they are usable # VM is missing ports, which are in params. @@ -1879,26 +2013,27 @@ def test_failed_boot(): :param cfg: max_ports_valid - Valid max ports nums for different versions. """ - max_ports_invalid = params['max_ports_invalid'].split(',') - max_ports_valid = params['max_ports_valid'].split(',') + max_ports_invalid = params["max_ports_invalid"].split(",") + max_ports_valid = params["max_ports_valid"].split(",") qemu_version_pattern = params["qemu_version_pattern"] qemu_binary = utils_misc.get_qemu_binary(params) output = str(process.run(qemu_binary + " --version", shell=True)) - re_comp = re.compile(r'\s\d+\.\d+\.\d+') + re_comp = re.compile(r"\s\d+\.\d+\.\d+") output_list = re_comp.findall(output) # high version if re.search(qemu_version_pattern, output_list[0]): - params["extra_params"] = (params["extra_params"] - % (get_virtio_serial_name(), - max_ports_invalid[0])) - exp_error_message = (params['virtio_console_params'] - % max_ports_valid[0]) + params["extra_params"] = params["extra_params"] % ( + get_virtio_serial_name(), + max_ports_invalid[0], + ) + exp_error_message = params["virtio_console_params"] % max_ports_valid[0] else: - params["extra_params"] = (params["extra_params"] - % (get_virtio_serial_name(), - max_ports_invalid[1])) - exp_error_message = (params['virtio_console_params'] % max_ports_valid[1]) + params["extra_params"] = params["extra_params"] % ( + get_virtio_serial_name(), + max_ports_invalid[1], + ) + exp_error_message = params["virtio_console_params"] % max_ports_valid[1] env_process.preprocess(test, params, env) vm = env.get_vm(params["main_vm"]) @@ -1909,9 +2044,10 @@ def test_failed_boot(): test.log.info("Expected qemu failure. Test PASSED.") return else: - test.fail("VM failed to start but error messages " - "don't match.\nExpected:\n%s\nActual:\n%s" - % (exp_error_message, details)) + test.fail( + "VM failed to start but error messages " + f"don't match.\nExpected:\n{exp_error_message}\nActual:\n{details}" + ) test.fail("VM started even though it should fail.") # @@ -1927,11 +2063,11 @@ def test_delete_guest_script(): vm = env.get_vm(params["main_vm"]) session = vm.wait_for_login() out = session.cmd_output("echo on") - if "on" in out: # Linux + if "on" in out: # Linux session.cmd_status("killall python") session.cmd_status("rm -f /tmp/guest_daemon_*") session.cmd_status("rm -f /tmp/virtio_console_guest.py*") - else: # Windows + else: # Windows session.cmd_status("del /F /Q C:\\virtio_console_guest.py*") # @@ -1939,11 +2075,13 @@ def test_delete_guest_script(): # Executes test specified by virtio_console_test variable in cfg # fce = None - _fce = "test_" + params.get('virtio_console_test', '').strip() - error_context.context("Executing test: %s" % _fce, test.log.info) + _fce = "test_" + params.get("virtio_console_test", "").strip() + error_context.context(f"Executing test: {_fce}", test.log.info) if _fce not in locals(): - test.cancel("Test %s doesn't exist. Check 'virtio_console_" - "test' variable in subtest.cfg" % _fce) + test.cancel( + f"Test {_fce} doesn't exist. 
Check 'virtio_console_" + "test' variable in subtest.cfg" + ) else: try: fce = locals()[_fce] diff --git a/qemu/tests/virtio_driver_sign_check.py b/qemu/tests/virtio_driver_sign_check.py index 6711b9e48a..fb79ea39cb 100644 --- a/qemu/tests/virtio_driver_sign_check.py +++ b/qemu/tests/virtio_driver_sign_check.py @@ -1,7 +1,6 @@ import re -from virttest import error_context -from virttest import utils_misc +from virttest import error_context, utils_misc from virttest.utils_windows import virtio_win @@ -17,17 +16,17 @@ def get_driver_file_path(session, params): """ driver_path = params["tested_driver"] media_type = params["virtio_win_media_type"] - get_drive_letter = getattr(virtio_win, "drive_letter_%s" % media_type) + get_drive_letter = getattr(virtio_win, f"drive_letter_{media_type}") drive_letter = get_drive_letter(session) - get_product_dirname = getattr(virtio_win, - "product_dirname_%s" % media_type) + get_product_dirname = getattr(virtio_win, f"product_dirname_{media_type}") guest_name = get_product_dirname(session) - get_arch_dirname = getattr(virtio_win, "arch_dirname_%s" % media_type) + get_arch_dirname = getattr(virtio_win, f"arch_dirname_{media_type}") guest_arch = get_arch_dirname(session) - path = ("{letter}\\{driver}\\{name}\\{arch}\\" if media_type == "iso" - else "{letter}\\{arch}\\{name}\\{driver}").format( - letter=drive_letter, driver=driver_path, - name=guest_name, arch=guest_arch) + path = ( + "{letter}\\{driver}\\{name}\\{arch}\\" + if media_type == "iso" + else "{letter}\\{arch}\\{name}\\{driver}" + ).format(letter=drive_letter, driver=driver_path, name=guest_name, arch=guest_arch) return drive_letter, path @@ -53,24 +52,22 @@ def run(test, params, env): verify_option = params["verify_option"] try: - error_context.context("Running SignTool check test in guest...", - test.log.info) + error_context.context("Running SignTool check test in guest...", test.log.info) file_type = [".cat", ".sys", ".inf", "Wdf"] # Add a workaround for pvpanic, as there are pvpanic-pci files # include in the latest prewhql version, # they are for arm support and we no need to test them currently. 
if "pvpanic" in drv_name: - file_type = ["%s.cat" % drv_name, ".sys", "%s.inf" % drv_name, "Wdf"] + file_type = [f"{drv_name}.cat", ".sys", f"{drv_name}.inf", "Wdf"] tested_list = [] viowin_letter, path = get_driver_file_path(session, params) for ftype in file_type: cmd = list_files_cmd % (viowin_letter, path, ftype) list_file = session.cmd_output(cmd, timeout) - driver_file = re.findall(r".*%s$" % ftype, list_file, re.M) + driver_file = re.findall(rf".*{ftype}$", list_file, re.M) tested_list.extend(driver_file) if (len(tested_list) < 3) or (".cat" not in tested_list[0]): - test.fail("The tested files were not included in %s disk" - % viowin_letter) + test.fail(f"The tested files were not included in {viowin_letter} disk") signtool_cmd = utils_misc.set_winutils_letter(session, signtool_cmd) check_info = "Number of files successfully Verified: (1)" for driver_file in tested_list[1:]: @@ -78,7 +75,8 @@ def run(test, params, env): status, output = session.cmd_status_output(test_cmd) sign_num = re.findall(check_info, output)[0] if (status != 0) or (int(sign_num) != 1): - test.fail("%s signtool verify failed, check the output details:\n %s" - % (driver_file, output)) + test.fail( + f"{driver_file} signtool verify failed, check the output details:\n {output}" + ) finally: session.close() diff --git a/qemu/tests/virtio_fs_group_permission_access.py b/qemu/tests/virtio_fs_group_permission_access.py index 81eade9c07..bc8be623e4 100644 --- a/qemu/tests/virtio_fs_group_permission_access.py +++ b/qemu/tests/virtio_fs_group_permission_access.py @@ -1,11 +1,8 @@ import os import aexpect - -from virttest import nfs from avocado.utils import process -from virttest import env_process -from virttest import error_context, utils_test, utils_disk, utils_misc +from virttest import env_process, error_context, nfs, utils_disk, utils_misc, utils_test from provider import virtio_fs_utils, win_driver_utils @@ -41,10 +38,9 @@ def run(test, params, env): cmd_run_virtiofsd = params.get("cmd_run_virtiofsd") # nfs config - setup_local_nfs = params.get('setup_local_nfs') + setup_local_nfs = params.get("setup_local_nfs") - fs_source = params.get('fs_source_dir') - host_session = None + fs_source = params.get("fs_source_dir") guest_session = None vm = None nfs_local = None @@ -52,26 +48,31 @@ def run(test, params, env): try: for _username in username: add_cmd = add_user_cmd % _username - if process.system("id %s" % _username, shell=True, - ignore_status=True) == 0: + if process.system(f"id {_username}", shell=True, ignore_status=True) == 0: s, o = process.getstatusoutput(del_user_cmd % _username) if s: if "is currently used by process" in o: - test.error("The common user is used by other process," - " pls check on your host.") + test.error( + "The common user is used by other process," + " pls check on your host." 
+ ) else: - test.fail("Unknown error when deleting the " - "user: %s" % o) - if process.system("grep %s /etc/group" % _username, shell=True, - ignore_status=True) == 0: - add_cmd = "useradd -g %s %s" % (_username, _username) + test.fail("Unknown error when deleting the " f"user: {o}") + if ( + process.system( + f"grep {_username} /etc/group", shell=True, ignore_status=True + ) + == 0 + ): + add_cmd = f"useradd -g {_username} {_username}" process.run(add_cmd) user_one, user_two = username[0], username[-1] # create the folder before daemon running shared_dir = os.path.join("/home/" + user_one, fs_source) if not os.path.exists(shared_dir): - process.system("runuser -l " + user_one + " -c 'mkdir -p " + - shared_dir + "'") + process.system( + "runuser -l " + user_one + " -c 'mkdir -p " + shared_dir + "'" + ) if setup_local_nfs: # delete the slash at the end @@ -80,34 +81,35 @@ def run(test, params, env): nfs_local.setup() # change permission of u1 home directory - output = process.system_output("chmod -R 777 /home/%s" % user_one) + output = process.system_output(f"chmod -R 777 /home/{user_one}") error_context.context(output, test.log.info) # change user u2's supplementary group to u1 - output = process.system_output("usermod -G %s %s" % (user_one, - user_two)) + output = process.system_output(f"usermod -G {user_one} {user_two}") error_context.context(output, test.log.info) # set fs daemon config - sock_path = os.path.join("/home/" + user_two, - '-'.join(('avocado-vt-vm1', 'viofs', - 'virtiofsd.sock'))) + sock_path = os.path.join( + "/home/" + user_two, "-".join(("avocado-vt-vm1", "viofs", "virtiofsd.sock")) + ) # create the file - with open(sock_path, "w") as fd: + with open(sock_path, "w"): pass - params['fs_source_user_sock_path'] = sock_path + params["fs_source_user_sock_path"] = sock_path # run daemon cmd_run_virtiofsd = cmd_run_virtiofsd % sock_path - cmd_run_virtiofsd += " --shared-dir %s" % shared_dir - error_context.context('Running daemon command %s' % cmd_run_virtiofsd, - test.log.info) - - host_session = aexpect.ShellSession("runuser -l " + user_two + - " -c '" + cmd_run_virtiofsd + "'", - auto_close=False, - output_func=utils_misc.log_line, - output_params=('virtiofs_fs-virtiofs.log',), - prompt=r"^\[.*\][\#\$]\s*$") + cmd_run_virtiofsd += f" --shared-dir {shared_dir}" + error_context.context( + f"Running daemon command {cmd_run_virtiofsd}", test.log.info + ) + + aexpect.ShellSession( + "runuser -l " + user_two + " -c '" + cmd_run_virtiofsd + "'", + auto_close=False, + output_func=utils_misc.log_line, + output_params=("virtiofs_fs-virtiofs.log",), + prompt=r"^\[.*\][\#\$]\s*$", + ) params["fs_source_base_dir"] = "/home/" + user_one params["start_vm"] = "yes" @@ -117,22 +119,25 @@ def run(test, params, env): guest_session = vm.wait_for_login() if windows: - guest_session = utils_test.qemu.windrv_check_running_verifier(guest_session, - vm, - test, - driver_name) + guest_session = utils_test.qemu.windrv_check_running_verifier( + guest_session, vm, test, driver_name + ) virtio_fs_utils.run_viofs_service(test, params, guest_session) else: - error_context.context("Create a destination directory %s " - "inside guest." % fs_dest, test.log.info) + error_context.context( + f"Create a destination directory {fs_dest} " "inside guest.", + test.log.info, + ) if not utils_misc.make_dirs(fs_dest, session=guest_session): test.fail("Creating directory was failed!") - error_context.context("Mount virtiofs target %s to %s inside" - " guest." 
% (fs_target, fs_dest), - test.log.info) - if not utils_disk.mount(fs_target, fs_dest, 'virtiofs', - session=guest_session): - test.fail('Mount virtiofs target failed.') + error_context.context( + f"Mount virtiofs target {fs_target} to {fs_dest} inside" " guest.", + test.log.info, + ) + if not utils_disk.mount( + fs_target, fs_dest, "virtiofs", session=guest_session + ): + test.fail("Mount virtiofs target failed.") virtio_fs_utils.basic_io_test(test, params, guest_session) finally: if guest_session: @@ -142,8 +147,7 @@ def run(test, params, env): # based on driver verifier is enabled win_driver_utils.memory_leak_check(vm, test, params) else: - utils_disk.umount(fs_target, fs_dest, 'virtiofs', - session=guest_session) + utils_disk.umount(fs_target, fs_dest, "virtiofs", session=guest_session) utils_misc.safe_rmdir(fs_dest, session=guest_session) if vm and vm.is_alive(): @@ -155,8 +159,9 @@ def run(test, params, env): for _username in username[::-1]: output = process.run(del_user_cmd % _username) if "is currently used by process" in output.stdout_text: - error_context.context("Kill process before delete user...", - test.log.info) + error_context.context( + "Kill process before delete user...", test.log.info + ) pid = output.split(" ")[-1] - process.run("kill -9 %s" % pid) - process.run("rm -rf /home/%s" % _username) + process.run(f"kill -9 {pid}") + process.run(f"rm -rf /home/{_username}") diff --git a/qemu/tests/virtio_fs_host_owner_win.py b/qemu/tests/virtio_fs_host_owner_win.py index 3173d296dd..cd9ad3ad9d 100644 --- a/qemu/tests/virtio_fs_host_owner_win.py +++ b/qemu/tests/virtio_fs_host_owner_win.py @@ -1,17 +1,12 @@ import os -import re -import time import random +import re import string +import time import aexpect - from avocado.utils import process - -from virttest import env_process -from virttest import error_context -from virttest import utils_misc -from virttest import utils_test +from virttest import env_process, error_context, utils_misc, utils_test from virttest.qemu_devices import qdevices from provider import virtio_fs_utils @@ -42,8 +37,7 @@ def get_user_uid_gid(user_name): """ Get user's UID and GID on host. """ - output = process.system_output("id %s" % user_name, - shell=True).decode().strip() + output = process.system_output(f"id {user_name}", shell=True).decode().strip() pattern = r"uid=(\d*).*gid=(\d*)" match_string = re.findall(pattern, output) uid_user = match_string[0][0] @@ -57,10 +51,10 @@ def enable_uid_gid(uid_gid_value): enable_cmd = params["viofs_owner_enable_cmd"] % uid_gid_value s, o = session.cmd_status_output(enable_cmd) if s: - test.fail("Fail command: %s. Output: %s." % - (enable_cmd, o)) - error_context.context("Restart virtiofs service after modify" - " the registry.", test.log.info) + test.fail(f"Fail command: {enable_cmd}. Output: {o}.") + error_context.context( + "Restart virtiofs service after modify" " the registry.", test.log.info + ) virtio_fs_utils.stop_viofs_service(test, params, session) virtio_fs_utils.start_viofs_service(test, params, session) time.sleep(1) @@ -70,33 +64,33 @@ def check_file_uid_gid(volume_letter, shared_dir, expect_id): Check file UID and GID on host. 
""" error_context.context("Create a file in shared dir.", test.log.info) - file_name = "file_" + ''.join(random.sample(string.ascii_letters + - string.digits, 3)) + file_name = "file_" + "".join( + random.sample(string.ascii_letters + string.digits, 3) + ) guest_file = volume_letter + ":\\" + file_name session.cmd(create_file_cmd % guest_file, io_timeout) - error_context.context("Check the file's UID and GID on host.", - test.log.info) + error_context.context("Check the file's UID and GID on host.", test.log.info) host_file = os.path.join(shared_dir, file_name) - output = process.system_output("ls -l %s" % host_file, - shell=True).decode().strip() + output = ( + process.system_output(f"ls -l {host_file}", shell=True).decode().strip() + ) owner = output.split()[2] group = output.split()[3] - if process.system("id %s -u" % owner, shell=True, ignore_status=True): + if process.system(f"id {owner} -u", shell=True, ignore_status=True): uid = owner else: - uid = process.system_output("id %s -u" % owner, - shell=True).decode().strip() - if process.system("id %s -g" % group, shell=True, ignore_status=True): + uid = process.system_output(f"id {owner} -u", shell=True).decode().strip() + if process.system(f"id {group} -g", shell=True, ignore_status=True): gid = group else: - gid = process.system_output("id %s -g" % group, - shell=True).decode().strip() + gid = process.system_output(f"id {group} -g", shell=True).decode().strip() uid_gid_host = uid + ":" + gid if uid_gid_host != expect_id: - test.fail("Check file owner/group failed, " - "real value is %s, " - "expected value is %s" % - (uid_gid_host, expect_id)) + test.fail( + "Check file owner/group failed, " + f"real value is {uid_gid_host}, " + f"expected value is {expect_id}" + ) fs_target = params.get("fs_target") create_file_cmd = params.get("create_file_cmd") @@ -107,50 +101,58 @@ def check_file_uid_gid(volume_letter, shared_dir, expect_id): user_name = params["new_user"] add_user_cmd = params["add_user_cmd"] del_user_cmd = params["del_user_cmd"] - if process.system("id %s" % user_name, shell=True, - ignore_status=True) == 0: + if process.system(f"id {user_name}", shell=True, ignore_status=True) == 0: s, o = process.getstatusoutput(del_user_cmd) if s: if "is currently used by process" in o: - test.error("The common user is used by other process," - " pls check on your host.") + test.error( + "The common user is used by other process," + " pls check on your host." 
+ ) else: - test.fail("Unknown error when deleting the " - "user: %s" % o) - if process.system("grep %s /etc/group" % user_name, shell=True, - ignore_status=True) == 0: - add_user_cmd = "useradd -g %s %s" % (user_name, user_name) + test.fail("Unknown error when deleting the " f"user: {o}") + if ( + process.system( + f"grep {user_name} /etc/group", shell=True, ignore_status=True + ) + == 0 + ): + add_user_cmd = f"useradd -g {user_name} {user_name}" process.run(add_user_cmd) # config socket - sock_path = os.path.join("/home/" + user_name, - '-'.join(('avocado-vt-vm1', 'viofs', - 'virtiofsd.sock'))) + sock_path = os.path.join( + "/home/" + user_name, + "-".join(("avocado-vt-vm1", "viofs", "virtiofsd.sock")), + ) # create the socket file before daemon running - with open(sock_path, "w") as fd: + with open(sock_path, "w"): pass - params['fs_source_user_sock_path'] = sock_path + params["fs_source_user_sock_path"] = sock_path # create the folder fs_source = params.get("fs_source_dir") shared_dir = os.path.join("/home/" + user_name, fs_source) if not os.path.exists(shared_dir): - process.system("runuser -l " + user_name + " -c 'mkdir -p " + - shared_dir + "'") + process.system( + "runuser -l " + user_name + " -c 'mkdir -p " + shared_dir + "'" + ) # start daemon with a common user cmd_run_virtiofsd = params["cmd_run_virtiofsd"] % sock_path - cmd_run_virtiofsd += " --shared-dir %s" % shared_dir - error_context.context('Running daemon command %s with %s user.' % - (cmd_run_virtiofsd, user_name), - test.log.info) - - aexpect.ShellSession("runuser -l " + user_name + - " -c '" + cmd_run_virtiofsd + "'", - auto_close=False, - output_func=utils_misc.log_line, - output_params=('virtiofs_fs-virtiofs.log',), - prompt=r"^\[.*\][\#\$]\s*$") + cmd_run_virtiofsd += f" --shared-dir {shared_dir}" + error_context.context( + f"Running daemon command {cmd_run_virtiofsd} with {user_name} user.", + test.log.info, + ) + + aexpect.ShellSession( + "runuser -l " + user_name + " -c '" + cmd_run_virtiofsd + "'", + auto_close=False, + output_func=utils_misc.log_line, + output_params=("virtiofs_fs-virtiofs.log",), + prompt=r"^\[.*\][\#\$]\s*$", + ) params["start_vm"] = "yes" env_process.preprocess_vm(test, params, env, params["main_vm"]) @@ -158,14 +160,15 @@ def check_file_uid_gid(volume_letter, shared_dir, expect_id): vm.verify_alive() session = vm.wait_for_login() - error_context.context("Change the shared dir's owner and group" - " to 'test' on host.", test.log.info) + error_context.context( + "Change the shared dir's owner and group" " to 'test' on host.", test.log.info + ) if params.get("privileged", "") == "yes": # get shared dir by qdevices. 
shared_dir = None for device in vm.devices: if isinstance(device, qdevices.QVirtioFSDev): - shared_dir = device.get_param('source') + shared_dir = device.get_param("source") change_source_owner = params["change_source_owner"] % shared_dir process.run(change_source_owner) @@ -175,30 +178,33 @@ def check_file_uid_gid(volume_letter, shared_dir, expect_id): # Check whether windows driver is running,and enable driver verifier driver_name = params["driver_name"] - session = utils_test.qemu.windrv_check_running_verifier(session, - vm, test, - driver_name) + session = utils_test.qemu.windrv_check_running_verifier( + session, vm, test, driver_name + ) virtio_fs_utils.run_viofs_service(test, params, session) # get shared volume letter - volume_letter = virtio_fs_utils.get_virtiofs_driver_letter(test, - fs_target, - session) + volume_letter = virtio_fs_utils.get_virtiofs_driver_letter(test, fs_target, session) # set matching table for test uid:gid and expected uid:gid. if params.get("privileged", "") == "yes": uid_gid_test_user = get_user_uid_gid("test") - dict_ids = {"null": uid_gid_test_user, - "0:0": "0:0", - "11111:11111": "11111:11111"} + dict_ids = { + "null": uid_gid_test_user, + "0:0": "0:0", + "11111:11111": "11111:11111", + } else: uid_gid_new_user = get_user_uid_gid(user_name) - dict_ids = {"null": uid_gid_new_user, - "0:0": uid_gid_new_user, - uid_gid_new_user: uid_gid_new_user} + dict_ids = { + "null": uid_gid_new_user, + "0:0": uid_gid_new_user, + uid_gid_new_user: uid_gid_new_user, + } # set UID/GID to virtiofs service and check the created file on host. for test_value, expect_value in dict_ids.items(): - error_context.context("Set host UID:GID=%s to viofs" - " service." % test_value, test.log.info) + error_context.context( + f"Set host UID:GID={test_value} to viofs" " service.", test.log.info + ) s, o = session.cmd_status_output(params["viofs_owner_query_cmd"]) if s == 0: test.log.info("Delete owner key and value from registry.") diff --git a/qemu/tests/virtio_fs_hotplug.py b/qemu/tests/virtio_fs_hotplug.py index 044ff50151..61b9becfee 100644 --- a/qemu/tests/virtio_fs_hotplug.py +++ b/qemu/tests/virtio_fs_hotplug.py @@ -3,14 +3,9 @@ import time from avocado.utils import process - -from virttest import data_dir -from virttest import error_context -from virttest import utils_disk -from virttest import utils_misc -from virttest import utils_test -from virttest.utils_windows import virtio_win +from virttest import data_dir, error_context, utils_disk, utils_misc, utils_test from virttest.qemu_devices import qdevices +from virttest.utils_windows import virtio_win from provider import win_driver_utils @@ -42,10 +37,9 @@ def get_viofs_exe(session): test.log.info("Get virtiofs exe full path.") media_type = params["virtio_win_media_type"] try: - get_drive_letter = getattr(virtio_win, "drive_letter_%s" % media_type) - get_product_dirname = getattr(virtio_win, - "product_dirname_%s" % media_type) - get_arch_dirname = getattr(virtio_win, "arch_dirname_%s" % media_type) + get_drive_letter = getattr(virtio_win, f"drive_letter_{media_type}") + get_product_dirname = getattr(virtio_win, f"product_dirname_{media_type}") + get_arch_dirname = getattr(virtio_win, f"arch_dirname_{media_type}") except AttributeError: test.error("Not supported virtio win media type '%s'", media_type) viowin_ltr = get_drive_letter(session) @@ -58,9 +52,9 @@ def get_viofs_exe(session): if not guest_arch: test.error("Could not get architecture dirname of the vm") - exe_middle_path = ("{name}\\{arch}" if media_type == "iso" - 
else "{arch}\\{name}").format(name=guest_name, - arch=guest_arch) + exe_middle_path = ( + "{name}\\{arch}" if media_type == "iso" else "{arch}\\{name}" + ).format(name=guest_name, arch=guest_arch) exe_file_name = "virtiofs.exe" exe_find_cmd = 'dir /b /s %s\\%s | findstr "\\%s\\\\"' exe_find_cmd %= (viowin_ltr, exe_file_name, exe_middle_path) @@ -72,25 +66,26 @@ def config_win_before_test(session): """ Only for windows guest, enable driver verifier and install winscp. """ - error_context.context("Do driver verify and winfsp installation" - " in windows guest.", test.log.info) + error_context.context( + "Do driver verify and winfsp installation" " in windows guest.", + test.log.info, + ) check_installed_cmd = params["check_installed_cmd"] % install_path # Check whether windows driver is running,and enable driver verifier - session = utils_test.qemu.windrv_check_running_verifier(session, - vm, test, - driver_name) + session = utils_test.qemu.windrv_check_running_verifier( + session, vm, test, driver_name + ) # install winfsp tool - error_context.context("Install winfsp for windows guest.", - test.log.info) + error_context.context("Install winfsp for windows guest.", test.log.info) installed = session.cmd_status(check_installed_cmd) == 0 if installed: test.log.info("Winfsp tool is already installed.") else: - install_cmd = utils_misc.set_winutils_letter(session, - params["install_cmd"]) + install_cmd = utils_misc.set_winutils_letter(session, params["install_cmd"]) session.cmd(install_cmd, 60) - if not utils_misc.wait_for(lambda: not session.cmd_status( - check_installed_cmd), 60): + if not utils_misc.wait_for( + lambda: not session.cmd_status(check_installed_cmd), 60 + ): test.error("Winfsp tool is not installed.") return session @@ -98,21 +93,26 @@ def mount_guest_fs(session): """ Mount virtiofs on linux guest. """ - error_context.context("Create a destination directory %s " - "inside guest." % fs_dest, test.log.info) + error_context.context( + f"Create a destination directory {fs_dest} " "inside guest.", + test.log.info, + ) utils_misc.make_dirs(fs_dest, session) - error_context.context("Mount virtiofs target %s to %s inside" - " guest." % (fs_target, fs_dest), - test.log.info) - if not utils_disk.mount(fs_target, fs_dest, 'virtiofs', session=session): - test.fail('Mount virtiofs target failed.') + error_context.context( + f"Mount virtiofs target {fs_target} to {fs_dest} inside" " guest.", + test.log.info, + ) + if not utils_disk.mount(fs_target, fs_dest, "virtiofs", session=session): + test.fail("Mount virtiofs target failed.") def start_vfs_service(session): """ Start virtiofs service in windows guest """ error_context.context("Start virtiofs service in windows guest.", test.log.info) - test.log.info("Check if virtiofs service is registered with %s." 
% viofs_sc_query_cmd) + test.log.info( + "Check if virtiofs service is registered with %s.", viofs_sc_query_cmd + ) status, output = session.cmd_status_output(viofs_sc_query_cmd) if "not exist as an installed service" in output: test.log.info("Register virtiofs service in windows guest.") @@ -122,29 +122,33 @@ def start_vfs_service(session): session.cmd(params.get("viofs_exe_copy_cmd") % exe_path) sc_create_s, sc_create_o = session.cmd_status_output(viofs_sc_create_cmd) if sc_create_s != 0: - test.fail("Failed to register virtiofs service, output is %s" % sc_create_o) + test.fail( + f"Failed to register virtiofs service, output is {sc_create_o}" + ) test.log.info("Check if virtiofs service is started.") status, output = session.cmd_status_output(viofs_sc_query_cmd) if "RUNNING" not in output: test.log.info("Start virtiofs service.") sc_start_s, sc_start_o = session.cmd_status_output(viofs_sc_start_cmd) if sc_start_s != 0: - test.fail("Failed to start virtiofs service, output is %s" % sc_start_o) + test.fail(f"Failed to start virtiofs service, output is {sc_start_o}") test.log.info("Virtiofs service is running.") # enable debug log. viofs_debug_enable_cmd = params.get("viofs_debug_enable_cmd") viofs_log_enable_cmd = params.get("viofs_log_enable_cmd") if viofs_debug_enable_cmd and viofs_log_enable_cmd: - error_context.context("Check if virtiofs debug log is enabled in guest.", test.log.info) + error_context.context( + "Check if virtiofs debug log is enabled in guest.", test.log.info + ) cmd = params.get("viofs_reg_query_cmd") ret = session.cmd_output(cmd) if "debugflags" not in ret.lower() or "debuglogfile" not in ret.lower(): error_context.context("Configure virtiofs debug log.", test.log.info) for reg_cmd in (viofs_debug_enable_cmd, viofs_log_enable_cmd): - error_context.context("Set %s " % reg_cmd, test.log.info) + error_context.context(f"Set {reg_cmd} ", test.log.info) s, o = session.cmd_status_output(reg_cmd) if s: - test.fail("Fail command: %s. Output: %s" % (reg_cmd, o)) + test.fail(f"Fail command: {reg_cmd}. Output: {o}") error_context.context("Reboot guest.", test.log.info) session = vm.reboot() # sleep a while after os reboot @@ -158,24 +162,28 @@ def get_win_dst_dir(session): get fs dest for windows vm. """ virtio_fs_disk_label = fs_target - error_context.context("Get Volume letter of virtio fs target, the disk" - "lable is %s." % virtio_fs_disk_label, - test.log.info) - vol_con = "VolumeName='%s'" % virtio_fs_disk_label + error_context.context( + "Get Volume letter of virtio fs target, the disk" + f"lable is {virtio_fs_disk_label}.", + test.log.info, + ) + vol_con = f"VolumeName='{virtio_fs_disk_label}'" volume_letter = utils_misc.wait_for( - lambda: utils_misc.get_win_disk_vol(session, condition=vol_con), 60) + lambda: utils_misc.get_win_disk_vol(session, condition=vol_con), 60 + ) if volume_letter is None: test.fail("Could not get virtio-fs mounted volume letter.") - return volume_letter, "%s:" % volume_letter + return volume_letter, f"{volume_letter}:" def run_io_test(session, volume_letter, fs_dest): """ Run io test on the shared dir. """ - error_context.context("Run io test on the %s." % fs_dest, test.log.info) + error_context.context(f"Run io test on the {fs_dest}.", test.log.info) guest_file = os.path.join(fs_dest, test_file) - error_context.context("Creating file under %s inside " - "guest." 
% fs_dest, test.log.info) + error_context.context( + f"Creating file under {fs_dest} inside " "guest.", test.log.info + ) session.cmd(cmd_dd % guest_file, io_timeout) if os_type == "linux": cmd_md5_vm = cmd_md5 % guest_file @@ -185,14 +193,17 @@ def run_io_test(session, volume_letter, fs_dest): md5_guest = session.cmd_output(cmd_md5_vm, io_timeout).strip().split()[0] test.log.info(md5_guest) - md5_host = process.run("md5sum %s" % host_data, - io_timeout).stdout_text.strip().split()[0] + md5_host = ( + process.run(f"md5sum {host_data}", io_timeout) + .stdout_text.strip() + .split()[0] + ) if md5_guest != md5_host: test.fail("The md5 value of host is not the same with guest.") error_context.context("Unmount the point after finish io test", test.log.info) if os_type == "linux": - utils_disk.umount(fs_target, fs_dest, 'virtiofs', session=session) + utils_disk.umount(fs_target, fs_dest, "virtiofs", session=session) utils_misc.safe_rmdir(fs_dest, session=session) def io_test_after_hotplug(session, fs_dest): @@ -213,8 +224,7 @@ def create_fs_devices(fs_name, fs_params): """ Create fs devices. """ - return vm.devices.fs_define_by_params( - fs_name, fs_params) + return vm.devices.fs_define_by_params(fs_name, fs_params) def get_fs_devices(fs_target): """ @@ -232,36 +242,42 @@ def plug_fs_devices(action, plug_devices): """ Plug/unplug fs devices. """ - plug_devices = plug_devices if action == 'hotplug' else plug_devices[::-1] + plug_devices = plug_devices if action == "hotplug" else plug_devices[::-1] for dev in plug_devices: - error_context.context("%s %s device (iteration %d)" % - (action.capitalize(), dev.get_qid(), - iteration), test.log.info) + error_context.context( + "%s %s device (iteration %d)" + % (action.capitalize(), dev.get_qid(), iteration), + test.log.info, + ) if isinstance(dev, qdevices.CharDevice): dev.set_param("server", "off") if isinstance(dev, qdevices.QDevice) and action == "hotplug": - if "q35" in params['machine_type'] or "arm64-pci" in params[ - 'machine_type']: - parent_bus = 'pcie_extra_root_port_%s' % index - elif 's390' in params['machine_type']: - parent_bus = 'virtual-css' + if ( + "q35" in params["machine_type"] + or "arm64-pci" in params["machine_type"] + ): + parent_bus = f"pcie_extra_root_port_{index}" + elif "s390" in params["machine_type"]: + parent_bus = "virtual-css" else: - parent_bus = 'pci.0' - parent_bus_obj = vm.devices.get_buses({'aobject': parent_bus})[0] - ret = getattr(vm.devices, 'simple_%s' % action)(dev, vm.monitor, bus=parent_bus_obj) + parent_bus = "pci.0" + parent_bus_obj = vm.devices.get_buses({"aobject": parent_bus})[0] + ret = getattr(vm.devices, f"simple_{action}")( + dev, vm.monitor, bus=parent_bus_obj + ) if not ret[1]: - test.fail("Failed to hotplug '%s'" % dev) + test.fail(f"Failed to hotplug '{dev}'") continue - ret = getattr(vm.devices, 'simple_%s' % action)(dev, vm.monitor) + ret = getattr(vm.devices, f"simple_{action}")(dev, vm.monitor) if not ret[1]: - test.fail("Failed to hotplug '%s'" % dev) + test.fail(f"Failed to hotplug '{dev}'") - test_file = params.get('test_file') - cmd_dd = params.get('cmd_dd') - cmd_md5 = params.get('cmd_md5') - io_timeout = params.get_numeric('io_timeout') + test_file = params.get("test_file") + cmd_dd = params.get("cmd_dd") + cmd_md5 = params.get("cmd_md5") + io_timeout = params.get_numeric("io_timeout") install_path = params.get("install_path") - need_plug = params.get("need_plug", 'no') == "yes" + need_plug = params.get("need_plug", "no") == "yes" # windows config viofs_sc_create_cmd = 
params.get("viofs_sc_create_cmd") @@ -276,7 +292,7 @@ def plug_fs_devices(action, plug_devices): unplug_devs = [] if need_plug: - params["filesystems"] = params.get('extra_filesystems') + params["filesystems"] = params.get("extra_filesystems") try: for iteration in range(int(params.get("repeat_times", 3))): for index, fs in enumerate(params.objects("filesystems")): @@ -284,8 +300,7 @@ def plug_fs_devices(action, plug_devices): fs_target = fs_params.get("fs_target") fs_dest = fs_params.get("fs_dest") fs_source = fs_params.get("fs_source_dir") - base_dir = fs_params.get("fs_source_base_dir", - data_dir.get_data_dir()) + base_dir = fs_params.get("fs_source_base_dir", data_dir.get_data_dir()) if not os.path.isabs(fs_source): fs_source = os.path.join(base_dir, fs_source) host_data = os.path.join(fs_source, test_file) @@ -297,19 +312,18 @@ def plug_fs_devices(action, plug_devices): os.makedirs(fs_source) fs_devs = create_fs_devices(fs, fs_params) - plug_fs_devices('hotplug', fs_devs) + plug_fs_devices("hotplug", fs_devs) session = io_test_after_hotplug(session, fs_dest) - unplug_devs.extend(fs_devs if need_plug else - get_fs_devices(fs_target)) - plug_fs_devices('unplug', unplug_devs) + unplug_devs.extend(fs_devs if need_plug else get_fs_devices(fs_target)) + plug_fs_devices("unplug", unplug_devs) del unplug_devs[:] # for windows guest, disable/uninstall driver to get memory leak based on # driver verifier is enabled if os_type == "windows" and need_plug: - plug_fs_devices('hotplug', fs_devs) + plug_fs_devices("hotplug", fs_devs) win_driver_utils.memory_leak_check(vm, test, params) finally: if os_type == "linux": - utils_disk.umount(fs_target, fs_dest, 'virtiofs', session=session) + utils_disk.umount(fs_target, fs_dest, "virtiofs", session=session) utils_misc.safe_rmdir(fs_dest, session=session) diff --git a/qemu/tests/virtio_fs_map_uid_gid.py b/qemu/tests/virtio_fs_map_uid_gid.py index c694efd0a8..942c1e3c09 100644 --- a/qemu/tests/virtio_fs_map_uid_gid.py +++ b/qemu/tests/virtio_fs_map_uid_gid.py @@ -1,14 +1,10 @@ import os import random import string -import aexpect +import aexpect from avocado.utils import process - -from virttest import env_process -from virttest import error_context -from virttest import utils_misc -from virttest import utils_disk +from virttest import env_process, error_context, utils_disk, utils_misc @error_context.context_aware @@ -42,9 +38,11 @@ def get_sub_uid_gid(id_type): """ Get the host common user's sub uid/gid and count in host. """ - output = process.system_output("cat /etc/%s |grep %s" % - (id_type, user_name), - shell=True).decode().strip() + output = ( + process.system_output(f"cat /etc/{id_type} |grep {user_name}", shell=True) + .decode() + .strip() + ) id_begin = output.split(":")[1] id_count = output.split(":")[2] return id_begin, id_count @@ -55,22 +53,26 @@ def create_file_in_guest(user_guest): root user or a common user. only for linux guest. """ - file_name = "file_" + ''.join(random.sample(string.ascii_letters + - string.digits, 3)) + file_name = "file_" + "".join( + random.sample(string.ascii_letters + string.digits, 3) + ) guest_file = os.path.join(fs_dest, file_name) - error_context.context("Create a file in shared dir " - "with %s user." 
% user_guest, test.log.info) + error_context.context( + "Create a file in shared dir " f"with {user_guest} user.", test.log.info + ) if user_guest != "root": - if session.cmd_status("id %s" % user_guest) == 0: + if session.cmd_status(f"id {user_guest}") == 0: session.cmd(del_user_cmd % user_guest) session.cmd(add_user_cmd % user_guest) guest_user_session = vm.wait_for_login() - guest_user_session.cmd("su %s" % user_guest) + guest_user_session.cmd(f"su {user_guest}") s = guest_user_session.cmd_status(create_file_cmd % guest_file) if map_type == "one_to_one" and s == 0: - test.fail("Only mapping root user to host, so the common user" - " should have no permission to" - " create a file in the shared dir.") + test.fail( + "Only mapping root user to host, so the common user" + " should have no permission to" + " create a file in the shared dir." + ) guest_user_session.close() else: session.cmd(create_file_cmd % guest_file) @@ -86,36 +88,37 @@ def get_expect_user_id(user_guest): uid/gid should be the same with the common user in guest, while it's uid_begin/gid_begin + ${id_common_user} in host. """ - error_context.context("Get the user %s's uid:gid from guest." % - user_guest, test.log.info) - user_uid = session.cmd_output("id %s -u" % user_guest).strip() - user_gid = session.cmd_output("id %s -g" % user_guest).strip() + error_context.context( + f"Get the user {user_guest}'s uid:gid from guest.", test.log.info + ) + user_uid = session.cmd_output(f"id {user_guest} -u").strip() + user_gid = session.cmd_output(f"id {user_guest} -g").strip() - expect_id = {"guest": user_uid + ":" + user_gid, - "host": "%s:%s" % (str(int(uid_begin) + int(user_uid)), - str(int(gid_begin) + int(user_gid)))} - test.log.info("The expected file's id is %s for %s" % (expect_id, - user_guest)) + expect_id = { + "guest": user_uid + ":" + user_gid, + "host": f"{str(int(uid_begin) + int(user_uid))}:{str(int(gid_begin) + int(user_gid))}", + } + test.log.info("The expected file's id is %s for %s", expect_id, user_guest) return expect_id def get_file_owner_id_guest(file_name): """ Get the created file's uid and gid in guest. """ - error_context.context("Get the file %s's uid:gid in guest." - % file_name, test.log.info) + error_context.context( + f"Get the file {file_name}'s uid:gid in guest.", test.log.info + ) cmd_get_file = "ls -l %s" guest_file = os.path.join(fs_dest, file_name) - output_guest = session.cmd_output(cmd_get_file % - guest_file).strip() + output_guest = session.cmd_output(cmd_get_file % guest_file).strip() owner_guest = output_guest.split()[2] group_guest = output_guest.split()[3] - s, o = session.cmd_status_output("id %s -u" % owner_guest) + s, o = session.cmd_status_output(f"id {owner_guest} -u") if s: uid = owner_guest else: uid = o.strip() - s, o = session.cmd_status_output("id %s -g" % group_guest) + s, o = session.cmd_status_output(f"id {group_guest} -g") if s: gid = group_guest else: @@ -124,28 +127,34 @@ def get_file_owner_id_guest(file_name): def get_file_owner_id_host(file_name): """ - Get the created file's uid and gid in host. + Get the created file's uid and gid in host. """ - error_context.context("Get the file %s's uid:gid in host." 
- % file_name, test.log.info) + error_context.context( + f"Get the file {file_name}'s uid:gid in host.", test.log.info + ) cmd_get_file = "ls -l %s" host_file = os.path.join(shared_dir, file_name) - output_host = process.system_output(cmd_get_file % host_file, - shell=True).decode().strip() + output_host = ( + process.system_output(cmd_get_file % host_file, shell=True).decode().strip() + ) owner_host = output_host.split()[2] group_host = output_host.split()[3] - if process.system("id %s -u" % owner_host, shell=True, - ignore_status=True): + if process.system(f"id {owner_host} -u", shell=True, ignore_status=True): uid = owner_host else: - uid = process.system_output("id %s -u" % owner_host, - shell=True).decode().strip() - if process.system("id %s -g" % group_host, shell=True, - ignore_status=True): + uid = ( + process.system_output(f"id {owner_host} -u", shell=True) + .decode() + .strip() + ) + if process.system(f"id {group_host} -g", shell=True, ignore_status=True): gid = group_host else: - gid = process.system_output("id %s -g" % group_host, - shell=True).decode().strip() + gid = ( + process.system_output(f"id {group_host} -g", shell=True) + .decode() + .strip() + ) return uid + ":" + gid fs_target = params.get("fs_target") @@ -161,28 +170,30 @@ def get_file_owner_id_host(file_name): p_vfsd = None try: # start virtiofsd with user config in host - error_context.context("Create a common user and a shared dir in host.", - test.log.info) + error_context.context( + "Create a common user and a shared dir in host.", test.log.info + ) user_name = params["new_user_host"] - if process.system("id %s" % user_name, shell=True, - ignore_status=True) == 0: + if process.system(f"id {user_name}", shell=True, ignore_status=True) == 0: process.run(params["del_user_cmd"] % user_name) process.run(params["add_user_cmd"] % user_name) # config socket - sock_path = os.path.join("/home/" + user_name, - '-'.join(('avocado-vt-vm1', 'viofs', - 'virtiofsd.sock'))) + sock_path = os.path.join( + "/home/" + user_name, + "-".join(("avocado-vt-vm1", "viofs", "virtiofsd.sock")), + ) # create the socket file before daemon running open(sock_path, "w") - params['fs_source_user_sock_path'] = sock_path + params["fs_source_user_sock_path"] = sock_path # create the share folder fs_source = params.get("fs_source_dir") shared_dir = os.path.join("/home/" + user_name, fs_source) if not os.path.exists(shared_dir): - process.system("runuser -l " + user_name + " -c 'mkdir -p " + - shared_dir + "'") + process.system( + "runuser -l " + user_name + " -c 'mkdir -p " + shared_dir + "'" + ) # give 'x' permission to common user home dir and give share dir # write permission for all the users. @@ -200,20 +211,26 @@ def get_file_owner_id_host(file_name): if map_type == "one_to_one": fs_binary_extra_options = fsd_map_option % (uid_begin, gid_begin) else: - fs_binary_extra_options = fsd_map_option % (uid_begin, uid_count, - gid_begin, gid_count) + fs_binary_extra_options = fsd_map_option % ( + uid_begin, + uid_count, + gid_begin, + gid_count, + ) cmd_run_virtiofsd = params["cmd_run_virtiofsd"] % sock_path - cmd_run_virtiofsd += " --shared-dir %s" % shared_dir + cmd_run_virtiofsd += f" --shared-dir {shared_dir}" cmd_run_virtiofsd += fs_binary_extra_options - error_context.context("Running daemon command %s with %s user." 
% - (cmd_run_virtiofsd, user_name), - test.log.info) - p_vfsd = aexpect.ShellSession("runuser -l " + user_name + - " -c '" + cmd_run_virtiofsd + "'", - auto_close=False, - output_func=utils_misc.log_line, - output_params=(vfsd_log_name,), - prompt=r"^\[.*\][\#\$]\s*$") + error_context.context( + f"Running daemon command {cmd_run_virtiofsd} with {user_name} user.", + test.log.info, + ) + p_vfsd = aexpect.ShellSession( + "runuser -l " + user_name + " -c '" + cmd_run_virtiofsd + "'", + auto_close=False, + output_func=utils_misc.log_line, + output_params=(vfsd_log_name,), + prompt=r"^\[.*\][\#\$]\s*$", + ) params["start_vm"] = "yes" env_process.preprocess_vm(test, params, env, params["main_vm"]) @@ -223,19 +240,19 @@ def get_file_owner_id_host(file_name): if not utils_misc.make_dirs(fs_dest, session): test.fail("Creating directory was failed!") - error_context.context("Mount virtiofs target %s to %s inside" - " guest." % (fs_target, fs_dest), - test.log.info) - if not utils_disk.mount(fs_target, fs_dest, 'virtiofs', session=session): - test.fail('Mount virtiofs target failed.') + error_context.context( + f"Mount virtiofs target {fs_target} to {fs_dest} inside" " guest.", + test.log.info, + ) + if not utils_disk.mount(fs_target, fs_dest, "virtiofs", session=session): + test.fail("Mount virtiofs target failed.") for guest_user in ["root", common_guest_user]: # create a new file in guest. file_name = create_file_in_guest(guest_user) # if map type is 1 to 1, then create file will fail with # a common user, so there is no need to check the uid/gid - if not (map_type == "one_to_one" - and guest_user == common_guest_user): + if not (map_type == "one_to_one" and guest_user == common_guest_user): uid_gid_guest = get_file_owner_id_guest(file_name) uid_gid_host = get_file_owner_id_host(file_name) expect_id = get_expect_user_id(guest_user) @@ -246,11 +263,10 @@ def get_file_owner_id_host(file_name): if uid_gid_host != expect_id["host"]: test.fail(msg % ("host", expect_id["host"], uid_gid_host)) finally: - error_context.context("Clean the env, delete the user on guest.", - test.log.info) - utils_disk.umount(fs_target, fs_dest, 'virtiofs', session=session) + error_context.context("Clean the env, delete the user on guest.", test.log.info) + utils_disk.umount(fs_target, fs_dest, "virtiofs", session=session) utils_misc.safe_rmdir(fs_dest, session=session) - if session.cmd_status("id %s" % common_guest_user) == 0: + if session.cmd_status(f"id {common_guest_user}") == 0: session.cmd(del_user_cmd % common_guest_user) if vm and vm.is_alive(): vm.destroy() diff --git a/qemu/tests/virtio_fs_memory_leak_check.py b/qemu/tests/virtio_fs_memory_leak_check.py index 51b41f6e9d..034a1a80c3 100644 --- a/qemu/tests/virtio_fs_memory_leak_check.py +++ b/qemu/tests/virtio_fs_memory_leak_check.py @@ -1,7 +1,6 @@ import os -from virttest import utils_test, utils_misc -from virttest import error_context +from virttest import error_context, utils_misc, utils_test from provider import virtio_fs_utils from provider.storage_benchmark import generate_instance @@ -29,50 +28,44 @@ def _record_poolmon(): """Record Mmdi tag memory info in pool monitor to C: volume""" status = session.cmd_status(poolmon_mmdi_cmd) if status: - test.fail("Fail to get Mmdi pool tag memory " - "info in pool monitor.") + test.fail("Fail to get Mmdi pool tag memory " "info in pool monitor.") driver = params["driver_name"] driver_verifier = params.get("driver_verifier", driver) - driver_running = params.get('driver_running', driver_verifier) + driver_running = 
params.get("driver_running", driver_verifier) timeout = params.get_numeric("login_timeout", 360) fs_target = params.get("fs_target") - test_file = params.get('virtio_fs_test_file') - iozone_options = params.get('iozone_options') + test_file = params.get("virtio_fs_test_file") + iozone_options = params.get("iozone_options") io_timeout = params.get_numeric("io_timeout", 1800) poolmon_mmdi_cmd = params["poolmon_mmdi_cmd"] get_mem_poolmon_cmd = params["get_mem_poolmon_cmd"] record_file = params["record_file"] - vm_name = params['main_vm'] + vm_name = params["main_vm"] vm = env.get_vm(vm_name) vm.verify_alive() - error_context.context("Boot guest with %s device" % driver, test.log.info) + error_context.context(f"Boot guest with {driver} device", test.log.info) session = vm.wait_for_login(timeout=timeout) error_context.context("Run the viofs service", test.log.info) utils_test.qemu.windrv_verify_running(session, test, driver_running) - session = utils_test.qemu.setup_win_driver_verifier(session, - driver_verifier, - vm) + session = utils_test.qemu.setup_win_driver_verifier(session, driver_verifier, vm) virtio_fs_utils.run_viofs_service(test, params, session) - driver_letter = virtio_fs_utils.get_virtiofs_driver_letter(test, - fs_target, - session) - fs_dest = "%s:" % driver_letter + driver_letter = virtio_fs_utils.get_virtiofs_driver_letter(test, fs_target, session) + fs_dest = f"{driver_letter}:" guest_file = os.path.join(fs_dest, test_file).replace("/", "\\") test.log.info("Record memory info before iotest.") - poolmon_mmdi_cmd = utils_misc.set_winutils_letter(session, - poolmon_mmdi_cmd) - if session.cmd_status("dir %s" % record_file) == 0: - test.log.info("Removing file %s." % record_file) - session.cmd_status("del /f /q %s" % record_file) + poolmon_mmdi_cmd = utils_misc.set_winutils_letter(session, poolmon_mmdi_cmd) + if session.cmd_status(f"dir {record_file}") == 0: + test.log.info("Removing file %s.", record_file) + session.cmd_status(f"del /f /q {record_file}") _record_poolmon() error_context.context("Start iozone test.", test.log.info) - io_test = generate_instance(params, vm, 'iozone') + io_test = generate_instance(params, vm, "iozone") try: io_test.run(iozone_options % guest_file, io_timeout) finally: @@ -81,14 +74,16 @@ def _record_poolmon(): test.log.info("Record memory info after iotest") _record_poolmon() - error_context.context("Check the diff of allocation memory and" - " free memory for Mmdi pool tag" - " in memory pool monitor before" - " start io test.", test.log.info) + error_context.context( + "Check the diff of allocation memory and" + " free memory for Mmdi pool tag" + " in memory pool monitor before" + " start io test.", + test.log.info, + ) result = session.cmd_output(get_mem_poolmon_cmd).strip() - test.log.info("The pool monitor result is\n%s" % result) + test.log.info("The pool monitor result is\n%s", result) diff_befor = result.split("\n")[0].split()[4] diff_aft = result.split("\n")[1].split()[4] if int(diff_aft) - int(diff_befor) > 100: - test.fail("There are memory leak on virtiofs," - " the result is %s" % result) + test.fail("There are memory leak on virtiofs," f" the result is {result}") diff --git a/qemu/tests/virtio_fs_multi_users_access.py b/qemu/tests/virtio_fs_multi_users_access.py index 965b556a28..74ed5bcce3 100644 --- a/qemu/tests/virtio_fs_multi_users_access.py +++ b/qemu/tests/virtio_fs_multi_users_access.py @@ -1,4 +1,4 @@ -from virttest import error_context, utils_test, utils_disk, utils_misc +from virttest import error_context, utils_disk, 
utils_misc, utils_test from provider import virtio_fs_utils @@ -32,12 +32,12 @@ def get_eth_name(session): """ error_context.context("Get the ip config from guest.", test.log.info) ipconfig_output = session.cmd_output("ipconfig /all") - match = "Connection-specific DNS Suffix . : %s" % domain_dns + match = f"Connection-specific DNS Suffix . : {domain_dns}" if match not in ipconfig_output: test.error("There is NOT related domain adapter found!") - eth_name = ipconfig_output.split(match)[0] \ - .split("Ethernet adapter ")[-1] \ - .split(":")[0] + eth_name = ( + ipconfig_output.split(match)[0].split("Ethernet adapter ")[-1].split(":")[0] + ) return eth_name def switch_dynamic_ip_to_static_ip(session, eth_name): @@ -47,12 +47,10 @@ def switch_dynamic_ip_to_static_ip(session, eth_name): :param session: the session of guest """ - error_context.context("Get the config of %s." % eth_name, - test.log.info) - net = session.cmd_output("netsh interface ip show config name=\"%s\"" % - eth_name) + error_context.context(f"Get the config of {eth_name}.", test.log.info) + net = session.cmd_output(f'netsh interface ip show config name="{eth_name}"') - ipaddr, gateway, dns_server, subnet_mask = "", "", "", "" + ipaddr, gateway, _dns_server, subnet_mask = "", "", "", "" for line in net.splitlines(): if "IP Address: " in line: ipaddr = line.split(":")[-1].lstrip() @@ -60,27 +58,33 @@ def switch_dynamic_ip_to_static_ip(session, eth_name): gateway = line.split(":")[-1].lstrip() elif "Subnet Prefix:" in line: subnet_mask = line.split("mask")[-1].lstrip()[:-1] - error_context.context("The config will be set to ipaddress:%s, " - "gateway:%s, subnet mask:%s." % ( - ipaddr, gateway, subnet_mask), - test.log.info) - - ip_cmd = "netsh interface ip set address name=\"%s\" source=static " \ - "addr=%s mask=%s gateway=%s" % (eth_name, ipaddr, - subnet_mask, gateway) + error_context.context( + f"The config will be set to ipaddress:{ipaddr}, " + f"gateway:{gateway}, subnet mask:{subnet_mask}.", + test.log.info, + ) + + ip_cmd = ( + f'netsh interface ip set address name="{eth_name}" source=static ' + f"addr={ipaddr} mask={subnet_mask} gateway={gateway}" + ) session.cmd(ip_cmd) - dns_cmd = "netsh interface ip set dnsservers \"%s\" static " \ - "192.168.0.1 primary" % eth_name + dns_cmd = ( + f'netsh interface ip set dnsservers "{eth_name}" static ' + "192.168.0.1 primary" + ) session.cmd(dns_cmd) def switch_ip_to_dynamic(session, eth_name): if eth_name: - restore_ip_cmd = "netsh interface ip set address name=\"%s\" " \ - "source=dhcp" % eth_name + restore_ip_cmd = ( + f'netsh interface ip set address name="{eth_name}" ' "source=dhcp" + ) session.cmd(restore_ip_cmd) - restore_dns_cmd = "netsh interface ip set dnsservers name=\"%s\" " \ - "source=dhcp" % eth_name + restore_dns_cmd = ( + f'netsh interface ip set dnsservers name="{eth_name}" ' "source=dhcp" + ) session.cmd(restore_dns_cmd) add_user_cmd = params.get("add_user_cmd") @@ -99,114 +103,108 @@ def switch_ip_to_dynamic(session, eth_name): try: if not domain_dns: - error_context.context("Create the user(s) on guest...", - test.log.info) + error_context.context("Create the user(s) on guest...", test.log.info) for _username, _pwd in zip(username, pwd): if os_type == "windows": - status = session.cmd_status(add_user_cmd % (_username, - _pwd)) + status = session.cmd_status(add_user_cmd % (_username, _pwd)) else: - status = session.cmd_status(add_user_cmd % (_pwd, - _username)) + status = session.cmd_status(add_user_cmd % (_pwd, _username)) if status != 0: test.fail("Failed to 
create user!") if os_type == "windows": - session = utils_test.qemu. \ - windrv_check_running_verifier(session, vm, test, driver_name) + session = utils_test.qemu.windrv_check_running_verifier( + session, vm, test, driver_name + ) virtio_fs_utils.run_viofs_service(test, params, session) vm.reboot(session) else: - error_context.context("Create a destination directory %s " - "inside guest." % fs_dest, test.log.info) + error_context.context( + f"Create a destination directory {fs_dest} " "inside guest.", + test.log.info, + ) if not utils_misc.make_dirs(fs_dest, session=session): test.fail("Creating directory was failed!") - error_context.context("Mount virtiofs target %s to %s inside" - " guest." % (fs_target, fs_dest), - test.log.info) - if not utils_disk.mount(fs_target, fs_dest, 'virtiofs', - session=session): - test.fail('Mount virtiofs target failed.') - error_context.context("Set 777 permission for all users.", - test.log.info) - session.cmd("chmod -R 777 %s" % fs_dest) + error_context.context( + f"Mount virtiofs target {fs_target} to {fs_dest} inside" " guest.", + test.log.info, + ) + if not utils_disk.mount(fs_target, fs_dest, "virtiofs", session=session): + test.fail("Mount virtiofs target failed.") + error_context.context("Set 777 permission for all users.", test.log.info) + session.cmd(f"chmod -R 777 {fs_dest}") for _username, _pwd in zip(username, pwd): try: if not domain_dns: - error_context.context("Login the user: %s" % _username, - test.log.info) - session = vm.wait_for_login(username=_username, - password=_pwd) + error_context.context(f"Login the user: {_username}", test.log.info) + session = vm.wait_for_login(username=_username, password=_pwd) else: session = vm.wait_for_login() eth_name = get_eth_name(session) switch_dynamic_ip_to_static_ip(session, eth_name) join_domain_cmd = params.get("join_domain") - join_domain_cmd = join_domain_cmd.replace("%s", - _username, - 1) - join_domain_cmd = join_domain_cmd.replace("%s", - _pwd, - 1) + join_domain_cmd = join_domain_cmd.replace("%s", _username, 1) + join_domain_cmd = join_domain_cmd.replace("%s", _pwd, 1) error_context.context("Join domain...", test.log.info) output = session.cmd_output(join_domain_cmd) if "does not exist" in output: test.fail("Failed to join the domain!") elif "is not recognized as an internal" in output: - error_context.context("The netdom is NOT supported, " - "trying to use powershell...", - test.log.info) + error_context.context( + "The netdom is NOT supported, " + "trying to use powershell...", + test.log.info, + ) ps_cred = params.get("ps_cred") ps_join_domain = params.get("ps_join_domain") ps_cred = ps_cred % (_username, _pwd) ps_join_domain = ps_cred + ps_join_domain - session.cmd("powershell \"" + ps_join_domain + "\"") + session.cmd('powershell "' + ps_join_domain + '"') session = vm.reboot(session) if os_type == "windows": - virtio_fs_utils.basic_io_test_via_psexec(test, params, vm, - _username, _pwd) + virtio_fs_utils.basic_io_test_via_psexec( + test, params, vm, _username, _pwd + ) else: virtio_fs_utils.basic_io_test(test, params, session) finally: if domain_dns: error_context.context("Remove domain...", test.log.info) remove_domain_cmd = params.get("remove_domain") - remove_domain_cmd = remove_domain_cmd.replace("%s", - _username, - 1) - remove_domain_cmd = remove_domain_cmd.replace("%s", - _pwd, - 1) + remove_domain_cmd = remove_domain_cmd.replace("%s", _username, 1) + remove_domain_cmd = remove_domain_cmd.replace("%s", _pwd, 1) session = vm.wait_for_login() output = 
session.cmd_output(remove_domain_cmd) if "is not recognized as an internal" in output: - error_context.context("The netdom is NOT supported, " - "trying to use powershell...", - test.log.info) + error_context.context( + "The netdom is NOT supported, " + "trying to use powershell...", + test.log.info, + ) ps_cred = params.get("ps_cred") ps_remove_domain = params.get("ps_remove_domain") ps_cred = ps_cred % (_username, _pwd) ps_remove_domain = ps_cred + ps_remove_domain - session.cmd("powershell \"" + ps_remove_domain + "\"") + session.cmd('powershell "' + ps_remove_domain + '"') vm.reboot(session) finally: session = vm.wait_for_login() if os_type == "linux": error_context.context("Umount and remove dir...") - utils_disk.umount(fs_target, fs_dest, 'virtiofs', session=session) + utils_disk.umount(fs_target, fs_dest, "virtiofs", session=session) utils_misc.safe_rmdir(fs_dest, session=session) if not domain_dns: - error_context.context("Delete the user(s) on guest...", - test.log.info) + error_context.context("Delete the user(s) on guest...", test.log.info) for _username in username: output = session.cmd_output(del_user_cmd % _username) if "is currently used by process" in output: - error_context.context("Kill process before delete user...", - test.log.info) + error_context.context( + "Kill process before delete user...", test.log.info + ) pid = output.split(" ")[-1] - session.cmd("kill -9 %s" % pid) + session.cmd(f"kill -9 {pid}") else: switch_ip_to_dynamic(session, eth_name) session.close() diff --git a/qemu/tests/virtio_fs_multi_vms.py b/qemu/tests/virtio_fs_multi_vms.py index 830c541f4a..24ac823f11 100644 --- a/qemu/tests/virtio_fs_multi_vms.py +++ b/qemu/tests/virtio_fs_multi_vms.py @@ -1,9 +1,6 @@ import os -from virttest import error_context -from virttest import utils_disk -from virttest import utils_misc -from virttest import utils_test +from virttest import error_context, utils_disk, utils_misc, utils_test from virttest.utils_windows import virtio_win from provider import win_driver_utils @@ -26,6 +23,7 @@ def run(test, params, env): :param params: Dictionary with the test parameters. :param env: Dictionary with test environment. 
""" + def get_viofs_exe(session): """ Get viofs.exe from virtio win iso,such as E:\viofs\2k19\amd64 @@ -33,12 +31,9 @@ def get_viofs_exe(session): test.log.info("Get virtiofs exe full path.") media_type = params["virtio_win_media_type"] try: - get_drive_letter = getattr(virtio_win, "drive_letter_%s" % - media_type) - get_product_dirname = getattr(virtio_win, - "product_dirname_%s" % media_type) - get_arch_dirname = getattr(virtio_win, "arch_dirname_%s" % - media_type) + get_drive_letter = getattr(virtio_win, f"drive_letter_{media_type}") + get_product_dirname = getattr(virtio_win, f"product_dirname_{media_type}") + get_arch_dirname = getattr(virtio_win, f"arch_dirname_{media_type}") except AttributeError: test.error("Not supported virtio win media type '%s'", media_type) viowin_ltr = get_drive_letter(session) @@ -51,9 +46,9 @@ def get_viofs_exe(session): if not guest_arch: test.error("Could not get architecture dirname of the vm") - exe_middle_path = ("{name}\\{arch}" if media_type == "iso" - else "{arch}\\{name}").format(name=guest_name, - arch=guest_arch) + exe_middle_path = ( + "{name}\\{arch}" if media_type == "iso" else "{arch}\\{name}" + ).format(name=guest_name, arch=guest_arch) exe_file_name = "virtiofs.exe" exe_find_cmd = 'dir /b /s %s\\%s | findstr "\\%s\\\\"' exe_find_cmd %= (viowin_ltr, exe_file_name, exe_middle_path) @@ -61,11 +56,11 @@ def get_viofs_exe(session): test.log.info("Found exe file '%s'", exe_path) return exe_path - cmd_dd = params.get('cmd_dd') - cmd_md5 = params.get('cmd_md5') - io_timeout = params.get_numeric('io_timeout') - shared_fs_source_dir = params.get('shared_fs_source_dir') - os_type = params.get('os_type') + cmd_dd = params.get("cmd_dd") + cmd_md5 = params.get("cmd_md5") + io_timeout = params.get_numeric("io_timeout") + shared_fs_source_dir = params.get("shared_fs_source_dir") + os_type = params.get("os_type") # cfg for windows vm cmd_timeout = params.get_numeric("cmd_timeout", 120) @@ -80,32 +75,34 @@ def get_viofs_exe(session): sessions.append(vm.wait_for_login()) mapping = {} - for vm, vm_obj, session in zip(params.objects('vms'), vms, sessions): + for vm, vm_obj, session in zip(params.objects("vms"), vms, sessions): vm_params = params.object_params(vm) - mapping[vm] = {'session': session, 'vm_obj': vm_obj, 'filesystems': []} + mapping[vm] = {"session": session, "vm_obj": vm_obj, "filesystems": []} # check driver verifier in windows vm # install winfsp tool and start virtiofs exe in windows vm if os_type == "windows": # Check whether windows driver is running,and enable driver verifier - session = utils_test.qemu.windrv_check_running_verifier(session, - vm_obj, test, - driver_name) - error_context.context("%s: Install winfsp for windows guest." % vm, - test.log.info) + session = utils_test.qemu.windrv_check_running_verifier( + session, vm_obj, test, driver_name + ) + error_context.context( + f"{vm}: Install winfsp for windows guest.", test.log.info + ) installed = session.cmd_status(check_installed_cmd) == 0 if installed: test.log.info("%s: Winfsp tool is already installed.", vm) else: - install_cmd = utils_misc.set_winutils_letter(session, - wfsp_install_cmd) + install_cmd = utils_misc.set_winutils_letter(session, wfsp_install_cmd) session.cmd(install_cmd, cmd_timeout) - if not utils_misc.wait_for(lambda: not session.cmd_status( - check_installed_cmd), 60): - test.error("%s: Winfsp tool is not installed." % vm) - - error_context.context("%s: Start virtiofs service in guest." 
% vm, - test.log.info) + if not utils_misc.wait_for( + lambda: not session.cmd_status(check_installed_cmd), 60 + ): + test.error(f"{vm}: Winfsp tool is not installed.") + + error_context.context( + f"{vm}: Start virtiofs service in guest.", test.log.info + ) viofs_sc_create_cmd = params["viofs_sc_create_cmd"] viofs_sc_start_cmd = params["viofs_sc_start_cmd"] viofs_sc_query_cmd = params["viofs_sc_query_cmd"] @@ -118,9 +115,13 @@ def get_viofs_exe(session): # copy virtiofs.exe to c: in case the virtio-win cdrom volume name # is changed in other cases of a loop. session.cmd(params.get("viofs_exe_copy_cmd") % exe_path) - sc_create_s, sc_create_o = session.cmd_status_output(viofs_sc_create_cmd) + sc_create_s, sc_create_o = session.cmd_status_output( + viofs_sc_create_cmd + ) if sc_create_s != 0: - test.fail("Failed to register virtiofs service, output is %s" % sc_create_o) + test.fail( + f"Failed to register virtiofs service, output is {sc_create_o}" + ) test.log.info("Check if virtiofs service is started.") status, output = session.cmd_status_output(viofs_sc_query_cmd) @@ -128,114 +129,127 @@ def get_viofs_exe(session): test.log.info("Start virtiofs service.") sc_start_s, sc_start_o = session.cmd_status_output(viofs_sc_start_cmd) if sc_start_s != 0: - test.fail("Failed to start virtiofs service, output is %s" % sc_start_o) + test.fail( + f"Failed to start virtiofs service, output is {sc_start_o}" + ) else: test.log.info("Virtiofs service is running.") # enable debug log. viofs_debug_enable_cmd = params.get("viofs_debug_enable_cmd") viofs_log_enable_cmd = params.get("viofs_log_enable_cmd") if viofs_debug_enable_cmd and viofs_log_enable_cmd: - error_context.context("Check if virtiofs debug log is enabled in guest.", test.log.info) + error_context.context( + "Check if virtiofs debug log is enabled in guest.", test.log.info + ) cmd = params.get("viofs_reg_query_cmd") ret = session.cmd_output(cmd) if "debugflags" not in ret.lower() or "debuglogfile" not in ret.lower(): - error_context.context("Configure virtiofs debug log.", test.log.info) + error_context.context( + "Configure virtiofs debug log.", test.log.info + ) for reg_cmd in (viofs_debug_enable_cmd, viofs_log_enable_cmd): - error_context.context("Set %s " % reg_cmd, test.log.info) + error_context.context(f"Set {reg_cmd} ", test.log.info) s, o = session.cmd_status_output(reg_cmd) if s: - test.fail("Fail command: %s. Output: %s" % (reg_cmd, o)) + test.fail(f"Fail command: {reg_cmd}. Output: {o}") error_context.context("Reboot guest.", test.log.info) session = vm_obj.reboot() # session is updated in reboot steps, so it should be updated. - mapping[vm]['session'] = session + mapping[vm]["session"] = session else: test.log.info("Virtiofs debug log is enabled.") # get fs dest for vm - for fs in vm_params.objects('filesystems'): + for fs in vm_params.objects("filesystems"): fs_params = vm_params.object_params(fs) fs_target = fs_params.get("fs_target") fs_dest = fs_params.get("fs_dest") if os_type == "linux": error_context.context( - "%s: Create a destination directory %s inside guest." % - (vm, fs_dest), test.log.info) + f"{vm}: Create a destination directory {fs_dest} inside guest.", + test.log.info, + ) utils_misc.make_dirs(fs_dest, session) error_context.context( - "%s: Mount the virtiofs target %s to %s inside guest." 
% - (vm, fs_target, fs_dest), test.log.info) - if not utils_disk.mount(fs_target, fs_dest, 'virtiofs', session=session): - test.fail('Mount virtiofs target failed.') + f"{vm}: Mount the virtiofs target {fs_target} to {fs_dest} inside guest.", + test.log.info, + ) + if not utils_disk.mount( + fs_target, fs_dest, "virtiofs", session=session + ): + test.fail("Mount virtiofs target failed.") else: virtio_fs_disk_label = fs_target - error_context.context("%s: Get Volume letter of virtio fs" - " target, the disk lable is %s." % - (vm, virtio_fs_disk_label), test.log.info) - vol_con = "VolumeName='%s'" % virtio_fs_disk_label - vol_func = utils_misc.get_win_disk_vol(session, - condition=vol_con) - volume_letter = utils_misc.wait_for(lambda: vol_func, - cmd_timeout) + error_context.context( + f"{vm}: Get Volume letter of virtio fs" + f" target, the disk lable is {virtio_fs_disk_label}.", + test.log.info, + ) + vol_con = f"VolumeName='{virtio_fs_disk_label}'" + vol_func = utils_misc.get_win_disk_vol(session, condition=vol_con) + volume_letter = utils_misc.wait_for(lambda: vol_func, cmd_timeout) if volume_letter is None: - test.fail("Could not get virtio-fs mounted volume" - " letter for %s." % fs_target) - fs_dest = "%s:" % volume_letter - - guest_file = os.path.join(fs_dest, 'fs_test') - test.log.info("%s: The guest file in shared dir is %s", - vm, guest_file) - mapping[vm]['filesystems'].append({'fs_target': fs_target, - 'fs_dest': fs_dest, - 'guest_file': guest_file}) + test.fail( + "Could not get virtio-fs mounted volume" + f" letter for {fs_target}." + ) + fs_dest = f"{volume_letter}:" + + guest_file = os.path.join(fs_dest, "fs_test") + test.log.info("%s: The guest file in shared dir is %s", vm, guest_file) + mapping[vm]["filesystems"].append( + {"fs_target": fs_target, "fs_dest": fs_dest, "guest_file": guest_file} + ) if cmd_dd: - test.log.info("%s: Creating file under %s inside guest.", - vm, fs_dest) + test.log.info("%s: Creating file under %s inside guest.", vm, fs_dest) session.cmd(cmd_dd % guest_file, io_timeout) if shared_fs_source_dir: continue if os_type == "linux": - error_context.context("%s: Umount the viriofs target %s." % - (vm, fs_target), test.log.info) - utils_disk.umount(fs_target, fs_dest, 'virtiofs', - session=session) + error_context.context( + f"{vm}: Umount the viriofs target {fs_target}.", test.log.info + ) + utils_disk.umount(fs_target, fs_dest, "virtiofs", session=session) if shared_fs_source_dir: error_context.context("Compare the md5 among VMs.", test.log.info) md5_set = set() for vm, info in mapping.items(): - session = info['session'] - for fs in info['filesystems']: - shared_data = fs['guest_file'] - error_context.context("%s: Get the md5 of %s." % - (vm, shared_data), test.log.info) + session = info["session"] + for fs in info["filesystems"]: + shared_data = fs["guest_file"] + error_context.context( + f"{vm}: Get the md5 of {shared_data}.", test.log.info + ) if os_type == "linux": cmd_md5_vm = cmd_md5 % shared_data else: guest_file_win = shared_data.replace("/", "\\") cmd_md5_vm = cmd_md5 % (volume_letter, guest_file_win) - md5_guest = session.cmd(cmd_md5_vm, - io_timeout).strip().split()[0] + md5_guest = session.cmd(cmd_md5_vm, io_timeout).strip().split()[0] test.log.info(md5_guest) md5_set.add(md5_guest) if os_type == "linux": - error_context.context("%s: Umount the viriofs target %s." 
% - (vm, fs['fs_target']), test.log.info) - utils_disk.umount(fs['fs_target'], fs['fs_dest'], - 'virtiofs', session=session) + error_context.context( + "{}: Umount the viriofs target {}.".format(vm, fs["fs_target"]), + test.log.info, + ) + utils_disk.umount( + fs["fs_target"], fs["fs_dest"], "virtiofs", session=session + ) if len(md5_set) != 1: - test.fail('The md5 values are different among VMs.') + test.fail("The md5 values are different among VMs.") # for windows guest, disable/uninstall driver to get memory leak based on # driver verifier is enabled if os_type == "windows": for vm, info in mapping.items(): - vm_obj = info['vm_obj'] + vm_obj = info["vm_obj"] vm_params = params.object_params(vm) win_driver_utils.memory_leak_check(vm_obj, test, vm_params) diff --git a/qemu/tests/virtio_fs_readonly.py b/qemu/tests/virtio_fs_readonly.py index 60d621fb94..4fdd08e502 100644 --- a/qemu/tests/virtio_fs_readonly.py +++ b/qemu/tests/virtio_fs_readonly.py @@ -1,6 +1,4 @@ -from virttest import error_context -from virttest import utils_misc -from virttest import utils_disk +from virttest import error_context, utils_disk, utils_misc @error_context.context_aware @@ -18,29 +16,33 @@ def run(test, params, env): :param params: Dictionary with the test parameters. :param env: Dictionary with test environment. """ - fs_target = params.get('fs_target') - fs_dest = params.get('fs_dest') + fs_target = params.get("fs_target") + fs_dest = params.get("fs_dest") vm = env.get_vm(params.get("main_vm")) vm.verify_alive() session = vm.wait_for_login() - error_context.context("Create a destination directory inside guest.", - test.log.info) + error_context.context("Create a destination directory inside guest.", test.log.info) utils_misc.make_dirs(fs_dest, session) - error_context.context("Mount the virtiofs target with read-only to " - "the destination directory inside guest.", test.log.info) - if not utils_disk.mount(fs_target, fs_dest, 'virtiofs', 'ro', session=session): - test.fail('Mount virtiofs target failed.') + error_context.context( + "Mount the virtiofs target with read-only to " + "the destination directory inside guest.", + test.log.info, + ) + if not utils_disk.mount(fs_target, fs_dest, "virtiofs", "ro", session=session): + test.fail("Mount virtiofs target failed.") try: - error_context.context("Create file under the destination " - "directory inside guest.", test.log.info) - output = session.cmd_output(params.get('cmd_create_file')) + error_context.context( + "Create file under the destination " "directory inside guest.", + test.log.info, + ) + output = session.cmd_output(params.get("cmd_create_file")) test.log.info(output) - if params.get('check_str') not in output: - test.fail('Failed to mount the virtiofs target with read-only.') + if params.get("check_str") not in output: + test.fail("Failed to mount the virtiofs target with read-only.") finally: - utils_disk.umount(fs_target, fs_dest, 'virtiofs', session=session) + utils_disk.umount(fs_target, fs_dest, "virtiofs", session=session) utils_misc.safe_rmdir(fs_dest, session=session) diff --git a/qemu/tests/virtio_fs_sandbox.py b/qemu/tests/virtio_fs_sandbox.py index 8401c4deb7..36bb3de942 100644 --- a/qemu/tests/virtio_fs_sandbox.py +++ b/qemu/tests/virtio_fs_sandbox.py @@ -1,16 +1,11 @@ import os import shutil -import aexpect +import aexpect from avocado.utils import process +from virttest import env_process, error_context, utils_disk, utils_misc, utils_test -from virttest import env_process -from virttest import error_context -from virttest import 
utils_disk -from virttest import utils_misc -from virttest import utils_test -from provider import virtio_fs_utils -from provider import win_driver_utils +from provider import virtio_fs_utils, win_driver_utils @error_context.context_aware @@ -34,8 +29,10 @@ def run(test, params, env): process.system(params["del_user_cmd"], ignore_status=True, shell=True) process.system(params["add_user_cmd"], shell=True) - error_context.context("Switch to the common user and create the" - " shared dir in home dir.", test.log.info) + error_context.context( + "Switch to the common user and create the" " shared dir in home dir.", + test.log.info, + ) # set fs shared dir shared_dir = "/home/" + common_user + "/virtio_fs_test" params["fs_source_dir"] = shared_dir @@ -43,27 +40,29 @@ def run(test, params, env): # set fs daemon path if os.path.exists(shared_dir): shutil.rmtree(shared_dir, ignore_errors=True) - process.system("su - %s -c 'mkdir -p %s'" % - (common_user, shared_dir), shell=True) + process.system(f"su - {common_user} -c 'mkdir -p {shared_dir}'", shell=True) # set fs socket - sock_path = os.path.join("/home/" + common_user, - '-'.join(('avocado-vt-vm1', 'viofs', - 'virtiofsd.sock'))) - params['fs_source_user_sock_path'] = sock_path + sock_path = os.path.join( + "/home/" + common_user, "-".join(("avocado-vt-vm1", "viofs", "virtiofsd.sock")) + ) + params["fs_source_user_sock_path"] = sock_path # run daemon cmd_run_virtiofsd = params["cmd_run_virtiofsd"] % (sock_path, shared_dir) - cmd_run_virtiofsd += params.get('fs_binary_extra_options') - error_context.context("Running daemon command %s with user." % - cmd_run_virtiofsd, - test.log.info) + cmd_run_virtiofsd += params.get("fs_binary_extra_options") + error_context.context( + f"Running daemon command {cmd_run_virtiofsd} with user.", test.log.info + ) - virtiofsd_cmd = "runuser -l %s -c '%s'" % (common_user, cmd_run_virtiofsd) - session = aexpect.ShellSession(virtiofsd_cmd, auto_close=False, - output_func=utils_misc.log_line, - output_params=('virtiofs_fs-virtiofs.log',), - prompt=r"^\[.*\][\#\$]\s*$") + virtiofsd_cmd = f"runuser -l {common_user} -c '{cmd_run_virtiofsd}'" + session = aexpect.ShellSession( + virtiofsd_cmd, + auto_close=False, + output_func=utils_misc.log_line, + output_params=("virtiofs_fs-virtiofs.log",), + prompt=r"^\[.*\][\#\$]\s*$", + ) # start vm params["start_vm"] = "yes" env_process.preprocess_vm(test, params, env, params["main_vm"]) @@ -77,51 +76,60 @@ def run(test, params, env): cmd_timeout = params.get_numeric("cmd_timeout", 120) driver_name = params["driver_name"] # Check whether windows driver is running,and enable driver verifier - session = utils_test.qemu.windrv_check_running_verifier(session, - vm, test, - driver_name) + session = utils_test.qemu.windrv_check_running_verifier( + session, vm, test, driver_name + ) # create virtiofs service viofs_svc_name = params["viofs_svc_name"] - virtio_fs_utils.create_viofs_service(test, params, session, - service=viofs_svc_name) + virtio_fs_utils.create_viofs_service( + test, params, session, service=viofs_svc_name + ) fs_target = params["fs_target"] fs_dest = params["fs_dest"] if not is_windows: # mount virtiofs - error_context.context("Create a destination directory %s " - "inside guest." % fs_dest, test.log.info) + error_context.context( + f"Create a destination directory {fs_dest} " "inside guest.", + test.log.info, + ) utils_misc.make_dirs(fs_dest, session) - error_context.context("Mount virtiofs target %s to %s inside" - " guest." 
% (fs_target, fs_dest), - test.log.info) - if not utils_disk.mount(fs_target, fs_dest, 'virtiofs', session=session): - test.fail('Mount virtiofs target failed.') + error_context.context( + f"Mount virtiofs target {fs_target} to {fs_dest} inside" " guest.", + test.log.info, + ) + if not utils_disk.mount(fs_target, fs_dest, "virtiofs", session=session): + test.fail("Mount virtiofs target failed.") # can't change file's owner and group - test_file = "%s/test" % fs_dest - session.cmd("echo aaa > %s" % test_file) - s, o = session.cmd_status_output("chgrp root %s" % test_file) + test_file = f"{fs_dest}/test" + session.cmd(f"echo aaa > {test_file}") + s, o = session.cmd_status_output(f"chgrp root {test_file}") if not s: - test.fail("Should not change file's owner/group because" - " it's unprivileged, the output is %s" % o) + test.fail( + "Should not change file's owner/group because" + f" it's unprivileged, the output is {o}" + ) else: # start virtiofs - error_context.context("Start virtiofs service in guest.", - test.log.info) + error_context.context("Start virtiofs service in guest.", test.log.info) virtio_fs_utils.start_viofs_service(test, params, session) # get fs dest for vm virtio_fs_disk_label = fs_target - error_context.context("Get Volume letter of virtio fs target, the disk" - "lable is %s." % virtio_fs_disk_label, - test.log.info) - vol_con = "VolumeName='%s'" % virtio_fs_disk_label + error_context.context( + "Get Volume letter of virtio fs target, the disk" + f"lable is {virtio_fs_disk_label}.", + test.log.info, + ) + vol_con = f"VolumeName='{virtio_fs_disk_label}'" volume_letter = utils_misc.wait_for( - lambda: utils_misc.get_win_disk_vol(session, condition=vol_con), cmd_timeout) # pylint: disable=E0606 + lambda: utils_misc.get_win_disk_vol(session, condition=vol_con), + cmd_timeout, + ) # pylint: disable=E0606 if volume_letter is None: test.fail("Could not get virtio-fs mounted volume letter.") - fs_dest = "%s:" % volume_letter + fs_dest = f"{volume_letter}:" # basic io test virtio_fs_utils.basic_io_test(test, params, session) @@ -132,7 +140,7 @@ def run(test, params, env): # driver verifier is enabled win_driver_utils.memory_leak_check(vm, test, params) else: - utils_disk.umount(fs_target, fs_dest, 'virtiofs', session=session) + utils_disk.umount(fs_target, fs_dest, "virtiofs", session=session) utils_misc.safe_rmdir(fs_dest, session=session) if vm and vm.is_alive(): vm.destroy() diff --git a/qemu/tests/virtio_fs_set_capability.py b/qemu/tests/virtio_fs_set_capability.py index f84333ef1a..0300118da9 100644 --- a/qemu/tests/virtio_fs_set_capability.py +++ b/qemu/tests/virtio_fs_set_capability.py @@ -2,20 +2,18 @@ import shutil import aexpect - from avocado.utils import process - -from virttest import data_dir -from virttest import env_process -from virttest import error_context -from virttest import utils_disk -from virttest import utils_misc -from virttest import utils_test - +from virttest import ( + data_dir, + env_process, + error_context, + utils_disk, + utils_misc, + utils_test, +) from virttest.utils_windows import virtio_win -from provider import virtio_fs_utils -from provider import win_driver_utils +from provider import virtio_fs_utils, win_driver_utils @error_context.context_aware @@ -34,16 +32,16 @@ def run(test, params, env): :param params: Dictionary with the test parameters. :param env: Dictionary with test environment. 
""" + def get_viofs_exe(session): """ Get viofs.exe from virtio win iso,such as E:\viofs\2k19\amd64 """ media_type = params["virtio_win_media_type"] try: - get_drive_letter = getattr(virtio_win, "drive_letter_%s" % media_type) - get_product_dirname = getattr(virtio_win, - "product_dirname_%s" % media_type) - get_arch_dirname = getattr(virtio_win, "arch_dirname_%s" % media_type) + get_drive_letter = getattr(virtio_win, f"drive_letter_{media_type}") + get_product_dirname = getattr(virtio_win, f"product_dirname_{media_type}") + get_arch_dirname = getattr(virtio_win, f"arch_dirname_{media_type}") except AttributeError: test.error("Not supported virtio win media type '%s'", media_type) viowin_ltr = get_drive_letter(session) @@ -56,9 +54,9 @@ def get_viofs_exe(session): if not guest_arch: test.error("Could not get architecture dirname of the vm") - exe_middle_path = ("{name}\\{arch}" if media_type == "iso" - else "{arch}\\{name}").format(name=guest_name, - arch=guest_arch) + exe_middle_path = ( + "{name}\\{arch}" if media_type == "iso" else "{arch}\\{name}" + ).format(name=guest_name, arch=guest_arch) exe_file_name = "virtiofs.exe" exe_find_cmd = 'dir /b /s %s\\%s | findstr "\\%s\\\\"' exe_find_cmd %= (viowin_ltr, exe_file_name, exe_middle_path) @@ -67,29 +65,29 @@ def get_viofs_exe(session): return exe_path # data io config - cmd_dd = params.get('cmd_dd') - cmd_md5 = params.get('cmd_md5') - io_timeout = params.get_numeric('io_timeout') + cmd_dd = params.get("cmd_dd") + cmd_md5 = params.get("cmd_md5") + io_timeout = params.get_numeric("io_timeout") # remove capability config - cmd_create_fs_source = params.get('cmd_create_fs_source') - cmd_run_virtiofsd = params.get('cmd_run_virtiofsd') - capability = params.get('capability') - cmd_capsh_print = params.get('cmd_capsh_print') - cmd_capsh_drop = params.get('cmd_capsh_drop') + cmd_create_fs_source = params.get("cmd_create_fs_source") + cmd_run_virtiofsd = params.get("cmd_run_virtiofsd") + capability = params.get("capability") + cmd_capsh_print = params.get("cmd_capsh_print") + cmd_capsh_drop = params.get("cmd_capsh_drop") # set trusted config - cmd_yum_attr = params.get('cmd_yum_attr') - cmd_set_trusted = params.get('cmd_set_trusted') - cmd_get_trusted = params.get('cmd_get_trusted') - cmd_create_file = params.get('cmd_create_file') - cmd_set_capability = params.get('cmd_set_capability') - cmd_get_capability = params.get('cmd_get_capability') - cmd_echo_file = params.get('cmd_echo_file') + cmd_yum_attr = params.get("cmd_yum_attr") + cmd_set_trusted = params.get("cmd_set_trusted") + cmd_get_trusted = params.get("cmd_get_trusted") + cmd_create_file = params.get("cmd_create_file") + cmd_set_capability = params.get("cmd_set_capability") + cmd_get_capability = params.get("cmd_get_capability") + cmd_echo_file = params.get("cmd_echo_file") # set fs daemon path - fs_source = params.get('fs_source_dir') - base_dir = params.get('fs_source_base_dir', data_dir.get_data_dir()) + fs_source = params.get("fs_source_dir") + base_dir = params.get("fs_source_base_dir", data_dir.get_data_dir()) if not os.path.isabs(fs_source): fs_source = os.path.join(base_dir, fs_source) @@ -98,17 +96,21 @@ def get_viofs_exe(session): test.log.info("Create filesystem source %s.", fs_source) os.makedirs(fs_source) - sock_path = os.path.join(data_dir.get_tmp_dir(), - '-'.join(('avocado-vt-vm1', 'viofs', 'virtiofsd.sock'))) - params['fs_source_user_sock_path'] = sock_path + sock_path = os.path.join( + data_dir.get_tmp_dir(), "-".join(("avocado-vt-vm1", "viofs", "virtiofsd.sock")) + ) + 
params["fs_source_user_sock_path"] = sock_path # set capability - cmd_capsh_drop = (cmd_capsh_drop % capability) + cmd_capsh_drop = cmd_capsh_drop % capability error_context.context("Remove capability on host.", test.log.info) - session = aexpect.ShellSession(cmd_capsh_drop, auto_close=False, - output_func=utils_misc.log_line, - output_params=('virtiofs_fs-virtiofs.log',), - prompt=r"^\[.*\][\#\$]\s*$") + session = aexpect.ShellSession( + cmd_capsh_drop, + auto_close=False, + output_func=utils_misc.log_line, + output_params=("virtiofs_fs-virtiofs.log",), + prompt=r"^\[.*\][\#\$]\s*$", + ) output = session.cmd_output(cmd_capsh_print) test.log.info("Check current capability is %s.", output) if capability in output: @@ -117,9 +119,9 @@ def get_viofs_exe(session): # run daemon session.sendline(cmd_create_fs_source) cmd_run_virtiofsd = cmd_run_virtiofsd % sock_path - cmd_run_virtiofsd += ' -o source=%s' % fs_source - cmd_run_virtiofsd += params.get('fs_binary_extra_options') - test.log.info('Running daemon command %s.', cmd_run_virtiofsd) + cmd_run_virtiofsd += f" -o source={fs_source}" + cmd_run_virtiofsd += params.get("fs_binary_extra_options") + test.log.info("Running daemon command %s.", cmd_run_virtiofsd) session.sendline(cmd_run_virtiofsd) params["start_vm"] = "yes" @@ -136,37 +138,39 @@ def get_viofs_exe(session): check_installed_cmd = params["check_installed_cmd"] % install_path # Check whether windows driver is running,and enable driver verifier - session = utils_test.qemu.windrv_check_running_verifier(session, - vm, test, - driver_name) + session = utils_test.qemu.windrv_check_running_verifier( + session, vm, test, driver_name + ) # Install winfsp tool - error_context.context("Install winfsp for windows guest.", - test.log.info) + error_context.context("Install winfsp for windows guest.", test.log.info) is_installed = session.cmd_status(check_installed_cmd) == 0 if is_installed: test.log.info("Winfsp tool is already installed.") else: - install_cmd = utils_misc.set_winutils_letter(session, - params["install_cmd"]) + install_cmd = utils_misc.set_winutils_letter(session, params["install_cmd"]) session.cmd(install_cmd, cmd_timeout) - if not utils_misc.wait_for(lambda: not session.cmd_status( - check_installed_cmd), 60): + if not utils_misc.wait_for( + lambda: not session.cmd_status(check_installed_cmd), 60 + ): test.error("Winfsp tool is not installed.") - fs_params = params.object_params('fs') + fs_params = params.object_params("fs") fs_target = fs_params.get("fs_target") fs_dest = fs_params.get("fs_dest") - host_data = os.path.join(fs_source, 'fs_test') + host_data = os.path.join(fs_source, "fs_test") try: if not is_windows: - error_context.context("Create a destination directory %s " - "inside guest." % fs_dest, test.log.info) + error_context.context( + f"Create a destination directory {fs_dest} " "inside guest.", + test.log.info, + ) utils_misc.make_dirs(fs_dest, session) - error_context.context("Mount virtiofs target %s to %s inside" - " guest." 
% (fs_target, fs_dest), - test.log.info) - if not utils_disk.mount(fs_target, fs_dest, 'virtiofs', session=session): - test.fail('Mount virtiofs target failed.') + error_context.context( + f"Mount virtiofs target {fs_target} to {fs_dest} inside" " guest.", + test.log.info, + ) + if not utils_disk.mount(fs_target, fs_dest, "virtiofs", session=session): + test.fail("Mount virtiofs target failed.") else: error_context.context("Start virtiofs service in guest.", test.log.info) viofs_sc_create_cmd = params["viofs_sc_create_cmd"] @@ -181,9 +185,13 @@ def get_viofs_exe(session): # copy virtiofs.exe to c: in case the virtio-win cdrom volume name # is changed in other cases of a loop. session.cmd(params.get("viofs_exe_copy_cmd") % exe_path) - sc_create_s, sc_create_o = session.cmd_status_output(viofs_sc_create_cmd) + sc_create_s, sc_create_o = session.cmd_status_output( + viofs_sc_create_cmd + ) if sc_create_s != 0: - test.fail("Failed to register virtiofs service, output is %s" % sc_create_o) + test.fail( + f"Failed to register virtiofs service, output is {sc_create_o}" + ) test.log.info("Check if virtiofs service is started.") status, output = session.cmd_status_output(viofs_sc_query_cmd) @@ -191,23 +199,29 @@ def get_viofs_exe(session): test.log.info("Start virtiofs service.") sc_start_s, sc_start_o = session.cmd_status_output(viofs_sc_start_cmd) if sc_start_s != 0: - test.fail("Failed to start virtiofs service, output is %s" % sc_start_o) + test.fail( + f"Failed to start virtiofs service, output is {sc_start_o}" + ) else: test.log.info("Virtiofs service is running.") # enable debug log. viofs_debug_enable_cmd = params.get("viofs_debug_enable_cmd") viofs_log_enable_cmd = params.get("viofs_log_enable_cmd") if viofs_debug_enable_cmd and viofs_log_enable_cmd: - error_context.context("Check if virtiofs debug log is enabled in guest.", test.log.info) + error_context.context( + "Check if virtiofs debug log is enabled in guest.", test.log.info + ) cmd = params.get("viofs_reg_query_cmd") ret = session.cmd_output(cmd) if "debugflags" not in ret.lower() or "debuglogfile" not in ret.lower(): - error_context.context("Configure virtiofs debug log.", test.log.info) + error_context.context( + "Configure virtiofs debug log.", test.log.info + ) for reg_cmd in (viofs_debug_enable_cmd, viofs_log_enable_cmd): - error_context.context("Set %s " % reg_cmd, test.log.info) + error_context.context(f"Set {reg_cmd} ", test.log.info) s, o = session.cmd_status_output(reg_cmd) if s: - test.fail("Fail command: %s. Output: %s" % (reg_cmd, o)) + test.fail(f"Fail command: {reg_cmd}. Output: {o}") error_context.context("Reboot guest.", test.log.info) session = vm.reboot() else: @@ -215,25 +229,30 @@ def get_viofs_exe(session): # get fs dest for vm virtio_fs_disk_label = fs_target - error_context.context("Get Volume letter of virtio fs target, the disk" - "lable is %s." 
% virtio_fs_disk_label, - test.log.info) - vol_con = "VolumeName='%s'" % virtio_fs_disk_label + error_context.context( + "Get Volume letter of virtio fs target, the disk" + f"lable is {virtio_fs_disk_label}.", + test.log.info, + ) + vol_con = f"VolumeName='{virtio_fs_disk_label}'" vol_func = utils_misc.get_win_disk_vol(session, condition=vol_con) volume_letter = utils_misc.wait_for(lambda: vol_func, 120) if volume_letter is None: test.fail("Could not get virtio-fs mounted volume letter.") - fs_dest = "%s:" % volume_letter + fs_dest = f"{volume_letter}:" - guest_file = os.path.join(fs_dest, 'fs_test') + guest_file = os.path.join(fs_dest, "fs_test") test.log.info("The guest file in shared dir is %s.", guest_file) try: # No extended attributes (file steams) in virtio-fs for windows if not is_windows: if cmd_set_trusted: - error_context.context("Trusted attribute test without " - "%s for linux guest" % capability, test.log.info) + error_context.context( + "Trusted attribute test without " + f"{capability} for linux guest", + test.log.info, + ) host_attributes = params["host_attributes"] guest_trusted = params["guest_trusted"] file_capability = params["file_capability"] @@ -241,37 +260,52 @@ def get_viofs_exe(session): session.cmd(cmd_yum_attr) session.cmd(cmd_set_trusted) output = session.cmd_output(cmd_get_trusted) - test.log.info("Failed to check the trusted attribute from " - "guest, the output is %s.", output) + test.log.info( + "Failed to check the trusted attribute from " + "guest, the output is %s.", + output, + ) if guest_trusted not in output: - test.fail("It's failed to check the trusted info from the guest.") + test.fail( + "It's failed to check the trusted info from the guest." + ) process.run(cmd_yum_attr) - output = str(process.run('getfattr %s' % fs_source).stdout.strip()) + output = str(process.run(f"getfattr {fs_source}").stdout.strip()) test.log.info("The host file trusted is %s.", output) if host_attributes not in output: - test.fail("Failed to check the trusted attribute from " - "host, the output is %s." % output) + test.fail( + "Failed to check the trusted attribute from " + f"host, the output is {output}." + ) session.cmd(cmd_create_file) - error_context.context("Privileged capabilities test without " - "%s for linux guest" % capability, test.log.info) + error_context.context( + "Privileged capabilities test without " + f"{capability} for linux guest", + test.log.info, + ) session.cmd(cmd_set_capability) output = session.cmd_output(cmd_get_capability) test.log.info("The guest file capability is %s.", output) if file_capability not in output: - test.fail("Failed to check the trusted attribute from " - "guest, the output is %s." % output) + test.fail( + "Failed to check the trusted attribute from " + f"guest, the output is {output}." + ) test.log.info("Modify file content and check the file capability.") session.cmd(cmd_echo_file) output = session.cmd_output(cmd_get_capability) test.log.info("The guest change file capability is %s.", output) if file_capability in output: - test.fail("Still can get capability after file content is changed.") + test.fail( + "Still can get capability after file content is changed." + ) if cmd_dd: - error_context.context("Creating file under %s inside guest." 
% - fs_dest, test.log.info) + error_context.context( + f"Creating file under {fs_dest} inside guest.", test.log.info + ) session.cmd(cmd_dd % guest_file, io_timeout) if not is_windows: @@ -279,20 +313,25 @@ def get_viofs_exe(session): else: guest_file_win = guest_file.replace("/", "\\") cmd_md5_vm = cmd_md5 % (volume_letter, guest_file_win) - md5_guest = session.cmd_output(cmd_md5_vm, io_timeout).strip().split()[0] + md5_guest = ( + session.cmd_output(cmd_md5_vm, io_timeout).strip().split()[0] + ) test.log.info(md5_guest) - md5_host = process.run("md5sum %s" % host_data, - io_timeout).stdout_text.strip().split()[0] + md5_host = ( + process.run(f"md5sum {host_data}", io_timeout) + .stdout_text.strip() + .split()[0] + ) if md5_guest != md5_host: - test.fail('The md5 value of host is not same to guest.') + test.fail("The md5 value of host is not same to guest.") # for windows guest, disable/uninstall driver to get memory leak based on # driver verifier is enabled if is_windows: win_driver_utils.memory_leak_check(vm, test, params) finally: if not is_windows: - utils_disk.umount(fs_target, fs_dest, 'virtiofs', session=session) + utils_disk.umount(fs_target, fs_dest, "virtiofs", session=session) utils_misc.safe_rmdir(fs_dest, session=session) finally: if is_windows: diff --git a/qemu/tests/virtio_fs_share_data.py b/qemu/tests/virtio_fs_share_data.py index 74e03d3745..bfb9b1d8bc 100644 --- a/qemu/tests/virtio_fs_share_data.py +++ b/qemu/tests/virtio_fs_share_data.py @@ -3,24 +3,23 @@ import time import aexpect - from avocado.utils import process - -from virttest import data_dir -from virttest import env_process -from virttest import error_context -from virttest import nfs -from virttest import utils_disk -from virttest import utils_misc -from virttest import utils_test +from virttest import ( + data_dir, + env_process, + error_context, + nfs, + utils_disk, + utils_misc, + utils_selinux, + utils_test, +) +from virttest.qemu_devices import qdevices from virttest.remote import scp_to_remote from virttest.utils_windows import virtio_win -from virttest.qemu_devices import qdevices -from virttest import utils_selinux +from provider import virtio_fs_utils, win_driver_utils from provider.storage_benchmark import generate_instance -from provider import win_driver_utils -from provider import virtio_fs_utils @error_context.context_aware @@ -38,6 +37,7 @@ def run(test, params, env): :param params: Dictionary with the test parameters. :param env: Dictionary with test environment. 
""" + def get_viofs_exe(session): """ Get viofs.exe from virtio win iso,such as E:\viofs\2k19\amd64 @@ -45,10 +45,9 @@ def get_viofs_exe(session): test.log.info("Get virtiofs exe full path.") media_type = params["virtio_win_media_type"] try: - get_drive_letter = getattr(virtio_win, "drive_letter_%s" % media_type) - get_product_dirname = getattr(virtio_win, - "product_dirname_%s" % media_type) - get_arch_dirname = getattr(virtio_win, "arch_dirname_%s" % media_type) + get_drive_letter = getattr(virtio_win, f"drive_letter_{media_type}") + get_product_dirname = getattr(virtio_win, f"product_dirname_{media_type}") + get_arch_dirname = getattr(virtio_win, f"arch_dirname_{media_type}") except AttributeError: test.error("Not supported virtio win media type '%s'", media_type) viowin_ltr = get_drive_letter(session) @@ -61,9 +60,9 @@ def get_viofs_exe(session): if not guest_arch: test.error("Could not get architecture dirname of the vm") - exe_middle_path = ("{name}\\{arch}" if media_type == "iso" - else "{arch}\\{name}").format(name=guest_name, - arch=guest_arch) + exe_middle_path = ( + "{name}\\{arch}" if media_type == "iso" else "{arch}\\{name}" + ).format(name=guest_name, arch=guest_arch) exe_file_name = "virtiofs.exe" exe_find_cmd = 'dir /b /s %s\\%s | findstr "\\%s\\\\"' exe_find_cmd %= (viowin_ltr, exe_file_name, exe_middle_path) @@ -89,11 +88,14 @@ def check_socket_group(): sock_path = device.get_param("sock_path") break sock_path_info = process.system_output(cmd_get_sock % sock_path) - group_name = sock_path_info.decode(encoding="utf-8", - errors="strict").strip().split()[3] + group_name = ( + sock_path_info.decode(encoding="utf-8", errors="strict").strip().split()[3] + ) if group_name != socket_group: - test.fail("Socket-group name is not correct.\nIt should be %s,but" - " the output is %s" % (socket_group, group_name)) + test.fail( + f"Socket-group name is not correct.\nIt should be {socket_group},but" + f" the output is {group_name}" + ) def is_autoit_finished(session, process_name): """ @@ -118,7 +120,7 @@ def viofs_svc_create(cmd): session.cmd(params.get("viofs_exe_copy_cmd") % exe_path) sc_create_s, sc_create_o = session.cmd_status_output(cmd) if sc_create_s != 0: - test.fail("Failed to register virtiofs service, output is %s" % sc_create_o) + test.fail(f"Failed to register virtiofs service, output is {sc_create_o}") def viofs_svc_stop_start(action, cmd, expect_status): """ @@ -128,78 +130,78 @@ def viofs_svc_stop_start(action, cmd, expect_status): :param cmd: cmd to start or stop virtiofs service :param expect_status: RUNNING or STOPPED. """ - error_context.context("Try to %s VirtioFsSvc service." % action, - test.log.info) + error_context.context(f"Try to {action} VirtioFsSvc service.", test.log.info) session.cmd(cmd) output = session.cmd_output(viofs_sc_query_cmd) # pylint: disable=E0606 if expect_status not in output: - test.fail("Could not %s VirtioFsSvc service, " - "detail: '%s'" % (action, output)) + test.fail(f"Could not {action} VirtioFsSvc service, " f"detail: '{output}'") def start_multifs_instance(): """ Only for windows and only for multiple shared directory. """ - error_context.context("MultiFS-%s: Start virtiofs instance with" - " tag %s to %s." 
- % (fs, fs_target, fs_volume_label), - test.log.info) + error_context.context( + f"MultiFS-{fs}: Start virtiofs instance with" + f" tag {fs_target} to {fs_volume_label}.", + test.log.info, + ) instance_start_cmd = params["instance_start_cmd"] - output = session.cmd_output(instance_start_cmd % (fs_target, - fs_target, - fs_volume_label)) - if re.search('KO.*error', output, re.I): - test.fail("MultiFS-%s: Start virtiofs instance failed, " - "output is %s." % (fs, output)) + output = session.cmd_output( + instance_start_cmd % (fs_target, fs_target, fs_volume_label) + ) + if re.search("KO.*error", output, re.I): + test.fail( + f"MultiFS-{fs}: Start virtiofs instance failed, " f"output is {output}." + ) # data io config - test_file = params.get('test_file') - folder_test = params.get('folder_test') - cmd_dd = params.get('cmd_dd') - cmd_md5 = params.get('cmd_md5') - cmd_new_folder = params.get('cmd_new_folder') - cmd_copy_file = params.get('cmd_copy_file') - cmd_rename_folder = params.get('cmd_rename_folder') - cmd_check_folder = params.get('cmd_check_folder') - cmd_del_folder = params.get('cmd_del_folder') + test_file = params.get("test_file") + folder_test = params.get("folder_test") + cmd_dd = params.get("cmd_dd") + cmd_md5 = params.get("cmd_md5") + cmd_new_folder = params.get("cmd_new_folder") + cmd_copy_file = params.get("cmd_copy_file") + cmd_rename_folder = params.get("cmd_rename_folder") + cmd_check_folder = params.get("cmd_check_folder") + cmd_del_folder = params.get("cmd_del_folder") # soft link config - cmd_symblic_file = params.get('cmd_symblic_file') - cmd_symblic_folder = params.get('cmd_symblic_folder') - file_link = params.get('file_link') - folder_link = params.get('folder_link') + cmd_symblic_file = params.get("cmd_symblic_file") + cmd_symblic_folder = params.get("cmd_symblic_folder") + file_link = params.get("file_link") + folder_link = params.get("folder_link") # pjdfs test config - cmd_pjdfstest = params.get('cmd_pjdfstest') - cmd_unpack = params.get('cmd_unpack') - cmd_yum_deps = params.get('cmd_yum_deps') - cmd_autoreconf = params.get('cmd_autoreconf') - cmd_configure = params.get('cmd_configure') - cmd_make = params.get('cmd_make') - pjdfstest_pkg = params.get('pjdfstest_pkg') - username = params.get('username') - password = params.get('password') - port = params.get('file_transfer_port') + cmd_pjdfstest = params.get("cmd_pjdfstest") + cmd_unpack = params.get("cmd_unpack") + cmd_yum_deps = params.get("cmd_yum_deps") + cmd_autoreconf = params.get("cmd_autoreconf") + cmd_configure = params.get("cmd_configure") + cmd_make = params.get("cmd_make") + pjdfstest_pkg = params.get("pjdfstest_pkg") + username = params.get("username") + password = params.get("password") + port = params.get("file_transfer_port") # fio config - fio_options = params.get('fio_options') - io_timeout = params.get_numeric('io_timeout') + fio_options = params.get("fio_options") + io_timeout = params.get_numeric("io_timeout") # iozone config - iozone_options = params.get('iozone_options') + iozone_options = params.get("iozone_options") # xfstest config - cmd_xfstest = params.get('cmd_xfstest') - fs_dest_fs2 = params.get('fs_dest_fs2') - cmd_download_xfstest = params.get('cmd_download_xfstest') - cmd_yum_install = params.get('cmd_yum_install') - cmd_make_xfs = params.get('cmd_make_xfs') - cmd_setenv = params.get('cmd_setenv') - cmd_setenv_nfs = params.get('cmd_setenv_nfs', '') - cmd_useradd = params.get('cmd_useradd') - cmd_get_tmpfs = params.get('cmd_get_tmpfs') - cmd_set_tmpfs = params.get('cmd_set_tmpfs') 
- size_mem1 = params.get('size_mem1') + cmd_xfstest = params.get("cmd_xfstest") + fs_dest_fs2 = params.get("fs_dest_fs2") + cmd_download_xfstest = params.get("cmd_download_xfstest") + cmd_yum_install = params.get("cmd_yum_install") + cmd_make_xfs = params.get("cmd_make_xfs") + cmd_setenv = params.get("cmd_setenv") + cmd_setenv_nfs = params.get("cmd_setenv_nfs", "") + cmd_useradd = params.get("cmd_useradd") + cmd_get_tmpfs = params.get("cmd_get_tmpfs") + cmd_set_tmpfs = params.get("cmd_set_tmpfs") + size_mem1 = params.get("size_mem1") # git init config git_init_cmd = params.get("git_init_cmd") @@ -235,7 +237,7 @@ def start_multifs_instance(): cmd_run_sesuit = params.get("cmd_run_sesuit") # nfs config - setup_local_nfs = params.get('setup_local_nfs') + setup_local_nfs = params.get("setup_local_nfs") setup_hugepages = params.get("setup_hugepages", "no") == "yes" socket_group_test = params.get("socket_group_test", "no") == "yes" @@ -254,7 +256,9 @@ def start_multifs_instance(): if cmd_xfstest and not setup_hugepages: # /dev/shm is the default memory-backend-file, the default value is the # half of the host memory. Increase it to guest memory size to avoid crash - ori_tmpfs_size = process.run(cmd_get_tmpfs, shell=True).stdout_text.replace("\n", "") + ori_tmpfs_size = process.run(cmd_get_tmpfs, shell=True).stdout_text.replace( + "\n", "" + ) test.log.debug("original tmpfs size is %s", ori_tmpfs_size) params["post_command"] = cmd_set_tmpfs % ori_tmpfs_size params["pre_command"] = cmd_set_tmpfs % size_mem1 @@ -269,7 +273,9 @@ def start_multifs_instance(): params["nfs_mount_dir"] = nfs_params.get("fs_source_dir") if cmd_get_stdev: fs_source_dir = nfs_params.get("fs_source_dir") - params["nfs_mount_dir"] = os.path.join(fs_source_dir, nfs_mount_dst_name) + params["nfs_mount_dir"] = os.path.join( + fs_source_dir, nfs_mount_dst_name + ) nfs_local = nfs.Nfs(params) nfs_local.setup() nfs_local_dic[fs] = nfs_local @@ -281,8 +287,9 @@ def start_multifs_instance(): process.system(cmd_dd_on_host % dd_of_on_host, timeout=300) cmd_losetup_query_on_host = params.get("cmd_losetup_query_on_host") - loop_device = process.run( - cmd_losetup_query_on_host, timeout=60).stdout.decode().strip() + loop_device = ( + process.run(cmd_losetup_query_on_host, timeout=60).stdout.decode().strip() + ) if not loop_device: test.fail("Can't find a valid loop device! ") # loop device setups on host @@ -295,9 +302,8 @@ def start_multifs_instance(): cmd_mkfs_on_host = cmd_mkfs_on_host + loop_device process.system(cmd_mkfs_on_host, timeout=60) # mount on host - fs_source = params.get('fs_source_dir') - base_dir = params.get('fs_source_base_dir', - data_dir.get_data_dir()) + fs_source = params.get("fs_source_dir") + base_dir = params.get("fs_source_base_dir", data_dir.get_data_dir()) if not os.path.isabs(fs_source): fs_source = os.path.join(base_dir, fs_source) if not utils_misc.check_exists(fs_source): @@ -307,20 +313,26 @@ def start_multifs_instance(): if security_label_test: # make sure selinux is enabled on host before sucurity label test. - error_context.context("Set selinux to %s status on host before" - " starting virtiofsd and vm." % se_mode, - test.log.info) + error_context.context( + f"Set selinux to {se_mode} status on host before" + " starting virtiofsd and vm.", + test.log.info, + ) se_mode_host_before = utils_selinux.get_status() if se_mode_host_before.lower() != se_mode: try: utils_selinux.set_status(se_mode) except Exception as err_msg: - test.cancel("Setting selinux failed on host with" - " %s." 
% str(err_msg)) + test.cancel("Setting selinux failed on host with" f" {str(err_msg)}.") try: vm = None - if (cmd_xfstest or setup_local_nfs or setup_hugepages or - setup_filesystem_on_host or security_label_test): + if ( + cmd_xfstest + or setup_local_nfs + or setup_hugepages + or setup_filesystem_on_host + or security_label_test + ): params["start_vm"] = "yes" env_process.preprocess(test, params, env) @@ -335,30 +347,32 @@ def start_multifs_instance(): if security_label_test and os_type == "linux": # make sure selinux is enabled on guest. - error_context.context("Set selinux to %s status on" - " guest." % se_mode, test.log.info) + error_context.context( + f"Set selinux to {se_mode} status on" " guest.", test.log.info + ) se_mode_guest_before = session.cmd_output("getenforce").strip() if se_mode_guest_before != se_mode: - test.log.info("Need to change selinux mode to %s." % se_mode) + test.log.info("Need to change selinux mode to %s.", se_mode) if se_mode_guest_before == "disabled": - cmd = "sed -i 's/^SELINUX=.*/SELINUX=%s/g'" % se_mode + cmd = f"sed -i 's/^SELINUX=.*/SELINUX={se_mode}/g'" cmd += " /etc/selinux/config" session.cmd(cmd) session = vm.reboot(session) if se_mode_guest_before == "permissive": - session.cmd("setenforce %s" % se_mode) + session.cmd(f"setenforce {se_mode}") if os_type == "windows": cmd_timeout = params.get_numeric("cmd_timeout", 120) driver_name = params["driver_name"] # Check whether windows driver is running,and enable driver verifier - session = utils_test.qemu.windrv_check_running_verifier(session, - vm, test, - driver_name) + session = utils_test.qemu.windrv_check_running_verifier( + session, vm, test, driver_name + ) # create virtiofs service viofs_svc_name = params["viofs_svc_name"] - virtio_fs_utils.create_viofs_service(test, params, session, - service=viofs_svc_name) + virtio_fs_utils.create_viofs_service( + test, params, session, service=viofs_svc_name + ) viofs_svc_name = params.get("viofs_svc_name", "VirtioFsSvc") for fs in params.objects("filesystems"): fs_params = params.object_params(fs) @@ -366,27 +380,33 @@ def start_multifs_instance(): fs_dest = fs_params.get("fs_dest") fs_volume_label = fs_params.get("volume_label") fs_source = fs_params.get("fs_source_dir") - base_dir = fs_params.get("fs_source_base_dir", - data_dir.get_data_dir()) + base_dir = fs_params.get("fs_source_base_dir", data_dir.get_data_dir()) if not os.path.isabs(fs_source): fs_source = os.path.join(base_dir, fs_source) host_data = os.path.join(fs_source, test_file) if os_type == "linux": - error_context.context("Create a destination directory %s " - "inside guest." % fs_dest, test.log.info) + error_context.context( + f"Create a destination directory {fs_dest} " "inside guest.", + test.log.info, + ) utils_misc.make_dirs(fs_dest, session) if not cmd_xfstest: - error_context.context("Mount virtiofs target %s to %s inside" - " guest." 
% (fs_target, fs_dest), - test.log.info) - if not utils_disk.mount(fs_target, fs_dest, 'virtiofs', session=session): - test.fail('Mount virtiofs target failed.') + error_context.context( + f"Mount virtiofs target {fs_target} to {fs_dest} inside" + " guest.", + test.log.info, + ) + if not utils_disk.mount( + fs_target, fs_dest, "virtiofs", session=session + ): + test.fail("Mount virtiofs target failed.") else: if params.get("viofs_svc_name", "VirtioFsSvc") == "VirtioFsSvc": - error_context.context("Start virtiofs service in guest.", - test.log.info) + error_context.context( + "Start virtiofs service in guest.", test.log.info + ) debug_log_operation = params.get("debug_log_operation") if debug_log_operation: session = virtio_fs_utils.operate_debug_log( @@ -394,29 +414,36 @@ def start_multifs_instance(): ) virtio_fs_utils.start_viofs_service(test, params, session) else: - error_context.context("Start winfsp.launcher" - " instance in guest.", test.log.info) + error_context.context( + "Start winfsp.launcher" " instance in guest.", test.log.info + ) start_multifs_instance() # get fs dest for vm virtio_fs_disk_label = fs_target - error_context.context("Get Volume letter of virtio fs target, the disk" - "lable is %s." % virtio_fs_disk_label, - test.log.info) - vol_con = "VolumeName='%s'" % virtio_fs_disk_label + error_context.context( + "Get Volume letter of virtio fs target, the disk" + f"lable is {virtio_fs_disk_label}.", + test.log.info, + ) + vol_con = f"VolumeName='{virtio_fs_disk_label}'" volume_letter = utils_misc.wait_for( - lambda: utils_misc.get_win_disk_vol(session, condition=vol_con), cmd_timeout) # pylint: disable=E0606 + lambda: utils_misc.get_win_disk_vol(session, condition=vol_con), + cmd_timeout, + ) # pylint: disable=E0606 if volume_letter is None: test.fail("Could not get virtio-fs mounted volume letter.") - fs_dest = "%s:" % volume_letter + fs_dest = f"{volume_letter}:" guest_file = os.path.join(fs_dest, test_file) test.log.info("The guest file in shared dir is %s", guest_file) try: if cmd_dd: - error_context.context("Creating file under %s inside " - "guest." % fs_dest, test.log.info) + error_context.context( + f"Creating file under {fs_dest} inside " "guest.", + test.log.info, + ) session.cmd(cmd_dd % guest_file, io_timeout) if os_type == "linux": @@ -424,24 +451,32 @@ def start_multifs_instance(): else: guest_file_win = guest_file.replace("/", "\\") cmd_md5_vm = cmd_md5 % (volume_letter, guest_file_win) - md5_guest = session.cmd_output(cmd_md5_vm, io_timeout).strip().split()[0] + md5_guest = ( + session.cmd_output(cmd_md5_vm, io_timeout).strip().split()[0] + ) test.log.info(md5_guest) - md5_host = process.run("md5sum %s" % host_data, - io_timeout).stdout_text.strip().split()[0] + md5_host = ( + process.run(f"md5sum {host_data}", io_timeout) + .stdout_text.strip() + .split()[0] + ) if md5_guest != md5_host: - test.fail('The md5 value of host is not same to guest.') + test.fail("The md5 value of host is not same to guest.") viofs_log_file_cmd = params.get("viofs_log_file_cmd") if viofs_log_file_cmd: - error_context.context("Check if LOG file is created.", test.log.info) + error_context.context( + "Check if LOG file is created.", test.log.info + ) log_dir_s = session.cmd_status(viofs_log_file_cmd) if log_dir_s != 0: test.fail("Virtiofs log is not created.") - if folder_test == 'yes': - error_context.context("Folder test under %s inside " - "guest." 
% fs_dest, test.log.info) + if folder_test == "yes": + error_context.context( + f"Folder test under {fs_dest} inside " "guest.", test.log.info + ) session.cmd(cmd_new_folder % fs_dest) try: session.cmd(cmd_copy_file) @@ -461,52 +496,68 @@ def start_multifs_instance(): def file_check(cmd): s, o = session.cmd_status_output(cmd, io_timeout) if s: - test.fail("Case insensitive failed," - " the output is %s" % o) + test.fail("Case insensitive failed," f" the output is {o}") - error_context.context("Check if case insensitive is set in registry.", - test.log.info) + error_context.context( + "Check if case insensitive is set in registry.", test.log.info + ) cmd = params.get("viofs_reg_query_cmd") ret = session.cmd_output(cmd) if "caseinsensitive" not in ret.lower(): s, o = session.cmd_status_output(viofs_case_insense_enable_cmd) if s: - test.fail("Fail to set virtiofs case insensitive," - " output is %s" % o) + test.fail( + "Fail to set virtiofs case insensitive," + f" output is {o}" + ) else: error_context.context("Reboot guest.", test.log.info) session = vm.reboot() - error_context.context("Creating file and file name contain " - "uppercase letter in guest.", test.log.info) + error_context.context( + "Creating file and file name contain " + "uppercase letter in guest.", + test.log.info, + ) test_file_guest = test_file + "_Guest" guest_file = os.path.join(fs_dest, test_file_guest) - session.cmd("echo hello > %s" % guest_file, io_timeout) - - error_context.context("check the file with" - " uppercase letter name.", test.log.info) - guest_file_full_path = volume_letter + ":\\" + test_file_guest.upper() - cmd_check_file = "dir %s" % guest_file_full_path + session.cmd(f"echo hello > {guest_file}", io_timeout) + + error_context.context( + "check the file with" " uppercase letter name.", test.log.info + ) + guest_file_full_path = ( + volume_letter + ":\\" + test_file_guest.upper() + ) + cmd_check_file = f"dir {guest_file_full_path}" file_check(cmd_check_file) - cmd_check_md5sum = cmd_md5 % (volume_letter, test_file_guest.upper()) + cmd_check_md5sum = cmd_md5 % ( + volume_letter, + test_file_guest.upper(), + ) file_check(cmd_check_md5sum) error_context.context("Create file on host.", test.log.info) test_file_host = test_file + "_Host" host_data = os.path.join(fs_source, test_file_host) - process.system('touch %s' % host_data, io_timeout) + process.system(f"touch {host_data}", io_timeout) time.sleep(2) - error_context.context("check the file with" - " lowercase letter name.", test.log.info) - guest_file_full_path = volume_letter + ":\\" + test_file_host.lower() - cmd_check_file = "dir %s" % guest_file_full_path + error_context.context( + "check the file with" " lowercase letter name.", test.log.info + ) + guest_file_full_path = ( + volume_letter + ":\\" + test_file_host.lower() + ) + cmd_check_file = f"dir {guest_file_full_path}" file_check(cmd_check_file) cmd_check_md5sum = cmd_md5 % (volume_letter, test_file_host.lower()) file_check(cmd_check_md5sum) if cmd_symblic_file: - error_context.context("Symbolic test under %s inside " - "guest." 
% fs_dest, test.log.info) + error_context.context( + f"Symbolic test under {fs_dest} inside " "guest.", + test.log.info, + ) cmd_create_file = params["cmd_create_file"] session.cmd(cmd_new_folder % fs_dest) session.cmd(cmd_create_file) @@ -516,15 +567,19 @@ def file_check(cmd): if session.cmd_status(cmd_symblic_folder): test.fail("Creat symbolic folders failed.") - error_context.context("Compare symbolic link info in " - "the host and guest", test.log.info) + error_context.context( + "Compare symbolic link info in " "the host and guest", + test.log.info, + ) def __file_check(file, os_type): - cmd_map = {'win_host': 'cat %s', - 'win_guest': 'type %s', - 'linux_host': 'ls -l %s', - 'linux_guest': 'ls -l %s'} - if 'guest' in os_type: + cmd_map = { + "win_host": "cat %s", + "win_guest": "type %s", + "linux_host": "ls -l %s", + "linux_guest": "ls -l %s", + } + if "guest" in os_type: o = session.cmd_output(cmd_map[os_type] % file) else: o = process.run(cmd_map[os_type] % file).stdout_text @@ -532,47 +587,53 @@ def __file_check(file, os_type): if os_type == "linux": file_link_host = os.path.join(fs_source, file_link) - if (__file_check(file_link, 'linux_guest') != - __file_check(file_link_host, 'linux_host')): - test.fail("Symbolic file configured in host " - "and guest are inconsistent") - folder_link_host = os.path.join(fs_source, - folder_link) - if (__file_check(folder_link, 'linux_guest') != - __file_check(folder_link_host, 'linux_host')): - test.fail("Symbolic folder configured in " - "host and guest are inconsistent") + if __file_check(file_link, "linux_guest") != __file_check( + file_link_host, "linux_host" + ): + test.fail( + "Symbolic file configured in host " + "and guest are inconsistent" + ) + folder_link_host = os.path.join(fs_source, folder_link) + if __file_check(folder_link, "linux_guest") != __file_check( + folder_link_host, "linux_host" + ): + test.fail( + "Symbolic folder configured in " + "host and guest are inconsistent" + ) session.cmd("cd -") else: - content = session.cmd_output("type %s" % - test_file).strip() - link_guest = __file_check(file_link, 'win_guest') + content = session.cmd_output(f"type {test_file}").strip() + link_guest = __file_check(file_link, "win_guest") file_link_host = os.path.join(fs_source, file_link) - link_host = __file_check(file_link_host, 'win_host') + link_host = __file_check(file_link_host, "win_host") if link_guest != content or link_host != content: - test.fail("Symbolic file check failed," - " the real content is %s\n" - "the link file content in guest is %s\n" - "the link file content in host is %s." % - (content, link_guest, link_host)) + test.fail( + "Symbolic file check failed," + f" the real content is {content}\n" + f"the link file content in guest is {link_guest}\n" + f"the link file content in host is {link_host}." + ) # check the file in folder link folder_link_guest = folder_link + "\\" + test_file - link_guest = __file_check(folder_link_guest, 'win_guest') - folder_link_host = os.path.join(fs_source, - folder_link, - test_file) - link_host = __file_check(folder_link_host, 'win_host') + link_guest = __file_check(folder_link_guest, "win_guest") + folder_link_host = os.path.join( + fs_source, folder_link, test_file + ) + link_host = __file_check(folder_link_host, "win_host") if link_guest != content or link_host != content: - test.fail("Symbolic folder check failed," - " the real content is %s\n" - "the link file content in guest is %s\n" - "the link file content in host is %s." 
% - (content, link_guest, link_host)) + test.fail( + "Symbolic folder check failed," + f" the real content is {content}\n" + f"the link file content in guest is {link_guest}\n" + f"the link file content in host is {link_host}." + ) session.cmd("cd /d C:\\") if fio_options: - error_context.context("Run fio on %s." % fs_dest, test.log.info) - fio = generate_instance(params, vm, 'fio') + error_context.context(f"Run fio on {fs_dest}.", test.log.info) + fio = generate_instance(params, vm, "fio") try: for bs in params.get_list("stress_bs"): fio.run(fio_options % (guest_file, bs), io_timeout) @@ -581,8 +642,10 @@ def __file_check(file, os_type): vm.verify_dmesg() if iozone_options: - error_context.context("Run iozone test on %s." % fs_dest, test.log.info) - io_test = generate_instance(params, vm, 'iozone') + error_context.context( + f"Run iozone test on {fs_dest}.", test.log.info + ) + io_test = generate_instance(params, vm, "iozone") try: for bs in params.get_list("stress_bs"): io_test.run(iozone_options % (bs, guest_file), io_timeout) @@ -590,18 +653,24 @@ def __file_check(file, os_type): io_test.clean() if cmd_pjdfstest: - error_context.context("Run pjdfstest on %s." % fs_dest, test.log.info) - host_path = os.path.join(data_dir.get_deps_dir('pjdfstest'), pjdfstest_pkg) - scp_to_remote(host_addr, port, username, password, host_path, fs_dest) + error_context.context(f"Run pjdfstest on {fs_dest}.", test.log.info) + host_path = os.path.join( + data_dir.get_deps_dir("pjdfstest"), pjdfstest_pkg + ) + scp_to_remote( + host_addr, port, username, password, host_path, fs_dest + ) session.cmd(cmd_unpack.format(fs_dest), 180) session.cmd(cmd_yum_deps, 180) session.cmd(cmd_autoreconf % fs_dest, 180) session.cmd(cmd_configure.format(fs_dest), 180) session.cmd(cmd_make % fs_dest, io_timeout) status, output = session.cmd_status_output( - cmd_pjdfstest % fs_dest, io_timeout) - failed_test = output.split("-------------------")[1]\ - .split("Files=")[0] + cmd_pjdfstest % fs_dest, io_timeout + ) + failed_test = output.split("-------------------")[1].split( + "Files=" + )[0] # ignore the specific failed cases from pjdfstest ignore_cases = params.objects("pjdfstest_blacklist") matched_element = params.get("fs_dest", "/mnt") + r".*\.t" @@ -611,15 +680,16 @@ def __file_check(file, os_type): for case in cases_in_output.keys(): for ig_case in ignore_cases: if ig_case in case: - error_context.context("Warn: %s was failed!" 
- % ig_case, test.log.debug) + error_context.context( + f"Warn: {ig_case} was failed!", test.log.debug + ) cases_in_output[case] = True unexpected_fail_case = list(cases_in_output.values()).count(False) if status != 0 and unexpected_fail_case > 0: test.log.info(output) - test.fail('The pjdfstest failed.') + test.fail("The pjdfstest failed.") if cmd_xfstest: error_context.context("Run xfstest on guest.", test.log.info) @@ -641,74 +711,102 @@ def __file_check(file, os_type): try: output = session.cmd_output(cmd_xfstest, io_timeout) test.log.info("%s", output) - if 'Failed' in output: - test.fail('The xfstest failed.') + if "Failed" in output: + test.fail("The xfstest failed.") else: break except (aexpect.ShellStatusError, aexpect.ShellTimeoutError): - test.fail('The xfstest failed.') + test.fail("The xfstest failed.") if cmd_get_stdev: - error_context.context("Create files in local device and" - " nfs device ", test.log.info) + error_context.context( + "Create files in local device and" " nfs device ", test.log.info + ) file_in_local_host = os.path.join(fs_source, "file_test") - file_in_nfs_host = os.path.join(fs_source, nfs_mount_dst_name, - "file_test") - cmd_touch_file = "touch %s && touch %s" % (file_in_local_host, - file_in_nfs_host) + file_in_nfs_host = os.path.join( + fs_source, nfs_mount_dst_name, "file_test" + ) + cmd_touch_file = ( + f"touch {file_in_local_host} && touch {file_in_nfs_host}" + ) process.run(cmd_touch_file) - error_context.context("Check if the two files' st_dev are" - " the same on guest.", test.log.info) + error_context.context( + "Check if the two files' st_dev are" " the same on guest.", + test.log.info, + ) file_in_local_guest = os.path.join(fs_dest, "file_test") - file_in_nfs_guest = os.path.join(fs_dest, nfs_mount_dst_name, - "file_test") + file_in_nfs_guest = os.path.join( + fs_dest, nfs_mount_dst_name, "file_test" + ) if get_stdev(file_in_local_guest) == get_stdev(file_in_nfs_guest): test.fail("st_dev are the same on diffrent device.") if git_init_cmd: if os_type == "windows": error_context.context("Install git", test.log.info) - check_status, check_output = session.cmd_status_output(git_check_cmd) - if check_status and "not recognized" in check_output \ - or "cannot find the path" in check_output: + check_status, check_output = session.cmd_status_output( + git_check_cmd + ) + if ( + check_status + and "not recognized" in check_output + or "cannot find the path" in check_output + ): install_git_cmd = utils_misc.set_winutils_letter( - session, install_git_cmd) + session, install_git_cmd + ) status, output = session.cmd_status_output(install_git_cmd) if status: - test.error("Failed to install git, status=%s, output=%s" - % (status, output)) + test.error( + f"Failed to install git, status={status}, output={output}" + ) test.log.info("Wait for git installation to complete") utils_misc.wait_for( - lambda: is_autoit_finished(session, autoit_name), 360, 60, 5) - error_context.context("Git init test in %s" % fs_dest, test.log.info) + lambda: is_autoit_finished(session, autoit_name), + 360, + 60, + 5, + ) + error_context.context(f"Git init test in {fs_dest}", test.log.info) status, output = session.cmd_status_output(git_init_cmd % fs_dest) if status: - test.fail("Git init failed with %s" % output) + test.fail(f"Git init failed with {output}") if create_dir_winapi_cmd: - error_context.context("Create new directory with WinAPI's " - "CreateDirectory.", test.log.info) + error_context.context( + "Create new directory with WinAPI's " "CreateDirectory.", + test.log.info, + 
) session.cmd(create_dir_winapi_cmd % fs_dest) - ret = utils_misc.wait_for(lambda: not bool(session.cmd_status( - check_winapi_dir_cmd % fs_dest)), timeout=60) + ret = utils_misc.wait_for( + lambda: not bool( + session.cmd_status(check_winapi_dir_cmd % fs_dest) + ), + timeout=60, + ) if not ret: - test.fail("Create dir failed in 60s, output is %s" % - session.cmd_output(check_winapi_dir_cmd % fs_dest)) + test.fail( + f"Create dir failed in 60s, output is {session.cmd_output(check_winapi_dir_cmd % fs_dest)}" + ) error_context.context("Get virtiofsd log file.", test.log.info) vfsd_dev = vm.devices.get_by_params({"source": fs_source})[0] - vfd_log_name = '%s-%s.log' % (vfsd_dev.get_qid(), - vfsd_dev.get_param('name')) + vfd_log_name = "{}-{}.log".format( + vfsd_dev.get_qid(), + vfsd_dev.get_param("name"), + ) vfd_logfile = utils_misc.get_log_filename(vfd_log_name) error_context.context("Check virtiofsd log.", test.log.info) - pattern = r'Replying ERROR.*header.*OutHeader.*error.*-9' - with open(vfd_logfile, 'r') as f: + pattern = r"Replying ERROR.*header.*OutHeader.*error.*-9" + with open(vfd_logfile, "r") as f: for line in f.readlines(): if re.match(pattern, line, re.I): - test.fail("CreateDirectory cause virtiofsd-rs ERROR reply.") + test.fail( + "CreateDirectory cause virtiofsd-rs ERROR reply." + ) if getfattr_cmd: # testing security label. @@ -722,21 +820,26 @@ def check_attribute(object, xattr, get_type=True, side="guest"): :return: context type """ format = xattr + "=" + context_pattern - full_pattern = re.compile(r'%s' % format) + full_pattern = re.compile(rf"{format}") if side == "host": - xattr_content = process.system_output(getfattr_cmd % - (xattr, object), - shell=True - ).decode().strip() + xattr_content = ( + process.system_output( + getfattr_cmd % (xattr, object), shell=True + ) + .decode() + .strip() + ) result = re.search(full_pattern, xattr_content) if side == "guest": - xattr_content = session.cmd_output(getfattr_cmd % - (xattr, object)) + xattr_content = session.cmd_output( + getfattr_cmd % (xattr, object) + ) result = re.search(full_pattern, xattr_content) if not result: # pylint: disable=E0606 - test.fail("Attribute is not correct, the pattern is %s\n" - " the attribute is %s." % (full_pattern, - xattr_content)) + test.fail( + f"Attribute is not correct, the pattern is {full_pattern}\n" + f" the attribute is {xattr_content}." 
+ ) if get_type: return result.group(1) @@ -752,57 +855,77 @@ def check_security_label(file, folder, xattr_name): context_type_file = check_attribute(file, xattr_name) context_type_folder = check_attribute(folder, xattr_name) if not context_type_file == context_type_folder: - test.fail("Context type isn't correct.\n" - "File context type is %s\n" - "Shared folder context type is %s" - % (context_type_file, - context_type_folder)) + test.fail( + "Context type isn't correct.\n" + f"File context type is {context_type_file}\n" + f"Shared folder context type is {context_type_folder}" + ) test.log.info("Security.selinux xattr check with xattr mapping.") - error_context.context("Create a new file inside guest.", - test.log.info) + error_context.context( + "Create a new file inside guest.", test.log.info + ) file_new_in_guest = os.path.join(fs_dest, "file_guest") file_share_in_host = os.path.join(fs_source, "file_guest") - session.cmd("touch %s" % file_new_in_guest) + session.cmd(f"touch {file_new_in_guest}") time.sleep(1) - error_context.context("Check new file's security label" - " on guest.", test.log.info) + error_context.context( + "Check new file's security label" " on guest.", test.log.info + ) check_security_label(file_new_in_guest, fs_dest, selinux_xattr_name) - error_context.context("Check new file's attribute on" - " host.", test.log.info) - check_attribute(file_share_in_host, trust_selinux_attr_name, - get_type=False, side="host") - - error_context.context("Create a new file inside host.", - test.log.info) + error_context.context( + "Check new file's attribute on" " host.", test.log.info + ) + check_attribute( + file_share_in_host, + trust_selinux_attr_name, + get_type=False, + side="host", + ) + + error_context.context( + "Create a new file inside host.", test.log.info + ) file_new_in_host = os.path.join(fs_source, "file_host") file_share_in_guest = os.path.join(fs_dest, "file_host") - process.run("touch %s" % file_new_in_host, timeout=60) + process.run(f"touch {file_new_in_host}", timeout=60) time.sleep(1) - check_security_label(file_share_in_guest, fs_dest, selinux_xattr_name) - - error_context.context("The list of xattr for the file is empty " - "in guest, let's check it.", test.log.info) + check_security_label( + file_share_in_guest, fs_dest, selinux_xattr_name + ) + + error_context.context( + "The list of xattr for the file is empty " + "in guest, let's check it.", + test.log.info, + ) getfattr_list_cmd = params.get("getfattr_list_cmd") - s, o = session.cmd_status_output(getfattr_list_cmd % file_new_in_guest) + s, o = session.cmd_status_output( + getfattr_list_cmd % file_new_in_guest + ) if s: - test.fail("Getting the empty list of xattr failed" - " on virtiofs fs, the output is %s" % o) + test.fail( + "Getting the empty list of xattr failed" + f" on virtiofs fs, the output is {o}" + ) if winfsp_test_cmd: # only for windows guest. 
- error_context.context("Run winfsp-tests suit on windows" - " guest.", test.log.info) - winfsp_copy_cmd = utils_misc.set_winutils_letter(session, - winfsp_copy_cmd) + error_context.context( + "Run winfsp-tests suit on windows" " guest.", test.log.info + ) + winfsp_copy_cmd = utils_misc.set_winutils_letter( + session, winfsp_copy_cmd + ) session.cmd(winfsp_copy_cmd) try: - status, output = session.cmd_status_output(winfsp_test_cmd % fs_dest, - timeout=io_timeout) + status, output = session.cmd_status_output( + winfsp_test_cmd % fs_dest, timeout=io_timeout + ) if status != 0: - test.fail("Winfsp-test failed, the output is %s" % output) + test.fail(f"Winfsp-test failed, the output is {output}") finally: session.cmd("cd /d C:\\") @@ -812,83 +935,109 @@ def check_security_label(file, folder, xattr_name): viofs_sc_stop_cmd = params["viofs_sc_stop_cmd"] repeats = int(params.get("stop_start_repeats", 1)) for i in range(repeats): - error_context.context("Repeat stop/start VirtioFsSvc:" - " %d/%d" % (i + 1, repeats), - test.log.info) + error_context.context( + "Repeat stop/start VirtioFsSvc:" + " %d/%d" % (i + 1, repeats), + test.log.info, + ) viofs_svc_stop_start("stop", viofs_sc_stop_cmd, "STOPPED") viofs_svc_stop_start("start", viofs_sc_start_cmd, "RUNNING") - error_context.context("Basic IO test after" - " repeat stop/start virtiofs" - " service.", test.log.info) + error_context.context( + "Basic IO test after" " repeat stop/start virtiofs" " service.", + test.log.info, + ) s, o = session.cmd_status_output(cmd_dd % guest_file, io_timeout) if s: - test.fail("IO test failed, the output is %s" % o) + test.fail(f"IO test failed, the output is {o}") if cmd_run_sesuit: - error_context.context("Run selinux_testsuits based on selinux label" - "is enabled.", test.log.info) - host_path = os.path.join(data_dir.get_deps_dir('jfsutils'), jfsutils_pkg) - scp_to_remote(host_addr, port, username, password, host_path, '/tmp') + error_context.context( + "Run selinux_testsuits based on selinux label" "is enabled.", + test.log.info, + ) + host_path = os.path.join( + data_dir.get_deps_dir("jfsutils"), jfsutils_pkg + ) + scp_to_remote( + host_addr, port, username, password, host_path, "/tmp" + ) session.cmd(cmd_download_selinux_suits) session.cmd(cmd_yum_install_se) session.cmd(cmd_install_jfsutils) - status, output = session.cmd_status_output(cmd_make_sesuit, - timeout=timeout_make_sesuit) + status, output = session.cmd_status_output( + cmd_make_sesuit, timeout=timeout_make_sesuit + ) failed_make = output.split("Test Summary Report")[1] # ignore the specific failed make files ignore_make = re.findall(make_blacklist, failed_make).strip() if ignore_make != make_blacklist: - test.fail("Make selinux testsuits failed, output is %s" % ignore_make) + test.fail( + f"Make selinux testsuits failed, output is {ignore_make}" + ) status, output = session.cmd_status_output(cmd_run_sesuit) if status: - test.fail("Selinux-testsuits failed on virtiofs," - " the output is %s" % output) + test.fail( + "Selinux-testsuits failed on virtiofs," + f" the output is {output}" + ) # during all virtio fs is mounted, reboot vm - if params.get('reboot_guest', 'no') == 'yes': + if params.get("reboot_guest", "no") == "yes": + def get_vfsd_num(): """ Get virtiofsd daemon number during vm boot up. :return: virtiofsd daemon count. 
""" - cmd_ps_virtiofsd = params.get('cmd_ps_virtiofsd') + cmd_ps_virtiofsd = params.get("cmd_ps_virtiofsd") vfsd_num = 0 for device in vm.devices: if isinstance(device, qdevices.QVirtioFSDev): - sock_path = device.get_param('sock_path') + sock_path = device.get_param("sock_path") cmd_ps_virtiofsd = cmd_ps_virtiofsd % sock_path - vfsd_ps = process.system_output(cmd_ps_virtiofsd, shell=True) + vfsd_ps = process.system_output( + cmd_ps_virtiofsd, shell=True + ) vfsd_num += len(vfsd_ps.strip().splitlines()) return vfsd_num - error_context.context("Check virtiofs daemon before reboot vm.", - test.log.info) + error_context.context( + "Check virtiofs daemon before reboot vm.", test.log.info + ) vfsd_num_bf = get_vfsd_num() - error_context.context("Reboot guest and check virtiofs daemon.", - test.log.info) + error_context.context( + "Reboot guest and check virtiofs daemon.", test.log.info + ) session = vm.reboot(session) if not vm.is_alive(): test.fail("After rebooting vm quit unexpectedly.") vfsd_num_af = get_vfsd_num() if vfsd_num_bf != vfsd_num_af: - test.fail("Virtiofs daemon is different before and after reboot.\n" - "Before reboot: %s\n" - "After reboot: %s\n", (vfsd_num_bf, vfsd_num_af)) - error_context.context("Start IO test on virtiofs after reboot vm.", - test.log.info) - if os_type == 'windows': + test.fail( + "Virtiofs daemon is different before and after reboot.\n" + "Before reboot: %s\n" + "After reboot: %s\n", + (vfsd_num_bf, vfsd_num_af), + ) + error_context.context( + "Start IO test on virtiofs after reboot vm.", test.log.info + ) + if os_type == "windows": virtio_fs_utils.start_viofs_service(test, params, session) else: - error_context.context("Mount virtiofs target %s to %s inside" - "guest." % (fs_target, fs_dest), - test.log.info) - if not utils_disk.mount(fs_target, fs_dest, 'virtiofs', - session=session): - test.fail('Mount virtiofs target failed.') + error_context.context( + f"Mount virtiofs target {fs_target} to {fs_dest} inside" + "guest.", + test.log.info, + ) + if not utils_disk.mount( + fs_target, fs_dest, "virtiofs", session=session + ): + test.fail("Mount virtiofs target failed.") virtio_fs_utils.basic_io_test(test, params, session) finally: if os_type == "linux": - utils_disk.umount(fs_target, fs_dest, 'virtiofs', session=session) + utils_disk.umount(fs_target, fs_dest, "virtiofs", session=session) utils_misc.safe_rmdir(fs_dest, session=session) # for multi fs test in windows guest, stop winfsp.launcher instance is needed. 
if params.get("viofs_svc_name") == "WinFSP.Launcher": @@ -903,7 +1052,7 @@ def get_vfsd_num(): if os_type == "windows": win_driver_utils.memory_leak_check(vm, test, params) finally: - if os_type == 'windows' and vm and vm.is_alive(): + if os_type == "windows" and vm and vm.is_alive(): virtio_fs_utils.delete_viofs_serivce(test, params, session) if params.get("reboot_after_delete_service", "no") == "yes": session = vm.reboot(session) @@ -914,9 +1063,8 @@ def get_vfsd_num(): nfs_local = nfs_local_dic[fs] nfs_local.cleanup() if setup_filesystem_on_host: - cmd = "if losetup -l {0};then losetup -d {0};fi;".format( - loop_device) - cmd += "umount -l {0};".format(fs_source) + cmd = f"if losetup -l {loop_device};then losetup -d {loop_device};fi;" + cmd += f"umount -l {fs_source};" process.system_output(cmd, shell=True, timeout=60) if utils_misc.check_exists(dd_of_on_host): cmd_del = "rm -rf " + dd_of_on_host @@ -926,13 +1074,15 @@ def get_vfsd_num(): try: utils_selinux.set_status(se_mode_host_before) except Exception as err_msg: - test.fail("Restore selinux failed with %s on" - "host." % str(err_msg)) + test.fail(f"Restore selinux failed with {str(err_msg)} on" "host.") if os_type == "linux" and not se_mode_guest_before == se_mode: - test.log.info("Need to change selinux mode back to" - " %s." % se_mode_guest_before) + test.log.info( + "Need to change selinux mode back %s.", se_mode_guest_before + ) if se_mode_guest_before.lower() == "disabled": - cmd = "sed -i 's/^SELINUX=.*/SELINUX=Disabled/g' /etc/selinux/config" + cmd = ( + "sed -i 's/^SELINUX=.*/SELINUX=Disabled/g' /etc/selinux/config" + ) session.cmd(cmd) session = vm.reboot(session) if se_mode_guest_before.lower() == "permissive": diff --git a/qemu/tests/virtio_fs_subtest_during_io.py b/qemu/tests/virtio_fs_subtest_during_io.py index 700f00274f..c561c2f0fb 100644 --- a/qemu/tests/virtio_fs_subtest_during_io.py +++ b/qemu/tests/virtio_fs_subtest_during_io.py @@ -1,12 +1,9 @@ -import time import os +import time -from virttest import utils_test, utils_misc, utils_disk -from virttest import error_context -from virttest import data_dir +from virttest import data_dir, error_context, utils_disk, utils_misc, utils_test -from provider import win_driver_utils -from provider import virtio_fs_utils +from provider import virtio_fs_utils, win_driver_utils @error_context.context_aware @@ -25,6 +22,7 @@ def run(test, params, env): :param params: Dictionary with the test parameters :param env: Dictionary with test environment. """ + def sleep_before_basic_io_test(test, params, session): # The reason that sleeping 3 seconds at here is to let the main thread # running into file detection logic. 
@@ -33,50 +31,51 @@ def sleep_before_basic_io_test(test, params, session): driver = params["driver_name"] driver_verifier = params.get("driver_verifier", driver) - driver_running = params.get('driver_running', driver_verifier) + driver_running = params.get("driver_running", driver_verifier) timeout = int(params.get("login_timeout", 360)) fs_dest = params.get("fs_dest") fs_target = params.get("fs_target") - test_file = params.get('virtio_fs_test_file') + test_file = params.get("virtio_fs_test_file") fs_source = params.get("fs_source_dir") base_dir = params.get("fs_source_base_dir", data_dir.get_data_dir()) if not os.path.isabs(fs_source): fs_source = os.path.join(base_dir, fs_source) host_data = os.path.join(fs_source, test_file) - vm_name = params['main_vm'] + vm_name = params["main_vm"] vm = env.get_vm(vm_name) vm.verify_alive() - error_context.context("Boot guest with %s device" % driver, test.log.info) + error_context.context(f"Boot guest with {driver} device", test.log.info) session = vm.wait_for_login(timeout=timeout) if params["os_type"] == "windows": error_context.context("Run the viofs service", test.log.info) utils_test.qemu.windrv_verify_running(session, test, driver_running) - session = utils_test.qemu.setup_win_driver_verifier(session, - driver_verifier, - vm) + session = utils_test.qemu.setup_win_driver_verifier( + session, driver_verifier, vm + ) virtio_fs_utils.run_viofs_service(test, params, session) else: - error_context.context("Create a destination directory %s " - "inside guest." % fs_dest, test.log.info) + error_context.context( + f"Create a destination directory {fs_dest} " "inside guest.", + test.log.info, + ) if not utils_misc.make_dirs(fs_dest, session=session): test.fail("Creating directory was failed!") - error_context.context("Mount virtiofs target %s to %s inside" - " guest." 
% (fs_target, fs_dest), - test.log.info) - if not utils_disk.mount(fs_target, fs_dest, 'virtiofs', - session=session): - test.fail('Mount virtiofs target failed.') + error_context.context( + f"Mount virtiofs target {fs_target} to {fs_dest} inside" " guest.", + test.log.info, + ) + if not utils_disk.mount(fs_target, fs_dest, "virtiofs", session=session): + test.fail("Mount virtiofs target failed.") - basic_io_test = utils_misc.InterruptedThread(target=sleep_before_basic_io_test, - kwargs={"test": test, - "params": params, - "session": session}) + basic_io_test = utils_misc.InterruptedThread( + target=sleep_before_basic_io_test, + kwargs={"test": test, "params": params, "session": session}, + ) basic_io_test.daemon = True - error_context.context("Start the io test thread's activity....", - test.log.info) + error_context.context("Start the io test thread's activity....", test.log.info) basic_io_test.start() test.log.info("The io test thread is running...") @@ -85,7 +84,7 @@ def sleep_before_basic_io_test(test, params, session): max_run_time = params.get_numeric("max_run_time", 30) while time.time() - start_time < max_run_time: if os.path.exists(host_data) and os.path.getsize(host_data) > 0: - test.log.info("The file has been detected: %s" % host_data) + test.log.info("The file has been detected: %s", host_data) error_context.context("Going to stop the vm...", test.log.info) vm.pause() time.sleep(2) diff --git a/qemu/tests/virtio_fs_supp_group_transfer.py b/qemu/tests/virtio_fs_supp_group_transfer.py index fd278b932c..b0193a9dae 100644 --- a/qemu/tests/virtio_fs_supp_group_transfer.py +++ b/qemu/tests/virtio_fs_supp_group_transfer.py @@ -1,14 +1,12 @@ -import os.path import logging +import os.path -from virttest import nfs, env_process -from virttest import error_context, utils_disk, utils_misc -from virttest import data_dir from avocado.utils import process +from virttest import data_dir, env_process, error_context, nfs, utils_disk, utils_misc from provider import virtio_fs_utils -LOG_JOB = logging.getLogger('avocado.test') +LOG_JOB = logging.getLogger("avocado.test") @error_context.context_aware @@ -31,6 +29,7 @@ def run(test, params, env): :param params: Dictionary with the test parameters :param env: Dictionary with test environment """ + def _basic_io_test(test, params, session, fs_dest, fs_source): """ Virtio_fs basic io test. 
Create file on guest and then compare two md5 @@ -43,7 +42,7 @@ def _basic_io_test(test, params, session, fs_dest, fs_source): :param fs_source: the component of path at host """ error_context.context("Running viofs basic io test", LOG_JOB.info) - test_file = params.get('virtio_fs_test_file', "virtio_fs_test_file") + test_file = params.get("virtio_fs_test_file", "virtio_fs_test_file") windows = params.get("os_type", "windows") == "windows" io_timeout = params.get_numeric("fs_io_timeout", 120) fs_target = params.get("fs_target") @@ -53,50 +52,54 @@ def _basic_io_test(test, params, session, fs_dest, fs_source): host_data = os.path.join(fs_source, test_file) try: if windows: - cmd_dd = params.get("virtio_fs_cmd_dd", - 'dd if=/dev/random of=%s bs=1M count=100') - driver_letter = virtio_fs_utils.get_virtiofs_driver_letter(test, - fs_target, - session) + cmd_dd = params.get( + "virtio_fs_cmd_dd", "dd if=/dev/random of=%s bs=1M count=100" + ) + driver_letter = virtio_fs_utils.get_virtiofs_driver_letter( + test, fs_target, session + ) # replace the value if platform is windows - fs_dest = "%s:" % driver_letter + fs_dest = f"{driver_letter}:" else: - cmd_dd = params.get("virtio_fs_cmd_dd", - 'dd if=/dev/urandom of=%s bs=1M ' - 'count=100 iflag=fullblock') + cmd_dd = params.get( + "virtio_fs_cmd_dd", + "dd if=/dev/urandom of=%s bs=1M " "count=100 iflag=fullblock", + ) guest_file = os.path.join(fs_dest, test_file) - error_context.context("The guest file in shared dir is %s" % - guest_file, LOG_JOB.info) - error_context.context("Creating file under %s inside guest." % - fs_dest, LOG_JOB.info) + error_context.context( + f"The guest file in shared dir is {guest_file}", LOG_JOB.info + ) + error_context.context( + f"Creating file under {fs_dest} inside guest.", LOG_JOB.info + ) session.cmd(cmd_dd % guest_file, io_timeout) if windows: guest_file_win = guest_file.replace("/", "\\") - cmd_md5 = params.get("cmd_md5", '%s: && md5sum.exe %s') + cmd_md5 = params.get("cmd_md5", "%s: && md5sum.exe %s") cmd_md5_vm = cmd_md5 % (driver_letter, guest_file_win) else: - cmd_md5 = params.get("cmd_md5", 'md5sum %s') + cmd_md5 = params.get("cmd_md5", "md5sum %s") cmd_md5_vm = cmd_md5 % guest_file - md5_guest = session.cmd_output(cmd_md5_vm, - io_timeout).strip().split()[0] - error_context.context("md5 of the guest file: %s" % md5_guest, - LOG_JOB.info) - md5_host = process.run("md5sum %s" % host_data, - io_timeout).stdout_text.strip().split()[0] - error_context.context("md5 of the host file: %s" % md5_host, - LOG_JOB.info) + md5_guest = session.cmd_output(cmd_md5_vm, io_timeout).strip().split()[0] + error_context.context(f"md5 of the guest file: {md5_guest}", LOG_JOB.info) + md5_host = ( + process.run(f"md5sum {host_data}", io_timeout) + .stdout_text.strip() + .split()[0] + ) + error_context.context(f"md5 of the host file: {md5_host}", LOG_JOB.info) if md5_guest != md5_host: - test.fail('The md5 value of host is not same to guest.') + test.fail("The md5 value of host is not same to guest.") else: - error_context.context("The md5 of host is as same as md5 of " - "guest.", LOG_JOB.info) + error_context.context( + "The md5 of host is as same as md5 of " "guest.", LOG_JOB.info + ) finally: if not windows: - session.cmd("rm -rf %s" % guest_file) + session.cmd(f"rm -rf {guest_file}") - virtio_fs_utils.create_sub_folder_test(params, session, fs_dest, - fs_source) + virtio_fs_utils.create_sub_folder_test(params, session, fs_dest, fs_source) add_user_cmd = params.get("add_user_cmd") del_user_cmd = params.get("del_user_cmd") @@ 
-126,44 +129,57 @@ def _basic_io_test(test, params, session, fs_dest, fs_source): vm.verify_alive() guest_root_session = vm.wait_for_login() - error_context.context("Create a destination directory %s " - "inside guest." % fs_dest, test.log.info) + error_context.context( + f"Create a destination directory {fs_dest} " "inside guest.", + test.log.info, + ) if not utils_misc.make_dirs(fs_dest, session=guest_root_session): test.fail("Creating directory was failed!") - error_context.context("Mount virtiofs target %s to %s inside" - " guest." % (fs_target, fs_dest), - test.log.info) - if not utils_disk.mount(fs_target, fs_dest, 'virtiofs', - session=guest_root_session): - test.fail('Mount virtiofs target failed.') + error_context.context( + f"Mount virtiofs target {fs_target} to {fs_dest} inside" " guest.", + test.log.info, + ) + if not utils_disk.mount( + fs_target, fs_dest, "virtiofs", session=guest_root_session + ): + test.fail("Mount virtiofs target failed.") error_context.context("Create a common user...", test.log.info) guest_root_session.cmd(add_user_cmd % username) error_context.context("Change the group to wheel", test.log.info) - guest_root_session.cmd("usermod -G wheel %s" % username) - - error_context.context("Create a dir inside the virtiofs and " - "change it's group to wheel", test.log.info) - guest_root_session.cmd("cd %s && mkdir -m 770 %s" % (fs_dest, testdir)) - guest_root_session.cmd("cd %s && chgrp wheel %s" % (fs_dest, testdir)) - - error_context.context("Login the common user and try to write under " - "the dir which belongs to wheel group", - test.log.info) + guest_root_session.cmd(f"usermod -G wheel {username}") + + error_context.context( + "Create a dir inside the virtiofs and " "change it's group to wheel", + test.log.info, + ) + guest_root_session.cmd(f"cd {fs_dest} && mkdir -m 770 {testdir}") + guest_root_session.cmd(f"cd {fs_dest} && chgrp wheel {testdir}") + + error_context.context( + "Login the common user and try to write under " + "the dir which belongs to wheel group", + test.log.info, + ) guest_user_session = vm.wait_for_login() - guest_user_session.cmd("su %s" % username) - _basic_io_test(test, params, guest_user_session, - os.path.join(fs_dest, testdir), - os.path.join(shared_dir, testdir)) + guest_user_session.cmd(f"su {username}") + _basic_io_test( + test, + params, + guest_user_session, + os.path.join(fs_dest, testdir), + os.path.join(shared_dir, testdir), + ) finally: if guest_root_session: output = guest_root_session.cmd_output(del_user_cmd % username) if "is currently used by process" in output: - error_context.context("Kill process before delete user...", - test.log.info) + error_context.context( + "Kill process before delete user...", test.log.info + ) pid = output.split(" ")[-1] - guest_root_session.cmd_output("kill -9 %s" % pid) - guest_root_session.cmd("rm -rf /home/%s" % username) + guest_root_session.cmd_output(f"kill -9 {pid}") + guest_root_session.cmd(f"rm -rf /home/{username}") if setup_local_nfs: if vm and vm.is_alive(): diff --git a/qemu/tests/virtio_fs_support_win_fs.py b/qemu/tests/virtio_fs_support_win_fs.py index 695d6798f8..76265c708d 100644 --- a/qemu/tests/virtio_fs_support_win_fs.py +++ b/qemu/tests/virtio_fs_support_win_fs.py @@ -56,7 +56,7 @@ def run(test, params, env): winutils_pack_path = winutils_driver_letter + winutils_pack_path autoIt_path = winutils_driver_letter + autoIt_path script_path = winutils_driver_letter + script_path - copy_cmd = "xcopy %s %s:\\ /Y" % (winutils_pack_path, shared_driver_letter) + copy_cmd = f"xcopy 
{winutils_pack_path} {shared_driver_letter}:\\ /Y" error_context.context("Copy the executable to shared dir.", test.log.info) session.cmd(copy_cmd) @@ -72,14 +72,9 @@ def run(test, params, env): error_context.context( "Run autoit script to install executable in explorer.", test.log.info ) - session.cmd( - "start /w " - + autoIt_path - + " " - + script_path - ) + session.cmd("start /w " + autoIt_path + " " + script_path) exe_name = winutils_pack_path.encode("unicode_escape").decode()[4:] - output = session.cmd_output("tasklist -v | findstr %s" % exe_name) + output = session.cmd_output(f"tasklist -v | findstr {exe_name}") test.log.info("The process found: %s", output) output_lower = output.lower() if "7-zip" in output_lower and "setup" in output_lower: @@ -91,5 +86,5 @@ def run(test, params, env): test.fail( "No process detected while installing the " "executable package on the shared directory!\n " - "Related process: %s" % output + f"Related process: {output}" ) diff --git a/qemu/tests/virtio_fs_with_unknown_group_name.py b/qemu/tests/virtio_fs_with_unknown_group_name.py index ca88e920ca..d9b6979e96 100644 --- a/qemu/tests/virtio_fs_with_unknown_group_name.py +++ b/qemu/tests/virtio_fs_with_unknown_group_name.py @@ -1,11 +1,9 @@ import os -import shutil import re +import shutil from avocado.utils import process - -from virttest import data_dir -from virttest import error_context +from virttest import data_dir, error_context @error_context.context_aware @@ -23,8 +21,8 @@ def run(test, params, env): result_pattern = params.get("result_pattern", "unable to find group") # set fs daemon path - fs_source = params.get('fs_source_dir') - base_dir = params.get('fs_source_base_dir', data_dir.get_data_dir()) + fs_source = params.get("fs_source_dir") + base_dir = params.get("fs_source_base_dir", data_dir.get_data_dir()) if not os.path.isabs(fs_source): fs_source = os.path.join(base_dir, fs_source) @@ -33,22 +31,30 @@ def run(test, params, env): test.log.info("Create filesystem source %s.", fs_source) os.makedirs(fs_source) try: - sock_path = os.path.join(data_dir.get_tmp_dir(), - '-'.join(('avocado-vt-vm1', 'viofs', 'virtiofsd.sock'))) + sock_path = os.path.join( + data_dir.get_tmp_dir(), + "-".join(("avocado-vt-vm1", "viofs", "virtiofsd.sock")), + ) # run daemon - process.system(params.get('cmd_create_fs_source'), shell=True) - cmd_run_virtiofsd = params.get('cmd_run_virtiofsd') % sock_path - cmd_run_virtiofsd += ' -o source=%s' % fs_source - cmd_run_virtiofsd += params.get('fs_binary_extra_options') - - error_context.context('Running daemon command %s.' 
% cmd_run_virtiofsd, test.log.info) - output = process.system_output(cmd_run_virtiofsd, - ignore_status=True, - shell=True).strip().decode() + process.system(params.get("cmd_create_fs_source"), shell=True) + cmd_run_virtiofsd = params.get("cmd_run_virtiofsd") % sock_path + cmd_run_virtiofsd += f" -o source={fs_source}" + cmd_run_virtiofsd += params.get("fs_binary_extra_options") + + error_context.context( + f"Running daemon command {cmd_run_virtiofsd}.", test.log.info + ) + output = ( + process.system_output(cmd_run_virtiofsd, ignore_status=True, shell=True) + .strip() + .decode() + ) match = re.search(result_pattern, output, re.I | re.M) if match: - test.fail("Virtiofsd started with an unknown socket group name which isn't expected," - " the output is %s" % output) + test.fail( + "Virtiofsd started with an unknown socket group name which isn't expected," + f" the output is {output}" + ) finally: os.removedirs(fs_source) diff --git a/qemu/tests/virtio_fs_write_same_space.py b/qemu/tests/virtio_fs_write_same_space.py index e5691ac700..0d3c4f744f 100644 --- a/qemu/tests/virtio_fs_write_same_space.py +++ b/qemu/tests/virtio_fs_write_same_space.py @@ -1,11 +1,6 @@ import os -from virttest import data_dir -from virttest import env_process -from virttest import error_context -from virttest import nfs -from virttest import utils_disk -from virttest import utils_misc +from virttest import data_dir, env_process, error_context, nfs, utils_disk, utils_misc from virttest.remote import scp_to_remote @@ -24,19 +19,19 @@ def run(test, params, env): :param params: Dictionary with the test parameters. :param env: Dictionary with test environment. """ - test_file = params.get('test_file') - fs_dest = params.get('fs_dest') + test_file = params.get("test_file") + fs_dest = params.get("fs_dest") fs_target = params.get("fs_target") script_create_file = params.get("script_create_file") cmd_create_file = params.get("cmd_create_file") - username = params.get('username') - password = params.get('password') - port = params.get('file_transfer_port') - setup_local_nfs = params.get('setup_local_nfs', "no") + username = params.get("username") + password = params.get("password") + port = params.get("file_transfer_port") + setup_local_nfs = params.get("setup_local_nfs", "no") try: vm = None - if setup_local_nfs == 'yes': + if setup_local_nfs == "yes": nfs_local = nfs.Nfs(params) nfs_local.setup() params["start_vm"] = "yes" @@ -46,29 +41,31 @@ def run(test, params, env): session = vm.wait_for_login() guest_addr = vm.get_address() - error_context.context("Create a destination directory %s" - "inside guest." % fs_dest, test.log.info) + error_context.context( + f"Create a destination directory {fs_dest}" "inside guest.", test.log.info + ) utils_misc.make_dirs(fs_dest, session) - error_context.context("Mount virtiofs target %s to %s inside " - "guest." 
% (fs_target, fs_dest), - test.log.info) - if not utils_disk.mount(fs_target, fs_dest, 'virtiofs', session=session): - test.fail('Mount virtiofs target failed.') + error_context.context( + f"Mount virtiofs target {fs_target} to {fs_dest} inside " "guest.", + test.log.info, + ) + if not utils_disk.mount(fs_target, fs_dest, "virtiofs", session=session): + test.fail("Mount virtiofs target failed.") guest_file = os.path.join(fs_dest, test_file) test.log.info("The guest file in shared dir is %s", guest_file) - error_context.context("write to the same space of" - " a file with mmap.", test.log.info) + error_context.context( + "write to the same space of" " a file with mmap.", test.log.info + ) test.log.info("Copy the mmap script to guest.") - host_path = os.path.join(data_dir.get_deps_dir("virtio_fs"), - script_create_file) - scp_to_remote(guest_addr, port, username, password, host_path, '/tmp') + host_path = os.path.join(data_dir.get_deps_dir("virtio_fs"), script_create_file) + scp_to_remote(guest_addr, port, username, password, host_path, "/tmp") cmd_create_file_share = cmd_create_file % guest_file output = session.cmd_output(cmd_create_file_share).strip() if output.split()[0] != output.split()[1]: - test.fail("The file size is increasing, the output is %s." % output) + test.fail(f"The file size is increasing, the output is {output}.") finally: if setup_local_nfs == "yes": if vm and vm.is_alive(): diff --git a/qemu/tests/virtio_fsd_check_info.py b/qemu/tests/virtio_fsd_check_info.py index 3e069de77f..8b1477079c 100644 --- a/qemu/tests/virtio_fsd_check_info.py +++ b/qemu/tests/virtio_fsd_check_info.py @@ -20,13 +20,15 @@ def run(test, params, env): cmd_get_qemu_ver = params.get("cmd_get_qemu_ver") error_context.context("Check virtiofsd info", test.log.info) vfsd_info = process.system_output(cmd_get_vfsd_ver, shell=True).strip() - pattern_ver = r'version.*(\d+.\d+.\d+)'.encode() + pattern_ver = rb"version.*(\d+.\d+.\d+)" vfsd_ver = re.findall(pattern_ver, vfsd_info, re.I)[0] qemu_kvm_info = process.system_output(cmd_get_qemu_ver, shell=True).strip() qemu_ver = re.findall(pattern_ver, qemu_kvm_info, re.I)[0] if vfsd_ver != qemu_ver: - test.fail("virtiofsd version is %s which is wrong," - "it should be %s" % (vfsd_ver, qemu_ver)) + test.fail( + f"virtiofsd version is {vfsd_ver} which is wrong," + f"it should be {qemu_ver}" + ) # Remove the following checkpoint due to a product bz2038031 # pattern_cort = r'Copyright.*\d+-(\d+)'.encode() # vfsd_cprt = re.findall(pattern_cort, vfsd_info)[0] diff --git a/qemu/tests/virtio_mem_dynamic_memslots.py b/qemu/tests/virtio_mem_dynamic_memslots.py index cf239d44c8..364d611f75 100644 --- a/qemu/tests/virtio_mem_dynamic_memslots.py +++ b/qemu/tests/virtio_mem_dynamic_memslots.py @@ -1,6 +1,6 @@ from virttest import error_context - from virttest.utils_misc import normalize_data_size + from provider import virtio_mem_utils diff --git a/qemu/tests/virtio_mem_dynamic_memslots_with_migration.py b/qemu/tests/virtio_mem_dynamic_memslots_with_migration.py index 747807b546..70c0b9bd24 100644 --- a/qemu/tests/virtio_mem_dynamic_memslots_with_migration.py +++ b/qemu/tests/virtio_mem_dynamic_memslots_with_migration.py @@ -1,8 +1,8 @@ import time from virttest import error_context - from virttest.utils_misc import normalize_data_size + from provider import virtio_mem_utils @@ -29,7 +29,7 @@ def run(test, params, env): mem_object_id = params["mem_devs"] timeout = params.get_numeric("timeout", 10) - device_id = "virtio_mem-%s" % mem_object_id + device_id = 
f"virtio_mem-{mem_object_id}" req_size_normalized = int(float(normalize_data_size(requested_size_vmem_test, "B"))) vm.monitor.qom_set(device_id, "requested-size", req_size_normalized) diff --git a/qemu/tests/virtio_mem_numa_basic.py b/qemu/tests/virtio_mem_numa_basic.py index bdb02e1e58..8cd36fcf8b 100644 --- a/qemu/tests/virtio_mem_numa_basic.py +++ b/qemu/tests/virtio_mem_numa_basic.py @@ -1,8 +1,8 @@ import time from virttest import error_context - from virttest.utils_misc import normalize_data_size + from provider import virtio_mem_utils @@ -36,25 +36,32 @@ def run(test, params, env): mig_protocol = params.get("migration_protocol", "tcp") vm.migrate(mig_timeout, mig_protocol, env=env) - virtio_mem_model = 'virtio-mem-pci' - if '-mmio:' in params.get("machine_type"): - virtio_mem_model = 'virtio-mem-device' - for i, vmem_dev in enumerate(vm.devices.get_by_params({'driver': virtio_mem_model})): + virtio_mem_model = "virtio-mem-pci" + if "-mmio:" in params.get("machine_type"): + virtio_mem_model = "virtio-mem-device" + for i, vmem_dev in enumerate( + vm.devices.get_by_params({"driver": virtio_mem_model}) + ): device_id = vmem_dev.get_qid() requested_size_vmem = params.get("requested-size_test_vmem%d" % i) node_id = int(vmem_dev.params.get("node")) for requested_size in requested_size_vmem.split(): - req_size_normalized = int(float(normalize_data_size(requested_size, 'B'))) + req_size_normalized = int(float(normalize_data_size(requested_size, "B"))) vm.monitor.qom_set(device_id, "requested-size", req_size_normalized) time.sleep(30) - virtio_mem_utils.check_memory_devices(device_id, requested_size, threshold, vm, test) - virtio_mem_utils.check_numa_plugged_mem(node_id, requested_size, threshold, vm, test) + virtio_mem_utils.check_memory_devices( + device_id, requested_size, threshold, vm, test + ) + virtio_mem_utils.check_numa_plugged_mem( + node_id, requested_size, threshold, vm, test + ) if operation_type == "with_reboot": vm.reboot() - error_context.context("Verify virtio-mem device after reboot", - test.log.info) - virtio_mem_utils.check_memory_devices(device_id, requested_size, - threshold, vm, test) - virtio_mem_utils.check_numa_plugged_mem(node_id, requested_size, - threshold, vm, test) + error_context.context("Verify virtio-mem device after reboot", test.log.info) + virtio_mem_utils.check_memory_devices( + device_id, requested_size, threshold, vm, test + ) + virtio_mem_utils.check_numa_plugged_mem( + node_id, requested_size, threshold, vm, test + ) diff --git a/qemu/tests/virtio_mode.py b/qemu/tests/virtio_mode.py index d0f8269e88..1b54e9691e 100644 --- a/qemu/tests/virtio_mode.py +++ b/qemu/tests/virtio_mode.py @@ -1,9 +1,7 @@ import re -from virttest import error_context -from virttest import utils_test -from virttest import utils_misc -from virttest import qemu_qtree +from virttest import error_context, qemu_qtree, utils_misc, utils_test + from provider import win_dev @@ -27,20 +25,25 @@ def verify_virtio_mode_qtree(): device_type = params["device_type"] qtree = qemu_qtree.QtreeContainer() try: - qtree.parse_info_qtree(vm.monitor.info('qtree')) + qtree.parse_info_qtree(vm.monitor.info("qtree")) except AttributeError: test.cancel("Monitor deson't supoort qtree, skip this test") disable_modern = None disable_legacy = None for node in qtree.get_nodes(): - if (isinstance(node, qemu_qtree.QtreeDev) and - node.qtree['type'] == device_type): + if ( + isinstance(node, qemu_qtree.QtreeDev) + and node.qtree["type"] == device_type + ): disable_modern = node.qtree["disable-modern"] 
disable_legacy = node.qtree["disable-legacy"].strip('"') - if (disable_modern != params["virtio_dev_disable_modern"] or - disable_legacy != params["virtio_dev_disable_legacy"]): - test.fail("virtio mode in qtree is not correct, details are %s %s" - % (disable_modern, disable_legacy)) + if ( + disable_modern != params["virtio_dev_disable_modern"] + or disable_legacy != params["virtio_dev_disable_legacy"] + ): + test.fail( + f"virtio mode in qtree is not correct, details are {disable_modern} {disable_legacy}" + ) def verify_virtio_mode_guest_win(session, virtio_mode): """ @@ -54,10 +57,10 @@ def verify_virtio_mode_guest_win(session, virtio_mode): device_name = params["device_name"] driver_name = params["driver_name"] driver_verifier = params.get("driver_verifier", driver_name) - session = utils_test.qemu.windrv_check_running_verifier(session, vm, - test, driver_verifier) - devcon_folder = utils_misc.set_winutils_letter(session, - params["devcon_folder"]) + session = utils_test.qemu.windrv_check_running_verifier( + session, vm, test, driver_verifier + ) + devcon_folder = utils_misc.set_winutils_letter(session, params["devcon_folder"]) hwid = win_dev.get_hwids(session, device_name, devcon_folder)[0] device_id = int(hwid[17:21]) @@ -80,13 +83,12 @@ def win_memory_range(session, devcon_folder, hwid, virtio_mode): :param hwid: hardware id of a specific device :param virtio_mode: VirtIO mode for the device """ - mem_check_cmd = ('%sdevcon.exe resources @"%s" | find "MEM"' % - (devcon_folder, hwid)) + mem_check_cmd = f'{devcon_folder}devcon.exe resources @"{hwid}" | find "MEM"' status, output = session.cmd_status_output(mem_check_cmd) guest_mode = "legacy" if status == 0: for out in output.split("\n")[0:-2]: - out = re.split(r':+', out)[1].split("-") + out = re.split(r":+", out)[1].split("-") if int(out[1], 16) - int(out[0], 16) - int("0xFFF", 16): guest_mode = "transitional" return guest_mode @@ -104,7 +106,7 @@ def verify_virtio_mode_guest_linux(session): if not pci_n: test.error("Can't get the pci id for device") - cmd = "grep . /sys/bus/pci/devices/0000:%s/virtio*/features" % pci_n + cmd = f"grep . 
/sys/bus/pci/devices/0000:{pci_n}/virtio*/features" virtio_bit = int(session.cmd_output(cmd)[32]) if virtio_bit != (virtio_mode != "legacy"): test.fail("Fail as the virtio bit is not correct") diff --git a/qemu/tests/virtio_net_dpdk.py b/qemu/tests/virtio_net_dpdk.py index c390d316f9..01fc1e3215 100644 --- a/qemu/tests/virtio_net_dpdk.py +++ b/qemu/tests/virtio_net_dpdk.py @@ -1,19 +1,13 @@ -import os import logging +import os import threading -import six import time +import six from avocado.utils import process +from virttest import data_dir, error_context, remote, utils_misc, utils_test, virt_vm -from virttest import error_context -from virttest import virt_vm -from virttest import remote -from virttest import data_dir -from virttest import utils_misc -from virttest import utils_test - -LOG_JOB = logging.getLogger('avocado.test') +LOG_JOB = logging.getLogger("avocado.test") def format_result(result, base="12", fbase="2"): @@ -63,11 +57,11 @@ def _pin_vm_threads(node): utils_test.qemu.pin_vm_threads(vm, node) def install_dpdk(): - """ Install dpdk realted packages""" + """Install dpdk realted packages""" - cmd = 'yum install -y %s' % params.get("env_pkg") + cmd = "yum install -y {}".format(params.get("env_pkg")) session.cmd(cmd, timeout=360, ignore_all_errors=True) - session.cmd_output('rpm -qa |grep dpdk') + session.cmd_output("rpm -qa |grep dpdk") def env_setup(): """ @@ -89,9 +83,8 @@ def env_setup(): # copy testpmd script to guest testpmd_exec = params.get("testpmd_exec") - src = os.path.join(data_dir.get_deps_dir(), - "performance/%s" % testpmd_exec) - dst = "/tmp/%s" % testpmd_exec + src = os.path.join(data_dir.get_deps_dir(), f"performance/{testpmd_exec}") + dst = f"/tmp/{testpmd_exec}" vm.copy_files_to(src, dst, nic_index=0) return dst @@ -108,22 +101,22 @@ def dpdk_devbind(dpdk_bind_cmd): cmd += " && modprobe vfio-pci" session.cmd(cmd, timeout=360, ignore_all_errors=True) session.cmd_output("lspci|grep Eth") - cmd_nic_pci = "lspci |awk '/%s/ {print $1}'" % params.get("nic_driver") + "lspci |awk '/{}/ {{print $1}}'".format(params.get("nic_driver")) nic_driver = params.get("nic_driver").split() if len(nic_driver) > 1: for i in nic_driver: if i == "Virtio": - nic_pci_1 = "0000:%s" % session.cmd( - "lspci |awk '/%s network/ {print $1}'" % i).strip() - cmd_str = "%s --bind=vfio-pci %s" % ( - dpdk_bind_cmd, nic_pci_1) + nic_pci_1 = "0000:{}".format( + session.cmd(f"lspci |awk '/{i} network/ {{print $1}}'").strip() + ) + cmd_str = f"{dpdk_bind_cmd} --bind=vfio-pci {nic_pci_1}" else: - nic_pci_2 = "0000:%s" % session.cmd( - "lspci |awk '/%s/ {print $1}'" % i).strip() - cmd_str = "%s --bind=vfio-pci %s" % ( - dpdk_bind_cmd, nic_pci_2) + nic_pci_2 = "0000:{}".format( + session.cmd(f"lspci |awk '/{i}/ {{print $1}}'").strip() + ) + cmd_str = f"{dpdk_bind_cmd} --bind=vfio-pci {nic_pci_2}" session.cmd_output(cmd_str) - session.cmd_output("%s --status" % dpdk_bind_cmd) + session.cmd_output(f"{dpdk_bind_cmd} --status") return nic_pci_1, nic_pci_2 def install_moongen(session, ip, user, port, password, dpdk_bind_cmd): @@ -135,14 +128,12 @@ def install_moongen(session, ip, user, port, password, dpdk_bind_cmd): # copy MoonGen.zip to remote moongen host moongen_pkg = params.get("moongen_pkg") - local_path = os.path.join( - data_dir.get_deps_dir(), "performance/%s" % moongen_pkg) - remote.scp_to_remote(ip, shell_port, username, - password, local_path, "/home") + local_path = os.path.join(data_dir.get_deps_dir(), f"performance/{moongen_pkg}") + remote.scp_to_remote(ip, shell_port, username, password, 
local_path, "/home") # install moongen cmd_str = "rm -rf /home/MoonGen" - cmd_str += " && unzip /home/%s -d /home" % params.get("moongen_pkg") + cmd_str += " && unzip /home/{} -d /home".format(params.get("moongen_pkg")) cmd_str += " && cd /home/MoonGen && ./build.sh" if session.cmd_status(cmd_str, timeout=300) != 0: test.error("Fail to install program on monngen host") @@ -157,9 +148,9 @@ def install_moongen(session, ip, user, port, password, dpdk_bind_cmd): # bind nic moongen_dpdk_nic = params.get("moongen_dpdk_nic").split() for i in list(moongen_dpdk_nic): - cmd_bind = "%s --bind=vfio-pci %s" % (dpdk_bind_cmd, i) + cmd_bind = f"{dpdk_bind_cmd} --bind=vfio-pci {i}" if session.cmd_status(cmd_bind) != 0: - test.error("Fail to bind nic %s on monngen host" % i) + test.error(f"Fail to bind nic {i} on monngen host") def unbind_dpdk_nic(session, ip, user, port, password, dpdk_bind_cmd): """ @@ -176,21 +167,20 @@ def unbind_dpdk_nic(session, ip, user, port, password, dpdk_bind_cmd): cmd = "pkill MoonGen ; rm -rf /tmp/throughput.log ; sleep 3" generator1.cmd_output(cmd) moongen_dpdk_nic_list = params.get("moongen_dpdk_nic") - cmd_unbind = "%s -b ixgbe %s" % (dpdk_bind_cmd, moongen_dpdk_nic_list) + cmd_unbind = f"{dpdk_bind_cmd} -b ixgbe {moongen_dpdk_nic_list}" if session.cmd_status(cmd_unbind) != 0: - test.error("Fail to unbind nic %s on monngen host" - % moongen_dpdk_nic_list) + test.error(f"Fail to unbind nic {moongen_dpdk_nic_list} on monngen host") def result(recode, dst): - if os.path.getsize(dst) > 0: - - cmd = "grep -i %s %s | tail -2 | awk -F ':' '{print $2}' | head -1"\ - "| awk '{print $1}'" % (recode, dst) + cmd = ( + f"grep -i {recode} {dst} | tail -2 | awk -F ':' '{{print $2}}' | head -1" + "| awk '{print $1}'" + ) pps_results = process.system_output(cmd, shell=True) power = 10**6 mpps_results = float(pps_results) / float(power) - pps_results = "%.2f" % mpps_results + pps_results = f"{mpps_results:.2f}" else: test.error("the content of /tmp/testpmd.log is empty") @@ -201,8 +191,7 @@ def result(recode, dst): login_timeout = int(params.get("login_timeout", 360)) try: - vm.wait_for_serial_login( - timeout=login_timeout, restart_network=True).close() + vm.wait_for_serial_login(timeout=login_timeout, restart_network=True).close() except virt_vm.VMIPAddressMissingError: pass @@ -214,13 +203,13 @@ def result(recode, dst): session = vm.wait_for_login(nic_index=0, timeout=login_timeout) - guest_ip = vm.wait_for_get_address(0, timeout=90) - macvtap_mac = vm.get_mac_address(1) - vfio_mac = vm.get_mac_address(2) + vm.wait_for_get_address(0, timeout=90) + vm.get_mac_address(1) + vm.get_mac_address(2) # get parameter from dictionary category = params.get("category") - pkt_size = params.get("pkt_size") + params.get("pkt_size") kvm_ver_chk_cmd = params.get("kvm_ver_chk_cmd") guest_ver_cmd = params["guest_ver_cmd"] guest_dpdk_cmd = params["guest_dpdk_cmd"] @@ -229,7 +218,7 @@ def result(recode, dst): # get record_list record_line = "" for record in record_list.split(): - record_line += "%s|" % format_result(record) + record_line += f"{format_result(record)}|" # setup env and bind nics to vfio-pci in guest @@ -242,20 +231,25 @@ def result(recode, dst): shell_port = params.get("shell_port_generator") password = params.get("password_generator") username = params.get("username_generator") - generator1 = remote.wait_for_login(params.get("shell_client_generator"), - generator_ip, - shell_port, - username, - password, - params.get("shell_prompt_generator")) - generator2 = 
remote.wait_for_login(params.get("shell_client_generator"), - generator_ip, - shell_port, - username, - password, - params.get("shell_prompt_generator")) - install_moongen(generator1, generator_ip, username, - shell_port, password, dpdk_bind_cmd) + generator1 = remote.wait_for_login( + params.get("shell_client_generator"), + generator_ip, + shell_port, + username, + password, + params.get("shell_prompt_generator"), + ) + generator2 = remote.wait_for_login( + params.get("shell_client_generator"), + generator_ip, + shell_port, + username, + password, + params.get("shell_prompt_generator"), + ) + install_moongen( + generator1, generator_ip, username, shell_port, password, dpdk_bind_cmd + ) # get qemu, guest kernel, kvm version and dpdk version and write them into result result_path = utils_misc.get_path(test.resultsdir, "virtio_net_dpdk.RHS") @@ -264,15 +258,15 @@ def result(recode, dst): host_ver = os.uname()[2] guest_ver = session.cmd_output(guest_ver_cmd) dpdk_ver = session.cmd_output(guest_dpdk_cmd) - result_file.write("### kvm-userspace-ver : %s" % kvm_ver) - result_file.write("### kvm_version : %s" % host_ver) - result_file.write("### guest-kernel-ver :%s" % guest_ver) - result_file.write("### guest-dpdk-ver :%s" % dpdk_ver) + result_file.write(f"### kvm-userspace-ver : {kvm_ver}") + result_file.write(f"### kvm_version : {host_ver}") + result_file.write(f"### guest-kernel-ver :{guest_ver}") + result_file.write(f"### guest-dpdk-ver :{dpdk_ver}") # get result tested by each scenario for pkt_cate in category.split(): - result_file.write("Category:%s\n" % pkt_cate) - result_file.write("%s\n" % record_line.rstrip("|")) + result_file.write(f"Category:{pkt_cate}\n") + result_file.write("{}\n".format(record_line.rstrip("|"))) nic1_driver = params.get("nic1_dpdk_driver") nic2_driver = params.get("nic2_dpdk_driver") whitelist_option = params.get("whitelist_option") @@ -282,40 +276,48 @@ def result(recode, dst): size = 60 if pkt_cate == "rx": - error_context.context("test guest rx pps performance", - test.log.info) + error_context.context("test guest rx pps performance", test.log.info) port = 1 record = "Rx-pps" mac = vm.get_mac_address(1) if pkt_cate == "tx": - error_context.context("test guest tx pps performance", - test.log.info) + error_context.context("test guest tx pps performance", test.log.info) port = 0 record = "Tx-pps" mac = vm.get_mac_address(2) - status = launch_test(session, generator1, generator2, - mac, port, exec_file, # pylint: disable=E0606 - nic1_driver, nic2_driver, - whitelist_option, - nic_pci_1, nic_pci_2, - cores, queues, running_time) + status = launch_test( + session, + generator1, + generator2, + mac, + port, + exec_file, # pylint: disable=E0606 + nic1_driver, + nic2_driver, + whitelist_option, + nic_pci_1, + nic_pci_2, + cores, + queues, + running_time, + ) if status is True: - error_context.context("%s test is finished" % - pkt_cate, test.log.info) + error_context.context(f"{pkt_cate} test is finished", test.log.info) else: test.fail("test is failed, please check your command and env") - dst = utils_misc.get_path(test.resultsdir, "testpmd.%s" % pkt_cate) + dst = utils_misc.get_path(test.resultsdir, f"testpmd.{pkt_cate}") vm.copy_files_from("/tmp/testpmd.log", dst) - pkt_cate_r = result("%s-pps" % pkt_cate, dst) - line = "%s|" % format_result(size) - line += "%s" % format_result(pkt_cate_r) - result_file.write(("%s\n" % line)) + pkt_cate_r = result(f"{pkt_cate}-pps", dst) + line = f"{format_result(size)}|" + line += f"{format_result(pkt_cate_r)}" + 
result_file.write(f"{line}\n") - unbind_dpdk_nic(generator1, generator_ip, username, - shell_port, password, dpdk_bind_cmd) + unbind_dpdk_nic( + generator1, generator_ip, username, shell_port, password, dpdk_bind_cmd + ) generator1.close() generator2.close() @@ -323,54 +325,84 @@ def result(recode, dst): @error_context.context_aware -def launch_test(session, generator1, generator2, - mac, port_id, exec_file, - nic1_driver, nic2_driver, - whitelist_option, - nic_pci_1, nic_pci_2, - cores, queues, running_time): - """ Launch MoonGen """ +def launch_test( + session, + generator1, + generator2, + mac, + port_id, + exec_file, + nic1_driver, + nic2_driver, + whitelist_option, + nic_pci_1, + nic_pci_2, + cores, + queues, + running_time, +): + """Launch MoonGen""" def start_moongen(generator1, mac, port_id, running_time): - - file = '/home/MoonGen/examples/udp-throughput.lua' - cmd = "cp %s %s.tmp" % (file, file) - tmp_file = "%s.tmp" % file - cmd += " && sed -i 's/10:11:12:13:14:15/%s/g' %s" % (mac, tmp_file) - cmd += " && cd /home/MoonGen "\ - " && ./build/MoonGen %s %s > /tmp/throughput.log &" % ( - tmp_file, port_id) + file = "/home/MoonGen/examples/udp-throughput.lua" + cmd = f"cp {file} {file}.tmp" + tmp_file = f"{file}.tmp" + cmd += f" && sed -i 's/10:11:12:13:14:15/{mac}/g' {tmp_file}" + cmd += ( + " && cd /home/MoonGen " + f" && ./build/MoonGen {tmp_file} {port_id} > /tmp/throughput.log &" + ) generator1.cmd_output(cmd) def run_moongen_up(generator2): - cmd = 'grep "1 devices are up" /tmp/throughput.log' if generator2.cmd_status(cmd) == 0: return True else: return False - def start_testpmd(session, exec_file, nic1_driver, nic2_driver, - whitelist_option, nic1_pci_1, nic2_pci_2, cores, - queues, running_time): - """ Start testpmd on VM """ + def start_testpmd( + session, + exec_file, + nic1_driver, + nic2_driver, + whitelist_option, + nic1_pci_1, + nic2_pci_2, + cores, + queues, + running_time, + ): + """Start testpmd on VM""" cmd = "`command -v python python3 | head -1` " - cmd += " %s %s %s %s %s %s %s %s %s > /tmp/testpmd.log" % ( - exec_file, nic1_driver, nic2_driver, whitelist_option, - nic_pci_1, nic_pci_2, cores, queues, running_time) + cmd += f" {exec_file} {nic1_driver} {nic2_driver} {whitelist_option} {nic_pci_1} {nic_pci_2} {cores} {queues} {running_time} > /tmp/testpmd.log" session.cmd_output(cmd) moongen_thread = threading.Thread( - target=start_moongen, args=(generator1, mac, port_id, running_time)) + target=start_moongen, args=(generator1, mac, port_id, running_time) + ) moongen_thread.start() - if utils_misc.wait_for(lambda: run_moongen_up(generator2), 30, - text="Wait until devices is up to work"): + if utils_misc.wait_for( + lambda: run_moongen_up(generator2), 30, text="Wait until devices is up to work" + ): LOG_JOB.debug("MoonGen start to work") - testpmd_thread = threading.Thread(target=start_testpmd, args=( - session, exec_file, nic1_driver, nic2_driver, whitelist_option, - nic_pci_1, nic_pci_2, cores, queues, running_time)) + testpmd_thread = threading.Thread( + target=start_testpmd, + args=( + session, + exec_file, + nic1_driver, + nic2_driver, + whitelist_option, + nic_pci_1, + nic_pci_2, + cores, + queues, + running_time, + ), + ) time.sleep(3) testpmd_thread.start() testpmd_thread.join() diff --git a/qemu/tests/virtio_port_hotplug.py b/qemu/tests/virtio_port_hotplug.py index ea54513334..b7550016c5 100644 --- a/qemu/tests/virtio_port_hotplug.py +++ b/qemu/tests/virtio_port_hotplug.py @@ -1,11 +1,11 @@ import time from avocado.utils import process -from virttest 
import utils_test -from virttest import error_context +from virttest import error_context, utils_test + +from provider import win_driver_utils from qemu.tests.vioser_in_use import run_bg_test from qemu.tests.virtio_serial_file_transfer import transfer_data -from provider import win_driver_utils @error_context.context_aware @@ -37,65 +37,66 @@ def run(test, params, env): check_module = params.get_boolean("check_module", True) bg_test = params.get_boolean("bg_test", True) host_script = params["host_script"] - check_pid_command = "pgrep -f %s" % host_script + check_pid_command = f"pgrep -f {host_script}" orig_set = set(process.getoutput(check_pid_command).splitlines()) session = vm.wait_for_login() if os_type == "windows": driver_name = params["driver_name"] session = utils_test.qemu.windrv_check_running_verifier( - session, vm, test, driver_name) + session, vm, test, driver_name + ) if module and check_module: - error_context.context("Load module %s" % module, test.log.info) - session.cmd("modprobe %s" % module) + error_context.context(f"Load module {module}", test.log.info) + session.cmd(f"modprobe {module}") time.sleep(1) session.close() for port in params.objects("serials"): session = vm.wait_for_login(timeout=timeout) port_params = params.object_params(port) - if not port_params['serial_type'].startswith('virt'): + if not port_params["serial_type"].startswith("virt"): continue virtio_port = vm.devices.get(port) if not virtio_port: - test.fail("Virtio Port '%s' not found" % port) + test.fail(f"Virtio Port '{port}' not found") chardev_qid = virtio_port.get_param("chardev") try: port_chardev = vm.devices.get_by_qid(chardev_qid)[0] except IndexError: - test.error("Failed to get device %s" % chardev_qid) - if port_params['serial_type'] == 'virtserialport': - params['file_transfer_serial_port'] = port + test.error(f"Failed to get device {chardev_qid}") + if port_params["serial_type"] == "virtserialport": + params["file_transfer_serial_port"] = port if bg_test: run_bg_test(test, params, vm) for repeat in range(params.get_numeric("repeat_times", 1)): repeat += 1 if module and check_module: - error_context.context("Unload module %s" % module, - test.log.info) - session.cmd("modprobe -r %s" % module) + error_context.context(f"Unload module {module}", test.log.info) + session.cmd(f"modprobe -r {module}") time.sleep(1) - error_context.context("Unplug virtio port '%s' in %d tune(s)" % - (port, repeat), test.log.info) + error_context.context( + "Unplug virtio port '%s' in %d tune(s)" % (port, repeat), test.log.info + ) vm.devices.simple_unplug(virtio_port, vm.monitor) if port_params.get("unplug_chardev") == "yes": error_context.context( - "Unplug chardev '%s' for virtio port '%s'" % - (port, chardev_qid), test.log.info) + f"Unplug chardev '{port}' for virtio port '{chardev_qid}'", + test.log.info, + ) vm.devices.simple_unplug(port_chardev, vm.monitor) time.sleep(0.5) vm.devices.simple_hotplug(port_chardev, vm.monitor) vm.devices.simple_hotplug(virtio_port, vm.monitor) if module and check_module: - error_context.context("Load module %s" % module, test.log.info) - session.cmd("modprobe %s" % module) + error_context.context(f"Load module {module}", test.log.info) + session.cmd(f"modprobe {module}") time.sleep(1) session.close() test_set = set(process.getoutput(check_pid_command).splitlines()) difference = test_set.difference(orig_set) if difference: test.log.info("Kill the first serial process on host") - result = process.system('kill -9 %s' % difference.pop(), - shell=True) + result = 
process.system(f"kill -9 {difference.pop()}", shell=True) if result != 0: test.log.error("Failed to kill the first serial process on host!") if transfer_data(params, vm) is not True: diff --git a/qemu/tests/virtio_port_login.py b/qemu/tests/virtio_port_login.py index 724fc95a2a..978aa60857 100644 --- a/qemu/tests/virtio_port_login.py +++ b/qemu/tests/virtio_port_login.py @@ -3,19 +3,16 @@ :copyright: 2010-2012 Red Hat Inc. """ + import aexpect -from virttest import utils_misc -from virttest import remote -from virttest import utils_virtio_port -from virttest import error_context +from virttest import error_context, remote, utils_misc, utils_virtio_port class ConsoleLoginTest(utils_virtio_port.VirtioPortTest): - __sessions__ = [] def __init__(self, test, env, params): - super(ConsoleLoginTest, self).__init__(test, env, params) + super().__init__(test, env, params) self.vm = self.get_vm_with_ports(no_consoles=1, no_serialports=1) @error_context.context_aware @@ -28,43 +25,44 @@ def pre_step(self): self.__sessions__.append(session) @error_context.context_aware - def virtio_console_login(self, port='vc1'): - error_context.context("Login guest via '%s'" % port, self.test.log.info) + def virtio_console_login(self, port="vc1"): + error_context.context(f"Login guest via '{port}'", self.test.log.info) session = self.vm.wait_for_serial_login(timeout=180, virtio=port) self.__sessions__.append(session) return session - def console_login(self, port='vc1'): + def console_login(self, port="vc1"): return self.virtio_console_login(port=port) @error_context.context_aware - def virtio_serial_login(self, port='vs1'): - error_context.context("Try to login guest via '%s'" % port, - self.test.log.info) + def virtio_serial_login(self, port="vs1"): + error_context.context(f"Try to login guest via '{port}'", self.test.log.info) username = self.params.get("username") password = self.params.get("password") prompt = self.params.get("shell_prompt", "[#$]") - linesep = eval("'%s'" % self.params.get("shell_linesep", r"\n")) + linesep = eval("'{}'".format(self.params.get("shell_linesep", r"\n"))) for vport in self.get_virtio_ports(self.vm)[1]: if vport.name == port: break vport = None if not vport: - self.test.error("Not virtio serial port '%s' found" % port) + self.test.error(f"Not virtio serial port '{port}' found") - logfile = "serial-%s-%s.log" % (vport.name, self.vm.name) - socat_cmd = "nc -U %s" % vport.hostfile - session = aexpect.ShellSession(socat_cmd, auto_close=False, - output_func=utils_misc.log_line, - output_params=(logfile,), - prompt=prompt) + logfile = f"serial-{vport.name}-{self.vm.name}.log" + socat_cmd = f"nc -U {vport.hostfile}" + session = aexpect.ShellSession( + socat_cmd, + auto_close=False, + output_func=utils_misc.log_line, + output_params=(logfile,), + prompt=prompt, + ) session.set_linesep(linesep) session.sendline() self.__sessions__.append(session) try: remote.handle_prompts(session, username, password, prompt, 180) - self.test.fail("virtio serial '%s' should no " % port + - "channel to login") + self.test.fail(f"virtio serial '{port}' should no " + "channel to login") except remote.LoginTimeoutError: self.__sessions__.append(session) self.test.log.info("Can't login via %s", port) @@ -75,13 +73,14 @@ def serial_login(self, port="vc1"): @error_context.context_aware def cleanup(self): - error_context.context("Close open connection and destroy vm", - self.test.log.info) + error_context.context( + "Close open connection and destroy vm", self.test.log.info + ) for session in self.__sessions__: if 
session: session.close() self.__sessions__.remove(session) - super(ConsoleLoginTest, self).cleanup(vm=self.vm) + super().cleanup(vm=self.vm) def run(test, params, env): @@ -99,7 +98,7 @@ def run(test, params, env): try: console_test.pre_step() port_type = console_params.get("virtio_port_type") - login_func = "%s_login" % port_type + login_func = f"{port_type}_login" test.log.info("Login function: %s", login_func) session = getattr(console_test, login_func)(login_console) if "serial" not in port_type: diff --git a/qemu/tests/virtio_scsi_mq.py b/qemu/tests/virtio_scsi_mq.py index bdf2e9fef3..ed3dbd24e3 100644 --- a/qemu/tests/virtio_scsi_mq.py +++ b/qemu/tests/virtio_scsi_mq.py @@ -1,11 +1,7 @@ import re import time -from virttest import utils_misc -from virttest import utils_test -from virttest import env_process -from virttest import qemu_qtree -from virttest import error_context +from virttest import env_process, error_context, qemu_qtree, utils_misc, utils_test @error_context.context_aware @@ -26,30 +22,33 @@ def run(test, params, env): :param params: Dictionary with the test parameters :param env: Dictionary with test environment. """ + def get_mapping_interrupts2vcpus(irqs, pattern): - """ Get the mapping of between virtio interrupts and vcpus. """ - regex = r'(\d+):(\s+(\d+\s+){%d})\s+.+\s%s\s' % ( - len(re.findall(r"\s+CPU\d+", irqs, re.M)), pattern) - return {f[0]: {'count': f[1].split()} for f in re.findall(regex, irqs, re.M)} + """Get the mapping of between virtio interrupts and vcpus.""" + regex = r"(\d+):(\s+(\d+\s+){%d})\s+.+\s%s\s" % ( + len(re.findall(r"\s+CPU\d+", irqs, re.M)), + pattern, + ) + return {f[0]: {"count": f[1].split()} for f in re.findall(regex, irqs, re.M)} def create_data_images(): - """ Create date image objects. """ + """Create date image objects.""" for extra_image in range(images_num): - image_tag = "stg%s" % extra_image - params["images"] += " %s" % image_tag - params["image_name_%s" % image_tag] = "images/%s" % image_tag - params["image_size_%s" % image_tag] = extra_image_size - params["force_create_image_%s" % image_tag] = "yes" + image_tag = f"stg{extra_image}" + params["images"] += f" {image_tag}" + params[f"image_name_{image_tag}"] = f"images/{image_tag}" + params[f"image_size_{image_tag}"] = extra_image_size + params[f"force_create_image_{image_tag}"] = "yes" image_params = params.object_params(image_tag) env_process.preprocess_image(test, image_params, image_tag) def check_irqbalance_status(): - """ Check the status of irqbalance service. """ + """Check the status of irqbalance service.""" error_context.context("Check irqbalance service status.", test.log.info) return re.findall("Active: active", session.cmd_output(status_cmd)) def start_irqbalance_service(): - """ Start the irqbalance service. """ + """Start the irqbalance service.""" error_context.context("Start the irqbalance service.", test.log.info) session.cmd("systemctl start irqbalance") output = utils_misc.strip_console_codes(session.cmd_output(status_cmd)) @@ -57,7 +56,7 @@ def start_irqbalance_service(): test.cancel("Can not start irqbalance inside guest.Skip this test.") def pin_vcpus2host_cpus(): - """ Pint the vcpus to the host cpus. 
""" + """Pint the vcpus to the host cpus.""" error_context.context("Pin vcpus to host cpus.", test.log.info) host_numa_nodes = utils_misc.NumaInfo() vcpu_num = 0 @@ -69,15 +68,17 @@ def pin_vcpus2host_cpus(): vcpu_tid = vm.vcpu_threads[vcpu_num] test.log.debug( "pin vcpu thread(%s) to cpu(%s)", - vcpu_tid, numa_node.pin_cpu(vcpu_tid)) + vcpu_tid, + numa_node.pin_cpu(vcpu_tid), + ) vcpu_num += 1 def verify_num_queues(): - """ Verify the number of queues. """ + """Verify the number of queues.""" error_context.context("Verify num_queues from monitor.", test.log.info) qtree = qemu_qtree.QtreeContainer() try: - qtree.parse_info_qtree(vm.monitor.info('qtree')) + qtree.parse_info_qtree(vm.monitor.info("qtree")) except AttributeError: test.cancel("Monitor deson't supoort qtree skip this test") error_msg = "Number of queues mismatch: expect %s report from monitor: %s(%s)" @@ -85,44 +86,52 @@ def verify_num_queues(): qtree_num_queues_full = "" qtree_num_queues = "" for node in qtree.get_nodes(): - type = node.qtree['type'] - if isinstance(node, qemu_qtree.QtreeDev) and ( - type == "virtio-scsi-device"): + type = node.qtree["type"] + if isinstance(node, qemu_qtree.QtreeDev) and (type == "virtio-scsi-device"): qtree_num_queues_full = node.qtree["num_queues"] - qtree_num_queues = re.search( - "[0-9]+", qtree_num_queues_full).group() + qtree_num_queues = re.search("[0-9]+", qtree_num_queues_full).group() elif (isinstance(node, qemu_qtree.QtreeDev)) and ( - type == "virtio-scsi-pci"): - scsi_bus_addr = node.qtree['addr'] + type == "virtio-scsi-pci" + ): + scsi_bus_addr = node.qtree["addr"] if qtree_num_queues != num_queues: error_msg = error_msg % ( - num_queues, qtree_num_queues, qtree_num_queues_full) + num_queues, + qtree_num_queues, + qtree_num_queues_full, + ) test.fail(error_msg) if not scsi_bus_addr: test.error("Didn't find addr from qtree. Please check the log.") def check_interrupts(): - """ Check the interrupt queues in guest. """ + """Check the interrupt queues in guest.""" error_context.context("Check the interrupt queues in guest.", test.log.info) return session.cmd_output(irq_check_cmd) def check_interrupts2vcpus(irq_map): - """ Check the status of interrupters to vcpus. """ + """Check the status of interrupters to vcpus.""" error_context.context( - "Check the status of interrupters to vcpus.", test.log.info) + "Check the status of interrupters to vcpus.", test.log.info + ) cpu_selects = {} cpu_select = 1 for _ in range(int(num_queues)): - val = ','.join([_[::-1] for _ in re.findall(r'\w{8}|\w+', format( - cpu_select, 'x')[::-1])][::-1]) - cpu_selects[val] = format(cpu_select, 'b').count('0') + val = ",".join( + [ + _[::-1] + for _ in re.findall(r"\w{8}|\w+", format(cpu_select, "x")[::-1]) + ][::-1] + ) + cpu_selects[val] = format(cpu_select, "b").count("0") cpu_select = cpu_select << 1 irqs_id_reset = [] for irq_id in irq_map.keys(): - cmd = 'cat /proc/irq/%s/smp_affinity' % irq_id + cmd = f"cat /proc/irq/{irq_id}/smp_affinity" cpu_selected = re.sub( - r'(^[0+,?0+]+)|(,)', '', session.cmd_output(cmd)).strip() + r"(^[0+,?0+]+)|(,)", "", session.cmd_output(cmd) + ).strip() if cpu_selected not in cpu_selects: irqs_id_reset.append(irq_id) else: @@ -131,66 +140,67 @@ def check_interrupts2vcpus(irq_map): return irqs_id_reset, cpu_selects def pin_interrupts2vcpus(irqs_id_reset, cpu_selects): - """ Pint the interrupts to vcpus. 
""" + """Pint the interrupts to vcpus.""" bind_cpu_cmd = [] for irq_id, cpu_select in zip(irqs_id_reset, cpu_selects): - bind_cpu_cmd.append( - "echo %s > /proc/irq/%s/smp_affinity" % (cpu_select, irq_id)) + bind_cpu_cmd.append(f"echo {cpu_select} > /proc/irq/{irq_id}/smp_affinity") cpu_irq_map[irq_id] = cpu_selects[cpu_select] if bind_cpu_cmd: error_context.context("Pin interrupters to vcpus", test.log.info) - session.cmd(' && '.join(bind_cpu_cmd)) + session.cmd(" && ".join(bind_cpu_cmd)) return cpu_irq_map def _get_data_disks(session): - """ Get the data disks. """ + """Get the data disks.""" output = session.cmd_output(params.get("get_dev_cmd", "ls /dev/[svh]d*")) system_dev = re.search(r"/dev/([svh]d\w+)(?=\d+)", output, re.M).group(1) return (dev for dev in output.split() if system_dev not in dev) def check_io_status(timeout): - """ Check the status of I/O. """ + """Check the status of I/O.""" chk_session = vm.wait_for_login(timeout=360) while int(chk_session.cmd_output("pgrep -lx dd | wc -l", timeout)): time.sleep(5) chk_session.close() def load_io_data_disks(): - """ Load I/O on data disks. """ + """Load I/O on data disks.""" error_context.context("Load I/O in all targets", test.log.info) dd_session = vm.wait_for_login(timeout=360) dd_timeout = int(re.findall(r"\d+", extra_image_size)[0]) cmd = "dd of=%s if=/dev/urandom bs=1M count=%s oflag=direct &" cmds = [cmd % (dev, dd_timeout) for dev in _get_data_disks(dd_session)] if len(cmds) != images_num: - test.error( - "Disks are not all show up in system, only %s disks." % len(cmds)) + test.error(f"Disks are not all show up in system, only {len(cmds)} disks.") # As Bug 1177332 exists, mq is not supported completely. # So don't considering performance currently, dd_timeout is longer. - dd_session.cmd(' '.join(cmds), dd_timeout * images_num * 2) + dd_session.cmd(" ".join(cmds), dd_timeout * images_num * 2) check_io_status(dd_timeout) dd_session.close() def compare_interrupts(prev_irqs, cur_irqs): - """ Compare the interrupts between after and before IO. """ + """Compare the interrupts between after and before IO.""" cpu_not_used = [] diff_interrupts = {} for irq in prev_irqs.keys(): cpu = int(cpu_irq_map[irq]) - diff_val = int( - cur_irqs[irq]['count'][cpu]) - int(prev_irqs[irq]['count'][cpu]) + diff_val = int(cur_irqs[irq]["count"][cpu]) - int( + prev_irqs[irq]["count"][cpu] + ) if diff_val == 0: - cpu_not_used.append('CPU%s' % cpu) + cpu_not_used.append(f"CPU{cpu}") else: diff_interrupts[cpu] = diff_val - test.log.debug('The changed number of interrupts:') + test.log.debug("The changed number of interrupts:") for k, v in sorted(diff_interrupts.items()): - test.log.debug(' CPU%s: %d', k, v) + test.log.debug(" CPU%s: %d", k, v) if cpu_not_used: cpus = " ".join(cpu_not_used) - error_msg = ("%s are not used during test. " - "Please check debug log for more information.") + error_msg = ( + "%s are not used during test. " + "Please check debug log for more information." 
+ ) test.fail(error_msg % cpus) def wmi_facility_test(session): @@ -198,36 +208,41 @@ def wmi_facility_test(session): wmi_check_cmd = params["wmi_check_cmd"] pattern = params["pattern"] session = utils_test.qemu.windrv_check_running_verifier( - session, vm, test, driver_name, timeout) + session, vm, test, driver_name, timeout + ) wmi_check_cmd = utils_misc.set_winutils_letter(session, wmi_check_cmd) error_context.context("Run wmi check in guest.", test.log.info) output = session.cmd_output(wmi_check_cmd) queue_num = re.findall(pattern, output, re.M) try: if not queue_num or queue_num[0] != num_queues: - test.fail("The queue_num from guest is not match with expected.\n" - "queue_num from guest is %s, expected is %s" - % (queue_num, num_queues)) + test.fail( + "The queue_num from guest is not match with expected.\n" + f"queue_num from guest is {queue_num}, expected is {num_queues}" + ) finally: session.close() cpu_irq_map = {} timeout = float(params.get("login_timeout", 240)) - num_queues = params['vcpu_maxcpus'] - params['smp'] = num_queues - params['num_queues'] = num_queues + num_queues = params["vcpu_maxcpus"] + params["smp"] = num_queues + params["num_queues"] = num_queues images_num = int(num_queues) extra_image_size = params.get("image_size_extra_images", "512M") system_image = params.get("images") system_image_drive_format = params.get("system_image_drive_format", "virtio") - params["drive_format_%s" % system_image] = system_image_drive_format + params[f"drive_format_{system_image}"] = system_image_drive_format irq_check_cmd = params.get("irq_check_cmd", "cat /proc/interrupts") irq_name = params.get("irq_regex") status_cmd = "systemctl status irqbalance" - error_context.context("Boot up guest with block devcie with num_queues" - " is %s and smp is %s" % (num_queues, params['smp']), - test.log.info) + error_context.context( + "Boot up guest with block devcie with num_queues" " is {} and smp is {}".format( + num_queues, params["smp"] + ), + test.log.info, + ) for vm in env.get_all_vms(): if vm.is_alive(): vm.destroy() diff --git a/qemu/tests/virtio_serial_empty_line.py b/qemu/tests/virtio_serial_empty_line.py index 4a180138f4..947039296f 100644 --- a/qemu/tests/virtio_serial_empty_line.py +++ b/qemu/tests/virtio_serial_empty_line.py @@ -13,8 +13,8 @@ def get_device_name_of_port(session, port_name): :return: The device name of the port """ virtio_port_dev_path = "/dev/virtio-ports/" - port_path = "%s%s" % (virtio_port_dev_path, port_name) - device_name = session.cmd_output_safe("readlink %s" % port_path) + port_path = f"{virtio_port_dev_path}{port_name}" + device_name = session.cmd_output_safe(f"readlink {port_path}") return device_name.strip("../\n") @@ -46,14 +46,14 @@ def get_port_info(session): """ virtio_ports_debug_path = "/sys/kernel/debug/virtio-ports/" info_dict = {} - port_devs = session.cmd_output_safe( - "ls %s" % virtio_ports_debug_path).split() + port_devs = session.cmd_output_safe(f"ls {virtio_ports_debug_path}").split() for port_dev in port_devs: - port_infos = session.cmd_output("cat %s%s" % ( - virtio_ports_debug_path, port_dev)).splitlines() + port_infos = session.cmd_output( + f"cat {virtio_ports_debug_path}{port_dev}" + ).splitlines() port_dict = {} for line in port_infos: - option, value = line.split(':') + option, value = line.split(":") port_dict.update({option: value.strip()}) info_dict.update({port_dev: port_dict}) return info_dict @@ -84,31 +84,32 @@ def run(test, params, env): # Send empty line('\n') from guest to host port.open() # 'echo' automatically adds 
'\n' in the end of each writing - send_data_command = 'echo "" > /dev/%s' % device_name + send_data_command = f'echo "" > /dev/{device_name}' session.cmd(send_data_command, timeout=120) received_data = port.sock.recv(10) if received_data != b"\n": - test.fail("Received data is not same as the data sent," - " received %s, while expected '\n'" - % received_data) + test.fail( + "Received data is not same as the data sent," + f" received {received_data}, while expected '\n'" + ) check_option = {"bytes_sent": "1"} else: # Send empty line('\n') from host to guest port.open() - port.sock.send(b'\n') - guest_worker.cmd("virt.open('%s')" % port.name) - guest_worker.cmd("virt.recv('%s', 0, mode=False)" % port.name) + port.sock.send(b"\n") + guest_worker.cmd(f"virt.open('{port.name}')") + guest_worker.cmd(f"virt.recv('{port.name}', 0, mode=False)") check_option = {"bytes_received": "1"} # Check options byte_sent or bytes_received check = check_port_info(session, device_name, check_option) if check is False: - test.error("The debug info of %s is not found" % device_name) + test.error(f"The debug info of {device_name} is not found") elif check: - error_msg = '' + error_msg = "" for option, value in check.items(): - error_msg += "Option %s is %s," % (option, value[0]) - error_msg += " while expectation is: %s; " % value[1] - test.fail("Check info mismatch: %s " % error_msg) + error_msg += f"Option {option} is {value[0]}," + error_msg += f" while expectation is: {value[1]}; " + test.fail(f"Check info mismatch: {error_msg} ") finally: virtio_test.cleanup(vm, guest_worker) session.close() diff --git a/qemu/tests/virtio_serial_file_transfer.py b/qemu/tests/virtio_serial_file_transfer.py index 52e0da31e8..98b75f3ea3 100644 --- a/qemu/tests/virtio_serial_file_transfer.py +++ b/qemu/tests/virtio_serial_file_transfer.py @@ -1,17 +1,14 @@ -import re +import logging import os +import re import time -import logging from avocado.utils import process +from virttest import data_dir, error_context, qemu_virtio_port, utils_misc -from virttest import data_dir -from virttest import error_context -from virttest import utils_misc -from virttest import qemu_virtio_port from provider import win_driver_utils -LOG_JOB = logging.getLogger('avocado.test') +LOG_JOB = logging.getLogger("avocado.test") @error_context.context_aware @@ -22,8 +19,8 @@ def get_abstract_address(hostfile): :param hostfile: the unix socket path in command :return: Abstract hostfile address for unix socket """ - find_cmd = "cat /proc/net/unix | grep '%s'" % hostfile - abstract_hostfile = process.getoutput(find_cmd).strip().split(' ')[-1] + find_cmd = f"cat /proc/net/unix | grep '{hostfile}'" + abstract_hostfile = process.getoutput(find_cmd).strip().split(" ")[-1] return abstract_hostfile @@ -38,8 +35,7 @@ def copy_scripts(guest_scripts, guest_path, vm): """ error_context.context("Copy test scripts to guest.", LOG_JOB.info) for script in guest_scripts.split(";"): - link = os.path.join(data_dir.get_root_dir(), "shared", "deps", - "serial", script) + link = os.path.join(data_dir.get_root_dir(), "shared", "deps", "serial", script) vm.copy_files_to(link, guest_path, timeout=60) @@ -51,27 +47,29 @@ def get_virtio_port_property(vm, port_name): :param port_name: the port name to be processed :return: port type and port hostfile """ - chardev_info = vm.monitor.human_monitor_cmd('info chardev') + chardev_info = vm.monitor.human_monitor_cmd("info chardev") for port in vm.virtio_ports: if isinstance(port, qemu_virtio_port.VirtioSerial): if port.name == port_name: 
hostfile = port.hostfile # support abstract namespace Unix domain sockets - if port.port_type == 'unix_socket': - char_info = [m for m in chardev_info.split('\n') - if hostfile in m][0] - if 'abstract=on' in char_info: + if port.port_type == "unix_socket": + char_info = [m for m in chardev_info.split("\n") if hostfile in m][ + 0 + ] + if "abstract=on" in char_info: hostfile = get_abstract_address(hostfile) - elif port.port_type in ('tcp_socket', 'udp'): - hostfile = '%s:%s' % (port.hostfile[0], port.hostfile[1]) - elif port.port_type == 'pty': - hostfile = re.findall('%s: filename=pty:(/dev/pts/\\d)?' % - port_name, chardev_info)[0] + elif port.port_type in ("tcp_socket", "udp"): + hostfile = f"{port.hostfile[0]}:{port.hostfile[1]}" + elif port.port_type == "pty": + hostfile = re.findall( + f"{port_name}: filename=pty:(/dev/pts/\\d)?", chardev_info + )[0] return port.port_type, hostfile @error_context.context_aware -def get_command_options(sender='host', file_size=0): +def get_command_options(sender="host", file_size=0): """ Get the options of host and guest command, per different sender @@ -79,12 +77,12 @@ def get_command_options(sender='host', file_size=0): :param file_size: the size of the file to be sent :return: host file size, guest file size, host action, guest action """ - if sender == 'host': - return file_size, 0, 'send', 'receive' - elif sender == 'guest': - return 0, file_size, 'receive', 'send' + if sender == "host": + return file_size, 0, "send", "receive" + elif sender == "guest": + return 0, file_size, "receive", "send" else: - return file_size, file_size, 'both', 'both' + return file_size, file_size, "both", "both" @error_context.context_aware @@ -98,16 +96,13 @@ def generate_data_file(dir_name, file_size=0, session=None): :param session: guest session if have one, perform on host if None :return: the full file path """ - data_file = os.path.join(dir_name, - "tmp-%s" % utils_misc.generate_random_string(8)) + data_file = os.path.join(dir_name, f"tmp-{utils_misc.generate_random_string(8)}") cmd = "dd if=/dev/zero of=%s bs=1M count=%d" % (data_file, int(file_size)) if not session: - error_context.context( - "Creating %dMB file on host" % file_size, LOG_JOB.info) + error_context.context("Creating %dMB file on host" % file_size, LOG_JOB.info) process.run(cmd) else: - error_context.context( - "Creating %dMB file on guest" % file_size, LOG_JOB.info) + error_context.context("Creating %dMB file on guest" % file_size, LOG_JOB.info) session.cmd(cmd, timeout=600) return data_file @@ -131,7 +126,7 @@ def check_output(sender, output): if sender == "both": if "Md5MissMatch" in output: err = "Data lost during file transfer. Md5 miss match." - err += " Script output:\n%s" % output + err += f" Script output:\n{output}" return False, err return True, 0 else: @@ -140,18 +135,14 @@ def check_output(sender, output): md5 = re.findall(md5_re, output)[0] except Exception: err = "Fail to get md5, script may fail." 
- err += " Script output:\n%s" % output + err += f" Script output:\n{output}" return False, err return True, md5 try: - kwargs = {'cmd': host_cmd, - 'shell': True, - 'timeout': timeout} - error_context.context("Send host command: %s" - % host_cmd, LOG_JOB.info) - host_thread = utils_misc.InterruptedThread(process.getoutput, - kwargs=kwargs) + kwargs = {"cmd": host_cmd, "shell": True, "timeout": timeout} + error_context.context(f"Send host command: {host_cmd}", LOG_JOB.info) + host_thread = utils_misc.InterruptedThread(process.getoutput, kwargs=kwargs) host_thread.daemon = True host_thread.start() time.sleep(3) @@ -171,15 +162,21 @@ def check_output(sender, output): md5_host = result[1] if sender != "both" and md5_host != md5_guest: err = "Data lost during file transfer. Md5 miss match." - err += " Guest script output:\n %s" % g_output - err += " Host script output:\n%s" % output + err += f" Guest script output:\n {g_output}" + err += f" Host script output:\n{output}" return False, err return True @error_context.context_aware -def transfer_data(params, vm, host_file_name=None, guest_file_name=None, - sender='both', clean_file=True): +def transfer_data( + params, + vm, + host_file_name=None, + guest_file_name=None, + sender="both", + clean_file=True, +): """ Transfer data file between guest and host, and check result via output; Generate random file first if not provided @@ -196,43 +193,40 @@ def transfer_data(params, vm, host_file_name=None, guest_file_name=None, os_type = params["os_type"] try: guest_path = params.get("guest_script_folder", "C:\\") - guest_scripts = params.get("guest_scripts", - "VirtIoChannel_guest_send_receive.py") + guest_scripts = params.get( + "guest_scripts", "VirtIoChannel_guest_send_receive.py" + ) copy_scripts(guest_scripts, guest_path, vm) port_name = params["file_transfer_serial_port"] port_type, port_path = get_virtio_port_property(vm, port_name) file_size = int(params.get("filesize", 10)) transfer_timeout = int(params.get("transfer_timeout", 720)) host_dir = data_dir.get_tmp_dir() - guest_dir = params.get("tmp_dir", '/var/tmp/') - host_file_size, guest_file_size, host_action, guest_action \ - = get_command_options(sender, file_size) + guest_dir = params.get("tmp_dir", "/var/tmp/") + host_file_size, guest_file_size, host_action, guest_action = ( + get_command_options(sender, file_size) + ) if not host_file_name: host_file_name = generate_data_file(host_dir, host_file_size) if not guest_file_name: - guest_file_name = generate_data_file( - guest_dir, guest_file_size, session) + guest_file_name = generate_data_file(guest_dir, guest_file_size, session) host_script = params.get("host_script", "serial_host_send_receive.py") - host_script = os.path.join(data_dir.get_root_dir(), "shared", "deps", - "serial", host_script) - python_bin = '`command -v python python3 | head -1`' - host_cmd = ("%s %s -t %s -s %s -f %s -a %s" % - (python_bin, host_script, port_type, port_path, - host_file_name, host_action)) - guest_script = os.path.join(guest_path, params['guest_script']) - python_bin = params.get('python_bin', python_bin) - guest_cmd = ("%s %s -d %s -f %s -a %s" % - (python_bin, guest_script, - port_name, guest_file_name, guest_action)) - result = _transfer_data( - session, host_cmd, guest_cmd, transfer_timeout, sender) + host_script = os.path.join( + data_dir.get_root_dir(), "shared", "deps", "serial", host_script + ) + python_bin = "`command -v python python3 | head -1`" + host_cmd = f"{python_bin} {host_script} -t {port_type} -s {port_path} -f {host_file_name} -a 
{host_action}" + guest_script = os.path.join(guest_path, params["guest_script"]) + python_bin = params.get("python_bin", python_bin) + guest_cmd = f"{python_bin} {guest_script} -d {port_name} -f {guest_file_name} -a {guest_action}" + result = _transfer_data(session, host_cmd, guest_cmd, transfer_timeout, sender) finally: if os_type == "windows": guest_file_name = guest_file_name.replace("/", "\\") if clean_file: - clean_cmd = params['clean_cmd'] + clean_cmd = params["clean_cmd"] os.remove(host_file_name) - session.cmd('%s %s' % (clean_cmd, guest_file_name)) + session.cmd(f"{clean_cmd} {guest_file_name}") session.close() return result @@ -252,10 +246,10 @@ def run(test, params, env): :param params: Dictionary with the test parameters. :param env: Dictionary with test environment. """ - sender = params['file_sender'] + sender = params["file_sender"] vm = env.get_vm(params["main_vm"]) vm.verify_alive() - test.log.info('Transfer data from %s', sender) + test.log.info("Transfer data from %s", sender) result = transfer_data(params, vm, sender=sender) if params.get("memory_leak_check", "no") == "yes": # for windows guest, disable/uninstall driver to get memory leak based on @@ -264,4 +258,4 @@ def run(test, params, env): win_driver_utils.memory_leak_check(vm, test, params) vm.destroy() if result is not True: - test.fail("Test failed. %s" % result[1]) + test.fail(f"Test failed. {result[1]}") diff --git a/qemu/tests/virtio_serial_file_transfer_max_ports.py b/qemu/tests/virtio_serial_file_transfer_max_ports.py index 86fa81af20..4ae65d0f32 100644 --- a/qemu/tests/virtio_serial_file_transfer_max_ports.py +++ b/qemu/tests/virtio_serial_file_transfer_max_ports.py @@ -1,8 +1,7 @@ -from virttest import error_context -from virttest import env_process -from virttest import utils_test -from qemu.tests.virtio_serial_file_transfer import transfer_data +from virttest import env_process, error_context, utils_test + from provider import win_driver_utils +from qemu.tests.virtio_serial_file_transfer import transfer_data @error_context.context_aware @@ -20,29 +19,30 @@ def run(test, params, env): """ num_serial_ports = int(params.get("virtio_serial_ports")) for i in range(2, num_serial_ports + 1): - serial_name = 'vs%d' % i - params['serials'] = '%s %s' % (params.get('serials', ''), serial_name) - params['serial_type_%s' % serial_name] = "virtserialport" - params['start_vm'] = "yes" + serial_name = "vs%d" % i + params["serials"] = "{} {}".format(params.get("serials", ""), serial_name) + params[f"serial_type_{serial_name}"] = "virtserialport" + params["start_vm"] = "yes" env_process.preprocess(test, params, env) - vm = env.get_vm(params['main_vm']) + vm = env.get_vm(params["main_vm"]) os_type = params["os_type"] if os_type == "windows": session = vm.wait_for_login() driver_name = params["driver_name"] session = utils_test.qemu.windrv_check_running_verifier( - session, vm, test, driver_name) + session, vm, test, driver_name + ) session.close() serials = params.objects("serials") for serial_port in serials: port_params = params.object_params(serial_port) - if not port_params['serial_type'].startswith('virtserial'): + if not port_params["serial_type"].startswith("virtserial"): continue test.log.info("transfer data with port %s", serial_port) - params['file_transfer_serial_port'] = serial_port - transfer_data(params, vm, sender='both') + params["file_transfer_serial_port"] = serial_port + transfer_data(params, vm, sender="both") vm.verify_alive() # for windows guest, disable/uninstall driver to get memory leak based on 
diff --git a/qemu/tests/virtio_serial_file_transfer_offline_migrate.py b/qemu/tests/virtio_serial_file_transfer_offline_migrate.py index dac25dacc4..fe80da1010 100644 --- a/qemu/tests/virtio_serial_file_transfer_offline_migrate.py +++ b/qemu/tests/virtio_serial_file_transfer_offline_migrate.py @@ -1,9 +1,6 @@ import os -from virttest import utils_test -from virttest import utils_misc -from virttest import error_context -from virttest import qemu_migration +from virttest import error_context, qemu_migration, utils_misc, utils_test from qemu.tests.virtio_serial_file_transfer import transfer_data @@ -30,9 +27,9 @@ def run_serial_data_transfer(): for port in params.objects("serials"): port_params = params.object_params(port) - if not port_params['serial_type'].startswith('virt'): + if not port_params["serial_type"].startswith("virt"): continue - params['file_transfer_serial_port'] = port + params["file_transfer_serial_port"] = port transfer_data(params, vm) vm = env.get_vm(params["main_vm"]) @@ -41,7 +38,8 @@ def run_serial_data_transfer(): session = vm.wait_for_login() driver_name = params["driver_name"] session = utils_test.qemu.windrv_check_running_verifier( - session, vm, test, driver_name) + session, vm, test, driver_name + ) session.close() error_context.context("transferring data on source guest", test.log.info) run_serial_data_transfer() @@ -49,16 +47,18 @@ def run_serial_data_transfer(): mig_exec_cmd_src = params.get("migration_exec_cmd_src") mig_exec_cmd_dst = params.get("migration_exec_cmd_dst") mig_exec_file = params.get("migration_exec_file", "/var/tmp/exec") - mig_exec_file += "-%s" % utils_misc.generate_random_string(8) + mig_exec_file += f"-{utils_misc.generate_random_string(8)}" mig_exec_cmd_src = mig_exec_cmd_src % mig_exec_file mig_exec_cmd_dst = mig_exec_cmd_dst % mig_exec_file qemu_migration.set_speed(vm, params.get("mig_speed", "1G")) try: - vm.migrate(protocol=mig_protocol, offline=True, - migration_exec_cmd_src=mig_exec_cmd_src, - migration_exec_cmd_dst=mig_exec_cmd_dst) - error_context.context("transferring data on destination guest", - test.log.info) + vm.migrate( + protocol=mig_protocol, + offline=True, + migration_exec_cmd_src=mig_exec_cmd_src, + migration_exec_cmd_dst=mig_exec_cmd_dst, + ) + error_context.context("transferring data on destination guest", test.log.info) run_serial_data_transfer() vm.verify_kernel_crash() finally: diff --git a/qemu/tests/virtio_serial_hotplug_existed_port_pci.py b/qemu/tests/virtio_serial_hotplug_existed_port_pci.py index 96a7d5f058..33464a0d3a 100644 --- a/qemu/tests/virtio_serial_hotplug_existed_port_pci.py +++ b/qemu/tests/virtio_serial_hotplug_existed_port_pci.py @@ -1,6 +1,5 @@ -from virttest import error_context -from virttest import utils_test -from virttest import qemu_monitor +from virttest import error_context, qemu_monitor, utils_test + from provider import win_driver_utils @@ -18,28 +17,27 @@ def run(test, params, env): vm = env.get_vm(params["main_vm"]) vm.verify_alive() session = vm.wait_for_login() - if params['os_type'] == 'windows': - utils_test.qemu.windrv_check_running_verifier( - session, vm, test, 'vioser', 300) + if params["os_type"] == "windows": + utils_test.qemu.windrv_check_running_verifier(session, vm, test, "vioser", 300) session.close() - port = params.objects('serials')[1] + port = params.objects("serials")[1] virtio_port = vm.devices.get(port) - pci_dev_id = virtio_port.params['bus'].split('.')[0] + pci_dev_id = virtio_port.params["bus"].split(".")[0] pci_dev = vm.devices.get(pci_dev_id) try: 
virtio_port.hotplug(vm.monitor, vm.devices.qemu_version) except qemu_monitor.QMPCmdError as e: - if 'Duplicate' not in e.data['desc']: - test.fail(e.data['desc']) + if "Duplicate" not in e.data["desc"]: + test.fail(e.data["desc"]) else: - test.fail('hotplugg virtserialport device should be failed') + test.fail("hotplugg virtserialport device should be failed") try: pci_dev.hotplug(vm.monitor, vm.devices.qemu_version) except qemu_monitor.QMPCmdError as e: - if 'Duplicate' not in e.data['desc']: - test.fail(e.data['desc']) + if "Duplicate" not in e.data["desc"]: + test.fail(e.data["desc"]) else: - test.fail('hotplugg virtio-serial-pci device should be failed') + test.fail("hotplugg virtio-serial-pci device should be failed") # for windows guest, disable/uninstall driver to get memory leak based on # driver verifier is enabled diff --git a/qemu/tests/virtio_serial_hotplug_max_chardevs.py b/qemu/tests/virtio_serial_hotplug_max_chardevs.py index 29e29dadcd..88face5b5c 100644 --- a/qemu/tests/virtio_serial_hotplug_max_chardevs.py +++ b/qemu/tests/virtio_serial_hotplug_max_chardevs.py @@ -1,14 +1,11 @@ -import time import re +import time -from virttest import error_context -from virttest import utils_misc -from virttest import utils_test +from virttest import error_context, utils_misc, utils_test from virttest.qemu_monitor import QMPCmdError from qemu.tests import driver_in_use -from qemu.tests.virtio_console import add_chardev -from qemu.tests.virtio_console import add_virtio_ports_to_vm +from qemu.tests.virtio_console import add_chardev, add_virtio_ports_to_vm from qemu.tests.virtio_serial_file_transfer import transfer_data from qemu.tests.virtio_serial_hotplug_port_pci import get_buses_and_serial_devices @@ -32,17 +29,18 @@ def run(test, params, env): :param params: Dictionary with the test parameters :param env: Dictionary with test environment """ + def run_serial_data_transfer(): """ Transfer data via every virtserialport. 
""" for serial_port in serials: port_params = params.object_params(serial_port) - if not port_params['serial_type'].startswith('virtserial'): + if not port_params["serial_type"].startswith("virtserial"): continue test.log.info("transfer data with port %s", serial_port) - params['file_transfer_serial_port'] = serial_port - transfer_data(params, vm, sender='both') + params["file_transfer_serial_port"] = serial_port + transfer_data(params, vm, sender="both") def run_bg_test(): """ @@ -52,28 +50,32 @@ def run_bg_test(): """ stress_thread = utils_misc.InterruptedThread(run_serial_data_transfer) stress_thread.start() - if not utils_misc.wait_for(lambda: driver_in_use.check_bg_running( - vm, params), check_bg_timeout, 0, 1): + if not utils_misc.wait_for( + lambda: driver_in_use.check_bg_running(vm, params), check_bg_timeout, 0, 1 + ): test.fail("Backgroud test is not alive!") return stress_thread vm = env.get_vm(params["main_vm"]) vm.verify_alive() os_type = params["os_type"] - check_bg_timeout = float(params.get('check_bg_timeout', 120)) + check_bg_timeout = float(params.get("check_bg_timeout", 120)) num_chardev = int(params.get("numberic_chardev")) num_serial_ports = int(params.get("virtio_serial_ports")) - sleep_time = float(params.get('sleep_time', 0.5)) + sleep_time = float(params.get("sleep_time", 0.5)) for i in range(1, num_chardev): - params["extra_chardevs"] += ' channel%d' % i - serial_name = 'port%d' % (i-1) - params['extra_serials'] = '%s %s' % (params.get('extra_serials', ''), - serial_name) - params['serial_type_%s' % serial_name] = "virtserialport" + params["extra_chardevs"] += " channel%d" % i + serial_name = "port%d" % (i - 1) + params["extra_serials"] = "{} {}".format( + params.get("extra_serials", ""), + serial_name, + ) + params[f"serial_type_{serial_name}"] = "virtserialport" char_devices = add_chardev(vm, params) - serials = params.objects('extra_serials') + serials = params.objects("extra_serials") buses, serial_devices = get_buses_and_serial_devices( - vm, params, char_devices, serials) + vm, params, char_devices, serials + ) vm.devices.simple_hotplug(buses[0], vm.monitor) for i in range(0, num_chardev): vm.devices.simple_hotplug(char_devices[i], vm.monitor) @@ -86,41 +88,41 @@ def run_bg_test(): driver_name = params["driver_name"] session = vm.wait_for_login() session = utils_test.qemu.windrv_check_running_verifier( - session, vm, test, driver_name) + session, vm, test, driver_name + ) thread_transfer = run_bg_test() - error_context.context("hotplug existed virtserialport and chardev", - test.log.info) + error_context.context("hotplug existed virtserialport and chardev", test.log.info) try: serial_devices[0].hotplug(vm.monitor, vm.devices.qemu_version) except QMPCmdError as e: - if not re.search("Duplicate (device |)ID '%s'" - % serial_devices[0], str(e.data)): - msg = ("Should fail to hotplug device %s with error Duplicate" - % serial_devices[0]) + if not re.search(f"Duplicate (device |)ID '{serial_devices[0]}'", str(e.data)): + msg = f"Should fail to hotplug device {serial_devices[0]} with error Duplicate" test.fail(msg) else: - msg = ("The device %s shoudn't be hotplugged successfully" - % serial_devices[0]) + msg = f"The device {serial_devices[0]} shoudn't be hotplugged successfully" test.fail(msg) try: char_devices[0].hotplug(vm.monitor, vm.devices.qemu_version) except QMPCmdError as e: - if not ("duplicate property '%s'" % char_devices[0] in str(e.data) - or "'%s' already exists" % char_devices[0] in str(e.data)): - msg = ("Should fail to hotplug device %s with 
error Duplicate" - % char_devices[0]) + if not ( + f"duplicate property '{char_devices[0]}'" in str(e.data) + or f"'{char_devices[0]}' already exists" in str(e.data) + ): + msg = ( + f"Should fail to hotplug device {char_devices[0]} with error Duplicate" + ) test.fail(msg) else: - msg = ("The device %s shoudn't be hotplugged successfully" - % char_devices[0]) + msg = f"The device {char_devices[0]} shoudn't be hotplugged successfully" test.fail(msg) thread_transfer.join() if not thread_transfer.is_alive(): - error_context.context("hot-unplug all virtserialport and chardev", - test.log.info) + error_context.context( + "hot-unplug all virtserialport and chardev", test.log.info + ) for i in range(0, num_chardev): if i < num_serial_ports: vm.devices.simple_unplug(serial_devices[i], vm.monitor) diff --git a/qemu/tests/virtio_serial_hotplug_port_pci.py b/qemu/tests/virtio_serial_hotplug_port_pci.py index 0f5e8fb2be..4ad755d05d 100644 --- a/qemu/tests/virtio_serial_hotplug_port_pci.py +++ b/qemu/tests/virtio_serial_hotplug_port_pci.py @@ -1,15 +1,13 @@ -from virttest import error_context -from virttest import env_process +from virttest import env_process, error_context from virttest.qemu_monitor import QMPCmdError -from qemu.tests.virtio_console import add_chardev -from qemu.tests.virtio_console import add_virtserial_device -from qemu.tests.virtio_console import add_virtio_ports_to_vm -from qemu.tests.virtio_serial_file_transfer import transfer_data from provider import win_driver_utils -from qemu.tests.vioser_in_use import shutdown_guest # pylint: disable=W0611 -from qemu.tests.vioser_in_use import reboot_guest # pylint: disable=W0611 -from qemu.tests.vioser_in_use import live_migration_guest # pylint: disable=W0611 +from qemu.tests.virtio_console import ( + add_chardev, + add_virtio_ports_to_vm, + add_virtserial_device, +) +from qemu.tests.virtio_serial_file_transfer import transfer_data def get_buses_and_serial_devices(vm, params, char_devices, serials): @@ -26,7 +24,7 @@ def get_buses_and_serial_devices(vm, params, char_devices, serials): serial_devices = [] for index, serial_id in enumerate(serials): chardev_id = char_devices[index].get_qid() - params['serial_name_%s' % serial_id] = serial_id + params[f"serial_name_{serial_id}"] = serial_id devices = add_virtserial_device(vm, params, serial_id, chardev_id) for device in devices: if device.child_bus: @@ -75,52 +73,50 @@ def run_serial_data_transfer(): Transfer data between two ports. 
""" - params['file_transfer_serial_port'] = serials[0] - transfer_data(params, vm, sender='host') - params['file_transfer_serial_port'] = serials[1] - transfer_data(params, vm, sender='guest') + params["file_transfer_serial_port"] = serials[0] + transfer_data(params, vm, sender="host") + params["file_transfer_serial_port"] = serials[1] + transfer_data(params, vm, sender="guest") - params['serials'] = params.objects('serials')[0] + params["serials"] = params.objects("serials")[0] repeat_times = int(params.get("repeat_times", 1)) interrupt_test_after_plug = params.get("interrupt_test_after_plug") interrupt_test_after_unplug = params.get("interrupt_test_after_unplug") - vm = env.get_vm(params['main_vm']) + vm = env.get_vm(params["main_vm"]) char_devices = add_chardev(vm, params) for device in char_devices: - extra_params = ' ' + device.cmdline() - params['extra_params'] = params.get('extra_params', '') + extra_params - params['start_vm'] = "yes" + extra_params = " " + device.cmdline() + params["extra_params"] = params.get("extra_params", "") + extra_params + params["start_vm"] = "yes" env_process.preprocess(test, params, env) - vm = env.get_vm(params['main_vm']) + vm = env.get_vm(params["main_vm"]) # Make sure the guest boot successfully before hotplug vm.wait_for_login() vm.devices.insert(char_devices) - serials = params.objects('extra_serials') + serials = params.objects("extra_serials") buses, serial_devices = get_buses_and_serial_devices( - vm, params, char_devices, serials) + vm, params, char_devices, serials + ) for i in range(repeat_times): - error_context.context("Hotplug/unplug serial devices the %s time" - % (i+1), test.log.info) + error_context.context( + "Hotplug/unplug serial devices the %s time" % (i + 1), test.log.info + ) vm.devices.simple_hotplug(buses[0], vm.monitor) vm.devices.simple_hotplug(serial_devices[0], vm.monitor) - pre_nr = serial_devices[0].get_param('nr') + pre_nr = serial_devices[0].get_param("nr") # Try hotplug different device with same 'nr' if params.get("plug_same_nr") == "yes": - serial_devices[1].set_param('bus', - serial_devices[0].get_param('bus')) - serial_devices[1].set_param('nr', pre_nr) + serial_devices[1].set_param("bus", serial_devices[0].get_param("bus")) + serial_devices[1].set_param("nr", pre_nr) try: serial_devices[1].hotplug(vm.monitor, vm.devices.qemu_version) except QMPCmdError as e: - if 'A port already exists at id %d' % pre_nr not in str( - e.data): - test.fail( - 'Hotplug fail for %s, not as expected' % str(e.data)) + if "A port already exists at id %d" % pre_nr not in str(e.data): + test.fail(f"Hotplug fail for {str(e.data)}, not as expected") else: - test.fail( - 'Hotplug with same "nr" option success while should fail') - serial_devices[1].set_param('nr', int(pre_nr) + 1) + test.fail('Hotplug with same "nr" option success while should fail') + serial_devices[1].set_param("nr", int(pre_nr) + 1) vm.devices.simple_hotplug(serial_devices[1], vm.monitor) for device in serial_devices: add_virtio_ports_to_vm(vm, params, device) @@ -139,11 +135,10 @@ def run_serial_data_transfer(): vm.devices.simple_unplug(serial_devices[1], vm.monitor) out = vm.devices.simple_unplug(buses[0], vm.monitor) if out[1] is False: - msg = "Hot-unplug device %s failed" % buses[0] + msg = f"Hot-unplug device {buses[0]} failed" test.fail(msg) if interrupt_test_after_unplug: - test.log.info("Run %s after hot-unplug", - interrupt_test_after_unplug) + test.log.info("Run %s after hot-unplug", interrupt_test_after_unplug) run_interrupt_test(interrupt_test_after_unplug) if 
params.get("memory_leak_check", "no") == "yes": diff --git a/qemu/tests/virtio_serial_hotplug_port_pci_chardev.py b/qemu/tests/virtio_serial_hotplug_port_pci_chardev.py index 812f49d1d4..52cff085a6 100644 --- a/qemu/tests/virtio_serial_hotplug_port_pci_chardev.py +++ b/qemu/tests/virtio_serial_hotplug_port_pci_chardev.py @@ -1,8 +1,6 @@ -from virttest import error_context +from virttest import error_context, utils_test -from virttest import utils_test -from qemu.tests.virtio_console import add_chardev -from qemu.tests.virtio_console import add_virtio_ports_to_vm +from qemu.tests.virtio_console import add_chardev, add_virtio_ports_to_vm from qemu.tests.virtio_serial_file_transfer import transfer_data from qemu.tests.virtio_serial_hotplug_port_pci import get_buses_and_serial_devices @@ -24,12 +22,13 @@ def run(test, params, env): :param env: Dictionary with test environment """ - vm = env.get_vm(params['main_vm']) + vm = env.get_vm(params["main_vm"]) os_type = params["os_type"] char_devices = add_chardev(vm, params) - serials = params.objects('extra_serials') + serials = params.objects("extra_serials") buses, serial_devices = get_buses_and_serial_devices( - vm, params, char_devices, serials) + vm, params, char_devices, serials + ) vm.devices.simple_hotplug(buses[0], vm.monitor) vm.devices.simple_hotplug(char_devices[0], vm.monitor) vm.devices.simple_hotplug(serial_devices[0], vm.monitor) @@ -39,6 +38,7 @@ def run(test, params, env): driver_name = params["driver_name"] session = vm.wait_for_login() session = utils_test.qemu.windrv_check_running_verifier( - session, vm, test, driver_name) - params['file_transfer_serial_port'] = serials[0] - transfer_data(params, vm, sender='both') + session, vm, test, driver_name + ) + params["file_transfer_serial_port"] = serials[0] + transfer_data(params, vm, sender="both") diff --git a/qemu/tests/virtio_serial_large_file_transfer.py b/qemu/tests/virtio_serial_large_file_transfer.py index 9730b771ba..f44cb45196 100644 --- a/qemu/tests/virtio_serial_large_file_transfer.py +++ b/qemu/tests/virtio_serial_large_file_transfer.py @@ -1,14 +1,15 @@ import os -import time import signal +import time from avocado.utils import process -from virttest import error_context -from virttest import data_dir +from virttest import data_dir, error_context -from qemu.tests.virtio_serial_file_transfer import generate_data_file -from qemu.tests.virtio_serial_file_transfer import get_command_options -from qemu.tests.virtio_serial_file_transfer import transfer_data +from qemu.tests.virtio_serial_file_transfer import ( + generate_data_file, + get_command_options, + transfer_data, +) @error_context.context_aware @@ -28,29 +29,26 @@ def run(test, params, env): :param env: Dictionary with test environment. 
""" - os_type = params["os_type"] - sender = params['file_sender'] + params["os_type"] + sender = params["file_sender"] file_size = int(params.get("filesize", 100)) continue_time = int(params.get("continue_transfer_time", 600)) vm = env.get_vm(params["main_vm"]) session = vm.wait_for_login() host_dir = data_dir.get_tmp_dir() - guest_dir = params.get("tmp_dir", '/var/tmp/') - host_file_size, guest_file_size, _, _ =\ - get_command_options(sender, file_size) + guest_dir = params.get("tmp_dir", "/var/tmp/") + host_file_size, guest_file_size, _, _ = get_command_options(sender, file_size) host_file_name = generate_data_file(host_dir, host_file_size) - guest_file_name = generate_data_file( - guest_dir, guest_file_size, session) + guest_file_name = generate_data_file(guest_dir, guest_file_size, session) check_pid_cmd = 'ps aux | grep "%s"| grep -v "grep"' - host_script = params['host_script'] + host_script = params["host_script"] guest_script = params["guest_script"] - test.log.info('Transfer data from %s', sender) + test.log.info("Transfer data from %s", sender) try: test_time = time.time() + continue_time while time.time() < test_time: - transfer_data( - params, vm, host_file_name, guest_file_name, sender, False) + transfer_data(params, vm, host_file_name, guest_file_name, sender, False) host_proc = process.getoutput(check_pid_cmd % host_script, shell=True) guest_proc = session.cmd_output(check_pid_cmd % guest_script) if host_proc: @@ -60,10 +58,10 @@ def run(test, params, env): if guest_proc: guest_pid = guest_proc.split()[1] test.log.info("Kill serial process on guest") - session.cmd('kill -9 %s' % guest_pid) + session.cmd(f"kill -9 {guest_pid}") finally: - clean_cmd = params['clean_cmd'] - session.cmd('%s %s' % (clean_cmd, guest_file_name)) + clean_cmd = params["clean_cmd"] + session.cmd(f"{clean_cmd} {guest_file_name}") os.remove(host_file_name) session.close() vm.verify_kernel_crash() diff --git a/qemu/tests/virtio_serial_throttling.py b/qemu/tests/virtio_serial_throttling.py index 73b0922fa5..c27863c6c5 100644 --- a/qemu/tests/virtio_serial_throttling.py +++ b/qemu/tests/virtio_serial_throttling.py @@ -1,4 +1,4 @@ -from virttest import error_context, env_process +from virttest import env_process, error_context from virttest.utils_virtio_port import VirtioPortTest @@ -21,11 +21,11 @@ def send_data_from_guest_to_host(): session = vm.wait_for_login() port.open() error_context.context("send data from guest to host", test.log.info) - if params['os_type'] == 'windows': - vport_name = '\\\\.\\Global\\' + port.name - cmd = 'dd if=/dev/zero of=%s bs=1024 count=1' % vport_name + if params["os_type"] == "windows": + vport_name = "\\\\.\\Global\\" + port.name + cmd = f"dd if=/dev/zero of={vport_name} bs=1024 count=1" else: - cmd = 'dd if=/dev/zero of=/dev/virtio-ports/%s bs=1024 count=1' % port.name + cmd = f"dd if=/dev/zero of=/dev/virtio-ports/{port.name} bs=1024 count=1" session.cmd(cmd) session.close() @@ -33,12 +33,12 @@ def send_data_from_guest_to_host(): def send_data_from_host_to_guest(): port.open() error_context.context("send data from host to guest", test.log.info) - data = 'Hello world \n' * 100 + data = "Hello world \n" * 100 data = data.encode() port.sock.send(data) - guest_worker.cmd("virt.open('%s')" % port.name) + guest_worker.cmd(f"virt.open('{port.name}')") - serial_id = params.objects('serials')[-1] + serial_id = params.objects("serials")[-1] try: virtio_test = VirtioPortTest(test, env, params) (vm, guest_worker, port) = virtio_test.get_vm_with_single_port() @@ -49,8 +49,8 @@ def 
send_data_from_host_to_guest(): finally: virtio_test.cleanup() vm.destroy() - params['chardev_backend_%s' % serial_id] = 'tcp_socket' - vm_name = params['main_vm'] + params[f"chardev_backend_{serial_id}"] = "tcp_socket" + vm_name = params["main_vm"] env_process.preprocess_vm(test, params, env, vm_name) try: virtio_test = VirtioPortTest(test, env, params) diff --git a/qemu/tests/virtio_serial_unplug_port_chardev_pci.py b/qemu/tests/virtio_serial_unplug_port_chardev_pci.py index 4c29987571..1906d3ac77 100644 --- a/qemu/tests/virtio_serial_unplug_port_chardev_pci.py +++ b/qemu/tests/virtio_serial_unplug_port_chardev_pci.py @@ -1,9 +1,9 @@ import time -from virttest import error_context -from virttest import utils_test -from qemu.tests.virtio_serial_file_transfer import transfer_data +from virttest import error_context, utils_test + from qemu.tests.hotplug_port_chardev_pci_with_console import get_virtio_serial_pci +from qemu.tests.virtio_serial_file_transfer import transfer_data @error_context.context_aware @@ -36,23 +36,24 @@ def run(test, params, env): driver_name = params["driver_name"] session = vm.wait_for_login() session = utils_test.qemu.windrv_check_running_verifier( - session, vm, test, driver_name) + session, vm, test, driver_name + ) for port in params.objects("serials"): port_params = params.object_params(port) - if not port_params['serial_type'].startswith('virt'): + if not port_params["serial_type"].startswith("virt"): continue virtio_port = vm.devices.get(port) if not virtio_port: - test.error("Virtio Port '%s' not found" % port) + test.error(f"Virtio Port '{port}' not found") chardev_qid = virtio_port.get_param("chardev") try: port_chardev = vm.devices.get_by_qid(chardev_qid)[0] except IndexError: - test.error("Failed to get device %s" % chardev_qid) - params['file_transfer_serial_port'] = port + test.error(f"Failed to get device {chardev_qid}") + params["file_transfer_serial_port"] = port virtio_serial_pci = get_virtio_serial_pci(vm, virtio_port) test.log.info("Transfer data with %s before hot-unplugging", port) - transfer_data(params, vm, sender='both') + transfer_data(params, vm, sender="both") vm.devices.simple_unplug(virtio_port, vm.monitor) vm.devices.simple_unplug(port_chardev, vm.monitor) vm.devices.simple_unplug(virtio_serial_pci, vm.monitor) @@ -61,9 +62,9 @@ def run(test, params, env): time.sleep(sleep_time) vm.devices.simple_hotplug(port_chardev, vm.monitor) vm.devices.simple_hotplug(virtio_port, vm.monitor) - transfer_data(params, vm, sender='guest') + transfer_data(params, vm, sender="guest") vm.devices.simple_unplug(virtio_port, vm.monitor) vm.devices.simple_hotplug(virtio_port, vm.monitor) - transfer_data(params, vm, sender='host') + transfer_data(params, vm, sender="host") vm.verify_alive() vm.verify_kernel_crash() diff --git a/qemu/tests/virtio_serial_various_chardev_hotplug.py b/qemu/tests/virtio_serial_various_chardev_hotplug.py index 2b01f063a9..2c0202bceb 100644 --- a/qemu/tests/virtio_serial_various_chardev_hotplug.py +++ b/qemu/tests/virtio_serial_various_chardev_hotplug.py @@ -1,4 +1,5 @@ from virttest import error_context + from qemu.tests.virtio_console import add_chardev @@ -28,13 +29,14 @@ def run(test, params, env): :param params: Dictionary with the test parameters :param env: Dictionary with test environment """ + def cmd_qmp_log(vm, cmd, args): reply = vm.monitor.cmd_qmp(cmd, args) if "error" in reply: if reply["error"]["class"] == "CommandNotFound": - test.error("qmp command %s not supported" % cmd) + test.error(f"qmp command {cmd} not 
supported") else: - test.error("qmp error: %s" % reply["error"]["desc"]) + test.error("qmp error: {}".format(reply["error"]["desc"])) return reply vm = env.get_vm(params["main_vm"]) @@ -44,27 +46,35 @@ def cmd_qmp_log(vm, cmd, args): vm.devices.simple_hotplug(char_device, vm.monitor) chardev_id = char_device.get_qid() chardev_param = params.object_params(chardev_id) - backend = chardev_param.get('chardev_backend', - 'unix_socket') - if backend == 'ringbuf': - ringbuf_write_size = int(params.get('ringbuf_write_size')) - ringbuf_read_size = int(params.get('ringbuf_read_size')) + backend = chardev_param.get("chardev_backend", "unix_socket") + if backend == "ringbuf": + ringbuf_write_size = int(params.get("ringbuf_write_size")) + ringbuf_read_size = int(params.get("ringbuf_read_size")) if ringbuf_write_size < ringbuf_read_size: - test.error("data error:write_size %d must above read_size %d" - % (ringbuf_write_size, ringbuf_read_size)) - ringbuf_data = params.get('ringbuf_data') - ringbuf_format = params.get('ringbuf_format') - ringbuf_write = cmd_qmp_log(vm, "ringbuf-write", - {'device': chardev_id, - 'data': ringbuf_data, - 'format': ringbuf_format}) - ringbuf_read = cmd_qmp_log(vm, "ringbuf-read", - {'device': chardev_id, - 'size': ringbuf_read_size, - 'format': ringbuf_format}) + test.error( + "data error:write_size %d must above read_size %d" + % (ringbuf_write_size, ringbuf_read_size) + ) + ringbuf_data = params.get("ringbuf_data") + ringbuf_format = params.get("ringbuf_format") + cmd_qmp_log( + vm, + "ringbuf-write", + {"device": chardev_id, "data": ringbuf_data, "format": ringbuf_format}, + ) + ringbuf_read = cmd_qmp_log( + vm, + "ringbuf-read", + { + "device": chardev_id, + "size": ringbuf_read_size, + "format": ringbuf_format, + }, + ) if ringbuf_data[:ringbuf_read_size] != ringbuf_read["return"]: - test.fail("qmp error: can't find data \'%s\' in %s" - % (ringbuf_data[:ringbuf_read_size], ringbuf_read)) + test.fail( + f"qmp error: can't find data '{ringbuf_data[:ringbuf_read_size]}' in {ringbuf_read}" + ) for char_device in char_devices: vm.devices.simple_unplug(char_device, vm.monitor) vm.reboot() diff --git a/qemu/tests/virtio_trace_pipenb.py b/qemu/tests/virtio_trace_pipenb.py index 83981581e9..4fc2da31f2 100644 --- a/qemu/tests/virtio_trace_pipenb.py +++ b/qemu/tests/virtio_trace_pipenb.py @@ -1,6 +1,6 @@ -import time -import os import errno +import os +import time from virttest import error_context @@ -26,7 +26,9 @@ def run(test, params, env): v_path = vm.get_serial_console_filename(serials[-1]) vm.verify_alive() session = vm.wait_for_login(timeout=timeout) - out_put = session.cmd_output("nohup cat /proc/kallsyms > /dev/virtio-ports/vs2 2>&1 &") + out_put = session.cmd_output( + "nohup cat /proc/kallsyms > /dev/virtio-ports/vs2 2>&1 &" + ) time.sleep(10) if session.cmd_output("date") is None: test.fail("Guest shouldn't be blocked and a date should output!") @@ -40,9 +42,11 @@ def run(test, params, env): time.sleep(5) break else: - raise Exception("Read data in host failed as %s" % e) + raise Exception(f"Read data in host failed as {e}") - if not session.cmd_status("ps -p %s" % guest_pid, safe=True): - test.fail("send process in guest does not exit after all data are read out in host") + if not session.cmd_status(f"ps -p {guest_pid}", safe=True): + test.fail( + "send process in guest does not exit after all data are read out in host" + ) vm.verify_alive() vm.verify_kernel_crash() diff --git a/qemu/tests/virtio_win_installer_version_check.py 
b/qemu/tests/virtio_win_installer_version_check.py index 62733298e6..177e8f11e6 100644 --- a/qemu/tests/virtio_win_installer_version_check.py +++ b/qemu/tests/virtio_win_installer_version_check.py @@ -1,12 +1,10 @@ import logging import re -from virttest import error_context -from virttest import utils_misc - from avocado.utils import process +from virttest import error_context, utils_misc -LOG_JOB = logging.getLogger('avocado.test') +LOG_JOB = logging.getLogger("avocado.test") @error_context.context_aware @@ -24,21 +22,22 @@ def run(test, params, env): vm = env.get_vm(params["main_vm"]) session = vm.wait_for_login() - error_context.context("Check virtio-win-installer version.", - LOG_JOB.info) - pkg_status = process.getstatusoutput(params["rpm_install_chk_cmd"], - shell=True)[0] + error_context.context("Check virtio-win-installer version.", LOG_JOB.info) + pkg_status = process.getstatusoutput(params["rpm_install_chk_cmd"], shell=True)[0] if pkg_status: - test.cancel("Pls check the test env: whether virtio-win pkg is " - "installed on host.") + test.cancel( + "Pls check the test env: whether virtio-win pkg is " "installed on host." + ) - pkg_ver = process.system_output(params["rpm_ver_chk_cmd"], - shell=True).strip().decode() + pkg_ver = ( + process.system_output(params["rpm_ver_chk_cmd"], shell=True).strip().decode() + ) - iso_name = process.system_output(params["iso_name_chk_cmd"], - shell=True).strip().decode() + iso_name = ( + process.system_output(params["iso_name_chk_cmd"], shell=True).strip().decode() + ) # /usr/share/virtio-win/virtio-win-1.9.xx.iso - ver_pattern = r'\d.*\d' + ver_pattern = r"\d.*\d" iso_ver = re.findall(ver_pattern, iso_name, re.I)[0] iso_label_name = session.cmd_output(params["iso_label_chk_cmd"]).strip() @@ -47,15 +46,16 @@ def run(test, params, env): vol_virtio_key = "VolumeName like '%virtio-win%'" vol_virtio = utils_misc.get_win_disk_vol(session, vol_virtio_key) - installer_ver = session.cmd_output(params["installer_chk_cmd"] - % vol_virtio).strip() + installer_ver = session.cmd_output(params["installer_chk_cmd"] % vol_virtio).strip() if not pkg_ver == iso_ver == iso_label_ver == installer_ver: - test.fail("Installer version isn't the same with others," - "the package version is %s\n" - "the iso name version is %s\n" - "the iso label version is %s\n" - "the installer version is %s\n", - (pkg_ver, iso_ver, iso_label_ver, installer_ver)) + test.fail( + "Installer version isn't the same with others," + "the package version is %s\n" + "the iso name version is %s\n" + "the iso label version is %s\n" + "the installer version is %s\n", + (pkg_ver, iso_ver, iso_label_ver, installer_ver), + ) if session: session.close() diff --git a/qemu/tests/virtual_nic_private.py b/qemu/tests/virtual_nic_private.py index 0bdc7e3871..bca3106657 100644 --- a/qemu/tests/virtual_nic_private.py +++ b/qemu/tests/virtual_nic_private.py @@ -1,10 +1,7 @@ import re from aexpect import ShellCmdError -from virttest import remote -from virttest import utils_misc -from virttest import utils_net -from virttest import error_context +from virttest import error_context, remote, utils_misc, utils_net @error_context.context_aware @@ -19,6 +16,7 @@ def run(test, params, env): :param params: Dictionary with the test parameters :param env: Dictionary with test environment. 
""" + def _is_process_finished(session, process_name): """ Check whether the target process is finished running @@ -39,16 +37,16 @@ def data_mon(session, cmd, timeout): session.cmd(cmd, timeout) except ShellCmdError as e: if re.findall(catch_date % (addresses[1], addresses[0]), str(e)): - test.fail("God! Capture the transfet data:'%s'" % str(e)) + test.fail(f"God! Capture the transfet data:'{str(e)}'") test.log.info("Guest3 catch data is '%s'", str(e)) - timeout = int(params.get("login_timeout", '360')) + timeout = int(params.get("login_timeout", "360")) password = params.get("password") username = params.get("username") shell_port = params.get("shell_port") tmp_dir = params.get("tmp_dir", "/tmp/") clean_cmd = params.get("clean_cmd", "rm -f") - filesize = int(params.get("filesize", '100')) + filesize = int(params.get("filesize", "100")) wireshark_name = params.get("wireshark_name") check_proc_temp = params.get("check_proc_temp") @@ -71,8 +69,8 @@ def data_mon(session, cmd, timeout): mon_session = vms[2].wait_for_login(timeout=timeout) mon_macaddr = vms[2].get_mac_address() - src_file = (tmp_dir + "src-%s" % utils_misc.generate_random_string(8)) - dst_file = (tmp_dir + "dst-%s" % utils_misc.generate_random_string(8)) + src_file = tmp_dir + f"src-{utils_misc.generate_random_string(8)}" + dst_file = tmp_dir + f"dst-{utils_misc.generate_random_string(8)}" try: # Before transfer, run tcpdump to try to catche data @@ -87,44 +85,58 @@ def data_mon(session, cmd, timeout): error_context.context("Install wireshark", test.log.info) install_wireshark_cmd = params.get("install_wireshark_cmd") install_wireshark_cmd = utils_misc.set_winutils_letter( - sessions[2], install_wireshark_cmd) - status, output = sessions[2].cmd_status_output(install_wireshark_cmd, - timeout=timeout) + sessions[2], install_wireshark_cmd + ) + status, output = sessions[2].cmd_status_output( + install_wireshark_cmd, timeout=timeout + ) if status: - test.error("Failed to install wireshark, status=%s, output=%s" - % (status, output)) + test.error( + f"Failed to install wireshark, status={status}, output={output}" + ) test.log.info("Wait for wireshark installation to complete") utils_misc.wait_for( lambda: _is_process_finished(sessions[2], wireshark_name), - timeout, 20, 3) + timeout, + 20, + 3, + ) test.log.info("Wireshark is already installed") interface_name = if_func(*args) - tcpdump_cmd = tcpdump_cmd % (addresses[1], addresses[0], - interface_name) - dthread = utils_misc.InterruptedThread(data_mon, - (sessions[2], - tcpdump_cmd, - mon_process_timeout)) + tcpdump_cmd = tcpdump_cmd % (addresses[1], addresses[0], interface_name) + dthread = utils_misc.InterruptedThread( + data_mon, (sessions[2], tcpdump_cmd, mon_process_timeout) + ) test.log.info("Tcpdump mon start ...") test.log.info("Creating %dMB file on guest1", filesize) sessions[0].cmd(dd_cmd % (src_file, filesize), timeout=timeout) dthread.start() - error_context.context("Transferring file guest1 -> guest2", - test.log.info) + error_context.context("Transferring file guest1 -> guest2", test.log.info) if params.get("os_type") == "windows": cp_cmd = params["copy_cmd"] - cp_cmd = cp_cmd % (addresses[1], params['file_transfer_port'], - src_file, dst_file) + cp_cmd = cp_cmd % ( + addresses[1], + params["file_transfer_port"], + src_file, + dst_file, + ) sessions[0].cmd_output(cp_cmd) else: - remote.scp_between_remotes(addresses[0], addresses[1], - shell_port, password, password, - username, username, src_file, dst_file) - - error_context.context("Check the src and dst file is 
same", - test.log.info) + remote.scp_between_remotes( + addresses[0], + addresses[1], + shell_port, + password, + password, + username, + username, + src_file, + dst_file, + ) + + error_context.context("Check the src and dst file is same", test.log.info) src_md5 = sessions[0].cmd_output(md5_check % src_file).split()[0] dst_md5 = sessions[1].cmd_output(md5_check % dst_file).split()[0] @@ -146,8 +158,8 @@ def data_mon(session, cmd, timeout): dthread.join() finally: - sessions[0].cmd(" %s %s " % (clean_cmd, src_file)) - sessions[1].cmd(" %s %s " % (clean_cmd, src_file)) + sessions[0].cmd(f" {clean_cmd} {src_file} ") + sessions[1].cmd(f" {clean_cmd} {src_file} ") if mon_session: mon_session.close() for session in sessions: diff --git a/qemu/tests/virtual_nic_send_buffer.py b/qemu/tests/virtual_nic_send_buffer.py index 8af6e457c7..758f6a1bff 100644 --- a/qemu/tests/virtual_nic_send_buffer.py +++ b/qemu/tests/virtual_nic_send_buffer.py @@ -1,8 +1,4 @@ -from virttest import remote -from virttest import utils_misc -from virttest import utils_test -from virttest import utils_net -from virttest import error_context +from virttest import error_context, remote, utils_misc, utils_net, utils_test @error_context.context_aware @@ -24,58 +20,63 @@ def run(test, params, env): dst_ses = None try: - error_context.context("Transfer file between host and guest", - test.log.info) + error_context.context("Transfer file between host and guest", test.log.info) utils_test.run_file_transfer(test, params, env) dsthost = params.get("dsthost") login_timeout = int(params.get("login_timeout", 360)) if dsthost: params_host = params.object_params("dsthost") - dst_ses = remote.wait_for_login(params_host.get("shell_client"), - dsthost, - params_host.get("shell_port"), - params_host.get("username"), - params_host.get("password"), - params_host.get("shell_prompt"), - timeout=login_timeout) + dst_ses = remote.wait_for_login( + params_host.get("shell_client"), + dsthost, + params_host.get("shell_port"), + params_host.get("username"), + params_host.get("password"), + params_host.get("shell_prompt"), + timeout=login_timeout, + ) else: vm = env.get_vm(params["main_vm"]) vm.verify_alive() dst_ses = vm.wait_for_login(timeout=login_timeout) dsthost = vm.get_address() - bg_stress_test = params.get("background_stress_test", 'netperf_stress') - error_context.context(("Run subtest %s between host and guest." 
% - bg_stress_test), test.log.info) + bg_stress_test = params.get("background_stress_test", "netperf_stress") + error_context.context( + (f"Run subtest {bg_stress_test} between host and guest."), test.log.info + ) wait_time = float(params.get("wait_bg_time", 60)) bg_stress_run_flag = params.get("bg_stress_run_flag") env[bg_stress_run_flag] = False stress_thread = utils_misc.InterruptedThread( - utils_test.run_virt_sub_test, (test, params, env), - {"sub_type": bg_stress_test}) + utils_test.run_virt_sub_test, + (test, params, env), + {"sub_type": bg_stress_test}, + ) stress_thread.start() - if not utils_misc.wait_for(lambda: env.get(bg_stress_run_flag), - wait_time, 0, 1, - "Wait %s test start" % bg_stress_test): + if not utils_misc.wait_for( + lambda: env.get(bg_stress_run_flag), + wait_time, + 0, + 1, + f"Wait {bg_stress_test} test start", + ): err = "Fail to start netperf test between guest and host" test.error(err) ping_timeout = int(params.get("ping_timeout", 60)) host_ip = utils_net.get_host_ip_address(params) - txt = "Ping %s from %s during netperf testing" % (host_ip, dsthost) + txt = f"Ping {host_ip} from {dsthost} during netperf testing" error_context.context(txt, test.log.info) - status, output = utils_test.ping(host_ip, session=dst_ses, - timeout=ping_timeout) + status, output = utils_test.ping(host_ip, session=dst_ses, timeout=ping_timeout) if status != 0: - test.fail("Ping returns non-zero value %s" % output) + test.fail(f"Ping returns non-zero value {output}") package_lost = utils_test.get_loss_ratio(output) package_lost_ratio = float(params.get("package_lost_ratio", 5)) - txt = "%s%% packeage lost when ping %s from %s." % (package_lost, - host_ip, - dsthost) + txt = f"{package_lost}% packeage lost when ping {host_ip} from {dsthost}." if package_lost > package_lost_ratio: test.fail(txt) test.log.info(txt) diff --git a/qemu/tests/virtual_nic_stress.py b/qemu/tests/virtual_nic_stress.py index 49bfa1169e..01be2114b8 100644 --- a/qemu/tests/virtual_nic_stress.py +++ b/qemu/tests/virtual_nic_stress.py @@ -1,7 +1,4 @@ -from virttest import error_context -from virttest import utils_net -from virttest import utils_test -from virttest import utils_misc +from virttest import error_context, utils_misc, utils_net, utils_test @error_context.context_aware @@ -34,11 +31,9 @@ def load_stress(): """ error_context.context("launch stress app in guest", test.log.info) args = (test, params, env, params["stress_test"]) - bg_test = utils_test.BackgroundTest( - utils_test.run_virt_sub_test, args) + bg_test = utils_test.BackgroundTest(utils_test.run_virt_sub_test, args) bg_test.start() - if not utils_misc.wait_for(bg_test.is_alive, first=10, - step=3, timeout=100): + if not utils_misc.wait_for(bg_test.is_alive, first=10, step=3, timeout=100): test.fail("background test start failed") def unload_stress(session): @@ -63,9 +58,8 @@ def unload_stress(session): error_context.context("Run memory heavy stress in guest", test.log.info) if os_type == "linux": test_mem = params.get("memory", 256) - stress_args = "--cpu 4 --io 4 --vm 2 --vm-bytes %sM" % int(test_mem) - stress_test = utils_test.VMStress(vm, "stress", params, - stress_args=stress_args) + stress_args = f"--cpu 4 --io 4 --vm 2 --vm-bytes {int(test_mem)}M" + stress_test = utils_test.VMStress(vm, "stress", params, stress_args=stress_args) stress_test.load_stress_tool() else: load_stress() @@ -76,15 +70,14 @@ def unload_stress(session): else: unload_stress(session) - error_context.context("Ping test after flood ping," - " Check if the network is still 
alive", - test.log.info) + error_context.context( + "Ping test after flood ping," " Check if the network is still alive", + test.log.info, + ) count = params["count"] timeout = float(count) * 2 - status, output = utils_net.ping(guest_ip, count, - timeout=timeout) + status, output = utils_net.ping(guest_ip, count, timeout=timeout) if status != 0: - test.fail("Ping failed, status: %s," - " output: %s" % (status, output)) + test.fail(f"Ping failed, status: {status}," f" output: {output}") session.close() diff --git a/qemu/tests/vmstop.py b/qemu/tests/vmstop.py index ab06dbee49..35e253331f 100644 --- a/qemu/tests/vmstop.py +++ b/qemu/tests/vmstop.py @@ -1,5 +1,5 @@ -import time import os +import time from avocado.utils import crypto, process from virttest import utils_misc @@ -33,12 +33,12 @@ def run(test, params, env): file_size = params.get("file_size", "1000") try: - process.run("dd if=/dev/zero of=/tmp/file bs=1M count=%s" % file_size) + process.run(f"dd if=/dev/zero of=/tmp/file bs=1M count={file_size}") # Transfer file from host to guest, we didn't expect the finish of # transfer, we just let it to be a kind of stress in guest. - bg = utils_misc.InterruptedThread(vm.copy_files_to, - ("/tmp/file", guest_path), - dict(verbose=True, timeout=60)) + bg = utils_misc.InterruptedThread( + vm.copy_files_to, ("/tmp/file", guest_path), dict(verbose=True, timeout=60) + ) test.log.info("Start the background transfer") bg.start() @@ -52,8 +52,10 @@ def run(test, params, env): test.log.info("Check the status through monitor") if not vm.monitor.verify_status("paused"): status = str(vm.monitor.info("status")) - test.fail("Guest did not pause after sending stop," - " guest status is %s" % status) + test.fail( + "Guest did not pause after sending stop," + f" guest status is {status}" + ) # check through session test.log.info("Check the session") @@ -66,7 +68,7 @@ def run(test, params, env): vm.save_to_file(p) time.sleep(1) if not os.path.isfile(p): - test.fail("VM failed to save state file %s" % p) + test.fail(f"VM failed to save state file {p}") # Fail if we see deltas md5_save1 = crypto.hash_file(save1) diff --git a/qemu/tests/vnc.py b/qemu/tests/vnc.py index c8ec0ef9ce..4806562d76 100644 --- a/qemu/tests/vnc.py +++ b/qemu/tests/vnc.py @@ -1,18 +1,16 @@ import logging -import time +import random import socket import struct -import random +import time -from virttest import utils_misc +from virttest import error_context, utils_misc from virttest.RFBDes import Des -from virttest import error_context - -LOG_JOB = logging.getLogger('avocado.test') +LOG_JOB = logging.getLogger("avocado.test") -class VNC(object): +class VNC: """ Simple VNC client which can only connect to and authenticate with vnc server. 
@@ -30,27 +28,28 @@ def hand_shake(self, password=None): """ rfb_server_version = self.sock.recv(12) LOG_JOB.debug("VNC server rfb version: %s", rfb_server_version) - LOG_JOB.debug("Handshake with rfb protocol version: %s", - self.rfb_version) - rfb_version = "RFB 00%s.00%s\n" % (self.rfb_version.split(".")[0], - self.rfb_version.split(".")[1]) + LOG_JOB.debug("Handshake with rfb protocol version: %s", self.rfb_version) + rfb_version = "RFB 00{}.00{}\n".format( + self.rfb_version.split(".")[0], + self.rfb_version.split(".")[1], + ) self.sock.send(rfb_version) if self.rfb_version != "3.3": rec = self.sock.recv(1) - (auth,) = struct.unpack('!B', rec) + (auth,) = struct.unpack("!B", rec) if auth == 0: rec = self.sock.recv(4) - (reason_len, ) = struct.unpack('!I', rec) + (reason_len,) = struct.unpack("!I", rec) reason = self.sock.recv(reason_len) LOG_JOB.error("Connection failed: %s", reason) return False else: rec = self.sock.recv(auth) - (auth_type,) = struct.unpack('!%sB' % auth, rec) + (auth_type,) = struct.unpack(f"!{auth}B", rec) LOG_JOB.debug("Server support '%s' security types", auth_type) else: rec = self.sock.recv(4) - (auth_type, ) = struct.unpack('!I', rec) + (auth_type,) = struct.unpack("!I", rec) LOG_JOB.debug("Server support %s security types", auth_type) if auth_type == 0: @@ -63,18 +62,18 @@ def hand_shake(self, password=None): elif auth_type == 2: LOG_JOB.debug("VNC Authentication") if self.rfb_version != "3.3": - self.sock.send(struct.pack('!B', 2)) + self.sock.send(struct.pack("!B", 2)) rec = self.sock.recv(16) des = Des(password) p = des.crypt(rec) self.sock.send(p) # Security Result check phase rec = self.sock.recv(4) - (status, ) = struct.unpack('!I', rec) + (status,) = struct.unpack("!I", rec) if status == 1: if self.rfb_version == "3.8": rec = self.sock.recv(4) - (str_len, ) = struct.unpack('!I', rec) + (str_len,) = struct.unpack("!I", rec) reason = self.sock.recv(str_len) LOG_JOB.debug("Handshaking failed : %s", reason) return False @@ -85,13 +84,22 @@ def initialize(self, shared_flag=0): """ Dealing with VNC initial message. 
""" - (shared_flag, ) = struct.pack('!B', shared_flag) + (shared_flag,) = struct.pack("!B", shared_flag) self.sock.send(shared_flag) rec = self.sock.recv(24) - (width, height, pixformat, name_len) = struct.unpack('!HH16sI', rec) - (bits_per_pixel, depth, big_endian, true_color, - red_max, green_max, blue_max, red_shift, green_shift, - blue_shift) = struct.unpack("!BBBBHHHBBBxxx", pixformat) + (width, height, pixformat, name_len) = struct.unpack("!HH16sI", rec) + ( + bits_per_pixel, + depth, + big_endian, + true_color, + red_max, + green_max, + blue_max, + red_shift, + green_shift, + blue_shift, + ) = struct.unpack("!BBBBHHHBBBxxx", pixformat) server_name = self.sock.recv(name_len) LOG_JOB.info("vnc server name: %s", server_name) @@ -127,8 +135,7 @@ def run(test, params, env): change_passwd_cmd = params.get("change_passwd_cmd", default_cmd) rfb_version_list = params.get("rfb_version").strip().split() for rfb_version in rfb_version_list: - error_context.base_context("Test with guest RFB version %s" % - rfb_version) + error_context.base_context(f"Test with guest RFB version {rfb_version}") rand = random.SystemRandom() rand.seed() password = utils_misc.generate_random_string(rand.randint(1, 8)) @@ -137,8 +144,9 @@ def run(test, params, env): test.log.info("VNC password timeout is: %s", timeout) vm.monitor.send_args_cmd(change_passwd_cmd % (password, timeout)) - error_context.context("Connect to VNC server after setting password" - " to '%s'" % password) + error_context.context( + "Connect to VNC server after setting password" f" to '{password}'" + ) vnc = VNC(port=port, rfb_version=rfb_version) status = vnc.hand_shake(password) vnc.initialize() @@ -155,5 +163,7 @@ def run(test, params, env): vnc.close() if status: # Should not handshake succeffully. - test.fail("VNC connected with Timeout password, The" - " cmd of setting expire time doesn't work.") + test.fail( + "VNC connected with Timeout password, The" + " cmd of setting expire time doesn't work." 
+ ) diff --git a/qemu/tests/vpmu_check_instructions.py b/qemu/tests/vpmu_check_instructions.py index 8db1e8eeec..240778420f 100644 --- a/qemu/tests/vpmu_check_instructions.py +++ b/qemu/tests/vpmu_check_instructions.py @@ -1,14 +1,9 @@ import os import re - from shutil import copyfile from avocado.utils import process - -from virttest import data_dir -from virttest import error_context -from virttest import arch -from virttest import utils_package +from virttest import arch, data_dir, error_context, utils_package @error_context.context_aware @@ -29,9 +24,9 @@ def run(test, params, env): build_cmd = params.get("build_cmd") vm_arch = params["vm_arch_name"] host_arch = arch.ARCH - src_dir = os.path.join(data_dir.get_deps_dir(), 'million') - src_file = os.path.join(src_dir, "million-%s.s" % host_arch) - dst_file = os.path.join(tmp_dir, "million-%s.s" % host_arch) + src_dir = os.path.join(data_dir.get_deps_dir(), "million") + src_file = os.path.join(src_dir, f"million-{host_arch}.s") + dst_file = os.path.join(tmp_dir, f"million-{host_arch}.s") if not utils_package.package_install("perf"): test.error("Install dependency packages failed") @@ -53,9 +48,10 @@ def run(test, params, env): if not utils_package.package_install("perf", session): test.error("Install dependency packages failed") - src_file = os.path.join(src_dir, "million-%s.s" % vm_arch) - error_context.context("transfer '%s' to guest('%s')" % - (src_file, dst_file), test.log.info) + src_file = os.path.join(src_dir, f"million-{vm_arch}.s") + error_context.context( + f"transfer '{src_file}' to guest('{dst_file}')", test.log.info + ) vm.copy_files_to(src_file, tmp_dir, timeout=timeout) error_context.context("build binary file 'million' in guest", test.log.info) diff --git a/qemu/tests/vsock_hotplug.py b/qemu/tests/vsock_hotplug.py index 0c7bcaf623..17b35e3ea3 100644 --- a/qemu/tests/vsock_hotplug.py +++ b/qemu/tests/vsock_hotplug.py @@ -1,16 +1,11 @@ import re import time -from avocado.utils import linux_modules -from avocado.utils import path - -from virttest import utils_vsock -from virttest import utils_misc -from virttest import error_context +from avocado.utils import linux_modules, path +from virttest import error_context, utils_misc, utils_vsock from virttest.qemu_devices import qdevices -from qemu.tests import vsock_test -from qemu.tests import vsock_negative_test +from qemu.tests import vsock_negative_test, vsock_test @error_context.context_aware @@ -31,50 +26,48 @@ def run(test, params, env): :param env: Dictionary with test environment """ - linux_modules.load_module('vhost_vsock') - vm = env.get_vm(params['main_vm']) + linux_modules.load_module("vhost_vsock") + vm = env.get_vm(params["main_vm"]) session = vm.wait_for_login() guest_cid = utils_vsock.get_guest_cid(3) - vsock_id = 'hotplugged_vsock' - vsock_params = {'id': vsock_id, 'guest-cid': guest_cid} + vsock_id = "hotplugged_vsock" + vsock_params = {"id": vsock_id, "guest-cid": guest_cid} vsock_test_tool = params["vsock_test_tool"] - if '-mmio:' in params.get('machine_type'): - dev_vsock = qdevices.QDevice('vhost-vsock-device', vsock_params) - elif params.get('machine_type').startswith("s390"): - vsock_params['devno'] = params.get('devno') + if "-mmio:" in params.get("machine_type"): + dev_vsock = qdevices.QDevice("vhost-vsock-device", vsock_params) + elif params.get("machine_type").startswith("s390"): + vsock_params["devno"] = params.get("devno") dev_vsock = qdevices.QDevice("vhost-vsock-ccw", vsock_params) else: - dev_vsock = qdevices.QDevice('vhost-vsock-pci', 
vsock_params) + dev_vsock = qdevices.QDevice("vhost-vsock-pci", vsock_params) vm.devices.simple_hotplug(dev_vsock, vm.monitor) - error_context.context('Check vsock device exist in guest lspci and ' - 'dmesg output.', test.log.info) - addr_pattern = params['addr_pattern'] - device_pattern = params['device_pattern'] - check_vsock_cmd = params.get('check_vsock_cmd', 'lspci') + error_context.context( + "Check vsock device exist in guest lspci and " "dmesg output.", test.log.info + ) + addr_pattern = params["addr_pattern"] + device_pattern = params["device_pattern"] + check_vsock_cmd = params.get("check_vsock_cmd", "lspci") time.sleep(10) lspci_output = session.cmd_output(check_vsock_cmd) - device_str = re.findall(r'%s\s%s' % (addr_pattern, device_pattern), - lspci_output) + device_str = re.findall(rf"{addr_pattern}\s{device_pattern}", lspci_output) - if params.get('dmesg_check') == 'yes': + if params.get("dmesg_check") == "yes": if not device_str: - test.fail( - 'check_vsock_cmd failed, no device "%s"' % - device_pattern) + test.fail(f'check_vsock_cmd failed, no device "{device_pattern}"') else: address = re.findall(addr_pattern, device_str[0])[0] - chk_dmesg_cmd = 'dmesg' + chk_dmesg_cmd = "dmesg" output = re.findall(address, session.cmd_output(chk_dmesg_cmd)) if not output: - test.fail('dmesg failed, no info related to %s' % address) + test.fail(f"dmesg failed, no info related to {address}") else: - error_msg = '' + error_msg = "" for o in output: - if re.search(r'fail|error', o, re.I): - error_msg += '%s' % o + if re.search(r"fail|error", o, re.I): + error_msg += f"{o}" break if error_msg: - test.fail("dmesg check failed: %s" % error_msg) + test.fail(f"dmesg check failed: {error_msg}") # Transfer data from guest to host try: if vsock_test_tool == "nc_vsock": @@ -83,14 +76,15 @@ def run(test, params, env): tool_bin = path.find_command("ncat") else: raise ValueError(f"unsupported test tool: {vsock_test_tool}") - tmp_file = "/tmp/vsock_file_%s" % utils_misc.generate_random_string(6) + tmp_file = f"/tmp/vsock_file_{utils_misc.generate_random_string(6)}" rec_session = vsock_test.send_data_from_guest_to_host( - session, tool_bin, guest_cid, tmp_file) + session, tool_bin, guest_cid, tmp_file + ) vsock_negative_test.check_data_received(test, rec_session, tmp_file) vm.devices.simple_unplug(dev_vsock, vm.monitor) vsock_negative_test.kill_host_receive_process(test, rec_session) vsock_test.check_guest_vsock_conn_exit(test, session) finally: - session.cmd_output("rm -f %s" % tmp_file) + session.cmd_output(f"rm -f {tmp_file}") session.close() vm.reboot() diff --git a/qemu/tests/vsock_negative_test.py b/qemu/tests/vsock_negative_test.py index ece3e84a87..2b208a1ae8 100755 --- a/qemu/tests/vsock_negative_test.py +++ b/qemu/tests/vsock_negative_test.py @@ -1,12 +1,9 @@ +import os import random import signal -import os - -from avocado.utils import path -from avocado.utils import process -from virttest import utils_misc -from virttest import error_context +from avocado.utils import path, process +from virttest import error_context, utils_misc from qemu.tests import vsock_test @@ -19,15 +16,14 @@ def check_data_received(test, rec_session, file): :param rec_session: vsock receive session :param file: file to receive data """ - if not utils_misc.wait_for(lambda: rec_session.is_alive(), - timeout=20, step=1): + if not utils_misc.wait_for(lambda: rec_session.is_alive(), timeout=20, step=1): test.error("Host connection failed.") - if not utils_misc.wait_for(lambda: os.path.exists(file), - timeout=20, step=1): + if 
not utils_misc.wait_for(lambda: os.path.exists(file), timeout=20, step=1): test.fail("Host does not create receive file successfully.") - elif not utils_misc.wait_for(lambda: os.path.getsize(file) > 0, - timeout=300, step=5): - test.fail('Host does not receive data successfully.') + elif not utils_misc.wait_for( + lambda: os.path.getsize(file) > 0, timeout=300, step=5 + ): + test.fail("Host does not receive data successfully.") @error_context.context_aware @@ -38,11 +34,9 @@ def kill_host_receive_process(test, rec_session): :param test: QEMU test object :param rec_session: vsock receive session """ - error_context.context("Kill the vsock process on host...", - test.log.info) + error_context.context("Kill the vsock process on host...", test.log.info) rec_session.kill(sig=signal.SIGINT) - if not utils_misc.wait_for(lambda: not rec_session.is_alive(), - timeout=1, step=0.1): + if not utils_misc.wait_for(lambda: not rec_session.is_alive(), timeout=1, step=0.1): test.fail("Host vsock process does not quit as expected.") @@ -75,14 +69,15 @@ def run(test, params, env): guest_cid = vm.devices.get(vsock_dev).get_param("guest-cid") conn_cmd = None if vsock_test_tool == "nc_vsock": - conn_cmd = "%s %s %s" % (tool_bin, guest_cid, port) + conn_cmd = f"{tool_bin} {guest_cid} {port}" if vsock_test_tool == "ncat": - conn_cmd = "%s --vsock %s %s" % (tool_bin, guest_cid, port) + conn_cmd = f"{tool_bin} --vsock {guest_cid} {port}" if conn_cmd is None: raise ValueError(f"unexpected test tool: {vsock_test_tool}") connected_str = "Connection reset by peer" - error_context.context("Connect vsock from host without" - " listening on guest.", test.log.info) + error_context.context( + "Connect vsock from host without" " listening on guest.", test.log.info + ) try: process.system_output(conn_cmd) except process.CmdError as e: @@ -94,14 +89,15 @@ def run(test, params, env): session.close() session = vm.wait_for_login() - tmp_file = "/tmp/vsock_file_%s" % utils_misc.generate_random_string(6) + tmp_file = f"/tmp/vsock_file_{utils_misc.generate_random_string(6)}" rec_session = vsock_test.send_data_from_guest_to_host( - session, tool_bin, guest_cid, tmp_file) + session, tool_bin, guest_cid, tmp_file + ) try: check_data_received(test, rec_session, tmp_file) kill_host_receive_process(test, rec_session) vsock_test.check_guest_vsock_conn_exit(test, session) finally: - session.cmd_output("rm -f %s" % tmp_file) + session.cmd_output(f"rm -f {tmp_file}") session.close() vm.verify_alive() diff --git a/qemu/tests/vsock_perf.py b/qemu/tests/vsock_perf.py index f3ae392742..5e54159800 100644 --- a/qemu/tests/vsock_perf.py +++ b/qemu/tests/vsock_perf.py @@ -4,28 +4,27 @@ import time import aexpect - from avocado.core import exceptions from avocado.utils import process - -from virttest import data_dir -from virttest import error_context +from virttest import data_dir, error_context def copy_compile_testsuite(vm, vsock_test_base_dir, session): vsock_test_src_file = os.path.join( - data_dir.get_deps_dir("vsock_test"), - "vsock_test.tar.xz") + data_dir.get_deps_dir("vsock_test"), "vsock_test.tar.xz" + ) shutil.copy2(vsock_test_src_file, vsock_test_base_dir) vm.copy_files_to(vsock_test_src_file, vsock_test_base_dir) - uncompress_cmd = "cd %s && tar zxf %s" % ( - vsock_test_base_dir, "vsock_test.tar.xz") + uncompress_cmd = "cd {} && tar zxf {}".format( + vsock_test_base_dir, "vsock_test.tar.xz" + ) process.system(uncompress_cmd, shell=True, ignore_status=True) session.cmd(uncompress_cmd) - compile_cmd = "cd %s && make vsock_perf" % 
(os.path.join( - vsock_test_base_dir, "vsock/")) + compile_cmd = "cd {} && make vsock_perf".format( + os.path.join(vsock_test_base_dir, "vsock/") + ) host_status = process.system(compile_cmd, shell=True) guest_status = session.cmd_status(compile_cmd) @@ -68,7 +67,7 @@ def run(test, params, env): try: test_bin = copy_compile_testsuite(vm, vsock_test_base_dir, session) - test.log.info("test_bin: %s" % test_bin) + test.log.info("test_bin: %s", test_bin) host_file = params["host_file"] + kernel_version guest_file = params["guest_file"] + kernel_version @@ -82,11 +81,11 @@ def run(test, params, env): test.log.info("Start perf test from guest ...") guest_status, guest_output = session.cmd_status_output(guest_cmd) - with open(host_file, 'a') as file: - file.write(host_output.get_output() + '\n') - with open(guest_file, 'a') as file: - file.write(guest_output + '\n') - test.log.info("loop number %s" % _) + with open(host_file, "a") as file: + file.write(host_output.get_output() + "\n") + with open(guest_file, "a") as file: + file.write(guest_output + "\n") + test.log.info("loop number %s", _) host_status = host_output.get_status() host_output.close() diff --git a/qemu/tests/vsock_test.py b/qemu/tests/vsock_test.py index b9cdd4c90e..ae88fccb9d 100644 --- a/qemu/tests/vsock_test.py +++ b/qemu/tests/vsock_test.py @@ -1,17 +1,13 @@ +import logging import os import random -import logging import time -import aexpect - -from avocado.utils import path -from avocado.utils import process -from virttest import data_dir -from virttest import error_context -from virttest import utils_misc +import aexpect +from avocado.utils import path, process +from virttest import data_dir, error_context, utils_misc -LOG_JOB = logging.getLogger('avocado.test') +LOG_JOB = logging.getLogger("avocado.test") def compile_nc_vsock(test, vm, session): @@ -23,19 +19,18 @@ def compile_nc_vsock(test, vm, session): :param session: vm session :return: Path to binary nc-vsock or None if compile failed """ - nc_vsock_dir = '/home/' - nc_vsock_bin = 'nc-vsock' - nc_vsock_c = 'nc-vsock.c' + nc_vsock_dir = "/home/" + nc_vsock_bin = "nc-vsock" + nc_vsock_c = "nc-vsock.c" src_file = os.path.join(data_dir.get_deps_dir("nc_vsock"), nc_vsock_c) bin_path = os.path.join(nc_vsock_dir, nc_vsock_bin) - rm_cmd = 'rm -rf %s*' % bin_path + rm_cmd = f"rm -rf {bin_path}*" session.cmd(rm_cmd) process.system(rm_cmd, shell=True, ignore_status=True) - cmd_cp = "cp %s %s" % (src_file, nc_vsock_dir) + cmd_cp = f"cp {src_file} {nc_vsock_dir}" process.system(cmd_cp) vm.copy_files_to(src_file, nc_vsock_dir) - compile_cmd = "cd %s && gcc -o %s %s" % ( - nc_vsock_dir, nc_vsock_bin, nc_vsock_c) + compile_cmd = f"cd {nc_vsock_dir} && gcc -o {nc_vsock_bin} {nc_vsock_c}" host_status = process.system(compile_cmd, shell=True) guest_status = session.cmd_status(compile_cmd) if (host_status or guest_status) != 0: @@ -58,10 +53,10 @@ def vsock_listen(tool_bin, port, session): lstn_cmd = None if "ncat" in tool_bin: - lstn_cmd = "%s --vsock -l %s" % (tool_bin, port) + lstn_cmd = f"{tool_bin} --vsock -l {port}" if "nc-vsock" in tool_bin: - lstn_cmd = "%s -l %s" % (tool_bin, port) + lstn_cmd = f"{tool_bin} -l {port}" if lstn_cmd is None: raise ValueError(f"unexpected test tool: {tool_bin}") @@ -84,8 +79,10 @@ def check_received_data(test, session, pattern): session.read_until_last_line_matches([pattern]) except aexpect.ExpectError as e: if isinstance(e, aexpect.ExpectTimeoutError): - test.fail("Does not receive expected content: %s, output" - " of session: %s" % (pattern, 
e.output)) + test.fail( + f"Does not receive expected content: {pattern}, output" + f" of session: {e.output}" + ) else: test.fail(str(e)) @@ -102,9 +99,9 @@ def vsock_connect(tool_bin, guest_cid, port): conn_cmd = None if "ncat" in tool_bin: - conn_cmd = "%s --vsock %s %s" % (tool_bin, guest_cid, port) + conn_cmd = f"{tool_bin} --vsock {guest_cid} {port}" if "nc-vsock" in tool_bin: - conn_cmd = "%s %s %s" % (tool_bin, guest_cid, port) + conn_cmd = f"{tool_bin} {guest_cid} {port}" if conn_cmd is None: raise ValueError(f"unexpected test tool: {tool_bin}") LOG_JOB.info("Connect to the vsock port on host: %s", conn_cmd) @@ -113,11 +110,13 @@ def vsock_connect(tool_bin, guest_cid, port): conn_cmd, auto_close=False, output_func=utils_misc.log_line, - output_params=("vsock_%s_%s" % (guest_cid, port),)) + output_params=(f"vsock_{guest_cid}_{port}",), + ) -def send_data_from_guest_to_host(guest_session, tool_bin, - guest_cid, tmp_file, file_size=1000): +def send_data_from_guest_to_host( + guest_session, tool_bin, guest_cid, tmp_file, file_size=1000 +): """ Generate a temp file and transfer it from guest to host via vsock @@ -128,34 +127,34 @@ def send_data_from_guest_to_host(guest_session, tool_bin, :return: The host vsock connection process """ - cmd_generate = 'dd if=/dev/urandom of=%s count=%s bs=1M' % ( - tmp_file, file_size) + cmd_generate = f"dd if=/dev/urandom of={tmp_file} count={file_size} bs=1M" guest_session.cmd_status(cmd_generate, timeout=600) port = random.randrange(1, 6000) cmd_transfer = None if "ncat" in tool_bin: - cmd_transfer = '%s --vsock --send-only -l %s < %s' % ( - tool_bin, port, tmp_file) + cmd_transfer = f"{tool_bin} --vsock --send-only -l {port} < {tmp_file}" if "nc-vsock" in tool_bin: - cmd_transfer = '%s -l %s < %s' % (tool_bin, port, tmp_file) + cmd_transfer = f"{tool_bin} -l {port} < {tmp_file}" if cmd_transfer is None: raise ValueError(f"unexpected test tool: {tool_bin}") - error_context.context('Transfer file from guest via command: %s' - % cmd_transfer, LOG_JOB.info) + error_context.context( + f"Transfer file from guest via command: {cmd_transfer}", LOG_JOB.info + ) guest_session.sendline(cmd_transfer) cmd_receive = None if "ncat" in tool_bin: - cmd_receive = '%s --vsock %s %s > %s' % ( - tool_bin, guest_cid, port, tmp_file) + cmd_receive = f"{tool_bin} --vsock {guest_cid} {port} > {tmp_file}" if "nc-vsock" in tool_bin: - cmd_receive = '%s %s %s > %s' % (tool_bin, guest_cid, port, tmp_file) + cmd_receive = f"{tool_bin} {guest_cid} {port} > {tmp_file}" if cmd_receive is None: raise ValueError(f"unexpected test tool: {tool_bin}") time.sleep(60) - return aexpect.Expect(cmd_receive, - auto_close=True, - output_func=utils_misc.log_line, - output_params=('%s.log' % tmp_file,)) + return aexpect.Expect( + cmd_receive, + auto_close=True, + output_func=utils_misc.log_line, + output_params=(f"{tmp_file}.log",), + ) def check_guest_vsock_conn_exit(test, session, close_session=False): @@ -169,8 +168,10 @@ def check_guest_vsock_conn_exit(test, session, close_session=False): try: session.read_up_to_prompt(timeout=120) except aexpect.ExpectTimeoutError: - test.fail("vsock listening prcoess inside guest" - " does not exit after close host nc-vsock connection.") + test.fail( + "vsock listening prcoess inside guest" + " does not exit after close host nc-vsock connection." 
+ ) finally: if close_session: session.close() @@ -194,10 +195,10 @@ def run(test, params, env): """ def clean(tmp_file): - """ Clean the environment """ - cmd_rm = "rm -rf %s" % tmp_file + """Clean the environment""" + cmd_rm = f"rm -rf {tmp_file}" if vsock_test_tool == "nc_vsock": - cmd_rm += "; rm -rf %s*" % tool_bin + cmd_rm += f"; rm -rf {tool_bin}*" session.cmd_output_safe(cmd_rm) process.system(cmd_rm, shell=True, ignore_status=True) if host_vsock_session.is_alive(): @@ -205,7 +206,7 @@ def clean(tmp_file): session.close() vm = env.get_vm(params["main_vm"]) - tmp_file = "/tmp/vsock_file_%s" % utils_misc.generate_random_string(6) + tmp_file = f"/tmp/vsock_file_{utils_misc.generate_random_string(6)}" session = vm.wait_for_login() vsock_dev = params["vsocks"].split()[0] guest_cid = vm.devices.get(vsock_dev).get_param("guest-cid") @@ -237,16 +238,16 @@ def clean(tmp_file): # Transfer data from guest to host session = vm.wait_for_login() - rec_session = send_data_from_guest_to_host(session, tool_bin, - guest_cid, tmp_file) + rec_session = send_data_from_guest_to_host(session, tool_bin, guest_cid, tmp_file) utils_misc.wait_for(lambda: not rec_session.is_alive(), timeout=20) check_guest_vsock_conn_exit(test, session) - cmd_chksum = 'md5sum %s' % tmp_file + cmd_chksum = f"md5sum {tmp_file}" md5_origin = session.cmd_output(cmd_chksum).split()[0] md5_received = process.system_output(cmd_chksum).split()[0].decode() if md5_received != md5_origin: clean(tmp_file) - test.fail('Data transfer not integrated, the original md5 value' - ' is %s, while the md5 value received on host is %s' % - (md5_origin, md5_received)) + test.fail( + "Data transfer not integrated, the original md5 value" + f" is {md5_origin}, while the md5 value received on host is {md5_received}" + ) clean(tmp_file) diff --git a/qemu/tests/vsock_test_suite.py b/qemu/tests/vsock_test_suite.py index fdbfc32b9f..0a346dac60 100644 --- a/qemu/tests/vsock_test_suite.py +++ b/qemu/tests/vsock_test_suite.py @@ -2,13 +2,8 @@ import time import aexpect - from avocado.utils import process - -from virttest import data_dir -from virttest import error_context -from virttest import utils_misc -from virttest import utils_net +from virttest import data_dir, error_context, utils_misc, utils_net def copy_compile_testsuite(test, vm, session): @@ -21,20 +16,22 @@ def copy_compile_testsuite(test, vm, session): """ vsock_test_base_dir = "/home/" vsock_test_src_file = os.path.join( - data_dir.get_deps_dir("vsock_test"), - "vsock_test.tar.xz") - rm_cmd = "rm -rf %s" % os.path.join(vsock_test_base_dir, "vsock*") + data_dir.get_deps_dir("vsock_test"), "vsock_test.tar.xz" + ) + rm_cmd = "rm -rf {}".format(os.path.join(vsock_test_base_dir, "vsock*")) process.system(rm_cmd, shell=True, ignore_status=True) session.cmd(rm_cmd, ignore_all_errors=True) - cp_cmd = "cp %s %s" % (vsock_test_src_file, vsock_test_base_dir) + cp_cmd = f"cp {vsock_test_src_file} {vsock_test_base_dir}" process.system(cp_cmd, shell=True) vm.copy_files_to(vsock_test_src_file, vsock_test_base_dir) - uncompress_cmd = "cd %s && tar zxf %s" % ( - vsock_test_base_dir, "vsock_test.tar.xz") + uncompress_cmd = "cd {} && tar zxf {}".format( + vsock_test_base_dir, "vsock_test.tar.xz" + ) process.system(uncompress_cmd, shell=True, ignore_status=True) session.cmd(uncompress_cmd) - compile_cmd = "cd %s && make vsock_test" % os.path.join( - vsock_test_base_dir, "vsock/") + compile_cmd = "cd {} && make vsock_test".format( + os.path.join(vsock_test_base_dir, "vsock/") + ) host_status = 
process.system(compile_cmd, shell=True) guest_status = session.cmd_status(compile_cmd) if (host_status or guest_status) != 0: @@ -78,22 +75,19 @@ def run(test, params, env): # Scenario I: host = client, guest = server test.log.info("Host as client, guest as server...") - client_cmd = params["client_cmd"] % ( - test_bin, guest_ip, port, guest_cid) + client_cmd = params["client_cmd"] % (test_bin, guest_ip, port, guest_cid) server_cmd = params["server_cmd"] % (test_bin, port, host_cid) session.sendline(server_cmd) time.sleep(5) - status, output = process.getstatusoutput( - client_cmd, timeout=30, shell=True) + status, output = process.getstatusoutput(client_cmd, timeout=30, shell=True) if status != 0: - test.fail("Test fail %s %s" % (status, output)) - test.log.info("command output: %s" % output) + test.fail(f"Test fail {status} {output}") + test.log.info("command output: %s", output) try: session.read_up_to_prompt(timeout=10) except aexpect.ExpectTimeoutError: - test.fail( - "server_cmd inside guest dosn't closed after test execution.") + test.fail("server_cmd inside guest dosn't closed after test execution.") # Scenario II: host = server, guest = client test.log.info("Host as server, guest as client...") @@ -103,12 +97,13 @@ def run(test, params, env): server_cmd, auto_close=False, output_func=utils_misc.log_line, - output_params=("vsock_%s_%s" % (guest_cid, port),)) + output_params=(f"vsock_{guest_cid}_{port}",), + ) time.sleep(5) status, output = session.cmd_status_output(client_cmd) if status != 0: - test.fail("Test fail %s %s" % (status, output)) - test.log.info("command output: %s" % output) + test.fail(f"Test fail {status} {output}") + test.log.info("command output: %s", output) finally: rm_cmd = "rm -rf /home/vsock*" process.system(rm_cmd, shell=True, timeout=10, ignore_status=True) diff --git a/qemu/tests/watchdog.py b/qemu/tests/watchdog.py index 16e84b4518..3d1ece9c62 100644 --- a/qemu/tests/watchdog.py +++ b/qemu/tests/watchdog.py @@ -1,16 +1,11 @@ import os +import random import re import time -import random - -from avocado.utils import process -from virttest import error_context -from virttest import utils_misc -from virttest import env_process -from virttest import utils_test -from virttest import data_dir from aexpect.exceptions import ShellTimeoutError +from avocado.utils import process +from virttest import data_dir, env_process, error_context, utils_misc, utils_test @error_context.context_aware @@ -26,8 +21,8 @@ def run(test, params, env): :param env: Dictionary with the test environment. 
""" - timeout = int(params.get("login_timeout", '360')) - relogin_timeout = int(params.get("relogin_timeout", '240')) + timeout = int(params.get("login_timeout", "360")) + relogin_timeout = int(params.get("relogin_timeout", "240")) vm_arch_name = params["vm_arch_name"] watchdog_device_type = params.get("watchdog_device_type", "i6300esb") @@ -50,8 +45,9 @@ def _watchdog_device_check(test, session, watchdog_device): # when wDT is 6300esb need check pci info if watchdog_device == "i6300esb": - error_context.context("checking pci info to ensure have WDT" - " device", test.log.info) + error_context.context( + "checking pci info to ensure have WDT" " device", test.log.info + ) session.cmd("echo 1 > /sys/bus/pci/rescan") o = session.cmd_output("lspci") if o: @@ -63,14 +59,14 @@ def _watchdog_device_check(test, session, watchdog_device): # checking watchdog init info using dmesg error_context.context("Checking watchdog load info", test.log.info) dmesg_info = params.get("dmesg_info", "(i6300ESB|ib700wdt).*init") - module_check_cmd = params.get("module_check_cmd", - "dmesg | grep -i '%s' " % dmesg_info) + module_check_cmd = params.get( + "module_check_cmd", f"dmesg | grep -i '{dmesg_info}' " + ) (s, o) = session.cmd_status_output(module_check_cmd) if s != 0: error_msg = "Wactchdog device '%s' load/initialization failed " test.error(error_msg % watchdog_device) - test.log.info("Watchdog device '%s' add and init successfully", - watchdog_device) + test.log.info("Watchdog device '%s' add and init successfully", watchdog_device) test.log.debug("Init info : '%s'", o) def _trigger_watchdog(session, trigger_cmd=None): @@ -81,8 +77,9 @@ def _trigger_watchdog(session, trigger_cmd=None): @trigger_cmd: cmd trigger the watchdog """ if trigger_cmd is not None: - error_context.context(("Trigger Watchdog action using:'%s'." % - trigger_cmd), test.log.info) + error_context.context( + (f"Trigger Watchdog action using:'{trigger_cmd}'."), test.log.info + ) session.sendline(trigger_cmd) def _action_check(test, session, watchdog_action): @@ -100,41 +97,51 @@ def check_guest_reboot(pattern): return True return False - response_timeout = int(params.get("response_timeout", '240')) - error_context.context("Check whether or not watchdog action '%s' took" - " effect" % watchdog_action, test.log.info) + response_timeout = int(params.get("response_timeout", "240")) + error_context.context( + f"Check whether or not watchdog action '{watchdog_action}' took" " effect", + test.log.info, + ) if watchdog_action == "inject-nmi": - if (vm_arch_name in ("x86_64", "i686")): - if not utils_misc.wait_for(lambda: "NMI received" in session.cmd_output("dmesg"), - response_timeout, 0, 1): - test.fail("Guest didn't receive dmesg with 'NMI received'," - "after action '%s'." % watchdog_action) + if vm_arch_name in ("x86_64", "i686"): + if not utils_misc.wait_for( + lambda: "NMI received" in session.cmd_output("dmesg"), + response_timeout, + 0, + 1, + ): + test.fail( + "Guest didn't receive dmesg with 'NMI received'," + f"after action '{watchdog_action}'." 
+ ) msg = session.cmd_output("dmesg").splitlines()[-8:] test.log.info("Guest received dmesg info: %s", msg) - elif (vm_arch_name in ("ppc64", "ppc64le")): + elif vm_arch_name in ("ppc64", "ppc64le"): rebooted = check_guest_reboot(params["guest_reboot_pattern"]) if not rebooted: - test.fail("Guest isn't rebooted after watchdog action '%s'" - % watchdog_action) + test.fail( + f"Guest isn't rebooted after watchdog action '{watchdog_action}'" + ) test.log.info("Try to login the guest after reboot") session = vm.wait_for_login(timeout=timeout) - if not utils_misc.wait_for(lambda: not session.is_responsive(), - response_timeout, 0, 1): - if (watchdog_action in ("none", "debug", "inject-nmi")): + if not utils_misc.wait_for( + lambda: not session.is_responsive(), response_timeout, 0, 1 + ): + if watchdog_action in ("none", "debug", "inject-nmi"): test.log.info("OK, the guest session is responsive still") else: - txt = "It seems action '%s' took no" % watchdog_action + txt = f"It seems action '{watchdog_action}' took no" txt += " effect, guest is still responsive." test.fail(txt) # when action is poweroff or shutdown(without no-shutdown option), # the vm will dead, and qemu exit. # The others the vm monitor still responsive, can report the vm status. - if (watchdog_action == "poweroff" or (watchdog_action == "shutdown" and - params.get("disable_shutdown") != "yes")): - if not utils_misc.wait_for(lambda: vm.is_dead(), - response_timeout, 0, 1): - txt = "It seems '%s' action took no effect, " % watchdog_action + if watchdog_action == "poweroff" or ( + watchdog_action == "shutdown" and params.get("disable_shutdown") != "yes" + ): + if not utils_misc.wait_for(lambda: vm.is_dead(), response_timeout, 0, 1): + txt = f"It seems '{watchdog_action}' action took no effect, " txt += "guest is still alive!" test.fail(txt) else: @@ -146,10 +153,10 @@ def check_guest_reboot(pattern): f_param = "running" if not utils_misc.wait_for( - lambda: vm.monitor.verify_status(f_param), - response_timeout, 5, 1): + lambda: vm.monitor.verify_status(f_param), response_timeout, 5, 1 + ): test.log.debug("Monitor status is:%s", vm.monitor.get_status()) - txt = "It seems action '%s' took no effect" % watchdog_action + txt = f"It seems action '{watchdog_action}' took no effect" txt += " , Wrong monitor status!" test.fail(txt) @@ -157,8 +164,7 @@ def check_guest_reboot(pattern): if watchdog_action == "reset": test.log.info("Try to login the guest after reboot") vm.wait_for_login(timeout=relogin_timeout) - test.log.info("Watchdog action '%s' come into effect.", - watchdog_action) + test.log.info("Watchdog action '%s' come into effect.", watchdog_action) def check_watchdog_support(): """ @@ -169,23 +175,25 @@ def check_watchdog_support(): """ qemu_binary = utils_misc.get_qemu_binary(params) - watchdog_type_check = params.get( - "watchdog_type_check", " -device '?'") + watchdog_type_check = params.get("watchdog_type_check", " -device '?'") qemu_cmd = qemu_binary + watchdog_type_check # check the host support watchdog types. 
- error_context.context("Checking whether or not the host support" - " WDT '%s'" % watchdog_device_type, test.log.info) - watchdog_device = process.system_output("%s 2>&1" % qemu_cmd, - shell=True).decode() + error_context.context( + "Checking whether or not the host support" f" WDT '{watchdog_device_type}'", + test.log.info, + ) + watchdog_device = process.system_output(f"{qemu_cmd} 2>&1", shell=True).decode() if watchdog_device: if re.findall(watchdog_device_type, watchdog_device, re.I): - test.log.info("The host support '%s' type watchdog device", - watchdog_device_type) + test.log.info( + "The host support '%s' type watchdog device", watchdog_device_type + ) else: - test.log.info("The host support watchdog device type is: '%s'", - watchdog_device) - test.cancel("watdog %s isn't supported" % watchdog_device_type) + test.log.info( + "The host support watchdog device type is: '%s'", watchdog_device + ) + test.cancel(f"watdog {watchdog_device_type} isn't supported") else: test.cancel("No watchdog device supported by the host!") @@ -223,7 +231,7 @@ def magic_close_support(): 5. Wait heartbeat timeout check the watchdog action deactive. """ - response_timeout = int(params.get("response_timeout", '240')) + response_timeout = int(params.get("response_timeout", "240")) magic_cmd = params.get("magic_close_cmd", "echo V > /dev/watchdog") _watchdog_device_check(test, session, watchdog_device_type) @@ -233,8 +241,9 @@ def magic_close_support(): error_context.context("Magic close is start", test.log.info) _trigger_watchdog(session, magic_cmd) - if utils_misc.wait_for(lambda: not session.is_responsive(), - response_timeout, 0, 1): + if utils_misc.wait_for( + lambda: not session.is_responsive(), response_timeout, 0, 1 + ): error_msg = "Watchdog action took effect, magic close FAILED" test.fail(error_msg) test.log.info("Magic close took effect.") @@ -258,8 +267,10 @@ def migration_when_wdt_timeout(): _watchdog_device_check(test, session, watchdog_device_type) _trigger_watchdog(session, trigger_cmd) - error_context.context("Do migration(protocol:%s),Watchdog have" - " been triggered." 
% mig_protocol, test.log.info) + error_context.context( + f"Do migration(protocol:{mig_protocol}),Watchdog have" " been triggered.", + test.log.info, + ) args = (mig_timeout, mig_protocol, mig_cancel_delay) migrate_thread = utils_misc.InterruptedThread(vm.migrate, args) migrate_thread.start() @@ -286,14 +297,17 @@ def hotplug_unplug_watchdog_device(): plug_watchdog_device = params.get("plug_watchdog_device", "i6300esb") machine_type = params.get("machine_type") - watchdog_device_add = ("device_add driver=%s, id=%s" - % (plug_watchdog_device, "watchdog")) + watchdog_device_add = "device_add driver={}, id={}".format( + plug_watchdog_device, + "watchdog", + ) if machine_type == "q35": watchdog_device_add += ",bus=pcie-pci-bridge-0,addr=0x1f" - watchdog_device_del = ("device_del id=%s" % "watchdog") + watchdog_device_del = "device_del id={}".format("watchdog") - error_context.context(("Hotplug watchdog device '%s'" % - plug_watchdog_device), test.log.info) + error_context.context( + (f"Hotplug watchdog device '{plug_watchdog_device}'"), test.log.info + ) vm.monitor.send_args_cmd(watchdog_device_add) # wait watchdog device init @@ -305,8 +319,9 @@ def hotplug_unplug_watchdog_device(): error_context.context("Hot unplug watchdog device", test.log.info) vm.monitor.send_args_cmd(watchdog_device_del) - error_context.context("Resume the guest, check the WDT have" - " been removed", test.log.info) + error_context.context( + "Resume the guest, check the WDT have" " been removed", test.log.info + ) vm.resume() session = vm.wait_for_login(timeout=timeout) o = session.cmd_output("lspci") @@ -322,22 +337,31 @@ def stop_cont_test(): continue operation """ - response_timeout = int(params.get("response_timeout", '240')) + response_timeout = int(params.get("response_timeout", "240")) _watchdog_device_check(test, session, watchdog_device_type) vm.monitor.clear_event("WATCHDOG") _trigger_watchdog(session, trigger_cmd) vm.pause() - if utils_misc.wait_for(lambda: vm.monitor.get_event("WATCHDOG"), - timeout=response_timeout): - test.fail("Watchdog action '%s' still took effect after pausing " - "VM." % watchdog_action) - test.log.info("Watchdog action '%s' didn't take effect after pausing " - "VM, it is expected.", watchdog_action) + if utils_misc.wait_for( + lambda: vm.monitor.get_event("WATCHDOG"), timeout=response_timeout + ): + test.fail( + f"Watchdog action '{watchdog_action}' still took effect after pausing " + "VM." + ) + test.log.info( + "Watchdog action '%s' didn't take effect after pausing " + "VM, it is expected.", + watchdog_action, + ) vm.resume() - if not utils_misc.wait_for(lambda: vm.monitor.get_event("WATCHDOG"), - timeout=response_timeout): - test.fail("Watchodg action '%s' didn't take effect after resuming " - "VM." % watchdog_action) + if not utils_misc.wait_for( + lambda: vm.monitor.get_event("WATCHDOG"), timeout=response_timeout + ): + test.fail( + f"Watchodg action '{watchdog_action}' didn't take effect after resuming " + "VM." 
+ ) _action_check(test, session, watchdog_action) def watchdog_test_suit(): @@ -357,9 +381,9 @@ def watchdog_test_suit(): watchdog_test_lib = params["watchdog_test_lib"] src_path = os.path.join(data_dir.get_deps_dir(), watchdog_test_lib) test_dir = os.path.basename(watchdog_test_lib) - session.cmd_output("rm -rf /home/%s" % test_dir) + session.cmd_output(f"rm -rf /home/{test_dir}") vm.copy_files_to(src_path, "/home") - session.cmd_output("cd /home/%s && make" % test_dir) + session.cmd_output(f"cd /home/{test_dir} && make") try: session.cmd_output("./watchdog-test --yes &", timeout=130) except ShellTimeoutError: @@ -374,7 +398,7 @@ def watchdog_test_suit(): finally: vm.resume() session.cmd_output("pkill watchdog-test") - session.cmd_output("rm -rf /home/%s" % test_dir) + session.cmd_output(f"rm -rf /home/{test_dir}") def heartbeat_test(): """ @@ -388,8 +412,9 @@ def heartbeat_test(): del_module_cmd = params["del_module_cmd"] reload_module_cmd = params["reload_module_cmd"] _watchdog_device_check(test, session, watchdog_device_type) - error_context.context("set heartbeat value and reload the i6300esb " - "module", test.log.info) + error_context.context( + "set heartbeat value and reload the i6300esb " "module", test.log.info + ) session.cmd(del_module_cmd) heartbeat = params["heartbeat"] if heartbeat == "random_value": @@ -402,39 +427,49 @@ def heartbeat_test(): if heartbeat < -2147483648 or heartbeat > 2147483647: o = session.cmd_output("dmesg | grep -i 'i6300esb.*invalid'") if o: - test.log.info("Heartbeat value %s is out of range, it is " - "expected.", heartbeat) + test.log.info( + "Heartbeat value %s is out of range, it is " "expected.", heartbeat + ) else: test.fail("No invalid heartbeat info in dmesg.") elif -2147483648 <= heartbeat < 1 or 2046 < heartbeat <= 2147483647: o = session.cmd_output("dmesg | grep -i 'heartbeat=30'") if not o: - test.fail("Heartbeat value isn't default 30 sec in dmesg, it " - "should be.") + test.fail( + "Heartbeat value isn't default 30 sec in dmesg, it " "should be." + ) heartbeat = 30 elif 1 <= heartbeat <= 2046: - o = session.cmd_output("dmesg | grep -i 'heartbeat=%s'" % heartbeat) + o = session.cmd_output(f"dmesg | grep -i 'heartbeat={heartbeat}'") if not o: - test.fail("Heartbeat value isn't %s sec in dmesg" % heartbeat) + test.fail(f"Heartbeat value isn't {heartbeat} sec in dmesg") if heartbeat <= 2147483647 and heartbeat > -2147483648: _watchdog_device_check(test, session, watchdog_device_type) _trigger_watchdog(session, trigger_cmd) - error_context.context("Watchdog will fire after %s s" % heartbeat, - test.log.info) + error_context.context( + f"Watchdog will fire after {heartbeat} s", test.log.info + ) start_time = time.time() end_time = start_time + float(heartbeat) + 2 while not vm.monitor.verify_status("paused"): if time.time() > end_time: - test.fail("Monitor status is:%s, watchdog action '%s' didn't take" - "effect" % (vm.monitor.get_status(), watchdog_action)) + test.fail( + f"Monitor status is:{vm.monitor.get_status()}, watchdog action '{watchdog_action}' didn't take" + "effect" + ) time.sleep(1) guest_pause_time = time.time() - start_time if abs(guest_pause_time - float(heartbeat)) <= 2: - test.log.info("Watchdog action '%s' took effect after '%s's.", - watchdog_action, guest_pause_time) + test.log.info( + "Watchdog action '%s' took effect after '%s's.", + watchdog_action, + guest_pause_time, + ) else: - test.fail("Watchdog action '%s' took effect after '%s's, it is earlier" - " than expected." 
% (watchdog_action, guest_pause_time)) + test.fail( + f"Watchdog action '{watchdog_action}' took effect after '{guest_pause_time}'s, it is earlier" + " than expected." + ) # main procedure test_type = params.get("test_type") @@ -444,11 +479,12 @@ def heartbeat_test(): else: check_watchdog_support() - error_context.context("'%s' test starting ... " % test_type, test.log.info) - error_context.context("Boot VM with WDT(Device:'%s', Action:'%s')," - " and try to login" % - (watchdog_device_type, watchdog_action), - test.log.info) + error_context.context(f"'{test_type}' test starting ... ", test.log.info) + error_context.context( + f"Boot VM with WDT(Device:'{watchdog_device_type}', Action:'{watchdog_action}')," + " and try to login", + test.log.info, + ) params["start_vm"] = "yes" env_process.preprocess_vm(test, params, env, params.get("main_vm")) vm = env.get_vm(params["main_vm"]) @@ -458,12 +494,11 @@ def heartbeat_test(): error_context.context("Setup the runlevel for guest", test.log.info) utils_test.qemu.setup_runlevel(params, session) - if (test_type in locals()): + if test_type in locals(): test_running = locals()[test_type] try: test_running() finally: vm.destroy() else: - test.error("Oops test %s doesn't exist, have a check please." - % test_type) + test.error(f"Oops test {test_type} doesn't exist, have a check please.") diff --git a/qemu/tests/win_heavyload.py b/qemu/tests/win_heavyload.py index 261e6300a0..2b813498ea 100644 --- a/qemu/tests/win_heavyload.py +++ b/qemu/tests/win_heavyload.py @@ -4,9 +4,7 @@ import aexpect from avocado.utils import download -from virttest import error_context -from virttest import utils_misc -from virttest import data_dir +from virttest import data_dir, error_context, utils_misc @error_context.context_aware @@ -46,23 +44,23 @@ def add_option(cmd, key, val): """ if re.match(r".*/%s.*", cmd, re.I): if val: - rex = r"/%s\b+\S+\b+" % key - val = "/%s %s " % (key, val) + rex = rf"/{key}\b+\S+\b+" + val = f"/{key} {val} " cmd = re.sub(rex, val, cmd, re.I) else: - cmd += " /%s %s " % (key, val) + cmd += f" /{key} {val} " return cmd tmp_dir = data_dir.get_tmp_dir() install_path = params["install_path"].rstrip("\\") - heavyload_bin = r'"%s\heavyload.exe"' % install_path - start_cmd = "%s /CPU /MEMORY /FILE " % heavyload_bin + heavyload_bin = rf'"{install_path}\heavyload.exe"' + start_cmd = f"{heavyload_bin} /CPU /MEMORY /FILE " stop_cmd = "taskkill /T /F /IM heavyload.exe" stop_cmd = params.get("stop_cmd", stop_cmd) start_cmd = params.get("start_cmd", start_cmd) check_running_cmd = "tasklist|findstr /I heavyload" check_running_cmd = params.get("check_running_cmd", check_running_cmd) - test_installed_cmd = 'dir "%s"|findstr /I heavyload' % install_path + test_installed_cmd = f'dir "{install_path}"|findstr /I heavyload' test_installed_cmd = params.get("check_installed_cmd", test_installed_cmd) vm = env.get_vm(params["main_vm"]) @@ -82,7 +80,7 @@ def add_option(cmd, key, val): download.get_file(download_url, pkg_path, hash_expected=pkg_md5sum) vm.copy_files_to(pkg_path, dst) else: - dst = r"%s:\\" % utils_misc.get_winutils_vol(session) + dst = rf"{utils_misc.get_winutils_vol(session)}:\\" error_context.context("Install HeavyLoad in guest", test.log.info) install_cmd = params["install_cmd"] @@ -97,35 +95,36 @@ def add_option(cmd, key, val): if params.get("autostress") == "yes": free_mem = utils_misc.get_free_mem(session, "windows") free_disk = utils_misc.get_free_disk(session, "C:") - start_cmd = r'"%s\heavyload.exe"' % params["install_path"] - start_cmd = 
add_option(start_cmd, 'CPU', vm.cpuinfo.smp) - start_cmd = add_option(start_cmd, 'MEMORY', free_mem) - start_cmd = add_option(start_cmd, 'FILE', free_disk) + start_cmd = r'"{}\heavyload.exe"'.format(params["install_path"]) + start_cmd = add_option(start_cmd, "CPU", vm.cpuinfo.smp) + start_cmd = add_option(start_cmd, "MEMORY", free_mem) + start_cmd = add_option(start_cmd, "FILE", free_disk) else: start_cmd = params["start_cmd"] # reformat command to ensure heavyload started as except test_timeout = int(params.get("timeout", "60")) steping = 60 if test_timeout < 60: - test.log.warn("Heavyload use mins as unit of timeout, given timeout " - "is too small (%ss), force set to 60s", test_timeout) + test.log.warning( + "Heavyload use mins as unit of timeout, given timeout " + "is too small (%ss), force set to 60s", + test_timeout, + ) test_timeout = 60 steping = 30 - start_cmd = add_option(start_cmd, 'DURATION', test_timeout / 60) - start_cmd = add_option(start_cmd, 'START', '') - start_cmd = add_option(start_cmd, 'AUTOEXIT', '') + start_cmd = add_option(start_cmd, "DURATION", test_timeout / 60) + start_cmd = add_option(start_cmd, "START", "") + start_cmd = add_option(start_cmd, "AUTOEXIT", "") test.log.info("heavyload cmd: %s", start_cmd) session.sendline(start_cmd) if not loop_session_cmd(session, check_running_cmd): test.error("heavyload process is not started") - sleep_before_migration = int(params.get("sleep_before_migration", - "0")) + sleep_before_migration = int(params.get("sleep_before_migration", "0")) time.sleep(sleep_before_migration) error_context.context("Verify vm is alive", test.log.info) - utils_misc.wait_for(vm.verify_alive, - timeout=test_timeout * 1.2, step=steping) + utils_misc.wait_for(vm.verify_alive, timeout=test_timeout * 1.2, step=steping) if not session.cmd_status(check_running_cmd): test.fail("heavyload doesn't exist normally") diff --git a/qemu/tests/win_irq_check.py b/qemu/tests/win_irq_check.py index 232670a424..ac1325c759 100644 --- a/qemu/tests/win_irq_check.py +++ b/qemu/tests/win_irq_check.py @@ -1,10 +1,8 @@ -import re import ctypes +import re + +from virttest import error_context, qemu_qtree, utils_misc, utils_test -from virttest import error_context -from virttest import utils_test -from virttest import utils_misc -from virttest import qemu_qtree from provider import win_dev @@ -22,16 +20,19 @@ def run(test, params, env): :param params: Dictionary with the test parameters. :param env: Dictionary with test environment. """ + def get_vectors_fqtree(): """ Get device vectors from qemu info qtree. 
""" device_type = params["device_type"] qtree = qemu_qtree.QtreeContainer() - qtree.parse_info_qtree(vm.monitor.info('qtree')) + qtree.parse_info_qtree(vm.monitor.info("qtree")) for node in qtree.get_nodes(): - if (isinstance(node, qemu_qtree.QtreeDev) and - node.qtree['type'] == device_type): + if ( + isinstance(node, qemu_qtree.QtreeDev) + and node.qtree["type"] == device_type + ): vectors = node.qtree["vectors"].split()[0] return vectors @@ -46,20 +47,22 @@ def irq_check(session, device_name, devcon_folder, timeout): """ hwids = win_dev.get_hwids(session, device_name, devcon_folder, timeout) if not hwids: - test.error("Didn't find %s device info from guest" % device_name) + test.error(f"Didn't find {device_name} device info from guest") if params.get("check_vectors", "no") == "yes": vectors = int(get_vectors_fqtree()) for hwid in hwids: get_irq_cmd = params["get_irq_cmd"] % (devcon_folder, hwid) - irq_list = re.findall(r':\s+(\d+)', session.cmd_output(get_irq_cmd), re.M) + irq_list = re.findall(r":\s+(\d+)", session.cmd_output(get_irq_cmd), re.M) if not irq_list: - test.error("device %s's irq checked fail" % device_name) + test.error(f"device {device_name}'s irq checked fail") irq_nums = len(irq_list) for irq_symbol in (ctypes.c_int32(int(irq)).value for irq in irq_list): - if (irq_nums == 1 and irq_symbol < 0) or (irq_nums > 1 and irq_symbol >= 0): - test.fail("%s's irq is not correct." % device_name) - elif irq_nums > 1 and (irq_nums != vectors): # pylint: disable=E0606 - test.fail("%s's irq nums not equal to vectors." % device_name) + if (irq_nums == 1 and irq_symbol < 0) or ( + irq_nums > 1 and irq_symbol >= 0 + ): + test.fail(f"{device_name}'s irq is not correct.") + elif irq_nums > 1 and (irq_nums != vectors): # pylint: disable=E0606 + test.fail(f"{device_name}'s irq nums not equal to vectors.") def set_msi_fguest(enable=True): """ @@ -74,13 +77,13 @@ def set_msi_fguest(enable=True): timeout = int(params.get("login_timeout", 360)) restore_msi = False - error_context.context("Boot guest with %s device" % driver, test.log.info) + error_context.context(f"Boot guest with {driver} device", test.log.info) vm = env.get_vm(params["main_vm"]) vm.verify_alive() session = vm.wait_for_login(timeout=timeout) - session = utils_test.qemu.windrv_check_running_verifier(session, vm, - test, driver_verifier, - timeout) + session = utils_test.qemu.windrv_check_running_verifier( + session, vm, test, driver_verifier, timeout + ) if params.get("check_scsi_vectors", "no") == "yes": scsi_vectors = int(get_vectors_fqtree()) scsi_queues = int(params["num_queues"]) @@ -88,11 +91,12 @@ def set_msi_fguest(enable=True): test.log.info("Device vectors as expected") return else: - test.fail("Device vectors does not equal to num_queues+3.\n" - "Device vectors as:%s\ndevice num_queues as:%s" - % (scsi_vectors, scsi_queues)) + test.fail( + "Device vectors does not equal to num_queues+3.\n" + f"Device vectors as:{scsi_vectors}\ndevice num_queues as:{scsi_queues}" + ) - error_context.context("Check %s's irq number" % device_name, test.log.info) + error_context.context(f"Check {device_name}'s irq number", test.log.info) devcon_folder = utils_misc.set_winutils_letter(session, params["devcon_folder"]) if params.get("msi_cmd"): error_context.context("Set MSI in guest", test.log.info) diff --git a/qemu/tests/win_msft_sign_check.py b/qemu/tests/win_msft_sign_check.py index 97e29289ad..a67c29bac6 100644 --- a/qemu/tests/win_msft_sign_check.py +++ b/qemu/tests/win_msft_sign_check.py @@ -1,5 +1,4 @@ -from virttest import 
error_context -from virttest import utils_test +from virttest import error_context, utils_test @error_context.context_aware @@ -19,20 +18,21 @@ def run(test, params, env): driver_name = params["driver_name"] driver_verifier = params.get("driver_verifier", driver_name) - session = utils_test.qemu.windrv_check_running_verifier(session, vm, - test, driver_verifier) + session = utils_test.qemu.windrv_check_running_verifier( + session, vm, test, driver_verifier + ) # check if Windows VirtIO driver is msft digital signed. device_name = params["device_name"] chk_cmd = params["vio_driver_chk_cmd"] % device_name[0:30] chk_timeout = int(params.get("chk_timeout", 240)) - error_context.context("%s Driver Check" % driver_name, test.log.info) + error_context.context(f"{driver_name} Driver Check", test.log.info) chk_output = session.cmd_output(chk_cmd, timeout=chk_timeout) if "FALSE" in chk_output: fail_log = "VirtIO driver is not digitally signed!" - fail_log += " VirtIO driver check output: '%s'" % chk_output + fail_log += f" VirtIO driver check output: '{chk_output}'" test.fail(fail_log) elif "TRUE" in chk_output: pass else: - test.error("Device %s is not found in guest" % device_name) + test.error(f"Device {device_name} is not found in guest") diff --git a/qemu/tests/win_nics_teaming.py b/qemu/tests/win_nics_teaming.py index 8c41d0c413..82bff8ef7d 100644 --- a/qemu/tests/win_nics_teaming.py +++ b/qemu/tests/win_nics_teaming.py @@ -1,12 +1,10 @@ -import time import os import random +import time import aexpect - from avocado.utils import crypto, process -from virttest import utils_net -from virttest import utils_misc +from virttest import utils_misc, utils_net def run(test, params, env): @@ -35,34 +33,35 @@ def run(test, params, env): ifnames = () for i in range(len(nics)): mac = vm.get_mac_address(i) - connection_id = utils_net.get_windows_nic_attribute(session_serial, - "macaddress", - mac, - "netconnectionid") + connection_id = utils_net.get_windows_nic_attribute( + session_serial, "macaddress", mac, "netconnectionid" + ) ifnames += (connection_id,) # get params of teaming setup_cmd = params["setup_cmd"] status, output = session_serial.cmd_status_output(setup_cmd % ifnames) if status: - test.fail("Failed to setup team nic from powershell," - "status=%s, output=%s" % (status, output)) + test.fail( + "Failed to setup team nic from powershell," + f"status={status}, output={output}" + ) # prepare test data - guest_path = (tmp_dir + "src-%s" % utils_misc.generate_random_string(8)) - host_path = os.path.join(test.tmpdir, "tmp-%s" % - utils_misc.generate_random_string(8)) + guest_path = tmp_dir + f"src-{utils_misc.generate_random_string(8)}" + host_path = os.path.join(test.tmpdir, f"tmp-{utils_misc.generate_random_string(8)}") test.log.info("Test setup: Creating %dMB file on host", filesize) process.run(dd_cmd % host_path, shell=True) try: - netsh_set_cmd = "netsh interface set interface \"%s\" %s" + netsh_set_cmd = 'netsh interface set interface "%s" %s' # transfer data original_md5 = crypto.hash_file(host_path, algorithm="md5") test.log.info("md5 value of data original: %s", original_md5) test.log.info("Failover test with file transfer") transfer_thread = utils_misc.InterruptedThread( - vm.copy_files_to, (host_path, guest_path)) + vm.copy_files_to, (host_path, guest_path) + ) transfer_thread.start() try: while transfer_thread.is_alive(): @@ -78,10 +77,11 @@ def run(test, params, env): transfer_thread.join() os.remove(host_path) - test.log.info('Cleaning temp file on host') + test.log.info("Cleaning temp 
file on host") test.log.info("Failover test 2 with file transfer") transfer_thread = utils_misc.InterruptedThread( - vm.copy_files_from, (guest_path, host_path)) + vm.copy_files_from, (guest_path, host_path) + ) transfer_thread.start() try: nic_num = len(ifnames) @@ -92,8 +92,7 @@ def run(test, params, env): session_serial.cmd(netsh_set_cmd % (ifnames[i], "enable")) for j in range(nic_num): if i != j: - session_serial.cmd( - netsh_set_cmd % (ifnames[j], "disable")) + session_serial.cmd(netsh_set_cmd % (ifnames[j], "disable")) time.sleep(random.randint(1, 5)) index += 1 except aexpect.ShellProcessTerminatedError: @@ -104,10 +103,10 @@ def run(test, params, env): current_md5 = crypto.hash_file(host_path, algorithm="md5") test.log.info("md5 value of data current: %s", current_md5) if original_md5 != current_md5: - test.fail("File changed after transfer host -> guest " - "and guest -> host") + test.fail("File changed after transfer host -> guest " "and guest -> host") finally: os.remove(host_path) - session_serial.cmd(delete_cmd % guest_path, - timeout=login_timeout, ignore_all_errors=True) + session_serial.cmd( + delete_cmd % guest_path, timeout=login_timeout, ignore_all_errors=True + ) session_serial.close() diff --git a/qemu/tests/win_serial_tool_test.py b/qemu/tests/win_serial_tool_test.py index 33fe8d62e6..6f4bd2aeef 100644 --- a/qemu/tests/win_serial_tool_test.py +++ b/qemu/tests/win_serial_tool_test.py @@ -1,11 +1,10 @@ import re -from virttest import error_context -from virttest import utils_test -from virttest import utils_misc +from virttest import error_context, utils_misc, utils_test from virttest.utils_virtio_port import VirtioPortTest -from qemu.tests.virtio_driver_sign_check import get_driver_file_path + from provider import win_driver_utils +from qemu.tests.virtio_driver_sign_check import get_driver_file_path @error_context.context_aware @@ -33,7 +32,7 @@ def transfer_from_host_to_guest(port): port.sock.sendall(transfer_data) output = session.cmd_output(guest_receive_cmd) if not re.findall(guest_pattern, output, re.M): - test.fail("Guest fails to receive data, output is: %s" % output) + test.fail(f"Guest fails to receive data, output is: {output}") port.close() def transfer_from_guest_to_host(port): @@ -48,14 +47,14 @@ def transfer_from_guest_to_host(port): output = session.cmd_output(guest_send_cmd) if params.get("check_from_guest", "no") == "yes": if not re.findall(guest_pattern, output, re.M): - test.fail("Guest fails to send data, output is %s" % output) + test.fail(f"Guest fails to send data, output is {output}") else: try: tmp = port.sock.recv(1024)[:-1] - except IOError as failure_detail: - test.log.warn("Got err while recv: %s", failure_detail) + except OSError as failure_detail: + test.log.warning("Got err while recv: %s", failure_detail) if tmp != transfer_data: - test.fail("Incorrect data: '%s' != '%s'" % (transfer_data, tmp)) + test.fail(f"Incorrect data: '{transfer_data}' != '{tmp}'") port.close() vm = env.get_vm(params["main_vm"]) @@ -74,12 +73,12 @@ def transfer_from_guest_to_host(port): guest_send_cmd = guest_send_cmd % path elif path == "WIN_UTILS": session = vm.wait_for_serial_login() - guest_receive_cmd = utils_misc.set_winutils_letter(session, - guest_receive_cmd) + guest_receive_cmd = utils_misc.set_winutils_letter(session, guest_receive_cmd) guest_send_cmd = utils_misc.set_winutils_letter(session, guest_send_cmd) - session = utils_test.qemu.windrv_check_running_verifier(session, vm, - test, driver_name) + session = 
utils_test.qemu.windrv_check_running_verifier( + session, vm, test, driver_name + ) port = VirtioPortTest(test, env, params).get_virtio_ports(vm)[1][0] error_context.context("Tranfer data from host to guest", test.log.info) diff --git a/qemu/tests/win_sigverif.py b/qemu/tests/win_sigverif.py index 056603f4b2..66517c67be 100644 --- a/qemu/tests/win_sigverif.py +++ b/qemu/tests/win_sigverif.py @@ -1,9 +1,7 @@ import re import time -from virttest import error_context -from virttest import utils_test -from virttest import utils_misc +from virttest import error_context, utils_misc, utils_test from virttest.utils_windows import system @@ -25,36 +23,40 @@ def run(test, params, env): driver_name = params["driver_name"] driver_verifier = params.get("driver_verifier", driver_name) - session = utils_test.qemu.windrv_check_running_verifier(session, vm, - test, driver_verifier) + session = utils_test.qemu.windrv_check_running_verifier( + session, vm, test, driver_verifier + ) run_sigverif_cmd = utils_misc.set_winutils_letter( - session, params["run_sigverif_cmd"]) + session, params["run_sigverif_cmd"] + ) sigverif_log = params["sigverif_log"] check_sigverif_cmd = params["check_sigverif_cmd"] % driver_name clean_sigverif_cmd = params["clean_sigverif_cmd"] error_context.context("Run sigverif in windows guest", test.log.info) session.cmd(clean_sigverif_cmd, ignore_all_errors=True) - vm.send_key('meta_l-d') + vm.send_key("meta_l-d") time.sleep(60) status, output = session.cmd_status_output(run_sigverif_cmd) if status != 0: test.error(output) - if not utils_misc.wait_for(lambda: system.file_exists(session, - sigverif_log), - 180, 0, 5): + if not utils_misc.wait_for( + lambda: system.file_exists(session, sigverif_log), 180, 0, 5 + ): test.error("sigverif logs are not created") try: - error_context.context("Open sigverif logs and check driver signature" - " status", test.log.info) + error_context.context( + "Open sigverif logs and check driver signature" " status", test.log.info + ) output = session.cmd_output(check_sigverif_cmd) - pattern = r"%s.sys.*\s{2,}Signed" % driver_name + pattern = rf"{driver_name}.sys.*\s{{2,}}Signed" if not re.findall(pattern, output, re.M): - test.fail("%s driver is not digitally signed, details info is:\n %s" - % (driver_name, output)) + test.fail( + f"{driver_name} driver is not digitally signed, details info is:\n {output}" + ) finally: error_context.context("Clean sigverif logs", test.log.info) session.cmd(clean_sigverif_cmd, ignore_all_errors=True) diff --git a/qemu/tests/win_video_play.py b/qemu/tests/win_video_play.py index 2244d51323..7caf115f77 100644 --- a/qemu/tests/win_video_play.py +++ b/qemu/tests/win_video_play.py @@ -1,8 +1,7 @@ import time from avocado.core import exceptions -from virttest import error_context -from virttest import utils_misc +from virttest import error_context, utils_misc @error_context.context_aware @@ -21,8 +20,7 @@ def run(test, params, env): vm.verify_alive() timeout = float(params.get("login_timeout", 360)) session = vm.wait_for_login(timeout=timeout) - video_player = utils_misc.set_winutils_letter(session, - params["mplayer_path"]) + video_player = utils_misc.set_winutils_letter(session, params["mplayer_path"]) video_url = params["video_url"] play_video_cmd = params["play_video_cmd"] % (video_player, video_url) error_context.context("Play video", test.log.info) @@ -34,6 +32,5 @@ def run(test, params, env): play_video_duration = params.get("play_video_duration") if play_video_duration: time.sleep(int(play_video_duration)) - 
session.cmd("taskkill /IM %s /F" % video_player, - ignore_all_errors=True) + session.cmd(f"taskkill /IM {video_player} /F", ignore_all_errors=True) session.close() diff --git a/qemu/tests/win_virtio_driver_install_by_installer.py b/qemu/tests/win_virtio_driver_install_by_installer.py index 5fd306c683..9bedf0184c 100644 --- a/qemu/tests/win_virtio_driver_install_by_installer.py +++ b/qemu/tests/win_virtio_driver_install_by_installer.py @@ -2,10 +2,7 @@ from virttest import error_context -from provider import win_driver_installer_test -from provider import win_driver_utils -from provider import virtio_fs_utils - +from provider import virtio_fs_utils, win_driver_installer_test, win_driver_utils from qemu.tests.balloon_check import BallooningTestWin @@ -45,28 +42,22 @@ def run(test, params, env): session = vm.wait_for_login() expected_gagent_version = win_driver_installer_test.install_gagent( - session, test, - qemu_ga_pkg, - gagent_install_cmd, - gagent_pkg_info_cmd) - win_driver_installer_test.uninstall_gagent(session, test, - gagent_uninstall_cmd) + session, test, qemu_ga_pkg, gagent_install_cmd, gagent_pkg_info_cmd + ) + win_driver_installer_test.uninstall_gagent(session, test, gagent_uninstall_cmd) - win_driver_utils.uninstall_driver(session, test, - devcon_path, - driver_name, - device_name, - device_hwid) + win_driver_utils.uninstall_driver( + session, test, devcon_path, driver_name, device_name, device_hwid + ) session = vm.reboot(session) session = win_driver_installer_test.run_installer_with_interaction( - vm, session, test, params, - run_install_cmd, - copy_files_params=params) + vm, session, test, params, run_install_cmd, copy_files_params=params + ) win_driver_installer_test.win_installer_test(session, test, params) - win_driver_installer_test.check_gagent_version(session, test, - gagent_pkg_info_cmd, - expected_gagent_version) + win_driver_installer_test.check_gagent_version( + session, test, gagent_pkg_info_cmd, expected_gagent_version + ) win_driver_installer_test.driver_check(session, test, params) driver_test_names = params.objects("driver_test_names") @@ -76,7 +67,7 @@ def run(test, params, env): balloon_test_win = BallooningTestWin(test, params, env) driver_test_params = {"balloon_test_win": balloon_test_win} for test_name in driver_test_names: - test_func = "win_driver_installer_test.%s_test" % test_name - eval("%s(test, params, vm, **driver_test_params)" % test_func) + test_func = f"win_driver_installer_test.{test_name}_test" + eval(f"{test_func}(test, params, vm, **driver_test_params)") session.close() diff --git a/qemu/tests/win_virtio_driver_install_from_update.py b/qemu/tests/win_virtio_driver_install_from_update.py index 9a4cd499ae..91b14d5eff 100644 --- a/qemu/tests/win_virtio_driver_install_from_update.py +++ b/qemu/tests/win_virtio_driver_install_from_update.py @@ -1,7 +1,6 @@ import time -from virttest import error_context -from virttest import utils_misc +from virttest import error_context, utils_misc from provider import win_driver_utils @@ -39,8 +38,9 @@ def start_wuauserv_service(session): status = session.cmd_status(wuauserv_start_cmd) if status != 0: test.fail("Fail to start wuauserv service") - if not utils_misc.wait_for(lambda: not session.cmd_status(wuauserv_status_cmd), - 60, 0, 2): + if not utils_misc.wait_for( + lambda: not session.cmd_status(wuauserv_status_cmd), 60, 0, 2 + ): test.fail("wuauserv service not running") driver_name = params["driver_name"] @@ -58,35 +58,36 @@ def start_wuauserv_service(session): error_context.context("Start wuauserv 
service", test.log.info) start_wuauserv_service(session) - error_context.context("Uninstall %s driver" % driver_name, test.log.info) - win_driver_utils.uninstall_driver(session, test, devcon_path, driver_name, - device_name, device_hwid) + error_context.context(f"Uninstall {driver_name} driver", test.log.info) + win_driver_utils.uninstall_driver( + session, test, devcon_path, driver_name, device_name, device_hwid + ) session = vm.reboot(session) error_context.context("Install drivers from windows update", test.log.info) - install_driver_cmd = utils_misc.set_winutils_letter(session, - install_driver_cmd) - vm.send_key('meta_l-d') + install_driver_cmd = utils_misc.set_winutils_letter(session, install_driver_cmd) + vm.send_key("meta_l-d") time.sleep(30) session.cmd(install_driver_cmd) # workaround for viostor and vioscsi as driver status still be running # after uninstall - if driver_name in ('viostor', 'vioscsi'): + if driver_name in ("viostor", "vioscsi"): time.sleep(120) - if not utils_misc.wait_for(lambda: not session.cmd_status(check_stat), - 600, 0, 10): - test.fail("%s Driver can not be installed correctly from " - "windows update" % driver_name) + if not utils_misc.wait_for(lambda: not session.cmd_status(check_stat), 600, 0, 10): + test.fail( + f"{driver_name} Driver can not be installed correctly from " + "windows update" + ) - error_context.context("%s Driver Check" % driver_name, test.log.info) + error_context.context(f"{driver_name} Driver Check", test.log.info) session = vm.reboot(session) chk_output = session.cmd_output(chk_cmd, timeout=chk_timeout) if "FALSE" in chk_output: fail_log = "VirtIO driver is not digitally signed!" - fail_log += " VirtIO driver check output: '%s'" % chk_output + fail_log += f" VirtIO driver check output: '{chk_output}'" test.fail(fail_log) elif "TRUE" not in chk_output: - test.error("Device %s is not found in guest" % device_name) + test.error(f"Device {device_name} is not found in guest") session.close() diff --git a/qemu/tests/win_virtio_driver_installer_repair.py b/qemu/tests/win_virtio_driver_installer_repair.py index 97a73648a6..664add96d5 100644 --- a/qemu/tests/win_virtio_driver_installer_repair.py +++ b/qemu/tests/win_virtio_driver_installer_repair.py @@ -2,10 +2,7 @@ from virttest import error_context -from provider import win_driver_utils -from provider import win_driver_installer_test -from provider import virtio_fs_utils - +from provider import virtio_fs_utils, win_driver_installer_test, win_driver_utils from qemu.tests.balloon_check import BallooningTestWin @@ -33,43 +30,39 @@ def run(test, params, env): vm = env.get_vm(params["main_vm"]) session = vm.wait_for_login() - win_driver_installer_test.win_uninstall_all_drivers(session, - test, params) + win_driver_installer_test.win_uninstall_all_drivers(session, test, params) session = vm.reboot(session) test.log.info("Install virtio-win driver by installer.") session = win_driver_installer_test.run_installer_with_interaction( - vm, session, test, params, - run_install_cmd, - copy_files_params=params) + vm, session, test, params, run_install_cmd, copy_files_params=params + ) win_driver_installer_test.driver_check(session, test, params) - error_context.context("Run virtio-win-guest-tools.exe repair test", - test.log.info) + error_context.context("Run virtio-win-guest-tools.exe repair test", test.log.info) test.log.info("Remove virtio-win driver by msi.") session = win_driver_utils.remove_driver_by_msi(session, vm, params) test.log.info("Repair virtio-win driver by installer.") session = 
win_driver_installer_test.run_installer_with_interaction( - vm, session, test, params, - run_repair_cmd) + vm, session, test, params, run_repair_cmd + ) # driver check after repair win_driver_installer_test.driver_check(session, test, params) - error_context.context("Run driver function test after repair", - test.log.info) + error_context.context("Run driver function test after repair", test.log.info) fail_tests = [] - test_drivers = params.get('test_drivers', - win_driver_installer_test.driver_name_list) - if params.get('test_drivers'): + test_drivers = params.get( + "test_drivers", win_driver_installer_test.driver_name_list + ) + if params.get("test_drivers"): test_drivers = params["test_drivers"].split() for driver_name in test_drivers: - test_name = params.get('driver_test_name_%s' % driver_name) - test_func = "win_driver_installer_test.%s_test" % test_name - driver_test_params = params.get('driver_test_params_%s' - % driver_name, '{}') + test_name = params.get(f"driver_test_name_{driver_name}") + test_func = f"win_driver_installer_test.{test_name}_test" + driver_test_params = params.get(f"driver_test_params_{driver_name}", "{}") if driver_name == "viofs": virtio_fs_utils.run_viofs_service(test, params, session) @@ -80,10 +73,9 @@ def run(test, params, env): driver_test_params = ast.literal_eval(driver_test_params) try: - eval("%s(test, params, vm, **driver_test_params)" % test_func) + eval(f"{test_func}(test, params, vm, **driver_test_params)") except Exception as e: - fail_tests.append('%s:\n%s' % (test_name, str(e))) + fail_tests.append(f"{test_name}:\n{str(e)}") if fail_tests: - test.fail("Function test failed list is %s after repair." - % fail_tests) + test.fail(f"Function test failed list is {fail_tests} after repair.") session.close() diff --git a/qemu/tests/win_virtio_driver_installer_uninstall.py b/qemu/tests/win_virtio_driver_installer_uninstall.py index 8030db7a4f..612591deb8 100644 --- a/qemu/tests/win_virtio_driver_installer_uninstall.py +++ b/qemu/tests/win_virtio_driver_installer_uninstall.py @@ -62,8 +62,7 @@ def run(test, params, env): ) win_installer_test(session, test, params) - check_gagent_version(session, test, gagent_pkg_info_cmd, - expected_gagent_version) + check_gagent_version(session, test, gagent_pkg_info_cmd, expected_gagent_version) driver_check(session, test, params) error_context.context( @@ -78,7 +77,8 @@ def run(test, params, env): if s_check == 0: test.fail( "Could not uninstall Virtio-win-guest-tools package " - "in guest, detail: '%s'" % o_check) + f"in guest, detail: '{o_check}'" + ) error_context.context("Check if all drivers are uninstalled.", test.log.info) uninstalled_device = [] device_name_list = [ @@ -99,7 +99,7 @@ def run(test, params, env): if inf_name: uninstalled_device.append(device_name) if uninstalled_device: - test.fail("%s uninstall failed" % uninstalled_device) + test.fail(f"{uninstalled_device} uninstall failed") error_context.context("Check qemu-ga service.", test.log.info) gagent_status_cmd = 'sc query qemu-ga |findstr "RUNNING" ' status = session.cmd_status(gagent_status_cmd) diff --git a/qemu/tests/win_virtio_driver_update_by_installer.py b/qemu/tests/win_virtio_driver_update_by_installer.py index 81d1710fa8..6b5c5f3e37 100644 --- a/qemu/tests/win_virtio_driver_update_by_installer.py +++ b/qemu/tests/win_virtio_driver_update_by_installer.py @@ -1,16 +1,10 @@ import ast -from virttest import error_context -from virttest import utils_misc -from virttest import utils_net -from virttest import data_dir +from virttest import 
data_dir, error_context, utils_misc, utils_net +from provider import virtio_fs_utils, win_driver_installer_test, win_driver_utils from qemu.tests.balloon_check import BallooningTestWin -from provider import win_driver_utils -from provider import win_driver_installer_test -from provider import virtio_fs_utils - @error_context.context_aware def run(test, params, env): @@ -38,14 +32,14 @@ def run(test, params, env): :param params: Dictionary with the test parameters :param env: Dictionary with test environment """ + def change_virtio_media(cdrom_virtio): """ change iso for virtio-win. :param cdrom_virtio: iso file """ - virtio_iso = utils_misc.get_path(data_dir.get_data_dir(), - cdrom_virtio) + virtio_iso = utils_misc.get_path(data_dir.get_data_dir(), cdrom_virtio) test.log.info("Changing virtio iso image to '%s'", virtio_iso) vm.change_media("drive_virtio", virtio_iso) @@ -56,23 +50,22 @@ def check_network_config(session_serial): :param session_serial: session_serial """ static_ip_address = utils_net.get_guest_ip_addr( - session_serial, virtio_nic_mac, os_type="windows" - ) + session_serial, virtio_nic_mac, os_type="windows" + ) if static_ip_address != params["static_ip"]: - test.fail( - "Failed to setup static ip,current ip is %s" - % static_ip_address - ) + test.fail(f"Failed to setup static ip,current ip is {static_ip_address}") static_dns_address = utils_net.get_windows_nic_attribute( - session_serial, global_switch="nicconfig", - key="MACAddress", value=f"{virtio_nic_mac}", - target="DNSServerSearchOrder" - ) - static_dns_address = static_dns_address.strip('{}').strip('"') + session_serial, + global_switch="nicconfig", + key="MACAddress", + value=f"{virtio_nic_mac}", + target="DNSServerSearchOrder", + ) + static_dns_address = static_dns_address.strip("{}").strip('"') if static_dns_address != params["static_dns"]: test.fail( "Static dns is lost after upgrade driver, current dns " - "is %s" % static_dns_address + f"is {static_dns_address}" ) devcon_path = params["devcon_path"] @@ -91,37 +84,30 @@ def check_network_config(session_serial): virtio_nic_mac = vm.virtnet[1].mac expected_gagent_version = win_driver_installer_test.install_gagent( - session, test, - qemu_ga_pkg, - gagent_install_cmd, - gagent_pkg_info_cmd) + session, test, qemu_ga_pkg, gagent_install_cmd, gagent_pkg_info_cmd + ) - win_driver_installer_test.uninstall_gagent(session, test, - gagent_uninstall_cmd) + win_driver_installer_test.uninstall_gagent(session, test, gagent_uninstall_cmd) error_context.context("Delete the viofs service at guest...") virtio_fs_utils.delete_viofs_serivce(test, params, session) - win_driver_installer_test.win_uninstall_all_drivers(session, - test, params) + win_driver_installer_test.win_uninstall_all_drivers(session, test, params) change_virtio_media(params["cdrom_virtio_downgrade"]) session = vm.reboot(session) if params.get("update_from_previous_installer", "no") == "yes": - error_context.context("install drivers from previous installer", - test.log.info) + error_context.context("install drivers from previous installer", test.log.info) session = win_driver_installer_test.run_installer_with_interaction( - vm, session, test, params, - run_install_cmd, - copy_files_params=params) + vm, session, test, params, run_install_cmd, copy_files_params=params + ) session_serial = vm.wait_for_serial_login() if vm.virtnet[1].nic_model == "virtio-net-pci": ifname = utils_net.get_windows_nic_attribute( - session_serial, "macaddress", virtio_nic_mac, - "netconnectionid" - ) + session_serial, "macaddress", 
virtio_nic_mac, "netconnectionid" + ) setup_ip_cmd = params["setup_ip_cmd"] % ifname setup_dns_cmd = params["setup_dns_cmd"] % ifname session_serial.cmd_status(setup_ip_cmd) @@ -130,28 +116,25 @@ def check_network_config(session_serial): session_serial.close() else: for driver_name, device_name, device_hwid in zip( - win_driver_installer_test.driver_name_list, - win_driver_installer_test.device_name_list, - win_driver_installer_test.device_hwid_list): - win_driver_utils.install_driver_by_virtio_media(session, test, - devcon_path, - media_type, - driver_name, - device_hwid) - win_driver_installer_test.install_gagent(session, test, qemu_ga_pkg, - gagent_install_cmd, - gagent_pkg_info_cmd) + win_driver_installer_test.driver_name_list, + win_driver_installer_test.device_name_list, + win_driver_installer_test.device_hwid_list, + ): + win_driver_utils.install_driver_by_virtio_media( + session, test, devcon_path, media_type, driver_name, device_hwid + ) + win_driver_installer_test.install_gagent( + session, test, qemu_ga_pkg, gagent_install_cmd, gagent_pkg_info_cmd + ) error_context.context("Run viofs service...") virtio_fs_utils.run_viofs_service(test, params, session) - error_context.context("Upgrade virtio driver to original", - test.log.info) + error_context.context("Upgrade virtio driver to original", test.log.info) change_virtio_media(params["cdrom_virtio"]) session = win_driver_installer_test.run_installer_with_interaction( - vm, session, test, params, - run_install_cmd, - copy_files_params=params) + vm, session, test, params, run_install_cmd, copy_files_params=params + ) if params.get("update_from_previous_installer", "no") == "yes": session_serial = vm.wait_for_serial_login() @@ -161,34 +144,32 @@ def check_network_config(session_serial): error_context.context("Run viofs service after upgrade...") virtio_fs_utils.run_viofs_service(test, params, session) - win_driver_installer_test.check_gagent_version(session, test, - gagent_pkg_info_cmd, - expected_gagent_version) + win_driver_installer_test.check_gagent_version( + session, test, gagent_pkg_info_cmd, expected_gagent_version + ) win_driver_installer_test.driver_check(session, test, params) - error_context.context("Run driver function test after update", - test.log.info) + error_context.context("Run driver function test after update", test.log.info) fail_tests = [] - test_drivers = params.get('test_drivers', - win_driver_installer_test.driver_name_list) - if params.get('test_drivers'): + test_drivers = params.get( + "test_drivers", win_driver_installer_test.driver_name_list + ) + if params.get("test_drivers"): test_drivers = params["test_drivers"].split() for driver_name in test_drivers: - test_name = params.get('driver_test_name_%s' % driver_name) - test_func = "win_driver_installer_test.%s_test" % test_name - driver_test_params = params.get('driver_test_params_%s' - % driver_name, '{}') + test_name = params.get(f"driver_test_name_{driver_name}") + test_func = f"win_driver_installer_test.{test_name}_test" + driver_test_params = params.get(f"driver_test_params_{driver_name}", "{}") if driver_name == "balloon": balloon_test_win = BallooningTestWin(test, params, env) driver_test_params = {"balloon_test_win": balloon_test_win} else: driver_test_params = ast.literal_eval(driver_test_params) try: - eval("%s(test, params, vm, **driver_test_params)" % test_func) + eval(f"{test_func}(test, params, vm, **driver_test_params)") except Exception as e: - fail_tests.append('%s:\n%s' % (test_name, str(e))) + fail_tests.append(f"{test_name}:\n{str(e)}") 
if fail_tests: - test.fail("Function test failed list is %s after update" - % fail_tests) + test.fail(f"Function test failed list is {fail_tests} after update") session.close() diff --git a/qemu/tests/win_virtio_driver_update_test.py b/qemu/tests/win_virtio_driver_update_test.py index 21b41d2491..5ec14abb4a 100644 --- a/qemu/tests/win_virtio_driver_update_test.py +++ b/qemu/tests/win_virtio_driver_update_test.py @@ -1,10 +1,7 @@ -from virttest import error_context -from virttest import utils_misc -from virttest import data_dir -from virttest import utils_test +from virttest import data_dir, error_context, utils_misc, utils_test -from qemu.tests import single_driver_install from provider import win_driver_utils +from qemu.tests import single_driver_install @error_context.context_aware @@ -32,8 +29,7 @@ def change_virtio_media(cdrom_virtio): change iso for virtio-win :param cdrom_virtio: iso file """ - virtio_iso = utils_misc.get_path(data_dir.get_data_dir(), - cdrom_virtio) + virtio_iso = utils_misc.get_path(data_dir.get_data_dir(), cdrom_virtio) test.log.info("Changing virtio iso image to '%s'", virtio_iso) vm.change_media("drive_virtio", virtio_iso) @@ -43,17 +39,16 @@ def change_virtio_media(cdrom_virtio): driver_verifier = params.get("driver_verifier", driver) error_context.context("Enable driver verifier in guest.", test.log.info) session = vm.wait_for_login(timeout=timeout) - session = utils_test.qemu.windrv_check_running_verifier(session, vm, - test, driver_verifier, - timeout) + session = utils_test.qemu.windrv_check_running_verifier( + session, vm, test, driver_verifier, timeout + ) session.close() if params.get("need_uninstall") != "yes": error_context.context("Downgrade virtio driver", test.log.info) change_virtio_media(params["cdrom_virtio_downgrade"]) single_driver_install.run(test, params, env) # vm is rebooted in single driver install function - error_context.context("Upgrade virtio driver to original", - test.log.info) + error_context.context("Upgrade virtio driver to original", test.log.info) change_virtio_media(params["cdrom_virtio"]) single_driver_install.run(test, params, env) diff --git a/qemu/tests/win_virtio_serial_data_transfer_reboot.py b/qemu/tests/win_virtio_serial_data_transfer_reboot.py index a8a8a3c057..8bef6df35a 100644 --- a/qemu/tests/win_virtio_serial_data_transfer_reboot.py +++ b/qemu/tests/win_virtio_serial_data_transfer_reboot.py @@ -1,10 +1,7 @@ import os from avocado.utils import process -from virttest import data_dir -from virttest import qemu_virtio_port -from virttest import error_context -from virttest import utils_misc +from virttest import data_dir, error_context, qemu_virtio_port, utils_misc # This decorator makes the test function aware of context strings @@ -41,19 +38,18 @@ def receive_data(test, session, serial_receive_cmd, data_file): with open(data_file, "r") as data_file: ori_data = data_file.read() if ori_data.strip() != output.strip(): - err = "Data lost during transfer. Origin data is:\n%s" % ori_data - err += "Guest receive data:\n%s" % output + err = f"Data lost during transfer. 
Origin data is:\n{ori_data}" + err += f"Guest receive data:\n{output}" test.fail(err) def transfer_data(test, session, receive_cmd, send_cmd, data_file, n_time): - txt = "Transfer data betwwen guest and host for %s times" % n_time + txt = f"Transfer data betwwen guest and host for {n_time} times" error_context.context(txt, test.log.info) for num in range(n_time): test.log.info("Data transfer repeat %s/%s.", num + 1, n_time) try: args = (test, session, receive_cmd, data_file) - guest_receive = utils_misc.InterruptedThread(receive_data, - args) + guest_receive = utils_misc.InterruptedThread(receive_data, args) guest_receive.daemon = True guest_receive.start() process.system(send_cmd, timeout=30, shell=True) @@ -66,14 +62,15 @@ def transfer_data(test, session, receive_cmd, send_cmd, data_file, n_time): timeout = int(params.get("login_timeout", 360)) session = vm.wait_for_login(timeout=timeout) - check_cmd = params.get("check_vioser_status_cmd", - "verifier /querysettings") + check_cmd = params.get("check_vioser_status_cmd", "verifier /querysettings") output = session.cmd(check_cmd, timeout=360) - error_context.context("Make sure vioser.sys verifier enabled in guest.", - test.log.info) + error_context.context( + "Make sure vioser.sys verifier enabled in guest.", test.log.info + ) if "vioser.sys" not in output: - verify_cmd = params.get("vioser_verify_cmd", - "verifier.exe /standard /driver vioser.sys") + verify_cmd = params.get( + "vioser_verify_cmd", "verifier.exe /standard /driver vioser.sys" + ) session.cmd(verify_cmd, timeout=360, ok_status=[0, 2]) session = vm.reboot(session=session, timeout=timeout) output = session.cmd(check_cmd, timeout=360) @@ -88,27 +85,24 @@ def transfer_data(test, session, receive_cmd, send_cmd, data_file, n_time): port_name = vm.virtio_ports[0].qemu_id host_file = get_virtio_port_host_file(vm, port_name) data_file = params["data_file"] - data_file = os.path.join(data_dir.get_deps_dir("win_serial"), - data_file) + data_file = os.path.join(data_dir.get_deps_dir("win_serial"), data_file) send_script = params.get("host_send_script", "serial-host-send.py") - send_script = os.path.join(data_dir.get_deps_dir("win_serial"), - send_script) - serial_send_cmd = "`command -v python python3 | head -1` %s %s %s" % (send_script, host_file, data_file) - receive_script = params.get("guest_receive_script", - "VirtIoChannel_guest_recieve.py") - receive_script = "%s%s" % (guest_path, receive_script) - serial_receive_cmd = "python %s %s " % (receive_script, port_name) + send_script = os.path.join(data_dir.get_deps_dir("win_serial"), send_script) + serial_send_cmd = ( + f"`command -v python python3 | head -1` {send_script} {host_file} {data_file}" + ) + receive_script = params.get( + "guest_receive_script", "VirtIoChannel_guest_recieve.py" + ) + receive_script = f"{guest_path}{receive_script}" + serial_receive_cmd = f"python {receive_script} {port_name} " n_time = int(params.get("repeat_times", 20)) - transfer_data(test, session, serial_receive_cmd, serial_send_cmd, - data_file, n_time) + transfer_data(test, session, serial_receive_cmd, serial_send_cmd, data_file, n_time) error_context.context("Reboot guest.", test.log.info) session = vm.reboot(session=session, timeout=timeout) - transfer_data(test, session, serial_receive_cmd, serial_send_cmd, - data_file, n_time) - error_context.context("Reboot guest by system_reset qmp command.", - test.log.info) - session = vm.reboot(session=session, method="system_reset", - timeout=timeout) + transfer_data(test, session, serial_receive_cmd, 
serial_send_cmd, data_file, n_time) + error_context.context("Reboot guest by system_reset qmp command.", test.log.info) + session = vm.reboot(session=session, method="system_reset", timeout=timeout) if session: session.close() diff --git a/qemu/tests/win_virtio_update.py b/qemu/tests/win_virtio_update.py index 404393f0f2..3fc695cc2c 100644 --- a/qemu/tests/win_virtio_update.py +++ b/qemu/tests/win_virtio_update.py @@ -1,13 +1,9 @@ -import time -import re import os +import re +import time -from avocado.utils import process, download -from virttest import utils_test -from virttest import utils_misc -from virttest import data_dir -from virttest import env_process -from virttest import error_context +from avocado.utils import download, process +from virttest import data_dir, env_process, error_context, utils_misc, utils_test @error_context.context_aware @@ -32,8 +28,9 @@ def reboot(vm, session=None): nic_idx -= 1 if nic_idx < 0: raise - test.log.warn("Unable to login guest, " - "try to login via nic %d", nic_idx) + test.log.warning( + "Unable to login guest, " "try to login via nic %d", nic_idx + ) def check_cdrom(timeout): cdrom_chk_cmd = "echo list volume > cmd && echo exit >>" @@ -49,16 +46,17 @@ def check_cdrom(timeout): return vols if params.get("case_type") == "driver_install": - error_context.context("Update the device type to default.", - test.log.info) + error_context.context("Update the device type to default.", test.log.info) default_drive_format = params.get("default_drive_format", "ide") default_nic_model = params.get("default_nic_model", "rtl8139") default_display = params.get("default_display", "vnc") - default_parameters = {"default_drive_format": default_drive_format, - "default_nic_model": default_nic_model, - "default_display": default_display, - "default_cd_format": default_drive_format} + default_parameters = { + "default_drive_format": default_drive_format, + "default_nic_model": default_nic_model, + "default_display": default_display, + "default_cd_format": default_drive_format, + } for key in default_parameters: params[key[8:]] = default_parameters[key] @@ -75,13 +73,12 @@ def check_cdrom(timeout): download.get_file(url_virtio_win, pkg_path) if re.findall("zip$", url_virtio_win): - process.system("cd /tmp/virtio_win; unzip *; rm -f *.zip", - shell=True) + process.system("cd /tmp/virtio_win; unzip *; rm -f *.zip", shell=True) - virtio_iso = utils_misc.get_path(data_dir.get_data_dir(), - params.get("cdrom_virtio", - "/tmp/prewhql.iso")) - process.system("mkisofs -J -o %s /tmp/virtio_win" % virtio_iso) + virtio_iso = utils_misc.get_path( + data_dir.get_data_dir(), params.get("cdrom_virtio", "/tmp/prewhql.iso") + ) + process.system(f"mkisofs -J -o {virtio_iso} /tmp/virtio_win") drivers_install = re.split(";", params.get("drivers_install")) @@ -101,73 +98,70 @@ def check_cdrom(timeout): re_hw_id = params_driver.get("re_hw_id", "(PCI.{14,50})\r\n") driver_install_cmd = params_driver.get("driver_install_cmd") if "hwidcmd" in driver_install_cmd: - pattern_drive = params.get("pattern_drive", - r"\s+\w:(.[^\s]+)\s+hwidcmd") + pattern_drive = params.get("pattern_drive", r"\s+\w:(.[^\s]+)\s+hwidcmd") driver_path = re.findall(pattern_drive, driver_install_cmd)[0] driver_path = "/".join(driver_path.split("\\\\")[1:]) - storage_path = utils_misc.get_path( - data_dir.get_data_dir(), storage_path) - hw_id = utils_test.get_driver_hardware_id(driver_path, - mount_point=mount_point, - storage_path=storage_path, - re_hw_id=re_hw_id) - install_cmds[driver] = re.sub("hwidcmd", hw_id, - 
driver_install_cmd) + storage_path = utils_misc.get_path(data_dir.get_data_dir(), storage_path) + hw_id = utils_test.get_driver_hardware_id( + driver_path, + mount_point=mount_point, + storage_path=storage_path, + re_hw_id=re_hw_id, + ) + install_cmds[driver] = re.sub("hwidcmd", hw_id, driver_install_cmd) else: install_cmds[driver] = driver_install_cmd check_str[driver] = params_driver.get("check_str") check_cmds[driver] = params_driver.get("check_cmd") - if params_driver.get('op_cmd'): + if params_driver.get("op_cmd"): op_cmds[driver] = params_driver["op_cmd"].split("::") if "pecheck.py" in check_cmds[driver]: setup_ps = True if params.get("check_info") == "yes": - mount_point = params.get("virtio_mount_point", - "/tmp/virtio_win") - iso_path = utils_misc.get_path(data_dir.get_data_dir(), - params.get("cdrom_virtio")) - process.system("mount -o loop %s %s" % (iso_path, mount_point)) + mount_point = params.get("virtio_mount_point", "/tmp/virtio_win") + iso_path = utils_misc.get_path( + data_dir.get_data_dir(), params.get("cdrom_virtio") + ) + process.system(f"mount -o loop {iso_path} {mount_point}") pattern_driver = params_driver.get("pattern_driver") - driver_path = re.findall(pattern_driver, - driver_install_cmd)[0] + driver_path = re.findall(pattern_driver, driver_install_cmd)[0] driver_path = "/".join(driver_path.split("\\\\")[1:]) storage_path = utils_misc.get_path(mount_point, driver_path) storage_path = os.path.dirname(storage_path) files = " ".join(os.listdir(storage_path)) file_name = re.findall(r"\s+(.*?\.inf)", files) if file_name: - file_name = utils_misc.get_path(storage_path, - file_name[0]) + file_name = utils_misc.get_path(storage_path, file_name[0]) else: test.error("Can not find .inf file.") inf = open(file_name) inf_context = inf.read() inf.close() - process.system("umount %s" % mount_point) + process.system(f"umount {mount_point}") patterns_check_str = params_driver.get("check_str") check_str[driver] = {} for i in patterns_check_str.split(";"): check_n, check_p = i.split("::") - check_str[driver][check_n] = re.findall(check_p, - inf_context)[0] + check_str[driver][check_n] = re.findall(check_p, inf_context)[0] check_cmds[driver] = {} for i in params_driver.get("check_cmd").split(";"): cmd_n, cmd_c = i.split("::") - cmd_c = re.sub("DRIVER_PATH", - params_driver.get("sys_file_path", ""), - cmd_c) - cmd_c = re.sub("DRIVER_PATTERN_%s" % cmd_n, - params_driver.get("info_pattern_%s" % cmd_n, - ""), - cmd_c) + cmd_c = re.sub( + "DRIVER_PATH", params_driver.get("sys_file_path", ""), cmd_c + ) + cmd_c = re.sub( + f"DRIVER_PATTERN_{cmd_n}", + params_driver.get(f"info_pattern_{cmd_n}", ""), + cmd_c, + ) check_cmds[driver][cmd_n] = cmd_c error_context.context("Boot up guest with setup parameters", test.log.info) params["start_vm"] = "yes" - vm_name = params['main_vm'] + vm_name = params["main_vm"] env_process.preprocess_vm(test, params, env, vm_name) vm = env.get_vm(vm_name) session = vm.wait_for_login(timeout=timeout) @@ -175,7 +169,7 @@ def check_cdrom(timeout): cdroms = params.get("cdroms") cdrom_num = len(re.split(r"\s+", cdroms.strip())) init_timeout = int(params.get("init_timeout", "60")) - driver_install_timeout = int(params.get('driver_install_timeout', 720)) + driver_install_timeout = int(params.get("driver_install_timeout", 720)) error_context.context("Check the cdrom is available", test.log.info) volumes = check_cdrom(init_timeout) @@ -194,13 +188,13 @@ def check_cdrom(timeout): error_context.context("Install drivers", test.log.info) for driver in drivers_install: - 
error_context.context("Install drivers %s" % driver, test.log.info) + error_context.context(f"Install drivers {driver}", test.log.info) if params.get("kill_rundll", "no") == "yes": kill_cmd = 'tasklist | find /I "rundll32"' status, tasks = session.cmd_status_output(kill_cmd) if status == 0: for i in re.findall(r"rundll32.*?(\d+)", tasks): - session.cmd('taskkill /PID %s' % i) + session.cmd(f"taskkill /PID {i}") if install_cmds: cmd = re.sub("WIN_UTILS", vol_utils, install_cmds[driver]) cmd = re.sub("WIN_VIRTIO", vol_virtio, cmd) # pylint: disable=E0606 @@ -212,7 +206,7 @@ def check_cdrom(timeout): fail_log += " Please check the error_log. " else: fail_log = "Failed to install:" - error_log = open("%s/error_log" % test.resultsdir, "w") + error_log = open(f"{test.resultsdir}/error_log", "w") fail_flag = False error_context.context("Check driver available in guest", test.log.info) if setup_ps: @@ -220,29 +214,30 @@ def check_cdrom(timeout): session.cmd(setup_cmd) for driver in drivers_install: - error_log.write("For driver %s:\n" % driver) + error_log.write(f"For driver {driver}:\n") if isinstance(check_str[driver], dict): for i in check_str[driver]: output = session.cmd(check_cmds[driver][i]) if not re.findall(check_str[driver][i], output, re.I): fail_flag = True - fail_log += " %s" % driver - fail_log += "(%s) is not right; " % i - error_log.write("inf:\t%s\n" % check_str[driver][i]) - error_log.write("sys: \t%s\n" % output) + fail_log += f" {driver}" + fail_log += f"({i}) is not right; " + error_log.write(f"inf:\t{check_str[driver][i]}\n") + error_log.write(f"sys: \t{output}\n") else: output = session.cmd(check_cmds[driver]) if not re.findall(check_str[driver], output, re.I): fail_flag = True - fail_log += " %s" % driver - error_log.write("Check command output: %s\n" % output) + fail_log += f" {driver}" + error_log.write(f"Check command output: {output}\n") if fail_flag: test.fail(fail_log) if op_cmds: - error_context.context("Do more operates in guest to check the driver", - test.log.info) + error_context.context( + "Do more operates in guest to check the driver", test.log.info + ) for driver in drivers_install: if driver not in op_cmds: continue diff --git a/qemu/tests/windows_info.py b/qemu/tests/windows_info.py index 8880e5b34f..0e7ea9c413 100644 --- a/qemu/tests/windows_info.py +++ b/qemu/tests/windows_info.py @@ -29,8 +29,7 @@ def run(test, params, env): output = output.strip().split()[-1] test.log.info("Windows name: %s", output) - error_context.context("Get driver version information in guest.", - test.log.info) + error_context.context("Get driver version information in guest.", test.log.info) system_drivers = session.cmd("wmic sysdriver get DisplayName,PathName") test.log.debug("Drivers exist in the system:\n %s", system_drivers) for i in system_drivers.splitlines(): @@ -40,9 +39,9 @@ def run(test, params, env): path = driver_info[-1] path = re.sub(r"\\", "\\\\\\\\", path) driver_ver_cmd = "wmic datafile where name=" - driver_ver_cmd += "'%s' get version" % path + driver_ver_cmd += f"'{path}' get version" output = session.cmd(driver_ver_cmd) - msg = "Driver %s" % driver_name - msg += " version is %s" % output.strip().split()[-1] + msg = f"Driver {driver_name}" + msg += f" version is {output.strip().split()[-1]}" test.log.info(msg) session.close() diff --git a/qemu/tests/x2avic_test.py b/qemu/tests/x2avic_test.py index 289047be4a..27bcd25d99 100644 --- a/qemu/tests/x2avic_test.py +++ b/qemu/tests/x2avic_test.py @@ -1,4 +1,5 @@ from avocado.utils import process + from 
provider.cpu_utils import check_cpu_flags @@ -25,7 +26,7 @@ def run(test, params, env): try: session = vm.wait_for_login() if params.get("os_type") == "linux": - output = session.cmd_output('dmesg | grep x2apic') + output = session.cmd_output("dmesg | grep x2apic") if "x2apic enabled" not in output: if "x2apic: enabled" not in output: test.fail("x2apic is not enabled inside guest.") diff --git a/qemu/tests/x86_cpu_L3_cache.py b/qemu/tests/x86_cpu_L3_cache.py index 8fa2ea0aed..57c72090fb 100644 --- a/qemu/tests/x86_cpu_L3_cache.py +++ b/qemu/tests/x86_cpu_L3_cache.py @@ -1,9 +1,6 @@ import re -from virttest import env_process -from virttest import error_context -from virttest import utils_misc -from virttest import utils_qemu +from virttest import env_process, error_context, utils_misc, utils_qemu @error_context.context_aware @@ -28,25 +25,28 @@ def boot_and_check_guest(machine_type, check_L3=False): :param machine_type: Boot guest with which machine type :param check_L3: if L3 cache should exist on guest """ - params['machine_type'] = machine_type - params['start_vm'] = 'yes' - vm_name = params['main_vm'] - if max(params.get_numeric('smp'), - params.get_numeric('vcpu_maxcpus')) > 128: - params['smp'] = params['vcpu_maxcpus'] = '128' - L3_existence = 'present' if check_L3 else 'not present' - test.log.info('Boot guest with machine type %s and expect L3 cache %s' - ' inside guest', machine_type, L3_existence) + params["machine_type"] = machine_type + params["start_vm"] = "yes" + vm_name = params["main_vm"] + if max(params.get_numeric("smp"), params.get_numeric("vcpu_maxcpus")) > 128: + params["smp"] = params["vcpu_maxcpus"] = "128" + L3_existence = "present" if check_L3 else "not present" + test.log.info( + "Boot guest with machine type %s and expect L3 cache %s" " inside guest", + machine_type, + L3_existence, + ) env_process.preprocess_vm(test, params, env, vm_name) vm = env.get_vm(vm_name) session = vm.wait_for_login() - output = session.cmd_output('lscpu') + output = session.cmd_output("lscpu") session.close() vm.destroy() - L3_present = 'L3' in output + L3_present = "L3" in output if check_L3 ^ L3_present: - test.fail('L3 cache should %s inside guest for machine type %s' % - (L3_existence, machine_type)) + test.fail( + f"L3 cache should {L3_existence} inside guest for machine type {machine_type}" + ) def check_version(latest_machine): """ @@ -55,17 +55,18 @@ def check_version(latest_machine): :param latest_machine: The latest machine type """ - latest_ver = re.findall(r'\d+\.\d+', latest_machine)[0] - old_ver = re.findall(r'\d+\.\d+', old_machine)[0] + latest_ver = re.findall(r"\d+\.\d+", latest_machine)[0] + old_ver = re.findall(r"\d+\.\d+", old_machine)[0] if latest_ver <= old_ver: - test.cancel('The latest supported machine type does not' - ' support this test case.') + test.cancel( + "The latest supported machine type does not" " support this test case." 
+ ) - old_machine = params['old_machine'] - machine_type = params['machine_type'] + old_machine = params["old_machine"] + machine_type = params["machine_type"] qemu_bin = utils_misc.get_qemu_binary(params) machine_types = utils_qemu.get_supported_machines_list(qemu_bin) - m_keyword = 'q35' if 'q35' in machine_type else 'i440fx' + m_keyword = "q35" if "q35" in machine_type else "i440fx" for m_type in machine_types: if m_keyword in m_type and m_type != m_keyword: check_version(m_type) @@ -77,4 +78,4 @@ def check_version(latest_machine): boot_and_check_guest(m_type) break else: - test.log.warning('Old machine type is not supported, skip checking.') + test.log.warning("Old machine type is not supported, skip checking.") diff --git a/qemu/tests/x86_cpu_asyncpf.py b/qemu/tests/x86_cpu_asyncpf.py index 23df023288..56dd031934 100644 --- a/qemu/tests/x86_cpu_asyncpf.py +++ b/qemu/tests/x86_cpu_asyncpf.py @@ -1,9 +1,6 @@ import re -from virttest import env_process -from virttest import error_context -from virttest import utils_misc -from virttest import utils_qemu +from virttest import env_process, error_context, utils_misc, utils_qemu @error_context.context_aware @@ -19,31 +16,33 @@ def run(test, params, env): :param params: Dictionary with test parameters. :param env: Dictionary with the test environment. """ + def boot_and_check_guest(machine_type): """ Boot guest and check async PF inside guest :param machine_type: Boot guest with which machine type """ - params['machine_type'] = machine_type - check_interrupts = params['check_interrupts'] - params['start_vm'] = 'yes' - vm_name = params['main_vm'] + params["machine_type"] = machine_type + check_interrupts = params["check_interrupts"] + params["start_vm"] = "yes" + vm_name = params["main_vm"] env_process.preprocess_vm(test, params, env, vm_name) vm = env.get_vm(vm_name) session = vm.wait_for_login() g_vcpus = session.cmd_output("grep processor /proc/cpuinfo -c").strip() - output = session.cmd_output(check_interrupts).split('\n')[0] - g_interrupts = re.findall(r'\d+', output) + output = session.cmd_output(check_interrupts).split("\n")[0] + g_interrupts = re.findall(r"\d+", output) session.close() vm.destroy() - if g_interrupts.count('0') >= 1: + if g_interrupts.count("0") >= 1: test.fail("cpu interrupt value is not right") elif len(g_interrupts) != int(g_vcpus): - test.fail("interrupts %s is not equal to cpu count %s" - % (len(g_interrupts), g_vcpus)) + test.fail( + f"interrupts {len(g_interrupts)} is not equal to cpu count {g_vcpus}" + ) def check_version(latest_machine): """ @@ -52,16 +51,17 @@ def check_version(latest_machine): :param latest_machine: The latest machine type """ - latest_ver = re.findall(r'\d+\.\d+', latest_machine)[0] - old_ver = re.findall(r'\d+\.\d+', old_machine)[0] + latest_ver = re.findall(r"\d+\.\d+", latest_machine)[0] + old_ver = re.findall(r"\d+\.\d+", old_machine)[0] if latest_ver <= old_ver: - test.cancel('The latest supported machine type does not' - ' support this test case.') + test.cancel( + "The latest supported machine type does not" " support this test case." 
+ ) - old_machine = params['old_machine'] + old_machine = params["old_machine"] qemu_bin = utils_misc.get_qemu_binary(params) machine_types = utils_qemu.get_supported_machines_list(qemu_bin) - m_keyword = 'q35' + m_keyword = "q35" m_type = [m for m in machine_types if m_keyword in m and m_keyword != m] if m_type: diff --git a/qemu/tests/x86_cpu_flag_disable.py b/qemu/tests/x86_cpu_flag_disable.py index 8112d7d6be..379096d97e 100644 --- a/qemu/tests/x86_cpu_flag_disable.py +++ b/qemu/tests/x86_cpu_flag_disable.py @@ -1,7 +1,6 @@ import random -from virttest import env_process -from virttest import error_context +from virttest import env_process, error_context from provider.cpu_utils import check_cpu_flags @@ -22,25 +21,25 @@ def run(test, params, env): flags_list = params.objects("flags_list") params["flags"] = params["no_flags"] = random.choice(flags_list) flag = params["flags"] - params["cpu_model_flags"] += ",-%s" % flag + params["cpu_model_flags"] += f",-{flag}" check_host_flags = params.get_boolean("check_host_flags") if check_host_flags: check_cpu_flags(params, flag, test) params["start_vm"] = "yes" - vm_name = params['main_vm'] + vm_name = params["main_vm"] env_process.preprocess_vm(test, params, env, vm_name) vm = env.get_vm(vm_name) error_context.context("Try to log into guest", test.log.info) session = vm.wait_for_login() - check_cpu_flags(params, '', test, session) + check_cpu_flags(params, "", test, session) - if flag == 'kvmclock': + if flag == "kvmclock": check_clock = params.get("check_clock") vm_clock_out = session.cmd_output(check_clock).split() - if 'kvmclock' in vm_clock_out: + if "kvmclock" in vm_clock_out: test.fail("kvmclock shouldn't be found inside geust") vm.verify_kernel_crash() diff --git a/qemu/tests/x86_cpu_flag_intel_pt.py b/qemu/tests/x86_cpu_flag_intel_pt.py index 36fe3899ea..d879831205 100644 --- a/qemu/tests/x86_cpu_flag_intel_pt.py +++ b/qemu/tests/x86_cpu_flag_intel_pt.py @@ -1,8 +1,5 @@ from avocado.utils import process - -from virttest import utils_misc -from virttest import env_process -from virttest import error_context +from virttest import env_process, error_context, utils_misc from provider.cpu_utils import check_cpu_flags @@ -34,10 +31,10 @@ def run(test, params, env): origin_value = process.getoutput(get_pt_mode).strip() try: - if origin_value != '1': - process.system(set_pt_mode % '1', shell=True) + if origin_value != "1": + process.system(set_pt_mode % "1", shell=True) pt_mode = process.getoutput(get_pt_mode).strip() - if pt_mode != '1': + if pt_mode != "1": test.cancel("pt_mode can't be set to 1") params["start_vm"] = "yes" diff --git a/qemu/tests/x86_cpu_flag_nonstop_tsc.py b/qemu/tests/x86_cpu_flag_nonstop_tsc.py index 36229c8c62..d1f2bb28b7 100644 --- a/qemu/tests/x86_cpu_flag_nonstop_tsc.py +++ b/qemu/tests/x86_cpu_flag_nonstop_tsc.py @@ -1,5 +1,4 @@ -from virttest import env_process -from virttest import error_context +from virttest import env_process, error_context from provider.cpu_utils import check_cpu_flags @@ -24,14 +23,14 @@ def run(test, params, env): check_cpu_flags(params, flag, test) params["start_vm"] = "yes" - vm_name = params['main_vm'] + vm_name = params["main_vm"] env_process.preprocess_vm(test, params, env, vm_name) vm = env.get_vm(vm_name) error_context.context("Try to log into guest", test.log.info) session = vm.wait_for_login() if params["os_type"] == "linux": - if params['os_variant'] != 'rhel6': + if params["os_variant"] != "rhel6": check_cpu_flags(params, flag, test, session) check_clock = params["check_clock"] 
check_clock_out = session.cmd_status(check_clock) @@ -39,7 +38,7 @@ def run(test, params, env): test.fail("tsc can't be found inside guest") if params.get("reboot_method"): - error_context.context("Reboot guest '%s'." % vm.name, test.log.info) + error_context.context(f"Reboot guest '{vm.name}'.", test.log.info) session = vm.reboot(session=session) vm.verify_kernel_crash() diff --git a/qemu/tests/x86_cpu_flags.py b/qemu/tests/x86_cpu_flags.py index 9140cc9bf5..8502b46b14 100644 --- a/qemu/tests/x86_cpu_flags.py +++ b/qemu/tests/x86_cpu_flags.py @@ -1,4 +1,5 @@ -from virttest import error_context, env_process, cpu +from virttest import cpu, env_process, error_context + from provider.cpu_utils import check_cpu_flags @@ -25,13 +26,13 @@ def run(test, params, env): if not cpu_model: cpu_model = cpu.get_qemu_best_cpu_model(params) if cpu_model in unsupported_models.split(): - test.cancel("'%s' doesn't support this test case" % cpu_model) - fallback_models_map = eval(params.get('fallback_models_map', '{}')) + test.cancel(f"'{cpu_model}' doesn't support this test case") + fallback_models_map = eval(params.get("fallback_models_map", "{}")) if cpu_model in fallback_models_map.keys(): params["cpu_model"] = fallback_models_map[cpu_model] params["start_vm"] = "yes" - vm_name = params['main_vm'] + vm_name = params["main_vm"] env_process.preprocess_vm(test, params, env, vm_name) vm = env.get_vm(vm_name) @@ -49,10 +50,10 @@ def run(test, params, env): if expect_items: result = session.cmd_status(check_guest_cmd % expect_items) if result: - test.fail("'%s' can't be found inside guest" % expect_items) + test.fail(f"'{expect_items}' can't be found inside guest") if params.get("reboot_method"): - error_context.context("Reboot guest '%s'." % vm.name, test.log.info) + error_context.context(f"Reboot guest '{vm.name}'.", test.log.info) session = vm.reboot(session=session) vm.verify_kernel_crash() diff --git a/qemu/tests/x86_cpu_model.py b/qemu/tests/x86_cpu_model.py index cfb5a9cc58..20521b5f78 100644 --- a/qemu/tests/x86_cpu_model.py +++ b/qemu/tests/x86_cpu_model.py @@ -1,10 +1,9 @@ -import re import json +import re -from avocado.utils import cpu -from avocado.utils import process +from avocado.utils import cpu, process +from virttest import env_process, error_context, utils_misc -from virttest import error_context, utils_misc, env_process from provider.cpu_utils import check_cpu_flags @@ -24,26 +23,29 @@ def run(test, params, env): :param env: Dictionary with test environment. 
""" qemu_binary = utils_misc.get_qemu_binary(params) - qmp_cmds = ['{"execute": "qmp_capabilities"}', - '{"execute": "query-cpu-definitions", "id": "RAND91"}', - '{"execute": "quit"}'] - cmd = "echo -e '{0}' | {1} -qmp stdio -vnc none -M none | grep return |"\ - "grep RAND91".format(r"\n".join(qmp_cmds), qemu_binary) - output = process.run(cmd, timeout=10, - ignore_status=True, - shell=True, - verbose=False).stdout_text + qmp_cmds = [ + '{"execute": "qmp_capabilities"}', + '{"execute": "query-cpu-definitions", "id": "RAND91"}', + '{"execute": "quit"}', + ] + cmd = ( + "echo -e '{}' | {} -qmp stdio -vnc none -M none | grep return |" + "grep RAND91".format(r"\n".join(qmp_cmds), qemu_binary) + ) + output = process.run( + cmd, timeout=10, ignore_status=True, shell=True, verbose=False + ).stdout_text out = json.loads(output)["return"] model = params["model"] model_pattern = params["model_pattern"] flags = params["flags"] - if cpu.get_vendor() == 'intel': - model_ib = "%s-IBRS" % model + if cpu.get_vendor() == "intel": + model_ib = f"{model}-IBRS" flag_ib = " ibpb ibrs" name_ib = ", IBRS( update)?" else: - model_ib = "%s-IBPB" % model + model_ib = f"{model}-IBPB" flag_ib = " ibpb" name_ib = " \\(with IBPB\\)" @@ -56,11 +58,11 @@ def run(test, params, env): cpu_model = model guest_model = model_pattern % "" else: - test.cancel("This host doesn't support cpu model %s" % model) + test.cancel(f"This host doesn't support cpu model {model}") params["cpu_model"] = cpu_model # pylint: disable=E0606 params["start_vm"] = "yes" - vm_name = params['main_vm'] + vm_name = params["main_vm"] env_process.preprocess_vm(test, params, env, vm_name) vm = env.get_vm(vm_name) @@ -82,18 +84,22 @@ def run(test, params, env): check_items = params.get("check_items").split() expect_result = params.get("expect_result") for item in vulnerabilities: - h_out = re.search("Vulnerable|Mitigation|Not affected", - process.getoutput(check_cmd % item))[0] - g_out = re.search("Vulnerable|Mitigation|Not affected", - session.cmd_output(check_cmd % item))[0] + h_out = re.search( + "Vulnerable|Mitigation|Not affected", + process.getoutput(check_cmd % item), + )[0] + g_out = re.search( + "Vulnerable|Mitigation|Not affected", + session.cmd_output(check_cmd % item), + )[0] if h_out != g_out: - test.fail("Guest is not equal to Host with '%s'" % item) + test.fail(f"Guest is not equal to Host with '{item}'") if item in check_items and g_out != expect_result: - test.fail("'%s' can't get '%s'" % (item, expect_result)) + test.fail(f"'{item}' can't get '{expect_result}'") check_cpu_flags(params, flags, test, session) if params.get("reboot_method"): - error_context.context("Reboot guest '%s'." 
% vm.name, test.log.info) + error_context.context(f"Reboot guest '{vm.name}'.", test.log.info) session = vm.reboot(session=session) vm.verify_kernel_crash() diff --git a/qemu/tests/x86_cpu_protection_key.py b/qemu/tests/x86_cpu_protection_key.py index 9f193a801e..4c4c14ed9a 100644 --- a/qemu/tests/x86_cpu_protection_key.py +++ b/qemu/tests/x86_cpu_protection_key.py @@ -1,6 +1,4 @@ -from virttest import cpu -from virttest import env_process -from virttest import error_context +from virttest import cpu, env_process, error_context @error_context.context_aware @@ -20,10 +18,10 @@ def run(test, params, env): unsupported_models = params.get("unsupported_models", "") cpu_model = params.get("cpu_model", cpu.get_qemu_best_cpu_model(params)) if cpu_model in unsupported_models.split(): - test.cancel("'%s' doesn't support this test case" % cpu_model) + test.cancel(f"'{cpu_model}' doesn't support this test case") params["start_vm"] = "yes" - vm_name = params['main_vm'] + vm_name = params["main_vm"] env_process.preprocess_vm(test, params, env, vm_name) vm = env.get_vm(vm_name) @@ -33,15 +31,15 @@ def run(test, params, env): guest_dir = params["guest_dir"] timeout = params.get_numeric("timeout") kernel_v = session.cmd_output("uname -r").strip() - mkdir_cmd = session.cmd('mkdir -p %s' % guest_dir) + mkdir_cmd = session.cmd(f"mkdir -p {guest_dir}") src_rpm = "kernel-" + kernel_v.rsplit(".", 1)[0] + ".src.rpm" linux_name = "linux-" + kernel_v.rsplit(".", 1)[0] - download_rpm_cmd = 'cd %s && ' % guest_dir + params["download_rpm_cmd"] % src_rpm - uncompress_cmd_src = 'cd %s && ' % guest_dir + params["uncompress_cmd_src"] - uncompress_cmd = 'cd %s && ' % guest_dir + params["uncompress_cmd"] + download_rpm_cmd = f"cd {guest_dir} && " + params["download_rpm_cmd"] % src_rpm + uncompress_cmd_src = f"cd {guest_dir} && " + params["uncompress_cmd_src"] + uncompress_cmd = f"cd {guest_dir} && " + params["uncompress_cmd"] test_dir = guest_dir + linux_name + params["test_dir"] - compile_cmd = 'cd %s && ' % test_dir + params["compile_cmd"] - run_cmd = 'cd %s && ' % test_dir + params["run_cmd"] + compile_cmd = f"cd {test_dir} && " + params["compile_cmd"] + run_cmd = f"cd {test_dir} && " + params["run_cmd"] try: session.cmd(mkdir_cmd) @@ -51,10 +49,10 @@ def run(test, params, env): session.cmd(uncompress_cmd, timeout) session.cmd(compile_cmd, timeout) s, output = session.cmd_status_output(run_cmd, safe=True) - if 'done (all tests OK)' not in output: + if "done (all tests OK)" not in output: test.fail("Protection key test runs failed.") vm.verify_kernel_crash() finally: - session.cmd("rm -rf %s" % guest_dir) + session.cmd(f"rm -rf {guest_dir}") session.close() diff --git a/qemu/tests/x86_cpu_test_0x40000001.py b/qemu/tests/x86_cpu_test_0x40000001.py index e6ac8dc28a..686cd1e21d 100644 --- a/qemu/tests/x86_cpu_test_0x40000001.py +++ b/qemu/tests/x86_cpu_test_0x40000001.py @@ -1,7 +1,6 @@ import os -from virttest import error_context -from virttest import data_dir +from virttest import data_dir, error_context @error_context.context_aware @@ -20,14 +19,13 @@ def run(test, params, env): source_file = params["source_file"] src_cpuid = os.path.join(data_dir.get_deps_dir(), source_file) vm.copy_files_to(src_cpuid, test_dir) - guest_dir = "%s/cpuid-20220224" % test_dir + guest_dir = f"{test_dir}/cpuid-20220224" try: - session.cmd('tar -xvf %s/%s -C %s' - % (test_dir, source_file, test_dir)) - check_cpuid = 'cd %s && ' % guest_dir + params["check_cpuid"] + session.cmd(f"tar -xvf {test_dir}/{source_file} -C {test_dir}") + check_cpuid 
= f"cd {guest_dir} && " + params["check_cpuid"] results = session.cmd_output(check_cpuid).strip() if results.split()[0] != nums_cpu: test.fail("some vcpu's cpuid has no eax=0x40000001.") finally: - session.cmd("rm %s/cpuid* -rf" % test_dir) + session.cmd(f"rm {test_dir}/cpuid* -rf") session.close() diff --git a/qemu/tests/x86_cpu_test_dies.py b/qemu/tests/x86_cpu_test_dies.py index 4ccc760a32..beb2797777 100644 --- a/qemu/tests/x86_cpu_test_dies.py +++ b/qemu/tests/x86_cpu_test_dies.py @@ -1,9 +1,7 @@ import random from avocado.utils import cpu - -from virttest import error_context -from virttest import env_process +from virttest import env_process, error_context @error_context.context_aware @@ -18,27 +16,27 @@ def run(test, params, env): :param params: Dictionary with test parameters. :param env: Dictionary with the test environment. """ - vm_name = params['main_vm'] + vm_name = params["main_vm"] vcpu_dies_list = [2, 4] - params['vcpu_dies'] = random.choice(vcpu_dies_list) - params['start_vm'] = 'yes' + params["vcpu_dies"] = random.choice(vcpu_dies_list) + params["start_vm"] = "yes" env_process.preprocess_vm(test, params, env, vm_name) vm = env.get_vm(vm_name) session = vm.wait_for_login() - if params["os_type"] == "linux" and cpu.get_vendor() == 'intel': - check_die_id = params['check_die_id'] - check_die_cpus_list = params['check_die_cpus_list'] + if params["os_type"] == "linux" and cpu.get_vendor() == "intel": + check_die_id = params["check_die_id"] + check_die_cpus_list = params["check_die_cpus_list"] vcpu_sockets = vm.cpuinfo.sockets vcpu_dies = vm.cpuinfo.dies - dies_id = session.cmd_output(check_die_id).strip().split('\n') - dies_cpus_list = session.cmd_output( - check_die_cpus_list).strip().split('\n') + dies_id = session.cmd_output(check_die_id).strip().split("\n") + dies_cpus_list = session.cmd_output(check_die_cpus_list).strip().split("\n") if len(dies_id) != int(vcpu_dies): - test.fail("die_id is not right: %d != %d" - % (len(dies_id), int(vcpu_dies))) - if len(dies_cpus_list) != int(vcpu_sockets)*int(vcpu_dies): - test.fail("die_cpus_list is not right: %d != %d" - % (len(dies_cpus_list), int(vcpu_sockets)*int(vcpu_dies))) + test.fail("die_id is not right: %d != %d" % (len(dies_id), int(vcpu_dies))) + if len(dies_cpus_list) != int(vcpu_sockets) * int(vcpu_dies): + test.fail( + "die_cpus_list is not right: %d != %d" + % (len(dies_cpus_list), int(vcpu_sockets) * int(vcpu_dies)) + ) vm.verify_kernel_crash() session.close() diff --git a/qemu/tests/x86_cpu_v_spec_ctrl.py b/qemu/tests/x86_cpu_v_spec_ctrl.py index 49bf228ddb..2e4ae6fe15 100644 --- a/qemu/tests/x86_cpu_v_spec_ctrl.py +++ b/qemu/tests/x86_cpu_v_spec_ctrl.py @@ -1,9 +1,6 @@ import os -from virttest import cpu -from virttest import data_dir -from virttest import env_process -from virttest import error_context +from virttest import cpu, data_dir, env_process, error_context from virttest.utils_test import update_boot_option from provider.cpu_utils import check_cpu_flags @@ -32,10 +29,10 @@ def run(test, params, env): if not cpu_model: cpu_model = cpu.get_qemu_best_cpu_model(params) if cpu_model not in supported_models.split(): - test.cancel("'%s' doesn't support this test case" % cpu_model) + test.cancel(f"'{cpu_model}' doesn't support this test case") params["start_vm"] = "yes" - vm_name = params['main_vm'] + vm_name = params["main_vm"] env_process.preprocess_vm(test, params, env, vm_name) proc_cmdline = params["proc_cmdline"] @@ -44,7 +41,7 @@ def run(test, params, env): boot_option = params["boot_option"] check_output = 
str(session.cmd(proc_cmdline, timeout=60)).split() if boot_option and boot_option not in check_output: - error_context.context("Add '%s' to guest" % boot_option, test.log.info) + error_context.context(f"Add '{boot_option}' to guest", test.log.info) update_boot_option(vm, args_added=boot_option) session = vm.wait_for_login() @@ -56,13 +53,13 @@ def run(test, params, env): compile_cmd = params["compile_cmd"] try: session.cmd(compile_cmd % guest_dir) - check_msr = 'cd %s && ' % guest_dir + params["check_msr"] + check_msr = f"cd {guest_dir} && " + params["check_msr"] result = session.cmd_output(check_msr) nums_vcpus = session.cmd_output("grep processor /proc/cpuinfo -c") if result != nums_vcpus: test.fail("verify the guest sets the spec ctrl failed.") finally: - session.cmd("rm -rf %s/msr* %s/master*" % (test_dir, test_dir)) + session.cmd(f"rm -rf {test_dir}/msr* {test_dir}/master*") session.close() vm.verify_kernel_crash() if boot_option and boot_option not in check_output: diff --git a/qemu/tests/yonit_bitmap.py b/qemu/tests/yonit_bitmap.py index bbf6a45293..04553ba088 100644 --- a/qemu/tests/yonit_bitmap.py +++ b/qemu/tests/yonit_bitmap.py @@ -1,7 +1,6 @@ import signal -from virttest import utils_misc -from virttest import error_context +from virttest import error_context, utils_misc from generic.tests import guest_test @@ -44,37 +43,35 @@ def run(test, params, env): # running while the the foreground detecting is on going. error_context.context("run benchmark test in background", test.log.info) params["test_timeout"] = test_timeout * 2 + sec_per_day - test.log.info("set Yonit bitmap test timeout to" - " %ss", params["test_timeout"]) + test.log.info("set Yonit bitmap test timeout to" " %ss", params["test_timeout"]) pid = guest_test.run_guest_test_background(test, params, env) if pid < 0: session.close() - test.error("Could not create child process to execute " - "guest_test background") + test.error("Could not create child process to execute " "guest_test background") def is_yonit_benchmark_launched(): - if session.cmd_status( - 'tasklist | find /I "compress_benchmark_loop"') != 0: + if session.cmd_status('tasklist | find /I "compress_benchmark_loop"') != 0: test.log.debug("yonit bitmap benchmark was not found") return False return True - error_context.context("Watching Yonit bitmap benchmark is" - " running until timeout", test.log.info) + error_context.context( + "Watching Yonit bitmap benchmark is" " running until timeout", test.log.info + ) try: # Start detecting whether the benchmark is started a few mins # after the background test launched, as the downloading # will take some time. launch_timeout = login_timeout - if utils_misc.wait_for(is_yonit_benchmark_launched, - launch_timeout, 180, 5): + if utils_misc.wait_for(is_yonit_benchmark_launched, launch_timeout, 180, 5): test.log.debug("Yonit bitmap benchmark was launched successfully") else: test.error("Failed to launch yonit bitmap benchmark") # If the benchmark exits before timeout, errors happened. 
- if utils_misc.wait_for(lambda: not is_yonit_benchmark_launched(), - test_timeout, 60, 10): + if utils_misc.wait_for( + lambda: not is_yonit_benchmark_launched(), test_timeout, 60, 10 + ): test.error("Yonit bitmap benchmark exits unexpectly") else: if session.is_responsive(): diff --git a/qemu/tests/zero_copy.py b/qemu/tests/zero_copy.py index 718e525db4..01a0e360f8 100644 --- a/qemu/tests/zero_copy.py +++ b/qemu/tests/zero_copy.py @@ -1,7 +1,5 @@ from avocado.utils import process -from virttest import env_process -from virttest import utils_test -from virttest import error_context +from virttest import env_process, error_context, utils_test @error_context.context_aware @@ -17,6 +15,7 @@ def run(test, params, env): :param params: Dictionary with the test parameters. :param env: Dictionary with test environment. """ + def zerocp_enable_status(): """ Check whether host have enabled zero copy, if enabled return True, @@ -24,7 +23,7 @@ def zerocp_enable_status(): """ def_para_path = "/sys/module/vhost_net/parameters/experimental_zcopytx" para_path = params.get("zcp_set_path", def_para_path) - cmd_status = process.system("grep 1 %s" % para_path, ignore_status=True) + cmd_status = process.system(f"grep 1 {para_path}", ignore_status=True) if cmd_status: return False else: @@ -42,19 +41,17 @@ def enable_zerocopytx_in_host(test, enable=True): if process.system(cmd, shell=True) or enable != zerocp_enable_status(): test.cancel("Set vhost_net zcopytx failed") - error_context.context("Set host vhost_net experimental_zcopytx", - test.log.info) - if params.get("enable_zerocp", 'yes') == 'yes': + error_context.context("Set host vhost_net experimental_zcopytx", test.log.info) + if params.get("enable_zerocp", "yes") == "yes": enable_zerocopytx_in_host(test) else: enable_zerocopytx_in_host(test, False) error_context.context("Boot vm with 'vhost=on'", test.log.info) if params.get("nettype") == "user": - test.cancel("Unable start test with user networking, please " - "change nettype.") + test.cancel("Unable start test with user networking, please " "change nettype.") params["vhost"] = "vhost=on" - params["start_vm"] = 'yes' + params["start_vm"] = "yes" login_timeout = int(params.get("login_timeout", 360)) env_process.preprocess_vm(test, params, env, params.get("main_vm")) vm = env.get_vm(params["main_vm"]) @@ -65,13 +62,12 @@ def enable_zerocopytx_in_host(test, enable=True): error_context.context("Check guest nic is works by ping", test.log.info) status, output = utils_test.ping(guest_ip, count=10, timeout=20) if status: - err_msg = "Run ping %s failed, after set zero copy" % guest_ip + err_msg = f"Run ping {guest_ip} failed, after set zero copy" test.error(err_msg) elif utils_test.get_loss_ratio(output) == 100: - err_msg = "All packets lost during ping guest %s." % guest_ip + err_msg = f"All packets lost during ping guest {guest_ip}." test.fail(err_msg) # in vm.verify_alive will check whether have userspace or kernel crash - error_context.context("Check guest is alive and have no crash", - test.log.info) + error_context.context("Check guest is alive and have no crash", test.log.info) vm.verify_alive()
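Note for reviewers (illustrative only, not an additional hunk): the hunks above repeat two mechanical rewrites throughout the tree -- %-style string formatting is converted to f-strings, and call arguments are re-wrapped to the configured line length, either collapsed onto a single line or expanded to one argument per line with a trailing comma. Neither rewrite changes behaviour; only the formatting differs. A minimal, self-contained sketch of the string-formatting conversion, using names that already appear in the hunks above:

    # Illustrative sketch only -- not part of this patch.
    n_time = 20  # stand-in for params.get("repeat_times") from the tests above

    # before: %-style formatting, as flagged by the linter
    txt_old = "Transfer data between guest and host for %s times" % n_time

    # after: the equivalent f-string produced by the autofix
    txt_new = f"Transfer data between guest and host for {n_time} times"

    # the rewritten string is identical at runtime
    assert txt_old == txt_new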