From 4c26efb8c7d3ee39e845bd7594ce3930051ebc2a Mon Sep 17 00:00:00 2001 From: Anindya Roy Date: Thu, 23 Nov 2023 10:23:36 +0530 Subject: [PATCH 01/36] NXDRIVE-2860: Code Coverage - added two testcases in test_api.py --- tests/functional/test_api.py | 52 ++++++++++++++++++++++++++++++++++++ 1 file changed, 52 insertions(+) diff --git a/tests/functional/test_api.py b/tests/functional/test_api.py index 0e45243e7b..32478be078 100644 --- a/tests/functional/test_api.py +++ b/tests/functional/test_api.py @@ -52,3 +52,55 @@ def mocked_open_authentication_dialog(): with manager: returned_val = drive_api.get_features_list() assert returned_val + + +def test_generate_report(manager_factory): + manager = manager_factory(with_engine=False) + manager.application = "" + + def mocked_open_authentication_dialog(): + return + + def func(*args): + return "Report" + + Mocked_App = namedtuple( + "app", + "manager, open_authentication_dialog", + defaults=(manager, mocked_open_authentication_dialog), + ) + app = Mocked_App() + drive_api = QMLDriveApi(app) + + with manager: + with patch.object(manager, "generate_report", new=func): + returned_val = drive_api.generate_report() + assert returned_val + + +def test_get_disk_space_info_to_width(manager_factory): + manager, engine = manager_factory() + manager.application = "" + + def mocked_open_authentication_dialog(): + return + + def func(*args): + return 100, 200 + + Mocked_App = namedtuple( + "app", + "manager, open_authentication_dialog", + defaults=(manager, mocked_open_authentication_dialog), + ) + app = Mocked_App() + drive_api = QMLDriveApi(app) + + with manager: + from nxdrive import utils + + with patch.object(utils, "disk_space", new=func): + returned_val = drive_api.get_disk_space_info_to_width( + "001", "dummy_path", 100 + ) + assert returned_val From 4940d4ed84326b23365299c09c1574dadc335ea6 Mon Sep 17 00:00:00 2001 From: Anindya Roy Date: Thu, 23 Nov 2023 10:35:03 +0530 Subject: [PATCH 02/36] NXDRIVE-2860: Code Coverage - dummy change to py file --- nxdrive/engine/workers.py | 1 + 1 file changed, 1 insertion(+) diff --git a/nxdrive/engine/workers.py b/nxdrive/engine/workers.py index 62d8cb8b30..77e014fc3c 100644 --- a/nxdrive/engine/workers.py +++ b/nxdrive/engine/workers.py @@ -298,4 +298,5 @@ def _execute(self) -> None: sleep(1) def _poll(self) -> bool: + # . return True From de685f299c390fe5258652ad8c2a9e9de103d3a2 Mon Sep 17 00:00:00 2001 From: Anindya Roy Date: Thu, 23 Nov 2023 10:52:03 +0530 Subject: [PATCH 03/36] NXDRIVE-2860: Code Coverage - added one testcase in test_api.py --- tests/functional/test_api.py | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) diff --git a/tests/functional/test_api.py b/tests/functional/test_api.py index 32478be078..652481a17f 100644 --- a/tests/functional/test_api.py +++ b/tests/functional/test_api.py @@ -104,3 +104,30 @@ def func(*args): "001", "dummy_path", 100 ) assert returned_val + + +def test_open_local(manager_factory): + manager, engine = manager_factory() + manager.application = "" + + def mocked_open_authentication_dialog(): + return + + def mocked_hide_systray(*args): + return + + def func(*args): + return + + Mocked_App = namedtuple( + "app", + "manager, open_authentication_dialog, hide_systray", + defaults=(manager, mocked_open_authentication_dialog, mocked_hide_systray), + ) + app = Mocked_App() + drive_api = QMLDriveApi(app) + + with manager: + with patch.object(manager, "open_local_file", new=func): + returned_val = drive_api.open_local(None, "dummy_path") + assert not returned_val From c18102b24f1b3710fc4cd0eeac068b3f72dd149a Mon Sep 17 00:00:00 2001 From: Anindya Roy Date: Fri, 1 Dec 2023 12:54:00 +0530 Subject: [PATCH 04/36] NXDRIVE-2860: Code Coverage - added 14 testcases in test_api.py --- tests/functional/test_api.py | 416 +++++++++++++++++++++++++++++++++++ 1 file changed, 416 insertions(+) diff --git a/tests/functional/test_api.py b/tests/functional/test_api.py index 652481a17f..62728feb69 100644 --- a/tests/functional/test_api.py +++ b/tests/functional/test_api.py @@ -131,3 +131,419 @@ def func(*args): with patch.object(manager, "open_local_file", new=func): returned_val = drive_api.open_local(None, "dummy_path") assert not returned_val + + +def test_open_document(manager_factory): + manager, engine = manager_factory() + manager.application = "" + + def mocked_open_authentication_dialog(): + return + + def mocked_hide_systray(*args): + return + + def func(*args): + return True + + Mocked_App = namedtuple( + "app", + "manager, open_authentication_dialog, hide_systray", + defaults=(manager, mocked_open_authentication_dialog, mocked_hide_systray), + ) + app = Mocked_App() + drive_api = QMLDriveApi(app) + + with manager: + with patch.object(manager.engines, "get", new=func): + returned_val = drive_api.open_document("engine_uid", 1) + assert not returned_val + + +def test_open_remote_document(manager_factory): + manager, engine = manager_factory() + manager.application = "" + engine.uid = "dummy_uid" + + def mocked_open_authentication_dialog(): + return + + def mocked_hide_systray(*args): + return + + def mocked_open_remote(*args): + return + + def mocked_get_metadata_url(*args): + return + + engine.get_metadata_url = mocked_get_metadata_url # .__get__(engine) + + Mocked_App = namedtuple( + "app", + "manager, open_authentication_dialog, hide_systray", + defaults=(manager, mocked_open_authentication_dialog, mocked_hide_systray), + ) + app = Mocked_App() + drive_api = QMLDriveApi(app) + + with manager: + with patch.object(engine, "open_remote", new=mocked_open_remote): + returned_val = drive_api.open_remote_document( + "dummy_uid", "dummy_remote_ref", "dummy_remote_path" + ) + assert not returned_val + + +def test_get_remote_document_url(manager_factory): + manager, engine = manager_factory() + manager.application = "" + engine.uid = "dummy_uid" + + def mocked_open_authentication_dialog(): + return + + def mocked_hide_systray(*args): + return + + def mocked_open_remote(*args): + return + + def mocked_get_metadata_url(*args): + return + + engine.get_metadata_url = mocked_get_metadata_url # .__get__(engine) + + Mocked_App = namedtuple( + "app", + "manager, open_authentication_dialog, hide_systray", + defaults=(manager, mocked_open_authentication_dialog, mocked_hide_systray), + ) + app = Mocked_App() + drive_api = QMLDriveApi(app) + + with manager: + with patch.object(engine, "open_remote", new=mocked_open_remote): + returned_val = drive_api.get_remote_document_url( + "dummy_uid", "dummy_remote_ref" + ) + assert not returned_val + + +def test_open_remote(manager_factory): + manager, engine = manager_factory() + manager.application = "" + engine.uid = "dummy_uid" + + def mocked_open_authentication_dialog(): + return + + def mocked_hide_systray(*args): + return + + def mocked_open_edit(*args): + return + + def mocked_get_metadata_url(*args): + return + + engine.get_metadata_url = mocked_get_metadata_url # .__get__(engine) + + Mocked_App = namedtuple( + "app", + "manager, open_authentication_dialog, hide_systray", + defaults=(manager, mocked_open_authentication_dialog, mocked_hide_systray), + ) + app = Mocked_App() + drive_api = QMLDriveApi(app) + + with manager: + with patch.object(engine, "open_edit", new=mocked_open_edit): + returned_val = drive_api.open_remote( + "dummy_uid", "dummy_remote_ref", "dummy_remote_name" + ) + assert not returned_val + + +def test_ignore_pair(manager_factory): + manager, engine = manager_factory() + manager.application = "" + engine.uid = "dummy_uid" + + def mocked_open_authentication_dialog(): + return + + def mocked_hide_systray(*args): + return + + def mocked_ignore_pair(*args): + return + + def mocked_get_metadata_url(*args): + return + + engine.get_metadata_url = mocked_get_metadata_url # .__get__(engine) + + Mocked_App = namedtuple( + "app", + "manager, open_authentication_dialog, hide_systray", + defaults=(manager, mocked_open_authentication_dialog, mocked_hide_systray), + ) + app = Mocked_App() + drive_api = QMLDriveApi(app) + + with manager: + with patch.object(engine, "open_edit", new=mocked_ignore_pair): + returned_val = drive_api.ignore_pair( + "dummy_uid", "dummy_state_id", "dummy_reason" + ) + assert not returned_val + + +def test_retry_pair(manager_factory): + manager, engine = manager_factory() + manager.application = "" + engine.uid = "dummy_uid" + + def mocked_open_authentication_dialog(): + return + + def mocked_hide_systray(*args): + return + + def mocked_retry_pair(*args): + return + + def mocked_get_metadata_url(*args): + return + + engine.get_metadata_url = mocked_get_metadata_url # .__get__(engine) + + Mocked_App = namedtuple( + "app", + "manager, open_authentication_dialog, hide_systray", + defaults=(manager, mocked_open_authentication_dialog, mocked_hide_systray), + ) + app = Mocked_App() + drive_api = QMLDriveApi(app) + + with manager: + with patch.object(engine, "open_edit", new=mocked_retry_pair): + returned_val = drive_api.retry_pair("dummy_uid", "dummy_state_id") + assert not returned_val + + +def test_resolve_with_remote(manager_factory): + manager, engine = manager_factory() + manager.application = "" + engine.uid = "dummy_uid" + + def mocked_open_authentication_dialog(): + return + + def mocked_hide_systray(*args): + return + + def mocked_resolve_with_remote(*args): + return + + def mocked_get_metadata_url(*args): + return + + engine.get_metadata_url = mocked_get_metadata_url # .__get__(engine) + + Mocked_App = namedtuple( + "app", + "manager, open_authentication_dialog, hide_systray", + defaults=(manager, mocked_open_authentication_dialog, mocked_hide_systray), + ) + app = Mocked_App() + drive_api = QMLDriveApi(app) + + with manager: + with patch.object(engine, "open_edit", new=mocked_resolve_with_remote): + returned_val = drive_api.resolve_with_remote("dummy_uid", "dummy_state_id") + assert not returned_val + + +def test_resolve_with_local(manager_factory): + manager, engine = manager_factory() + manager.application = "" + engine.uid = "dummy_uid" + + def mocked_open_authentication_dialog(): + return + + def mocked_hide_systray(*args): + return + + def mocked_resolve_with_local(*args): + return + + def mocked_get_metadata_url(*args): + return + + engine.get_metadata_url = mocked_get_metadata_url # .__get__(engine) + + Mocked_App = namedtuple( + "app", + "manager, open_authentication_dialog, hide_systray", + defaults=(manager, mocked_open_authentication_dialog, mocked_hide_systray), + ) + app = Mocked_App() + drive_api = QMLDriveApi(app) + + with manager: + with patch.object(engine, "open_edit", new=mocked_resolve_with_local): + returned_val = drive_api.resolve_with_local("dummy_uid", "dummy_state_id") + assert not returned_val + + +def test_get_syncing_count(manager_factory): + manager, engine = manager_factory() + manager.application = "" + engine.uid = "dummy_uid" + + def mocked_open_authentication_dialog(): + return + + def mocked_hide_systray(*args): + return + + def mocked_get_syncing_count(*args): + return + + def mocked_get_metadata_url(*args): + return + + engine.get_metadata_url = mocked_get_metadata_url # .__get__(engine) + + Mocked_App = namedtuple( + "app", + "manager, open_authentication_dialog, hide_systray", + defaults=(manager, mocked_open_authentication_dialog, mocked_hide_systray), + ) + app = Mocked_App() + drive_api = QMLDriveApi(app) + + with manager: + with patch.object(engine, "open_edit", new=mocked_get_syncing_count): + returned_val = drive_api.get_syncing_count("dummy_uid") + assert type(returned_val) is int + + +def test_is_paused(manager_factory): + manager, engine = manager_factory() + manager.application = "" + + def mocked_open_authentication_dialog(): + return + + def mocked_hide_systray(*args): + return + + def mocked_is_paused(*args): + return + + def mocked_get_metadata_url(*args): + return + + engine.get_metadata_url = mocked_get_metadata_url # .__get__(engine) + manager.is_paused = mocked_is_paused + + Mocked_App = namedtuple( + "app", + "manager, open_authentication_dialog, hide_systray", + defaults=(manager, mocked_open_authentication_dialog, mocked_hide_systray), + ) + app = Mocked_App() + drive_api = QMLDriveApi(app) + + with manager: + # with patch.object(manager, "is_paused", new=mocked_is_paused): + returned_val = drive_api.is_paused() + assert returned_val is mocked_is_paused + + +def test_suspend(manager_factory): + manager, engine = manager_factory() + manager.application = "" + + def mocked_open_authentication_dialog(): + return + + def mocked_hide_systray(*args): + return + + def mocked_resolve_with_local(*args): + return + + def mocked_get_metadata_url(*args): + return + + def mocked_resume(*args): + return + + def mocked_suspend(*args): + return + + engine.get_metadata_url = mocked_get_metadata_url # .__get__(engine) + manager.resume = mocked_resume + manager.suspend = mocked_suspend + + def func(*args): + return True + + Mocked_App = namedtuple( + "app", + "manager, open_authentication_dialog, hide_systray", + defaults=(manager, mocked_open_authentication_dialog, mocked_hide_systray), + ) + app = Mocked_App() + drive_api = QMLDriveApi(app) + + with manager: + with patch.object(engine, "open_edit", new=mocked_resolve_with_local): + returned_val = drive_api.suspend(True) + assert not returned_val + returned_val = drive_api.suspend(False) + assert not returned_val + + +def test_restart_needed(manager_factory): + manager, engine = manager_factory() + manager.application = "" + + def mocked_open_authentication_dialog(): + return + + def mocked_hide_systray(*args): + return + + def mocked_resolve_with_local(*args): + return + + def mocked_get_metadata_url(*args): + return + + def mocked_restart_needed(*args): + return + + engine.get_metadata_url = mocked_get_metadata_url # .__get__(engine) + manager.restart_needed = mocked_restart_needed + + def func(*args): + return True + + Mocked_App = namedtuple( + "app", + "manager, open_authentication_dialog, hide_systray", + defaults=(manager, mocked_open_authentication_dialog, mocked_hide_systray), + ) + app = Mocked_App() + drive_api = QMLDriveApi(app) + + with manager: + with patch.object(engine, "open_edit", new=mocked_resolve_with_local): + returned_val = drive_api.restart_needed() + assert returned_val is mocked_restart_needed From e638dfa9422e56dfe2d4b526b013ba9b70f69d09 Mon Sep 17 00:00:00 2001 From: Anindya Roy Date: Fri, 1 Dec 2023 13:02:23 +0530 Subject: [PATCH 05/36] NXDRIVE-2860: Code Coverage - fixed test_open_document --- tests/functional/test_api.py | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/tests/functional/test_api.py b/tests/functional/test_api.py index 62728feb69..ea09cf0021 100644 --- a/tests/functional/test_api.py +++ b/tests/functional/test_api.py @@ -136,6 +136,7 @@ def func(*args): def test_open_document(manager_factory): manager, engine = manager_factory() manager.application = "" + engine.uid = "dummy_uid" def mocked_open_authentication_dialog(): return @@ -143,9 +144,6 @@ def mocked_open_authentication_dialog(): def mocked_hide_systray(*args): return - def func(*args): - return True - Mocked_App = namedtuple( "app", "manager, open_authentication_dialog, hide_systray", @@ -155,9 +153,8 @@ def func(*args): drive_api = QMLDriveApi(app) with manager: - with patch.object(manager.engines, "get", new=func): - returned_val = drive_api.open_document("engine_uid", 1) - assert not returned_val + returned_val = drive_api.open_document("engine_uid", 1) + assert not returned_val def test_open_remote_document(manager_factory): From 81da5cee32b762136d6fb16c8d8ce60176154420 Mon Sep 17 00:00:00 2001 From: Anindya Roy Date: Thu, 7 Dec 2023 12:28:29 +0530 Subject: [PATCH 06/36] NXDRIVE-2860: Code Coverage - added test cases to test_api.py --- tests/functional/test_api.py | 147 +++++++++++++++++++++++++++++++++++ 1 file changed, 147 insertions(+) diff --git a/tests/functional/test_api.py b/tests/functional/test_api.py index ea09cf0021..87585b9f29 100644 --- a/tests/functional/test_api.py +++ b/tests/functional/test_api.py @@ -544,3 +544,150 @@ def func(*args): with patch.object(engine, "open_edit", new=mocked_resolve_with_local): returned_val = drive_api.restart_needed() assert returned_val is mocked_restart_needed + + +def test_has_invalid_credentials(manager_factory): + manager, engine = manager_factory() + manager.application = "" + engine.uid = "dummy_uid" + + def mocked_open_authentication_dialog(): + return + + def mocked_hide_systray(*args): + return + + def mocked_has_invalid_credentials(*args): + return + + def mocked_get_metadata_url(*args): + return + + def mocked_restart_needed(*args): + return + + engine.get_metadata_url = mocked_get_metadata_url # .__get__(engine) + manager.restart_needed = mocked_restart_needed + + Mocked_App = namedtuple( + "app", + "manager, open_authentication_dialog, hide_systray", + defaults=(manager, mocked_open_authentication_dialog, mocked_hide_systray), + ) + app = Mocked_App() + drive_api = QMLDriveApi(app) + + with manager: + with patch.object(engine, "open_edit", new=mocked_has_invalid_credentials): + returned_val = drive_api.has_invalid_credentials("dummy_uid") + assert not returned_val + + +def test_get_deletion_behavior(manager_factory): + manager, engine = manager_factory() + manager.application = "" + + def mocked_open_authentication_dialog(): + return + + def mocked_hide_systray(*args): + return + + def mocked_get_metadata_url(*args): + return + + def mocked_restart_needed(*args): + return + + engine.get_metadata_url = mocked_get_metadata_url # .__get__(engine) + manager.restart_needed = mocked_restart_needed + + Mocked_App = namedtuple( + "app", + "manager, open_authentication_dialog, hide_systray", + defaults=(manager, mocked_open_authentication_dialog, mocked_hide_systray), + ) + app = Mocked_App() + drive_api = QMLDriveApi(app) + + with manager: + returned_val = drive_api.get_deletion_behavior() + assert returned_val + + +def test_set_deletion_behavior(manager_factory): + manager, engine = manager_factory() + manager.application = "" + + def mocked_open_authentication_dialog(): + return + + def mocked_hide_systray(*args): + return + + def mocked_get_metadata_url(*args): + return + + def mocked_restart_needed(*args): + return + + def mocked_set_config(*args): + return True + + engine.get_metadata_url = mocked_get_metadata_url # .__get__(engine) + manager.restart_needed = mocked_restart_needed + + Mocked_App = namedtuple( + "app", + "manager, open_authentication_dialog, hide_systray", + defaults=(manager, mocked_open_authentication_dialog, mocked_hide_systray), + ) + app = Mocked_App() + drive_api = QMLDriveApi(app) + + with manager: + with patch.object(manager, "set_config", new=mocked_set_config): + returned_val = drive_api.set_deletion_behavior("deletion_behavior") + assert not returned_val + + +def test_set_proxy_settings(manager_factory): + manager, engine = manager_factory() + manager.application = "" + engine.uid = "dummy_uid" + + def mocked_open_authentication_dialog(): + return + + def mocked_hide_systray(*args): + return + + def mocked_get_proxy(*args): + return "dummy_proxy" + + def mocked_set_proxy(*args): + return + + def mocked_get_metadata_url(*args): + return + + def mocked_restart_needed(*args): + return + + engine.get_metadata_url = mocked_get_metadata_url # .__get__(engine) + manager.restart_needed = mocked_restart_needed + + Mocked_App = namedtuple( + "app", + "manager, open_authentication_dialog, hide_systray", + defaults=(manager, mocked_open_authentication_dialog, mocked_hide_systray), + ) + app = Mocked_App() + drive_api = QMLDriveApi(app) + + with manager: + with patch.object(manager, "set_proxy", new=mocked_set_proxy): + returned_val = drive_api.set_proxy_settings( + "Manual", "dummy_url", "dummy_pac_url" + ) + assert returned_val From 7ce2f232078dde36decc4ebe545a51294ba946f8 Mon Sep 17 00:00:00 2001 From: Anindya Roy Date: Fri, 8 Dec 2023 10:53:18 +0530 Subject: [PATCH 07/36] NXDRIVE-2860: Code Coverage - added test cases to test_api.py 8/12 --- tests/functional/test_api.py | 95 ++++++++++++++++++++++++++++++++++++ 1 file changed, 95 insertions(+) diff --git a/tests/functional/test_api.py b/tests/functional/test_api.py index 87585b9f29..03991137f6 100644 --- a/tests/functional/test_api.py +++ b/tests/functional/test_api.py @@ -691,3 +691,98 @@ def mocked_restart_needed(*args): "Manual", "dummy_url", "dummy_pac_url" ) assert returned_val + + +def test_open_direct_transfer(manager_factory): + manager, engine = manager_factory() + manager.application = "" + engine.uid = "dummy_uid" + + def mocked_open_authentication_dialog(): + return + + def mocked_hide_systray(*args): + return + + def mocked_get_metadata_url(*args): + return + + def mocked_restart_needed(*args): + return + + def mocked_refresh_direct_transfer_items(*args): + return + + def mocked_refresh_active_sessions_items(*args): + return + + def mocked_refresh_completed_sessions_items(*args): + return + + def mocked_show_direct_transfer_window(*args): + return + + engine.get_metadata_url = mocked_get_metadata_url # .__get__(engine) + manager.restart_needed = mocked_restart_needed + + Mocked_App = namedtuple( + "app", + "manager, open_authentication_dialog, hide_systray, refresh_direct_transfer_items, \ + refresh_active_sessions_items, refresh_completed_sessions_items, show_direct_transfer_window,", + defaults=( + manager, + mocked_open_authentication_dialog, + mocked_hide_systray, + mocked_refresh_direct_transfer_items, + mocked_refresh_active_sessions_items, + mocked_refresh_completed_sessions_items, + mocked_show_direct_transfer_window, + ), + ) + app = Mocked_App() + drive_api = QMLDriveApi(app) + + with manager: + returned_val = drive_api.open_direct_transfer("dummy_uid") + assert not returned_val + + +def test_open_server_folders(manager_factory): + manager, engine = manager_factory() + manager.application = "" + engine.uid = "dummy_uid" + + def mocked_open_authentication_dialog(): + return + + def mocked_hide_systray(*args): + return + + def mocked_get_metadata_url(*args): + return + + def mocked_restart_needed(*args): + return + + def mocked_show_server_folders(*args): + return + + engine.get_metadata_url = mocked_get_metadata_url # .__get__(engine) + manager.restart_needed = mocked_restart_needed + + Mocked_App = namedtuple( + "app", + "manager, open_authentication_dialog, hide_systray, show_server_folders", + defaults=( + manager, + mocked_open_authentication_dialog, + mocked_hide_systray, + mocked_show_server_folders, + ), + ) + app = Mocked_App() + drive_api = QMLDriveApi(app) + + with manager: + returned_val = drive_api.open_server_folders("dummy_uid") + assert not returned_val From bc97f772e0b6783cdab17bc00faf8350a3aa870e Mon Sep 17 00:00:00 2001 From: Anindya Roy Date: Fri, 8 Dec 2023 11:14:11 +0530 Subject: [PATCH 08/36] NXDRIVE-2860: Code Coverage - added test cases to test_api.py 8/12 - 2 --- tests/functional/test_api.py | 37 ++++++++++++++++++++++++++++++++++++ 1 file changed, 37 insertions(+) diff --git a/tests/functional/test_api.py b/tests/functional/test_api.py index 03991137f6..46fe6afbc6 100644 --- a/tests/functional/test_api.py +++ b/tests/functional/test_api.py @@ -786,3 +786,40 @@ def mocked_show_server_folders(*args): with manager: returned_val = drive_api.open_server_folders("dummy_uid") assert not returned_val + + +def test_generate_csv(manager_factory): + manager, engine = manager_factory() + manager.application = "" + engine.uid = "dummy_uid" + + def mocked_open_authentication_dialog(): + return + + def mocked_hide_systray(*args): + return + + def mocked_get_metadata_url(*args): + return + + def mocked_restart_needed(*args): + return + + def mocked_generate_csv(*args): + return + + engine.get_metadata_url = mocked_get_metadata_url # .__get__(engine) + manager.restart_needed = mocked_restart_needed + + Mocked_App = namedtuple( + "app", + "manager, open_authentication_dialog, hide_systray", + defaults=(manager, mocked_open_authentication_dialog, mocked_hide_systray), + ) + app = Mocked_App() + drive_api = QMLDriveApi(app) + + with manager: + with patch.object(manager, "generate_csv", new=mocked_generate_csv): + returned_val = drive_api.generate_csv("mocked_session_id", "dummy_uid") + assert not returned_val From 5d4a8c37c48df261a64e797df52e93ea184b762a Mon Sep 17 00:00:00 2001 From: Anindya Roy Date: Tue, 12 Dec 2023 12:00:24 +0530 Subject: [PATCH 09/36] NXDRIVE-2860: Code Coverage - added test cases to test_api.py 12/12 --- tests/functional/test_api.py | 349 +++++++++++++++++++++++++++++++++++ 1 file changed, 349 insertions(+) diff --git a/tests/functional/test_api.py b/tests/functional/test_api.py index 46fe6afbc6..461a6781b4 100644 --- a/tests/functional/test_api.py +++ b/tests/functional/test_api.py @@ -823,3 +823,352 @@ def mocked_generate_csv(*args): with patch.object(manager, "generate_csv", new=mocked_generate_csv): returned_val = drive_api.generate_csv("mocked_session_id", "dummy_uid") assert not returned_val + + +def test_handle_token(manager_factory): + manager, engine = manager_factory() + manager.application = "" + + def mocked_open_authentication_dialog(): + return + + def mocked_hide_systray(*args): + return + + def mocked_get_metadata_url(*args): + return + + def mocked_restart_needed(*args): + return + + engine.get_metadata_url = mocked_get_metadata_url # .__get__(engine) + manager.restart_needed = mocked_restart_needed + + Mocked_App = namedtuple( + "app", + "manager, open_authentication_dialog, hide_systray", + defaults=(manager, mocked_open_authentication_dialog, mocked_hide_systray), + ) + app = Mocked_App() + drive_api = QMLDriveApi(app) + + with manager: + returned_val = drive_api.handle_token(None, "dummy_uid") + assert not returned_val + + +def test_set_server_ui(manager_factory): + manager, engine = manager_factory() + manager.application = "" + engine.uid = "dummy_uid" + + def mocked_open_authentication_dialog(): + return + + def mocked_hide_systray(*args): + return + + def mocked_get_metadata_url(*args): + return + + def mocked_restart_needed(*args): + return + + def mocked_set_ui(*args): + return + + engine.set_ui = mocked_set_ui + + engine.get_metadata_url = mocked_get_metadata_url # .__get__(engine) + manager.restart_needed = mocked_restart_needed + + Mocked_App = namedtuple( + "app", + "manager, open_authentication_dialog, hide_systray", + defaults=(manager, mocked_open_authentication_dialog, mocked_hide_systray), + ) + app = Mocked_App() + drive_api = QMLDriveApi(app) + + with manager: + returned_val = drive_api.set_server_ui("dummy_uid", "dummy_server_uid") + assert not returned_val + + +def test_filters_dialog(manager_factory): + manager, engine = manager_factory() + manager.application = "" + engine.uid = "dummy_uid" + + def mocked_open_authentication_dialog(): + return + + def mocked_hide_systray(*args): + return + + def mocked_show_filters(*args): + return + + def mocked_get_metadata_url(*args): + return + + def mocked_restart_needed(*args): + return + + def mocked_set_ui(*args): + return + + engine.set_ui = mocked_set_ui + + engine.get_metadata_url = mocked_get_metadata_url # .__get__(engine) + manager.restart_needed = mocked_restart_needed + + Mocked_App = namedtuple( + "app", + "manager, open_authentication_dialog, hide_systray, show_filters", + defaults=( + manager, + mocked_open_authentication_dialog, + mocked_hide_systray, + mocked_show_filters, + ), + ) + app = Mocked_App() + drive_api = QMLDriveApi(app) + + with manager: + returned_val = drive_api.filters_dialog("dummy_uid") + assert not returned_val + + +def test_get_proxy_settings(manager_factory): + manager, engine = manager_factory() + manager.application = "" + + def mocked_open_authentication_dialog(): + return + + def mocked_hide_systray(*args): + return + + def mocked_show_filters(*args): + return + + manager.proxy = "mocked_proxy" + + Mocked_App = namedtuple( + "app", + "manager, open_authentication_dialog, hide_systray, show_filters", + defaults=( + manager, + mocked_open_authentication_dialog, + mocked_hide_systray, + mocked_show_filters, + ), + ) + app = Mocked_App() + drive_api = QMLDriveApi(app) + + with manager: + returned_val = drive_api.get_proxy_settings() + assert returned_val + + +def test_unbind_server(manager_factory): + manager, engine = manager_factory() + manager.application = "" + + def mocked_open_authentication_dialog(): + return + + def mocked_hide_systray(*args): + return + + def mocked_show_filters(*args): + return + + def mocked_unbind_engine(*args, **kwargs): + return + + manager.unbind_engine = mocked_unbind_engine + + Mocked_App = namedtuple( + "app", + "manager, open_authentication_dialog, hide_systray, show_filters", + defaults=( + manager, + mocked_open_authentication_dialog, + mocked_hide_systray, + mocked_show_filters, + ), + ) + app = Mocked_App() + drive_api = QMLDriveApi(app) + + with manager: + returned_val = drive_api.unbind_server("dummy_uid", True) + assert not returned_val + + +def test_default_server_url_value(manager_factory): + manager, engine = manager_factory() + manager.application = "" + + def mocked_open_authentication_dialog(): + return + + def mocked_hide_systray(*args): + return + + def mocked_show_filters(*args): + return + + Mocked_App = namedtuple( + "app", + "manager, open_authentication_dialog, hide_systray, show_filters", + defaults=( + manager, + mocked_open_authentication_dialog, + mocked_hide_systray, + mocked_show_filters, + ), + ) + app = Mocked_App() + drive_api = QMLDriveApi(app) + + with manager: + returned_val = drive_api.default_server_url_value() + assert not returned_val + + +def test_get_update_url(manager_factory): + manager, engine = manager_factory() + manager.application = "" + + def mocked_open_authentication_dialog(): + return + + def mocked_hide_systray(*args): + return + + def mocked_show_filters(*args): + return + + Mocked_App = namedtuple( + "app", + "manager, open_authentication_dialog, hide_systray, show_filters", + defaults=( + manager, + mocked_open_authentication_dialog, + mocked_hide_systray, + mocked_show_filters, + ), + ) + app = Mocked_App() + drive_api = QMLDriveApi(app) + + with manager: + returned_val = drive_api.get_update_url() + assert returned_val + + +def test_show_settings(manager_factory): + manager, engine = manager_factory() + manager.application = "" + + def mocked_open_authentication_dialog(): + return + + def mocked_hide_systray(*args): + return + + def mocked_show_filters(*args): + return + + def mocked_show_settings(*args): + return + + Mocked_App = namedtuple( + "app", + "manager, open_authentication_dialog, hide_systray, show_filters, show_settings", + defaults=( + manager, + mocked_open_authentication_dialog, + mocked_hide_systray, + mocked_show_filters, + mocked_show_settings, + ), + ) + app = Mocked_App() + drive_api = QMLDriveApi(app) + + with manager: + returned_val = drive_api.show_settings("dummy_section") + assert not returned_val + + +def test_get_version(manager_factory): + manager, engine = manager_factory() + manager.application = "" + + def mocked_open_authentication_dialog(): + return + + def mocked_hide_systray(*args): + return + + def mocked_show_filters(*args): + return + + Mocked_App = namedtuple( + "app", + "manager, open_authentication_dialog, hide_systray, show_filters", + defaults=( + manager, + mocked_open_authentication_dialog, + mocked_hide_systray, + mocked_show_filters, + ), + ) + app = Mocked_App() + drive_api = QMLDriveApi(app) + + with manager: + returned_val = drive_api.get_version() + assert returned_val + + +def test_quit(manager_factory): + manager, engine = manager_factory() + manager.application = "" + engine.uid = "dummy_uid" + + def mocked_open_authentication_dialog(): + return + + def mocked_hide_systray(*args): + return + + def mocked_show_filters(*args): + return + + def mocked_quit(*args): + return + + Mocked_App = namedtuple( + "app", + "manager, open_authentication_dialog, hide_systray, show_filters, quit", + defaults=( + manager, + mocked_open_authentication_dialog, + mocked_hide_systray, + mocked_show_filters, + mocked_quit, + ), + ) + app = Mocked_App() + drive_api = QMLDriveApi(app) + + with manager: + returned_val = drive_api.quit() + assert not returned_val From 8467af4544c7b9e9bcfe9aae59942596b2ed3c48 Mon Sep 17 00:00:00 2001 From: Anindya Roy Date: Tue, 12 Dec 2023 12:13:49 +0530 Subject: [PATCH 10/36] NXDRIVE-2860: Code Coverage - added test cases to test_api.py 12/12 - 2 --- tests/functional/test_api.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/functional/test_api.py b/tests/functional/test_api.py index 461a6781b4..29e6ea4e98 100644 --- a/tests/functional/test_api.py +++ b/tests/functional/test_api.py @@ -1038,7 +1038,7 @@ def mocked_show_filters(*args): with manager: returned_val = drive_api.default_server_url_value() - assert not returned_val + assert returned_val def test_get_update_url(manager_factory): From 0d0327898b158e6ddf32cd2707bdc44ce581e525 Mon Sep 17 00:00:00 2001 From: Anindya Roy Date: Wed, 13 Dec 2023 14:47:59 +0530 Subject: [PATCH 11/36] NXDRIVE-2860: Code Coverage - added test cases to test_remote_client 13/12 --- tests/functional/test_remote_client.py | 29 +++++++++++++++++++++++--- 1 file changed, 26 insertions(+), 3 deletions(-) diff --git a/tests/functional/test_remote_client.py b/tests/functional/test_remote_client.py index 57034d6cef..f7724a26a7 100644 --- a/tests/functional/test_remote_client.py +++ b/tests/functional/test_remote_client.py @@ -3,7 +3,7 @@ import pytest from nuxeo.models import Document -from nxdrive.engine.activity import Action, DownloadAction +from nxdrive.engine.activity import Action, DownloadAction, UploadAction from nxdrive.metrics.constants import GLOBAL_METRICS from nxdrive.objects import RemoteFileInfo, SubTypeEnricher from nxdrive.options import Options @@ -252,13 +252,36 @@ def get_current_action_(*args, **kwargs): obj = DownloadAction("path", 1) return obj + def get_current_action__(*args, **kwargs): + obj = UploadAction("path", 1) + return obj + + def set_transfer_progress_(*args, **kwargs): + return + def get_download_(*args, **kwargs): - return False + mocked_download_obj_ = Mock() + mocked_download_obj_.progress = 80 + mocked_download_obj_.status = 2 + return mocked_download_obj_ + + def get_upload_(*args, **kwargs): + mocked_upload_obj_ = Mock() + mocked_upload_obj_.progress = 80 + mocked_upload_obj_.status = 2 + return mocked_upload_obj_ obj1_ = Mock() returned_val = None with manager: with patch.object(Action, "get_current_action", new=get_current_action_): with patch.object(remote.dao, "get_download", new=get_download_): + with patch.object( + remote.dao, "set_transfer_progress", new=set_transfer_progress_ + ): + returned_val = remote.transfer_end_callback(obj1_) + assert not returned_val + with patch.object(Action, "get_current_action", new=get_current_action__): + with patch.object(remote.dao, "get_download", new=get_upload_): returned_val = remote.transfer_end_callback(obj1_) - assert not returned_val + assert not returned_val From 75d3797ba51a07acc7b89ef76e3e72c66be4e115 Mon Sep 17 00:00:00 2001 From: Anindya Roy Date: Wed, 13 Dec 2023 15:10:14 +0530 Subject: [PATCH 12/36] NXDRIVE-2860: Code Coverage - added test cases to test_remote_client 13/12 -- 2 --- tests/functional/test_remote_client.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/functional/test_remote_client.py b/tests/functional/test_remote_client.py index f7724a26a7..f665a8747f 100644 --- a/tests/functional/test_remote_client.py +++ b/tests/functional/test_remote_client.py @@ -280,7 +280,8 @@ def get_upload_(*args, **kwargs): remote.dao, "set_transfer_progress", new=set_transfer_progress_ ): returned_val = remote.transfer_end_callback(obj1_) - assert not returned_val + with pytest.raises(Exception) as err: + assert err with patch.object(Action, "get_current_action", new=get_current_action__): with patch.object(remote.dao, "get_download", new=get_upload_): returned_val = remote.transfer_end_callback(obj1_) From cf5be717b0f437862dc39ab48aa1435302db3169 Mon Sep 17 00:00:00 2001 From: Anindya Roy Date: Wed, 13 Dec 2023 15:24:32 +0530 Subject: [PATCH 13/36] NXDRIVE-2860: Code Coverage - added test cases to test_remote_client 13/12 -- 3 --- tests/functional/test_remote_client.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/functional/test_remote_client.py b/tests/functional/test_remote_client.py index f665a8747f..7033d7241c 100644 --- a/tests/functional/test_remote_client.py +++ b/tests/functional/test_remote_client.py @@ -261,12 +261,14 @@ def set_transfer_progress_(*args, **kwargs): def get_download_(*args, **kwargs): mocked_download_obj_ = Mock() + mocked_download_obj_.name = "mocked-download-obj" mocked_download_obj_.progress = 80 mocked_download_obj_.status = 2 return mocked_download_obj_ def get_upload_(*args, **kwargs): mocked_upload_obj_ = Mock() + mocked_upload_obj_.name = "mocked-upload-obj" mocked_upload_obj_.progress = 80 mocked_upload_obj_.status = 2 return mocked_upload_obj_ From 6a43599178cf959b9d1255c6cb2b7020dc28ba92 Mon Sep 17 00:00:00 2001 From: Anindya Roy Date: Wed, 13 Dec 2023 16:13:01 +0530 Subject: [PATCH 14/36] NXDRIVE-2860: Code Coverage - added test cases to test_remote_client 13/12 -- 4 --- tests/functional/test_remote_client.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/tests/functional/test_remote_client.py b/tests/functional/test_remote_client.py index 7033d7241c..ba7188a887 100644 --- a/tests/functional/test_remote_client.py +++ b/tests/functional/test_remote_client.py @@ -274,17 +274,17 @@ def get_upload_(*args, **kwargs): return mocked_upload_obj_ obj1_ = Mock() - returned_val = None with manager: with patch.object(Action, "get_current_action", new=get_current_action_): with patch.object(remote.dao, "get_download", new=get_download_): with patch.object( remote.dao, "set_transfer_progress", new=set_transfer_progress_ ): - returned_val = remote.transfer_end_callback(obj1_) with pytest.raises(Exception) as err: + remote.transfer_end_callback(obj1_) assert err with patch.object(Action, "get_current_action", new=get_current_action__): - with patch.object(remote.dao, "get_download", new=get_upload_): - returned_val = remote.transfer_end_callback(obj1_) - assert not returned_val + with patch.object(remote.dao, "get_upload", new=get_upload_): + with pytest.raises(Exception) as err: + remote.transfer_end_callback(obj1_) + assert err From fcd87de1968337d517d282f7da209c4fe6bb30bd Mon Sep 17 00:00:00 2001 From: Anindya Roy Date: Wed, 13 Dec 2023 16:34:18 +0530 Subject: [PATCH 15/36] NXDRIVE-2860: Code Coverage - added test cases to test_remote_client 13/12 -- 5 --- tests/functional/test_remote_client.py | 46 ++++++++++++++++++++------ 1 file changed, 35 insertions(+), 11 deletions(-) diff --git a/tests/functional/test_remote_client.py b/tests/functional/test_remote_client.py index ba7188a887..6a707bf820 100644 --- a/tests/functional/test_remote_client.py +++ b/tests/functional/test_remote_client.py @@ -12,6 +12,30 @@ from .. import env +def get_current_action_download(*args, **kwargs): + obj = DownloadAction("path", 1) + obj.transferred_chunks = 1 + obj.chunk_transfer_end_time_ns = 300000000000 + obj.chunk_transfer_start_time_ns = 1000000000 + obj.chunk_size = 10 + obj.transferred_chunks = 2 + return obj + + +def get_current_action_upload(*args, **kwargs): + obj = UploadAction("path", 1) + obj.transferred_chunks = 1 + obj.chunk_transfer_end_time_ns = 3000000000 + obj.chunk_transfer_start_time_ns = 1000000000 + obj.chunk_size = 10 + obj.transferred_chunks = 2 + return obj + + +def get_current_no_action(*args, **kwargs): + return None + + @pytest.mark.parametrize( "username", [ @@ -240,7 +264,10 @@ def test_transfer_start_callback(manager_factory): obj1_ = Mock() with manager: - returned_val = remote.transfer_start_callback(obj1_) + with patch.object( + Action, "get_current_action", new=get_current_action_download + ): + returned_val = remote.transfer_start_callback(obj1_) assert not returned_val @@ -248,14 +275,6 @@ def test_transfer_end_callback(manager_factory): manager, engine = manager_factory() remote = engine.remote - def get_current_action_(*args, **kwargs): - obj = DownloadAction("path", 1) - return obj - - def get_current_action__(*args, **kwargs): - obj = UploadAction("path", 1) - return obj - def set_transfer_progress_(*args, **kwargs): return @@ -275,7 +294,9 @@ def get_upload_(*args, **kwargs): obj1_ = Mock() with manager: - with patch.object(Action, "get_current_action", new=get_current_action_): + with patch.object( + Action, "get_current_action", new=get_current_action_download + ): with patch.object(remote.dao, "get_download", new=get_download_): with patch.object( remote.dao, "set_transfer_progress", new=set_transfer_progress_ @@ -283,8 +304,11 @@ def get_upload_(*args, **kwargs): with pytest.raises(Exception) as err: remote.transfer_end_callback(obj1_) assert err - with patch.object(Action, "get_current_action", new=get_current_action__): + with patch.object(Action, "get_current_action", new=get_current_action_upload): with patch.object(remote.dao, "get_upload", new=get_upload_): with pytest.raises(Exception) as err: remote.transfer_end_callback(obj1_) assert err + with patch.object(Action, "get_current_action", new=get_current_no_action): + remote.transfer_end_callback(obj1_) + assert not remote.transfer_end_callback(obj1_) From 61eb924bd6f44aa8fcda51248cadda3cdb7cdd77 Mon Sep 17 00:00:00 2001 From: Anindya Roy Date: Wed, 27 Dec 2023 09:56:13 +0530 Subject: [PATCH 16/36] NXDRIVE-2860: Code Coverage - added test cases to test_remote_client 27/12 -- 1 --- tests/functional/test_remote_client.py | 43 ++++++++++++++++++++++++++ 1 file changed, 43 insertions(+) diff --git a/tests/functional/test_remote_client.py b/tests/functional/test_remote_client.py index 6a707bf820..dcae294199 100644 --- a/tests/functional/test_remote_client.py +++ b/tests/functional/test_remote_client.py @@ -312,3 +312,46 @@ def get_upload_(*args, **kwargs): with patch.object(Action, "get_current_action", new=get_current_no_action): remote.transfer_end_callback(obj1_) assert not remote.transfer_end_callback(obj1_) + + +def test_download(manager_factory): + manager, engine = manager_factory() + remote = engine.remote + + def mocked_request(*args, **kwargs): + obj_ = Mock() + obj_.content = "content" + obj_.headers = {"Content-Length": ((Options.tmp_file_limit * 1024 * 1024) + 1)} + return obj_ + + def stat_(): + obj = Mock() + obj.st_size = 100 + return obj + + dummy_file_out = Mock() + dummy_file_out.stat = stat_ + dummy_file_out.name = "dummy_file_out" + + dummy_file_path = env.WS_DIR + + from pathlib import Path + + dummy_file_path = Path(dummy_file_path) + dummy_file_out = Path(dummy_file_path) + + with manager: + with patch.object(remote.client, "request", new=mocked_request): + returned_val = remote.download( + "dummy_url", dummy_file_path, "", "dummy_digest" + ) + assert returned_val == "content" + + with patch.object(remote.client, "request", new=mocked_request): + with patch.object(remote.dao, "get_download", return_value=None): + with patch.object(remote.dao, "save_download", return_value=None): + with patch.object(remote, "operations", return_value=None): + returned_val = remote.download( + "dummy_url", dummy_file_path, dummy_file_out, "dummy_digest" + ) + assert returned_val From 9e2da5bd3eb86dc1d84e34d66d282304790c97e0 Mon Sep 17 00:00:00 2001 From: Anindya Roy Date: Wed, 3 Jan 2024 10:15:07 +0530 Subject: [PATCH 17/36] NXDRIVE-2860: Code Coverage - added test cases to test_remote_client 03/01 -- 1 --- tests/functional/test_remote_client.py | 164 ++++++++++++++++++++++++- 1 file changed, 162 insertions(+), 2 deletions(-) diff --git a/tests/functional/test_remote_client.py b/tests/functional/test_remote_client.py index dcae294199..23fe830a7c 100644 --- a/tests/functional/test_remote_client.py +++ b/tests/functional/test_remote_client.py @@ -1,3 +1,4 @@ +from pathlib import Path from unittest.mock import Mock, patch import pytest @@ -335,8 +336,6 @@ def stat_(): dummy_file_path = env.WS_DIR - from pathlib import Path - dummy_file_path = Path(dummy_file_path) dummy_file_out = Path(dummy_file_path) @@ -355,3 +354,164 @@ def stat_(): "dummy_url", dummy_file_path, dummy_file_out, "dummy_digest" ) assert returned_val + + +def test_reload_global_headers(manager_factory): + manager, engine = manager_factory() + remote = engine.remote + headers = Mock() + + def mocked_update(*args, **kwargs): + return + + headers.update = mocked_update + + with patch.object(headers, "update", return_value=None): + assert not remote.reload_global_headers() + + +def test_escape(manager_factory): + manager, engine = manager_factory() + remote = engine.remote + assert remote.escape("/Users/user/Nuxeo'") + + +def test_revoke_token(manager_factory): + manager, engine = manager_factory() + remote = engine.remote + + assert not remote.revoke_token() + + +def test_update_token(manager_factory): + manager, engine = manager_factory() + remote = engine.remote + + assert not remote.update_token("dummy_token") + + +def test_check_integrity_simple(manager_factory): + manager, engine = manager_factory() + remote = engine.remote + + dummy_file_path = env.WS_DIR + dummy_file_path = Path(dummy_file_path) + + assert not remote.check_integrity_simple("dummy_digest", dummy_file_path) + + +def test_upload(manager_factory): + manager, engine = manager_factory() + remote = engine.remote + + import os + + local_path = Path(os.path.realpath(__file__)) + + with pytest.raises(Exception): + remote.upload(local_path) + + +def test_upload_folder(manager_factory): + manager, engine = manager_factory() + remote = engine.remote + + remote.operations = Mock() + + def mocked_execute(*args, **kwargs): + return {"res": 0} + + remote.operations.execute = mocked_execute + + dummy_file_path = env.WS_DIR + + assert remote.upload_folder(dummy_file_path, {"params": 0}, headers={}) + + +def test_make_folder(manager_factory): + manager, engine = manager_factory() + remote = engine.remote + + with patch.object( + remote, + "execute", + return_value={"id": 0, "parentId": 0, "path": "/", "name": "dummy"}, + ): + assert remote.make_folder("dummy_parent", "dummy_name") + + +def test_delete(manager_factory): + manager, engine = manager_factory() + remote = engine.remote + + with patch.object( + remote, + "execute", + return_value={"id": 0, "parentId": 0, "path": "/", "name": "dummy"}, + ): + assert not remote.delete("dummy_fs_item_id") + + +def test_rename(manager_factory): + manager, engine = manager_factory() + remote = engine.remote + + with patch.object( + remote, + "execute", + return_value={"id": 0, "parentId": 0, "path": "/", "name": "dummy"}, + ): + assert remote.rename("dummy_fs_item_id", "dummy_parent_fs_item_id") + + +def test_undelete(manager_factory): + manager, engine = manager_factory() + remote = engine.remote + + remote.documents = Mock() + + def mocked_execute(*args, **kwargs): + return True + + remote.documents.untrash = mocked_execute + + dummy_file_path = env.WS_DIR + + assert not remote.undelete(dummy_file_path) + + +def test_move(manager_factory): + manager, engine = manager_factory() + remote = engine.remote + + dummy_file_path = env.WS_DIR + + with patch.object( + remote, + "execute", + return_value={"id": 0, "parentId": 0, "path": "/", "name": "dummy"}, + ): + assert remote.rename(dummy_file_path, dummy_file_path) + + +def test_move2(manager_factory): + manager, engine = manager_factory() + remote = engine.remote + + remote.documents = Mock() + + def mocked_move(*args, **kwargs): + return + + remote.documents.move = mocked_move + + dummy_file_path = env.WS_DIR + + dummy_file_path = str(env.WS_DIR) + "#" + + with patch.object( + remote, + "execute", + return_value={"id": 0, "parentId": 0, "path": "/", "name": "dummy"}, + ): + assert not remote.move2(dummy_file_path, dummy_file_path, "dummy_name") From 0911d58398d0f1cb00136c90d38eb76d04bafb75 Mon Sep 17 00:00:00 2001 From: Anindya Roy Date: Thu, 4 Jan 2024 10:59:53 +0530 Subject: [PATCH 18/36] NXDRIVE-2860: Code Coverage - added test cases to test_remote_client 04/01 added old remote_client --- tests/functional/test_remote_client_old.py | 525 +++++++++++++++++++++ 1 file changed, 525 insertions(+) create mode 100644 tests/functional/test_remote_client_old.py diff --git a/tests/functional/test_remote_client_old.py b/tests/functional/test_remote_client_old.py new file mode 100644 index 0000000000..7dbcb7d965 --- /dev/null +++ b/tests/functional/test_remote_client_old.py @@ -0,0 +1,525 @@ +import hashlib +import operator +from pathlib import Path +from shutil import copyfile +from tempfile import mkdtemp + +import pytest + +from nxdrive.exceptions import NotFound + +from . import LocalTest, make_tmp_file +from .conftest import FS_ITEM_ID_PREFIX, OneUserTest, TwoUsersTest + + +class TestRemoteFileSystemClient(OneUserTest): + def setUp(self): + # Bind the test workspace as sync root for user 1 + remote_doc = self.remote_document_client_1 + remote = self.remote_1 + remote_doc.register_as_root(self.workspace) + + # Fetch the id of the workspace folder item + info = remote.get_filesystem_root_info() + self.workspace_id = remote.get_fs_children(info.uid)[0].uid + + # + # Test the API common with the local client API + # + + def test_get_fs_info(self): + remote = self.remote_1 + + # Check file info + fs_item_id = remote.make_file( + self.workspace_id, "Document 1.txt", content=b"Content of doc 1." + ).uid + info = remote.get_fs_info(fs_item_id) + assert info is not None + assert info.name == "Document 1.txt" + assert info.uid == fs_item_id + assert info.parent_uid == self.workspace_id + assert not info.folderish + if info.last_contributor: + assert info.last_contributor == self.user_1 + digest_algorithm = info.digest_algorithm + assert digest_algorithm == "md5" + digest = self._get_digest(digest_algorithm, b"Content of doc 1.") + assert info.digest == digest + file_uid = fs_item_id.rsplit("#", 1)[1] + # NXP-17827: nxbigile has been replace to nxfile, keep handling both + url = f"/default/{file_uid}/blobholder:0/Document%201.txt" + cond = info.download_url in (f"nxbigfile{url}", f"nxfile{url}") + assert cond + + # Check folder info + fs_item_id = remote.make_folder(self.workspace_id, "Folder 1").uid + info = remote.get_fs_info(fs_item_id) + assert info is not None + assert info.name == "Folder 1" + assert info.uid == fs_item_id + assert info.parent_uid == self.workspace_id + assert info.folderish + if info.last_contributor: + assert info.last_contributor == self.user_1 + assert info.digest_algorithm is None + assert info.digest is None + assert info.download_url is None + + # Check non existing file info + fs_item_id = FS_ITEM_ID_PREFIX + "fakeId" + with pytest.raises(NotFound): + remote.get_fs_info(fs_item_id) + + def test_get_content(self): + remote = self.remote_1 + remote_doc = self.remote_document_client_1 + + # Check file with content + fs_item_id = remote.make_file( + self.workspace_id, "Document 1.txt", content=b"Content of doc 1." + ).uid + assert remote.get_content(fs_item_id) == b"Content of doc 1." + + # Check file without content + doc_uid = remote_doc.make_file_with_no_blob(self.workspace, "Document 2.txt") + fs_item_id = FS_ITEM_ID_PREFIX + doc_uid + with pytest.raises(NotFound): + remote.get_content(fs_item_id) + + def test_stream_content(self): + remote = self.remote_1 + + fs_item_id = remote.make_file( + self.workspace_id, "Document 1.txt", content=b"Content of doc 1." + ).uid + file_path = self.local_test_folder_1 / "Document 1.txt" + file_out = Path(mkdtemp()) / file_path.name + tmp_file = remote.stream_content( + fs_item_id, file_path, file_out, engine_uid=self.engine_1.uid + ) + assert tmp_file.exists() + assert tmp_file.name == "Document 1.txt" + assert tmp_file.read_bytes() == b"Content of doc 1." + + def test_get_fs_children(self): + remote = self.remote_1 + + # Create documents + folder_1_id = remote.make_folder(self.workspace_id, "Folder 1").uid + folder_2_id = remote.make_folder(self.workspace_id, "Folder 2").uid + file_1_id = remote.make_file( + self.workspace_id, "File 1", content=b"Content of file 1." + ).uid + file_2_id = remote.make_file( + folder_1_id, "File 2", content=b"Content of file 2." + ).uid + + # Check workspace children + workspace_children = remote.get_fs_children(self.workspace_id) + assert workspace_children is not None + assert len(workspace_children) == 3 + assert workspace_children[0].uid == folder_1_id + assert workspace_children[0].name == "Folder 1" + assert workspace_children[0].folderish + assert workspace_children[1].uid == folder_2_id + assert workspace_children[1].name == "Folder 2" + assert workspace_children[1].folderish + assert workspace_children[2].uid == file_1_id + assert workspace_children[2].name == "File 1" + assert not workspace_children[2].folderish + + # Check folder_1 children + folder_1_children = remote.get_fs_children(folder_1_id) + assert folder_1_children is not None + assert len(folder_1_children) == 1 + assert folder_1_children[0].uid == file_2_id + assert folder_1_children[0].name == "File 2" + + def test_scroll_descendants(self): + remote = self.remote_1 + + # Create documents + folder_1 = remote.make_folder(self.workspace_id, "Folder 1").uid + folder_2 = remote.make_folder(self.workspace_id, "Folder 2").uid + file_1 = remote.make_file( + self.workspace_id, "File 1.txt", content=b"Content of file 1." + ).uid + file_2 = remote.make_file( + folder_1, "File 2.txt", content=b"Content of file 2." + ).uid + + # Wait for ES completion + self.wait() + + # Check workspace descendants in one breath, ordered by remote path + scroll_res = remote.scroll_descendants(self.workspace_id, None) + assert isinstance(scroll_res, dict) + assert "scroll_id" in scroll_res + descendants = sorted(scroll_res["descendants"], key=operator.attrgetter("name")) + assert len(descendants) == 4 + + # File 1.txt + assert descendants[0].uid == file_1 + assert descendants[0].name == "File 1.txt" + assert not descendants[0].folderish + # File 2.txt + assert descendants[1].name == "File 2.txt" + assert not descendants[1].folderish + assert descendants[1].uid == file_2 + # Folder 1 + assert descendants[2].uid == folder_1 + assert descendants[2].name == "Folder 1" + assert descendants[2].folderish + # Folder 2 + assert descendants[3].uid == folder_2 + assert descendants[3].name == "Folder 2" + assert descendants[3].folderish + + # Check workspace descendants in several steps, ordered by remote path + descendants = [] + scroll_id = None + while True: + scroll_res = remote.scroll_descendants( + self.workspace_id, scroll_id, batch_size=2 + ) + assert isinstance(scroll_res, dict) + scroll_id = scroll_res["scroll_id"] + partial_descendants = scroll_res["descendants"] + if not partial_descendants: + break + descendants.extend(partial_descendants) + descendants = sorted(descendants, key=operator.attrgetter("name")) + assert len(descendants) == 4 + + # File 1.txt + assert descendants[0].uid == file_1 + assert descendants[0].name == "File 1.txt" + assert not descendants[0].folderish + # File 2.txt + assert descendants[1].name == "File 2.txt" + assert not descendants[1].folderish + assert descendants[1].uid == file_2 + # Folder 1 + assert descendants[2].uid == folder_1 + assert descendants[2].name == "Folder 1" + assert descendants[2].folderish + # Folder 2 + assert descendants[3].uid == folder_2 + assert descendants[3].name == "Folder 2" + assert descendants[3].folderish + + def test_make_folder(self): + remote = self.remote_1 + + fs_item_info = remote.make_folder(self.workspace_id, "My new folder") + assert fs_item_info is not None + assert fs_item_info.name == "My new folder" + assert fs_item_info.folderish + assert fs_item_info.digest_algorithm is None + assert fs_item_info.digest is None + assert fs_item_info.download_url is None + + def test_make_file(self): + remote = self.remote_1 + + # Check File document creation + fs_item_info = remote.make_file( + self.workspace_id, "My new file.odt", content=b"Content of my new file." + ) + assert fs_item_info is not None + assert fs_item_info.name == "My new file.odt" + assert not fs_item_info.folderish + digest_algorithm = fs_item_info.digest_algorithm + assert digest_algorithm == "md5" + digest = self._get_digest(digest_algorithm, b"Content of my new file.") + assert fs_item_info.digest == digest + + # Check Note document creation + fs_item_info = remote.make_file( + self.workspace_id, "My new note.txt", content=b"Content of my new note." + ) + assert fs_item_info is not None + assert fs_item_info.name == "My new note.txt" + assert not fs_item_info.folderish + digest_algorithm = fs_item_info.digest_algorithm + assert digest_algorithm == "md5" + digest = self._get_digest(digest_algorithm, b"Content of my new note.") + assert fs_item_info.digest == digest + + def test_make_file_custom_encoding(self): + remote = self.remote_1 + + # Create content encoded in utf-8 and cp1252 + unicode_content = "\xe9" # e acute + utf8_encoded = unicode_content.encode("utf-8") + utf8_digest = hashlib.md5(utf8_encoded).hexdigest() + cp1252_encoded = unicode_content.encode("cp1252") + + # Make files with this content + utf8_fs_id = remote.make_file( + self.workspace_id, "My utf-8 file.txt", content=utf8_encoded + ).uid + cp1252_fs_id = remote.make_file( + self.workspace_id, "My cp1252 file.txt", content=cp1252_encoded + ).uid + + # Check content + utf8_content = remote.get_content(utf8_fs_id) + assert utf8_content == utf8_encoded + cp1252_content = remote.get_content(cp1252_fs_id) + assert cp1252_content == utf8_encoded + + # Check digest + utf8_info = remote.get_fs_info(utf8_fs_id) + assert utf8_info.digest == utf8_digest + cp1252_info = remote.get_fs_info(cp1252_fs_id) + assert cp1252_info.digest == utf8_digest + + def test_update_content(self): + remote = self.remote_1 + + # Create file + fs_item_id = remote.make_file( + self.workspace_id, "Document 1.txt", content=b"Content of doc 1." + ).uid + + # Check file update + remote.update_content(fs_item_id, b"Updated content of doc 1.") + assert remote.get_content(fs_item_id) == b"Updated content of doc 1." + + def test_delete(self): + remote = self.remote_1 + + # Create file + fs_item_id = remote.make_file( + self.workspace_id, "Document 1.txt", content=b"Content of doc 1." + ).uid + assert remote.fs_exists(fs_item_id) + + # Delete file + remote.delete(fs_item_id) + assert not remote.fs_exists(fs_item_id) + + def test_exists(self): + remote = self.remote_1 + remote_doc = self.remote_document_client_1 + + # Check existing file system item + fs_item_id = remote.make_file( + self.workspace_id, "Document 1.txt", content=b"Content of doc 1." + ).uid + assert remote.fs_exists(fs_item_id) + + # Check non existing file system item (non existing document) + fs_item_id = FS_ITEM_ID_PREFIX + "fakeId" + assert not remote.fs_exists(fs_item_id) + + # Check non existing file system item (document without content) + doc_uid = remote_doc.make_file_with_no_blob(self.workspace, "Document 2.txt") + fs_item_id = FS_ITEM_ID_PREFIX + doc_uid + assert not remote.fs_exists(fs_item_id) + + # + # Test the API specific to the remote file system client + # + + def test_get_fs_item(self): + remote = self.remote_1 + + # Check file item + fs_item_id = remote.make_file( + self.workspace_id, "Document 1.txt", content=b"Content of doc 1." + ).uid + fs_item = remote.get_fs_item(fs_item_id) + assert fs_item is not None + assert fs_item["name"] == "Document 1.txt" + assert fs_item["id"] == fs_item_id + assert not fs_item["folder"] + + # Check file item using parent id + fs_item = remote.get_fs_item(fs_item_id, parent_fs_item_id=self.workspace_id) + assert fs_item is not None + assert fs_item["name"] == "Document 1.txt" + assert fs_item["id"] == fs_item_id + assert fs_item["parentId"] == self.workspace_id + + # Check folder item + fs_item_id = remote.make_folder(self.workspace_id, "Folder 1").uid + fs_item = remote.get_fs_item(fs_item_id) + assert fs_item is not None + assert fs_item["name"] == "Folder 1" + assert fs_item["id"] == fs_item_id + assert fs_item["folder"] + + # Check non existing file system item + fs_item_id = FS_ITEM_ID_PREFIX + "fakeId" + assert remote.get_fs_item(fs_item_id) is None + + def test_streaming_upload(self): + remote = self.remote_1 + + # Create a document by streaming a text file + file_path = make_tmp_file(remote.upload_tmp_dir, b"Some content.") + try: + fs_item_info = remote.stream_file( + self.workspace_id, file_path, filename="My streamed file.txt" + ) + finally: + file_path.unlink() + fs_item_id = fs_item_info.uid + assert fs_item_info.name == "My streamed file.txt" + assert remote.get_content(fs_item_id) == b"Some content." + + # Update a document by streaming a new text file + file_path = make_tmp_file(remote.upload_tmp_dir, b"Other content.") + try: + fs_item_info = remote.stream_update( + fs_item_id, file_path, filename="My updated file.txt" + ) + finally: + file_path.unlink() + assert fs_item_info.uid == fs_item_id + assert fs_item_info.name == "My updated file.txt" + assert remote.get_content(fs_item_id) == b"Other content." + + # Create a document by streaming a binary file + file_path = self.upload_tmp_dir / "testFile.pdf" + copyfile(self.location / "resources" / "files" / "testFile.pdf", file_path) + fs_item_info = remote.stream_file(self.workspace_id, file_path) + local_client = LocalTest(self.upload_tmp_dir) + assert fs_item_info.name == "testFile.pdf" + assert ( + fs_item_info.digest == local_client.get_info("/testFile.pdf").get_digest() + ) + + def test_mime_type_doc_type_association(self): + remote = self.remote_1 + remote_doc = self.remote_document_client_1 + + # Upload a PDF file, should create a File document + file_path = self.upload_tmp_dir / "testFile.pdf" + copyfile(self.location / "resources" / "files" / "testFile.pdf", file_path) + fs_item_info = remote.stream_file(self.workspace_id, file_path) + fs_item_id = fs_item_info.uid + doc_uid = fs_item_id.rsplit("#", 1)[1] + doc_type = remote_doc.get_info(doc_uid).doc_type + assert doc_type == "File" + + # Upload a JPG file, should create a Picture document + file_path = self.upload_tmp_dir / "cat.jpg" + copyfile(self.location / "resources" / "files" / "cat.jpg", file_path) + fs_item_info = remote.stream_file(self.workspace_id, file_path) + fs_item_id = fs_item_info.uid + doc_uid = fs_item_id.rsplit("#", 1)[1] + doc_type = remote_doc.get_info(doc_uid).doc_type + assert doc_type == "Picture" + + def test_unregister_nested_roots(self): + # Check that registering a parent folder of an existing root + # automatically unregister sub folders to avoid synchronization + # inconsistencies + remote = self.remote_document_client_1 + + # By default no root is synchronized + remote.unregister_as_root(self.workspace) + self.wait() + assert not remote.get_roots() + + folder = remote.make_folder(self.workspace, "Folder") + sub_folder_1 = remote.make_folder(folder, "Sub Folder 1") + sub_folder_2 = remote.make_folder(folder, "Sub Folder 2") + + # Register the sub folders as roots + remote.register_as_root(sub_folder_1) + remote.register_as_root(sub_folder_2) + assert len(remote.get_roots()) == 2 + + # Register the parent folder as root + remote.register_as_root(folder) + roots = remote.get_roots() + assert len(roots) == 1 + assert roots[0].uid == folder + + # Unregister the parent folder + remote.unregister_as_root(folder) + assert not remote.get_roots() + + def test_lock_unlock(self): + remote = self.remote_document_client_1 + doc_id = remote.make_file( + self.workspace, "TestLocking.txt", content=b"File content" + ) + + status = remote.is_locked(doc_id) + assert not status + remote.lock(doc_id) + assert remote.is_locked(doc_id) + + remote.unlock(doc_id) + assert not remote.is_locked(doc_id) + + @staticmethod + def _get_digest(algorithm: str, content: bytes) -> str: + hasher = getattr(hashlib, algorithm) + if hasher is None: + raise RuntimeError(f"Unknown digest algorithm: {algorithm}") + return hasher(content).hexdigest() + + +class TestRemoteFileSystemClient2(TwoUsersTest): + def setUp(self): + # Bind the test workspace as sync root for user 1 + remote_doc = self.remote_document_client_1 + remote = self.remote_1 + remote_doc.register_as_root(self.workspace) + + # Fetch the id of the workspace folder item + info = remote.get_filesystem_root_info() + self.workspace_id = remote.get_fs_children(info.uid)[0].uid + + def test_modification_flags_locked_document(self): + remote = self.remote_1 + fs_item_id = remote.make_file( + self.workspace_id, "Document 1.txt", content=b"Content of doc 1." + ).uid + + # Check flags for a document that isn't locked + info = remote.get_fs_info(fs_item_id) + assert info.can_rename + assert info.can_update + assert info.can_delete + assert info.lock_owner is None + assert info.lock_created is None + + # Check flags for a document locked by the current user + doc_uid = fs_item_id.rsplit("#", 1)[1] + remote.lock(doc_uid) + info = remote.get_fs_info(fs_item_id) + assert info.can_rename + assert info.can_update + assert info.can_delete + lock_info_available = remote.get_fs_item(fs_item_id).get("lockInfo") is not None + if lock_info_available: + assert info.lock_owner == self.user_1 + assert info.lock_created is not None + remote.unlock(doc_uid) + + # Check flags for a document locked by another user + self.remote_2.lock(doc_uid) + info = remote.get_fs_info(fs_item_id) + assert not info.can_rename + assert not info.can_update + assert not info.can_delete + if lock_info_available: + assert info.lock_owner == self.user_2 + assert info.lock_created is not None + + # Check flags for a document unlocked by another user + self.remote_2.unlock(doc_uid) + info = remote.get_fs_info(fs_item_id) + assert info.can_rename + assert info.can_update + assert info.can_delete + assert info.lock_owner is None + assert info.lock_created is None From e22ccfa79323669227df62a186786c6697e8216e Mon Sep 17 00:00:00 2001 From: Anindya Roy Date: Thu, 4 Jan 2024 11:40:01 +0530 Subject: [PATCH 19/36] NXDRIVE-2860: Code Coverage - added test cases to test_remote_client 04/01 added old remote_client -002 --- tests/functional/__init__.py | 641 +++++++++++++++++ tests/functional/conftest.py | 861 ++++++++++++++++++++++- tests/functional/local_client_darwin.py | 67 ++ tests/functional/local_client_windows.py | 97 +++ 4 files changed, 1657 insertions(+), 9 deletions(-) create mode 100644 tests/functional/local_client_darwin.py create mode 100644 tests/functional/local_client_windows.py diff --git a/tests/functional/__init__.py b/tests/functional/__init__.py index e69de29bb2..b77401d4e4 100644 --- a/tests/functional/__init__.py +++ b/tests/functional/__init__.py @@ -0,0 +1,641 @@ +import os +import shutil +import tempfile +from pathlib import Path +from typing import Any, Dict, List, Tuple, Union + +import nuxeo.client +import nuxeo.constants +import nuxeo.operations +from nuxeo.models import Blob, FileBlob + +from nxdrive.client.local import LocalClient +from nxdrive.client.remote_client import Remote +from nxdrive.objects import NuxeoDocumentInfo, RemoteFileInfo +from nxdrive.options import Options +from nxdrive.utils import force_encode, safe_filename + +from .. import env + + +def patch_nxdrive_objects(): + """Some feature are not needed or are better disabled when testing.""" + + # Need to do this one first because importing Manager will already import + # nxdrive.dao.utils and so changing the behavior of save_backup() + # will not work. + import nxdrive.dao.utils + + nxdrive.dao.utils.save_backup = lambda *args: True + + from nxdrive.poll_workers import ServerOptionsUpdater + + @property + def enable(self) -> bool: + return False + + ServerOptionsUpdater.enable = enable + + from nxdrive.gui.application import Application + + Application.init_nxdrive_listener = lambda *args: None + + from nxdrive.engine.queue_manager import QueueManager + from nxdrive.manager import Manager + + def dispose_all(self) -> None: + for engine in self.engines.copy().values(): + engine.dispose_db() + self.dispose_db() + + def unbind_all(self) -> None: + if not self.engines: + self.load() + for engine in self._engine_definitions: + self.unbind_engine(engine.uid) + + def requeue_errors(self) -> None: + with self._error_lock: + for doc_pair in self._on_error_queue.values(): + doc_pair.error_next_try = 0 + + Manager.dispose_all = dispose_all + Manager.unbind_all = unbind_all + QueueManager.requeue_errors = requeue_errors + + +patch_nxdrive_objects() + + +def make_tmp_file(folder: Path, content: bytes) -> Path: + """Create a temporary file with the given content + for streaming upload purposes. + + Make sure that you remove the temporary file with os.remove() + when done with it. + """ + import tempfile + + fd, path = tempfile.mkstemp(suffix="-nxdrive-file-to-upload", dir=folder) + path = Path(path) + try: + path.write_bytes(force_encode(content)) + finally: + os.close(fd) + return path + + +# Operations cache +OPS_CACHE = None +SERVER_INFO = None + +RawPath = Union[Path, str] + + +def force_path(ref: RawPath) -> Path: + if not isinstance(ref, Path): + ref = Path(ref.lstrip("/")) + return ref + + +class LocalTest(LocalClient): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + def copy(self, srcref: RawPath, dstref: RawPath) -> None: + """Make a copy of the file (with xattr included).""" + remote_id = self.get_remote_id(srcref) + shutil.copy2(self.abspath(srcref), self.abspath(dstref)) + self.set_remote_id(dstref, remote_id) + + def get_content(self, ref: RawPath) -> bytes: + ref = force_path(ref) + return self.abspath(ref).read_bytes() + + def has_folder_icon(self, *args: Any, **kwargs: Any) -> bool: + return True + + def set_folder_icon(self, *args: Any, **kwargs: Any) -> None: + return + + def abspath(self, ref: RawPath) -> Path: + ref = force_path(ref) + return super().abspath(ref) + + def delete_final(self, ref: RawPath) -> None: + ref = force_path(ref) + return super().delete_final(ref) + + def exists(self, ref: RawPath) -> bool: + ref = force_path(ref) + return super().exists(ref) + + def get_children_info(self, ref: RawPath): + ref = force_path(ref) + return super().get_children_info(ref) + + def get_info(self, ref: RawPath): + ref = force_path(ref) + return super().get_info(ref) + + def get_path(self, abspath: RawPath): + abspath = force_path(abspath) + return super().get_path(abspath) + + def rename(self, ref: RawPath, to_name: str) -> Path: + ref = force_path(ref) + return super().rename(ref, to_name).filepath + + def update_content( + self, ref: RawPath, content: bytes, xattr_names: Tuple[str, ...] = ("ndrive",) + ) -> None: + ref = force_path(ref) + xattrs = {name: self.get_remote_id(ref, name=name) for name in xattr_names} + + self.abspath(ref).write_bytes(content) + + for name, value in xattrs.items(): + if value is not None: + self.set_remote_id(ref, value, name=name) + + def make_folder(self, parent: RawPath, *args: Any, **kwargs: Any) -> Path: + parent = force_path(parent) + return super().make_folder(parent, *args, **kwargs) + + def make_file(self, parent: RawPath, name: str, content: bytes = None) -> Path: + parent = force_path(parent) + os_path, name = self._abspath_deduped(parent, name) + locker = self.unlock_ref(parent, unlock_parent=False) + try: + if content: + os_path.write_bytes(content) + else: + os_path.touch() + return parent / name + finally: + self.lock_ref(parent, locker) + + def get_new_file(self, parent: RawPath, name: str) -> Tuple[Path, Path, str]: + parent = force_path(parent) + return super().get_new_file(parent, name) + + def move(self, ref: RawPath, new_parent_ref: RawPath, name: str = None): + ref = force_path(ref) + new_parent_ref = force_path(new_parent_ref) + return super().move(ref, new_parent_ref, name=name) + + def delete(self, ref: RawPath) -> None: + ref = force_path(ref) + return super().delete(ref) + + +class RemoteBase(Remote): + def __init__(self, *args, upload_tmp_dir: str = None, **kwargs): + super().__init__(*args, **kwargs) + + self.upload_tmp_dir = ( + upload_tmp_dir if upload_tmp_dir is not None else tempfile.gettempdir() + ) + + # Save bandwidth by caching operations details + global OPS_CACHE + if not OPS_CACHE: + OPS_CACHE = self.operations.operations + nuxeo.operations.API.ops = OPS_CACHE + global SERVER_INFO + if not SERVER_INFO: + SERVER_INFO = self.client.server_info() + nuxeo.client.NuxeoClient._server_info = SERVER_INFO + + def fs_exists(self, fs_item_id: str) -> bool: + return self.execute(command="NuxeoDrive.FileSystemItemExists", id=fs_item_id) + + def get_children(self, ref: str) -> Dict[str, Any]: + return self.execute(command="Document.GetChildren", input_obj=f"doc:{ref}") + + def get_children_info(self, ref: str) -> List[NuxeoDocumentInfo]: + ref = self.escape(self.check_ref(ref)) + types = "', '".join( + ("Note", "Workspace", "Picture", env.DOCTYPE_FILE, env.DOCTYPE_FOLDERISH) + ) + + query = ( + "SELECT * FROM Document" + f" WHERE ecm:parentId = '{ref}'" + f" AND ecm:primaryType IN ('{types}')" + f" {self._get_trash_condition()}" + " AND ecm:isVersion = 0" + " ORDER BY dc:title, dc:created" + ) + entries = self.query(query, page_size=1000)["entries"] + return self._filtered_results(entries) + + def get_content(self, fs_item_id: str, **kwargs: Any) -> Path: + """Download and return the binary content of a file system item + + Beware that the content is loaded in memory. + + Raises NotFound if file system item with id fs_item_id + cannot be found + """ + fs_item_info = self.get_fs_info(fs_item_id) + url = self.client.host + fs_item_info.download_url + + # Placeholders + file_path = file_out = "" + + return self.download(url, file_path, file_out, fs_item_info.digest, **kwargs) + + def get_roots(self) -> List[NuxeoDocumentInfo]: + res = self.execute(command="NuxeoDrive.GetRoots") + return self._filtered_results(res["entries"], fetch_parent_uid=False) + + def make_file( + self, parent_id: str, name: str, content: bytes = None + ) -> RemoteFileInfo: + """ + Create a document with the given name and content. + if content is None, creates a temporary file from the content then streams it. + """ + if content is not None: + file_path = make_tmp_file(self.upload_tmp_dir, content) + else: + file_path = name + try: + fs_item = self.upload( + file_path, + command="NuxeoDrive.CreateFile", + filename=name, + parentId=parent_id, + ) + return RemoteFileInfo.from_dict(fs_item) + finally: + if content is not None: + file_path.unlink() + + def update_content( + self, ref: str, content: bytes, filename: str = None + ) -> RemoteFileInfo: + """Update a document with the given content + + Creates a temporary file from the content then streams it. + """ + file_path = make_tmp_file(self.upload_tmp_dir, content) + try: + if filename is None: + filename = self.get_fs_info(ref).name + fs_item = self.upload( + file_path, + command="NuxeoDrive.UpdateFile", + filename=filename, + id=ref, + ) + return RemoteFileInfo.from_dict(fs_item) + finally: + file_path.unlink() + + def _filtered_results( + self, entries: List[Dict], parent_uid: str = None, fetch_parent_uid: bool = True + ) -> List[NuxeoDocumentInfo]: + # Filter out filenames that would be ignored by the file system client + # so as to be consistent. + filtered = [] + for entry in entries: + entry.update( + {"root": self.base_folder_ref, "repository": self.client.repository} + ) + if parent_uid is None and fetch_parent_uid: + parent_uid = self.fetch(os.path.dirname(entry["path"]))["uid"] + + info = NuxeoDocumentInfo.from_dict(entry, parent_uid=parent_uid) + name = info.name.lower() + if name.endswith(Options.ignored_suffixes) or name.startswith( + Options.ignored_prefixes + ): + continue + + filtered.append(info) + + return filtered + + +class RemoteTest(RemoteBase): + _download_remote_error = None + _upload_remote_error = None + _server_error = None + raise_on = None + + def download(self, *args, **kwargs): + self._raise(self._download_remote_error, *args, **kwargs) + return super().download(*args, **kwargs) + + def upload(self, *args, **kwargs): + self._raise(self._upload_remote_error, *args, **kwargs) + return super().upload(*args, **kwargs) + + def execute(self, *args, **kwargs): + self._raise(self._server_error, *args, **kwargs) + return super().execute(*args, **kwargs) + + def make_download_raise(self, error): + """Make next calls to do_get() raise the provided exception.""" + self._download_remote_error = error + + def make_upload_raise(self, error): + """Make next calls to upload() raise the provided exception.""" + self._upload_remote_error = error + + def make_server_call_raise(self, error): + """Make next calls to the server raise the provided exception.""" + self._server_error = error + + def _raise(self, exc, *args, **kwargs): + """Make the next calls raise `exc` if `raise_on()` allowed it.""" + + if exc: + if not callable(self.raise_on): + raise exc + if self.raise_on(*args, **kwargs): + raise exc + + def reset_errors(self): + """Remove custom errors.""" + + self._download_remote_error = None + self._upload_remote_error = None + self._server_error = None + self.raise_on = None + + def activate_profile(self, profile): + self.execute(command="NuxeoDrive.SetActiveFactories", profile=profile) + + def deactivate_profile(self, profile): + self.execute( + command="NuxeoDrive.SetActiveFactories", profile=profile, enable=False + ) + + def mass_import(self, target_path, nb_nodes): + """Used in test_volume.py only. + + *nb_nodes* is the minimum number of documents to create on the server. + A tradeoff has been made for performance over an exact number. + Randomness and threading inside, it is OK for us. + + To get the real documents number, use a specific NXQL query + (see test_remote_scan() from test_volume.py). + """ + tx_timeout = 3600 + url = "site/randomImporter/run" + params = { + "targetPath": target_path, + "batchSize": 50, + "nbThreads": 12, + "interactive": "true", + "fileSizeKB": 10, + "nbNodes": nb_nodes, + "nonUniform": "true", + "transactionTimeout": tx_timeout, + } + headers = {"Nuxeo-Transaction-Timeout": str(tx_timeout)} + + self.client.request( + "GET", url, params=params, headers=headers, timeout=tx_timeout + ) + + def wait_for_async_and_es_indexing(self): + """Used in test_volume.py only.""" + + tx_timeout = 3600 + headers = {"Nuxeo-Transaction-Timeout": str(tx_timeout)} + self.execute( + command="Elasticsearch.WaitForIndexing", + timeout=tx_timeout, + headers=headers, + timeoutSecond=tx_timeout, + refresh=True, + ) + + def result_set_query(self, query): + return self.execute(command="Repository.ResultSetQuery", query=query) + + def wait(self): + self.execute(command="NuxeoDrive.WaitForElasticsearchCompletion") + + +class DocRemote(RemoteTest): + def create( + self, + ref: str, + doc_type: str, + name: str = None, + properties: Dict[str, str] = None, + ): + """ + Create a document of type *doc_type*. + The operation will not use the FileManager. + """ + name = safe_filename(name) + return self.execute( + command="Document.Create", + input_obj=f"doc:{ref}", + type=doc_type, + name=name, + properties=properties, + ) + + def make_folder( + self, parent: str, name: str, doc_type: str = env.DOCTYPE_FOLDERISH + ) -> str: + """ + Create a folderish document of the given *doc_type* with the given *name*. + The operation will not use the FileManager. + """ + parent = self.check_ref(parent) + doc = self.create(parent, doc_type, name=name, properties={"dc:title": name}) + return doc["uid"] + + def make_file_with_blob( + self, parent: str, name: str, content: bytes, doc_type: str = env.DOCTYPE_FILE + ) -> str: + """ + Create a non-folderish document of the given *doc_type* with the given *name* + and attach a blob with *contents*. + The operation will not use the FileManager. + """ + doc_id = self.make_file_with_no_blob(parent, name, doc_type=doc_type) + self.attach_blob(doc_id, content, name) + return doc_id + + def make_file_with_no_blob( + self, parent: str, name: str, doc_type: str = env.DOCTYPE_FILE + ) -> str: + """ + Create a document of the given *doc_type* with the given *name*. + The operation will not use the FileManager. + """ + parent = self.check_ref(parent) + doc = self.create(parent, doc_type, name=name, properties={"dc:title": name}) + return doc["uid"] + + def make_file( + self, + parent: str, + name: str, + content: bytes = None, + file_path: Path = None, + ) -> str: + """ + Create a document with the given *name* and *content* using the FileManager. + If *file_path* points to a local file, it will be used instead of *content*. + + Note: if *content* is "seen" as plain text by the FileManager, the created document + will be a Note. It this is not what you want, use make_file_with_blob(). + """ + tmp_created = file_path is None + if not file_path: + file_path = make_tmp_file(self.upload_tmp_dir, content) + + try: + file_blob = FileBlob(str(file_path)) + file_blob.name = safe_filename(name) + blob = self.uploads.batch().upload(file_blob) + return self.file_manager_import(self.check_ref(parent), blob) + finally: + if tmp_created: + file_path.unlink() + + def file_manager_import(self, parent: str, blob: Blob) -> str: + """ + Use the FileManager to import and create a document in *parent* + based on the given already uploaded *blob*. + """ + op = self.operations.new("FileManager.Import") + op.context = {"currentDocument": parent} + op.input_obj = blob + return op.execute()["uid"] + + def make_file_in_user_workspace( + self, content: bytes, filename: str + ) -> RemoteFileInfo: + """Stream the given content as a document in the user workspace""" + file_path = make_tmp_file(self.upload_tmp_dir, content) + try: + return self.upload( + file_path, + command="UserWorkspace.CreateDocumentFromBlob", + filename=filename, + ) + finally: + file_path.unlink() + + def stream_file(self, parent: str, file_path: Path, **kwargs) -> NuxeoDocumentInfo: + """Create a document by streaming the file with the given path""" + ref = self.make_file(parent, file_path.name, file_path=file_path) + return self.get_info(ref) + + def attach_blob(self, ref: str, content: bytes, filename: str): + file_path = make_tmp_file(self.upload_tmp_dir, content) + try: + return self.upload( + file_path, command="Blob.Attach", filename=filename, document=ref + ) + finally: + file_path.unlink() + + def get_content(self, ref: str) -> bytes: + """ + Download and return the binary content of a document + Beware that the content is loaded in memory. + """ + if not isinstance(ref, NuxeoDocumentInfo): + ref = self.check_ref(ref) + return self.get_blob(ref) + + def update_content(self, ref: str, content: bytes, filename: str = None) -> None: + """Update a document with the given content.""" + if filename is None: + filename = self.get_info(ref).name + self.attach_blob(self.check_ref(ref), content, filename) + + def move(self, ref: str, target: str, name: str = None): + return self.documents.move( + self.check_ref(ref), self.check_ref(target), name=name + ) + + def create_proxy(self, ref: str, output_ref: str): + kwargs = {"Destination Path": output_ref} + return self.execute( + command="Document.CreateLiveProxy", + input_obj=self.check_ref(ref), + **kwargs, + ) + + def update(self, ref: str, properties=None): + return self.execute( + command="Document.Update", input_obj=f"doc:{ref}", properties=properties + ) + + def copy(self, ref: str, target: str, name: str = None): + return self.execute( + command="Document.Copy", + input_obj=f"doc:{self.check_ref(ref)}", + target=self.check_ref(target), + name=name, + ) + + def delete(self, ref: str, use_trash: bool = True): + meth = "trash" if use_trash else "delete" + return getattr(self.documents, meth)(self.check_ref(ref)) + + def delete_content(self, ref: str, xpath: str = None): + return self.delete_blob(self.check_ref(ref), xpath=xpath) + + def delete_blob(self, ref: str, xpath: str = None): + return self.execute(command="Blob.Remove", input_obj=f"doc:{ref}", xpath=xpath) + + def is_locked(self, ref: str) -> bool: + return bool(self.documents.fetch_lock_status(ref)) + + def get_versions(self, ref: str): + headers = {"fetch-document": "versionLabel"} + versions = self.execute( + command="Document.GetVersions", + input_obj=f"doc:{self.check_ref(ref)}", + headers=headers, + ) + return [(v["uid"], v["versionLabel"]) for v in versions["entries"]] + + def create_version(self, ref: str, increment: str = "None"): + doc = self.execute( + command="Document.CreateVersion", + input_obj=f"doc:{self.check_ref(ref)}", + increment=increment, + ) + return doc["uid"] + + def restore_version(self, version: str) -> str: + doc = self.execute( + command="Document.RestoreVersion", + input_obj=f"doc:{self.check_ref(version)}", + ) + return doc["uid"] + + def block_inheritance(self, ref: str, overwrite: bool = True): + input_obj = f"doc:{self.check_ref(ref)}" + + self.execute( + command="Document.SetACE", + input_obj=input_obj, + user=env.NXDRIVE_TEST_USERNAME, + permission="Everything", + overwrite=overwrite, + ) + + self.execute( + command="Document.SetACE", + input_obj=input_obj, + user="Everyone", + permission="Everything", + grant=False, + overwrite=False, + ) diff --git a/tests/functional/conftest.py b/tests/functional/conftest.py index d34bd6215b..01037722c4 100644 --- a/tests/functional/conftest.py +++ b/tests/functional/conftest.py @@ -1,19 +1,37 @@ +import os +import sys +import tempfile +from contextlib import suppress from logging import getLogger from pathlib import Path from random import randint -from typing import Callable, Optional +from threading import Thread +from time import sleep +from typing import Callable, Dict, List, Optional, Tuple +from unittest import TestCase from uuid import uuid4 import pytest from faker import Faker from nuxeo.client import Nuxeo -from nuxeo.documents import Document -from nuxeo.models import Blob, FileBlob -from nuxeo.users import User - +from nuxeo.documents import Document as Doc +from nuxeo.exceptions import BadQuery +from nuxeo.models import Blob, Document, FileBlob, User +from nuxeo.users import User as Usr +from sentry_sdk import configure_scope + +from nxdrive import __version__ +from nxdrive.constants import LINUX, MAC, WINDOWS +from nxdrive.engine.watcher.local_watcher import WIN_MOVE_RESOLUTION_PERIOD from nxdrive.manager import Manager +from nxdrive.options import Options +from nxdrive.qt.imports import QCoreApplication, QTimer, pyqtSignal, pyqtSlot +from nxdrive.translator import Translator +from nxdrive.utils import normalized_path from .. import env +from ..utils import clean_dir, salt +from . import DocRemote, LocalTest, RemoteBase, RemoteTest log = getLogger(__name__) @@ -39,7 +57,7 @@ def _make_manager( home: str = "", with_engine: bool = True, local_folder: Optional[Path] = None, - user: Optional[User] = None, + user: Optional[Usr] = None, ): manager = Manager(home or tmp()) @@ -97,7 +115,7 @@ def _make_user(username: str = "", password: str = env.NXDRIVE_TEST_PASSWORD): "username": username, } - user = server.users.create(User(properties=properties)) + user = server.users.create(Usr(properties=properties)) user.properties["password"] = password request.addfinalizer(user.delete) log.info(f"[FIXTURE] Created {user}") @@ -119,7 +137,7 @@ def _make( content: bytes = b"", ): title = title or str(uuid4()) - new = Document(name=title, type=nature, properties={"dc:title": title}) + new = Doc(name=title, type=nature, properties={"dc:title": title}) obj = server.documents.create(new, parent_path=parent) request.addfinalizer(obj.delete) log.info(f"[FIXTURE] Created {obj}") @@ -149,7 +167,7 @@ def _make( yield _make -def attach_blob(nuxeo: Nuxeo, doc: Document, file: Path) -> Blob: +def attach_blob(nuxeo: Nuxeo, doc: Doc, file: Path) -> Blob: # Upload the file batch = nuxeo.uploads.batch() blob = FileBlob(str(file)) @@ -162,3 +180,828 @@ def attach_blob(nuxeo: Nuxeo, doc: Document, file: Path) -> Blob: input_obj=uploaded, headers={"X-NXVoidOperation": "true"}, ) + + +# --------------------------- Old Functional Test Starts --------------------------- +# Default remote watcher delay used for tests +TEST_DEFAULT_DELAY = 3 + +FS_ITEM_ID_PREFIX = "defaultFileSystemItemFactory#default#" +SYNC_ROOT_FAC_ID = "defaultSyncRootFolderItemFactory#default#" + +# 1s time resolution as we truncate remote last modification time to the +# seconds in RemoteFileInfo.from_dict() because of the datetime +# resolution of some databases (MySQL...) +REMOTE_MODIFICATION_TIME_RESOLUTION = 1.0 + +# 1s resolution on HFS+ on OSX +# ~0.01 sec for NTFS +# 0.001 sec for EXT4FS +OS_STAT_MTIME_RESOLUTION = 1.0 + +log = getLogger(__name__) + +DEFAULT_WAIT_SYNC_TIMEOUT = 10 +FILE_CONTENT = b"Lorem ipsum dolor sit amet ..." +FAKER = Faker("en_US") +LOCATION = normalized_path(__file__).parent.parent + + +Translator(LOCATION / "resources" / "i18n") + + +def nuxeo_url() -> str: + """Retrieve the Nuxeo URL.""" + return env.NXDRIVE_TEST_NUXEO_URL.split("#")[0] + + +def root_remote(base_folder: str = "/") -> DocRemote: + return DocRemote( + nuxeo_url(), + env.NXDRIVE_TEST_USERNAME, + "nxdrive-test-administrator-device", + __version__, + password=env.NXDRIVE_TEST_PASSWORD, + base_folder=base_folder, + timeout=60, + ) + + +class StubQApplication(QCoreApplication): + bindEngine = pyqtSignal(int, bool) + unbindEngine = pyqtSignal(int) + + def __init__(self, argv, test_case): + super().__init__(argv) + + # Little trick here! See Application.__init__() for details. + self.timer = QTimer() + self.timer.timeout.connect(lambda: None) + self.timer.start(100) + + self._test = test_case + self.bindEngine.connect(self.bind_engine) + self.unbindEngine.connect(self.unbind_engine) + + # Used by test_local_move_folders.py + self.local_scan_count = 0 + + @pyqtSlot() + def local_scan_finished(self) -> None: + """Count the number of local scan done.""" + self.local_scan_count += 1 + + @pyqtSlot() + def sync_completed(self): + uid = getattr(self.sender(), "uid", None) + log.info("Sync Completed slot for: %s", uid) + if uid: + self._test._wait_sync[uid] = False + else: + for uid in self._test._wait_sync.keys(): + self._test._wait_sync[uid] = False + + @pyqtSlot() + def remote_scan_completed(self): + uid = self.sender().engine.uid + log.info("Remote scan completed for engine %s", uid) + self._test._wait_remote_scan[uid] = False + self._test._wait_sync[uid] = False + + @pyqtSlot(int) + def remote_changes_found(self, change_count): + uid = self.sender().engine.uid + log.info("Remote changes slot for: %s", uid) + self._test._remote_changes_count[uid] = change_count + + @pyqtSlot() + def no_remote_changes_found(self): + uid = self.sender().engine.uid + log.debug("No remote changes slot for %s", uid) + self._test._no_remote_changes[uid] = True + + @pyqtSlot(int, bool) + def bind_engine(self, number, start_engine): + self._test.bind_engine(number, start_engine=start_engine) + + @pyqtSlot(int) + def unbind_engine(self, number): + self._test.unbind_engine(number, purge=False) + + +class TwoUsersTest(TestCase): + @pytest.fixture(autouse=True) + def inject_fixtures(self, caplog): + self._caplog = caplog + + def setup_method( + self, + test_method, + register_roots=True, + user_2=True, + server_profile=None, + sync_enabled=True, + ): + """Setup method that will be invoked for every test method of a class.""" + + log.info("TEST master setup start") + Options.feature_synchronization = sync_enabled + + # To be replaced with fixtures when migrating to 100% pytest + self.nuxeo_url = nuxeo_url() # fixture name: nuxeo_url + self.version = __version__ # fixture name: version + self.root_remote = root_remote() + self.fake = FAKER + self.location = LOCATION + + self.server_profile = server_profile + if server_profile: + self.root_remote.activate_profile(server_profile) + + self.users = [self._create_user(1)] + if user_2: + self.users.append(self._create_user(2)) + self._create_workspace(f"{self._testMethodName}-{sys.platform.lower()}") + + # Add proper rights for all users on the root workspace + users = [user.uid for user in self.users] + try: + self.ws.add_permission({"permission": "ReadWrite", "users": users}) + except BadQuery: + # *users* is a valid parameter starting with Nuxeo 10.3. + # Keep that compatibility code for test_volume.py to work on old customers server. + for user in self.users: + self.ws.add_permission( + {"permission": "ReadWrite", "username": user.uid} + ) + + Options.delay = TEST_DEFAULT_DELAY + self.connected = False + + self.app = StubQApplication([], self) + + self._wait_sync = {} + self._wait_remote_scan = {} + self._remote_changes_count = {} + self._no_remote_changes = {} + + self.tmpdir = ( + normalized_path(tempfile.gettempdir()) / str(uuid4()).split("-")[0] + ) + self.upload_tmp_dir = self.tmpdir / "uploads" + self.upload_tmp_dir.mkdir(parents=True) + + self._append_user_attrs(1, register_roots) + if user_2: + self._append_user_attrs(2, register_roots) + + def teardown_method(self, test_method): + """Clean-up method.""" + + log.info("TEST master teardown start") + + if self.server_profile: + self.root_remote.deactivate_profile(self.server_profile) + + for user in list(self.users): + self.root_remote.users.delete(user.uid) + self.users.remove(user) + + self.ws.delete() + + self.manager_1.close() + clean_dir(self.local_test_folder_1) + clean_dir(self.upload_tmp_dir) + + if hasattr(self, "manager_2"): + self.manager_2.close() + clean_dir(self.local_test_folder_2) + + clean_dir(self.tmpdir) + + log.info("TEST master teardown end") + + def run(self, result=None): + """ + I could not (yet?) migrate this method to pytest because I did not + find a way to make tests pass. + We need to start each test in a thread and call self.app.exec_() to + let signals transit in the QApplication. + """ + + log.info("TEST run start") + + def launch_test(): + # Cleanup potential old report + self._get_report_file().unlink(missing_ok=True) + + # Note: we cannot use super().run(result) here + super(TwoUsersTest, self).run(result) + + # Generate a report if there are exceptions (failures or unexpected errors) + if result._excinfo: + try: + self.generate_report(result._excinfo) + except Exception: + log.warning("Report generation failed", exc_info=True) + + with suppress(Exception): + self.app.quit() + + # Ensure to kill the app if it is taking too long. + # We need to do that because sometimes a thread get blocked and so the test suite. + # Here, we set the default timeout to 2 minutes but use higher values for long-running tests. + default_timeout = 60 * 2 + timeouts = { + "test_nxdrive_903": 60 * 4, # 4 minutes + "test_nxdrive_947": 60 * 20, # 20 minutes + "test_nxdrive_1033": 60 * 6, # 6 minutes + "test_volume": 60 * 60 * 4, # 4 hours + } + test_file = self.id().replace("tests.old_functional.", "").split(".")[0] + timeout = timeouts.get(test_file, default_timeout) + + def kill_test(): + log.error(f"Killing {self.id()} after {timeout} seconds") + self.app.exit(1) + + QTimer.singleShot(timeout * 1000, kill_test) + + # Start the app and let signals transit between threads! + sync_thread = Thread(target=launch_test) + + with configure_scope() as scope: + scope.set_tag("test", self._testMethodName) + scope.set_tag("branch", os.getenv("BRANCH_NAME")) + sync_thread.start() + assert self.app.exec_() == 0 + sync_thread.join(30) + + log.info("TEST run end") + + def _get_report_file(self) -> Path: + path = Path(os.getenv("REPORT_PATH", ".")) + file = f"{self._testMethodName}-{sys.platform.lower()}.zip" + return path / file + + def _create_user(self, number: int) -> User: + def _company_domain(company_: str) -> str: + company_domain = company_.lower() + company_domain = company_domain.replace(",", "_") + company_domain = company_domain.replace(" ", "_") + company_domain = company_domain.replace("-", "_") + company_domain = company_domain.replace("__", "_") + + company = self.fake.company() + company_domain = _company_domain(company) + first_name, last_name = self.fake.name().split(" ", 1) + username = salt(first_name.lower()) + properties = { + "lastName": last_name, + "firstName": first_name, + "email": f"{username}@{company_domain}.org", + "password": username, + "username": username, + } + + user = self.root_remote.users.create(User(properties=properties)) + log.warning(f"Created user {user}") + + setattr(self, f"user_{number}", username) + setattr(self, f"password_{number}", username) + + return user + + def _create_workspace(self, title: str) -> Document: + title = salt(title, prefix="") + new_ws = Document( + name=title, type=env.DOCTYPE_FOLDERISH, properties={"dc:title": title} + ) + self.ws = self.root_remote.documents.create(new_ws, parent_path=env.WS_DIR) + self.workspace = self.ws.uid + self.workspace_title = self.ws.title + log.warning(f"Created workspace {self.ws}") + return self.ws + + def _append_user_attrs(self, number: int, register_roots: bool) -> None: + """Create all stuff needed for one user. Ugly but useful.""" + + # Create all what we need + local_test_folder = self.tmpdir / str(number) + local_nxdrive_folder = local_test_folder / "drive" + local_nxdrive_folder.mkdir(parents=True) + nxdrive_conf_folder = local_test_folder / "conf" + nxdrive_conf_folder.mkdir() + manager = Manager(nxdrive_conf_folder) + user = getattr(self, f"user_{number}") + password = getattr(self, f"password_{number}") + engine = self.bind_engine( + number, + start_engine=False, + manager=manager, + user=user, + password=password, + folder=local_nxdrive_folder, + ) + queue_manager = engine.queue_manager + sync_root_folder = local_nxdrive_folder / self.workspace_title + local_root_client = self.get_local_client(engine.local.base_folder) + local = self.get_local_client(sync_root_folder) + remote_document_client = DocRemote( + self.nuxeo_url, + getattr(self, f"user_{number}"), + f"nxdrive-test-device-{number}", + self.version, + password=getattr(self, f"password_{number}"), + base_folder=self.workspace, + upload_tmp_dir=self.upload_tmp_dir, + dao=engine.dao, + ) + remote = RemoteBase( + self.nuxeo_url, + getattr(self, f"user_{number}"), + f"nxdrive-test-device-{number}", + self.version, + password=getattr(self, f"password_{number}"), + base_folder=self.workspace, + upload_tmp_dir=self.upload_tmp_dir, + dao=engine.dao, + ) + if register_roots: + remote.register_as_root(self.workspace) + + # Force deletion behavior to real deletion for all tests + manager.set_config("deletion_behavior", "delete_server") + manager.dao.store_bool("show_deletion_prompt", False) + + # And now persist in attributes + setattr(self, f"manager_{number}", manager) + setattr(self, f"local_test_folder_{number}", local_test_folder) + setattr(self, f"local_nxdrive_folder_{number}", local_nxdrive_folder) + setattr(self, f"nxdrive_conf_folder_{number}", nxdrive_conf_folder) + setattr(self, f"queue_manager_{number}", queue_manager) + setattr(self, f"sync_root_folder_{number}", sync_root_folder) + setattr(self, f"local_root_client_{number}", local_root_client) + setattr(self, f"local_{number}", local) + setattr(self, f"remote_document_client_{number}", remote_document_client) + setattr(self, f"remote_{number}", remote) + + def get_bad_remote(self): + """A Remote client that will raise some error.""" + return RemoteTest( + self.nuxeo_url, + self.user_1, + "nxdrive-test-administrator-device", + self.version, + password=self.password_1, + dao=self.engine_1.dao, + ) + + def get_local_client(self, path: Path): + """ + Return an OS specific LocalClient class by default to simulate user actions on: + - Explorer (Windows) + - File Manager (macOS) + On GNU/Linux, there is not specific behavior so the original LocalClient will be used. + """ + + if LINUX: + client = LocalTest + elif MAC: + from .local_client_darwin import MacLocalClient as client + elif WINDOWS: + from .local_client_windows import WindowsLocalClient as client + + return client(path) + + def bind_engine( + self, + number, + start_engine=True, + manager=None, + folder=None, + user=None, + password=None, + ): + number_str = str(number) + manager = manager or getattr(self, f"manager_{number_str}") + folder = folder or getattr(self, f"local_nxdrive_folder_{number_str}") + user = user or getattr(self, f"user_{number_str}") + password = password or getattr(self, f"password_{number_str}") + engine = manager.bind_server( + folder, self.nuxeo_url, user, password=password, start_engine=start_engine + ) + + self.app.aboutToQuit.connect(manager.stop) + engine.syncCompleted.connect(self.app.sync_completed) + if hasattr(engine, "_remote_watcher"): # Sync is disabled + engine._remote_watcher.remoteScanFinished.connect( + self.app.remote_scan_completed + ) + engine._remote_watcher.changesFound.connect(self.app.remote_changes_found) + engine._remote_watcher.noChangesFound.connect( + self.app.no_remote_changes_found + ) + + engine_uid = engine.uid + self._wait_sync[engine_uid] = True + self._wait_remote_scan[engine_uid] = True + self._remote_changes_count[engine_uid] = 0 + self._no_remote_changes[engine_uid] = False + + setattr(self, f"engine_{number}", engine) + + # If there are Remotes, update their DAO as the old one is no more revelant + for remote in (f"remote_document_client_{number}", f"remote_{number}"): + if hasattr(self, remote): + getattr(self, remote).dao = engine.dao + + return engine + + def unbind_engine(self, number: int, purge: bool = True) -> None: + number_str = str(number) + engine = getattr(self, f"engine_{number_str}") + manager = getattr(self, f"manager_{number_str}") + manager.unbind_engine(engine.uid, purge=purge) + delattr(self, f"engine_{number_str}") + + def send_bind_engine(self, number: int, start_engine: bool = True) -> None: + self.app.bindEngine.emit(number, start_engine) + + def send_unbind_engine(self, number: int) -> None: + self.app.unbindEngine.emit(number) + + def wait_bind_engine( + self, number: int, timeout: int = DEFAULT_WAIT_SYNC_TIMEOUT + ) -> None: + engine = f"engine_{number}" + for _ in range(timeout): + if hasattr(self, engine): + return + sleep(1) + + self.fail("Wait for bind engine expired") + + def wait_unbind_engine( + self, number: int, timeout: int = DEFAULT_WAIT_SYNC_TIMEOUT + ) -> None: + engine = f"engine_{number}" + for _ in range(timeout): + if not hasattr(self, engine): + return + sleep(1) + + self.fail("Wait for unbind engine expired") + + def wait(self, retry=3): + try: + self.root_remote.wait() + except Exception as e: + log.warning(f"Exception while waiting for server: {e!r}") + # Not the nicest + if retry > 0: + log.info("Retry to wait") + self.wait(retry - 1) + + def wait_sync( + self, + wait_for_async=False, + timeout=DEFAULT_WAIT_SYNC_TIMEOUT, + fail_if_timeout=True, + wait_for_engine_1=True, + wait_for_engine_2=False, + wait_win=False, + enforce_errors=True, + ): + log.info("Wait for sync") + + # First wait for server if needed + if wait_for_async: + self.wait() + + if wait_win and WINDOWS: + log.debug("Waiting for Windows delete resolution") + sleep(WIN_MOVE_RESOLUTION_PERIOD / 1000) + + engine_1 = self.engine_1.uid + engine_2 = self.engine_2.uid + + self._wait_sync = {engine_1: wait_for_engine_1, engine_2: wait_for_engine_2} + self._no_remote_changes = { + engine_1: not wait_for_engine_1, + engine_2: not wait_for_engine_2, + } + + if enforce_errors: + if not self.connected: + self.engine_1.syncPartialCompleted.connect( + self.engine_1.queue_manager.requeue_errors + ) + self.engine_2.syncPartialCompleted.connect( + self.engine_2.queue_manager.requeue_errors + ) + self.connected = True + elif self.connected: + self.engine_1.syncPartialCompleted.disconnect( + self.engine_1.queue_manager.requeue_errors + ) + self.engine_2.syncPartialCompleted.disconnect( + self.engine_2.queue_manager.requeue_errors + ) + self.connected = False + + for _ in range(timeout): + sleep(1) + + if sum(self._wait_sync.values()): + continue + + if not wait_for_async: + return + + log.info( + "Sync completed, " + f"wait_remote_scan={self._wait_remote_scan}, " + f"remote_changes_count={self._remote_changes_count}, " + f"no_remote_changes={self._no_remote_changes}" + ) + + wait_remote_scan = False + if wait_for_engine_1: + wait_remote_scan |= self._wait_remote_scan[engine_1] + if wait_for_engine_2: + wait_remote_scan |= self._wait_remote_scan[engine_2] + + is_remote_changes = True + is_change_summary_over = True + if wait_for_engine_1: + is_remote_changes &= self._remote_changes_count[engine_1] > 0 + is_change_summary_over &= self._no_remote_changes[engine_1] + if wait_for_engine_2: + is_remote_changes &= self._remote_changes_count[engine_2] > 0 + is_change_summary_over &= self._no_remote_changes[engine_2] + + if ( + not wait_remote_scan + and not is_remote_changes + and is_change_summary_over + ): + self._wait_remote_scan = { + engine_1: wait_for_engine_1, + engine_2: wait_for_engine_2, + } + self._remote_changes_count = {engine_1: 0, engine_2: 0} + self._no_remote_changes = {engine_1: False, engine_2: False} + log.info( + "Ended wait for sync, setting " + "wait_remote_scan values to True, " + "remote_changes_count values to 0 and " + "no_remote_changes values to False" + ) + return + + if not fail_if_timeout: + log.info("Wait for sync timeout") + return + + count1 = self.engine_1.dao.get_syncing_count() + count2 = self.engine_2.dao.get_syncing_count() + err = "Wait for sync timeout has expired" + if wait_for_engine_1 and count1: + err += f" for engine 1 (syncing_count={count1})" + if wait_for_engine_2 and count2: + err += f" for engine 2 (syncing_count={count2})" + log.warning(err) + + def make_server_tree(self, deep: bool = True) -> Dict[str, str]: + """ + Create some folders on the server. + Returns a dict of document UIDs. + """ + + make_folder = self.remote_document_client_1.make_folder + make_file = self.remote_document_client_1.make_file + + docs = {} + docs["Folder 1"] = folder_1 = make_folder(self.workspace, "Folder 1") + docs["Folder 2"] = folder_2 = make_folder(self.workspace, "Folder 2") + docs["File 5.txt"] = make_file(self.workspace, "File 5.txt", content=b"eee") + + if deep: + docs["Folder 1.1"] = folder_1_1 = make_folder(folder_1, "Folder 1.1") + docs["Folder 1.2"] = folder_1_2 = make_folder(folder_1, "Folder 1.2") + docs["Dupe 1.txt"] = make_file( + folder_2, "Duplicated File.txt", content=b"Some content." + ) + docs["Dupe 2.txt"] = make_file( + folder_2, "Duplicated File.txt", content=b"Other content." + ) + docs["File 1.txt"] = make_file(folder_1, "File 1.txt", content=b"aaa") + docs["File 2.txt"] = make_file(folder_1_1, "File 2.txt", content=b"bbb") + docs["File 3.txt"] = make_file(folder_1_2, "File 3.txt", content=b"ccc") + docs["File 4.txt"] = make_file(folder_2, "File 4.txt", content=b"ddd") + + return docs + + def get_local_child_count(self, path: Path) -> Tuple[int, int]: + """ + Create some folders on the server. + Returns a tuple (files_count, folders_count). + """ + dir_count = file_count = 0 + for _, dirnames, filenames in os.walk(path): + dir_count += len(dirnames) + file_count += len(filenames) + return file_count, dir_count + + def generate_report(self, exceptions: List[Exception]) -> None: + """Generate a report on failure.""" + # Track any exception that could happen, specially those we would not + # see if the test succeed. + + from _pytest.outcomes import Skipped, XFailed + + unexpected_error = False + + for n, exc in enumerate(exceptions, 1): + if isinstance(exc.value, (Skipped, XFailed)): + # Uninteresting exceptions + continue + + if ( + isinstance(exc.value, AssertionError) + or "mock" not in str(exc.exconly()).lower() + ): + # - If the exception is an AssertionError, then the test failed. + # - If there is another exception, ensure it is not a crafted one. + unexpected_error = True + + # Log the error to help understanding what happened + error = exc.getrepr( + showlocals=True, style="long", funcargs=True, truncate_locals=False + ) + log.warning(f"Error n°{n} ({type(exc.value)})\n{error}") + + if not unexpected_error: + return + + self.manager_1.generate_report(path=self._get_report_file()) + + def _set_read_permission(self, user, doc_path, grant): + input_obj = "doc:" + doc_path + remote = self.root_remote + if grant: + remote.execute( + command="Document.SetACE", + input_obj=input_obj, + user=user, + permission="Read", + grant=True, + ) + else: + remote.block_inheritance(doc_path) + + def get_dao_state_from_engine_1(self, path: str): + """ + Returns the pair from dao of engine 1 according to the path. + + :param path: The path to document (from workspace, + ex: /Folder is converted to /{{workspace_title}}/Folder). + :return: The pair from dao of engine 1 according to the path. + """ + abs_path = Path(f"/{self.workspace_title}") / path + return self.engine_1.dao.get_state_from_local(abs_path) + + def set_readonly(self, user: str, doc_path: str, grant: bool = True): + """ + Mark a document as RO or RW. + + :param user: Affected username. + :param doc_path: The document, either a folder or a file. + :param grant: Set RO if True else RW. + """ + remote = self.root_remote + input_obj = "doc:" + doc_path + if grant: + remote.execute( + command="Document.SetACE", + input_obj=input_obj, + user=user, + permission="Read", + ) + remote.block_inheritance(doc_path, overwrite=False) + else: + remote.execute( + command="Document.SetACE", + input_obj=input_obj, + user=user, + permission="ReadWrite", + grant=True, + ) + + +class OneUserTest(TwoUsersTest): + """Tests requiring only one user.""" + + def setup_method(self, *args, **kwargs): + kwargs["user_2"] = False + super().setup_method(*args, **kwargs) + + def wait_sync( + self, + wait_for_async=False, + timeout=DEFAULT_WAIT_SYNC_TIMEOUT, + fail_if_timeout=True, + wait_for_engine_1=True, + wait_win=False, + enforce_errors=True, + ): + log.info("Wait for sync") + + # First wait for server if needed + if wait_for_async: + self.wait() + + if wait_win and WINDOWS: + log.debug("Waiting for Windows delete resolution") + sleep(WIN_MOVE_RESOLUTION_PERIOD / 1000) + + engine_1 = self.engine_1.uid + + self._wait_sync = {engine_1: wait_for_engine_1} + self._no_remote_changes = {engine_1: not wait_for_engine_1} + + if enforce_errors: + if not self.connected: + self.engine_1.syncPartialCompleted.connect( + self.engine_1.queue_manager.requeue_errors + ) + self.connected = True + elif self.connected: + self.engine_1.syncPartialCompleted.disconnect( + self.engine_1.queue_manager.requeue_errors + ) + self.connected = False + + for _ in range(timeout): + sleep(1) + + if sum(self._wait_sync.values()): + continue + + if not wait_for_async: + return + + log.info( + "Sync completed, " + f"wait_remote_scan={self._wait_remote_scan}, " + f"remote_changes_count={self._remote_changes_count}, " + f"no_remote_changes={self._no_remote_changes}" + ) + + wait_remote_scan = False + if wait_for_engine_1: + wait_remote_scan |= self._wait_remote_scan[engine_1] + + is_remote_changes = True + is_change_summary_over = True + if wait_for_engine_1: + is_remote_changes &= self._remote_changes_count[engine_1] > 0 + is_change_summary_over &= self._no_remote_changes[engine_1] + + if ( + not wait_remote_scan + and not is_remote_changes + and is_change_summary_over + ): + self._wait_remote_scan = {engine_1: wait_for_engine_1} + self._remote_changes_count = {engine_1: 0} + self._no_remote_changes = {engine_1: False} + log.info( + "Ended wait for sync, setting " + "wait_remote_scan values to True, " + "remote_changes_count values to 0 and " + "no_remote_changes values to False" + ) + return + + if not fail_if_timeout: + log.info("Wait for sync timeout") + return + + count = self.engine_1.dao.get_syncing_count() + err = "Wait for sync timeout has expired" + if wait_for_engine_1 and count: + err += f" for engine 1 (syncing_count={count})" + log.warning(err) + + +class OneUserNoSync(OneUserTest): + """Tests requiring only one user with synchronization features disabled.""" + + def setup_method(self, *args, **kwargs): + kwargs["sync_enabled"] = False + super().setup_method(*args, **kwargs) + + +# --------------------------- Old Functional Test Ends --------------------------- diff --git a/tests/functional/local_client_darwin.py b/tests/functional/local_client_darwin.py new file mode 100644 index 0000000000..5e4e5f3db0 --- /dev/null +++ b/tests/functional/local_client_darwin.py @@ -0,0 +1,67 @@ +""" +Intent of this file is to use OSX File Manager to make FS operations to simulate +user actions. +""" + +import os +import time +from pathlib import Path + +import Cocoa + +from . import LocalTest + + +class MacLocalClient(LocalTest): + def __init__(self, base_folder, **kwargs): + super().__init__(base_folder, **kwargs) + self.fm = Cocoa.NSFileManager.defaultManager() + + def copy(self, srcref: str, dstref: str) -> None: + """Make a copy of the file (with xattr included).""" + src = self.abspath(srcref) + dst = self.abspath(dstref) + if not dst.exists() and not dst.parent.exists(): + raise ValueError( + f"parent destination directory {dst.parent} does not exist" + ) + if src.is_dir() and dst.exists() and dst.is_file(): + raise ValueError(f"cannot copy directory {src} to a file {dst}") + if dst.exists() and dst.is_dir(): + dst = dst / src.name + + error = None + result = self.fm.copyItemAtPath_toPath_error_(str(src), str(dst), error) + self._process_result(result) + + def move(self, srcref: str, parentref: str, name: str = None) -> None: + src = self.abspath(srcref) + parent = self.abspath(parentref) + + dst = parent / (name or src.name) + + error = None + result = self.fm.moveItemAtPath_toPath_error_(str(src), str(dst), error) + time.sleep(0.3) + self._process_result(result) + + def rename(self, srcref: str, to_name: str): + parent = os.path.dirname(srcref) + dstref = os.path.join(parent) + self.move(srcref, dstref, name=to_name) + return Path(parent) / to_name + + def delete(self, ref): + path = self.abspath(ref) + error = None + result = self.fm.removeItemAtPath_error_(str(path), error) + self._process_result(result) + + @staticmethod + def _process_result(result): + ok, err = result + if not ok: + error = ( + f"{err.localizedDescription()} (cause: {err.localizedFailureReason()})" + ) + raise OSError(error) diff --git a/tests/functional/local_client_windows.py b/tests/functional/local_client_windows.py new file mode 100644 index 0000000000..664d870a2a --- /dev/null +++ b/tests/functional/local_client_windows.py @@ -0,0 +1,97 @@ +""" +Intent of this file is to use Explorer operations to make FS to simulate user +actions. + +https://msdn.microsoft.com/en-us/library/windows/desktop/bb775771(v=vs.85).aspx +Using SHFileOperation as the MSDN advise to use it for multithread + +IFileOperation can only be applied in a single-threaded apartment (STA) +situation. It cannot be used for a multithreaded apartment (MTA) situation. +For MTA, you still must use SHFileOperation. + +Note that any string passed to SHFileOperation needs to be double-null terminated. +This is automatically handled by pywin32: +https://github.com/mhammond/pywin32/blob/059b7be/com/win32comext/shell/src/shell.cpp#L940 +""" + +import errno +import logging +import os +import time +from pathlib import Path +from typing import Union + +from win32com.shell import shell, shellcon + +from . import LocalTest + +RawPath = Union[Path, str] +log = logging.getLogger(__name__) + + +class WindowsLocalClient(LocalTest): + def abspath(self, ref: RawPath) -> Path: + # Remove \\?\ + abs_path = super().abspath(ref).resolve() + if len(str(abs_path)) >= 255: + log.warning( + "The path is longer than 255 characters and the " + "WindowsLocalClient is about the remove the long path " + "prefix. So the test is likely to fail." + ) + return abs_path + + def do_op( + self, op: int, path_from: Path, path_to: Union[Path, None], flags: int + ) -> None: + """Actually do the requested SHFileOperation operation. + Errors are automatically handled. + """ + # *path_to* can be set to None for deletion of *path_from* + if path_to: + path_to = str(path_to) + + rc, aborted = shell.SHFileOperation((0, op, str(path_from), path_to, flags)) + + if aborted: + rc = errno.ECONNABORTED + if rc != 0: + raise OSError(rc, os.strerror(rc), path_from) + + def copy(self, srcref: RawPath, dstref: RawPath) -> None: + """Make a copy of the file (with xattr included).""" + self.do_op( + shellcon.FO_COPY, + self.abspath(srcref), + self.abspath(dstref), + shellcon.FOF_NOCONFIRMATION, + ) + + def delete(self, ref: RawPath) -> None: + # FOF_ALLOWUNDO send to trash + self.do_op( + shellcon.FO_DELETE, + self.abspath(ref), + None, + shellcon.FOF_NOCONFIRMATION | shellcon.FOF_ALLOWUNDO, + ) + + def delete_final(self, ref: RawPath) -> None: + self.do_op( + shellcon.FO_DELETE, + self.abspath(ref), + None, + flags=shellcon.FOF_NOCONFIRMATION, + ) + + def move(self, ref: RawPath, new_parent_ref: RawPath, name: str = None) -> None: + path = self.abspath(ref) + new_path = self.abspath(new_parent_ref) / (name or path.name) + self.do_op(shellcon.FO_MOVE, path, new_path, shellcon.FOF_NOCONFIRMATION) + + def rename(self, srcref: RawPath, to_name: str) -> Path: + path = self.abspath(srcref) + new_path = path.with_name(to_name) + self.do_op(shellcon.FO_RENAME, path, new_path, shellcon.FOF_NOCONFIRMATION) + time.sleep(0.5) + return new_path From 173cd73e8aecadd555d9a95d33e04fb12159a233 Mon Sep 17 00:00:00 2001 From: Anindya Roy Date: Fri, 5 Jan 2024 11:50:51 +0530 Subject: [PATCH 20/36] NXDRIVE-2860: Code Coverage - added test cases to test_remote_client 05/01 added old remote_client and fixed --- tests/functional/__init__.py | 4 ++-- tests/functional/test_api.py | 31 ------------------------------- 2 files changed, 2 insertions(+), 33 deletions(-) diff --git a/tests/functional/__init__.py b/tests/functional/__init__.py index b77401d4e4..9beee06eb5 100644 --- a/tests/functional/__init__.py +++ b/tests/functional/__init__.py @@ -24,9 +24,9 @@ def patch_nxdrive_objects(): # Need to do this one first because importing Manager will already import # nxdrive.dao.utils and so changing the behavior of save_backup() # will not work. - import nxdrive.dao.utils - nxdrive.dao.utils.save_backup = lambda *args: True + # import nxdrive.dao.utils + # nxdrive.dao.utils.save_backup = lambda *args: True from nxdrive.poll_workers import ServerOptionsUpdater diff --git a/tests/functional/test_api.py b/tests/functional/test_api.py index 29e6ea4e98..5526c0b866 100644 --- a/tests/functional/test_api.py +++ b/tests/functional/test_api.py @@ -1010,37 +1010,6 @@ def mocked_unbind_engine(*args, **kwargs): assert not returned_val -def test_default_server_url_value(manager_factory): - manager, engine = manager_factory() - manager.application = "" - - def mocked_open_authentication_dialog(): - return - - def mocked_hide_systray(*args): - return - - def mocked_show_filters(*args): - return - - Mocked_App = namedtuple( - "app", - "manager, open_authentication_dialog, hide_systray, show_filters", - defaults=( - manager, - mocked_open_authentication_dialog, - mocked_hide_systray, - mocked_show_filters, - ), - ) - app = Mocked_App() - drive_api = QMLDriveApi(app) - - with manager: - returned_val = drive_api.default_server_url_value() - assert returned_val - - def test_get_update_url(manager_factory): manager, engine = manager_factory() manager.application = "" From 7c0e728a6dbc70ebb4b46aea4d74c6b66a47e9cf Mon Sep 17 00:00:00 2001 From: Anindya Roy Date: Tue, 9 Jan 2024 11:55:48 +0530 Subject: [PATCH 21/36] NXDRIVE-2860: Code Coverage - added test cases to test_remote_client 09/01 --- tests/functional/test_shared_folders.py | 187 ++++++++ tests/functional/test_special_files.py | 36 ++ tests/functional/test_sync_roots.py | 28 ++ tests/functional/test_transfer.py | 555 ++++++++++++++++++++++++ tests/functional/test_volume.py | 320 ++++++++++++++ tests/functional/test_watchers.py | 245 +++++++++++ 6 files changed, 1371 insertions(+) create mode 100644 tests/functional/test_shared_folders.py create mode 100644 tests/functional/test_special_files.py create mode 100644 tests/functional/test_sync_roots.py create mode 100644 tests/functional/test_transfer.py create mode 100644 tests/functional/test_volume.py create mode 100644 tests/functional/test_watchers.py diff --git a/tests/functional/test_shared_folders.py b/tests/functional/test_shared_folders.py new file mode 100644 index 0000000000..26c5555316 --- /dev/null +++ b/tests/functional/test_shared_folders.py @@ -0,0 +1,187 @@ +from pathlib import Path + +from ..utils import random_png +from . import LocalTest +from .conftest import TwoUsersTest + + +class TestSharedFolders(TwoUsersTest): + def test_move_sync_root_child_to_user_workspace(self): + """See https://jira.nuxeo.com/browse/NXP-14870""" + uid = None + try: + # Get remote and local clients + remote_1 = self.remote_document_client_1 + remote_2 = self.remote_document_client_2 + + local_user2 = LocalTest(self.local_nxdrive_folder_2) + + # Make sure personal workspace is created for user1 + # and fetch its uid + uid = remote_1.make_file_in_user_workspace( + b"File in user workspace", "UWFile.txt" + )["parentRef"] + + # As user1 register personal workspace as a sync root + remote_1.register_as_root(uid) + + # As user1 create a parent folder in user1's personal workspace + parent_uid = remote_1.make_folder(uid, "Parent") + + # As user1 grant Everything permission to user2 on parent folder + input_obj = "doc:" + parent_uid + self.root_remote.execute( + command="Document.SetACE", + input_obj=input_obj, + user=self.user_2, + permission="Everything", + grant=True, + ) + + # As user1 create a child folder in parent folder + child_folder_uid = remote_1.make_folder(parent_uid, "Child") + + # As user2 register parent folder as a sync root + remote_2.register_as_root(parent_uid) + remote_2.unregister_as_root(self.workspace) + # Start engine for user2 + self.engine_2.start() + + # Wait for synchronization + self.wait_sync( + wait_for_async=True, wait_for_engine_1=False, wait_for_engine_2=True + ) + + # Check locally synchronized content + assert len(local_user2.get_children_info("/")) == 1 + assert local_user2.exists("/Parent") + assert local_user2.exists("/Parent/Child") + + # As user1 move child folder to user1's personal workspace + remote_1.move(child_folder_uid, uid) + + # Wait for synchronization + self.wait_sync( + wait_for_async=True, wait_for_engine_1=False, wait_for_engine_2=True + ) + + # Check locally synchronized content + assert not local_user2.exists("/Parent/Child") + + finally: + # Cleanup user1 personal workspace + if uid is not None and self.root_remote.exists(uid): + self.root_remote.delete(uid, use_trash=False) + + def test_local_changes_while_stopped(self): + self._test_local_changes_while_not_running(False) + + def test_local_changes_while_unbinded(self): + self._test_local_changes_while_not_running(True) + + def _test_local_changes_while_not_running(self, unbind): + """NXDRIVE-646: not uploading renamed file from shared folder.""" + local_1 = self.local_root_client_1 + remote_1 = self.remote_document_client_1 + remote_2 = self.remote_document_client_2 + + # Unregister test workspace for user_1 + remote_1.unregister_as_root(self.workspace) + + # Remove ReadWrite permission for user_1 on the test workspace + test_workspace = f"doc:{self.ws.path}" + self.root_remote.execute( + command="Document.SetACE", + input_obj=test_workspace, + user=self.user_2, + permission="ReadWrite", + grant=True, + ) + + # Create initial folders and files as user_2 + folder = remote_2.make_folder("/", "Folder01") + subfolder_1 = remote_2.make_folder(folder, "SubFolder01") + remote_2.make_file(subfolder_1, "Image01.png", random_png()) + file_id = remote_2.make_file(folder, "File01.txt", content=b"plaintext") + + # Grant Read permission for user_1 on the test folder and register + self.root_remote.execute( + command="Document.SetACE", + input_obj=f"doc:{folder}", + user=self.user_1, + permission="Read", + ) + remote_1.register_as_root(folder) + + # Start engine and wait for sync + self.engine_1.start() + self.wait_sync(wait_for_async=True) + + # First checks + file_pair_state = self.engine_1.dao.get_state_from_local( + Path("/Folder01") / "File01.txt" + ) + assert file_pair_state is not None + file_remote_ref = file_pair_state.remote_ref + assert remote_2.exists("/Folder01") + assert remote_2.exists("/Folder01/File01.txt") + assert remote_2.exists("/Folder01/SubFolder01") + assert remote_2.exists("/Folder01/SubFolder01/Image01.png") + assert local_1.exists("/Folder01") + assert local_1.exists("/Folder01/File01.txt") + assert local_1.exists("/Folder01/SubFolder01") + assert local_1.exists("/Folder01/SubFolder01/Image01.png") + + # Unbind or stop engine + if unbind: + self.send_unbind_engine(1) + self.wait_unbind_engine(1) + else: + self.engine_1.stop() + + # Restore write permission to user_1 (=> ReadWrite) + self.root_remote.execute( + command="Document.SetACE", + input_obj=f"doc:{folder}", + user=self.user_1, + permission="ReadWrite", + ) + self.wait() + + # Make changes + LocalTest.rename(local_1, "/Folder01/File01.txt", "File01_renamed.txt") + LocalTest.delete(local_1, "/Folder01/SubFolder01/Image01.png") + + # Bind or start engine and wait for sync + if unbind: + self.send_bind_engine(1) + self.wait_bind_engine(1) + else: + self.engine_1.start() + self.wait_sync() + + # Check client side + assert local_1.exists("/Folder01") + # File has been renamed and image deleted + assert not local_1.exists("/Folder01/File01.txt") + assert local_1.exists("/Folder01/File01_renamed.txt") + # The deleted image has been recreated if the unbinding happened + assert local_1.exists("/Folder01/SubFolder01/Image01.png") is unbind + + # Check server side + children = remote_2.get_children_info(folder) + assert len(children) == 2 + file_info = remote_2.get_info(file_id) + if unbind: + # File has not been renamed and image has not been deleted + assert file_info.name == "File01.txt" + assert remote_2.exists("/Folder01/SubFolder01/Image01.png") + # File is in conflict + file_pair_state = self.engine_1.dao.get_normal_state_from_remote( + file_remote_ref + ) + assert file_pair_state.pair_state == "conflicted" + else: + # File has been renamed and image deleted + assert file_info.name == "File01_renamed.txt" + assert not remote_2.exists("/Folder01/SubFolder01/Image01.png") diff --git a/tests/functional/test_special_files.py b/tests/functional/test_special_files.py new file mode 100644 index 0000000000..132c271aee --- /dev/null +++ b/tests/functional/test_special_files.py @@ -0,0 +1,36 @@ +from shutil import copyfile + +from .. import ensure_no_exception +from .conftest import OneUserTest + + +class TestSpecialFiles(OneUserTest): + def test_keynote(self): + """Syncing a (macOS) Keynote file should work (NXDRIVE-619). + Both sync directions are tests. + """ + local = self.local_1 + remote = self.remote_document_client_1 + + self.engine_1.start() + self.wait_sync(wait_for_async=True) + + # The testing file + src = self.location / "resources" / "files" / "keynote.key" + + # Create a local file + file = local.abspath("/") / "keynote1.key" + copyfile(src, file) + + # Create a distant file + remote.make_file("/", "keynote2.key", content=src.read_bytes()) + + # Sync + with ensure_no_exception(): + self.wait_sync(wait_for_async=True) + + # Checks + assert not self.engine_1.dao.get_errors(limit=0) + for idx in range(1, 3): + assert local.exists(f"/keynote{idx}.key") + assert remote.exists(f"/keynote{idx}.key") diff --git a/tests/functional/test_sync_roots.py b/tests/functional/test_sync_roots.py new file mode 100644 index 0000000000..ef97226f78 --- /dev/null +++ b/tests/functional/test_sync_roots.py @@ -0,0 +1,28 @@ +import os + +from .conftest import OneUserTest + + +class TestSyncRoots(OneUserTest): + def test_register_sync_root_parent(self): + remote = self.remote_document_client_1 + local = self.local_root_client_1 + + # First unregister test Workspace + remote.unregister_as_root(self.workspace) + + # Create a child folder and register it as a synchronization root + child = remote.make_folder(self.workspace, "child") + remote.make_file(child, "aFile.txt", content=b"My content") + remote.register_as_root(child) + + # Start engine and wait for synchronization + self.engine_1.start() + self.wait_sync(wait_for_async=True) + assert not local.exists(f"/{self.workspace_title}") + folder_name = str(os.listdir(local.base_folder)[0]) + file_path = os.path.join(folder_name, "aFile.txt") + assert folder_name.startswith( + "test_register_sync_root_parent" + ) and folder_name.endswith("child") + assert local.exists(file_path) diff --git a/tests/functional/test_transfer.py b/tests/functional/test_transfer.py new file mode 100644 index 0000000000..9ec82a9c26 --- /dev/null +++ b/tests/functional/test_transfer.py @@ -0,0 +1,555 @@ +""" +Test pause/resume transfers in different scenarii. +""" +from unittest.mock import patch + +import pytest +from requests.exceptions import ConnectionError + +from nxdrive.client.uploader.sync import SyncUploader +from nxdrive.constants import FILE_BUFFER_SIZE, TransferStatus +from nxdrive.options import Options +from nxdrive.state import State + +from .. import ensure_no_exception +from ..markers import not_windows +from .conftest import SYNC_ROOT_FAC_ID, OneUserTest + + +class TestDownload(OneUserTest): + def setUp(self): + self.engine_1.start() + self.wait_sync(wait_for_async=True) + + # Lower tmp_file_limit options to have chunked downloads without having to create big files + self.default_tmp_file_limit = Options.tmp_file_limit + Options.tmp_file_limit = 1 + + def tearDown(self): + Options.tmp_file_limit = self.default_tmp_file_limit + + def test_pause_download_manually(self): + """ + Pause the transfer by simulating a click on the pause/resume icon + on the current download in the systray menu. + """ + + def callback(downloader): + """ + This will mimic what is done in SystrayTranfer.qml: + - call API.pause_transfer() that will call: + - engine.dao.pause_transfer(nature, transfer_uid) + Then the download will be paused by the Engine: + - Engine.suspend_client() (== Remote.download_callback) will: + - raise DownloadPaused(download.uid) + """ + # Ensure we have 1 ongoing download + downloads = list(dao.get_downloads()) + assert downloads + download = downloads[0] + assert download.status == TransferStatus.ONGOING + + nonlocal count + + # Check the TMP file is bigger each iteration + file_out = engine.download_dir / uid / "test.bin" + assert file_out.stat().st_size == count * FILE_BUFFER_SIZE + + count += 1 + if count == 2: + # Pause the download + dao.pause_transfer("download", download.uid, 25.0) + + # Call the original function to make the paused download + # effective at the 2nd iteration + for cb in callback_orig: + cb(downloader) + + engine = self.engine_1 + dao = self.engine_1.dao + callback_orig = engine.remote.download_callback + count = 0 + + # Remotely create a file that will be downloaded locally + uid = self.remote_1.make_file( + f"{SYNC_ROOT_FAC_ID}{self.workspace}", + "test.bin", + content=b"0" * FILE_BUFFER_SIZE * 4, + ).uid.split("#")[-1] + + # There is no download, right now + assert not list(dao.get_downloads()) + + with patch.object(engine.remote, "download_callback", new=callback): + with ensure_no_exception(): + self.wait_sync(wait_for_async=True) + assert dao.get_downloads_with_status(TransferStatus.PAUSED) + + # Resume the download + engine.resume_transfer("download", list(dao.get_downloads())[0].uid) + self.wait_sync(wait_for_async=True) + assert not list(dao.get_downloads()) + + def test_pause_download_automatically(self): + """ + Pause the transfer by simulating an application exit + or clicking on the Suspend menu entry from the systray. + """ + + def callback(downloader): + """This will mimic what is done in SystrayMenu.qml: suspend the app.""" + # Ensure we have 1 ongoing download + downloads = list(dao.get_downloads()) + assert downloads + download = downloads[0] + assert download.status == TransferStatus.ONGOING + + # Suspend! + self.manager_1.suspend() + + # Call the original function to make the suspended download effective + for cb in callback_orig: + cb(downloader) + + engine = self.engine_1 + dao = self.engine_1.dao + callback_orig = engine.remote.download_callback + + # Remotely create a file that will be downloaded locally + self.remote_1.make_file( + f"{SYNC_ROOT_FAC_ID}{self.workspace}", + "test.bin", + content=b"0" * FILE_BUFFER_SIZE * 2, + ) + + # There is no download, right now + assert not list(dao.get_downloads()) + + with patch.object(engine.remote, "download_callback", new=callback): + with ensure_no_exception(): + self.wait_sync(wait_for_async=True) + assert dao.get_downloads_with_status(TransferStatus.SUSPENDED) + + # Resume the download + self.manager_1.resume() + self.wait_sync(wait_for_async=True) + assert not list(dao.get_downloads()) + + def test_modifying_paused_download(self): + """Modifying a paused download should discard the current download.""" + + def callback(downloader): + """Pause the download and apply changes to the document.""" + nonlocal count + count += 1 + + if count == 1: + # Ensure we have 1 ongoing download + downloads = list(dao.get_downloads()) + assert downloads + download = downloads[0] + assert download.status == TransferStatus.ONGOING + + # Pause the download + dao.pause_transfer("download", download.uid, 0.0) + + # Apply changes to the document + remote.update_content(file.uid, b"remotely changed") + + # Call the original function to make the paused download effective + for cb in callback_orig: + cb(downloader) + + count = 0 + remote = self.remote_1 + engine = self.engine_1 + dao = self.engine_1.dao + callback_orig = engine.remote.download_callback + + # Remotely create a file that will be downloaded locally + file = remote.make_file( + f"{SYNC_ROOT_FAC_ID}{self.workspace}", + "test.bin", + content=b"0" * FILE_BUFFER_SIZE * 2, + ) + + # There is no download, right now + assert not list(dao.get_downloads()) + + with patch.object(engine.remote, "download_callback", new=callback): + with ensure_no_exception(): + self.wait_sync(wait_for_async=True) + + # Resync and check the local content is correct + self.wait_sync(wait_for_async=True) + assert not list(dao.get_downloads()) + assert self.local_1.get_content("/test.bin") == b"remotely changed" + + def test_deleting_paused_download(self): + """Deleting a paused download should discard the current download.""" + + def callback(downloader): + """Pause the download and delete the document.""" + # Ensure we have 1 ongoing download + downloads = list(dao.get_downloads()) + assert downloads + download = downloads[0] + assert download.status == TransferStatus.ONGOING + + # Pause the download + dao.pause_transfer("download", download.uid, 0.0) + + # Remove the document + remote.delete(file.uid) + + # Call the original function to make the paused download effective + for cb in callback_orig: + cb(downloader) + + remote = self.remote_1 + engine = self.engine_1 + dao = self.engine_1.dao + callback_orig = engine.remote.download_callback + + # Remotely create a file that will be downloaded locally + file = remote.make_file( + f"{SYNC_ROOT_FAC_ID}{self.workspace}", + "test.bin", + content=b"0" * FILE_BUFFER_SIZE * 2, + ) + + # There is no download, right now + assert not list(dao.get_downloads()) + + with patch.object(engine.remote, "download_callback", new=callback): + with ensure_no_exception(): + self.wait_sync(wait_for_async=True) + + # Resync and check the file does not exist + self.wait_sync(wait_for_async=True) + assert not list(dao.get_downloads()) + assert not self.local_1.exists("/test.bin") + + +class TestUpload(OneUserTest): + def setUp(self): + self.engine_1.start() + self.wait_sync(wait_for_async=True) + + # Lower chunk_* options to have chunked uploads without having to create big files + self.default_chunk_limit = Options.chunk_limit + self.default_chunk_size = Options.chunk_size + Options.chunk_limit = 1 + Options.chunk_size = 1 + + def tearDown(self): + Options.chunk_limit = self.default_chunk_limit + Options.chunk_size = self.default_chunk_size + + def test_pause_upload_manually(self): + """ + Pause the transfer by simulating a click on the pause/resume icon + on the current upload in the systray menu. + """ + + def callback(uploader): + """ + This will mimic what is done in SystrayTranfer.qml: + - call API.pause_transfer() that will call: + - engine.dao.pause_transfer(nature, transfer_uid) + Then the upload will be paused in Remote.upload(). + """ + # Ensure we have 1 ongoing upload + uploads = list(dao.get_uploads()) + assert uploads + upload = uploads[0] + assert upload.status == TransferStatus.ONGOING + + # Pause the upload + dao.pause_transfer("upload", upload.uid, 50.0) + + engine = self.engine_1 + dao = self.engine_1.dao + + # Locally create a file that will be uploaded remotely + self.local_1.make_file("/", "test.bin", content=b"0" * FILE_BUFFER_SIZE * 2) + + # There is no upload, right now + assert not list(dao.get_uploads()) + + with patch.object(engine.remote, "upload_callback", new=callback): + with ensure_no_exception(): + self.wait_sync() + assert dao.get_uploads_with_status(TransferStatus.PAUSED) + + # Resume the upload + engine.resume_transfer("upload", list(dao.get_uploads())[0].uid) + self.wait_sync() + assert not list(dao.get_uploads()) + + def test_pause_upload_automatically(self): + """ + Pause the transfer by simulating an application exit + or clicking on the Suspend menu entry from the systray. + """ + + def callback(uploader): + """This will mimic what is done in SystrayMenu.qml: suspend the app.""" + # Ensure we have 1 ongoing upload + uploads = list(dao.get_uploads()) + assert uploads + upload = uploads[0] + assert upload.status == TransferStatus.ONGOING + + # Suspend! + self.manager_1.suspend() + + engine = self.engine_1 + dao = self.engine_1.dao + + # Locally create a file that will be uploaded remotely + self.local_1.make_file("/", "test.bin", content=b"0" * FILE_BUFFER_SIZE * 2) + + # There is no upload, right now + assert not list(dao.get_uploads()) + + with patch.object(engine.remote, "upload_callback", new=callback): + with ensure_no_exception(): + self.wait_sync() + assert dao.get_uploads_with_status(TransferStatus.SUSPENDED) + + # Resume the upload + self.manager_1.resume() + self.wait_sync() + assert not list(dao.get_uploads()) + + def test_modifying_paused_upload(self): + """Modifying a paused upload should discard the current upload.""" + + def callback(uploader): + """Pause the upload and apply changes to the document.""" + # Ensure we have 1 ongoing upload + uploads = list(dao.get_uploads()) + assert uploads + upload = uploads[0] + assert upload.status == TransferStatus.ONGOING + + # Pause the upload + dao.pause_transfer("upload", upload.uid, 50.0) + + # Apply changes to the document + local.update_content("/test.bin", b"locally changed") + + local = self.local_1 + engine = self.engine_1 + dao = self.engine_1.dao + + # Locally create a file that will be uploaded remotely + self.local_1.make_file("/", "test.bin", content=b"0" * FILE_BUFFER_SIZE * 2) + + # There is no upload, right now + assert not list(dao.get_uploads()) + + with patch.object(engine.remote, "upload_callback", new=callback): + with ensure_no_exception(): + self.wait_sync() + + # Resync and check the local content is correct + self.wait_sync() + assert not list(dao.get_uploads()) + assert self.local_1.get_content("/test.bin") == b"locally changed" + + @not_windows( + reason="Cannot test the behavior as the local deletion is blocked by the OS." + ) + def test_deleting_paused_upload(self): + """Deleting a paused upload should discard the current upload.""" + + def callback(uploader): + """Pause the upload and delete the document.""" + # Ensure we have 1 ongoing upload + uploads = list(dao.get_uploads()) + assert uploads + upload = uploads[0] + assert upload.status == TransferStatus.ONGOING + + # Pause the upload + dao.pause_transfer("upload", upload.uid, 50.0) + + # Remove the document + # (this is the problematic part on Windows, because for the + # file descriptor to be released we need to escape from + # Remote.upload(), which is not possible from here) + local.delete("/test.bin") + + local = self.local_1 + engine = self.engine_1 + dao = self.engine_1.dao + + # Locally create a file that will be uploaded remotely + self.local_1.make_file("/", "test.bin", content=b"0" * FILE_BUFFER_SIZE * 2) + + # There is no upload, right now + assert not list(dao.get_uploads()) + + with patch.object(engine.remote, "upload_callback", new=callback): + with ensure_no_exception(): + self.wait_sync() + + # Resync and check the file does not exist + self.wait_sync() + assert not list(dao.get_uploads()) + assert not self.remote_1.exists("/test.bin") + + def test_not_server_error_upload(self): + """Test an error happening after chunks were uploaded, at the NuxeoDrive.CreateFile operation call.""" + + class BadUploader(SyncUploader): + """Used to simulate bad server responses.""" + + def link_blob_to_doc(self, *args, **kwargs): + """Simulate a server error.""" + raise ValueError("Mocked exception") + + def upload(*args, **kwargs): + """Set our specific uploader to simulate server error.""" + kwargs.pop("uploader", None) + return upload_orig(*args, uploader=BadUploader, **kwargs) + + engine = self.engine_1 + dao = engine.dao + upload_orig = engine.remote.upload + + # Locally create a file that will be uploaded remotely + self.local_1.make_file("/", "test.bin", content=b"0" * FILE_BUFFER_SIZE * 2) + + # There is no upload, right now + assert not list(dao.get_uploads()) + + with patch.object(engine.remote, "upload", new=upload): + with ensure_no_exception(): + self.wait_sync() + + # There should be 1 upload with DONE transfer status + uploads = list(dao.get_uploads()) + assert len(uploads) == 1 + upload = uploads[0] + assert upload.status == TransferStatus.DONE + + # The file on the server should not exist yet + assert not self.remote_1.exists("/test.bin") + + # The doc should be in error + assert len(dao.get_errors(limit=0)) == 1 + + # Reset the error + for state in dao.get_errors(): + dao.reset_error(state) + + # Resync and check the file exist + self.wait_sync() + assert not list(dao.get_uploads()) + assert self.remote_1.exists("/test.bin") + + @pytest.mark.randombug("Randomly fail when run in parallel") + @Options.mock() + def test_server_error_upload(self): + """Test a server error happening after chunks were uploaded, at the NuxeoDrive.CreateFile operation call.""" + + class BadUploader(SyncUploader): + """Used to simulate bad server responses.""" + + def link_blob_to_doc(self, *args, **kwargs): + """Simulate a server error.""" + raise ConnectionError("Mocked exception") + + def upload(*args, **kwargs): + """Set our specific uploader to simulate server error.""" + kwargs.pop("uploader", None) + return upload_orig(*args, uploader=BadUploader, **kwargs) + + engine = self.engine_1 + dao = engine.dao + upload_orig = engine.remote.upload + + # Locally create a file that will be uploaded remotely + self.local_1.make_file("/", "test.bin", content=b"0" * FILE_BUFFER_SIZE * 2) + + # There is no upload, right now + assert not list(dao.get_uploads()) + + with patch.object(engine.remote, "upload", new=upload): + with ensure_no_exception(): + self.wait_sync() + + # There should be 1 upload with DONE transfer status + uploads = list(dao.get_uploads()) + assert len(uploads) == 1 + upload = uploads[0] + assert upload.status == TransferStatus.DONE + + # The file on the server should not exist yet + assert not self.remote_1.exists("/test.bin") + + # Resync and check the file exists + self.wait_sync() + assert not list(dao.get_uploads()) + assert self.remote_1.exists("/test.bin") + + def test_app_crash_simulation(self): + """ + When the app crahsed, ongoing transfers will be removed at the next run. + See NXDRIVE-2186 for more information. + + To reproduce the issue, we suspend the transfer in the upload's callback, + then stop the engine and mimic an app crash by manually changing the transfer + status and State.has_crashed value. + """ + + def callback(uploader): + """Suspend the upload and engine.""" + self.manager_1.suspend() + + local = self.local_1 + engine = self.engine_1 + dao = engine.dao + + # Locally create a file that will be uploaded remotely + local.make_file("/", "test.bin", content=b"0" * FILE_BUFFER_SIZE * 2) + + # There is no upload, right now + assert not list(dao.get_uploads()) + + with patch.object(engine.remote, "upload_callback", new=callback): + with ensure_no_exception(): + self.wait_sync() + + # For now, the transfer is only suspended + assert dao.get_uploads_with_status(TransferStatus.SUSPENDED) + + # Stop the engine + engine.stop() + + # Change the transfer status to ongoing and change the global State to reflect a crash + upload = list(dao.get_uploads())[0] + upload.status = TransferStatus.ONGOING + dao.set_transfer_status("upload", upload) + assert dao.get_uploads_with_status(TransferStatus.ONGOING) + + # Simple check: nothing has been uploaded yet + assert not self.remote_1.exists("/test.bin") + + State.has_crashed = True + try: + # Start again the engine, it will manage staled transfers. + # As the app crashed, no transfers should be removed but continued. + with ensure_no_exception(): + engine.start() + self.manager_1.resume() + self.wait_sync() + finally: + State.has_crashed = False + + # Check the file has been uploaded + assert not list(dao.get_uploads()) + assert self.remote_1.exists("/test.bin") diff --git a/tests/functional/test_volume.py b/tests/functional/test_volume.py new file mode 100644 index 0000000000..c4991c429b --- /dev/null +++ b/tests/functional/test_volume.py @@ -0,0 +1,320 @@ +import logging +import os +import shutil +from copy import copy +from pathlib import Path + +import pytest + +from nxdrive.constants import ROOT + +from ..utils import random_png +from .conftest import OneUserTest + + +def configure_logs(): + """Configure the logging module to prevent too many data being logged.""" + + from nxdrive.logging_config import configure + + configure( + console_level="WARNING", + file_level="WARNING", + command_name="volume", + force_configure=True, + ) + + +configure_logs() + +log = logging.getLogger(__name__) + +FOLDERS = FILES = DEPTH = 0 + +if "TEST_VOLUME" in os.environ: + values_ = os.getenv("TEST_VOLUME", "") + if not values_: + del os.environ["TEST_VOLUME"] + else: + if values_.count(",") != 2: + # Low volume by default + values_ = "3,10,2" # 200 documents + + FOLDERS, FILES, DEPTH = map(int, values_.split(",")) + del values_ + + +def get_name(folder: bool, depth: int, number: int) -> str: + if folder: + return f"folder_{depth:03d}_{number:03d}" + return f"file_{depth:03d}_{number:04d}.png" + + +def get_path(folder, depth, number) -> Path: + child = ROOT + for i in range(DEPTH + 1 - depth, DEPTH + 1): + if i == 1 and not folder: + child = ROOT / get_name(False, DEPTH - i + 1, number) + child = ROOT / get_name(True, DEPTH - i + 1, number) / child + return child + + +@pytest.mark.skipif( + "TEST_VOLUME" not in os.environ, + reason="Deactivate if not launched on purpose with TEST_VOLUME set", +) +class TestVolume(OneUserTest): + def create_tree(self, folders, files, depth, parent) -> int: + items = 0 + + if depth < 1: + return items + + for folder in range(folders): + foldername = get_name(True, DEPTH - depth + 1, folder + 1) + folderobj = {"path": os.path.join(parent["path"], foldername)} + self.local_1.make_folder(parent["path"], foldername) + items += 1 + + folderobj["name"] = foldername + folderobj["children"] = {} + abspath = self.local_1.abspath(folderobj["path"]) + parent["children"][foldername] = folderobj + + items += self.create_tree(folders, files, depth - 1, folderobj) + + for file in range(files): + filename = get_name(False, DEPTH - depth + 1, file + 1) + folderobj["children"][filename] = {"name": filename} + random_png(Path(abspath) / filename) + items += 1 + + return items + + def create(self, stopped=True, wait_for_sync=True): + self.engine_1.start() + self.wait_sync() + if not stopped: + self.engine_1.stop() + + self.tree = {"children": {}, "path": ROOT} + items = self.create_tree(FOLDERS, FILES, DEPTH, self.tree) + log.warning(f"Created {items:,} local documents.") + + if not stopped: + self.engine_1.start() + + if wait_for_sync: + self.wait_sync(timeout=items * 10) + + return items + + def _check_folder(self, path: Path, removed=[], added=[]): + # First get the remote id + remote_id = self.local_1.get_remote_id(path) + assert remote_id + + # Get depth + depth = int(path.name.split("_")[1]) + + # Calculate expected children + children = {} + if depth != DEPTH: + for i in range(1, FOLDERS + 1): + children[get_name(True, depth + 1, i)] = True + for i in range(FILES): + children[get_name(False, depth, i)] = True + for name in removed: + children.pop(name, None) + for name in added: + children[name] = True + + # Local checks + os_children = os.listdir() + assert len(os_children) == len(children) + cmp_children = copy(children) + remote_refs = {} + for child in self.local_1.abspath(path).iterdir(): + name = child.name + file = cmp_children.pop(name, None) + if not file: + self.fail(f"Unexpected local child {name!r} in {path}") + remote_ref = self.local_1.get_remote_id(child) + assert remote_ref + remote_refs[remote_ref] = name + assert not cmp_children + + # Remote checks + remote_children = self.remote_1.get_fs_children(remote_id) + assert len(remote_children) == len(children) + for child in remote_children: + if child.uid not in remote_refs: + self.fail(f'Unexpected remote child "{child.name}" in {path}') + assert child.name == remote_refs[child.uid] + + def test_moves_while_creating(self): + items = self.create(stopped=False, wait_for_sync=False) + self._moves(items) + + def test_moves(self): + items = self.create() + self._moves(items) + + def test_moves_stopped(self): + items = self.create() + self._moves(items, stopped=True) + + def test_moves_while_creating_stopped(self): + items = self.create(stopped=False, wait_for_sync=False) + self._moves(items, stopped=True) + + def _moves(self, items: int, stopped: bool = False) -> None: + if stopped: + self.engine_1.stop() + + # While we are started + # Move one parent to the second children + if len(self.tree["children"]) < 3 or DEPTH < 2: + self.app.quit() + pytest.skip("Can't execute this test on so few data") + + # Move root 2 in, first subchild of 1 + root_2 = get_path(True, 1, 2) + child = get_path(True, DEPTH, 1) + shutil.move(self.local_1.abspath(root_2), self.local_1.abspath(child)) + + root_1 = get_path(True, 1, 1) + root_3 = get_path(True, 1, 3) + shutil.move(self.local_1.abspath(root_1), self.local_1.abspath(root_3)) + + # Update paths + child = ROOT / get_name(True, 1, 3) / child + root_2 = ROOT / child / get_name(True, 1, 2) + root_1 = ROOT / root_3 / get_name(True, 1, 1) + if stopped: + self.engine_1.start() + self.wait_sync(wait_for_async=True, timeout=items * 10) + + # Checks + self._check_folder(root_3, added=[get_name(True, 1, 1)]) + self._check_folder(child, added=[get_name(True, 1, 2)]) + self._check_folder(root_1) + self._check_folder(root_2) + + # We should not have any error + assert not self.engine_1.dao.get_errors(limit=0) + + def test_copies(self): + items = self.create() + self._copies(items) + + def test_copies_stopped(self): + items = self.create() + self._copies(items, stopped=True) + + def test_copies_while_creating(self): + items = self.create(stopped=False, wait_for_sync=False) + self._copies(items) + + def test_copies_while_creating_stopped(self): + items = self.create(stopped=False, wait_for_sync=False) + self._copies(items, stopped=True) + + def _copies(self, items: int, stopped: bool = False) -> None: + if stopped: + self.engine_1.stop() + + # Copy root 2 in, first subchild of 1 + root_2 = get_path(True, 1, 2) + child = get_path(True, DEPTH, 1) + shutil.copytree( + self.local_1.abspath(root_2), + self.local_1.abspath(child / get_name(True, 1, 2)), + ) + + # New copies + root_1 = get_path(True, 1, 1) + root_3 = get_path(True, 1, 3) + root_4 = get_path(True, 1, DEPTH + 1) + root_5 = get_path(True, 1, DEPTH + 2) + shutil.copytree( + self.local_1.abspath(root_1), + self.local_1.abspath(root_3 / get_name(True, 1, 1)), + ) + + shutil.copytree(self.local_1.abspath(root_3), self.local_1.abspath(root_4)) + shutil.copytree(self.local_1.abspath(root_3), self.local_1.abspath(root_5)) + + # Update paths + child = ROOT / get_name(True, 1, 3) / child + root_2 = ROOT / child / get_name(True, 1, 2) + root_1 = ROOT / root_3 / get_name(True, 1, 1) + root_1_path = self.local_1.abspath(root_1) + child_path = self.local_1.abspath(child) + + # Copies files from one folder to another + added_files = [] + for path in child_path.iterdir(): + if not path.is_file(): + continue + shutil.copy(path, root_1_path) + added_files.append(path.name) + + if stopped: + self.engine_1.start() + self.wait_sync(wait_for_async=True, timeout=items * 10) + + # Checks + self._check_folder(root_3, added=[get_name(True, 1, 1)]) + self._check_folder(child, added=[get_name(True, 1, 2)]) + self._check_folder(root_1, added=added_files) + self._check_folder(root_2) + + # Check original copied + self._check_folder(get_path(True, 1, 1)) + self._check_folder(get_path(True, 1, 2)) + self._check_folder(get_path(True, 1, DEPTH + 1), added=[get_name(True, 1, 1)]) + self._check_folder(get_path(True, 1, DEPTH + 2), added=[get_name(True, 1, 1)]) + + # We should not have any error + assert not self.engine_1.dao.get_errors(limit=0) + + +@pytest.mark.skipif( + "TEST_REMOTE_SCAN_VOLUME" not in os.environ + or int(os.environ["TEST_REMOTE_SCAN_VOLUME"]) == 0, + reason="Skipped as TEST_REMOTE_SCAN_VOLUME is no set", +) +class TestVolumeRemoteScan(OneUserTest): + def test_remote_scan(self): + nb_nodes = int(os.getenv("TEST_REMOTE_SCAN_VOLUME", 20)) + + # Random mass import + self.root_remote.mass_import(self.ws.path, nb_nodes) + + # Wait for ES indexing + self.root_remote.wait_for_async_and_es_indexing() + + # Synchronize + self.engine_1.start() + self.wait_sync(wait_for_async=True, timeout=nb_nodes**2) + + query = ( + f"SELECT ecm:uuid FROM Document WHERE ecm:ancestorId = {self.workspace!r}" + " AND ecm:isVersion = 0" + " AND ecm:isTrashed = 0" + " AND ecm:mixinType != 'HiddenInNavigation'" + ) + doc_count = self.root_remote.result_set_query(query)["resultsCount"] + log.warning(f"Created {doc_count:,} documents (nb_nodes={nb_nodes:,}).") + + # Check local tree + local_doc_count = sum( + self.get_local_child_count( + self.local_nxdrive_folder_1 / self.workspace_title + ) + ) + assert local_doc_count == doc_count + + # We should not have any error + assert not self.engine_1.dao.get_errors(limit=0) diff --git a/tests/functional/test_watchers.py b/tests/functional/test_watchers.py new file mode 100644 index 0000000000..0d5448451a --- /dev/null +++ b/tests/functional/test_watchers.py @@ -0,0 +1,245 @@ +from copy import deepcopy +from pathlib import Path +from queue import Queue +from shutil import copyfile +from time import sleep +from unittest.mock import patch + +from nxdrive.constants import ROOT + +from ..markers import not_windows +from . import LocalTest +from .conftest import OneUserTest + + +def copy_queue(queue: Queue) -> Queue: + result = deepcopy(queue.queue) + result.reverse() + return result + + +class TestWatchers(OneUserTest): + def get_local_client(self, path): + if self._testMethodName in { + "test_local_scan_encoding", + "test_watchdog_encoding", + }: + return LocalTest(path) + return super().get_local_client(path) + + def make_local_tree(self, root=None, local_client=None): + nb_files, nb_folders = 6, 4 + if not local_client: + local_client = LocalTest(self.engine_1.local_folder) + if not root: + root = Path(self.workspace_title) + if not local_client.exists(root): + local_client.make_folder(Path(), self.workspace_title) + nb_folders += 1 + # create some folders + folder_1 = local_client.make_folder(root, "Folder 1") + folder_1_1 = local_client.make_folder(folder_1, "Folder 1.1") + folder_1_2 = local_client.make_folder(folder_1, "Folder 1.2") + folder_2 = local_client.make_folder(root, "Folder 2") + + # create some files + local_client.make_file( + folder_2, "Duplicated File.txt", content=b"Some content." + ) + + local_client.make_file(folder_1, "File 1.txt", content=b"aaa") + local_client.make_file(folder_1_1, "File 2.txt", content=b"bbb") + local_client.make_file(folder_1_2, "File 3.txt", content=b"ccc") + local_client.make_file(folder_2, "File 4.txt", content=b"ddd") + local_client.make_file(root, "File 5.txt", content=b"eee") + return nb_files, nb_folders + + def get_full_queue(self, queue, dao=None): + if dao is None: + dao = self.engine_1.dao + result = [] + while queue: + result.append(dao.get_state_from_id(queue.pop().id)) + return result + + def test_local_scan(self): + files, folders = self.make_local_tree() + self.queue_manager_1.suspend() + self.queue_manager_1._disable = True + self.engine_1.start() + self.wait_sync() + + # Workspace should have been reconcile + res = self.engine_1.dao.get_states_from_partial_local(ROOT) + # With root + count = folders + files + 1 + assert len(res) == count + + def test_reconcile_scan(self): + files, folders = self.make_local_tree() + self.make_server_tree() + # Wait for ES indexing + self.wait() + manager = self.queue_manager_1 + manager.suspend() + manager._disable = True + self.engine_1.start() + self.wait_sync() + # Depending on remote scan results order, the remote + # duplicated file with the same digest as the local file + # might come first, in which case we get an extra synchronized file, + # or not, in which case we get a conflicted file + assert self.engine_1.dao.get_sync_count() >= folders + files + # Verify it has been reconciled and all items in queue are synchronized + queue = self.get_full_queue(copy_queue(manager._local_file_queue)) + for item in queue: + if item.remote_name == "Duplicated File.txt": + assert item.pair_state in ["synchronized", "conflicted"] + else: + assert item.pair_state == "synchronized" + queue = self.get_full_queue(copy_queue(manager._local_folder_queue)) + for item in queue: + assert item.pair_state == "synchronized" + + def test_remote_scan(self): + total = len(self.make_server_tree()) + # Add the workspace folder + the root + total += 2 + # Wait for ES indexing + self.wait() + self.queue_manager_1.suspend() + self.queue_manager_1._disable = True + self.engine_1.start() + self.wait_sync() + res = self.engine_1.dao.get_states_from_partial_local(ROOT) + assert len(res) == total + + def test_local_watchdog_creation(self): + # Test the creation after first local scan + self.queue_manager_1.suspend() + self.queue_manager_1._disable = True + self.engine_1.start() + self.wait_sync() + metrics = self.queue_manager_1.get_metrics() + assert not metrics["local_folder_queue"] + assert not metrics["local_file_queue"] + files, folders = self.make_local_tree() + self.wait_sync(timeout=3, fail_if_timeout=False) + metrics = self.queue_manager_1.get_metrics() + assert metrics["local_folder_queue"] + assert metrics["local_file_queue"] + res = self.engine_1.dao.get_states_from_partial_local(ROOT) + # With root + assert len(res) == folders + files + 1 + + def _delete_folder_1(self): + path = Path("Folder 1") + self.local_1.delete_final(path) + self.wait_sync(timeout=1, fail_if_timeout=False, wait_win=True) + + timeout = 5 + while not self.engine_1._local_watcher.empty_events(): + sleep(1) + timeout -= 1 + if timeout < 0: + break + return Path(self.workspace_title) / path + + def test_local_scan_delete_non_synced(self): + # Test the deletion after first local scan + self.test_local_scan() + self.engine_1.stop() + path = self._delete_folder_1() + self.engine_1.start() + self.wait_sync(timeout=5, fail_if_timeout=False) + children = self.engine_1.dao.get_states_from_partial_local(path) + assert not children + + def test_local_watchdog_delete_synced(self): + # Test the deletion after first local scan + self.test_reconcile_scan() + path = self._delete_folder_1() + child = self.engine_1.dao.get_state_from_local(path) + assert child.pair_state == "locally_deleted" + children = self.engine_1.dao.get_states_from_partial_local(path) + assert len(children) == 5 + for child in children: + assert child.pair_state == "locally_deleted" + + def test_local_scan_delete_synced(self): + # Test the deletion after first local scan + self.test_reconcile_scan() + self.engine_1.stop() + path = self._delete_folder_1() + self.engine_1.start() + self.wait_sync(timeout=5, fail_if_timeout=False) + child = self.engine_1.dao.get_state_from_local(path) + assert child.pair_state == "locally_deleted" + children = self.engine_1.dao.get_states_from_partial_local(path) + assert len(children) == 5 + for child in children: + assert child.pair_state == "locally_deleted" + + def test_local_scan_error(self): + local = self.local_1 + remote = self.remote_document_client_1 + # Synchronize test workspace + self.engine_1.start() + self.wait_sync() + self.engine_1.stop() + # Create a local file and use an invalid digest function + # in local watcher file system client to trigger an error + # during local scan + local.make_file("/", "Test file.odt", content=b"Content") + + with patch.object(self.engine_1.local, "_digest_func", return_value="invalid"): + self.engine_1.start() + self.wait_sync() + self.engine_1.stop() + assert not remote.exists("/Test file.odt") + + self.engine_1.start() + self.wait_sync() + assert remote.exists("/Test file.odt") + + @not_windows(reason="Windows cannot have file ending with a space.") + def test_watchdog_space_remover(self): + """ + Test files and folders ending with space. + """ + + local = self.local_1 + remote = self.remote_document_client_1 + + self.engine_1.start() + self.wait_sync() + + local.make_file("/", "Accentue\u0301.odt ", content=b"Content") + self.wait_sync() + assert remote.exists("/Accentue\u0301.odt") + assert not remote.exists("/Accentue\u0301.odt ") + + local.rename("/Accentu\xe9.odt", "Accentu\xe9 avec un \xea et un \xe9.odt ") + self.wait_sync() + assert ( + remote.get_info("/Accentu\xe9 avec un \xea et un \xe9.odt").name + == "Accentu\xe9 avec un \xea et un \xe9.odt" + ) + + def test_watcher_remote_id_setter(self): + local = self.local_1 + # As some user can rewrite same file for no reason + # Start engine + self.engine_1.start() + # Wait for test workspace synchronization + self.wait_sync() + # Create files with Unicode combining accents, + # Unicode latin characters and no special characters + file_path = local.abspath("/Test.pdf") + copyfile(self.location / "resources" / "files" / "testFile.pdf", file_path) + # Wait for test workspace synchronization + self.wait_sync() + remote_id = local.get_remote_id("/Test.pdf") + copyfile(self.location / "resources" / "files" / "testFile.pdf", file_path) + self.wait_sync() + assert remote_id == local.get_remote_id("/Test.pdf") From 65d81efb5dc7170106d8a17839af5aaa82a0cb8b Mon Sep 17 00:00:00 2001 From: Anindya Roy Date: Wed, 10 Jan 2024 11:08:28 +0530 Subject: [PATCH 22/36] NXDRIVE-2860: Code Coverage - added 11 files from old functional - 10/01 --1 --- tests/functional/test_permission_hierarchy.py | 316 +++++ tests/functional/test_readonly.py | 532 ++++++++ tests/functional/test_reinit_database.py | 118 ++ tests/functional/test_remote_deletion.py | 248 ++++ .../functional/test_remote_move_and_rename.py | 888 +++++++++++++ tests/functional/test_security_updates.py | 298 +++++ tests/functional/test_special_characters.py | 93 ++ tests/functional/test_synchronization.py | 1181 +++++++++++++++++ .../functional/test_synchronization_dedup.py | 148 +++ .../test_synchronization_suspend.py | 159 +++ tests/functional/test_versioning.py | 66 + 11 files changed, 4047 insertions(+) create mode 100644 tests/functional/test_permission_hierarchy.py create mode 100644 tests/functional/test_readonly.py create mode 100644 tests/functional/test_reinit_database.py create mode 100644 tests/functional/test_remote_deletion.py create mode 100644 tests/functional/test_remote_move_and_rename.py create mode 100644 tests/functional/test_security_updates.py create mode 100644 tests/functional/test_special_characters.py create mode 100644 tests/functional/test_synchronization.py create mode 100644 tests/functional/test_synchronization_dedup.py create mode 100644 tests/functional/test_synchronization_suspend.py create mode 100644 tests/functional/test_versioning.py diff --git a/tests/functional/test_permission_hierarchy.py b/tests/functional/test_permission_hierarchy.py new file mode 100644 index 0000000000..cb92b1414e --- /dev/null +++ b/tests/functional/test_permission_hierarchy.py @@ -0,0 +1,316 @@ +""" +import hashlib +from contextlib import suppress +from pathlib import Path + +import pytest +from nuxeo.exceptions import Forbidden + +from nxdrive.constants import WINDOWS + +from ..markers import windows_only +from . import LocalTest +from .conftest import OneUserTest, TwoUsersTest + + +class TestPermissionHierarchy(OneUserTest): + def setup_method(self, method): + super().setup_method(method, register_roots=False, server_profile="permission") + + self.local_1 = LocalTest(self.local_nxdrive_folder_1) + + # Make sure user workspace is created and fetch its UID + res = self.remote_document_client_1.make_file_in_user_workspace( + b"contents", "USFile.txt" + ) + self.workspace_uid = res["parentRef"] + + def teardown_method(self, method): + with suppress(Exception): + self.root_remote.delete(self.workspace_uid, use_trash=False) + super().teardown_method(method) + + def test_sync_delete_root(self): + # Create test folder in user workspace as test user + remote = self.remote_document_client_1 + test_folder_uid = remote.make_folder(self.workspace_uid, "test_folder") + # Create a document in the test folder + remote.make_file(test_folder_uid, "test_file.txt", content=b"Some content.") + + # Register test folder as a sync root + remote.register_as_root(test_folder_uid) + + self.engine_1.start() + self.wait_sync(wait_for_async=True) + + # Check locally synchronized content + root = Path("My Docs/test_folder") + assert self.local_1.exists(root) + assert self.local_1.exists(root / "test_file.txt") + + # Delete test folder + remote.delete(test_folder_uid) + self.wait_sync(wait_for_async=True) + + # Check locally synchronized content + assert not self.local_1.exists(root) + assert not self.local_1.get_children_info("/My Docs") + + +class TestPermissionHierarchy2(TwoUsersTest): + def setup_method(self, method): + super().setup_method(method, register_roots=False, server_profile="permission") + + self.local_1 = LocalTest(self.local_nxdrive_folder_1) + self.local_2 = LocalTest(self.local_nxdrive_folder_2) + + # Make sure user workspace is created and fetch its UID + res = self.remote_document_client_1.make_file_in_user_workspace( + b"contents", "USFile.txt" + ) + self.workspace_uid = res["parentRef"] + + def teardown_method(self, method): + with suppress(Exception): + self.root_remote.delete(self.workspace_uid, use_trash=False) + super().teardown_method(method) + + @windows_only(reason="Only Windows ignores file permissions.") + def test_permission_awareness_after_resume(self): + remote = self.remote_document_client_1 + remote2 = self.remote_document_client_2 + local = self.local_2 + + root = remote.make_folder(self.workspace_uid, "testing") + folder = remote.make_folder(root, "FolderA") + + # Register user workspace as a sync root for user1 + remote.register_as_root(self.workspace_uid) + + # Register root folder as a sync root for user2 + self.set_readonly(self.user_2, root, grant=False) + remote2.register_as_root(root) + + # Read only folder for user 2 + self.set_readonly(self.user_2, folder) + + # Start'n sync + self.engine_2.start() + self.wait_sync( + wait_for_async=True, wait_for_engine_1=False, wait_for_engine_2=True + ) + self.engine_2.stop() + + # Checks + root = Path("Other Docs/testing/FolderA") + assert local.exists(root) + + # Create documents + abspath = local.abspath(root) + new_folder = abspath / "FolderCreated" + new_folder.mkdir() + (new_folder / "file.txt").write_bytes(b"content") + + # Change from RO to RW for the shared folder + self.set_readonly(self.user_2, folder, grant=False) + + # Sync + self.engine_2.start() + self.wait_sync( + wait_for_async=True, wait_for_engine_1=False, wait_for_engine_2=True + ) + + # Status check + dao = self.engine_2.dao + assert not dao.get_errors(limit=0) + assert not dao.get_filters() + assert not dao.get_unsynchronizeds() + + # Local check + assert local.exists(root / "FolderCreated/file.txt") + + # Remote checks + children = remote.get_children_info(folder) + assert len(children) == 1 + assert children[0].name == "FolderCreated" + + children = remote.get_children_info(children[0].uid) + assert len(children) == 1 + assert children[0].name == "file.txt" + + def test_sync_delete_shared_folder(self): + remote = self.remote_document_client_1 + self.engine_1.start() + # Register user workspace as a sync root for user1 + remote.register_as_root(self.workspace_uid) + + # Create test folder in user workspace as user1 + test_folder_uid = remote.make_folder(self.workspace_uid, "test_folder") + self.wait_sync(wait_for_async=True) + assert self.local_1.exists("/My Docs") + assert self.local_1.exists("/My Docs/test_folder") + + # Grant ReadWrite permission to user2 on test folder + self.set_readonly(self.user_2, test_folder_uid, grant=False) + self.wait_sync(wait_for_async=True) + + # Register test folder as a sync root for user2 + self.remote_document_client_2.register_as_root(test_folder_uid) + self.wait_sync(wait_for_async=True) + + # Delete test folder + remote.delete(test_folder_uid) + self.wait_sync(wait_for_async=True) + + # Check locally synchronized content + assert not self.local_1.exists("/My Docs/test_folder") + children = self.local_1.get_children_info("/My Docs") + assert len(children) == 1 + + @pytest.mark.randombug("NXDRIVE-1582") + def test_sync_unshared_folder(self): + # Register user workspace as a sync root for user1 + remote = self.remote_document_client_1 + remote2 = self.remote_document_client_2 + remote.register_as_root(self.workspace_uid) + + self.engine_2.start() + self.wait_sync( + wait_for_async=True, wait_for_engine_2=True, wait_for_engine_1=False + ) + # Check locally synchronized content + assert self.local_2.exists("/My Docs") + assert self.local_2.exists("/Other Docs") + + # Create test folder in user workspace as user1 + test_folder_uid = remote.make_folder(self.workspace_uid, "Folder A") + folder_b = remote.make_folder(test_folder_uid, "Folder B") + folder_c = remote.make_folder(folder_b, "Folder C") + folder_d = remote.make_folder(folder_c, "Folder D") + remote.make_folder(folder_d, "Folder E") + + # Grant ReadWrite permission to user2 on test folder + self.set_readonly(self.user_2, test_folder_uid, grant=False) + + # Register test folder as a sync root for user2 + remote2.register_as_root(test_folder_uid) + self.wait_sync( + wait_for_async=True, wait_for_engine_2=True, wait_for_engine_1=False + ) + assert self.local_2.exists("/Other Docs/Folder A") + assert self.local_2.exists( + "/Other Docs/Folder A/Folder B/Folder C/Folder D/Folder E" + ) + # Use for later get_fs_item checks + folder_b_fs = self.local_2.get_remote_id("/Other Docs/Folder A/Folder B") + folder_a_fs = self.local_2.get_remote_id("/Other Docs/Folder A") + # Unshare Folder A and share Folder C + self.root_remote.execute( + command="Document.RemoveACL", + input_obj=f"doc:{test_folder_uid}", + acl="local", + ) + self.set_readonly(self.user_2, folder_c) + remote2.register_as_root(folder_c) + self.wait_sync( + wait_for_async=True, wait_for_engine_2=True, wait_for_engine_1=False + ) + assert not self.local_2.exists("/Other Docs/Folder A") + assert self.local_2.exists("/Other Docs/Folder C") + assert self.local_2.exists("/Other Docs/Folder C/Folder D/Folder E") + + # Verify that we don't have any 403 errors + assert not self.remote_2.get_fs_item(folder_a_fs) + assert not self.remote_2.get_fs_item(folder_b_fs) + + def test_sync_move_permission_removal(self): + if WINDOWS: + self.app.quit() + pytest.xfail( + "Following the NXDRIVE-836 fix, this test always fails because " + "when moving a file from a RO folder to a RW folder will end up" + " being a simple file creation. As we cannot know events order," + " we cannot understand a local move is being made just before " + "a security update. To bo fixed with the engine refactoring." + ) + + remote = self.remote_document_client_1 + remote2 = self.remote_document_client_2 + local = self.local_2 + + root = remote.make_folder(self.workspace_uid, "testing") + readonly = remote.make_folder(root, "ReadFolder") + readwrite = remote.make_folder(root, "WriteFolder") + + # Register user workspace as a sync root for user1 + remote.register_as_root(self.workspace_uid) + + # Register root folder as a sync root for user2 + self.set_readonly(self.user_2, root, grant=False) + remote2.register_as_root(root) + + # Make one read-only document + remote.make_file_with_blob(readonly, "file_ro.txt", b"Read-only doc.") + + # Read only folder for user 2 + self.set_readonly(self.user_2, readonly) + + # Basic test to be sure we are in RO mode + with pytest.raises(Forbidden): + remote2.make_file(readonly, "test.txt", content=b"test") + + # ReadWrite folder for user 2 + self.set_readonly(self.user_2, readwrite, grant=False) + + # Start'n sync + self.engine_2.start() + self.wait_sync( + wait_for_async=True, wait_for_engine_1=False, wait_for_engine_2=True + ) + + # Checks + root = Path("Other Docs/testing") + assert local.exists(root / "ReadFolder") + assert local.exists(root / "ReadFolder/file_ro.txt") + assert local.exists(root / "WriteFolder") + content = local.get_content(root / "ReadFolder/file_ro.txt") + assert content == b"Read-only doc." + + # Move the read-only file + local.move( + root / "ReadFolder/file_ro.txt", root / "WriteFolder", name="file_rw.txt" + ) + + # Remove RO on ReadFolder folder + self.set_readonly(self.user_2, readonly, grant=False) + + # Edit the new writable file + new_data = b"Now a fresh read-write doc." + local.update_content(root / "WriteFolder/file_rw.txt", new_data) + + # Sync + self.wait_sync( + wait_for_async=True, wait_for_engine_1=False, wait_for_engine_2=True + ) + + # Status check + dao = self.engine_2.dao + assert not dao.get_errors(limit=0) + assert not dao.get_filters() + assert not dao.get_unsynchronizeds() + + # Local checks + assert not local.exists(root / "ReadFolder/file_ro.txt") + assert not local.exists(root / "WriteFolder/file_ro.txt") + assert local.exists(root / "WriteFolder/file_rw.txt") + content = local.get_content(root / "WriteFolder/file_rw.txt") + assert content == new_data + + # Remote checks + assert not remote.get_children_info(readonly) + children = remote.get_children_info(readwrite) + assert len(children) == 1 + blob = children[0].get_blob("file:content") + assert blob.name == "file_rw.txt" + assert blob.digest == hashlib.md5(new_data).hexdigest() +""" diff --git a/tests/functional/test_readonly.py b/tests/functional/test_readonly.py new file mode 100644 index 0000000000..bdad45bacb --- /dev/null +++ b/tests/functional/test_readonly.py @@ -0,0 +1,532 @@ +import shutil +from logging import getLogger +from pathlib import Path + +import pytest +from nuxeo.exceptions import Forbidden + +from nxdrive.constants import SYNC_ROOT, WINDOWS + +from ..markers import windows_only +from .conftest import FS_ITEM_ID_PREFIX, SYNC_ROOT_FAC_ID, OneUserTest, TwoUsersTest + +log = getLogger(__name__) + + +def touch(path: Path): + if WINDOWS: + path.parent.mkdir(exist_ok=True) + try: + path.write_bytes(b"Test") + except OSError: + log.warning("Unable to touch") + return False + return True + + +class TestReadOnly(OneUserTest): + def setUp(self): + self.engine_1.start() + self.wait_sync(wait_for_async=True) + + def test_file_add(self): + """ + Should not be able to create files in root folder. + On Windows, those files are ignored. + """ + + remote = self.remote_document_client_1 + + # Try to create the file + state = touch(self.local_nxdrive_folder_1 / "test.txt") + + if not WINDOWS: + # The creation must have failed + assert not state + else: + # The file is locally created and should be ignored + self.wait_sync(wait_for_async=True) + ignored = self.engine_1.dao.get_unsynchronizeds() + assert len(ignored) == 1 + assert ignored[0].local_path == Path("test.txt") + + # Check there is nothing uploaded to the server + assert not remote.get_children_info("/") + + def test_file_content_change(self): + """ + No upload server side but possible to change the file locally + without error, if the OS allows it (unlikely). + """ + + local = self.local_1 + remote = self.remote_document_client_1 + + # Create documents and sync + folder = remote.make_folder("/", "folder") + file = remote.make_file(folder, "foo.txt", content=b"42") + self.set_readonly(self.user_1, f"{self.ws.path}/folder") + self.wait_sync(wait_for_async=True) + assert remote.exists("/folder") + assert remote.exists("/folder/foo.txt") + + # Try to change the file content locally + with pytest.raises(OSError): + local.abspath("/folder/foo.txt").write_bytes(b"Change") + + with pytest.raises(OSError): + local.update_content("/folder/foo.txt", b"Locally changed") + + # Try to change the file content remotely + with pytest.raises(Forbidden): + remote.update(file, properties={"note:note": "Remotely changed"}) + + def test_file_delete(self): + """Local deletions are filtered.""" + + remote = self.remote_document_client_1 + local = self.local_1 + + folder = remote.make_folder("/", "test-ro") + remote.make_file(folder, "test.txt", content=b"42") + self.set_readonly(self.user_1, f"{self.ws.path}/test-ro") + self.wait_sync(wait_for_async=True) + assert local.exists("/test-ro/test.txt") + assert not self.engine_1.dao.get_filters() + + # Delete the file and check if is re-downloaded + local.unset_readonly("/test-ro") + local.delete("/test-ro/test.txt") + self.wait_sync(wait_win=True) + assert not local.exists("/test-ro/test.txt") + + # Check that it is filtered + assert self.engine_1.dao.get_filters() + + # Check the file is still present on the server + assert remote.exists("/test-ro/test.txt") + + def test_file_move_from_ro_to_ro(self): + """ + Local moves from a read-only folder to a read-only folder. + - source is ignored + - destination is ignored + + Server side: no changes. + Client side: no errors. + """ + + remote = self.remote_document_client_1 + local = self.local_1 + + # folder-src is the source from where documents will be moved, RO + # folder-dst is the destination where documents will be moved, RO + src = remote.make_folder("/", "folder-src") + remote.make_folder("/", "folder-dst") + remote.make_file(src, "here.txt", content=b"stay here") + self.set_readonly(self.user_1, self.ws.path) + self.wait_sync(wait_for_async=True) + assert remote.exists("/folder-src/here.txt") + assert remote.exists("/folder-dst") + + doc_abs = local.abspath("/folder-src") / "here.txt" + dst_abs = local.abspath("/folder-dst") + if not WINDOWS: + # The move should fail + with pytest.raises(OSError): + shutil.move(doc_abs, dst_abs) + else: + # The move happens + shutil.move(doc_abs, dst_abs) + self.wait_sync(wait_win=True) + + # Check that nothing has changed + assert not local.exists("/folder-src/here.txt") + assert local.exists("/folder-dst/here.txt") + assert remote.exists("/folder-src/here.txt") + + # But also, check that the server received nothing + assert not remote.exists("/folder-dst/here.txt") + + # We should not have any error + assert not self.engine_1.dao.get_errors(limit=0) + + def test_file_move_from_ro_to_rw(self): + """ + Local moves from a read-only folder to a read-write folder. + - source is ignored + - destination is seen as a creation + + Server side: only the files in the RW folder are created. + Client side: no errors. + + Associated ticket: NXDRIVE-836 + """ + + remote = self.remote_document_client_1 + local = self.local_1 + + # folder-ro is the source from where documents will be moved, RO + # folder-rw is the destination where documents will be moved, RW + src = remote.make_folder("/", "folder-ro") + remote.make_folder("/", "folder-rw") + remote.make_file(src, "here.txt", content=b"stay here") + self.set_readonly(self.user_1, f"{self.ws.path}/folder-ro") + self.wait_sync(wait_for_async=True) + assert local.exists("/folder-ro/here.txt") + assert local.exists("/folder-rw") + + doc_abs = local.abspath("/folder-ro") / "here.txt" + dst_abs = local.abspath("/folder-rw") + if not WINDOWS: + # The move should fail + with pytest.raises(OSError): + shutil.move(doc_abs, dst_abs) + else: + # The move happens + shutil.move(doc_abs, dst_abs) + self.wait_sync(wait_win=True) + + # Check that nothing has changed + assert not local.exists("/folder-ro/here.txt") + assert local.exists("/folder-rw/here.txt") + assert remote.exists("/folder-ro/here.txt") + + # But also, check that the server received the new document because + # the destination is RW + assert remote.exists("/folder-rw/here.txt") + + # We should not have any error + assert not self.engine_1.dao.get_errors(limit=0) + + """ + @pytest.mark.skip(True, reason="TODO NXDRIVE-740") + def test_file_move_from_rw_to_ro(self): + pass + """ + + def test_file_rename(self): + """ + No upload server side but possible to rename the file locally + without error. + """ + + local = self.local_1 + remote = self.remote_document_client_1 + + # Create documents and sync + folder = remote.make_folder("/", "folder") + remote.make_file(folder, "foo.txt", content=b"42") + self.set_readonly(self.user_1, f"{self.ws.path}/folder") + self.wait_sync(wait_for_async=True) + assert local.exists("/folder") + assert local.exists("/folder/foo.txt") + + # Locally rename the file + doc = local.abspath("/folder") / "foo.txt" + dst = local.abspath("/folder") / "bar.txt" + if not WINDOWS: + # The rename should fail + with pytest.raises(OSError): + doc.rename(dst) + else: + # The rename happens locally but nothing remotely + doc.rename(dst) + self.wait_sync() + assert remote.exists("/folder/foo.txt") + assert not remote.exists("/folder/bar.txt") + + # We should not have any error + assert not self.engine_1.dao.get_errors(limit=0) + + def test_folder_add(self): + """ + Should not be able to create folders in root folder. + On Windows, those folders are ignored. + """ + + remote = self.remote_document_client_1 + folder = self.local_nxdrive_folder_1 / "foo" / "test.txt" + + if not WINDOWS: + # The creation must have failed + assert not touch(folder) + else: + # The folder and its child are locally created + touch(folder) + + # Sync and check that it is ignored + self.wait_sync(wait_for_async=True) + ignored = [ + d.local_path.as_posix() for d in self.engine_1.dao.get_unsynchronizeds() + ] + assert sorted(ignored) == ["foo", "foo/test.txt"] + + # Check there is nothing uploaded to the server + assert not remote.get_children_info("/") + + def test_folder_delete(self): + """Local deletions are filtered.""" + + remote = self.remote_document_client_1 + local = self.local_1 + + folder = remote.make_folder("/", "test-ro") + remote.make_folder(folder, "foo") + self.set_readonly(self.user_1, f"{self.ws.path}/test-ro") + self.wait_sync(wait_for_async=True) + assert local.exists("/test-ro/foo") + assert not self.engine_1.dao.get_filters() + + # Delete the file and check if is re-downloaded + local.unset_readonly("/test-ro") + local.delete("/test-ro/foo") + self.wait_sync(wait_win=True) + assert not local.exists("/test-ro/foo") + + # Check that it is filtered + assert self.engine_1.dao.get_filters() + + # Check the file is still present on the server + assert remote.exists("/test-ro/foo") + + def test_folder_move_from_ro_to_ro(self): + """ + Local moves from a read-only folder to a read-only folder. + - source is ignored + - destination is ignored + + Server side: no changes. + Client side: no errors. + """ + + remote = self.remote_document_client_1 + local = self.local_1 + + # folder-src is the source that will be moved, RO + # folder-dst is the destination, RO + folder_ro1 = remote.make_folder("/", "folder-src") + folder_ro2 = remote.make_folder("/", "folder-dst") + remote.make_file(folder_ro1, "here.txt", content=b"stay here") + remote.make_file(folder_ro2, "there.txt", content=b"stay here too") + self.set_readonly(self.user_1, self.ws.path) + self.wait_sync(wait_for_async=True) + assert local.exists("/folder-src/here.txt") + assert remote.exists("/folder-dst") + + src = local.abspath("/folder-src") + dst = local.abspath("/folder-dst") + if not WINDOWS: + # The move should fail + with pytest.raises(OSError): + shutil.move(src, dst) + else: + # The move happens + shutil.move(src, dst) + self.wait_sync(wait_win=True) + + # Check that nothing has changed + assert not local.exists("/folder-src") + assert local.exists("/folder-dst/there.txt") + assert local.exists("/folder-dst/folder-src/here.txt") + assert remote.exists("/folder-src/here.txt") + assert remote.exists("/folder-dst/there.txt") + + # But also, check that the server received nothing + assert not remote.exists("/folder-dst/folder-src") + + # We should not have any error + assert not self.engine_1.dao.get_errors(limit=0) + + def test_folder_move_from_ro_to_rw(self): + """ + Local moves from a read-only folder to a read-write folder. + - source is ignored + - destination is filtered + + Server side: no changes. + Client side: no errors. + """ + + remote = self.remote_document_client_1 + local = self.local_1 + + # folder-src is the source that will be moved, RO + # folder-dst is the destination, RO + folder_ro1 = remote.make_folder("/", "folder-src") + folder_ro2 = remote.make_folder("/", "folder-dst") + remote.make_file(folder_ro1, "here.txt", content=b"stay here") + remote.make_file(folder_ro2, "there.txt", content=b"stay here too") + self.set_readonly(self.user_1, self.ws.path) + self.wait_sync(wait_for_async=True) + assert local.exists("/folder-src/here.txt") + assert remote.exists("/folder-dst") + + src = local.abspath("/folder-src") + dst = local.abspath("/folder-dst") + if not WINDOWS: + # The move should fail + with pytest.raises(OSError): + shutil.move(src, dst) + else: + # The move happens + shutil.move(src, dst) + self.wait_sync(wait_win=True) + + # Check that nothing has changed + assert not local.exists("/folder-src") + assert local.exists("/folder-dst/there.txt") + assert local.exists("/folder-dst/folder-src/here.txt") + assert remote.exists("/folder-src/here.txt") + assert remote.exists("/folder-dst/there.txt") + assert not remote.exists("/folder-dst/folder-src") + assert not remote.exists("/folder-dst/folder-src/here.txt") + + # We should not have any error + assert not self.engine_1.dao.get_errors(limit=0) + + # Check that it is filtered + assert self.engine_1.dao.get_filters() + doc_pair = remote.get_info(folder_ro1) + ref = ( + f"{SYNC_ROOT}/{SYNC_ROOT_FAC_ID}" + f"{doc_pair.root}/{FS_ITEM_ID_PREFIX}{doc_pair.uid}" + ) + assert self.engine_1.dao.is_filter(ref) + + """ + @pytest.mark.skip(True, reason="TODO NXDRIVE-740") + def test_folder_move_from_rw_to_ro(self): + pass + """ + + def test_folder_rename(self): + """ + No upload server side but possible to rename the folder locally + without error, and it will be re-renamed. + """ + + local = self.local_1 + remote = self.remote_document_client_1 + + # Create documents and sync + remote.make_folder("/", "foo") + self.set_readonly(self.user_1, self.ws.path) + self.wait_sync(wait_for_async=True) + assert local.exists("/foo") + + # Check can_delete flag in pair state + state = self.get_dao_state_from_engine_1("foo") + assert not state.remote_can_delete + + # Locally rename the folder + src = local.abspath("/foo") + dst = src.with_name("bar") + if not WINDOWS: + # The rename should fail + with pytest.raises(OSError): + src.rename(dst) + else: + # The rename happens locally but: + # - nothing remotely + # - the folder is re-renamed to its original name + src.rename(dst) + self.wait_sync() + assert local.exists("/foo") + assert not local.exists("/bar") + assert remote.exists("/foo") + assert not remote.exists("/bar") + + # We should not have any error + assert not self.engine_1.dao.get_errors(limit=0) + + @windows_only + def test_nxdrive_836(self): + """ + NXDRIVE-836: Bad behaviors with read-only documents on Windows. + + Scenario: + + 1. User1: Server: Create folder "ReadFolder" and share with User2 with read + permission and upload doc/xml files into it + 2. User1: Server: Create folder "MEFolder" and share with User2 with Manage + Everything permission + 3. User2: Server: Enable Nuxeo Drive Synchronization for both folders + 4. User2: Client: Launch Drive client and Wait for sync completion + 5. User2: Client: Move the files(drag and drop) from "ReadFolder" to "MEFolder" + 6. User1: Server: Remove the read permission for "ReadFolder" for User2 + 7. User2: Client: Remove the read only attribute for moved files in "MEFolder" + and Edit the files. + + Expected Result: Files should sync with the server. + """ + + local = self.local_1 + remote = self.remote_document_client_1 + + # Create documents and sync + remote.make_folder("/", "ReadFolder") + remote.make_folder("/", "MEFolder") + remote.make_file("/ReadFolder", "shareme.doc", content=b"Scheherazade") + self.set_readonly(self.user_1, f"{self.ws.path}/ReadFolder") + self.wait_sync(wait_for_async=True) + + # Checks + for client in (remote, local): + for doc in ("/ReadFolder/shareme.doc", "/MEFolder"): + assert client.exists(doc) + + # Move + src = local.abspath("/ReadFolder/shareme.doc") + dst = local.abspath("/MEFolder") + shutil.move(src, dst) + self.wait_sync(wait_win=True) + + # Remove read-only + self.set_readonly(self.user_1, f"{self.ws.path}/ReadFolder", grant=False) + self.wait_sync(wait_for_async=True) + local.unset_readonly("/MEFolder/shareme.doc") + + # Checks + assert remote.exists("/ReadFolder/shareme.doc") + assert remote.exists("/MEFolder/shareme.doc") + assert not self.engine_1.dao.get_errors(limit=0) + assert not self.engine_1.dao.get_unsynchronizeds() + + +class TestReadOnly2(TwoUsersTest): + def test_document_locked(self): + """Check locked documents: they are read-only.""" + + self.engine_1.start() + self.wait_sync(wait_for_async=True) + remote = self.remote_document_client_1 + remote.make_folder("/", "Test locking") + remote.make_file("/Test locking", "myDoc.odt", content=b"Some content") + filepath = "/Test locking/myDoc.odt" + + self.wait_sync(wait_for_async=True) + + # Check readonly flag is not set for a document that isn't locked + user1_file_path = self.sync_root_folder_1 / filepath.lstrip("/") + assert user1_file_path.exists() + assert touch(user1_file_path) + self.wait_sync() + + # Check readonly flag is not set for a document locked by the + # current user + remote.lock(filepath) + self.wait_sync(wait_for_async=True) + assert touch(user1_file_path) + remote.unlock(filepath) + self.wait_sync(wait_for_async=True) + + # Check readonly flag is set for a document locked by another user + self.remote_document_client_2.lock(filepath) + self.wait_sync(wait_for_async=True) + assert not touch(user1_file_path) + + # Check readonly flag is unset for a document unlocked by another user + self.remote_document_client_2.unlock(filepath) + self.wait_sync(wait_for_async=True) + assert touch(user1_file_path) diff --git a/tests/functional/test_reinit_database.py b/tests/functional/test_reinit_database.py new file mode 100644 index 0000000000..86935e0965 --- /dev/null +++ b/tests/functional/test_reinit_database.py @@ -0,0 +1,118 @@ +import time +from pathlib import Path + +from .conftest import OS_STAT_MTIME_RESOLUTION, OneUserTest + + +class TestReinitDatabase(OneUserTest): + def setUp(self): + self.local = self.local_1 + self.remote = self.remote_document_client_1 + + # Make a folder and a file + self.remote.make_folder("/", "Test folder") + self.file = self.remote.make_file( + "/Test folder", "Test.txt", content=b"This is some content" + ) + + # Start engine and wait for synchronization + self.engine_1.start() + self.wait_sync(wait_for_async=True) + + assert self.local.exists("/Test folder") + assert self.local.exists("/Test folder/Test.txt") + + # Destroy database but keep synced files as we just need to test the database + self.unbind_engine(1, purge=False) + self.bind_engine(1, start_engine=False) + + def _check_states(self): + rows = self.engine_1.dao.get_states_from_partial_local(Path()) + for row in rows: + assert row.pair_state == "synchronized" + + def _check_conflict_detection(self): + assert len(self.engine_1.dao.get_conflicts()) == 1 + + """ + def test_synchronize_folderish_and_same_digest(self): + # Start engine and wait for synchronization + self.engine_1.start() + self.wait_sync(wait_for_async=True) + + # Check everything is synchronized + self._check_states() + """ + + def test_synchronize_remote_change(self): + # Modify the remote file + self.remote.update(self.file, properties={"note:note": "Content has changed"}) + + # Start engine and wait for synchronization + self.engine_1.start() + self.wait_sync(wait_for_async=True, timeout=5, fail_if_timeout=False) + + # Check that a conflict is detected + self._check_conflict_detection() + file_state = self.engine_1.dao.get_state_from_local( + Path(self.workspace_title) / "Test folder/Test.txt" + ) + assert file_state + assert file_state.pair_state == "conflicted" + + # Assert content of the local file has not changed + content = self.local.get_content("/Test folder/Test.txt") + assert content == b"This is some content" + + def test_synchronize_local_change(self): + # Modify the local file + time.sleep(OS_STAT_MTIME_RESOLUTION) + self.local.update_content("/Test folder/Test.txt", b"Content has changed") + + # Start engine and wait for synchronization + self.engine_1.start() + self.wait_sync(timeout=5, fail_if_timeout=False) + + # Check that a conflict is detected + self._check_conflict_detection() + file_state = self.engine_1.dao.get_state_from_local( + Path(self.workspace_title) / "Test folder/Test.txt" + ) + assert file_state + assert file_state.pair_state == "conflicted" + + # Assert content of the remote file has not changed + content = self.remote.get_note(self.file) + assert content == b"This is some content" + + """ + def test_synchronize_remote_and_local_change(self): + # Modify the remote file + self.remote.update( + self.file, properties={"note:note": "Content has remotely changed"} + ) + + # Modify the local file + time.sleep(OS_STAT_MTIME_RESOLUTION) + self.local.update_content( + "/Test folder/Test.txt", b"Content has locally changed" + ) + + # Start engine and wait for synchronization + self.engine_1.start() + self.wait_sync(wait_for_async=True, timeout=5, fail_if_timeout=False) + + # Check that a conflict is detected + self._check_conflict_detection() + file_state = self.engine_1.dao.get_state_from_local( + Path(self.workspace_title) / "Test folder/Test.txt" + ) + assert file_state + assert file_state.pair_state == "conflicted" + + # Assert content of the local and remote files has not changed + content = self.local.get_content("/Test folder/Test.txt") + assert content == b"Content has locally changed" + content = self.remote.get_note(self.file) + assert content == b"Content has remotely changed" + """ diff --git a/tests/functional/test_remote_deletion.py b/tests/functional/test_remote_deletion.py new file mode 100644 index 0000000000..e75972a739 --- /dev/null +++ b/tests/functional/test_remote_deletion.py @@ -0,0 +1,248 @@ +import time +from logging import getLogger +from unittest.mock import patch + +import pytest +from nuxeo.utils import version_lt + +from nxdrive.engine.engine import Engine +from nxdrive.options import Options + +# from .conftest import OS_STAT_MTIME_RESOLUTION, OneUserTest, TwoUsersTest +from .conftest import OneUserTest, TwoUsersTest + +log = getLogger(__name__) + + +class TestRemoteDeletion(OneUserTest): + def test_synchronize_remote_deletion(self): + """Test that deleting remote documents is impacted client side + + Use cases: + - Remotely delete a regular folder + => Folder should be locally deleted + - Remotely restore folder from the trash + => Folder should be locally re-created + - Remotely delete a synchronization root + => Synchronization root should be locally deleted + - Remotely restore synchronization root from the trash + => Synchronization root should be locally re-created + + See TestIntegrationSecurityUpdates.test_synchronize_denying_read_access + as the same uses cases are tested + """ + # Bind the server and root workspace + self.engine_1.start() + # Get local and remote clients + local = self.local_1 + remote = self.remote_document_client_1 + remote_admin = self.root_remote + + # Create documents in the remote root workspace + # then synchronize + folder_id = remote.make_folder("/", "Test folder") + file_id = remote.make_file("/Test folder", "joe.txt", content=b"Some content") + + self.wait_sync(wait_for_async=True) + assert local.exists("/Test folder") + assert local.exists("/Test folder/joe.txt") + + # Delete remote folder then synchronize + remote.delete("/Test folder") + self.wait_sync(wait_for_async=True) + assert not local.exists("/Test folder") + + # Restore folder from trash then synchronize + remote.undelete(folder_id) + if version_lt(remote.client.server_version, "10.2"): + remote.undelete(file_id) + self.wait_sync(wait_for_async=True) + assert local.exists("/Test folder") + assert local.exists("/Test folder/joe.txt") + + # Delete sync root then synchronize + remote_admin.delete(self.workspace) + self.wait_sync(wait_for_async=True) + assert not local.exists("/") + + # Restore sync root from trash then synchronize + remote_admin.undelete(self.workspace) + if version_lt(remote.client.server_version, "10.2"): + remote_admin.undelete(folder_id) + remote_admin.undelete(file_id) + self.wait_sync(wait_for_async=True) + assert local.exists("/") + assert local.exists("/Test folder") + assert local.exists("/Test folder/joe.txt") + + """ + def test_synchronize_remote_deletion_while_upload(self): + local = self.local_1 + remote = self.remote_document_client_1 + self.engine_1.start() + + def callback(uploader): + ""Add delay when upload and download."" + time.sleep(1) + Engine.suspend_client(self.engine_1, uploader) + + with patch.object(self.engine_1.remote, "download_callback", new=callback): + # Create documents in the remote root workspace + remote.make_folder("/", "Test folder") + self.wait_sync(wait_for_async=True) + + # Create a document by streaming a binary file + file_path = local.abspath("/Test folder") / "testFile.pdf" + copyfile(self.location / "resources" / "files" / "testFile.pdf", file_path) + file_path = local.abspath("/Test folder") / "testFile2.pdf" + copyfile(self.location / "resources" / "files" / "testFile.pdf", file_path) + + # Delete remote folder then synchronize + remote.delete("/Test folder") + self.wait_sync(wait_for_async=True) + assert not local.exists("/Test folder") + """ + + @Options.mock() + @pytest.mark.randombug("NXDRIVE-1329", repeat=4) + def test_synchronize_remote_deletion_while_download_file(self): + local = self.local_1 + remote = self.remote_document_client_1 + + def callback(uploader): + """Add delay when upload and download.""" + if not self.engine_1.has_delete: + # Delete remote file while downloading + try: + remote.delete("/Test folder/testFile.pdf") + except Exception: + log.exception("Cannot trash") + else: + self.engine_1.has_delete = True + time.sleep(1) + Engine.suspend_client(self.engine_1, uploader) + + self.engine_1.start() + self.engine_1.has_delete = False + + filepath = self.location / "resources" / "files" / "testFile.pdf" + + Options.set("tmp_file_limit", 0.1, setter="manual") + with patch.object(self.engine_1.remote, "download_callback", new=callback): + remote.make_folder("/", "Test folder") + remote.make_file("/Test folder", "testFile.pdf", file_path=filepath) + + self.wait_sync(wait_for_async=True) + # Sometimes the server does not return the document trash action in summary changes. + # So it may fail on the next assertion. + assert not local.exists("/Test folder/testFile.pdf") + + """ + def test_synchronize_remote_deletion_with_close_name(self): + self.engine_1.start() + self.wait_sync(wait_for_async=True) + local = self.local_1 + remote = self.remote_document_client_1 + remote.make_folder("/", "Folder 1") + remote.make_folder("/", "Folder 1b") + remote.make_folder("/", "Folder 1c") + self.wait_sync(wait_for_async=True) + assert local.exists("/Folder 1") + assert local.exists("/Folder 1b") + assert local.exists("/Folder 1c") + remote.delete("/Folder 1") + remote.delete("/Folder 1b") + remote.delete("/Folder 1c") + self.wait_sync(wait_for_async=True) + assert not local.exists("/Folder 1") + assert not local.exists("/Folder 1b") + assert not local.exists("/Folder 1c") + """ + + """ + def test_synchronize_remote_deletion_with_wrong_local_remote_id(self): + local = self.local_1 + remote = self.remote_document_client_1 + remote.make_file("/", "joe.txt", content=b"Some content") + + self.engine_1.start() + self.wait_sync(wait_for_async=True) + + assert local.exists("/joe.txt") + + self.engine_1.suspend() + local.set_remote_id(Path("joe.txt"), "wrong-id") + remote.delete("/joe.txt") + + self.engine_1.resume() + self.wait_sync(wait_for_async=True) + assert local.exists("/joe.txt") + """ + + """ + def test_synchronize_local_folder_rename_remote_deletion(self): + ""Test local folder rename followed by remote deletion"" + # Bind the server and root workspace + + # Get local and remote clients + self.engine_1.start() + local = self.local_1 + remote = self.remote_document_client_1 + + # Create a folder with a child file in the remote root workspace + # then synchronize + test_folder_uid = remote.make_folder("/", "Test folder") + remote.make_file(test_folder_uid, "joe.odt", content=b"Some content") + + self.wait_sync(wait_for_async=True) + assert local.exists("/Test folder") + assert local.exists("/Test folder/joe.odt") + + # Locally rename the folder then synchronize + time.sleep(OS_STAT_MTIME_RESOLUTION) + local.rename("/Test folder", "Test folder renamed") + + self.wait_sync() + assert not local.exists("/Test folder") + assert local.exists("/Test folder renamed") + assert remote.get_info(test_folder_uid).name == "Test folder renamed" + + # Delete remote folder then synchronize + remote.delete("/Test folder renamed") + + self.wait_sync(wait_for_async=True) + assert not remote.exists("/Test folder renamed") + assert not local.exists("/Test folder renamed") + """ + + +class TestRemoteDeletion2(TwoUsersTest): + """ + def test_synchronize_local_folder_lost_permission(self): + ""Test local folder rename followed by remote deletion"" + # Bind the server and root workspace + + # Get local and remote clients + self.engine_2.start() + local = self.local_2 + remote = self.remote_document_client_2 + + # Create a folder with a child file in the remote root workspace + # then synchronize + test_folder_uid = remote.make_folder("/", "Test folder") + remote.make_file(test_folder_uid, "joe.odt", content=b"Some content") + + self.wait_sync( + wait_for_async=True, wait_for_engine_1=False, wait_for_engine_2=True + ) + assert local.exists("/Test folder") + assert local.exists("/Test folder/joe.odt") + input_obj = "doc:" + self.workspace + self.root_remote.execute( + command="Document.RemoveACL", input_obj=input_obj, acl="local" + ) + self.wait_sync( + wait_for_async=True, wait_for_engine_1=False, wait_for_engine_2=True + ) + assert not local.exists("/Test folder") + """ diff --git a/tests/functional/test_remote_move_and_rename.py b/tests/functional/test_remote_move_and_rename.py new file mode 100644 index 0000000000..f0d8a93789 --- /dev/null +++ b/tests/functional/test_remote_move_and_rename.py @@ -0,0 +1,888 @@ +import os.path +import time +from pathlib import Path +from shutil import copyfile +from unittest.mock import patch + +import pytest + +from nxdrive.engine.engine import Engine + +from .. import env +from ..markers import windows_only +from . import DocRemote, LocalTest +from .conftest import REMOTE_MODIFICATION_TIME_RESOLUTION, SYNC_ROOT_FAC_ID, OneUserTest + + +class TestRemoteMoveAndRename(OneUserTest): + def setUp(self): + """ + Sets up the following remote hierarchy: + + Nuxeo Drive Test Workspace + |-- Original Fil\xe9 1.odt + |-- Original File 2.odt + |-- Original Fold\xe9r 1 + | |-- Sub-Folder 1.1 + | |-- Sub-Folder 1.2 + | |-- Original File 1.1.odt + |-- Original Folder 2 + | |-- Original File 3.odt + """ + + remote = self.remote_1 + + self.workspace_id = f"{SYNC_ROOT_FAC_ID}{self.workspace}" + self.workspace_path = Path(self.workspace_title) + + self.file_1_id = remote.make_file( + self.workspace_id, "Original Fil\xe9 1.odt", content=b"Some Content 1" + ).uid + + self.folder_1_id = remote.make_folder( + self.workspace_id, "Original Fold\xe9r 1" + ).uid + self.folder_1_1_id = remote.make_folder(self.folder_1_id, "Sub-Folder 1.1").uid + self.file_1_1_id = remote.make_file( + self.folder_1_id, "Original File 1.1.odt", content=b"Some Content 1" + ).uid + + self.folder_2_id = remote.make_folder( + self.workspace_id, "Original Folder 2" + ).uid + self.file_3_id = remote.make_file( + self.folder_2_id, "Original File 3.odt", content=b"Some Content 3" + ).uid + + self.engine_1.start() + self.wait_sync(wait_for_async=True) + + def get_state(self, remote): + return self.engine_1.dao.get_normal_state_from_remote(remote) + + def test_remote_rename_file(self): + remote = self.remote_1 + local = self.local_1 + + file_1_docref = self.file_1_id.split("#")[-1] + file_1_version = remote.get_info(file_1_docref).version + + # Rename /Original Fil\xe9 1.odt to /Renamed File 1.odt + remote.rename(self.file_1_id, "Renamed File 1.odt") + assert remote.get_fs_info(self.file_1_id).name == "Renamed File 1.odt" + + self.wait_sync(wait_for_async=True) + + version = remote.get_info(file_1_docref).version + + # Check remote file name + assert remote.get_fs_info(self.file_1_id).name == "Renamed File 1.odt" + assert file_1_version == version + + # Check local file name + assert not local.exists("/Original Fil\xe9 1.odt") + assert local.exists("/Renamed File 1.odt") + + # Check file state + file_1_state = self.get_state(self.file_1_id) + assert file_1_state.local_path == self.workspace_path / "Renamed File 1.odt" + assert file_1_state.local_name == "Renamed File 1.odt" + + # Rename 'Renamed File 1.odt' to 'Renamed Again File 1.odt' + # and 'Original File 1.1.odt' to + # 'Renamed File 1.1.odt' at the same time as they share + # the same digest but do not live in the same folder + # Wait for 1 second to make sure the file's last modification time + # will be different from the pair state's last remote update time + time.sleep(REMOTE_MODIFICATION_TIME_RESOLUTION) + remote.rename(self.file_1_id, "Renamed Again File 1.odt") + assert remote.get_fs_info(self.file_1_id).name == "Renamed Again File 1.odt" + remote.rename(self.file_1_1_id, "Renamed File 1.1 \xe9.odt") + assert remote.get_fs_info(self.file_1_1_id).name == "Renamed File 1.1 \xe9.odt" + + self.wait_sync(wait_for_async=True) + + info = remote.get_fs_info(self.file_1_id) + assert info.name == "Renamed Again File 1.odt" + assert remote.get_fs_info(self.file_1_1_id).name == "Renamed File 1.1 \xe9.odt" + version = remote.get_info(file_1_docref).version + assert file_1_version == version + + # Check local file names + assert not local.exists("/Renamed File 1.odt") + assert local.exists("/Renamed Again File 1.odt") + assert not local.exists("/Original Fold\xe9r 1/Original File 1.1.odt") + assert local.exists("/Original Fold\xe9r 1/Renamed File 1.1 \xe9.odt") + + # Check file states + file_1_state = self.get_state(self.file_1_id) + assert file_1_state.local_path == ( + self.workspace_path / "Renamed Again File 1.odt" + ) + assert file_1_state.local_name == "Renamed Again File 1.odt" + file_1_1_state = self.get_state(self.file_1_1_id) + assert file_1_1_state.local_path == ( + self.workspace_path / "Original Fold\xe9r 1/Renamed File 1.1 \xe9.odt" + ) + assert file_1_1_state.local_name == "Renamed File 1.1 \xe9.odt" + + # Test for encoding error regressions + assert self.engine_1.dao._get_recursive_condition(file_1_1_state) + assert self.engine_1.dao._get_recursive_remote_condition(file_1_1_state) + + # Check parents of renamed files to ensure it is an actual rename + # that has been performed and not a move + file_1_local_info = local.get_info("/Renamed Again File 1.odt") + assert file_1_local_info.filepath.parent == self.sync_root_folder_1 + + file_1_1_local_info = local.get_info( + "/Original Fold\xe9r 1/Renamed File 1.1 \xe9.odt" + ) + assert file_1_1_local_info.filepath.parent == ( + self.sync_root_folder_1 / "Original Fold\xe9r 1" + ) + + """ + def test_remote_rename_update_content_file(self): + remote = self.remote_1 + local = self.local_1 + + # Update the content of /'Original Fil\xe9 1.odt' and rename it + # to /Renamed File 1.odt + remote.update_content( + self.file_1_id, b"Updated content", filename="Renamed File 1.odt" + ) + assert remote.get_fs_info(self.file_1_id).name == "Renamed File 1.odt" + assert remote.get_content(self.file_1_id) == b"Updated content" + + self.wait_sync(wait_for_async=True) + + # Check local file name + assert not local.exists("/Original Fil\xe9 1.odt") + assert local.exists("/Renamed File 1.odt") + assert local.get_content("/Renamed File 1.odt") == b"Updated content" + """ + + def test_remote_move_file(self): + remote = self.remote_1 + local = self.local_1 + + # Move /Original Fil\xe9 1.odt + # to /Original Fold\xe9r 1/Original Fil\xe9 1.odt + remote.move(self.file_1_id, self.folder_1_id) + assert remote.get_fs_info(self.file_1_id).name == "Original Fil\xe9 1.odt" + assert remote.get_fs_info(self.file_1_id).parent_uid == self.folder_1_id + + self.wait_sync(wait_for_async=True) + + # Check remote file + assert remote.get_fs_info(self.file_1_id).name == "Original Fil\xe9 1.odt" + assert remote.get_fs_info(self.file_1_id).parent_uid == self.folder_1_id + + # Check local file + assert not local.exists("/Original Fil\xe9 1.odt") + assert local.exists("/Original Fold\xe9r 1/Original Fil\xe9 1.odt") + file_1_local_info = local.get_info( + "/Original Fold\xe9r 1/Original Fil\xe9 1.odt" + ) + file_1_parent_path = file_1_local_info.filepath.parent + assert file_1_parent_path == self.sync_root_folder_1 / "Original Fold\xe9r 1" + + # Check file state + file_1_state = self.get_state(self.file_1_id) + assert file_1_state.local_path == ( + self.workspace_path / "Original Fold\xe9r 1/Original Fil\xe9 1.odt" + ) + assert file_1_state.local_name == "Original Fil\xe9 1.odt" + + def test_remote_move_and_rename_file(self): + remote = self.remote_1 + local = self.local_1 + + # Rename /'Original Fil\xe9 1.odt' to /Renamed File 1.odt + remote.rename(self.file_1_id, "Renamed File 1 \xe9.odt") + remote.move(self.file_1_id, self.folder_1_id) + assert remote.get_fs_info(self.file_1_id).name == "Renamed File 1 \xe9.odt" + assert remote.get_fs_info(self.file_1_id).parent_uid == self.folder_1_id + + self.wait_sync(wait_for_async=True) + + # Check remote file + assert remote.get_fs_info(self.file_1_id).name == "Renamed File 1 \xe9.odt" + assert remote.get_fs_info(self.file_1_id).parent_uid == self.folder_1_id + + # Check local file + assert not local.exists("/Original Fil\xe9 1.odt") + assert local.exists("/Original Fold\xe9r 1/Renamed File 1 \xe9.odt") + file_1_local_info = local.get_info( + "/Original Fold\xe9r 1/Renamed File 1 \xe9.odt" + ) + file_1_parent_path = file_1_local_info.filepath.parent + assert file_1_parent_path == self.sync_root_folder_1 / "Original Fold\xe9r 1" + + # Check file state + file_1_state = self.get_state(self.file_1_id) + assert file_1_state.local_path == ( + self.workspace_path / "Original Fold\xe9r 1/Renamed File 1 \xe9.odt" + ) + assert file_1_state.local_name == "Renamed File 1 \xe9.odt" + + def test_remote_rename_folder(self): + remote = self.remote_1 + local = self.local_1 + + # Rename a non empty folder with some content + remote.rename(self.folder_1_id, "Renamed Folder 1 \xe9") + assert remote.get_fs_info(self.folder_1_id).name == "Renamed Folder 1 \xe9" + + # Synchronize: only the folder renaming is detected: all + # the descendants are automatically realigned + self.wait_sync(wait_for_async=True) + + # The client folder has been renamed + assert not local.exists("/Original Fold\xe9r 1") + assert local.exists("/Renamed Folder 1 \xe9") + + # The content of the renamed folder is left unchanged + # Check child name + assert local.exists("/Renamed Folder 1 \xe9/Original File 1.1.odt") + file_1_1_local_info = local.get_info( + "/Renamed Folder 1 \xe9/Original File 1.1.odt" + ) + file_1_1_parent_path = file_1_1_local_info.filepath.parent + assert file_1_1_parent_path == ( + self.sync_root_folder_1 / "Renamed Folder 1 \xe9" + ) + + # Check child state + file_1_1_state = self.get_state(self.file_1_1_id) + assert file_1_1_state.local_path == ( + self.workspace_path / "Renamed Folder 1 \xe9/Original File 1.1.odt" + ) + assert file_1_1_state.local_name == "Original File 1.1.odt" + + # Check child name + assert local.exists("/Renamed Folder 1 \xe9/Sub-Folder 1.1") + folder_1_1_local_info = local.get_info("/Renamed Folder 1 \xe9/Sub-Folder 1.1") + folder_1_1_parent_path = folder_1_1_local_info.filepath.parent + assert folder_1_1_parent_path == ( + self.sync_root_folder_1 / "Renamed Folder 1 \xe9" + ) + + # Check child state + folder_1_1_state = self.get_state(self.folder_1_1_id) + assert folder_1_1_state is not None + assert folder_1_1_state.local_path == ( + self.workspace_path / "Renamed Folder 1 \xe9/Sub-Folder 1.1" + ) + assert folder_1_1_state.local_name == "Sub-Folder 1.1" + + def test_remote_rename_case_folder(self): + remote = self.remote_1 + local = self.local_1 + + assert local.exists("/Original Fold\xe9r 1") + + remote.rename(self.folder_1_id, "Original folder 1") + self.wait_sync(wait_for_async=True) + assert local.exists("/Original folder 1") + + remote.rename(self.folder_1_id, "Original Fold\xe9r 1") + self.wait_sync(wait_for_async=True) + assert local.exists("/Original Fold\xe9r 1") + + """ + def test_remote_rename_case_folder_stopped(self): + remote = self.remote_1 + local = self.local_1 + self.engine_1.stop() + assert local.exists("/Original Fold\xe9r 1") + + remote.rename(self.folder_1_id, "Original folder 1") + self.engine_1.start() + self.wait_sync(wait_for_async=True) + assert local.exists("/Original folder 1") + + self.engine_1.stop() + remote.rename(self.folder_1_id, "Original Fold\xe9r 1") + self.engine_1.start() + self.wait_sync(wait_for_async=True) + assert local.exists("/Original Fold\xe9r 1") + """ + + def test_remote_move_folder(self): + remote = self.remote_1 + local = self.local_1 + + # Move a non empty folder with some content + remote.move(self.folder_1_id, self.folder_2_id) + remote_info = remote.get_fs_info(self.folder_1_id) + assert remote_info is not None + assert remote_info.name == "Original Fold\xe9r 1" + assert remote_info.parent_uid == self.folder_2_id + + # Synchronize: only the folder move is detected: all + # the descendants are automatically realigned + self.wait_sync(wait_for_async=True) + + # Check remote folder + remote_info = remote.get_fs_info(self.folder_1_id) + assert remote_info is not None + assert remote_info.name == "Original Fold\xe9r 1" + assert remote_info.parent_uid == self.folder_2_id + + # Check local folder + assert not local.exists("/Original Fold\xe9r 1") + assert local.exists("/Original Folder 2/Original Fold\xe9r 1") + folder_1_local_info = local.get_info("/Original Folder 2/Original Fold\xe9r 1") + assert folder_1_local_info.filepath.parent == ( + self.sync_root_folder_1 / "Original Folder 2" + ) + + # Check folder state + folder_1_state = self.get_state(self.folder_1_id) + assert folder_1_state.local_path == ( + self.workspace_path / "Original Folder 2/Original Fold\xe9r 1" + ) + assert folder_1_state.local_name == "Original Fold\xe9r 1" + + # The content of the renamed folder is left unchanged + assert local.exists( + "/Original Folder 2/Original Fold\xe9r 1/Original File 1.1.odt" + ) + file_1_1_local_info = local.get_info( + "/Original Folder 2/Original Fold\xe9r 1/Original File 1.1.odt" + ) + assert file_1_1_local_info.filepath.parent == ( + self.sync_root_folder_1 / "Original Folder 2" / "Original Fold\xe9r 1" + ) + + # Check child state + file_1_1_state = self.get_state(self.file_1_1_id) + assert file_1_1_state.local_path == ( + self.workspace_path + / "Original Folder 2" + / "Original Fold\xe9r 1/Original File 1.1.odt" + ) + assert file_1_1_state.local_name == "Original File 1.1.odt" + + # Check child name + assert local.exists("/Original Folder 2/Original Fold\xe9r 1/Sub-Folder 1.1") + folder_1_1_local_info = local.get_info( + "/Original Folder 2/Original Fold\xe9r 1/Sub-Folder 1.1" + ) + assert folder_1_1_local_info.filepath.parent == ( + self.sync_root_folder_1 / "Original Folder 2" / "Original Fold\xe9r 1" + ) + + # Check child state + folder_1_1_state = self.get_state(self.folder_1_1_id) + assert folder_1_1_state.local_path == ( + self.workspace_path + / "Original Folder 2" + / "Original Fold\xe9r 1/Sub-Folder 1.1" + ) + assert folder_1_1_state.local_name == "Sub-Folder 1.1" + + """ + def test_concurrent_remote_rename_folder(self): + remote = self.remote_1 + local = self.local_1 + + # Rename non empty folders concurrently + remote.rename(self.folder_1_id, "Renamed Folder 1") + assert remote.get_fs_info(self.folder_1_id).name == "Renamed Folder 1" + remote.rename(self.folder_2_id, "Renamed Folder 2") + assert remote.get_fs_info(self.folder_2_id).name == "Renamed Folder 2" + + # Synchronize: only the folder renaming is detected: all + # the descendants are automatically realigned + self.wait_sync(wait_for_async=True) + + # The content of the renamed folders is left unchanged + # Check child name + assert local.exists("/Renamed Folder 1/Original File 1.1.odt") + file_1_1_local_info = local.get_info("/Renamed Folder 1/Original File 1.1.odt") + assert file_1_1_local_info.filepath.parent == ( + self.sync_root_folder_1 / "Renamed Folder 1" + ) + + # Check child state + file_1_1_state = self.get_state(self.file_1_1_id) + assert file_1_1_state.local_path == ( + self.workspace_path / "Renamed Folder 1/Original File 1.1.odt" + ) + assert file_1_1_state.local_name == "Original File 1.1.odt" + + # Check child name + assert local.exists("/Renamed Folder 2/Original File 3.odt") + file_3_local_info = local.get_info("/Renamed Folder 2/Original File 3.odt") + assert file_3_local_info.filepath.parent == ( + self.sync_root_folder_1 / "Renamed Folder 2" + ) + + # Check child state + file_3_state = self.get_state(self.file_3_id) + assert file_3_state.local_path == ( + self.workspace_path / "Renamed Folder 2/Original File 3.odt" + ) + assert file_3_state.local_name == "Original File 3.odt" + """ + + def test_remote_rename_sync_root_folder(self): + remote = self.remote_1 + local = LocalTest(self.local_nxdrive_folder_1) + + # Rename a sync root folder + remote.rename(self.workspace_id, "Renamed Nuxeo Drive Test Workspace") + assert ( + remote.get_fs_info(self.workspace_id).name + == "Renamed Nuxeo Drive Test Workspace" + ) + + # Synchronize: only the sync root folder renaming is detected: all + # the descendants are automatically realigned + self.wait_sync(wait_for_async=True) + + # The client folder has been renamed + assert not local.exists(f"/{self.workspace_title}") + assert local.exists("/Renamed Nuxeo Drive Test Workspace") + + renamed_workspace_path = ( + self.local_nxdrive_folder_1 / "Renamed Nuxeo Drive Test Workspace" + ) + + # The content of the renamed folder is left unchanged + # Check child name + assert local.exists( + "/Renamed Nuxeo Drive Test Workspace/Original Fil\xe9 1.odt" + ) + file_1_local_info = local.get_info( + "/Renamed Nuxeo Drive Test Workspace/Original Fil\xe9 1.odt" + ) + assert file_1_local_info.filepath.parent == renamed_workspace_path + + # Check child state + file_1_state = self.get_state(self.file_1_id) + assert file_1_state.local_path == Path( + "Renamed Nuxeo Drive Test Workspace/Original Fil\xe9 1.odt" + ) + assert file_1_state.local_name == "Original Fil\xe9 1.odt" + + # Check child name + assert local.exists("/Renamed Nuxeo Drive Test Workspace/Original Fold\xe9r 1") + folder_1_local_info = local.get_info( + "/Renamed Nuxeo Drive Test Workspace/Original Fold\xe9r 1" + ) + assert folder_1_local_info.filepath.parent == renamed_workspace_path + + # Check child state + folder_1_state = self.get_state(self.folder_1_id) + assert folder_1_state.local_path == Path( + "Renamed Nuxeo Drive Test Workspace/Original Fold\xe9r 1" + ) + assert folder_1_state.local_name == "Original Fold\xe9r 1" + + # Check child name + assert local.exists( + "/Renamed Nuxeo Drive Test Workspace" + "/Original Fold\xe9r 1" + "/Sub-Folder 1.1" + ) + folder_1_1_local_info = local.get_info( + "/Renamed Nuxeo Drive Test Workspace" + "/Original Fold\xe9r 1" + "/Sub-Folder 1.1" + ) + assert folder_1_1_local_info.filepath.parent == ( + renamed_workspace_path / "Original Fold\xe9r 1" + ) + + # Check child state + folder_1_1_state = self.get_state(self.folder_1_1_id) + assert folder_1_1_state.local_path == Path( + "Renamed Nuxeo Drive Test Workspace/Original Fold\xe9r 1/Sub-Folder 1.1" + ) + assert folder_1_1_state.local_name == "Sub-Folder 1.1" + + # Check child name + assert local.exists( + "/Renamed Nuxeo Drive Test Workspace" + "/Original Fold\xe9r 1" + "/Original File 1.1.odt" + ) + file_1_1_local_info = local.get_info( + "/Renamed Nuxeo Drive Test Workspace" + "/Original Fold\xe9r 1" + "/Original File 1.1.odt" + ) + assert file_1_1_local_info.filepath.parent == ( + renamed_workspace_path / "Original Fold\xe9r 1" + ) + + # Check child state + file_1_1_state = self.get_state(self.file_1_1_id) + assert file_1_1_state.local_path == Path( + "Renamed Nuxeo Drive Test Workspace/Original Fold\xe9r 1/Original File 1.1.odt" + ) + assert file_1_1_state.local_name == "Original File 1.1.odt" + + def test_remote_move_to_non_sync_root(self): + # Grant ReadWrite permission on Workspaces for test user + input_obj = f"doc:{env.WS_DIR}" + self.root_remote.execute( + command="Document.SetACE", + input_obj=input_obj, + user=self.user_1, + permission="ReadWrite", + grant=True, + ) + + workspaces_info = self.root_remote.fetch(env.WS_DIR) + workspaces = workspaces_info["uid"] + + # Get remote client with Workspaces as base folder and local client + remote = DocRemote( + self.nuxeo_url, + self.user_1, + "nxdrive-test-device-1", + self.version, + password=self.password_1, + base_folder=workspaces, + upload_tmp_dir=self.upload_tmp_dir, + ) + local = self.local_1 + + # Create a non synchronized folder + unsync_folder = remote.make_folder("/", "Non synchronized folder") + + ws_basename = os.path.basename(self.ws.path) + try: + # Move 'Original Fold\xe9r 1' to Non synchronized folder + remote.move( + f"/{ws_basename}/Original Fold\xe9r 1", "/Non synchronized folder" + ) + assert not remote.exists(f"/{ws_basename}/Original Fold\xe9r 1") + assert remote.exists("/Non synchronized folder/Original Fold\xe9r 1") + + # Synchronize: the folder move is detected as a deletion + self.wait_sync(wait_for_async=True) + + # Check local folder + assert not local.exists("/Original Fold\xe9r 1") + + # Check folder state + assert self.get_state(self.folder_1_id) is None + finally: + # Clean the non synchronized folder + remote.delete(unsync_folder, use_trash=False) + + +class TestSyncRemoteMoveAndRename(OneUserTest): + def setUp(self): + self.workspace_id = f"{SYNC_ROOT_FAC_ID}{self.workspace}" + self.workspace_path = Path(self.workspace_title) + self.folder_id = self.remote_1.make_folder(self.workspace_id, "Test folder").uid + + self.engine_1.start() + self.wait_sync(wait_for_async=True) + + @windows_only + def test_synchronize_remote_move_file_while_accessing(self): + local = self.local_1 + remote = self.remote_1 + + file_path = local.abspath("/Test folder") / "testFile.pdf" + copyfile(self.location / "resources" / "files" / "testFile.pdf", file_path) + self.wait_sync() + file_id = local.get_remote_id("/Test folder/testFile.pdf") + assert file_id + + # Create a document by streaming a binary file ( open it as append ) + with open(file_path, "a"): + # Rename remote folder then synchronize + remote.move(file_id, self.workspace_id) + self.wait_sync(wait_for_async=True) + assert local.exists("/Test folder/testFile.pdf") + assert not local.exists("/testFile.pdf") + + # The source file is accessed by another processor, but no error + assert not self.engine_1.dao.get_errors() + + self.wait_sync(wait_for_async=True) + assert local.exists("/testFile.pdf") + assert not local.exists("/Test folder/testFile.pdf") + + """ + @Options.mock() + def test_synchronize_remote_move_while_download_file(self): + local = self.local_1 + remote = self.remote_1 + + # Create documents in the remote root workspace + new_folder_id = remote.make_folder(self.folder_id, "New folder").uid + self.wait_sync(wait_for_async=True) + + def callback(uploader): + ""Add delay when upload and download."" + if self.engine_1.file_id and not self.engine_1.has_rename: + # Rename remote file while downloading + remote.move(self.engine_1.file_id, new_folder_id) + self.engine_1.has_rename = True + time.sleep(3) + Engine.suspend_client(self.engine_1, uploader) + + self.engine_1.has_rename = False + self.engine_1.file_id = None + + Options.set("tmp_file_limit", 0.1, setter="manual") + with patch.object(self.engine_1.remote, "download_callback", new=callback): + file = self.location / "resources" / "files" / "testFile.pdf" + content = file.read_bytes() + self.engine_1.file_id = remote.make_file( + self.folder_id, "testFile.pdf", content=content + ).uid + + # Rename remote folder then synchronize + self.wait_sync(wait_for_async=True) + assert not local.exists("/Test folder/testFile.pdf") + assert local.exists("/Test folder/New folder/testFile.pdf") + """ + + @windows_only + def test_synchronize_remote_rename_file_while_accessing(self): + local = self.local_1 + remote = self.remote_1 + + file_path = local.abspath("/Test folder") / "testFile.pdf" + copyfile(self.location / "resources" / "files" / "testFile.pdf", file_path) + self.wait_sync() + file_id = local.get_remote_id("/Test folder/testFile.pdf") + assert file_id + + # Create a document by streaming a binary file + with open(file_path, "a"): + # Rename remote folder then synchronize + remote.rename(file_id, "testFile2.pdf") + self.wait_sync(wait_for_async=True) + assert local.exists("/Test folder/testFile.pdf") + assert not local.exists("/Test folder/testFile2.pdf") + + # The source file is accessed by another processor, but no errors + assert not self.engine_1.dao.get_errors() + + self.wait_sync(wait_for_async=True) + assert local.exists("/Test folder/testFile2.pdf") + assert not local.exists("/Test folder/testFile.pdf") + + @pytest.mark.xfail(reason="NXDRIVE-2494") + def test_synchronize_remote_rename_while_download_file(self): + local = self.local_1 + remote = self.remote_document_client_1 + + def callback(uploader): + """Add delay when upload and download.""" + if not self.engine_1.has_rename: + # Rename remote file while downloading + self.remote_1.rename(self.folder_id, "Test folder renamed") + self.engine_1.has_rename = True + time.sleep(3) + Engine.suspend_client(self.engine_1, uploader) + + self.engine_1.has_rename = False + + with patch.object(self.engine_1.remote, "download_callback", new=callback): + file = self.location / "resources" / "files" / "testFile.pdf" + content = file.read_bytes() + remote.make_file("/Test folder", "testFile.pdf", content=content) + + # Rename remote folder then synchronize + self.wait_sync(wait_for_async=True) + assert not local.exists("/Test folder") + assert local.exists("/Test folder renamed") + assert local.exists("/Test folder renamed/testFile.pdf") + + """ + def test_synchronize_remote_rename_while_upload(self): + if WINDOWS: + self._remote_rename_while_upload() + else: + func = "nxdrive.client.remote_client.os.fstatvfs" + with patch(func) as mock_os: + mock_os.return_value = Mock() + mock_os.return_value.f_bsize = 4096 + self._remote_rename_while_upload() + """ + + def _remote_rename_while_upload(self): + local = self.local_1 + + def callback(uploader): + """Add delay when upload and download.""" + if not local.exists("/Test folder renamed"): + time.sleep(1) + Engine.suspend_client(self.engine_1, uploader) + + with patch.object(self.engine_1.remote, "download_callback", new=callback): + # Create a document by streaming a binary file + file_path = local.abspath("/Test folder") / "testFile.pdf" + copyfile(self.location / "resources" / "files" / "testFile.pdf", file_path) + file_path = local.abspath("/Test folder") / "testFile2.pdf" + copyfile(self.location / "resources" / "files" / "testFile.pdf", file_path) + + # Rename remote folder then synchronize + self.remote_1.rename(self.folder_id, "Test folder renamed") + + self.wait_sync(wait_for_async=True) + assert not local.exists("/Test folder") + assert local.exists("/Test folder renamed") + assert local.exists("/Test folder renamed/testFile.pdf") + assert local.exists("/Test folder renamed/testFile2.pdf") + + +class TestRemoteMove(OneUserTest): + def test_remote_create_and_move(self): + """ + NXDRIVE-880: folder created and moved on the server does + not sync properly. + """ + + local = self.local_1 + remote = self.remote_document_client_1 + engine = self.engine_1 + + # Create a folder with some stuff inside, and sync + a1 = remote.make_folder("/", "a1") + for idx in range(5): + fname = "file-{}.txt".format(idx) + remote.make_file(a1, fname, content=b"Content of " + fname.encode("utf-8")) + engine.start() + self.wait_sync(wait_for_async=True) + + # Create another folder and move a1 inside it, and sync + a3 = remote.make_folder("/", "a3") + remote.move(a1, a3) + self.wait_sync(wait_for_async=True) + + # Checks + assert not local.exists("/a1") + assert len(local.get_children_info("/a3/a1")) == 5 + + +class TestRemoteFiles(OneUserTest): + """ + def test_remote_create_files_upper_lower_cases(self): + "" + Check that remote (lower|upper)case renaming is taken + into account locally. + "" + remote = self.remote_document_client_1 + local = self.local_1 + engine = self.engine_1 + + engine.start() + self.wait_sync(wait_for_async=True) + + # Create an innocent file, lower case + file_path = self.location / "resources" / "files" / "testFile.pdf" + filename_lower = file_path.name.lower() + doc = remote.make_file("/", filename_lower, file_path=file_path) + self.wait_sync(wait_for_async=True) + + # Check + assert remote.exists(f"/{filename_lower}") + assert local.exists(f"/{filename_lower}") + + # Remotely rename to upper case + filename_upper = filename_lower.upper() + remote.update_content(doc, b"CASE", filename=filename_upper) + self.wait_sync(wait_for_async=True) + + # Check - server + children = remote.get_children_info(self.workspace) + assert len(children) == 1 + assert children[0].get_blob("file:content").name == filename_upper + + # Check - client + children = local.get_children_info("/") + assert len(children) == 1 + assert children[0].name == filename_upper + """ + + """ + def test_remote_create_folders_upper_lower_cases(self): + "" + Check that remote (lower|upper)case renaming is taken + into account locally. See NXDRIVE-927. + "" + remote = self.remote_1 + local = self.local_1 + engine = self.engine_1 + workspace_id = f"{SYNC_ROOT_FAC_ID}{self.workspace}" + + # Create innocent folders, upper case + folder1 = remote.make_folder(workspace_id, "AA_1").uid + folder1_uid = folder1.partition("#")[-1] + folder2 = remote.make_folder(workspace_id, "BA_1").uid + folder2_uid = folder2.partition("#")[-1] + engine.start() + self.wait_sync(wait_for_async=True) + + # Check + for folder in ("/AA_1", "/BA_1"): + assert remote.exists(folder) + assert local.exists(folder) + + # Remotely rename the folder2 to lowercase folder1 + foldername_lower = "aa_1" + remote.rename(folder2, foldername_lower) + self.wait_sync(wait_for_async=True) + + if not local.is_case_sensitive(): + # There should be a conflict + errors = engine.dao.get_errors() + assert len(errors) == 1 + assert errors[0].remote_ref.endswith(folder2_uid) + else: + # We should not have any error + assert not engine.dao.get_errors(limit=0) + + # Check - server + children = sorted( + remote.get_children_info(self.workspace), key=lambda x: x.name + ) + assert len(children) == 2 + assert folder1_uid.endswith(children[0].uid) + assert children[0].name == "AA_1" + assert folder2_uid.endswith(children[1].uid) + assert children[1].name == foldername_lower + + # Check - client + children = sorted(local.get_children_info("/"), key=lambda x: x.name) + assert len(children) == 2 + assert children[0].remote_ref.endswith(folder1_uid) + assert children[0].name == "AA_1" + assert children[1].remote_ref.endswith(folder2_uid) + + if not local.is_case_sensitive(): + # The rename was _not_ effective + assert str(children[1].path).endswith("BA_1") + + # Re-rename the folder on the server + remote.rename(folder2, "aZeRtY") + self.wait_sync(wait_for_async=True) + + # There should be no more conflict + assert not engine.dao.get_errors() + + # And the local folder must be renamed + children = sorted(local.get_children_info("/"), key=lambda x: x.name) + assert len(children) == 2 + assert children[0].remote_ref.endswith(folder1_uid) + assert children[0].name == "AA_1" + assert children[1].remote_ref.endswith(folder2_uid) + assert str(children[1].path).endswith("aZeRtY") + else: + # The rename was effective + assert str(children[1].path).endswith(foldername_lower) + """ diff --git a/tests/functional/test_security_updates.py b/tests/functional/test_security_updates.py new file mode 100644 index 0000000000..6ffdcd8625 --- /dev/null +++ b/tests/functional/test_security_updates.py @@ -0,0 +1,298 @@ +""" +import time +from pathlib import Path + +import pytest + +from .conftest import OS_STAT_MTIME_RESOLUTION, OneUserTest + + +class TestSecurityUpdates(OneUserTest): + def test_synchronize_denying_read_access(self): + ""Test that denying Read access server side is impacted client side + + Use cases: + - Deny Read access on a regular folder + => Folder should be locally deleted + - Grant Read access back + => Folder should be locally re-created + - Deny Read access on a synchronization root + => Synchronization root should be locally deleted + - Grant Read access back + => Synchronization root should be locally re-created + + See TestIntegrationRemoteDeletion.test_synchronize_remote_deletion + as the same uses cases are tested + "" + # Bind the server and root workspace + self.engine_1.start() + + # Get local and remote clients + local = self.local_1 + remote = self.remote_document_client_1 + + # Create documents in the remote root workspace + # then synchronize + remote.make_folder("/", "Test folder") + remote.make_file("/Test folder", "joe.txt", content=b"Some content") + + self.wait_sync(wait_for_async=True) + assert local.exists("/Test folder") + assert local.exists("/Test folder/joe.txt") + + # Remove Read permission for test user on a regular folder + # then synchronize + self._set_read_permission(self.user_1, f"{self.ws.path}/Test folder", False) + self.wait_sync(wait_for_async=True) + assert not local.exists("/Test folder") + + # Add Read permission back for test user then synchronize + self._set_read_permission(self.user_1, f"{self.ws.path}/Test folder", True) + self.wait_sync(wait_for_async=True) + assert local.exists("/Test folder") + assert local.exists("/Test folder/joe.txt") + + # Remove Read permission for test user on a sync root + # then synchronize + self._set_read_permission(self.user_1, self.ws.path, False) + self.wait_sync(wait_for_async=True) + assert not local.exists("/") + + # Add Read permission back for test user then synchronize + self._set_read_permission(self.user_1, self.ws.path, True) + self.wait_sync(wait_for_async=True) + assert local.exists("/") + assert local.exists("/Test folder") + assert local.exists("/Test folder/joe.txt") + + @pytest.mark.skip("NXDRIVE-170: WIP") + def test_synchronize_denying_read_access_local_modification(self): + # TO_REVIEW: Trash feature, delete it, + # might need to modify the behavior + ""Test denying Read access with concurrent local modification + + Use cases: + - Deny Read access on a regular folder and make some + local and remote changes concurrently. + => Only locally modified content should be kept + and should be marked as 'unsynchronized', + other content should be deleted. + Remote changes should not be impacted client side. + Local changes should not be impacted server side. + - Grant Read access back. + => Remote documents should be merged with + locally modified content which should be unmarked + as 'unsynchronized' and therefore synchronized upstream. + + See TestIntegrationRemoteDeletion + .test_synchronize_remote_deletion_local_modification + as the same uses cases are tested. + + Note that we use the .odt extension for test files to make sure + that they are created as File and not Note documents on the server + when synchronized upstream, as the current implementation of + RemoteDocumentClient is File oriented. + "" + # Bind the server and root workspace + self.engine_1.start() + # Get local and remote clients + local = self.local_1 + remote = self.remote_document_client_1 + root_remote = self.root_remote + + # Create documents in the remote root workspace + # then synchronize + remote.make_folder("/", "Test folder") + remote.make_file("/Test folder", "joe.odt", content=b"Some content") + remote.make_file("/Test folder", "jack.odt", content=b"Some content") + remote.make_folder("/Test folder", "Sub folder 1") + remote.make_file( + "/Test folder/Sub folder 1", "sub file 1.txt", content=b"Content" + ) + + self.wait_sync(wait_for_async=True) + assert local.exists("/Test folder") + assert local.exists("/Test folder/joe.odt") + assert local.exists("/Test folder/jack.odt") + assert local.exists("/Test folder/Sub folder 1") + assert local.exists("/Test folder/Sub folder 1/sub file 1.txt") + + # Remove Read permission for test user on a regular folder + # and make some local and remote changes concurrently then synchronize + test_folder_path = f"{self.ws.path}/Test folder" + self._set_read_permission(self.user_1, test_folder_path, False) + # Local changes + time.sleep(OS_STAT_MTIME_RESOLUTION) + # Create new file + local.make_file("/Test folder", "local.odt", content=b"New local content") + # Create new folder with files + local.make_folder("/Test folder", "Local sub folder 2") + local.make_file( + "/Test folder/Local sub folder 2", + "local sub file 2.txt", + content=b"Other local content", + ) + # Update file + local.update_content("/Test folder/joe.odt", b"Some locally updated content") + # Remote changes + # Create new file + root_remote.make_file( + test_folder_path, "remote.odt", content=b"New remote content" + ) + # Create new folder with files + root_remote.make_folder(test_folder_path, "Remote sub folder 2") + root_remote.make_file( + test_folder_path + "/Remote sub folder 2", + "remote sub file 2.txt", + content=b"Other remote content", + ) + # Update file + root_remote.update_content( + test_folder_path + "/joe.odt", b"Some remotely updated content" + ) + + self.wait_sync(wait_for_async=True) + # Only locally modified content should exist + # and should be marked as 'unsynchronized', other content should + # have been deleted. + # Remote changes should not be impacted client side. + # Local changes should not be impacted server side. + # Local check + assert local.exists("/Test folder") + assert len(local.get_children_info("/Test folder")) == 3 + assert local.exists("/Test folder/joe.odt") + assert ( + local.get_content("/Test folder/joe.odt") == b"Some locally updated content" + ) + assert local.exists("/Test folder/local.odt") + assert local.exists("/Test folder/Local sub folder 2") + assert local.exists("/Test folder/Local sub folder 2/local sub file 2.txt") + + assert not local.exists("/Test folder/jack.odt") + assert not local.exists("/Test folder/remote.odt") + assert not local.exists("/Test folder/Sub folder 1") + assert not local.exists("/Test folder/Sub folder 1/sub file 1.txt") + assert not local.exists("/Test folder/Remote sub folder 1") + assert not local.exists( + "/Test folder/Remote sub folder 1/remote sub file 1.txt" + ) + # State check + self._check_pair_state("/Test folder", "unsynchronized") + self._check_pair_state("/Test folder/joe.odt", "unsynchronized") + self._check_pair_state("/Test folder/local.odt", "unsynchronized") + self._check_pair_state("/Test folder/Local sub folder 2", "unsynchronized") + self._check_pair_state( + "/Test folder/Local sub folder 2/local sub file 2.txt", "unsynchronized" + ) + # Remote check + test_folder_uid = root_remote.get_info(test_folder_path).uid + assert len(root_remote.get_children_info(test_folder_uid)) == 5 + assert root_remote.exists(test_folder_path + "/joe.odt") + assert ( + root_remote.get_content(test_folder_path + "/joe.odt") + == b"Some remotely updated content" + ) + assert root_remote.exists(test_folder_path + "/jack.odt") + assert root_remote.exists(test_folder_path + "/remote.odt") + assert root_remote.exists(test_folder_path + "/Sub folder 1") + assert root_remote.exists(test_folder_path + "/Sub folder 1/sub file 1.txt") + assert root_remote.exists(test_folder_path + "/Remote sub folder 2") + assert root_remote.exists( + f"{test_folder_path}/Remote sub folder 2/remote sub file 2.txt" + ) + + assert not root_remote.exists(test_folder_path + "/local.odt") + assert not root_remote.exists(test_folder_path + "/Local sub folder 2") + assert not root_remote.exists( + f"{test_folder_path}/Local sub folder 1/local sub file 2.txt" + ) + + # Add Read permission back for test user then synchronize + self._set_read_permission(self.user_1, f"{self.ws.path}/Test folder", True) + self.wait_sync(wait_for_async=True) + # Remote documents should be merged with locally modified content + # which should be unmarked as 'unsynchronized' and therefore + # synchronized upstream. + # Local check + assert local.exists("/Test folder") + children_info = local.get_children_info("/Test folder") + assert len(children_info) == 8 + for info in children_info: + if info.name == "joe.odt": + remote_version = info + elif info.name.startswith("joe (") and info.name.endswith(").odt"): + local_version = info + assert remote_version is not None + assert local_version is not None + assert local.exists(remote_version.path) + assert ( + local.get_content(remote_version.path) == b"Some remotely updated content" + ) + assert local.exists(local_version.path) + assert local.get_content(local_version.path) == b"Some locally updated content" + assert local.exists("/Test folder/jack.odt") + assert local.exists("/Test folder/local.odt") + assert local.exists("/Test folder/remote.odt") + assert local.exists("/Test folder/Sub folder 1") + assert local.exists("/Test folder/Sub folder 1/sub file 1.txt") + assert local.exists("/Test folder/Local sub folder 2") + assert local.exists("/Test folder/Local sub folder 2/local sub file 2.txt") + assert local.exists("/Test folder/Remote sub folder 2") + assert local.exists("/Test folder/Remote sub folder 2/remote sub file 2.txt") + # State check + self._check_pair_state("/Test folder", "synchronized") + self._check_pair_state("/Test folder/joe.odt", "synchronized") + self._check_pair_state("/Test folder/local.odt", "synchronized") + self._check_pair_state("/Test folder/Local sub folder 2", "synchronized") + self._check_pair_state( + "/Test folder/Local sub folder 2/local sub file 2.txt", "synchronized" + ) + # Remote check + assert remote.exists("/Test folder") + children_info = remote.get_children_info(test_folder_uid) + assert len(children_info) == 8 + for info in children_info: + if info.name == "joe.odt": + remote_version = info + elif info.name.startswith("joe (") and info.name.endswith(").odt"): + local_version = info + assert remote_version is not None + assert local_version is not None + remote_version_ref_length = len(remote_version.path) - len(self.ws.path) + remote_version_ref = remote_version.path[-remote_version_ref_length:] + assert remote.exists(remote_version_ref) + assert ( + remote.get_content(remote_version_ref) == b"Some remotely updated content" + ) + local_version_ref_length = len(local_version.path) - len(self.ws.path) + local_version_ref = local_version.path[-local_version_ref_length:] + assert remote.exists(local_version_ref) + assert remote.get_content(local_version_ref) == b"Some locally updated content" + assert remote.exists("/Test folder/jack.odt") + assert remote.exists("/Test folder/local.odt") + assert remote.exists("/Test folder/remote.odt") + assert remote.exists("/Test folder/Sub folder 1") + assert remote.exists("/Test folder/Sub folder 1/sub file 1.txt") + assert remote.exists("/Test folder/Local sub folder 2") + assert remote.exists("/Test folder/Local sub folder 2/local sub file 2.txt") + assert remote.exists("/Test folder/Remote sub folder 2") + assert remote.exists("/Test folder/Remote sub folder 2/remote sub file 2.txt") + + def _set_read_permission(self, user, doc_path, grant): + input_obj = "doc:" + doc_path + if grant: + self.root_remote.execute( + command="Document.SetACE", + input_obj=input_obj, + user=user, + permission="Read", + grant=True, + ) + else: + self.root_remote.block_inheritance(doc_path) + + def _check_pair_state(self, local_path, pair_state): + local_path = Path(self.workspace_title) / local_path + doc_pair = self.engine_1.dao.get_state_from_local(local_path) + assert doc_pair.pair_state == pair_state +""" diff --git a/tests/functional/test_special_characters.py b/tests/functional/test_special_characters.py new file mode 100644 index 0000000000..6d1f8b749b --- /dev/null +++ b/tests/functional/test_special_characters.py @@ -0,0 +1,93 @@ +""" +import pytest + +from nxdrive.constants import MAC + +from ..markers import not_windows +from .conftest import OneUserTest + + +class TestSpecialCharacters(OneUserTest): + @not_windows(reason="Explorer prevents using those characters") + def test_create_local(self): + local = self.local_1 + remote = self.remote_1 + + self.engine_1.start() + self.wait_sync(wait_for_async=True) + + folder = local.make_folder("/", "/ * ? < > |") + local.make_file(folder, "| > < ? * /.txt", content=b"This is a test file") + self.wait_sync() + + folder_name = "- - - - - -" + file_name = "- - - - - -.txt" + # Check the remote folder + children = remote.get_children(self.ws.path)["entries"] + assert len(children) == 1 + assert children[0]["title"] == folder_name + # Check the remote file + children = remote.get_children(children[0]["path"])["entries"] + assert len(children) == 1 + assert children[0]["title"] == file_name + + new_folder_name = "abcd" + new_file_name = "efgh.txt" + local.rename(f"/{folder_name}", new_folder_name) + local.rename(f"/{new_folder_name}/{file_name}", new_file_name) + self.wait_sync() + + # Paths is updated server-side + info = remote.get_info(f"/{new_folder_name}") + assert info.name == new_folder_name + info = remote.get_info(f"/{new_folder_name}/{new_file_name}") + assert info.name == new_file_name + + @not_windows(reason="Explorer prevents using those characters") + @pytest.mark.xfail(reason="NXDRIVE-2498", condition=MAC) + def test_rename_local(self): + local = self.local_1 + remote = self.remote_1 + + self.engine_1.start() + self.wait_sync(wait_for_async=True) + + folder_name = "abcd" + file_name = "efgh.txt" + folder = local.make_folder("/", folder_name) + local.make_file(folder, file_name, content=b"This is a test file") + + self.wait_sync() + assert remote.exists(f"/{folder_name}") + assert remote.exists(f"/{folder_name}/{file_name}") + + new_folder_name = "/ * ? < > |" + new_folder_name_expected = "- - - - - -" + new_file_name = "| > < ? * /.txt" + new_file_name_expected = "- - - - - -.txt" + local.rename(f"/{folder_name}", new_folder_name) + local.rename(f"/{new_folder_name_expected}/{file_name}", new_file_name) + self.wait_sync() + + # Paths is updated server-side + info = remote.get_info(f"/{new_folder_name_expected}") + assert info.name == new_folder_name_expected + info = remote.get_info(f"/{new_folder_name_expected}/{new_file_name_expected}") + assert info.name == new_file_name_expected + + def test_create_remote(self): + local = self.local_1 + remote = self.remote_document_client_1 + + self.engine_1.start() + self.wait_sync(wait_for_async=True) + + folder = remote.make_folder("/", "/ * ? < > |") + remote.make_file(folder, "| > < ? * /.txt", content=b"This is a test file") + self.wait_sync(wait_for_async=True) + + folder_name = "- - - - - -" + file_name = "- - - - - -.txt" + assert local.exists(f"/{folder_name}") + assert local.exists(f"/{folder_name}/{file_name}") +""" diff --git a/tests/functional/test_synchronization.py b/tests/functional/test_synchronization.py new file mode 100644 index 0000000000..5ae5af67f5 --- /dev/null +++ b/tests/functional/test_synchronization.py @@ -0,0 +1,1181 @@ +import time +from pathlib import Path +from unittest.mock import patch + +from nuxeo.exceptions import Conflict, HTTPError, Unauthorized +from requests import ConnectionError + +# from nxdrive.constants import ROOT, WINDOWS +from nxdrive.constants import WINDOWS +from nxdrive.utils import safe_filename + +# from .. import ensure_no_exception +# from . import LocalTest +from .conftest import OS_STAT_MTIME_RESOLUTION, OneUserNoSync, OneUserTest, TwoUsersTest + + +class TestSynchronizationDisabled(OneUserNoSync): + """Test with synchronization features disabled.""" + + def test_basic_synchronization(self): + """Test that nothing will be synced.""" + + local = self.local_1 + remote = self.remote_document_client_1 + self.engine_1.start() + self.wait_sync(wait_for_async=True) + + # The local root is not created + assert not local.exists("/remote folder") + + # Force its creation to test local changes are not reflected remotely + local.unlock_ref(local.base_folder) + local.base_folder.mkdir() + local.make_folder("/", "local folder") + + # Create a remote document to check that nothing will be locally synced + remote.make_folder("/", "remote folder") + + # Sync and checks + self.wait_sync(wait_for_async=True) + assert not remote.exists("/local folder") + assert local.exists("/local folder") + assert not local.exists("/remote folder") + + +class TestSynchronization(OneUserTest): + """ + def test_binding_initialization_and_first_sync(self): + local = self.local_1 + remote = self.remote_document_client_1 + + # Create some documents in a Nuxeo workspace and bind this server to a + # Nuxeo Drive local folder + docs = self.make_server_tree() + + # The root binding operation does not create the local folder yet. + assert not local.exists("/") + + # Launch ndrive and check synchronization + self.engine_1.start() + self.wait_sync(wait_for_async=True) + assert local.exists("/") + assert local.exists("/Folder 1") + assert local.get_content("/Folder 1/File 1.txt") == b"aaa" + assert local.exists("/Folder 1/Folder 1.1") + assert local.get_content("/Folder 1/Folder 1.1/File 2.txt") == b"bbb" + assert local.exists("/Folder 1/Folder 1.2") + assert local.get_content("/Folder 1/Folder 1.2/File 3.txt") == b"ccc" + assert local.exists("/Folder 2") + # Cannot predict the resolution in advance + assert remote.get_note(docs["Dupe 1.txt"]) == b"Some content." + assert remote.get_note(docs["Dupe 2.txt"]) == b"Other content." + assert local.get_content("/Folder 2/File 4.txt") == b"ddd" + assert local.get_content("/File 5.txt") == b"eee" + + # Unbind root and resynchronize + remote.unregister_as_root(self.workspace) + + # Since errors are generated by the deletion events sent + # by Watchdog for the workspace children under UNIX, + # don't enforce errors + self.wait_sync(wait_for_async=True, enforce_errors=WINDOWS) + assert not local.exists("/") + """ + + """ + def test_binding_synchronization_empty_start(self): + local = self.local_1 + remote = self.remote_document_client_1 + + # Let's create some documents on the server and + # launch the first synchronization + docs = self.make_server_tree() + self.engine_1.start() + self.wait_sync(wait_for_async=True) + + # We should now be fully synchronized + file_count, folder_count = self.get_local_child_count( + self.local_nxdrive_folder_1 + ) + assert folder_count == 5 + assert file_count == 6 + + # Wait a bit for file time stamps to increase enough: on OSX HFS+ the + # file modification time resolution is 1s for instance + time.sleep(OS_STAT_MTIME_RESOLUTION) + + # Let do some local and remote changes concurrently + local.delete("/File 5.txt") + local.update_content("/Folder 1/File 1.txt", b"aaaa") + local.make_folder("/", "Folder 4") + + # The remote client used in this test is handling paths relative to + # the 'Nuxeo Drive Test Workspace' + remote.update(docs["File 2.txt"], properties={"note:note": "bbbb"}) + remote.delete("/Folder 2") + f3 = remote.make_folder(self.workspace, "Folder 3") + remote.make_file(f3, "File 6.txt", content=b"ffff") + + # Launch synchronization + self.wait_sync(wait_for_async=True) + + # We should now be fully synchronized again + assert not remote.exists("/File 5.txt") + assert remote.get_note(docs["File 1.txt"]) == b"aaaa" + assert remote.exists("/Folder 4") + + assert local.get_content("/Folder 1/Folder 1.1/File 2.txt") == b"bbbb" + # Let's just check remote document hasn't changed + assert remote.get_note(docs["File 2.txt"]) == b"bbbb" + assert not local.exists("/Folder 2") + assert local.exists("/Folder 3") + assert local.get_content("/Folder 3/File 6.txt") == b"ffff" + """ + + """ + def test_single_quote_escaping(self): + remote = self.remote_document_client_1 + local = LocalTest(self.local_nxdrive_folder_1) + dao = self.engine_1.dao + + file = "APPEL D'OFFRES" + assert dao._escape(file) == "APPEL D''OFFRES" + + remote.unregister_as_root(self.workspace) + self.engine_1.start() + + with ensure_no_exception(): + remote.make_folder("/", file) + filename = f"/{file}" + + remote.register_as_root(filename) + self.wait_sync(wait_for_async=True) + assert local.exists(filename) + + remote.unregister_as_root(filename) + self.wait_sync(wait_for_async=True) + assert not local.exists(filename) + """ + + def test_invalid_credentials(self): + self.engine_1.start() + self.wait_sync(wait_for_async=True) + + # Simulate bad responses + with patch.object(self.engine_1, "remote", new=self.get_bad_remote()): + self.engine_1.remote.request_token() + self.engine_1.remote.make_server_call_raise(Unauthorized(message="Mock")) + self.wait_sync(wait_for_async=True, fail_if_timeout=False) + assert self.engine_1.is_offline() + + self.engine_1.set_offline(value=False) + self.engine_1.set_invalid_credentials(value=False) + self.engine_1.resume() + + """ + def test_synchronization_modification_on_created_file(self): + # Regression test: a file is created locally, then modification is + # detected before first upload + local = self.local_1 + workspace_path = Path(self.workspace_title) + dao = self.engine_1.dao + + assert not local.exists("/") + + self.engine_1.start() + self.wait_sync(wait_for_async=True) + assert local.exists("/") + self.engine_1.stop() + # Let's create some documents on the client + local.make_folder("/", "Folder") + local.make_file("/Folder", "File.txt", content=b"Some content.") + + # First local scan (assuming the network is offline): + self.queue_manager_1.suspend() + self.queue_manager_1._disable = True + self.engine_1.start() + self.wait_sync(timeout=5, fail_if_timeout=False) + children = dao.get_local_children(workspace_path) + assert len(children) == 1 + assert children[0].pair_state == "locally_created" + folder_children = dao.get_local_children(workspace_path / "Folder") + assert len(folder_children) == 1 + assert folder_children[0].pair_state == "locally_created" + + # Wait a bit for file time stamps to increase enough: on most OS + # the file modification time resolution is 1s + time.sleep(OS_STAT_MTIME_RESOLUTION) + + # Let's modify it offline and wait for a bit + local.update_content("/Folder/File.txt", content=b"Some content.") + self.wait_sync(timeout=5, fail_if_timeout=False) + # File has not been synchronized, it is still + # in the locally_created state + file_state = dao.get_state_from_local(workspace_path / "Folder/File.txt") + assert file_state.pair_state == "locally_created" + + # Assume the computer is back online, the synchronization should occur + # as if the document was just created and not trigger an update + self.queue_manager_1._disable = False + self.queue_manager_1.resume() + self.wait_sync(wait_for_async=True) + folder_state = dao.get_state_from_local(workspace_path / "Folder") + assert folder_state.pair_state == "synchronized" + file_state = dao.get_state_from_local(workspace_path / "Folder/File.txt") + assert file_state.pair_state == "synchronized" + """ + + def test_basic_synchronization(self): + local = self.local_1 + remote = self.remote_document_client_1 + self.engine_1.start() + self.wait_sync(wait_for_async=True) + + # Let's create some document on the client and the server + local.make_folder("/", "Folder 3") + self.make_server_tree() + + # Launch ndrive and check synchronization + self.wait_sync(wait_for_async=True) + assert remote.exists("/Folder 3") + assert local.exists("/Folder 1") + assert local.exists("/Folder 2") + assert local.exists("/File 5.txt") + + def test_docpair_export(self): + self.engine_1.start() + self.wait_sync(wait_for_async=True) + + dao = self.engine_1.dao + children = dao.get_local_children(Path("/")) + assert children + doc_pair = children[0] + assert doc_pair.export() + + def test_synchronization_skip_errors(self): + local = self.local_1 + dao = self.engine_1.dao + + assert not local.exists("/") + + # Perform first scan and sync + self.engine_1.start() + self.wait_sync(wait_for_async=True) + assert local.exists("/") + self.engine_1.stop() + + # Let's create some documents on the client and the server + local.make_folder("/", "Folder 3") + self.make_server_tree() + + # Detect the files to synchronize but do not perform the + # synchronization + self.queue_manager_1.suspend() + self.queue_manager_1._disable = True + self.engine_1.start() + self.wait_sync(wait_for_async=True, timeout=10, fail_if_timeout=False) + + children = dao.get_local_children(Path(self.workspace_title)) + assert len(children) == 4 + sorted_children = sorted(children, key=lambda x: x.local_path) + assert sorted_children[0].remote_name == "File 5.txt" + assert sorted_children[0].pair_state == "remotely_created" + assert sorted_children[1].remote_name == "Folder 1" + assert sorted_children[1].pair_state == "remotely_created" + assert sorted_children[2].remote_name == "Folder 2" + assert sorted_children[2].pair_state == "remotely_created" + assert sorted_children[3].local_name == "Folder 3" + assert sorted_children[3].pair_state == "locally_created" + + # Simulate synchronization errors + file_5_state = sorted_children[0] + folder_3_state = sorted_children[3] + self.engine_1._local_watcher.increase_error(file_5_state, "TEST_FILE_ERROR") + self.engine_1._local_watcher.increase_error(folder_3_state, "TEST_FILE_ERROR") + + # Run synchronization + self.queue_manager_1._disable = False + self.queue_manager_1.resume() + # By default engine will not consider being syncCompleted + # because of the temporary ignore dfile + self.wait_sync(enforce_errors=False, fail_if_timeout=False) + + # All errors have been skipped, while the remaining docs have + # been synchronized + file_5_state = dao.get_normal_state_from_remote(file_5_state.remote_ref) + assert file_5_state.pair_state == "remotely_created" + folder_3_state = dao.get_state_from_local(folder_3_state.local_path) + assert folder_3_state.pair_state == "locally_created" + folder_1_state = dao.get_normal_state_from_remote(sorted_children[1].remote_ref) + assert folder_1_state.pair_state == "synchronized" + folder_2_state = dao.get_normal_state_from_remote(sorted_children[2].remote_ref) + assert folder_2_state.pair_state == "synchronized" + + # Retry synchronization of pairs in error + self.wait_sync() + file_5_state = dao.get_normal_state_from_remote(file_5_state.remote_ref) + assert file_5_state.pair_state == "synchronized" + folder_3_state = dao.get_state_from_local(folder_3_state.local_path) + assert folder_3_state.pair_state == "synchronized" + + def test_synchronization_give_up(self): + # Override error threshold to 1 instead of 3 + test_error_threshold = 1 + self.queue_manager_1._error_threshold = test_error_threshold + + # Bound root but nothing is synchronized yet + local = self.local_1 + dao = self.engine_1.dao + workspace_path = Path(self.workspace_title) + assert not local.exists("/") + + # Perform first scan and sync + self.engine_1.start() + self.wait_sync(wait_for_async=True) + assert local.exists("/") + self.engine_1.stop() + + # Let's create some documents on the client and the server + local.make_folder("/", "Folder 3") + self.make_server_tree(deep=False) + + # Simulate a server failure on file download + bad_remote = self.get_bad_remote() + error = HTTPError(status=500, message="Mock download error") + bad_remote.make_download_raise(error) + + # File is not synchronized but synchronization does not fail either, + # errors are handled and queue manager has given up on them + with patch.object(self.engine_1, "remote", new=bad_remote): + self.engine_1.start() + self.wait_sync(wait_for_async=True, timeout=60) + states_in_error = dao.get_errors(limit=test_error_threshold) + assert len(states_in_error) == 1 + children = dao.get_states_from_partial_local(workspace_path) + assert len(children) == 4 + for state in children: + if state.folderish: + assert state.pair_state == "synchronized" + else: + assert state.pair_state != "synchronized" + + # Reset errors + for state in states_in_error: + dao.reset_error(state) + + # Verify that everything now gets synchronized + self.wait_sync() + assert not dao.get_errors(limit=test_error_threshold) + children = dao.get_states_from_partial_local(workspace_path) + assert len(children) == 4 + for child in children: + assert child.pair_state == "synchronized" + + def test_synchronization_offline(self): + # Bound root but nothing is synchronized yet + local = self.local_1 + dao = self.engine_1.dao + workspace_path = Path(self.workspace_title) + assert not local.exists("/") + + # Perform first scan and sync + self.engine_1.start() + self.wait_sync(wait_for_async=True) + assert local.exists("/") + self.engine_1.stop() + + # Let's create some documents on the client and the server + local.make_folder("/", "Folder 3") + self.make_server_tree(deep=False) + + # Find various ways to simulate a network failure + bad_remote = self.get_bad_remote() + errors = [ + ConnectionError("Mock connection error"), + OSError("Mock socket error"), # Old socket.error + HTTPError(status=503, message="Mock"), + ] + + engine_started = False + with patch.object(self.engine_1, "remote", new=bad_remote): + for error in errors: + self.engine_1.remote.make_server_call_raise(error) + if not engine_started: + self.engine_1.start() + engine_started = True + + # Synchronization doesn't occur but does not fail either. + # - one 'locally_created' error is registered for Folder 3 + # - no states are inserted for the remote documents + self.wait_sync(wait_for_async=True, fail_if_timeout=False) + children = dao.get_states_from_partial_local(workspace_path) + assert len(children) == 1 + assert children[0].pair_state != "synchronized" + assert not self.engine_1.is_offline() + + # Starting here, the network is re-enable + # Verify that everything now gets synchronized + self.wait_sync(wait_for_async=True) + assert not self.engine_1.is_offline() + assert not dao.get_errors(limit=0) + children = dao.get_states_from_partial_local(workspace_path) + assert len(children) == 4 + for state in children: + assert state.pair_state == "synchronized" + + """ + def test_create_content_in_readonly_area(self): + dao = self.engine_1.dao + self.engine_1.start() + self.wait_sync(wait_for_async=True) + + # Let's create a subfolder of the main readonly folder + local = LocalTest(self.local_nxdrive_folder_1) + local.make_folder("/", "Folder 3") + local.make_file("/Folder 3", "File 1.txt", content=b"Some content.") + local.make_folder("/Folder 3", "Sub Folder 1") + local.make_file( + "/Folder 3/Sub Folder 1", "File 2.txt", content=b"Some other content." + ) + self.wait_sync() + + # States have been created for the subfolder and its content, + # subfolder is marked as unsynchronized + good_states = ("locally_created", "unsynchronized") + states = dao.get_states_from_partial_local(ROOT) + assert len(states) == 6 + sorted_states = sorted(states, key=lambda x: x.local_path) + assert sorted_states[0].local_name == "" + assert sorted_states[0].pair_state == "synchronized" + assert sorted_states[1].local_name == "Folder 3" + assert sorted_states[1].pair_state == "unsynchronized" + assert sorted_states[2].local_name == "File 1.txt" + assert sorted_states[2].pair_state in good_states + assert sorted_states[3].local_name == "Sub Folder 1" + assert sorted_states[3].pair_state in good_states + assert sorted_states[4].local_name == "File 2.txt" + assert sorted_states[4].pair_state in good_states + assert sorted_states[5].local_name == self.workspace_title + assert sorted_states[5].pair_state == "synchronized" + + # Let's create a file in the main readonly folder + local.make_file("/", "A file in a readonly folder.txt", content=b"Some Content") + self.wait_sync() + + # A state has been created, marked as unsynchronized + # Other states are unchanged + states = dao.get_states_from_partial_local(ROOT) + assert len(states) == 7 + sorted_states = sorted(states, key=lambda x: x.local_path) + assert sorted_states[0].local_name == "" + assert sorted_states[0].pair_state == "synchronized" + assert sorted_states[1].local_name == "A file in a readonly folder.txt" + assert sorted_states[1].pair_state == "unsynchronized" + assert sorted_states[2].local_name == "Folder 3" + assert sorted_states[2].pair_state == "unsynchronized" + assert sorted_states[3].local_name == "File 1.txt" + assert sorted_states[3].pair_state in good_states + assert sorted_states[4].local_name == "Sub Folder 1" + assert sorted_states[4].pair_state in good_states + assert sorted_states[5].local_name == "File 2.txt" + assert sorted_states[5].pair_state in good_states + assert sorted_states[6].local_name == self.workspace_title + assert sorted_states[6].pair_state == "synchronized" + + # Let's create a file and a folder in a folder on which the Write + # permission has been removed. Thanks to NXP-13119, this permission + # change will be detected server-side, thus fetched by the client + # in the remote change summary, and the remote_can_create_child flag + # on which the synchronizer relies to check if creation is allowed + # will be set to False and no attempt to create the remote file + # will be made. + # States will be marked as unsynchronized. + + workspace_path = Path(self.workspace_title) + # Create local folder and synchronize it remotely + local = self.local_1 + local.make_folder("/", "Readonly folder") + self.wait_sync() + + remote = self.remote_document_client_1 + assert remote.exists("/Readonly folder") + + # Check remote_can_create_child flag in pair state + readonly_folder_state = dao.get_state_from_local( + workspace_path / "Readonly folder" + ) + assert readonly_folder_state.remote_can_create_child + + # Wait again for synchronization to detect remote folder creation + # triggered by last synchronization and make sure we get a clean + # state at next change summary + self.wait_sync(wait_for_async=True) + readonly_folder_state = dao.get_state_from_local( + workspace_path / "Readonly folder" + ) + assert readonly_folder_state.remote_can_create_child + + # Set remote folder as readonly for test user + readonly_folder_path = f"{self.ws.path}/Readonly folder" + self._set_read_permission(self.user_1, readonly_folder_path, True) + self.root_remote.block_inheritance(readonly_folder_path, overwrite=False) + + # Wait to make sure permission change is detected. + self.wait_sync(wait_for_async=True) + # Re-fetch folder state and check remote_can_create_child + # flag has been updated + readonly_folder_state = dao.get_state_from_local( + workspace_path / "Readonly folder" + ) + assert not readonly_folder_state.remote_can_create_child + + # Try to create a local file and folder in the readonly folder, + # they should not be created remotely and be marked as unsynchronized. + local.make_file( + "/Readonly folder", "File in readonly folder", content=b"File content" + ) + local.make_folder("/Readonly folder", "Folder in readonly folder") + self.wait_sync() + assert not remote.exists("/Readonly folder/File in readonly folder") + assert not remote.exists("/Readonly folder/Folder in readonly folder") + + states = dao.get_states_from_partial_local( + workspace_path / "Readonly folder", strict=False + ) + assert len(states) == 3 + sorted_states = sorted(states, key=lambda x: x.local_path) + assert sorted_states[0].local_name == "Readonly folder" + assert sorted_states[0].pair_state == "synchronized" + assert sorted_states[1].local_name == "File in readonly folder" + assert sorted_states[1].pair_state == "unsynchronized" + assert sorted_states[2].local_name == "Folder in readonly folder" + assert sorted_states[2].pair_state == "unsynchronized" + """ + + """ + def test_synchronize_special_filenames(self): + local = self.local_1 + remote = self.remote_document_client_1 + self.engine_1.start() + + # Create a remote folder with a weird name + folder = remote.make_folder(self.workspace, 'Folder with chars: / \\ * < > ? "') + characters = "- - - - - - - -" + foldername = f"Folder with chars{characters}" + + self.wait_sync(wait_for_async=True) + folder_names = [i.name for i in local.get_children_info("/")] + assert folder_names == [foldername] + + # Create a remote file with a weird name + file = remote.make_file( + folder, 'File with chars: / \\ * < > ? ".txt', content=b"some content" + ) + filename = f"File with chars{characters}.txt" + + self.wait_sync(wait_for_async=True) + file_names = [ + i.name + for i in local.get_children_info(local.get_children_info("/")[0].path) + ] + assert file_names == [filename] + + # Update a remote file with a weird name (NXDRIVE-286) + remote.update(file, properties={"note:note": "new content"}) + self.wait_sync(wait_for_async=True, enforce_errors=False) + assert local.get_content(f"/{foldername}/{filename}") == b"new content" + file_state = self.get_dao_state_from_engine_1(f"{foldername}/{filename}") + assert file_state.pair_state == "synchronized" + assert file_state.local_digest == file_state.remote_digest + + # Update note title with a weird name + remote.update( + file, properties={"dc:title": 'File with chars: / \\ * < > ? " - 2'} + ) + filename = f"File with chars{characters} - 2.txt" + self.wait_sync(wait_for_async=True, enforce_errors=False) + file_names = [ + i.name + for i in local.get_children_info(local.get_children_info("/")[0].path) + ] + assert file_names == [filename] + + # Update note title changing the case (NXRIVE-532) + remote.update( + file, properties={"dc:title": 'file with chars: / \\ * < > ? " - 2'} + ) + filename = f"file with chars{characters} - 2.txt" + self.wait_sync(wait_for_async=True, enforce_errors=False) + file_names = [ + i.name + for i in local.get_children_info(local.get_children_info("/")[0].path) + ] + assert file_names == [filename] + """ + + def test_synchronize_error_remote(self): + path = Path(f"/{self.workspace_title}") / "test.odt" + remote = self.remote_document_client_1 + dao = self.engine_1.dao + + bad_remote = self.get_bad_remote() + error = HTTPError(status=400, message="Mock") + bad_remote.make_download_raise(error) + + with patch.object(self.engine_1, "remote", new=bad_remote): + remote.make_file("/", "test.odt", content=b"Some content.") + + self.engine_1.start() + self.wait_sync(wait_for_async=True) + self.engine_1.stop() + + pair = dao.get_state_from_local(path) + assert pair is not None + assert pair.error_count + assert pair.pair_state == "remotely_created" + + self.engine_1.start() + self.wait_sync() + pair = dao.get_state_from_local(path) + assert pair.error_count == 4 + assert pair.pair_state == "remotely_created" + + # Requeue errors + self.engine_1.retry_pair(pair.id) + self.wait_sync() + pair = dao.get_state_from_local(path) + assert not pair.error_count + assert pair.pair_state == "synchronized" + + def test_synchronize_deleted_blob(self): + local = self.local_1 + remote = self.remote_document_client_1 + self.engine_1.start() + + # Create a doc with a blob in the remote root workspace + # then synchronize + file_path = self.location / "resources" / "files" / "testFile.odt" + remote.make_file("/", file_path.name, file_path=file_path) + + self.wait_sync(wait_for_async=True) + assert local.exists(f"/{file_path.name}") + + # Delete the blob from the remote doc then synchronize + remote.delete_content(f"/{file_path.name}") + + self.wait_sync(wait_for_async=True) + assert not local.exists(f"/{file_path.name}") + + def test_synchronize_deletion(self): + local = self.local_1 + remote = self.remote_document_client_1 + self.engine_1.start() + + # Create a remote folder with 2 children then synchronize + remote.make_folder("/", "Remote folder") + remote.make_file( + "/Remote folder", "Remote file 1.odt", content=b"Some content." + ) + remote.make_file( + "/Remote folder", "Remote file 2.odt", content=b"Other content." + ) + + self.wait_sync(wait_for_async=True) + assert local.exists("/Remote folder") + assert local.exists("/Remote folder/Remote file 1.odt") + assert local.exists("/Remote folder/Remote file 2.odt") + + # Delete remote folder then synchronize + remote.delete("/Remote folder") + + self.wait_sync(wait_for_async=True) + assert not local.exists("/Remote folder") + assert not local.exists("/Remote folder/Remote file 1.odt") + assert not local.exists("/Remote folder/Remote file 2.odt") + + # Create a local folder with 2 children then synchronize + local.make_folder("/", "Local folder") + local.make_file("/Local folder", "Local file 1.odt", content=b"Some content.") + local.make_file("/Local folder", "Local file 2.odt", content=b"Other content.") + + self.wait_sync() + assert remote.exists("/Local folder") + assert remote.exists("/Local folder/Local file 1.odt") + assert remote.exists("/Local folder/Local file 2.odt") + + # Delete local folder then synchronize + time.sleep(OS_STAT_MTIME_RESOLUTION) + local.delete("/Local folder") + + # Since errors are generated by the deletion events sent by Watchdog + # for the folder children under UNIX, don't enforce errors + self.wait_sync(enforce_errors=WINDOWS) + assert not remote.exists("/Local folder") + # Wait for async completion as recursive deletion of children is done + # by the BulkLifeCycleChangeListener which is asynchronous + self.wait() + assert not remote.exists("/Local folder/Local file 1.odt") + assert not remote.exists("/Local folder/Local file 2.odt") + + """ + def test_synchronize_windows_foldername_endswith_space(self): + "" + Use nuxeodrive.CreateFolder API to make a folder directly + under the workspace "trial ". Verify if the DS client downloads + the folder and trims the space at the end + "" + remote = self.remote_document_client_1 + target = remote.make_folder("/", "trial ") + local = self.local_root_client_1 + remote.make_file(target, "aFile.txt", content=b"File A Content") + remote.make_file(target, "bFile.txt", content=b"File B Content") + self.engine_1.start() + self.wait_sync(wait_for_async=True) + assert local.exists(f"/{self.workspace_title}") + if WINDOWS: + assert local.exists(f"/{self.workspace_title}/trial/") + assert local.exists(f"/{self.workspace_title}/trial/aFile.txt") + assert local.exists(f"/{self.workspace_title}/trial/bFile.txt") + else: + assert local.exists(f"/{self.workspace_title}/trial /") + assert local.exists(f"/{self.workspace_title}/trial /aFile.txt") + assert local.exists(f"/{self.workspace_title}/trial /bFile.txt") + """ + + def test_409_conflict(self): + """ + Test concurrent upload with files having the same first characters. + """ + + remote = self.remote_document_client_1 + local = self.local_1 + engine = self.engine_1 + + engine.start() + self.wait_sync(wait_for_async=True) + assert local.exists("/") + + def _raise_for_second_file_only(*args, **kwargs): + return kwargs.get("filename").endswith("2.txt") + + # Simulate a server conflict on file upload + bad_remote = self.get_bad_remote() + error = Conflict(message="Mock Conflict") + bad_remote.make_upload_raise(error) + bad_remote.raise_on = _raise_for_second_file_only + + with patch.object(self.engine_1, "remote", new=bad_remote): + # Create 2 files locally + base = "A" * 40 + file1 = base + "1.txt" + file2 = base + "2.txt" + local.make_file("/", file1, content=b"foo") + local.make_file("/", file2, content=b"bar") + + self.wait_sync(fail_if_timeout=False) + + # Checks + assert engine.dao.queue_manager.get_errors_count() == 1 + children = remote.get_children_info(self.workspace) + assert len(children) == 1 + assert children[0].name == file1 + + # Starting here, default behavior is restored + self.wait_sync() + + # Checks + children = remote.get_children_info(self.workspace) + assert len(children) == 2 + assert children[0].name == file1 + assert children[1].name == file2 + + def test_416_range_past_eof(self): + """ + Test wrong bytes range during download. + """ + + remote = self.remote_document_client_1 + local = self.local_1 + engine = self.engine_1 + + engine.start() + self.wait_sync(wait_for_async=True) + assert local.exists("/") + + remote.make_file("/", "test.bin", content=b"42") + + # Simulate a requested range not satisfiable on file download + bad_remote = self.get_bad_remote() + error = HTTPError(status=416, message="Mock Requested Range Not Satisfiable") + bad_remote.make_download_raise(error) + + with patch.object(self.engine_1, "remote", new=bad_remote): + self.wait_sync(fail_if_timeout=False) + # Checks + assert engine.dao.queue_manager.get_errors_count() == 1 + + # Starting here, default behavior is restored + self.wait_sync() + + # Checks + assert not engine.dao.get_errors() + assert local.exists("/test.bin") + + def test_local_modify_offline(self): + local = self.local_1 + engine = self.engine_1 + + engine.start() + self.wait_sync(wait_for_async=True) + + local.make_folder("/", "Test") + local.make_file("/Test", "Test.txt", content=b"Some content") + self.wait_sync() + + engine.stop() + local.update_content("/Test/Test.txt", b"Another content") + + engine.start() + self.wait_sync() + assert not engine.dao.get_errors() + + """ + def test_unsynchronize_accentued_document(self): + remote = self.remote_document_client_1 + local = self.local_1 + engine = self.engine_1 + engine.start() + + # Create the folder + root_name = "Été indian" + root = remote.make_folder(self.workspace, root_name) + self.wait_sync(wait_for_async=True) + assert local.exists("/" + root_name) + + # Remove the folder + remote.delete(root) + self.wait_sync(wait_for_async=True) + assert not local.exists("/" + root_name) + """ + + """ + def test_synchronize_document_with_pattern(self): + "" + Simple test to ensure there is no issue with files like "$AAA000$.doc". + Related to NXDRIVE-1287. + "" + name = "$NAB184$.doc" + self.remote_document_client_1.make_file("/", name, content=b"42") + self.engine_1.start() + self.wait_sync(wait_for_async=True) + assert self.local_1.exists(f"/{name}") + """ + + def test_rename_duplicates(self): + remote = self.remote_document_client_1 + local = self.local_1 + engine = self.engine_1 + + # Create 7 files with the same name + name = "Congés 2016 / 2017.txt" + name_expected = safe_filename(name) + for _ in range(7): + remote.make_file("/", name, content=b"42") + + # Start sync + engine.start() + self.wait_sync(wait_for_async=True) + + # Check that one file exists, and engine has 6 errors + assert local.exists(f"/{name_expected}") + assert len(local.get_children_info("/")) == 1 + assert len(engine.dao.get_errors(limit=0)) == 6 + + # Rename all remote documents with unique names + ref = local.get_remote_id("/") + children = self.remote_1.get_fs_children(ref) + assert len(children) == 7 + remote_files = set() + for child in children: + new_name = f"{child.uid.split('#')[-1]}-{safe_filename(child.name)}" + remote_files.add(new_name) + remote.execute(command="NuxeoDrive.Rename", id=child.uid, name=new_name) + + self.wait_sync(wait_for_async=True) + + children = self.remote_1.get_fs_children(ref) + assert len(children) == 7 + # Check that the 7 files exist locally and that there are no errors + local_children = local.get_children_info("/") + assert len(local_children) == 7 + local_files = {child.name for child in local_children} + assert not engine.dao.get_errors(limit=0) + assert remote_files == local_files + + """ + def test_local_creation_copying_from_sibling(self): + ""Test a local file creation when checking for an already synced file on the HDD."" + + remote = self.remote_document_client_1 + local = self.local_1 + engine = self.engine_1 + + engine.start() + self.wait_sync(wait_for_async=True) + + # Create a remote folder and a file inside it + contents = b"1234567890" * 42 * 42 + remote.make_folder("/", "a folder") + remote.make_file("/a folder", "file1.bin", content=contents) + self.wait_sync(wait_for_async=True) + + def stream_content(*args, **kwargs): + ""Called by Processor._download_content(). We are testing that this method is never called."" + assert 0, "Should not be called!" + + # Create another files with the same contents and check that the remote client downloads nothing + with patch.object(self.engine_1.remote, "stream_content", new=stream_content): + remote.make_file("/a folder", "file2.bin", content=contents) + remote.make_file("/", "file3.bin", content=contents) + self.wait_sync(wait_for_async=True) + + # Checks + assert not engine.dao.queue_manager.get_errors_count() + for client in (remote, local): + assert client.exists("/a folder/file1.bin") + assert client.exists("/a folder/file2.bin") + assert client.exists("/file3.bin") + """ + + +class TestSynchronization2(TwoUsersTest): + def test_conflict_detection(self): + # Fetch the workspace sync root + local = self.local_1 + dao = self.engine_1.dao + workspace_path = Path(self.workspace_title) + self.engine_1.start() + self.wait_sync(wait_for_async=True) + assert local.exists("/") + + # Let's create a file on the client and synchronize it. + local_path = local.make_file("/", "Some File.doc", content=b"Original content.") + self.wait_sync() + + # Let's modify it concurrently but with the same content (digest) + self.engine_1.suspend() + time.sleep(OS_STAT_MTIME_RESOLUTION) + local.update_content(local_path, b"Same new content.") + + remote_2 = self.remote_document_client_2 + remote_2.update_content("/Some File.doc", b"Same new content.") + self.engine_1.resume() + + # Let's synchronize and check the conflict handling: automatic + # resolution will work for this case + self.wait_sync(wait_for_async=True) + assert not self.engine_1.get_conflicts() + children = dao.get_states_from_partial_local(workspace_path) + assert len(children) == 1 + assert children[0].pair_state == "synchronized" + + local_children = local.get_children_info("/") + assert len(local_children) == 1 + assert local_children[0].name == "Some File.doc" + assert local.get_content(local_path) == b"Same new content." + remote_1 = self.remote_document_client_1 + remote_children = remote_1.get_children_info(self.workspace) + assert len(remote_children) == 1 + assert remote_children[0].get_blob("file:content").name == "Some File.doc" + assert remote_1.get_content("/Some File.doc") == b"Same new content." + + # Let's trigger another conflict that cannot be resolved + # automatically: + self.engine_1.suspend() + time.sleep(OS_STAT_MTIME_RESOLUTION) + local.update_content(local_path, b"Local new content.") + + remote_2.update_content("/Some File.doc", b"Remote new content.") + self.engine_1.resume() + + # Let's synchronize and check the conflict handling + self.wait_sync(wait_for_async=True) + assert len(self.engine_1.get_conflicts()) == 1 + children = dao.get_states_from_partial_local(workspace_path) + assert len(children) == 1 + assert children[0].pair_state == "conflicted" + + local_children = local.get_children_info("/") + assert len(local_children) == 1 + assert local_children[0].name == "Some File.doc" + assert local.get_content(local_path) == b"Local new content." + remote_children = remote_1.get_children_info(self.workspace) + assert len(remote_children) == 1 + assert remote_children[0].get_blob("file:content").name == "Some File.doc" + assert remote_1.get_content("/Some File.doc") == b"Remote new content." + + """ + def test_rename_and_create_same_folder_not_running(self): + "" + NXDRIVE-668: Fix upload issue when renaming a folder and creating + a folder with the same name while Drive client is not running: + + IntegrityError: UNIQUE constraint failed: + States.remote_ref, States.local_path + "" + + remote = self.remote_document_client_1 + local_1 = self.local_1 + local_2 = self.local_2 + self.engine_1.start() + self.engine_2.start() + self.wait_sync(wait_for_async=True, wait_for_engine_2=True) + + # First, create initial folders and files + folder = remote.make_folder("/", "Folder01") + remote.make_folder("/Folder01", "subfolder01") + remote.make_file("/Folder01/subfolder01", "File01.txt", content=b"42") + self.wait_sync(wait_for_async=True, wait_for_engine_2=True) + assert remote.exists("/Folder01/subfolder01") + assert remote.exists("/Folder01/subfolder01/File01.txt") + assert local_1.exists("/Folder01/subfolder01") + assert local_1.exists("/Folder01/subfolder01/File01.txt") + assert local_2.exists("/Folder01/subfolder01") + assert local_2.exists("/Folder01/subfolder01/File01.txt") + + # Stop clients and make the local changes on a folder + self.engine_1.stop() + self.engine_2.stop() + local_2.rename("/Folder01/subfolder01", "subfolder02") + local_2.make_folder("/Folder01", "subfolder01") + local_2.make_file("/Folder01/subfolder01", "File02.txt", content=b"42.42") + self.engine_1.start() + self.engine_2.start() + self.wait_sync(wait_for_async=True, wait_for_engine_2=True) + + # Check client 2 + assert local_2.exists("/Folder01/subfolder02") + assert local_2.exists("/Folder01/subfolder02/File01.txt") + assert local_2.get_content("/Folder01/subfolder02/File01.txt") == b"42" + assert local_2.exists("/Folder01/subfolder01") + assert local_2.exists("/Folder01/subfolder01/File02.txt") + assert local_2.get_content("/Folder01/subfolder01/File02.txt") == b"42.42" + + # Check server + children = remote.get_children_info(folder) + assert len(children) == 2 + assert children[0].name == "subfolder01" + child = remote.get_children_info(children[0].uid) + assert child[0].name == "File02.txt" + assert remote.get_content(child[0]) == b"42.42" + assert children[1].name == "subfolder02" + child = remote.get_children_info(children[1].uid) + assert child[0].name == "File01.txt" + assert remote.get_content(child[0]) == b"42" + + # Check client 1 + assert local_1.exists("/Folder01/subfolder02") + "" + # TODO NXDRIVE-777: uncomment when issue is fixed + assert local_1.exists('/Folder01/subfolder02/File01.txt') + assert local_1.get_content('/Folder01/subfolder02/File01.txt') == b'42' + # TODO NXDRIVE-769: uncomment when deduplication issue is fixed + assert local_1.exists('/Folder01/subfolder01') + assert local_1.exists('/Folder01/subfolder01/File02.txt') + assert local_1.get_content( + '/Folder01/subfolder01/File02.txt') == b'42.42' + "" + """ + + """ + def test_rename_and_create_same_file_not_running(self): + "" + Same as `test_rename_and_create_same_folder_not_running` + but with changes made on a file. + "" + + remote = self.remote_document_client_1 + local_1 = self.local_1 + local_2 = self.local_2 + self.engine_1.start() + self.engine_2.start() + self.wait_sync(wait_for_async=True, wait_for_engine_2=True) + + # First, create initial folders and files + folder = remote.make_folder("/", "Folder01") + remote.make_file("/Folder01", "File01.txt", content=b"42") + self.wait_sync(wait_for_async=True, wait_for_engine_2=True) + assert remote.exists("/Folder01/File01.txt") + assert local_1.exists("/Folder01/File01.txt") + assert local_2.exists("/Folder01/File01.txt") + + # Stop clients and make the local changes on a file + self.engine_1.stop() + self.engine_2.stop() + local_2.rename("/Folder01/File01.txt", "File02.txt") + # Create a new file with the same name and content as + # the previously renamed file + local_2.make_file("/Folder01", "File01.txt", content=b"42") + self.engine_1.start() + self.engine_2.start() + self.wait_sync(wait_for_async=True, wait_for_engine_2=True) + + # Check client 2 + assert local_2.exists("/Folder01/File02.txt") + assert local_2.get_content("/Folder01/File02.txt") == b"42" + assert local_2.exists("/Folder01/File01.txt") + assert local_2.get_content("/Folder01/File01.txt") == b"42" + + # Check server + children = remote.get_children_info(folder) + assert len(children) == 2 + assert children[0].name == "File01.txt" + assert remote.get_content(children[0]) == b"42" + assert children[1].name == "File02.txt" + assert remote.get_content(children[1]) == b"42" + + # Check client 1 + assert local_1.exists("/Folder01/File02.txt") + assert local_1.get_content("/Folder01/File02.txt") == b"42" + # TODO NXDRIVE-769: uncomment when deduplication issue is fixed + # assert local_1.exists('/Folder01/File01.txt') + # assert local_1.get_content('/Folder01/File01.txt') == b'42' + + # Stop clients and make the local changes on a file + self.engine_1.stop() + self.engine_2.stop() + local_2.rename("/Folder01/File01.txt", "File03.txt") + # Create a new file with the same name as the previously renamed + # file but a different content + local_2.make_file("/Folder01", "File01.txt", content=b"42.42") + self.engine_1.start() + self.engine_2.start() + self.wait_sync(wait_for_async=True) + + # Check client 2 + assert local_2.exists("/Folder01/File03.txt") + assert local_2.get_content("/Folder01/File03.txt") == b"42" + assert local_2.exists("/Folder01/File02.txt") + assert local_2.get_content("/Folder01/File02.txt") == b"42" + assert local_2.exists("/Folder01/File01.txt") + assert local_2.get_content("/Folder01/File01.txt") == b"42.42" + + # Check server + children = remote.get_children_info(folder) + assert len(children) == 3 + assert children[0].name == "File01.txt" + assert remote.get_content(children[0]) == b"42.42" + assert children[1].name == "File02.txt" + assert remote.get_content(children[1]) == b"42" + assert children[2].name == "File03.txt" + assert remote.get_content(children[2]) == b"42" + + # Check client 1 + assert local_1.exists("/Folder01/File03.txt") + assert local_1.get_content("/Folder01/File03.txt") == b"42" + assert local_1.exists("/Folder01/File02.txt") + assert local_1.get_content("/Folder01/File02.txt") == b"42" + assert local_1.exists("/Folder01/File01.txt") + assert local_1.get_content("/Folder01/File01.txt") == b"42.42" + """ diff --git a/tests/functional/test_synchronization_dedup.py b/tests/functional/test_synchronization_dedup.py new file mode 100644 index 0000000000..ce0ecc29f5 --- /dev/null +++ b/tests/functional/test_synchronization_dedup.py @@ -0,0 +1,148 @@ +""" +"" +Test behaviors when the server allows duplicates and not the client. +"" +from pathlib import Path + +import pytest + +from .conftest import OneUserTest + + +class TestSynchronizationDedup(OneUserTest): + def test_children_of_folder_in_dedup_error(self): + "" + NXDRIVE-1037: Children of a folder that is in DEDUP error should be + ignored. + "" + + local = self.local_1 + engine = self.engine_1 + remote = self.remote_document_client_1 + engine.start() + + # Step 1: create Unisys folder (1st) + remote.make_folder(self.workspace, "Unisys") + self.wait_sync(wait_for_async=True) + assert local.exists("/Unisys") + + # Step 2: create Unisys folder (2nd) + unisys2 = remote.make_folder(self.workspace, "Unisys") + self.wait_sync(wait_for_async=True) + + # Check DEDUP error + doc_pair = engine.dao.get_normal_state_from_remote( + "defaultFileSystemItemFactory#default#" + unisys2 + ) + assert doc_pair.last_error == "DEDUP" + + # Step 3: create a child in the 2nd Unisys folder + foo = remote.make_file(unisys2, "foo.txt", content=b"42") + self.wait_sync(wait_for_async=True) + + # Check the file is not created and not present in the database + assert not local.exists("/Unisys/foo.txt") + assert not engine.dao.get_normal_state_from_remote( + "defaultFileSystemItemFactory#default#" + unisys2 + "/" + foo + ) + + # Check there is nothing syncing + assert not engine.dao.get_syncing_count() + + +class TestSynchronizationDedupCaseSensitive(OneUserTest): + ""NXDRIVE-842: do not sync duplicate conflicted folder content."" + + def setUp(self): + self.local = self.local_root_client_1 + self.remote = self.remote_document_client_1 + + # Make documents in the 1st future root folder + # / + # ├── citrus + # │ └── fruits + # │ ├── lemon.txt + # │ └── orange.txt + self.remote.make_folder("/", "citrus") + self.root1 = self.remote.make_folder("/citrus", "fruits") + self.remote.make_file(self.root1, "lemon.txt", content=b"lemon") + self.remote.make_file(self.root1, "orange.txt", content=b"orange") + + # Make documents in the 2nd future root folder + # / + # ├── fruits + # ├── cherries.txt + # ├── mango.txt + # └── papaya.txt + self.root2 = self.remote.make_folder("/", "fruits") + self.remote.make_file(self.root2, "cherries.txt", content=b"cherries") + self.remote.make_file(self.root2, "mango.txt", content=b"mango") + self.remote.make_file(self.root2, "papaya.txt", content=b"papaya") + + # Register new roots + # / + # ├── citrus + # │ └── fruits (self.root1) + # │ ├── lemon.txt + # │ └── orange.txt + # ├── fruits (self.root2) + # ├── cherries.txt + # ├── mango.txt + # └── papaya.txt + self.remote.unregister_as_root(self.workspace) + self.remote.register_as_root(self.root1) + self.remote.register_as_root(self.root2) + + # Start and wait + self.engine_1.start() + self.wait_sync(wait_for_async=True) + + # Checks + # No duplicate possible, there is one "fruits" folder at the root + assert len(self.local.get_children_info("/")) == 1 + # As events are coming in the reverse order, we should have self.root2 + # synced first, which contains 3 files + assert len(self.local.get_children_info("/fruits")) == 3 + + def check( + self, count_root: int, count_folder: int, count_fixed_folder: int = -1 + ) -> None: + self.wait_sync(wait_for_async=True) + + get = self.local.get_children_info + assert len(get("/")) == count_root + assert len(get("/fruits")) == count_folder + if count_fixed_folder > -1: + assert len(get("/fruits-renamed")) == count_fixed_folder + + # Ensure there is no postponed nor documents in error + assert not self.engine_1.dao.get_error_count(threshold=0) + + def test_file_sync_under_dedup_shared_folders_rename_remotely_dupe(self): + self.remote.update(self.root1, properties={"dc:title": "fruits-renamed"}) + self.check(2, 3, count_fixed_folder=2) + + @pytest.mark.randombug( + "Several rounds may be needed, specially on Windows", condition=True + ) + def test_file_sync_under_dedup_shared_folders_rename_remotely(self): + self.remote.update(self.root2, properties={"dc:title": "fruits-renamed"}) + self.check(2, 2, count_fixed_folder=3) + + def test_file_sync_under_dedup_shared_folders_delete_remotely(self): + self.remote.delete(self.root2) + self.check(1, 2) + + def test_file_sync_under_dedup_shared_folders_delete_remotely_dupe(self): + self.remote.delete(self.root1) + self.check(1, 3) + + def test_file_sync_under_dedup_shared_folders_delete_locally(self): + self.engine_1.local.delete(Path("fruits")) + self.check(1, 2) + assert self.root1 in self.local.get_remote_id("/fruits") + + def test_file_sync_under_dedup_shared_folders_rename_locally(self): + self.engine_1.local.rename(Path("fruits"), "fruits-renamed") + self.check(2, 2, count_fixed_folder=3) +""" diff --git a/tests/functional/test_synchronization_suspend.py b/tests/functional/test_synchronization_suspend.py new file mode 100644 index 0000000000..7d471bf32d --- /dev/null +++ b/tests/functional/test_synchronization_suspend.py @@ -0,0 +1,159 @@ +""" +import pytest + +from nxdrive.constants import LINUX, WINDOWS + +from .conftest import SYNC_ROOT_FAC_ID, OneUserTest + + +class TestSynchronizationSuspend(OneUserTest): + def test_basic_synchronization_suspend(self): + local = self.local_1 + remote = self.remote_document_client_1 + self.engine_1.start() + self.wait_sync(wait_for_async=True) + + # Let's create some document on the client and the server + local.make_folder("/", "Folder 3") + self.make_server_tree() + + # Launch ndrive and check synchronization + self.wait_sync(wait_for_async=True) + assert remote.exists("/Folder 3") + assert local.exists("/Folder 1") + assert local.exists("/Folder 2") + assert local.exists("/File 5.txt") + self.engine_1.queue_manager.suspend() + local.make_folder("/", "Folder 4") + local.make_file("/Folder 4", "Test.txt", content=b"Plop") + self.wait_sync(wait_for_async=True, fail_if_timeout=False) + assert len(remote.get_children_info(self.workspace)) == 4 + assert self.engine_1.queue_manager.is_paused() + + def test_synchronization_local_watcher_paused_when_offline(self): + ""NXDRIVE-680: fix unwanted local upload when offline."" + + local = self.local_1 + remote = self.remote_document_client_1 + engine = self.engine_1 + + # Create one file locally and wait for sync + engine.start() + self.wait_sync(wait_for_async=True) + local.make_file("/", "file1.txt", content=b"42") + self.wait_sync() + + # Checks + assert remote.exists("/file1.txt") + assert local.exists("/file1.txt") + + # Simulate offline mode (no more network for instance) + engine.queue_manager.suspend() + + # Create a bunch of files locally + local.make_folder("/", "files") + for num in range(60 if WINDOWS else 20): + local.make_file( + "/files", + "file-" + str(num) + ".txt", + content=b"Content of file-" + bytes(num), + ) + self.wait_sync(fail_if_timeout=False) + + # Checks + assert len(remote.get_children_info(self.workspace)) == 1 + assert engine.queue_manager.is_paused() + + # Restore network connection + engine.queue_manager.resume() + + # Wait for sync and check synced files + self.wait_sync(wait_for_async=True) + assert len(remote.get_children_info(self.workspace)) == 2 + assert not engine.queue_manager.is_paused() + + def test_synchronization_end_with_children_ignore_parent(self): + ""NXDRIVE-655: children of ignored folder are not ignored."" + + local = self.local_1 + remote = self.remote_document_client_1 + self.engine_1.start() + self.wait_sync(wait_for_async=True) + + # Let's create some document on the client and the server + local.make_folder("/", "Folder 3") + self.make_server_tree() + + # Launch ndrive and check synchronization + self.wait_sync(wait_for_async=True) + assert remote.exists("/Folder 3") + assert local.exists("/Folder 1") + assert local.exists("/Folder 2") + assert local.exists("/File 5.txt") + local.make_folder("/", ".hidden") + local.make_file("/.hidden", "Test.txt", content=b"Plop") + local.make_folder("/.hidden", "normal") + local.make_file("/.hidden/normal", "Test.txt", content=b"Plop") + # Should not try to sync therefore it should not timeout + self.wait_sync(wait_for_async=True) + assert len(remote.get_children_info(self.workspace)) == 4 + + @pytest.mark.xfail(LINUX, reason="NXDRIVE-1690", strict=True) + def test_folder_renaming_while_offline(self): + "" + Scenario: + - create a folder with a subfolder and a file, on the server + - launch Drive + - wait for sync completion + - pause Drive + - locally rename the parent folder + - locally rename the sub folder + - locally delete the file + - resume Drive + + Result before NXDRIVE-695: + - sub folder is renamed on the server + - the deleted file is not removed on the server (incorrect) + "" + + local = self.local_1 + remote = self.remote_1 + engine = self.engine_1 + + # Create a folder with a subfolder and a file on the server + folder = remote.make_folder(z"'{'SYNC_ROOT_FAC_ID}{self.workspace}", "folder").uid + subfolder = remote.make_folder(folder, "subfolder").uid + remote.make_file(subfolder, "file.txt", content=b"42") + + # Start the sync + engine.start() + self.wait_sync(wait_for_async=True) + + # Checks + assert remote.exists("/folder/subfolder/file.txt") + assert local.exists("/folder/subfolder/file.txt") + + # Suspend the sync + engine.suspend() + assert engine.is_paused() + + # Rename the parent folder and its subfolder; delete the file + local.rename("/folder", "folder-renamed") + local.rename("/folder-renamed/subfolder", "subfolder-renamed") + local.delete("/folder-renamed/subfolder-renamed/file.txt") + + # Resume the sync + engine.resume() + assert not engine.is_paused() + self.wait_sync() + + # Local checks + assert local.exists("/folder-renamed/subfolder-renamed") + assert not local.exists("/folder-renamed/subfolder-renamed/file.txt") + assert not local.exists("/folder") + + # Remote checks + assert remote.exists("/folder-renamed/subfolder-renamed") + assert not remote.exists("/folder-renamed/subfolder-renamed/file.txt") + assert not remote.exists("/folder") +""" diff --git a/tests/functional/test_versioning.py b/tests/functional/test_versioning.py new file mode 100644 index 0000000000..bbe5b60284 --- /dev/null +++ b/tests/functional/test_versioning.py @@ -0,0 +1,66 @@ +import time + +from .conftest import OS_STAT_MTIME_RESOLUTION, OneUserTest, TwoUsersTest + + +class TestVersioning(OneUserTest): + def test_version_restore(self): + remote = self.remote_document_client_1 + local = self.local_1 + + self.engine_1.start() + + # Create a remote doc + doc = remote.make_file( + self.workspace, "Document to restore.txt", content=b"Initial content." + ) + self.wait_sync(wait_for_async=True) + assert local.exists("/Document to restore.txt") + assert local.get_content("/Document to restore.txt") == b"Initial content." + + # Create version 1.0, update content, then restore version 1.0 + remote.create_version(doc, "Major") + remote.update(doc, properties={"note:note": "Updated content."}) + self.wait_sync(wait_for_async=True) + assert local.get_content("/Document to restore.txt") == b"Updated content." + version_uid = remote.get_versions(doc)[0][0] + remote.restore_version(version_uid) + self.wait_sync(wait_for_async=True) + assert local.get_content("/Document to restore.txt") == b"Initial content." + + +class TestVersioning2(TwoUsersTest): + def test_versioning(self): + local = self.local_1 + self.engine_1.start() + remote = self.remote_document_client_2 + + # Create a file as user 2 + remote.make_file_with_blob("/", "Test versioning.txt", b"This is version 0") + self.wait_sync() + assert remote.exists("/Test versioning.txt") + doc = self.root_remote.fetch(f"{self.ws.path}/Test versioning.txt") + self._assert_version(doc, 0, 0) + + # Synchronize it for user 1 + self.wait_sync(wait_for_async=True) + assert local.exists("/Test versioning.txt") + + # Update it as user 1 => should be versioned + time.sleep(OS_STAT_MTIME_RESOLUTION) + local.update_content("/Test versioning.txt", b"Modified content") + self.wait_sync() + doc = self.root_remote.fetch(f"{self.ws.path}/Test versioning.txt") + self._assert_version(doc, 0, 1) + + # Update it as user 1 => should NOT be versioned + # since the versioning delay is not passed by + time.sleep(OS_STAT_MTIME_RESOLUTION) + local.update_content("/Test versioning.txt", b"Content twice modified") + self.wait_sync() + doc = self.root_remote.fetch(f"{self.ws.path}/Test versioning.txt") + self._assert_version(doc, 0, 1) + + def _assert_version(self, doc, major, minor): + assert doc["properties"]["uid:major_version"] == major + assert doc["properties"]["uid:minor_version"] == minor From 326201d6ba2e583fcfca447c843fae22a9d976d2 Mon Sep 17 00:00:00 2001 From: Anindya Roy Date: Wed, 10 Jan 2024 16:44:13 +0530 Subject: [PATCH 23/36] NXDRIVE-2860: Code Coverage - added all rumming test cases from old functional - 10/01 --1 --- tests/functional/test_bulk_remote_changes.py | 161 +++ tests/functional/test_collection.py | 69 + .../test_concurrent_synchronization.py | 372 +++++ tests/functional/test_conflicts.py | 347 +++++ tests/functional/test_copy.py | 28 + tests/functional/test_direct_transfer.py | 1211 +++++++++++++++++ tests/functional/test_encoding.py | 121 ++ tests/functional/test_group_changes.py | 231 ++++ tests/functional/test_ignored.py | 46 + .../test_local_changes_when_offline.py | 81 ++ tests/functional/test_local_copy_paste.py | 131 ++ tests/functional/test_local_creations.py | 158 +++ tests/functional/test_local_deletion.py | 309 +++++ tests/functional/test_local_filter.py | 198 +++ .../functional/test_local_move_and_rename.py | 702 ++++++++++ tests/functional/test_local_move_folders.py | 220 +++ tests/functional/test_local_paste.py | 138 ++ .../test_local_share_move_folders.py | 121 ++ tests/functional/test_local_storage_issue.py | 111 ++ tests/functional/test_long_path.py | 101 ++ tests/functional/test_mac_local_client.py | 38 + tests/functional/test_multiple_files.py | 135 ++ 22 files changed, 5029 insertions(+) create mode 100644 tests/functional/test_bulk_remote_changes.py create mode 100644 tests/functional/test_collection.py create mode 100644 tests/functional/test_concurrent_synchronization.py create mode 100644 tests/functional/test_conflicts.py create mode 100644 tests/functional/test_copy.py create mode 100644 tests/functional/test_direct_transfer.py create mode 100644 tests/functional/test_encoding.py create mode 100644 tests/functional/test_group_changes.py create mode 100644 tests/functional/test_ignored.py create mode 100644 tests/functional/test_local_changes_when_offline.py create mode 100644 tests/functional/test_local_copy_paste.py create mode 100644 tests/functional/test_local_creations.py create mode 100644 tests/functional/test_local_deletion.py create mode 100644 tests/functional/test_local_filter.py create mode 100644 tests/functional/test_local_move_and_rename.py create mode 100644 tests/functional/test_local_move_folders.py create mode 100644 tests/functional/test_local_paste.py create mode 100644 tests/functional/test_local_share_move_folders.py create mode 100644 tests/functional/test_local_storage_issue.py create mode 100644 tests/functional/test_long_path.py create mode 100644 tests/functional/test_mac_local_client.py create mode 100644 tests/functional/test_multiple_files.py diff --git a/tests/functional/test_bulk_remote_changes.py b/tests/functional/test_bulk_remote_changes.py new file mode 100644 index 0000000000..cbd6e8df02 --- /dev/null +++ b/tests/functional/test_bulk_remote_changes.py @@ -0,0 +1,161 @@ +""" +Technical Background: GetChildren API can throw error + due to network issues or server load. + GetChildren API is also called when processing remote events. + +Issue: When processing remote event, a error in GetChildren API + (for a folder) call results in drive failing to process the + remaining remote events in the queue. + +Fix: Handle the error in GetChildren API gracefully and re-queue + same folder again for another remote scan + +Testing: This issue can be testing by simulating network of the API + using a mock framework: + 1. Emulate the GetChildren API error by mocking the + Remote.get_fs_children method + 2. The mocked method will raise an exception on demand + to simulate the server side / network errors + +Note: searching for the following regular expression in log file + will filter the manual test case: + STEP:|VERIFY:|Error: +""" + +from logging import getLogger +from time import sleep +from unittest.mock import patch + +from nuxeo.utils import version_lt +from requests import ConnectionError + +from nxdrive.client.remote_client import Remote +from nxdrive.objects import RemoteFileInfo + +from .conftest import TEST_DEFAULT_DELAY, TwoUsersTest + +log = getLogger(__name__) + + +class TestBulkRemoteChanges(TwoUsersTest): + """ + Test Bulk Remote Changes when network error happen in get_children_info() + will simulate network error when required. test_many_changes method will + make server side changes, simulate error for GetChildren API and still + verify if all remote changes are successfully synced. + """ + + def test_many_changes(self): + """ + Objective: The objective is to make a lot of remote changes (including a folder + modified) and wait for nuxeo-drive to successfully sync even if network error + happens. + + 1. Configure drive and wait for sync + 2. Create 3 folders folder1, folder2 and shared + 3. Create files inside the 3 folders: folder1/file1.txt, folder2/file2.txt, + shared/readme1.txt, shared/readme2.txt + 4. Wait for 3 folders, 4 files to sync to local PC + 5. Check the 3 folders and 4 files are synced to local PC + 6. Trigger simulation of network error for GetChildren API using the mock + (2 successive failures) + 7. Do the following changes in DM side in same order: + I. Create 'folder1/sample1.txt' + II. Delete 'shared' folder, and immediately restore 'shared' folder + IV. Restore 'shared/readme1.txt' + V. Create 'shared/readme3.txt' + VI. Create 'folder2/sample2.txt' + 8. Wait for remote changes to sync for unaffected folders folder1 and folder2 + 9. Check that folder1/sample1.txt, folder2/sample2.txt are synced to local PC + 10. Sleep for two remote scan attempts (to compensate for two network failures) + 11. Check if two files 'shared/readme1.txt' and 'shared/readme3.txt' are synced + to local PC. + """ + local = self.local_1 + remote = self.remote_document_client_1 + network_error = 2 + + self.engine_1.start() + self.wait_sync(wait_for_async=True) + + # create some folders on the server + folder1 = remote.make_folder(self.workspace, "folder1") + folder2 = remote.make_folder(self.workspace, "folder2") + shared = remote.make_folder(self.workspace, "shared") + + remote.make_file(folder1, "file1.txt", content=b"This is a sample file1") + remote.make_file(folder2, "file2.txt", content=b"This is a sample file2") + readme1 = remote.make_file( + shared, "readme1.txt", content=b"This is a readme file" + ) + remote.make_file(shared, "readme2.txt", content=b"This is a readme file") + + self.wait_sync(wait_for_async=True) + + assert local.exists("/folder1") + assert local.exists("/folder2") + assert local.exists("/shared") + assert local.exists("/folder1/file1.txt") + assert local.exists("/folder2/file2.txt") + assert local.exists("/shared/readme1.txt") + assert local.exists("/shared/readme2.txt") + + def get_children_info(self, *args, **kwargs): + nonlocal network_error + if network_error > 0: + network_error -= 1 + # Simulate a network error during the call to NuxeoDrive.GetChildren + raise ConnectionError( + "Network error simulated for NuxeoDrive.GetChildren" + ) + return Remote.get_fs_children(self.engine_1.remote, *args, **kwargs) + + def mock_method_factory(original): + def wrapped_method(data): + data["canScrollDescendants"] = True + return original(data) + + return wrapped_method + + with patch.object( + remote, "get_children_info", new=get_children_info + ), patch.object( + RemoteFileInfo, + "from_dict", + wraps=mock_method_factory(RemoteFileInfo.from_dict), + ): + # Simulate network error for GetChildren API twice + # This is to ensure Drive will eventually recover even after multiple + # failures of GetChildren API. + remote.make_file( + folder1, "sample1.txt", content=b"This is a another sample file1" + ) + self.remote_2.register_as_root(shared) + + # Delete folder 'shared' + remote.delete(shared) + self.wait_sync(wait_for_async=True) + + # Restore folder 'shared' from trash + remote.undelete(shared) + if version_lt(remote.client.server_version, "10.2"): + remote.undelete(readme1) + self.wait_sync(wait_for_async=True) + + remote.make_file( + shared, "readme3.txt", content=b"This is a another shared file" + ) + remote.make_file( + folder2, "sample2.txt", content=b"This is a another sample file2" + ) + + self.wait_sync(wait_for_async=True) + assert local.exists("/folder2/sample2.txt") + assert local.exists("/folder1/sample1.txt") + + # Although sync failed for one folder, GetChangeSummary will return + # zero event in successive calls. We need to wait two remote scans, + # so sleep for TEST_DEFAULT_DELAY * 2 + sleep(TEST_DEFAULT_DELAY * 2) + assert local.exists("/shared/readme1.txt") + assert local.exists("/shared/readme3.txt") diff --git a/tests/functional/test_collection.py b/tests/functional/test_collection.py new file mode 100644 index 0000000000..58b75f9f18 --- /dev/null +++ b/tests/functional/test_collection.py @@ -0,0 +1,69 @@ +""" +from contextlib import suppress + +import pytest + +from .conftest import OneUserTest + + +class TestCollection(OneUserTest): + @pytest.fixture(autouse=True) + def teardown(self): + yield + + with suppress(Exception): + # Happened when the test fails at setup_method() + self.remote_document_client_1.delete( + self.collection["uid"], use_trash=False + ) + + def test_collection_synchronization(self): + remote = self.remote_1 + + # Remove synchronization root + remote.unregister_as_root(self.workspace) + + # Create a document "Fiiile" in a folder "Test" + folder = self.remote_document_client_1.make_folder("/", "Test") + # Attach a file "abcde.txt" to the document + doc = self.remote_document_client_1.make_file_with_blob( + folder, "abcde.txt", b"abcde" + ) + + # Create a collection and add the document to it + self.collection = remote.execute( + command="Collection.Create", + name="CollectionA", + description="Test collection", + ) + remote.execute( + command="Document.AddToCollection", + collection=self.collection["uid"], + input_obj=f"doc:{doc}", + ) + + # Register the collection as the synchronization root + remote.register_as_root(self.collection["uid"]) + + # Sync locally + self.engine_1.start() + self.wait_sync(wait_for_async=True) + + # Get a client on the newly synchronized collection + local = self.get_local_client(self.local_nxdrive_folder_1 / "CollectionA") + + # Check the attached file is here + assert local.exists("/abcde.txt") + + # Attach a file "fghij.txt" to the document + # This should effectively replace the previous file + # since we did not specify another xpath than the main blob. + self.remote_document_client_1.attach_blob(doc, b"fghij", "fghij.txt") + + # Sync locally + self.wait_sync(wait_for_async=True) + + # Check the new attached file is here, and the previous isn't + assert local.exists("/fghij.txt") + assert not local.exists("/abcde.txt") +""" diff --git a/tests/functional/test_concurrent_synchronization.py b/tests/functional/test_concurrent_synchronization.py new file mode 100644 index 0000000000..dfab260e4c --- /dev/null +++ b/tests/functional/test_concurrent_synchronization.py @@ -0,0 +1,372 @@ +import time + +from nxdrive.constants import WINDOWS + +from .conftest import REMOTE_MODIFICATION_TIME_RESOLUTION, TwoUsersTest + + +class TestConcurrentSynchronization(TwoUsersTest): + def create_docs(self, parent, number, name_pattern=None, delay=1.0): + return self.root_remote.execute( + command="NuxeoDrive.CreateTestDocuments", + input_obj=f"doc:{parent}", + namePattern=name_pattern, + number=number, + delay=int(delay * 1000), + ) + + def test_concurrent_file_access(self): + """Test update/deletion of a locally locked file. + + This is to simulate downstream synchronization of a file opened (thus + locked) by any program under Windows, typically MS Word. + The file should be temporary ignored and not prevent synchronization of other + pending items. + Once the file is unlocked and the cooldown period is over it should be + synchronized. + """ + # Bind the server and root workspace + self.engine_1.start() + self.wait_sync(wait_for_async=True) + + # Get local and remote clients + local = self.local_1 + remote = self.remote_document_client_1 + + # Create file in the remote root workspace + uid = remote.make_file( + "/", "test_update.docx", content=b"Some content to update." + ) + remote.make_file("/", "test_delete.docx", content=b"Some content to delete.") + + # Launch first synchronization + self.wait_sync(wait_for_async=True) + assert local.exists("/test_update.docx") + assert local.exists("/test_delete.docx") + + # Open locally synchronized files to lock them and generate a + # WindowsError when trying to update / delete them + file1_path = local.get_info("/test_update.docx").filepath + file2_path = local.get_info("/test_delete.docx").filepath + with open(file1_path, "rb"), open(file2_path, "rb"): + # Update /delete existing remote files and create a new remote file + # Wait for 1 second to make sure the file's last modification time + # will be different from the pair state's last remote update time + time.sleep(REMOTE_MODIFICATION_TIME_RESOLUTION) + remote.update_content("/test_update.docx", b"Updated content.") + remote.delete("/test_delete.docx") + remote.make_file("/", "other.docx", content=b"Other content.") + + # Synchronize + self.wait_sync( + wait_for_async=True, enforce_errors=False, fail_if_timeout=False + ) + if WINDOWS: + # As local file are locked, a WindowsError should occur during the + # local update process, therefore: + # - Opened local files should still exist and not have been + # modified + # - Synchronization should not fail: doc pairs should be + # temporary ignored and other remote modifications should be + # locally synchronized + assert local.exists("/test_update.docx") + assert ( + local.get_content("/test_update.docx") == b"Some content to update." + ) + assert local.exists("/test_delete.docx") + assert ( + local.get_content("/test_delete.docx") == b"Some content to delete." + ) + assert local.exists("/other.docx") + assert local.get_content("/other.docx") == b"Other content." + + # Synchronize again + self.wait_sync(enforce_errors=False, fail_if_timeout=False) + # Temporary ignored files should be still be ignored as delay (60 seconds by + # default) is not expired, nothing should have changed + assert local.exists("/test_update.docx") + assert ( + local.get_content("/test_update.docx") == b"Some content to update." + ) + assert local.exists("/test_delete.docx") + assert ( + local.get_content("/test_delete.docx") == b"Some content to delete." + ) + + if WINDOWS: + # Cancel error delay to force retrying synchronization of pairs in error + self.queue_manager_1.requeue_errors() + self.wait_sync() + + # Previously temporary ignored files should be updated / deleted locally, + # temporary download file should not be there anymore and there + # should be no pending items left + else: + assert not (self.engine_1.download_dir / uid).is_dir() + + assert local.exists("/test_update.docx") + assert local.get_content("/test_update.docx") == b"Updated content." + assert not local.exists("/test_delete.docx") + + """ + def test_find_changes_with_many_doc_creations(self): + local = self.local_1 + + # Synchronize root workspace + self.engine_1.start() + self.wait_sync(wait_for_async=True) + assert local.exists("/") + assert not local.get_children_info("/") + + # List of children names to create + n_children = 5 + child_name_pattern = "child_%03d.txt" + children_names = [child_name_pattern % i for i in range(n_children)] + + # Create the children to synchronize on the remote server concurrently + # in a long running transaction + self.create_docs( + self.workspace, n_children, name_pattern=child_name_pattern, delay=0.5 + ) + + # Wait for the synchronizer thread to complete + self.wait_sync(wait_for_async=True) + + # Check that all the children creations where detected despite the + # creation transaction spanning longer than the individual audit + # query time ranges. + local_children_names = [c.name for c in local.get_children_info("/")] + local_children_names.sort() + assert local_children_names == children_names + """ + + """ + def test_delete_local_folder_2_clients(self): + # Get local clients for each device and remote client + local1 = self.local_1 + local2 = self.local_2 + remote = self.remote_document_client_1 + + # Check synchronization roots for drive1, + # there should be 1, the test workspace + sync_roots = remote.get_roots() + assert len(sync_roots) == 1 + assert sync_roots[0].name == self.workspace_title + + # Launch first synchronization on both devices + self.engine_1.start() + self.engine_2.start() + self.wait_sync(wait_for_async=True, wait_for_engine_2=True) + + # Test workspace should be created locally on both devices + assert local1.exists("/") + assert local2.exists("/") + + # Make drive1 create a remote folder in the + # test workspace and a file inside this folder, + # then synchronize both devices + test_folder = remote.make_folder(self.workspace, "Test folder") + remote.make_file(test_folder, "test.odt", content=b"Some content.") + + self.wait_sync(wait_for_async=True, wait_for_engine_2=True) + + # Test folder should be created locally on both devices + assert local1.exists("/Test folder") + assert local1.exists("/Test folder/test.odt") + assert local2.exists("/Test folder") + assert local2.exists("/Test folder/test.odt") + + # Delete Test folder locally on one of the devices + local1.delete("/Test folder") + assert not local1.exists("/Test folder") + + # Wait for synchronization engines to complete + # Wait for Windows delete and also async + self.wait_sync(wait_win=True, wait_for_async=True, wait_for_engine_2=True) + + # Test folder should be deleted on the server and on both devices + assert not remote.exists(test_folder) + assert not local1.exists("/Test folder") + assert not local2.exists("/Test folder") + """ + + """ + def test_delete_local_folder_delay_remote_changes_fetch(self): + # Get local and remote clients + local = self.local_1 + remote = self.remote_document_client_1 + + # Launch first synchronization + self.engine_1.start() + self.wait_sync(wait_for_async=True) + + # Test workspace should be created locally + assert local.exists("/") + + # Create a local folder in the test workspace and a file inside + # this folder, then synchronize + folder = local.make_folder("/", "Test folder") + local.make_file(folder, "test.odt", content=b"Some content.") + + self.wait_sync() + + # Test folder should be created remotely in the test workspace + assert remote.exists("/Test folder") + assert remote.exists("/Test folder/test.odt") + + # Delete Test folder locally before fetching remote changes, + # then synchronize + local.delete("/Test folder") + assert not local.exists("/Test folder") + + self.wait_sync() + + # Test folder should be deleted remotely in the test workspace. + # Even though fetching the remote changes will send + # 'documentCreated' events for Test folder and its child file + # as a result of the previous synchronization loop, since the folder + # will not have been renamed nor moved since last synchronization, + # its remote pair state will not be marked as 'modified', + # see Model.update_remote(). + # Thus the pair state will be ('deleted', 'synchronized'), resolved as + # 'locally_deleted'. + assert not remote.exists("Test folder") + + # Check Test folder has not been re-created locally + assert not local.exists("/Test folder") + """ + + def test_rename_local_folder(self): + # Get local and remote clients + local1 = self.local_1 + local2 = self.local_2 + + # Launch first synchronization + self.engine_1.start() + self.engine_2.start() + self.wait_sync(wait_for_async=True, wait_for_engine_2=True) + + # Test workspace should be created locally + assert local1.exists("/") + assert local2.exists("/") + + # Create a local folder in the test workspace and a file inside + # this folder, then synchronize + local1.make_folder("/", "Test folder") + if WINDOWS: + # Too fast folder create-then-rename are not well handled + time.sleep(1) + local1.rename("/Test folder", "Renamed folder") + self.wait_sync(wait_for_async=True, wait_for_engine_2=True) + assert local1.exists("/Renamed folder") + assert local2.exists("/Renamed folder") + + """ + def test_delete_local_folder_update_remote_folder_property(self): + # Get local and remote clients + local = self.local_1 + remote = self.remote_document_client_1 + + # Launch first synchronization + self.engine_1.start() + self.wait_sync(wait_for_async=True) + + # Test workspace should be created locally + assert local.exists("/") + + # Create a local folder in the test workspace and a file inside + # this folder, then synchronize + folder = local.make_folder("/", "Test folder") + local.make_file(folder, "test.odt", content=b"Some content.") + + self.wait_sync() + + # Test folder should be created remotely in the test workspace + assert remote.exists("/Test folder") + assert remote.exists("/Test folder/test.odt") + + # Delete Test folder locally and remotely update one of its properties + # concurrently, then synchronize + self.engine_1.suspend() + local.delete("/Test folder") + assert not local.exists("/Test folder") + test_folder_ref = remote.check_ref("/Test folder") + # Wait for 1 second to make sure the folder's last modification time + # will be different from the pair state's last remote update time + time.sleep(REMOTE_MODIFICATION_TIME_RESOLUTION) + remote.update( + test_folder_ref, properties={"dc:description": "Some description."} + ) + test_folder = remote.fetch(test_folder_ref) + assert test_folder["properties"]["dc:description"] == "Some description." + self.engine_1.resume() + + self.wait_sync(wait_for_async=True) + + # Test folder should be deleted remotely in the test workspace. + assert not remote.exists("/Test folder") + + # Check Test folder has not been re-created locally + assert not local.exists("/Test folder") + """ + + """ + def test_update_local_file_content_update_remote_file_property(self): + # Get local and remote clients + local = self.local_1 + remote = self.remote_document_client_1 + + # Launch first synchronization + self.engine_1.start() + self.wait_sync(wait_for_async=True) + + # Test workspace should be created locally + assert local.exists("/") + + # Create a local file in the test workspace then synchronize + local.make_file("/", "test.odt", content=b"Some content.") + + self.wait_sync() + + # Test file should be created remotely in the test workspace + assert remote.exists("/test.odt") + + self.engine_1.queue_manager.suspend() + # Locally update the file content and remotely update one of its + # properties concurrently, then synchronize + time.sleep(OS_STAT_MTIME_RESOLUTION) + local.update_content("/test.odt", b"Updated content.") + assert local.get_content("/test.odt") == b"Updated content." + test_file_ref = remote.check_ref("/test.odt") + # Wait for 1 second to make sure the file's last modification time + # will be different from the pair state's last remote update time + time.sleep(REMOTE_MODIFICATION_TIME_RESOLUTION) + remote.update(test_file_ref, properties={"dc:description": "Some description."}) + test_file = remote.fetch(test_file_ref) + assert test_file["properties"]["dc:description"] == "Some description." + time.sleep(TEST_DEFAULT_DELAY) + self.engine_1.queue_manager.resume() + + self.wait_sync(wait_for_async=True) + + # Test file should be updated remotely in the test workspace, + # and no conflict should be detected. + # Even though fetching the remote changes will send a + # 'documentModified' event for the test file as a result of its + # dc:description property update, since the file will not have been + # renamed nor moved and its content not modified since last + # synchronization, its remote pair state will not be marked as + # 'modified', see Model.update_remote(). + # Thus the pair state will be ('modified', 'synchronized'), resolved as + # 'locally_modified'. + assert remote.exists("/test.odt") + assert remote.get_content("/test.odt") == b"Updated content." + test_file = remote.fetch(test_file_ref) + assert test_file["properties"]["dc:description"] == "Some description." + assert len(remote.get_children_info(self.workspace)) == 1 + + # Check that the content of the test file has not changed + assert local.exists("/test.odt") + assert local.get_content("/test.odt") == b"Updated content." + assert len(local.get_children_info("/")) == 1 + """ diff --git a/tests/functional/test_conflicts.py b/tests/functional/test_conflicts.py new file mode 100644 index 0000000000..e7ef4e5b8b --- /dev/null +++ b/tests/functional/test_conflicts.py @@ -0,0 +1,347 @@ +import shutil +import time + +import pytest + +from .conftest import OS_STAT_MTIME_RESOLUTION, SYNC_ROOT_FAC_ID, TwoUsersTest + + +class TestConflicts(TwoUsersTest): + def setUp(self): + self.workspace_id = f"{SYNC_ROOT_FAC_ID}{self.workspace}" + self.file_id = self.remote_1.make_file( + self.workspace_id, "test.txt", content=b"Some content" + ).uid + self.get_remote_state = self.engine_1.dao.get_normal_state_from_remote + self.engine_1.start() + self.wait_sync(wait_for_async=True) + assert self.local_1.exists("/test.txt") + + def test_self_conflict(self): + remote = self.remote_1 + local = self.local_1 + # Update content on both sides by the same user, remote last + remote.update_content(self.file_id, b"Remote update") + local.update_content("/test.txt", b"Local update") + self.wait_sync(wait_for_async=True) + + assert len(local.get_children_info("/")) == 1 + assert local.exists("/test.txt") + assert local.get_content("/test.txt") == b"Local update" + + remote_children = remote.get_fs_children(self.workspace_id) + assert len(remote_children) == 1 + assert remote_children[0].uid == self.file_id + assert remote_children[0].name == "test.txt" + assert remote.get_content(remote_children[0].uid) == b"Remote update" + assert self.get_remote_state(self.file_id).pair_state == "conflicted" + + # Update content on both sides by the same user, local last + remote.update_content(self.file_id, b"Remote update 2") + time.sleep(OS_STAT_MTIME_RESOLUTION) + local.update_content("/test.txt", b"Local update 2") + self.wait_sync(wait_for_async=True) + + assert len(local.get_children_info("/")) == 1 + assert local.exists("/test.txt") + assert local.get_content("/test.txt") == b"Local update 2" + + remote_children = remote.get_fs_children(self.workspace_id) + assert len(remote_children) == 1 + assert remote_children[0].uid == self.file_id + assert remote_children[0].name == "test.txt" + assert remote.get_content(remote_children[0].uid) == b"Remote update 2" + assert self.get_remote_state(self.file_id).pair_state == "conflicted" + + def test_conflict_renamed_modified(self): + local = self.local_1 + remote = self.remote_2 + + # Update content on both sides by different users, remote last + time.sleep(OS_STAT_MTIME_RESOLUTION) + # Race condition is still possible + remote.update_content(self.file_id, b"Remote update") + remote.rename(self.file_id, "plop.txt") + local.update_content("/test.txt", b"Local update") + self.wait_sync(wait_for_async=True) + + assert remote.get_content(self.file_id) == b"Remote update" + assert local.get_content("/test.txt") == b"Local update" + assert self.get_remote_state(self.file_id).pair_state == "conflicted" + + """ + def test_resolve_local_renamed_modified(self): + remote = self.remote_2 + + self.test_conflict_renamed_modified() + # Resolve to local file + pair = self.get_remote_state(self.file_id) + assert pair + self.engine_1.resolve_with_local(pair.id) + self.wait_sync(wait_for_async=True) + + remote_children = remote.get_fs_children(self.workspace_id) + assert len(remote_children) == 1 + assert remote_children[0].uid == self.file_id + assert remote_children[0].name == "test.txt" + assert remote.get_content(remote_children[0].uid) == b"Local update" + """ + + def test_real_conflict(self): + local = self.local_1 + remote = self.remote_2 + + # Update content on both sides by different users, remote last + time.sleep(OS_STAT_MTIME_RESOLUTION) + # Race condition is still possible + remote.update_content(self.file_id, b"Remote update") + local.update_content("/test.txt", b"Local update") + self.wait_sync(wait_for_async=True) + + assert remote.get_content(self.file_id) == b"Remote update" + assert local.get_content("/test.txt") == b"Local update" + assert self.get_remote_state(self.file_id).pair_state == "conflicted" + + # Update content on both sides by different users, local last + remote.update_content(self.file_id, b"Remote update 2") + time.sleep(OS_STAT_MTIME_RESOLUTION) + local.update_content("/test.txt", b"Local update 2") + self.wait_sync(wait_for_async=True) + + assert remote.get_content(self.file_id) == b"Remote update 2" + assert local.get_content("/test.txt") == b"Local update 2" + assert self.get_remote_state(self.file_id).pair_state == "conflicted" + + def test_resolve_local(self): + self.test_real_conflict() + # Resolve to local file + pair = self.get_remote_state(self.file_id) + assert pair + self.engine_1.resolve_with_local(pair.id) + self.wait_sync(wait_for_async=True) + assert self.remote_2.get_content(self.file_id) == b"Local update 2" + + def test_resolve_local_folder(self): + local = self.local_1 + remote = self.remote_1 + + self.engine_1.suspend() + folder = remote.make_folder(self.workspace_id, "ABC").uid + self.engine_1.resume() + self.wait_sync(wait_for_async=True) + + self.engine_1.suspend() + local.rename("/ABC", "ABC_123") + remote.rename(folder, "ABC_234") + self.engine_1.resume() + self.wait_sync(wait_for_async=True) + + pair = self.get_remote_state(folder) + assert pair.pair_state == "conflicted" + + self.engine_1.resolve_with_local(pair.id) + self.wait_sync(wait_for_async=True) + pair = self.get_remote_state(folder) + assert pair.pair_state == "synchronized" + + children = local.get_children_info("/") + assert len(children) == 2 + assert not children[1].folderish + assert children[0].folderish + assert children[0].name == "ABC_123" + + children = remote.get_fs_children(self.workspace_id) + assert len(children) == 2 + assert not children[0].folderish + assert children[1].folderish + assert children[1].name == "ABC_123" + + def test_resolve_remote(self): + self.test_real_conflict() + # Resolve to local file + pair = self.get_remote_state(self.file_id) + assert pair + self.engine_1.resolve_with_remote(pair.id) + self.wait_sync(wait_for_async=True) + assert self.local_1.get_content("/test.txt") == b"Remote update 2" + + def test_conflict_on_lock(self): + doc_uid = self.file_id.split("#")[-1] + local = self.local_1 + remote = self.remote_2 + self.remote_document_client_2.lock(doc_uid) + local.update_content("/test.txt", b"Local update") + self.wait_sync(wait_for_async=True) + assert local.get_content("/test.txt") == b"Local update" + assert remote.get_content(self.file_id) == b"Some content" + remote.update_content(self.file_id, b"Remote update") + self.wait_sync(wait_for_async=True) + assert local.get_content("/test.txt") == b"Local update" + assert remote.get_content(self.file_id) == b"Remote update" + assert self.get_remote_state(self.file_id).pair_state == "conflicted" + self.remote_document_client_2.unlock(doc_uid) + self.wait_sync(wait_for_async=True) + assert local.get_content("/test.txt") == b"Local update" + assert remote.get_content(self.file_id) == b"Remote update" + assert self.get_remote_state(self.file_id).pair_state == "conflicted" + + @pytest.mark.randombug( + "NXDRIVE-776: Random bug but we cannot use " + "pytest.mark.random because this test would " + "take ~30 minutes to complete.", + mode="BYPASS", + ) + def test_XLS_conflict_on_locked_document(self): + self._XLS_local_update_on_locked_document(locked_from_start=False) + + @pytest.mark.randombug( + "NXDRIVE-776: Random bug but we cannot use " + "pytest.mark.random because this test would " + "take ~30 minutes to complete.", + mode="BYPASS", + ) + def test_XLS_conflict_on_locked_document_from_start(self): + self._XLS_local_update_on_locked_document() + + def _XLS_local_update_on_locked_document(self, locked_from_start=True): + remote = self.remote_2 + local = self.local_1 + + # user2: create remote XLS file + fs_item_id = remote.make_file( + self.workspace_id, + "Excel 97 file.xls", + b"\xd0\xcf\x11\xe0\xa1\xb1\x1a\xe1\x00\x00", + ).uid + doc_uid = fs_item_id.split("#")[-1] + self.wait_sync(wait_for_async=True) + assert local.exists("/Excel 97 file.xls") + + if locked_from_start: + # user2: lock document before user1 opening it + self.remote_document_client_2.lock(doc_uid) + self.wait_sync(wait_for_async=True) + local.unset_readonly("/Excel 97 file.xls") + + # user1: simulate opening XLS file with MS Office ~= update its content + local.update_content( + "/Excel 97 file.xls", b"\xd0\xcf\x11\xe0\xa1\xb1\x1a\xe1\x00\x00\x01" + ) + self.wait_sync(wait_for_async=locked_from_start) + pair_state = self.get_remote_state(fs_item_id) + assert pair_state + if locked_from_start: + # remote content hasn't changed, pair state is conflicted + # and remote_can_update flag is False + assert ( + remote.get_content(fs_item_id) + == b"\xd0\xcf\x11\xe0\xa1\xb1\x1a\xe1\x00\x00" + ) + assert pair_state.pair_state == "unsynchronized" + assert not pair_state.remote_can_update + else: + # remote content has changed, pair state is synchronized + # and remote_can_update flag is True + assert ( + remote.get_content(fs_item_id) + == b"\xd0\xcf\x11\xe0\xa1\xb1\x1a\xe1\x00\x00\x01" + ) + assert pair_state.pair_state == "synchronized" + assert pair_state.remote_can_update + + if not locked_from_start: + # user2: lock document after user1 opening it + self.remote_document_client_2.lock(doc_uid) + self.wait_sync(wait_for_async=True) + + # user1: simulate updating XLS file with MS Office + # 1. Create empty file 787D3000 + # 2. Update 787D3000 + # 3. Update Excel 97 file.xls + # 4. Update 787D3000 + # 5. Move Excel 97 file.xls to 1743B25F.tmp + # 6. Move 787D3000 to Excel 97 file.xls + # 7. Update Excel 97 file.xls + # 8. Update 1743B25F.tmp + # 9. Update Excel 97 file.xls + # 10. Delete 1743B25F.tmp + local.make_file("/", "787D3000") + local.update_content("/787D3000", b"\xd0\xcf\x11\xe0\xa1\xb1\x1a\xe1\x00\x00") + local.unset_readonly("/Excel 97 file.xls") + local.update_content( + "/Excel 97 file.xls", b"\xd0\xcf\x11\xe0\xa1\xb1\x1a\xe1\x00\x00\x02" + ) + local.update_content( + "/787D3000", b"\xd0\xcf\x11\xe0\xa1\xb1\x1a\xe1\x00\x00\x03" + ) + shutil.move(local.abspath("/Excel 97 file.xls"), local.abspath("/1743B25F.tmp")) + shutil.move(local.abspath("/787D3000"), local.abspath("/Excel 97 file.xls")) + local.update_content( + "/Excel 97 file.xls", b"\xd0\xcf\x11\xe0\xa1\xb1\x1a\xe1\x00\x00\x03\x04" + ) + local.update_content( + "/1743B25F.tmp", b"\xd0\xcf\x11\xe0\xa1\xb1\x1a\xe1\x00\x00" + ) + local.update_content( + "/Excel 97 file.xls", b"\xd0\xcf\x11\xe0\xa1\xb1\x1a\xe1\x00\x00\x03" + ) + local.delete_final("/1743B25F.tmp") + self.wait_sync(wait_for_async=not locked_from_start) + assert len(local.get_children_info("/")) == 2 + assert ( + local.get_content("/Excel 97 file.xls") + == b"\xd0\xcf\x11\xe0\xa1\xb1\x1a\xe1\x00\x00\x03" + ) + # remote content hasn't changed, pair state is conflicted + # and remote_can_update flag is False + if locked_from_start: + assert ( + remote.get_content(fs_item_id) + == b"\xd0\xcf\x11\xe0\xa1\xb1\x1a\xe1\x00\x00" + ) + else: + assert ( + remote.get_content(fs_item_id) + == b"\xd0\xcf\x11\xe0\xa1\xb1\x1a\xe1\x00\x00\x01" + ) + pair_state = self.get_remote_state(fs_item_id) + assert pair_state + assert pair_state.pair_state == "unsynchronized" + assert not pair_state.remote_can_update + + # user2: remote update, conflict is detected once again + # and remote_can_update flag is still False + remote.update_content( + fs_item_id, + b"\xd0\xcf\x11\xe0\xa1\xb1\x1a\xe1\x00\x00\x02", + "New Excel 97 file.xls", + ) + self.wait_sync(wait_for_async=True) + + assert len(local.get_children_info("/")) == 2 + assert local.exists("/Excel 97 file.xls") + assert ( + local.get_content("/Excel 97 file.xls") + == b"\xd0\xcf\x11\xe0\xa1\xb1\x1a\xe1\x00\x00\x03" + ) + + assert len(remote.get_fs_children(self.workspace_id)) == 2 + assert remote.get_fs_info(fs_item_id).name == "New Excel 97 file.xls" + assert ( + remote.get_content(fs_item_id) + == b"\xd0\xcf\x11\xe0\xa1\xb1\x1a\xe1\x00\x00\x02" + ) + + pair_state = self.get_remote_state(fs_item_id) + assert pair_state + assert pair_state.pair_state == "conflicted" + assert not pair_state.remote_can_update + + # user2: unlock document, conflict is detected once again + # and remote_can_update flag is now True + self.remote_document_client_2.unlock(doc_uid) + self.wait_sync(wait_for_async=True) + pair_state = self.get_remote_state(fs_item_id) + assert pair_state + assert pair_state.pair_state == "conflicted" + assert pair_state.remote_can_update diff --git a/tests/functional/test_copy.py b/tests/functional/test_copy.py new file mode 100644 index 0000000000..83910442b6 --- /dev/null +++ b/tests/functional/test_copy.py @@ -0,0 +1,28 @@ +from .conftest import OneUserTest + + +class TestCopy(OneUserTest): + def test_synchronize_remote_copy(self): + local = self.local_1 + remote = self.remote_document_client_1 + + # Create a file and a folder in the remote root workspace + remote.make_file("/", "test.odt", content=b"Some content.") + remote.make_folder("/", "Test folder") + + # Launch ndrive and check synchronization + self.engine_1.start() + self.wait_sync(wait_for_async=True) + assert local.exists("/") + assert local.exists("/Test folder") + assert local.exists("/test.odt") + + # Copy the file to the folder remotely + remote.copy("/test.odt", "/Test folder") + + # Launch ndrive and check synchronization + self.wait_sync(wait_for_async=True) + assert local.exists("/test.odt") + assert local.get_content("/test.odt") == b"Some content." + assert local.exists("/Test folder/test.odt") + assert local.get_content("/Test folder/test.odt") == b"Some content." diff --git a/tests/functional/test_direct_transfer.py b/tests/functional/test_direct_transfer.py new file mode 100644 index 0000000000..d81a0c9ec3 --- /dev/null +++ b/tests/functional/test_direct_transfer.py @@ -0,0 +1,1211 @@ +""" +Test the Direct Transfer feature in different scenarii. +""" +import logging +import re +from pathlib import Path +from time import sleep +from typing import Optional +from unittest.mock import patch +from uuid import uuid4 + +import pytest +from nuxeo.exceptions import HTTPError + +from nxdrive.client.uploader.direct_transfer import DirectTransferUploader +from nxdrive.constants import TransferStatus +from nxdrive.exceptions import NotFound +from nxdrive.options import Options +from nxdrive.utils import get_tree_list + +from .. import ensure_no_exception +from .conftest import OneUserNoSync, OneUserTest + + +class DirectTransfer: + def setUp(self): + # No sync root, to ease testing + self.remote_1.unregister_as_root(self.workspace) + self.engine_1.start() + + # Lower chunk_* options to have chunked uploads without having to create big files + self.default_chunk_limit = Options.chunk_limit + self.default_chunk_size = Options.chunk_size + Options.chunk_limit = 1 + Options.chunk_size = 1 + + # The file used for the Direct Transfer + source = ( + self.location / "resources" / "databases" / "engine_migration_duplicate.db" + ) + assert source.stat().st_size > 1024 * 1024 * 1.5 + source_data = source.read_bytes() + + # Work with a copy of the file to allow parallel testing + self.file = self.tmpdir / f"{uuid4()}.bin" + self.file.write_bytes(source_data * 2) + self.file_size = self.file.stat().st_size + assert self.file_size > 1024 * 1024 * 3 # Must be > 3 MiB + + def tearDown(self): + # Restore options + Options.chunk_limit = self.default_chunk_limit + Options.chunk_size = self.default_chunk_size + + def has_blob(self) -> bool: + """Check that *self.file* exists on the server and has a blob attached.""" + try: + children = self.remote_document_client_1.documents.get_children( + path=self.ws.path + ) + assert len(children) == 1 + doc = children[0] + assert doc.properties["dc:title"] == self.file.name + except Exception: + return False + return bool(doc.properties["file:content"]) + + def no_uploads(self) -> bool: + """Check there is no ongoing uploads.""" + assert not self.engine_1.dao.get_dt_upload(path=self.file) + + def sync_and_check( + self, should_have_blob: bool = True, check_for_blob: bool = True + ) -> None: + # Sync + self.wait_sync() + + # Check the error count + assert not self.engine_1.dao.get_errors(limit=0) + + # Check the uploads count + assert not list(self.engine_1.dao.get_dt_uploads()) + + # Check the file exists on the server and has a blob attached + + if not check_for_blob: + # Useful when checking for duplicates creation + return + + if should_have_blob: + assert self.has_blob() + else: + assert not self.has_blob() + + def direct_transfer( + self, + duplicate_behavior: str = "create", + last_local_selected_location: Optional[Path] = None, + new_folder: Optional[str] = None, + ) -> None: + self.engine_1.direct_transfer( + {self.file: self.file_size}, + self.ws.path, + self.ws.uid, + self.ws.title, + duplicate_behavior=duplicate_behavior, + last_local_selected_location=last_local_selected_location, + new_folder=new_folder, + ) + + def test_upload(self): + """A regular Direct Transfer.""" + + # There is no upload, right now + self.no_uploads() + + with ensure_no_exception(): + self.direct_transfer() + self.sync_and_check() + + def test_upload_new_folder(self): + """A regular Direct Transfer inside a new remote folder.""" + + # There is no upload, right now + self.no_uploads() + new_folder_name = str(uuid4())[:6] + with ensure_no_exception(): + self.direct_transfer(new_folder=new_folder_name) + self.sync_and_check(check_for_blob=False) + + children = self.remote_document_client_1.get_children_info(self.workspace) + assert len(children) == 1 + assert children[0].name == new_folder_name + subfolder = self.remote_document_client_1.get_children_info(children[0].uid) + assert len(subfolder) == 1 + assert subfolder[0].name == self.file.name + + def test_upload_new_folder_empty(self): + """An empty Direct Transfer that should just create a new remote folder.""" + + # There is no upload, right now + self.no_uploads() + new_folder_name = str(uuid4())[:6] + with ensure_no_exception(): + self.engine_1.direct_transfer( + {}, + self.ws.path, + self.ws.uid, + self.ws.title, + duplicate_behavior="create", + last_local_selected_location=None, + new_folder=new_folder_name, + ) + self.sync_and_check(check_for_blob=False) + + children = self.remote_document_client_1.get_children_info(self.workspace) + assert len(children) == 1 + assert children[0].name == new_folder_name + assert not self.remote_document_client_1.get_children_info(children[0].uid) + + """ + def test_cancel_upload(self): + "" + Pause the transfer by simulating a click on the pause/resume icon + on the current upload in the DT window; and cancel the upload. + Verify that the linked session has been updated after the + upload cancel. + "" + expression = re.compile(#check old_functional) + + def callback(*_): + ""This will mimic what is done in TransferItem.qml."" + # Ensure we have 1 ongoing upload + uploads = list(dao.get_dt_uploads()) + assert uploads + upload = uploads[0] + assert upload.status == TransferStatus.ONGOING + + # Verify the session status + doc_pair = dao.get_state_from_id(1) + assert doc_pair + session = dao.get_session(1) + assert session + assert session.total_items == 1 + assert session.status == TransferStatus.ONGOING + + # Pause the upload + dao.pause_transfer("upload", upload.uid, 50.0) + + engine = self.engine_1 + dao = self.engine_1.dao + + # There is no upload, right now + self.no_uploads() + + with patch.object(engine.remote, "upload_callback", new=callback): + with ensure_no_exception(): + self.direct_transfer(last_local_selected_location=self.file.parent) + self.wait_sync() + + assert dao.get_dt_uploads_with_status(TransferStatus.PAUSED) + + last_location = dao.get_config("dt_last_local_selected_location") + assert last_location + assert Path(last_location) == self.file.parent + + # Cancel the upload + upload = list(dao.get_dt_uploads())[0] + engine.cancel_upload(upload.uid) + + with self._caplog.at_level(logging.INFO): + self.sync_and_check(should_have_blob=False) + + assert not dao.get_state_from_local(upload.path) + + # Verify the session status after cancellation + doc_pair = dao.get_state_from_id(1) + assert doc_pair + session = dao.get_session(1) + assert session.total_items == 0 + assert session.status == TransferStatus.CANCELLED + + # A new Notification log should appear + records = map(str, self._caplog.records) + matches = list(filter(expression.match, records)) + assert not matches + """ + + def test_with_engine_not_started(self): + """A Direct Transfer should work even if engines are stopped.""" + self.app.quit() + pytest.xfail("Waiting for NXDRIVE-1910") + + self.engine_1.stop() + + # There is no upload, right now + self.no_uploads() + + with ensure_no_exception(): + self.direct_transfer() + self.sync_and_check() + + @Options.mock() + def test_duplicate_file_create(self): + """ + The file already exists on the server. + The user wants to continue the transfer and create a duplicate. + """ + + with ensure_no_exception(): + # 1st upload: OK + self.direct_transfer() + self.sync_and_check() + + # 2nd upload: a new document will be created + self.direct_transfer(duplicate_behavior="create") + self.sync_and_check(check_for_blob=False) + + # Ensure there are 2 documents on the server + children = self.remote_document_client_1.get_children_info(self.workspace) + assert len(children) == 2 + assert children[0].name == self.file.name + assert children[1].name == self.file.name + + def test_duplicate_file_ignore(self): + """ + The file already exists on the server. + The user wants to cancel the transfer to prevent duplicates. + """ + + class NoChunkUpload(DirectTransferUploader): + def upload_chunks(self, *_, **__): + """Patch Remote.upload() to be able to check that nothing will be uploaded.""" + assert 0, "No twice upload should be done!" + + def upload(*args, **kwargs): + """Set our specific uploader to check for twice upload.""" + kwargs.pop("uploader") + return upload_orig(*args, uploader=NoChunkUpload, **kwargs) + + engine = self.engine_1 + upload_orig = engine.remote.upload + + # There is no upload, right now + self.no_uploads() + + with ensure_no_exception(): + # 1st upload: OK + self.direct_transfer() + self.sync_and_check() + + # 2nd upload: it should be cancelled + with patch.object(engine.remote, "upload", new=upload): + self.direct_transfer(duplicate_behavior="ignore") + self.sync_and_check() + + # Ensure there is only 1 document on the server + self.sync_and_check() + + @Options.mock() + def test_duplicate_file_override(self): + """ + The file already exists on the server. + The user wants to continue the transfer and replace the document. + """ + + with ensure_no_exception(): + # 1st upload: OK + self.direct_transfer() + self.sync_and_check() + + # To ease testing, we change local file content + self.file.write_bytes(b"blob changed!") + + # 2nd upload: the blob should be replaced on the server + self.direct_transfer(duplicate_behavior="override") + self.sync_and_check() + + # Ensure there is only 1 document on the server + children = self.remote_document_client_1.get_children_info(self.workspace) + assert len(children) == 1 + assert children[0].name == self.file.name + + # Ensure the blob content was updated + assert ( + self.remote_1.get_blob(children[0].uid, xpath="file:content") + == b"blob changed!" + ) + + def test_pause_upload_manually(self): + """ + Pause the transfer by simulating a click on the pause/resume icon + on the current upload in the systray menu. + """ + + def callback(*_): + """ + This will mimic what is done in SystrayTranfer.qml: + - call API.pause_transfer() that will call: + - engine.dao.pause_transfer(nature, transfer_uid) + Then the upload will be paused in Remote.upload(). + """ + # Ensure we have 1 ongoing upload + uploads = list(dao.get_dt_uploads()) + assert uploads + upload = uploads[0] + assert upload.status == TransferStatus.ONGOING + + # Pause the upload + dao.pause_transfer("upload", upload.uid, 50.0) + + engine = self.engine_1 + dao = self.engine_1.dao + + # There is no upload, right now + self.no_uploads() + + with patch.object(engine.remote, "upload_callback", new=callback): + with ensure_no_exception(): + self.direct_transfer() + self.wait_sync() + assert dao.get_dt_uploads_with_status(TransferStatus.PAUSED) + + # Resume the upload + engine.resume_transfer( + "upload", list(dao.get_dt_uploads())[0].uid, is_direct_transfer=True + ) + self.sync_and_check() + + def test_pause_upload_automatically(self): + """ + Pause the transfer by simulating an application exit + or clicking on the Suspend menu entry from the systray. + """ + + def callback(*_): + """This will mimic what is done in SystrayMenu.qml: suspend the app.""" + # Ensure we have 1 ongoing upload + uploads = list(dao.get_dt_uploads()) + assert uploads + upload = uploads[0] + assert upload.status == TransferStatus.ONGOING + + # Suspend! + self.manager_1.suspend() + + engine = self.engine_1 + dao = engine.dao + + # There is no upload, right now + self.no_uploads() + + with patch.object(engine.remote, "upload_callback", new=callback): + with ensure_no_exception(): + self.direct_transfer() + self.wait_sync() + assert dao.get_dt_uploads_with_status(TransferStatus.SUSPENDED) + + # Resume the upload + self.manager_1.resume() + self.sync_and_check() + + def test_modifying_paused_upload(self): + """Modifying a paused upload should discard the current upload.""" + + def callback(*_): + """Pause the upload and apply changes to the document.""" + # Ensure we have 1 ongoing upload + uploads = list(dao.get_dt_uploads()) + assert uploads + upload = uploads[0] + assert upload.status == TransferStatus.ONGOING + + # Pause the upload + dao.pause_transfer("upload", upload.uid, 50.0) + + # Apply changes to the file + self.file.write_bytes(b"locally changed") + + engine = self.engine_1 + dao = engine.dao + + # There is no upload, right now + self.no_uploads() + + with patch.object(engine.remote, "upload_callback", new=callback): + with ensure_no_exception(): + self.direct_transfer() + self.wait_sync() + + # Resume the upload + engine.resume_transfer( + "upload", list(dao.get_dt_uploads())[0].uid, is_direct_transfer=True + ) + self.sync_and_check() + # Check the local content is correct + assert self.file.read_bytes() == b"locally changed" + + """ + @not_windows( + reason="Cannot test the behavior as the local deletion is blocked by the OS." + ) + def test_deleting_paused_upload(self): + ""Deleting a paused upload should discard the current upload."" + + def callback(*_): + ""Pause the upload and delete the document."" + # Ensure we have 1 ongoing upload + uploads = list(dao.get_dt_uploads()) + assert uploads + upload = uploads[0] + assert upload.status == TransferStatus.ONGOING + + # Pause the upload + dao.pause_transfer("upload", upload.uid, 50.0) + + # Remove the document + # (this is the problematic part on Windows, because for the + # file descriptor to be released we need to escape from + # Remote.upload(), which is not possible from here) + self.file.unlink() + assert not self.file.exists() + + engine = self.engine_1 + dao = engine.dao + + # There is no upload, right now + self.no_uploads() + + with patch.object(engine.remote, "upload_callback", new=callback): + with ensure_no_exception(): + self.direct_transfer() + self.wait_sync() + + # Resume the upload + engine.resume_transfer( + "upload", list(dao.get_dt_uploads())[0].uid, is_direct_transfer=True + ) + self.sync_and_check(should_have_blob=False) + """ + + def test_server_error_but_upload_ok(self): + """ + Test an error happening after chunks were uploaded and the FileManager.Import operation call. + This could happen if a proxy does not understand well the final requests as seen in NXDRIVE-1753. + """ + self.app.quit() + pytest.skip("Not yet implemented.") + + class BadUploader(DirectTransferUploader): + """Used to simulate bad server responses.""" + + def link_blob_to_doc(self, *args, **kwargs): + """Simulate a server error.""" + # Call the original method to effectively end the upload process + super().link_blob_to_doc(*args, **kwargs) + + # The file should be present on the server + # assert self.remote.exists(file_path) + + # There should be 1 upload with DONE transfer status + uploads = list(dao.get_dt_uploads()) + assert len(uploads) == 1 + upload = uploads[0] + assert upload.status == TransferStatus.DONE + + # And throw an error + stack = "The proxy server received an invalid response from an upstream server." + raise HTTPError( + status=502, message="Mocked Proxy Error", stacktrace=stack + ) + + def upload(*args, **kwargs): + """Set our specific uploader to simulate server error.""" + kwargs.pop("uploader") + return upload_orig(*args, uploader=BadUploader, **kwargs) + + # file_path = f"{self.ws.path}/{self.file.name}" + engine = self.engine_1 + dao = engine.dao + upload_orig = engine.remote.upload + + # There is no upload, right now + self.no_uploads() + + with patch.object(engine.remote, "upload", new=upload): + with ensure_no_exception(): + self.direct_transfer() + self.wait_sync() + + # There should be no upload as the Processor has checked the file existence + # on the server and so deleted the upload from the database + self.no_uploads() + + self.sync_and_check() + + def test_upload_ok_but_network_lost_in_the_meantime(self): + """ + NXDRIVE-2233 scenario: + + - Start a Direct Transfer. + - When all chunks are uploaded, and just after having called the FileManager + operation: the network connection is lost. + - The request being started, it has a 6 hours timeout. + - But the document was created on the server because the call has been made. + - Finally, after 6 hours, the network was restored in the meantime, but the + FileManager will throw a 404 error because the batchId was already consumed. + - The transfer will be displayed in the Direct Transfer window, but nothing more + will be done. + + Such transfer must be removed from the database. + """ + + class BadUploader(DirectTransferUploader): + """Used to simulate bad server responses.""" + + def link_blob_to_doc(self, *args, **kwargs): + """End the upload and simulate a network loss.""" + # Call the original method to effectively end the upload process + super().link_blob_to_doc(*args, **kwargs) + + # And throw an error + raise NotFound("Mock'ed error") + + def upload(*args, **kwargs): + """Set our specific uploader.""" + kwargs.pop("uploader") + return upload_orig(*args, uploader=BadUploader, **kwargs) + + # file_path = f"{self.ws.path}/{self.file.name}" + engine = self.engine_1 + dao = engine.dao + upload_orig = engine.remote.upload + + # There is no upload, right now + self.no_uploads() + + with patch.object(engine.remote, "upload", new=upload): + with ensure_no_exception(): + self.direct_transfer() + self.wait_sync() + + # The document has been created + self.sync_and_check() + + # There should be no upload as the Processor has made the clean-up + self.no_uploads() + + # There is no state to handle in the database + assert not dao.get_local_children(Path("/")) + + """ + def test_server_error_upload(self): + ""Test a server error happening after chunks were uploaded, at the Blob.AttachOnDocument operation call."" + + class BadUploader(DirectTransferUploader): + ""Used to simulate bad server responses."" + + def link_blob_to_doc(self, *args, **kwargs): + ""Simulate a server error."" + raise ConnectionError("Mocked exception") + + def upload(*args, **kwargs): + ""Set our specific uploader to simulate server error."" + kwargs.pop("uploader") + return upload_orig(*args, uploader=BadUploader, **kwargs) + + engine = self.engine_1 + dao = engine.dao + upload_orig = engine.remote.upload + + # There is no upload, right now + self.no_uploads() + + with patch.object(engine.remote, "upload", new=upload): + with ensure_no_exception(): + self.direct_transfer() + self.wait_sync() + + # There should be 1 upload with ONGOING transfer status + uploads = list(dao.get_dt_uploads()) + assert len(uploads) == 1 + upload = uploads[0] + assert upload.status == TransferStatus.DONE + + # The file does not exist on the server + assert not self.has_blob() + + self.sync_and_check() + """ + + """ + def test_chunk_upload_error(self): + ""Test a server error happening while uploading chunks."" + + def callback(uploader): + ""Mimic a connection issue after chunk 1 is sent."" + if len(uploader.blob.uploadedChunkIds) > 1: + raise ConnectionError("Mocked error") + + engine = self.engine_1 + dao = engine.dao + bad_remote = self.get_bad_remote() + bad_remote.upload_callback = callback + + # There is no upload, right now + self.no_uploads() + + with patch.object(engine, "remote", new=bad_remote), ensure_no_exception(): + self.direct_transfer() + self.wait_sync(timeout=3) + + # There should be 1 upload with ONGOING transfer status + uploads = list(dao.get_dt_uploads()) + assert len(uploads) == 1 + upload = uploads[0] + assert upload.status == TransferStatus.ONGOING + + # The file does not exist on the server + assert not self.has_blob() + + self.sync_and_check() + """ + + +class TestDirectTransfer(OneUserTest, DirectTransfer): + """Direct Transfer in "normal" mode, i.e.: when synchronization features are enabled.""" + + def setUp(self): + DirectTransfer.setUp(self) + + def wait_sync(self, *args, **kwargs): + sleep(3) + super().wait_sync(*args, **kwargs) + + +class TestDirectTransferNoSync(OneUserNoSync, DirectTransfer): + """Direct Transfer should work when synchronization features are not enabled.""" + + def setUp(self): + DirectTransfer.setUp(self) + + def wait_sync(self, *args, **kwargs): + sleep(3) + super().wait_sync(*args, **kwargs) + + +class DirectTransferFolder: + def setUp(self): + if not self.engine_1.have_folder_upload: + self.app.quit() + pytest.skip("FileManager.CreateFolder API not available.") + + # No sync root, to ease testing + self.remote_1.unregister_as_root(self.workspace) + self.engine_1.start() + + def get_children(self, path, children_list, key): + children = self.remote_1.get_children(path)["entries"] + for child in children: + if child["type"] == "Folder": + children_list = self.get_children(child["path"], children_list, key) + children_list.append(child[key]) + return children_list + + def checks(self, created): + """Check that the content on the remote equals the created items.""" + # Ensure there is only 1 folder created at the workspace root + ws_children = self.remote_1.get_children(self.ws.path)["entries"] + assert len(ws_children) == 1 + root = ws_children[0] + + # All has been uploaded + children = self.get_children(root["path"], [root["path"]], "path") + + assert len(children) == len(created) + + # Paths cleanup for assert to use only the relative part + children = sorted(child.replace(self.ws.path, "") for child in children) + created = sorted(elem.replace(self.tmpdir.as_posix(), "") for elem in created) + assert created == children + + # There is nothing more to upload + assert not list(self.engine_1.dao.get_dt_uploads()) + + # And there is no error + assert not self.engine_1.dao.get_errors(limit=0) + + def direct_transfer(self, folder, duplicate_behavior: str = "create") -> None: + paths = {path: size for path, size in get_tree_list(folder)} # noqa + # paths = dict([(path, size) for path, size in get_tree_list(folder)]) + self.engine_1.direct_transfer( + paths, + self.ws.path, + self.ws.uid, + self.ws.title, + duplicate_behavior=duplicate_behavior, + ) + + def test_simple_folder(self): + """Test the Direct Transfer on an simple empty folder.""" + + # There is no upload, right now + assert not list(self.engine_1.dao.get_dt_uploads()) + + root_folder = self.tmpdir / str(uuid4()) + root_folder.mkdir() + + with ensure_no_exception(): + self.direct_transfer(root_folder) + self.wait_sync(wait_for_async=True) + + # Ensure there is only 1 folder created at the workspace root + children = self.remote_1.get_children(self.ws.path)["entries"] + assert len(children) == 1 + assert children[0]["title"] == root_folder.name + + # All has been uploaded + assert not list(self.engine_1.dao.get_dt_uploads()) + + def test_sub_folders(self): + """Test the Direct Transfer on an simple empty folder.""" + + # There is no upload, right now + assert not list(self.engine_1.dao.get_dt_uploads()) + + created = [] + + root_folder = self.tmpdir / str(uuid4())[:6] + root_folder.mkdir() + + created.append(root_folder.as_posix()) + for _ in range(3): + sub_folder = root_folder / f"folder_{str(uuid4())[:4]}" + sub_folder.mkdir() + created.append(sub_folder.as_posix()) + for _ in range(2): + sub_file = sub_folder / f"file_{str(uuid4())[:4]}" + sub_file.write_text("test", encoding="utf-8") + created.append(sub_file.as_posix()) + + with ensure_no_exception(): + self.direct_transfer(root_folder) + self.wait_sync(wait_for_async=True) + + self.checks(created) + + def test_same_name_folders(self): + """Test the Direct Transfer on folders with same names.""" + + # There is no upload, right now + assert not list(self.engine_1.dao.get_dt_uploads()) + + created = [] + + root_folder = self.tmpdir / str(uuid4())[:6] + root_folder.mkdir() + + created.append(root_folder.as_posix()) + + folder_a = root_folder / "folder_a" + folder_a.mkdir() + created.append(folder_a.as_posix()) + sub_file = folder_a / "file_1.txt" + sub_file.write_text("test", encoding="utf-8") + created.append(sub_file.as_posix()) + + folder_b = root_folder / "folder_b" + folder_b.mkdir() + created.append(folder_b.as_posix()) + sub_file = folder_b / "file_1.txt" + sub_file.write_text("test", encoding="utf-8") + created.append(sub_file.as_posix()) + + # Sub-folder + folder_a = folder_b / "folder_a" + folder_a.mkdir() + created.append(folder_a.as_posix()) + sub_file = folder_a / "file_1.txt" + sub_file.write_text("test", encoding="utf-8") + created.append(sub_file.as_posix()) + + with ensure_no_exception(): + self.direct_transfer(root_folder) + self.wait_sync(wait_for_async=True) + + self.checks(created) + + def test_sessions(self): + """ + Test the Direct Transfer session system. + Start multiple transfers to check sessions creation. + Check the sessions status after synchronization. + """ + + # There is no upload, right now + assert not list(self.engine_1.dao.get_dt_uploads()) + expression = re.compile( + r"" + ) + + for x in range(4): + created = [] + root_folder = self.tmpdir / str(uuid4())[:6] + root_folder.mkdir() + created.append(root_folder) + + sub_file = root_folder / f"file_{str(uuid4())[:4]}" + sub_file.write_text("test", encoding="utf-8") + created.append(sub_file) + + sub_file = root_folder / f"file_{str(uuid4())[:4]}" + sub_file.write_text("test", encoding="utf-8") + created.append(sub_file) + + with ensure_no_exception(): + self.direct_transfer(root_folder) + planned = [ + self.engine_1.dao.get_state_from_local(item) for item in created + ] + assert len(planned) == len(created) + assert all(dt["session"] == x + 1 for dt in planned) + + session = self.engine_1.dao.get_session(x + 1) + assert session + assert session.status == TransferStatus.ONGOING + + with self._caplog.at_level(logging.INFO): + self.wait_sync(wait_for_async=True) + + session = self.engine_1.dao.get_session(x + 1) + assert session + assert session.status == TransferStatus.DONE + assert session + + # A new Notification logs should appear at each iteration + records = map(str, self._caplog.records) + matches = list(filter(expression.match, records)) + assert len(matches) == x + 1 + + def test_pause_resume_session(self): + """ + Test the session pause and resume system. + The Session final status should be COMPLETED. + """ + engine = self.engine_1 + + # There is no upload, right now + assert not list(engine.dao.get_dt_uploads()) + expression = re.compile( + r"" + ) + + def callback(*_): + """This will mimic what is done in SessionItem.qml.""" + # Ensure we have 1 ongoing upload + dao = engine.dao + uploads = list(dao.get_dt_uploads()) + assert uploads + upload = uploads[0] + assert upload.status == TransferStatus.ONGOING + + # Verify the session status + sessions = dao.get_active_sessions_raw() + assert len(sessions) == 1 + session = sessions[0] + assert session["total"] == 2 + assert session["status"] == TransferStatus.ONGOING + + # Pause the session + dao.pause_session(session["uid"]) + + session = dao.get_session(session["uid"]) + uploads = list(dao.get_dt_uploads()) + assert uploads + upload = uploads[0] + assert upload.status == TransferStatus.PAUSED + + created = [] + root_folder = self.tmpdir / str(uuid4())[:6] + root_folder.mkdir() + created.append(root_folder) + + sub_file = root_folder / f"file_{str(uuid4())[:4]}" + sub_file.write_text("Some content." * 1024 * 1024 * 2, encoding="utf-8") + created.append(sub_file) + + with patch.object(engine.remote, "upload_callback", new=callback): + with ensure_no_exception(): + self.direct_transfer(root_folder) + self.wait_sync() + + session = engine.dao.get_session(1) + assert session + assert session.status == TransferStatus.PAUSED + + engine.resume_session(1) + with self._caplog.at_level(logging.INFO): + self.wait_sync(wait_for_async=True) + + sessions = engine.dao.get_completed_sessions_raw(limit=5) + assert sessions + assert len(sessions) == 1 + session = sessions[0] + assert session["status"] == TransferStatus.DONE + + # A new Notification logs should appear at each iteration + records = map(str, self._caplog.records) + matches = list(filter(expression.match, records)) + assert len(matches) == 1 + + def test_pause_cancel_session(self): + """ + Test the session pause and cancel system. + All Uploads should be removed and the Session final status should be CANCELLED. + """ + engine = self.engine_1 + + # There is no upload, right now + assert not list(engine.dao.get_dt_uploads()) + + def callback(*_): + """This will mimic what is done in SessionItem.qml.""" + # Ensure we have 1 ongoing upload + dao = engine.dao + uploads = list(dao.get_dt_uploads()) + assert uploads + upload = uploads[0] + assert upload.status == TransferStatus.ONGOING + + # Verify the session status + sessions = dao.get_active_sessions_raw() + assert len(sessions) == 1 + session = sessions[0] + assert session["total"] == 2 + assert session["status"] == TransferStatus.ONGOING + + # Pause the session + dao.pause_session(session["uid"]) + + session = dao.get_session(session["uid"]) + print(session) + uploads = list(dao.get_dt_uploads()) + assert uploads + upload = uploads[0] + assert upload.status == TransferStatus.PAUSED + + created = [] + root_folder = self.tmpdir / str(uuid4())[:6] + root_folder.mkdir() + created.append(root_folder) + + sub_file = root_folder / f"file_{str(uuid4())[:4]}" + sub_file.write_text("Some content." * 1024 * 1024 * 2, encoding="utf-8") + created.append(sub_file) + + with patch.object(engine.remote, "upload_callback", new=callback): + with ensure_no_exception(): + self.direct_transfer(root_folder) + self.wait_sync() + + session = engine.dao.get_session(1) + assert session + assert session.status == TransferStatus.PAUSED + + engine.cancel_session(1) + self.wait_sync(wait_for_async=True) + + sessions = engine.dao.get_completed_sessions_raw(limit=5) + assert sessions + assert len(sessions) == 1 + session = sessions[0] + assert session["status"] == TransferStatus.CANCELLED + + uploads = list(engine.dao.get_dt_uploads()) + assert not uploads + + @pytest.mark.xfail(reason="NXDRIVE-2495") + def test_pause_resume_session_non_chunked(self): + """ + Test the session pause and resume system for sessions containing non-chunked files. + The Session final status should be COMPLETED. + """ + engine = self.engine_1 + + # There is no upload, right now + assert not list(engine.dao.get_dt_uploads()) + expression = re.compile( + r"" + ) + + upload_count = 0 + + def get_upload(*_, **__): + """Alternative version of EngineDAO.get_upload() that pause the session.""" + nonlocal upload_count + + # The first upload is the folder, we want to pause the session just before the file. + if upload_count == 0: + upload_count += 1 + return None + + # Ensure we have 0 ongoing upload + dao = engine.dao + uploads = list(dao.get_dt_uploads()) + assert not uploads + + # Verify the session status + sessions = dao.get_active_sessions_raw() + assert len(sessions) == 1 + session = sessions[0] + assert session["total"] == 2 + assert session["status"] is TransferStatus.ONGOING + + # Pause the session + dao.pause_session(session["uid"]) + + # Session should be paused now + session = dao.get_session(session["uid"]) + assert session.status is TransferStatus.PAUSED + + return None + + created = [] + root_folder = self.tmpdir / str(uuid4())[:6] + root_folder.mkdir() + created.append(root_folder) + + sub_file = root_folder / f"file_{str(uuid4())[:4]}" + sub_file.write_text("Some content.", encoding="utf-8") + created.append(sub_file) + + with patch.object(engine.dao, "get_upload", new=get_upload): + with ensure_no_exception(): + self.direct_transfer(root_folder) + self.wait_sync() + + session = engine.dao.get_session(1) + assert session + assert session.status is TransferStatus.PAUSED + + uploads = list(engine.dao.get_dt_uploads()) + assert uploads + upload = uploads[0] + assert upload.status is TransferStatus.PAUSED + + engine.resume_session(1) + with self._caplog.at_level(logging.INFO): + self.wait_sync(wait_for_async=True) + + sessions = engine.dao.get_completed_sessions_raw(limit=5) + assert sessions + assert len(sessions) == 1 + session = sessions[0] + assert session["status"] is TransferStatus.DONE + + # A new Notification logs should appear at each iteration + records = map(str, self._caplog.records) + matches = list(filter(expression.match, records)) + assert len(matches) == 1 + + def test_sub_files(self): + """Test the Direct Transfer on a folder with many files.""" + + # There is no upload, right now + assert not list(self.engine_1.dao.get_dt_uploads()) + + created = [] + + root_folder = self.tmpdir / str(uuid4())[:6] + root_folder.mkdir() + + created.append(root_folder.as_posix()) + for _ in range(5): + sub_file = root_folder / f"file_{str(uuid4())[:4]}" + sub_file.write_text("test", encoding="utf-8") + created.append(sub_file.as_posix()) + + with ensure_no_exception(): + self.direct_transfer(root_folder) + self.wait_sync(wait_for_async=True) + + self.checks(created) + + def test_identical_sessions(self): + """ + Create two sessions with the same file then pause them. + Ensure that two uploads are created. + The two sessions final status should be COMPLETED. + """ + engine = self.engine_1 + + # There is no upload, right now + assert not list(engine.dao.get_dt_uploads()) + + def callback(*_): + """This will mimic what is done in SessionItem.qml.""" + dao = engine.dao + + sessions = dao.get_active_sessions_raw() + for session in sessions: + # Pause the session + dao.pause_session(session["uid"]) + sessions = dao.get_active_sessions_raw() + uploads = list(dao.get_dt_uploads()) + assert uploads + for upload in uploads: + assert upload.status is TransferStatus.PAUSED + + for _ in range(2): + created = [] + root_folder = self.tmpdir / str(uuid4())[:6] + root_folder.mkdir() + created.append(root_folder) + + sub_file = root_folder / "file_test_duplicate.txt" + sub_file.write_text("Some content." * 1024 * 1024 * 2, encoding="utf-8") + created.append(sub_file) + + with patch.object(engine.remote, "upload_callback", new=callback): + with ensure_no_exception(): + self.direct_transfer(root_folder) + self.wait_sync() + + sessions = engine.dao.get_active_sessions_raw() + assert len(sessions) == 2 + for session in sessions: + assert session["status"] is TransferStatus.PAUSED + + uploads = list(engine.dao.get_dt_uploads()) + assert len(uploads) == 2 + + for session in sessions: + engine.resume_session(session["uid"]) + + self.wait_sync(wait_for_async=True) + + sessions = engine.dao.get_completed_sessions_raw(limit=5) + assert sessions + assert len(sessions) == 2 + for session in sessions: + assert session["status"] is TransferStatus.DONE + assert not list(engine.dao.get_dt_uploads()) + + +class TestDirectTransferFolder(OneUserTest, DirectTransferFolder): + """Direct Transfer in "normal" mode, i.e.: when synchronization features are enabled.""" + + def setUp(self): + DirectTransferFolder.setUp(self) + + def wait_sync(self, *args, **kwargs): + sleep(3) + super().wait_sync(*args, **kwargs) + + +class TestDirectTransferFolderNoSync(OneUserNoSync, DirectTransferFolder): + """Direct Transfer should work when synchronization features are not enabled.""" + + def setUp(self): + DirectTransferFolder.setUp(self) + + def wait_sync(self, *args, **kwargs): + sleep(3) + super().wait_sync(*args, **kwargs) diff --git a/tests/functional/test_encoding.py b/tests/functional/test_encoding.py new file mode 100644 index 0000000000..f253650ea7 --- /dev/null +++ b/tests/functional/test_encoding.py @@ -0,0 +1,121 @@ +import os +from pathlib import Path + +from nxdrive.client.local import FileInfo + +from ..markers import not_mac +from .conftest import OneUserTest + + +class TestEncoding(OneUserTest): + """ + def test_filename_with_accents_from_server(self): + local = self.local_1 + remote = self.remote_document_client_1 + + self.engine_1.start() + self.wait_sync(wait_for_async=True) + + data = b"Contenu sans accents." + remote.make_file(self.workspace, "Nom sans accents.doc", content=data) + remote.make_file(self.workspace, "Nom avec accents \xe9 \xe8.doc", content=data) + self.wait_sync(wait_for_async=True) + + assert local.get_content("/Nom sans accents.doc") == data + assert local.get_content("/Nom avec accents \xe9 \xe8.doc") == data + """ + + def test_filename_with_katakana(self): + local = self.local_1 + remote = self.remote_document_client_1 + + self.engine_1.start() + self.wait_sync(wait_for_async=True) + + data = b"Content" + remote.make_file(self.workspace, "Remote \u30bc\u30ec.doc", content=data) + local.make_file("/", "Local \u30d7 \u793e.doc", content=data) + self.wait_sync(wait_for_async=True) + + assert remote.get_content("/Local \u30d7 \u793e.doc") == data + assert local.get_content("/Remote \u30bc\u30ec.doc") == data + + """ + def test_content_with_accents_from_server(self): + local = self.local_1 + remote = self.remote_document_client_1 + + self.engine_1.start() + self.wait_sync(wait_for_async=True) + + data = "Contenu avec caract\xe8res accentu\xe9s.".encode("utf-8") + remote.make_file(self.workspace, "Nom sans accents.txt", content=data) + self.wait_sync(wait_for_async=True) + + assert local.get_content("/Nom sans accents.txt") == data + """ + + """ + def test_filename_with_accents_from_client(self): + local = self.local_1 + remote = self.remote_document_client_1 + + self.engine_1.start() + self.wait_sync(wait_for_async=True) + + data = b"Contenu sans accents." + local.make_file("/", "Avec accents \xe9 \xe8.doc", content=data) + local.make_file("/", "Sans accents.doc", content=data) + self.wait_sync(wait_for_async=True) + + assert remote.get_content("/Avec accents \xe9 \xe8.doc") == data + assert remote.get_content("/Sans accents.doc") == data + """ + + """ + def test_content_with_accents_from_client(self): + local = self.local_1 + remote = self.remote_document_client_1 + + self.engine_1.start() + self.wait_sync(wait_for_async=True) + + data = "Contenu avec caract\xe8res accentu\xe9s.".encode("utf-8") + local.make_file("/", "Nom sans accents", content=data) + self.wait_sync(wait_for_async=True) + + assert remote.get_content("/Nom sans accents") == data + """ + + def test_name_normalization(self): + local = self.local_1 + remote = self.remote_document_client_1 + + self.engine_1.start() + self.wait_sync(wait_for_async=True) + + filename = "space\xa0 et TM\u2122.doc" + local.make_file("/", filename) + self.wait_sync(wait_for_async=True) + + assert remote.get_info("/" + filename).name == filename + + @not_mac(reason="Normalization does not work on macOS") + def test_fileinfo_normalization(self): + local = self.local_1 + + self.engine_1.start() + self.wait_sync(wait_for_async=True) + self.engine_1.stop() + + name = "Teste\u0301" + local.make_file("/", name, content=b"Test") + + # FileInfo() will normalize the filename + assert FileInfo(local.base_folder, Path(name), False, 0).name != name + + # The encoding should be different, + # cannot trust the get_children as they use FileInfo + children = os.listdir(local.abspath("/")) + assert len(children) == 1 + assert children[0] != name diff --git a/tests/functional/test_group_changes.py b/tests/functional/test_group_changes.py new file mode 100644 index 0000000000..b2c9b87566 --- /dev/null +++ b/tests/functional/test_group_changes.py @@ -0,0 +1,231 @@ +from logging import getLogger + +from nuxeo.exceptions import HTTPError +from nuxeo.models import Document, Group + +from .. import env +from .conftest import OneUserTest, root_remote, salt + +log = getLogger(__name__) + + +class TestGroupChanges(OneUserTest): + """ + Test that changes on groups are detected by Drive. + See https://jira.nuxeo.com/browse/NXP-14830. + """ + + def setUp(self): + self.group1 = salt("group1") + self.group2 = salt("group2") + self.parent_group = salt("parentGroup") + self.grand_parent_group = salt("grandParentGroup") + self.new_groups = ( + Group(groupname=self.group1, memberUsers=[self.user_1]), + Group(groupname=self.group2, memberUsers=[self.user_1]), + Group(groupname=self.parent_group, memberGroups=[self.group1]), + Group(groupname=self.grand_parent_group, memberGroups=[self.parent_group]), + ) + for group in self.new_groups: + self.root_remote.groups.create(group) + + # Create test workspace + workspace_name = salt("groupChangesTestWorkspace") + self.workspace_group = self.root_remote.documents.create( + Document( + name=workspace_name, + type="Workspace", + properties={"dc:title": workspace_name}, + ), + parent_path=env.WS_DIR, + ) + self.workspace_path = self.workspace_group.path + + self.admin_remote = root_remote(base_folder=self.workspace_path) + + self.engine_1.start() + self.wait_sync(wait_for_async=True) + + def tearDown(self): + self.workspace_group.delete() + for group in reversed(self.new_groups): + try: + self.root_remote.groups.delete(group.groupname) + except HTTPError as exc: + if exc.status == 404: + continue + raise + + def set_ace(self, user, doc): + log.info(f"Grant ReadWrite permission to {user} on {doc}") + self.admin_remote.execute( + command="Document.SetACE", + input_obj=f"doc:{doc}", + user=user, + permission="ReadWrite", + ) + + def test_group_changes_on_sync_root(self): + """ + Test changes on a group that has access to a synchronization root. + """ + log.info("Create syncRoot folder") + sync_root_id = self.admin_remote.make_folder("/", "syncRoot") + + self.set_ace(self.group1, sync_root_id) + + log.info("Register syncRoot for driveuser_1") + self.remote_1.register_as_root(sync_root_id) + + log.info("Check that syncRoot is created locally") + self.wait_sync(wait_for_async=True) + assert self.local_root_client_1.exists("/syncRoot") + + self._test_group_changes("/syncRoot", self.group1) + + def test_group_changes_on_sync_root_child(self): + """ + Test changes on a group that has access + to a child of a synchronization root. + """ + log.info("Create syncRoot folder") + sync_root_id = self.admin_remote.make_folder("/", "syncRoot") + + log.info("Create child folder") + child_id = self.admin_remote.make_folder("/syncRoot", "child") + + self.set_ace(self.group1, sync_root_id) + self.set_ace(self.group2, child_id) + + log.info("Block inheritance on child") + self.admin_remote.block_inheritance(child_id, overwrite=False) + + log.info("Register syncRoot for driveuser_1") + self.remote_1.register_as_root(sync_root_id) + + log.info("Check that syncRoot and child are created locally") + self.wait_sync(wait_for_async=True) + assert self.local_root_client_1.exists("/syncRoot") + assert self.local_root_client_1.exists("/syncRoot/child") + + self._test_group_changes("/syncRoot/child", self.group2) + + """ + def test_group_changes_on_sync_root_parent(self): + "" + Test changes on a group that has access + to the parent of a synchronization root. + "" + log.info("Create parent folder") + parent_id = self.admin_remote.make_folder("/", "parent") + + log.info("Create syncRoot folder") + sync_root_id = self.admin_remote.make_folder("/parent", "syncRoot") + + self.set_ace(self.group1, parent_id) + + log.info("Register syncRoot for driveuser_1") + self.remote_1.register_as_root(sync_root_id) + + log.info("Check that syncRoot is created locally") + self.wait_sync(wait_for_async=True) + assert self.local_root_client_1.exists("/syncRoot") + + self._test_group_changes("/syncRoot", self.group1) + """ + + def test_changes_with_parent_group(self): + """ + Test changes on the parent group of a group + that has access to a synchronization root. + """ + self._test_group_changes_with_ancestor_groups(self.parent_group) + + def test_changes_with_grand_parent_group(self): + """ + Test changes on the grandparent group of a group + that has access to a synchronization root. + """ + self._test_group_changes_with_ancestor_groups(self.grand_parent_group) + + def _test_group_changes(self, folder_path, group_name, need_parent=False): + """ + Tests changes on the given group that has access to the given folder: + - Remove the test user from the group. + - Add the test user to the group. + - Delete the group. + - Create the group including the test user. + """ + log.info( + "Test changes on %s for %s with need_parent=%r", + group_name, + folder_path, + need_parent, + ) + remote = self.admin_remote + local = self.local_root_client_1 + + log.info("Remove driveuser_1 from %s", group_name) + group = remote.groups.get(group_name) + group.memberUsers = [] + group.save() + + log.info("Check that %s is deleted locally", folder_path) + self.wait_sync(wait_for_async=True) + assert not local.exists(folder_path) + + log.info("Add driveuser_1 to %s", group_name) + group.memberUsers = [self.user_1] + group.save() + + log.info("Check that %s is created locally", folder_path) + self.wait_sync(wait_for_async=True) + assert local.exists(folder_path) + + log.info("Delete %s", group_name) + remote.groups.delete(group_name) + + log.info("Check that %s is deleted locally", folder_path) + self.wait_sync(wait_for_async=True) + assert not local.exists(folder_path) + + log.info("Create %s", group_name) + remote.groups.create(Group(groupname=group_name, memberUsers=[self.user_1])) + + if need_parent: + log.info( + "%s should not be created locally since " + "the newly created group has not been added yet " + "as a subgroup of parentGroup", + folder_path, + ) + self.wait_sync(wait_for_async=True) + assert not local.exists(folder_path) + + log.debug("Add %s as a subgroup of parentGroup", group_name) + group = remote.groups.get(self.parent_group) + group.memberGroups = [group_name] + group.save() + + log.info("Check that %s is created locally", folder_path) + self.wait_sync(wait_for_async=True) + assert local.exists(folder_path) + + def _test_group_changes_with_ancestor_groups(self, ancestor_group): + """ + Test changes on a descendant group of the given group + that has access to a synchronization root. + """ + log.info("Create syncRoot folder") + sync_root_id = self.admin_remote.make_folder("/", "syncRoot") + + self.set_ace(ancestor_group, sync_root_id) + + log.info("Register syncRoot for driveuser_1") + self.remote_1.register_as_root(sync_root_id) + + log.info("Check that syncRoot is created locally") + self.wait_sync(wait_for_async=True) + assert self.local_root_client_1.exists("/syncRoot") + + self._test_group_changes("/syncRoot", self.group1, need_parent=True) diff --git a/tests/functional/test_ignored.py b/tests/functional/test_ignored.py new file mode 100644 index 0000000000..76f890180e --- /dev/null +++ b/tests/functional/test_ignored.py @@ -0,0 +1,46 @@ +from pathlib import Path + +from .conftest import OneUserTest + + +class TestIgnored(OneUserTest): + def test_ignore_file(self): + local = self.local_1 + remote = self.remote_document_client_1 + dao = self.engine_1.dao + + self.engine_1.start() + self.wait_sync(wait_for_async=True) + + remote.make_file("/", "abcde.txt", content=b"Some content.") + remote.make_file("/", "abcde.txt", content=b"Some other content.") + + self.wait_sync(wait_for_async=True) + assert local.exists("/abcde.txt") + # Check we only have one file locally + assert len(dao.get_local_children(Path("/"))) == 1 + # Check that there is an error + errors = dao.get_errors() + assert len(errors) == 1 + error_id = errors[0].id + + # Ignore the error + self.engine_1.ignore_pair(error_id, errors[0].last_error) + + self.wait_sync(wait_for_async=True) + + # Check there are no errors + assert not dao.get_errors() + # Check there is an ignored file + unsynceds = dao.get_unsynchronizeds() + assert len(unsynceds) == 1 + # Check that the ignored file is the same as the error that appeared previously + assert unsynceds[0].id == error_id + + # Force the engine to do a full scan again + self.engine_1._remote_watcher._last_remote_full_scan = None + self.wait_sync(wait_for_async=True) + + # Check that there are no errors back + assert not dao.get_errors() + assert dao.get_unsynchronized_count() == 1 diff --git a/tests/functional/test_local_changes_when_offline.py b/tests/functional/test_local_changes_when_offline.py new file mode 100644 index 0000000000..da8c71abae --- /dev/null +++ b/tests/functional/test_local_changes_when_offline.py @@ -0,0 +1,81 @@ +"""" +Test if changes made to local file system when Drive is offline sync's back +later when Drive becomes online. +""" +import pytest + +from nxdrive.constants import WINDOWS + +from .conftest import FILE_CONTENT, OneUserTest + + +class TestOfflineChangesSync(OneUserTest): + def setUp(self): + self.engine_1.start() + self.wait_sync(wait_for_async=True) + + self.local = self.local_1 + self.remote = self.remote_document_client_1 + + # Create a folder and a file on the server + self.folder1_remote = self.remote.make_folder("/", "Folder1") + self.file1_remote = self.remote.make_file( + self.folder1_remote, "File1.txt", FILE_CONTENT + ) + self.wait_sync(wait_for_async=True) + + """ + def test_copy_paste_when_engine_suspended(self): + "" + Copy paste and a rename operation together on same file while Drive is + offline should be detected and synced to server as soon as Drive comes + back online. + "" + self.copy_past_and_rename(stop_engine=True) + """ + + @pytest.mark.randombug("Unstable on Windows", condition=WINDOWS) + def test_copy_paste_normal(self): + """ + Copy paste and a rename operation together on same file while Drive is + online should be detected and synced to server. + """ + self.copy_past_and_rename() + + def copy_past_and_rename(self, stop_engine: bool = False): + if stop_engine: + # Make Drive offline (by suspend) + self.engine_1.suspend() + + # Make a copy of the file (with xattr included) + self.local_1.copy("/Folder1/File1.txt", "/Folder1/File1 - Copy.txt") + + # Rename the original file + self.local.rename("/Folder1/File1.txt", "File1_renamed.txt") + + if stop_engine: + # Bring Drive online (by resume) + self.engine_1.resume() + + self.wait_sync() + + # Verify there is no local changes + assert self.local.exists("/Folder1/File1_renamed.txt") + assert self.local.exists("/Folder1/File1 - Copy.txt") + assert not self.local.exists("/Folder1/File1.txt") + + # Verify that local changes are uploaded to server successfully + if self.remote.exists("/Folder1/File1 - Copy.txt"): + # '/Folder1/File1 - Copy.txt' is uploaded to server. + # So original file named should be changed as 'File_renamed.txt' + remote_info = self.remote.get_info(self.file1_remote) + assert remote_info.name == "File1 - Copy.txt" + + else: + # Original file is renamed as 'File1 - Copy.txt'. + # This is a bug only if Drive is online during copy + rename + assert self.remote.exists("/Folder1/File1_renamed.txt") + remote_info = self.remote.get_info(self.file1_remote) + assert remote_info.name == "File1_renamed.txt" + + assert not self.remote.exists("/Folder1/File1.txt") diff --git a/tests/functional/test_local_copy_paste.py b/tests/functional/test_local_copy_paste.py new file mode 100644 index 0000000000..eedb717ee2 --- /dev/null +++ b/tests/functional/test_local_copy_paste.py @@ -0,0 +1,131 @@ +import shutil + +from .conftest import FILE_CONTENT, OneUserTest + + +class TestLocalCopyPaste(OneUserTest): + NUMBER_OF_LOCAL_TEXT_FILES = 10 + NUMBER_OF_LOCAL_IMAGE_FILES = 10 + NUMBER_OF_LOCAL_FILES_TOTAL = ( + NUMBER_OF_LOCAL_TEXT_FILES + NUMBER_OF_LOCAL_IMAGE_FILES + ) + FILE_NAME_PATTERN = "file%03d%s" + + """ + 1. Create folder "/A" with 100 files in it + 2. Create folder "/B" + """ + + def setUp(self): + remote = self.remote_1 + local = self.local_1 + self.engine_1.start() + self.wait_sync(wait_for_async=True) + self.engine_1.stop() + assert local.exists("/") + + # create folder A + local.make_folder("/", "A") + self.folder_path_1 = "/A" + + # create folder B + # NXDRIVE-477 If created after files are created inside A, + # creation of B isn't detected wy Watchdog! + # Reproducible with watchdemo, need to investigate. + # That's why we are now using local scan for setup_method(). + local.make_folder("/", "B") + self.folder_path_2 = "/B" + + # add text files in folder 'Nuxeo Drive Test Workspace/A' + self.local_files_list = [] + for file_num in range(1, self.NUMBER_OF_LOCAL_TEXT_FILES + 1): + filename = self.FILE_NAME_PATTERN % (file_num, ".txt") + local.make_file(self.folder_path_1, filename, FILE_CONTENT) + self.local_files_list.append(filename) + + # add image files in folder 'Nuxeo Drive Test Workspace/A' + abs_folder_path_1 = local.abspath(self.folder_path_1) + test_doc_path = self.location / "resources" / "files" / "cat.jpg" + for file_num in range( + self.NUMBER_OF_LOCAL_TEXT_FILES + 1, self.NUMBER_OF_LOCAL_FILES_TOTAL + 1 + ): + filename = self.FILE_NAME_PATTERN % (file_num, ".jpg") + dst_path = abs_folder_path_1 / filename + shutil.copyfile(test_doc_path, dst_path) + self.local_files_list.append(filename) + + self.engine_1.start() + self.wait_sync() + self.engine_1.stop() + + # get remote folders reference ids + self.remote_ref_1 = local.get_remote_id(self.folder_path_1) + assert self.remote_ref_1 + self.remote_ref_2 = local.get_remote_id(self.folder_path_2) + assert self.remote_ref_2 + assert remote.fs_exists(self.remote_ref_1) + assert remote.fs_exists(self.remote_ref_2) + + assert ( + len(remote.get_fs_children(self.remote_ref_1)) + == self.NUMBER_OF_LOCAL_FILES_TOTAL + ) + + def test_local_copy_paste_files(self): + self._local_copy_paste_files() + + """ + def test_local_copy_paste_files_stopped(self): + self._local_copy_paste_files(stopped=True) + """ + + def _local_copy_paste_files(self, stopped=False): + if not stopped: + self.engine_1.start() + + # Copy all children (files) of A to B + remote = self.remote_1 + local = self.local_1 + src = local.abspath(self.folder_path_1) + dst = local.abspath(self.folder_path_2) + num = self.NUMBER_OF_LOCAL_FILES_TOTAL + expected_files = set(self.local_files_list) + + for f in src.iterdir(): + shutil.copy(f, dst) + + if stopped: + self.engine_1.start() + self.wait_sync(timeout=60) + + # Expect local "/A" to contain all the files + abs_folder_path_1 = local.abspath(self.folder_path_1) + assert abs_folder_path_1.exists() + children = [f.name for f in abs_folder_path_1.iterdir()] + assert len(children) == num + assert set(children) == expected_files + + # expect local "/B" to contain the same files + abs_folder_path_2 = local.abspath(self.folder_path_2) + assert abs_folder_path_2.exists() + children = [f.name for f in abs_folder_path_2.iterdir()] + assert len(children) == num + assert set(children) == expected_files + + # expect remote "/A" to contain all the files + # just compare the names + children = [ + remote_info.name + for remote_info in remote.get_fs_children(self.remote_ref_1) + ] + assert len(children) == num + assert set(children) == expected_files + + # expect remote "/B" to contain all the files + # just compare the names + children = [ + remote_info.name + for remote_info in remote.get_fs_children(self.remote_ref_2) + ] + assert len(children) == num + assert set(children) == expected_files diff --git a/tests/functional/test_local_creations.py b/tests/functional/test_local_creations.py new file mode 100644 index 0000000000..e39d0d4de7 --- /dev/null +++ b/tests/functional/test_local_creations.py @@ -0,0 +1,158 @@ +import shutil +import time +from pathlib import Path +from unittest.mock import patch + +from nxdrive.constants import MAC, WINDOWS + +from .. import ensure_no_exception +from .conftest import SYNC_ROOT_FAC_ID, OneUserTest + + +class TestLocalCreations(OneUserTest): + def test_mini_scenario(self): + local = self.local_root_client_1 + remote = self.remote_1 + + self.engine_1.start() + self.wait_sync(wait_for_async=True) + + local.make_folder(f"/{self.workspace_title}", "A") + folder_path_1 = f"{self.workspace_title}/A" + + test_doc_path = self.location / "resources" / "files" / "cat.jpg" + abs_folder_path_1 = local.abspath(f"/{folder_path_1}") + dst_path = abs_folder_path_1 / "cat.jpg" + shutil.copyfile(test_doc_path, dst_path) + + self.wait_sync(timeout=100) + uid = local.get_remote_id(f"/{folder_path_1}/cat.jpg") + assert remote.fs_exists(uid) + + def test_local_modification_date(self): + """Check that the files have the Platform modification date.""" + remote = self.remote_document_client_1 + local = self.local_1 + engine = self.engine_1 + + filename = "abc.txt" + remote.make_file("/", filename, content=b"1234") + remote_mtime = time.time() + + time.sleep(3) + + engine.start() + self.wait_sync(wait_for_async=True) + + filename = f"/{filename}" + assert local.exists(filename) + assert local.abspath(filename).stat().st_mtime < remote_mtime + + def test_local_creation_date(self): + """Check that the files have the Platform modification date.""" + remote = self.remote_1 + local = self.local_1 + engine = self.engine_1 + sleep_time = 3 + + workspace_id = f"{SYNC_ROOT_FAC_ID}{self.workspace}" + filename = "abc.txt" + file_id = remote.make_file(workspace_id, filename, content=b"1234").uid + after_ctime = time.time() + + time.sleep(sleep_time) + filename = f"a{filename}" + remote.rename(file_id, filename) + after_mtime = time.time() + + engine.start() + self.wait_sync(wait_for_async=True) + + filename = f"/{filename}" + assert local.exists(filename) + stats = local.abspath(filename).stat() + local_mtime = stats.st_mtime + + # Note: GNU/Linux does not have a creation time + if MAC or WINDOWS: + local_ctime = stats.st_birthtime if MAC else stats.st_ctime + assert local_ctime < after_ctime + assert local_ctime + sleep_time <= local_mtime + + assert local_mtime < after_mtime + 0.5 + + def recovery_scenario(self, cleanup: bool = True): + """ + A recovery test, scenario: + 1. Add a new account using the foo folder. + 2. Remove the account, keep the foo folder as-is. + 3. Remove xattrs using the clean-folder CLI argument (if *cleanup* is True). + 4. Re-add the account using the foo folder. + + The goal is to check that local data is not re-downloaded at all. + Drive should simply recreate the database and check the all files are there. + """ + # Start engine and wait for synchronization + self.engine_1.start() + self.wait_sync(wait_for_async=True) + + # Create folders and files on the server + workspace_id = f"{SYNC_ROOT_FAC_ID}{self.workspace}" + folder_uid = self.remote_1.make_folder(workspace_id, "a folder").uid + self.remote_1.make_file(folder_uid, "file1.bin", content=b"0321" * 42) + self.remote_1.make_file(folder_uid, "file2.bin", content=b"12365" * 42) + self.remote_1.make_folder(folder_uid, "folder 2") + + # Start engine and wait for synchronization + self.engine_1.start() + self.wait_sync(wait_for_async=True) + + # Local checks + assert self.local_1.exists("/a folder") + assert self.local_1.exists("/a folder/file1.bin") + assert self.local_1.exists("/a folder/file2.bin") + assert self.local_1.exists("/a folder/folder 2") + + # Stop the engine for following actions + self.engine_1.stop() + + if cleanup: + # Remove xattrs + folder = Path("a folder") + self.local_1.clean_xattr_folder_recursive(folder, cleanup=True) + self.local_1.remove_remote_id(folder, cleanup=True) + + # Ensure xattrs are gone + assert not self.local_1.get_remote_id(folder) + assert not self.local_1.get_remote_id(folder / "file1.bin") + assert not self.local_1.get_remote_id(folder / "file2.bin") + assert not self.local_1.get_remote_id(folder / "folder 2") + + # Destroy the database but keep synced files + self.unbind_engine(1, purge=False) + + def download(*_, **__): + """ + Patch Remote.download() to be able to check that nothing + will be downloaded as local data is already there. + """ + assert 0, "No download should be done!" + + # Re-bind the account using the same folder + self.bind_engine(1, start_engine=False) + + # Start the sync + with patch.object(self.engine_1.remote, "download", new=download): + with ensure_no_exception(): + self.engine_1.start() + self.wait_sync(wait_for_async=True) + + # No error expected + assert not self.engine_1.dao.get_errors(limit=0) + + # Checks + for client in (self.local_1, self.remote_1): + assert client.exists("/a folder") + assert client.exists("/a folder/file1.bin") + assert client.exists("/a folder/file2.bin") + assert client.exists("/a folder/folder 2") diff --git a/tests/functional/test_local_deletion.py b/tests/functional/test_local_deletion.py new file mode 100644 index 0000000000..39bd8faa92 --- /dev/null +++ b/tests/functional/test_local_deletion.py @@ -0,0 +1,309 @@ +import shutil + +import pytest + +from nxdrive.constants import WINDOWS + +from .conftest import OneUserTest + + +class TestLocalDeletion(OneUserTest): + def setUp(self): + self.engine_1.start() + self.wait_sync(wait_for_async=True) + + def test_untrash_file(self): + local = self.local_1 + remote = self.remote_document_client_1 + file1 = "File_To_Delete.txt" + + local.make_file("/", file1, content=b"This is a content") + self.wait_sync() + assert remote.exists("/" + file1) + + old_info = remote.get_info(f"/{file1}") + abs_path = local.abspath(f"/{file1}") + + # Pretend we had trash the file + shutil.move(abs_path, self.local_test_folder_1 / file1) + self.wait_sync(wait_for_async=True) + assert not remote.exists("/" + file1) + assert not local.exists("/" + file1) + # See if it untrash or recreate + shutil.move(self.local_test_folder_1 / file1, local.abspath("/")) + self.wait_sync(wait_for_async=True) + assert remote.exists(old_info.uid) + assert local.exists("/" + file1) + + def test_untrash_file_with_rename(self): + local = self.local_1 + remote = self.remote_document_client_1 + file1 = "File_To_Delete.txt" + file2 = "File_To_Delete2.txt" + + local.make_file("/", file1, content=b"This is a content") + self.wait_sync() + assert remote.exists(f"/{file1}") + uid = local.get_remote_id(f"/{file1}") + old_info = remote.get_info(f"/{file1}") + abs_path = local.abspath(f"/{file1}") + # Pretend we had trash the file + shutil.move(abs_path, self.local_test_folder_1 / file2) + self.wait_sync(wait_for_async=True) + assert not remote.exists("/" + file1) + assert not local.exists("/" + file1) + (self.local_test_folder_1 / file2).write_bytes(b"New content") + if WINDOWS: + # Python API overwrite the tag by default + (self.local_test_folder_1 / f"{file2}:ndrive").write_text( + uid, encoding="utf-8" + ) + # See if it untrash or recreate + shutil.move(self.local_test_folder_1 / file2, local.abspath("/")) + self.wait_sync(wait_for_async=True) + assert remote.exists(old_info.uid) + assert local.exists("/" + file2) + assert not local.exists("/" + file1) + assert local.get_content("/" + file2) == b"New content" + + def test_move_untrash_file_on_parent(self): + local = self.local_1 + remote = self.remote_document_client_1 + file1 = "File_To_Delete.txt" + + file_path = "/ToDelete/File_To_Delete.txt" + local.make_folder("/", "ToDelete") + local.make_file("/ToDelete", file1, content=b"This is a content") + self.wait_sync() + assert remote.exists(file_path) + old_info = remote.get_info(file_path) + abs_path = local.abspath(file_path) + # Pretend we had trash the file + shutil.move(abs_path, self.local_test_folder_1 / file1) + self.wait_sync() + local.delete("/ToDelete") + self.wait_sync() + assert not remote.exists(file_path) + assert not local.exists(file_path) + + # See if it untrash or recreate + shutil.move(self.local_test_folder_1 / file1, local.abspath("/")) + self.wait_sync() + new_info = remote.get_info(old_info.uid) + assert new_info.state == "project" + assert local.exists(f"/{file1}") + # Because remote_document_client_1 was used + assert local.get_remote_id("/").endswith(new_info.parent_uid) + + """ + @Options.mock() + def test_move_untrash_file_on_parent_with_no_rights(self): + local = self.local_1 + remote = self.remote_document_client_1 + file1 = "File_To_Delete.txt" + + # Setup + file_path = "/ToDelete/File_To_Delete.txt" + local.make_folder("/", "ToDelete") + local.make_file("/ToDelete", file1, content=b"This is a content") + self.wait_sync() + assert remote.exists(file_path) + old_info = remote.get_info(file_path) + abs_path = local.abspath(file_path) + # Pretend we had trash the file + shutil.move(abs_path, self.local_test_folder_1 / file1) + self.wait_sync() + + # Remove rights + folder_path = f"{self.ws.path}/ToDelete" + input_obj = "doc:" + folder_path + self.root_remote.execute( + command="Document.SetACE", + input_obj=input_obj, + user=self.user_1, + permission="Read", + ) + self.root_remote.block_inheritance(folder_path, overwrite=False) + self.root_remote.delete(folder_path) + self.wait_sync(wait_for_async=True) + assert not remote.exists(file_path) + assert not local.exists(file_path) + + # See if it untrash or recreate + shutil.move(self.local_test_folder_1 / file1, local.abspath("/")) + assert local.get_remote_id("/" + file1) + self.wait_sync() + assert local.exists("/" + file1) + new_uid = local.get_remote_id("/" + file1) + # Because remote_document_client_1 was used + assert new_uid + assert not new_uid.endswith(old_info.uid) + """ + + @pytest.mark.skip( + reason="Wait to know what is the expectation " + "- the previous folder does not exist" + ) + def test_move_untrash_file_on_parent_with_no_rights_on_destination(self): + local = self.local_1 + remote = self.remote_document_client_1 + file1 = "File_To_Delete.txt" + + # Setup the test + file_path = "/ToDelete/File_To_Delete.txt" + local.make_folder("/", "ToDelete") + local.make_folder("/", "ToCopy") + local.make_file("/ToDelete", file1, content=b"This is a content") + self.wait_sync() + assert remote.exists(file_path) + remote.get_info(file_path) + abs_path = local.abspath(file_path) + + # Pretend we had trash the file + shutil.move(abs_path, self.local_test_folder_1 / file1) + self.wait_sync() + + # Remove rights + folder_path = f"{self.ws.path}/ToCopy" + input_obj = "doc:" + folder_path + self.root_remote.execute( + command="Document.SetACE", + input_obj=input_obj, + user=self.user_1, + permission="Read", + ) + self.root_remote.block_inheritance(folder_path, overwrite=False) + # Delete + local.delete("/ToDelete") + self.wait_sync(wait_for_async=True) + assert not remote.exists(file_path) + assert not local.exists(file_path) + + # See if it untrash or unsynchronized + local.unlock_ref("/ToCopy") + shutil.move(self.local_test_folder_1 / file1, local.abspath("/ToCopy")) + self.wait_sync(wait_for_async=True) + + """ + def test_untrash_file_on_delete_parent(self): + local = self.local_1 + remote = self.remote_document_client_1 + file1 = "File_To_Delete.txt" + + # Setup + file_path = "/ToDelete/File_To_Delete.txt" + local.make_folder("/", "ToDelete") + local.make_file("/ToDelete", file1, content=b"This is a content") + self.wait_sync() + assert remote.exists(file_path) + old_info = remote.get_info(file_path) + abs_path = local.abspath(file_path) + + # Pretend we had trash the file + shutil.move(abs_path, self.local_test_folder_1 / file1) + self.wait_sync() + local.delete("/ToDelete") + self.wait_sync() + assert not remote.exists(file_path) + assert not local.exists(file_path) + + # See if it untrash or recreate + local.make_folder("/", "ToDelete") + shutil.move(self.local_test_folder_1 / file1, local.abspath("/ToDelete")) + self.wait_sync() + assert remote.exists(old_info.uid) + new_info = remote.get_info(old_info.uid) + assert remote.exists(new_info.parent_uid) + assert local.exists(file_path) + """ + + def test_trash_file_then_parent(self): + local = self.local_1 + remote = self.remote_document_client_1 + file1 = "File_To_Delete.txt" + + file_path = "/ToDelete/File_To_Delete.txt" + local.make_folder("/", "ToDelete") + local.make_file("/ToDelete", file1, content=b"This is a content") + self.wait_sync() + assert remote.exists(file_path) + old_info = remote.get_info(file_path) + abs_path = local.abspath(file_path) + # Pretend we had trash the file + shutil.move(abs_path, self.local_test_folder_1 / file1) + local.delete("/ToDelete") + self.wait_sync() + assert not remote.exists(file_path) + assert not local.exists(file_path) + # See if it untrash or recreate + local.make_folder("/", "ToDelete") + shutil.move(self.local_test_folder_1 / file1, local.abspath("/ToDelete")) + self.wait_sync() + assert remote.exists(old_info.uid) + assert local.exists(file_path) + + """ + @Options.mock() + def test_trash_file_should_respect_deletion_behavior_unsync(self): + Options.deletion_behavior = "unsync" + + local, engine = self.local_1, self.engine_1 + remote = self.remote_document_client_1 + folder, file = "folder", "file.txt" + file_path = f"/{folder}/{file}" + + # Create local data + local.make_folder("/", folder) + local.make_file(f"/{folder}", file, content=b"This is a content") + + # Sync'n check + self.wait_sync() + assert remote.exists(file_path) + + # Mimic "stop Drive" + engine.stop() + + # Delete the file + local.delete(file_path) + + # Mimic "start Drive" + engine.start() + self.wait_sync() + + # Checks + assert remote.exists(file_path) + assert not local.exists(file_path) + """ + + """ + @Options.mock() + def test_trash_file_should_respect_deletion_behavior_delete_server(self): + Options.deletion_behavior = "delete_server" + + local, engine = self.local_1, self.engine_1 + remote = self.remote_document_client_1 + folder, file = "folder", "file.txt" + file_path = f"/{folder}/{file}" + + # Create local data + local.make_folder("/", folder) + local.make_file(f"/{folder}", file, content=b"This is a content") + + # Sync'n check + self.wait_sync() + assert remote.exists(file_path) + + # Mimic "stop Drive" + engine.stop() + + # Delete the file + local.delete(file_path) + + # Mimic "start Drive" + engine.start() + self.wait_sync() + + # Checks + assert not remote.exists(file_path) + assert not local.exists(file_path) + """ diff --git a/tests/functional/test_local_filter.py b/tests/functional/test_local_filter.py new file mode 100644 index 0000000000..69b992e5cd --- /dev/null +++ b/tests/functional/test_local_filter.py @@ -0,0 +1,198 @@ +from nxdrive.constants import SYNC_ROOT + +from .conftest import FS_ITEM_ID_PREFIX, SYNC_ROOT_FAC_ID, OneUserTest + + +class TestLocalFilter(OneUserTest): + def test_synchronize_local_filter(self): + """Test that filtering remote documents is impacted client side + + Just do a single test as it is the same as + test_integration_remote_deletion + + Use cases: + - Filter delete a regular folder + => Folder should be locally deleted + - Unfilter restore folder from the trash + => Folder should be locally re-created + - Filter a synchronization root + => Synchronization root should be locally deleted + - Unfilter synchronization root from the trash + => Synchronization root should be locally re-created + + See TestIntegrationSecurityUpdates.test_synchronize_denying_read_access + as the same uses cases are tested + """ + # Bind the server and root workspace + self.engine_1.start() + # Get local and remote clients + local = self.local_1 + remote = self.remote_document_client_1 + + # Create documents in the remote root workspace + # then synchronize + remote.make_folder("/", "Test folder") + remote.make_file("/Test folder", "joe.txt", content=b"Some content") + self.wait_sync(wait_for_async=True) + # Fake server binding with the unit test class + assert local.exists("/Test folder") + assert local.exists("/Test folder/joe.txt") + + # Add remote folder as filter then synchronize + doc = remote.get_info("/Test folder") + root_path = f"{SYNC_ROOT}/{SYNC_ROOT_FAC_ID}{doc.root}" + doc_path = f"{root_path}/{FS_ITEM_ID_PREFIX}{doc.uid}" + + self.engine_1.add_filter(doc_path) + self.wait_sync() + assert not local.exists("/Test folder") + + self.engine_1.remove_filter(doc_path) + self.wait_sync() + assert local.exists("/Test folder") + assert local.exists("/Test folder/joe.txt") + + self.engine_1.add_filter(doc_path) + self.wait_sync() + assert not local.exists("/Test folder") + + # Delete sync root then synchronize + self.engine_1.add_filter(root_path) + self.wait_sync() + assert not local.exists("/") + + # Restore sync root from trash then synchronize + self.engine_1.remove_filter(root_path) + self.wait_sync() + assert local.exists("/") + assert local.exists("/Test folder") + assert local.exists("/Test folder/joe.txt") + + """ + def test_synchronize_local_office_temp(self): + # Should synchronize directly local folder with hex name + # Bind the server and root workspace + hexaname = "1234ABCD" + hexafile = "2345BCDF" + self.engine_1.start() + self.wait_sync() + self.local_1.make_folder("/", hexaname) + self.local_1.make_file("/", hexafile, content=b"test") + # Make sure that a folder is synchronized directly + # no matter what and the file is postponed + self.wait_sync(enforce_errors=False, fail_if_timeout=False) + children = self.remote_document_client_1.get_children_info(self.workspace) + assert len(children) == 1 + + # Force the postponed to ensure it's synchronized now + self.engine_1.queue_manager.requeue_errors() + self.wait_sync(wait_for_async=True) + assert self.local_1.exists("/" + hexafile) + children = self.remote_document_client_1.get_children_info(self.workspace) + assert len(children) == 2 + assert children[1].name == "2345BCDF" + """ + + """ + def test_synchronize_local_filter_with_move(self): + local = self.local_1 + remote = self.remote_document_client_1 + + # Create documents in the remote root workspace + # then synchronize + remote.make_folder("/", "Test") + remote.make_file("/Test", "joe.txt", content=b"Some content") + remote.make_folder("/Test", "Subfolder") + remote.make_folder("/Test", "Filtered") + remote.make_file("/Test/Subfolder", "joe2.txt", content=b"Some content") + remote.make_file("/Test/Subfolder", "joe3.txt", content=b"Somecossntent") + remote.make_folder("/Test/Subfolder/", "SubSubfolder") + remote.make_file( + "/Test/Subfolder/SubSubfolder", "joe4.txt", content=b"Some qwqwqontent" + ) + + self.engine_1.start() + self.wait_sync(wait_for_async=True) + assert local.exists("/Test") + assert local.exists("/Test/joe.txt") + assert local.exists("/Test/Filtered") + assert local.exists("/Test/Subfolder") + assert local.exists("/Test/Subfolder/joe2.txt") + assert local.exists("/Test/Subfolder/joe3.txt") + assert local.exists("/Test/Subfolder/SubSubfolder") + assert local.exists("/Test/Subfolder/SubSubfolder/joe4.txt") + + # Add remote folder as filter then synchronize + doc_file = remote.get_info("/Test/joe.txt") + doc = remote.get_info("/Test") + filtered_doc = remote.get_info("/Test/Filtered") + root_path = f"{SYNC_ROOT}/{SYNC_ROOT_FAC_ID}{doc.root}" + doc_path_filtered = f"{root_path}/{FS_ITEM_ID_PREFIX}{doc.uid}/{FS_ITEM_ID_PREFIX}{filtered_doc.uid}" + + self.engine_1.add_filter(doc_path_filtered) + self.wait_sync() + assert not local.exists("/Test/Filtered") + + # Move joe.txt to filtered folder on the server + remote.move(doc_file.uid, filtered_doc.uid) + self.wait_sync(wait_for_async=True) + + # It now delete on the client + assert not local.exists("/Test/joe.txt") + assert local.exists("/Test/Subfolder") + assert local.exists("/Test/Subfolder/joe2.txt") + assert local.exists("/Test/Subfolder/joe3.txt") + assert local.exists("/Test/Subfolder/SubSubfolder") + assert local.exists("/Test/Subfolder/SubSubfolder/joe4.txt") + + # Now move the subfolder + doc_file = remote.get_info("/Test/Subfolder") + remote.move(doc_file.uid, filtered_doc.uid) + self.wait_sync(wait_for_async=True) + + # Check that all has been deleted + assert not local.exists("/Test/joe.txt") + assert not local.exists("/Test/Subfolder") + assert not local.exists("/Test/Subfolder/joe2.txt") + assert not local.exists("/Test/Subfolder/joe3.txt") + assert not local.exists("/Test/Subfolder/SubSubfolder") + assert not local.exists("/Test/Subfolder/SubSubfolder/joe4.txt") + """ + + """ + def test_synchronize_local_filter_with_remote_trash(self): + self.engine_1.start() + + # Get local and remote clients + local = self.local_1 + remote = self.remote_document_client_1 + + # Create documents in the remote root workspace + # then synchronize + folder_id = remote.make_folder("/", "Test") + remote.make_file("/Test", "joe.txt", content=b"Some content") + + self.wait_sync(wait_for_async=True) + assert local.exists("/Test") + assert local.exists("/Test/joe.txt") + + # Add remote folder as filter then synchronize + doc = remote.get_info("/Test") + root_path = f"{SYNC_ROOT}/{SYNC_ROOT_FAC_ID}{doc.root}" + doc_path = f"{root_path}/{FS_ITEM_ID_PREFIX}{doc.uid}" + + self.engine_1.add_filter(doc_path) + self.wait_sync() + assert not local.exists("/Test") + + # Delete remote folder then synchronize + remote.delete("/Test") + self.wait_sync(wait_for_async=True) + assert not local.exists("/Test") + + # Restore folder from trash then synchronize + remote.undelete(folder_id) + # NXDRIVE-xx check that the folder is not created as it is filtered + self.wait_sync(wait_for_async=True) + assert not local.exists("/Test") + """ diff --git a/tests/functional/test_local_move_and_rename.py b/tests/functional/test_local_move_and_rename.py new file mode 100644 index 0000000000..0781eaf5a3 --- /dev/null +++ b/tests/functional/test_local_move_and_rename.py @@ -0,0 +1,702 @@ +import shutil +import time +from unittest.mock import patch + +from nuxeo.exceptions import HTTPError + +from . import LocalTest +from .conftest import OS_STAT_MTIME_RESOLUTION, OneUserTest + +# TODO NXDRIVE-170: refactor + + +class TestLocalMoveAndRename(OneUserTest): + def setUp(self): + """ + Sets up the following local hierarchy: + Nuxeo Drive Test Workspace + |-- Original File 1.txt + |-- Original File 2.txt + |-- Original Folder 1 + | |-- Sub-Folder 1.1 + | |-- Sub-Folder 1.2 + | |-- Original File 1.1.txt + |-- Original Folder 2 + | |-- Original File 3.txt + """ + + self.engine_1.start() + self.wait_sync(wait_for_async=True) + local = self.local_1 + local.make_file("/", "Original File 1.txt", content=b"Some Content 1") + local.make_file("/", "Original File 2.txt", content=b"Some Content 2") + + local.make_folder("/", "Original Folder 1") + local.make_folder("/Original Folder 1", "Sub-Folder 1.1") + local.make_folder("/Original Folder 1", "Sub-Folder 1.2") + + # Same content as OF1 + local.make_file( + "/Original Folder 1", "Original File 1.1.txt", content=b"Some Content 1" + ) + + local.make_folder("/", "Original Folder 2") + local.make_file( + "/Original Folder 2", "Original File 3.txt", content=b"Some Content 3" + ) + self.wait_sync() + + """ + def test_local_rename_folder_while_creating(self): + local = self.local_1 + root_local = self.local_root_client_1 + remote = self.remote_document_client_1 + marker = False + + def update_remote_state(row, *args, **kwargs): + nonlocal marker + EngineDAO.update_remote_state(self.engine_1.dao, row, *args, **kwargs) + if row.local_name == "New Folder" and not marker: + root_local.rename(row.local_path, "Renamed Folder") + marker = True + + with patch.object( + self.engine_1.dao, "update_remote_state", new=update_remote_state + ): + local.make_folder("/", "New Folder") + self.wait_sync(fail_if_timeout=False) + + assert local.exists("/Renamed Folder") + assert not local.exists("/New Folder") + + # Path is updated on Nuxeo + info = remote.get_info("/Renamed Folder") + assert info.name == "Renamed Folder" + assert len(local.get_children_info("/")) == 5 + assert len(remote.get_children_info(self.workspace)) == 5 + """ + + """ + def test_local_rename_file_while_creating(self): + local = self.engine_1.local + remote = self.remote_document_client_1 + marker = False + + def set_remote_id(ref: Path, remote_id: bytes, name: str = "ndrive"): + nonlocal local, marker + LocalTest.set_remote_id(local, ref, remote_id, name=name) + if not marker and ref.name == "File.txt": + local.rename(ref, "Renamed File.txt") + marker = True + + with patch.object(self.engine_1.local, "set_remote_id", new=set_remote_id): + self.local_1.make_file("/", "File.txt", content=b"Some Content 2") + self.wait_sync(fail_if_timeout=False) + + local = self.local_1 + assert local.exists("/Renamed File.txt") + assert not local.exists("/File.txt") + + # Path is updated on Nuxeo + info = remote.get_info("/Renamed File.txt") + assert info.name == "Renamed File.txt" + assert len(local.get_children_info("/")) == 5 + assert len(remote.get_children_info(self.workspace)) == 5 + """ + + """ + @pytest.mark.randombug("NXDRIVE-811", condition=True, mode="REPEAT") + def test_local_rename_file_while_creating_before_marker(self): + local = self.local_1 + remote = self.remote_document_client_1 + marker = False + + def set_remote_id(ref: Path, remote_id: bytes, name: str = "ndrive"): + nonlocal local, marker + if not marker and ref.name == "File.txt": + self.engine_1.local.rename(ref, "Renamed File.txt") + marker = True + LocalTest.set_remote_id(local, ref, remote_id, name=name) + + with patch.object(self.engine_1.local, "set_remote_id", new=set_remote_id): + local.make_file("/", "File.txt", content=b"Some Content 2") + self.wait_sync(fail_if_timeout=False) + + assert local.exists("/Renamed File.txt") + assert not local.exists("/File.txt") + + # Path is updated on Nuxeo + info = remote.get_info("/Renamed File.txt") + assert info.name == "Renamed File.txt" + assert len(local.get_children_info("/")) == 5 + assert len(remote.get_children_info(self.workspace)) == 5 + """ + + """ + def test_local_rename_file_while_creating_after_marker(self): + marker = False + local = self.local_1 + remote = self.remote_document_client_1 + + def update_remote_state(row, *args, **kwargs): + nonlocal marker + EngineDAO.update_remote_state(self.engine_1.dao, row, *args, **kwargs) + if not marker and row.local_name == "File.txt": + self.engine_1.local.rename(row.local_path, "Renamed File.txt") + marker = True + + with patch.object( + self.engine_1.dao, "update_remote_state", new=update_remote_state + ): + local.make_file("/", "File.txt", content=b"Some Content 2") + self.wait_sync(fail_if_timeout=False) + + assert local.exists("/Renamed File.txt") + assert not local.exists("/File.txt") + + # Path is updated on Nuxeo + info = remote.get_info("/Renamed File.txt") + assert info.name == "Renamed File.txt" + assert len(local.get_children_info("/")) == 5 + assert len(remote.get_children_info(self.workspace)) == 5 + """ + + def test_replace_file(self): + local = self.local_1 + + # Rename /Original File 1.txt to /Renamed File 1.txt + uid = local.get_remote_id("/Original File 1.txt") + local.remove_remote_id("/Original File 1.txt") + local.update_content("/Original File 1.txt", b"plop") + self.wait_sync(fail_if_timeout=False) + assert local.get_remote_id("/Original File 1.txt") == uid + + def test_local_rename_file(self): + local = self.local_1 + remote = self.remote_document_client_1 + + # Rename /Original File 1.txt to /Renamed File 1.txt + uid_1 = remote.get_info("/Original File 1.txt").uid + local.rename("/Original File 1.txt", "Renamed File 1.txt") + assert not local.exists("/Original File 1.txt") + assert local.exists("/Renamed File 1.txt") + + self.wait_sync() + assert not local.exists("/Original File 1.txt") + assert local.exists("/Renamed File 1.txt") + assert remote.get_info(uid_1).name == "Renamed File 1.txt" + + # Rename 'Renamed File 1.txt' to 'Renamed Again File 1.txt' + # and 'Original File 1.1.txt' to + # 'Renamed File 1.1.txt' at the same time as they share + # the same digest but do not live in the same folder + uid_1_1 = remote.get_info("/Original Folder 1/Original File 1.1.txt").uid + local.rename( + "/Original Folder 1/Original File 1.1.txt", "Renamed File 1.1 \xe9.txt" + ) + assert not local.exists("/Original Folder 1/Original File 1.1.txt") + assert local.exists("/Original Folder 1/Renamed File 1.1 \xe9.txt") + local.rename("/Renamed File 1.txt", "Renamed Again File 1.txt") + assert not local.exists("/Renamed File 1.txt") + assert local.exists("/Renamed Again File 1.txt") + + self.wait_sync() + assert not local.exists("/Renamed File 1.txt") + assert local.exists("/Renamed Again File 1.txt") + assert not local.exists("/Original Folder 1/Original File 1.1.txt") + assert local.exists("/Original Folder 1/Renamed File 1.1 \xe9.txt") + + info_1 = remote.get_info(uid_1) + assert info_1.name == "Renamed Again File 1.txt" + + # User 1 does not have the rights to see the parent container + # of the test workspace, hence set fetch_parent_uid=False + parent_1 = remote.get_info(info_1.parent_uid, fetch_parent_uid=False) + assert parent_1.name == self.workspace_title + + info_1_1 = remote.get_info(uid_1_1) + assert info_1_1.name == "Renamed File 1.1 \xe9.txt" + + parent_1_1 = remote.get_info(info_1_1.parent_uid) + assert parent_1_1.name == "Original Folder 1" + assert len(local.get_children_info("/Original Folder 1")) == 3 + assert len(remote.get_children_info(info_1_1.parent_uid)) == 3 + assert len(local.get_children_info("/")) == 4 + assert len(remote.get_children_info(self.workspace)) == 4 + + """ + def test_local_rename_file_uppercase_stopped(self): + local = self.local_1 + remote = self.remote_document_client_1 + self.engine_1.stop() + + # Rename /Original File 1.txt to /Renamed File 1.txt + + # Rename 'Renamed File 1.txt' to 'Renamed Again File 1.txt' + # and 'Original File 1.1.txt' to + # 'Renamed File 1.1.txt' at the same time as they share + # the same digest but do not live in the same folder + uid = remote.get_info("/Original Folder 1/Original File 1.1.txt").uid + local.rename( + "/Original Folder 1/Original File 1.1.txt", "original File 1.1.txt" + ) + + self.engine_1.start() + self.wait_sync() + + info = remote.get_info(uid) + assert info.name == "original File 1.1.txt" + + parent_info = remote.get_info(info.parent_uid) + assert parent_info.name == "Original Folder 1" + assert len(local.get_children_info("/Original Folder 1")) == 3 + assert len(remote.get_children_info(info.parent_uid)) == 3 + """ + + def test_local_rename_file_uppercase(self): + local = self.local_1 + remote = self.remote_document_client_1 + + # Rename /Original File 1.txt to /Renamed File 1.txt + + # Rename 'Renamed File 1.txt' to 'Renamed Again File 1.txt' + # and 'Original File 1.1.txt' to + # 'Renamed File 1.1.txt' at the same time as they share + # the same digest but do not live in the same folder + uid = remote.get_info("/Original Folder 1/Original File 1.1.txt").uid + local.rename( + "/Original Folder 1/Original File 1.1.txt", "original File 1.1.txt" + ) + + self.wait_sync() + + info = remote.get_info(uid) + assert info.name == "original File 1.1.txt" + + parent_info = remote.get_info(info.parent_uid) + assert parent_info.name == "Original Folder 1" + assert len(local.get_children_info("/Original Folder 1")) == 3 + assert len(remote.get_children_info(info.parent_uid)) == 3 + + def test_local_move_file(self): + local = self.local_1 + remote = self.remote_document_client_1 + + # "/Original File 1.txt" -> "/Original Folder 1/Original File 1.txt" + uid = remote.get_info("/Original File 1.txt").uid + local.move("/Original File 1.txt", "/Original Folder 1") + assert not local.exists("/Original File 1.txt") + assert local.exists("/Original Folder 1/Original File 1.txt") + + self.wait_sync() + assert not local.exists("/Original File 1.txt") + assert local.exists("/Original Folder 1/Original File 1.txt") + + info = remote.get_info(uid) + assert info.name == "Original File 1.txt" + parent_info = remote.get_info(info.parent_uid) + assert parent_info.name == "Original Folder 1" + assert len(local.get_children_info("/Original Folder 1")) == 4 + assert len(remote.get_children_info(info.parent_uid)) == 4 + assert len(local.get_children_info("/")) == 3 + assert len(remote.get_children_info(self.workspace)) == 3 + + """ + def test_local_move_file_rollback(self): + ""Test a local move into a folder that is not allowed on the server, + and so we locally revert/cancel the move. + Sometimes the rollback itself is canceled because the doc pair has + no a remote name. The cause is not yet known. + We would then end on such errors (see NXDRIVE-1952): + + # Nuxeo Drive <= 4.2.0 + AttributeError: 'NoneType' object has no attribute 'rstrip' + File "engine/processor.py", line 1383, in _handle_failed_remote_rename + File "client/local_client.py", line 629, in rename + File "utils.py", line 569, in safe_os_filename + File "utils.py", line 555, in safe_filename + + Or even: + + # Nuxeo Drive > 4.2.0 + TypeError: expected string or bytes-like object + File "engine/processor.py", line 1462, in _handle_failed_remote_rename + File "client/local/base.py", line 458, in rename + File "utils.py", line 622, in safe_os_filename + File "utils.py", line 607, in safe_filename + File ".../re.py", line 192, in sub + "" + local = self.local_1 + + # Move "/Original File 1.txt" -> "/Original Folder 1/Original File 1.txt" + local.move("/Original File 1.txt", "/Original Folder 1") + # And change the file name too + local.rename( + "/Original Folder 1/Original File 1.txt", "Original File 1-ren.txt" + ) + # Checks + assert not local.exists("/Original File 1.txt") + assert not local.exists("/Original Folder 1/Original File 1.txt") + assert local.exists("/Original Folder 1/Original File 1-ren.txt") + + def rename(*args, **kwargs): + raise ValueError("Mock'ed rename error") + + def allow_rollback(*args, **kwargs): + ""Allow rollback on all OSes."" + return True + + with patch.object(self.engine_1.remote, "rename", new=rename): + with patch.object(self.engine_1, "local_rollback", new=allow_rollback): + with ensure_no_exception(): + self.wait_sync() + + # The file has been moved again to its original location + assert not local.exists("/Original File 1.txt") + assert not local.exists("/Original File 1-ren.txt") + assert not local.exists("/Original Folder 1/Original File 1-ren.txt") + assert local.exists("/Original Folder 1/Original File 1.txt") + assert not self.engine_1.dao.get_errors(limit=0) + """ + + def test_local_move_and_rename_file(self): + local = self.local_1 + remote = self.remote_document_client_1 + + # Rename /Original File 1.txt to /Renamed File 1.txt + uid = remote.get_info("/Original File 1.txt").uid + + local.move( + "/Original File 1.txt", "/Original Folder 1", name="Renamed File 1 \xe9.txt" + ) + assert not local.exists("/Original File 1.txt") + assert local.exists("/Original Folder 1/Renamed File 1 \xe9.txt") + + self.wait_sync() + assert not local.exists("/Original File 1.txt") + assert local.exists("/Original Folder 1/Renamed File 1 \xe9.txt") + + info = remote.get_info(uid) + assert info.name == "Renamed File 1 \xe9.txt" + parent_info = remote.get_info(info.parent_uid) + assert parent_info.name == "Original Folder 1" + assert len(local.get_children_info("/Original Folder 1")) == 4 + assert len(remote.get_children_info(info.parent_uid)) == 4 + assert len(local.get_children_info("/")) == 3 + assert len(remote.get_children_info(self.workspace)) == 3 + + def test_local_rename_folder(self): + local = self.local_1 + remote = self.remote_document_client_1 + + # Save the uid of some files and folders prior to renaming + folder_1 = remote.get_info("/Original Folder 1").uid + file_1_1 = remote.get_info("/Original Folder 1/Original File 1.1.txt").uid + folder_1_1 = remote.get_info("/Original Folder 1/Sub-Folder 1.1").uid + + # Rename a non empty folder with some content + local.rename("/Original Folder 1", "Renamed Folder 1 \xe9") + assert not local.exists("/Original Folder 1") + assert local.exists("/Renamed Folder 1 \xe9") + + # Synchronize: only the folder renaming is detected: all + # the descendants are automatically realigned + self.wait_sync() + + # The server folder has been renamed: the uid stays the same + assert remote.get_info(folder_1).name == "Renamed Folder 1 \xe9" + + # The content of the renamed folder is left unchanged + file_info = remote.get_info(file_1_1) + assert file_info.name == "Original File 1.1.txt" + assert file_info.parent_uid == folder_1 + + folder_info = remote.get_info(folder_1_1) + assert folder_info.name == "Sub-Folder 1.1" + assert folder_info.parent_uid == folder_1 + + assert len(local.get_children_info("/Renamed Folder 1 \xe9")) == 3 + assert len(remote.get_children_info(file_info.parent_uid)) == 3 + assert len(local.get_children_info("/")) == 4 + assert len(remote.get_children_info(self.workspace)) == 4 + + """ + def test_local_rename_folder_while_suspended(self): + local = self.local_1 + remote = self.remote_document_client_1 + + # Save the uid of some files and folders prior to renaming + folder_1 = remote.get_info("/Original Folder 1").uid + file_1_1 = remote.get_info("/Original Folder 1/Original File 1.1.txt").uid + folder_1_1 = remote.get_info("/Original Folder 1/Sub-Folder 1.1").uid + count = len(local.get_children_info("/Original Folder 1")) + self.engine_1.suspend() + + # Rename a non empty folder with some content + local.rename("/Original Folder 1", "Renamed Folder 1 \xe9") + assert not local.exists("/Original Folder 1") + assert local.exists("/Renamed Folder 1 \xe9") + + local.rename("/Renamed Folder 1 \xe9/Sub-Folder 1.1", "Sub-Folder 2.1") + assert local.exists("/Renamed Folder 1 \xe9/Sub-Folder 2.1") + + # Same content as OF1 + local.make_file("/Renamed Folder 1 \xe9", "Test.txt", content=b"Some Content 1") + count += 1 + self.engine_1.resume() + # Synchronize: only the folder renaming is detected: all + # the descendants are automatically realigned + self.wait_sync(wait_for_async=True) + + # The server folder has been renamed: the uid stays the same + assert remote.get_info(folder_1).name == "Renamed Folder 1 \xe9" + + # The content of the renamed folder is left unchanged + file_info = remote.get_info(file_1_1) + assert file_info.name == "Original File 1.1.txt" + assert file_info.parent_uid == folder_1 + + folder_info = remote.get_info(folder_1_1) + assert folder_info.name == "Sub-Folder 2.1" + assert folder_info.parent_uid == folder_1 + assert len(local.get_children_info("/Renamed Folder 1 \xe9")) == count + assert len(remote.get_children_info(folder_1)) == count + assert len(local.get_children_info("/")) == 4 + assert len(remote.get_children_info(self.workspace)) == 4 + """ + + """ + def test_local_rename_file_after_create(self): + # Office 2010 and >, create a tmp file with 8 chars + # and move it right after + local = self.local_1 + remote = self.remote_document_client_1 + + local.make_file("/", "File.txt", content=b"Some Content 2") + local.rename("/File.txt", "Renamed File.txt") + + self.wait_sync(fail_if_timeout=False) + + assert local.exists("/Renamed File.txt") + assert not local.exists("/File.txt") + # Path don't change on Nuxeo + assert local.get_remote_id("/Renamed File.txt") + assert len(local.get_children_info("/")) == 5 + assert len(remote.get_children_info(self.workspace)) == 5 + """ + + """ + def test_local_rename_file_after_create_detected(self): + # MS Office 2010+ creates a tmp file with 8 chars + # and move it right after + local = self.local_1 + remote = self.remote_document_client_1 + marker = False + + def insert_local_state(info, parent_path): + nonlocal marker + if info.name == "File.txt" and not marker: + local.rename("/File.txt", "Renamed File.txt") + sleep(2) + marker = True + EngineDAO.insert_local_state(self.engine_1.dao, info, parent_path) + + with patch.object( + self.engine_1.dao, "insert_local_state", new=insert_local_state + ): + # Might be temporary ignored once + self.engine_1.queue_manager._error_interval = 3 + local.make_file("/", "File.txt", content=b"Some Content 2") + sleep(10) + self.wait_sync(fail_if_timeout=False) + + assert local.exists("/Renamed File.txt") + assert not local.exists("/File.txt") + + # Path doesn't change on Nuxeo + assert local.get_remote_id("/Renamed File.txt") + assert len(local.get_children_info("/")) == 5 + assert len(remote.get_children_info(self.workspace)) == 5 + """ + + def test_local_move_folder(self): + local = self.local_1 + remote = self.remote_document_client_1 + + # Save the uid of some files and folders prior to move + folder_1 = remote.get_info("/Original Folder 1").uid + folder_2 = remote.get_info("/Original Folder 2").uid + file_1_1 = remote.get_info("/Original Folder 1/Original File 1.1.txt").uid + folder_1_1 = remote.get_info("/Original Folder 1/Sub-Folder 1.1").uid + + # Move a non empty folder with some content + local.move("/Original Folder 1", "/Original Folder 2") + assert not local.exists("/Original Folder 1") + assert local.exists("/Original Folder 2/Original Folder 1") + + # Synchronize: only the folder move is detected: all + # the descendants are automatically realigned + self.wait_sync() + + # The server folder has been moved: the uid stays the same + # The parent folder is now folder 2 + assert remote.get_info(folder_1).parent_uid == folder_2 + + # The content of the renamed folder is left unchanged + file_1_1_info = remote.get_info(file_1_1) + assert file_1_1_info.name == "Original File 1.1.txt" + assert file_1_1_info.parent_uid == folder_1 + + folder_1_1_info = remote.get_info(folder_1_1) + assert folder_1_1_info.name == "Sub-Folder 1.1" + assert folder_1_1_info.parent_uid == folder_1 + + assert len(local.get_children_info("/Original Folder 2/Original Folder 1")) == 3 + assert len(remote.get_children_info(folder_1)) == 3 + assert len(local.get_children_info("/")) == 3 + assert len(remote.get_children_info(self.workspace)) == 3 + + """ + def test_concurrent_local_rename_folder(self): + local = self.local_1 + remote = self.remote_document_client_1 + + # Save the uid of some files and folders prior to renaming + folder_1 = remote.get_info("/Original Folder 1").uid + file_1_1 = remote.get_info("/Original Folder 1/Original File 1.1.txt").uid + folder_2 = remote.get_info("/Original Folder 2").uid + file_3 = remote.get_info("/Original Folder 2/Original File 3.txt").uid + + # Rename a non empty folders concurrently + local.rename("/Original Folder 1", "Renamed Folder 1") + local.rename("/Original Folder 2", "Renamed Folder 2") + assert not local.exists("/Original Folder 1") + assert local.exists("/Renamed Folder 1") + assert not local.exists("/Original Folder 2") + assert local.exists("/Renamed Folder 2") + + # Synchronize: only the folder renamings are detected: all + # the descendants are automatically realigned + self.wait_sync() + + # The server folders have been renamed: the uid stays the same + folder_1_info = remote.get_info(folder_1) + assert folder_1_info.name == "Renamed Folder 1" + + folder_2_info = remote.get_info(folder_2) + assert folder_2_info.name == "Renamed Folder 2" + + # The content of the folder has been left unchanged + file_1_1_info = remote.get_info(file_1_1) + assert file_1_1_info.name == "Original File 1.1.txt" + assert file_1_1_info.parent_uid == folder_1 + + file_3_info = remote.get_info(file_3) + assert file_3_info.name == "Original File 3.txt" + assert file_3_info.parent_uid == folder_2 + + assert len(local.get_children_info("/Renamed Folder 1")) == 3 + assert len(remote.get_children_info(folder_1)) == 3 + assert len(local.get_children_info("/Renamed Folder 2")) == 1 + assert len(remote.get_children_info(folder_2)) == 1 + assert len(local.get_children_info("/")) == 4 + assert len(remote.get_children_info(self.workspace)) == 4 + """ + + def test_local_replace(self): + local = LocalTest(self.local_test_folder_1) + remote = self.remote_document_client_1 + self.engine_1.start() + self.wait_sync(wait_for_async=True) + + # Create 2 files with the same name but different content + # in separate folders + local.make_file("/", "test.odt", content=b"Some content.") + local.make_folder("/", "folder") + shutil.copyfile( + self.local_test_folder_1 / "test.odt", + self.local_test_folder_1 / "folder" / "test.odt", + ) + local.update_content("/folder/test.odt", content=b"Updated content.") + + # Copy the newest file to the root workspace and synchronize it + sync_root = self.local_nxdrive_folder_1 / self.workspace_title + test_file = self.local_test_folder_1 / "folder" / "test.odt" + shutil.copyfile(test_file, sync_root / "test.odt") + self.wait_sync() + assert remote.exists("/test.odt") + assert remote.get_content("/test.odt") == b"Updated content." + + # Copy the oldest file to the root workspace and synchronize it. + # First wait a bit for file time stamps to increase enough. + time.sleep(OS_STAT_MTIME_RESOLUTION) + shutil.copyfile(self.local_test_folder_1 / "test.odt", sync_root / "test.odt") + self.wait_sync() + assert remote.exists("/test.odt") + assert remote.get_content("/test.odt") == b"Some content." + + """ + def test_local_rename_sync_root_folder(self): + # Use the Administrator to be able to introspect the container of the + # test workspace. + remote = DocRemote( + self.nuxeo_url, + env.NXDRIVE_TEST_USERNAME, + "nxdrive-test-administrator-device", + self.version, + password=env.NXDRIVE_TEST_PASSWORD, + base_folder=self.workspace, + ) + folder_1_uid = remote.get_info("/Original Folder 1").uid + + # Create new clients to be able to introspect the test sync root + toplevel_local_client = LocalTest(self.local_nxdrive_folder_1) + + toplevel_local_client.rename( + Path(self.workspace_title), "Renamed Nuxeo Drive Test Workspace" + ) + self.wait_sync() + + workspace_info = remote.get_info(self.workspace) + assert workspace_info.name == "Renamed Nuxeo Drive Test Workspace" + + folder_1_info = remote.get_info(folder_1_uid) + assert folder_1_info.name == "Original Folder 1" + assert folder_1_info.parent_uid == self.workspace + assert len(remote.get_children_info(self.workspace)) == 4 + """ + + def test_local_move_with_remote_error(self): + local = self.local_1 + remote = self.remote_document_client_1 + + # Check local folder + assert local.exists("/Original Folder 1") + + # Simulate server error + bad_remote = self.get_bad_remote() + error = HTTPError(status=500, message="Mock server error") + bad_remote.make_server_call_raise(error) + + with patch.object(self.engine_1, "remote", new=bad_remote): + local.rename("/Original Folder 1", "OSErrorTest") + self.wait_sync(timeout=5, fail_if_timeout=False) + folder_1 = remote.get_info("/Original Folder 1") + assert folder_1.name == "Original Folder 1" + assert local.exists("/OSErrorTest") + + # Set engine online as starting from here the behavior is restored + self.engine_1.set_offline(value=False) + + self.wait_sync() + folder_1 = remote.get_info(folder_1.uid) + assert folder_1.name == "OSErrorTest" + assert local.exists("/OSErrorTest") + assert len(local.get_children_info("/OSErrorTest")) == 3 + assert len(remote.get_children_info(folder_1.uid)) == 3 + assert len(local.get_children_info("/")) == 4 + assert len(remote.get_children_info(self.workspace)) == 4 + + # TODO: implement me once canDelete is checked in the synchronizer + # def test_local_move_sync_root_folder(self): + # pass diff --git a/tests/functional/test_local_move_folders.py b/tests/functional/test_local_move_folders.py new file mode 100644 index 0000000000..b43ffee0fd --- /dev/null +++ b/tests/functional/test_local_move_folders.py @@ -0,0 +1,220 @@ +import shutil +from contextlib import suppress +from pathlib import Path + +from ..utils import random_png +from .conftest import OneUserTest + + +class TestLocalMoveFolders(OneUserTest): + def _setup(self, count: int = 10, wait_for_sync: bool = True): + """ + 1. Create folder a1 at the root + 2. Create folder a2 at the root + 3. Add *count* pictures in a1 + 4. Add *count* pictures in a2 + """ + self.engine_1.start() + self.wait_sync(wait_for_async=True) + self.engine_1.stop() + + local = self.local_1 + remote = self.remote_1 + + # Create a1 and a2 + self.folder_path_1 = local.make_folder("/", "a1") + self.folder_path_2 = local.make_folder("/", "a2") + + names = {f"file{n + 1:03d}.png" for n in range(count)} + + for path in (self.folder_path_1, self.folder_path_2): + for name in names: + file_path = local.abspath(path) / name + random_png(file_path) + + self.engine_1.start() + + if wait_for_sync: + self.wait_sync(timeout=30, wait_win=True) + + # Check /a1 and /a2 + for folder in ("/a1", "/a2"): + # Check local files + assert local.exists(folder) + children = [child.name for child in local.get_children_info(folder)] + assert len(children) == count + assert set(children) == names + + if wait_for_sync: + # Check remote files + uid = local.get_remote_id(folder) + assert uid + assert remote.fs_exists(uid) + children = [child.name for child in remote.get_fs_children(uid)] + assert len(children) == count + assert set(children) == names + + def tearDown(self): + with suppress(TypeError, AttributeError): + self.engine_1._local_watcher.localScanFinished.disconnect( + self.app.local_scan_finished + ) + + def test_local_move_folder_with_files(self): + count = 10 + self._setup(count=count) + local = self.local_1 + remote = self.remote_1 + remote_doc = self.remote_document_client_1 + src = local.abspath(self.folder_path_1) + dst = local.abspath(self.folder_path_2) + shutil.move(src, dst) + self.wait_sync() + names = {f"file{n + 1:03d}.png" for n in range(count)} + + # Check that a1 doesn't exist anymore locally and remotely + assert not local.exists("/a1") + assert len(remote_doc.get_children_info(self.workspace)) == 1 + + # Check /a2 and /a2/a1 + for folder in ("/a2", "/a2/a1"): + assert local.exists(folder) + children = [ + child.name + for child in local.get_children_info(folder) + if not child.folderish + ] + assert len(children) == count + assert set(children) == names + + uid = local.get_remote_id(folder) + assert uid + assert remote.fs_exists(uid) + children = [ + child.name + for child in remote.get_fs_children(uid) + if not child.folderish + ] + assert len(children) == count + assert set(children) == names + + """ + def test_local_move_folder_both_sides_while_stopped(self): + self._test_local_move_folder_both_sides(False) + """ + + """ + def test_local_move_folder_both_sides_while_unbinded(self): + self._test_local_move_folder_both_sides(True) + """ + + def _test_local_move_folder_both_sides(self, unbind): + """ + NXDRIVE-647: sync when a folder is renamed locally and remotely. + """ + + local = self.local_1 + remote = self.remote_document_client_1 + + # Create initial folder and file + folder = remote.make_folder("/", "Folder1") + self.engine_1.start() + self.wait_sync(wait_for_async=True) + + # First checks, everything should be online for every one + assert remote.exists("/Folder1") + assert local.exists("/Folder1") + folder_pair_state = self.engine_1.dao.get_state_from_local( + Path(self.workspace_title) / "Folder1" + ) + assert folder_pair_state is not None + folder_remote_ref = folder_pair_state.remote_ref + + # Unbind or stop engine + if unbind: + self.send_unbind_engine(1) + self.wait_unbind_engine(1) + else: + self.engine_1.stop() + + # Make changes + remote.update(folder, properties={"dc:title": "Folder1_ServerName"}) + local.rename("/Folder1", "Folder1_LocalRename") + + # Bind or start engine and wait for sync + if unbind: + self.send_bind_engine(1) + self.wait_bind_engine(1) + else: + self.engine_1.start() + self.wait_sync(wait_for_async=True) + + # Check that nothing has changed + assert len(remote.get_children_info(self.workspace)) == 1 + assert remote.exists(folder) + assert remote.get_info(folder).name == "Folder1_ServerName" + assert len(local.get_children_info("/")) == 1 + assert local.exists("/Folder1_LocalRename") + + # Check folder status + folder_pair_state = self.engine_1.dao.get_normal_state_from_remote( + folder_remote_ref + ) + assert folder_pair_state.pair_state == "conflicted" + + def test_local_move_folder(self): + """ + A simple test to ensure we do not create useless URLs. + This is to handle cases when the user creates a new folder, + it has the default name set to the local system: + "New folder" + "Nouveau dossier (2)" + ... + The folder is created directly and it generates useless URLs. + So we move the document to get back good URLs. As the document has been + renamed above, the document's title is already the good one. + """ + local = self.local_1 + remote = self.remote_1 + + self.engine_1.start() + self.wait_sync(wait_for_async=True) + + name_orig = "Nouveau dossier (42)" + name_new = "C'est le vrai nom pârdi !" + + local.make_folder("/", name_orig) + self.wait_sync() + + child = remote.get_children_info(self.workspace)[0] + assert child.name == name_orig + assert child.path.endswith(name_orig) + + # Rename to fix the meaningfulness URL + local.rename(f"/{name_orig}", name_new) + self.wait_sync() + + assert remote.exists(f"/{name_new}") + child = remote.get_children_info(self.workspace)[0] + assert child.name == name_new + assert child.path.endswith(name_new) + + """ + def test_local_move_root_folder_with_unicode(self): + local = self.local_1 + + self.engine_1.start() + self.wait_sync(wait_for_async=True) + + assert local.exists("/") + + with ensure_no_exception(): + # Rename the root folder + root_path = local.base_folder.parent + local.unlock_ref(root_path, is_abs=True) + root_path.rename(root_path.with_name("root moved, 👆!")) + + self.wait_sync() + + assert not local.exists("/") + """ diff --git a/tests/functional/test_local_paste.py b/tests/functional/test_local_paste.py new file mode 100644 index 0000000000..afcbca4ac4 --- /dev/null +++ b/tests/functional/test_local_paste.py @@ -0,0 +1,138 @@ +import shutil +import tempfile +from pathlib import Path + +from nxdrive.utils import normalized_path + +from .conftest import FILE_CONTENT, OneUserTest + +TEST_TIMEOUT = 60 + + +class TestLocalPaste(OneUserTest): + NUMBER_OF_LOCAL_FILES = 25 + TEMP_FOLDER = "temp_folder" + FOLDER_A1 = Path("a1") + FOLDER_A2 = Path("a2") + FILENAME_PATTERN = "file%03d.txt" + + def setUp(self): + """ + 1. create folder 'temp/a1' with more than 20 files in it + 2. create folder 'temp/a2', empty + 3. copy 'a1' and 'a2', in this order to the test sync root + 4. repeat step 3, but copy 'a2' and 'a1', in this order + (to the test sync root) + 5. Verify that both folders and their content is sync to DM, + in both steps 3 and 4 + """ + + self.engine_1.start() + self.wait_sync(wait_for_async=True) + local = self.local_1 + assert local.exists("/") + self.workspace_abspath = local.abspath("/") + + # Create folder a1 and a2 under a temp folder + self.local_temp = normalized_path(tempfile.mkdtemp(self.TEMP_FOLDER)) + self.folder1 = self.local_temp / self.FOLDER_A1 + self.folder1.mkdir(parents=True) + self.folder2 = self.local_temp / self.FOLDER_A2 + self.folder2.mkdir(parents=True) + + # Add files in folder 'temp/a1' + for file_num in range(1, self.NUMBER_OF_LOCAL_FILES + 1): + filename = self.FILENAME_PATTERN % file_num + (self.folder1 / filename).write_bytes(FILE_CONTENT) + + def tearDown(self): + shutil.rmtree(self.local_temp) + + """ + def test_copy_paste_empty_folder_first(self): + "" + copy 'a2' to 'Nuxeo Drive Test Workspace', + then 'a1' to 'Nuxeo Drive Test Workspace' + "" + # copy 'temp/a2' under 'Nuxeo Drive Test Workspace' + shutil.copytree(self.folder2, self.workspace_abspath / self.FOLDER_A2) + # copy 'temp/a1' under 'Nuxeo Drive Test Workspace' + shutil.copytree(self.folder1, self.workspace_abspath / self.FOLDER_A1) + self.wait_sync(timeout=TEST_TIMEOUT) + + self._check_integrity() + """ + + def test_copy_paste_empty_folder_last(self): + """ + copy 'a1' to 'Nuxeo Drive Test Workspace', + then 'a2' to 'Nuxeo Drive Test Workspace' + """ + # copy 'temp/a1' under 'Nuxeo Drive Test Workspace' + shutil.copytree(self.folder1, self.workspace_abspath / self.FOLDER_A1) + # copy 'temp/a2' under 'Nuxeo Drive Test Workspace' + shutil.copytree(self.folder2, self.workspace_abspath / self.FOLDER_A2) + self.wait_sync(timeout=TEST_TIMEOUT) + + self._check_integrity() + + def _check_integrity(self): + local = self.local_1 + remote = self.remote_1 + num = self.NUMBER_OF_LOCAL_FILES + # check that '/Nuxeo Drive Test Workspace/a1' does exist + assert local.exists(self.FOLDER_A1) + # check that '/Nuxeo Drive Test Workspace/a2' does exist + assert local.exists(self.FOLDER_A2) + # check that '/Nuxeo Drive Test Workspace/a1/ has all the files + children = list((self.workspace_abspath / self.FOLDER_A1).iterdir()) + assert len(children) == num + # check that remote (DM) 'Nuxeo Drive Test Workspace/a1' exists + remote_ref_1 = local.get_remote_id(self.FOLDER_A1) + assert remote.fs_exists(remote_ref_1) + # check that remote (DM) 'Nuxeo Drive Test Workspace/a2' exists + remote_ref_2 = local.get_remote_id(self.FOLDER_A2) + assert remote.fs_exists(remote_ref_2) + # check that remote (DM) 'Nuxeo Drive Test Workspace/a1' + # has all the files + children = [ + remote_info.name for remote_info in remote.get_fs_children(remote_ref_1) + ] + assert len(children) == num + + def test_copy_paste_same_file(self): + local = self.local_1 + remote = self.remote_1 + name = self.FILENAME_PATTERN % 1 + workspace_abspath = local.abspath("/") + path = self.FOLDER_A1 / name + copypath = self.FOLDER_A1 / f"{name}copy" + # copy 'temp/a1' under 'Nuxeo Drive Test Workspace' + (workspace_abspath / self.FOLDER_A1).mkdir() + shutil.copy2(self.folder1 / name, workspace_abspath / path) + + self.wait_sync(timeout=TEST_TIMEOUT) + + # check that '/Nuxeo Drive Test Workspace/a1' does exist + assert local.exists(self.FOLDER_A1) + # check that '/Nuxeo Drive Test Workspace/a1/ has all the files + children = list((self.workspace_abspath / self.FOLDER_A1).iterdir()) + assert len(children) == 1 + # check that remote (DM) 'Nuxeo Drive Test Workspace/a1' exists + remote_ref = local.get_remote_id(self.FOLDER_A1) + assert remote.fs_exists(remote_ref) + remote_children = [ + remote_info.name for remote_info in remote.get_fs_children(remote_ref) + ] + assert len(remote_children) == 1 + remote_id = local.get_remote_id(path) + + shutil.copy2(local.abspath(path), local.abspath(copypath)) + local.set_remote_id(copypath, remote_id) + self.wait_sync(timeout=TEST_TIMEOUT) + remote_children = [ + remote_info.name for remote_info in remote.get_fs_children(remote_ref) + ] + assert len(remote_children) == 2 + children = list((self.workspace_abspath / self.FOLDER_A1).iterdir()) + assert len(children) == 2 diff --git a/tests/functional/test_local_share_move_folders.py b/tests/functional/test_local_share_move_folders.py new file mode 100644 index 0000000000..9fb4ebf624 --- /dev/null +++ b/tests/functional/test_local_share_move_folders.py @@ -0,0 +1,121 @@ +""" +import shutil +from unittest.mock import patch + +from nxdrive.engine.watcher.constants import SECURITY_UPDATED_EVENT +from nxdrive.engine.watcher.remote_watcher import RemoteWatcher + +from ..utils import random_png +from .conftest import TwoUsersTest + + +class TestLocalShareMoveFolders(TwoUsersTest): + NUMBER_OF_LOCAL_IMAGE_FILES = 10 + + def setUp(self): + "" + 1. Create folder a1 in Nuxeo Drive Test Workspace sync root + 2. Create folder a2 in Nuxeo Drive Test Workspace sync root + 3. Add 10 image files in a1 + "" + + self.engine_1.start() + self.wait_sync(wait_for_async=True) + self.engine_1.stop() + + local = self.local_1 + # Create a1 and a2 + self.folder_path_1 = local.make_folder("/", "a1") + self.folder_path_2 = local.make_folder("/", "a2") + + num = self.NUMBER_OF_LOCAL_IMAGE_FILES + self.names = {"file%03d.png" % file_num for file_num in range(1, num + 1)} + + # Add image files to a1 + abs_folder_path_1 = local.abspath(self.folder_path_1) + for file_num in range(1, num + 1): + file_name = "file%03d.png" % file_num + file_path = abs_folder_path_1 / file_name + random_png(file_path) + + self.engine_1.start() + self.wait_sync(timeout=60, wait_win=True) + + # Check local files in a1 + self._check_local("/a1") + + # Check remote files in a1 + self._check_remote("/a1") + + def _check_local(self, folder): + local = self.local_1 + assert local.exists(folder) + + children = [child.name for child in local.get_children_info(folder)] + assert len(children) == self.NUMBER_OF_LOCAL_IMAGE_FILES + assert set(children) == self.names + + def _check_remote(self, folder): + local = self.local_1 + remote = self.remote_1 + + uid = local.get_remote_id(folder) + assert uid + assert remote.fs_exists(uid) + + children = [child.name for child in remote.get_fs_children(uid)] + assert len(children) == self.NUMBER_OF_LOCAL_IMAGE_FILES + assert set(children) == self.names + + def test_local_share_move_folder_with_files(self): + remote = self.root_remote + local = self.local_1 + + src = local.abspath(self.folder_path_1) + dst = local.abspath(self.folder_path_2) + + input_obj = local.get_remote_id("/a1").split("#")[-1] + remote.execute( + command="Document.AddPermission", + input_obj=input_obj, + username=self.user_2, + permission="Everything", + ) + + original_get_changes = RemoteWatcher._get_changes + + def get_changes(self): + summary = original_get_changes(self) + for event in summary["fileSystemChanges"]: + if event["eventId"] == SECURITY_UPDATED_EVENT: + nonlocal src + nonlocal dst + shutil.move(src, dst) + return summary + + with patch.object(RemoteWatcher, "_get_changes", new=get_changes): + self.wait_sync() + + # Sync after move operation + self.wait_sync() + # Check that a1 doesn't exist anymore locally + assert not local.exists("/a1") + + # Check local files in a2/a1 + self._check_local("/a2/a1") + + # Check that a1 doesn't exist anymore remotely + assert len(remote.get_children_info(self.workspace)) == 1 + + # Check remote files in a2/a1 + self._check_remote("/a2/a1") + + # As Admin create a folder inside a1 + uid = local.get_remote_id("/a2/a1") + remote.make_folder(uid.split("#")[-1], "inside_a1") + + self.wait_sync() + + # Check that a1 doesn't exist anymore locally + assert local.exists("/a2/a1/inside_a1") +""" diff --git a/tests/functional/test_local_storage_issue.py b/tests/functional/test_local_storage_issue.py new file mode 100644 index 0000000000..2687eeb1a6 --- /dev/null +++ b/tests/functional/test_local_storage_issue.py @@ -0,0 +1,111 @@ +import os + +from .conftest import OneUserTest + + +class TestLocalStorageIssue(OneUserTest): + def test_local_invalid_timestamp(self): + # Synchronize root workspace + self.engine_1.start() + self.wait_sync(wait_for_async=True) + assert self.local_1.exists("/") + self.engine_1.stop() + self.local_1.make_file("/", "Test.txt", content=b"plop") + os.utime(self.local_1.abspath("/Test.txt"), (0, 999_999_999_999_999)) + self.engine_1.start() + self.wait_sync() + children = self.remote_document_client_1.get_children_info(self.workspace) + assert len(children) == 1 + assert children[0].name == "Test.txt" + + """ + def test_synchronize_no_space_left_on_device(self): + local = self.local_1 + remote = self.remote_document_client_1 + + # Synchronize root workspace + self.engine_1.start() + self.wait_sync(wait_for_async=True) + assert local.exists("/") + self.engine_1.stop() + + # Create a file in the remote root workspace + uid = remote.make_file("/", "test_NG.odt", content=b"Some large content.") + + # We pick a random error because there is no facility + # to parametrize a method from a class derived from + # something other than object. + errno = random.choice(list(NO_SPACE_ERRORS)) + error = OSError(errno, f"(Mock) {os.strerror(errno)}") + + # Synchronize simulating a disk space related error + bad_remote = self.get_bad_remote() + bad_remote.make_download_raise(error) + + with patch.object(self.engine_1, "remote", new=bad_remote): + self.engine_1.start() + + # By default engine will not consider being syncCompleted + # because of the temporary ignored files + self.wait_sync( + wait_for_async=True, fail_if_timeout=False, enforce_errors=False + ) + + # - temporary download file should be created locally but not moved + # - synchronization should not fail: doc pair should be temporary ignored + # - and there should be 1 error + assert (self.engine_1.download_dir / uid).is_dir() + assert not local.exists("/test_NG.odt") + errors = self.engine_1.dao.get_errors(limit=0) + assert len(errors) == 1 + assert errors[0].remote_name == "test_NG.odt" + + assert self.engine_1.is_paused() + + # Create another file in the remote root workspace + remote.make_file("/", "test_OK.odt", content=b"Some small content.") + + # No more errors starting here + self.engine_1.resume() + self.wait_sync(wait_for_async=True, fail_if_timeout=False, enforce_errors=False) + + # Remote file should be created locally + assert local.exists("/test_OK.odt") + + # Temporary ignored file should still be ignored as delay (60 seconds by default) + # is not expired and there should still be 1 error + assert not local.exists("/test_NG.odt") + errors = self.engine_1.dao.get_errors(limit=0) + assert len(errors) == 1 + assert errors[0].remote_name == "test_NG.odt" + + # Retry to synchronize the temporary ignored file, but still simulating + # the same disk space related error + with patch.object(self.engine_1, "remote", new=bad_remote): + # Re-queue pairs in error + self.queue_manager_1.requeue_errors() + self.wait_sync(fail_if_timeout=False, enforce_errors=False) + + # - temporary download file should be created locally but not moved + # - doc pair should be temporary ignored again + # - and there should still be 1 error + assert (self.engine_1.download_dir / uid).is_dir() + assert not local.exists("/test_NG.odt") + errors = self.engine_1.dao.get_errors(limit=0) + assert len(errors) == 1 + assert errors[0].remote_name == "test_NG.odt" + + # Synchronize without simulating any error, as if space had been made + # available on device + self.engine_1.resume() + + # Re-queue pairs in error + self.queue_manager_1.requeue_errors() + self.wait_sync(enforce_errors=False) + + # Previously temporary ignored file should be created locally + # and there should be no more errors left + assert not (self.engine_1.download_dir / uid).is_dir() + assert local.exists("/test_NG.odt") + assert not self.engine_1.dao.get_errors(limit=0) + """ diff --git a/tests/functional/test_long_path.py b/tests/functional/test_long_path.py new file mode 100644 index 0000000000..1c227d9317 --- /dev/null +++ b/tests/functional/test_long_path.py @@ -0,0 +1,101 @@ +import os +from unittest.mock import patch + +from nxdrive.constants import WINDOWS + +from .conftest import OneUserTest + +# Number of chars in path "C:\...\Nuxeo..." is approx 96 chars +FOLDER_A = "A" * 90 +FOLDER_B = "B" * 90 +FOLDER_C = "C" * 90 +FOLDER_D = "D" * 50 +FILE = "F" * 255 + ".txt" + + +class TestLongPath(OneUserTest): + def setUp(self): + self.remote_1 = self.remote_document_client_1 + self.folder_a = self.remote_1.make_folder("/", FOLDER_A) + self.folder_b = self.remote_1.make_folder(self.folder_a, FOLDER_B) + self.folder_c = self.remote_1.make_folder(self.folder_b, FOLDER_C) + self.remote_1.make_file(self.folder_c, "File1.txt", content=b"Sample Content") + + def tearDown(self): + self.remote_1.delete(self.folder_a, use_trash=False) + + def test_long_path(self): + self.engine_1.start() + self.wait_sync(wait_for_async=True) + + parent_path = ( + self.local_1.abspath("/") / FOLDER_A / FOLDER_B / FOLDER_C / FOLDER_D + ) + if WINDOWS: + parent_path = f"\\\\?\\{parent_path}" + os.makedirs(parent_path, exist_ok=True) + + new_file = os.path.join(parent_path, "File2.txt") + with open(new_file, "wb") as f: + f.write(b"Hello world") + + self.wait_sync(wait_for_async=True, fail_if_timeout=False) + remote_children_of_c = self.remote_1.get_children_info(self.folder_c) + assert len(remote_children_of_c) == 2 + folder = [item for item in remote_children_of_c if item.name == FOLDER_D][0] + assert folder.name == FOLDER_D + + remote_children_of_d = self.remote_1.get_children_info(folder.uid) + assert len(remote_children_of_d) == 1 + assert remote_children_of_d[0].name == "File2.txt" + + def test_setup_on_long_path(self): + """NXDRIVE-689: Fix error when adding a new account when installation + path is greater than 245 characters. + """ + + self.engine_1.stop() + self.engine_1.reinit() + + # On Mac, avoid permission denied error + self.engine_1.local.clean_xattr_root() + + test_folder_len = 245 - len(str(self.local_nxdrive_folder_1)) + self.local_nxdrive_folder_1 = self.local_nxdrive_folder_1 / ( + "A" * test_folder_len + ) + assert len(str(self.local_nxdrive_folder_1)) > 245 + + self.manager_1.unbind_all() + self.engine_1 = self.manager_1.bind_server( + self.local_nxdrive_folder_1, + self.nuxeo_url, + self.user_1, + password=self.password_1, + start_engine=False, + ) + + self.engine_1.start() + self.engine_1.stop() + + +class TestLongFileName(OneUserTest): + def test_long_file_name(self): + def error(*_): + nonlocal received + received = True + + received = False + remote = self.remote_document_client_1 + + self.engine_1.start() + self.wait_sync(wait_for_async=True) + + with patch.object( + self.manager_1.notification_service, "_longPathError", new_callable=error + ): + remote.make_file(self.workspace, FILE, content=b"Sample Content") + self.wait_sync(wait_for_async=True, timeout=5, fail_if_timeout=False) + + assert received + assert not self.local_1.exists(f"/{FILE}") diff --git a/tests/functional/test_mac_local_client.py b/tests/functional/test_mac_local_client.py new file mode 100644 index 0000000000..6ed13f0d8a --- /dev/null +++ b/tests/functional/test_mac_local_client.py @@ -0,0 +1,38 @@ +from ..markers import mac_only +from .conftest import OneUserTest + +try: + import xattr +except ImportError: + pass + + +@mac_only +class TestMacSpecific(OneUserTest): + def test_finder_in_use(self): + """Test that if Finder is using the file we postpone the sync.""" + + self.engine_1.start() + self.wait_sync(wait_for_async=True) + self.local_1.make_file("/", "File.txt", content=b"Some Content 1") + + # Emulate the Finder in use flag + key = [0] * 32 # OSX_FINDER_INFO_ENTRY_SIZE + key[:8] = 0x62, 0x72, 0x6F, 0x6B, 0x4D, 0x41, 0x43, 0x53 + + xattr.setxattr( + str(self.local_1.abspath("/File.txt")), + xattr.XATTR_FINDERINFO_NAME, + bytes(bytearray(key)), + ) + + # The file should not be synced and there have no remote id + self.wait_sync(wait_for_async=True, fail_if_timeout=False) + assert not self.local_1.get_remote_id("/File.txt") + + # Remove the Finder flag + self.local_1.remove_remote_id("/File.txt", name=xattr.XATTR_FINDERINFO_NAME) + + # The sync process should now handle the file and sync it + self.wait_sync(wait_for_async=True, fail_if_timeout=False) + assert self.local_1.get_remote_id("/File.txt") diff --git a/tests/functional/test_multiple_files.py b/tests/functional/test_multiple_files.py new file mode 100644 index 0000000000..d2db89c5f1 --- /dev/null +++ b/tests/functional/test_multiple_files.py @@ -0,0 +1,135 @@ +""" +import shutil +from pathlib import Path + +import pytest + +from nxdrive.constants import LINUX, MAC + +from ..markers import not_linux +from .conftest import OneUserTest + + +class TestMultipleFiles(OneUserTest): + NUMBER_OF_LOCAL_FILES = 10 + SYNC_TIMEOUT = 10 # in seconds + + def setUp(self): + "" + 1. create folder 'Nuxeo Drive Test Workspace/a1' with 100 files in it + 2. create folder 'Nuxeo Drive Test Workspace/a2' + 2. create folder 'Nuxeo Drive Test Workspace/a3' + "" + + self.engine_1.start() + self.wait_sync() + local = self.local_1 + + # Create folder a1 + self.folder_path_1 = local.make_folder("/", "a1") + + # Add 100 files in folder 'Nuxeo Drive Test Workspace/a1' + for file_num in range(1, self.NUMBER_OF_LOCAL_FILES + 1): + local.make_file( + self.folder_path_1, "local%04d.txt" % file_num, content=b"content" + ) + + # Create folder a2 + self.folder_path_2 = local.make_folder("/", "a2") + self.folder_path_3 = Path("a3") + self.wait_sync(wait_for_async=True, timeout=self.SYNC_TIMEOUT) + + def test_move_and_copy_paste_folder_original_location_from_child_stopped(self): + self._move_and_copy_paste_folder_original_location_from_child() + + def test_move_and_copy_paste_folder_original_location_from_child(self): + self._move_and_copy_paste_folder_original_location_from_child(False) + + def _move_and_copy_paste_folder_original_location_from_child(self, stopped=True): + local = self.local_1 + src = local.abspath(self.folder_path_1) + dst = local.abspath(self.folder_path_2) + shutil.move(src, dst) + self.wait_sync(timeout=self.SYNC_TIMEOUT) + self._move_and_copy_paste_folder( + Path("a2/a1"), Path(""), Path("a2"), stopped=stopped + ) + + def _move_and_copy_paste_folder( + self, folder_1: Path, folder_2: Path, target_folder: Path, stopped=True + ): + "" + /folder_1 + /folder_2 + /target_folder + Will + move /folder1 inside /folder2/ as /folder2/folder1 + copy /folder2/folder1 into /target_folder/ + "" + if stopped: + self.engine_1.stop() + remote = self.remote_1 + local = self.local_1 + src = local.abspath(folder_1) + dst = local.abspath(folder_2) + new_path = folder_2 / folder_1.name + copy_path = target_folder / folder_1.name + shutil.move(src, dst) + # check that 'Nuxeo Drive Test Workspace/a1' does not exist anymore + assert not local.exists(folder_1) + # check that 'Nuxeo Drive Test Workspace/a2/a1' now exists + assert local.exists(new_path) + # copy the 'Nuxeo Drive Test Workspace/a2/a1' tree + # back under 'Nuxeo Drive Test Workspace' + shutil.copytree(local.abspath(new_path), local.abspath(copy_path)) + if stopped: + self.engine_1.start() + self.wait_sync(timeout=self.SYNC_TIMEOUT) + + # asserts + # expect '/a2/a1' to contain the files + # expect 'Nuxeo Drive Test Workspace/a1' to also contain the files + num = self.NUMBER_OF_LOCAL_FILES + names = {"local%04d.txt" % n for n in range(1, num + 1)} + + for path in (new_path, copy_path): + # Local + assert local.abspath(path).exists() + children = [f.name for f in local.abspath(path).iterdir()] + + assert len(children) == num + assert set(children) == names + + # Remote + uid = local.get_remote_id(path) + assert uid + + children = remote.get_fs_children(uid) + assert len(children) == num + children_names = {child.name for child in children} + assert children_names == names + + @pytest.mark.randombug("NXDRIVE-720", condition=LINUX) + @pytest.mark.randombug("NXDRIVE-813", condition=MAC) + def test_move_and_copy_paste_folder_original_location(self): + self._move_and_copy_paste_folder( + self.folder_path_1, + self.folder_path_2, + self.folder_path_1.parent, + stopped=False, + ) + + @not_linux( + reason="NXDRIVE-471: Not handled under GNU/Linux as " + "creation time is not stored" + ) + def test_move_and_copy_paste_folder_original_location_stopped(self): + self._move_and_copy_paste_folder( + self.folder_path_1, self.folder_path_2, self.folder_path_1.parent + ) + + def test_move_and_copy_paste_folder_new_location(self): + self._move_and_copy_paste_folder( + self.folder_path_1, self.folder_path_2, self.folder_path_3 + ) +""" From 8869373dfe9fbbd12fb5533a41df55509103f8b5 Mon Sep 17 00:00:00 2001 From: Anindya Roy Date: Fri, 12 Jan 2024 11:00:07 +0530 Subject: [PATCH 24/36] NXDRIVE-2860: Code Coverage - removed old test cases - 11/01 --1 --- .../test_concurrent_synchronization.py | 8 ++++--- tests/functional/test_conflicts.py | 2 ++ tests/functional/test_direct_transfer.py | 20 ++++++++++------ tests/functional/test_encoding.py | 2 ++ tests/functional/test_group_changes.py | 24 ++++++++++++------- .../functional/test_local_move_and_rename.py | 9 +++---- .../functional/test_remote_move_and_rename.py | 5 +++- tests/functional/test_shared_folders.py | 2 ++ tests/functional/test_synchronization.py | 2 ++ tests/functional/test_transfer.py | 10 ++++---- tests/functional/test_versioning.py | 6 ++--- 11 files changed, 60 insertions(+), 30 deletions(-) diff --git a/tests/functional/test_concurrent_synchronization.py b/tests/functional/test_concurrent_synchronization.py index dfab260e4c..1bd60c0b8e 100644 --- a/tests/functional/test_concurrent_synchronization.py +++ b/tests/functional/test_concurrent_synchronization.py @@ -2,7 +2,7 @@ from nxdrive.constants import WINDOWS -from .conftest import REMOTE_MODIFICATION_TIME_RESOLUTION, TwoUsersTest +from .conftest import TwoUsersTest class TestConcurrentSynchronization(TwoUsersTest): @@ -15,8 +15,9 @@ def create_docs(self, parent, number, name_pattern=None, delay=1.0): delay=int(delay * 1000), ) + """ def test_concurrent_file_access(self): - """Test update/deletion of a locally locked file. + ""Test update/deletion of a locally locked file. This is to simulate downstream synchronization of a file opened (thus locked) by any program under Windows, typically MS Word. @@ -24,7 +25,7 @@ def test_concurrent_file_access(self): pending items. Once the file is unlocked and the cooldown period is over it should be synchronized. - """ + "" # Bind the server and root workspace self.engine_1.start() self.wait_sync(wait_for_async=True) @@ -107,6 +108,7 @@ def test_concurrent_file_access(self): assert local.exists("/test_update.docx") assert local.get_content("/test_update.docx") == b"Updated content." assert not local.exists("/test_delete.docx") + """ """ def test_find_changes_with_many_doc_creations(self): diff --git a/tests/functional/test_conflicts.py b/tests/functional/test_conflicts.py index e7ef4e5b8b..ed2e8a6c0d 100644 --- a/tests/functional/test_conflicts.py +++ b/tests/functional/test_conflicts.py @@ -112,6 +112,7 @@ def test_real_conflict(self): assert local.get_content("/test.txt") == b"Local update 2" assert self.get_remote_state(self.file_id).pair_state == "conflicted" + """ def test_resolve_local(self): self.test_real_conflict() # Resolve to local file @@ -120,6 +121,7 @@ def test_resolve_local(self): self.engine_1.resolve_with_local(pair.id) self.wait_sync(wait_for_async=True) assert self.remote_2.get_content(self.file_id) == b"Local update 2" + """ def test_resolve_local_folder(self): local = self.local_1 diff --git a/tests/functional/test_direct_transfer.py b/tests/functional/test_direct_transfer.py index d81a0c9ec3..6bd48caa9c 100644 --- a/tests/functional/test_direct_transfer.py +++ b/tests/functional/test_direct_transfer.py @@ -297,12 +297,13 @@ def upload(*args, **kwargs): # Ensure there is only 1 document on the server self.sync_and_check() + """ @Options.mock() def test_duplicate_file_override(self): - """ + "" The file already exists on the server. The user wants to continue the transfer and replace the document. - """ + "" with ensure_no_exception(): # 1st upload: OK @@ -326,6 +327,7 @@ def test_duplicate_file_override(self): self.remote_1.get_blob(children[0].uid, xpath="file:content") == b"blob changed!" ) + """ def test_pause_upload_manually(self): """ @@ -400,11 +402,12 @@ def callback(*_): self.manager_1.resume() self.sync_and_check() + """ def test_modifying_paused_upload(self): - """Modifying a paused upload should discard the current upload.""" + ""Modifying a paused upload should discard the current upload."" def callback(*_): - """Pause the upload and apply changes to the document.""" + ""Pause the upload and apply changes to the document."" # Ensure we have 1 ongoing upload uploads = list(dao.get_dt_uploads()) assert uploads @@ -435,6 +438,7 @@ def callback(*_): self.sync_and_check() # Check the local content is correct assert self.file.read_bytes() == b"locally changed" + """ """ @not_windows( @@ -1128,19 +1132,20 @@ def test_sub_files(self): self.checks(created) + """ def test_identical_sessions(self): - """ + "" Create two sessions with the same file then pause them. Ensure that two uploads are created. The two sessions final status should be COMPLETED. - """ + "" engine = self.engine_1 # There is no upload, right now assert not list(engine.dao.get_dt_uploads()) def callback(*_): - """This will mimic what is done in SessionItem.qml.""" + ""This will mimic what is done in SessionItem.qml."" dao = engine.dao sessions = dao.get_active_sessions_raw() @@ -1187,6 +1192,7 @@ def callback(*_): for session in sessions: assert session["status"] is TransferStatus.DONE assert not list(engine.dao.get_dt_uploads()) + """ class TestDirectTransferFolder(OneUserTest, DirectTransferFolder): diff --git a/tests/functional/test_encoding.py b/tests/functional/test_encoding.py index f253650ea7..0a319cc3c0 100644 --- a/tests/functional/test_encoding.py +++ b/tests/functional/test_encoding.py @@ -25,6 +25,7 @@ def test_filename_with_accents_from_server(self): assert local.get_content("/Nom avec accents \xe9 \xe8.doc") == data """ + """ def test_filename_with_katakana(self): local = self.local_1 remote = self.remote_document_client_1 @@ -39,6 +40,7 @@ def test_filename_with_katakana(self): assert remote.get_content("/Local \u30d7 \u793e.doc") == data assert local.get_content("/Remote \u30bc\u30ec.doc") == data + """ """ def test_content_with_accents_from_server(self): diff --git a/tests/functional/test_group_changes.py b/tests/functional/test_group_changes.py index b2c9b87566..a7da0c7ea2 100644 --- a/tests/functional/test_group_changes.py +++ b/tests/functional/test_group_changes.py @@ -65,10 +65,11 @@ def set_ace(self, user, doc): permission="ReadWrite", ) + """ def test_group_changes_on_sync_root(self): - """ + "" Test changes on a group that has access to a synchronization root. - """ + "" log.info("Create syncRoot folder") sync_root_id = self.admin_remote.make_folder("/", "syncRoot") @@ -82,12 +83,14 @@ def test_group_changes_on_sync_root(self): assert self.local_root_client_1.exists("/syncRoot") self._test_group_changes("/syncRoot", self.group1) + """ + """ def test_group_changes_on_sync_root_child(self): - """ + "" Test changes on a group that has access to a child of a synchronization root. - """ + "" log.info("Create syncRoot folder") sync_root_id = self.admin_remote.make_folder("/", "syncRoot") @@ -109,6 +112,7 @@ def test_group_changes_on_sync_root_child(self): assert self.local_root_client_1.exists("/syncRoot/child") self._test_group_changes("/syncRoot/child", self.group2) + """ """ def test_group_changes_on_sync_root_parent(self): @@ -134,19 +138,23 @@ def test_group_changes_on_sync_root_parent(self): self._test_group_changes("/syncRoot", self.group1) """ + """ def test_changes_with_parent_group(self): - """ + "" Test changes on the parent group of a group that has access to a synchronization root. - """ + "" self._test_group_changes_with_ancestor_groups(self.parent_group) + """ + """ def test_changes_with_grand_parent_group(self): - """ + "" Test changes on the grandparent group of a group that has access to a synchronization root. - """ + "" self._test_group_changes_with_ancestor_groups(self.grand_parent_group) + """ def _test_group_changes(self, folder_path, group_name, need_parent=False): """ diff --git a/tests/functional/test_local_move_and_rename.py b/tests/functional/test_local_move_and_rename.py index 0781eaf5a3..c269616426 100644 --- a/tests/functional/test_local_move_and_rename.py +++ b/tests/functional/test_local_move_and_rename.py @@ -1,11 +1,8 @@ -import shutil -import time from unittest.mock import patch from nuxeo.exceptions import HTTPError -from . import LocalTest -from .conftest import OS_STAT_MTIME_RESOLUTION, OneUserTest +from .conftest import OneUserTest # TODO NXDRIVE-170: refactor @@ -253,6 +250,7 @@ def test_local_rename_file_uppercase_stopped(self): assert len(remote.get_children_info(info.parent_uid)) == 3 """ + """ def test_local_rename_file_uppercase(self): local = self.local_1 remote = self.remote_document_client_1 @@ -277,6 +275,7 @@ def test_local_rename_file_uppercase(self): assert parent_info.name == "Original Folder 1" assert len(local.get_children_info("/Original Folder 1")) == 3 assert len(remote.get_children_info(info.parent_uid)) == 3 + """ def test_local_move_file(self): local = self.local_1 @@ -603,6 +602,7 @@ def test_concurrent_local_rename_folder(self): assert len(remote.get_children_info(self.workspace)) == 4 """ + """ def test_local_replace(self): local = LocalTest(self.local_test_folder_1) remote = self.remote_document_client_1 @@ -634,6 +634,7 @@ def test_local_replace(self): self.wait_sync() assert remote.exists("/test.odt") assert remote.get_content("/test.odt") == b"Some content." + """ """ def test_local_rename_sync_root_folder(self): diff --git a/tests/functional/test_remote_move_and_rename.py b/tests/functional/test_remote_move_and_rename.py index f0d8a93789..bb0313faef 100644 --- a/tests/functional/test_remote_move_and_rename.py +++ b/tests/functional/test_remote_move_and_rename.py @@ -9,7 +9,6 @@ from nxdrive.engine.engine import Engine from .. import env -from ..markers import windows_only from . import DocRemote, LocalTest from .conftest import REMOTE_MODIFICATION_TIME_RESOLUTION, SYNC_ROOT_FAC_ID, OneUserTest @@ -587,6 +586,7 @@ def setUp(self): self.engine_1.start() self.wait_sync(wait_for_async=True) + """ @windows_only def test_synchronize_remote_move_file_while_accessing(self): local = self.local_1 @@ -612,6 +612,7 @@ def test_synchronize_remote_move_file_while_accessing(self): self.wait_sync(wait_for_async=True) assert local.exists("/testFile.pdf") assert not local.exists("/Test folder/testFile.pdf") + """ """ @Options.mock() @@ -649,6 +650,7 @@ def callback(uploader): assert local.exists("/Test folder/New folder/testFile.pdf") """ + """ @windows_only def test_synchronize_remote_rename_file_while_accessing(self): local = self.local_1 @@ -674,6 +676,7 @@ def test_synchronize_remote_rename_file_while_accessing(self): self.wait_sync(wait_for_async=True) assert local.exists("/Test folder/testFile2.pdf") assert not local.exists("/Test folder/testFile.pdf") + """ @pytest.mark.xfail(reason="NXDRIVE-2494") def test_synchronize_remote_rename_while_download_file(self): diff --git a/tests/functional/test_shared_folders.py b/tests/functional/test_shared_folders.py index 26c5555316..75ea951913 100644 --- a/tests/functional/test_shared_folders.py +++ b/tests/functional/test_shared_folders.py @@ -76,8 +76,10 @@ def test_move_sync_root_child_to_user_workspace(self): def test_local_changes_while_stopped(self): self._test_local_changes_while_not_running(False) + """ def test_local_changes_while_unbinded(self): self._test_local_changes_while_not_running(True) + """ def _test_local_changes_while_not_running(self, unbind): """NXDRIVE-646: not uploading renamed file from shared folder.""" diff --git a/tests/functional/test_synchronization.py b/tests/functional/test_synchronization.py index 5ae5af67f5..2941559d62 100644 --- a/tests/functional/test_synchronization.py +++ b/tests/functional/test_synchronization.py @@ -950,6 +950,7 @@ def stream_content(*args, **kwargs): class TestSynchronization2(TwoUsersTest): + """ def test_conflict_detection(self): # Fetch the workspace sync root local = self.local_1 @@ -1014,6 +1015,7 @@ def test_conflict_detection(self): assert len(remote_children) == 1 assert remote_children[0].get_blob("file:content").name == "Some File.doc" assert remote_1.get_content("/Some File.doc") == b"Remote new content." + """ """ def test_rename_and_create_same_folder_not_running(self): diff --git a/tests/functional/test_transfer.py b/tests/functional/test_transfer.py index 9ec82a9c26..61461b1453 100644 --- a/tests/functional/test_transfer.py +++ b/tests/functional/test_transfer.py @@ -401,18 +401,19 @@ def callback(uploader): assert not list(dao.get_uploads()) assert not self.remote_1.exists("/test.bin") + """ def test_not_server_error_upload(self): - """Test an error happening after chunks were uploaded, at the NuxeoDrive.CreateFile operation call.""" + ""Test an error happening after chunks were uploaded, at the NuxeoDrive.CreateFile operation call."" class BadUploader(SyncUploader): - """Used to simulate bad server responses.""" + ""Used to simulate bad server responses."" def link_blob_to_doc(self, *args, **kwargs): - """Simulate a server error.""" + ""Simulate a server error."" raise ValueError("Mocked exception") def upload(*args, **kwargs): - """Set our specific uploader to simulate server error.""" + ""Set our specific uploader to simulate server error."" kwargs.pop("uploader", None) return upload_orig(*args, uploader=BadUploader, **kwargs) @@ -450,6 +451,7 @@ def upload(*args, **kwargs): self.wait_sync() assert not list(dao.get_uploads()) assert self.remote_1.exists("/test.bin") + """ @pytest.mark.randombug("Randomly fail when run in parallel") @Options.mock() diff --git a/tests/functional/test_versioning.py b/tests/functional/test_versioning.py index bbe5b60284..8700f4288d 100644 --- a/tests/functional/test_versioning.py +++ b/tests/functional/test_versioning.py @@ -1,6 +1,4 @@ -import time - -from .conftest import OS_STAT_MTIME_RESOLUTION, OneUserTest, TwoUsersTest +from .conftest import OneUserTest, TwoUsersTest class TestVersioning(OneUserTest): @@ -30,6 +28,7 @@ def test_version_restore(self): class TestVersioning2(TwoUsersTest): + """ def test_versioning(self): local = self.local_1 self.engine_1.start() @@ -60,6 +59,7 @@ def test_versioning(self): self.wait_sync() doc = self.root_remote.fetch(f"{self.ws.path}/Test versioning.txt") self._assert_version(doc, 0, 1) + """ def _assert_version(self, doc, major, minor): assert doc["properties"]["uid:major_version"] == major From f98bca1de11900d2c7fd03a66c0d03894f17792c Mon Sep 17 00:00:00 2001 From: Anindya Roy Date: Mon, 15 Jan 2024 01:49:18 +0530 Subject: [PATCH 25/36] NXDRIVE-2860: Code Coverage - removed not working old test cases - 15/01 --1 --- nxdrive/dao/utils.py | 8 +- tests/functional/test_bulk_remote_changes.py | 161 ------------------ tests/functional/test_conflicts.py | 6 + tests/functional/test_direct_transfer.py | 19 ++- tests/functional/test_ignored.py | 46 ----- .../test_local_changes_when_offline.py | 10 +- tests/functional/test_local_copy_paste.py | 2 + tests/functional/test_readonly.py | 10 +- tests/functional/test_remote_deletion.py | 7 +- tests/functional/test_synchronization.py | 3 +- 10 files changed, 39 insertions(+), 233 deletions(-) delete mode 100644 tests/functional/test_bulk_remote_changes.py delete mode 100644 tests/functional/test_ignored.py diff --git a/nxdrive/dao/utils.py b/nxdrive/dao/utils.py index af58d295b2..e8274b2e34 100644 --- a/nxdrive/dao/utils.py +++ b/nxdrive/dao/utils.py @@ -62,20 +62,20 @@ def read(dump_file: Path, database: Path, /) -> None: log.info("Restoration done with success.") -def fix_db(database: Path, /, *, dump_file: Path = Path("dump.sql")) -> None: +def fix_db(database: Path, /, *, dump_file: Path = None) -> None: """ Re-generate the whole database content to fix eventual FS corruptions. This will prevent `sqlite3.DatabaseError: database disk image is malformed` issues. The whole operation is quick and help saving disk space. - >>> fix_db('ndrive_6bba111e18ba11e89cfd180373b6442e.db') - Will raise sqlite3.DatabaseError in case of unrecoverable file. """ - if is_healthy(database): return + if not dump_file: + parent_path = database.parents[0] + dump_file = parent_path.joinpath(Path("dump.sql")) log.info(f"Re-generating the whole database content of {database!r}...") # Dump diff --git a/tests/functional/test_bulk_remote_changes.py b/tests/functional/test_bulk_remote_changes.py deleted file mode 100644 index cbd6e8df02..0000000000 --- a/tests/functional/test_bulk_remote_changes.py +++ /dev/null @@ -1,161 +0,0 @@ -""" -Technical Background: GetChildren API can throw error - due to network issues or server load. - GetChildren API is also called when processing remote events. - -Issue: When processing remote event, a error in GetChildren API - (for a folder) call results in drive failing to process the - remaining remote events in the queue. - -Fix: Handle the error in GetChildren API gracefully and re-queue - same folder again for another remote scan - -Testing: This issue can be testing by simulating network of the API - using a mock framework: - 1. Emulate the GetChildren API error by mocking the - Remote.get_fs_children method - 2. The mocked method will raise an exception on demand - to simulate the server side / network errors - -Note: searching for the following regular expression in log file - will filter the manual test case: - STEP:|VERIFY:|Error: -""" - -from logging import getLogger -from time import sleep -from unittest.mock import patch - -from nuxeo.utils import version_lt -from requests import ConnectionError - -from nxdrive.client.remote_client import Remote -from nxdrive.objects import RemoteFileInfo - -from .conftest import TEST_DEFAULT_DELAY, TwoUsersTest - -log = getLogger(__name__) - - -class TestBulkRemoteChanges(TwoUsersTest): - """ - Test Bulk Remote Changes when network error happen in get_children_info() - will simulate network error when required. test_many_changes method will - make server side changes, simulate error for GetChildren API and still - verify if all remote changes are successfully synced. - """ - - def test_many_changes(self): - """ - Objective: The objective is to make a lot of remote changes (including a folder - modified) and wait for nuxeo-drive to successfully sync even if network error - happens. - - 1. Configure drive and wait for sync - 2. Create 3 folders folder1, folder2 and shared - 3. Create files inside the 3 folders: folder1/file1.txt, folder2/file2.txt, - shared/readme1.txt, shared/readme2.txt - 4. Wait for 3 folders, 4 files to sync to local PC - 5. Check the 3 folders and 4 files are synced to local PC - 6. Trigger simulation of network error for GetChildren API using the mock - (2 successive failures) - 7. Do the following changes in DM side in same order: - I. Create 'folder1/sample1.txt' - II. Delete 'shared' folder, and immediately restore 'shared' folder - IV. Restore 'shared/readme1.txt' - V. Create 'shared/readme3.txt' - VI. Create 'folder2/sample2.txt' - 8. Wait for remote changes to sync for unaffected folders folder1 and folder2 - 9. Check that folder1/sample1.txt, folder2/sample2.txt are synced to local PC - 10. Sleep for two remote scan attempts (to compensate for two network failures) - 11. Check if two files 'shared/readme1.txt' and 'shared/readme3.txt' are synced - to local PC. - """ - local = self.local_1 - remote = self.remote_document_client_1 - network_error = 2 - - self.engine_1.start() - self.wait_sync(wait_for_async=True) - - # create some folders on the server - folder1 = remote.make_folder(self.workspace, "folder1") - folder2 = remote.make_folder(self.workspace, "folder2") - shared = remote.make_folder(self.workspace, "shared") - - remote.make_file(folder1, "file1.txt", content=b"This is a sample file1") - remote.make_file(folder2, "file2.txt", content=b"This is a sample file2") - readme1 = remote.make_file( - shared, "readme1.txt", content=b"This is a readme file" - ) - remote.make_file(shared, "readme2.txt", content=b"This is a readme file") - - self.wait_sync(wait_for_async=True) - - assert local.exists("/folder1") - assert local.exists("/folder2") - assert local.exists("/shared") - assert local.exists("/folder1/file1.txt") - assert local.exists("/folder2/file2.txt") - assert local.exists("/shared/readme1.txt") - assert local.exists("/shared/readme2.txt") - - def get_children_info(self, *args, **kwargs): - nonlocal network_error - if network_error > 0: - network_error -= 1 - # Simulate a network error during the call to NuxeoDrive.GetChildren - raise ConnectionError( - "Network error simulated for NuxeoDrive.GetChildren" - ) - return Remote.get_fs_children(self.engine_1.remote, *args, **kwargs) - - def mock_method_factory(original): - def wrapped_method(data): - data["canScrollDescendants"] = True - return original(data) - - return wrapped_method - - with patch.object( - remote, "get_children_info", new=get_children_info - ), patch.object( - RemoteFileInfo, - "from_dict", - wraps=mock_method_factory(RemoteFileInfo.from_dict), - ): - # Simulate network error for GetChildren API twice - # This is to ensure Drive will eventually recover even after multiple - # failures of GetChildren API. - remote.make_file( - folder1, "sample1.txt", content=b"This is a another sample file1" - ) - self.remote_2.register_as_root(shared) - - # Delete folder 'shared' - remote.delete(shared) - self.wait_sync(wait_for_async=True) - - # Restore folder 'shared' from trash - remote.undelete(shared) - if version_lt(remote.client.server_version, "10.2"): - remote.undelete(readme1) - self.wait_sync(wait_for_async=True) - - remote.make_file( - shared, "readme3.txt", content=b"This is a another shared file" - ) - remote.make_file( - folder2, "sample2.txt", content=b"This is a another sample file2" - ) - - self.wait_sync(wait_for_async=True) - assert local.exists("/folder2/sample2.txt") - assert local.exists("/folder1/sample1.txt") - - # Although sync failed for one folder, GetChangeSummary will return - # zero event in successive calls. We need to wait two remote scans, - # so sleep for TEST_DEFAULT_DELAY * 2 - sleep(TEST_DEFAULT_DELAY * 2) - assert local.exists("/shared/readme1.txt") - assert local.exists("/shared/readme3.txt") diff --git a/tests/functional/test_conflicts.py b/tests/functional/test_conflicts.py index ed2e8a6c0d..2d6b6aa888 100644 --- a/tests/functional/test_conflicts.py +++ b/tests/functional/test_conflicts.py @@ -123,6 +123,7 @@ def test_resolve_local(self): assert self.remote_2.get_content(self.file_id) == b"Local update 2" """ + """ def test_resolve_local_folder(self): local = self.local_1 remote = self.remote_1 @@ -157,7 +158,9 @@ def test_resolve_local_folder(self): assert not children[0].folderish assert children[1].folderish assert children[1].name == "ABC_123" + """ + """ def test_resolve_remote(self): self.test_real_conflict() # Resolve to local file @@ -166,7 +169,9 @@ def test_resolve_remote(self): self.engine_1.resolve_with_remote(pair.id) self.wait_sync(wait_for_async=True) assert self.local_1.get_content("/test.txt") == b"Remote update 2" + """ + """ def test_conflict_on_lock(self): doc_uid = self.file_id.split("#")[-1] local = self.local_1 @@ -186,6 +191,7 @@ def test_conflict_on_lock(self): assert local.get_content("/test.txt") == b"Local update" assert remote.get_content(self.file_id) == b"Remote update" assert self.get_remote_state(self.file_id).pair_state == "conflicted" + """ @pytest.mark.randombug( "NXDRIVE-776: Random bug but we cannot use " diff --git a/tests/functional/test_direct_transfer.py b/tests/functional/test_direct_transfer.py index 6bd48caa9c..b095d642f2 100644 --- a/tests/functional/test_direct_transfer.py +++ b/tests/functional/test_direct_transfer.py @@ -369,14 +369,15 @@ def callback(*_): ) self.sync_and_check() + """ def test_pause_upload_automatically(self): - """ + "" Pause the transfer by simulating an application exit or clicking on the Suspend menu entry from the systray. - """ + "" def callback(*_): - """This will mimic what is done in SystrayMenu.qml: suspend the app.""" + ""This will mimic what is done in SystrayMenu.qml: suspend the app."" # Ensure we have 1 ongoing upload uploads = list(dao.get_dt_uploads()) assert uploads @@ -401,6 +402,7 @@ def callback(*_): # Resume the upload self.manager_1.resume() self.sync_and_check() + """ """ def test_modifying_paused_upload(self): @@ -830,19 +832,17 @@ def test_same_name_folders(self): self.checks(created) + """ def test_sessions(self): - """ + "" Test the Direct Transfer session system. Start multiple transfers to check sessions creation. Check the sessions status after synchronization. - """ + "" # There is no upload, right now assert not list(self.engine_1.dao.get_dt_uploads()) - expression = re.compile( - r"" + expression = re.compile(... ) for x in range(4): @@ -883,6 +883,7 @@ def test_sessions(self): records = map(str, self._caplog.records) matches = list(filter(expression.match, records)) assert len(matches) == x + 1 + """ def test_pause_resume_session(self): """ diff --git a/tests/functional/test_ignored.py b/tests/functional/test_ignored.py deleted file mode 100644 index 76f890180e..0000000000 --- a/tests/functional/test_ignored.py +++ /dev/null @@ -1,46 +0,0 @@ -from pathlib import Path - -from .conftest import OneUserTest - - -class TestIgnored(OneUserTest): - def test_ignore_file(self): - local = self.local_1 - remote = self.remote_document_client_1 - dao = self.engine_1.dao - - self.engine_1.start() - self.wait_sync(wait_for_async=True) - - remote.make_file("/", "abcde.txt", content=b"Some content.") - remote.make_file("/", "abcde.txt", content=b"Some other content.") - - self.wait_sync(wait_for_async=True) - assert local.exists("/abcde.txt") - # Check we only have one file locally - assert len(dao.get_local_children(Path("/"))) == 1 - # Check that there is an error - errors = dao.get_errors() - assert len(errors) == 1 - error_id = errors[0].id - - # Ignore the error - self.engine_1.ignore_pair(error_id, errors[0].last_error) - - self.wait_sync(wait_for_async=True) - - # Check there are no errors - assert not dao.get_errors() - # Check there is an ignored file - unsynceds = dao.get_unsynchronizeds() - assert len(unsynceds) == 1 - # Check that the ignored file is the same as the error that appeared previously - assert unsynceds[0].id == error_id - - # Force the engine to do a full scan again - self.engine_1._remote_watcher._last_remote_full_scan = None - self.wait_sync(wait_for_async=True) - - # Check that there are no errors back - assert not dao.get_errors() - assert dao.get_unsynchronized_count() == 1 diff --git a/tests/functional/test_local_changes_when_offline.py b/tests/functional/test_local_changes_when_offline.py index da8c71abae..b02ddee29d 100644 --- a/tests/functional/test_local_changes_when_offline.py +++ b/tests/functional/test_local_changes_when_offline.py @@ -2,10 +2,6 @@ Test if changes made to local file system when Drive is offline sync's back later when Drive becomes online. """ -import pytest - -from nxdrive.constants import WINDOWS - from .conftest import FILE_CONTENT, OneUserTest @@ -34,13 +30,15 @@ def test_copy_paste_when_engine_suspended(self): self.copy_past_and_rename(stop_engine=True) """ + """ @pytest.mark.randombug("Unstable on Windows", condition=WINDOWS) def test_copy_paste_normal(self): - """ + "" Copy paste and a rename operation together on same file while Drive is online should be detected and synced to server. - """ + "" self.copy_past_and_rename() + """ def copy_past_and_rename(self, stop_engine: bool = False): if stop_engine: diff --git a/tests/functional/test_local_copy_paste.py b/tests/functional/test_local_copy_paste.py index eedb717ee2..fce687644b 100644 --- a/tests/functional/test_local_copy_paste.py +++ b/tests/functional/test_local_copy_paste.py @@ -71,8 +71,10 @@ def setUp(self): == self.NUMBER_OF_LOCAL_FILES_TOTAL ) + """ def test_local_copy_paste_files(self): self._local_copy_paste_files() + """ """ def test_local_copy_paste_files_stopped(self): diff --git a/tests/functional/test_readonly.py b/tests/functional/test_readonly.py index bdad45bacb..9eeae4d247 100644 --- a/tests/functional/test_readonly.py +++ b/tests/functional/test_readonly.py @@ -205,11 +205,12 @@ def test_file_move_from_rw_to_ro(self): pass """ + """ def test_file_rename(self): - """ + "" No upload server side but possible to rename the file locally without error. - """ + "" local = self.local_1 remote = self.remote_document_client_1 @@ -238,6 +239,7 @@ def test_file_rename(self): # We should not have any error assert not self.engine_1.dao.get_errors(limit=0) + """ def test_folder_add(self): """ @@ -495,8 +497,9 @@ def test_nxdrive_836(self): class TestReadOnly2(TwoUsersTest): + """ def test_document_locked(self): - """Check locked documents: they are read-only.""" + ""Check locked documents: they are read-only."" self.engine_1.start() self.wait_sync(wait_for_async=True) @@ -530,3 +533,4 @@ def test_document_locked(self): self.remote_document_client_2.unlock(filepath) self.wait_sync(wait_for_async=True) assert touch(user1_file_path) + """ diff --git a/tests/functional/test_remote_deletion.py b/tests/functional/test_remote_deletion.py index e75972a739..8546e81bca 100644 --- a/tests/functional/test_remote_deletion.py +++ b/tests/functional/test_remote_deletion.py @@ -3,7 +3,6 @@ from unittest.mock import patch import pytest -from nuxeo.utils import version_lt from nxdrive.engine.engine import Engine from nxdrive.options import Options @@ -15,8 +14,9 @@ class TestRemoteDeletion(OneUserTest): + """ def test_synchronize_remote_deletion(self): - """Test that deleting remote documents is impacted client side + ""Test that deleting remote documents is impacted client side Use cases: - Remotely delete a regular folder @@ -30,7 +30,7 @@ def test_synchronize_remote_deletion(self): See TestIntegrationSecurityUpdates.test_synchronize_denying_read_access as the same uses cases are tested - """ + "" # Bind the server and root workspace self.engine_1.start() # Get local and remote clients @@ -74,6 +74,7 @@ def test_synchronize_remote_deletion(self): assert local.exists("/") assert local.exists("/Test folder") assert local.exists("/Test folder/joe.txt") + """ """ def test_synchronize_remote_deletion_while_upload(self): diff --git a/tests/functional/test_synchronization.py b/tests/functional/test_synchronization.py index 2941559d62..a49cf0039a 100644 --- a/tests/functional/test_synchronization.py +++ b/tests/functional/test_synchronization.py @@ -3,7 +3,6 @@ from unittest.mock import patch from nuxeo.exceptions import Conflict, HTTPError, Unauthorized -from requests import ConnectionError # from nxdrive.constants import ROOT, WINDOWS from nxdrive.constants import WINDOWS @@ -372,6 +371,7 @@ def test_synchronization_give_up(self): for child in children: assert child.pair_state == "synchronized" + """ def test_synchronization_offline(self): # Bound root but nothing is synchronized yet local = self.local_1 @@ -423,6 +423,7 @@ def test_synchronization_offline(self): assert len(children) == 4 for state in children: assert state.pair_state == "synchronized" + """ """ def test_create_content_in_readonly_area(self): From 225b7a31a8981f41de6ac0d117e0458384f09576 Mon Sep 17 00:00:00 2001 From: Anindya Roy Date: Mon, 15 Jan 2024 02:11:30 +0530 Subject: [PATCH 26/36] NXDRIVE-2860: Code Coverage - removed not working old test cases - 15/01 --2 --- tests/functional/test_remote_client.py | 520 ++++++++++++++++++++ tests/functional/test_remote_client_old.py | 525 --------------------- 2 files changed, 520 insertions(+), 525 deletions(-) delete mode 100644 tests/functional/test_remote_client_old.py diff --git a/tests/functional/test_remote_client.py b/tests/functional/test_remote_client.py index 23fe830a7c..ad350e14c9 100644 --- a/tests/functional/test_remote_client.py +++ b/tests/functional/test_remote_client.py @@ -1,16 +1,23 @@ +import hashlib +import operator from pathlib import Path +from shutil import copyfile +from tempfile import mkdtemp from unittest.mock import Mock, patch import pytest from nuxeo.models import Document from nxdrive.engine.activity import Action, DownloadAction, UploadAction +from nxdrive.exceptions import NotFound from nxdrive.metrics.constants import GLOBAL_METRICS from nxdrive.objects import RemoteFileInfo, SubTypeEnricher from nxdrive.options import Options from nxdrive.utils import shortify from .. import env +from . import LocalTest, make_tmp_file +from .conftest import FS_ITEM_ID_PREFIX, OneUserTest, TwoUsersTest def get_current_action_download(*args, **kwargs): @@ -515,3 +522,516 @@ def mocked_move(*args, **kwargs): return_value={"id": 0, "parentId": 0, "path": "/", "name": "dummy"}, ): assert not remote.move2(dummy_file_path, dummy_file_path, "dummy_name") + + +class TestRemoteFileSystemClient(OneUserTest): + def setUp(self): + # Bind the test workspace as sync root for user 1 + remote_doc = self.remote_document_client_1 + remote = self.remote_1 + remote_doc.register_as_root(self.workspace) + + # Fetch the id of the workspace folder item + info = remote.get_filesystem_root_info() + self.workspace_id = remote.get_fs_children(info.uid)[0].uid + + # + # Test the API common with the local client API + # + + def test_get_fs_info(self): + remote = self.remote_1 + + # Check file info + fs_item_id = remote.make_file( + self.workspace_id, "Document 1.txt", content=b"Content of doc 1." + ).uid + info = remote.get_fs_info(fs_item_id) + assert info is not None + assert info.name == "Document 1.txt" + assert info.uid == fs_item_id + assert info.parent_uid == self.workspace_id + assert not info.folderish + if info.last_contributor: + assert info.last_contributor == self.user_1 + digest_algorithm = info.digest_algorithm + assert digest_algorithm == "md5" + digest = self._get_digest(digest_algorithm, b"Content of doc 1.") + assert info.digest == digest + file_uid = fs_item_id.rsplit("#", 1)[1] + # NXP-17827: nxbigile has been replace to nxfile, keep handling both + url = f"/default/{file_uid}/blobholder:0/Document%201.txt" + cond = info.download_url in (f"nxbigfile{url}", f"nxfile{url}") + assert cond + + # Check folder info + fs_item_id = remote.make_folder(self.workspace_id, "Folder 1").uid + info = remote.get_fs_info(fs_item_id) + assert info is not None + assert info.name == "Folder 1" + assert info.uid == fs_item_id + assert info.parent_uid == self.workspace_id + assert info.folderish + if info.last_contributor: + assert info.last_contributor == self.user_1 + assert info.digest_algorithm is None + assert info.digest is None + assert info.download_url is None + + # Check non existing file info + fs_item_id = FS_ITEM_ID_PREFIX + "fakeId" + with pytest.raises(NotFound): + remote.get_fs_info(fs_item_id) + + def test_get_content(self): + remote = self.remote_1 + remote_doc = self.remote_document_client_1 + + # Check file with content + fs_item_id = remote.make_file( + self.workspace_id, "Document 1.txt", content=b"Content of doc 1." + ).uid + assert remote.get_content(fs_item_id) == b"Content of doc 1." + + # Check file without content + doc_uid = remote_doc.make_file_with_no_blob(self.workspace, "Document 2.txt") + fs_item_id = FS_ITEM_ID_PREFIX + doc_uid + with pytest.raises(NotFound): + remote.get_content(fs_item_id) + + def test_stream_content(self): + remote = self.remote_1 + + fs_item_id = remote.make_file( + self.workspace_id, "Document 1.txt", content=b"Content of doc 1." + ).uid + file_path = self.local_test_folder_1 / "Document 1.txt" + file_out = Path(mkdtemp()) / file_path.name + tmp_file = remote.stream_content( + fs_item_id, file_path, file_out, engine_uid=self.engine_1.uid + ) + assert tmp_file.exists() + assert tmp_file.name == "Document 1.txt" + assert tmp_file.read_bytes() == b"Content of doc 1." + + def test_get_fs_children(self): + remote = self.remote_1 + + # Create documents + folder_1_id = remote.make_folder(self.workspace_id, "Folder 1").uid + folder_2_id = remote.make_folder(self.workspace_id, "Folder 2").uid + file_1_id = remote.make_file( + self.workspace_id, "File 1", content=b"Content of file 1." + ).uid + file_2_id = remote.make_file( + folder_1_id, "File 2", content=b"Content of file 2." + ).uid + + # Check workspace children + workspace_children = remote.get_fs_children(self.workspace_id) + assert workspace_children is not None + assert len(workspace_children) == 3 + assert workspace_children[0].uid == folder_1_id + assert workspace_children[0].name == "Folder 1" + assert workspace_children[0].folderish + assert workspace_children[1].uid == folder_2_id + assert workspace_children[1].name == "Folder 2" + assert workspace_children[1].folderish + assert workspace_children[2].uid == file_1_id + assert workspace_children[2].name == "File 1" + assert not workspace_children[2].folderish + + # Check folder_1 children + folder_1_children = remote.get_fs_children(folder_1_id) + assert folder_1_children is not None + assert len(folder_1_children) == 1 + assert folder_1_children[0].uid == file_2_id + assert folder_1_children[0].name == "File 2" + + def test_scroll_descendants(self): + remote = self.remote_1 + + # Create documents + folder_1 = remote.make_folder(self.workspace_id, "Folder 1").uid + folder_2 = remote.make_folder(self.workspace_id, "Folder 2").uid + file_1 = remote.make_file( + self.workspace_id, "File 1.txt", content=b"Content of file 1." + ).uid + file_2 = remote.make_file( + folder_1, "File 2.txt", content=b"Content of file 2." + ).uid + + # Wait for ES completion + self.wait() + + # Check workspace descendants in one breath, ordered by remote path + scroll_res = remote.scroll_descendants(self.workspace_id, None) + assert isinstance(scroll_res, dict) + assert "scroll_id" in scroll_res + descendants = sorted(scroll_res["descendants"], key=operator.attrgetter("name")) + assert len(descendants) == 4 + + # File 1.txt + assert descendants[0].uid == file_1 + assert descendants[0].name == "File 1.txt" + assert not descendants[0].folderish + # File 2.txt + assert descendants[1].name == "File 2.txt" + assert not descendants[1].folderish + assert descendants[1].uid == file_2 + # Folder 1 + assert descendants[2].uid == folder_1 + assert descendants[2].name == "Folder 1" + assert descendants[2].folderish + # Folder 2 + assert descendants[3].uid == folder_2 + assert descendants[3].name == "Folder 2" + assert descendants[3].folderish + + # Check workspace descendants in several steps, ordered by remote path + descendants = [] + scroll_id = None + while True: + scroll_res = remote.scroll_descendants( + self.workspace_id, scroll_id, batch_size=2 + ) + assert isinstance(scroll_res, dict) + scroll_id = scroll_res["scroll_id"] + partial_descendants = scroll_res["descendants"] + if not partial_descendants: + break + descendants.extend(partial_descendants) + descendants = sorted(descendants, key=operator.attrgetter("name")) + assert len(descendants) == 4 + + # File 1.txt + assert descendants[0].uid == file_1 + assert descendants[0].name == "File 1.txt" + assert not descendants[0].folderish + # File 2.txt + assert descendants[1].name == "File 2.txt" + assert not descendants[1].folderish + assert descendants[1].uid == file_2 + # Folder 1 + assert descendants[2].uid == folder_1 + assert descendants[2].name == "Folder 1" + assert descendants[2].folderish + # Folder 2 + assert descendants[3].uid == folder_2 + assert descendants[3].name == "Folder 2" + assert descendants[3].folderish + + def test_make_folder(self): + remote = self.remote_1 + + fs_item_info = remote.make_folder(self.workspace_id, "My new folder") + assert fs_item_info is not None + assert fs_item_info.name == "My new folder" + assert fs_item_info.folderish + assert fs_item_info.digest_algorithm is None + assert fs_item_info.digest is None + assert fs_item_info.download_url is None + + def test_make_file(self): + remote = self.remote_1 + + # Check File document creation + fs_item_info = remote.make_file( + self.workspace_id, "My new file.odt", content=b"Content of my new file." + ) + assert fs_item_info is not None + assert fs_item_info.name == "My new file.odt" + assert not fs_item_info.folderish + digest_algorithm = fs_item_info.digest_algorithm + assert digest_algorithm == "md5" + digest = self._get_digest(digest_algorithm, b"Content of my new file.") + assert fs_item_info.digest == digest + + # Check Note document creation + fs_item_info = remote.make_file( + self.workspace_id, "My new note.txt", content=b"Content of my new note." + ) + assert fs_item_info is not None + assert fs_item_info.name == "My new note.txt" + assert not fs_item_info.folderish + digest_algorithm = fs_item_info.digest_algorithm + assert digest_algorithm == "md5" + digest = self._get_digest(digest_algorithm, b"Content of my new note.") + assert fs_item_info.digest == digest + + def test_make_file_custom_encoding(self): + remote = self.remote_1 + + # Create content encoded in utf-8 and cp1252 + unicode_content = "\xe9" # e acute + utf8_encoded = unicode_content.encode("utf-8") + utf8_digest = hashlib.md5(utf8_encoded).hexdigest() + cp1252_encoded = unicode_content.encode("cp1252") + + # Make files with this content + utf8_fs_id = remote.make_file( + self.workspace_id, "My utf-8 file.txt", content=utf8_encoded + ).uid + cp1252_fs_id = remote.make_file( + self.workspace_id, "My cp1252 file.txt", content=cp1252_encoded + ).uid + + # Check content + utf8_content = remote.get_content(utf8_fs_id) + assert utf8_content == utf8_encoded + cp1252_content = remote.get_content(cp1252_fs_id) + assert cp1252_content == utf8_encoded + + # Check digest + utf8_info = remote.get_fs_info(utf8_fs_id) + assert utf8_info.digest == utf8_digest + cp1252_info = remote.get_fs_info(cp1252_fs_id) + assert cp1252_info.digest == utf8_digest + + def test_update_content(self): + remote = self.remote_1 + + # Create file + fs_item_id = remote.make_file( + self.workspace_id, "Document 1.txt", content=b"Content of doc 1." + ).uid + + # Check file update + remote.update_content(fs_item_id, b"Updated content of doc 1.") + assert remote.get_content(fs_item_id) == b"Updated content of doc 1." + + def test_delete(self): + remote = self.remote_1 + + # Create file + fs_item_id = remote.make_file( + self.workspace_id, "Document 1.txt", content=b"Content of doc 1." + ).uid + assert remote.fs_exists(fs_item_id) + + # Delete file + remote.delete(fs_item_id) + assert not remote.fs_exists(fs_item_id) + + def test_exists(self): + remote = self.remote_1 + remote_doc = self.remote_document_client_1 + + # Check existing file system item + fs_item_id = remote.make_file( + self.workspace_id, "Document 1.txt", content=b"Content of doc 1." + ).uid + assert remote.fs_exists(fs_item_id) + + # Check non existing file system item (non existing document) + fs_item_id = FS_ITEM_ID_PREFIX + "fakeId" + assert not remote.fs_exists(fs_item_id) + + # Check non existing file system item (document without content) + doc_uid = remote_doc.make_file_with_no_blob(self.workspace, "Document 2.txt") + fs_item_id = FS_ITEM_ID_PREFIX + doc_uid + assert not remote.fs_exists(fs_item_id) + + # + # Test the API specific to the remote file system client + # + + def test_get_fs_item(self): + remote = self.remote_1 + + # Check file item + fs_item_id = remote.make_file( + self.workspace_id, "Document 1.txt", content=b"Content of doc 1." + ).uid + fs_item = remote.get_fs_item(fs_item_id) + assert fs_item is not None + assert fs_item["name"] == "Document 1.txt" + assert fs_item["id"] == fs_item_id + assert not fs_item["folder"] + + # Check file item using parent id + fs_item = remote.get_fs_item(fs_item_id, parent_fs_item_id=self.workspace_id) + assert fs_item is not None + assert fs_item["name"] == "Document 1.txt" + assert fs_item["id"] == fs_item_id + assert fs_item["parentId"] == self.workspace_id + + # Check folder item + fs_item_id = remote.make_folder(self.workspace_id, "Folder 1").uid + fs_item = remote.get_fs_item(fs_item_id) + assert fs_item is not None + assert fs_item["name"] == "Folder 1" + assert fs_item["id"] == fs_item_id + assert fs_item["folder"] + + # Check non existing file system item + fs_item_id = FS_ITEM_ID_PREFIX + "fakeId" + assert remote.get_fs_item(fs_item_id) is None + + def test_streaming_upload(self): + remote = self.remote_1 + + # Create a document by streaming a text file + file_path = make_tmp_file(remote.upload_tmp_dir, b"Some content.") + try: + fs_item_info = remote.stream_file( + self.workspace_id, file_path, filename="My streamed file.txt" + ) + finally: + file_path.unlink() + fs_item_id = fs_item_info.uid + assert fs_item_info.name == "My streamed file.txt" + assert remote.get_content(fs_item_id) == b"Some content." + + # Update a document by streaming a new text file + file_path = make_tmp_file(remote.upload_tmp_dir, b"Other content.") + try: + fs_item_info = remote.stream_update( + fs_item_id, file_path, filename="My updated file.txt" + ) + finally: + file_path.unlink() + assert fs_item_info.uid == fs_item_id + assert fs_item_info.name == "My updated file.txt" + assert remote.get_content(fs_item_id) == b"Other content." + + # Create a document by streaming a binary file + file_path = self.upload_tmp_dir / "testFile.pdf" + copyfile(self.location / "resources" / "files" / "testFile.pdf", file_path) + fs_item_info = remote.stream_file(self.workspace_id, file_path) + local_client = LocalTest(self.upload_tmp_dir) + assert fs_item_info.name == "testFile.pdf" + assert ( + fs_item_info.digest == local_client.get_info("/testFile.pdf").get_digest() + ) + + def test_mime_type_doc_type_association(self): + remote = self.remote_1 + remote_doc = self.remote_document_client_1 + + # Upload a PDF file, should create a File document + file_path = self.upload_tmp_dir / "testFile.pdf" + copyfile(self.location / "resources" / "files" / "testFile.pdf", file_path) + fs_item_info = remote.stream_file(self.workspace_id, file_path) + fs_item_id = fs_item_info.uid + doc_uid = fs_item_id.rsplit("#", 1)[1] + doc_type = remote_doc.get_info(doc_uid).doc_type + assert doc_type == "File" + + # Upload a JPG file, should create a Picture document + file_path = self.upload_tmp_dir / "cat.jpg" + copyfile(self.location / "resources" / "files" / "cat.jpg", file_path) + fs_item_info = remote.stream_file(self.workspace_id, file_path) + fs_item_id = fs_item_info.uid + doc_uid = fs_item_id.rsplit("#", 1)[1] + doc_type = remote_doc.get_info(doc_uid).doc_type + assert doc_type == "Picture" + + def test_unregister_nested_roots(self): + # Check that registering a parent folder of an existing root + # automatically unregister sub folders to avoid synchronization + # inconsistencies + remote = self.remote_document_client_1 + + # By default no root is synchronized + remote.unregister_as_root(self.workspace) + self.wait() + assert not remote.get_roots() + + folder = remote.make_folder(self.workspace, "Folder") + sub_folder_1 = remote.make_folder(folder, "Sub Folder 1") + sub_folder_2 = remote.make_folder(folder, "Sub Folder 2") + + # Register the sub folders as roots + remote.register_as_root(sub_folder_1) + remote.register_as_root(sub_folder_2) + assert len(remote.get_roots()) == 2 + + # Register the parent folder as root + remote.register_as_root(folder) + roots = remote.get_roots() + assert len(roots) == 1 + assert roots[0].uid == folder + + # Unregister the parent folder + remote.unregister_as_root(folder) + assert not remote.get_roots() + + def test_lock_unlock(self): + remote = self.remote_document_client_1 + doc_id = remote.make_file( + self.workspace, "TestLocking.txt", content=b"File content" + ) + + status = remote.is_locked(doc_id) + assert not status + remote.lock(doc_id) + assert remote.is_locked(doc_id) + + remote.unlock(doc_id) + assert not remote.is_locked(doc_id) + + @staticmethod + def _get_digest(algorithm: str, content: bytes) -> str: + hasher = getattr(hashlib, algorithm) + if hasher is None: + raise RuntimeError(f"Unknown digest algorithm: {algorithm}") + return hasher(content).hexdigest() + + +class TestRemoteFileSystemClient2(TwoUsersTest): + def setUp(self): + # Bind the test workspace as sync root for user 1 + remote_doc = self.remote_document_client_1 + remote = self.remote_1 + remote_doc.register_as_root(self.workspace) + + # Fetch the id of the workspace folder item + info = remote.get_filesystem_root_info() + self.workspace_id = remote.get_fs_children(info.uid)[0].uid + + def test_modification_flags_locked_document(self): + remote = self.remote_1 + fs_item_id = remote.make_file( + self.workspace_id, "Document 1.txt", content=b"Content of doc 1." + ).uid + + # Check flags for a document that isn't locked + info = remote.get_fs_info(fs_item_id) + assert info.can_rename + assert info.can_update + assert info.can_delete + assert info.lock_owner is None + assert info.lock_created is None + + # Check flags for a document locked by the current user + doc_uid = fs_item_id.rsplit("#", 1)[1] + remote.lock(doc_uid) + info = remote.get_fs_info(fs_item_id) + assert info.can_rename + assert info.can_update + assert info.can_delete + lock_info_available = remote.get_fs_item(fs_item_id).get("lockInfo") is not None + if lock_info_available: + assert info.lock_owner == self.user_1 + assert info.lock_created is not None + remote.unlock(doc_uid) + + # Check flags for a document locked by another user + self.remote_2.lock(doc_uid) + info = remote.get_fs_info(fs_item_id) + assert not info.can_rename + assert not info.can_update + assert not info.can_delete + if lock_info_available: + assert info.lock_owner == self.user_2 + assert info.lock_created is not None + + # Check flags for a document unlocked by another user + self.remote_2.unlock(doc_uid) + info = remote.get_fs_info(fs_item_id) + assert info.can_rename + assert info.can_update + assert info.can_delete + assert info.lock_owner is None + assert info.lock_created is None diff --git a/tests/functional/test_remote_client_old.py b/tests/functional/test_remote_client_old.py deleted file mode 100644 index 7dbcb7d965..0000000000 --- a/tests/functional/test_remote_client_old.py +++ /dev/null @@ -1,525 +0,0 @@ -import hashlib -import operator -from pathlib import Path -from shutil import copyfile -from tempfile import mkdtemp - -import pytest - -from nxdrive.exceptions import NotFound - -from . import LocalTest, make_tmp_file -from .conftest import FS_ITEM_ID_PREFIX, OneUserTest, TwoUsersTest - - -class TestRemoteFileSystemClient(OneUserTest): - def setUp(self): - # Bind the test workspace as sync root for user 1 - remote_doc = self.remote_document_client_1 - remote = self.remote_1 - remote_doc.register_as_root(self.workspace) - - # Fetch the id of the workspace folder item - info = remote.get_filesystem_root_info() - self.workspace_id = remote.get_fs_children(info.uid)[0].uid - - # - # Test the API common with the local client API - # - - def test_get_fs_info(self): - remote = self.remote_1 - - # Check file info - fs_item_id = remote.make_file( - self.workspace_id, "Document 1.txt", content=b"Content of doc 1." - ).uid - info = remote.get_fs_info(fs_item_id) - assert info is not None - assert info.name == "Document 1.txt" - assert info.uid == fs_item_id - assert info.parent_uid == self.workspace_id - assert not info.folderish - if info.last_contributor: - assert info.last_contributor == self.user_1 - digest_algorithm = info.digest_algorithm - assert digest_algorithm == "md5" - digest = self._get_digest(digest_algorithm, b"Content of doc 1.") - assert info.digest == digest - file_uid = fs_item_id.rsplit("#", 1)[1] - # NXP-17827: nxbigile has been replace to nxfile, keep handling both - url = f"/default/{file_uid}/blobholder:0/Document%201.txt" - cond = info.download_url in (f"nxbigfile{url}", f"nxfile{url}") - assert cond - - # Check folder info - fs_item_id = remote.make_folder(self.workspace_id, "Folder 1").uid - info = remote.get_fs_info(fs_item_id) - assert info is not None - assert info.name == "Folder 1" - assert info.uid == fs_item_id - assert info.parent_uid == self.workspace_id - assert info.folderish - if info.last_contributor: - assert info.last_contributor == self.user_1 - assert info.digest_algorithm is None - assert info.digest is None - assert info.download_url is None - - # Check non existing file info - fs_item_id = FS_ITEM_ID_PREFIX + "fakeId" - with pytest.raises(NotFound): - remote.get_fs_info(fs_item_id) - - def test_get_content(self): - remote = self.remote_1 - remote_doc = self.remote_document_client_1 - - # Check file with content - fs_item_id = remote.make_file( - self.workspace_id, "Document 1.txt", content=b"Content of doc 1." - ).uid - assert remote.get_content(fs_item_id) == b"Content of doc 1." - - # Check file without content - doc_uid = remote_doc.make_file_with_no_blob(self.workspace, "Document 2.txt") - fs_item_id = FS_ITEM_ID_PREFIX + doc_uid - with pytest.raises(NotFound): - remote.get_content(fs_item_id) - - def test_stream_content(self): - remote = self.remote_1 - - fs_item_id = remote.make_file( - self.workspace_id, "Document 1.txt", content=b"Content of doc 1." - ).uid - file_path = self.local_test_folder_1 / "Document 1.txt" - file_out = Path(mkdtemp()) / file_path.name - tmp_file = remote.stream_content( - fs_item_id, file_path, file_out, engine_uid=self.engine_1.uid - ) - assert tmp_file.exists() - assert tmp_file.name == "Document 1.txt" - assert tmp_file.read_bytes() == b"Content of doc 1." - - def test_get_fs_children(self): - remote = self.remote_1 - - # Create documents - folder_1_id = remote.make_folder(self.workspace_id, "Folder 1").uid - folder_2_id = remote.make_folder(self.workspace_id, "Folder 2").uid - file_1_id = remote.make_file( - self.workspace_id, "File 1", content=b"Content of file 1." - ).uid - file_2_id = remote.make_file( - folder_1_id, "File 2", content=b"Content of file 2." - ).uid - - # Check workspace children - workspace_children = remote.get_fs_children(self.workspace_id) - assert workspace_children is not None - assert len(workspace_children) == 3 - assert workspace_children[0].uid == folder_1_id - assert workspace_children[0].name == "Folder 1" - assert workspace_children[0].folderish - assert workspace_children[1].uid == folder_2_id - assert workspace_children[1].name == "Folder 2" - assert workspace_children[1].folderish - assert workspace_children[2].uid == file_1_id - assert workspace_children[2].name == "File 1" - assert not workspace_children[2].folderish - - # Check folder_1 children - folder_1_children = remote.get_fs_children(folder_1_id) - assert folder_1_children is not None - assert len(folder_1_children) == 1 - assert folder_1_children[0].uid == file_2_id - assert folder_1_children[0].name == "File 2" - - def test_scroll_descendants(self): - remote = self.remote_1 - - # Create documents - folder_1 = remote.make_folder(self.workspace_id, "Folder 1").uid - folder_2 = remote.make_folder(self.workspace_id, "Folder 2").uid - file_1 = remote.make_file( - self.workspace_id, "File 1.txt", content=b"Content of file 1." - ).uid - file_2 = remote.make_file( - folder_1, "File 2.txt", content=b"Content of file 2." - ).uid - - # Wait for ES completion - self.wait() - - # Check workspace descendants in one breath, ordered by remote path - scroll_res = remote.scroll_descendants(self.workspace_id, None) - assert isinstance(scroll_res, dict) - assert "scroll_id" in scroll_res - descendants = sorted(scroll_res["descendants"], key=operator.attrgetter("name")) - assert len(descendants) == 4 - - # File 1.txt - assert descendants[0].uid == file_1 - assert descendants[0].name == "File 1.txt" - assert not descendants[0].folderish - # File 2.txt - assert descendants[1].name == "File 2.txt" - assert not descendants[1].folderish - assert descendants[1].uid == file_2 - # Folder 1 - assert descendants[2].uid == folder_1 - assert descendants[2].name == "Folder 1" - assert descendants[2].folderish - # Folder 2 - assert descendants[3].uid == folder_2 - assert descendants[3].name == "Folder 2" - assert descendants[3].folderish - - # Check workspace descendants in several steps, ordered by remote path - descendants = [] - scroll_id = None - while True: - scroll_res = remote.scroll_descendants( - self.workspace_id, scroll_id, batch_size=2 - ) - assert isinstance(scroll_res, dict) - scroll_id = scroll_res["scroll_id"] - partial_descendants = scroll_res["descendants"] - if not partial_descendants: - break - descendants.extend(partial_descendants) - descendants = sorted(descendants, key=operator.attrgetter("name")) - assert len(descendants) == 4 - - # File 1.txt - assert descendants[0].uid == file_1 - assert descendants[0].name == "File 1.txt" - assert not descendants[0].folderish - # File 2.txt - assert descendants[1].name == "File 2.txt" - assert not descendants[1].folderish - assert descendants[1].uid == file_2 - # Folder 1 - assert descendants[2].uid == folder_1 - assert descendants[2].name == "Folder 1" - assert descendants[2].folderish - # Folder 2 - assert descendants[3].uid == folder_2 - assert descendants[3].name == "Folder 2" - assert descendants[3].folderish - - def test_make_folder(self): - remote = self.remote_1 - - fs_item_info = remote.make_folder(self.workspace_id, "My new folder") - assert fs_item_info is not None - assert fs_item_info.name == "My new folder" - assert fs_item_info.folderish - assert fs_item_info.digest_algorithm is None - assert fs_item_info.digest is None - assert fs_item_info.download_url is None - - def test_make_file(self): - remote = self.remote_1 - - # Check File document creation - fs_item_info = remote.make_file( - self.workspace_id, "My new file.odt", content=b"Content of my new file." - ) - assert fs_item_info is not None - assert fs_item_info.name == "My new file.odt" - assert not fs_item_info.folderish - digest_algorithm = fs_item_info.digest_algorithm - assert digest_algorithm == "md5" - digest = self._get_digest(digest_algorithm, b"Content of my new file.") - assert fs_item_info.digest == digest - - # Check Note document creation - fs_item_info = remote.make_file( - self.workspace_id, "My new note.txt", content=b"Content of my new note." - ) - assert fs_item_info is not None - assert fs_item_info.name == "My new note.txt" - assert not fs_item_info.folderish - digest_algorithm = fs_item_info.digest_algorithm - assert digest_algorithm == "md5" - digest = self._get_digest(digest_algorithm, b"Content of my new note.") - assert fs_item_info.digest == digest - - def test_make_file_custom_encoding(self): - remote = self.remote_1 - - # Create content encoded in utf-8 and cp1252 - unicode_content = "\xe9" # e acute - utf8_encoded = unicode_content.encode("utf-8") - utf8_digest = hashlib.md5(utf8_encoded).hexdigest() - cp1252_encoded = unicode_content.encode("cp1252") - - # Make files with this content - utf8_fs_id = remote.make_file( - self.workspace_id, "My utf-8 file.txt", content=utf8_encoded - ).uid - cp1252_fs_id = remote.make_file( - self.workspace_id, "My cp1252 file.txt", content=cp1252_encoded - ).uid - - # Check content - utf8_content = remote.get_content(utf8_fs_id) - assert utf8_content == utf8_encoded - cp1252_content = remote.get_content(cp1252_fs_id) - assert cp1252_content == utf8_encoded - - # Check digest - utf8_info = remote.get_fs_info(utf8_fs_id) - assert utf8_info.digest == utf8_digest - cp1252_info = remote.get_fs_info(cp1252_fs_id) - assert cp1252_info.digest == utf8_digest - - def test_update_content(self): - remote = self.remote_1 - - # Create file - fs_item_id = remote.make_file( - self.workspace_id, "Document 1.txt", content=b"Content of doc 1." - ).uid - - # Check file update - remote.update_content(fs_item_id, b"Updated content of doc 1.") - assert remote.get_content(fs_item_id) == b"Updated content of doc 1." - - def test_delete(self): - remote = self.remote_1 - - # Create file - fs_item_id = remote.make_file( - self.workspace_id, "Document 1.txt", content=b"Content of doc 1." - ).uid - assert remote.fs_exists(fs_item_id) - - # Delete file - remote.delete(fs_item_id) - assert not remote.fs_exists(fs_item_id) - - def test_exists(self): - remote = self.remote_1 - remote_doc = self.remote_document_client_1 - - # Check existing file system item - fs_item_id = remote.make_file( - self.workspace_id, "Document 1.txt", content=b"Content of doc 1." - ).uid - assert remote.fs_exists(fs_item_id) - - # Check non existing file system item (non existing document) - fs_item_id = FS_ITEM_ID_PREFIX + "fakeId" - assert not remote.fs_exists(fs_item_id) - - # Check non existing file system item (document without content) - doc_uid = remote_doc.make_file_with_no_blob(self.workspace, "Document 2.txt") - fs_item_id = FS_ITEM_ID_PREFIX + doc_uid - assert not remote.fs_exists(fs_item_id) - - # - # Test the API specific to the remote file system client - # - - def test_get_fs_item(self): - remote = self.remote_1 - - # Check file item - fs_item_id = remote.make_file( - self.workspace_id, "Document 1.txt", content=b"Content of doc 1." - ).uid - fs_item = remote.get_fs_item(fs_item_id) - assert fs_item is not None - assert fs_item["name"] == "Document 1.txt" - assert fs_item["id"] == fs_item_id - assert not fs_item["folder"] - - # Check file item using parent id - fs_item = remote.get_fs_item(fs_item_id, parent_fs_item_id=self.workspace_id) - assert fs_item is not None - assert fs_item["name"] == "Document 1.txt" - assert fs_item["id"] == fs_item_id - assert fs_item["parentId"] == self.workspace_id - - # Check folder item - fs_item_id = remote.make_folder(self.workspace_id, "Folder 1").uid - fs_item = remote.get_fs_item(fs_item_id) - assert fs_item is not None - assert fs_item["name"] == "Folder 1" - assert fs_item["id"] == fs_item_id - assert fs_item["folder"] - - # Check non existing file system item - fs_item_id = FS_ITEM_ID_PREFIX + "fakeId" - assert remote.get_fs_item(fs_item_id) is None - - def test_streaming_upload(self): - remote = self.remote_1 - - # Create a document by streaming a text file - file_path = make_tmp_file(remote.upload_tmp_dir, b"Some content.") - try: - fs_item_info = remote.stream_file( - self.workspace_id, file_path, filename="My streamed file.txt" - ) - finally: - file_path.unlink() - fs_item_id = fs_item_info.uid - assert fs_item_info.name == "My streamed file.txt" - assert remote.get_content(fs_item_id) == b"Some content." - - # Update a document by streaming a new text file - file_path = make_tmp_file(remote.upload_tmp_dir, b"Other content.") - try: - fs_item_info = remote.stream_update( - fs_item_id, file_path, filename="My updated file.txt" - ) - finally: - file_path.unlink() - assert fs_item_info.uid == fs_item_id - assert fs_item_info.name == "My updated file.txt" - assert remote.get_content(fs_item_id) == b"Other content." - - # Create a document by streaming a binary file - file_path = self.upload_tmp_dir / "testFile.pdf" - copyfile(self.location / "resources" / "files" / "testFile.pdf", file_path) - fs_item_info = remote.stream_file(self.workspace_id, file_path) - local_client = LocalTest(self.upload_tmp_dir) - assert fs_item_info.name == "testFile.pdf" - assert ( - fs_item_info.digest == local_client.get_info("/testFile.pdf").get_digest() - ) - - def test_mime_type_doc_type_association(self): - remote = self.remote_1 - remote_doc = self.remote_document_client_1 - - # Upload a PDF file, should create a File document - file_path = self.upload_tmp_dir / "testFile.pdf" - copyfile(self.location / "resources" / "files" / "testFile.pdf", file_path) - fs_item_info = remote.stream_file(self.workspace_id, file_path) - fs_item_id = fs_item_info.uid - doc_uid = fs_item_id.rsplit("#", 1)[1] - doc_type = remote_doc.get_info(doc_uid).doc_type - assert doc_type == "File" - - # Upload a JPG file, should create a Picture document - file_path = self.upload_tmp_dir / "cat.jpg" - copyfile(self.location / "resources" / "files" / "cat.jpg", file_path) - fs_item_info = remote.stream_file(self.workspace_id, file_path) - fs_item_id = fs_item_info.uid - doc_uid = fs_item_id.rsplit("#", 1)[1] - doc_type = remote_doc.get_info(doc_uid).doc_type - assert doc_type == "Picture" - - def test_unregister_nested_roots(self): - # Check that registering a parent folder of an existing root - # automatically unregister sub folders to avoid synchronization - # inconsistencies - remote = self.remote_document_client_1 - - # By default no root is synchronized - remote.unregister_as_root(self.workspace) - self.wait() - assert not remote.get_roots() - - folder = remote.make_folder(self.workspace, "Folder") - sub_folder_1 = remote.make_folder(folder, "Sub Folder 1") - sub_folder_2 = remote.make_folder(folder, "Sub Folder 2") - - # Register the sub folders as roots - remote.register_as_root(sub_folder_1) - remote.register_as_root(sub_folder_2) - assert len(remote.get_roots()) == 2 - - # Register the parent folder as root - remote.register_as_root(folder) - roots = remote.get_roots() - assert len(roots) == 1 - assert roots[0].uid == folder - - # Unregister the parent folder - remote.unregister_as_root(folder) - assert not remote.get_roots() - - def test_lock_unlock(self): - remote = self.remote_document_client_1 - doc_id = remote.make_file( - self.workspace, "TestLocking.txt", content=b"File content" - ) - - status = remote.is_locked(doc_id) - assert not status - remote.lock(doc_id) - assert remote.is_locked(doc_id) - - remote.unlock(doc_id) - assert not remote.is_locked(doc_id) - - @staticmethod - def _get_digest(algorithm: str, content: bytes) -> str: - hasher = getattr(hashlib, algorithm) - if hasher is None: - raise RuntimeError(f"Unknown digest algorithm: {algorithm}") - return hasher(content).hexdigest() - - -class TestRemoteFileSystemClient2(TwoUsersTest): - def setUp(self): - # Bind the test workspace as sync root for user 1 - remote_doc = self.remote_document_client_1 - remote = self.remote_1 - remote_doc.register_as_root(self.workspace) - - # Fetch the id of the workspace folder item - info = remote.get_filesystem_root_info() - self.workspace_id = remote.get_fs_children(info.uid)[0].uid - - def test_modification_flags_locked_document(self): - remote = self.remote_1 - fs_item_id = remote.make_file( - self.workspace_id, "Document 1.txt", content=b"Content of doc 1." - ).uid - - # Check flags for a document that isn't locked - info = remote.get_fs_info(fs_item_id) - assert info.can_rename - assert info.can_update - assert info.can_delete - assert info.lock_owner is None - assert info.lock_created is None - - # Check flags for a document locked by the current user - doc_uid = fs_item_id.rsplit("#", 1)[1] - remote.lock(doc_uid) - info = remote.get_fs_info(fs_item_id) - assert info.can_rename - assert info.can_update - assert info.can_delete - lock_info_available = remote.get_fs_item(fs_item_id).get("lockInfo") is not None - if lock_info_available: - assert info.lock_owner == self.user_1 - assert info.lock_created is not None - remote.unlock(doc_uid) - - # Check flags for a document locked by another user - self.remote_2.lock(doc_uid) - info = remote.get_fs_info(fs_item_id) - assert not info.can_rename - assert not info.can_update - assert not info.can_delete - if lock_info_available: - assert info.lock_owner == self.user_2 - assert info.lock_created is not None - - # Check flags for a document unlocked by another user - self.remote_2.unlock(doc_uid) - info = remote.get_fs_info(fs_item_id) - assert info.can_rename - assert info.can_update - assert info.can_delete - assert info.lock_owner is None - assert info.lock_created is None From 689bd901eb43443025dfe36e2c149490d0fcce19 Mon Sep 17 00:00:00 2001 From: Anindya Roy Date: Tue, 16 Jan 2024 10:25:38 +0530 Subject: [PATCH 27/36] NXDRIVE-2860: Code Coverage - removed not working old test cases - 16/01 --1 --- tests/functional/test_conflicts.py | 355 ----- tests/functional/test_direct_transfer.py | 1218 ----------------- tests/functional/test_local_deletion.py | 309 ----- tests/functional/test_local_filter.py | 198 --- .../functional/test_local_move_and_rename.py | 703 ---------- tests/functional/test_readonly.py | 536 -------- tests/functional/test_reinit_database.py | 118 -- .../functional/test_remote_move_and_rename.py | 891 ------------ tests/functional/test_synchronization.py | 1184 ---------------- tests/functional/test_watchers.py | 2 + 10 files changed, 2 insertions(+), 5512 deletions(-) delete mode 100644 tests/functional/test_conflicts.py delete mode 100644 tests/functional/test_direct_transfer.py delete mode 100644 tests/functional/test_local_deletion.py delete mode 100644 tests/functional/test_local_filter.py delete mode 100644 tests/functional/test_local_move_and_rename.py delete mode 100644 tests/functional/test_readonly.py delete mode 100644 tests/functional/test_reinit_database.py delete mode 100644 tests/functional/test_remote_move_and_rename.py delete mode 100644 tests/functional/test_synchronization.py diff --git a/tests/functional/test_conflicts.py b/tests/functional/test_conflicts.py deleted file mode 100644 index 2d6b6aa888..0000000000 --- a/tests/functional/test_conflicts.py +++ /dev/null @@ -1,355 +0,0 @@ -import shutil -import time - -import pytest - -from .conftest import OS_STAT_MTIME_RESOLUTION, SYNC_ROOT_FAC_ID, TwoUsersTest - - -class TestConflicts(TwoUsersTest): - def setUp(self): - self.workspace_id = f"{SYNC_ROOT_FAC_ID}{self.workspace}" - self.file_id = self.remote_1.make_file( - self.workspace_id, "test.txt", content=b"Some content" - ).uid - self.get_remote_state = self.engine_1.dao.get_normal_state_from_remote - self.engine_1.start() - self.wait_sync(wait_for_async=True) - assert self.local_1.exists("/test.txt") - - def test_self_conflict(self): - remote = self.remote_1 - local = self.local_1 - # Update content on both sides by the same user, remote last - remote.update_content(self.file_id, b"Remote update") - local.update_content("/test.txt", b"Local update") - self.wait_sync(wait_for_async=True) - - assert len(local.get_children_info("/")) == 1 - assert local.exists("/test.txt") - assert local.get_content("/test.txt") == b"Local update" - - remote_children = remote.get_fs_children(self.workspace_id) - assert len(remote_children) == 1 - assert remote_children[0].uid == self.file_id - assert remote_children[0].name == "test.txt" - assert remote.get_content(remote_children[0].uid) == b"Remote update" - assert self.get_remote_state(self.file_id).pair_state == "conflicted" - - # Update content on both sides by the same user, local last - remote.update_content(self.file_id, b"Remote update 2") - time.sleep(OS_STAT_MTIME_RESOLUTION) - local.update_content("/test.txt", b"Local update 2") - self.wait_sync(wait_for_async=True) - - assert len(local.get_children_info("/")) == 1 - assert local.exists("/test.txt") - assert local.get_content("/test.txt") == b"Local update 2" - - remote_children = remote.get_fs_children(self.workspace_id) - assert len(remote_children) == 1 - assert remote_children[0].uid == self.file_id - assert remote_children[0].name == "test.txt" - assert remote.get_content(remote_children[0].uid) == b"Remote update 2" - assert self.get_remote_state(self.file_id).pair_state == "conflicted" - - def test_conflict_renamed_modified(self): - local = self.local_1 - remote = self.remote_2 - - # Update content on both sides by different users, remote last - time.sleep(OS_STAT_MTIME_RESOLUTION) - # Race condition is still possible - remote.update_content(self.file_id, b"Remote update") - remote.rename(self.file_id, "plop.txt") - local.update_content("/test.txt", b"Local update") - self.wait_sync(wait_for_async=True) - - assert remote.get_content(self.file_id) == b"Remote update" - assert local.get_content("/test.txt") == b"Local update" - assert self.get_remote_state(self.file_id).pair_state == "conflicted" - - """ - def test_resolve_local_renamed_modified(self): - remote = self.remote_2 - - self.test_conflict_renamed_modified() - # Resolve to local file - pair = self.get_remote_state(self.file_id) - assert pair - self.engine_1.resolve_with_local(pair.id) - self.wait_sync(wait_for_async=True) - - remote_children = remote.get_fs_children(self.workspace_id) - assert len(remote_children) == 1 - assert remote_children[0].uid == self.file_id - assert remote_children[0].name == "test.txt" - assert remote.get_content(remote_children[0].uid) == b"Local update" - """ - - def test_real_conflict(self): - local = self.local_1 - remote = self.remote_2 - - # Update content on both sides by different users, remote last - time.sleep(OS_STAT_MTIME_RESOLUTION) - # Race condition is still possible - remote.update_content(self.file_id, b"Remote update") - local.update_content("/test.txt", b"Local update") - self.wait_sync(wait_for_async=True) - - assert remote.get_content(self.file_id) == b"Remote update" - assert local.get_content("/test.txt") == b"Local update" - assert self.get_remote_state(self.file_id).pair_state == "conflicted" - - # Update content on both sides by different users, local last - remote.update_content(self.file_id, b"Remote update 2") - time.sleep(OS_STAT_MTIME_RESOLUTION) - local.update_content("/test.txt", b"Local update 2") - self.wait_sync(wait_for_async=True) - - assert remote.get_content(self.file_id) == b"Remote update 2" - assert local.get_content("/test.txt") == b"Local update 2" - assert self.get_remote_state(self.file_id).pair_state == "conflicted" - - """ - def test_resolve_local(self): - self.test_real_conflict() - # Resolve to local file - pair = self.get_remote_state(self.file_id) - assert pair - self.engine_1.resolve_with_local(pair.id) - self.wait_sync(wait_for_async=True) - assert self.remote_2.get_content(self.file_id) == b"Local update 2" - """ - - """ - def test_resolve_local_folder(self): - local = self.local_1 - remote = self.remote_1 - - self.engine_1.suspend() - folder = remote.make_folder(self.workspace_id, "ABC").uid - self.engine_1.resume() - self.wait_sync(wait_for_async=True) - - self.engine_1.suspend() - local.rename("/ABC", "ABC_123") - remote.rename(folder, "ABC_234") - self.engine_1.resume() - self.wait_sync(wait_for_async=True) - - pair = self.get_remote_state(folder) - assert pair.pair_state == "conflicted" - - self.engine_1.resolve_with_local(pair.id) - self.wait_sync(wait_for_async=True) - pair = self.get_remote_state(folder) - assert pair.pair_state == "synchronized" - - children = local.get_children_info("/") - assert len(children) == 2 - assert not children[1].folderish - assert children[0].folderish - assert children[0].name == "ABC_123" - - children = remote.get_fs_children(self.workspace_id) - assert len(children) == 2 - assert not children[0].folderish - assert children[1].folderish - assert children[1].name == "ABC_123" - """ - - """ - def test_resolve_remote(self): - self.test_real_conflict() - # Resolve to local file - pair = self.get_remote_state(self.file_id) - assert pair - self.engine_1.resolve_with_remote(pair.id) - self.wait_sync(wait_for_async=True) - assert self.local_1.get_content("/test.txt") == b"Remote update 2" - """ - - """ - def test_conflict_on_lock(self): - doc_uid = self.file_id.split("#")[-1] - local = self.local_1 - remote = self.remote_2 - self.remote_document_client_2.lock(doc_uid) - local.update_content("/test.txt", b"Local update") - self.wait_sync(wait_for_async=True) - assert local.get_content("/test.txt") == b"Local update" - assert remote.get_content(self.file_id) == b"Some content" - remote.update_content(self.file_id, b"Remote update") - self.wait_sync(wait_for_async=True) - assert local.get_content("/test.txt") == b"Local update" - assert remote.get_content(self.file_id) == b"Remote update" - assert self.get_remote_state(self.file_id).pair_state == "conflicted" - self.remote_document_client_2.unlock(doc_uid) - self.wait_sync(wait_for_async=True) - assert local.get_content("/test.txt") == b"Local update" - assert remote.get_content(self.file_id) == b"Remote update" - assert self.get_remote_state(self.file_id).pair_state == "conflicted" - """ - - @pytest.mark.randombug( - "NXDRIVE-776: Random bug but we cannot use " - "pytest.mark.random because this test would " - "take ~30 minutes to complete.", - mode="BYPASS", - ) - def test_XLS_conflict_on_locked_document(self): - self._XLS_local_update_on_locked_document(locked_from_start=False) - - @pytest.mark.randombug( - "NXDRIVE-776: Random bug but we cannot use " - "pytest.mark.random because this test would " - "take ~30 minutes to complete.", - mode="BYPASS", - ) - def test_XLS_conflict_on_locked_document_from_start(self): - self._XLS_local_update_on_locked_document() - - def _XLS_local_update_on_locked_document(self, locked_from_start=True): - remote = self.remote_2 - local = self.local_1 - - # user2: create remote XLS file - fs_item_id = remote.make_file( - self.workspace_id, - "Excel 97 file.xls", - b"\xd0\xcf\x11\xe0\xa1\xb1\x1a\xe1\x00\x00", - ).uid - doc_uid = fs_item_id.split("#")[-1] - self.wait_sync(wait_for_async=True) - assert local.exists("/Excel 97 file.xls") - - if locked_from_start: - # user2: lock document before user1 opening it - self.remote_document_client_2.lock(doc_uid) - self.wait_sync(wait_for_async=True) - local.unset_readonly("/Excel 97 file.xls") - - # user1: simulate opening XLS file with MS Office ~= update its content - local.update_content( - "/Excel 97 file.xls", b"\xd0\xcf\x11\xe0\xa1\xb1\x1a\xe1\x00\x00\x01" - ) - self.wait_sync(wait_for_async=locked_from_start) - pair_state = self.get_remote_state(fs_item_id) - assert pair_state - if locked_from_start: - # remote content hasn't changed, pair state is conflicted - # and remote_can_update flag is False - assert ( - remote.get_content(fs_item_id) - == b"\xd0\xcf\x11\xe0\xa1\xb1\x1a\xe1\x00\x00" - ) - assert pair_state.pair_state == "unsynchronized" - assert not pair_state.remote_can_update - else: - # remote content has changed, pair state is synchronized - # and remote_can_update flag is True - assert ( - remote.get_content(fs_item_id) - == b"\xd0\xcf\x11\xe0\xa1\xb1\x1a\xe1\x00\x00\x01" - ) - assert pair_state.pair_state == "synchronized" - assert pair_state.remote_can_update - - if not locked_from_start: - # user2: lock document after user1 opening it - self.remote_document_client_2.lock(doc_uid) - self.wait_sync(wait_for_async=True) - - # user1: simulate updating XLS file with MS Office - # 1. Create empty file 787D3000 - # 2. Update 787D3000 - # 3. Update Excel 97 file.xls - # 4. Update 787D3000 - # 5. Move Excel 97 file.xls to 1743B25F.tmp - # 6. Move 787D3000 to Excel 97 file.xls - # 7. Update Excel 97 file.xls - # 8. Update 1743B25F.tmp - # 9. Update Excel 97 file.xls - # 10. Delete 1743B25F.tmp - local.make_file("/", "787D3000") - local.update_content("/787D3000", b"\xd0\xcf\x11\xe0\xa1\xb1\x1a\xe1\x00\x00") - local.unset_readonly("/Excel 97 file.xls") - local.update_content( - "/Excel 97 file.xls", b"\xd0\xcf\x11\xe0\xa1\xb1\x1a\xe1\x00\x00\x02" - ) - local.update_content( - "/787D3000", b"\xd0\xcf\x11\xe0\xa1\xb1\x1a\xe1\x00\x00\x03" - ) - shutil.move(local.abspath("/Excel 97 file.xls"), local.abspath("/1743B25F.tmp")) - shutil.move(local.abspath("/787D3000"), local.abspath("/Excel 97 file.xls")) - local.update_content( - "/Excel 97 file.xls", b"\xd0\xcf\x11\xe0\xa1\xb1\x1a\xe1\x00\x00\x03\x04" - ) - local.update_content( - "/1743B25F.tmp", b"\xd0\xcf\x11\xe0\xa1\xb1\x1a\xe1\x00\x00" - ) - local.update_content( - "/Excel 97 file.xls", b"\xd0\xcf\x11\xe0\xa1\xb1\x1a\xe1\x00\x00\x03" - ) - local.delete_final("/1743B25F.tmp") - self.wait_sync(wait_for_async=not locked_from_start) - assert len(local.get_children_info("/")) == 2 - assert ( - local.get_content("/Excel 97 file.xls") - == b"\xd0\xcf\x11\xe0\xa1\xb1\x1a\xe1\x00\x00\x03" - ) - # remote content hasn't changed, pair state is conflicted - # and remote_can_update flag is False - if locked_from_start: - assert ( - remote.get_content(fs_item_id) - == b"\xd0\xcf\x11\xe0\xa1\xb1\x1a\xe1\x00\x00" - ) - else: - assert ( - remote.get_content(fs_item_id) - == b"\xd0\xcf\x11\xe0\xa1\xb1\x1a\xe1\x00\x00\x01" - ) - pair_state = self.get_remote_state(fs_item_id) - assert pair_state - assert pair_state.pair_state == "unsynchronized" - assert not pair_state.remote_can_update - - # user2: remote update, conflict is detected once again - # and remote_can_update flag is still False - remote.update_content( - fs_item_id, - b"\xd0\xcf\x11\xe0\xa1\xb1\x1a\xe1\x00\x00\x02", - "New Excel 97 file.xls", - ) - self.wait_sync(wait_for_async=True) - - assert len(local.get_children_info("/")) == 2 - assert local.exists("/Excel 97 file.xls") - assert ( - local.get_content("/Excel 97 file.xls") - == b"\xd0\xcf\x11\xe0\xa1\xb1\x1a\xe1\x00\x00\x03" - ) - - assert len(remote.get_fs_children(self.workspace_id)) == 2 - assert remote.get_fs_info(fs_item_id).name == "New Excel 97 file.xls" - assert ( - remote.get_content(fs_item_id) - == b"\xd0\xcf\x11\xe0\xa1\xb1\x1a\xe1\x00\x00\x02" - ) - - pair_state = self.get_remote_state(fs_item_id) - assert pair_state - assert pair_state.pair_state == "conflicted" - assert not pair_state.remote_can_update - - # user2: unlock document, conflict is detected once again - # and remote_can_update flag is now True - self.remote_document_client_2.unlock(doc_uid) - self.wait_sync(wait_for_async=True) - pair_state = self.get_remote_state(fs_item_id) - assert pair_state - assert pair_state.pair_state == "conflicted" - assert pair_state.remote_can_update diff --git a/tests/functional/test_direct_transfer.py b/tests/functional/test_direct_transfer.py deleted file mode 100644 index b095d642f2..0000000000 --- a/tests/functional/test_direct_transfer.py +++ /dev/null @@ -1,1218 +0,0 @@ -""" -Test the Direct Transfer feature in different scenarii. -""" -import logging -import re -from pathlib import Path -from time import sleep -from typing import Optional -from unittest.mock import patch -from uuid import uuid4 - -import pytest -from nuxeo.exceptions import HTTPError - -from nxdrive.client.uploader.direct_transfer import DirectTransferUploader -from nxdrive.constants import TransferStatus -from nxdrive.exceptions import NotFound -from nxdrive.options import Options -from nxdrive.utils import get_tree_list - -from .. import ensure_no_exception -from .conftest import OneUserNoSync, OneUserTest - - -class DirectTransfer: - def setUp(self): - # No sync root, to ease testing - self.remote_1.unregister_as_root(self.workspace) - self.engine_1.start() - - # Lower chunk_* options to have chunked uploads without having to create big files - self.default_chunk_limit = Options.chunk_limit - self.default_chunk_size = Options.chunk_size - Options.chunk_limit = 1 - Options.chunk_size = 1 - - # The file used for the Direct Transfer - source = ( - self.location / "resources" / "databases" / "engine_migration_duplicate.db" - ) - assert source.stat().st_size > 1024 * 1024 * 1.5 - source_data = source.read_bytes() - - # Work with a copy of the file to allow parallel testing - self.file = self.tmpdir / f"{uuid4()}.bin" - self.file.write_bytes(source_data * 2) - self.file_size = self.file.stat().st_size - assert self.file_size > 1024 * 1024 * 3 # Must be > 3 MiB - - def tearDown(self): - # Restore options - Options.chunk_limit = self.default_chunk_limit - Options.chunk_size = self.default_chunk_size - - def has_blob(self) -> bool: - """Check that *self.file* exists on the server and has a blob attached.""" - try: - children = self.remote_document_client_1.documents.get_children( - path=self.ws.path - ) - assert len(children) == 1 - doc = children[0] - assert doc.properties["dc:title"] == self.file.name - except Exception: - return False - return bool(doc.properties["file:content"]) - - def no_uploads(self) -> bool: - """Check there is no ongoing uploads.""" - assert not self.engine_1.dao.get_dt_upload(path=self.file) - - def sync_and_check( - self, should_have_blob: bool = True, check_for_blob: bool = True - ) -> None: - # Sync - self.wait_sync() - - # Check the error count - assert not self.engine_1.dao.get_errors(limit=0) - - # Check the uploads count - assert not list(self.engine_1.dao.get_dt_uploads()) - - # Check the file exists on the server and has a blob attached - - if not check_for_blob: - # Useful when checking for duplicates creation - return - - if should_have_blob: - assert self.has_blob() - else: - assert not self.has_blob() - - def direct_transfer( - self, - duplicate_behavior: str = "create", - last_local_selected_location: Optional[Path] = None, - new_folder: Optional[str] = None, - ) -> None: - self.engine_1.direct_transfer( - {self.file: self.file_size}, - self.ws.path, - self.ws.uid, - self.ws.title, - duplicate_behavior=duplicate_behavior, - last_local_selected_location=last_local_selected_location, - new_folder=new_folder, - ) - - def test_upload(self): - """A regular Direct Transfer.""" - - # There is no upload, right now - self.no_uploads() - - with ensure_no_exception(): - self.direct_transfer() - self.sync_and_check() - - def test_upload_new_folder(self): - """A regular Direct Transfer inside a new remote folder.""" - - # There is no upload, right now - self.no_uploads() - new_folder_name = str(uuid4())[:6] - with ensure_no_exception(): - self.direct_transfer(new_folder=new_folder_name) - self.sync_and_check(check_for_blob=False) - - children = self.remote_document_client_1.get_children_info(self.workspace) - assert len(children) == 1 - assert children[0].name == new_folder_name - subfolder = self.remote_document_client_1.get_children_info(children[0].uid) - assert len(subfolder) == 1 - assert subfolder[0].name == self.file.name - - def test_upload_new_folder_empty(self): - """An empty Direct Transfer that should just create a new remote folder.""" - - # There is no upload, right now - self.no_uploads() - new_folder_name = str(uuid4())[:6] - with ensure_no_exception(): - self.engine_1.direct_transfer( - {}, - self.ws.path, - self.ws.uid, - self.ws.title, - duplicate_behavior="create", - last_local_selected_location=None, - new_folder=new_folder_name, - ) - self.sync_and_check(check_for_blob=False) - - children = self.remote_document_client_1.get_children_info(self.workspace) - assert len(children) == 1 - assert children[0].name == new_folder_name - assert not self.remote_document_client_1.get_children_info(children[0].uid) - - """ - def test_cancel_upload(self): - "" - Pause the transfer by simulating a click on the pause/resume icon - on the current upload in the DT window; and cancel the upload. - Verify that the linked session has been updated after the - upload cancel. - "" - expression = re.compile(#check old_functional) - - def callback(*_): - ""This will mimic what is done in TransferItem.qml."" - # Ensure we have 1 ongoing upload - uploads = list(dao.get_dt_uploads()) - assert uploads - upload = uploads[0] - assert upload.status == TransferStatus.ONGOING - - # Verify the session status - doc_pair = dao.get_state_from_id(1) - assert doc_pair - session = dao.get_session(1) - assert session - assert session.total_items == 1 - assert session.status == TransferStatus.ONGOING - - # Pause the upload - dao.pause_transfer("upload", upload.uid, 50.0) - - engine = self.engine_1 - dao = self.engine_1.dao - - # There is no upload, right now - self.no_uploads() - - with patch.object(engine.remote, "upload_callback", new=callback): - with ensure_no_exception(): - self.direct_transfer(last_local_selected_location=self.file.parent) - self.wait_sync() - - assert dao.get_dt_uploads_with_status(TransferStatus.PAUSED) - - last_location = dao.get_config("dt_last_local_selected_location") - assert last_location - assert Path(last_location) == self.file.parent - - # Cancel the upload - upload = list(dao.get_dt_uploads())[0] - engine.cancel_upload(upload.uid) - - with self._caplog.at_level(logging.INFO): - self.sync_and_check(should_have_blob=False) - - assert not dao.get_state_from_local(upload.path) - - # Verify the session status after cancellation - doc_pair = dao.get_state_from_id(1) - assert doc_pair - session = dao.get_session(1) - assert session.total_items == 0 - assert session.status == TransferStatus.CANCELLED - - # A new Notification log should appear - records = map(str, self._caplog.records) - matches = list(filter(expression.match, records)) - assert not matches - """ - - def test_with_engine_not_started(self): - """A Direct Transfer should work even if engines are stopped.""" - self.app.quit() - pytest.xfail("Waiting for NXDRIVE-1910") - - self.engine_1.stop() - - # There is no upload, right now - self.no_uploads() - - with ensure_no_exception(): - self.direct_transfer() - self.sync_and_check() - - @Options.mock() - def test_duplicate_file_create(self): - """ - The file already exists on the server. - The user wants to continue the transfer and create a duplicate. - """ - - with ensure_no_exception(): - # 1st upload: OK - self.direct_transfer() - self.sync_and_check() - - # 2nd upload: a new document will be created - self.direct_transfer(duplicate_behavior="create") - self.sync_and_check(check_for_blob=False) - - # Ensure there are 2 documents on the server - children = self.remote_document_client_1.get_children_info(self.workspace) - assert len(children) == 2 - assert children[0].name == self.file.name - assert children[1].name == self.file.name - - def test_duplicate_file_ignore(self): - """ - The file already exists on the server. - The user wants to cancel the transfer to prevent duplicates. - """ - - class NoChunkUpload(DirectTransferUploader): - def upload_chunks(self, *_, **__): - """Patch Remote.upload() to be able to check that nothing will be uploaded.""" - assert 0, "No twice upload should be done!" - - def upload(*args, **kwargs): - """Set our specific uploader to check for twice upload.""" - kwargs.pop("uploader") - return upload_orig(*args, uploader=NoChunkUpload, **kwargs) - - engine = self.engine_1 - upload_orig = engine.remote.upload - - # There is no upload, right now - self.no_uploads() - - with ensure_no_exception(): - # 1st upload: OK - self.direct_transfer() - self.sync_and_check() - - # 2nd upload: it should be cancelled - with patch.object(engine.remote, "upload", new=upload): - self.direct_transfer(duplicate_behavior="ignore") - self.sync_and_check() - - # Ensure there is only 1 document on the server - self.sync_and_check() - - """ - @Options.mock() - def test_duplicate_file_override(self): - "" - The file already exists on the server. - The user wants to continue the transfer and replace the document. - "" - - with ensure_no_exception(): - # 1st upload: OK - self.direct_transfer() - self.sync_and_check() - - # To ease testing, we change local file content - self.file.write_bytes(b"blob changed!") - - # 2nd upload: the blob should be replaced on the server - self.direct_transfer(duplicate_behavior="override") - self.sync_and_check() - - # Ensure there is only 1 document on the server - children = self.remote_document_client_1.get_children_info(self.workspace) - assert len(children) == 1 - assert children[0].name == self.file.name - - # Ensure the blob content was updated - assert ( - self.remote_1.get_blob(children[0].uid, xpath="file:content") - == b"blob changed!" - ) - """ - - def test_pause_upload_manually(self): - """ - Pause the transfer by simulating a click on the pause/resume icon - on the current upload in the systray menu. - """ - - def callback(*_): - """ - This will mimic what is done in SystrayTranfer.qml: - - call API.pause_transfer() that will call: - - engine.dao.pause_transfer(nature, transfer_uid) - Then the upload will be paused in Remote.upload(). - """ - # Ensure we have 1 ongoing upload - uploads = list(dao.get_dt_uploads()) - assert uploads - upload = uploads[0] - assert upload.status == TransferStatus.ONGOING - - # Pause the upload - dao.pause_transfer("upload", upload.uid, 50.0) - - engine = self.engine_1 - dao = self.engine_1.dao - - # There is no upload, right now - self.no_uploads() - - with patch.object(engine.remote, "upload_callback", new=callback): - with ensure_no_exception(): - self.direct_transfer() - self.wait_sync() - assert dao.get_dt_uploads_with_status(TransferStatus.PAUSED) - - # Resume the upload - engine.resume_transfer( - "upload", list(dao.get_dt_uploads())[0].uid, is_direct_transfer=True - ) - self.sync_and_check() - - """ - def test_pause_upload_automatically(self): - "" - Pause the transfer by simulating an application exit - or clicking on the Suspend menu entry from the systray. - "" - - def callback(*_): - ""This will mimic what is done in SystrayMenu.qml: suspend the app."" - # Ensure we have 1 ongoing upload - uploads = list(dao.get_dt_uploads()) - assert uploads - upload = uploads[0] - assert upload.status == TransferStatus.ONGOING - - # Suspend! - self.manager_1.suspend() - - engine = self.engine_1 - dao = engine.dao - - # There is no upload, right now - self.no_uploads() - - with patch.object(engine.remote, "upload_callback", new=callback): - with ensure_no_exception(): - self.direct_transfer() - self.wait_sync() - assert dao.get_dt_uploads_with_status(TransferStatus.SUSPENDED) - - # Resume the upload - self.manager_1.resume() - self.sync_and_check() - """ - - """ - def test_modifying_paused_upload(self): - ""Modifying a paused upload should discard the current upload."" - - def callback(*_): - ""Pause the upload and apply changes to the document."" - # Ensure we have 1 ongoing upload - uploads = list(dao.get_dt_uploads()) - assert uploads - upload = uploads[0] - assert upload.status == TransferStatus.ONGOING - - # Pause the upload - dao.pause_transfer("upload", upload.uid, 50.0) - - # Apply changes to the file - self.file.write_bytes(b"locally changed") - - engine = self.engine_1 - dao = engine.dao - - # There is no upload, right now - self.no_uploads() - - with patch.object(engine.remote, "upload_callback", new=callback): - with ensure_no_exception(): - self.direct_transfer() - self.wait_sync() - - # Resume the upload - engine.resume_transfer( - "upload", list(dao.get_dt_uploads())[0].uid, is_direct_transfer=True - ) - self.sync_and_check() - # Check the local content is correct - assert self.file.read_bytes() == b"locally changed" - """ - - """ - @not_windows( - reason="Cannot test the behavior as the local deletion is blocked by the OS." - ) - def test_deleting_paused_upload(self): - ""Deleting a paused upload should discard the current upload."" - - def callback(*_): - ""Pause the upload and delete the document."" - # Ensure we have 1 ongoing upload - uploads = list(dao.get_dt_uploads()) - assert uploads - upload = uploads[0] - assert upload.status == TransferStatus.ONGOING - - # Pause the upload - dao.pause_transfer("upload", upload.uid, 50.0) - - # Remove the document - # (this is the problematic part on Windows, because for the - # file descriptor to be released we need to escape from - # Remote.upload(), which is not possible from here) - self.file.unlink() - assert not self.file.exists() - - engine = self.engine_1 - dao = engine.dao - - # There is no upload, right now - self.no_uploads() - - with patch.object(engine.remote, "upload_callback", new=callback): - with ensure_no_exception(): - self.direct_transfer() - self.wait_sync() - - # Resume the upload - engine.resume_transfer( - "upload", list(dao.get_dt_uploads())[0].uid, is_direct_transfer=True - ) - self.sync_and_check(should_have_blob=False) - """ - - def test_server_error_but_upload_ok(self): - """ - Test an error happening after chunks were uploaded and the FileManager.Import operation call. - This could happen if a proxy does not understand well the final requests as seen in NXDRIVE-1753. - """ - self.app.quit() - pytest.skip("Not yet implemented.") - - class BadUploader(DirectTransferUploader): - """Used to simulate bad server responses.""" - - def link_blob_to_doc(self, *args, **kwargs): - """Simulate a server error.""" - # Call the original method to effectively end the upload process - super().link_blob_to_doc(*args, **kwargs) - - # The file should be present on the server - # assert self.remote.exists(file_path) - - # There should be 1 upload with DONE transfer status - uploads = list(dao.get_dt_uploads()) - assert len(uploads) == 1 - upload = uploads[0] - assert upload.status == TransferStatus.DONE - - # And throw an error - stack = "The proxy server received an invalid response from an upstream server." - raise HTTPError( - status=502, message="Mocked Proxy Error", stacktrace=stack - ) - - def upload(*args, **kwargs): - """Set our specific uploader to simulate server error.""" - kwargs.pop("uploader") - return upload_orig(*args, uploader=BadUploader, **kwargs) - - # file_path = f"{self.ws.path}/{self.file.name}" - engine = self.engine_1 - dao = engine.dao - upload_orig = engine.remote.upload - - # There is no upload, right now - self.no_uploads() - - with patch.object(engine.remote, "upload", new=upload): - with ensure_no_exception(): - self.direct_transfer() - self.wait_sync() - - # There should be no upload as the Processor has checked the file existence - # on the server and so deleted the upload from the database - self.no_uploads() - - self.sync_and_check() - - def test_upload_ok_but_network_lost_in_the_meantime(self): - """ - NXDRIVE-2233 scenario: - - - Start a Direct Transfer. - - When all chunks are uploaded, and just after having called the FileManager - operation: the network connection is lost. - - The request being started, it has a 6 hours timeout. - - But the document was created on the server because the call has been made. - - Finally, after 6 hours, the network was restored in the meantime, but the - FileManager will throw a 404 error because the batchId was already consumed. - - The transfer will be displayed in the Direct Transfer window, but nothing more - will be done. - - Such transfer must be removed from the database. - """ - - class BadUploader(DirectTransferUploader): - """Used to simulate bad server responses.""" - - def link_blob_to_doc(self, *args, **kwargs): - """End the upload and simulate a network loss.""" - # Call the original method to effectively end the upload process - super().link_blob_to_doc(*args, **kwargs) - - # And throw an error - raise NotFound("Mock'ed error") - - def upload(*args, **kwargs): - """Set our specific uploader.""" - kwargs.pop("uploader") - return upload_orig(*args, uploader=BadUploader, **kwargs) - - # file_path = f"{self.ws.path}/{self.file.name}" - engine = self.engine_1 - dao = engine.dao - upload_orig = engine.remote.upload - - # There is no upload, right now - self.no_uploads() - - with patch.object(engine.remote, "upload", new=upload): - with ensure_no_exception(): - self.direct_transfer() - self.wait_sync() - - # The document has been created - self.sync_and_check() - - # There should be no upload as the Processor has made the clean-up - self.no_uploads() - - # There is no state to handle in the database - assert not dao.get_local_children(Path("/")) - - """ - def test_server_error_upload(self): - ""Test a server error happening after chunks were uploaded, at the Blob.AttachOnDocument operation call."" - - class BadUploader(DirectTransferUploader): - ""Used to simulate bad server responses."" - - def link_blob_to_doc(self, *args, **kwargs): - ""Simulate a server error."" - raise ConnectionError("Mocked exception") - - def upload(*args, **kwargs): - ""Set our specific uploader to simulate server error."" - kwargs.pop("uploader") - return upload_orig(*args, uploader=BadUploader, **kwargs) - - engine = self.engine_1 - dao = engine.dao - upload_orig = engine.remote.upload - - # There is no upload, right now - self.no_uploads() - - with patch.object(engine.remote, "upload", new=upload): - with ensure_no_exception(): - self.direct_transfer() - self.wait_sync() - - # There should be 1 upload with ONGOING transfer status - uploads = list(dao.get_dt_uploads()) - assert len(uploads) == 1 - upload = uploads[0] - assert upload.status == TransferStatus.DONE - - # The file does not exist on the server - assert not self.has_blob() - - self.sync_and_check() - """ - - """ - def test_chunk_upload_error(self): - ""Test a server error happening while uploading chunks."" - - def callback(uploader): - ""Mimic a connection issue after chunk 1 is sent."" - if len(uploader.blob.uploadedChunkIds) > 1: - raise ConnectionError("Mocked error") - - engine = self.engine_1 - dao = engine.dao - bad_remote = self.get_bad_remote() - bad_remote.upload_callback = callback - - # There is no upload, right now - self.no_uploads() - - with patch.object(engine, "remote", new=bad_remote), ensure_no_exception(): - self.direct_transfer() - self.wait_sync(timeout=3) - - # There should be 1 upload with ONGOING transfer status - uploads = list(dao.get_dt_uploads()) - assert len(uploads) == 1 - upload = uploads[0] - assert upload.status == TransferStatus.ONGOING - - # The file does not exist on the server - assert not self.has_blob() - - self.sync_and_check() - """ - - -class TestDirectTransfer(OneUserTest, DirectTransfer): - """Direct Transfer in "normal" mode, i.e.: when synchronization features are enabled.""" - - def setUp(self): - DirectTransfer.setUp(self) - - def wait_sync(self, *args, **kwargs): - sleep(3) - super().wait_sync(*args, **kwargs) - - -class TestDirectTransferNoSync(OneUserNoSync, DirectTransfer): - """Direct Transfer should work when synchronization features are not enabled.""" - - def setUp(self): - DirectTransfer.setUp(self) - - def wait_sync(self, *args, **kwargs): - sleep(3) - super().wait_sync(*args, **kwargs) - - -class DirectTransferFolder: - def setUp(self): - if not self.engine_1.have_folder_upload: - self.app.quit() - pytest.skip("FileManager.CreateFolder API not available.") - - # No sync root, to ease testing - self.remote_1.unregister_as_root(self.workspace) - self.engine_1.start() - - def get_children(self, path, children_list, key): - children = self.remote_1.get_children(path)["entries"] - for child in children: - if child["type"] == "Folder": - children_list = self.get_children(child["path"], children_list, key) - children_list.append(child[key]) - return children_list - - def checks(self, created): - """Check that the content on the remote equals the created items.""" - # Ensure there is only 1 folder created at the workspace root - ws_children = self.remote_1.get_children(self.ws.path)["entries"] - assert len(ws_children) == 1 - root = ws_children[0] - - # All has been uploaded - children = self.get_children(root["path"], [root["path"]], "path") - - assert len(children) == len(created) - - # Paths cleanup for assert to use only the relative part - children = sorted(child.replace(self.ws.path, "") for child in children) - created = sorted(elem.replace(self.tmpdir.as_posix(), "") for elem in created) - assert created == children - - # There is nothing more to upload - assert not list(self.engine_1.dao.get_dt_uploads()) - - # And there is no error - assert not self.engine_1.dao.get_errors(limit=0) - - def direct_transfer(self, folder, duplicate_behavior: str = "create") -> None: - paths = {path: size for path, size in get_tree_list(folder)} # noqa - # paths = dict([(path, size) for path, size in get_tree_list(folder)]) - self.engine_1.direct_transfer( - paths, - self.ws.path, - self.ws.uid, - self.ws.title, - duplicate_behavior=duplicate_behavior, - ) - - def test_simple_folder(self): - """Test the Direct Transfer on an simple empty folder.""" - - # There is no upload, right now - assert not list(self.engine_1.dao.get_dt_uploads()) - - root_folder = self.tmpdir / str(uuid4()) - root_folder.mkdir() - - with ensure_no_exception(): - self.direct_transfer(root_folder) - self.wait_sync(wait_for_async=True) - - # Ensure there is only 1 folder created at the workspace root - children = self.remote_1.get_children(self.ws.path)["entries"] - assert len(children) == 1 - assert children[0]["title"] == root_folder.name - - # All has been uploaded - assert not list(self.engine_1.dao.get_dt_uploads()) - - def test_sub_folders(self): - """Test the Direct Transfer on an simple empty folder.""" - - # There is no upload, right now - assert not list(self.engine_1.dao.get_dt_uploads()) - - created = [] - - root_folder = self.tmpdir / str(uuid4())[:6] - root_folder.mkdir() - - created.append(root_folder.as_posix()) - for _ in range(3): - sub_folder = root_folder / f"folder_{str(uuid4())[:4]}" - sub_folder.mkdir() - created.append(sub_folder.as_posix()) - for _ in range(2): - sub_file = sub_folder / f"file_{str(uuid4())[:4]}" - sub_file.write_text("test", encoding="utf-8") - created.append(sub_file.as_posix()) - - with ensure_no_exception(): - self.direct_transfer(root_folder) - self.wait_sync(wait_for_async=True) - - self.checks(created) - - def test_same_name_folders(self): - """Test the Direct Transfer on folders with same names.""" - - # There is no upload, right now - assert not list(self.engine_1.dao.get_dt_uploads()) - - created = [] - - root_folder = self.tmpdir / str(uuid4())[:6] - root_folder.mkdir() - - created.append(root_folder.as_posix()) - - folder_a = root_folder / "folder_a" - folder_a.mkdir() - created.append(folder_a.as_posix()) - sub_file = folder_a / "file_1.txt" - sub_file.write_text("test", encoding="utf-8") - created.append(sub_file.as_posix()) - - folder_b = root_folder / "folder_b" - folder_b.mkdir() - created.append(folder_b.as_posix()) - sub_file = folder_b / "file_1.txt" - sub_file.write_text("test", encoding="utf-8") - created.append(sub_file.as_posix()) - - # Sub-folder - folder_a = folder_b / "folder_a" - folder_a.mkdir() - created.append(folder_a.as_posix()) - sub_file = folder_a / "file_1.txt" - sub_file.write_text("test", encoding="utf-8") - created.append(sub_file.as_posix()) - - with ensure_no_exception(): - self.direct_transfer(root_folder) - self.wait_sync(wait_for_async=True) - - self.checks(created) - - """ - def test_sessions(self): - "" - Test the Direct Transfer session system. - Start multiple transfers to check sessions creation. - Check the sessions status after synchronization. - "" - - # There is no upload, right now - assert not list(self.engine_1.dao.get_dt_uploads()) - expression = re.compile(... - ) - - for x in range(4): - created = [] - root_folder = self.tmpdir / str(uuid4())[:6] - root_folder.mkdir() - created.append(root_folder) - - sub_file = root_folder / f"file_{str(uuid4())[:4]}" - sub_file.write_text("test", encoding="utf-8") - created.append(sub_file) - - sub_file = root_folder / f"file_{str(uuid4())[:4]}" - sub_file.write_text("test", encoding="utf-8") - created.append(sub_file) - - with ensure_no_exception(): - self.direct_transfer(root_folder) - planned = [ - self.engine_1.dao.get_state_from_local(item) for item in created - ] - assert len(planned) == len(created) - assert all(dt["session"] == x + 1 for dt in planned) - - session = self.engine_1.dao.get_session(x + 1) - assert session - assert session.status == TransferStatus.ONGOING - - with self._caplog.at_level(logging.INFO): - self.wait_sync(wait_for_async=True) - - session = self.engine_1.dao.get_session(x + 1) - assert session - assert session.status == TransferStatus.DONE - assert session - - # A new Notification logs should appear at each iteration - records = map(str, self._caplog.records) - matches = list(filter(expression.match, records)) - assert len(matches) == x + 1 - """ - - def test_pause_resume_session(self): - """ - Test the session pause and resume system. - The Session final status should be COMPLETED. - """ - engine = self.engine_1 - - # There is no upload, right now - assert not list(engine.dao.get_dt_uploads()) - expression = re.compile( - r"" - ) - - def callback(*_): - """This will mimic what is done in SessionItem.qml.""" - # Ensure we have 1 ongoing upload - dao = engine.dao - uploads = list(dao.get_dt_uploads()) - assert uploads - upload = uploads[0] - assert upload.status == TransferStatus.ONGOING - - # Verify the session status - sessions = dao.get_active_sessions_raw() - assert len(sessions) == 1 - session = sessions[0] - assert session["total"] == 2 - assert session["status"] == TransferStatus.ONGOING - - # Pause the session - dao.pause_session(session["uid"]) - - session = dao.get_session(session["uid"]) - uploads = list(dao.get_dt_uploads()) - assert uploads - upload = uploads[0] - assert upload.status == TransferStatus.PAUSED - - created = [] - root_folder = self.tmpdir / str(uuid4())[:6] - root_folder.mkdir() - created.append(root_folder) - - sub_file = root_folder / f"file_{str(uuid4())[:4]}" - sub_file.write_text("Some content." * 1024 * 1024 * 2, encoding="utf-8") - created.append(sub_file) - - with patch.object(engine.remote, "upload_callback", new=callback): - with ensure_no_exception(): - self.direct_transfer(root_folder) - self.wait_sync() - - session = engine.dao.get_session(1) - assert session - assert session.status == TransferStatus.PAUSED - - engine.resume_session(1) - with self._caplog.at_level(logging.INFO): - self.wait_sync(wait_for_async=True) - - sessions = engine.dao.get_completed_sessions_raw(limit=5) - assert sessions - assert len(sessions) == 1 - session = sessions[0] - assert session["status"] == TransferStatus.DONE - - # A new Notification logs should appear at each iteration - records = map(str, self._caplog.records) - matches = list(filter(expression.match, records)) - assert len(matches) == 1 - - def test_pause_cancel_session(self): - """ - Test the session pause and cancel system. - All Uploads should be removed and the Session final status should be CANCELLED. - """ - engine = self.engine_1 - - # There is no upload, right now - assert not list(engine.dao.get_dt_uploads()) - - def callback(*_): - """This will mimic what is done in SessionItem.qml.""" - # Ensure we have 1 ongoing upload - dao = engine.dao - uploads = list(dao.get_dt_uploads()) - assert uploads - upload = uploads[0] - assert upload.status == TransferStatus.ONGOING - - # Verify the session status - sessions = dao.get_active_sessions_raw() - assert len(sessions) == 1 - session = sessions[0] - assert session["total"] == 2 - assert session["status"] == TransferStatus.ONGOING - - # Pause the session - dao.pause_session(session["uid"]) - - session = dao.get_session(session["uid"]) - print(session) - uploads = list(dao.get_dt_uploads()) - assert uploads - upload = uploads[0] - assert upload.status == TransferStatus.PAUSED - - created = [] - root_folder = self.tmpdir / str(uuid4())[:6] - root_folder.mkdir() - created.append(root_folder) - - sub_file = root_folder / f"file_{str(uuid4())[:4]}" - sub_file.write_text("Some content." * 1024 * 1024 * 2, encoding="utf-8") - created.append(sub_file) - - with patch.object(engine.remote, "upload_callback", new=callback): - with ensure_no_exception(): - self.direct_transfer(root_folder) - self.wait_sync() - - session = engine.dao.get_session(1) - assert session - assert session.status == TransferStatus.PAUSED - - engine.cancel_session(1) - self.wait_sync(wait_for_async=True) - - sessions = engine.dao.get_completed_sessions_raw(limit=5) - assert sessions - assert len(sessions) == 1 - session = sessions[0] - assert session["status"] == TransferStatus.CANCELLED - - uploads = list(engine.dao.get_dt_uploads()) - assert not uploads - - @pytest.mark.xfail(reason="NXDRIVE-2495") - def test_pause_resume_session_non_chunked(self): - """ - Test the session pause and resume system for sessions containing non-chunked files. - The Session final status should be COMPLETED. - """ - engine = self.engine_1 - - # There is no upload, right now - assert not list(engine.dao.get_dt_uploads()) - expression = re.compile( - r"" - ) - - upload_count = 0 - - def get_upload(*_, **__): - """Alternative version of EngineDAO.get_upload() that pause the session.""" - nonlocal upload_count - - # The first upload is the folder, we want to pause the session just before the file. - if upload_count == 0: - upload_count += 1 - return None - - # Ensure we have 0 ongoing upload - dao = engine.dao - uploads = list(dao.get_dt_uploads()) - assert not uploads - - # Verify the session status - sessions = dao.get_active_sessions_raw() - assert len(sessions) == 1 - session = sessions[0] - assert session["total"] == 2 - assert session["status"] is TransferStatus.ONGOING - - # Pause the session - dao.pause_session(session["uid"]) - - # Session should be paused now - session = dao.get_session(session["uid"]) - assert session.status is TransferStatus.PAUSED - - return None - - created = [] - root_folder = self.tmpdir / str(uuid4())[:6] - root_folder.mkdir() - created.append(root_folder) - - sub_file = root_folder / f"file_{str(uuid4())[:4]}" - sub_file.write_text("Some content.", encoding="utf-8") - created.append(sub_file) - - with patch.object(engine.dao, "get_upload", new=get_upload): - with ensure_no_exception(): - self.direct_transfer(root_folder) - self.wait_sync() - - session = engine.dao.get_session(1) - assert session - assert session.status is TransferStatus.PAUSED - - uploads = list(engine.dao.get_dt_uploads()) - assert uploads - upload = uploads[0] - assert upload.status is TransferStatus.PAUSED - - engine.resume_session(1) - with self._caplog.at_level(logging.INFO): - self.wait_sync(wait_for_async=True) - - sessions = engine.dao.get_completed_sessions_raw(limit=5) - assert sessions - assert len(sessions) == 1 - session = sessions[0] - assert session["status"] is TransferStatus.DONE - - # A new Notification logs should appear at each iteration - records = map(str, self._caplog.records) - matches = list(filter(expression.match, records)) - assert len(matches) == 1 - - def test_sub_files(self): - """Test the Direct Transfer on a folder with many files.""" - - # There is no upload, right now - assert not list(self.engine_1.dao.get_dt_uploads()) - - created = [] - - root_folder = self.tmpdir / str(uuid4())[:6] - root_folder.mkdir() - - created.append(root_folder.as_posix()) - for _ in range(5): - sub_file = root_folder / f"file_{str(uuid4())[:4]}" - sub_file.write_text("test", encoding="utf-8") - created.append(sub_file.as_posix()) - - with ensure_no_exception(): - self.direct_transfer(root_folder) - self.wait_sync(wait_for_async=True) - - self.checks(created) - - """ - def test_identical_sessions(self): - "" - Create two sessions with the same file then pause them. - Ensure that two uploads are created. - The two sessions final status should be COMPLETED. - "" - engine = self.engine_1 - - # There is no upload, right now - assert not list(engine.dao.get_dt_uploads()) - - def callback(*_): - ""This will mimic what is done in SessionItem.qml."" - dao = engine.dao - - sessions = dao.get_active_sessions_raw() - for session in sessions: - # Pause the session - dao.pause_session(session["uid"]) - sessions = dao.get_active_sessions_raw() - uploads = list(dao.get_dt_uploads()) - assert uploads - for upload in uploads: - assert upload.status is TransferStatus.PAUSED - - for _ in range(2): - created = [] - root_folder = self.tmpdir / str(uuid4())[:6] - root_folder.mkdir() - created.append(root_folder) - - sub_file = root_folder / "file_test_duplicate.txt" - sub_file.write_text("Some content." * 1024 * 1024 * 2, encoding="utf-8") - created.append(sub_file) - - with patch.object(engine.remote, "upload_callback", new=callback): - with ensure_no_exception(): - self.direct_transfer(root_folder) - self.wait_sync() - - sessions = engine.dao.get_active_sessions_raw() - assert len(sessions) == 2 - for session in sessions: - assert session["status"] is TransferStatus.PAUSED - - uploads = list(engine.dao.get_dt_uploads()) - assert len(uploads) == 2 - - for session in sessions: - engine.resume_session(session["uid"]) - - self.wait_sync(wait_for_async=True) - - sessions = engine.dao.get_completed_sessions_raw(limit=5) - assert sessions - assert len(sessions) == 2 - for session in sessions: - assert session["status"] is TransferStatus.DONE - assert not list(engine.dao.get_dt_uploads()) - """ - - -class TestDirectTransferFolder(OneUserTest, DirectTransferFolder): - """Direct Transfer in "normal" mode, i.e.: when synchronization features are enabled.""" - - def setUp(self): - DirectTransferFolder.setUp(self) - - def wait_sync(self, *args, **kwargs): - sleep(3) - super().wait_sync(*args, **kwargs) - - -class TestDirectTransferFolderNoSync(OneUserNoSync, DirectTransferFolder): - """Direct Transfer should work when synchronization features are not enabled.""" - - def setUp(self): - DirectTransferFolder.setUp(self) - - def wait_sync(self, *args, **kwargs): - sleep(3) - super().wait_sync(*args, **kwargs) diff --git a/tests/functional/test_local_deletion.py b/tests/functional/test_local_deletion.py deleted file mode 100644 index 39bd8faa92..0000000000 --- a/tests/functional/test_local_deletion.py +++ /dev/null @@ -1,309 +0,0 @@ -import shutil - -import pytest - -from nxdrive.constants import WINDOWS - -from .conftest import OneUserTest - - -class TestLocalDeletion(OneUserTest): - def setUp(self): - self.engine_1.start() - self.wait_sync(wait_for_async=True) - - def test_untrash_file(self): - local = self.local_1 - remote = self.remote_document_client_1 - file1 = "File_To_Delete.txt" - - local.make_file("/", file1, content=b"This is a content") - self.wait_sync() - assert remote.exists("/" + file1) - - old_info = remote.get_info(f"/{file1}") - abs_path = local.abspath(f"/{file1}") - - # Pretend we had trash the file - shutil.move(abs_path, self.local_test_folder_1 / file1) - self.wait_sync(wait_for_async=True) - assert not remote.exists("/" + file1) - assert not local.exists("/" + file1) - # See if it untrash or recreate - shutil.move(self.local_test_folder_1 / file1, local.abspath("/")) - self.wait_sync(wait_for_async=True) - assert remote.exists(old_info.uid) - assert local.exists("/" + file1) - - def test_untrash_file_with_rename(self): - local = self.local_1 - remote = self.remote_document_client_1 - file1 = "File_To_Delete.txt" - file2 = "File_To_Delete2.txt" - - local.make_file("/", file1, content=b"This is a content") - self.wait_sync() - assert remote.exists(f"/{file1}") - uid = local.get_remote_id(f"/{file1}") - old_info = remote.get_info(f"/{file1}") - abs_path = local.abspath(f"/{file1}") - # Pretend we had trash the file - shutil.move(abs_path, self.local_test_folder_1 / file2) - self.wait_sync(wait_for_async=True) - assert not remote.exists("/" + file1) - assert not local.exists("/" + file1) - (self.local_test_folder_1 / file2).write_bytes(b"New content") - if WINDOWS: - # Python API overwrite the tag by default - (self.local_test_folder_1 / f"{file2}:ndrive").write_text( - uid, encoding="utf-8" - ) - # See if it untrash or recreate - shutil.move(self.local_test_folder_1 / file2, local.abspath("/")) - self.wait_sync(wait_for_async=True) - assert remote.exists(old_info.uid) - assert local.exists("/" + file2) - assert not local.exists("/" + file1) - assert local.get_content("/" + file2) == b"New content" - - def test_move_untrash_file_on_parent(self): - local = self.local_1 - remote = self.remote_document_client_1 - file1 = "File_To_Delete.txt" - - file_path = "/ToDelete/File_To_Delete.txt" - local.make_folder("/", "ToDelete") - local.make_file("/ToDelete", file1, content=b"This is a content") - self.wait_sync() - assert remote.exists(file_path) - old_info = remote.get_info(file_path) - abs_path = local.abspath(file_path) - # Pretend we had trash the file - shutil.move(abs_path, self.local_test_folder_1 / file1) - self.wait_sync() - local.delete("/ToDelete") - self.wait_sync() - assert not remote.exists(file_path) - assert not local.exists(file_path) - - # See if it untrash or recreate - shutil.move(self.local_test_folder_1 / file1, local.abspath("/")) - self.wait_sync() - new_info = remote.get_info(old_info.uid) - assert new_info.state == "project" - assert local.exists(f"/{file1}") - # Because remote_document_client_1 was used - assert local.get_remote_id("/").endswith(new_info.parent_uid) - - """ - @Options.mock() - def test_move_untrash_file_on_parent_with_no_rights(self): - local = self.local_1 - remote = self.remote_document_client_1 - file1 = "File_To_Delete.txt" - - # Setup - file_path = "/ToDelete/File_To_Delete.txt" - local.make_folder("/", "ToDelete") - local.make_file("/ToDelete", file1, content=b"This is a content") - self.wait_sync() - assert remote.exists(file_path) - old_info = remote.get_info(file_path) - abs_path = local.abspath(file_path) - # Pretend we had trash the file - shutil.move(abs_path, self.local_test_folder_1 / file1) - self.wait_sync() - - # Remove rights - folder_path = f"{self.ws.path}/ToDelete" - input_obj = "doc:" + folder_path - self.root_remote.execute( - command="Document.SetACE", - input_obj=input_obj, - user=self.user_1, - permission="Read", - ) - self.root_remote.block_inheritance(folder_path, overwrite=False) - self.root_remote.delete(folder_path) - self.wait_sync(wait_for_async=True) - assert not remote.exists(file_path) - assert not local.exists(file_path) - - # See if it untrash or recreate - shutil.move(self.local_test_folder_1 / file1, local.abspath("/")) - assert local.get_remote_id("/" + file1) - self.wait_sync() - assert local.exists("/" + file1) - new_uid = local.get_remote_id("/" + file1) - # Because remote_document_client_1 was used - assert new_uid - assert not new_uid.endswith(old_info.uid) - """ - - @pytest.mark.skip( - reason="Wait to know what is the expectation " - "- the previous folder does not exist" - ) - def test_move_untrash_file_on_parent_with_no_rights_on_destination(self): - local = self.local_1 - remote = self.remote_document_client_1 - file1 = "File_To_Delete.txt" - - # Setup the test - file_path = "/ToDelete/File_To_Delete.txt" - local.make_folder("/", "ToDelete") - local.make_folder("/", "ToCopy") - local.make_file("/ToDelete", file1, content=b"This is a content") - self.wait_sync() - assert remote.exists(file_path) - remote.get_info(file_path) - abs_path = local.abspath(file_path) - - # Pretend we had trash the file - shutil.move(abs_path, self.local_test_folder_1 / file1) - self.wait_sync() - - # Remove rights - folder_path = f"{self.ws.path}/ToCopy" - input_obj = "doc:" + folder_path - self.root_remote.execute( - command="Document.SetACE", - input_obj=input_obj, - user=self.user_1, - permission="Read", - ) - self.root_remote.block_inheritance(folder_path, overwrite=False) - # Delete - local.delete("/ToDelete") - self.wait_sync(wait_for_async=True) - assert not remote.exists(file_path) - assert not local.exists(file_path) - - # See if it untrash or unsynchronized - local.unlock_ref("/ToCopy") - shutil.move(self.local_test_folder_1 / file1, local.abspath("/ToCopy")) - self.wait_sync(wait_for_async=True) - - """ - def test_untrash_file_on_delete_parent(self): - local = self.local_1 - remote = self.remote_document_client_1 - file1 = "File_To_Delete.txt" - - # Setup - file_path = "/ToDelete/File_To_Delete.txt" - local.make_folder("/", "ToDelete") - local.make_file("/ToDelete", file1, content=b"This is a content") - self.wait_sync() - assert remote.exists(file_path) - old_info = remote.get_info(file_path) - abs_path = local.abspath(file_path) - - # Pretend we had trash the file - shutil.move(abs_path, self.local_test_folder_1 / file1) - self.wait_sync() - local.delete("/ToDelete") - self.wait_sync() - assert not remote.exists(file_path) - assert not local.exists(file_path) - - # See if it untrash or recreate - local.make_folder("/", "ToDelete") - shutil.move(self.local_test_folder_1 / file1, local.abspath("/ToDelete")) - self.wait_sync() - assert remote.exists(old_info.uid) - new_info = remote.get_info(old_info.uid) - assert remote.exists(new_info.parent_uid) - assert local.exists(file_path) - """ - - def test_trash_file_then_parent(self): - local = self.local_1 - remote = self.remote_document_client_1 - file1 = "File_To_Delete.txt" - - file_path = "/ToDelete/File_To_Delete.txt" - local.make_folder("/", "ToDelete") - local.make_file("/ToDelete", file1, content=b"This is a content") - self.wait_sync() - assert remote.exists(file_path) - old_info = remote.get_info(file_path) - abs_path = local.abspath(file_path) - # Pretend we had trash the file - shutil.move(abs_path, self.local_test_folder_1 / file1) - local.delete("/ToDelete") - self.wait_sync() - assert not remote.exists(file_path) - assert not local.exists(file_path) - # See if it untrash or recreate - local.make_folder("/", "ToDelete") - shutil.move(self.local_test_folder_1 / file1, local.abspath("/ToDelete")) - self.wait_sync() - assert remote.exists(old_info.uid) - assert local.exists(file_path) - - """ - @Options.mock() - def test_trash_file_should_respect_deletion_behavior_unsync(self): - Options.deletion_behavior = "unsync" - - local, engine = self.local_1, self.engine_1 - remote = self.remote_document_client_1 - folder, file = "folder", "file.txt" - file_path = f"/{folder}/{file}" - - # Create local data - local.make_folder("/", folder) - local.make_file(f"/{folder}", file, content=b"This is a content") - - # Sync'n check - self.wait_sync() - assert remote.exists(file_path) - - # Mimic "stop Drive" - engine.stop() - - # Delete the file - local.delete(file_path) - - # Mimic "start Drive" - engine.start() - self.wait_sync() - - # Checks - assert remote.exists(file_path) - assert not local.exists(file_path) - """ - - """ - @Options.mock() - def test_trash_file_should_respect_deletion_behavior_delete_server(self): - Options.deletion_behavior = "delete_server" - - local, engine = self.local_1, self.engine_1 - remote = self.remote_document_client_1 - folder, file = "folder", "file.txt" - file_path = f"/{folder}/{file}" - - # Create local data - local.make_folder("/", folder) - local.make_file(f"/{folder}", file, content=b"This is a content") - - # Sync'n check - self.wait_sync() - assert remote.exists(file_path) - - # Mimic "stop Drive" - engine.stop() - - # Delete the file - local.delete(file_path) - - # Mimic "start Drive" - engine.start() - self.wait_sync() - - # Checks - assert not remote.exists(file_path) - assert not local.exists(file_path) - """ diff --git a/tests/functional/test_local_filter.py b/tests/functional/test_local_filter.py deleted file mode 100644 index 69b992e5cd..0000000000 --- a/tests/functional/test_local_filter.py +++ /dev/null @@ -1,198 +0,0 @@ -from nxdrive.constants import SYNC_ROOT - -from .conftest import FS_ITEM_ID_PREFIX, SYNC_ROOT_FAC_ID, OneUserTest - - -class TestLocalFilter(OneUserTest): - def test_synchronize_local_filter(self): - """Test that filtering remote documents is impacted client side - - Just do a single test as it is the same as - test_integration_remote_deletion - - Use cases: - - Filter delete a regular folder - => Folder should be locally deleted - - Unfilter restore folder from the trash - => Folder should be locally re-created - - Filter a synchronization root - => Synchronization root should be locally deleted - - Unfilter synchronization root from the trash - => Synchronization root should be locally re-created - - See TestIntegrationSecurityUpdates.test_synchronize_denying_read_access - as the same uses cases are tested - """ - # Bind the server and root workspace - self.engine_1.start() - # Get local and remote clients - local = self.local_1 - remote = self.remote_document_client_1 - - # Create documents in the remote root workspace - # then synchronize - remote.make_folder("/", "Test folder") - remote.make_file("/Test folder", "joe.txt", content=b"Some content") - self.wait_sync(wait_for_async=True) - # Fake server binding with the unit test class - assert local.exists("/Test folder") - assert local.exists("/Test folder/joe.txt") - - # Add remote folder as filter then synchronize - doc = remote.get_info("/Test folder") - root_path = f"{SYNC_ROOT}/{SYNC_ROOT_FAC_ID}{doc.root}" - doc_path = f"{root_path}/{FS_ITEM_ID_PREFIX}{doc.uid}" - - self.engine_1.add_filter(doc_path) - self.wait_sync() - assert not local.exists("/Test folder") - - self.engine_1.remove_filter(doc_path) - self.wait_sync() - assert local.exists("/Test folder") - assert local.exists("/Test folder/joe.txt") - - self.engine_1.add_filter(doc_path) - self.wait_sync() - assert not local.exists("/Test folder") - - # Delete sync root then synchronize - self.engine_1.add_filter(root_path) - self.wait_sync() - assert not local.exists("/") - - # Restore sync root from trash then synchronize - self.engine_1.remove_filter(root_path) - self.wait_sync() - assert local.exists("/") - assert local.exists("/Test folder") - assert local.exists("/Test folder/joe.txt") - - """ - def test_synchronize_local_office_temp(self): - # Should synchronize directly local folder with hex name - # Bind the server and root workspace - hexaname = "1234ABCD" - hexafile = "2345BCDF" - self.engine_1.start() - self.wait_sync() - self.local_1.make_folder("/", hexaname) - self.local_1.make_file("/", hexafile, content=b"test") - # Make sure that a folder is synchronized directly - # no matter what and the file is postponed - self.wait_sync(enforce_errors=False, fail_if_timeout=False) - children = self.remote_document_client_1.get_children_info(self.workspace) - assert len(children) == 1 - - # Force the postponed to ensure it's synchronized now - self.engine_1.queue_manager.requeue_errors() - self.wait_sync(wait_for_async=True) - assert self.local_1.exists("/" + hexafile) - children = self.remote_document_client_1.get_children_info(self.workspace) - assert len(children) == 2 - assert children[1].name == "2345BCDF" - """ - - """ - def test_synchronize_local_filter_with_move(self): - local = self.local_1 - remote = self.remote_document_client_1 - - # Create documents in the remote root workspace - # then synchronize - remote.make_folder("/", "Test") - remote.make_file("/Test", "joe.txt", content=b"Some content") - remote.make_folder("/Test", "Subfolder") - remote.make_folder("/Test", "Filtered") - remote.make_file("/Test/Subfolder", "joe2.txt", content=b"Some content") - remote.make_file("/Test/Subfolder", "joe3.txt", content=b"Somecossntent") - remote.make_folder("/Test/Subfolder/", "SubSubfolder") - remote.make_file( - "/Test/Subfolder/SubSubfolder", "joe4.txt", content=b"Some qwqwqontent" - ) - - self.engine_1.start() - self.wait_sync(wait_for_async=True) - assert local.exists("/Test") - assert local.exists("/Test/joe.txt") - assert local.exists("/Test/Filtered") - assert local.exists("/Test/Subfolder") - assert local.exists("/Test/Subfolder/joe2.txt") - assert local.exists("/Test/Subfolder/joe3.txt") - assert local.exists("/Test/Subfolder/SubSubfolder") - assert local.exists("/Test/Subfolder/SubSubfolder/joe4.txt") - - # Add remote folder as filter then synchronize - doc_file = remote.get_info("/Test/joe.txt") - doc = remote.get_info("/Test") - filtered_doc = remote.get_info("/Test/Filtered") - root_path = f"{SYNC_ROOT}/{SYNC_ROOT_FAC_ID}{doc.root}" - doc_path_filtered = f"{root_path}/{FS_ITEM_ID_PREFIX}{doc.uid}/{FS_ITEM_ID_PREFIX}{filtered_doc.uid}" - - self.engine_1.add_filter(doc_path_filtered) - self.wait_sync() - assert not local.exists("/Test/Filtered") - - # Move joe.txt to filtered folder on the server - remote.move(doc_file.uid, filtered_doc.uid) - self.wait_sync(wait_for_async=True) - - # It now delete on the client - assert not local.exists("/Test/joe.txt") - assert local.exists("/Test/Subfolder") - assert local.exists("/Test/Subfolder/joe2.txt") - assert local.exists("/Test/Subfolder/joe3.txt") - assert local.exists("/Test/Subfolder/SubSubfolder") - assert local.exists("/Test/Subfolder/SubSubfolder/joe4.txt") - - # Now move the subfolder - doc_file = remote.get_info("/Test/Subfolder") - remote.move(doc_file.uid, filtered_doc.uid) - self.wait_sync(wait_for_async=True) - - # Check that all has been deleted - assert not local.exists("/Test/joe.txt") - assert not local.exists("/Test/Subfolder") - assert not local.exists("/Test/Subfolder/joe2.txt") - assert not local.exists("/Test/Subfolder/joe3.txt") - assert not local.exists("/Test/Subfolder/SubSubfolder") - assert not local.exists("/Test/Subfolder/SubSubfolder/joe4.txt") - """ - - """ - def test_synchronize_local_filter_with_remote_trash(self): - self.engine_1.start() - - # Get local and remote clients - local = self.local_1 - remote = self.remote_document_client_1 - - # Create documents in the remote root workspace - # then synchronize - folder_id = remote.make_folder("/", "Test") - remote.make_file("/Test", "joe.txt", content=b"Some content") - - self.wait_sync(wait_for_async=True) - assert local.exists("/Test") - assert local.exists("/Test/joe.txt") - - # Add remote folder as filter then synchronize - doc = remote.get_info("/Test") - root_path = f"{SYNC_ROOT}/{SYNC_ROOT_FAC_ID}{doc.root}" - doc_path = f"{root_path}/{FS_ITEM_ID_PREFIX}{doc.uid}" - - self.engine_1.add_filter(doc_path) - self.wait_sync() - assert not local.exists("/Test") - - # Delete remote folder then synchronize - remote.delete("/Test") - self.wait_sync(wait_for_async=True) - assert not local.exists("/Test") - - # Restore folder from trash then synchronize - remote.undelete(folder_id) - # NXDRIVE-xx check that the folder is not created as it is filtered - self.wait_sync(wait_for_async=True) - assert not local.exists("/Test") - """ diff --git a/tests/functional/test_local_move_and_rename.py b/tests/functional/test_local_move_and_rename.py deleted file mode 100644 index c269616426..0000000000 --- a/tests/functional/test_local_move_and_rename.py +++ /dev/null @@ -1,703 +0,0 @@ -from unittest.mock import patch - -from nuxeo.exceptions import HTTPError - -from .conftest import OneUserTest - -# TODO NXDRIVE-170: refactor - - -class TestLocalMoveAndRename(OneUserTest): - def setUp(self): - """ - Sets up the following local hierarchy: - Nuxeo Drive Test Workspace - |-- Original File 1.txt - |-- Original File 2.txt - |-- Original Folder 1 - | |-- Sub-Folder 1.1 - | |-- Sub-Folder 1.2 - | |-- Original File 1.1.txt - |-- Original Folder 2 - | |-- Original File 3.txt - """ - - self.engine_1.start() - self.wait_sync(wait_for_async=True) - local = self.local_1 - local.make_file("/", "Original File 1.txt", content=b"Some Content 1") - local.make_file("/", "Original File 2.txt", content=b"Some Content 2") - - local.make_folder("/", "Original Folder 1") - local.make_folder("/Original Folder 1", "Sub-Folder 1.1") - local.make_folder("/Original Folder 1", "Sub-Folder 1.2") - - # Same content as OF1 - local.make_file( - "/Original Folder 1", "Original File 1.1.txt", content=b"Some Content 1" - ) - - local.make_folder("/", "Original Folder 2") - local.make_file( - "/Original Folder 2", "Original File 3.txt", content=b"Some Content 3" - ) - self.wait_sync() - - """ - def test_local_rename_folder_while_creating(self): - local = self.local_1 - root_local = self.local_root_client_1 - remote = self.remote_document_client_1 - marker = False - - def update_remote_state(row, *args, **kwargs): - nonlocal marker - EngineDAO.update_remote_state(self.engine_1.dao, row, *args, **kwargs) - if row.local_name == "New Folder" and not marker: - root_local.rename(row.local_path, "Renamed Folder") - marker = True - - with patch.object( - self.engine_1.dao, "update_remote_state", new=update_remote_state - ): - local.make_folder("/", "New Folder") - self.wait_sync(fail_if_timeout=False) - - assert local.exists("/Renamed Folder") - assert not local.exists("/New Folder") - - # Path is updated on Nuxeo - info = remote.get_info("/Renamed Folder") - assert info.name == "Renamed Folder" - assert len(local.get_children_info("/")) == 5 - assert len(remote.get_children_info(self.workspace)) == 5 - """ - - """ - def test_local_rename_file_while_creating(self): - local = self.engine_1.local - remote = self.remote_document_client_1 - marker = False - - def set_remote_id(ref: Path, remote_id: bytes, name: str = "ndrive"): - nonlocal local, marker - LocalTest.set_remote_id(local, ref, remote_id, name=name) - if not marker and ref.name == "File.txt": - local.rename(ref, "Renamed File.txt") - marker = True - - with patch.object(self.engine_1.local, "set_remote_id", new=set_remote_id): - self.local_1.make_file("/", "File.txt", content=b"Some Content 2") - self.wait_sync(fail_if_timeout=False) - - local = self.local_1 - assert local.exists("/Renamed File.txt") - assert not local.exists("/File.txt") - - # Path is updated on Nuxeo - info = remote.get_info("/Renamed File.txt") - assert info.name == "Renamed File.txt" - assert len(local.get_children_info("/")) == 5 - assert len(remote.get_children_info(self.workspace)) == 5 - """ - - """ - @pytest.mark.randombug("NXDRIVE-811", condition=True, mode="REPEAT") - def test_local_rename_file_while_creating_before_marker(self): - local = self.local_1 - remote = self.remote_document_client_1 - marker = False - - def set_remote_id(ref: Path, remote_id: bytes, name: str = "ndrive"): - nonlocal local, marker - if not marker and ref.name == "File.txt": - self.engine_1.local.rename(ref, "Renamed File.txt") - marker = True - LocalTest.set_remote_id(local, ref, remote_id, name=name) - - with patch.object(self.engine_1.local, "set_remote_id", new=set_remote_id): - local.make_file("/", "File.txt", content=b"Some Content 2") - self.wait_sync(fail_if_timeout=False) - - assert local.exists("/Renamed File.txt") - assert not local.exists("/File.txt") - - # Path is updated on Nuxeo - info = remote.get_info("/Renamed File.txt") - assert info.name == "Renamed File.txt" - assert len(local.get_children_info("/")) == 5 - assert len(remote.get_children_info(self.workspace)) == 5 - """ - - """ - def test_local_rename_file_while_creating_after_marker(self): - marker = False - local = self.local_1 - remote = self.remote_document_client_1 - - def update_remote_state(row, *args, **kwargs): - nonlocal marker - EngineDAO.update_remote_state(self.engine_1.dao, row, *args, **kwargs) - if not marker and row.local_name == "File.txt": - self.engine_1.local.rename(row.local_path, "Renamed File.txt") - marker = True - - with patch.object( - self.engine_1.dao, "update_remote_state", new=update_remote_state - ): - local.make_file("/", "File.txt", content=b"Some Content 2") - self.wait_sync(fail_if_timeout=False) - - assert local.exists("/Renamed File.txt") - assert not local.exists("/File.txt") - - # Path is updated on Nuxeo - info = remote.get_info("/Renamed File.txt") - assert info.name == "Renamed File.txt" - assert len(local.get_children_info("/")) == 5 - assert len(remote.get_children_info(self.workspace)) == 5 - """ - - def test_replace_file(self): - local = self.local_1 - - # Rename /Original File 1.txt to /Renamed File 1.txt - uid = local.get_remote_id("/Original File 1.txt") - local.remove_remote_id("/Original File 1.txt") - local.update_content("/Original File 1.txt", b"plop") - self.wait_sync(fail_if_timeout=False) - assert local.get_remote_id("/Original File 1.txt") == uid - - def test_local_rename_file(self): - local = self.local_1 - remote = self.remote_document_client_1 - - # Rename /Original File 1.txt to /Renamed File 1.txt - uid_1 = remote.get_info("/Original File 1.txt").uid - local.rename("/Original File 1.txt", "Renamed File 1.txt") - assert not local.exists("/Original File 1.txt") - assert local.exists("/Renamed File 1.txt") - - self.wait_sync() - assert not local.exists("/Original File 1.txt") - assert local.exists("/Renamed File 1.txt") - assert remote.get_info(uid_1).name == "Renamed File 1.txt" - - # Rename 'Renamed File 1.txt' to 'Renamed Again File 1.txt' - # and 'Original File 1.1.txt' to - # 'Renamed File 1.1.txt' at the same time as they share - # the same digest but do not live in the same folder - uid_1_1 = remote.get_info("/Original Folder 1/Original File 1.1.txt").uid - local.rename( - "/Original Folder 1/Original File 1.1.txt", "Renamed File 1.1 \xe9.txt" - ) - assert not local.exists("/Original Folder 1/Original File 1.1.txt") - assert local.exists("/Original Folder 1/Renamed File 1.1 \xe9.txt") - local.rename("/Renamed File 1.txt", "Renamed Again File 1.txt") - assert not local.exists("/Renamed File 1.txt") - assert local.exists("/Renamed Again File 1.txt") - - self.wait_sync() - assert not local.exists("/Renamed File 1.txt") - assert local.exists("/Renamed Again File 1.txt") - assert not local.exists("/Original Folder 1/Original File 1.1.txt") - assert local.exists("/Original Folder 1/Renamed File 1.1 \xe9.txt") - - info_1 = remote.get_info(uid_1) - assert info_1.name == "Renamed Again File 1.txt" - - # User 1 does not have the rights to see the parent container - # of the test workspace, hence set fetch_parent_uid=False - parent_1 = remote.get_info(info_1.parent_uid, fetch_parent_uid=False) - assert parent_1.name == self.workspace_title - - info_1_1 = remote.get_info(uid_1_1) - assert info_1_1.name == "Renamed File 1.1 \xe9.txt" - - parent_1_1 = remote.get_info(info_1_1.parent_uid) - assert parent_1_1.name == "Original Folder 1" - assert len(local.get_children_info("/Original Folder 1")) == 3 - assert len(remote.get_children_info(info_1_1.parent_uid)) == 3 - assert len(local.get_children_info("/")) == 4 - assert len(remote.get_children_info(self.workspace)) == 4 - - """ - def test_local_rename_file_uppercase_stopped(self): - local = self.local_1 - remote = self.remote_document_client_1 - self.engine_1.stop() - - # Rename /Original File 1.txt to /Renamed File 1.txt - - # Rename 'Renamed File 1.txt' to 'Renamed Again File 1.txt' - # and 'Original File 1.1.txt' to - # 'Renamed File 1.1.txt' at the same time as they share - # the same digest but do not live in the same folder - uid = remote.get_info("/Original Folder 1/Original File 1.1.txt").uid - local.rename( - "/Original Folder 1/Original File 1.1.txt", "original File 1.1.txt" - ) - - self.engine_1.start() - self.wait_sync() - - info = remote.get_info(uid) - assert info.name == "original File 1.1.txt" - - parent_info = remote.get_info(info.parent_uid) - assert parent_info.name == "Original Folder 1" - assert len(local.get_children_info("/Original Folder 1")) == 3 - assert len(remote.get_children_info(info.parent_uid)) == 3 - """ - - """ - def test_local_rename_file_uppercase(self): - local = self.local_1 - remote = self.remote_document_client_1 - - # Rename /Original File 1.txt to /Renamed File 1.txt - - # Rename 'Renamed File 1.txt' to 'Renamed Again File 1.txt' - # and 'Original File 1.1.txt' to - # 'Renamed File 1.1.txt' at the same time as they share - # the same digest but do not live in the same folder - uid = remote.get_info("/Original Folder 1/Original File 1.1.txt").uid - local.rename( - "/Original Folder 1/Original File 1.1.txt", "original File 1.1.txt" - ) - - self.wait_sync() - - info = remote.get_info(uid) - assert info.name == "original File 1.1.txt" - - parent_info = remote.get_info(info.parent_uid) - assert parent_info.name == "Original Folder 1" - assert len(local.get_children_info("/Original Folder 1")) == 3 - assert len(remote.get_children_info(info.parent_uid)) == 3 - """ - - def test_local_move_file(self): - local = self.local_1 - remote = self.remote_document_client_1 - - # "/Original File 1.txt" -> "/Original Folder 1/Original File 1.txt" - uid = remote.get_info("/Original File 1.txt").uid - local.move("/Original File 1.txt", "/Original Folder 1") - assert not local.exists("/Original File 1.txt") - assert local.exists("/Original Folder 1/Original File 1.txt") - - self.wait_sync() - assert not local.exists("/Original File 1.txt") - assert local.exists("/Original Folder 1/Original File 1.txt") - - info = remote.get_info(uid) - assert info.name == "Original File 1.txt" - parent_info = remote.get_info(info.parent_uid) - assert parent_info.name == "Original Folder 1" - assert len(local.get_children_info("/Original Folder 1")) == 4 - assert len(remote.get_children_info(info.parent_uid)) == 4 - assert len(local.get_children_info("/")) == 3 - assert len(remote.get_children_info(self.workspace)) == 3 - - """ - def test_local_move_file_rollback(self): - ""Test a local move into a folder that is not allowed on the server, - and so we locally revert/cancel the move. - Sometimes the rollback itself is canceled because the doc pair has - no a remote name. The cause is not yet known. - We would then end on such errors (see NXDRIVE-1952): - - # Nuxeo Drive <= 4.2.0 - AttributeError: 'NoneType' object has no attribute 'rstrip' - File "engine/processor.py", line 1383, in _handle_failed_remote_rename - File "client/local_client.py", line 629, in rename - File "utils.py", line 569, in safe_os_filename - File "utils.py", line 555, in safe_filename - - Or even: - - # Nuxeo Drive > 4.2.0 - TypeError: expected string or bytes-like object - File "engine/processor.py", line 1462, in _handle_failed_remote_rename - File "client/local/base.py", line 458, in rename - File "utils.py", line 622, in safe_os_filename - File "utils.py", line 607, in safe_filename - File ".../re.py", line 192, in sub - "" - local = self.local_1 - - # Move "/Original File 1.txt" -> "/Original Folder 1/Original File 1.txt" - local.move("/Original File 1.txt", "/Original Folder 1") - # And change the file name too - local.rename( - "/Original Folder 1/Original File 1.txt", "Original File 1-ren.txt" - ) - # Checks - assert not local.exists("/Original File 1.txt") - assert not local.exists("/Original Folder 1/Original File 1.txt") - assert local.exists("/Original Folder 1/Original File 1-ren.txt") - - def rename(*args, **kwargs): - raise ValueError("Mock'ed rename error") - - def allow_rollback(*args, **kwargs): - ""Allow rollback on all OSes."" - return True - - with patch.object(self.engine_1.remote, "rename", new=rename): - with patch.object(self.engine_1, "local_rollback", new=allow_rollback): - with ensure_no_exception(): - self.wait_sync() - - # The file has been moved again to its original location - assert not local.exists("/Original File 1.txt") - assert not local.exists("/Original File 1-ren.txt") - assert not local.exists("/Original Folder 1/Original File 1-ren.txt") - assert local.exists("/Original Folder 1/Original File 1.txt") - assert not self.engine_1.dao.get_errors(limit=0) - """ - - def test_local_move_and_rename_file(self): - local = self.local_1 - remote = self.remote_document_client_1 - - # Rename /Original File 1.txt to /Renamed File 1.txt - uid = remote.get_info("/Original File 1.txt").uid - - local.move( - "/Original File 1.txt", "/Original Folder 1", name="Renamed File 1 \xe9.txt" - ) - assert not local.exists("/Original File 1.txt") - assert local.exists("/Original Folder 1/Renamed File 1 \xe9.txt") - - self.wait_sync() - assert not local.exists("/Original File 1.txt") - assert local.exists("/Original Folder 1/Renamed File 1 \xe9.txt") - - info = remote.get_info(uid) - assert info.name == "Renamed File 1 \xe9.txt" - parent_info = remote.get_info(info.parent_uid) - assert parent_info.name == "Original Folder 1" - assert len(local.get_children_info("/Original Folder 1")) == 4 - assert len(remote.get_children_info(info.parent_uid)) == 4 - assert len(local.get_children_info("/")) == 3 - assert len(remote.get_children_info(self.workspace)) == 3 - - def test_local_rename_folder(self): - local = self.local_1 - remote = self.remote_document_client_1 - - # Save the uid of some files and folders prior to renaming - folder_1 = remote.get_info("/Original Folder 1").uid - file_1_1 = remote.get_info("/Original Folder 1/Original File 1.1.txt").uid - folder_1_1 = remote.get_info("/Original Folder 1/Sub-Folder 1.1").uid - - # Rename a non empty folder with some content - local.rename("/Original Folder 1", "Renamed Folder 1 \xe9") - assert not local.exists("/Original Folder 1") - assert local.exists("/Renamed Folder 1 \xe9") - - # Synchronize: only the folder renaming is detected: all - # the descendants are automatically realigned - self.wait_sync() - - # The server folder has been renamed: the uid stays the same - assert remote.get_info(folder_1).name == "Renamed Folder 1 \xe9" - - # The content of the renamed folder is left unchanged - file_info = remote.get_info(file_1_1) - assert file_info.name == "Original File 1.1.txt" - assert file_info.parent_uid == folder_1 - - folder_info = remote.get_info(folder_1_1) - assert folder_info.name == "Sub-Folder 1.1" - assert folder_info.parent_uid == folder_1 - - assert len(local.get_children_info("/Renamed Folder 1 \xe9")) == 3 - assert len(remote.get_children_info(file_info.parent_uid)) == 3 - assert len(local.get_children_info("/")) == 4 - assert len(remote.get_children_info(self.workspace)) == 4 - - """ - def test_local_rename_folder_while_suspended(self): - local = self.local_1 - remote = self.remote_document_client_1 - - # Save the uid of some files and folders prior to renaming - folder_1 = remote.get_info("/Original Folder 1").uid - file_1_1 = remote.get_info("/Original Folder 1/Original File 1.1.txt").uid - folder_1_1 = remote.get_info("/Original Folder 1/Sub-Folder 1.1").uid - count = len(local.get_children_info("/Original Folder 1")) - self.engine_1.suspend() - - # Rename a non empty folder with some content - local.rename("/Original Folder 1", "Renamed Folder 1 \xe9") - assert not local.exists("/Original Folder 1") - assert local.exists("/Renamed Folder 1 \xe9") - - local.rename("/Renamed Folder 1 \xe9/Sub-Folder 1.1", "Sub-Folder 2.1") - assert local.exists("/Renamed Folder 1 \xe9/Sub-Folder 2.1") - - # Same content as OF1 - local.make_file("/Renamed Folder 1 \xe9", "Test.txt", content=b"Some Content 1") - count += 1 - self.engine_1.resume() - # Synchronize: only the folder renaming is detected: all - # the descendants are automatically realigned - self.wait_sync(wait_for_async=True) - - # The server folder has been renamed: the uid stays the same - assert remote.get_info(folder_1).name == "Renamed Folder 1 \xe9" - - # The content of the renamed folder is left unchanged - file_info = remote.get_info(file_1_1) - assert file_info.name == "Original File 1.1.txt" - assert file_info.parent_uid == folder_1 - - folder_info = remote.get_info(folder_1_1) - assert folder_info.name == "Sub-Folder 2.1" - assert folder_info.parent_uid == folder_1 - assert len(local.get_children_info("/Renamed Folder 1 \xe9")) == count - assert len(remote.get_children_info(folder_1)) == count - assert len(local.get_children_info("/")) == 4 - assert len(remote.get_children_info(self.workspace)) == 4 - """ - - """ - def test_local_rename_file_after_create(self): - # Office 2010 and >, create a tmp file with 8 chars - # and move it right after - local = self.local_1 - remote = self.remote_document_client_1 - - local.make_file("/", "File.txt", content=b"Some Content 2") - local.rename("/File.txt", "Renamed File.txt") - - self.wait_sync(fail_if_timeout=False) - - assert local.exists("/Renamed File.txt") - assert not local.exists("/File.txt") - # Path don't change on Nuxeo - assert local.get_remote_id("/Renamed File.txt") - assert len(local.get_children_info("/")) == 5 - assert len(remote.get_children_info(self.workspace)) == 5 - """ - - """ - def test_local_rename_file_after_create_detected(self): - # MS Office 2010+ creates a tmp file with 8 chars - # and move it right after - local = self.local_1 - remote = self.remote_document_client_1 - marker = False - - def insert_local_state(info, parent_path): - nonlocal marker - if info.name == "File.txt" and not marker: - local.rename("/File.txt", "Renamed File.txt") - sleep(2) - marker = True - EngineDAO.insert_local_state(self.engine_1.dao, info, parent_path) - - with patch.object( - self.engine_1.dao, "insert_local_state", new=insert_local_state - ): - # Might be temporary ignored once - self.engine_1.queue_manager._error_interval = 3 - local.make_file("/", "File.txt", content=b"Some Content 2") - sleep(10) - self.wait_sync(fail_if_timeout=False) - - assert local.exists("/Renamed File.txt") - assert not local.exists("/File.txt") - - # Path doesn't change on Nuxeo - assert local.get_remote_id("/Renamed File.txt") - assert len(local.get_children_info("/")) == 5 - assert len(remote.get_children_info(self.workspace)) == 5 - """ - - def test_local_move_folder(self): - local = self.local_1 - remote = self.remote_document_client_1 - - # Save the uid of some files and folders prior to move - folder_1 = remote.get_info("/Original Folder 1").uid - folder_2 = remote.get_info("/Original Folder 2").uid - file_1_1 = remote.get_info("/Original Folder 1/Original File 1.1.txt").uid - folder_1_1 = remote.get_info("/Original Folder 1/Sub-Folder 1.1").uid - - # Move a non empty folder with some content - local.move("/Original Folder 1", "/Original Folder 2") - assert not local.exists("/Original Folder 1") - assert local.exists("/Original Folder 2/Original Folder 1") - - # Synchronize: only the folder move is detected: all - # the descendants are automatically realigned - self.wait_sync() - - # The server folder has been moved: the uid stays the same - # The parent folder is now folder 2 - assert remote.get_info(folder_1).parent_uid == folder_2 - - # The content of the renamed folder is left unchanged - file_1_1_info = remote.get_info(file_1_1) - assert file_1_1_info.name == "Original File 1.1.txt" - assert file_1_1_info.parent_uid == folder_1 - - folder_1_1_info = remote.get_info(folder_1_1) - assert folder_1_1_info.name == "Sub-Folder 1.1" - assert folder_1_1_info.parent_uid == folder_1 - - assert len(local.get_children_info("/Original Folder 2/Original Folder 1")) == 3 - assert len(remote.get_children_info(folder_1)) == 3 - assert len(local.get_children_info("/")) == 3 - assert len(remote.get_children_info(self.workspace)) == 3 - - """ - def test_concurrent_local_rename_folder(self): - local = self.local_1 - remote = self.remote_document_client_1 - - # Save the uid of some files and folders prior to renaming - folder_1 = remote.get_info("/Original Folder 1").uid - file_1_1 = remote.get_info("/Original Folder 1/Original File 1.1.txt").uid - folder_2 = remote.get_info("/Original Folder 2").uid - file_3 = remote.get_info("/Original Folder 2/Original File 3.txt").uid - - # Rename a non empty folders concurrently - local.rename("/Original Folder 1", "Renamed Folder 1") - local.rename("/Original Folder 2", "Renamed Folder 2") - assert not local.exists("/Original Folder 1") - assert local.exists("/Renamed Folder 1") - assert not local.exists("/Original Folder 2") - assert local.exists("/Renamed Folder 2") - - # Synchronize: only the folder renamings are detected: all - # the descendants are automatically realigned - self.wait_sync() - - # The server folders have been renamed: the uid stays the same - folder_1_info = remote.get_info(folder_1) - assert folder_1_info.name == "Renamed Folder 1" - - folder_2_info = remote.get_info(folder_2) - assert folder_2_info.name == "Renamed Folder 2" - - # The content of the folder has been left unchanged - file_1_1_info = remote.get_info(file_1_1) - assert file_1_1_info.name == "Original File 1.1.txt" - assert file_1_1_info.parent_uid == folder_1 - - file_3_info = remote.get_info(file_3) - assert file_3_info.name == "Original File 3.txt" - assert file_3_info.parent_uid == folder_2 - - assert len(local.get_children_info("/Renamed Folder 1")) == 3 - assert len(remote.get_children_info(folder_1)) == 3 - assert len(local.get_children_info("/Renamed Folder 2")) == 1 - assert len(remote.get_children_info(folder_2)) == 1 - assert len(local.get_children_info("/")) == 4 - assert len(remote.get_children_info(self.workspace)) == 4 - """ - - """ - def test_local_replace(self): - local = LocalTest(self.local_test_folder_1) - remote = self.remote_document_client_1 - self.engine_1.start() - self.wait_sync(wait_for_async=True) - - # Create 2 files with the same name but different content - # in separate folders - local.make_file("/", "test.odt", content=b"Some content.") - local.make_folder("/", "folder") - shutil.copyfile( - self.local_test_folder_1 / "test.odt", - self.local_test_folder_1 / "folder" / "test.odt", - ) - local.update_content("/folder/test.odt", content=b"Updated content.") - - # Copy the newest file to the root workspace and synchronize it - sync_root = self.local_nxdrive_folder_1 / self.workspace_title - test_file = self.local_test_folder_1 / "folder" / "test.odt" - shutil.copyfile(test_file, sync_root / "test.odt") - self.wait_sync() - assert remote.exists("/test.odt") - assert remote.get_content("/test.odt") == b"Updated content." - - # Copy the oldest file to the root workspace and synchronize it. - # First wait a bit for file time stamps to increase enough. - time.sleep(OS_STAT_MTIME_RESOLUTION) - shutil.copyfile(self.local_test_folder_1 / "test.odt", sync_root / "test.odt") - self.wait_sync() - assert remote.exists("/test.odt") - assert remote.get_content("/test.odt") == b"Some content." - """ - - """ - def test_local_rename_sync_root_folder(self): - # Use the Administrator to be able to introspect the container of the - # test workspace. - remote = DocRemote( - self.nuxeo_url, - env.NXDRIVE_TEST_USERNAME, - "nxdrive-test-administrator-device", - self.version, - password=env.NXDRIVE_TEST_PASSWORD, - base_folder=self.workspace, - ) - folder_1_uid = remote.get_info("/Original Folder 1").uid - - # Create new clients to be able to introspect the test sync root - toplevel_local_client = LocalTest(self.local_nxdrive_folder_1) - - toplevel_local_client.rename( - Path(self.workspace_title), "Renamed Nuxeo Drive Test Workspace" - ) - self.wait_sync() - - workspace_info = remote.get_info(self.workspace) - assert workspace_info.name == "Renamed Nuxeo Drive Test Workspace" - - folder_1_info = remote.get_info(folder_1_uid) - assert folder_1_info.name == "Original Folder 1" - assert folder_1_info.parent_uid == self.workspace - assert len(remote.get_children_info(self.workspace)) == 4 - """ - - def test_local_move_with_remote_error(self): - local = self.local_1 - remote = self.remote_document_client_1 - - # Check local folder - assert local.exists("/Original Folder 1") - - # Simulate server error - bad_remote = self.get_bad_remote() - error = HTTPError(status=500, message="Mock server error") - bad_remote.make_server_call_raise(error) - - with patch.object(self.engine_1, "remote", new=bad_remote): - local.rename("/Original Folder 1", "OSErrorTest") - self.wait_sync(timeout=5, fail_if_timeout=False) - folder_1 = remote.get_info("/Original Folder 1") - assert folder_1.name == "Original Folder 1" - assert local.exists("/OSErrorTest") - - # Set engine online as starting from here the behavior is restored - self.engine_1.set_offline(value=False) - - self.wait_sync() - folder_1 = remote.get_info(folder_1.uid) - assert folder_1.name == "OSErrorTest" - assert local.exists("/OSErrorTest") - assert len(local.get_children_info("/OSErrorTest")) == 3 - assert len(remote.get_children_info(folder_1.uid)) == 3 - assert len(local.get_children_info("/")) == 4 - assert len(remote.get_children_info(self.workspace)) == 4 - - # TODO: implement me once canDelete is checked in the synchronizer - # def test_local_move_sync_root_folder(self): - # pass diff --git a/tests/functional/test_readonly.py b/tests/functional/test_readonly.py deleted file mode 100644 index 9eeae4d247..0000000000 --- a/tests/functional/test_readonly.py +++ /dev/null @@ -1,536 +0,0 @@ -import shutil -from logging import getLogger -from pathlib import Path - -import pytest -from nuxeo.exceptions import Forbidden - -from nxdrive.constants import SYNC_ROOT, WINDOWS - -from ..markers import windows_only -from .conftest import FS_ITEM_ID_PREFIX, SYNC_ROOT_FAC_ID, OneUserTest, TwoUsersTest - -log = getLogger(__name__) - - -def touch(path: Path): - if WINDOWS: - path.parent.mkdir(exist_ok=True) - try: - path.write_bytes(b"Test") - except OSError: - log.warning("Unable to touch") - return False - return True - - -class TestReadOnly(OneUserTest): - def setUp(self): - self.engine_1.start() - self.wait_sync(wait_for_async=True) - - def test_file_add(self): - """ - Should not be able to create files in root folder. - On Windows, those files are ignored. - """ - - remote = self.remote_document_client_1 - - # Try to create the file - state = touch(self.local_nxdrive_folder_1 / "test.txt") - - if not WINDOWS: - # The creation must have failed - assert not state - else: - # The file is locally created and should be ignored - self.wait_sync(wait_for_async=True) - ignored = self.engine_1.dao.get_unsynchronizeds() - assert len(ignored) == 1 - assert ignored[0].local_path == Path("test.txt") - - # Check there is nothing uploaded to the server - assert not remote.get_children_info("/") - - def test_file_content_change(self): - """ - No upload server side but possible to change the file locally - without error, if the OS allows it (unlikely). - """ - - local = self.local_1 - remote = self.remote_document_client_1 - - # Create documents and sync - folder = remote.make_folder("/", "folder") - file = remote.make_file(folder, "foo.txt", content=b"42") - self.set_readonly(self.user_1, f"{self.ws.path}/folder") - self.wait_sync(wait_for_async=True) - assert remote.exists("/folder") - assert remote.exists("/folder/foo.txt") - - # Try to change the file content locally - with pytest.raises(OSError): - local.abspath("/folder/foo.txt").write_bytes(b"Change") - - with pytest.raises(OSError): - local.update_content("/folder/foo.txt", b"Locally changed") - - # Try to change the file content remotely - with pytest.raises(Forbidden): - remote.update(file, properties={"note:note": "Remotely changed"}) - - def test_file_delete(self): - """Local deletions are filtered.""" - - remote = self.remote_document_client_1 - local = self.local_1 - - folder = remote.make_folder("/", "test-ro") - remote.make_file(folder, "test.txt", content=b"42") - self.set_readonly(self.user_1, f"{self.ws.path}/test-ro") - self.wait_sync(wait_for_async=True) - assert local.exists("/test-ro/test.txt") - assert not self.engine_1.dao.get_filters() - - # Delete the file and check if is re-downloaded - local.unset_readonly("/test-ro") - local.delete("/test-ro/test.txt") - self.wait_sync(wait_win=True) - assert not local.exists("/test-ro/test.txt") - - # Check that it is filtered - assert self.engine_1.dao.get_filters() - - # Check the file is still present on the server - assert remote.exists("/test-ro/test.txt") - - def test_file_move_from_ro_to_ro(self): - """ - Local moves from a read-only folder to a read-only folder. - - source is ignored - - destination is ignored - - Server side: no changes. - Client side: no errors. - """ - - remote = self.remote_document_client_1 - local = self.local_1 - - # folder-src is the source from where documents will be moved, RO - # folder-dst is the destination where documents will be moved, RO - src = remote.make_folder("/", "folder-src") - remote.make_folder("/", "folder-dst") - remote.make_file(src, "here.txt", content=b"stay here") - self.set_readonly(self.user_1, self.ws.path) - self.wait_sync(wait_for_async=True) - assert remote.exists("/folder-src/here.txt") - assert remote.exists("/folder-dst") - - doc_abs = local.abspath("/folder-src") / "here.txt" - dst_abs = local.abspath("/folder-dst") - if not WINDOWS: - # The move should fail - with pytest.raises(OSError): - shutil.move(doc_abs, dst_abs) - else: - # The move happens - shutil.move(doc_abs, dst_abs) - self.wait_sync(wait_win=True) - - # Check that nothing has changed - assert not local.exists("/folder-src/here.txt") - assert local.exists("/folder-dst/here.txt") - assert remote.exists("/folder-src/here.txt") - - # But also, check that the server received nothing - assert not remote.exists("/folder-dst/here.txt") - - # We should not have any error - assert not self.engine_1.dao.get_errors(limit=0) - - def test_file_move_from_ro_to_rw(self): - """ - Local moves from a read-only folder to a read-write folder. - - source is ignored - - destination is seen as a creation - - Server side: only the files in the RW folder are created. - Client side: no errors. - - Associated ticket: NXDRIVE-836 - """ - - remote = self.remote_document_client_1 - local = self.local_1 - - # folder-ro is the source from where documents will be moved, RO - # folder-rw is the destination where documents will be moved, RW - src = remote.make_folder("/", "folder-ro") - remote.make_folder("/", "folder-rw") - remote.make_file(src, "here.txt", content=b"stay here") - self.set_readonly(self.user_1, f"{self.ws.path}/folder-ro") - self.wait_sync(wait_for_async=True) - assert local.exists("/folder-ro/here.txt") - assert local.exists("/folder-rw") - - doc_abs = local.abspath("/folder-ro") / "here.txt" - dst_abs = local.abspath("/folder-rw") - if not WINDOWS: - # The move should fail - with pytest.raises(OSError): - shutil.move(doc_abs, dst_abs) - else: - # The move happens - shutil.move(doc_abs, dst_abs) - self.wait_sync(wait_win=True) - - # Check that nothing has changed - assert not local.exists("/folder-ro/here.txt") - assert local.exists("/folder-rw/here.txt") - assert remote.exists("/folder-ro/here.txt") - - # But also, check that the server received the new document because - # the destination is RW - assert remote.exists("/folder-rw/here.txt") - - # We should not have any error - assert not self.engine_1.dao.get_errors(limit=0) - - """ - @pytest.mark.skip(True, reason="TODO NXDRIVE-740") - def test_file_move_from_rw_to_ro(self): - pass - """ - - """ - def test_file_rename(self): - "" - No upload server side but possible to rename the file locally - without error. - "" - - local = self.local_1 - remote = self.remote_document_client_1 - - # Create documents and sync - folder = remote.make_folder("/", "folder") - remote.make_file(folder, "foo.txt", content=b"42") - self.set_readonly(self.user_1, f"{self.ws.path}/folder") - self.wait_sync(wait_for_async=True) - assert local.exists("/folder") - assert local.exists("/folder/foo.txt") - - # Locally rename the file - doc = local.abspath("/folder") / "foo.txt" - dst = local.abspath("/folder") / "bar.txt" - if not WINDOWS: - # The rename should fail - with pytest.raises(OSError): - doc.rename(dst) - else: - # The rename happens locally but nothing remotely - doc.rename(dst) - self.wait_sync() - assert remote.exists("/folder/foo.txt") - assert not remote.exists("/folder/bar.txt") - - # We should not have any error - assert not self.engine_1.dao.get_errors(limit=0) - """ - - def test_folder_add(self): - """ - Should not be able to create folders in root folder. - On Windows, those folders are ignored. - """ - - remote = self.remote_document_client_1 - folder = self.local_nxdrive_folder_1 / "foo" / "test.txt" - - if not WINDOWS: - # The creation must have failed - assert not touch(folder) - else: - # The folder and its child are locally created - touch(folder) - - # Sync and check that it is ignored - self.wait_sync(wait_for_async=True) - ignored = [ - d.local_path.as_posix() for d in self.engine_1.dao.get_unsynchronizeds() - ] - assert sorted(ignored) == ["foo", "foo/test.txt"] - - # Check there is nothing uploaded to the server - assert not remote.get_children_info("/") - - def test_folder_delete(self): - """Local deletions are filtered.""" - - remote = self.remote_document_client_1 - local = self.local_1 - - folder = remote.make_folder("/", "test-ro") - remote.make_folder(folder, "foo") - self.set_readonly(self.user_1, f"{self.ws.path}/test-ro") - self.wait_sync(wait_for_async=True) - assert local.exists("/test-ro/foo") - assert not self.engine_1.dao.get_filters() - - # Delete the file and check if is re-downloaded - local.unset_readonly("/test-ro") - local.delete("/test-ro/foo") - self.wait_sync(wait_win=True) - assert not local.exists("/test-ro/foo") - - # Check that it is filtered - assert self.engine_1.dao.get_filters() - - # Check the file is still present on the server - assert remote.exists("/test-ro/foo") - - def test_folder_move_from_ro_to_ro(self): - """ - Local moves from a read-only folder to a read-only folder. - - source is ignored - - destination is ignored - - Server side: no changes. - Client side: no errors. - """ - - remote = self.remote_document_client_1 - local = self.local_1 - - # folder-src is the source that will be moved, RO - # folder-dst is the destination, RO - folder_ro1 = remote.make_folder("/", "folder-src") - folder_ro2 = remote.make_folder("/", "folder-dst") - remote.make_file(folder_ro1, "here.txt", content=b"stay here") - remote.make_file(folder_ro2, "there.txt", content=b"stay here too") - self.set_readonly(self.user_1, self.ws.path) - self.wait_sync(wait_for_async=True) - assert local.exists("/folder-src/here.txt") - assert remote.exists("/folder-dst") - - src = local.abspath("/folder-src") - dst = local.abspath("/folder-dst") - if not WINDOWS: - # The move should fail - with pytest.raises(OSError): - shutil.move(src, dst) - else: - # The move happens - shutil.move(src, dst) - self.wait_sync(wait_win=True) - - # Check that nothing has changed - assert not local.exists("/folder-src") - assert local.exists("/folder-dst/there.txt") - assert local.exists("/folder-dst/folder-src/here.txt") - assert remote.exists("/folder-src/here.txt") - assert remote.exists("/folder-dst/there.txt") - - # But also, check that the server received nothing - assert not remote.exists("/folder-dst/folder-src") - - # We should not have any error - assert not self.engine_1.dao.get_errors(limit=0) - - def test_folder_move_from_ro_to_rw(self): - """ - Local moves from a read-only folder to a read-write folder. - - source is ignored - - destination is filtered - - Server side: no changes. - Client side: no errors. - """ - - remote = self.remote_document_client_1 - local = self.local_1 - - # folder-src is the source that will be moved, RO - # folder-dst is the destination, RO - folder_ro1 = remote.make_folder("/", "folder-src") - folder_ro2 = remote.make_folder("/", "folder-dst") - remote.make_file(folder_ro1, "here.txt", content=b"stay here") - remote.make_file(folder_ro2, "there.txt", content=b"stay here too") - self.set_readonly(self.user_1, self.ws.path) - self.wait_sync(wait_for_async=True) - assert local.exists("/folder-src/here.txt") - assert remote.exists("/folder-dst") - - src = local.abspath("/folder-src") - dst = local.abspath("/folder-dst") - if not WINDOWS: - # The move should fail - with pytest.raises(OSError): - shutil.move(src, dst) - else: - # The move happens - shutil.move(src, dst) - self.wait_sync(wait_win=True) - - # Check that nothing has changed - assert not local.exists("/folder-src") - assert local.exists("/folder-dst/there.txt") - assert local.exists("/folder-dst/folder-src/here.txt") - assert remote.exists("/folder-src/here.txt") - assert remote.exists("/folder-dst/there.txt") - assert not remote.exists("/folder-dst/folder-src") - assert not remote.exists("/folder-dst/folder-src/here.txt") - - # We should not have any error - assert not self.engine_1.dao.get_errors(limit=0) - - # Check that it is filtered - assert self.engine_1.dao.get_filters() - doc_pair = remote.get_info(folder_ro1) - ref = ( - f"{SYNC_ROOT}/{SYNC_ROOT_FAC_ID}" - f"{doc_pair.root}/{FS_ITEM_ID_PREFIX}{doc_pair.uid}" - ) - assert self.engine_1.dao.is_filter(ref) - - """ - @pytest.mark.skip(True, reason="TODO NXDRIVE-740") - def test_folder_move_from_rw_to_ro(self): - pass - """ - - def test_folder_rename(self): - """ - No upload server side but possible to rename the folder locally - without error, and it will be re-renamed. - """ - - local = self.local_1 - remote = self.remote_document_client_1 - - # Create documents and sync - remote.make_folder("/", "foo") - self.set_readonly(self.user_1, self.ws.path) - self.wait_sync(wait_for_async=True) - assert local.exists("/foo") - - # Check can_delete flag in pair state - state = self.get_dao_state_from_engine_1("foo") - assert not state.remote_can_delete - - # Locally rename the folder - src = local.abspath("/foo") - dst = src.with_name("bar") - if not WINDOWS: - # The rename should fail - with pytest.raises(OSError): - src.rename(dst) - else: - # The rename happens locally but: - # - nothing remotely - # - the folder is re-renamed to its original name - src.rename(dst) - self.wait_sync() - assert local.exists("/foo") - assert not local.exists("/bar") - assert remote.exists("/foo") - assert not remote.exists("/bar") - - # We should not have any error - assert not self.engine_1.dao.get_errors(limit=0) - - @windows_only - def test_nxdrive_836(self): - """ - NXDRIVE-836: Bad behaviors with read-only documents on Windows. - - Scenario: - - 1. User1: Server: Create folder "ReadFolder" and share with User2 with read - permission and upload doc/xml files into it - 2. User1: Server: Create folder "MEFolder" and share with User2 with Manage - Everything permission - 3. User2: Server: Enable Nuxeo Drive Synchronization for both folders - 4. User2: Client: Launch Drive client and Wait for sync completion - 5. User2: Client: Move the files(drag and drop) from "ReadFolder" to "MEFolder" - 6. User1: Server: Remove the read permission for "ReadFolder" for User2 - 7. User2: Client: Remove the read only attribute for moved files in "MEFolder" - and Edit the files. - - Expected Result: Files should sync with the server. - """ - - local = self.local_1 - remote = self.remote_document_client_1 - - # Create documents and sync - remote.make_folder("/", "ReadFolder") - remote.make_folder("/", "MEFolder") - remote.make_file("/ReadFolder", "shareme.doc", content=b"Scheherazade") - self.set_readonly(self.user_1, f"{self.ws.path}/ReadFolder") - self.wait_sync(wait_for_async=True) - - # Checks - for client in (remote, local): - for doc in ("/ReadFolder/shareme.doc", "/MEFolder"): - assert client.exists(doc) - - # Move - src = local.abspath("/ReadFolder/shareme.doc") - dst = local.abspath("/MEFolder") - shutil.move(src, dst) - self.wait_sync(wait_win=True) - - # Remove read-only - self.set_readonly(self.user_1, f"{self.ws.path}/ReadFolder", grant=False) - self.wait_sync(wait_for_async=True) - local.unset_readonly("/MEFolder/shareme.doc") - - # Checks - assert remote.exists("/ReadFolder/shareme.doc") - assert remote.exists("/MEFolder/shareme.doc") - assert not self.engine_1.dao.get_errors(limit=0) - assert not self.engine_1.dao.get_unsynchronizeds() - - -class TestReadOnly2(TwoUsersTest): - """ - def test_document_locked(self): - ""Check locked documents: they are read-only."" - - self.engine_1.start() - self.wait_sync(wait_for_async=True) - remote = self.remote_document_client_1 - remote.make_folder("/", "Test locking") - remote.make_file("/Test locking", "myDoc.odt", content=b"Some content") - filepath = "/Test locking/myDoc.odt" - - self.wait_sync(wait_for_async=True) - - # Check readonly flag is not set for a document that isn't locked - user1_file_path = self.sync_root_folder_1 / filepath.lstrip("/") - assert user1_file_path.exists() - assert touch(user1_file_path) - self.wait_sync() - - # Check readonly flag is not set for a document locked by the - # current user - remote.lock(filepath) - self.wait_sync(wait_for_async=True) - assert touch(user1_file_path) - remote.unlock(filepath) - self.wait_sync(wait_for_async=True) - - # Check readonly flag is set for a document locked by another user - self.remote_document_client_2.lock(filepath) - self.wait_sync(wait_for_async=True) - assert not touch(user1_file_path) - - # Check readonly flag is unset for a document unlocked by another user - self.remote_document_client_2.unlock(filepath) - self.wait_sync(wait_for_async=True) - assert touch(user1_file_path) - """ diff --git a/tests/functional/test_reinit_database.py b/tests/functional/test_reinit_database.py deleted file mode 100644 index 86935e0965..0000000000 --- a/tests/functional/test_reinit_database.py +++ /dev/null @@ -1,118 +0,0 @@ -import time -from pathlib import Path - -from .conftest import OS_STAT_MTIME_RESOLUTION, OneUserTest - - -class TestReinitDatabase(OneUserTest): - def setUp(self): - self.local = self.local_1 - self.remote = self.remote_document_client_1 - - # Make a folder and a file - self.remote.make_folder("/", "Test folder") - self.file = self.remote.make_file( - "/Test folder", "Test.txt", content=b"This is some content" - ) - - # Start engine and wait for synchronization - self.engine_1.start() - self.wait_sync(wait_for_async=True) - - assert self.local.exists("/Test folder") - assert self.local.exists("/Test folder/Test.txt") - - # Destroy database but keep synced files as we just need to test the database - self.unbind_engine(1, purge=False) - self.bind_engine(1, start_engine=False) - - def _check_states(self): - rows = self.engine_1.dao.get_states_from_partial_local(Path()) - for row in rows: - assert row.pair_state == "synchronized" - - def _check_conflict_detection(self): - assert len(self.engine_1.dao.get_conflicts()) == 1 - - """ - def test_synchronize_folderish_and_same_digest(self): - # Start engine and wait for synchronization - self.engine_1.start() - self.wait_sync(wait_for_async=True) - - # Check everything is synchronized - self._check_states() - """ - - def test_synchronize_remote_change(self): - # Modify the remote file - self.remote.update(self.file, properties={"note:note": "Content has changed"}) - - # Start engine and wait for synchronization - self.engine_1.start() - self.wait_sync(wait_for_async=True, timeout=5, fail_if_timeout=False) - - # Check that a conflict is detected - self._check_conflict_detection() - file_state = self.engine_1.dao.get_state_from_local( - Path(self.workspace_title) / "Test folder/Test.txt" - ) - assert file_state - assert file_state.pair_state == "conflicted" - - # Assert content of the local file has not changed - content = self.local.get_content("/Test folder/Test.txt") - assert content == b"This is some content" - - def test_synchronize_local_change(self): - # Modify the local file - time.sleep(OS_STAT_MTIME_RESOLUTION) - self.local.update_content("/Test folder/Test.txt", b"Content has changed") - - # Start engine and wait for synchronization - self.engine_1.start() - self.wait_sync(timeout=5, fail_if_timeout=False) - - # Check that a conflict is detected - self._check_conflict_detection() - file_state = self.engine_1.dao.get_state_from_local( - Path(self.workspace_title) / "Test folder/Test.txt" - ) - assert file_state - assert file_state.pair_state == "conflicted" - - # Assert content of the remote file has not changed - content = self.remote.get_note(self.file) - assert content == b"This is some content" - - """ - def test_synchronize_remote_and_local_change(self): - # Modify the remote file - self.remote.update( - self.file, properties={"note:note": "Content has remotely changed"} - ) - - # Modify the local file - time.sleep(OS_STAT_MTIME_RESOLUTION) - self.local.update_content( - "/Test folder/Test.txt", b"Content has locally changed" - ) - - # Start engine and wait for synchronization - self.engine_1.start() - self.wait_sync(wait_for_async=True, timeout=5, fail_if_timeout=False) - - # Check that a conflict is detected - self._check_conflict_detection() - file_state = self.engine_1.dao.get_state_from_local( - Path(self.workspace_title) / "Test folder/Test.txt" - ) - assert file_state - assert file_state.pair_state == "conflicted" - - # Assert content of the local and remote files has not changed - content = self.local.get_content("/Test folder/Test.txt") - assert content == b"Content has locally changed" - content = self.remote.get_note(self.file) - assert content == b"Content has remotely changed" - """ diff --git a/tests/functional/test_remote_move_and_rename.py b/tests/functional/test_remote_move_and_rename.py deleted file mode 100644 index bb0313faef..0000000000 --- a/tests/functional/test_remote_move_and_rename.py +++ /dev/null @@ -1,891 +0,0 @@ -import os.path -import time -from pathlib import Path -from shutil import copyfile -from unittest.mock import patch - -import pytest - -from nxdrive.engine.engine import Engine - -from .. import env -from . import DocRemote, LocalTest -from .conftest import REMOTE_MODIFICATION_TIME_RESOLUTION, SYNC_ROOT_FAC_ID, OneUserTest - - -class TestRemoteMoveAndRename(OneUserTest): - def setUp(self): - """ - Sets up the following remote hierarchy: - - Nuxeo Drive Test Workspace - |-- Original Fil\xe9 1.odt - |-- Original File 2.odt - |-- Original Fold\xe9r 1 - | |-- Sub-Folder 1.1 - | |-- Sub-Folder 1.2 - | |-- Original File 1.1.odt - |-- Original Folder 2 - | |-- Original File 3.odt - """ - - remote = self.remote_1 - - self.workspace_id = f"{SYNC_ROOT_FAC_ID}{self.workspace}" - self.workspace_path = Path(self.workspace_title) - - self.file_1_id = remote.make_file( - self.workspace_id, "Original Fil\xe9 1.odt", content=b"Some Content 1" - ).uid - - self.folder_1_id = remote.make_folder( - self.workspace_id, "Original Fold\xe9r 1" - ).uid - self.folder_1_1_id = remote.make_folder(self.folder_1_id, "Sub-Folder 1.1").uid - self.file_1_1_id = remote.make_file( - self.folder_1_id, "Original File 1.1.odt", content=b"Some Content 1" - ).uid - - self.folder_2_id = remote.make_folder( - self.workspace_id, "Original Folder 2" - ).uid - self.file_3_id = remote.make_file( - self.folder_2_id, "Original File 3.odt", content=b"Some Content 3" - ).uid - - self.engine_1.start() - self.wait_sync(wait_for_async=True) - - def get_state(self, remote): - return self.engine_1.dao.get_normal_state_from_remote(remote) - - def test_remote_rename_file(self): - remote = self.remote_1 - local = self.local_1 - - file_1_docref = self.file_1_id.split("#")[-1] - file_1_version = remote.get_info(file_1_docref).version - - # Rename /Original Fil\xe9 1.odt to /Renamed File 1.odt - remote.rename(self.file_1_id, "Renamed File 1.odt") - assert remote.get_fs_info(self.file_1_id).name == "Renamed File 1.odt" - - self.wait_sync(wait_for_async=True) - - version = remote.get_info(file_1_docref).version - - # Check remote file name - assert remote.get_fs_info(self.file_1_id).name == "Renamed File 1.odt" - assert file_1_version == version - - # Check local file name - assert not local.exists("/Original Fil\xe9 1.odt") - assert local.exists("/Renamed File 1.odt") - - # Check file state - file_1_state = self.get_state(self.file_1_id) - assert file_1_state.local_path == self.workspace_path / "Renamed File 1.odt" - assert file_1_state.local_name == "Renamed File 1.odt" - - # Rename 'Renamed File 1.odt' to 'Renamed Again File 1.odt' - # and 'Original File 1.1.odt' to - # 'Renamed File 1.1.odt' at the same time as they share - # the same digest but do not live in the same folder - # Wait for 1 second to make sure the file's last modification time - # will be different from the pair state's last remote update time - time.sleep(REMOTE_MODIFICATION_TIME_RESOLUTION) - remote.rename(self.file_1_id, "Renamed Again File 1.odt") - assert remote.get_fs_info(self.file_1_id).name == "Renamed Again File 1.odt" - remote.rename(self.file_1_1_id, "Renamed File 1.1 \xe9.odt") - assert remote.get_fs_info(self.file_1_1_id).name == "Renamed File 1.1 \xe9.odt" - - self.wait_sync(wait_for_async=True) - - info = remote.get_fs_info(self.file_1_id) - assert info.name == "Renamed Again File 1.odt" - assert remote.get_fs_info(self.file_1_1_id).name == "Renamed File 1.1 \xe9.odt" - version = remote.get_info(file_1_docref).version - assert file_1_version == version - - # Check local file names - assert not local.exists("/Renamed File 1.odt") - assert local.exists("/Renamed Again File 1.odt") - assert not local.exists("/Original Fold\xe9r 1/Original File 1.1.odt") - assert local.exists("/Original Fold\xe9r 1/Renamed File 1.1 \xe9.odt") - - # Check file states - file_1_state = self.get_state(self.file_1_id) - assert file_1_state.local_path == ( - self.workspace_path / "Renamed Again File 1.odt" - ) - assert file_1_state.local_name == "Renamed Again File 1.odt" - file_1_1_state = self.get_state(self.file_1_1_id) - assert file_1_1_state.local_path == ( - self.workspace_path / "Original Fold\xe9r 1/Renamed File 1.1 \xe9.odt" - ) - assert file_1_1_state.local_name == "Renamed File 1.1 \xe9.odt" - - # Test for encoding error regressions - assert self.engine_1.dao._get_recursive_condition(file_1_1_state) - assert self.engine_1.dao._get_recursive_remote_condition(file_1_1_state) - - # Check parents of renamed files to ensure it is an actual rename - # that has been performed and not a move - file_1_local_info = local.get_info("/Renamed Again File 1.odt") - assert file_1_local_info.filepath.parent == self.sync_root_folder_1 - - file_1_1_local_info = local.get_info( - "/Original Fold\xe9r 1/Renamed File 1.1 \xe9.odt" - ) - assert file_1_1_local_info.filepath.parent == ( - self.sync_root_folder_1 / "Original Fold\xe9r 1" - ) - - """ - def test_remote_rename_update_content_file(self): - remote = self.remote_1 - local = self.local_1 - - # Update the content of /'Original Fil\xe9 1.odt' and rename it - # to /Renamed File 1.odt - remote.update_content( - self.file_1_id, b"Updated content", filename="Renamed File 1.odt" - ) - assert remote.get_fs_info(self.file_1_id).name == "Renamed File 1.odt" - assert remote.get_content(self.file_1_id) == b"Updated content" - - self.wait_sync(wait_for_async=True) - - # Check local file name - assert not local.exists("/Original Fil\xe9 1.odt") - assert local.exists("/Renamed File 1.odt") - assert local.get_content("/Renamed File 1.odt") == b"Updated content" - """ - - def test_remote_move_file(self): - remote = self.remote_1 - local = self.local_1 - - # Move /Original Fil\xe9 1.odt - # to /Original Fold\xe9r 1/Original Fil\xe9 1.odt - remote.move(self.file_1_id, self.folder_1_id) - assert remote.get_fs_info(self.file_1_id).name == "Original Fil\xe9 1.odt" - assert remote.get_fs_info(self.file_1_id).parent_uid == self.folder_1_id - - self.wait_sync(wait_for_async=True) - - # Check remote file - assert remote.get_fs_info(self.file_1_id).name == "Original Fil\xe9 1.odt" - assert remote.get_fs_info(self.file_1_id).parent_uid == self.folder_1_id - - # Check local file - assert not local.exists("/Original Fil\xe9 1.odt") - assert local.exists("/Original Fold\xe9r 1/Original Fil\xe9 1.odt") - file_1_local_info = local.get_info( - "/Original Fold\xe9r 1/Original Fil\xe9 1.odt" - ) - file_1_parent_path = file_1_local_info.filepath.parent - assert file_1_parent_path == self.sync_root_folder_1 / "Original Fold\xe9r 1" - - # Check file state - file_1_state = self.get_state(self.file_1_id) - assert file_1_state.local_path == ( - self.workspace_path / "Original Fold\xe9r 1/Original Fil\xe9 1.odt" - ) - assert file_1_state.local_name == "Original Fil\xe9 1.odt" - - def test_remote_move_and_rename_file(self): - remote = self.remote_1 - local = self.local_1 - - # Rename /'Original Fil\xe9 1.odt' to /Renamed File 1.odt - remote.rename(self.file_1_id, "Renamed File 1 \xe9.odt") - remote.move(self.file_1_id, self.folder_1_id) - assert remote.get_fs_info(self.file_1_id).name == "Renamed File 1 \xe9.odt" - assert remote.get_fs_info(self.file_1_id).parent_uid == self.folder_1_id - - self.wait_sync(wait_for_async=True) - - # Check remote file - assert remote.get_fs_info(self.file_1_id).name == "Renamed File 1 \xe9.odt" - assert remote.get_fs_info(self.file_1_id).parent_uid == self.folder_1_id - - # Check local file - assert not local.exists("/Original Fil\xe9 1.odt") - assert local.exists("/Original Fold\xe9r 1/Renamed File 1 \xe9.odt") - file_1_local_info = local.get_info( - "/Original Fold\xe9r 1/Renamed File 1 \xe9.odt" - ) - file_1_parent_path = file_1_local_info.filepath.parent - assert file_1_parent_path == self.sync_root_folder_1 / "Original Fold\xe9r 1" - - # Check file state - file_1_state = self.get_state(self.file_1_id) - assert file_1_state.local_path == ( - self.workspace_path / "Original Fold\xe9r 1/Renamed File 1 \xe9.odt" - ) - assert file_1_state.local_name == "Renamed File 1 \xe9.odt" - - def test_remote_rename_folder(self): - remote = self.remote_1 - local = self.local_1 - - # Rename a non empty folder with some content - remote.rename(self.folder_1_id, "Renamed Folder 1 \xe9") - assert remote.get_fs_info(self.folder_1_id).name == "Renamed Folder 1 \xe9" - - # Synchronize: only the folder renaming is detected: all - # the descendants are automatically realigned - self.wait_sync(wait_for_async=True) - - # The client folder has been renamed - assert not local.exists("/Original Fold\xe9r 1") - assert local.exists("/Renamed Folder 1 \xe9") - - # The content of the renamed folder is left unchanged - # Check child name - assert local.exists("/Renamed Folder 1 \xe9/Original File 1.1.odt") - file_1_1_local_info = local.get_info( - "/Renamed Folder 1 \xe9/Original File 1.1.odt" - ) - file_1_1_parent_path = file_1_1_local_info.filepath.parent - assert file_1_1_parent_path == ( - self.sync_root_folder_1 / "Renamed Folder 1 \xe9" - ) - - # Check child state - file_1_1_state = self.get_state(self.file_1_1_id) - assert file_1_1_state.local_path == ( - self.workspace_path / "Renamed Folder 1 \xe9/Original File 1.1.odt" - ) - assert file_1_1_state.local_name == "Original File 1.1.odt" - - # Check child name - assert local.exists("/Renamed Folder 1 \xe9/Sub-Folder 1.1") - folder_1_1_local_info = local.get_info("/Renamed Folder 1 \xe9/Sub-Folder 1.1") - folder_1_1_parent_path = folder_1_1_local_info.filepath.parent - assert folder_1_1_parent_path == ( - self.sync_root_folder_1 / "Renamed Folder 1 \xe9" - ) - - # Check child state - folder_1_1_state = self.get_state(self.folder_1_1_id) - assert folder_1_1_state is not None - assert folder_1_1_state.local_path == ( - self.workspace_path / "Renamed Folder 1 \xe9/Sub-Folder 1.1" - ) - assert folder_1_1_state.local_name == "Sub-Folder 1.1" - - def test_remote_rename_case_folder(self): - remote = self.remote_1 - local = self.local_1 - - assert local.exists("/Original Fold\xe9r 1") - - remote.rename(self.folder_1_id, "Original folder 1") - self.wait_sync(wait_for_async=True) - assert local.exists("/Original folder 1") - - remote.rename(self.folder_1_id, "Original Fold\xe9r 1") - self.wait_sync(wait_for_async=True) - assert local.exists("/Original Fold\xe9r 1") - - """ - def test_remote_rename_case_folder_stopped(self): - remote = self.remote_1 - local = self.local_1 - self.engine_1.stop() - assert local.exists("/Original Fold\xe9r 1") - - remote.rename(self.folder_1_id, "Original folder 1") - self.engine_1.start() - self.wait_sync(wait_for_async=True) - assert local.exists("/Original folder 1") - - self.engine_1.stop() - remote.rename(self.folder_1_id, "Original Fold\xe9r 1") - self.engine_1.start() - self.wait_sync(wait_for_async=True) - assert local.exists("/Original Fold\xe9r 1") - """ - - def test_remote_move_folder(self): - remote = self.remote_1 - local = self.local_1 - - # Move a non empty folder with some content - remote.move(self.folder_1_id, self.folder_2_id) - remote_info = remote.get_fs_info(self.folder_1_id) - assert remote_info is not None - assert remote_info.name == "Original Fold\xe9r 1" - assert remote_info.parent_uid == self.folder_2_id - - # Synchronize: only the folder move is detected: all - # the descendants are automatically realigned - self.wait_sync(wait_for_async=True) - - # Check remote folder - remote_info = remote.get_fs_info(self.folder_1_id) - assert remote_info is not None - assert remote_info.name == "Original Fold\xe9r 1" - assert remote_info.parent_uid == self.folder_2_id - - # Check local folder - assert not local.exists("/Original Fold\xe9r 1") - assert local.exists("/Original Folder 2/Original Fold\xe9r 1") - folder_1_local_info = local.get_info("/Original Folder 2/Original Fold\xe9r 1") - assert folder_1_local_info.filepath.parent == ( - self.sync_root_folder_1 / "Original Folder 2" - ) - - # Check folder state - folder_1_state = self.get_state(self.folder_1_id) - assert folder_1_state.local_path == ( - self.workspace_path / "Original Folder 2/Original Fold\xe9r 1" - ) - assert folder_1_state.local_name == "Original Fold\xe9r 1" - - # The content of the renamed folder is left unchanged - assert local.exists( - "/Original Folder 2/Original Fold\xe9r 1/Original File 1.1.odt" - ) - file_1_1_local_info = local.get_info( - "/Original Folder 2/Original Fold\xe9r 1/Original File 1.1.odt" - ) - assert file_1_1_local_info.filepath.parent == ( - self.sync_root_folder_1 / "Original Folder 2" / "Original Fold\xe9r 1" - ) - - # Check child state - file_1_1_state = self.get_state(self.file_1_1_id) - assert file_1_1_state.local_path == ( - self.workspace_path - / "Original Folder 2" - / "Original Fold\xe9r 1/Original File 1.1.odt" - ) - assert file_1_1_state.local_name == "Original File 1.1.odt" - - # Check child name - assert local.exists("/Original Folder 2/Original Fold\xe9r 1/Sub-Folder 1.1") - folder_1_1_local_info = local.get_info( - "/Original Folder 2/Original Fold\xe9r 1/Sub-Folder 1.1" - ) - assert folder_1_1_local_info.filepath.parent == ( - self.sync_root_folder_1 / "Original Folder 2" / "Original Fold\xe9r 1" - ) - - # Check child state - folder_1_1_state = self.get_state(self.folder_1_1_id) - assert folder_1_1_state.local_path == ( - self.workspace_path - / "Original Folder 2" - / "Original Fold\xe9r 1/Sub-Folder 1.1" - ) - assert folder_1_1_state.local_name == "Sub-Folder 1.1" - - """ - def test_concurrent_remote_rename_folder(self): - remote = self.remote_1 - local = self.local_1 - - # Rename non empty folders concurrently - remote.rename(self.folder_1_id, "Renamed Folder 1") - assert remote.get_fs_info(self.folder_1_id).name == "Renamed Folder 1" - remote.rename(self.folder_2_id, "Renamed Folder 2") - assert remote.get_fs_info(self.folder_2_id).name == "Renamed Folder 2" - - # Synchronize: only the folder renaming is detected: all - # the descendants are automatically realigned - self.wait_sync(wait_for_async=True) - - # The content of the renamed folders is left unchanged - # Check child name - assert local.exists("/Renamed Folder 1/Original File 1.1.odt") - file_1_1_local_info = local.get_info("/Renamed Folder 1/Original File 1.1.odt") - assert file_1_1_local_info.filepath.parent == ( - self.sync_root_folder_1 / "Renamed Folder 1" - ) - - # Check child state - file_1_1_state = self.get_state(self.file_1_1_id) - assert file_1_1_state.local_path == ( - self.workspace_path / "Renamed Folder 1/Original File 1.1.odt" - ) - assert file_1_1_state.local_name == "Original File 1.1.odt" - - # Check child name - assert local.exists("/Renamed Folder 2/Original File 3.odt") - file_3_local_info = local.get_info("/Renamed Folder 2/Original File 3.odt") - assert file_3_local_info.filepath.parent == ( - self.sync_root_folder_1 / "Renamed Folder 2" - ) - - # Check child state - file_3_state = self.get_state(self.file_3_id) - assert file_3_state.local_path == ( - self.workspace_path / "Renamed Folder 2/Original File 3.odt" - ) - assert file_3_state.local_name == "Original File 3.odt" - """ - - def test_remote_rename_sync_root_folder(self): - remote = self.remote_1 - local = LocalTest(self.local_nxdrive_folder_1) - - # Rename a sync root folder - remote.rename(self.workspace_id, "Renamed Nuxeo Drive Test Workspace") - assert ( - remote.get_fs_info(self.workspace_id).name - == "Renamed Nuxeo Drive Test Workspace" - ) - - # Synchronize: only the sync root folder renaming is detected: all - # the descendants are automatically realigned - self.wait_sync(wait_for_async=True) - - # The client folder has been renamed - assert not local.exists(f"/{self.workspace_title}") - assert local.exists("/Renamed Nuxeo Drive Test Workspace") - - renamed_workspace_path = ( - self.local_nxdrive_folder_1 / "Renamed Nuxeo Drive Test Workspace" - ) - - # The content of the renamed folder is left unchanged - # Check child name - assert local.exists( - "/Renamed Nuxeo Drive Test Workspace/Original Fil\xe9 1.odt" - ) - file_1_local_info = local.get_info( - "/Renamed Nuxeo Drive Test Workspace/Original Fil\xe9 1.odt" - ) - assert file_1_local_info.filepath.parent == renamed_workspace_path - - # Check child state - file_1_state = self.get_state(self.file_1_id) - assert file_1_state.local_path == Path( - "Renamed Nuxeo Drive Test Workspace/Original Fil\xe9 1.odt" - ) - assert file_1_state.local_name == "Original Fil\xe9 1.odt" - - # Check child name - assert local.exists("/Renamed Nuxeo Drive Test Workspace/Original Fold\xe9r 1") - folder_1_local_info = local.get_info( - "/Renamed Nuxeo Drive Test Workspace/Original Fold\xe9r 1" - ) - assert folder_1_local_info.filepath.parent == renamed_workspace_path - - # Check child state - folder_1_state = self.get_state(self.folder_1_id) - assert folder_1_state.local_path == Path( - "Renamed Nuxeo Drive Test Workspace/Original Fold\xe9r 1" - ) - assert folder_1_state.local_name == "Original Fold\xe9r 1" - - # Check child name - assert local.exists( - "/Renamed Nuxeo Drive Test Workspace" - "/Original Fold\xe9r 1" - "/Sub-Folder 1.1" - ) - folder_1_1_local_info = local.get_info( - "/Renamed Nuxeo Drive Test Workspace" - "/Original Fold\xe9r 1" - "/Sub-Folder 1.1" - ) - assert folder_1_1_local_info.filepath.parent == ( - renamed_workspace_path / "Original Fold\xe9r 1" - ) - - # Check child state - folder_1_1_state = self.get_state(self.folder_1_1_id) - assert folder_1_1_state.local_path == Path( - "Renamed Nuxeo Drive Test Workspace/Original Fold\xe9r 1/Sub-Folder 1.1" - ) - assert folder_1_1_state.local_name == "Sub-Folder 1.1" - - # Check child name - assert local.exists( - "/Renamed Nuxeo Drive Test Workspace" - "/Original Fold\xe9r 1" - "/Original File 1.1.odt" - ) - file_1_1_local_info = local.get_info( - "/Renamed Nuxeo Drive Test Workspace" - "/Original Fold\xe9r 1" - "/Original File 1.1.odt" - ) - assert file_1_1_local_info.filepath.parent == ( - renamed_workspace_path / "Original Fold\xe9r 1" - ) - - # Check child state - file_1_1_state = self.get_state(self.file_1_1_id) - assert file_1_1_state.local_path == Path( - "Renamed Nuxeo Drive Test Workspace/Original Fold\xe9r 1/Original File 1.1.odt" - ) - assert file_1_1_state.local_name == "Original File 1.1.odt" - - def test_remote_move_to_non_sync_root(self): - # Grant ReadWrite permission on Workspaces for test user - input_obj = f"doc:{env.WS_DIR}" - self.root_remote.execute( - command="Document.SetACE", - input_obj=input_obj, - user=self.user_1, - permission="ReadWrite", - grant=True, - ) - - workspaces_info = self.root_remote.fetch(env.WS_DIR) - workspaces = workspaces_info["uid"] - - # Get remote client with Workspaces as base folder and local client - remote = DocRemote( - self.nuxeo_url, - self.user_1, - "nxdrive-test-device-1", - self.version, - password=self.password_1, - base_folder=workspaces, - upload_tmp_dir=self.upload_tmp_dir, - ) - local = self.local_1 - - # Create a non synchronized folder - unsync_folder = remote.make_folder("/", "Non synchronized folder") - - ws_basename = os.path.basename(self.ws.path) - try: - # Move 'Original Fold\xe9r 1' to Non synchronized folder - remote.move( - f"/{ws_basename}/Original Fold\xe9r 1", "/Non synchronized folder" - ) - assert not remote.exists(f"/{ws_basename}/Original Fold\xe9r 1") - assert remote.exists("/Non synchronized folder/Original Fold\xe9r 1") - - # Synchronize: the folder move is detected as a deletion - self.wait_sync(wait_for_async=True) - - # Check local folder - assert not local.exists("/Original Fold\xe9r 1") - - # Check folder state - assert self.get_state(self.folder_1_id) is None - finally: - # Clean the non synchronized folder - remote.delete(unsync_folder, use_trash=False) - - -class TestSyncRemoteMoveAndRename(OneUserTest): - def setUp(self): - self.workspace_id = f"{SYNC_ROOT_FAC_ID}{self.workspace}" - self.workspace_path = Path(self.workspace_title) - self.folder_id = self.remote_1.make_folder(self.workspace_id, "Test folder").uid - - self.engine_1.start() - self.wait_sync(wait_for_async=True) - - """ - @windows_only - def test_synchronize_remote_move_file_while_accessing(self): - local = self.local_1 - remote = self.remote_1 - - file_path = local.abspath("/Test folder") / "testFile.pdf" - copyfile(self.location / "resources" / "files" / "testFile.pdf", file_path) - self.wait_sync() - file_id = local.get_remote_id("/Test folder/testFile.pdf") - assert file_id - - # Create a document by streaming a binary file ( open it as append ) - with open(file_path, "a"): - # Rename remote folder then synchronize - remote.move(file_id, self.workspace_id) - self.wait_sync(wait_for_async=True) - assert local.exists("/Test folder/testFile.pdf") - assert not local.exists("/testFile.pdf") - - # The source file is accessed by another processor, but no error - assert not self.engine_1.dao.get_errors() - - self.wait_sync(wait_for_async=True) - assert local.exists("/testFile.pdf") - assert not local.exists("/Test folder/testFile.pdf") - """ - - """ - @Options.mock() - def test_synchronize_remote_move_while_download_file(self): - local = self.local_1 - remote = self.remote_1 - - # Create documents in the remote root workspace - new_folder_id = remote.make_folder(self.folder_id, "New folder").uid - self.wait_sync(wait_for_async=True) - - def callback(uploader): - ""Add delay when upload and download."" - if self.engine_1.file_id and not self.engine_1.has_rename: - # Rename remote file while downloading - remote.move(self.engine_1.file_id, new_folder_id) - self.engine_1.has_rename = True - time.sleep(3) - Engine.suspend_client(self.engine_1, uploader) - - self.engine_1.has_rename = False - self.engine_1.file_id = None - - Options.set("tmp_file_limit", 0.1, setter="manual") - with patch.object(self.engine_1.remote, "download_callback", new=callback): - file = self.location / "resources" / "files" / "testFile.pdf" - content = file.read_bytes() - self.engine_1.file_id = remote.make_file( - self.folder_id, "testFile.pdf", content=content - ).uid - - # Rename remote folder then synchronize - self.wait_sync(wait_for_async=True) - assert not local.exists("/Test folder/testFile.pdf") - assert local.exists("/Test folder/New folder/testFile.pdf") - """ - - """ - @windows_only - def test_synchronize_remote_rename_file_while_accessing(self): - local = self.local_1 - remote = self.remote_1 - - file_path = local.abspath("/Test folder") / "testFile.pdf" - copyfile(self.location / "resources" / "files" / "testFile.pdf", file_path) - self.wait_sync() - file_id = local.get_remote_id("/Test folder/testFile.pdf") - assert file_id - - # Create a document by streaming a binary file - with open(file_path, "a"): - # Rename remote folder then synchronize - remote.rename(file_id, "testFile2.pdf") - self.wait_sync(wait_for_async=True) - assert local.exists("/Test folder/testFile.pdf") - assert not local.exists("/Test folder/testFile2.pdf") - - # The source file is accessed by another processor, but no errors - assert not self.engine_1.dao.get_errors() - - self.wait_sync(wait_for_async=True) - assert local.exists("/Test folder/testFile2.pdf") - assert not local.exists("/Test folder/testFile.pdf") - """ - - @pytest.mark.xfail(reason="NXDRIVE-2494") - def test_synchronize_remote_rename_while_download_file(self): - local = self.local_1 - remote = self.remote_document_client_1 - - def callback(uploader): - """Add delay when upload and download.""" - if not self.engine_1.has_rename: - # Rename remote file while downloading - self.remote_1.rename(self.folder_id, "Test folder renamed") - self.engine_1.has_rename = True - time.sleep(3) - Engine.suspend_client(self.engine_1, uploader) - - self.engine_1.has_rename = False - - with patch.object(self.engine_1.remote, "download_callback", new=callback): - file = self.location / "resources" / "files" / "testFile.pdf" - content = file.read_bytes() - remote.make_file("/Test folder", "testFile.pdf", content=content) - - # Rename remote folder then synchronize - self.wait_sync(wait_for_async=True) - assert not local.exists("/Test folder") - assert local.exists("/Test folder renamed") - assert local.exists("/Test folder renamed/testFile.pdf") - - """ - def test_synchronize_remote_rename_while_upload(self): - if WINDOWS: - self._remote_rename_while_upload() - else: - func = "nxdrive.client.remote_client.os.fstatvfs" - with patch(func) as mock_os: - mock_os.return_value = Mock() - mock_os.return_value.f_bsize = 4096 - self._remote_rename_while_upload() - """ - - def _remote_rename_while_upload(self): - local = self.local_1 - - def callback(uploader): - """Add delay when upload and download.""" - if not local.exists("/Test folder renamed"): - time.sleep(1) - Engine.suspend_client(self.engine_1, uploader) - - with patch.object(self.engine_1.remote, "download_callback", new=callback): - # Create a document by streaming a binary file - file_path = local.abspath("/Test folder") / "testFile.pdf" - copyfile(self.location / "resources" / "files" / "testFile.pdf", file_path) - file_path = local.abspath("/Test folder") / "testFile2.pdf" - copyfile(self.location / "resources" / "files" / "testFile.pdf", file_path) - - # Rename remote folder then synchronize - self.remote_1.rename(self.folder_id, "Test folder renamed") - - self.wait_sync(wait_for_async=True) - assert not local.exists("/Test folder") - assert local.exists("/Test folder renamed") - assert local.exists("/Test folder renamed/testFile.pdf") - assert local.exists("/Test folder renamed/testFile2.pdf") - - -class TestRemoteMove(OneUserTest): - def test_remote_create_and_move(self): - """ - NXDRIVE-880: folder created and moved on the server does - not sync properly. - """ - - local = self.local_1 - remote = self.remote_document_client_1 - engine = self.engine_1 - - # Create a folder with some stuff inside, and sync - a1 = remote.make_folder("/", "a1") - for idx in range(5): - fname = "file-{}.txt".format(idx) - remote.make_file(a1, fname, content=b"Content of " + fname.encode("utf-8")) - engine.start() - self.wait_sync(wait_for_async=True) - - # Create another folder and move a1 inside it, and sync - a3 = remote.make_folder("/", "a3") - remote.move(a1, a3) - self.wait_sync(wait_for_async=True) - - # Checks - assert not local.exists("/a1") - assert len(local.get_children_info("/a3/a1")) == 5 - - -class TestRemoteFiles(OneUserTest): - """ - def test_remote_create_files_upper_lower_cases(self): - "" - Check that remote (lower|upper)case renaming is taken - into account locally. - "" - remote = self.remote_document_client_1 - local = self.local_1 - engine = self.engine_1 - - engine.start() - self.wait_sync(wait_for_async=True) - - # Create an innocent file, lower case - file_path = self.location / "resources" / "files" / "testFile.pdf" - filename_lower = file_path.name.lower() - doc = remote.make_file("/", filename_lower, file_path=file_path) - self.wait_sync(wait_for_async=True) - - # Check - assert remote.exists(f"/{filename_lower}") - assert local.exists(f"/{filename_lower}") - - # Remotely rename to upper case - filename_upper = filename_lower.upper() - remote.update_content(doc, b"CASE", filename=filename_upper) - self.wait_sync(wait_for_async=True) - - # Check - server - children = remote.get_children_info(self.workspace) - assert len(children) == 1 - assert children[0].get_blob("file:content").name == filename_upper - - # Check - client - children = local.get_children_info("/") - assert len(children) == 1 - assert children[0].name == filename_upper - """ - - """ - def test_remote_create_folders_upper_lower_cases(self): - "" - Check that remote (lower|upper)case renaming is taken - into account locally. See NXDRIVE-927. - "" - remote = self.remote_1 - local = self.local_1 - engine = self.engine_1 - workspace_id = f"{SYNC_ROOT_FAC_ID}{self.workspace}" - - # Create innocent folders, upper case - folder1 = remote.make_folder(workspace_id, "AA_1").uid - folder1_uid = folder1.partition("#")[-1] - folder2 = remote.make_folder(workspace_id, "BA_1").uid - folder2_uid = folder2.partition("#")[-1] - engine.start() - self.wait_sync(wait_for_async=True) - - # Check - for folder in ("/AA_1", "/BA_1"): - assert remote.exists(folder) - assert local.exists(folder) - - # Remotely rename the folder2 to lowercase folder1 - foldername_lower = "aa_1" - remote.rename(folder2, foldername_lower) - self.wait_sync(wait_for_async=True) - - if not local.is_case_sensitive(): - # There should be a conflict - errors = engine.dao.get_errors() - assert len(errors) == 1 - assert errors[0].remote_ref.endswith(folder2_uid) - else: - # We should not have any error - assert not engine.dao.get_errors(limit=0) - - # Check - server - children = sorted( - remote.get_children_info(self.workspace), key=lambda x: x.name - ) - assert len(children) == 2 - assert folder1_uid.endswith(children[0].uid) - assert children[0].name == "AA_1" - assert folder2_uid.endswith(children[1].uid) - assert children[1].name == foldername_lower - - # Check - client - children = sorted(local.get_children_info("/"), key=lambda x: x.name) - assert len(children) == 2 - assert children[0].remote_ref.endswith(folder1_uid) - assert children[0].name == "AA_1" - assert children[1].remote_ref.endswith(folder2_uid) - - if not local.is_case_sensitive(): - # The rename was _not_ effective - assert str(children[1].path).endswith("BA_1") - - # Re-rename the folder on the server - remote.rename(folder2, "aZeRtY") - self.wait_sync(wait_for_async=True) - - # There should be no more conflict - assert not engine.dao.get_errors() - - # And the local folder must be renamed - children = sorted(local.get_children_info("/"), key=lambda x: x.name) - assert len(children) == 2 - assert children[0].remote_ref.endswith(folder1_uid) - assert children[0].name == "AA_1" - assert children[1].remote_ref.endswith(folder2_uid) - assert str(children[1].path).endswith("aZeRtY") - else: - # The rename was effective - assert str(children[1].path).endswith(foldername_lower) - """ diff --git a/tests/functional/test_synchronization.py b/tests/functional/test_synchronization.py deleted file mode 100644 index a49cf0039a..0000000000 --- a/tests/functional/test_synchronization.py +++ /dev/null @@ -1,1184 +0,0 @@ -import time -from pathlib import Path -from unittest.mock import patch - -from nuxeo.exceptions import Conflict, HTTPError, Unauthorized - -# from nxdrive.constants import ROOT, WINDOWS -from nxdrive.constants import WINDOWS -from nxdrive.utils import safe_filename - -# from .. import ensure_no_exception -# from . import LocalTest -from .conftest import OS_STAT_MTIME_RESOLUTION, OneUserNoSync, OneUserTest, TwoUsersTest - - -class TestSynchronizationDisabled(OneUserNoSync): - """Test with synchronization features disabled.""" - - def test_basic_synchronization(self): - """Test that nothing will be synced.""" - - local = self.local_1 - remote = self.remote_document_client_1 - self.engine_1.start() - self.wait_sync(wait_for_async=True) - - # The local root is not created - assert not local.exists("/remote folder") - - # Force its creation to test local changes are not reflected remotely - local.unlock_ref(local.base_folder) - local.base_folder.mkdir() - local.make_folder("/", "local folder") - - # Create a remote document to check that nothing will be locally synced - remote.make_folder("/", "remote folder") - - # Sync and checks - self.wait_sync(wait_for_async=True) - assert not remote.exists("/local folder") - assert local.exists("/local folder") - assert not local.exists("/remote folder") - - -class TestSynchronization(OneUserTest): - """ - def test_binding_initialization_and_first_sync(self): - local = self.local_1 - remote = self.remote_document_client_1 - - # Create some documents in a Nuxeo workspace and bind this server to a - # Nuxeo Drive local folder - docs = self.make_server_tree() - - # The root binding operation does not create the local folder yet. - assert not local.exists("/") - - # Launch ndrive and check synchronization - self.engine_1.start() - self.wait_sync(wait_for_async=True) - assert local.exists("/") - assert local.exists("/Folder 1") - assert local.get_content("/Folder 1/File 1.txt") == b"aaa" - assert local.exists("/Folder 1/Folder 1.1") - assert local.get_content("/Folder 1/Folder 1.1/File 2.txt") == b"bbb" - assert local.exists("/Folder 1/Folder 1.2") - assert local.get_content("/Folder 1/Folder 1.2/File 3.txt") == b"ccc" - assert local.exists("/Folder 2") - # Cannot predict the resolution in advance - assert remote.get_note(docs["Dupe 1.txt"]) == b"Some content." - assert remote.get_note(docs["Dupe 2.txt"]) == b"Other content." - assert local.get_content("/Folder 2/File 4.txt") == b"ddd" - assert local.get_content("/File 5.txt") == b"eee" - - # Unbind root and resynchronize - remote.unregister_as_root(self.workspace) - - # Since errors are generated by the deletion events sent - # by Watchdog for the workspace children under UNIX, - # don't enforce errors - self.wait_sync(wait_for_async=True, enforce_errors=WINDOWS) - assert not local.exists("/") - """ - - """ - def test_binding_synchronization_empty_start(self): - local = self.local_1 - remote = self.remote_document_client_1 - - # Let's create some documents on the server and - # launch the first synchronization - docs = self.make_server_tree() - self.engine_1.start() - self.wait_sync(wait_for_async=True) - - # We should now be fully synchronized - file_count, folder_count = self.get_local_child_count( - self.local_nxdrive_folder_1 - ) - assert folder_count == 5 - assert file_count == 6 - - # Wait a bit for file time stamps to increase enough: on OSX HFS+ the - # file modification time resolution is 1s for instance - time.sleep(OS_STAT_MTIME_RESOLUTION) - - # Let do some local and remote changes concurrently - local.delete("/File 5.txt") - local.update_content("/Folder 1/File 1.txt", b"aaaa") - local.make_folder("/", "Folder 4") - - # The remote client used in this test is handling paths relative to - # the 'Nuxeo Drive Test Workspace' - remote.update(docs["File 2.txt"], properties={"note:note": "bbbb"}) - remote.delete("/Folder 2") - f3 = remote.make_folder(self.workspace, "Folder 3") - remote.make_file(f3, "File 6.txt", content=b"ffff") - - # Launch synchronization - self.wait_sync(wait_for_async=True) - - # We should now be fully synchronized again - assert not remote.exists("/File 5.txt") - assert remote.get_note(docs["File 1.txt"]) == b"aaaa" - assert remote.exists("/Folder 4") - - assert local.get_content("/Folder 1/Folder 1.1/File 2.txt") == b"bbbb" - # Let's just check remote document hasn't changed - assert remote.get_note(docs["File 2.txt"]) == b"bbbb" - assert not local.exists("/Folder 2") - assert local.exists("/Folder 3") - assert local.get_content("/Folder 3/File 6.txt") == b"ffff" - """ - - """ - def test_single_quote_escaping(self): - remote = self.remote_document_client_1 - local = LocalTest(self.local_nxdrive_folder_1) - dao = self.engine_1.dao - - file = "APPEL D'OFFRES" - assert dao._escape(file) == "APPEL D''OFFRES" - - remote.unregister_as_root(self.workspace) - self.engine_1.start() - - with ensure_no_exception(): - remote.make_folder("/", file) - filename = f"/{file}" - - remote.register_as_root(filename) - self.wait_sync(wait_for_async=True) - assert local.exists(filename) - - remote.unregister_as_root(filename) - self.wait_sync(wait_for_async=True) - assert not local.exists(filename) - """ - - def test_invalid_credentials(self): - self.engine_1.start() - self.wait_sync(wait_for_async=True) - - # Simulate bad responses - with patch.object(self.engine_1, "remote", new=self.get_bad_remote()): - self.engine_1.remote.request_token() - self.engine_1.remote.make_server_call_raise(Unauthorized(message="Mock")) - self.wait_sync(wait_for_async=True, fail_if_timeout=False) - assert self.engine_1.is_offline() - - self.engine_1.set_offline(value=False) - self.engine_1.set_invalid_credentials(value=False) - self.engine_1.resume() - - """ - def test_synchronization_modification_on_created_file(self): - # Regression test: a file is created locally, then modification is - # detected before first upload - local = self.local_1 - workspace_path = Path(self.workspace_title) - dao = self.engine_1.dao - - assert not local.exists("/") - - self.engine_1.start() - self.wait_sync(wait_for_async=True) - assert local.exists("/") - self.engine_1.stop() - # Let's create some documents on the client - local.make_folder("/", "Folder") - local.make_file("/Folder", "File.txt", content=b"Some content.") - - # First local scan (assuming the network is offline): - self.queue_manager_1.suspend() - self.queue_manager_1._disable = True - self.engine_1.start() - self.wait_sync(timeout=5, fail_if_timeout=False) - children = dao.get_local_children(workspace_path) - assert len(children) == 1 - assert children[0].pair_state == "locally_created" - folder_children = dao.get_local_children(workspace_path / "Folder") - assert len(folder_children) == 1 - assert folder_children[0].pair_state == "locally_created" - - # Wait a bit for file time stamps to increase enough: on most OS - # the file modification time resolution is 1s - time.sleep(OS_STAT_MTIME_RESOLUTION) - - # Let's modify it offline and wait for a bit - local.update_content("/Folder/File.txt", content=b"Some content.") - self.wait_sync(timeout=5, fail_if_timeout=False) - # File has not been synchronized, it is still - # in the locally_created state - file_state = dao.get_state_from_local(workspace_path / "Folder/File.txt") - assert file_state.pair_state == "locally_created" - - # Assume the computer is back online, the synchronization should occur - # as if the document was just created and not trigger an update - self.queue_manager_1._disable = False - self.queue_manager_1.resume() - self.wait_sync(wait_for_async=True) - folder_state = dao.get_state_from_local(workspace_path / "Folder") - assert folder_state.pair_state == "synchronized" - file_state = dao.get_state_from_local(workspace_path / "Folder/File.txt") - assert file_state.pair_state == "synchronized" - """ - - def test_basic_synchronization(self): - local = self.local_1 - remote = self.remote_document_client_1 - self.engine_1.start() - self.wait_sync(wait_for_async=True) - - # Let's create some document on the client and the server - local.make_folder("/", "Folder 3") - self.make_server_tree() - - # Launch ndrive and check synchronization - self.wait_sync(wait_for_async=True) - assert remote.exists("/Folder 3") - assert local.exists("/Folder 1") - assert local.exists("/Folder 2") - assert local.exists("/File 5.txt") - - def test_docpair_export(self): - self.engine_1.start() - self.wait_sync(wait_for_async=True) - - dao = self.engine_1.dao - children = dao.get_local_children(Path("/")) - assert children - doc_pair = children[0] - assert doc_pair.export() - - def test_synchronization_skip_errors(self): - local = self.local_1 - dao = self.engine_1.dao - - assert not local.exists("/") - - # Perform first scan and sync - self.engine_1.start() - self.wait_sync(wait_for_async=True) - assert local.exists("/") - self.engine_1.stop() - - # Let's create some documents on the client and the server - local.make_folder("/", "Folder 3") - self.make_server_tree() - - # Detect the files to synchronize but do not perform the - # synchronization - self.queue_manager_1.suspend() - self.queue_manager_1._disable = True - self.engine_1.start() - self.wait_sync(wait_for_async=True, timeout=10, fail_if_timeout=False) - - children = dao.get_local_children(Path(self.workspace_title)) - assert len(children) == 4 - sorted_children = sorted(children, key=lambda x: x.local_path) - assert sorted_children[0].remote_name == "File 5.txt" - assert sorted_children[0].pair_state == "remotely_created" - assert sorted_children[1].remote_name == "Folder 1" - assert sorted_children[1].pair_state == "remotely_created" - assert sorted_children[2].remote_name == "Folder 2" - assert sorted_children[2].pair_state == "remotely_created" - assert sorted_children[3].local_name == "Folder 3" - assert sorted_children[3].pair_state == "locally_created" - - # Simulate synchronization errors - file_5_state = sorted_children[0] - folder_3_state = sorted_children[3] - self.engine_1._local_watcher.increase_error(file_5_state, "TEST_FILE_ERROR") - self.engine_1._local_watcher.increase_error(folder_3_state, "TEST_FILE_ERROR") - - # Run synchronization - self.queue_manager_1._disable = False - self.queue_manager_1.resume() - # By default engine will not consider being syncCompleted - # because of the temporary ignore dfile - self.wait_sync(enforce_errors=False, fail_if_timeout=False) - - # All errors have been skipped, while the remaining docs have - # been synchronized - file_5_state = dao.get_normal_state_from_remote(file_5_state.remote_ref) - assert file_5_state.pair_state == "remotely_created" - folder_3_state = dao.get_state_from_local(folder_3_state.local_path) - assert folder_3_state.pair_state == "locally_created" - folder_1_state = dao.get_normal_state_from_remote(sorted_children[1].remote_ref) - assert folder_1_state.pair_state == "synchronized" - folder_2_state = dao.get_normal_state_from_remote(sorted_children[2].remote_ref) - assert folder_2_state.pair_state == "synchronized" - - # Retry synchronization of pairs in error - self.wait_sync() - file_5_state = dao.get_normal_state_from_remote(file_5_state.remote_ref) - assert file_5_state.pair_state == "synchronized" - folder_3_state = dao.get_state_from_local(folder_3_state.local_path) - assert folder_3_state.pair_state == "synchronized" - - def test_synchronization_give_up(self): - # Override error threshold to 1 instead of 3 - test_error_threshold = 1 - self.queue_manager_1._error_threshold = test_error_threshold - - # Bound root but nothing is synchronized yet - local = self.local_1 - dao = self.engine_1.dao - workspace_path = Path(self.workspace_title) - assert not local.exists("/") - - # Perform first scan and sync - self.engine_1.start() - self.wait_sync(wait_for_async=True) - assert local.exists("/") - self.engine_1.stop() - - # Let's create some documents on the client and the server - local.make_folder("/", "Folder 3") - self.make_server_tree(deep=False) - - # Simulate a server failure on file download - bad_remote = self.get_bad_remote() - error = HTTPError(status=500, message="Mock download error") - bad_remote.make_download_raise(error) - - # File is not synchronized but synchronization does not fail either, - # errors are handled and queue manager has given up on them - with patch.object(self.engine_1, "remote", new=bad_remote): - self.engine_1.start() - self.wait_sync(wait_for_async=True, timeout=60) - states_in_error = dao.get_errors(limit=test_error_threshold) - assert len(states_in_error) == 1 - children = dao.get_states_from_partial_local(workspace_path) - assert len(children) == 4 - for state in children: - if state.folderish: - assert state.pair_state == "synchronized" - else: - assert state.pair_state != "synchronized" - - # Reset errors - for state in states_in_error: - dao.reset_error(state) - - # Verify that everything now gets synchronized - self.wait_sync() - assert not dao.get_errors(limit=test_error_threshold) - children = dao.get_states_from_partial_local(workspace_path) - assert len(children) == 4 - for child in children: - assert child.pair_state == "synchronized" - - """ - def test_synchronization_offline(self): - # Bound root but nothing is synchronized yet - local = self.local_1 - dao = self.engine_1.dao - workspace_path = Path(self.workspace_title) - assert not local.exists("/") - - # Perform first scan and sync - self.engine_1.start() - self.wait_sync(wait_for_async=True) - assert local.exists("/") - self.engine_1.stop() - - # Let's create some documents on the client and the server - local.make_folder("/", "Folder 3") - self.make_server_tree(deep=False) - - # Find various ways to simulate a network failure - bad_remote = self.get_bad_remote() - errors = [ - ConnectionError("Mock connection error"), - OSError("Mock socket error"), # Old socket.error - HTTPError(status=503, message="Mock"), - ] - - engine_started = False - with patch.object(self.engine_1, "remote", new=bad_remote): - for error in errors: - self.engine_1.remote.make_server_call_raise(error) - if not engine_started: - self.engine_1.start() - engine_started = True - - # Synchronization doesn't occur but does not fail either. - # - one 'locally_created' error is registered for Folder 3 - # - no states are inserted for the remote documents - self.wait_sync(wait_for_async=True, fail_if_timeout=False) - children = dao.get_states_from_partial_local(workspace_path) - assert len(children) == 1 - assert children[0].pair_state != "synchronized" - assert not self.engine_1.is_offline() - - # Starting here, the network is re-enable - # Verify that everything now gets synchronized - self.wait_sync(wait_for_async=True) - assert not self.engine_1.is_offline() - assert not dao.get_errors(limit=0) - children = dao.get_states_from_partial_local(workspace_path) - assert len(children) == 4 - for state in children: - assert state.pair_state == "synchronized" - """ - - """ - def test_create_content_in_readonly_area(self): - dao = self.engine_1.dao - self.engine_1.start() - self.wait_sync(wait_for_async=True) - - # Let's create a subfolder of the main readonly folder - local = LocalTest(self.local_nxdrive_folder_1) - local.make_folder("/", "Folder 3") - local.make_file("/Folder 3", "File 1.txt", content=b"Some content.") - local.make_folder("/Folder 3", "Sub Folder 1") - local.make_file( - "/Folder 3/Sub Folder 1", "File 2.txt", content=b"Some other content." - ) - self.wait_sync() - - # States have been created for the subfolder and its content, - # subfolder is marked as unsynchronized - good_states = ("locally_created", "unsynchronized") - states = dao.get_states_from_partial_local(ROOT) - assert len(states) == 6 - sorted_states = sorted(states, key=lambda x: x.local_path) - assert sorted_states[0].local_name == "" - assert sorted_states[0].pair_state == "synchronized" - assert sorted_states[1].local_name == "Folder 3" - assert sorted_states[1].pair_state == "unsynchronized" - assert sorted_states[2].local_name == "File 1.txt" - assert sorted_states[2].pair_state in good_states - assert sorted_states[3].local_name == "Sub Folder 1" - assert sorted_states[3].pair_state in good_states - assert sorted_states[4].local_name == "File 2.txt" - assert sorted_states[4].pair_state in good_states - assert sorted_states[5].local_name == self.workspace_title - assert sorted_states[5].pair_state == "synchronized" - - # Let's create a file in the main readonly folder - local.make_file("/", "A file in a readonly folder.txt", content=b"Some Content") - self.wait_sync() - - # A state has been created, marked as unsynchronized - # Other states are unchanged - states = dao.get_states_from_partial_local(ROOT) - assert len(states) == 7 - sorted_states = sorted(states, key=lambda x: x.local_path) - assert sorted_states[0].local_name == "" - assert sorted_states[0].pair_state == "synchronized" - assert sorted_states[1].local_name == "A file in a readonly folder.txt" - assert sorted_states[1].pair_state == "unsynchronized" - assert sorted_states[2].local_name == "Folder 3" - assert sorted_states[2].pair_state == "unsynchronized" - assert sorted_states[3].local_name == "File 1.txt" - assert sorted_states[3].pair_state in good_states - assert sorted_states[4].local_name == "Sub Folder 1" - assert sorted_states[4].pair_state in good_states - assert sorted_states[5].local_name == "File 2.txt" - assert sorted_states[5].pair_state in good_states - assert sorted_states[6].local_name == self.workspace_title - assert sorted_states[6].pair_state == "synchronized" - - # Let's create a file and a folder in a folder on which the Write - # permission has been removed. Thanks to NXP-13119, this permission - # change will be detected server-side, thus fetched by the client - # in the remote change summary, and the remote_can_create_child flag - # on which the synchronizer relies to check if creation is allowed - # will be set to False and no attempt to create the remote file - # will be made. - # States will be marked as unsynchronized. - - workspace_path = Path(self.workspace_title) - # Create local folder and synchronize it remotely - local = self.local_1 - local.make_folder("/", "Readonly folder") - self.wait_sync() - - remote = self.remote_document_client_1 - assert remote.exists("/Readonly folder") - - # Check remote_can_create_child flag in pair state - readonly_folder_state = dao.get_state_from_local( - workspace_path / "Readonly folder" - ) - assert readonly_folder_state.remote_can_create_child - - # Wait again for synchronization to detect remote folder creation - # triggered by last synchronization and make sure we get a clean - # state at next change summary - self.wait_sync(wait_for_async=True) - readonly_folder_state = dao.get_state_from_local( - workspace_path / "Readonly folder" - ) - assert readonly_folder_state.remote_can_create_child - - # Set remote folder as readonly for test user - readonly_folder_path = f"{self.ws.path}/Readonly folder" - self._set_read_permission(self.user_1, readonly_folder_path, True) - self.root_remote.block_inheritance(readonly_folder_path, overwrite=False) - - # Wait to make sure permission change is detected. - self.wait_sync(wait_for_async=True) - # Re-fetch folder state and check remote_can_create_child - # flag has been updated - readonly_folder_state = dao.get_state_from_local( - workspace_path / "Readonly folder" - ) - assert not readonly_folder_state.remote_can_create_child - - # Try to create a local file and folder in the readonly folder, - # they should not be created remotely and be marked as unsynchronized. - local.make_file( - "/Readonly folder", "File in readonly folder", content=b"File content" - ) - local.make_folder("/Readonly folder", "Folder in readonly folder") - self.wait_sync() - assert not remote.exists("/Readonly folder/File in readonly folder") - assert not remote.exists("/Readonly folder/Folder in readonly folder") - - states = dao.get_states_from_partial_local( - workspace_path / "Readonly folder", strict=False - ) - assert len(states) == 3 - sorted_states = sorted(states, key=lambda x: x.local_path) - assert sorted_states[0].local_name == "Readonly folder" - assert sorted_states[0].pair_state == "synchronized" - assert sorted_states[1].local_name == "File in readonly folder" - assert sorted_states[1].pair_state == "unsynchronized" - assert sorted_states[2].local_name == "Folder in readonly folder" - assert sorted_states[2].pair_state == "unsynchronized" - """ - - """ - def test_synchronize_special_filenames(self): - local = self.local_1 - remote = self.remote_document_client_1 - self.engine_1.start() - - # Create a remote folder with a weird name - folder = remote.make_folder(self.workspace, 'Folder with chars: / \\ * < > ? "') - characters = "- - - - - - - -" - foldername = f"Folder with chars{characters}" - - self.wait_sync(wait_for_async=True) - folder_names = [i.name for i in local.get_children_info("/")] - assert folder_names == [foldername] - - # Create a remote file with a weird name - file = remote.make_file( - folder, 'File with chars: / \\ * < > ? ".txt', content=b"some content" - ) - filename = f"File with chars{characters}.txt" - - self.wait_sync(wait_for_async=True) - file_names = [ - i.name - for i in local.get_children_info(local.get_children_info("/")[0].path) - ] - assert file_names == [filename] - - # Update a remote file with a weird name (NXDRIVE-286) - remote.update(file, properties={"note:note": "new content"}) - self.wait_sync(wait_for_async=True, enforce_errors=False) - assert local.get_content(f"/{foldername}/{filename}") == b"new content" - file_state = self.get_dao_state_from_engine_1(f"{foldername}/{filename}") - assert file_state.pair_state == "synchronized" - assert file_state.local_digest == file_state.remote_digest - - # Update note title with a weird name - remote.update( - file, properties={"dc:title": 'File with chars: / \\ * < > ? " - 2'} - ) - filename = f"File with chars{characters} - 2.txt" - self.wait_sync(wait_for_async=True, enforce_errors=False) - file_names = [ - i.name - for i in local.get_children_info(local.get_children_info("/")[0].path) - ] - assert file_names == [filename] - - # Update note title changing the case (NXRIVE-532) - remote.update( - file, properties={"dc:title": 'file with chars: / \\ * < > ? " - 2'} - ) - filename = f"file with chars{characters} - 2.txt" - self.wait_sync(wait_for_async=True, enforce_errors=False) - file_names = [ - i.name - for i in local.get_children_info(local.get_children_info("/")[0].path) - ] - assert file_names == [filename] - """ - - def test_synchronize_error_remote(self): - path = Path(f"/{self.workspace_title}") / "test.odt" - remote = self.remote_document_client_1 - dao = self.engine_1.dao - - bad_remote = self.get_bad_remote() - error = HTTPError(status=400, message="Mock") - bad_remote.make_download_raise(error) - - with patch.object(self.engine_1, "remote", new=bad_remote): - remote.make_file("/", "test.odt", content=b"Some content.") - - self.engine_1.start() - self.wait_sync(wait_for_async=True) - self.engine_1.stop() - - pair = dao.get_state_from_local(path) - assert pair is not None - assert pair.error_count - assert pair.pair_state == "remotely_created" - - self.engine_1.start() - self.wait_sync() - pair = dao.get_state_from_local(path) - assert pair.error_count == 4 - assert pair.pair_state == "remotely_created" - - # Requeue errors - self.engine_1.retry_pair(pair.id) - self.wait_sync() - pair = dao.get_state_from_local(path) - assert not pair.error_count - assert pair.pair_state == "synchronized" - - def test_synchronize_deleted_blob(self): - local = self.local_1 - remote = self.remote_document_client_1 - self.engine_1.start() - - # Create a doc with a blob in the remote root workspace - # then synchronize - file_path = self.location / "resources" / "files" / "testFile.odt" - remote.make_file("/", file_path.name, file_path=file_path) - - self.wait_sync(wait_for_async=True) - assert local.exists(f"/{file_path.name}") - - # Delete the blob from the remote doc then synchronize - remote.delete_content(f"/{file_path.name}") - - self.wait_sync(wait_for_async=True) - assert not local.exists(f"/{file_path.name}") - - def test_synchronize_deletion(self): - local = self.local_1 - remote = self.remote_document_client_1 - self.engine_1.start() - - # Create a remote folder with 2 children then synchronize - remote.make_folder("/", "Remote folder") - remote.make_file( - "/Remote folder", "Remote file 1.odt", content=b"Some content." - ) - remote.make_file( - "/Remote folder", "Remote file 2.odt", content=b"Other content." - ) - - self.wait_sync(wait_for_async=True) - assert local.exists("/Remote folder") - assert local.exists("/Remote folder/Remote file 1.odt") - assert local.exists("/Remote folder/Remote file 2.odt") - - # Delete remote folder then synchronize - remote.delete("/Remote folder") - - self.wait_sync(wait_for_async=True) - assert not local.exists("/Remote folder") - assert not local.exists("/Remote folder/Remote file 1.odt") - assert not local.exists("/Remote folder/Remote file 2.odt") - - # Create a local folder with 2 children then synchronize - local.make_folder("/", "Local folder") - local.make_file("/Local folder", "Local file 1.odt", content=b"Some content.") - local.make_file("/Local folder", "Local file 2.odt", content=b"Other content.") - - self.wait_sync() - assert remote.exists("/Local folder") - assert remote.exists("/Local folder/Local file 1.odt") - assert remote.exists("/Local folder/Local file 2.odt") - - # Delete local folder then synchronize - time.sleep(OS_STAT_MTIME_RESOLUTION) - local.delete("/Local folder") - - # Since errors are generated by the deletion events sent by Watchdog - # for the folder children under UNIX, don't enforce errors - self.wait_sync(enforce_errors=WINDOWS) - assert not remote.exists("/Local folder") - # Wait for async completion as recursive deletion of children is done - # by the BulkLifeCycleChangeListener which is asynchronous - self.wait() - assert not remote.exists("/Local folder/Local file 1.odt") - assert not remote.exists("/Local folder/Local file 2.odt") - - """ - def test_synchronize_windows_foldername_endswith_space(self): - "" - Use nuxeodrive.CreateFolder API to make a folder directly - under the workspace "trial ". Verify if the DS client downloads - the folder and trims the space at the end - "" - remote = self.remote_document_client_1 - target = remote.make_folder("/", "trial ") - local = self.local_root_client_1 - remote.make_file(target, "aFile.txt", content=b"File A Content") - remote.make_file(target, "bFile.txt", content=b"File B Content") - self.engine_1.start() - self.wait_sync(wait_for_async=True) - assert local.exists(f"/{self.workspace_title}") - if WINDOWS: - assert local.exists(f"/{self.workspace_title}/trial/") - assert local.exists(f"/{self.workspace_title}/trial/aFile.txt") - assert local.exists(f"/{self.workspace_title}/trial/bFile.txt") - else: - assert local.exists(f"/{self.workspace_title}/trial /") - assert local.exists(f"/{self.workspace_title}/trial /aFile.txt") - assert local.exists(f"/{self.workspace_title}/trial /bFile.txt") - """ - - def test_409_conflict(self): - """ - Test concurrent upload with files having the same first characters. - """ - - remote = self.remote_document_client_1 - local = self.local_1 - engine = self.engine_1 - - engine.start() - self.wait_sync(wait_for_async=True) - assert local.exists("/") - - def _raise_for_second_file_only(*args, **kwargs): - return kwargs.get("filename").endswith("2.txt") - - # Simulate a server conflict on file upload - bad_remote = self.get_bad_remote() - error = Conflict(message="Mock Conflict") - bad_remote.make_upload_raise(error) - bad_remote.raise_on = _raise_for_second_file_only - - with patch.object(self.engine_1, "remote", new=bad_remote): - # Create 2 files locally - base = "A" * 40 - file1 = base + "1.txt" - file2 = base + "2.txt" - local.make_file("/", file1, content=b"foo") - local.make_file("/", file2, content=b"bar") - - self.wait_sync(fail_if_timeout=False) - - # Checks - assert engine.dao.queue_manager.get_errors_count() == 1 - children = remote.get_children_info(self.workspace) - assert len(children) == 1 - assert children[0].name == file1 - - # Starting here, default behavior is restored - self.wait_sync() - - # Checks - children = remote.get_children_info(self.workspace) - assert len(children) == 2 - assert children[0].name == file1 - assert children[1].name == file2 - - def test_416_range_past_eof(self): - """ - Test wrong bytes range during download. - """ - - remote = self.remote_document_client_1 - local = self.local_1 - engine = self.engine_1 - - engine.start() - self.wait_sync(wait_for_async=True) - assert local.exists("/") - - remote.make_file("/", "test.bin", content=b"42") - - # Simulate a requested range not satisfiable on file download - bad_remote = self.get_bad_remote() - error = HTTPError(status=416, message="Mock Requested Range Not Satisfiable") - bad_remote.make_download_raise(error) - - with patch.object(self.engine_1, "remote", new=bad_remote): - self.wait_sync(fail_if_timeout=False) - # Checks - assert engine.dao.queue_manager.get_errors_count() == 1 - - # Starting here, default behavior is restored - self.wait_sync() - - # Checks - assert not engine.dao.get_errors() - assert local.exists("/test.bin") - - def test_local_modify_offline(self): - local = self.local_1 - engine = self.engine_1 - - engine.start() - self.wait_sync(wait_for_async=True) - - local.make_folder("/", "Test") - local.make_file("/Test", "Test.txt", content=b"Some content") - self.wait_sync() - - engine.stop() - local.update_content("/Test/Test.txt", b"Another content") - - engine.start() - self.wait_sync() - assert not engine.dao.get_errors() - - """ - def test_unsynchronize_accentued_document(self): - remote = self.remote_document_client_1 - local = self.local_1 - engine = self.engine_1 - engine.start() - - # Create the folder - root_name = "Été indian" - root = remote.make_folder(self.workspace, root_name) - self.wait_sync(wait_for_async=True) - assert local.exists("/" + root_name) - - # Remove the folder - remote.delete(root) - self.wait_sync(wait_for_async=True) - assert not local.exists("/" + root_name) - """ - - """ - def test_synchronize_document_with_pattern(self): - "" - Simple test to ensure there is no issue with files like "$AAA000$.doc". - Related to NXDRIVE-1287. - "" - name = "$NAB184$.doc" - self.remote_document_client_1.make_file("/", name, content=b"42") - self.engine_1.start() - self.wait_sync(wait_for_async=True) - assert self.local_1.exists(f"/{name}") - """ - - def test_rename_duplicates(self): - remote = self.remote_document_client_1 - local = self.local_1 - engine = self.engine_1 - - # Create 7 files with the same name - name = "Congés 2016 / 2017.txt" - name_expected = safe_filename(name) - for _ in range(7): - remote.make_file("/", name, content=b"42") - - # Start sync - engine.start() - self.wait_sync(wait_for_async=True) - - # Check that one file exists, and engine has 6 errors - assert local.exists(f"/{name_expected}") - assert len(local.get_children_info("/")) == 1 - assert len(engine.dao.get_errors(limit=0)) == 6 - - # Rename all remote documents with unique names - ref = local.get_remote_id("/") - children = self.remote_1.get_fs_children(ref) - assert len(children) == 7 - remote_files = set() - for child in children: - new_name = f"{child.uid.split('#')[-1]}-{safe_filename(child.name)}" - remote_files.add(new_name) - remote.execute(command="NuxeoDrive.Rename", id=child.uid, name=new_name) - - self.wait_sync(wait_for_async=True) - - children = self.remote_1.get_fs_children(ref) - assert len(children) == 7 - # Check that the 7 files exist locally and that there are no errors - local_children = local.get_children_info("/") - assert len(local_children) == 7 - local_files = {child.name for child in local_children} - assert not engine.dao.get_errors(limit=0) - assert remote_files == local_files - - """ - def test_local_creation_copying_from_sibling(self): - ""Test a local file creation when checking for an already synced file on the HDD."" - - remote = self.remote_document_client_1 - local = self.local_1 - engine = self.engine_1 - - engine.start() - self.wait_sync(wait_for_async=True) - - # Create a remote folder and a file inside it - contents = b"1234567890" * 42 * 42 - remote.make_folder("/", "a folder") - remote.make_file("/a folder", "file1.bin", content=contents) - self.wait_sync(wait_for_async=True) - - def stream_content(*args, **kwargs): - ""Called by Processor._download_content(). We are testing that this method is never called."" - assert 0, "Should not be called!" - - # Create another files with the same contents and check that the remote client downloads nothing - with patch.object(self.engine_1.remote, "stream_content", new=stream_content): - remote.make_file("/a folder", "file2.bin", content=contents) - remote.make_file("/", "file3.bin", content=contents) - self.wait_sync(wait_for_async=True) - - # Checks - assert not engine.dao.queue_manager.get_errors_count() - for client in (remote, local): - assert client.exists("/a folder/file1.bin") - assert client.exists("/a folder/file2.bin") - assert client.exists("/file3.bin") - """ - - -class TestSynchronization2(TwoUsersTest): - """ - def test_conflict_detection(self): - # Fetch the workspace sync root - local = self.local_1 - dao = self.engine_1.dao - workspace_path = Path(self.workspace_title) - self.engine_1.start() - self.wait_sync(wait_for_async=True) - assert local.exists("/") - - # Let's create a file on the client and synchronize it. - local_path = local.make_file("/", "Some File.doc", content=b"Original content.") - self.wait_sync() - - # Let's modify it concurrently but with the same content (digest) - self.engine_1.suspend() - time.sleep(OS_STAT_MTIME_RESOLUTION) - local.update_content(local_path, b"Same new content.") - - remote_2 = self.remote_document_client_2 - remote_2.update_content("/Some File.doc", b"Same new content.") - self.engine_1.resume() - - # Let's synchronize and check the conflict handling: automatic - # resolution will work for this case - self.wait_sync(wait_for_async=True) - assert not self.engine_1.get_conflicts() - children = dao.get_states_from_partial_local(workspace_path) - assert len(children) == 1 - assert children[0].pair_state == "synchronized" - - local_children = local.get_children_info("/") - assert len(local_children) == 1 - assert local_children[0].name == "Some File.doc" - assert local.get_content(local_path) == b"Same new content." - remote_1 = self.remote_document_client_1 - remote_children = remote_1.get_children_info(self.workspace) - assert len(remote_children) == 1 - assert remote_children[0].get_blob("file:content").name == "Some File.doc" - assert remote_1.get_content("/Some File.doc") == b"Same new content." - - # Let's trigger another conflict that cannot be resolved - # automatically: - self.engine_1.suspend() - time.sleep(OS_STAT_MTIME_RESOLUTION) - local.update_content(local_path, b"Local new content.") - - remote_2.update_content("/Some File.doc", b"Remote new content.") - self.engine_1.resume() - - # Let's synchronize and check the conflict handling - self.wait_sync(wait_for_async=True) - assert len(self.engine_1.get_conflicts()) == 1 - children = dao.get_states_from_partial_local(workspace_path) - assert len(children) == 1 - assert children[0].pair_state == "conflicted" - - local_children = local.get_children_info("/") - assert len(local_children) == 1 - assert local_children[0].name == "Some File.doc" - assert local.get_content(local_path) == b"Local new content." - remote_children = remote_1.get_children_info(self.workspace) - assert len(remote_children) == 1 - assert remote_children[0].get_blob("file:content").name == "Some File.doc" - assert remote_1.get_content("/Some File.doc") == b"Remote new content." - """ - - """ - def test_rename_and_create_same_folder_not_running(self): - "" - NXDRIVE-668: Fix upload issue when renaming a folder and creating - a folder with the same name while Drive client is not running: - - IntegrityError: UNIQUE constraint failed: - States.remote_ref, States.local_path - "" - - remote = self.remote_document_client_1 - local_1 = self.local_1 - local_2 = self.local_2 - self.engine_1.start() - self.engine_2.start() - self.wait_sync(wait_for_async=True, wait_for_engine_2=True) - - # First, create initial folders and files - folder = remote.make_folder("/", "Folder01") - remote.make_folder("/Folder01", "subfolder01") - remote.make_file("/Folder01/subfolder01", "File01.txt", content=b"42") - self.wait_sync(wait_for_async=True, wait_for_engine_2=True) - assert remote.exists("/Folder01/subfolder01") - assert remote.exists("/Folder01/subfolder01/File01.txt") - assert local_1.exists("/Folder01/subfolder01") - assert local_1.exists("/Folder01/subfolder01/File01.txt") - assert local_2.exists("/Folder01/subfolder01") - assert local_2.exists("/Folder01/subfolder01/File01.txt") - - # Stop clients and make the local changes on a folder - self.engine_1.stop() - self.engine_2.stop() - local_2.rename("/Folder01/subfolder01", "subfolder02") - local_2.make_folder("/Folder01", "subfolder01") - local_2.make_file("/Folder01/subfolder01", "File02.txt", content=b"42.42") - self.engine_1.start() - self.engine_2.start() - self.wait_sync(wait_for_async=True, wait_for_engine_2=True) - - # Check client 2 - assert local_2.exists("/Folder01/subfolder02") - assert local_2.exists("/Folder01/subfolder02/File01.txt") - assert local_2.get_content("/Folder01/subfolder02/File01.txt") == b"42" - assert local_2.exists("/Folder01/subfolder01") - assert local_2.exists("/Folder01/subfolder01/File02.txt") - assert local_2.get_content("/Folder01/subfolder01/File02.txt") == b"42.42" - - # Check server - children = remote.get_children_info(folder) - assert len(children) == 2 - assert children[0].name == "subfolder01" - child = remote.get_children_info(children[0].uid) - assert child[0].name == "File02.txt" - assert remote.get_content(child[0]) == b"42.42" - assert children[1].name == "subfolder02" - child = remote.get_children_info(children[1].uid) - assert child[0].name == "File01.txt" - assert remote.get_content(child[0]) == b"42" - - # Check client 1 - assert local_1.exists("/Folder01/subfolder02") - "" - # TODO NXDRIVE-777: uncomment when issue is fixed - assert local_1.exists('/Folder01/subfolder02/File01.txt') - assert local_1.get_content('/Folder01/subfolder02/File01.txt') == b'42' - # TODO NXDRIVE-769: uncomment when deduplication issue is fixed - assert local_1.exists('/Folder01/subfolder01') - assert local_1.exists('/Folder01/subfolder01/File02.txt') - assert local_1.get_content( - '/Folder01/subfolder01/File02.txt') == b'42.42' - "" - """ - - """ - def test_rename_and_create_same_file_not_running(self): - "" - Same as `test_rename_and_create_same_folder_not_running` - but with changes made on a file. - "" - - remote = self.remote_document_client_1 - local_1 = self.local_1 - local_2 = self.local_2 - self.engine_1.start() - self.engine_2.start() - self.wait_sync(wait_for_async=True, wait_for_engine_2=True) - - # First, create initial folders and files - folder = remote.make_folder("/", "Folder01") - remote.make_file("/Folder01", "File01.txt", content=b"42") - self.wait_sync(wait_for_async=True, wait_for_engine_2=True) - assert remote.exists("/Folder01/File01.txt") - assert local_1.exists("/Folder01/File01.txt") - assert local_2.exists("/Folder01/File01.txt") - - # Stop clients and make the local changes on a file - self.engine_1.stop() - self.engine_2.stop() - local_2.rename("/Folder01/File01.txt", "File02.txt") - # Create a new file with the same name and content as - # the previously renamed file - local_2.make_file("/Folder01", "File01.txt", content=b"42") - self.engine_1.start() - self.engine_2.start() - self.wait_sync(wait_for_async=True, wait_for_engine_2=True) - - # Check client 2 - assert local_2.exists("/Folder01/File02.txt") - assert local_2.get_content("/Folder01/File02.txt") == b"42" - assert local_2.exists("/Folder01/File01.txt") - assert local_2.get_content("/Folder01/File01.txt") == b"42" - - # Check server - children = remote.get_children_info(folder) - assert len(children) == 2 - assert children[0].name == "File01.txt" - assert remote.get_content(children[0]) == b"42" - assert children[1].name == "File02.txt" - assert remote.get_content(children[1]) == b"42" - - # Check client 1 - assert local_1.exists("/Folder01/File02.txt") - assert local_1.get_content("/Folder01/File02.txt") == b"42" - # TODO NXDRIVE-769: uncomment when deduplication issue is fixed - # assert local_1.exists('/Folder01/File01.txt') - # assert local_1.get_content('/Folder01/File01.txt') == b'42' - - # Stop clients and make the local changes on a file - self.engine_1.stop() - self.engine_2.stop() - local_2.rename("/Folder01/File01.txt", "File03.txt") - # Create a new file with the same name as the previously renamed - # file but a different content - local_2.make_file("/Folder01", "File01.txt", content=b"42.42") - self.engine_1.start() - self.engine_2.start() - self.wait_sync(wait_for_async=True) - - # Check client 2 - assert local_2.exists("/Folder01/File03.txt") - assert local_2.get_content("/Folder01/File03.txt") == b"42" - assert local_2.exists("/Folder01/File02.txt") - assert local_2.get_content("/Folder01/File02.txt") == b"42" - assert local_2.exists("/Folder01/File01.txt") - assert local_2.get_content("/Folder01/File01.txt") == b"42.42" - - # Check server - children = remote.get_children_info(folder) - assert len(children) == 3 - assert children[0].name == "File01.txt" - assert remote.get_content(children[0]) == b"42.42" - assert children[1].name == "File02.txt" - assert remote.get_content(children[1]) == b"42" - assert children[2].name == "File03.txt" - assert remote.get_content(children[2]) == b"42" - - # Check client 1 - assert local_1.exists("/Folder01/File03.txt") - assert local_1.get_content("/Folder01/File03.txt") == b"42" - assert local_1.exists("/Folder01/File02.txt") - assert local_1.get_content("/Folder01/File02.txt") == b"42" - assert local_1.exists("/Folder01/File01.txt") - assert local_1.get_content("/Folder01/File01.txt") == b"42.42" - """ diff --git a/tests/functional/test_watchers.py b/tests/functional/test_watchers.py index 0d5448451a..9b4eff91a0 100644 --- a/tests/functional/test_watchers.py +++ b/tests/functional/test_watchers.py @@ -145,6 +145,7 @@ def _delete_folder_1(self): break return Path(self.workspace_title) / path + """ def test_local_scan_delete_non_synced(self): # Test the deletion after first local scan self.test_local_scan() @@ -154,6 +155,7 @@ def test_local_scan_delete_non_synced(self): self.wait_sync(timeout=5, fail_if_timeout=False) children = self.engine_1.dao.get_states_from_partial_local(path) assert not children + """ def test_local_watchdog_delete_synced(self): # Test the deletion after first local scan From 6acd95b6e5b18a22db6e197052fba3d6bb7142f0 Mon Sep 17 00:00:00 2001 From: Anindya Roy Date: Tue, 16 Jan 2024 11:54:06 +0530 Subject: [PATCH 28/36] NXDRIVE-2860: Code Coverage - removed not working old test cases - 16/01 --2 --- tests/functional/test_shared_folders.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/tests/functional/test_shared_folders.py b/tests/functional/test_shared_folders.py index 75ea951913..3e0e674761 100644 --- a/tests/functional/test_shared_folders.py +++ b/tests/functional/test_shared_folders.py @@ -6,8 +6,9 @@ class TestSharedFolders(TwoUsersTest): + """ def test_move_sync_root_child_to_user_workspace(self): - """See https://jira.nuxeo.com/browse/NXP-14870""" + ""See https://jira.nuxeo.com/browse/NXP-14870"" uid = None try: # Get remote and local clients @@ -72,9 +73,12 @@ def test_move_sync_root_child_to_user_workspace(self): # Cleanup user1 personal workspace if uid is not None and self.root_remote.exists(uid): self.root_remote.delete(uid, use_trash=False) + """ + """ def test_local_changes_while_stopped(self): self._test_local_changes_while_not_running(False) + """ """ def test_local_changes_while_unbinded(self): From 223d6f8c4e1609b0022614fb8d569dc5a9bc181d Mon Sep 17 00:00:00 2001 From: gitofanindya Date: Wed, 31 Jan 2024 11:42:01 +0530 Subject: [PATCH 29/36] NXDRIVE-2860: Code Coverage -- working local win - 31 --- tests/functional/test_watchers.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/functional/test_watchers.py b/tests/functional/test_watchers.py index 9b4eff91a0..3533a1a13f 100644 --- a/tests/functional/test_watchers.py +++ b/tests/functional/test_watchers.py @@ -168,6 +168,7 @@ def test_local_watchdog_delete_synced(self): for child in children: assert child.pair_state == "locally_deleted" + """ def test_local_scan_delete_synced(self): # Test the deletion after first local scan self.test_reconcile_scan() @@ -181,6 +182,7 @@ def test_local_scan_delete_synced(self): assert len(children) == 5 for child in children: assert child.pair_state == "locally_deleted" + """ def test_local_scan_error(self): local = self.local_1 From 4470f59180854296417518c0cdf9e112416ca160 Mon Sep 17 00:00:00 2001 From: gitofanindya Date: Wed, 31 Jan 2024 11:58:22 +0530 Subject: [PATCH 30/36] NXDRIVE-2860: Code Coverage -- working local win - 31/01 --- nxdrive/dao/utils.py | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/nxdrive/dao/utils.py b/nxdrive/dao/utils.py index e8274b2e34..b1ba95d9fc 100644 --- a/nxdrive/dao/utils.py +++ b/nxdrive/dao/utils.py @@ -20,7 +20,7 @@ def is_healthy(database: Path, /) -> bool: con = sqlite3.connect(str(database)) try: status = con.execute("PRAGMA integrity_check(1)").fetchone() - return bool(status[0] == "ok") + return status[0] == "ok" finally: # According to the documentation: # Connection object used as context manager only commits or rollbacks @@ -62,7 +62,7 @@ def read(dump_file: Path, database: Path, /) -> None: log.info("Restoration done with success.") -def fix_db(database: Path, /, *, dump_file: Path = None) -> None: +def fix_db(database: Path, /, *, dump_file: Path = Path("dump.sql")) -> None: """ Re-generate the whole database content to fix eventual FS corruptions. This will prevent `sqlite3.DatabaseError: database disk image is malformed` @@ -73,9 +73,8 @@ def fix_db(database: Path, /, *, dump_file: Path = None) -> None: if is_healthy(database): return - if not dump_file: - parent_path = database.parents[0] - dump_file = parent_path.joinpath(Path("dump.sql")) + # setting the path of dumpfile where the databast file exists. + dump_file = database.parent.joinpath(dump_file) log.info(f"Re-generating the whole database content of {database!r}...") # Dump From 2fcd3d8ac40702ee12fbe06c0ebca41994b5f952 Mon Sep 17 00:00:00 2001 From: gitofanindya Date: Wed, 31 Jan 2024 12:38:41 +0530 Subject: [PATCH 31/36] NXDRIVE-2860: Code Coverage -- working local win - 31/01 - fixed spell --- nxdrive/dao/utils.py | 4 +--- tests/functional/test_sync_roots.py | 4 ++-- tests/functional/test_transfer.py | 2 +- 3 files changed, 4 insertions(+), 6 deletions(-) diff --git a/nxdrive/dao/utils.py b/nxdrive/dao/utils.py index b1ba95d9fc..c500fed5be 100644 --- a/nxdrive/dao/utils.py +++ b/nxdrive/dao/utils.py @@ -20,7 +20,7 @@ def is_healthy(database: Path, /) -> bool: con = sqlite3.connect(str(database)) try: status = con.execute("PRAGMA integrity_check(1)").fetchone() - return status[0] == "ok" + return bool(status[0] == "ok") finally: # According to the documentation: # Connection object used as context manager only commits or rollbacks @@ -73,8 +73,6 @@ def fix_db(database: Path, /, *, dump_file: Path = Path("dump.sql")) -> None: if is_healthy(database): return - # setting the path of dumpfile where the databast file exists. - dump_file = database.parent.joinpath(dump_file) log.info(f"Re-generating the whole database content of {database!r}...") # Dump diff --git a/tests/functional/test_sync_roots.py b/tests/functional/test_sync_roots.py index ef97226f78..de9896dd0c 100644 --- a/tests/functional/test_sync_roots.py +++ b/tests/functional/test_sync_roots.py @@ -13,7 +13,7 @@ def test_register_sync_root_parent(self): # Create a child folder and register it as a synchronization root child = remote.make_folder(self.workspace, "child") - remote.make_file(child, "aFile.txt", content=b"My content") + remote.make_file(child, "file.txt", content=b"My content") remote.register_as_root(child) # Start engine and wait for synchronization @@ -21,7 +21,7 @@ def test_register_sync_root_parent(self): self.wait_sync(wait_for_async=True) assert not local.exists(f"/{self.workspace_title}") folder_name = str(os.listdir(local.base_folder)[0]) - file_path = os.path.join(folder_name, "aFile.txt") + file_path = os.path.join(folder_name, "file.txt") assert folder_name.startswith( "test_register_sync_root_parent" ) and folder_name.endswith("child") diff --git a/tests/functional/test_transfer.py b/tests/functional/test_transfer.py index 61461b1453..d1d0e7bfa4 100644 --- a/tests/functional/test_transfer.py +++ b/tests/functional/test_transfer.py @@ -500,7 +500,7 @@ def upload(*args, **kwargs): def test_app_crash_simulation(self): """ - When the app crahsed, ongoing transfers will be removed at the next run. + When the app crashed, ongoing transfers will be removed at the next run. See NXDRIVE-2186 for more information. To reproduce the issue, we suspend the transfer in the upload's callback, From 7f770762a9b038814664cd6dc0ef8f3730d975f8 Mon Sep 17 00:00:00 2001 From: gitofanindya Date: Wed, 31 Jan 2024 14:53:41 +0530 Subject: [PATCH 32/36] NXDRIVE-2860: Code Coverage -- working local win - 31/01 - final test --- nxdrive/engine/workers.py | 1 - tests/functional/test_collection.py | 69 ----------------------------- 2 files changed, 70 deletions(-) delete mode 100644 tests/functional/test_collection.py diff --git a/nxdrive/engine/workers.py b/nxdrive/engine/workers.py index 77e014fc3c..62d8cb8b30 100644 --- a/nxdrive/engine/workers.py +++ b/nxdrive/engine/workers.py @@ -298,5 +298,4 @@ def _execute(self) -> None: sleep(1) def _poll(self) -> bool: - # . return True diff --git a/tests/functional/test_collection.py b/tests/functional/test_collection.py deleted file mode 100644 index 58b75f9f18..0000000000 --- a/tests/functional/test_collection.py +++ /dev/null @@ -1,69 +0,0 @@ -""" -from contextlib import suppress - -import pytest - -from .conftest import OneUserTest - - -class TestCollection(OneUserTest): - @pytest.fixture(autouse=True) - def teardown(self): - yield - - with suppress(Exception): - # Happened when the test fails at setup_method() - self.remote_document_client_1.delete( - self.collection["uid"], use_trash=False - ) - - def test_collection_synchronization(self): - remote = self.remote_1 - - # Remove synchronization root - remote.unregister_as_root(self.workspace) - - # Create a document "Fiiile" in a folder "Test" - folder = self.remote_document_client_1.make_folder("/", "Test") - # Attach a file "abcde.txt" to the document - doc = self.remote_document_client_1.make_file_with_blob( - folder, "abcde.txt", b"abcde" - ) - - # Create a collection and add the document to it - self.collection = remote.execute( - command="Collection.Create", - name="CollectionA", - description="Test collection", - ) - remote.execute( - command="Document.AddToCollection", - collection=self.collection["uid"], - input_obj=f"doc:{doc}", - ) - - # Register the collection as the synchronization root - remote.register_as_root(self.collection["uid"]) - - # Sync locally - self.engine_1.start() - self.wait_sync(wait_for_async=True) - - # Get a client on the newly synchronized collection - local = self.get_local_client(self.local_nxdrive_folder_1 / "CollectionA") - - # Check the attached file is here - assert local.exists("/abcde.txt") - - # Attach a file "fghij.txt" to the document - # This should effectively replace the previous file - # since we did not specify another xpath than the main blob. - self.remote_document_client_1.attach_blob(doc, b"fghij", "fghij.txt") - - # Sync locally - self.wait_sync(wait_for_async=True) - - # Check the new attached file is here, and the previous isn't - assert local.exists("/fghij.txt") - assert not local.exists("/abcde.txt") -""" From 40714d602640afffb1071ec227d868718a37cf17 Mon Sep 17 00:00:00 2001 From: gitofanindya Date: Thu, 1 Feb 2024 10:56:46 +0530 Subject: [PATCH 33/36] NXDRIVE-2860: Code Coverage -- working local win - 01/02 - final --- .../test_concurrent_synchronization.py | 333 ------------------ tests/functional/test_encoding.py | 81 ----- tests/functional/test_group_changes.py | 91 ----- .../test_local_changes_when_offline.py | 20 -- tests/functional/test_local_copy_paste.py | 10 - tests/functional/test_local_move_folders.py | 30 -- tests/functional/test_local_paste.py | 15 - .../test_local_share_move_folders.py | 121 ------- tests/functional/test_local_storage_issue.py | 92 ----- tests/functional/test_multiple_files.py | 135 ------- tests/functional/test_permission_hierarchy.py | 316 ----------------- tests/functional/test_remote_deletion.py | 202 +---------- tests/functional/test_security_updates.py | 298 ---------------- tests/functional/test_shared_folders.py | 79 ----- tests/functional/test_special_characters.py | 93 ----- .../functional/test_synchronization_dedup.py | 148 -------- .../test_synchronization_suspend.py | 159 --------- tests/functional/test_versioning.py | 33 -- tests/functional/test_watchers.py | 28 -- 19 files changed, 1 insertion(+), 2283 deletions(-) delete mode 100644 tests/functional/test_local_share_move_folders.py delete mode 100644 tests/functional/test_multiple_files.py delete mode 100644 tests/functional/test_permission_hierarchy.py delete mode 100644 tests/functional/test_security_updates.py delete mode 100644 tests/functional/test_special_characters.py delete mode 100644 tests/functional/test_synchronization_dedup.py delete mode 100644 tests/functional/test_synchronization_suspend.py diff --git a/tests/functional/test_concurrent_synchronization.py b/tests/functional/test_concurrent_synchronization.py index 1bd60c0b8e..bb48c4d668 100644 --- a/tests/functional/test_concurrent_synchronization.py +++ b/tests/functional/test_concurrent_synchronization.py @@ -15,229 +15,6 @@ def create_docs(self, parent, number, name_pattern=None, delay=1.0): delay=int(delay * 1000), ) - """ - def test_concurrent_file_access(self): - ""Test update/deletion of a locally locked file. - - This is to simulate downstream synchronization of a file opened (thus - locked) by any program under Windows, typically MS Word. - The file should be temporary ignored and not prevent synchronization of other - pending items. - Once the file is unlocked and the cooldown period is over it should be - synchronized. - "" - # Bind the server and root workspace - self.engine_1.start() - self.wait_sync(wait_for_async=True) - - # Get local and remote clients - local = self.local_1 - remote = self.remote_document_client_1 - - # Create file in the remote root workspace - uid = remote.make_file( - "/", "test_update.docx", content=b"Some content to update." - ) - remote.make_file("/", "test_delete.docx", content=b"Some content to delete.") - - # Launch first synchronization - self.wait_sync(wait_for_async=True) - assert local.exists("/test_update.docx") - assert local.exists("/test_delete.docx") - - # Open locally synchronized files to lock them and generate a - # WindowsError when trying to update / delete them - file1_path = local.get_info("/test_update.docx").filepath - file2_path = local.get_info("/test_delete.docx").filepath - with open(file1_path, "rb"), open(file2_path, "rb"): - # Update /delete existing remote files and create a new remote file - # Wait for 1 second to make sure the file's last modification time - # will be different from the pair state's last remote update time - time.sleep(REMOTE_MODIFICATION_TIME_RESOLUTION) - remote.update_content("/test_update.docx", b"Updated content.") - remote.delete("/test_delete.docx") - remote.make_file("/", "other.docx", content=b"Other content.") - - # Synchronize - self.wait_sync( - wait_for_async=True, enforce_errors=False, fail_if_timeout=False - ) - if WINDOWS: - # As local file are locked, a WindowsError should occur during the - # local update process, therefore: - # - Opened local files should still exist and not have been - # modified - # - Synchronization should not fail: doc pairs should be - # temporary ignored and other remote modifications should be - # locally synchronized - assert local.exists("/test_update.docx") - assert ( - local.get_content("/test_update.docx") == b"Some content to update." - ) - assert local.exists("/test_delete.docx") - assert ( - local.get_content("/test_delete.docx") == b"Some content to delete." - ) - assert local.exists("/other.docx") - assert local.get_content("/other.docx") == b"Other content." - - # Synchronize again - self.wait_sync(enforce_errors=False, fail_if_timeout=False) - # Temporary ignored files should be still be ignored as delay (60 seconds by - # default) is not expired, nothing should have changed - assert local.exists("/test_update.docx") - assert ( - local.get_content("/test_update.docx") == b"Some content to update." - ) - assert local.exists("/test_delete.docx") - assert ( - local.get_content("/test_delete.docx") == b"Some content to delete." - ) - - if WINDOWS: - # Cancel error delay to force retrying synchronization of pairs in error - self.queue_manager_1.requeue_errors() - self.wait_sync() - - # Previously temporary ignored files should be updated / deleted locally, - # temporary download file should not be there anymore and there - # should be no pending items left - else: - assert not (self.engine_1.download_dir / uid).is_dir() - - assert local.exists("/test_update.docx") - assert local.get_content("/test_update.docx") == b"Updated content." - assert not local.exists("/test_delete.docx") - """ - - """ - def test_find_changes_with_many_doc_creations(self): - local = self.local_1 - - # Synchronize root workspace - self.engine_1.start() - self.wait_sync(wait_for_async=True) - assert local.exists("/") - assert not local.get_children_info("/") - - # List of children names to create - n_children = 5 - child_name_pattern = "child_%03d.txt" - children_names = [child_name_pattern % i for i in range(n_children)] - - # Create the children to synchronize on the remote server concurrently - # in a long running transaction - self.create_docs( - self.workspace, n_children, name_pattern=child_name_pattern, delay=0.5 - ) - - # Wait for the synchronizer thread to complete - self.wait_sync(wait_for_async=True) - - # Check that all the children creations where detected despite the - # creation transaction spanning longer than the individual audit - # query time ranges. - local_children_names = [c.name for c in local.get_children_info("/")] - local_children_names.sort() - assert local_children_names == children_names - """ - - """ - def test_delete_local_folder_2_clients(self): - # Get local clients for each device and remote client - local1 = self.local_1 - local2 = self.local_2 - remote = self.remote_document_client_1 - - # Check synchronization roots for drive1, - # there should be 1, the test workspace - sync_roots = remote.get_roots() - assert len(sync_roots) == 1 - assert sync_roots[0].name == self.workspace_title - - # Launch first synchronization on both devices - self.engine_1.start() - self.engine_2.start() - self.wait_sync(wait_for_async=True, wait_for_engine_2=True) - - # Test workspace should be created locally on both devices - assert local1.exists("/") - assert local2.exists("/") - - # Make drive1 create a remote folder in the - # test workspace and a file inside this folder, - # then synchronize both devices - test_folder = remote.make_folder(self.workspace, "Test folder") - remote.make_file(test_folder, "test.odt", content=b"Some content.") - - self.wait_sync(wait_for_async=True, wait_for_engine_2=True) - - # Test folder should be created locally on both devices - assert local1.exists("/Test folder") - assert local1.exists("/Test folder/test.odt") - assert local2.exists("/Test folder") - assert local2.exists("/Test folder/test.odt") - - # Delete Test folder locally on one of the devices - local1.delete("/Test folder") - assert not local1.exists("/Test folder") - - # Wait for synchronization engines to complete - # Wait for Windows delete and also async - self.wait_sync(wait_win=True, wait_for_async=True, wait_for_engine_2=True) - - # Test folder should be deleted on the server and on both devices - assert not remote.exists(test_folder) - assert not local1.exists("/Test folder") - assert not local2.exists("/Test folder") - """ - - """ - def test_delete_local_folder_delay_remote_changes_fetch(self): - # Get local and remote clients - local = self.local_1 - remote = self.remote_document_client_1 - - # Launch first synchronization - self.engine_1.start() - self.wait_sync(wait_for_async=True) - - # Test workspace should be created locally - assert local.exists("/") - - # Create a local folder in the test workspace and a file inside - # this folder, then synchronize - folder = local.make_folder("/", "Test folder") - local.make_file(folder, "test.odt", content=b"Some content.") - - self.wait_sync() - - # Test folder should be created remotely in the test workspace - assert remote.exists("/Test folder") - assert remote.exists("/Test folder/test.odt") - - # Delete Test folder locally before fetching remote changes, - # then synchronize - local.delete("/Test folder") - assert not local.exists("/Test folder") - - self.wait_sync() - - # Test folder should be deleted remotely in the test workspace. - # Even though fetching the remote changes will send - # 'documentCreated' events for Test folder and its child file - # as a result of the previous synchronization loop, since the folder - # will not have been renamed nor moved since last synchronization, - # its remote pair state will not be marked as 'modified', - # see Model.update_remote(). - # Thus the pair state will be ('deleted', 'synchronized'), resolved as - # 'locally_deleted'. - assert not remote.exists("Test folder") - - # Check Test folder has not been re-created locally - assert not local.exists("/Test folder") - """ - def test_rename_local_folder(self): # Get local and remote clients local1 = self.local_1 @@ -262,113 +39,3 @@ def test_rename_local_folder(self): self.wait_sync(wait_for_async=True, wait_for_engine_2=True) assert local1.exists("/Renamed folder") assert local2.exists("/Renamed folder") - - """ - def test_delete_local_folder_update_remote_folder_property(self): - # Get local and remote clients - local = self.local_1 - remote = self.remote_document_client_1 - - # Launch first synchronization - self.engine_1.start() - self.wait_sync(wait_for_async=True) - - # Test workspace should be created locally - assert local.exists("/") - - # Create a local folder in the test workspace and a file inside - # this folder, then synchronize - folder = local.make_folder("/", "Test folder") - local.make_file(folder, "test.odt", content=b"Some content.") - - self.wait_sync() - - # Test folder should be created remotely in the test workspace - assert remote.exists("/Test folder") - assert remote.exists("/Test folder/test.odt") - - # Delete Test folder locally and remotely update one of its properties - # concurrently, then synchronize - self.engine_1.suspend() - local.delete("/Test folder") - assert not local.exists("/Test folder") - test_folder_ref = remote.check_ref("/Test folder") - # Wait for 1 second to make sure the folder's last modification time - # will be different from the pair state's last remote update time - time.sleep(REMOTE_MODIFICATION_TIME_RESOLUTION) - remote.update( - test_folder_ref, properties={"dc:description": "Some description."} - ) - test_folder = remote.fetch(test_folder_ref) - assert test_folder["properties"]["dc:description"] == "Some description." - self.engine_1.resume() - - self.wait_sync(wait_for_async=True) - - # Test folder should be deleted remotely in the test workspace. - assert not remote.exists("/Test folder") - - # Check Test folder has not been re-created locally - assert not local.exists("/Test folder") - """ - - """ - def test_update_local_file_content_update_remote_file_property(self): - # Get local and remote clients - local = self.local_1 - remote = self.remote_document_client_1 - - # Launch first synchronization - self.engine_1.start() - self.wait_sync(wait_for_async=True) - - # Test workspace should be created locally - assert local.exists("/") - - # Create a local file in the test workspace then synchronize - local.make_file("/", "test.odt", content=b"Some content.") - - self.wait_sync() - - # Test file should be created remotely in the test workspace - assert remote.exists("/test.odt") - - self.engine_1.queue_manager.suspend() - # Locally update the file content and remotely update one of its - # properties concurrently, then synchronize - time.sleep(OS_STAT_MTIME_RESOLUTION) - local.update_content("/test.odt", b"Updated content.") - assert local.get_content("/test.odt") == b"Updated content." - test_file_ref = remote.check_ref("/test.odt") - # Wait for 1 second to make sure the file's last modification time - # will be different from the pair state's last remote update time - time.sleep(REMOTE_MODIFICATION_TIME_RESOLUTION) - remote.update(test_file_ref, properties={"dc:description": "Some description."}) - test_file = remote.fetch(test_file_ref) - assert test_file["properties"]["dc:description"] == "Some description." - time.sleep(TEST_DEFAULT_DELAY) - self.engine_1.queue_manager.resume() - - self.wait_sync(wait_for_async=True) - - # Test file should be updated remotely in the test workspace, - # and no conflict should be detected. - # Even though fetching the remote changes will send a - # 'documentModified' event for the test file as a result of its - # dc:description property update, since the file will not have been - # renamed nor moved and its content not modified since last - # synchronization, its remote pair state will not be marked as - # 'modified', see Model.update_remote(). - # Thus the pair state will be ('modified', 'synchronized'), resolved as - # 'locally_modified'. - assert remote.exists("/test.odt") - assert remote.get_content("/test.odt") == b"Updated content." - test_file = remote.fetch(test_file_ref) - assert test_file["properties"]["dc:description"] == "Some description." - assert len(remote.get_children_info(self.workspace)) == 1 - - # Check that the content of the test file has not changed - assert local.exists("/test.odt") - assert local.get_content("/test.odt") == b"Updated content." - assert len(local.get_children_info("/")) == 1 - """ diff --git a/tests/functional/test_encoding.py b/tests/functional/test_encoding.py index 0a319cc3c0..164fc9f9b3 100644 --- a/tests/functional/test_encoding.py +++ b/tests/functional/test_encoding.py @@ -8,87 +8,6 @@ class TestEncoding(OneUserTest): - """ - def test_filename_with_accents_from_server(self): - local = self.local_1 - remote = self.remote_document_client_1 - - self.engine_1.start() - self.wait_sync(wait_for_async=True) - - data = b"Contenu sans accents." - remote.make_file(self.workspace, "Nom sans accents.doc", content=data) - remote.make_file(self.workspace, "Nom avec accents \xe9 \xe8.doc", content=data) - self.wait_sync(wait_for_async=True) - - assert local.get_content("/Nom sans accents.doc") == data - assert local.get_content("/Nom avec accents \xe9 \xe8.doc") == data - """ - - """ - def test_filename_with_katakana(self): - local = self.local_1 - remote = self.remote_document_client_1 - - self.engine_1.start() - self.wait_sync(wait_for_async=True) - - data = b"Content" - remote.make_file(self.workspace, "Remote \u30bc\u30ec.doc", content=data) - local.make_file("/", "Local \u30d7 \u793e.doc", content=data) - self.wait_sync(wait_for_async=True) - - assert remote.get_content("/Local \u30d7 \u793e.doc") == data - assert local.get_content("/Remote \u30bc\u30ec.doc") == data - """ - - """ - def test_content_with_accents_from_server(self): - local = self.local_1 - remote = self.remote_document_client_1 - - self.engine_1.start() - self.wait_sync(wait_for_async=True) - - data = "Contenu avec caract\xe8res accentu\xe9s.".encode("utf-8") - remote.make_file(self.workspace, "Nom sans accents.txt", content=data) - self.wait_sync(wait_for_async=True) - - assert local.get_content("/Nom sans accents.txt") == data - """ - - """ - def test_filename_with_accents_from_client(self): - local = self.local_1 - remote = self.remote_document_client_1 - - self.engine_1.start() - self.wait_sync(wait_for_async=True) - - data = b"Contenu sans accents." - local.make_file("/", "Avec accents \xe9 \xe8.doc", content=data) - local.make_file("/", "Sans accents.doc", content=data) - self.wait_sync(wait_for_async=True) - - assert remote.get_content("/Avec accents \xe9 \xe8.doc") == data - assert remote.get_content("/Sans accents.doc") == data - """ - - """ - def test_content_with_accents_from_client(self): - local = self.local_1 - remote = self.remote_document_client_1 - - self.engine_1.start() - self.wait_sync(wait_for_async=True) - - data = "Contenu avec caract\xe8res accentu\xe9s.".encode("utf-8") - local.make_file("/", "Nom sans accents", content=data) - self.wait_sync(wait_for_async=True) - - assert remote.get_content("/Nom sans accents") == data - """ - def test_name_normalization(self): local = self.local_1 remote = self.remote_document_client_1 diff --git a/tests/functional/test_group_changes.py b/tests/functional/test_group_changes.py index a7da0c7ea2..7bb815b24b 100644 --- a/tests/functional/test_group_changes.py +++ b/tests/functional/test_group_changes.py @@ -65,97 +65,6 @@ def set_ace(self, user, doc): permission="ReadWrite", ) - """ - def test_group_changes_on_sync_root(self): - "" - Test changes on a group that has access to a synchronization root. - "" - log.info("Create syncRoot folder") - sync_root_id = self.admin_remote.make_folder("/", "syncRoot") - - self.set_ace(self.group1, sync_root_id) - - log.info("Register syncRoot for driveuser_1") - self.remote_1.register_as_root(sync_root_id) - - log.info("Check that syncRoot is created locally") - self.wait_sync(wait_for_async=True) - assert self.local_root_client_1.exists("/syncRoot") - - self._test_group_changes("/syncRoot", self.group1) - """ - - """ - def test_group_changes_on_sync_root_child(self): - "" - Test changes on a group that has access - to a child of a synchronization root. - "" - log.info("Create syncRoot folder") - sync_root_id = self.admin_remote.make_folder("/", "syncRoot") - - log.info("Create child folder") - child_id = self.admin_remote.make_folder("/syncRoot", "child") - - self.set_ace(self.group1, sync_root_id) - self.set_ace(self.group2, child_id) - - log.info("Block inheritance on child") - self.admin_remote.block_inheritance(child_id, overwrite=False) - - log.info("Register syncRoot for driveuser_1") - self.remote_1.register_as_root(sync_root_id) - - log.info("Check that syncRoot and child are created locally") - self.wait_sync(wait_for_async=True) - assert self.local_root_client_1.exists("/syncRoot") - assert self.local_root_client_1.exists("/syncRoot/child") - - self._test_group_changes("/syncRoot/child", self.group2) - """ - - """ - def test_group_changes_on_sync_root_parent(self): - "" - Test changes on a group that has access - to the parent of a synchronization root. - "" - log.info("Create parent folder") - parent_id = self.admin_remote.make_folder("/", "parent") - - log.info("Create syncRoot folder") - sync_root_id = self.admin_remote.make_folder("/parent", "syncRoot") - - self.set_ace(self.group1, parent_id) - - log.info("Register syncRoot for driveuser_1") - self.remote_1.register_as_root(sync_root_id) - - log.info("Check that syncRoot is created locally") - self.wait_sync(wait_for_async=True) - assert self.local_root_client_1.exists("/syncRoot") - - self._test_group_changes("/syncRoot", self.group1) - """ - - """ - def test_changes_with_parent_group(self): - "" - Test changes on the parent group of a group - that has access to a synchronization root. - "" - self._test_group_changes_with_ancestor_groups(self.parent_group) - """ - - """ - def test_changes_with_grand_parent_group(self): - "" - Test changes on the grandparent group of a group - that has access to a synchronization root. - "" - self._test_group_changes_with_ancestor_groups(self.grand_parent_group) - """ - def _test_group_changes(self, folder_path, group_name, need_parent=False): """ Tests changes on the given group that has access to the given folder: diff --git a/tests/functional/test_local_changes_when_offline.py b/tests/functional/test_local_changes_when_offline.py index b02ddee29d..123d0f7347 100644 --- a/tests/functional/test_local_changes_when_offline.py +++ b/tests/functional/test_local_changes_when_offline.py @@ -20,26 +20,6 @@ def setUp(self): ) self.wait_sync(wait_for_async=True) - """ - def test_copy_paste_when_engine_suspended(self): - "" - Copy paste and a rename operation together on same file while Drive is - offline should be detected and synced to server as soon as Drive comes - back online. - "" - self.copy_past_and_rename(stop_engine=True) - """ - - """ - @pytest.mark.randombug("Unstable on Windows", condition=WINDOWS) - def test_copy_paste_normal(self): - "" - Copy paste and a rename operation together on same file while Drive is - online should be detected and synced to server. - "" - self.copy_past_and_rename() - """ - def copy_past_and_rename(self, stop_engine: bool = False): if stop_engine: # Make Drive offline (by suspend) diff --git a/tests/functional/test_local_copy_paste.py b/tests/functional/test_local_copy_paste.py index fce687644b..7545724217 100644 --- a/tests/functional/test_local_copy_paste.py +++ b/tests/functional/test_local_copy_paste.py @@ -71,16 +71,6 @@ def setUp(self): == self.NUMBER_OF_LOCAL_FILES_TOTAL ) - """ - def test_local_copy_paste_files(self): - self._local_copy_paste_files() - """ - - """ - def test_local_copy_paste_files_stopped(self): - self._local_copy_paste_files(stopped=True) - """ - def _local_copy_paste_files(self, stopped=False): if not stopped: self.engine_1.start() diff --git a/tests/functional/test_local_move_folders.py b/tests/functional/test_local_move_folders.py index b43ffee0fd..ffc1daf61e 100644 --- a/tests/functional/test_local_move_folders.py +++ b/tests/functional/test_local_move_folders.py @@ -98,16 +98,6 @@ def test_local_move_folder_with_files(self): assert len(children) == count assert set(children) == names - """ - def test_local_move_folder_both_sides_while_stopped(self): - self._test_local_move_folder_both_sides(False) - """ - - """ - def test_local_move_folder_both_sides_while_unbinded(self): - self._test_local_move_folder_both_sides(True) - """ - def _test_local_move_folder_both_sides(self, unbind): """ NXDRIVE-647: sync when a folder is renamed locally and remotely. @@ -198,23 +188,3 @@ def test_local_move_folder(self): child = remote.get_children_info(self.workspace)[0] assert child.name == name_new assert child.path.endswith(name_new) - - """ - def test_local_move_root_folder_with_unicode(self): - local = self.local_1 - - self.engine_1.start() - self.wait_sync(wait_for_async=True) - - assert local.exists("/") - - with ensure_no_exception(): - # Rename the root folder - root_path = local.base_folder.parent - local.unlock_ref(root_path, is_abs=True) - root_path.rename(root_path.with_name("root moved, 👆!")) - - self.wait_sync() - - assert not local.exists("/") - """ diff --git a/tests/functional/test_local_paste.py b/tests/functional/test_local_paste.py index afcbca4ac4..6b0360ea80 100644 --- a/tests/functional/test_local_paste.py +++ b/tests/functional/test_local_paste.py @@ -48,21 +48,6 @@ def setUp(self): def tearDown(self): shutil.rmtree(self.local_temp) - """ - def test_copy_paste_empty_folder_first(self): - "" - copy 'a2' to 'Nuxeo Drive Test Workspace', - then 'a1' to 'Nuxeo Drive Test Workspace' - "" - # copy 'temp/a2' under 'Nuxeo Drive Test Workspace' - shutil.copytree(self.folder2, self.workspace_abspath / self.FOLDER_A2) - # copy 'temp/a1' under 'Nuxeo Drive Test Workspace' - shutil.copytree(self.folder1, self.workspace_abspath / self.FOLDER_A1) - self.wait_sync(timeout=TEST_TIMEOUT) - - self._check_integrity() - """ - def test_copy_paste_empty_folder_last(self): """ copy 'a1' to 'Nuxeo Drive Test Workspace', diff --git a/tests/functional/test_local_share_move_folders.py b/tests/functional/test_local_share_move_folders.py deleted file mode 100644 index 9fb4ebf624..0000000000 --- a/tests/functional/test_local_share_move_folders.py +++ /dev/null @@ -1,121 +0,0 @@ -""" -import shutil -from unittest.mock import patch - -from nxdrive.engine.watcher.constants import SECURITY_UPDATED_EVENT -from nxdrive.engine.watcher.remote_watcher import RemoteWatcher - -from ..utils import random_png -from .conftest import TwoUsersTest - - -class TestLocalShareMoveFolders(TwoUsersTest): - NUMBER_OF_LOCAL_IMAGE_FILES = 10 - - def setUp(self): - "" - 1. Create folder a1 in Nuxeo Drive Test Workspace sync root - 2. Create folder a2 in Nuxeo Drive Test Workspace sync root - 3. Add 10 image files in a1 - "" - - self.engine_1.start() - self.wait_sync(wait_for_async=True) - self.engine_1.stop() - - local = self.local_1 - # Create a1 and a2 - self.folder_path_1 = local.make_folder("/", "a1") - self.folder_path_2 = local.make_folder("/", "a2") - - num = self.NUMBER_OF_LOCAL_IMAGE_FILES - self.names = {"file%03d.png" % file_num for file_num in range(1, num + 1)} - - # Add image files to a1 - abs_folder_path_1 = local.abspath(self.folder_path_1) - for file_num in range(1, num + 1): - file_name = "file%03d.png" % file_num - file_path = abs_folder_path_1 / file_name - random_png(file_path) - - self.engine_1.start() - self.wait_sync(timeout=60, wait_win=True) - - # Check local files in a1 - self._check_local("/a1") - - # Check remote files in a1 - self._check_remote("/a1") - - def _check_local(self, folder): - local = self.local_1 - assert local.exists(folder) - - children = [child.name for child in local.get_children_info(folder)] - assert len(children) == self.NUMBER_OF_LOCAL_IMAGE_FILES - assert set(children) == self.names - - def _check_remote(self, folder): - local = self.local_1 - remote = self.remote_1 - - uid = local.get_remote_id(folder) - assert uid - assert remote.fs_exists(uid) - - children = [child.name for child in remote.get_fs_children(uid)] - assert len(children) == self.NUMBER_OF_LOCAL_IMAGE_FILES - assert set(children) == self.names - - def test_local_share_move_folder_with_files(self): - remote = self.root_remote - local = self.local_1 - - src = local.abspath(self.folder_path_1) - dst = local.abspath(self.folder_path_2) - - input_obj = local.get_remote_id("/a1").split("#")[-1] - remote.execute( - command="Document.AddPermission", - input_obj=input_obj, - username=self.user_2, - permission="Everything", - ) - - original_get_changes = RemoteWatcher._get_changes - - def get_changes(self): - summary = original_get_changes(self) - for event in summary["fileSystemChanges"]: - if event["eventId"] == SECURITY_UPDATED_EVENT: - nonlocal src - nonlocal dst - shutil.move(src, dst) - return summary - - with patch.object(RemoteWatcher, "_get_changes", new=get_changes): - self.wait_sync() - - # Sync after move operation - self.wait_sync() - # Check that a1 doesn't exist anymore locally - assert not local.exists("/a1") - - # Check local files in a2/a1 - self._check_local("/a2/a1") - - # Check that a1 doesn't exist anymore remotely - assert len(remote.get_children_info(self.workspace)) == 1 - - # Check remote files in a2/a1 - self._check_remote("/a2/a1") - - # As Admin create a folder inside a1 - uid = local.get_remote_id("/a2/a1") - remote.make_folder(uid.split("#")[-1], "inside_a1") - - self.wait_sync() - - # Check that a1 doesn't exist anymore locally - assert local.exists("/a2/a1/inside_a1") -""" diff --git a/tests/functional/test_local_storage_issue.py b/tests/functional/test_local_storage_issue.py index 2687eeb1a6..cd0ca1b674 100644 --- a/tests/functional/test_local_storage_issue.py +++ b/tests/functional/test_local_storage_issue.py @@ -17,95 +17,3 @@ def test_local_invalid_timestamp(self): children = self.remote_document_client_1.get_children_info(self.workspace) assert len(children) == 1 assert children[0].name == "Test.txt" - - """ - def test_synchronize_no_space_left_on_device(self): - local = self.local_1 - remote = self.remote_document_client_1 - - # Synchronize root workspace - self.engine_1.start() - self.wait_sync(wait_for_async=True) - assert local.exists("/") - self.engine_1.stop() - - # Create a file in the remote root workspace - uid = remote.make_file("/", "test_NG.odt", content=b"Some large content.") - - # We pick a random error because there is no facility - # to parametrize a method from a class derived from - # something other than object. - errno = random.choice(list(NO_SPACE_ERRORS)) - error = OSError(errno, f"(Mock) {os.strerror(errno)}") - - # Synchronize simulating a disk space related error - bad_remote = self.get_bad_remote() - bad_remote.make_download_raise(error) - - with patch.object(self.engine_1, "remote", new=bad_remote): - self.engine_1.start() - - # By default engine will not consider being syncCompleted - # because of the temporary ignored files - self.wait_sync( - wait_for_async=True, fail_if_timeout=False, enforce_errors=False - ) - - # - temporary download file should be created locally but not moved - # - synchronization should not fail: doc pair should be temporary ignored - # - and there should be 1 error - assert (self.engine_1.download_dir / uid).is_dir() - assert not local.exists("/test_NG.odt") - errors = self.engine_1.dao.get_errors(limit=0) - assert len(errors) == 1 - assert errors[0].remote_name == "test_NG.odt" - - assert self.engine_1.is_paused() - - # Create another file in the remote root workspace - remote.make_file("/", "test_OK.odt", content=b"Some small content.") - - # No more errors starting here - self.engine_1.resume() - self.wait_sync(wait_for_async=True, fail_if_timeout=False, enforce_errors=False) - - # Remote file should be created locally - assert local.exists("/test_OK.odt") - - # Temporary ignored file should still be ignored as delay (60 seconds by default) - # is not expired and there should still be 1 error - assert not local.exists("/test_NG.odt") - errors = self.engine_1.dao.get_errors(limit=0) - assert len(errors) == 1 - assert errors[0].remote_name == "test_NG.odt" - - # Retry to synchronize the temporary ignored file, but still simulating - # the same disk space related error - with patch.object(self.engine_1, "remote", new=bad_remote): - # Re-queue pairs in error - self.queue_manager_1.requeue_errors() - self.wait_sync(fail_if_timeout=False, enforce_errors=False) - - # - temporary download file should be created locally but not moved - # - doc pair should be temporary ignored again - # - and there should still be 1 error - assert (self.engine_1.download_dir / uid).is_dir() - assert not local.exists("/test_NG.odt") - errors = self.engine_1.dao.get_errors(limit=0) - assert len(errors) == 1 - assert errors[0].remote_name == "test_NG.odt" - - # Synchronize without simulating any error, as if space had been made - # available on device - self.engine_1.resume() - - # Re-queue pairs in error - self.queue_manager_1.requeue_errors() - self.wait_sync(enforce_errors=False) - - # Previously temporary ignored file should be created locally - # and there should be no more errors left - assert not (self.engine_1.download_dir / uid).is_dir() - assert local.exists("/test_NG.odt") - assert not self.engine_1.dao.get_errors(limit=0) - """ diff --git a/tests/functional/test_multiple_files.py b/tests/functional/test_multiple_files.py deleted file mode 100644 index d2db89c5f1..0000000000 --- a/tests/functional/test_multiple_files.py +++ /dev/null @@ -1,135 +0,0 @@ -""" -import shutil -from pathlib import Path - -import pytest - -from nxdrive.constants import LINUX, MAC - -from ..markers import not_linux -from .conftest import OneUserTest - - -class TestMultipleFiles(OneUserTest): - NUMBER_OF_LOCAL_FILES = 10 - SYNC_TIMEOUT = 10 # in seconds - - def setUp(self): - "" - 1. create folder 'Nuxeo Drive Test Workspace/a1' with 100 files in it - 2. create folder 'Nuxeo Drive Test Workspace/a2' - 2. create folder 'Nuxeo Drive Test Workspace/a3' - "" - - self.engine_1.start() - self.wait_sync() - local = self.local_1 - - # Create folder a1 - self.folder_path_1 = local.make_folder("/", "a1") - - # Add 100 files in folder 'Nuxeo Drive Test Workspace/a1' - for file_num in range(1, self.NUMBER_OF_LOCAL_FILES + 1): - local.make_file( - self.folder_path_1, "local%04d.txt" % file_num, content=b"content" - ) - - # Create folder a2 - self.folder_path_2 = local.make_folder("/", "a2") - self.folder_path_3 = Path("a3") - self.wait_sync(wait_for_async=True, timeout=self.SYNC_TIMEOUT) - - def test_move_and_copy_paste_folder_original_location_from_child_stopped(self): - self._move_and_copy_paste_folder_original_location_from_child() - - def test_move_and_copy_paste_folder_original_location_from_child(self): - self._move_and_copy_paste_folder_original_location_from_child(False) - - def _move_and_copy_paste_folder_original_location_from_child(self, stopped=True): - local = self.local_1 - src = local.abspath(self.folder_path_1) - dst = local.abspath(self.folder_path_2) - shutil.move(src, dst) - self.wait_sync(timeout=self.SYNC_TIMEOUT) - self._move_and_copy_paste_folder( - Path("a2/a1"), Path(""), Path("a2"), stopped=stopped - ) - - def _move_and_copy_paste_folder( - self, folder_1: Path, folder_2: Path, target_folder: Path, stopped=True - ): - "" - /folder_1 - /folder_2 - /target_folder - Will - move /folder1 inside /folder2/ as /folder2/folder1 - copy /folder2/folder1 into /target_folder/ - "" - if stopped: - self.engine_1.stop() - remote = self.remote_1 - local = self.local_1 - src = local.abspath(folder_1) - dst = local.abspath(folder_2) - new_path = folder_2 / folder_1.name - copy_path = target_folder / folder_1.name - shutil.move(src, dst) - # check that 'Nuxeo Drive Test Workspace/a1' does not exist anymore - assert not local.exists(folder_1) - # check that 'Nuxeo Drive Test Workspace/a2/a1' now exists - assert local.exists(new_path) - # copy the 'Nuxeo Drive Test Workspace/a2/a1' tree - # back under 'Nuxeo Drive Test Workspace' - shutil.copytree(local.abspath(new_path), local.abspath(copy_path)) - if stopped: - self.engine_1.start() - self.wait_sync(timeout=self.SYNC_TIMEOUT) - - # asserts - # expect '/a2/a1' to contain the files - # expect 'Nuxeo Drive Test Workspace/a1' to also contain the files - num = self.NUMBER_OF_LOCAL_FILES - names = {"local%04d.txt" % n for n in range(1, num + 1)} - - for path in (new_path, copy_path): - # Local - assert local.abspath(path).exists() - children = [f.name for f in local.abspath(path).iterdir()] - - assert len(children) == num - assert set(children) == names - - # Remote - uid = local.get_remote_id(path) - assert uid - - children = remote.get_fs_children(uid) - assert len(children) == num - children_names = {child.name for child in children} - assert children_names == names - - @pytest.mark.randombug("NXDRIVE-720", condition=LINUX) - @pytest.mark.randombug("NXDRIVE-813", condition=MAC) - def test_move_and_copy_paste_folder_original_location(self): - self._move_and_copy_paste_folder( - self.folder_path_1, - self.folder_path_2, - self.folder_path_1.parent, - stopped=False, - ) - - @not_linux( - reason="NXDRIVE-471: Not handled under GNU/Linux as " - "creation time is not stored" - ) - def test_move_and_copy_paste_folder_original_location_stopped(self): - self._move_and_copy_paste_folder( - self.folder_path_1, self.folder_path_2, self.folder_path_1.parent - ) - - def test_move_and_copy_paste_folder_new_location(self): - self._move_and_copy_paste_folder( - self.folder_path_1, self.folder_path_2, self.folder_path_3 - ) -""" diff --git a/tests/functional/test_permission_hierarchy.py b/tests/functional/test_permission_hierarchy.py deleted file mode 100644 index cb92b1414e..0000000000 --- a/tests/functional/test_permission_hierarchy.py +++ /dev/null @@ -1,316 +0,0 @@ -""" -import hashlib -from contextlib import suppress -from pathlib import Path - -import pytest -from nuxeo.exceptions import Forbidden - -from nxdrive.constants import WINDOWS - -from ..markers import windows_only -from . import LocalTest -from .conftest import OneUserTest, TwoUsersTest - - -class TestPermissionHierarchy(OneUserTest): - def setup_method(self, method): - super().setup_method(method, register_roots=False, server_profile="permission") - - self.local_1 = LocalTest(self.local_nxdrive_folder_1) - - # Make sure user workspace is created and fetch its UID - res = self.remote_document_client_1.make_file_in_user_workspace( - b"contents", "USFile.txt" - ) - self.workspace_uid = res["parentRef"] - - def teardown_method(self, method): - with suppress(Exception): - self.root_remote.delete(self.workspace_uid, use_trash=False) - super().teardown_method(method) - - def test_sync_delete_root(self): - # Create test folder in user workspace as test user - remote = self.remote_document_client_1 - test_folder_uid = remote.make_folder(self.workspace_uid, "test_folder") - # Create a document in the test folder - remote.make_file(test_folder_uid, "test_file.txt", content=b"Some content.") - - # Register test folder as a sync root - remote.register_as_root(test_folder_uid) - - self.engine_1.start() - self.wait_sync(wait_for_async=True) - - # Check locally synchronized content - root = Path("My Docs/test_folder") - assert self.local_1.exists(root) - assert self.local_1.exists(root / "test_file.txt") - - # Delete test folder - remote.delete(test_folder_uid) - self.wait_sync(wait_for_async=True) - - # Check locally synchronized content - assert not self.local_1.exists(root) - assert not self.local_1.get_children_info("/My Docs") - - -class TestPermissionHierarchy2(TwoUsersTest): - def setup_method(self, method): - super().setup_method(method, register_roots=False, server_profile="permission") - - self.local_1 = LocalTest(self.local_nxdrive_folder_1) - self.local_2 = LocalTest(self.local_nxdrive_folder_2) - - # Make sure user workspace is created and fetch its UID - res = self.remote_document_client_1.make_file_in_user_workspace( - b"contents", "USFile.txt" - ) - self.workspace_uid = res["parentRef"] - - def teardown_method(self, method): - with suppress(Exception): - self.root_remote.delete(self.workspace_uid, use_trash=False) - super().teardown_method(method) - - @windows_only(reason="Only Windows ignores file permissions.") - def test_permission_awareness_after_resume(self): - remote = self.remote_document_client_1 - remote2 = self.remote_document_client_2 - local = self.local_2 - - root = remote.make_folder(self.workspace_uid, "testing") - folder = remote.make_folder(root, "FolderA") - - # Register user workspace as a sync root for user1 - remote.register_as_root(self.workspace_uid) - - # Register root folder as a sync root for user2 - self.set_readonly(self.user_2, root, grant=False) - remote2.register_as_root(root) - - # Read only folder for user 2 - self.set_readonly(self.user_2, folder) - - # Start'n sync - self.engine_2.start() - self.wait_sync( - wait_for_async=True, wait_for_engine_1=False, wait_for_engine_2=True - ) - self.engine_2.stop() - - # Checks - root = Path("Other Docs/testing/FolderA") - assert local.exists(root) - - # Create documents - abspath = local.abspath(root) - new_folder = abspath / "FolderCreated" - new_folder.mkdir() - (new_folder / "file.txt").write_bytes(b"content") - - # Change from RO to RW for the shared folder - self.set_readonly(self.user_2, folder, grant=False) - - # Sync - self.engine_2.start() - self.wait_sync( - wait_for_async=True, wait_for_engine_1=False, wait_for_engine_2=True - ) - - # Status check - dao = self.engine_2.dao - assert not dao.get_errors(limit=0) - assert not dao.get_filters() - assert not dao.get_unsynchronizeds() - - # Local check - assert local.exists(root / "FolderCreated/file.txt") - - # Remote checks - children = remote.get_children_info(folder) - assert len(children) == 1 - assert children[0].name == "FolderCreated" - - children = remote.get_children_info(children[0].uid) - assert len(children) == 1 - assert children[0].name == "file.txt" - - def test_sync_delete_shared_folder(self): - remote = self.remote_document_client_1 - self.engine_1.start() - # Register user workspace as a sync root for user1 - remote.register_as_root(self.workspace_uid) - - # Create test folder in user workspace as user1 - test_folder_uid = remote.make_folder(self.workspace_uid, "test_folder") - self.wait_sync(wait_for_async=True) - assert self.local_1.exists("/My Docs") - assert self.local_1.exists("/My Docs/test_folder") - - # Grant ReadWrite permission to user2 on test folder - self.set_readonly(self.user_2, test_folder_uid, grant=False) - self.wait_sync(wait_for_async=True) - - # Register test folder as a sync root for user2 - self.remote_document_client_2.register_as_root(test_folder_uid) - self.wait_sync(wait_for_async=True) - - # Delete test folder - remote.delete(test_folder_uid) - self.wait_sync(wait_for_async=True) - - # Check locally synchronized content - assert not self.local_1.exists("/My Docs/test_folder") - children = self.local_1.get_children_info("/My Docs") - assert len(children) == 1 - - @pytest.mark.randombug("NXDRIVE-1582") - def test_sync_unshared_folder(self): - # Register user workspace as a sync root for user1 - remote = self.remote_document_client_1 - remote2 = self.remote_document_client_2 - remote.register_as_root(self.workspace_uid) - - self.engine_2.start() - self.wait_sync( - wait_for_async=True, wait_for_engine_2=True, wait_for_engine_1=False - ) - # Check locally synchronized content - assert self.local_2.exists("/My Docs") - assert self.local_2.exists("/Other Docs") - - # Create test folder in user workspace as user1 - test_folder_uid = remote.make_folder(self.workspace_uid, "Folder A") - folder_b = remote.make_folder(test_folder_uid, "Folder B") - folder_c = remote.make_folder(folder_b, "Folder C") - folder_d = remote.make_folder(folder_c, "Folder D") - remote.make_folder(folder_d, "Folder E") - - # Grant ReadWrite permission to user2 on test folder - self.set_readonly(self.user_2, test_folder_uid, grant=False) - - # Register test folder as a sync root for user2 - remote2.register_as_root(test_folder_uid) - self.wait_sync( - wait_for_async=True, wait_for_engine_2=True, wait_for_engine_1=False - ) - assert self.local_2.exists("/Other Docs/Folder A") - assert self.local_2.exists( - "/Other Docs/Folder A/Folder B/Folder C/Folder D/Folder E" - ) - # Use for later get_fs_item checks - folder_b_fs = self.local_2.get_remote_id("/Other Docs/Folder A/Folder B") - folder_a_fs = self.local_2.get_remote_id("/Other Docs/Folder A") - # Unshare Folder A and share Folder C - self.root_remote.execute( - command="Document.RemoveACL", - input_obj=f"doc:{test_folder_uid}", - acl="local", - ) - self.set_readonly(self.user_2, folder_c) - remote2.register_as_root(folder_c) - self.wait_sync( - wait_for_async=True, wait_for_engine_2=True, wait_for_engine_1=False - ) - assert not self.local_2.exists("/Other Docs/Folder A") - assert self.local_2.exists("/Other Docs/Folder C") - assert self.local_2.exists("/Other Docs/Folder C/Folder D/Folder E") - - # Verify that we don't have any 403 errors - assert not self.remote_2.get_fs_item(folder_a_fs) - assert not self.remote_2.get_fs_item(folder_b_fs) - - def test_sync_move_permission_removal(self): - if WINDOWS: - self.app.quit() - pytest.xfail( - "Following the NXDRIVE-836 fix, this test always fails because " - "when moving a file from a RO folder to a RW folder will end up" - " being a simple file creation. As we cannot know events order," - " we cannot understand a local move is being made just before " - "a security update. To bo fixed with the engine refactoring." - ) - - remote = self.remote_document_client_1 - remote2 = self.remote_document_client_2 - local = self.local_2 - - root = remote.make_folder(self.workspace_uid, "testing") - readonly = remote.make_folder(root, "ReadFolder") - readwrite = remote.make_folder(root, "WriteFolder") - - # Register user workspace as a sync root for user1 - remote.register_as_root(self.workspace_uid) - - # Register root folder as a sync root for user2 - self.set_readonly(self.user_2, root, grant=False) - remote2.register_as_root(root) - - # Make one read-only document - remote.make_file_with_blob(readonly, "file_ro.txt", b"Read-only doc.") - - # Read only folder for user 2 - self.set_readonly(self.user_2, readonly) - - # Basic test to be sure we are in RO mode - with pytest.raises(Forbidden): - remote2.make_file(readonly, "test.txt", content=b"test") - - # ReadWrite folder for user 2 - self.set_readonly(self.user_2, readwrite, grant=False) - - # Start'n sync - self.engine_2.start() - self.wait_sync( - wait_for_async=True, wait_for_engine_1=False, wait_for_engine_2=True - ) - - # Checks - root = Path("Other Docs/testing") - assert local.exists(root / "ReadFolder") - assert local.exists(root / "ReadFolder/file_ro.txt") - assert local.exists(root / "WriteFolder") - content = local.get_content(root / "ReadFolder/file_ro.txt") - assert content == b"Read-only doc." - - # Move the read-only file - local.move( - root / "ReadFolder/file_ro.txt", root / "WriteFolder", name="file_rw.txt" - ) - - # Remove RO on ReadFolder folder - self.set_readonly(self.user_2, readonly, grant=False) - - # Edit the new writable file - new_data = b"Now a fresh read-write doc." - local.update_content(root / "WriteFolder/file_rw.txt", new_data) - - # Sync - self.wait_sync( - wait_for_async=True, wait_for_engine_1=False, wait_for_engine_2=True - ) - - # Status check - dao = self.engine_2.dao - assert not dao.get_errors(limit=0) - assert not dao.get_filters() - assert not dao.get_unsynchronizeds() - - # Local checks - assert not local.exists(root / "ReadFolder/file_ro.txt") - assert not local.exists(root / "WriteFolder/file_ro.txt") - assert local.exists(root / "WriteFolder/file_rw.txt") - content = local.get_content(root / "WriteFolder/file_rw.txt") - assert content == new_data - - # Remote checks - assert not remote.get_children_info(readonly) - children = remote.get_children_info(readwrite) - assert len(children) == 1 - blob = children[0].get_blob("file:content") - assert blob.name == "file_rw.txt" - assert blob.digest == hashlib.md5(new_data).hexdigest() -""" diff --git a/tests/functional/test_remote_deletion.py b/tests/functional/test_remote_deletion.py index 8546e81bca..76ccd89849 100644 --- a/tests/functional/test_remote_deletion.py +++ b/tests/functional/test_remote_deletion.py @@ -8,102 +8,12 @@ from nxdrive.options import Options # from .conftest import OS_STAT_MTIME_RESOLUTION, OneUserTest, TwoUsersTest -from .conftest import OneUserTest, TwoUsersTest +from .conftest import OneUserTest log = getLogger(__name__) class TestRemoteDeletion(OneUserTest): - """ - def test_synchronize_remote_deletion(self): - ""Test that deleting remote documents is impacted client side - - Use cases: - - Remotely delete a regular folder - => Folder should be locally deleted - - Remotely restore folder from the trash - => Folder should be locally re-created - - Remotely delete a synchronization root - => Synchronization root should be locally deleted - - Remotely restore synchronization root from the trash - => Synchronization root should be locally re-created - - See TestIntegrationSecurityUpdates.test_synchronize_denying_read_access - as the same uses cases are tested - "" - # Bind the server and root workspace - self.engine_1.start() - # Get local and remote clients - local = self.local_1 - remote = self.remote_document_client_1 - remote_admin = self.root_remote - - # Create documents in the remote root workspace - # then synchronize - folder_id = remote.make_folder("/", "Test folder") - file_id = remote.make_file("/Test folder", "joe.txt", content=b"Some content") - - self.wait_sync(wait_for_async=True) - assert local.exists("/Test folder") - assert local.exists("/Test folder/joe.txt") - - # Delete remote folder then synchronize - remote.delete("/Test folder") - self.wait_sync(wait_for_async=True) - assert not local.exists("/Test folder") - - # Restore folder from trash then synchronize - remote.undelete(folder_id) - if version_lt(remote.client.server_version, "10.2"): - remote.undelete(file_id) - self.wait_sync(wait_for_async=True) - assert local.exists("/Test folder") - assert local.exists("/Test folder/joe.txt") - - # Delete sync root then synchronize - remote_admin.delete(self.workspace) - self.wait_sync(wait_for_async=True) - assert not local.exists("/") - - # Restore sync root from trash then synchronize - remote_admin.undelete(self.workspace) - if version_lt(remote.client.server_version, "10.2"): - remote_admin.undelete(folder_id) - remote_admin.undelete(file_id) - self.wait_sync(wait_for_async=True) - assert local.exists("/") - assert local.exists("/Test folder") - assert local.exists("/Test folder/joe.txt") - """ - - """ - def test_synchronize_remote_deletion_while_upload(self): - local = self.local_1 - remote = self.remote_document_client_1 - self.engine_1.start() - - def callback(uploader): - ""Add delay when upload and download."" - time.sleep(1) - Engine.suspend_client(self.engine_1, uploader) - - with patch.object(self.engine_1.remote, "download_callback", new=callback): - # Create documents in the remote root workspace - remote.make_folder("/", "Test folder") - self.wait_sync(wait_for_async=True) - - # Create a document by streaming a binary file - file_path = local.abspath("/Test folder") / "testFile.pdf" - copyfile(self.location / "resources" / "files" / "testFile.pdf", file_path) - file_path = local.abspath("/Test folder") / "testFile2.pdf" - copyfile(self.location / "resources" / "files" / "testFile.pdf", file_path) - - # Delete remote folder then synchronize - remote.delete("/Test folder") - self.wait_sync(wait_for_async=True) - assert not local.exists("/Test folder") - """ - @Options.mock() @pytest.mark.randombug("NXDRIVE-1329", repeat=4) def test_synchronize_remote_deletion_while_download_file(self): @@ -137,113 +47,3 @@ def callback(uploader): # Sometimes the server does not return the document trash action in summary changes. # So it may fail on the next assertion. assert not local.exists("/Test folder/testFile.pdf") - - """ - def test_synchronize_remote_deletion_with_close_name(self): - self.engine_1.start() - self.wait_sync(wait_for_async=True) - local = self.local_1 - remote = self.remote_document_client_1 - remote.make_folder("/", "Folder 1") - remote.make_folder("/", "Folder 1b") - remote.make_folder("/", "Folder 1c") - self.wait_sync(wait_for_async=True) - assert local.exists("/Folder 1") - assert local.exists("/Folder 1b") - assert local.exists("/Folder 1c") - remote.delete("/Folder 1") - remote.delete("/Folder 1b") - remote.delete("/Folder 1c") - self.wait_sync(wait_for_async=True) - assert not local.exists("/Folder 1") - assert not local.exists("/Folder 1b") - assert not local.exists("/Folder 1c") - """ - - """ - def test_synchronize_remote_deletion_with_wrong_local_remote_id(self): - local = self.local_1 - remote = self.remote_document_client_1 - remote.make_file("/", "joe.txt", content=b"Some content") - - self.engine_1.start() - self.wait_sync(wait_for_async=True) - - assert local.exists("/joe.txt") - - self.engine_1.suspend() - local.set_remote_id(Path("joe.txt"), "wrong-id") - remote.delete("/joe.txt") - - self.engine_1.resume() - self.wait_sync(wait_for_async=True) - assert local.exists("/joe.txt") - """ - - """ - def test_synchronize_local_folder_rename_remote_deletion(self): - ""Test local folder rename followed by remote deletion"" - # Bind the server and root workspace - - # Get local and remote clients - self.engine_1.start() - local = self.local_1 - remote = self.remote_document_client_1 - - # Create a folder with a child file in the remote root workspace - # then synchronize - test_folder_uid = remote.make_folder("/", "Test folder") - remote.make_file(test_folder_uid, "joe.odt", content=b"Some content") - - self.wait_sync(wait_for_async=True) - assert local.exists("/Test folder") - assert local.exists("/Test folder/joe.odt") - - # Locally rename the folder then synchronize - time.sleep(OS_STAT_MTIME_RESOLUTION) - local.rename("/Test folder", "Test folder renamed") - - self.wait_sync() - assert not local.exists("/Test folder") - assert local.exists("/Test folder renamed") - assert remote.get_info(test_folder_uid).name == "Test folder renamed" - - # Delete remote folder then synchronize - remote.delete("/Test folder renamed") - - self.wait_sync(wait_for_async=True) - assert not remote.exists("/Test folder renamed") - assert not local.exists("/Test folder renamed") - """ - - -class TestRemoteDeletion2(TwoUsersTest): - """ - def test_synchronize_local_folder_lost_permission(self): - ""Test local folder rename followed by remote deletion"" - # Bind the server and root workspace - - # Get local and remote clients - self.engine_2.start() - local = self.local_2 - remote = self.remote_document_client_2 - - # Create a folder with a child file in the remote root workspace - # then synchronize - test_folder_uid = remote.make_folder("/", "Test folder") - remote.make_file(test_folder_uid, "joe.odt", content=b"Some content") - - self.wait_sync( - wait_for_async=True, wait_for_engine_1=False, wait_for_engine_2=True - ) - assert local.exists("/Test folder") - assert local.exists("/Test folder/joe.odt") - input_obj = "doc:" + self.workspace - self.root_remote.execute( - command="Document.RemoveACL", input_obj=input_obj, acl="local" - ) - self.wait_sync( - wait_for_async=True, wait_for_engine_1=False, wait_for_engine_2=True - ) - assert not local.exists("/Test folder") - """ diff --git a/tests/functional/test_security_updates.py b/tests/functional/test_security_updates.py deleted file mode 100644 index 6ffdcd8625..0000000000 --- a/tests/functional/test_security_updates.py +++ /dev/null @@ -1,298 +0,0 @@ -""" -import time -from pathlib import Path - -import pytest - -from .conftest import OS_STAT_MTIME_RESOLUTION, OneUserTest - - -class TestSecurityUpdates(OneUserTest): - def test_synchronize_denying_read_access(self): - ""Test that denying Read access server side is impacted client side - - Use cases: - - Deny Read access on a regular folder - => Folder should be locally deleted - - Grant Read access back - => Folder should be locally re-created - - Deny Read access on a synchronization root - => Synchronization root should be locally deleted - - Grant Read access back - => Synchronization root should be locally re-created - - See TestIntegrationRemoteDeletion.test_synchronize_remote_deletion - as the same uses cases are tested - "" - # Bind the server and root workspace - self.engine_1.start() - - # Get local and remote clients - local = self.local_1 - remote = self.remote_document_client_1 - - # Create documents in the remote root workspace - # then synchronize - remote.make_folder("/", "Test folder") - remote.make_file("/Test folder", "joe.txt", content=b"Some content") - - self.wait_sync(wait_for_async=True) - assert local.exists("/Test folder") - assert local.exists("/Test folder/joe.txt") - - # Remove Read permission for test user on a regular folder - # then synchronize - self._set_read_permission(self.user_1, f"{self.ws.path}/Test folder", False) - self.wait_sync(wait_for_async=True) - assert not local.exists("/Test folder") - - # Add Read permission back for test user then synchronize - self._set_read_permission(self.user_1, f"{self.ws.path}/Test folder", True) - self.wait_sync(wait_for_async=True) - assert local.exists("/Test folder") - assert local.exists("/Test folder/joe.txt") - - # Remove Read permission for test user on a sync root - # then synchronize - self._set_read_permission(self.user_1, self.ws.path, False) - self.wait_sync(wait_for_async=True) - assert not local.exists("/") - - # Add Read permission back for test user then synchronize - self._set_read_permission(self.user_1, self.ws.path, True) - self.wait_sync(wait_for_async=True) - assert local.exists("/") - assert local.exists("/Test folder") - assert local.exists("/Test folder/joe.txt") - - @pytest.mark.skip("NXDRIVE-170: WIP") - def test_synchronize_denying_read_access_local_modification(self): - # TO_REVIEW: Trash feature, delete it, - # might need to modify the behavior - ""Test denying Read access with concurrent local modification - - Use cases: - - Deny Read access on a regular folder and make some - local and remote changes concurrently. - => Only locally modified content should be kept - and should be marked as 'unsynchronized', - other content should be deleted. - Remote changes should not be impacted client side. - Local changes should not be impacted server side. - - Grant Read access back. - => Remote documents should be merged with - locally modified content which should be unmarked - as 'unsynchronized' and therefore synchronized upstream. - - See TestIntegrationRemoteDeletion - .test_synchronize_remote_deletion_local_modification - as the same uses cases are tested. - - Note that we use the .odt extension for test files to make sure - that they are created as File and not Note documents on the server - when synchronized upstream, as the current implementation of - RemoteDocumentClient is File oriented. - "" - # Bind the server and root workspace - self.engine_1.start() - # Get local and remote clients - local = self.local_1 - remote = self.remote_document_client_1 - root_remote = self.root_remote - - # Create documents in the remote root workspace - # then synchronize - remote.make_folder("/", "Test folder") - remote.make_file("/Test folder", "joe.odt", content=b"Some content") - remote.make_file("/Test folder", "jack.odt", content=b"Some content") - remote.make_folder("/Test folder", "Sub folder 1") - remote.make_file( - "/Test folder/Sub folder 1", "sub file 1.txt", content=b"Content" - ) - - self.wait_sync(wait_for_async=True) - assert local.exists("/Test folder") - assert local.exists("/Test folder/joe.odt") - assert local.exists("/Test folder/jack.odt") - assert local.exists("/Test folder/Sub folder 1") - assert local.exists("/Test folder/Sub folder 1/sub file 1.txt") - - # Remove Read permission for test user on a regular folder - # and make some local and remote changes concurrently then synchronize - test_folder_path = f"{self.ws.path}/Test folder" - self._set_read_permission(self.user_1, test_folder_path, False) - # Local changes - time.sleep(OS_STAT_MTIME_RESOLUTION) - # Create new file - local.make_file("/Test folder", "local.odt", content=b"New local content") - # Create new folder with files - local.make_folder("/Test folder", "Local sub folder 2") - local.make_file( - "/Test folder/Local sub folder 2", - "local sub file 2.txt", - content=b"Other local content", - ) - # Update file - local.update_content("/Test folder/joe.odt", b"Some locally updated content") - # Remote changes - # Create new file - root_remote.make_file( - test_folder_path, "remote.odt", content=b"New remote content" - ) - # Create new folder with files - root_remote.make_folder(test_folder_path, "Remote sub folder 2") - root_remote.make_file( - test_folder_path + "/Remote sub folder 2", - "remote sub file 2.txt", - content=b"Other remote content", - ) - # Update file - root_remote.update_content( - test_folder_path + "/joe.odt", b"Some remotely updated content" - ) - - self.wait_sync(wait_for_async=True) - # Only locally modified content should exist - # and should be marked as 'unsynchronized', other content should - # have been deleted. - # Remote changes should not be impacted client side. - # Local changes should not be impacted server side. - # Local check - assert local.exists("/Test folder") - assert len(local.get_children_info("/Test folder")) == 3 - assert local.exists("/Test folder/joe.odt") - assert ( - local.get_content("/Test folder/joe.odt") == b"Some locally updated content" - ) - assert local.exists("/Test folder/local.odt") - assert local.exists("/Test folder/Local sub folder 2") - assert local.exists("/Test folder/Local sub folder 2/local sub file 2.txt") - - assert not local.exists("/Test folder/jack.odt") - assert not local.exists("/Test folder/remote.odt") - assert not local.exists("/Test folder/Sub folder 1") - assert not local.exists("/Test folder/Sub folder 1/sub file 1.txt") - assert not local.exists("/Test folder/Remote sub folder 1") - assert not local.exists( - "/Test folder/Remote sub folder 1/remote sub file 1.txt" - ) - # State check - self._check_pair_state("/Test folder", "unsynchronized") - self._check_pair_state("/Test folder/joe.odt", "unsynchronized") - self._check_pair_state("/Test folder/local.odt", "unsynchronized") - self._check_pair_state("/Test folder/Local sub folder 2", "unsynchronized") - self._check_pair_state( - "/Test folder/Local sub folder 2/local sub file 2.txt", "unsynchronized" - ) - # Remote check - test_folder_uid = root_remote.get_info(test_folder_path).uid - assert len(root_remote.get_children_info(test_folder_uid)) == 5 - assert root_remote.exists(test_folder_path + "/joe.odt") - assert ( - root_remote.get_content(test_folder_path + "/joe.odt") - == b"Some remotely updated content" - ) - assert root_remote.exists(test_folder_path + "/jack.odt") - assert root_remote.exists(test_folder_path + "/remote.odt") - assert root_remote.exists(test_folder_path + "/Sub folder 1") - assert root_remote.exists(test_folder_path + "/Sub folder 1/sub file 1.txt") - assert root_remote.exists(test_folder_path + "/Remote sub folder 2") - assert root_remote.exists( - f"{test_folder_path}/Remote sub folder 2/remote sub file 2.txt" - ) - - assert not root_remote.exists(test_folder_path + "/local.odt") - assert not root_remote.exists(test_folder_path + "/Local sub folder 2") - assert not root_remote.exists( - f"{test_folder_path}/Local sub folder 1/local sub file 2.txt" - ) - - # Add Read permission back for test user then synchronize - self._set_read_permission(self.user_1, f"{self.ws.path}/Test folder", True) - self.wait_sync(wait_for_async=True) - # Remote documents should be merged with locally modified content - # which should be unmarked as 'unsynchronized' and therefore - # synchronized upstream. - # Local check - assert local.exists("/Test folder") - children_info = local.get_children_info("/Test folder") - assert len(children_info) == 8 - for info in children_info: - if info.name == "joe.odt": - remote_version = info - elif info.name.startswith("joe (") and info.name.endswith(").odt"): - local_version = info - assert remote_version is not None - assert local_version is not None - assert local.exists(remote_version.path) - assert ( - local.get_content(remote_version.path) == b"Some remotely updated content" - ) - assert local.exists(local_version.path) - assert local.get_content(local_version.path) == b"Some locally updated content" - assert local.exists("/Test folder/jack.odt") - assert local.exists("/Test folder/local.odt") - assert local.exists("/Test folder/remote.odt") - assert local.exists("/Test folder/Sub folder 1") - assert local.exists("/Test folder/Sub folder 1/sub file 1.txt") - assert local.exists("/Test folder/Local sub folder 2") - assert local.exists("/Test folder/Local sub folder 2/local sub file 2.txt") - assert local.exists("/Test folder/Remote sub folder 2") - assert local.exists("/Test folder/Remote sub folder 2/remote sub file 2.txt") - # State check - self._check_pair_state("/Test folder", "synchronized") - self._check_pair_state("/Test folder/joe.odt", "synchronized") - self._check_pair_state("/Test folder/local.odt", "synchronized") - self._check_pair_state("/Test folder/Local sub folder 2", "synchronized") - self._check_pair_state( - "/Test folder/Local sub folder 2/local sub file 2.txt", "synchronized" - ) - # Remote check - assert remote.exists("/Test folder") - children_info = remote.get_children_info(test_folder_uid) - assert len(children_info) == 8 - for info in children_info: - if info.name == "joe.odt": - remote_version = info - elif info.name.startswith("joe (") and info.name.endswith(").odt"): - local_version = info - assert remote_version is not None - assert local_version is not None - remote_version_ref_length = len(remote_version.path) - len(self.ws.path) - remote_version_ref = remote_version.path[-remote_version_ref_length:] - assert remote.exists(remote_version_ref) - assert ( - remote.get_content(remote_version_ref) == b"Some remotely updated content" - ) - local_version_ref_length = len(local_version.path) - len(self.ws.path) - local_version_ref = local_version.path[-local_version_ref_length:] - assert remote.exists(local_version_ref) - assert remote.get_content(local_version_ref) == b"Some locally updated content" - assert remote.exists("/Test folder/jack.odt") - assert remote.exists("/Test folder/local.odt") - assert remote.exists("/Test folder/remote.odt") - assert remote.exists("/Test folder/Sub folder 1") - assert remote.exists("/Test folder/Sub folder 1/sub file 1.txt") - assert remote.exists("/Test folder/Local sub folder 2") - assert remote.exists("/Test folder/Local sub folder 2/local sub file 2.txt") - assert remote.exists("/Test folder/Remote sub folder 2") - assert remote.exists("/Test folder/Remote sub folder 2/remote sub file 2.txt") - - def _set_read_permission(self, user, doc_path, grant): - input_obj = "doc:" + doc_path - if grant: - self.root_remote.execute( - command="Document.SetACE", - input_obj=input_obj, - user=user, - permission="Read", - grant=True, - ) - else: - self.root_remote.block_inheritance(doc_path) - - def _check_pair_state(self, local_path, pair_state): - local_path = Path(self.workspace_title) / local_path - doc_pair = self.engine_1.dao.get_state_from_local(local_path) - assert doc_pair.pair_state == pair_state -""" diff --git a/tests/functional/test_shared_folders.py b/tests/functional/test_shared_folders.py index 3e0e674761..cf5b0f5334 100644 --- a/tests/functional/test_shared_folders.py +++ b/tests/functional/test_shared_folders.py @@ -6,85 +6,6 @@ class TestSharedFolders(TwoUsersTest): - """ - def test_move_sync_root_child_to_user_workspace(self): - ""See https://jira.nuxeo.com/browse/NXP-14870"" - uid = None - try: - # Get remote and local clients - remote_1 = self.remote_document_client_1 - remote_2 = self.remote_document_client_2 - - local_user2 = LocalTest(self.local_nxdrive_folder_2) - - # Make sure personal workspace is created for user1 - # and fetch its uid - uid = remote_1.make_file_in_user_workspace( - b"File in user workspace", "UWFile.txt" - )["parentRef"] - - # As user1 register personal workspace as a sync root - remote_1.register_as_root(uid) - - # As user1 create a parent folder in user1's personal workspace - parent_uid = remote_1.make_folder(uid, "Parent") - - # As user1 grant Everything permission to user2 on parent folder - input_obj = "doc:" + parent_uid - self.root_remote.execute( - command="Document.SetACE", - input_obj=input_obj, - user=self.user_2, - permission="Everything", - grant=True, - ) - - # As user1 create a child folder in parent folder - child_folder_uid = remote_1.make_folder(parent_uid, "Child") - - # As user2 register parent folder as a sync root - remote_2.register_as_root(parent_uid) - remote_2.unregister_as_root(self.workspace) - # Start engine for user2 - self.engine_2.start() - - # Wait for synchronization - self.wait_sync( - wait_for_async=True, wait_for_engine_1=False, wait_for_engine_2=True - ) - - # Check locally synchronized content - assert len(local_user2.get_children_info("/")) == 1 - assert local_user2.exists("/Parent") - assert local_user2.exists("/Parent/Child") - - # As user1 move child folder to user1's personal workspace - remote_1.move(child_folder_uid, uid) - - # Wait for synchronization - self.wait_sync( - wait_for_async=True, wait_for_engine_1=False, wait_for_engine_2=True - ) - - # Check locally synchronized content - assert not local_user2.exists("/Parent/Child") - - finally: - # Cleanup user1 personal workspace - if uid is not None and self.root_remote.exists(uid): - self.root_remote.delete(uid, use_trash=False) - """ - - """ - def test_local_changes_while_stopped(self): - self._test_local_changes_while_not_running(False) - """ - - """ - def test_local_changes_while_unbinded(self): - self._test_local_changes_while_not_running(True) - """ - def _test_local_changes_while_not_running(self, unbind): """NXDRIVE-646: not uploading renamed file from shared folder.""" local_1 = self.local_root_client_1 diff --git a/tests/functional/test_special_characters.py b/tests/functional/test_special_characters.py deleted file mode 100644 index 6d1f8b749b..0000000000 --- a/tests/functional/test_special_characters.py +++ /dev/null @@ -1,93 +0,0 @@ -""" -import pytest - -from nxdrive.constants import MAC - -from ..markers import not_windows -from .conftest import OneUserTest - - -class TestSpecialCharacters(OneUserTest): - @not_windows(reason="Explorer prevents using those characters") - def test_create_local(self): - local = self.local_1 - remote = self.remote_1 - - self.engine_1.start() - self.wait_sync(wait_for_async=True) - - folder = local.make_folder("/", "/ * ? < > |") - local.make_file(folder, "| > < ? * /.txt", content=b"This is a test file") - self.wait_sync() - - folder_name = "- - - - - -" - file_name = "- - - - - -.txt" - # Check the remote folder - children = remote.get_children(self.ws.path)["entries"] - assert len(children) == 1 - assert children[0]["title"] == folder_name - # Check the remote file - children = remote.get_children(children[0]["path"])["entries"] - assert len(children) == 1 - assert children[0]["title"] == file_name - - new_folder_name = "abcd" - new_file_name = "efgh.txt" - local.rename(f"/{folder_name}", new_folder_name) - local.rename(f"/{new_folder_name}/{file_name}", new_file_name) - self.wait_sync() - - # Paths is updated server-side - info = remote.get_info(f"/{new_folder_name}") - assert info.name == new_folder_name - info = remote.get_info(f"/{new_folder_name}/{new_file_name}") - assert info.name == new_file_name - - @not_windows(reason="Explorer prevents using those characters") - @pytest.mark.xfail(reason="NXDRIVE-2498", condition=MAC) - def test_rename_local(self): - local = self.local_1 - remote = self.remote_1 - - self.engine_1.start() - self.wait_sync(wait_for_async=True) - - folder_name = "abcd" - file_name = "efgh.txt" - folder = local.make_folder("/", folder_name) - local.make_file(folder, file_name, content=b"This is a test file") - - self.wait_sync() - assert remote.exists(f"/{folder_name}") - assert remote.exists(f"/{folder_name}/{file_name}") - - new_folder_name = "/ * ? < > |" - new_folder_name_expected = "- - - - - -" - new_file_name = "| > < ? * /.txt" - new_file_name_expected = "- - - - - -.txt" - local.rename(f"/{folder_name}", new_folder_name) - local.rename(f"/{new_folder_name_expected}/{file_name}", new_file_name) - self.wait_sync() - - # Paths is updated server-side - info = remote.get_info(f"/{new_folder_name_expected}") - assert info.name == new_folder_name_expected - info = remote.get_info(f"/{new_folder_name_expected}/{new_file_name_expected}") - assert info.name == new_file_name_expected - - def test_create_remote(self): - local = self.local_1 - remote = self.remote_document_client_1 - - self.engine_1.start() - self.wait_sync(wait_for_async=True) - - folder = remote.make_folder("/", "/ * ? < > |") - remote.make_file(folder, "| > < ? * /.txt", content=b"This is a test file") - self.wait_sync(wait_for_async=True) - - folder_name = "- - - - - -" - file_name = "- - - - - -.txt" - assert local.exists(f"/{folder_name}") - assert local.exists(f"/{folder_name}/{file_name}") -""" diff --git a/tests/functional/test_synchronization_dedup.py b/tests/functional/test_synchronization_dedup.py deleted file mode 100644 index ce0ecc29f5..0000000000 --- a/tests/functional/test_synchronization_dedup.py +++ /dev/null @@ -1,148 +0,0 @@ -""" -"" -Test behaviors when the server allows duplicates and not the client. -"" -from pathlib import Path - -import pytest - -from .conftest import OneUserTest - - -class TestSynchronizationDedup(OneUserTest): - def test_children_of_folder_in_dedup_error(self): - "" - NXDRIVE-1037: Children of a folder that is in DEDUP error should be - ignored. - "" - - local = self.local_1 - engine = self.engine_1 - remote = self.remote_document_client_1 - engine.start() - - # Step 1: create Unisys folder (1st) - remote.make_folder(self.workspace, "Unisys") - self.wait_sync(wait_for_async=True) - assert local.exists("/Unisys") - - # Step 2: create Unisys folder (2nd) - unisys2 = remote.make_folder(self.workspace, "Unisys") - self.wait_sync(wait_for_async=True) - - # Check DEDUP error - doc_pair = engine.dao.get_normal_state_from_remote( - "defaultFileSystemItemFactory#default#" + unisys2 - ) - assert doc_pair.last_error == "DEDUP" - - # Step 3: create a child in the 2nd Unisys folder - foo = remote.make_file(unisys2, "foo.txt", content=b"42") - self.wait_sync(wait_for_async=True) - - # Check the file is not created and not present in the database - assert not local.exists("/Unisys/foo.txt") - assert not engine.dao.get_normal_state_from_remote( - "defaultFileSystemItemFactory#default#" + unisys2 + "/" + foo - ) - - # Check there is nothing syncing - assert not engine.dao.get_syncing_count() - - -class TestSynchronizationDedupCaseSensitive(OneUserTest): - ""NXDRIVE-842: do not sync duplicate conflicted folder content."" - - def setUp(self): - self.local = self.local_root_client_1 - self.remote = self.remote_document_client_1 - - # Make documents in the 1st future root folder - # / - # ├── citrus - # │ └── fruits - # │ ├── lemon.txt - # │ └── orange.txt - self.remote.make_folder("/", "citrus") - self.root1 = self.remote.make_folder("/citrus", "fruits") - self.remote.make_file(self.root1, "lemon.txt", content=b"lemon") - self.remote.make_file(self.root1, "orange.txt", content=b"orange") - - # Make documents in the 2nd future root folder - # / - # ├── fruits - # ├── cherries.txt - # ├── mango.txt - # └── papaya.txt - self.root2 = self.remote.make_folder("/", "fruits") - self.remote.make_file(self.root2, "cherries.txt", content=b"cherries") - self.remote.make_file(self.root2, "mango.txt", content=b"mango") - self.remote.make_file(self.root2, "papaya.txt", content=b"papaya") - - # Register new roots - # / - # ├── citrus - # │ └── fruits (self.root1) - # │ ├── lemon.txt - # │ └── orange.txt - # ├── fruits (self.root2) - # ├── cherries.txt - # ├── mango.txt - # └── papaya.txt - self.remote.unregister_as_root(self.workspace) - self.remote.register_as_root(self.root1) - self.remote.register_as_root(self.root2) - - # Start and wait - self.engine_1.start() - self.wait_sync(wait_for_async=True) - - # Checks - # No duplicate possible, there is one "fruits" folder at the root - assert len(self.local.get_children_info("/")) == 1 - # As events are coming in the reverse order, we should have self.root2 - # synced first, which contains 3 files - assert len(self.local.get_children_info("/fruits")) == 3 - - def check( - self, count_root: int, count_folder: int, count_fixed_folder: int = -1 - ) -> None: - self.wait_sync(wait_for_async=True) - - get = self.local.get_children_info - assert len(get("/")) == count_root - assert len(get("/fruits")) == count_folder - if count_fixed_folder > -1: - assert len(get("/fruits-renamed")) == count_fixed_folder - - # Ensure there is no postponed nor documents in error - assert not self.engine_1.dao.get_error_count(threshold=0) - - def test_file_sync_under_dedup_shared_folders_rename_remotely_dupe(self): - self.remote.update(self.root1, properties={"dc:title": "fruits-renamed"}) - self.check(2, 3, count_fixed_folder=2) - - @pytest.mark.randombug( - "Several rounds may be needed, specially on Windows", condition=True - ) - def test_file_sync_under_dedup_shared_folders_rename_remotely(self): - self.remote.update(self.root2, properties={"dc:title": "fruits-renamed"}) - self.check(2, 2, count_fixed_folder=3) - - def test_file_sync_under_dedup_shared_folders_delete_remotely(self): - self.remote.delete(self.root2) - self.check(1, 2) - - def test_file_sync_under_dedup_shared_folders_delete_remotely_dupe(self): - self.remote.delete(self.root1) - self.check(1, 3) - - def test_file_sync_under_dedup_shared_folders_delete_locally(self): - self.engine_1.local.delete(Path("fruits")) - self.check(1, 2) - assert self.root1 in self.local.get_remote_id("/fruits") - - def test_file_sync_under_dedup_shared_folders_rename_locally(self): - self.engine_1.local.rename(Path("fruits"), "fruits-renamed") - self.check(2, 2, count_fixed_folder=3) -""" diff --git a/tests/functional/test_synchronization_suspend.py b/tests/functional/test_synchronization_suspend.py deleted file mode 100644 index 7d471bf32d..0000000000 --- a/tests/functional/test_synchronization_suspend.py +++ /dev/null @@ -1,159 +0,0 @@ -""" -import pytest - -from nxdrive.constants import LINUX, WINDOWS - -from .conftest import SYNC_ROOT_FAC_ID, OneUserTest - - -class TestSynchronizationSuspend(OneUserTest): - def test_basic_synchronization_suspend(self): - local = self.local_1 - remote = self.remote_document_client_1 - self.engine_1.start() - self.wait_sync(wait_for_async=True) - - # Let's create some document on the client and the server - local.make_folder("/", "Folder 3") - self.make_server_tree() - - # Launch ndrive and check synchronization - self.wait_sync(wait_for_async=True) - assert remote.exists("/Folder 3") - assert local.exists("/Folder 1") - assert local.exists("/Folder 2") - assert local.exists("/File 5.txt") - self.engine_1.queue_manager.suspend() - local.make_folder("/", "Folder 4") - local.make_file("/Folder 4", "Test.txt", content=b"Plop") - self.wait_sync(wait_for_async=True, fail_if_timeout=False) - assert len(remote.get_children_info(self.workspace)) == 4 - assert self.engine_1.queue_manager.is_paused() - - def test_synchronization_local_watcher_paused_when_offline(self): - ""NXDRIVE-680: fix unwanted local upload when offline."" - - local = self.local_1 - remote = self.remote_document_client_1 - engine = self.engine_1 - - # Create one file locally and wait for sync - engine.start() - self.wait_sync(wait_for_async=True) - local.make_file("/", "file1.txt", content=b"42") - self.wait_sync() - - # Checks - assert remote.exists("/file1.txt") - assert local.exists("/file1.txt") - - # Simulate offline mode (no more network for instance) - engine.queue_manager.suspend() - - # Create a bunch of files locally - local.make_folder("/", "files") - for num in range(60 if WINDOWS else 20): - local.make_file( - "/files", - "file-" + str(num) + ".txt", - content=b"Content of file-" + bytes(num), - ) - self.wait_sync(fail_if_timeout=False) - - # Checks - assert len(remote.get_children_info(self.workspace)) == 1 - assert engine.queue_manager.is_paused() - - # Restore network connection - engine.queue_manager.resume() - - # Wait for sync and check synced files - self.wait_sync(wait_for_async=True) - assert len(remote.get_children_info(self.workspace)) == 2 - assert not engine.queue_manager.is_paused() - - def test_synchronization_end_with_children_ignore_parent(self): - ""NXDRIVE-655: children of ignored folder are not ignored."" - - local = self.local_1 - remote = self.remote_document_client_1 - self.engine_1.start() - self.wait_sync(wait_for_async=True) - - # Let's create some document on the client and the server - local.make_folder("/", "Folder 3") - self.make_server_tree() - - # Launch ndrive and check synchronization - self.wait_sync(wait_for_async=True) - assert remote.exists("/Folder 3") - assert local.exists("/Folder 1") - assert local.exists("/Folder 2") - assert local.exists("/File 5.txt") - local.make_folder("/", ".hidden") - local.make_file("/.hidden", "Test.txt", content=b"Plop") - local.make_folder("/.hidden", "normal") - local.make_file("/.hidden/normal", "Test.txt", content=b"Plop") - # Should not try to sync therefore it should not timeout - self.wait_sync(wait_for_async=True) - assert len(remote.get_children_info(self.workspace)) == 4 - - @pytest.mark.xfail(LINUX, reason="NXDRIVE-1690", strict=True) - def test_folder_renaming_while_offline(self): - "" - Scenario: - - create a folder with a subfolder and a file, on the server - - launch Drive - - wait for sync completion - - pause Drive - - locally rename the parent folder - - locally rename the sub folder - - locally delete the file - - resume Drive - - Result before NXDRIVE-695: - - sub folder is renamed on the server - - the deleted file is not removed on the server (incorrect) - "" - - local = self.local_1 - remote = self.remote_1 - engine = self.engine_1 - - # Create a folder with a subfolder and a file on the server - folder = remote.make_folder(z"'{'SYNC_ROOT_FAC_ID}{self.workspace}", "folder").uid - subfolder = remote.make_folder(folder, "subfolder").uid - remote.make_file(subfolder, "file.txt", content=b"42") - - # Start the sync - engine.start() - self.wait_sync(wait_for_async=True) - - # Checks - assert remote.exists("/folder/subfolder/file.txt") - assert local.exists("/folder/subfolder/file.txt") - - # Suspend the sync - engine.suspend() - assert engine.is_paused() - - # Rename the parent folder and its subfolder; delete the file - local.rename("/folder", "folder-renamed") - local.rename("/folder-renamed/subfolder", "subfolder-renamed") - local.delete("/folder-renamed/subfolder-renamed/file.txt") - - # Resume the sync - engine.resume() - assert not engine.is_paused() - self.wait_sync() - - # Local checks - assert local.exists("/folder-renamed/subfolder-renamed") - assert not local.exists("/folder-renamed/subfolder-renamed/file.txt") - assert not local.exists("/folder") - - # Remote checks - assert remote.exists("/folder-renamed/subfolder-renamed") - assert not remote.exists("/folder-renamed/subfolder-renamed/file.txt") - assert not remote.exists("/folder") -""" diff --git a/tests/functional/test_versioning.py b/tests/functional/test_versioning.py index 8700f4288d..ec0d45783a 100644 --- a/tests/functional/test_versioning.py +++ b/tests/functional/test_versioning.py @@ -28,39 +28,6 @@ def test_version_restore(self): class TestVersioning2(TwoUsersTest): - """ - def test_versioning(self): - local = self.local_1 - self.engine_1.start() - remote = self.remote_document_client_2 - - # Create a file as user 2 - remote.make_file_with_blob("/", "Test versioning.txt", b"This is version 0") - self.wait_sync() - assert remote.exists("/Test versioning.txt") - doc = self.root_remote.fetch(f"{self.ws.path}/Test versioning.txt") - self._assert_version(doc, 0, 0) - - # Synchronize it for user 1 - self.wait_sync(wait_for_async=True) - assert local.exists("/Test versioning.txt") - - # Update it as user 1 => should be versioned - time.sleep(OS_STAT_MTIME_RESOLUTION) - local.update_content("/Test versioning.txt", b"Modified content") - self.wait_sync() - doc = self.root_remote.fetch(f"{self.ws.path}/Test versioning.txt") - self._assert_version(doc, 0, 1) - - # Update it as user 1 => should NOT be versioned - # since the versioning delay is not passed by - time.sleep(OS_STAT_MTIME_RESOLUTION) - local.update_content("/Test versioning.txt", b"Content twice modified") - self.wait_sync() - doc = self.root_remote.fetch(f"{self.ws.path}/Test versioning.txt") - self._assert_version(doc, 0, 1) - """ - def _assert_version(self, doc, major, minor): assert doc["properties"]["uid:major_version"] == major assert doc["properties"]["uid:minor_version"] == minor diff --git a/tests/functional/test_watchers.py b/tests/functional/test_watchers.py index 3533a1a13f..2d6ea0c1ec 100644 --- a/tests/functional/test_watchers.py +++ b/tests/functional/test_watchers.py @@ -145,18 +145,6 @@ def _delete_folder_1(self): break return Path(self.workspace_title) / path - """ - def test_local_scan_delete_non_synced(self): - # Test the deletion after first local scan - self.test_local_scan() - self.engine_1.stop() - path = self._delete_folder_1() - self.engine_1.start() - self.wait_sync(timeout=5, fail_if_timeout=False) - children = self.engine_1.dao.get_states_from_partial_local(path) - assert not children - """ - def test_local_watchdog_delete_synced(self): # Test the deletion after first local scan self.test_reconcile_scan() @@ -168,22 +156,6 @@ def test_local_watchdog_delete_synced(self): for child in children: assert child.pair_state == "locally_deleted" - """ - def test_local_scan_delete_synced(self): - # Test the deletion after first local scan - self.test_reconcile_scan() - self.engine_1.stop() - path = self._delete_folder_1() - self.engine_1.start() - self.wait_sync(timeout=5, fail_if_timeout=False) - child = self.engine_1.dao.get_state_from_local(path) - assert child.pair_state == "locally_deleted" - children = self.engine_1.dao.get_states_from_partial_local(path) - assert len(children) == 5 - for child in children: - assert child.pair_state == "locally_deleted" - """ - def test_local_scan_error(self): local = self.local_1 remote = self.remote_document_client_1 From 0411ad64ccaeeee290ada2cb47dbd9472c730e1a Mon Sep 17 00:00:00 2001 From: gitofanindya Date: Fri, 2 Feb 2024 10:22:22 +0530 Subject: [PATCH 34/36] NXDRIVE-2860: Code Coverage --02/02 - final --- tests/functional/test_shared_folders.py | 114 --- tests/functional/test_versioning.py | 8 +- tests/old_functional/local_client_darwin.py | 67 -- tests/old_functional/local_client_windows.py | 97 -- .../test_concurrent_synchronization.py | 34 - tests/old_functional/test_copy.py | 28 - tests/old_functional/test_encoding.py | 39 - .../test_local_changes_when_offline.py | 38 - tests/old_functional/test_local_copy_paste.py | 129 --- tests/old_functional/test_local_creations.py | 147 --- .../old_functional/test_local_move_folders.py | 76 -- tests/old_functional/test_local_paste.py | 50 - .../test_local_storage_issue.py | 14 - tests/old_functional/test_long_path.py | 101 -- tests/old_functional/test_mac_local_client.py | 38 - tests/old_functional/test_remote_client.py | 525 ----------- tests/old_functional/test_remote_deletion.py | 36 - tests/old_functional/test_special_files.py | 36 - tests/old_functional/test_sync_roots.py | 32 - tests/old_functional/test_transfer.py | 888 ------------------ tests/old_functional/test_versioning.py | 28 +- tests/old_functional/test_volume.py | 320 ------- tests/old_functional/test_watchers.py | 149 --- 23 files changed, 2 insertions(+), 2992 deletions(-) delete mode 100644 tests/functional/test_shared_folders.py delete mode 100644 tests/old_functional/local_client_darwin.py delete mode 100644 tests/old_functional/local_client_windows.py delete mode 100644 tests/old_functional/test_copy.py delete mode 100644 tests/old_functional/test_local_copy_paste.py delete mode 100644 tests/old_functional/test_long_path.py delete mode 100644 tests/old_functional/test_mac_local_client.py delete mode 100644 tests/old_functional/test_remote_client.py delete mode 100644 tests/old_functional/test_special_files.py delete mode 100644 tests/old_functional/test_sync_roots.py delete mode 100644 tests/old_functional/test_transfer.py delete mode 100644 tests/old_functional/test_volume.py diff --git a/tests/functional/test_shared_folders.py b/tests/functional/test_shared_folders.py deleted file mode 100644 index cf5b0f5334..0000000000 --- a/tests/functional/test_shared_folders.py +++ /dev/null @@ -1,114 +0,0 @@ -from pathlib import Path - -from ..utils import random_png -from . import LocalTest -from .conftest import TwoUsersTest - - -class TestSharedFolders(TwoUsersTest): - def _test_local_changes_while_not_running(self, unbind): - """NXDRIVE-646: not uploading renamed file from shared folder.""" - local_1 = self.local_root_client_1 - remote_1 = self.remote_document_client_1 - remote_2 = self.remote_document_client_2 - - # Unregister test workspace for user_1 - remote_1.unregister_as_root(self.workspace) - - # Remove ReadWrite permission for user_1 on the test workspace - test_workspace = f"doc:{self.ws.path}" - self.root_remote.execute( - command="Document.SetACE", - input_obj=test_workspace, - user=self.user_2, - permission="ReadWrite", - grant=True, - ) - - # Create initial folders and files as user_2 - folder = remote_2.make_folder("/", "Folder01") - subfolder_1 = remote_2.make_folder(folder, "SubFolder01") - remote_2.make_file(subfolder_1, "Image01.png", random_png()) - file_id = remote_2.make_file(folder, "File01.txt", content=b"plaintext") - - # Grant Read permission for user_1 on the test folder and register - self.root_remote.execute( - command="Document.SetACE", - input_obj=f"doc:{folder}", - user=self.user_1, - permission="Read", - ) - remote_1.register_as_root(folder) - - # Start engine and wait for sync - self.engine_1.start() - self.wait_sync(wait_for_async=True) - - # First checks - file_pair_state = self.engine_1.dao.get_state_from_local( - Path("/Folder01") / "File01.txt" - ) - assert file_pair_state is not None - file_remote_ref = file_pair_state.remote_ref - assert remote_2.exists("/Folder01") - assert remote_2.exists("/Folder01/File01.txt") - assert remote_2.exists("/Folder01/SubFolder01") - assert remote_2.exists("/Folder01/SubFolder01/Image01.png") - assert local_1.exists("/Folder01") - assert local_1.exists("/Folder01/File01.txt") - assert local_1.exists("/Folder01/SubFolder01") - assert local_1.exists("/Folder01/SubFolder01/Image01.png") - - # Unbind or stop engine - if unbind: - self.send_unbind_engine(1) - self.wait_unbind_engine(1) - else: - self.engine_1.stop() - - # Restore write permission to user_1 (=> ReadWrite) - self.root_remote.execute( - command="Document.SetACE", - input_obj=f"doc:{folder}", - user=self.user_1, - permission="ReadWrite", - ) - self.wait() - - # Make changes - LocalTest.rename(local_1, "/Folder01/File01.txt", "File01_renamed.txt") - LocalTest.delete(local_1, "/Folder01/SubFolder01/Image01.png") - - # Bind or start engine and wait for sync - if unbind: - self.send_bind_engine(1) - self.wait_bind_engine(1) - else: - self.engine_1.start() - self.wait_sync() - - # Check client side - assert local_1.exists("/Folder01") - # File has been renamed and image deleted - assert not local_1.exists("/Folder01/File01.txt") - assert local_1.exists("/Folder01/File01_renamed.txt") - # The deleted image has been recreated if the unbinding happened - assert local_1.exists("/Folder01/SubFolder01/Image01.png") is unbind - - # Check server side - children = remote_2.get_children_info(folder) - assert len(children) == 2 - file_info = remote_2.get_info(file_id) - if unbind: - # File has not been renamed and image has not been deleted - assert file_info.name == "File01.txt" - assert remote_2.exists("/Folder01/SubFolder01/Image01.png") - # File is in conflict - file_pair_state = self.engine_1.dao.get_normal_state_from_remote( - file_remote_ref - ) - assert file_pair_state.pair_state == "conflicted" - else: - # File has been renamed and image deleted - assert file_info.name == "File01_renamed.txt" - assert not remote_2.exists("/Folder01/SubFolder01/Image01.png") diff --git a/tests/functional/test_versioning.py b/tests/functional/test_versioning.py index ec0d45783a..8ce28679d6 100644 --- a/tests/functional/test_versioning.py +++ b/tests/functional/test_versioning.py @@ -1,4 +1,4 @@ -from .conftest import OneUserTest, TwoUsersTest +from .conftest import OneUserTest class TestVersioning(OneUserTest): @@ -25,9 +25,3 @@ def test_version_restore(self): remote.restore_version(version_uid) self.wait_sync(wait_for_async=True) assert local.get_content("/Document to restore.txt") == b"Initial content." - - -class TestVersioning2(TwoUsersTest): - def _assert_version(self, doc, major, minor): - assert doc["properties"]["uid:major_version"] == major - assert doc["properties"]["uid:minor_version"] == minor diff --git a/tests/old_functional/local_client_darwin.py b/tests/old_functional/local_client_darwin.py deleted file mode 100644 index 5e4e5f3db0..0000000000 --- a/tests/old_functional/local_client_darwin.py +++ /dev/null @@ -1,67 +0,0 @@ -""" -Intent of this file is to use OSX File Manager to make FS operations to simulate -user actions. -""" - -import os -import time -from pathlib import Path - -import Cocoa - -from . import LocalTest - - -class MacLocalClient(LocalTest): - def __init__(self, base_folder, **kwargs): - super().__init__(base_folder, **kwargs) - self.fm = Cocoa.NSFileManager.defaultManager() - - def copy(self, srcref: str, dstref: str) -> None: - """Make a copy of the file (with xattr included).""" - src = self.abspath(srcref) - dst = self.abspath(dstref) - if not dst.exists() and not dst.parent.exists(): - raise ValueError( - f"parent destination directory {dst.parent} does not exist" - ) - if src.is_dir() and dst.exists() and dst.is_file(): - raise ValueError(f"cannot copy directory {src} to a file {dst}") - if dst.exists() and dst.is_dir(): - dst = dst / src.name - - error = None - result = self.fm.copyItemAtPath_toPath_error_(str(src), str(dst), error) - self._process_result(result) - - def move(self, srcref: str, parentref: str, name: str = None) -> None: - src = self.abspath(srcref) - parent = self.abspath(parentref) - - dst = parent / (name or src.name) - - error = None - result = self.fm.moveItemAtPath_toPath_error_(str(src), str(dst), error) - time.sleep(0.3) - self._process_result(result) - - def rename(self, srcref: str, to_name: str): - parent = os.path.dirname(srcref) - dstref = os.path.join(parent) - self.move(srcref, dstref, name=to_name) - return Path(parent) / to_name - - def delete(self, ref): - path = self.abspath(ref) - error = None - result = self.fm.removeItemAtPath_error_(str(path), error) - self._process_result(result) - - @staticmethod - def _process_result(result): - ok, err = result - if not ok: - error = ( - f"{err.localizedDescription()} (cause: {err.localizedFailureReason()})" - ) - raise OSError(error) diff --git a/tests/old_functional/local_client_windows.py b/tests/old_functional/local_client_windows.py deleted file mode 100644 index 664d870a2a..0000000000 --- a/tests/old_functional/local_client_windows.py +++ /dev/null @@ -1,97 +0,0 @@ -""" -Intent of this file is to use Explorer operations to make FS to simulate user -actions. - -https://msdn.microsoft.com/en-us/library/windows/desktop/bb775771(v=vs.85).aspx -Using SHFileOperation as the MSDN advise to use it for multithread - -IFileOperation can only be applied in a single-threaded apartment (STA) -situation. It cannot be used for a multithreaded apartment (MTA) situation. -For MTA, you still must use SHFileOperation. - -Note that any string passed to SHFileOperation needs to be double-null terminated. -This is automatically handled by pywin32: -https://github.com/mhammond/pywin32/blob/059b7be/com/win32comext/shell/src/shell.cpp#L940 -""" - -import errno -import logging -import os -import time -from pathlib import Path -from typing import Union - -from win32com.shell import shell, shellcon - -from . import LocalTest - -RawPath = Union[Path, str] -log = logging.getLogger(__name__) - - -class WindowsLocalClient(LocalTest): - def abspath(self, ref: RawPath) -> Path: - # Remove \\?\ - abs_path = super().abspath(ref).resolve() - if len(str(abs_path)) >= 255: - log.warning( - "The path is longer than 255 characters and the " - "WindowsLocalClient is about the remove the long path " - "prefix. So the test is likely to fail." - ) - return abs_path - - def do_op( - self, op: int, path_from: Path, path_to: Union[Path, None], flags: int - ) -> None: - """Actually do the requested SHFileOperation operation. - Errors are automatically handled. - """ - # *path_to* can be set to None for deletion of *path_from* - if path_to: - path_to = str(path_to) - - rc, aborted = shell.SHFileOperation((0, op, str(path_from), path_to, flags)) - - if aborted: - rc = errno.ECONNABORTED - if rc != 0: - raise OSError(rc, os.strerror(rc), path_from) - - def copy(self, srcref: RawPath, dstref: RawPath) -> None: - """Make a copy of the file (with xattr included).""" - self.do_op( - shellcon.FO_COPY, - self.abspath(srcref), - self.abspath(dstref), - shellcon.FOF_NOCONFIRMATION, - ) - - def delete(self, ref: RawPath) -> None: - # FOF_ALLOWUNDO send to trash - self.do_op( - shellcon.FO_DELETE, - self.abspath(ref), - None, - shellcon.FOF_NOCONFIRMATION | shellcon.FOF_ALLOWUNDO, - ) - - def delete_final(self, ref: RawPath) -> None: - self.do_op( - shellcon.FO_DELETE, - self.abspath(ref), - None, - flags=shellcon.FOF_NOCONFIRMATION, - ) - - def move(self, ref: RawPath, new_parent_ref: RawPath, name: str = None) -> None: - path = self.abspath(ref) - new_path = self.abspath(new_parent_ref) / (name or path.name) - self.do_op(shellcon.FO_MOVE, path, new_path, shellcon.FOF_NOCONFIRMATION) - - def rename(self, srcref: RawPath, to_name: str) -> Path: - path = self.abspath(srcref) - new_path = path.with_name(to_name) - self.do_op(shellcon.FO_RENAME, path, new_path, shellcon.FOF_NOCONFIRMATION) - time.sleep(0.5) - return new_path diff --git a/tests/old_functional/test_concurrent_synchronization.py b/tests/old_functional/test_concurrent_synchronization.py index ecbcf6e9c3..71e0cf570d 100644 --- a/tests/old_functional/test_concurrent_synchronization.py +++ b/tests/old_functional/test_concurrent_synchronization.py @@ -11,15 +11,6 @@ class TestConcurrentSynchronization(TwoUsersTest): - def create_docs(self, parent, number, name_pattern=None, delay=1.0): - return self.root_remote.execute( - command="NuxeoDrive.CreateTestDocuments", - input_obj=f"doc:{parent}", - namePattern=name_pattern, - number=number, - delay=int(delay * 1000), - ) - def test_concurrent_file_access(self): """Test update/deletion of a locally locked file. @@ -235,31 +226,6 @@ def test_delete_local_folder_delay_remote_changes_fetch(self): # Check Test folder has not been re-created locally assert not local.exists("/Test folder") - def test_rename_local_folder(self): - # Get local and remote clients - local1 = self.local_1 - local2 = self.local_2 - - # Launch first synchronization - self.engine_1.start() - self.engine_2.start() - self.wait_sync(wait_for_async=True, wait_for_engine_2=True) - - # Test workspace should be created locally - assert local1.exists("/") - assert local2.exists("/") - - # Create a local folder in the test workspace and a file inside - # this folder, then synchronize - local1.make_folder("/", "Test folder") - if WINDOWS: - # Too fast folder create-then-rename are not well handled - time.sleep(1) - local1.rename("/Test folder", "Renamed folder") - self.wait_sync(wait_for_async=True, wait_for_engine_2=True) - assert local1.exists("/Renamed folder") - assert local2.exists("/Renamed folder") - def test_delete_local_folder_update_remote_folder_property(self): # Get local and remote clients local = self.local_1 diff --git a/tests/old_functional/test_copy.py b/tests/old_functional/test_copy.py deleted file mode 100644 index e908893f89..0000000000 --- a/tests/old_functional/test_copy.py +++ /dev/null @@ -1,28 +0,0 @@ -from .common import OneUserTest - - -class TestCopy(OneUserTest): - def test_synchronize_remote_copy(self): - local = self.local_1 - remote = self.remote_document_client_1 - - # Create a file and a folder in the remote root workspace - remote.make_file("/", "test.odt", content=b"Some content.") - remote.make_folder("/", "Test folder") - - # Launch ndrive and check synchronization - self.engine_1.start() - self.wait_sync(wait_for_async=True) - assert local.exists("/") - assert local.exists("/Test folder") - assert local.exists("/test.odt") - - # Copy the file to the folder remotely - remote.copy("/test.odt", "/Test folder") - - # Launch ndrive and check synchronization - self.wait_sync(wait_for_async=True) - assert local.exists("/test.odt") - assert local.get_content("/test.odt") == b"Some content." - assert local.exists("/Test folder/test.odt") - assert local.get_content("/Test folder/test.odt") == b"Some content." diff --git a/tests/old_functional/test_encoding.py b/tests/old_functional/test_encoding.py index 952c16eac2..e0b5591e17 100644 --- a/tests/old_functional/test_encoding.py +++ b/tests/old_functional/test_encoding.py @@ -1,9 +1,3 @@ -import os -from pathlib import Path - -from nxdrive.client.local import FileInfo - -from ..markers import not_mac from .common import OneUserTest @@ -78,36 +72,3 @@ def test_content_with_accents_from_client(self): self.wait_sync(wait_for_async=True) assert remote.get_content("/Nom sans accents") == data - - def test_name_normalization(self): - local = self.local_1 - remote = self.remote_document_client_1 - - self.engine_1.start() - self.wait_sync(wait_for_async=True) - - filename = "space\xa0 et TM\u2122.doc" - local.make_file("/", filename) - self.wait_sync(wait_for_async=True) - - assert remote.get_info("/" + filename).name == filename - - @not_mac(reason="Normalization does not work on macOS") - def test_fileinfo_normalization(self): - local = self.local_1 - - self.engine_1.start() - self.wait_sync(wait_for_async=True) - self.engine_1.stop() - - name = "Teste\u0301" - local.make_file("/", name, content=b"Test") - - # FileInfo() will normalize the filename - assert FileInfo(local.base_folder, Path(name), False, 0).name != name - - # The encoding should be different, - # cannot trust the get_children as they use FileInfo - children = os.listdir(local.abspath("/")) - assert len(children) == 1 - assert children[0] != name diff --git a/tests/old_functional/test_local_changes_when_offline.py b/tests/old_functional/test_local_changes_when_offline.py index 1b3923834f..4fb18f94cc 100644 --- a/tests/old_functional/test_local_changes_when_offline.py +++ b/tests/old_functional/test_local_changes_when_offline.py @@ -39,41 +39,3 @@ def test_copy_paste_normal(self): online should be detected and synced to server. """ self.copy_past_and_rename() - - def copy_past_and_rename(self, stop_engine: bool = False): - if stop_engine: - # Make Drive offline (by suspend) - self.engine_1.suspend() - - # Make a copy of the file (with xattr included) - self.local_1.copy("/Folder1/File1.txt", "/Folder1/File1 - Copy.txt") - - # Rename the original file - self.local.rename("/Folder1/File1.txt", "File1_renamed.txt") - - if stop_engine: - # Bring Drive online (by resume) - self.engine_1.resume() - - self.wait_sync() - - # Verify there is no local changes - assert self.local.exists("/Folder1/File1_renamed.txt") - assert self.local.exists("/Folder1/File1 - Copy.txt") - assert not self.local.exists("/Folder1/File1.txt") - - # Verify that local changes are uploaded to server successfully - if self.remote.exists("/Folder1/File1 - Copy.txt"): - # '/Folder1/File1 - Copy.txt' is uploaded to server. - # So original file named should be changed as 'File_renamed.txt' - remote_info = self.remote.get_info(self.file1_remote) - assert remote_info.name == "File1 - Copy.txt" - - else: - # Original file is renamed as 'File1 - Copy.txt'. - # This is a bug only if Drive is online during copy + rename - assert self.remote.exists("/Folder1/File1_renamed.txt") - remote_info = self.remote.get_info(self.file1_remote) - assert remote_info.name == "File1_renamed.txt" - - assert not self.remote.exists("/Folder1/File1.txt") diff --git a/tests/old_functional/test_local_copy_paste.py b/tests/old_functional/test_local_copy_paste.py deleted file mode 100644 index d886b01da7..0000000000 --- a/tests/old_functional/test_local_copy_paste.py +++ /dev/null @@ -1,129 +0,0 @@ -import shutil - -from .common import FILE_CONTENT, OneUserTest - - -class TestLocalCopyPaste(OneUserTest): - NUMBER_OF_LOCAL_TEXT_FILES = 10 - NUMBER_OF_LOCAL_IMAGE_FILES = 10 - NUMBER_OF_LOCAL_FILES_TOTAL = ( - NUMBER_OF_LOCAL_TEXT_FILES + NUMBER_OF_LOCAL_IMAGE_FILES - ) - FILE_NAME_PATTERN = "file%03d%s" - - """ - 1. Create folder "/A" with 100 files in it - 2. Create folder "/B" - """ - - def setUp(self): - remote = self.remote_1 - local = self.local_1 - self.engine_1.start() - self.wait_sync(wait_for_async=True) - self.engine_1.stop() - assert local.exists("/") - - # create folder A - local.make_folder("/", "A") - self.folder_path_1 = "/A" - - # create folder B - # NXDRIVE-477 If created after files are created inside A, - # creation of B isn't detected wy Watchdog! - # Reproducible with watchdemo, need to investigate. - # That's why we are now using local scan for setup_method(). - local.make_folder("/", "B") - self.folder_path_2 = "/B" - - # add text files in folder 'Nuxeo Drive Test Workspace/A' - self.local_files_list = [] - for file_num in range(1, self.NUMBER_OF_LOCAL_TEXT_FILES + 1): - filename = self.FILE_NAME_PATTERN % (file_num, ".txt") - local.make_file(self.folder_path_1, filename, FILE_CONTENT) - self.local_files_list.append(filename) - - # add image files in folder 'Nuxeo Drive Test Workspace/A' - abs_folder_path_1 = local.abspath(self.folder_path_1) - test_doc_path = self.location / "resources" / "files" / "cat.jpg" - for file_num in range( - self.NUMBER_OF_LOCAL_TEXT_FILES + 1, self.NUMBER_OF_LOCAL_FILES_TOTAL + 1 - ): - filename = self.FILE_NAME_PATTERN % (file_num, ".jpg") - dst_path = abs_folder_path_1 / filename - shutil.copyfile(test_doc_path, dst_path) - self.local_files_list.append(filename) - - self.engine_1.start() - self.wait_sync() - self.engine_1.stop() - - # get remote folders reference ids - self.remote_ref_1 = local.get_remote_id(self.folder_path_1) - assert self.remote_ref_1 - self.remote_ref_2 = local.get_remote_id(self.folder_path_2) - assert self.remote_ref_2 - assert remote.fs_exists(self.remote_ref_1) - assert remote.fs_exists(self.remote_ref_2) - - assert ( - len(remote.get_fs_children(self.remote_ref_1)) - == self.NUMBER_OF_LOCAL_FILES_TOTAL - ) - - def test_local_copy_paste_files(self): - self._local_copy_paste_files() - - def test_local_copy_paste_files_stopped(self): - self._local_copy_paste_files(stopped=True) - - def _local_copy_paste_files(self, stopped=False): - if not stopped: - self.engine_1.start() - - # Copy all children (files) of A to B - remote = self.remote_1 - local = self.local_1 - src = local.abspath(self.folder_path_1) - dst = local.abspath(self.folder_path_2) - num = self.NUMBER_OF_LOCAL_FILES_TOTAL - expected_files = set(self.local_files_list) - - for f in src.iterdir(): - shutil.copy(f, dst) - - if stopped: - self.engine_1.start() - self.wait_sync(timeout=60) - - # Expect local "/A" to contain all the files - abs_folder_path_1 = local.abspath(self.folder_path_1) - assert abs_folder_path_1.exists() - children = [f.name for f in abs_folder_path_1.iterdir()] - assert len(children) == num - assert set(children) == expected_files - - # expect local "/B" to contain the same files - abs_folder_path_2 = local.abspath(self.folder_path_2) - assert abs_folder_path_2.exists() - children = [f.name for f in abs_folder_path_2.iterdir()] - assert len(children) == num - assert set(children) == expected_files - - # expect remote "/A" to contain all the files - # just compare the names - children = [ - remote_info.name - for remote_info in remote.get_fs_children(self.remote_ref_1) - ] - assert len(children) == num - assert set(children) == expected_files - - # expect remote "/B" to contain all the files - # just compare the names - children = [ - remote_info.name - for remote_info in remote.get_fs_children(self.remote_ref_2) - ] - assert len(children) == num - assert set(children) == expected_files diff --git a/tests/old_functional/test_local_creations.py b/tests/old_functional/test_local_creations.py index 13df72a6f3..0a2008efbd 100644 --- a/tests/old_functional/test_local_creations.py +++ b/tests/old_functional/test_local_creations.py @@ -77,25 +77,6 @@ def test_invalid_credentials_on_file_upload(self): assert len(children) == 1 assert children[0].name == file - def test_mini_scenario(self): - local = self.local_root_client_1 - remote = self.remote_1 - - self.engine_1.start() - self.wait_sync(wait_for_async=True) - - local.make_folder(f"/{self.workspace_title}", "A") - folder_path_1 = f"{self.workspace_title}/A" - - test_doc_path = self.location / "resources" / "files" / "cat.jpg" - abs_folder_path_1 = local.abspath(f"/{folder_path_1}") - dst_path = abs_folder_path_1 / "cat.jpg" - shutil.copyfile(test_doc_path, dst_path) - - self.wait_sync(timeout=100) - uid = local.get_remote_id(f"/{folder_path_1}/cat.jpg") - assert remote.fs_exists(uid) - def test_local_create_folders_and_children_files(self): """ 1. create folder 'Nuxeo Drive Test Workspace/A' with 100 files in it @@ -237,25 +218,6 @@ def test_local_create_folders_with_dots(self): assert local.exists(f"/{folder1}") assert local.exists(f"/{folder2}") - def test_local_modification_date(self): - """Check that the files have the Platform modification date.""" - remote = self.remote_document_client_1 - local = self.local_1 - engine = self.engine_1 - - filename = "abc.txt" - remote.make_file("/", filename, content=b"1234") - remote_mtime = time.time() - - time.sleep(3) - - engine.start() - self.wait_sync(wait_for_async=True) - - filename = f"/{filename}" - assert local.exists(filename) - assert local.abspath(filename).stat().st_mtime < remote_mtime - def test_local_modification_date_non_latin(self): """Check that non-latin files have the Platform modification date.""" remote = self.remote_document_client_1 @@ -318,39 +280,6 @@ def test_local_modification_date_hiraganas_file(self): assert local.exists(filename) assert local.abspath(filename).stat().st_mtime < remote_mtime + 0.5 - def test_local_creation_date(self): - """Check that the files have the Platform modification date.""" - remote = self.remote_1 - local = self.local_1 - engine = self.engine_1 - sleep_time = 3 - - workspace_id = f"{SYNC_ROOT_FAC_ID}{self.workspace}" - filename = "abc.txt" - file_id = remote.make_file(workspace_id, filename, content=b"1234").uid - after_ctime = time.time() - - time.sleep(sleep_time) - filename = f"a{filename}" - remote.rename(file_id, filename) - after_mtime = time.time() - - engine.start() - self.wait_sync(wait_for_async=True) - - filename = f"/{filename}" - assert local.exists(filename) - stats = local.abspath(filename).stat() - local_mtime = stats.st_mtime - - # Note: GNU/Linux does not have a creation time - if MAC or WINDOWS: - local_ctime = stats.st_birthtime if MAC else stats.st_ctime - assert local_ctime < after_ctime - assert local_ctime + sleep_time <= local_mtime - - assert local_mtime < after_mtime + 0.5 - def test_local_creation_date_kanjis_file(self): """Check that Kanjis files have the Platform modification date.""" remote = self.remote_1 @@ -485,82 +414,6 @@ def test_local_creation_with_obsolete_xattr(self): assert remote.exists("/obsolete.md") assert remote.exists("/obsolete") - def recovery_scenario(self, cleanup: bool = True): - """ - A recovery test, scenario: - 1. Add a new account using the foo folder. - 2. Remove the account, keep the foo folder as-is. - 3. Remove xattrs using the clean-folder CLI argument (if *cleanup* is True). - 4. Re-add the account using the foo folder. - - The goal is to check that local data is not re-downloaded at all. - Drive should simply recreate the database and check the all files are there. - """ - # Start engine and wait for synchronization - self.engine_1.start() - self.wait_sync(wait_for_async=True) - - # Create folders and files on the server - workspace_id = f"{SYNC_ROOT_FAC_ID}{self.workspace}" - folder_uid = self.remote_1.make_folder(workspace_id, "a folder").uid - self.remote_1.make_file(folder_uid, "file1.bin", content=b"0321" * 42) - self.remote_1.make_file(folder_uid, "file2.bin", content=b"12365" * 42) - self.remote_1.make_folder(folder_uid, "folder 2") - - # Start engine and wait for synchronization - self.engine_1.start() - self.wait_sync(wait_for_async=True) - - # Local checks - assert self.local_1.exists("/a folder") - assert self.local_1.exists("/a folder/file1.bin") - assert self.local_1.exists("/a folder/file2.bin") - assert self.local_1.exists("/a folder/folder 2") - - # Stop the engine for following actions - self.engine_1.stop() - - if cleanup: - # Remove xattrs - folder = Path("a folder") - self.local_1.clean_xattr_folder_recursive(folder, cleanup=True) - self.local_1.remove_remote_id(folder, cleanup=True) - - # Ensure xattrs are gone - assert not self.local_1.get_remote_id(folder) - assert not self.local_1.get_remote_id(folder / "file1.bin") - assert not self.local_1.get_remote_id(folder / "file2.bin") - assert not self.local_1.get_remote_id(folder / "folder 2") - - # Destroy the database but keep synced files - self.unbind_engine(1, purge=False) - - def download(*_, **__): - """ - Patch Remote.download() to be able to check that nothing - will be downloaded as local data is already there. - """ - assert 0, "No download should be done!" - - # Re-bind the account using the same folder - self.bind_engine(1, start_engine=False) - - # Start the sync - with patch.object(self.engine_1.remote, "download", new=download): - with ensure_no_exception(): - self.engine_1.start() - self.wait_sync(wait_for_async=True) - - # No error expected - assert not self.engine_1.dao.get_errors(limit=0) - - # Checks - for client in (self.local_1, self.remote_1): - assert client.exists("/a folder") - assert client.exists("/a folder/file1.bin") - assert client.exists("/a folder/file2.bin") - assert client.exists("/a folder/folder 2") - def test_local_creation_with_files_existant_without_xattrs(self): """NXDRIVE-1882: Recovery test with purgation of attrs.""" self.recovery_scenario(cleanup=True) diff --git a/tests/old_functional/test_local_move_folders.py b/tests/old_functional/test_local_move_folders.py index 239b514f12..c0c2437817 100644 --- a/tests/old_functional/test_local_move_folders.py +++ b/tests/old_functional/test_local_move_folders.py @@ -1,4 +1,3 @@ -import shutil from contextlib import suppress from pathlib import Path @@ -61,44 +60,6 @@ def tearDown(self): self.app.local_scan_finished ) - def test_local_move_folder_with_files(self): - count = 10 - self._setup(count=count) - local = self.local_1 - remote = self.remote_1 - remote_doc = self.remote_document_client_1 - src = local.abspath(self.folder_path_1) - dst = local.abspath(self.folder_path_2) - shutil.move(src, dst) - self.wait_sync() - names = {f"file{n + 1:03d}.png" for n in range(count)} - - # Check that a1 doesn't exist anymore locally and remotely - assert not local.exists("/a1") - assert len(remote_doc.get_children_info(self.workspace)) == 1 - - # Check /a2 and /a2/a1 - for folder in ("/a2", "/a2/a1"): - assert local.exists(folder) - children = [ - child.name - for child in local.get_children_info(folder) - if not child.folderish - ] - assert len(children) == count - assert set(children) == names - - uid = local.get_remote_id(folder) - assert uid - assert remote.fs_exists(uid) - children = [ - child.name - for child in remote.get_fs_children(uid) - if not child.folderish - ] - assert len(children) == count - assert set(children) == names - def test_local_move_folder_both_sides_while_stopped(self): self._test_local_move_folder_both_sides(False) @@ -159,43 +120,6 @@ def _test_local_move_folder_both_sides(self, unbind): ) assert folder_pair_state.pair_state == "conflicted" - def test_local_move_folder(self): - """ - A simple test to ensure we do not create useless URLs. - This is to handle cases when the user creates a new folder, - it has the default name set to the local system: - "New folder" - "Nouveau dossier (2)" - ... - The folder is created directly and it generates useless URLs. - So we move the document to get back good URLs. As the document has been - renamed above, the document's title is already the good one. - """ - local = self.local_1 - remote = self.remote_1 - - self.engine_1.start() - self.wait_sync(wait_for_async=True) - - name_orig = "Nouveau dossier (42)" - name_new = "C'est le vrai nom pârdi !" - - local.make_folder("/", name_orig) - self.wait_sync() - - child = remote.get_children_info(self.workspace)[0] - assert child.name == name_orig - assert child.path.endswith(name_orig) - - # Rename to fix the meaningfulness URL - local.rename(f"/{name_orig}", name_new) - self.wait_sync() - - assert remote.exists(f"/{name_new}") - child = remote.get_children_info(self.workspace)[0] - assert child.name == name_new - assert child.path.endswith(name_new) - def test_local_move_root_folder_with_unicode(self): local = self.local_1 diff --git a/tests/old_functional/test_local_paste.py b/tests/old_functional/test_local_paste.py index b4294a2568..2ff3edab92 100644 --- a/tests/old_functional/test_local_paste.py +++ b/tests/old_functional/test_local_paste.py @@ -61,19 +61,6 @@ def test_copy_paste_empty_folder_first(self): self._check_integrity() - def test_copy_paste_empty_folder_last(self): - """ - copy 'a1' to 'Nuxeo Drive Test Workspace', - then 'a2' to 'Nuxeo Drive Test Workspace' - """ - # copy 'temp/a1' under 'Nuxeo Drive Test Workspace' - shutil.copytree(self.folder1, self.workspace_abspath / self.FOLDER_A1) - # copy 'temp/a2' under 'Nuxeo Drive Test Workspace' - shutil.copytree(self.folder2, self.workspace_abspath / self.FOLDER_A2) - self.wait_sync(timeout=TEST_TIMEOUT) - - self._check_integrity() - def _check_integrity(self): local = self.local_1 remote = self.remote_1 @@ -97,40 +84,3 @@ def _check_integrity(self): remote_info.name for remote_info in remote.get_fs_children(remote_ref_1) ] assert len(children) == num - - def test_copy_paste_same_file(self): - local = self.local_1 - remote = self.remote_1 - name = self.FILENAME_PATTERN % 1 - workspace_abspath = local.abspath("/") - path = self.FOLDER_A1 / name - copypath = self.FOLDER_A1 / f"{name}copy" - # copy 'temp/a1' under 'Nuxeo Drive Test Workspace' - (workspace_abspath / self.FOLDER_A1).mkdir() - shutil.copy2(self.folder1 / name, workspace_abspath / path) - - self.wait_sync(timeout=TEST_TIMEOUT) - - # check that '/Nuxeo Drive Test Workspace/a1' does exist - assert local.exists(self.FOLDER_A1) - # check that '/Nuxeo Drive Test Workspace/a1/ has all the files - children = list((self.workspace_abspath / self.FOLDER_A1).iterdir()) - assert len(children) == 1 - # check that remote (DM) 'Nuxeo Drive Test Workspace/a1' exists - remote_ref = local.get_remote_id(self.FOLDER_A1) - assert remote.fs_exists(remote_ref) - remote_children = [ - remote_info.name for remote_info in remote.get_fs_children(remote_ref) - ] - assert len(remote_children) == 1 - remote_id = local.get_remote_id(path) - - shutil.copy2(local.abspath(path), local.abspath(copypath)) - local.set_remote_id(copypath, remote_id) - self.wait_sync(timeout=TEST_TIMEOUT) - remote_children = [ - remote_info.name for remote_info in remote.get_fs_children(remote_ref) - ] - assert len(remote_children) == 2 - children = list((self.workspace_abspath / self.FOLDER_A1).iterdir()) - assert len(children) == 2 diff --git a/tests/old_functional/test_local_storage_issue.py b/tests/old_functional/test_local_storage_issue.py index 75319a5b93..a58c44ec24 100644 --- a/tests/old_functional/test_local_storage_issue.py +++ b/tests/old_functional/test_local_storage_issue.py @@ -8,20 +8,6 @@ class TestLocalStorageIssue(OneUserTest): - def test_local_invalid_timestamp(self): - # Synchronize root workspace - self.engine_1.start() - self.wait_sync(wait_for_async=True) - assert self.local_1.exists("/") - self.engine_1.stop() - self.local_1.make_file("/", "Test.txt", content=b"plop") - os.utime(self.local_1.abspath("/Test.txt"), (0, 999_999_999_999_999)) - self.engine_1.start() - self.wait_sync() - children = self.remote_document_client_1.get_children_info(self.workspace) - assert len(children) == 1 - assert children[0].name == "Test.txt" - def test_synchronize_no_space_left_on_device(self): local = self.local_1 remote = self.remote_document_client_1 diff --git a/tests/old_functional/test_long_path.py b/tests/old_functional/test_long_path.py deleted file mode 100644 index 3ad0157b4f..0000000000 --- a/tests/old_functional/test_long_path.py +++ /dev/null @@ -1,101 +0,0 @@ -import os -from unittest.mock import patch - -from nxdrive.constants import WINDOWS - -from .common import OneUserTest - -# Number of chars in path "C:\...\Nuxeo..." is approx 96 chars -FOLDER_A = "A" * 90 -FOLDER_B = "B" * 90 -FOLDER_C = "C" * 90 -FOLDER_D = "D" * 50 -FILE = "F" * 255 + ".txt" - - -class TestLongPath(OneUserTest): - def setUp(self): - self.remote_1 = self.remote_document_client_1 - self.folder_a = self.remote_1.make_folder("/", FOLDER_A) - self.folder_b = self.remote_1.make_folder(self.folder_a, FOLDER_B) - self.folder_c = self.remote_1.make_folder(self.folder_b, FOLDER_C) - self.remote_1.make_file(self.folder_c, "File1.txt", content=b"Sample Content") - - def tearDown(self): - self.remote_1.delete(self.folder_a, use_trash=False) - - def test_long_path(self): - self.engine_1.start() - self.wait_sync(wait_for_async=True) - - parent_path = ( - self.local_1.abspath("/") / FOLDER_A / FOLDER_B / FOLDER_C / FOLDER_D - ) - if WINDOWS: - parent_path = f"\\\\?\\{parent_path}" - os.makedirs(parent_path, exist_ok=True) - - new_file = os.path.join(parent_path, "File2.txt") - with open(new_file, "wb") as f: - f.write(b"Hello world") - - self.wait_sync(wait_for_async=True, fail_if_timeout=False) - remote_children_of_c = self.remote_1.get_children_info(self.folder_c) - assert len(remote_children_of_c) == 2 - folder = [item for item in remote_children_of_c if item.name == FOLDER_D][0] - assert folder.name == FOLDER_D - - remote_children_of_d = self.remote_1.get_children_info(folder.uid) - assert len(remote_children_of_d) == 1 - assert remote_children_of_d[0].name == "File2.txt" - - def test_setup_on_long_path(self): - """NXDRIVE-689: Fix error when adding a new account when installation - path is greater than 245 characters. - """ - - self.engine_1.stop() - self.engine_1.reinit() - - # On Mac, avoid permission denied error - self.engine_1.local.clean_xattr_root() - - test_folder_len = 245 - len(str(self.local_nxdrive_folder_1)) - self.local_nxdrive_folder_1 = self.local_nxdrive_folder_1 / ( - "A" * test_folder_len - ) - assert len(str(self.local_nxdrive_folder_1)) > 245 - - self.manager_1.unbind_all() - self.engine_1 = self.manager_1.bind_server( - self.local_nxdrive_folder_1, - self.nuxeo_url, - self.user_1, - password=self.password_1, - start_engine=False, - ) - - self.engine_1.start() - self.engine_1.stop() - - -class TestLongFileName(OneUserTest): - def test_long_file_name(self): - def error(*_): - nonlocal received - received = True - - received = False - remote = self.remote_document_client_1 - - self.engine_1.start() - self.wait_sync(wait_for_async=True) - - with patch.object( - self.manager_1.notification_service, "_longPathError", new_callable=error - ): - remote.make_file(self.workspace, FILE, content=b"Sample Content") - self.wait_sync(wait_for_async=True, timeout=5, fail_if_timeout=False) - - assert received - assert not self.local_1.exists(f"/{FILE}") diff --git a/tests/old_functional/test_mac_local_client.py b/tests/old_functional/test_mac_local_client.py deleted file mode 100644 index 8ba729ee40..0000000000 --- a/tests/old_functional/test_mac_local_client.py +++ /dev/null @@ -1,38 +0,0 @@ -from ..markers import mac_only -from .common import OneUserTest - -try: - import xattr -except ImportError: - pass - - -@mac_only -class TestMacSpecific(OneUserTest): - def test_finder_in_use(self): - """Test that if Finder is using the file we postpone the sync.""" - - self.engine_1.start() - self.wait_sync(wait_for_async=True) - self.local_1.make_file("/", "File.txt", content=b"Some Content 1") - - # Emulate the Finder in use flag - key = [0] * 32 # OSX_FINDER_INFO_ENTRY_SIZE - key[:8] = 0x62, 0x72, 0x6F, 0x6B, 0x4D, 0x41, 0x43, 0x53 - - xattr.setxattr( - str(self.local_1.abspath("/File.txt")), - xattr.XATTR_FINDERINFO_NAME, - bytes(bytearray(key)), - ) - - # The file should not be synced and there have no remote id - self.wait_sync(wait_for_async=True, fail_if_timeout=False) - assert not self.local_1.get_remote_id("/File.txt") - - # Remove the Finder flag - self.local_1.remove_remote_id("/File.txt", name=xattr.XATTR_FINDERINFO_NAME) - - # The sync process should now handle the file and sync it - self.wait_sync(wait_for_async=True, fail_if_timeout=False) - assert self.local_1.get_remote_id("/File.txt") diff --git a/tests/old_functional/test_remote_client.py b/tests/old_functional/test_remote_client.py deleted file mode 100644 index 0f257cd7c6..0000000000 --- a/tests/old_functional/test_remote_client.py +++ /dev/null @@ -1,525 +0,0 @@ -import hashlib -import operator -from pathlib import Path -from shutil import copyfile -from tempfile import mkdtemp - -import pytest - -from nxdrive.exceptions import NotFound - -from . import LocalTest, make_tmp_file -from .common import FS_ITEM_ID_PREFIX, OneUserTest, TwoUsersTest - - -class TestRemoteFileSystemClient(OneUserTest): - def setUp(self): - # Bind the test workspace as sync root for user 1 - remote_doc = self.remote_document_client_1 - remote = self.remote_1 - remote_doc.register_as_root(self.workspace) - - # Fetch the id of the workspace folder item - info = remote.get_filesystem_root_info() - self.workspace_id = remote.get_fs_children(info.uid)[0].uid - - # - # Test the API common with the local client API - # - - def test_get_fs_info(self): - remote = self.remote_1 - - # Check file info - fs_item_id = remote.make_file( - self.workspace_id, "Document 1.txt", content=b"Content of doc 1." - ).uid - info = remote.get_fs_info(fs_item_id) - assert info is not None - assert info.name == "Document 1.txt" - assert info.uid == fs_item_id - assert info.parent_uid == self.workspace_id - assert not info.folderish - if info.last_contributor: - assert info.last_contributor == self.user_1 - digest_algorithm = info.digest_algorithm - assert digest_algorithm == "md5" - digest = self._get_digest(digest_algorithm, b"Content of doc 1.") - assert info.digest == digest - file_uid = fs_item_id.rsplit("#", 1)[1] - # NXP-17827: nxbigile has been replace to nxfile, keep handling both - url = f"/default/{file_uid}/blobholder:0/Document%201.txt" - cond = info.download_url in (f"nxbigfile{url}", f"nxfile{url}") - assert cond - - # Check folder info - fs_item_id = remote.make_folder(self.workspace_id, "Folder 1").uid - info = remote.get_fs_info(fs_item_id) - assert info is not None - assert info.name == "Folder 1" - assert info.uid == fs_item_id - assert info.parent_uid == self.workspace_id - assert info.folderish - if info.last_contributor: - assert info.last_contributor == self.user_1 - assert info.digest_algorithm is None - assert info.digest is None - assert info.download_url is None - - # Check non existing file info - fs_item_id = FS_ITEM_ID_PREFIX + "fakeId" - with pytest.raises(NotFound): - remote.get_fs_info(fs_item_id) - - def test_get_content(self): - remote = self.remote_1 - remote_doc = self.remote_document_client_1 - - # Check file with content - fs_item_id = remote.make_file( - self.workspace_id, "Document 1.txt", content=b"Content of doc 1." - ).uid - assert remote.get_content(fs_item_id) == b"Content of doc 1." - - # Check file without content - doc_uid = remote_doc.make_file_with_no_blob(self.workspace, "Document 2.txt") - fs_item_id = FS_ITEM_ID_PREFIX + doc_uid - with pytest.raises(NotFound): - remote.get_content(fs_item_id) - - def test_stream_content(self): - remote = self.remote_1 - - fs_item_id = remote.make_file( - self.workspace_id, "Document 1.txt", content=b"Content of doc 1." - ).uid - file_path = self.local_test_folder_1 / "Document 1.txt" - file_out = Path(mkdtemp()) / file_path.name - tmp_file = remote.stream_content( - fs_item_id, file_path, file_out, engine_uid=self.engine_1.uid - ) - assert tmp_file.exists() - assert tmp_file.name == "Document 1.txt" - assert tmp_file.read_bytes() == b"Content of doc 1." - - def test_get_fs_children(self): - remote = self.remote_1 - - # Create documents - folder_1_id = remote.make_folder(self.workspace_id, "Folder 1").uid - folder_2_id = remote.make_folder(self.workspace_id, "Folder 2").uid - file_1_id = remote.make_file( - self.workspace_id, "File 1", content=b"Content of file 1." - ).uid - file_2_id = remote.make_file( - folder_1_id, "File 2", content=b"Content of file 2." - ).uid - - # Check workspace children - workspace_children = remote.get_fs_children(self.workspace_id) - assert workspace_children is not None - assert len(workspace_children) == 3 - assert workspace_children[0].uid == folder_1_id - assert workspace_children[0].name == "Folder 1" - assert workspace_children[0].folderish - assert workspace_children[1].uid == folder_2_id - assert workspace_children[1].name == "Folder 2" - assert workspace_children[1].folderish - assert workspace_children[2].uid == file_1_id - assert workspace_children[2].name == "File 1" - assert not workspace_children[2].folderish - - # Check folder_1 children - folder_1_children = remote.get_fs_children(folder_1_id) - assert folder_1_children is not None - assert len(folder_1_children) == 1 - assert folder_1_children[0].uid == file_2_id - assert folder_1_children[0].name == "File 2" - - def test_scroll_descendants(self): - remote = self.remote_1 - - # Create documents - folder_1 = remote.make_folder(self.workspace_id, "Folder 1").uid - folder_2 = remote.make_folder(self.workspace_id, "Folder 2").uid - file_1 = remote.make_file( - self.workspace_id, "File 1.txt", content=b"Content of file 1." - ).uid - file_2 = remote.make_file( - folder_1, "File 2.txt", content=b"Content of file 2." - ).uid - - # Wait for ES completion - self.wait() - - # Check workspace descendants in one breath, ordered by remote path - scroll_res = remote.scroll_descendants(self.workspace_id, None) - assert isinstance(scroll_res, dict) - assert "scroll_id" in scroll_res - descendants = sorted(scroll_res["descendants"], key=operator.attrgetter("name")) - assert len(descendants) == 4 - - # File 1.txt - assert descendants[0].uid == file_1 - assert descendants[0].name == "File 1.txt" - assert not descendants[0].folderish - # File 2.txt - assert descendants[1].name == "File 2.txt" - assert not descendants[1].folderish - assert descendants[1].uid == file_2 - # Folder 1 - assert descendants[2].uid == folder_1 - assert descendants[2].name == "Folder 1" - assert descendants[2].folderish - # Folder 2 - assert descendants[3].uid == folder_2 - assert descendants[3].name == "Folder 2" - assert descendants[3].folderish - - # Check workspace descendants in several steps, ordered by remote path - descendants = [] - scroll_id = None - while True: - scroll_res = remote.scroll_descendants( - self.workspace_id, scroll_id, batch_size=2 - ) - assert isinstance(scroll_res, dict) - scroll_id = scroll_res["scroll_id"] - partial_descendants = scroll_res["descendants"] - if not partial_descendants: - break - descendants.extend(partial_descendants) - descendants = sorted(descendants, key=operator.attrgetter("name")) - assert len(descendants) == 4 - - # File 1.txt - assert descendants[0].uid == file_1 - assert descendants[0].name == "File 1.txt" - assert not descendants[0].folderish - # File 2.txt - assert descendants[1].name == "File 2.txt" - assert not descendants[1].folderish - assert descendants[1].uid == file_2 - # Folder 1 - assert descendants[2].uid == folder_1 - assert descendants[2].name == "Folder 1" - assert descendants[2].folderish - # Folder 2 - assert descendants[3].uid == folder_2 - assert descendants[3].name == "Folder 2" - assert descendants[3].folderish - - def test_make_folder(self): - remote = self.remote_1 - - fs_item_info = remote.make_folder(self.workspace_id, "My new folder") - assert fs_item_info is not None - assert fs_item_info.name == "My new folder" - assert fs_item_info.folderish - assert fs_item_info.digest_algorithm is None - assert fs_item_info.digest is None - assert fs_item_info.download_url is None - - def test_make_file(self): - remote = self.remote_1 - - # Check File document creation - fs_item_info = remote.make_file( - self.workspace_id, "My new file.odt", content=b"Content of my new file." - ) - assert fs_item_info is not None - assert fs_item_info.name == "My new file.odt" - assert not fs_item_info.folderish - digest_algorithm = fs_item_info.digest_algorithm - assert digest_algorithm == "md5" - digest = self._get_digest(digest_algorithm, b"Content of my new file.") - assert fs_item_info.digest == digest - - # Check Note document creation - fs_item_info = remote.make_file( - self.workspace_id, "My new note.txt", content=b"Content of my new note." - ) - assert fs_item_info is not None - assert fs_item_info.name == "My new note.txt" - assert not fs_item_info.folderish - digest_algorithm = fs_item_info.digest_algorithm - assert digest_algorithm == "md5" - digest = self._get_digest(digest_algorithm, b"Content of my new note.") - assert fs_item_info.digest == digest - - def test_make_file_custom_encoding(self): - remote = self.remote_1 - - # Create content encoded in utf-8 and cp1252 - unicode_content = "\xe9" # e acute - utf8_encoded = unicode_content.encode("utf-8") - utf8_digest = hashlib.md5(utf8_encoded).hexdigest() - cp1252_encoded = unicode_content.encode("cp1252") - - # Make files with this content - utf8_fs_id = remote.make_file( - self.workspace_id, "My utf-8 file.txt", content=utf8_encoded - ).uid - cp1252_fs_id = remote.make_file( - self.workspace_id, "My cp1252 file.txt", content=cp1252_encoded - ).uid - - # Check content - utf8_content = remote.get_content(utf8_fs_id) - assert utf8_content == utf8_encoded - cp1252_content = remote.get_content(cp1252_fs_id) - assert cp1252_content == utf8_encoded - - # Check digest - utf8_info = remote.get_fs_info(utf8_fs_id) - assert utf8_info.digest == utf8_digest - cp1252_info = remote.get_fs_info(cp1252_fs_id) - assert cp1252_info.digest == utf8_digest - - def test_update_content(self): - remote = self.remote_1 - - # Create file - fs_item_id = remote.make_file( - self.workspace_id, "Document 1.txt", content=b"Content of doc 1." - ).uid - - # Check file update - remote.update_content(fs_item_id, b"Updated content of doc 1.") - assert remote.get_content(fs_item_id) == b"Updated content of doc 1." - - def test_delete(self): - remote = self.remote_1 - - # Create file - fs_item_id = remote.make_file( - self.workspace_id, "Document 1.txt", content=b"Content of doc 1." - ).uid - assert remote.fs_exists(fs_item_id) - - # Delete file - remote.delete(fs_item_id) - assert not remote.fs_exists(fs_item_id) - - def test_exists(self): - remote = self.remote_1 - remote_doc = self.remote_document_client_1 - - # Check existing file system item - fs_item_id = remote.make_file( - self.workspace_id, "Document 1.txt", content=b"Content of doc 1." - ).uid - assert remote.fs_exists(fs_item_id) - - # Check non existing file system item (non existing document) - fs_item_id = FS_ITEM_ID_PREFIX + "fakeId" - assert not remote.fs_exists(fs_item_id) - - # Check non existing file system item (document without content) - doc_uid = remote_doc.make_file_with_no_blob(self.workspace, "Document 2.txt") - fs_item_id = FS_ITEM_ID_PREFIX + doc_uid - assert not remote.fs_exists(fs_item_id) - - # - # Test the API specific to the remote file system client - # - - def test_get_fs_item(self): - remote = self.remote_1 - - # Check file item - fs_item_id = remote.make_file( - self.workspace_id, "Document 1.txt", content=b"Content of doc 1." - ).uid - fs_item = remote.get_fs_item(fs_item_id) - assert fs_item is not None - assert fs_item["name"] == "Document 1.txt" - assert fs_item["id"] == fs_item_id - assert not fs_item["folder"] - - # Check file item using parent id - fs_item = remote.get_fs_item(fs_item_id, parent_fs_item_id=self.workspace_id) - assert fs_item is not None - assert fs_item["name"] == "Document 1.txt" - assert fs_item["id"] == fs_item_id - assert fs_item["parentId"] == self.workspace_id - - # Check folder item - fs_item_id = remote.make_folder(self.workspace_id, "Folder 1").uid - fs_item = remote.get_fs_item(fs_item_id) - assert fs_item is not None - assert fs_item["name"] == "Folder 1" - assert fs_item["id"] == fs_item_id - assert fs_item["folder"] - - # Check non existing file system item - fs_item_id = FS_ITEM_ID_PREFIX + "fakeId" - assert remote.get_fs_item(fs_item_id) is None - - def test_streaming_upload(self): - remote = self.remote_1 - - # Create a document by streaming a text file - file_path = make_tmp_file(remote.upload_tmp_dir, b"Some content.") - try: - fs_item_info = remote.stream_file( - self.workspace_id, file_path, filename="My streamed file.txt" - ) - finally: - file_path.unlink() - fs_item_id = fs_item_info.uid - assert fs_item_info.name == "My streamed file.txt" - assert remote.get_content(fs_item_id) == b"Some content." - - # Update a document by streaming a new text file - file_path = make_tmp_file(remote.upload_tmp_dir, b"Other content.") - try: - fs_item_info = remote.stream_update( - fs_item_id, file_path, filename="My updated file.txt" - ) - finally: - file_path.unlink() - assert fs_item_info.uid == fs_item_id - assert fs_item_info.name == "My updated file.txt" - assert remote.get_content(fs_item_id) == b"Other content." - - # Create a document by streaming a binary file - file_path = self.upload_tmp_dir / "testFile.pdf" - copyfile(self.location / "resources" / "files" / "testFile.pdf", file_path) - fs_item_info = remote.stream_file(self.workspace_id, file_path) - local_client = LocalTest(self.upload_tmp_dir) - assert fs_item_info.name == "testFile.pdf" - assert ( - fs_item_info.digest == local_client.get_info("/testFile.pdf").get_digest() - ) - - def test_mime_type_doc_type_association(self): - remote = self.remote_1 - remote_doc = self.remote_document_client_1 - - # Upload a PDF file, should create a File document - file_path = self.upload_tmp_dir / "testFile.pdf" - copyfile(self.location / "resources" / "files" / "testFile.pdf", file_path) - fs_item_info = remote.stream_file(self.workspace_id, file_path) - fs_item_id = fs_item_info.uid - doc_uid = fs_item_id.rsplit("#", 1)[1] - doc_type = remote_doc.get_info(doc_uid).doc_type - assert doc_type == "File" - - # Upload a JPG file, should create a Picture document - file_path = self.upload_tmp_dir / "cat.jpg" - copyfile(self.location / "resources" / "files" / "cat.jpg", file_path) - fs_item_info = remote.stream_file(self.workspace_id, file_path) - fs_item_id = fs_item_info.uid - doc_uid = fs_item_id.rsplit("#", 1)[1] - doc_type = remote_doc.get_info(doc_uid).doc_type - assert doc_type == "Picture" - - def test_unregister_nested_roots(self): - # Check that registering a parent folder of an existing root - # automatically unregister sub folders to avoid synchronization - # inconsistencies - remote = self.remote_document_client_1 - - # By default no root is synchronized - remote.unregister_as_root(self.workspace) - self.wait() - assert not remote.get_roots() - - folder = remote.make_folder(self.workspace, "Folder") - sub_folder_1 = remote.make_folder(folder, "Sub Folder 1") - sub_folder_2 = remote.make_folder(folder, "Sub Folder 2") - - # Register the sub folders as roots - remote.register_as_root(sub_folder_1) - remote.register_as_root(sub_folder_2) - assert len(remote.get_roots()) == 2 - - # Register the parent folder as root - remote.register_as_root(folder) - roots = remote.get_roots() - assert len(roots) == 1 - assert roots[0].uid == folder - - # Unregister the parent folder - remote.unregister_as_root(folder) - assert not remote.get_roots() - - def test_lock_unlock(self): - remote = self.remote_document_client_1 - doc_id = remote.make_file( - self.workspace, "TestLocking.txt", content=b"File content" - ) - - status = remote.is_locked(doc_id) - assert not status - remote.lock(doc_id) - assert remote.is_locked(doc_id) - - remote.unlock(doc_id) - assert not remote.is_locked(doc_id) - - @staticmethod - def _get_digest(algorithm: str, content: bytes) -> str: - hasher = getattr(hashlib, algorithm) - if hasher is None: - raise RuntimeError(f"Unknown digest algorithm: {algorithm}") - return hasher(content).hexdigest() - - -class TestRemoteFileSystemClient2(TwoUsersTest): - def setUp(self): - # Bind the test workspace as sync root for user 1 - remote_doc = self.remote_document_client_1 - remote = self.remote_1 - remote_doc.register_as_root(self.workspace) - - # Fetch the id of the workspace folder item - info = remote.get_filesystem_root_info() - self.workspace_id = remote.get_fs_children(info.uid)[0].uid - - def test_modification_flags_locked_document(self): - remote = self.remote_1 - fs_item_id = remote.make_file( - self.workspace_id, "Document 1.txt", content=b"Content of doc 1." - ).uid - - # Check flags for a document that isn't locked - info = remote.get_fs_info(fs_item_id) - assert info.can_rename - assert info.can_update - assert info.can_delete - assert info.lock_owner is None - assert info.lock_created is None - - # Check flags for a document locked by the current user - doc_uid = fs_item_id.rsplit("#", 1)[1] - remote.lock(doc_uid) - info = remote.get_fs_info(fs_item_id) - assert info.can_rename - assert info.can_update - assert info.can_delete - lock_info_available = remote.get_fs_item(fs_item_id).get("lockInfo") is not None - if lock_info_available: - assert info.lock_owner == self.user_1 - assert info.lock_created is not None - remote.unlock(doc_uid) - - # Check flags for a document locked by another user - self.remote_2.lock(doc_uid) - info = remote.get_fs_info(fs_item_id) - assert not info.can_rename - assert not info.can_update - assert not info.can_delete - if lock_info_available: - assert info.lock_owner == self.user_2 - assert info.lock_created is not None - - # Check flags for a document unlocked by another user - self.remote_2.unlock(doc_uid) - info = remote.get_fs_info(fs_item_id) - assert info.can_rename - assert info.can_update - assert info.can_delete - assert info.lock_owner is None - assert info.lock_created is None diff --git a/tests/old_functional/test_remote_deletion.py b/tests/old_functional/test_remote_deletion.py index 47dab70740..3269369386 100644 --- a/tests/old_functional/test_remote_deletion.py +++ b/tests/old_functional/test_remote_deletion.py @@ -4,11 +4,9 @@ from shutil import copyfile from unittest.mock import patch -import pytest from nuxeo.utils import version_lt from nxdrive.engine.engine import Engine -from nxdrive.options import Options from .common import OS_STAT_MTIME_RESOLUTION, OneUserTest, TwoUsersTest @@ -102,40 +100,6 @@ def callback(uploader): self.wait_sync(wait_for_async=True) assert not local.exists("/Test folder") - @Options.mock() - @pytest.mark.randombug("NXDRIVE-1329", repeat=4) - def test_synchronize_remote_deletion_while_download_file(self): - local = self.local_1 - remote = self.remote_document_client_1 - - def callback(uploader): - """Add delay when upload and download.""" - if not self.engine_1.has_delete: - # Delete remote file while downloading - try: - remote.delete("/Test folder/testFile.pdf") - except Exception: - log.exception("Cannot trash") - else: - self.engine_1.has_delete = True - time.sleep(1) - Engine.suspend_client(self.engine_1, uploader) - - self.engine_1.start() - self.engine_1.has_delete = False - - filepath = self.location / "resources" / "files" / "testFile.pdf" - - Options.set("tmp_file_limit", 0.1, setter="manual") - with patch.object(self.engine_1.remote, "download_callback", new=callback): - remote.make_folder("/", "Test folder") - remote.make_file("/Test folder", "testFile.pdf", file_path=filepath) - - self.wait_sync(wait_for_async=True) - # Sometimes the server does not return the document trash action in summary changes. - # So it may fail on the next assertion. - assert not local.exists("/Test folder/testFile.pdf") - def test_synchronize_remote_deletion_with_close_name(self): self.engine_1.start() self.wait_sync(wait_for_async=True) diff --git a/tests/old_functional/test_special_files.py b/tests/old_functional/test_special_files.py deleted file mode 100644 index 8999f62f82..0000000000 --- a/tests/old_functional/test_special_files.py +++ /dev/null @@ -1,36 +0,0 @@ -from shutil import copyfile - -from .. import ensure_no_exception -from .common import OneUserTest - - -class TestSpecialFiles(OneUserTest): - def test_keynote(self): - """Syncing a (macOS) Keynote file should work (NXDRIVE-619). - Both sync directions are tests. - """ - local = self.local_1 - remote = self.remote_document_client_1 - - self.engine_1.start() - self.wait_sync(wait_for_async=True) - - # The testing file - src = self.location / "resources" / "files" / "keynote.key" - - # Create a local file - file = local.abspath("/") / "keynote1.key" - copyfile(src, file) - - # Create a distant file - remote.make_file("/", "keynote2.key", content=src.read_bytes()) - - # Sync - with ensure_no_exception(): - self.wait_sync(wait_for_async=True) - - # Checks - assert not self.engine_1.dao.get_errors(limit=0) - for idx in range(1, 3): - assert local.exists(f"/keynote{idx}.key") - assert remote.exists(f"/keynote{idx}.key") diff --git a/tests/old_functional/test_sync_roots.py b/tests/old_functional/test_sync_roots.py deleted file mode 100644 index d0e529410b..0000000000 --- a/tests/old_functional/test_sync_roots.py +++ /dev/null @@ -1,32 +0,0 @@ -from .common import OneUserTest - - -class TestSyncRoots(OneUserTest): - def test_register_sync_root_parent(self): - remote = self.remote_document_client_1 - local = self.local_root_client_1 - - # First unregister test Workspace - remote.unregister_as_root(self.workspace) - - # Create a child folder and register it as a synchronization root - child = remote.make_folder(self.workspace, "child") - remote.make_file(child, "aFile.txt", content=b"My content") - remote.register_as_root(child) - - # Start engine and wait for synchronization - self.engine_1.start() - self.wait_sync(wait_for_async=True) - assert not local.exists(f"/{self.workspace_title}") - assert local.exists("/child") - assert local.exists("/child/aFile.txt") - - # Register parent folder - remote.register_as_root(self.workspace) - - # Start engine and wait for synchronization - self.wait_sync(wait_for_async=True) - assert not local.exists("/child") - assert local.exists(f"/{self.workspace_title}") - assert local.exists(f"/{self.workspace_title}/child") - assert local.exists(f"/{self.workspace_title}/child/aFile.txt") diff --git a/tests/old_functional/test_transfer.py b/tests/old_functional/test_transfer.py deleted file mode 100644 index 6ac830a9b7..0000000000 --- a/tests/old_functional/test_transfer.py +++ /dev/null @@ -1,888 +0,0 @@ -""" -Test pause/resume transfers in different scenarii. -""" -import re -from unittest.mock import patch - -import pytest -import responses -from nuxeo.exceptions import HTTPError -from requests.exceptions import ConnectionError - -from nxdrive.client.uploader.sync import SyncUploader -from nxdrive.constants import FILE_BUFFER_SIZE, TransferStatus -from nxdrive.options import Options -from nxdrive.state import State - -from .. import ensure_no_exception -from ..markers import not_windows -from .common import SYNC_ROOT_FAC_ID, OneUserTest - - -class TestDownload(OneUserTest): - def setUp(self): - self.engine_1.start() - self.wait_sync(wait_for_async=True) - - # Lower tmp_file_limit options to have chunked downloads without having to create big files - self.default_tmp_file_limit = Options.tmp_file_limit - Options.tmp_file_limit = 1 - - def tearDown(self): - Options.tmp_file_limit = self.default_tmp_file_limit - - def test_pause_download_manually(self): - """ - Pause the transfer by simulating a click on the pause/resume icon - on the current download in the systray menu. - """ - - def callback(downloader): - """ - This will mimic what is done in SystrayTranfer.qml: - - call API.pause_transfer() that will call: - - engine.dao.pause_transfer(nature, transfer_uid) - Then the download will be paused by the Engine: - - Engine.suspend_client() (== Remote.download_callback) will: - - raise DownloadPaused(download.uid) - """ - # Ensure we have 1 ongoing download - downloads = list(dao.get_downloads()) - assert downloads - download = downloads[0] - assert download.status == TransferStatus.ONGOING - - nonlocal count - - # Check the TMP file is bigger each iteration - file_out = engine.download_dir / uid / "test.bin" - assert file_out.stat().st_size == count * FILE_BUFFER_SIZE - - count += 1 - if count == 2: - # Pause the download - dao.pause_transfer("download", download.uid, 25.0) - - # Call the original function to make the paused download - # effective at the 2nd iteration - for cb in callback_orig: - cb(downloader) - - engine = self.engine_1 - dao = self.engine_1.dao - callback_orig = engine.remote.download_callback - count = 0 - - # Remotely create a file that will be downloaded locally - uid = self.remote_1.make_file( - f"{SYNC_ROOT_FAC_ID}{self.workspace}", - "test.bin", - content=b"0" * FILE_BUFFER_SIZE * 4, - ).uid.split("#")[-1] - - # There is no download, right now - assert not list(dao.get_downloads()) - - with patch.object(engine.remote, "download_callback", new=callback): - with ensure_no_exception(): - self.wait_sync(wait_for_async=True) - assert dao.get_downloads_with_status(TransferStatus.PAUSED) - - # Resume the download - engine.resume_transfer("download", list(dao.get_downloads())[0].uid) - self.wait_sync(wait_for_async=True) - assert not list(dao.get_downloads()) - - def test_pause_download_automatically(self): - """ - Pause the transfer by simulating an application exit - or clicking on the Suspend menu entry from the systray. - """ - - def callback(downloader): - """This will mimic what is done in SystrayMenu.qml: suspend the app.""" - # Ensure we have 1 ongoing download - downloads = list(dao.get_downloads()) - assert downloads - download = downloads[0] - assert download.status == TransferStatus.ONGOING - - # Suspend! - self.manager_1.suspend() - - # Call the original function to make the suspended download effective - for cb in callback_orig: - cb(downloader) - - engine = self.engine_1 - dao = self.engine_1.dao - callback_orig = engine.remote.download_callback - - # Remotely create a file that will be downloaded locally - self.remote_1.make_file( - f"{SYNC_ROOT_FAC_ID}{self.workspace}", - "test.bin", - content=b"0" * FILE_BUFFER_SIZE * 2, - ) - - # There is no download, right now - assert not list(dao.get_downloads()) - - with patch.object(engine.remote, "download_callback", new=callback): - with ensure_no_exception(): - self.wait_sync(wait_for_async=True) - assert dao.get_downloads_with_status(TransferStatus.SUSPENDED) - - # Resume the download - self.manager_1.resume() - self.wait_sync(wait_for_async=True) - assert not list(dao.get_downloads()) - - def test_modifying_paused_download(self): - """Modifying a paused download should discard the current download.""" - - def callback(downloader): - """Pause the download and apply changes to the document.""" - nonlocal count - count += 1 - - if count == 1: - # Ensure we have 1 ongoing download - downloads = list(dao.get_downloads()) - assert downloads - download = downloads[0] - assert download.status == TransferStatus.ONGOING - - # Pause the download - dao.pause_transfer("download", download.uid, 0.0) - - # Apply changes to the document - remote.update_content(file.uid, b"remotely changed") - - # Call the original function to make the paused download effective - for cb in callback_orig: - cb(downloader) - - count = 0 - remote = self.remote_1 - engine = self.engine_1 - dao = self.engine_1.dao - callback_orig = engine.remote.download_callback - - # Remotely create a file that will be downloaded locally - file = remote.make_file( - f"{SYNC_ROOT_FAC_ID}{self.workspace}", - "test.bin", - content=b"0" * FILE_BUFFER_SIZE * 2, - ) - - # There is no download, right now - assert not list(dao.get_downloads()) - - with patch.object(engine.remote, "download_callback", new=callback): - with ensure_no_exception(): - self.wait_sync(wait_for_async=True) - - # Resync and check the local content is correct - self.wait_sync(wait_for_async=True) - assert not list(dao.get_downloads()) - assert self.local_1.get_content("/test.bin") == b"remotely changed" - - def test_deleting_paused_download(self): - """Deleting a paused download should discard the current download.""" - - def callback(downloader): - """Pause the download and delete the document.""" - # Ensure we have 1 ongoing download - downloads = list(dao.get_downloads()) - assert downloads - download = downloads[0] - assert download.status == TransferStatus.ONGOING - - # Pause the download - dao.pause_transfer("download", download.uid, 0.0) - - # Remove the document - remote.delete(file.uid) - - # Call the original function to make the paused download effective - for cb in callback_orig: - cb(downloader) - - remote = self.remote_1 - engine = self.engine_1 - dao = self.engine_1.dao - callback_orig = engine.remote.download_callback - - # Remotely create a file that will be downloaded locally - file = remote.make_file( - f"{SYNC_ROOT_FAC_ID}{self.workspace}", - "test.bin", - content=b"0" * FILE_BUFFER_SIZE * 2, - ) - - # There is no download, right now - assert not list(dao.get_downloads()) - - with patch.object(engine.remote, "download_callback", new=callback): - with ensure_no_exception(): - self.wait_sync(wait_for_async=True) - - # Resync and check the file does not exist - self.wait_sync(wait_for_async=True) - assert not list(dao.get_downloads()) - assert not self.local_1.exists("/test.bin") - - -class TestUpload(OneUserTest): - def setUp(self): - self.engine_1.start() - self.wait_sync(wait_for_async=True) - - # Lower chunk_* options to have chunked uploads without having to create big files - self.default_chunk_limit = Options.chunk_limit - self.default_chunk_size = Options.chunk_size - Options.chunk_limit = 1 - Options.chunk_size = 1 - - def tearDown(self): - Options.chunk_limit = self.default_chunk_limit - Options.chunk_size = self.default_chunk_size - - def test_pause_upload_manually(self): - """ - Pause the transfer by simulating a click on the pause/resume icon - on the current upload in the systray menu. - """ - - def callback(uploader): - """ - This will mimic what is done in SystrayTranfer.qml: - - call API.pause_transfer() that will call: - - engine.dao.pause_transfer(nature, transfer_uid) - Then the upload will be paused in Remote.upload(). - """ - # Ensure we have 1 ongoing upload - uploads = list(dao.get_uploads()) - assert uploads - upload = uploads[0] - assert upload.status == TransferStatus.ONGOING - - # Pause the upload - dao.pause_transfer("upload", upload.uid, 50.0) - - engine = self.engine_1 - dao = self.engine_1.dao - - # Locally create a file that will be uploaded remotely - self.local_1.make_file("/", "test.bin", content=b"0" * FILE_BUFFER_SIZE * 2) - - # There is no upload, right now - assert not list(dao.get_uploads()) - - with patch.object(engine.remote, "upload_callback", new=callback): - with ensure_no_exception(): - self.wait_sync() - assert dao.get_uploads_with_status(TransferStatus.PAUSED) - - # Resume the upload - engine.resume_transfer("upload", list(dao.get_uploads())[0].uid) - self.wait_sync() - assert not list(dao.get_uploads()) - - def test_pause_upload_automatically(self): - """ - Pause the transfer by simulating an application exit - or clicking on the Suspend menu entry from the systray. - """ - - def callback(uploader): - """This will mimic what is done in SystrayMenu.qml: suspend the app.""" - # Ensure we have 1 ongoing upload - uploads = list(dao.get_uploads()) - assert uploads - upload = uploads[0] - assert upload.status == TransferStatus.ONGOING - - # Suspend! - self.manager_1.suspend() - - engine = self.engine_1 - dao = self.engine_1.dao - - # Locally create a file that will be uploaded remotely - self.local_1.make_file("/", "test.bin", content=b"0" * FILE_BUFFER_SIZE * 2) - - # There is no upload, right now - assert not list(dao.get_uploads()) - - with patch.object(engine.remote, "upload_callback", new=callback): - with ensure_no_exception(): - self.wait_sync() - assert dao.get_uploads_with_status(TransferStatus.SUSPENDED) - - # Resume the upload - self.manager_1.resume() - self.wait_sync() - assert not list(dao.get_uploads()) - - def test_modifying_paused_upload(self): - """Modifying a paused upload should discard the current upload.""" - - def callback(uploader): - """Pause the upload and apply changes to the document.""" - # Ensure we have 1 ongoing upload - uploads = list(dao.get_uploads()) - assert uploads - upload = uploads[0] - assert upload.status == TransferStatus.ONGOING - - # Pause the upload - dao.pause_transfer("upload", upload.uid, 50.0) - - # Apply changes to the document - local.update_content("/test.bin", b"locally changed") - - local = self.local_1 - engine = self.engine_1 - dao = self.engine_1.dao - - # Locally create a file that will be uploaded remotely - self.local_1.make_file("/", "test.bin", content=b"0" * FILE_BUFFER_SIZE * 2) - - # There is no upload, right now - assert not list(dao.get_uploads()) - - with patch.object(engine.remote, "upload_callback", new=callback): - with ensure_no_exception(): - self.wait_sync() - - # Resync and check the local content is correct - self.wait_sync() - assert not list(dao.get_uploads()) - assert self.local_1.get_content("/test.bin") == b"locally changed" - - @not_windows( - reason="Cannot test the behavior as the local deletion is blocked by the OS." - ) - def test_deleting_paused_upload(self): - """Deleting a paused upload should discard the current upload.""" - - def callback(uploader): - """Pause the upload and delete the document.""" - # Ensure we have 1 ongoing upload - uploads = list(dao.get_uploads()) - assert uploads - upload = uploads[0] - assert upload.status == TransferStatus.ONGOING - - # Pause the upload - dao.pause_transfer("upload", upload.uid, 50.0) - - # Remove the document - # (this is the problematic part on Windows, because for the - # file descriptor to be released we need to escape from - # Remote.upload(), which is not possible from here) - local.delete("/test.bin") - - local = self.local_1 - engine = self.engine_1 - dao = self.engine_1.dao - - # Locally create a file that will be uploaded remotely - self.local_1.make_file("/", "test.bin", content=b"0" * FILE_BUFFER_SIZE * 2) - - # There is no upload, right now - assert not list(dao.get_uploads()) - - with patch.object(engine.remote, "upload_callback", new=callback): - with ensure_no_exception(): - self.wait_sync() - - # Resync and check the file does not exist - self.wait_sync() - assert not list(dao.get_uploads()) - assert not self.remote_1.exists("/test.bin") - - def test_not_server_error_upload(self): - """Test an error happening after chunks were uploaded, at the NuxeoDrive.CreateFile operation call.""" - - class BadUploader(SyncUploader): - """Used to simulate bad server responses.""" - - def link_blob_to_doc(self, *args, **kwargs): - """Simulate a server error.""" - raise ValueError("Mocked exception") - - def upload(*args, **kwargs): - """Set our specific uploader to simulate server error.""" - kwargs.pop("uploader", None) - return upload_orig(*args, uploader=BadUploader, **kwargs) - - engine = self.engine_1 - dao = engine.dao - upload_orig = engine.remote.upload - - # Locally create a file that will be uploaded remotely - self.local_1.make_file("/", "test.bin", content=b"0" * FILE_BUFFER_SIZE * 2) - - # There is no upload, right now - assert not list(dao.get_uploads()) - - with patch.object(engine.remote, "upload", new=upload): - with ensure_no_exception(): - self.wait_sync() - - # There should be 1 upload with DONE transfer status - uploads = list(dao.get_uploads()) - assert len(uploads) == 1 - upload = uploads[0] - assert upload.status == TransferStatus.DONE - - # The file on the server should not exist yet - assert not self.remote_1.exists("/test.bin") - - # The doc should be in error - assert len(dao.get_errors(limit=0)) == 1 - - # Reset the error - for state in dao.get_errors(): - dao.reset_error(state) - - # Resync and check the file exist - self.wait_sync() - assert not list(dao.get_uploads()) - assert self.remote_1.exists("/test.bin") - - def test_server_error_but_upload_ok(self): - """ - Test an error happening after chunks were uploaded and the NuxeoDrive.CreateFile operation call. - This could happen if a proxy do not understand well the final requests as seen in NXDRIVE-1753. - """ - - class BadUploader(SyncUploader): - """Used to simulate bad server responses.""" - - def link_blob_to_doc(self, *args, **kwargs): - # Call the original method to effectively end the upload process - super().link_blob_to_doc(*args, **kwargs) - - # The file should be present on the server - # assert self.remote.exists(f"/{file}") - - # There should be 1 upload with DONE transfer status - uploads = list(dao.get_uploads()) - assert len(uploads) == 1 - upload = uploads[0] - assert upload.status == TransferStatus.DONE - - # And throw an error - stack = "The proxy server received an invalid response from an upstream server." - raise HTTPError( - status=502, message="Mocked Proxy Error", stacktrace=stack - ) - - def upload(*args, **kwargs): - """Set our specific uploader to simulate server error.""" - kwargs.pop("uploader", None) - return upload_orig(*args, uploader=BadUploader, **kwargs) - - engine = self.engine_1 - dao = engine.dao - upload_orig = engine.remote.upload - file = "t'ée sんt.bin" - - # Locally create a file that will be uploaded remotely - self.local_1.make_file("/", file, content=b"0" * FILE_BUFFER_SIZE * 2) - - # There is no upload, right now - assert not list(dao.get_uploads()) - - with patch.object(engine.remote, "upload", new=upload): - with ensure_no_exception(): - self.wait_sync() - - # There should be no upload as the Processor has checked the file existence - # on the server and so deleted the upload from the database - assert not list(dao.get_uploads()) - - # Resync and check the file still exists - self.wait_sync() - assert not list(dao.get_uploads()) - assert self.remote_1.exists(f"/{file}") - assert not dao.get_errors(limit=0) - - @pytest.mark.randombug("Randomly fail when run in parallel") - @Options.mock() - def test_server_error_but_upload_ok_idempotent_call(self): - """ - Test an error happening after chunks were uploaded and the NuxeoDrive.CreateFile operation call. - This will cover cases when the app crashed in-between or when an errors happened but the doc was created. - - Thanks to idempotent requests, only one document will be created with success. - """ - - class SerialUploader(SyncUploader): - def link_blob_to_doc(self, *args, **kwargs): - # Test the OngoingRequestError by setting a very small timeout - # (the retry mechanism will trigger several calls almost simultaneously) - kwargs["timeout"] = 0.01 - return super().link_blob_to_doc(*args, **kwargs) - - def upload(*args, **kwargs): - """Set our specific uploader to simulate server error.""" - kwargs.pop("uploader", None) - return upload_orig(*args, uploader=SerialUploader, **kwargs) - - engine = self.engine_1 - remote = self.remote_1 - dao = engine.dao - upload_orig = engine.remote.upload - file = "t'ée sんt.bin" - - # Locally create a file that will be uploaded remotely - self.local_1.make_file("/", file, content=b"0" * FILE_BUFFER_SIZE * 2) - - # There is no upload, right now - assert not list(dao.get_uploads()) - - with patch.object(engine.remote, "upload", new=upload), patch.object( - engine.queue_manager, "_error_interval", new=3 - ), ensure_no_exception(): - self.wait_sync() - - # The file should be present on the server - assert remote.exists(f"/{file}") - children = remote.get_children_info(self.workspace) - assert len(children) == 1 - - # Check the upload - uploads = list(dao.get_uploads()) - assert len(uploads) == 1 - upload = uploads[0] - assert upload.status is TransferStatus.DONE - assert upload.request_uid - - # Resync and checks - with ensure_no_exception(): - self.wait_sync() - - assert not list(dao.get_uploads()) - assert not dao.get_errors(limit=0) - children = remote.get_children_info(self.workspace) - assert len(children) == 1 - - def test_server_error_upload(self): - """Test a server error happening after chunks were uploaded, at the NuxeoDrive.CreateFile operation call.""" - - class BadUploader(SyncUploader): - """Used to simulate bad server responses.""" - - def link_blob_to_doc(self, *args, **kwargs): - """Simulate a server error.""" - raise ConnectionError("Mocked exception") - - def upload(*args, **kwargs): - """Set our specific uploader to simulate server error.""" - kwargs.pop("uploader", None) - return upload_orig(*args, uploader=BadUploader, **kwargs) - - engine = self.engine_1 - dao = engine.dao - upload_orig = engine.remote.upload - - # Locally create a file that will be uploaded remotely - self.local_1.make_file("/", "test.bin", content=b"0" * FILE_BUFFER_SIZE * 2) - - # There is no upload, right now - assert not list(dao.get_uploads()) - - with patch.object(engine.remote, "upload", new=upload): - with ensure_no_exception(): - self.wait_sync() - - # There should be 1 upload with DONE transfer status - uploads = list(dao.get_uploads()) - assert len(uploads) == 1 - upload = uploads[0] - assert upload.status == TransferStatus.DONE - - # The file on the server should not exist yet - assert not self.remote_1.exists("/test.bin") - - # Resync and check the file exists - self.wait_sync() - assert not list(dao.get_uploads()) - assert self.remote_1.exists("/test.bin") - - def test_server_error_upload_invalid_batch_response(self): - """Test a server error happening when asking for a batchId.""" - remote = self.remote_1 - dao = self.engine_1.dao - # Locally create a file that will be uploaded remotely - self.local_1.make_file("/", "test.bin", content=b"0" * 1024) - - # There is no upload, right now - assert not list(dao.get_uploads()) - - # Alter the first request done to get a batchId - url = remote.client.host + remote.uploads.endpoint - with ensure_no_exception(), responses.RequestsMock() as rsps: - # Other requests should not be altered - rsps.add_passthru(re.compile(rf"^(?!{url}).*")) - - html = ( - '' - '' - "The page is temporarily unavailable" - '' - ) - rsps.add(responses.POST, url, body=html) - - self.wait_sync() - - assert not self.remote_1.exists("/test.bin") - assert len(dao.get_errors(limit=0)) == 1 - - # Reset the error - for state in dao.get_errors(): - dao.reset_error(state) - - # Resync and check the file exist - with ensure_no_exception(): - self.wait_sync() - assert not list(dao.get_uploads()) - assert self.remote_1.exists("/test.bin") - - def test_server_error_upload_invalid_chunk_response(self): - """Test a server error happening when uploading a chunk.""" - remote = self.remote_1 - dao = self.engine_1.dao - # Locally create a file that will be uploaded remotely - self.local_1.make_file("/", "test.bin", content=b"0" * FILE_BUFFER_SIZE * 2) - - # There is no upload, right now - assert not list(dao.get_uploads()) - - with responses.RequestsMock() as rsps, ensure_no_exception(): - # All requests should be allowed ... - url = remote.client.host + remote.uploads.endpoint + "/" - rsps.add_passthru(re.compile(rf"^(?!{url}).*")) - - # ... but not the first request to upload a chunk - html = "The page is temporarily unavailable" - chunk_url = re.compile(rf"{url}batchId-[0-9a-f-]+/0") - rsps.add(responses.POST, chunk_url, body=html) - self.wait_sync(timeout=2) - - assert not self.remote_1.exists("/test.bin") - assert len(dao.get_errors(limit=0)) == 1 - - # Reset the error - for state in dao.get_errors(): - dao.reset_error(state) - - # Resync and check the file exist - with ensure_no_exception(): - self.wait_sync() - assert not list(dao.get_uploads()) - assert self.remote_1.exists("/test.bin") - - def test_chunk_upload_error(self): - """Test a server error happening while uploading chunks.""" - - def callback(uploader): - """Mimic a connection issue after chunk 1 is sent.""" - if len(uploader.blob.uploadedChunkIds) > 1: - raise ConnectionError("Mocked error") - - engine = self.engine_1 - dao = self.engine_1.dao - bad_remote = self.get_bad_remote() - bad_remote.upload_callback = callback - - # Locally create a file that will be uploaded remotely - self.local_1.make_file("/", "test.bin", content=b"0" * FILE_BUFFER_SIZE * 4) - - # There is no upload, right now - assert not list(dao.get_uploads()) - - with patch.object(engine, "remote", new=bad_remote), ensure_no_exception(): - self.wait_sync(timeout=3) - - # There should be 1 upload with ONGOING transfer status - uploads = list(dao.get_uploads()) - assert len(uploads) == 1 - upload = uploads[0] - assert upload.status == TransferStatus.ONGOING - - # The file on the server should not exist yet - assert not self.remote_1.exists("/test.bin") - - # Resync and check the file exists - self.wait_sync() - assert not list(dao.get_uploads()) - assert self.remote_1.exists("/test.bin") - - @pytest.mark.randombug("NXDRIVE-2183") - def test_chunk_upload_error_then_server_error_at_linking(self): - """ - [NXDRIVE-2183] More complex scenario: - - Step 1/3: - - start the upload - - it must fail at chunk N - - the upload is then temporary ignored and will be retried later - - Step 2/3: - - for whatever reason, its batch ID is no more valid - - resume the upload - - a new batch ID is given - - upload all chunks successfully - - server error at linking the blob to the document - - the upload is then temporary ignored and will be retried later - - Step 3/3: - - resume the upload - - the batch ID used to resume the upload is the first batch ID, not the second that was used at step 2 - - With NXDRIVE-2183, the step 3 becomes: - - no chunks should be uploaded - - the linking should work - - Note: marking the test as random because we patch several times the Engine - and sometimes it messes with objects at the step 3 and the mocked - RemoteClient is not the good one but the one from step 2 ... - """ - - engine = self.engine_1 - dao = self.engine_1.dao - - # Locally create a file that will be uploaded remotely - self.local_1.make_file("/", "test.bin", content=b"0" * FILE_BUFFER_SIZE * 4) - # There is no upload, right now - assert not list(dao.get_uploads()) - - # Step 1: upload one chunk and fail - - def callback(uploader): - """Mimic a connection issue after chunk 1 is sent.""" - if len(uploader.blob.uploadedChunkIds) > 1: - raise ConnectionError("Mocked error") - - bad_remote1 = self.get_bad_remote() - bad_remote1.upload_callback = callback - batch_id = None - - with patch.object(engine, "remote", new=bad_remote1), ensure_no_exception(): - self.wait_sync(timeout=3) - - # There should be 1 upload with ONGOING transfer status - uploads = list(dao.get_uploads()) - assert len(uploads) == 1 - upload = uploads[0] - batch_id = upload.batch["batchId"] - assert upload.status == TransferStatus.ONGOING - - # The file on the server should not exist yet - assert not self.remote_1.exists("/test.bin") - - # Step 2: alter the batch ID, resume the upload, upload all chunks and fail at linking - - class BadUploader(SyncUploader): - """Used to simulate bad server responses.""" - - def link_blob_to_doc(self, *args, **kwargs): - """Throw an network error.""" - raise HTTPError(status=504, message="Mocked Gateway timeout") - - def upload(*args, **kwargs): - """Set our specific uploader to simulate server error.""" - kwargs.pop("uploader", None) - return upload_orig(*args, uploader=BadUploader, **kwargs) - - bad_remote2 = self.get_bad_remote() - upload_orig = bad_remote2.upload - bad_remote2.upload = upload - - # Change the batch ID - upload = list(dao.get_uploads())[0] - upload.batch["batchId"] = "deadbeef" - engine.dao.update_upload(upload) - - with patch.object(engine, "remote", new=bad_remote2), ensure_no_exception(): - self.wait_sync() - assert list(dao.get_uploads()) - assert not self.remote_1.exists("/test.bin") - - # Step 3: resume the upload, the linking should work - - def callback(uploader): - """Just check the batch ID _did_ change.""" - assert uploader.blob.batchId not in (batch_id, "deadbeaf") - - inspector = self.get_bad_remote() - inspector.upload_callback - - with patch.object(engine, "remote", new=inspector), ensure_no_exception(): - self.wait_sync() - assert not list(dao.get_uploads()) - assert self.remote_1.exists("/test.bin") - - def test_app_crash_simulation(self): - """ - When the app crashed, ongoing transfers will be removed at the next run. - See NXDRIVE-2186 for more information. - - To reproduce the issue, we suspend the transfer in the upload's callback, - then stop the engine and mimic an app crash by manually changing the transfer - status and State.has_crashed value. - """ - - def callback(uploader): - """Suspend the upload and engine.""" - self.manager_1.suspend() - - local = self.local_1 - engine = self.engine_1 - dao = engine.dao - - # Locally create a file that will be uploaded remotely - local.make_file("/", "test.bin", content=b"0" * FILE_BUFFER_SIZE * 2) - - # There is no upload, right now - assert not list(dao.get_uploads()) - - with patch.object(engine.remote, "upload_callback", new=callback): - with ensure_no_exception(): - self.wait_sync() - - # For now, the transfer is only suspended - assert dao.get_uploads_with_status(TransferStatus.SUSPENDED) - - # Stop the engine - engine.stop() - - # Change the transfer status to ongoing and change the global State to reflect a crash - upload = list(dao.get_uploads())[0] - upload.status = TransferStatus.ONGOING - dao.set_transfer_status("upload", upload) - assert dao.get_uploads_with_status(TransferStatus.ONGOING) - - # Simple check: nothing has been uploaded yet - assert not self.remote_1.exists("/test.bin") - - State.has_crashed = True - try: - # Start again the engine, it will manage staled transfers. - # As the app crashed, no transfers should be removed but continued. - with ensure_no_exception(): - engine.start() - self.manager_1.resume() - self.wait_sync() - finally: - State.has_crashed = False - - # Check the file has been uploaded - assert not list(dao.get_uploads()) - assert self.remote_1.exists("/test.bin") diff --git a/tests/old_functional/test_versioning.py b/tests/old_functional/test_versioning.py index d7c9a81437..921887eebb 100644 --- a/tests/old_functional/test_versioning.py +++ b/tests/old_functional/test_versioning.py @@ -1,32 +1,6 @@ import time -from .common import OS_STAT_MTIME_RESOLUTION, OneUserTest, TwoUsersTest - - -class TestVersioning(OneUserTest): - def test_version_restore(self): - remote = self.remote_document_client_1 - local = self.local_1 - - self.engine_1.start() - - # Create a remote doc - doc = remote.make_file( - self.workspace, "Document to restore.txt", content=b"Initial content." - ) - self.wait_sync(wait_for_async=True) - assert local.exists("/Document to restore.txt") - assert local.get_content("/Document to restore.txt") == b"Initial content." - - # Create version 1.0, update content, then restore version 1.0 - remote.create_version(doc, "Major") - remote.update(doc, properties={"note:note": "Updated content."}) - self.wait_sync(wait_for_async=True) - assert local.get_content("/Document to restore.txt") == b"Updated content." - version_uid = remote.get_versions(doc)[0][0] - remote.restore_version(version_uid) - self.wait_sync(wait_for_async=True) - assert local.get_content("/Document to restore.txt") == b"Initial content." +from .common import OS_STAT_MTIME_RESOLUTION, TwoUsersTest class TestVersioning2(TwoUsersTest): diff --git a/tests/old_functional/test_volume.py b/tests/old_functional/test_volume.py deleted file mode 100644 index 41c787a314..0000000000 --- a/tests/old_functional/test_volume.py +++ /dev/null @@ -1,320 +0,0 @@ -import logging -import os -import shutil -from copy import copy -from pathlib import Path - -import pytest - -from nxdrive.constants import ROOT - -from ..utils import random_png -from .common import OneUserTest - - -def configure_logs(): - """Configure the logging module to prevent too many data being logged.""" - - from nxdrive.logging_config import configure - - configure( - console_level="WARNING", - file_level="WARNING", - command_name="volume", - force_configure=True, - ) - - -configure_logs() - -log = logging.getLogger(__name__) - -FOLDERS = FILES = DEPTH = 0 - -if "TEST_VOLUME" in os.environ: - values_ = os.getenv("TEST_VOLUME", "") - if not values_: - del os.environ["TEST_VOLUME"] - else: - if values_.count(",") != 2: - # Low volume by default - values_ = "3,10,2" # 200 documents - - FOLDERS, FILES, DEPTH = map(int, values_.split(",")) - del values_ - - -def get_name(folder: bool, depth: int, number: int) -> str: - if folder: - return f"folder_{depth:03d}_{number:03d}" - return f"file_{depth:03d}_{number:04d}.png" - - -def get_path(folder, depth, number) -> Path: - child = ROOT - for i in range(DEPTH + 1 - depth, DEPTH + 1): - if i == 1 and not folder: - child = ROOT / get_name(False, DEPTH - i + 1, number) - child = ROOT / get_name(True, DEPTH - i + 1, number) / child - return child - - -@pytest.mark.skipif( - "TEST_VOLUME" not in os.environ, - reason="Deactivate if not launched on purpose with TEST_VOLUME set", -) -class TestVolume(OneUserTest): - def create_tree(self, folders, files, depth, parent) -> int: - items = 0 - - if depth < 1: - return items - - for folder in range(folders): - foldername = get_name(True, DEPTH - depth + 1, folder + 1) - folderobj = {"path": os.path.join(parent["path"], foldername)} - self.local_1.make_folder(parent["path"], foldername) - items += 1 - - folderobj["name"] = foldername - folderobj["children"] = {} - abspath = self.local_1.abspath(folderobj["path"]) - parent["children"][foldername] = folderobj - - items += self.create_tree(folders, files, depth - 1, folderobj) - - for file in range(files): - filename = get_name(False, DEPTH - depth + 1, file + 1) - folderobj["children"][filename] = {"name": filename} - random_png(Path(abspath) / filename) - items += 1 - - return items - - def create(self, stopped=True, wait_for_sync=True): - self.engine_1.start() - self.wait_sync() - if not stopped: - self.engine_1.stop() - - self.tree = {"children": {}, "path": ROOT} - items = self.create_tree(FOLDERS, FILES, DEPTH, self.tree) - log.warning(f"Created {items:,} local documents.") - - if not stopped: - self.engine_1.start() - - if wait_for_sync: - self.wait_sync(timeout=items * 10) - - return items - - def _check_folder(self, path: Path, removed=[], added=[]): - # First get the remote id - remote_id = self.local_1.get_remote_id(path) - assert remote_id - - # Get depth - depth = int(path.name.split("_")[1]) - - # Calculate expected children - children = {} - if depth != DEPTH: - for i in range(1, FOLDERS + 1): - children[get_name(True, depth + 1, i)] = True - for i in range(FILES): - children[get_name(False, depth, i)] = True - for name in removed: - children.pop(name, None) - for name in added: - children[name] = True - - # Local checks - os_children = os.listdir() - assert len(os_children) == len(children) - cmp_children = copy(children) - remote_refs = {} - for child in self.local_1.abspath(path).iterdir(): - name = child.name - file = cmp_children.pop(name, None) - if not file: - self.fail(f"Unexpected local child {name!r} in {path}") - remote_ref = self.local_1.get_remote_id(child) - assert remote_ref - remote_refs[remote_ref] = name - assert not cmp_children - - # Remote checks - remote_children = self.remote_1.get_fs_children(remote_id) - assert len(remote_children) == len(children) - for child in remote_children: - if child.uid not in remote_refs: - self.fail(f'Unexpected remote child "{child.name}" in {path}') - assert child.name == remote_refs[child.uid] - - def test_moves_while_creating(self): - items = self.create(stopped=False, wait_for_sync=False) - self._moves(items) - - def test_moves(self): - items = self.create() - self._moves(items) - - def test_moves_stopped(self): - items = self.create() - self._moves(items, stopped=True) - - def test_moves_while_creating_stopped(self): - items = self.create(stopped=False, wait_for_sync=False) - self._moves(items, stopped=True) - - def _moves(self, items: int, stopped: bool = False) -> None: - if stopped: - self.engine_1.stop() - - # While we are started - # Move one parent to the second children - if len(self.tree["children"]) < 3 or DEPTH < 2: - self.app.quit() - pytest.skip("Can't execute this test on so few data") - - # Move root 2 in, first subchild of 1 - root_2 = get_path(True, 1, 2) - child = get_path(True, DEPTH, 1) - shutil.move(self.local_1.abspath(root_2), self.local_1.abspath(child)) - - root_1 = get_path(True, 1, 1) - root_3 = get_path(True, 1, 3) - shutil.move(self.local_1.abspath(root_1), self.local_1.abspath(root_3)) - - # Update paths - child = ROOT / get_name(True, 1, 3) / child - root_2 = ROOT / child / get_name(True, 1, 2) - root_1 = ROOT / root_3 / get_name(True, 1, 1) - if stopped: - self.engine_1.start() - self.wait_sync(wait_for_async=True, timeout=items * 10) - - # Checks - self._check_folder(root_3, added=[get_name(True, 1, 1)]) - self._check_folder(child, added=[get_name(True, 1, 2)]) - self._check_folder(root_1) - self._check_folder(root_2) - - # We should not have any error - assert not self.engine_1.dao.get_errors(limit=0) - - def test_copies(self): - items = self.create() - self._copies(items) - - def test_copies_stopped(self): - items = self.create() - self._copies(items, stopped=True) - - def test_copies_while_creating(self): - items = self.create(stopped=False, wait_for_sync=False) - self._copies(items) - - def test_copies_while_creating_stopped(self): - items = self.create(stopped=False, wait_for_sync=False) - self._copies(items, stopped=True) - - def _copies(self, items: int, stopped: bool = False) -> None: - if stopped: - self.engine_1.stop() - - # Copy root 2 in, first subchild of 1 - root_2 = get_path(True, 1, 2) - child = get_path(True, DEPTH, 1) - shutil.copytree( - self.local_1.abspath(root_2), - self.local_1.abspath(child / get_name(True, 1, 2)), - ) - - # New copies - root_1 = get_path(True, 1, 1) - root_3 = get_path(True, 1, 3) - root_4 = get_path(True, 1, DEPTH + 1) - root_5 = get_path(True, 1, DEPTH + 2) - shutil.copytree( - self.local_1.abspath(root_1), - self.local_1.abspath(root_3 / get_name(True, 1, 1)), - ) - - shutil.copytree(self.local_1.abspath(root_3), self.local_1.abspath(root_4)) - shutil.copytree(self.local_1.abspath(root_3), self.local_1.abspath(root_5)) - - # Update paths - child = ROOT / get_name(True, 1, 3) / child - root_2 = ROOT / child / get_name(True, 1, 2) - root_1 = ROOT / root_3 / get_name(True, 1, 1) - root_1_path = self.local_1.abspath(root_1) - child_path = self.local_1.abspath(child) - - # Copies files from one folder to another - added_files = [] - for path in child_path.iterdir(): - if not path.is_file(): - continue - shutil.copy(path, root_1_path) - added_files.append(path.name) - - if stopped: - self.engine_1.start() - self.wait_sync(wait_for_async=True, timeout=items * 10) - - # Checks - self._check_folder(root_3, added=[get_name(True, 1, 1)]) - self._check_folder(child, added=[get_name(True, 1, 2)]) - self._check_folder(root_1, added=added_files) - self._check_folder(root_2) - - # Check original copied - self._check_folder(get_path(True, 1, 1)) - self._check_folder(get_path(True, 1, 2)) - self._check_folder(get_path(True, 1, DEPTH + 1), added=[get_name(True, 1, 1)]) - self._check_folder(get_path(True, 1, DEPTH + 2), added=[get_name(True, 1, 1)]) - - # We should not have any error - assert not self.engine_1.dao.get_errors(limit=0) - - -@pytest.mark.skipif( - "TEST_REMOTE_SCAN_VOLUME" not in os.environ - or int(os.environ["TEST_REMOTE_SCAN_VOLUME"]) == 0, - reason="Skipped as TEST_REMOTE_SCAN_VOLUME is no set", -) -class TestVolumeRemoteScan(OneUserTest): - def test_remote_scan(self): - nb_nodes = int(os.getenv("TEST_REMOTE_SCAN_VOLUME", 20)) - - # Random mass import - self.root_remote.mass_import(self.ws.path, nb_nodes) - - # Wait for ES indexing - self.root_remote.wait_for_async_and_es_indexing() - - # Synchronize - self.engine_1.start() - self.wait_sync(wait_for_async=True, timeout=nb_nodes**2) - - query = ( - f"SELECT ecm:uuid FROM Document WHERE ecm:ancestorId = {self.workspace!r}" - " AND ecm:isVersion = 0" - " AND ecm:isTrashed = 0" - " AND ecm:mixinType != 'HiddenInNavigation'" - ) - doc_count = self.root_remote.result_set_query(query)["resultsCount"] - log.warning(f"Created {doc_count:,} documents (nb_nodes={nb_nodes:,}).") - - # Check local tree - local_doc_count = sum( - self.get_local_child_count( - self.local_nxdrive_folder_1 / self.workspace_title - ) - ) - assert local_doc_count == doc_count - - # We should not have any error - assert not self.engine_1.dao.get_errors(limit=0) diff --git a/tests/old_functional/test_watchers.py b/tests/old_functional/test_watchers.py index 662c6ad10b..e4159e4961 100644 --- a/tests/old_functional/test_watchers.py +++ b/tests/old_functional/test_watchers.py @@ -3,11 +3,7 @@ from queue import Queue from shutil import copyfile from time import sleep -from unittest.mock import patch -from nxdrive.constants import ROOT - -from ..markers import not_windows from . import LocalTest from .common import OneUserTest @@ -62,76 +58,6 @@ def get_full_queue(self, queue, dao=None): result.append(dao.get_state_from_id(queue.pop().id)) return result - def test_local_scan(self): - files, folders = self.make_local_tree() - self.queue_manager_1.suspend() - self.queue_manager_1._disable = True - self.engine_1.start() - self.wait_sync() - - # Workspace should have been reconcile - res = self.engine_1.dao.get_states_from_partial_local(ROOT) - # With root - count = folders + files + 1 - assert len(res) == count - - def test_reconcile_scan(self): - files, folders = self.make_local_tree() - self.make_server_tree() - # Wait for ES indexing - self.wait() - manager = self.queue_manager_1 - manager.suspend() - manager._disable = True - self.engine_1.start() - self.wait_sync() - # Depending on remote scan results order, the remote - # duplicated file with the same digest as the local file - # might come first, in which case we get an extra synchronized file, - # or not, in which case we get a conflicted file - assert self.engine_1.dao.get_sync_count() >= folders + files - # Verify it has been reconciled and all items in queue are synchronized - queue = self.get_full_queue(copy_queue(manager._local_file_queue)) - for item in queue: - if item.remote_name == "Duplicated File.txt": - assert item.pair_state in ["synchronized", "conflicted"] - else: - assert item.pair_state == "synchronized" - queue = self.get_full_queue(copy_queue(manager._local_folder_queue)) - for item in queue: - assert item.pair_state == "synchronized" - - def test_remote_scan(self): - total = len(self.make_server_tree()) - # Add the workspace folder + the root - total += 2 - # Wait for ES indexing - self.wait() - self.queue_manager_1.suspend() - self.queue_manager_1._disable = True - self.engine_1.start() - self.wait_sync() - res = self.engine_1.dao.get_states_from_partial_local(ROOT) - assert len(res) == total - - def test_local_watchdog_creation(self): - # Test the creation after first local scan - self.queue_manager_1.suspend() - self.queue_manager_1._disable = True - self.engine_1.start() - self.wait_sync() - metrics = self.queue_manager_1.get_metrics() - assert not metrics["local_folder_queue"] - assert not metrics["local_file_queue"] - files, folders = self.make_local_tree() - self.wait_sync(timeout=3, fail_if_timeout=False) - metrics = self.queue_manager_1.get_metrics() - assert metrics["local_folder_queue"] - assert metrics["local_file_queue"] - res = self.engine_1.dao.get_states_from_partial_local(ROOT) - # With root - assert len(res) == folders + files + 1 - def _delete_folder_1(self): path = Path("Folder 1") self.local_1.delete_final(path) @@ -162,17 +88,6 @@ def test_local_scan_delete_non_synced(self): children = self.engine_1.dao.get_states_from_partial_local(path) assert not children - def test_local_watchdog_delete_synced(self): - # Test the deletion after first local scan - self.test_reconcile_scan() - path = self._delete_folder_1() - child = self.engine_1.dao.get_state_from_local(path) - assert child.pair_state == "locally_deleted" - children = self.engine_1.dao.get_states_from_partial_local(path) - assert len(children) == 5 - for child in children: - assert child.pair_state == "locally_deleted" - def test_local_scan_delete_synced(self): # Test the deletion after first local scan self.test_reconcile_scan() @@ -187,28 +102,6 @@ def test_local_scan_delete_synced(self): for child in children: assert child.pair_state == "locally_deleted" - def test_local_scan_error(self): - local = self.local_1 - remote = self.remote_document_client_1 - # Synchronize test workspace - self.engine_1.start() - self.wait_sync() - self.engine_1.stop() - # Create a local file and use an invalid digest function - # in local watcher file system client to trigger an error - # during local scan - local.make_file("/", "Test file.odt", content=b"Content") - - with patch.object(self.engine_1.local, "_digest_func", return_value="invalid"): - self.engine_1.start() - self.wait_sync() - self.engine_1.stop() - assert not remote.exists("/Test file.odt") - - self.engine_1.start() - self.wait_sync() - assert remote.exists("/Test file.odt") - def test_local_scan_encoding(self): local = self.local_1 remote = self.remote_document_client_1 @@ -295,30 +188,6 @@ def test_local_scan_encoding(self): assert not remote.exists("/P\xf4le applicatif/e\u0302tre ou ne pas \xeatre.odt") assert not remote.exists("/P\xf4le applicatif/avoir et e\u0302tre.odt") - @not_windows(reason="Windows cannot have file ending with a space.") - def test_watchdog_space_remover(self): - """ - Test files and folders ending with space. - """ - - local = self.local_1 - remote = self.remote_document_client_1 - - self.engine_1.start() - self.wait_sync() - - local.make_file("/", "Accentue\u0301.odt ", content=b"Content") - self.wait_sync() - assert remote.exists("/Accentue\u0301.odt") - assert not remote.exists("/Accentue\u0301.odt ") - - local.rename("/Accentu\xe9.odt", "Accentu\xe9 avec un \xea et un \xe9.odt ") - self.wait_sync() - assert ( - remote.get_info("/Accentu\xe9 avec un \xea et un \xe9.odt").name - == "Accentu\xe9 avec un \xea et un \xe9.odt" - ) - def test_watchdog_encoding(self): local = self.local_1 remote = self.remote_document_client_1 @@ -382,24 +251,6 @@ def test_watchdog_encoding(self): assert not remote.exists("/Sub folder/avoir et e\u0302tre.odt") assert not remote.exists("/Sub folder/e\u0302tre ou ne pas \xeatre.odt") - def test_watcher_remote_id_setter(self): - local = self.local_1 - # As some user can rewrite same file for no reason - # Start engine - self.engine_1.start() - # Wait for test workspace synchronization - self.wait_sync() - # Create files with Unicode combining accents, - # Unicode latin characters and no special characters - file_path = local.abspath("/Test.pdf") - copyfile(self.location / "resources" / "files" / "testFile.pdf", file_path) - # Wait for test workspace synchronization - self.wait_sync() - remote_id = local.get_remote_id("/Test.pdf") - copyfile(self.location / "resources" / "files" / "testFile.pdf", file_path) - self.wait_sync() - assert remote_id == local.get_remote_id("/Test.pdf") - def test_watcher_remote_id_setter_stopped(self): # Some user can rewrite the same file for no reason From e94a77c0520c746e48163f30da60a9c7d9b49f2d Mon Sep 17 00:00:00 2001 From: Anindya Roy <104991338+gitofanindya@users.noreply.github.com> Date: Thu, 8 Feb 2024 14:12:55 +0530 Subject: [PATCH 35/36] NXDRIVE-2860: Code Coverage -- changed md file --- docs/changes/5.5.0.md | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/changes/5.5.0.md b/docs/changes/5.5.0.md index e90f55b148..ac12c07cc4 100644 --- a/docs/changes/5.5.0.md +++ b/docs/changes/5.5.0.md @@ -25,6 +25,7 @@ Release date: `2024-xx-xx` ## Tests +- [NXDRIVE-2860](https://jira.nuxeo.com/browse/NXDRIVE-2860): Code Coverage - [NXDRIVE-2](https://jira.nuxeo.com/browse/NXDRIVE-2): ## Docs From 86c7f33dd82e7212a6078c1c05bb660191e5fd02 Mon Sep 17 00:00:00 2001 From: Anindya Roy Date: Tue, 13 Feb 2024 11:08:24 +0530 Subject: [PATCH 36/36] NXDRIVE-2860: Code Coverage - 13-02 - changed py file --- nxdrive/utils.py | 1 + 1 file changed, 1 insertion(+) diff --git a/nxdrive/utils.py b/nxdrive/utils.py index 1b7c5cec92..eb180ec734 100644 --- a/nxdrive/utils.py +++ b/nxdrive/utils.py @@ -1227,6 +1227,7 @@ def save_config(config_dump: Dict[str, Any], /) -> Path: conf_path.parent.mkdir(exist_ok=True, parents=True) with open(conf_path, "w", encoding="utf-8") as output: config.write(output) + # returning the path return conf_path