From c5fb22694e95f12c24b8d8af76999be7aea3fcec Mon Sep 17 00:00:00 2001 From: 魏曹先生 <1992414357@qq.com> Date: Mon, 12 Jan 2026 04:28:28 +0800 Subject: Reorganize crate structure and move documentation files --- .gitignore | 2 +- Cargo.toml | 46 +- actions/Cargo.toml | 29 + actions/src/actions.rs | 288 ++++++ actions/src/actions/local_actions.rs | 525 +++++++++++ actions/src/actions/sheet_actions.rs | 583 ++++++++++++ actions/src/actions/track_action.rs | 987 +++++++++++++++++++++ actions/src/actions/user_actions.rs | 144 +++ actions/src/actions/vault_actions.rs | 1 + actions/src/connection.rs | 3 + actions/src/connection/action_service.rs | 221 +++++ actions/src/connection/error.rs | 14 + actions/src/connection/protocol.rs | 7 + actions/src/lib.rs | 3 + actions/src/registry.rs | 2 + actions/src/registry/client_registry.rs | 123 +++ actions/src/registry/server_registry.rs | 36 + crates/system_action/Cargo.toml | 15 - crates/system_action/action_macros/Cargo.toml | 19 - crates/system_action/action_macros/src/lib.rs | 248 ------ crates/system_action/src/action.rs | 244 ----- crates/system_action/src/action_pool.rs | 247 ------ crates/system_action/src/lib.rs | 6 - crates/utils/cfg_file/Cargo.toml | 23 - crates/utils/cfg_file/cfg_file_derive/Cargo.toml | 11 - crates/utils/cfg_file/cfg_file_derive/src/lib.rs | 130 --- crates/utils/cfg_file/cfg_file_test/Cargo.toml | 9 - crates/utils/cfg_file/cfg_file_test/src/lib.rs | 95 -- crates/utils/cfg_file/src/config.rs | 263 ------ crates/utils/cfg_file/src/lib.rs | 7 - crates/utils/data_struct/Cargo.toml | 10 - crates/utils/data_struct/src/bi_map.rs | 239 ----- crates/utils/data_struct/src/data_sort.rs | 232 ----- crates/utils/data_struct/src/lib.rs | 2 - crates/utils/sha1_hash/Cargo.toml | 9 - crates/utils/sha1_hash/res/story.txt | 48 - crates/utils/sha1_hash/res/story_crlf.sha1 | 1 - crates/utils/sha1_hash/res/story_lf.sha1 | 1 - crates/utils/sha1_hash/src/lib.rs | 257 ------ crates/utils/string_proc/Cargo.toml | 7 - 
crates/utils/string_proc/src/format_path.rs | 111 --- crates/utils/string_proc/src/format_processer.rs | 132 --- crates/utils/string_proc/src/lib.rs | 50 -- crates/utils/string_proc/src/macros.rs | 63 -- crates/utils/string_proc/src/simple_processer.rs | 15 - crates/utils/tcp_connection/Cargo.toml | 28 - crates/utils/tcp_connection/src/error.rs | 122 --- crates/utils/tcp_connection/src/instance.rs | 542 ----------- .../utils/tcp_connection/src/instance_challenge.rs | 311 ------- crates/utils/tcp_connection/src/lib.rs | 6 - .../tcp_connection/tcp_connection_test/Cargo.toml | 9 - .../res/image/test_transfer.png | Bin 1001369 -> 0 bytes .../tcp_connection_test/res/key/test_key.pem | 13 - .../res/key/test_key_private.pem | 51 -- .../res/key/wrong_key_private.pem | 52 -- .../tcp_connection/tcp_connection_test/src/lib.rs | 17 - .../tcp_connection_test/src/test_challenge.rs | 160 ---- .../tcp_connection_test/src/test_connection.rs | 78 -- .../tcp_connection_test/src/test_file_transfer.rs | 94 -- .../tcp_connection_test/src/test_msgpack.rs | 103 --- .../src/test_tcp_target_build.rs | 32 - .../tcp_connection_test/src/test_utils.rs | 4 - .../tcp_connection_test/src/test_utils/handle.rs | 11 - .../tcp_connection_test/src/test_utils/target.rs | 201 ----- .../src/test_utils/target_configure.rs | 53 -- .../src/test_utils/target_connection.rs | 89 -- crates/vcs_actions/Cargo.toml | 29 - crates/vcs_actions/src/actions.rs | 288 ------ crates/vcs_actions/src/actions/local_actions.rs | 525 ----------- crates/vcs_actions/src/actions/sheet_actions.rs | 583 ------------ crates/vcs_actions/src/actions/track_action.rs | 987 --------------------- crates/vcs_actions/src/actions/user_actions.rs | 144 --- crates/vcs_actions/src/actions/vault_actions.rs | 1 - crates/vcs_actions/src/connection.rs | 3 - .../vcs_actions/src/connection/action_service.rs | 221 ----- crates/vcs_actions/src/connection/error.rs | 14 - crates/vcs_actions/src/connection/protocol.rs | 7 - crates/vcs_actions/src/lib.rs | 
3 - crates/vcs_actions/src/registry.rs | 2 - crates/vcs_actions/src/registry/client_registry.rs | 123 --- crates/vcs_actions/src/registry/server_registry.rs | 36 - crates/vcs_data/Cargo.toml | 40 - crates/vcs_data/src/constants.rs | 118 --- crates/vcs_data/src/current.rs | 84 -- crates/vcs_data/src/data.rs | 5 - crates/vcs_data/src/data/local.rs | 269 ------ crates/vcs_data/src/data/local/align.rs | 110 --- crates/vcs_data/src/data/local/cached_sheet.rs | 94 -- crates/vcs_data/src/data/local/config.rs | 375 -------- crates/vcs_data/src/data/local/latest_file_data.rs | 105 --- crates/vcs_data/src/data/local/latest_info.rs | 83 -- crates/vcs_data/src/data/local/local_files.rs | 148 --- crates/vcs_data/src/data/local/local_sheet.rs | 377 -------- crates/vcs_data/src/data/local/vault_modified.rs | 30 - .../vcs_data/src/data/local/workspace_analyzer.rs | 327 ------- crates/vcs_data/src/data/member.rs | 71 -- crates/vcs_data/src/data/sheet.rs | 280 ------ crates/vcs_data/src/data/user.rs | 28 - crates/vcs_data/src/data/user/accounts.rs | 164 ---- crates/vcs_data/src/data/vault.rs | 132 --- crates/vcs_data/src/data/vault/config.rs | 233 ----- crates/vcs_data/src/data/vault/member.rs | 144 --- crates/vcs_data/src/data/vault/service.rs | 40 - crates/vcs_data/src/data/vault/sheet_share.rs | 424 --------- crates/vcs_data/src/data/vault/sheets.rs | 274 ------ crates/vcs_data/src/data/vault/virtual_file.rs | 506 ----------- crates/vcs_data/src/lib.rs | 5 - crates/vcs_data/todo.md | 31 - crates/vcs_data/vcs_data_test/Cargo.toml | 13 - crates/vcs_data/vcs_data_test/lib.rs | 11 - crates/vcs_data/vcs_data_test/src/lib.rs | 30 - ...local_workspace_setup_and_account_management.rs | 248 ------ ...st_sheet_creation_management_and_persistence.rs | 275 ------ .../test_sheet_share_creation_and_management.rs | 631 ------------- .../src/test_vault_setup_and_member_register.rs | 67 -- .../src/test_virtual_file_creation_and_update.rs | 162 ---- crates/vcs_docs/Cargo.toml | 6 - 
crates/vcs_docs/build.rs | 196 ---- crates/vcs_docs/src/docs.rs.template | 26 - crates/vcs_docs/src/lib.rs | 1 - data/Cargo.toml | 40 + data/src/constants.rs | 118 +++ data/src/current.rs | 84 ++ data/src/data.rs | 5 + data/src/data/local.rs | 269 ++++++ data/src/data/local/align.rs | 110 +++ data/src/data/local/cached_sheet.rs | 94 ++ data/src/data/local/config.rs | 375 ++++++++ data/src/data/local/latest_file_data.rs | 105 +++ data/src/data/local/latest_info.rs | 83 ++ data/src/data/local/local_files.rs | 148 +++ data/src/data/local/local_sheet.rs | 377 ++++++++ data/src/data/local/vault_modified.rs | 30 + data/src/data/local/workspace_analyzer.rs | 327 +++++++ data/src/data/member.rs | 71 ++ data/src/data/sheet.rs | 280 ++++++ data/src/data/user.rs | 28 + data/src/data/user/accounts.rs | 164 ++++ data/src/data/vault.rs | 132 +++ data/src/data/vault/config.rs | 233 +++++ data/src/data/vault/member.rs | 144 +++ data/src/data/vault/service.rs | 40 + data/src/data/vault/sheet_share.rs | 424 +++++++++ data/src/data/vault/sheets.rs | 274 ++++++ data/src/data/vault/virtual_file.rs | 506 +++++++++++ data/src/lib.rs | 5 + data/tests/Cargo.toml | 13 + data/tests/src/lib.rs | 30 + ...local_workspace_setup_and_account_management.rs | 248 ++++++ ...st_sheet_creation_management_and_persistence.rs | 275 ++++++ .../test_sheet_share_creation_and_management.rs | 631 +++++++++++++ .../src/test_vault_setup_and_member_register.rs | 67 ++ .../src/test_virtual_file_creation_and_update.rs | 162 ++++ docs/Cargo.toml | 6 + docs/build.rs | 196 ++++ docs/src/docs.rs | 387 ++++++++ docs/src/docs.rs.template | 26 + docs/src/lib.rs | 1 + examples/Cargo.toml | 8 +- systems/action/Cargo.toml | 15 + systems/action/action_macros/Cargo.toml | 19 + systems/action/action_macros/src/lib.rs | 248 ++++++ systems/action/src/action.rs | 244 +++++ systems/action/src/action_pool.rs | 247 ++++++ systems/action/src/lib.rs | 6 + utils/cfg_file/Cargo.toml | 23 + utils/cfg_file/cfg_file_derive/Cargo.toml | 11 + 
utils/cfg_file/cfg_file_derive/src/lib.rs | 130 +++ utils/cfg_file/cfg_file_test/Cargo.toml | 9 + utils/cfg_file/cfg_file_test/src/lib.rs | 95 ++ utils/cfg_file/src/config.rs | 263 ++++++ utils/cfg_file/src/lib.rs | 7 + utils/data_struct/Cargo.toml | 10 + utils/data_struct/src/bi_map.rs | 239 +++++ utils/data_struct/src/data_sort.rs | 232 +++++ utils/data_struct/src/lib.rs | 2 + utils/sha1_hash/Cargo.toml | 9 + utils/sha1_hash/res/story.txt | 48 + utils/sha1_hash/res/story_crlf.sha1 | 1 + utils/sha1_hash/res/story_lf.sha1 | 1 + utils/sha1_hash/src/lib.rs | 257 ++++++ utils/string_proc/Cargo.toml | 7 + utils/string_proc/src/format_path.rs | 111 +++ utils/string_proc/src/format_processer.rs | 132 +++ utils/string_proc/src/lib.rs | 50 ++ utils/string_proc/src/macros.rs | 63 ++ utils/string_proc/src/simple_processer.rs | 15 + utils/tcp_connection/Cargo.toml | 28 + utils/tcp_connection/src/error.rs | 122 +++ utils/tcp_connection/src/instance.rs | 542 +++++++++++ utils/tcp_connection/src/instance_challenge.rs | 311 +++++++ utils/tcp_connection/src/lib.rs | 6 + .../tcp_connection/tcp_connection_test/Cargo.toml | 9 + .../res/image/test_transfer.png | Bin 0 -> 1001369 bytes .../tcp_connection_test/res/key/test_key.pem | 13 + .../res/key/test_key_private.pem | 51 ++ .../res/key/wrong_key_private.pem | 52 ++ .../tcp_connection/tcp_connection_test/src/lib.rs | 17 + .../tcp_connection_test/src/test_challenge.rs | 160 ++++ .../tcp_connection_test/src/test_connection.rs | 78 ++ .../tcp_connection_test/src/test_file_transfer.rs | 94 ++ .../tcp_connection_test/src/test_msgpack.rs | 103 +++ .../src/test_tcp_target_build.rs | 32 + .../tcp_connection_test/src/test_utils.rs | 4 + .../tcp_connection_test/src/test_utils/handle.rs | 11 + .../tcp_connection_test/src/test_utils/target.rs | 201 +++++ .../src/test_utils/target_configure.rs | 53 ++ .../src/test_utils/target_connection.rs | 89 ++ 208 files changed, 13972 insertions(+), 13627 deletions(-) create mode 100644 actions/Cargo.toml 
create mode 100644 actions/src/actions.rs create mode 100644 actions/src/actions/local_actions.rs create mode 100644 actions/src/actions/sheet_actions.rs create mode 100644 actions/src/actions/track_action.rs create mode 100644 actions/src/actions/user_actions.rs create mode 100644 actions/src/actions/vault_actions.rs create mode 100644 actions/src/connection.rs create mode 100644 actions/src/connection/action_service.rs create mode 100644 actions/src/connection/error.rs create mode 100644 actions/src/connection/protocol.rs create mode 100644 actions/src/lib.rs create mode 100644 actions/src/registry.rs create mode 100644 actions/src/registry/client_registry.rs create mode 100644 actions/src/registry/server_registry.rs delete mode 100644 crates/system_action/Cargo.toml delete mode 100644 crates/system_action/action_macros/Cargo.toml delete mode 100644 crates/system_action/action_macros/src/lib.rs delete mode 100644 crates/system_action/src/action.rs delete mode 100644 crates/system_action/src/action_pool.rs delete mode 100644 crates/system_action/src/lib.rs delete mode 100644 crates/utils/cfg_file/Cargo.toml delete mode 100644 crates/utils/cfg_file/cfg_file_derive/Cargo.toml delete mode 100644 crates/utils/cfg_file/cfg_file_derive/src/lib.rs delete mode 100644 crates/utils/cfg_file/cfg_file_test/Cargo.toml delete mode 100644 crates/utils/cfg_file/cfg_file_test/src/lib.rs delete mode 100644 crates/utils/cfg_file/src/config.rs delete mode 100644 crates/utils/cfg_file/src/lib.rs delete mode 100644 crates/utils/data_struct/Cargo.toml delete mode 100644 crates/utils/data_struct/src/bi_map.rs delete mode 100644 crates/utils/data_struct/src/data_sort.rs delete mode 100644 crates/utils/data_struct/src/lib.rs delete mode 100644 crates/utils/sha1_hash/Cargo.toml delete mode 100644 crates/utils/sha1_hash/res/story.txt delete mode 100644 crates/utils/sha1_hash/res/story_crlf.sha1 delete mode 100644 crates/utils/sha1_hash/res/story_lf.sha1 delete mode 100644 
crates/utils/sha1_hash/src/lib.rs delete mode 100644 crates/utils/string_proc/Cargo.toml delete mode 100644 crates/utils/string_proc/src/format_path.rs delete mode 100644 crates/utils/string_proc/src/format_processer.rs delete mode 100644 crates/utils/string_proc/src/lib.rs delete mode 100644 crates/utils/string_proc/src/macros.rs delete mode 100644 crates/utils/string_proc/src/simple_processer.rs delete mode 100644 crates/utils/tcp_connection/Cargo.toml delete mode 100644 crates/utils/tcp_connection/src/error.rs delete mode 100644 crates/utils/tcp_connection/src/instance.rs delete mode 100644 crates/utils/tcp_connection/src/instance_challenge.rs delete mode 100644 crates/utils/tcp_connection/src/lib.rs delete mode 100644 crates/utils/tcp_connection/tcp_connection_test/Cargo.toml delete mode 100644 crates/utils/tcp_connection/tcp_connection_test/res/image/test_transfer.png delete mode 100644 crates/utils/tcp_connection/tcp_connection_test/res/key/test_key.pem delete mode 100644 crates/utils/tcp_connection/tcp_connection_test/res/key/test_key_private.pem delete mode 100644 crates/utils/tcp_connection/tcp_connection_test/res/key/wrong_key_private.pem delete mode 100644 crates/utils/tcp_connection/tcp_connection_test/src/lib.rs delete mode 100644 crates/utils/tcp_connection/tcp_connection_test/src/test_challenge.rs delete mode 100644 crates/utils/tcp_connection/tcp_connection_test/src/test_connection.rs delete mode 100644 crates/utils/tcp_connection/tcp_connection_test/src/test_file_transfer.rs delete mode 100644 crates/utils/tcp_connection/tcp_connection_test/src/test_msgpack.rs delete mode 100644 crates/utils/tcp_connection/tcp_connection_test/src/test_tcp_target_build.rs delete mode 100644 crates/utils/tcp_connection/tcp_connection_test/src/test_utils.rs delete mode 100644 crates/utils/tcp_connection/tcp_connection_test/src/test_utils/handle.rs delete mode 100644 crates/utils/tcp_connection/tcp_connection_test/src/test_utils/target.rs delete mode 100644 
crates/utils/tcp_connection/tcp_connection_test/src/test_utils/target_configure.rs delete mode 100644 crates/utils/tcp_connection/tcp_connection_test/src/test_utils/target_connection.rs delete mode 100644 crates/vcs_actions/Cargo.toml delete mode 100644 crates/vcs_actions/src/actions.rs delete mode 100644 crates/vcs_actions/src/actions/local_actions.rs delete mode 100644 crates/vcs_actions/src/actions/sheet_actions.rs delete mode 100644 crates/vcs_actions/src/actions/track_action.rs delete mode 100644 crates/vcs_actions/src/actions/user_actions.rs delete mode 100644 crates/vcs_actions/src/actions/vault_actions.rs delete mode 100644 crates/vcs_actions/src/connection.rs delete mode 100644 crates/vcs_actions/src/connection/action_service.rs delete mode 100644 crates/vcs_actions/src/connection/error.rs delete mode 100644 crates/vcs_actions/src/connection/protocol.rs delete mode 100644 crates/vcs_actions/src/lib.rs delete mode 100644 crates/vcs_actions/src/registry.rs delete mode 100644 crates/vcs_actions/src/registry/client_registry.rs delete mode 100644 crates/vcs_actions/src/registry/server_registry.rs delete mode 100644 crates/vcs_data/Cargo.toml delete mode 100644 crates/vcs_data/src/constants.rs delete mode 100644 crates/vcs_data/src/current.rs delete mode 100644 crates/vcs_data/src/data.rs delete mode 100644 crates/vcs_data/src/data/local.rs delete mode 100644 crates/vcs_data/src/data/local/align.rs delete mode 100644 crates/vcs_data/src/data/local/cached_sheet.rs delete mode 100644 crates/vcs_data/src/data/local/config.rs delete mode 100644 crates/vcs_data/src/data/local/latest_file_data.rs delete mode 100644 crates/vcs_data/src/data/local/latest_info.rs delete mode 100644 crates/vcs_data/src/data/local/local_files.rs delete mode 100644 crates/vcs_data/src/data/local/local_sheet.rs delete mode 100644 crates/vcs_data/src/data/local/vault_modified.rs delete mode 100644 crates/vcs_data/src/data/local/workspace_analyzer.rs delete mode 100644 
crates/vcs_data/src/data/member.rs delete mode 100644 crates/vcs_data/src/data/sheet.rs delete mode 100644 crates/vcs_data/src/data/user.rs delete mode 100644 crates/vcs_data/src/data/user/accounts.rs delete mode 100644 crates/vcs_data/src/data/vault.rs delete mode 100644 crates/vcs_data/src/data/vault/config.rs delete mode 100644 crates/vcs_data/src/data/vault/member.rs delete mode 100644 crates/vcs_data/src/data/vault/service.rs delete mode 100644 crates/vcs_data/src/data/vault/sheet_share.rs delete mode 100644 crates/vcs_data/src/data/vault/sheets.rs delete mode 100644 crates/vcs_data/src/data/vault/virtual_file.rs delete mode 100644 crates/vcs_data/src/lib.rs delete mode 100644 crates/vcs_data/todo.md delete mode 100644 crates/vcs_data/vcs_data_test/Cargo.toml delete mode 100644 crates/vcs_data/vcs_data_test/lib.rs delete mode 100644 crates/vcs_data/vcs_data_test/src/lib.rs delete mode 100644 crates/vcs_data/vcs_data_test/src/test_local_workspace_setup_and_account_management.rs delete mode 100644 crates/vcs_data/vcs_data_test/src/test_sheet_creation_management_and_persistence.rs delete mode 100644 crates/vcs_data/vcs_data_test/src/test_sheet_share_creation_and_management.rs delete mode 100644 crates/vcs_data/vcs_data_test/src/test_vault_setup_and_member_register.rs delete mode 100644 crates/vcs_data/vcs_data_test/src/test_virtual_file_creation_and_update.rs delete mode 100644 crates/vcs_docs/Cargo.toml delete mode 100644 crates/vcs_docs/build.rs delete mode 100644 crates/vcs_docs/src/docs.rs.template delete mode 100644 crates/vcs_docs/src/lib.rs create mode 100644 data/Cargo.toml create mode 100644 data/src/constants.rs create mode 100644 data/src/current.rs create mode 100644 data/src/data.rs create mode 100644 data/src/data/local.rs create mode 100644 data/src/data/local/align.rs create mode 100644 data/src/data/local/cached_sheet.rs create mode 100644 data/src/data/local/config.rs create mode 100644 data/src/data/local/latest_file_data.rs create mode 100644 
data/src/data/local/latest_info.rs create mode 100644 data/src/data/local/local_files.rs create mode 100644 data/src/data/local/local_sheet.rs create mode 100644 data/src/data/local/vault_modified.rs create mode 100644 data/src/data/local/workspace_analyzer.rs create mode 100644 data/src/data/member.rs create mode 100644 data/src/data/sheet.rs create mode 100644 data/src/data/user.rs create mode 100644 data/src/data/user/accounts.rs create mode 100644 data/src/data/vault.rs create mode 100644 data/src/data/vault/config.rs create mode 100644 data/src/data/vault/member.rs create mode 100644 data/src/data/vault/service.rs create mode 100644 data/src/data/vault/sheet_share.rs create mode 100644 data/src/data/vault/sheets.rs create mode 100644 data/src/data/vault/virtual_file.rs create mode 100644 data/src/lib.rs create mode 100644 data/tests/Cargo.toml create mode 100644 data/tests/src/lib.rs create mode 100644 data/tests/src/test_local_workspace_setup_and_account_management.rs create mode 100644 data/tests/src/test_sheet_creation_management_and_persistence.rs create mode 100644 data/tests/src/test_sheet_share_creation_and_management.rs create mode 100644 data/tests/src/test_vault_setup_and_member_register.rs create mode 100644 data/tests/src/test_virtual_file_creation_and_update.rs create mode 100644 docs/Cargo.toml create mode 100644 docs/build.rs create mode 100644 docs/src/docs.rs create mode 100644 docs/src/docs.rs.template create mode 100644 docs/src/lib.rs create mode 100644 systems/action/Cargo.toml create mode 100644 systems/action/action_macros/Cargo.toml create mode 100644 systems/action/action_macros/src/lib.rs create mode 100644 systems/action/src/action.rs create mode 100644 systems/action/src/action_pool.rs create mode 100644 systems/action/src/lib.rs create mode 100644 utils/cfg_file/Cargo.toml create mode 100644 utils/cfg_file/cfg_file_derive/Cargo.toml create mode 100644 utils/cfg_file/cfg_file_derive/src/lib.rs create mode 100644 
utils/cfg_file/cfg_file_test/Cargo.toml create mode 100644 utils/cfg_file/cfg_file_test/src/lib.rs create mode 100644 utils/cfg_file/src/config.rs create mode 100644 utils/cfg_file/src/lib.rs create mode 100644 utils/data_struct/Cargo.toml create mode 100644 utils/data_struct/src/bi_map.rs create mode 100644 utils/data_struct/src/data_sort.rs create mode 100644 utils/data_struct/src/lib.rs create mode 100644 utils/sha1_hash/Cargo.toml create mode 100644 utils/sha1_hash/res/story.txt create mode 100644 utils/sha1_hash/res/story_crlf.sha1 create mode 100644 utils/sha1_hash/res/story_lf.sha1 create mode 100644 utils/sha1_hash/src/lib.rs create mode 100644 utils/string_proc/Cargo.toml create mode 100644 utils/string_proc/src/format_path.rs create mode 100644 utils/string_proc/src/format_processer.rs create mode 100644 utils/string_proc/src/lib.rs create mode 100644 utils/string_proc/src/macros.rs create mode 100644 utils/string_proc/src/simple_processer.rs create mode 100644 utils/tcp_connection/Cargo.toml create mode 100644 utils/tcp_connection/src/error.rs create mode 100644 utils/tcp_connection/src/instance.rs create mode 100644 utils/tcp_connection/src/instance_challenge.rs create mode 100644 utils/tcp_connection/src/lib.rs create mode 100644 utils/tcp_connection/tcp_connection_test/Cargo.toml create mode 100644 utils/tcp_connection/tcp_connection_test/res/image/test_transfer.png create mode 100644 utils/tcp_connection/tcp_connection_test/res/key/test_key.pem create mode 100644 utils/tcp_connection/tcp_connection_test/res/key/test_key_private.pem create mode 100644 utils/tcp_connection/tcp_connection_test/res/key/wrong_key_private.pem create mode 100644 utils/tcp_connection/tcp_connection_test/src/lib.rs create mode 100644 utils/tcp_connection/tcp_connection_test/src/test_challenge.rs create mode 100644 utils/tcp_connection/tcp_connection_test/src/test_connection.rs create mode 100644 utils/tcp_connection/tcp_connection_test/src/test_file_transfer.rs create mode 
100644 utils/tcp_connection/tcp_connection_test/src/test_msgpack.rs create mode 100644 utils/tcp_connection/tcp_connection_test/src/test_tcp_target_build.rs create mode 100644 utils/tcp_connection/tcp_connection_test/src/test_utils.rs create mode 100644 utils/tcp_connection/tcp_connection_test/src/test_utils/handle.rs create mode 100644 utils/tcp_connection/tcp_connection_test/src/test_utils/target.rs create mode 100644 utils/tcp_connection/tcp_connection_test/src/test_utils/target_configure.rs create mode 100644 utils/tcp_connection/tcp_connection_test/src/test_utils/target_connection.rs diff --git a/.gitignore b/.gitignore index 0ff49e8..aa7aaa7 100644 --- a/.gitignore +++ b/.gitignore @@ -34,4 +34,4 @@ ### BUILT-IN DOCUMENTS ### ########################## /src/data/compile_info.rs -/crates/vcs_docs/src/docs.rs +/docs/src/docs.rs diff --git a/Cargo.toml b/Cargo.toml index 41e2bae..be67728 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -24,28 +24,28 @@ vcs = [] members = [ "examples", - "crates/utils/cfg_file", - "crates/utils/cfg_file/cfg_file_derive", - "crates/utils/cfg_file/cfg_file_test", + "utils/cfg_file", + "utils/cfg_file/cfg_file_derive", + "utils/cfg_file/cfg_file_test", - "crates/utils/data_struct", + "utils/data_struct", - "crates/utils/sha1_hash", + "utils/sha1_hash", - "crates/utils/tcp_connection", - "crates/utils/tcp_connection/tcp_connection_test", + "utils/tcp_connection", + "utils/tcp_connection/tcp_connection_test", - "crates/utils/string_proc", + "utils/string_proc", - "crates/system_action", - "crates/system_action/action_macros", + "systems/action", + "systems/action/action_macros", - "crates/vcs_data", - "crates/vcs_data/vcs_data_test", + "data", + "data/tests", - "crates/vcs_actions", + "actions", - "crates/vcs_docs", + "docs", ] [workspace.package] @@ -75,14 +75,14 @@ chrono = "0.4" toml = "0.9" [dependencies] -cfg_file = { path = "crates/utils/cfg_file" } -data_struct = { path = "crates/utils/data_struct" } -sha1_hash = { path = 
"crates/utils/sha1_hash" } -tcp_connection = { path = "crates/utils/tcp_connection" } -string_proc = { path = "crates/utils/string_proc" } +cfg_file = { path = "utils/cfg_file" } +data_struct = { path = "utils/data_struct" } +sha1_hash = { path = "utils/sha1_hash" } +tcp_connection = { path = "utils/tcp_connection" } +string_proc = { path = "utils/string_proc" } -action_system = { path = "crates/system_action" } +action_system = { path = "systems/action" } -vcs_docs = { path = "crates/vcs_docs" } -vcs_data = { path = "crates/vcs_data" } -vcs_actions = { path = "crates/vcs_actions" } +vcs_docs = { path = "docs" } +vcs_data = { path = "data" } +vcs_actions = { path = "actions" } diff --git a/actions/Cargo.toml b/actions/Cargo.toml new file mode 100644 index 0000000..615d3af --- /dev/null +++ b/actions/Cargo.toml @@ -0,0 +1,29 @@ +[package] +name = "vcs_actions" +edition = "2024" +version.workspace = true + +[dependencies] + +# Utils +tcp_connection = { path = "../utils/tcp_connection" } +cfg_file = { path = "../utils/cfg_file", features = ["default"] } +sha1_hash = { path = "../utils/sha1_hash" } +string_proc = { path = "../utils/string_proc" } + +# Core dependencies +action_system = { path = "../systems/action" } +vcs_data = { path = "../data" } + +# Error handling +thiserror = "2.0.17" + +# Serialization +serde = { version = "1.0.228", features = ["derive"] } +serde_json = "1.0.145" + +# Async & Networking +tokio = { version = "1.48.0", features = ["full"] } + +# Logging +log = "0.4.28" diff --git a/actions/src/actions.rs b/actions/src/actions.rs new file mode 100644 index 0000000..3019327 --- /dev/null +++ b/actions/src/actions.rs @@ -0,0 +1,288 @@ +use std::sync::Arc; + +use action_system::action::ActionContext; +use cfg_file::config::ConfigFile; +use tcp_connection::{error::TcpTargetError, instance::ConnectionInstance}; +use tokio::sync::{Mutex, mpsc::Sender}; +use vcs_data::{ + constants::{SERVER_PATH_MEMBER_PUB, VAULT_HOST_NAME}, + data::{ + 
local::{LocalWorkspace, config::LocalConfig, latest_info::LatestInfo}, + member::MemberId, + sheet::SheetName, + user::UserDirectory, + vault::Vault, + }, +}; + +pub mod local_actions; +pub mod sheet_actions; +pub mod track_action; +pub mod user_actions; +pub mod vault_actions; + +/// Check if the connection instance is valid in the given context. +/// This function is used to verify the connection instance in actions that require remote calls. +pub fn check_connection_instance( + ctx: &ActionContext, +) -> Result<&Arc>, TcpTargetError> { + let Some(instance) = ctx.instance() else { + return Err(TcpTargetError::NotFound( + "Connection instance lost.".to_string(), + )); + }; + Ok(instance) +} + +/// Try to get the Vault instance from the context. +pub fn try_get_vault(ctx: &ActionContext) -> Result, TcpTargetError> { + let Some(vault) = ctx.get_arc::() else { + return Err(TcpTargetError::NotFound( + "Vault instance not found".to_string(), + )); + }; + Ok(vault) +} + +/// Try to get the LocalWorkspace instance from the context. +pub fn try_get_local_workspace(ctx: &ActionContext) -> Result, TcpTargetError> { + let Some(local_workspace) = ctx.get_arc::() else { + return Err(TcpTargetError::NotFound( + "LocalWorkspace instance not found".to_string(), + )); + }; + Ok(local_workspace) +} + +/// Try to get the UserDirectory instance from the context. +pub fn try_get_user_directory(ctx: &ActionContext) -> Result, TcpTargetError> { + let Some(user_directory) = ctx.get_arc::() else { + return Err(TcpTargetError::NotFound( + "UserDirectory instance not found".to_string(), + )); + }; + Ok(user_directory) +} + +/// Try to get the LocalWorkspace instance from the context. 
+pub fn try_get_local_output(ctx: &ActionContext) -> Result>, TcpTargetError> { + let Some(output) = ctx.get_arc::>() else { + return Err(TcpTargetError::NotFound( + "Client sender not found".to_string(), + )); + }; + Ok(output) +} + +/// Authenticate member based on context and return MemberId +pub async fn auth_member( + ctx: &ActionContext, + instance: &Arc>, +) -> Result<(MemberId, bool), TcpTargetError> { + // Window开服Linux连接 -> 此函数内产生 early eof + // ~ WS # jv update + // 身份认证失败:I/O error: early eof! + + // 分析相应流程: + // 1. 服务端发起挑战,客户端接受 + // 2. 服务端发送结果,客户端接受 + // 3. 推测此时发生 early eof ---> 无 ack,导致客户端尝试拿到结果时,服务端已经结束 + // 这很有可能是 Windows 和 Linux 对于连接处理的方案差异导致的问题,需要进一步排查 + + // Start Challenge (Remote) + if ctx.is_proc_on_remote() { + let mut mut_instance = instance.lock().await; + let vault = try_get_vault(ctx)?; + + let using_host_mode = mut_instance.read_msgpack::().await?; + + let result = mut_instance + .challenge(vault.vault_path().join(SERVER_PATH_MEMBER_PUB)) + .await; + + return match result { + Ok((pass, member_id)) => { + if !pass { + // Send false to inform the client that authentication failed + mut_instance.write(false).await?; + Err(TcpTargetError::Authentication( + "Authenticate failed.".to_string(), + )) + } else { + if using_host_mode { + if vault.config().vault_host_list().contains(&member_id) { + // Using Host mode authentication, and is indeed an administrator + mut_instance.write(true).await?; + Ok((member_id, true)) + } else { + // Using Host mode authentication, but not an administrator + mut_instance.write(false).await?; + Err(TcpTargetError::Authentication( + "Authenticate failed.".to_string(), + )) + } + } else { + // Not using Host mode authentication + mut_instance.write(true).await?; + Ok((member_id, false)) + } + } + } + Err(e) => Err(e), + }; + } + + // Accept Challenge (Local) + if ctx.is_proc_on_local() { + let mut mut_instance = instance.lock().await; + let local_workspace = try_get_local_workspace(ctx)?; + let (is_host_mode, 
member_name) = { + let cfg = local_workspace.config().lock_owned().await; + (cfg.is_host_mode(), cfg.current_account()) + }; + let user_directory = try_get_user_directory(ctx)?; + + // Inform remote whether to authenticate in Host mode + mut_instance.write_msgpack(is_host_mode).await?; + + // Member name & Private key + let private_key = user_directory.account_private_key_path(&member_name); + let _ = mut_instance + .accept_challenge(private_key, &member_name) + .await?; + + // Read result + let challenge_result = mut_instance.read::().await?; + if challenge_result { + return Ok((member_name.clone(), is_host_mode)); + } else { + return Err(TcpTargetError::Authentication( + "Authenticate failed.".to_string(), + )); + } + } + + Err(TcpTargetError::NoResult("Auth failed.".to_string())) +} + +/// Get the current sheet name based on the context (local or remote). +/// This function handles the communication between local and remote instances +/// to verify and retrieve the current sheet name and whether it's a reference sheet. +/// +/// On local: +/// - Reads the current sheet from local configuration +/// - Sends the sheet name to remote for verification +/// - Returns the sheet name and whether it's a reference sheet if remote confirms it exists +/// +/// On remote: +/// - Receives sheet name from local +/// - Verifies the sheet exists in the vault +/// - Checks if the sheet is a reference sheet +/// - If allow_ref is true, reference sheets are allowed to pass verification +/// - Sends confirmation and reference status back to local +/// +/// Returns a tuple of (SheetName, bool) where the bool indicates if it's a reference sheet, +/// or an error if the sheet doesn't exist or doesn't meet the verification criteria. 
+pub async fn get_current_sheet_name( + ctx: &ActionContext, + instance: &Arc>, + member_id: &MemberId, + allow_ref: bool, +) -> Result<(SheetName, bool), TcpTargetError> { + let mut mut_instance = instance.lock().await; + if ctx.is_proc_on_local() { + let workspace = try_get_local_workspace(ctx)?; + let config = LocalConfig::read().await?; + let latest = LatestInfo::read_from(LatestInfo::latest_info_path( + workspace.local_path(), + member_id, + )) + .await?; + if let Some(sheet_name) = config.sheet_in_use() { + // Send sheet name + mut_instance.write_msgpack(sheet_name).await?; + + // Read result + if mut_instance.read_msgpack::().await? { + // Check if sheet is a reference sheet + let is_ref_sheet = latest.reference_sheets.contains(sheet_name); + if allow_ref { + // Allow reference sheets, directly return the determination result + return Ok((sheet_name.clone(), is_ref_sheet)); + } else if is_ref_sheet { + // Not allowed but it's a reference sheet, return an error + return Err(TcpTargetError::ReferenceSheetNotAllowed( + "Reference sheet not allowed".to_string(), + )); + } else { + // Not allowed but not a reference sheet, return normally + return Ok((sheet_name.clone(), false)); + } + } else { + return Err(TcpTargetError::NotFound("Sheet not found".to_string())); + } + } + // Send empty sheet_name + mut_instance.write_msgpack("".to_string()).await?; + + // Read result, since we know it's impossible to pass here, we just consume this result + let _ = mut_instance.read_msgpack::().await?; + + return Err(TcpTargetError::NotFound("Sheet not found".to_string())); + } + if ctx.is_proc_on_remote() { + let vault = try_get_vault(ctx)?; + + // Read sheet name + let sheet_name: SheetName = mut_instance.read_msgpack().await?; + + // Check if sheet exists + if let Ok(sheet) = vault.sheet(&sheet_name).await + && let Some(holder) = sheet.holder() + { + let is_ref_sheet = holder == VAULT_HOST_NAME; + if allow_ref { + // Allow reference sheets, directly return the determination 
result + if holder == member_id || holder == VAULT_HOST_NAME { + mut_instance.write_msgpack(true).await?; + return Ok((sheet.name().clone(), is_ref_sheet)); + } + } else if is_ref_sheet { + // Not allowed but it's a reference sheet, return an error + mut_instance.write_msgpack(true).await?; + return Err(TcpTargetError::ReferenceSheetNotAllowed( + "Reference sheet not allowed".to_string(), + )); + } else { + // Not allowed but not a reference sheet, return normally + if holder == member_id { + mut_instance.write_msgpack(true).await?; + return Ok((sheet_name.clone(), false)); + } + } + } + // Tell local the check is not passed + mut_instance.write_msgpack(false).await?; + return Err(TcpTargetError::NotFound("Sheet not found".to_string())); + } + Err(TcpTargetError::NoResult("NoResult".to_string())) +} + +/// The macro to write and return a result. +#[macro_export] +macro_rules! write_and_return { + ($instance:expr, $result:expr) => {{ + $instance.lock().await.write($result).await?; + return Ok($result); + }}; +} + +/// The macro to send formatted string to output channel. +/// Usage: local_println!(output, "format string", arg1, arg2, ...) +#[macro_export] +macro_rules! 
local_println { + ($output:expr, $($arg:tt)*) => {{ + let formatted = format!($($arg)*); + let _ = $output.send(formatted).await; + }}; +} diff --git a/actions/src/actions/local_actions.rs b/actions/src/actions/local_actions.rs new file mode 100644 index 0000000..53a1ff8 --- /dev/null +++ b/actions/src/actions/local_actions.rs @@ -0,0 +1,525 @@ +use std::{ + collections::{HashMap, HashSet}, + io::ErrorKind, + net::SocketAddr, + path::PathBuf, + time::SystemTime, +}; + +use action_system::{action::ActionContext, macros::action_gen}; +use cfg_file::config::ConfigFile; +use log::info; +use serde::{Deserialize, Serialize}; +use tcp_connection::error::TcpTargetError; +use vcs_data::{ + constants::{ + CLIENT_PATH_CACHED_SHEET, CLIENT_PATH_LOCAL_SHEET, REF_SHEET_NAME, + SERVER_SUFFIX_SHEET_SHARE_FILE, VAULT_HOST_NAME, + }, + data::{ + local::{ + cached_sheet::CachedSheet, + config::LocalConfig, + latest_file_data::LatestFileData, + latest_info::{LatestInfo, SheetInfo}, + vault_modified::sign_vault_modified, + }, + member::MemberId, + sheet::{SheetData, SheetName, SheetPathBuf}, + vault::{ + config::VaultUuid, + sheet_share::{Share, SheetShareId}, + virtual_file::{VirtualFileId, VirtualFileVersion, VirtualFileVersionDescription}, + }, + }, +}; + +use crate::actions::{ + auth_member, check_connection_instance, try_get_local_workspace, try_get_vault, +}; + +#[derive(Serialize, Deserialize)] +pub enum SetUpstreamVaultActionResult { + // Success + DirectedAndStained, + Redirected, + + // Fail + AlreadyStained, + AuthorizeFailed(String), + RedirectFailed(String), + SameUpstream, + + Done, +} + +#[action_gen] +pub async fn set_upstream_vault_action( + ctx: ActionContext, + upstream: SocketAddr, +) -> Result { + let instance = check_connection_instance(&ctx)?; + + // Auth Member + if let Err(e) = auth_member(&ctx, instance).await { + return Ok(SetUpstreamVaultActionResult::AuthorizeFailed(e.to_string())); + } + + // Direct + if ctx.is_proc_on_remote() { + let vault = 
try_get_vault(&ctx)?; + instance + .lock() + .await + .write(*vault.config().vault_uuid()) + .await?; + return Ok(SetUpstreamVaultActionResult::Done); + } + + if ctx.is_proc_on_local() { + info!("Authorize successful. directing to upstream vault."); + + // Read the vault UUID from the instance + let vault_uuid = instance.lock().await.read::().await?; + + let local_workspace = try_get_local_workspace(&ctx)?; + let local_config = local_workspace.config(); + + let mut mut_local_config = local_config.lock().await; + if !mut_local_config.stained() { + // Stain the local workspace + mut_local_config.stain(vault_uuid); + + // Set the upstream address + mut_local_config.set_vault_addr(upstream); + + // Store the updated config + LocalConfig::write(&mut_local_config).await?; + + info!("Workspace stained!"); + return Ok(SetUpstreamVaultActionResult::DirectedAndStained); + } else { + // Local workspace is already stained, redirecting + let Some(stained_uuid) = mut_local_config.stained_uuid() else { + return Ok(SetUpstreamVaultActionResult::RedirectFailed( + "Stained uuid not found".to_string(), + )); + }; + let local_upstream = mut_local_config.upstream_addr(); + + // Address changed, but same UUID. 
+ if vault_uuid == stained_uuid { + if local_upstream != upstream { + // Set the upstream address + mut_local_config.set_vault_addr(upstream); + + // Store the updated config + LocalConfig::write(&mut_local_config).await?; + return Ok(SetUpstreamVaultActionResult::Redirected); + } else { + return Ok(SetUpstreamVaultActionResult::SameUpstream); + } + } + return Ok(SetUpstreamVaultActionResult::AlreadyStained); + } + } + + Err(TcpTargetError::NoResult("No result.".to_string())) +} + +#[derive(Serialize, Deserialize)] +pub enum UpdateToLatestInfoResult { + Success, + + // Fail + AuthorizeFailed(String), + SyncCachedSheetFail(SyncCachedSheetFailReason), +} + +#[derive(Serialize, Deserialize)] +pub enum SyncCachedSheetFailReason { + PathAlreadyExist(PathBuf), +} + +#[action_gen] +pub async fn update_to_latest_info_action( + ctx: ActionContext, + _unused: (), +) -> Result { + let instance = check_connection_instance(&ctx)?; + + let (member_id, _is_host_mode) = match auth_member(&ctx, instance).await { + Ok(id) => id, + Err(e) => return Ok(UpdateToLatestInfoResult::AuthorizeFailed(e.to_string())), + }; + + info!("Sending latest info to {}", member_id); + + // Sync Latest Info + { + if ctx.is_proc_on_remote() { + let vault = try_get_vault(&ctx)?; + + // Build latest info + let mut latest_info = LatestInfo::default(); + + // Sheet & Share + let mut shares_in_my_sheets: HashMap> = + HashMap::new(); + let mut member_owned = Vec::new(); + let mut member_visible = Vec::new(); + let mut ref_sheets = HashSet::new(); + + for sheet in vault.sheets().await? { + // Build share parts + if let Some(holder) = sheet.holder() { + if holder == &member_id || holder == VAULT_HOST_NAME { + let mut sheet_shares: HashMap = HashMap::new(); + for share in sheet.get_shares().await? 
{ + // Get SharePath + let Some(share_path) = share.path.clone() else { + continue; + }; + // Get ShareId from SharePath + let Some(share_id) = share_path.file_name() else { + continue; + }; + let share_id = share_id.display().to_string(); + let share_id_trimed = + share_id.trim_end_matches(SERVER_SUFFIX_SHEET_SHARE_FILE); + sheet_shares.insert(share_id_trimed.to_string(), share); + } + shares_in_my_sheets.insert(sheet.name().clone(), sheet_shares); + } + } + + // Build sheet parts + let holder_is_host = + sheet.holder().unwrap_or(&String::default()) == &VAULT_HOST_NAME; + if sheet.holder().is_some() + && (sheet.holder().unwrap() == &member_id || holder_is_host) + { + member_owned.push(sheet.name().clone()); + if holder_is_host { + ref_sheets.insert(sheet.name().clone()); + } + } else { + member_visible.push(SheetInfo { + sheet_name: sheet.name().clone(), + holder_name: sheet.holder().cloned(), + }); + } + } + + // Record Share & Sheet + latest_info.visible_sheets = member_owned; + latest_info.invisible_sheets = member_visible; + latest_info.shares_in_my_sheets = shares_in_my_sheets; + + // RefSheet + let ref_sheet_data = vault.sheet(&REF_SHEET_NAME.to_string()).await?.to_data(); + latest_info.ref_sheet_content = ref_sheet_data.clone(); + latest_info.ref_sheet_vfs_mapping = ref_sheet_data + .mapping() + .into_iter() + .map(|(path, file)| (file.id.clone(), path.clone())) + .collect::>(); + latest_info.reference_sheets = ref_sheets; + + // Members + let members = vault.members().await?; + latest_info.vault_members = members; + + // Send + instance + .lock() + .await + .write_large_msgpack(latest_info, 512_u16) + .await?; + } + + if ctx.is_proc_on_local() { + let workspace = try_get_local_workspace(&ctx)?; + let mut latest_info = instance + .lock() + .await + .read_large_msgpack::(512_u16) + .await?; + latest_info.update_instant = Some(SystemTime::now()); + LatestInfo::write_to( + &latest_info, + LatestInfo::latest_info_path(workspace.local_path(), &member_id), + ) + 
.await?; + } + } + + info!("Update sheets to {}", member_id); + + // Sync Remote Sheets + { + if ctx.is_proc_on_local() { + let workspace = try_get_local_workspace(&ctx)?; + let Ok(latest_info) = LatestInfo::read_from(LatestInfo::latest_info_path( + workspace.local_path(), + &member_id, + )) + .await + else { + return Err(TcpTargetError::Io("Read latest info failed".to_string())); + }; + + // Collect all local versions + let mut local_versions = vec![]; + for request_sheet in latest_info.visible_sheets { + let Ok(data) = CachedSheet::cached_sheet_data(&request_sheet).await else { + // For newly created sheets, the version is 0. + // Send -1 to distinguish from 0, ensuring the upstream will definitely send the sheet information + local_versions.push((request_sheet, -1)); + continue; + }; + local_versions.push((request_sheet, data.write_count())); + } + + // Send the version list + let len = local_versions.len(); + instance.lock().await.write_msgpack(local_versions).await?; + + if len < 1 { + // Don't return here, continue to next section + // But we need to consume the false marker from the server + if ctx.is_proc_on_local() { + let mut mut_instance = instance.lock().await; + let _: bool = mut_instance.read_msgpack().await?; + } + } else { + // Receive data + if ctx.is_proc_on_local() { + let mut mut_instance = instance.lock().await; + loop { + let in_coming: bool = mut_instance.read_msgpack().await?; + if in_coming { + let (sheet_name, data): (SheetName, SheetData) = + mut_instance.read_large_msgpack(1024u16).await?; + + let Some(path) = CachedSheet::cached_sheet_path(sheet_name) else { + return Err(TcpTargetError::NotFound( + "Workspace not found".to_string(), + )); + }; + + SheetData::write_to(&data, path).await?; + } else { + break; + } + } + } + } + } + if ctx.is_proc_on_remote() { + let vault = try_get_vault(&ctx)?; + let mut mut_instance = instance.lock().await; + + let local_versions = mut_instance.read_msgpack::>().await?; + + for (sheet_name, version) in 
local_versions.iter() { + let sheet = vault.sheet(sheet_name).await?; + if let Some(holder) = sheet.holder() + && (holder == &member_id || holder == VAULT_HOST_NAME) + && &sheet.write_count() != version + { + mut_instance.write_msgpack(true).await?; + mut_instance + .write_large_msgpack((sheet_name, sheet.to_data()), 1024u16) + .await?; + } + } + mut_instance.write_msgpack(false).await?; + } + } + + info!("Fetch held status to {}", member_id); + + // Sync Held Info + { + if ctx.is_proc_on_local() { + let workspace = try_get_local_workspace(&ctx)?; + + let Ok(latest_info) = LatestInfo::read_from(LatestInfo::latest_info_path( + workspace.local_path(), + &member_id, + )) + .await + else { + return Err(TcpTargetError::Io("Read latest info failed".to_string())); + }; + + // Collect files that need to know the holder + let mut holder_wants_know = Vec::new(); + for sheet_name in &latest_info.visible_sheets { + if let Ok(sheet_data) = CachedSheet::cached_sheet_data(sheet_name).await { + holder_wants_know + .extend(sheet_data.mapping().values().map(|value| value.id.clone())); + } + } + + // Send request + let mut mut_instance = instance.lock().await; + mut_instance + .write_large_msgpack(&holder_wants_know, 1024u16) + .await?; + + // Receive information and write to local + let result: HashMap< + VirtualFileId, + ( + Option, + VirtualFileVersion, + Vec<(VirtualFileVersion, VirtualFileVersionDescription)>, + ), + > = mut_instance.read_large_msgpack(1024u16).await?; + + // Read configuration file + let path = LatestFileData::data_path(&member_id)?; + let mut latest_file_data: LatestFileData = + LatestFileData::read_from(&path).await.unwrap_or_default(); + + // Write the received information + latest_file_data.update_info(result); + + // Write + LatestFileData::write_to(&latest_file_data, &path).await?; + } + + if ctx.is_proc_on_remote() { + let vault = try_get_vault(&ctx)?; + let mut mut_instance = instance.lock().await; + + // Read the request + let holder_wants_know: Vec = 
+ mut_instance.read_large_msgpack(1024u16).await?; + + // Organize the information + let mut result: HashMap< + VirtualFileId, + ( + Option, + VirtualFileVersion, + Vec<(VirtualFileVersion, VirtualFileVersionDescription)>, + ), + > = HashMap::new(); + for id in holder_wants_know { + let Ok(meta) = vault.virtual_file_meta(&id).await else { + continue; + }; + let holder = if meta.hold_member().is_empty() { + None + } else { + Some(meta.hold_member().clone()) + }; + let latest_version = meta.version_latest(); + + let all_versions = meta.versions(); + let all_descriptions = meta.version_descriptions(); + let histories = all_versions + .iter() + .filter_map(|v| { + let Some(desc) = all_descriptions.get(v) else { + return None; + }; + Some((v.clone(), desc.clone())) + }) + .collect::>(); + + result.insert(id, (holder, latest_version, histories)); + } + + // Send information + mut_instance.write_large_msgpack(&result, 1024u16).await?; + } + } + + // Sync cached sheet to local sheet + if ctx.is_proc_on_local() { + let workspace = try_get_local_workspace(&ctx)?; + let cached_sheet_path = workspace.local_path().join(CLIENT_PATH_CACHED_SHEET); + let local_sheet_path = workspace.local_path().join(CLIENT_PATH_LOCAL_SHEET); + if !local_sheet_path.exists() || !cached_sheet_path.exists() { + // No need to sync + if ctx.is_proc_on_local() { + sign_vault_modified(false).await; + } + return Ok(UpdateToLatestInfoResult::Success); + } + + let cached_sheet_paths = + extract_sheet_names_from_paths(CachedSheet::cached_sheet_paths().await?)?; + + // Match cached sheets and local sheets, and sync content + for (cached_sheet_name, _cached_sheet_path) in cached_sheet_paths { + // Read cached sheet and local sheet + let cached_sheet = CachedSheet::cached_sheet_data(&cached_sheet_name).await?; + let Ok(mut local_sheet) = workspace.local_sheet(&member_id, &cached_sheet_name).await + else { + continue; + }; + + // Read cached id mapping + let Some(cached_sheet_id_mapping) = 
cached_sheet.id_mapping() else { + continue; + }; + + for (cached_item_id, cached_item_path) in cached_sheet_id_mapping.iter() { + let path_by_id = { local_sheet.path_by_id(cached_item_id).cloned() }; + + // Get local path + let Some(local_path) = path_by_id else { + continue; + }; + + if &local_path == cached_item_path { + continue; + } + + // If path not match, try to move + let move_result = local_sheet.move_mapping(&local_path, cached_item_path); + if let Err(e) = move_result { + match e.kind() { + ErrorKind::AlreadyExists => { + return Ok(UpdateToLatestInfoResult::SyncCachedSheetFail( + SyncCachedSheetFailReason::PathAlreadyExist( + cached_item_path.clone(), + ), + )); + } + _ => return Err(e.into()), + } + } + local_sheet.write().await?; + } + } + } + + if ctx.is_proc_on_local() { + sign_vault_modified(false).await; + } + Ok(UpdateToLatestInfoResult::Success) +} + +/// Extract sheet names from file paths +fn extract_sheet_names_from_paths( + paths: Vec, +) -> Result, std::io::Error> { + let mut result = HashMap::new(); + for p in paths { + let sheet_name = p + .file_stem() + .and_then(|s| s.to_str()) + .map(|s| s.to_string()) + .ok_or_else(|| { + std::io::Error::new(std::io::ErrorKind::InvalidData, "Invalid file name") + })?; + result.insert(sheet_name, p); + } + Ok(result) +} diff --git a/actions/src/actions/sheet_actions.rs b/actions/src/actions/sheet_actions.rs new file mode 100644 index 0000000..4c9977e --- /dev/null +++ b/actions/src/actions/sheet_actions.rs @@ -0,0 +1,583 @@ +use std::{collections::HashMap, io::ErrorKind}; + +use action_system::{action::ActionContext, macros::action_gen}; +use serde::{Deserialize, Serialize}; +use tcp_connection::error::TcpTargetError; +use vcs_data::{ + constants::VAULT_HOST_NAME, + data::{ + local::{ + vault_modified::sign_vault_modified, + workspace_analyzer::{FromRelativePathBuf, ToRelativePathBuf}, + }, + sheet::SheetName, + vault::sheet_share::{ShareMergeMode, SheetShareId}, + }, +}; + +use crate::{ + actions::{ + 
auth_member, check_connection_instance, get_current_sheet_name, try_get_local_workspace, + try_get_vault, + }, + write_and_return, +}; + +#[derive(Default, Serialize, Deserialize)] +pub enum MakeSheetActionResult { + Success, + SuccessRestore, + + // Fail + AuthorizeFailed(String), + SheetAlreadyExists, + SheetCreationFailed(String), + + #[default] + Unknown, +} + +/// Build a sheet with context +#[action_gen] +pub async fn make_sheet_action( + ctx: ActionContext, + sheet_name: SheetName, +) -> Result { + let instance = check_connection_instance(&ctx)?; + + // Auth Member + let (member_id, is_host_mode) = match auth_member(&ctx, instance).await { + Ok(id) => id, + Err(e) => return Ok(MakeSheetActionResult::AuthorizeFailed(e.to_string())), + }; + + if ctx.is_proc_on_remote() { + let vault = try_get_vault(&ctx)?; + let holder = if is_host_mode { + VAULT_HOST_NAME.to_string() + } else { + member_id + }; + + // Check if the sheet already exists + if let Ok(mut sheet) = vault.sheet(&sheet_name).await { + // If the sheet has no holder, assign it to the current member (restore operation) + if sheet.holder().is_none() { + sheet.set_holder(holder.clone()); + match sheet.persist().await { + Ok(_) => { + write_and_return!(instance, MakeSheetActionResult::SuccessRestore); + } + Err(e) => { + write_and_return!( + instance, + MakeSheetActionResult::SheetCreationFailed(e.to_string()) + ); + } + } + } else { + write_and_return!(instance, MakeSheetActionResult::SheetAlreadyExists); + } + } else { + // Create the sheet + match vault.create_sheet(&sheet_name, &holder).await { + Ok(_) => { + write_and_return!(instance, MakeSheetActionResult::Success); + } + Err(e) => { + write_and_return!( + instance, + MakeSheetActionResult::SheetCreationFailed(e.to_string()) + ); + } + } + } + } + + if ctx.is_proc_on_local() { + let result = instance + .lock() + .await + .read::() + .await?; + if matches!(result, MakeSheetActionResult::Success) { + sign_vault_modified(true).await; + } + return 
Ok(result); + } + + Err(TcpTargetError::NoResult("No result.".to_string())) +} + +#[derive(Default, Serialize, Deserialize)] +pub enum DropSheetActionResult { + Success, + + // Fail + SheetInUse, + AuthorizeFailed(String), + SheetNotExists, + SheetDropFailed(String), + NoHolder, + NotOwner, + + #[default] + Unknown, +} + +#[action_gen] +pub async fn drop_sheet_action( + ctx: ActionContext, + sheet_name: SheetName, +) -> Result { + let instance = check_connection_instance(&ctx)?; + + // Auth Member + let (member_id, is_host_mode) = match auth_member(&ctx, instance).await { + Ok(id) => id, + Err(e) => { + return Ok(DropSheetActionResult::AuthorizeFailed(e.to_string())); + } + }; + + // Check sheet in use on local + if ctx.is_proc_on_local() { + let local_workspace = try_get_local_workspace(&ctx)?; + if let Some(sheet) = local_workspace.config().lock().await.sheet_in_use() { + if sheet == &sheet_name { + instance.lock().await.write(false).await?; + return Ok(DropSheetActionResult::SheetInUse); + } + instance.lock().await.write(true).await?; + } else { + instance.lock().await.write(true).await?; + } + } + + if ctx.is_proc_on_remote() { + // Check if client sheet is in use + let sheet_in_use = instance.lock().await.read::().await?; + if !sheet_in_use { + return Ok(DropSheetActionResult::SheetInUse); + } + + let vault = try_get_vault(&ctx)?; + + // Check if the sheet exists + let mut sheet = match vault.sheet(&sheet_name).await { + Ok(sheet) => sheet, + Err(e) => { + if e.kind() == ErrorKind::NotFound { + write_and_return!(instance, DropSheetActionResult::SheetNotExists); + } else { + write_and_return!( + instance, + DropSheetActionResult::SheetDropFailed(e.to_string()) + ); + } + } + }; + + // Get the sheet's holder + let Some(holder) = sheet.holder() else { + write_and_return!(instance, DropSheetActionResult::NoHolder); + }; + + // Verify that the sheet holder is either the current user or the host + // All sheets belong to the host + if holder != &member_id && 
!is_host_mode { + write_and_return!(instance, DropSheetActionResult::NotOwner); + } + + // Drop the sheet + sheet.forget_holder(); + match sheet.persist().await { + Ok(_) => { + write_and_return!(instance, DropSheetActionResult::Success); + } + Err(e) => { + write_and_return!( + instance, + DropSheetActionResult::SheetDropFailed(e.to_string()) + ); + } + } + } + + if ctx.is_proc_on_local() { + let result = instance + .lock() + .await + .read::() + .await?; + if matches!(result, DropSheetActionResult::Success) { + sign_vault_modified(true).await; + } + return Ok(result); + } + + Err(TcpTargetError::NoResult("No result.".to_string())) +} + +pub type OperationArgument = (EditMappingOperations, Option); + +#[derive(Serialize, Deserialize, PartialEq, Eq, Clone)] +pub enum EditMappingOperations { + Move, + Erase, +} + +#[derive(Serialize, Deserialize, Default)] +pub enum EditMappingActionResult { + Success, + + // Fail + AuthorizeFailed(String), + EditNotAllowed, + MappingNotFound(FromRelativePathBuf), + InvalidMove(InvalidMoveReason), + + #[default] + Unknown, +} + +#[derive(Serialize, Deserialize)] +pub enum InvalidMoveReason { + MoveOperationButNoTarget(FromRelativePathBuf), + ContainsDuplicateMapping(ToRelativePathBuf), +} + +#[derive(Serialize, Deserialize, Clone)] +pub struct EditMappingActionArguments { + pub operations: HashMap, +} + +/// This Action only modifies Sheet Mapping and +/// does not interfere with the actual location of local files or Local Mapping +#[action_gen] +pub async fn edit_mapping_action( + ctx: ActionContext, + args: EditMappingActionArguments, +) -> Result { + let instance = check_connection_instance(&ctx)?; + + // Auth Member + let (member_id, is_host_mode) = match auth_member(&ctx, instance).await { + Ok(id) => id, + Err(e) => { + return Ok(EditMappingActionResult::AuthorizeFailed(e.to_string())); + } + }; + + // Check sheet + let (sheet_name, is_ref_sheet) = + get_current_sheet_name(&ctx, instance, &member_id, true).await?; + + // Can 
modify Sheet when not in reference sheet or in Host mode + let can_modify_sheet = !is_ref_sheet || is_host_mode; + + if !can_modify_sheet { + return Ok(EditMappingActionResult::EditNotAllowed); + } + + if ctx.is_proc_on_remote() { + let vault = try_get_vault(&ctx)?; + let mut sheet = vault.sheet(&sheet_name).await?; + + // Precheck + for (from_path, (operation, to_path)) in args.operations.iter() { + // Check mapping exists + if !sheet.mapping().contains_key(from_path) { + write_and_return!( + instance, + EditMappingActionResult::MappingNotFound(from_path.clone()) + ); + } + + // Move check + if operation == &EditMappingOperations::Move { + // Check if target exists + if let Some(to_path) = to_path { + // Check if target is duplicate + if sheet.mapping().contains_key(to_path) { + write_and_return!( + instance, + EditMappingActionResult::InvalidMove( + InvalidMoveReason::ContainsDuplicateMapping(to_path.clone()) + ) + ); + } + } else { + write_and_return!( + instance, + EditMappingActionResult::InvalidMove( + InvalidMoveReason::MoveOperationButNoTarget(from_path.clone()) + ) + ); + } + } + } + + // Process + for (from_path, (operation, to_path)) in args.operations { + match operation { + // During the Precheck phase, it has been ensured that: + // 1. The mapping to be edited for the From path indeed exists + // 2. The location of the To path is indeed empty + // 3. 
In Move mode, To path can be safely unwrapped + // Therefore, the following unwrap() calls are safe to execute + EditMappingOperations::Move => { + let mapping = sheet.mapping_mut().remove(&from_path).unwrap(); + let to_path = to_path.unwrap(); + sheet + .add_mapping(to_path, mapping.id, mapping.version) + .await?; + } + EditMappingOperations::Erase => { + sheet.mapping_mut().remove(&from_path).unwrap(); + } + } + } + + // Write + sheet.persist().await?; + + write_and_return!(instance, EditMappingActionResult::Success); + } + + if ctx.is_proc_on_local() { + let result = instance + .lock() + .await + .read::() + .await?; + if matches!(result, EditMappingActionResult::Success) { + sign_vault_modified(true).await; + } + return Ok(result); + } + + Ok(EditMappingActionResult::Success) +} + +#[derive(Serialize, Deserialize, PartialEq, Eq, Clone)] +pub struct ShareMappingArguments { + pub mappings: Vec, + pub description: String, + // None = current sheet, + // Some(sheet_name) = other ref(public) sheet + pub from_sheet: Option, + pub to_sheet: SheetName, +} + +#[derive(Serialize, Deserialize, Default)] +pub enum ShareMappingActionResult { + Success, + + // Fail + AuthorizeFailed(String), + TargetSheetNotFound(SheetName), + TargetIsSelf, + MappingNotFound(FromRelativePathBuf), + + #[default] + Unknown, +} + +#[action_gen] +pub async fn share_mapping_action( + ctx: ActionContext, + args: ShareMappingArguments, +) -> Result { + let instance = check_connection_instance(&ctx)?; + + // Auth Member + let (member_id, _is_host_mode) = match auth_member(&ctx, instance).await { + Ok(id) => id, + Err(e) => { + return Ok(ShareMappingActionResult::AuthorizeFailed(e.to_string())); + } + }; + + // Check sheet + let sheet_name = args.from_sheet.unwrap_or( + get_current_sheet_name(&ctx, instance, &member_id, true) + .await? 
+ .0, + ); + + if ctx.is_proc_on_remote() { + let vault = try_get_vault(&ctx)?; + let sheet = vault.sheet(&sheet_name).await?; + + // Tip: Because sheet_name may specify a sheet that does not belong to the user, + // a secondary verification is required. + + // Check if the sheet holder is Some and matches the member_id or is the host + let Some(holder) = sheet.holder() else { + // If there's no holder, the sheet cannot be shared from + write_and_return!( + instance, + ShareMappingActionResult::AuthorizeFailed("Sheet has no holder".to_string()) + ); + }; + + // Verify the holder is either the current member or the host + if holder != &member_id && holder != VAULT_HOST_NAME { + write_and_return!( + instance, + ShareMappingActionResult::AuthorizeFailed( + "Not sheet holder or ref sheet".to_string() + ) + ); + } + + let to_sheet_name = args.to_sheet; + + // Verify target sheet exists + if !vault.sheet_names()?.contains(&to_sheet_name) { + // Does not exist + write_and_return!( + instance, + ShareMappingActionResult::TargetSheetNotFound(to_sheet_name.clone()) + ); + } + + // Verify sheet is not self + if sheet_name == to_sheet_name { + // Is self + write_and_return!(instance, ShareMappingActionResult::TargetIsSelf); + } + + // Verify all mappings are correct + for mapping in args.mappings.iter() { + if !sheet.mapping().contains_key(mapping) { + // If any mapping is invalid, indicate failure + write_and_return!( + instance, + ShareMappingActionResult::MappingNotFound(mapping.clone()) + ); + } + } + + // Execute sharing logic + sheet + .share_mappings(&to_sheet_name, args.mappings, &member_id, args.description) + .await?; + + // Sharing successful + write_and_return!(instance, ShareMappingActionResult::Success); + } + + if ctx.is_proc_on_local() { + let result = instance + .lock() + .await + .read::() + .await?; + return Ok(result); + } + + Ok(ShareMappingActionResult::Success) +} + +#[derive(Serialize, Deserialize, PartialEq, Eq, Clone)] +pub struct 
MergeShareMappingArguments { + pub share_id: SheetShareId, + pub share_merge_mode: ShareMergeMode, +} + +#[derive(Serialize, Deserialize, Default)] +pub enum MergeShareMappingActionResult { + Success, + + // Fail + HasConflicts, + AuthorizeFailed(String), + EditNotAllowed, + ShareIdNotFound(SheetShareId), + MergeFails(String), + + #[default] + Unknown, +} + +#[action_gen] +pub async fn merge_share_mapping_action( + ctx: ActionContext, + args: MergeShareMappingArguments, +) -> Result { + let instance = check_connection_instance(&ctx)?; + + // Auth Member + let (member_id, is_host_mode) = match auth_member(&ctx, instance).await { + Ok(id) => id, + Err(e) => { + return Ok(MergeShareMappingActionResult::AuthorizeFailed( + e.to_string(), + )); + } + }; + + // Check sheet + let (sheet_name, is_ref_sheet) = + get_current_sheet_name(&ctx, instance, &member_id, true).await?; + + // Can modify Sheet when not in reference sheet or in Host mode + let can_modify_sheet = !is_ref_sheet || is_host_mode; + + if !can_modify_sheet { + return Ok(MergeShareMappingActionResult::EditNotAllowed); + } + + if ctx.is_proc_on_remote() { + let vault = try_get_vault(&ctx)?; + let share_id = args.share_id; + + // Get the share and sheet + let (sheet, share) = if vault.share_file_path(&sheet_name, &share_id).exists() { + let sheet = vault.sheet(&sheet_name).await?; + let share = sheet.get_share(&share_id).await?; + (sheet, share) + } else { + // Share does not exist + write_and_return!( + instance, + MergeShareMappingActionResult::ShareIdNotFound(share_id.clone()) + ); + }; + + // Perform the merge + match sheet.merge_share(share, args.share_merge_mode).await { + Ok(_) => write_and_return!(instance, MergeShareMappingActionResult::Success), + Err(e) => match e.kind() { + ErrorKind::AlreadyExists => { + write_and_return!(instance, MergeShareMappingActionResult::HasConflicts); + } + _ => { + write_and_return!( + instance, + MergeShareMappingActionResult::MergeFails(e.to_string()) + ); + } + }, + } + 
} + + if ctx.is_proc_on_local() { + let result = instance + .lock() + .await + .read::() + .await?; + match result { + MergeShareMappingActionResult::Success => { + sign_vault_modified(true).await; + } + _ => {} + } + return Ok(result); + } + + Ok(MergeShareMappingActionResult::Success) +} diff --git a/actions/src/actions/track_action.rs b/actions/src/actions/track_action.rs new file mode 100644 index 0000000..e5f96b3 --- /dev/null +++ b/actions/src/actions/track_action.rs @@ -0,0 +1,987 @@ +use std::{ + collections::{HashMap, HashSet}, + path::PathBuf, + sync::Arc, + time::SystemTime, +}; + +use action_system::{action::ActionContext, macros::action_gen}; +use cfg_file::config::ConfigFile; +use serde::{Deserialize, Serialize}; +use sha1_hash::calc_sha1; +use tcp_connection::{error::TcpTargetError, instance::ConnectionInstance}; +use tokio::{fs, sync::Mutex}; +use vcs_data::{ + constants::CLIENT_FILE_TEMP_FILE, + data::{ + local::{ + cached_sheet::CachedSheet, latest_file_data::LatestFileData, + local_sheet::LocalMappingMetadata, vault_modified::sign_vault_modified, + workspace_analyzer::AnalyzeResult, + }, + member::MemberId, + sheet::SheetName, + vault::{ + config::VaultUuid, + virtual_file::{VirtualFileId, VirtualFileVersion, VirtualFileVersionDescription}, + }, + }, +}; + +use crate::{ + actions::{ + auth_member, check_connection_instance, get_current_sheet_name, try_get_local_output, + try_get_local_workspace, try_get_vault, + }, + local_println, +}; + +pub type NextVersion = String; +pub type UpdateDescription = String; + +const TEMP_NAME: &str = "{temp_name}"; + +#[derive(Serialize, Deserialize)] +pub struct TrackFileActionArguments { + // Path need to track + pub relative_pathes: HashSet, + + // File update info + pub file_update_info: HashMap, + + // Print infos + pub print_infos: bool, + + // overwrite modified files + pub allow_overwrite_modified: bool, +} + +#[derive(Serialize, Deserialize)] +pub enum TrackFileActionResult { + Done { + created: Vec, + 
updated: Vec, + synced: Vec, + skipped: Vec, + }, + + // Fail + AuthorizeFailed(String), + + /// There are local move or missing items that have not been resolved, + /// this situation does not allow track + StructureChangesNotSolved, + + CreateTaskFailed(CreateTaskResult), + UpdateTaskFailed(UpdateTaskResult), + SyncTaskFailed(SyncTaskResult), +} + +#[derive(Serialize, Deserialize)] +pub enum CreateTaskResult { + Success(Vec), // Success(success_relative_pathes) + + /// Create file on existing path in the sheet + CreateFileOnExistPath(PathBuf), + + /// Sheet not found + SheetNotFound(SheetName), +} + +#[derive(Serialize, Deserialize)] +pub enum UpdateTaskResult { + Success(Vec), // Success(success_relative_pathes) + + VerifyFailed { + path: PathBuf, + reason: VerifyFailReason, + }, +} + +#[derive(Serialize, Deserialize, Clone)] +pub enum VerifyFailReason { + SheetNotFound(SheetName), + MappingNotFound, + VirtualFileNotFound(VirtualFileId), + VirtualFileReadFailed(VirtualFileId), + NotHeld, + VersionDismatch(VirtualFileVersion, VirtualFileVersion), // (CurrentVersion, RemoteVersion) + UpdateButNoDescription, // File needs update, but no description exists + VersionAlreadyExist(VirtualFileVersion), // (RemoteVersion) +} + +#[derive(Serialize, Deserialize)] +pub enum SyncTaskResult { + Success(Vec), // Success(success_relative_pathes) +} +#[action_gen] +pub async fn track_file_action( + ctx: ActionContext, + arguments: TrackFileActionArguments, +) -> Result { + let relative_pathes = arguments.relative_pathes; + let instance = check_connection_instance(&ctx)?; + + // Auth Member + let (member_id, is_host_mode) = match auth_member(&ctx, instance).await { + Ok(id) => id, + Err(e) => return Ok(TrackFileActionResult::AuthorizeFailed(e.to_string())), + }; + + // Check sheet + let (sheet_name, is_ref_sheet) = + get_current_sheet_name(&ctx, instance, &member_id, true).await?; + + // Can modify Sheet when not in reference sheet or in Host mode + let can_modify_sheet = 
!is_ref_sheet || is_host_mode; + + if ctx.is_proc_on_local() { + let workspace = try_get_local_workspace(&ctx)?; + let analyzed = AnalyzeResult::analyze_local_status(&workspace).await?; + let latest_file_data = + LatestFileData::read_from(LatestFileData::data_path(&member_id)?).await?; + + if !analyzed.lost.is_empty() || !analyzed.moved.is_empty() { + return Ok(TrackFileActionResult::StructureChangesNotSolved); + } + + let Some(sheet_in_use) = workspace.config().lock().await.sheet_in_use().clone() else { + return Err(TcpTargetError::NotFound("Sheet not found!".to_string())); + }; + + // Read local sheet and member held + let local_sheet = workspace.local_sheet(&member_id, &sheet_in_use).await?; + let cached_sheet = CachedSheet::cached_sheet_data(&sheet_in_use).await?; + let member_held = LatestFileData::read_from(LatestFileData::data_path(&member_id)?).await?; + + let modified = analyzed + .modified + .intersection(&relative_pathes) + .cloned() + .collect::>(); + + // Filter out created files + let created_task = analyzed + .created + .intersection(&relative_pathes) + .cloned() + .collect::>(); + + // Filter out modified files that need to be updated + let mut update_task: Vec = { + let result = modified.iter().filter_map(|p| { + if let Ok(local_data) = local_sheet.mapping_data(p) { + let id = local_data.mapping_vfid(); + let local_ver = local_data.version_when_updated(); + let Some(latest_ver) = latest_file_data.file_version(id) else { + return None; + }; + if let Some(held_member) = member_held.file_holder(id) { + // Check if holder and version match + if held_member == &member_id && local_ver == latest_ver { + return Some(p.clone()); + } + } + }; + None + }); + result.collect() + }; + + let mut skipped_task: Vec = Vec::new(); + + // Filter out files that do not exist locally or have version inconsistencies and need to be synchronized + let mut sync_task: Vec = { + let other: Vec = relative_pathes + .iter() + .filter(|p| !created_task.contains(p) && 
!update_task.contains(p)) + .cloned() + .collect(); + + let result = other.iter().filter_map(|p| { + // Not exists and not lost, first download + if !workspace.local_path().join(p).exists() && !analyzed.lost.contains(p) { + return Some(p.clone()); + } + + // In cached sheet + if !cached_sheet.mapping().contains_key(p) { + return None; + } + + // In local sheet + let local_sheet_mapping = local_sheet.mapping_data(p).ok()?; + let vfid = local_sheet_mapping.mapping_vfid(); + + if let Some(latest_version) = &latest_file_data.file_version(vfid) { + // Version does not match + if &local_sheet_mapping.version_when_updated() != latest_version { + let modified = modified.contains(p); + if modified && arguments.allow_overwrite_modified { + return Some(p.clone()); + } else if modified && !arguments.allow_overwrite_modified { + // If not allowed to overwrite, join skipped tasks + skipped_task.push(p.clone()); + return None; + } + return Some(p.clone()); + } + } + + // File not held and modified + let holder = latest_file_data.file_holder(vfid); + if (holder.is_none() || &member_id != holder.unwrap()) && modified.contains(p) { + // If allow overwrite modified is true, overwrite the file + if arguments.allow_overwrite_modified { + return Some(p.clone()); + } else { + // If not allowed to overwrite, join skipped tasks + skipped_task.push(p.clone()); + return None; + } + } + + None + }); + result.collect() + }; + + // If the sheet cannot be modified, + // the update_task here should be considered invalid and changed to sync rollback + if !can_modify_sheet { + if arguments.allow_overwrite_modified { + sync_task.append(&mut update_task); + update_task.clear(); + } else { + skipped_task.append(&mut update_task); + update_task.clear(); + } + } + + // Package tasks + let tasks: (Vec, Vec, Vec) = + (created_task, update_task, sync_task); + + // Send to remote + { + let mut mut_instance = instance.lock().await; + mut_instance + .write_large_msgpack(tasks.clone(), 1024u16) + .await?; + // 
Drop mutex here + } + + // Process create tasks + let mut success_create = Vec::::new(); + if can_modify_sheet { + success_create = match proc_create_tasks_local( + &ctx, + instance.clone(), + &member_id, + &sheet_name, + tasks.0, + arguments.print_infos, + ) + .await + { + Ok(r) => match r { + CreateTaskResult::Success(relative_pathes) => relative_pathes, + _ => { + return Ok(TrackFileActionResult::CreateTaskFailed(r)); + } + }, + Err(e) => return Err(e), + }; + } + + // Process update tasks + let mut success_update = Vec::::new(); + if can_modify_sheet { + success_update = match proc_update_tasks_local( + &ctx, + instance.clone(), + &member_id, + &sheet_name, + tasks.1, + arguments.print_infos, + arguments.file_update_info, + ) + .await + { + Ok(r) => match r { + UpdateTaskResult::Success(relative_pathes) => relative_pathes, + _ => { + return Ok(TrackFileActionResult::UpdateTaskFailed(r)); + } + }, + Err(e) => return Err(e), + }; + } + + // Process sync tasks + let success_sync = match proc_sync_tasks_local( + &ctx, + instance.clone(), + &member_id, + &sheet_name, + tasks.2, + arguments.print_infos, + ) + .await + { + Ok(r) => match r { + SyncTaskResult::Success(relative_pathes) => relative_pathes, + }, + Err(e) => return Err(e), + }; + + if success_create.len() + success_update.len() > 0 { + sign_vault_modified(true).await; + } + + return Ok(TrackFileActionResult::Done { + created: success_create, + updated: success_update, + synced: success_sync, + skipped: skipped_task, + }); + } + + if ctx.is_proc_on_remote() { + // Read tasks + let (created_task, update_task, sync_task): (Vec, Vec, Vec) = { + let mut mut_instance = instance.lock().await; + mut_instance.read_large_msgpack(1024u16).await? 
+ }; + + // Process create tasks + let mut success_create = Vec::::new(); + if can_modify_sheet { + success_create = match proc_create_tasks_remote( + &ctx, + instance.clone(), + &member_id, + &sheet_name, + created_task, + ) + .await + { + Ok(r) => match r { + CreateTaskResult::Success(relative_pathes) => relative_pathes, + _ => { + return Ok(TrackFileActionResult::CreateTaskFailed(r)); + } + }, + Err(e) => return Err(e), + }; + } + + // Process update tasks + let mut success_update = Vec::::new(); + if can_modify_sheet { + success_update = match proc_update_tasks_remote( + &ctx, + instance.clone(), + &member_id, + &sheet_name, + update_task, + arguments.file_update_info, + ) + .await + { + Ok(r) => match r { + UpdateTaskResult::Success(relative_pathes) => relative_pathes, + _ => { + return Ok(TrackFileActionResult::UpdateTaskFailed(r)); + } + }, + Err(e) => return Err(e), + }; + } + + // Process sync tasks + let success_sync = match proc_sync_tasks_remote( + &ctx, + instance.clone(), + &member_id, + &sheet_name, + sync_task, + ) + .await + { + Ok(r) => match r { + SyncTaskResult::Success(relative_pathes) => relative_pathes, + }, + Err(e) => return Err(e), + }; + + return Ok(TrackFileActionResult::Done { + created: success_create, + updated: success_update, + synced: success_sync, + skipped: Vec::new(), // The server doesn't know which files were skipped + }); + } + + Err(TcpTargetError::NoResult("No result.".to_string())) +} + +async fn proc_create_tasks_local( + ctx: &ActionContext, + instance: Arc>, + member_id: &MemberId, + sheet_name: &SheetName, + relative_paths: Vec, + print_infos: bool, +) -> Result { + let workspace = try_get_local_workspace(ctx)?; + let local_output = try_get_local_output(ctx)?; + let mut mut_instance = instance.lock().await; + let mut local_sheet = workspace.local_sheet(member_id, sheet_name).await?; + + if print_infos && relative_paths.len() > 0 { + local_println!(local_output, "Creating {} files...", relative_paths.len()); + } + + // 
Wait for remote detection of whether the sheet exists + let has_sheet = mut_instance.read_msgpack::().await?; + if !has_sheet { + return Ok(CreateTaskResult::SheetNotFound(sheet_name.clone())); + } + + // Wait for remote detection of whether the file exists + let (hasnt_duplicate, duplicate_path) = mut_instance.read_msgpack::<(bool, PathBuf)>().await?; + if !hasnt_duplicate { + return Ok(CreateTaskResult::CreateFileOnExistPath(duplicate_path)); + } + + let mut success_relative_pathes = Vec::new(); + + // Start sending files + for path in relative_paths { + let full_path = workspace.local_path().join(&path); + + // Send file + if mut_instance.write_file(&full_path).await.is_err() { + continue; + } + + // Read virtual file id and version + let (vfid, version, version_desc) = mut_instance + .read_msgpack::<( + VirtualFileId, + VirtualFileVersion, + VirtualFileVersionDescription, + )>() + .await?; + + // Add mapping to local sheet + let hash = sha1_hash::calc_sha1(&full_path, 2048).await.unwrap().hash; + let time = std::fs::metadata(&full_path)?.modified()?; + local_sheet.add_mapping( + &path.clone(), + LocalMappingMetadata::new( + hash, // hash_when_updated + time, // time_when_updated + std::fs::metadata(&full_path)?.len(), // size_when_updated + version_desc, // version_desc_when_updated + version, // version_when_updated + vfid, // mapping_vfid + time, // last_modifiy_check_itme + false, // last_modifiy_check_result + ), + )?; + + // Print success info + if print_infos { + local_println!(local_output, "+ {}", path.display()); + } + + success_relative_pathes.push(path); + } + + // Write local sheet + local_sheet.write().await?; + + Ok(CreateTaskResult::Success(success_relative_pathes)) +} + +async fn proc_create_tasks_remote( + ctx: &ActionContext, + instance: Arc>, + member_id: &MemberId, + sheet_name: &SheetName, + relative_paths: Vec, +) -> Result { + let vault = try_get_vault(ctx)?; + let mut mut_instance = instance.lock().await; + + // Sheet check + let Ok(mut 
sheet) = vault.sheet(sheet_name).await else { + // Sheet not found + mut_instance.write_msgpack(false).await?; + return Ok(CreateTaskResult::SheetNotFound(sheet_name.to_string())); + }; + mut_instance.write_msgpack(true).await?; + + // Duplicate create precheck + for path in relative_paths.iter() { + if sheet.mapping().contains_key(path) { + // Duplicate file + mut_instance.write_msgpack((false, path)).await?; + return Ok(CreateTaskResult::CreateFileOnExistPath(path.clone())); + } + } + mut_instance.write_msgpack((true, PathBuf::new())).await?; + + let mut success_relative_pathes = Vec::new(); + + // Start receiving files + for path in relative_paths { + // Read file and create virtual file + let Ok(vfid) = vault + .create_virtual_file_from_connection(&mut mut_instance, member_id) + .await + else { + continue; + }; + + // Record virtual file to sheet + let vf_meta = vault.virtual_file(&vfid)?.read_meta().await?; + sheet + .add_mapping(path.clone(), vfid.clone(), vf_meta.version_latest()) + .await?; + + // Tell client the virtual file id and version + mut_instance + .write_msgpack(( + vfid, + vf_meta.version_latest(), + vf_meta + .version_description(vf_meta.version_latest()) + .unwrap(), + )) + .await?; + + success_relative_pathes.push(path); + } + + sheet.persist().await?; + + Ok(CreateTaskResult::Success(success_relative_pathes)) +} + +async fn proc_update_tasks_local( + ctx: &ActionContext, + instance: Arc>, + member_id: &MemberId, + sheet_name: &SheetName, + relative_paths: Vec, + print_infos: bool, + file_update_info: HashMap, +) -> Result { + let workspace = try_get_local_workspace(ctx)?; + let local_output = try_get_local_output(ctx)?; + let mut mut_instance = instance.lock().await; + let mut local_sheet = workspace.local_sheet(member_id, sheet_name).await?; + + let mut success = Vec::new(); + + if print_infos && relative_paths.len() > 0 { + local_println!(local_output, "Updating {} files...", relative_paths.len()); + } + + for path in relative_paths.iter() 
{ + let Ok(mapping) = local_sheet.mapping_data(path) else { + // Is mapping not found, write empty + mut_instance.write_msgpack("".to_string()).await?; + continue; + }; + // Read and send file version + let Ok(_) = mut_instance + .write_msgpack(mapping.version_when_updated()) + .await + else { + continue; + }; + + // Read verify result + let verify_result: bool = mut_instance.read_msgpack().await?; + if !verify_result { + let reason = mut_instance.read_msgpack::().await?; + return Ok(UpdateTaskResult::VerifyFailed { + path: path.clone(), + reason: reason.clone(), + }); + } + + // Calc hash + let hash_result = match sha1_hash::calc_sha1(workspace.local_path().join(path), 2048).await + { + Ok(r) => r, + Err(_) => { + mut_instance.write_msgpack(false).await?; // Not Ready + continue; + } + }; + + // Get next version + let Some((next_version, description)) = file_update_info.get(path) else { + mut_instance.write_msgpack(false).await?; // Not Ready + continue; + }; + + // Write + mut_instance.write_msgpack(true).await?; // Ready + mut_instance.write_file(path).await?; + + // Read upload result + let upload_result: bool = mut_instance.read_msgpack().await?; + if upload_result { + // Success + let mapping_data_mut = local_sheet.mapping_data_mut(path).unwrap(); + let version = mapping_data_mut.version_when_updated().clone(); + mapping_data_mut.set_hash_when_updated(hash_result.hash); + mapping_data_mut.set_version_when_updated(next_version.clone()); + mapping_data_mut.set_version_desc_when_updated(VirtualFileVersionDescription { + creator: member_id.clone(), + description: description.clone(), + }); + mapping_data_mut.set_last_modifiy_check_result(false); // Mark file not modified + + // Write + local_sheet.write().await?; + + // Push path into success vec + success.push(path.clone()); + + // Print success info + if print_infos { + local_println!( + local_output, + "↑ {} ({} -> {})", + path.display(), + version, + next_version + ); + } + } + } + + 
Ok(UpdateTaskResult::Success(success)) +} + +async fn proc_update_tasks_remote( + ctx: &ActionContext, + instance: Arc>, + member_id: &MemberId, + sheet_name: &SheetName, + relative_paths: Vec, + file_update_info: HashMap, +) -> Result { + let vault = try_get_vault(ctx)?; + let mut mut_instance = instance.lock().await; + + let mut success = Vec::new(); + + for path in relative_paths.iter() { + // Read version + let Ok(version) = mut_instance.read_msgpack::().await else { + continue; + }; + if version.is_empty() { + continue; + } + + // Verify + let Some((next_version, description)) = file_update_info.get(path) else { + mut_instance.write_msgpack(false).await?; + let reason = VerifyFailReason::UpdateButNoDescription; + mut_instance.write_msgpack(reason.clone()).await?; + return Ok(UpdateTaskResult::VerifyFailed { + path: path.clone(), + reason, + }); // Sheet not found + }; + let Ok(mut sheet) = vault.sheet(sheet_name).await else { + mut_instance.write_msgpack(false).await?; + let reason = VerifyFailReason::SheetNotFound(sheet_name.clone()); + mut_instance.write_msgpack(reason.clone()).await?; + return Ok(UpdateTaskResult::VerifyFailed { + path: path.clone(), + reason, + }); // Sheet not found + }; + let Some(mapping_data) = sheet.mapping_mut().get_mut(path) else { + mut_instance.write_msgpack(false).await?; + let reason = VerifyFailReason::MappingNotFound; + mut_instance.write_msgpack(reason.clone()).await?; + return Ok(UpdateTaskResult::VerifyFailed { + path: path.clone(), + reason, + }); // Mapping not found + }; + let Ok(vf) = vault.virtual_file(&mapping_data.id) else { + mut_instance.write_msgpack(false).await?; + let reason = VerifyFailReason::VirtualFileNotFound(mapping_data.id.clone()); + mut_instance.write_msgpack(reason.clone()).await?; + return Ok(UpdateTaskResult::VerifyFailed { + path: path.clone(), + reason, + }); // Virtual file not found + }; + let Ok(vf_metadata) = vf.read_meta().await else { + mut_instance.write_msgpack(false).await?; + let reason 
= VerifyFailReason::VirtualFileReadFailed(mapping_data.id.clone()); + mut_instance.write_msgpack(reason.clone()).await?; + return Ok(UpdateTaskResult::VerifyFailed { + path: path.clone(), + reason, + }); // Read virtual file metadata failed + }; + if vf_metadata.versions().contains(next_version) { + mut_instance.write_msgpack(false).await?; + let reason = VerifyFailReason::VersionAlreadyExist(version); + mut_instance.write_msgpack(reason.clone()).await?; + return Ok(UpdateTaskResult::VerifyFailed { + path: path.clone(), + reason, + }); // VersionAlreadyExist + } + if vf_metadata.hold_member() != member_id { + mut_instance.write_msgpack(false).await?; + let reason = VerifyFailReason::NotHeld; + mut_instance.write_msgpack(reason.clone()).await?; + return Ok(UpdateTaskResult::VerifyFailed { + path: path.clone(), + reason, + }); // Member not held it + }; + if vf_metadata.version_latest() != version { + mut_instance.write_msgpack(false).await?; + let reason = + VerifyFailReason::VersionDismatch(version.clone(), vf_metadata.version_latest()); + mut_instance.write_msgpack(reason.clone()).await?; + return Ok(UpdateTaskResult::VerifyFailed { + path: path.clone(), + reason, + }); // Version does not match + }; + mut_instance.write_msgpack(true).await?; // Verified + + // Read if local ready + let ready: bool = mut_instance.read_msgpack().await?; + if !ready { + continue; + } + + // Read and update virtual file + match vault + .update_virtual_file_from_connection( + &mut mut_instance, + member_id, + &mapping_data.id, + next_version, + VirtualFileVersionDescription { + creator: member_id.clone(), + description: description.clone(), + }, + ) + .await + { + Ok(_) => { + // Update version to sheet + mapping_data.version = next_version.clone(); + + // Persist + sheet.persist().await?; + + success.push(path.clone()); + mut_instance.write_msgpack(true).await?; // Success + } + Err(e) => { + mut_instance.write_msgpack(false).await?; // Fail + return Err(e.into()); + } + } + } + + 
Ok(UpdateTaskResult::Success(success)) +} + +type SyncVersionInfo = Option<( + VirtualFileVersion, + VirtualFileVersionDescription, + VirtualFileId, +)>; + +async fn proc_sync_tasks_local( + ctx: &ActionContext, + instance: Arc>, + member_id: &MemberId, + sheet_name: &SheetName, + relative_paths: Vec, + print_infos: bool, +) -> Result { + let workspace = try_get_local_workspace(ctx)?; + let local_output = try_get_local_output(ctx)?; + let mut mut_instance = instance.lock().await; + let mut success: Vec = Vec::new(); + + if print_infos && relative_paths.len() > 0 { + local_println!(local_output, "Syncing {} files...", relative_paths.len()); + } + + for path in relative_paths { + let Some((version, description, vfid)) = + mut_instance.read_msgpack::().await? + else { + continue; + }; + + // Generate a temp path + let temp_path = workspace + .local_path() + .join(CLIENT_FILE_TEMP_FILE.replace(TEMP_NAME, &VaultUuid::new_v4().to_string())); + + let copy_to = workspace.local_path().join(&path); + + // Read file + match mut_instance.read_file(&temp_path).await { + Ok(_) => { + if !temp_path.exists() { + continue; + } + } + Err(_) => { + continue; + } + } + + // Calc hash + let new_hash = match calc_sha1(&temp_path, 2048).await { + Ok(hash) => hash, + Err(_) => { + continue; + } + }; + + // Calc size + let new_size = match fs::metadata(&temp_path).await.map(|meta| meta.len()) { + Ok(size) => size, + Err(_) => { + continue; + } + }; + + // Write file + if copy_to.exists() { + if let Err(_) = fs::remove_file(©_to).await { + continue; + } + } else { + // Not exist, create directory + if let Some(path) = copy_to.clone().parent() { + fs::create_dir_all(path).await?; + } + } + if let Err(_) = fs::rename(&temp_path, ©_to).await { + continue; + } + + // Modify local sheet + let mut local_sheet = match workspace.local_sheet(member_id, sheet_name).await { + Ok(sheet) => sheet, + Err(_) => { + continue; + } + }; + + // Get or create mapping + let mapping = match 
local_sheet.mapping_data_mut(&path) { + Ok(m) => m, + Err(_) => { + // First download + let mut data = LocalMappingMetadata::default(); + data.set_mapping_vfid(vfid); + if let Err(_) = local_sheet.add_mapping(&path, data) { + continue; + } + match local_sheet.mapping_data_mut(&path) { + Ok(m) => m, + Err(_) => { + continue; + } + } + } + }; + + let time = SystemTime::now(); + mapping.set_hash_when_updated(new_hash.hash); + mapping.set_last_modifiy_check_result(false); // Mark not modified + mapping.set_version_when_updated(version); + mapping.set_version_desc_when_updated(description); + mapping.set_size_when_updated(new_size); + mapping.set_time_when_updated(time); + mapping.set_last_modifiy_check_time(time); + if let Err(_) = local_sheet.write().await { + continue; + } + + success.push(path.clone()); + + // Print success info + if print_infos { + local_println!(local_output, "↓ {}", path.display()); + } + } + Ok(SyncTaskResult::Success(success)) +} + +async fn proc_sync_tasks_remote( + ctx: &ActionContext, + instance: Arc>, + _member_id: &MemberId, + sheet_name: &SheetName, + relative_paths: Vec, +) -> Result { + let vault = try_get_vault(ctx)?; + let sheet = vault.sheet(sheet_name).await?; + let mut mut_instance = instance.lock().await; + let mut success: Vec = Vec::new(); + + for path in relative_paths { + // Get mapping + let Some(mapping) = sheet.mapping().get(&path) else { + mut_instance.write_msgpack::(None).await?; // (ready) + continue; + }; + // Get virtual file + let Ok(vf) = vault.virtual_file(&mapping.id) else { + mut_instance.write_msgpack::(None).await?; // (ready) + continue; + }; + // Read metadata and get real path + let vf_meta = &vf.read_meta().await?; + let real_path = vault.virtual_file_real_path(&mapping.id, &vf_meta.version_latest()); + let version = vf_meta.version_latest(); + mut_instance + .write_msgpack::(Some(( + version.clone(), + vf_meta.version_description(version).cloned().unwrap_or( + VirtualFileVersionDescription { + creator: 
MemberId::default(), + description: "".to_string(), + }, + ), + vf.id(), + ))) + .await?; // (ready) + if mut_instance.write_file(real_path).await.is_err() { + continue; + } else { + success.push(path); + } + } + Ok(SyncTaskResult::Success(success)) +} diff --git a/actions/src/actions/user_actions.rs b/actions/src/actions/user_actions.rs new file mode 100644 index 0000000..dc0f71a --- /dev/null +++ b/actions/src/actions/user_actions.rs @@ -0,0 +1,144 @@ +use std::path::PathBuf; + +use action_system::{action::ActionContext, macros::action_gen}; +use serde::{Deserialize, Serialize}; +use tcp_connection::error::TcpTargetError; +use vcs_data::data::local::vault_modified::sign_vault_modified; + +use crate::actions::{ + auth_member, check_connection_instance, get_current_sheet_name, try_get_vault, +}; + +#[derive(Serialize, Deserialize)] +pub enum ChangeVirtualFileEditRightResult { + // Success + Success { + success_hold: Vec, + success_throw: Vec, + }, + + // Fail + AuthorizeFailed(String), + DoNothing, +} + +#[derive(Serialize, Deserialize, PartialEq, Clone)] +pub enum EditRightChangeBehaviour { + Hold, + Throw, +} + +/// The server part only checks: +/// 1. Whether the file exists +/// 2. 
Whether the file has no holder +/// If both conditions are met, send success information to the local client +/// +/// All version checks are handled locally +#[action_gen] +pub async fn change_virtual_file_edit_right_action( + ctx: ActionContext, + arguments: (Vec<(PathBuf, EditRightChangeBehaviour)>, bool), +) -> Result { + let instance = check_connection_instance(&ctx)?; + let (relative_paths, print_info) = arguments; + + // Auth Member + let (member_id, is_host_mode) = match auth_member(&ctx, instance).await { + Ok(id) => id, + Err(e) => { + return Ok(ChangeVirtualFileEditRightResult::AuthorizeFailed( + e.to_string(), + )); + } + }; + + // Check sheet + let (sheet_name, _is_ref_sheet) = + get_current_sheet_name(&ctx, instance, &member_id, true).await?; + + if ctx.is_proc_on_remote() { + let mut mut_instance = instance.lock().await; + let mut success_hold: Vec = Vec::new(); + let mut success_throw: Vec = Vec::new(); + let vault = try_get_vault(&ctx)?; + for (path, behaviour) in relative_paths { + let Ok(sheet) = vault.sheet(&sheet_name).await else { + continue; + }; + let Some(mapping) = sheet.mapping().get(&path) else { + continue; + }; + let Ok(has_edit_right) = vault + .has_virtual_file_edit_right(&member_id, &mapping.id) + .await + else { + continue; + }; + + // Hold file + if !has_edit_right && behaviour == EditRightChangeBehaviour::Hold { + match vault + .grant_virtual_file_edit_right(&member_id, &mapping.id) + .await + { + Ok(_) => { + success_hold.push(path.clone()); + } + Err(_) => continue, + } + } else + // Throw file + if (has_edit_right || is_host_mode) + && behaviour == EditRightChangeBehaviour::Throw + { + match vault.revoke_virtual_file_edit_right(&mapping.id).await { + Ok(_) => { + success_throw.push(path.clone()); + } + Err(_) => continue, + } + } + } + + // Write success list + mut_instance + .write_large_msgpack::<(Vec, Vec)>( + (success_hold.clone(), success_throw.clone()), + 4096u16, + ) + .await?; + return 
Ok(ChangeVirtualFileEditRightResult::Success { + success_hold, + success_throw, + }); + } + + if ctx.is_proc_on_local() { + let mut mut_instance = instance.lock().await; + let (success_hold, success_throw) = mut_instance + .read_large_msgpack::<(Vec, Vec)>(4096u16) + .await?; + + // If there are any successful items, mark as modified + if success_hold.len() + success_throw.len() > 0 { + sign_vault_modified(true).await; + } + + // Print info + if print_info { + success_hold + .iter() + .for_each(|s| println!("--> {}", s.display())); + success_throw + .iter() + .for_each(|s| println!("<-- {}", s.display())); + } + + return Ok(ChangeVirtualFileEditRightResult::Success { + success_hold, + success_throw, + }); + } + + Ok(ChangeVirtualFileEditRightResult::DoNothing) +} diff --git a/actions/src/actions/vault_actions.rs b/actions/src/actions/vault_actions.rs new file mode 100644 index 0000000..8b13789 --- /dev/null +++ b/actions/src/actions/vault_actions.rs @@ -0,0 +1 @@ + diff --git a/actions/src/connection.rs b/actions/src/connection.rs new file mode 100644 index 0000000..918f93c --- /dev/null +++ b/actions/src/connection.rs @@ -0,0 +1,3 @@ +pub mod action_service; +pub mod error; +pub mod protocol; diff --git a/actions/src/connection/action_service.rs b/actions/src/connection/action_service.rs new file mode 100644 index 0000000..f137126 --- /dev/null +++ b/actions/src/connection/action_service.rs @@ -0,0 +1,221 @@ +use std::{ + env::set_current_dir, + net::SocketAddr, + path::PathBuf, + sync::Arc, + time::{Duration, Instant}, +}; + +use action_system::{action::ActionContext, action_pool::ActionPool}; +use cfg_file::config::ConfigFile; +use log::{debug, error, info, warn}; +use tcp_connection::{error::TcpTargetError, instance::ConnectionInstance}; +use tokio::{ + net::{TcpListener, TcpStream}, + select, signal, spawn, + sync::mpsc, +}; +use vcs_data::data::vault::{Vault, config::VaultConfig}; + +use crate::{ + connection::protocol::RemoteActionInvoke, 
registry::server_registry::server_action_pool, +}; + +// Start the server with a Vault using the specified directory +pub async fn server_entry( + vault_path: impl Into, + port_override: u16, +) -> Result<(), TcpTargetError> { + let vault_path = vault_path.into(); + + // Set to vault path + set_current_dir(&vault_path).map_err(|e| TcpTargetError::Io(e.to_string()))?; + + // Read the vault cfg + let vault_cfg = VaultConfig::read().await?; + + // Create TCPListener + let listener = create_tcp_listener(&vault_cfg, port_override).await?; + + // Initialize the vault + let vault: Arc = init_vault(vault_cfg, vault_path).await?; + + // Lock the vault + vault + .lock() + .map_err(|e| TcpTargetError::Locked(e.to_string()))?; + + // Create ActionPool + let action_pool: Arc = Arc::new(server_action_pool()); + + // Start the server + let (_shutdown_rx, future) = build_server_future(vault.clone(), action_pool.clone(), listener); + future.await?; // Start and block until shutdown + + // Unlock the vault + vault.unlock()?; + + Ok(()) +} + +async fn create_tcp_listener( + cfg: &VaultConfig, + port_override: u16, +) -> Result { + let local_bind_addr = cfg.server_config().local_bind(); + let port = if port_override > 0 { + port_override // Override -> PORT > 0 + } else { + cfg.server_config().port() // Default -> Port = 0 + }; + let bind_port = port; + let sock_addr = SocketAddr::new(*local_bind_addr, bind_port); + let listener = TcpListener::bind(sock_addr).await?; + + Ok(listener) +} + +async fn init_vault(cfg: VaultConfig, path: PathBuf) -> Result, TcpTargetError> { + // Init and create the vault + let Some(vault) = Vault::init(cfg, path) else { + return Err(TcpTargetError::NotFound("Vault not found".to_string())); + }; + let vault: Arc = Arc::new(vault); + + Ok(vault) +} + +fn build_server_future( + vault: Arc, + action_pool: Arc, + listener: TcpListener, +) -> ( + mpsc::Sender<()>, + impl std::future::Future>, +) { + let (tx, mut rx) = mpsc::channel::(100); + let (shutdown_tx, 
mut shutdown_rx) = mpsc::channel::<()>(1); + let mut active_connections = 0; + let mut shutdown_requested = false; + + // Spawn task to handle Ctrl+C with rapid exit detection + let shutdown_tx_clone = shutdown_tx.clone(); + spawn(async move { + let mut ctrl_c_count = 0; + let mut last_ctrl_c_time = Instant::now(); + + while let Ok(()) = signal::ctrl_c().await { + let now = Instant::now(); + + // Reset counter if more than 5 seconds have passed + if now.duration_since(last_ctrl_c_time) > Duration::from_secs(5) { + ctrl_c_count = 0; + } + + ctrl_c_count += 1; + last_ctrl_c_time = now; + + let _ = shutdown_tx_clone.send(()).await; + + // If 3 Ctrl+C within 5 seconds, exit immediately + if ctrl_c_count >= 3 { + info!("Shutdown. (3/3)"); + std::process::exit(0); + } else { + info!("Ctrl + C to force shutdown. ({} / 3)", ctrl_c_count); + } + } + }); + + let future = async move { + loop { + select! { + // Accept new connections + accept_result = listener.accept(), if !shutdown_requested => { + match accept_result { + Ok((stream, _addr)) => { + debug!("New connection. (now {})", active_connections); + let _ = tx.send(1).await; + + let vault_clone = vault.clone(); + let action_pool_clone = action_pool.clone(); + let tx_clone = tx.clone(); + + spawn(async move { + process_connection(stream, vault_clone, action_pool_clone).await; + debug!("A connection closed. (now {})", active_connections); + let _ = tx_clone.send(-1).await; + }); + } + Err(_) => { + continue; + } + } + } + + // Handle connection count updates + Some(count_change) = rx.recv() => { + active_connections = (active_connections as i32 + count_change) as usize; + + // Check if we should shutdown after all connections are done + if shutdown_requested && active_connections == 0 { + break; + } + } + + // Handle shutdown signal + _ = shutdown_rx.recv() => { + shutdown_requested = true; + // If no active connections, break immediately + if active_connections == 0 { + info!("No active connections. 
Shutting down."); + break; + } else { + warn!("Cannot shutdown while active connections exist! ({} active)", active_connections); + } + } + } + } + + Ok(()) + }; + + (shutdown_tx, future) +} + +async fn process_connection(stream: TcpStream, vault: Arc, action_pool: Arc) { + // Setup connection instance + let mut instance = ConnectionInstance::from(stream); + + // Read action name and action arguments + let msg = match instance.read_msgpack::().await { + Ok(msg) => msg, + Err(e) => { + error!("Failed to read action message: {}", e); + return; + } + }; + + // Build context + let ctx: ActionContext = ActionContext::remote().insert_instance(instance); + + // Insert vault into context + let ctx = ctx.with_arc_data(vault); + + info!( + "Process action `{}` with argument `{}`", + msg.action_name, msg.action_args_json + ); + + // Process action + let result = action_pool + .process_json(&msg.action_name, ctx, msg.action_args_json) + .await; + + match result { + Ok(_result_json) => {} + Err(e) => { + warn!("Failed to process action `{}`: {}", msg.action_name, e); + } + } +} diff --git a/actions/src/connection/error.rs b/actions/src/connection/error.rs new file mode 100644 index 0000000..241c16e --- /dev/null +++ b/actions/src/connection/error.rs @@ -0,0 +1,14 @@ +use std::io; +use thiserror::Error; + +#[derive(Error, Debug, Clone)] +pub enum ConnectionError { + #[error("I/O error: {0}")] + Io(String), +} + +impl From for ConnectionError { + fn from(error: io::Error) -> Self { + ConnectionError::Io(error.to_string()) + } +} diff --git a/actions/src/connection/protocol.rs b/actions/src/connection/protocol.rs new file mode 100644 index 0000000..2cebe79 --- /dev/null +++ b/actions/src/connection/protocol.rs @@ -0,0 +1,7 @@ +use serde::{Deserialize, Serialize}; + +#[derive(Default, Clone, Serialize, Deserialize)] +pub struct RemoteActionInvoke { + pub action_name: String, + pub action_args_json: String, +} diff --git a/actions/src/lib.rs b/actions/src/lib.rs new file mode 100644 
index 0000000..2f7cbe4 --- /dev/null +++ b/actions/src/lib.rs @@ -0,0 +1,3 @@ +pub mod actions; +pub mod connection; +pub mod registry; diff --git a/actions/src/registry.rs b/actions/src/registry.rs new file mode 100644 index 0000000..ceec1a1 --- /dev/null +++ b/actions/src/registry.rs @@ -0,0 +1,2 @@ +pub mod client_registry; +pub mod server_registry; diff --git a/actions/src/registry/client_registry.rs b/actions/src/registry/client_registry.rs new file mode 100644 index 0000000..05cb7f1 --- /dev/null +++ b/actions/src/registry/client_registry.rs @@ -0,0 +1,123 @@ +use std::sync::Arc; + +use action_system::{action::ActionContext, action_pool::ActionPool}; +use cfg_file::config::ConfigFile; +use tcp_connection::error::TcpTargetError; +use vcs_data::data::{ + local::{LocalWorkspace, config::LocalConfig}, + user::UserDirectory, +}; + +use crate::{ + actions::{ + local_actions::{ + register_set_upstream_vault_action, register_update_to_latest_info_action, + }, + sheet_actions::{ + register_drop_sheet_action, register_edit_mapping_action, register_make_sheet_action, + register_merge_share_mapping_action, register_share_mapping_action, + }, + track_action::register_track_file_action, + user_actions::register_change_virtual_file_edit_right_action, + }, + connection::protocol::RemoteActionInvoke, +}; + +fn register_actions(pool: &mut ActionPool) { + // Pool register here + + // Local Actions + register_set_upstream_vault_action(pool); + register_update_to_latest_info_action(pool); + + // Sheet Actions + register_make_sheet_action(pool); + register_drop_sheet_action(pool); + register_edit_mapping_action(pool); + + // Share / Merge Share Actions + register_share_mapping_action(pool); + register_merge_share_mapping_action(pool); + + // Track Action + register_track_file_action(pool); + + // User Actions + register_change_virtual_file_edit_right_action(pool); +} + +pub fn client_action_pool() -> ActionPool { + // Create pool + let mut pool = ActionPool::new(); + + // Register 
actions + register_actions(&mut pool); + + // Add process events + pool.set_on_proc_begin(|ctx, args| Box::pin(on_proc_begin(ctx, args))); + + // Return + pool +} + +async fn on_proc_begin( + ctx: &mut ActionContext, + _args: &(dyn std::any::Any + Send + Sync), +) -> Result<(), TcpTargetError> { + // Is ctx remote + let is_remote = ctx.is_remote_action(); + + // Action name and arguments + let action_name = ctx.action_name().to_string(); + let action_args_json = ctx.action_args_json().clone(); + + // Insert LocalWorkspace Arc + let Ok(local_config) = LocalConfig::read().await else { + return Err(TcpTargetError::NotFound( + "The current directory does not have a local workspace".to_string(), + )); + }; + let local_workspace = match LocalWorkspace::init_current_dir(local_config) { + Some(workspace) => workspace, + None => { + return Err(TcpTargetError::NotFound( + "Failed to initialize local workspace.".to_string(), + )); + } + }; + let local_workspace_arc = Arc::new(local_workspace); + ctx.insert_arc_data(local_workspace_arc); + + // Insert UserDirectory Arc + let Some(user_directory) = UserDirectory::current_cfg_dir() else { + return Err(TcpTargetError::NotFound( + "The user directory does not exist.".to_string(), + )); + }; + + let user_directory_arc = Arc::new(user_directory); + ctx.insert_arc_data(user_directory_arc); + + // Get instance + let Some(instance) = ctx.instance() else { + return Err(TcpTargetError::Unsupported( + "Missing ConnectionInstance in current context, this ActionPool does not support this call" + .to_string())); + }; + + // If it's remote, invoke action at server + if is_remote { + // Build protocol message + let msg = RemoteActionInvoke { + action_name, + action_args_json, + }; + + // Send + let mut instance = instance.lock().await; + instance.write_msgpack(&msg).await?; + } + + // Return OK, wait for client to execute Action locally + Ok(()) +} diff --git a/actions/src/registry/server_registry.rs b/actions/src/registry/server_registry.rs 
new file mode 100644 index 0000000..356e640 --- /dev/null +++ b/actions/src/registry/server_registry.rs @@ -0,0 +1,36 @@ +use action_system::action_pool::ActionPool; + +use crate::actions::{ + local_actions::{register_set_upstream_vault_action, register_update_to_latest_info_action}, + sheet_actions::{ + register_drop_sheet_action, register_edit_mapping_action, register_make_sheet_action, + register_merge_share_mapping_action, register_share_mapping_action, + }, + track_action::register_track_file_action, + user_actions::register_change_virtual_file_edit_right_action, +}; + +pub fn server_action_pool() -> ActionPool { + let mut pool = ActionPool::new(); + + // Local Actions + register_set_upstream_vault_action(&mut pool); + register_update_to_latest_info_action(&mut pool); + + // Sheet Actions + register_make_sheet_action(&mut pool); + register_drop_sheet_action(&mut pool); + register_edit_mapping_action(&mut pool); + + // Share / Merge Share Actions + register_share_mapping_action(&mut pool); + register_merge_share_mapping_action(&mut pool); + + // Track Action + register_track_file_action(&mut pool); + + // User Actions + register_change_virtual_file_edit_right_action(&mut pool); + + pool +} diff --git a/crates/system_action/Cargo.toml b/crates/system_action/Cargo.toml deleted file mode 100644 index b4b98aa..0000000 --- a/crates/system_action/Cargo.toml +++ /dev/null @@ -1,15 +0,0 @@ -[package] -name = "action_system" -edition = "2024" -version.workspace = true - -[dependencies] -tcp_connection = { path = "../utils/tcp_connection" } -action_system_macros = { path = "action_macros" } - -# Serialization -serde = { version = "1.0.228", features = ["derive"] } -serde_json = "1.0.145" - -# Async & Networking -tokio = "1.48.0" diff --git a/crates/system_action/action_macros/Cargo.toml b/crates/system_action/action_macros/Cargo.toml deleted file mode 100644 index ee7d93b..0000000 --- a/crates/system_action/action_macros/Cargo.toml +++ /dev/null @@ -1,19 +0,0 @@ 
-[package] -name = "action_system_macros" -edition = "2024" -version.workspace = true - -[lib] -proc-macro = true - -[dependencies] -tcp_connection = { path = "../../utils/tcp_connection" } -string_proc = { path = "../../utils/string_proc" } - -syn = { version = "2.0", features = ["full", "extra-traits"] } -quote = "1.0" -proc-macro2 = "1.0" - -# Serialization -serde = { version = "1.0.228", features = ["derive"] } -serde_json = "1.0.145" diff --git a/crates/system_action/action_macros/src/lib.rs b/crates/system_action/action_macros/src/lib.rs deleted file mode 100644 index e6616b4..0000000 --- a/crates/system_action/action_macros/src/lib.rs +++ /dev/null @@ -1,248 +0,0 @@ -use proc_macro::TokenStream; -use quote::quote; -use syn::{ItemFn, parse_macro_input}; - -/// # Macro - Generate Action -/// -/// When annotating a function with the `#[action_gen]` macro in the following format, it generates boilerplate code for client-server interaction -/// -/// ```ignore -/// #[action_gen] -/// async fn action_name(ctx: ActionContext, argument: YourArgument) -> Result { -/// // Write your client and server logic here -/// if ctx.is_proc_on_remote() { -/// // Server logic -/// } -/// if ctx.is_proc_on_local() { -/// // Client logic -/// } -/// } -/// ``` -/// -/// > WARNING: -/// > For Argument and Result types, the `action_gen` macro only supports types that derive serde's Serialize and Deserialize -/// -/// ## Generated Code -/// -/// `action_gen` will generate the following: -/// -/// 1. Complete implementation of Action -/// 2. 
Process / Register method -/// -/// ## How to use -/// -/// You can use your generated method as follows -/// -/// ```ignore -/// async fn main() -> Result<(), TcpTargetError> { -/// -/// // Prepare your argument -/// let args = YourArgument::default(); -/// -/// // Create a pool and context -/// let mut pool = ActionPool::new(); -/// let ctx = ActionContext::local(); -/// -/// // Register your action -/// register_your_action(&mut pool); -/// -/// // Process your action -/// proc_your_action(&pool, ctx, args).await?; -/// -/// Ok(()) -/// } -/// ``` -#[proc_macro_attribute] -pub fn action_gen(attr: TokenStream, item: TokenStream) -> TokenStream { - let input_fn = parse_macro_input!(item as ItemFn); - let is_local = if attr.is_empty() { - false - } else { - let attr_str = attr.to_string(); - attr_str == "local" || attr_str.contains("local") - }; - - generate_action_struct(input_fn, is_local).into() -} - -fn generate_action_struct(input_fn: ItemFn, _is_local: bool) -> proc_macro2::TokenStream { - let fn_vis = &input_fn.vis; - let fn_sig = &input_fn.sig; - let fn_name = &fn_sig.ident; - let fn_block = &input_fn.block; - - validate_function_signature(fn_sig); - - let (context_param_name, arg_param_name, arg_type, return_type) = - extract_parameters_and_types(fn_sig); - - let struct_name = quote::format_ident!("{}", convert_to_pascal_case(&fn_name.to_string())); - - let action_name_ident = &fn_name; - - let register_this_action = quote::format_ident!("register_{}", action_name_ident); - let proc_this_action = quote::format_ident!("proc_{}", action_name_ident); - - quote! 
{ - #[derive(Debug, Clone, Default)] - #fn_vis struct #struct_name; - - impl action_system::action::Action<#arg_type, #return_type> for #struct_name { - fn action_name() -> &'static str { - Box::leak(string_proc::snake_case!(stringify!(#action_name_ident)).into_boxed_str()) - } - - fn is_remote_action() -> bool { - !#_is_local - } - - async fn process(#context_param_name: action_system::action::ActionContext, #arg_param_name: #arg_type) -> Result<#return_type, tcp_connection::error::TcpTargetError> { - #fn_block - } - } - - #fn_vis fn #register_this_action(pool: &mut action_system::action_pool::ActionPool) { - pool.register::<#struct_name, #arg_type, #return_type>(); - } - - #fn_vis async fn #proc_this_action( - pool: &action_system::action_pool::ActionPool, - mut ctx: action_system::action::ActionContext, - #arg_param_name: #arg_type - ) -> Result<#return_type, tcp_connection::error::TcpTargetError> { - ctx.set_is_remote_action(!#_is_local); - let args_json = serde_json::to_string(&#arg_param_name) - .map_err(|e| { - tcp_connection::error::TcpTargetError::Serialization(e.to_string()) - })?; - let result_json = pool.process_json( - Box::leak(string_proc::snake_case!(stringify!(#action_name_ident)).into_boxed_str()), - ctx, - args_json, - ).await?; - serde_json::from_str(&result_json) - .map_err(|e| { - tcp_connection::error::TcpTargetError::Serialization(e.to_string()) - }) - } - - #[allow(dead_code)] - #[deprecated = "This function is used by #[action_gen] as a template."] - #[doc = "Template function for #[[action_gen]] - do not call directly."] - #[doc = "Use the generated struct instead."] - #[doc = ""] - #[doc = "Register the action to the pool."] - #[doc = "```ignore"] - #[doc = "register_your_func(&mut pool);"] - #[doc = "```"] - #[doc = ""] - #[doc = "Process the action at the pool."] - #[doc = "```ignore"] - #[doc = "let result = proc_your_func(&pool, ctx, arg).await?;"] - #[doc = "```"] - #fn_vis #fn_sig #fn_block - } -} - -fn 
validate_function_signature(fn_sig: &syn::Signature) { - if fn_sig.asyncness.is_none() { - panic!("Expected async function for Action, but found synchronous function"); - } - - if fn_sig.inputs.len() != 2 { - panic!( - "Expected exactly 2 arguments for Action function: ctx: ActionContext and arg: T, but found {} arguments", - fn_sig.inputs.len() - ); - } - - let return_type = match &fn_sig.output { - syn::ReturnType::Type(_, ty) => ty, - _ => panic!( - "Expected Action function to return Result, but found no return type" - ), - }; - - if let syn::Type::Path(type_path) = return_type.as_ref() { - if let Some(segment) = type_path.path.segments.last() - && segment.ident != "Result" - { - panic!( - "Expected Action function to return Result, but found different return type" - ); - } - } else { - panic!( - "Expected Action function to return Result, but found no return type" - ); - } -} - -fn convert_to_pascal_case(s: &str) -> String { - s.split('_') - .map(|word| { - let mut chars = word.chars(); - match chars.next() { - None => String::new(), - Some(first) => first.to_uppercase().collect::() + chars.as_str(), - } - }) - .collect() -} - -fn extract_parameters_and_types( - fn_sig: &syn::Signature, -) -> ( - proc_macro2::TokenStream, - proc_macro2::TokenStream, - proc_macro2::TokenStream, - proc_macro2::TokenStream, -) { - let mut inputs = fn_sig.inputs.iter(); - - let context_param = match inputs.next() { - Some(syn::FnArg::Typed(pat_type)) => { - let pat = &pat_type.pat; - quote::quote!(#pat) - } - _ => { - panic!("Expected the first argument to be a typed parameter, but found something else") - } - }; - - let arg_param = match inputs.next() { - Some(syn::FnArg::Typed(pat_type)) => { - let pat = &pat_type.pat; - let ty = &pat_type.ty; - (quote::quote!(#pat), quote::quote!(#ty)) - } - _ => { - panic!("Expected the second argument to be a typed parameter, but found something else") - } - }; - - let (arg_param_name, arg_type) = arg_param; - - let return_type = match 
&fn_sig.output { - syn::ReturnType::Type(_, ty) => { - if let syn::Type::Path(type_path) = ty.as_ref() { - if let syn::PathArguments::AngleBracketed(args) = - &type_path.path.segments.last().unwrap().arguments - { - if let Some(syn::GenericArgument::Type(ty)) = args.args.first() { - quote::quote!(#ty) - } else { - panic!("Expected to extract the success type of Result, but failed"); - } - } else { - panic!("Expected Result type to have generic parameters, but found none"); - } - } else { - panic!("Expected return type to be Result, but found different type"); - } - } - _ => panic!("Expected function to have return type, but found none"), - }; - - (context_param, arg_param_name, arg_type, return_type) -} diff --git a/crates/system_action/src/action.rs b/crates/system_action/src/action.rs deleted file mode 100644 index 62425ff..0000000 --- a/crates/system_action/src/action.rs +++ /dev/null @@ -1,244 +0,0 @@ -use serde::{Serialize, de::DeserializeOwned}; -use std::any::{Any, TypeId}; -use std::collections::HashMap; -use std::sync::Arc; -use tcp_connection::{error::TcpTargetError, instance::ConnectionInstance}; -use tokio::{net::TcpStream, sync::Mutex}; - -/// # Trait - Action -/// -/// A trait used to describe the interaction pattern between client and server -/// -/// ## Generics -/// -/// Args: Represents the parameter type required for this action -/// -/// Return: Represents the return type of this action -/// -/// The above generics must implement serde's Serialize and DeserializeOwned traits, -/// and must be sendable between threads -/// -/// ## Implementation -/// -/// ```ignore -/// pub trait Action -/// where -/// Args: Serialize + DeserializeOwned + Send, -/// Return: Serialize + DeserializeOwned + Send, -/// { -/// /// Name, used to inform the server which action to execute -/// fn action_name() -> &'static str; -/// -/// /// Whether it's a local Action, used to inform the system if it only runs locally -/// fn is_remote_action() -> bool; -/// -/// /// 
Action processing logic -/// fn process( -/// context: ActionContext, -/// args: Args, -/// ) -> impl std::future::Future> + Send; -/// } -/// ``` -pub trait Action -where - Args: Serialize + DeserializeOwned + Send, - Return: Serialize + DeserializeOwned + Send, -{ - fn action_name() -> &'static str; - - fn is_remote_action() -> bool; - - fn process( - context: ActionContext, - args: Args, - ) -> impl std::future::Future> + Send; -} - -/// # Struct - ActionContext -/// -/// Used to inform the Action about the current execution environment -/// -/// ## Creation -/// -/// Create ActionContext using the following methods: -/// -/// ```ignore -/// -/// // The instance here is the connection instance passed from external sources for communicating with the server -/// // For specific usage, please refer to the `/crates/utils/tcp_connection` section -/// -/// fn init_local_action_ctx(instance: ConnectionInstance) { -/// // Create context and specify execution on local -/// let mut ctx = ActionContext::local(); -/// } -/// -/// fn init_remote_action_ctx(instance: ConnectionInstance) { -/// // Create context and specify execution on remote -/// let mut ctx = ActionContext::remote(); -/// } -#[derive(Default)] -pub struct ActionContext { - /// Whether the action is executed locally or remotely - proc_on_local: bool, - - /// Whether the action being executed in the current context is a remote action - is_remote_action: bool, - - /// The name of the action being executed - action_name: String, - - /// The JSON-serialized arguments for the action - action_args_json: String, - - /// The connection instance in the current context, - instance: Option>>, - - /// Generic data storage for arbitrary types - data: HashMap>, -} - -impl ActionContext { - /// Generate local context - pub fn local() -> Self { - ActionContext { - proc_on_local: true, - ..Default::default() - } - } - - /// Generate remote context - pub fn remote() -> Self { - ActionContext { - proc_on_local: false, - 
..Default::default() - } - } - - /// Build connection instance from TcpStream - pub fn build_instance(mut self, stream: TcpStream) -> Self { - self.instance = Some(Arc::new(Mutex::new(ConnectionInstance::from(stream)))); - self - } - - /// Insert connection instance into context - pub fn insert_instance(mut self, instance: ConnectionInstance) -> Self { - self.instance = Some(Arc::new(Mutex::new(instance))); - self - } - - /// Pop connection instance from context - pub fn pop_instance(&mut self) -> Option>> { - self.instance.take() - } -} - -impl ActionContext { - /// Whether the action is executed locally - pub fn is_proc_on_local(&self) -> bool { - self.proc_on_local - } - - /// Whether the action is executed remotely - pub fn is_proc_on_remote(&self) -> bool { - !self.proc_on_local - } - - /// Whether the action being executed in the current context is a remote action - pub fn is_remote_action(&self) -> bool { - self.is_remote_action - } - - /// Set whether the action being executed in the current context is a remote action - pub fn set_is_remote_action(&mut self, is_remote_action: bool) { - self.is_remote_action = is_remote_action; - } - - /// Get the connection instance in the current context - pub fn instance(&self) -> &Option>> { - &self.instance - } - - /// Get a mutable reference to the connection instance in the current context - pub fn instance_mut(&mut self) -> &mut Option>> { - &mut self.instance - } - - /// Get the action name from the context - pub fn action_name(&self) -> &str { - &self.action_name - } - - /// Get the action arguments from the context - pub fn action_args_json(&self) -> &String { - &self.action_args_json - } - - /// Set the action name in the context - pub fn set_action_name(mut self, action_name: String) -> Self { - self.action_name = action_name; - self - } - - /// Set the action arguments in the context - pub fn set_action_args(mut self, action_args: String) -> Self { - self.action_args_json = action_args; - self - } - - /// 
Insert arbitrary data in the context - pub fn with_data(mut self, value: T) -> Self { - self.data.insert(TypeId::of::(), Arc::new(value)); - self - } - - /// Insert arbitrary data as Arc in the context - pub fn with_arc_data(mut self, value: Arc) -> Self { - self.data.insert(TypeId::of::(), value); - self - } - - /// Insert arbitrary data in the context - pub fn insert_data(&mut self, value: T) { - self.data.insert(TypeId::of::(), Arc::new(value)); - } - - /// Insert arbitrary data as Arc in the context - pub fn insert_arc_data(&mut self, value: Arc) { - self.data.insert(TypeId::of::(), value); - } - - /// Get arbitrary data from the context - pub fn get(&self) -> Option<&T> { - self.data - .get(&TypeId::of::()) - .and_then(|arc| arc.downcast_ref::()) - } - - /// Get arbitrary data as Arc from the context - pub fn get_arc(&self) -> Option> { - self.data - .get(&TypeId::of::()) - .and_then(|arc| Arc::clone(arc).downcast::().ok()) - } - - /// Remove and return arbitrary data from the context - pub fn remove(&mut self) -> Option> { - self.data - .remove(&TypeId::of::()) - .and_then(|arc| arc.downcast::().ok()) - } - - /// Check if the context contains data of a specific type - pub fn contains(&self) -> bool { - self.data.contains_key(&TypeId::of::()) - } - - /// Take ownership of the context and extract data of a specific type - pub fn take(mut self) -> (Self, Option>) { - let value = self - .data - .remove(&TypeId::of::()) - .and_then(|arc| arc.downcast::().ok()); - (self, value) - } -} diff --git a/crates/system_action/src/action_pool.rs b/crates/system_action/src/action_pool.rs deleted file mode 100644 index 019fa6d..0000000 --- a/crates/system_action/src/action_pool.rs +++ /dev/null @@ -1,247 +0,0 @@ -use std::pin::Pin; - -use serde::{Serialize, de::DeserializeOwned}; -use serde_json; -use tcp_connection::error::TcpTargetError; - -use crate::action::{Action, ActionContext}; - -type ProcBeginCallback = for<'a> fn( - &'a mut ActionContext, - args: &'a (dyn 
std::any::Any + Send + Sync), -) -> ProcBeginFuture<'a>; -type ProcEndCallback = fn() -> ProcEndFuture; - -type ProcBeginFuture<'a> = Pin> + Send + 'a>>; -type ProcEndFuture = Pin> + Send>>; - -/// # Struct - ActionPool -/// -/// This struct is used to register and record all accessible and executable actions -/// -/// It also registers `on_proc_begin` and `on_proc_end` callback functions -/// used for action initialization -/// -/// ## Creating and registering actions -/// ```ignore -/// fn init_action_pool() { -/// let mut pool = Action::new(); -/// -/// // Register action -/// pool.register(); -/// -/// // If the action is implemented with `#[action_gen]`, you can also do -/// register_your_action(&mut pool); -/// } -/// ``` -pub struct ActionPool { - /// HashMap storing action name to action implementation mapping - actions: std::collections::HashMap<&'static str, Box>, - - /// Callback to execute when process begins - on_proc_begin: Option, - - /// Callback to execute when process ends - on_proc_end: Option, -} - -impl Default for ActionPool { - fn default() -> Self { - Self::new() - } -} - -impl ActionPool { - /// Creates a new empty ActionPool - pub fn new() -> Self { - Self { - actions: std::collections::HashMap::new(), - on_proc_begin: None, - on_proc_end: None, - } - } - - /// Sets a callback to be executed when process begins - pub fn set_on_proc_begin(&mut self, callback: ProcBeginCallback) { - self.on_proc_begin = Some(callback); - } - - /// Sets a callback to be executed when process ends - pub fn set_on_proc_end(&mut self, callback: ProcEndCallback) { - self.on_proc_end = Some(callback); - } - - /// Registers an action type with the pool - /// - /// Usage: - /// ```ignore - /// action_pool.register::(); - /// ``` - pub fn register(&mut self) - where - A: Action + Send + Sync + 'static, - Args: serde::Serialize + serde::de::DeserializeOwned + Send + Sync + 'static, - Return: serde::Serialize + serde::de::DeserializeOwned + Send + Sync + 'static, - { - 
let action_name = A::action_name(); - self.actions.insert( - action_name, - Box::new(ActionWrapper::(std::marker::PhantomData)), - ); - } - - /// Processes an action by name with given context and arguments - /// - /// Usage: - /// ```ignore - /// let result = action_pool.process::("my_action", context, args).await?; - /// ``` - /// Processes an action by name with JSON-serialized arguments - /// - /// Usage: - /// ```ignore - /// let result_json = action_pool.process_json("my_action", context, args_json).await?; - /// let result: MyReturn = serde_json::from_str(&result_json)?; - /// ``` - pub async fn process_json<'a>( - &'a self, - action_name: &'a str, - context: ActionContext, - args_json: String, - ) -> Result { - if let Some(action) = self.actions.get(action_name) { - // Set action name and args in context for callbacks - let context = context.set_action_name(action_name.to_string()); - let mut context = context.set_action_args(args_json.clone()); - - self.exec_on_proc_begin(&mut context, &args_json).await?; - let result = action.process_json_erased(context, args_json).await?; - self.exec_on_proc_end().await?; - Ok(result) - } else { - Err(TcpTargetError::Unsupported("InvalidAction".to_string())) - } - } - - /// Processes an action by name with given context and arguments - /// - /// Usage: - /// ```ignore - /// let result = action_pool.process::("my_action", context, args).await?; - /// ``` - pub async fn process<'a, Args, Return>( - &'a self, - action_name: &'a str, - mut context: ActionContext, - args: Args, - ) -> Result - where - Args: serde::de::DeserializeOwned + Send + Sync + 'static, - Return: serde::Serialize + Send + 'static, - { - if let Some(action) = self.actions.get(action_name) { - self.exec_on_proc_begin(&mut context, &args).await?; - let result = action.process_erased(context, Box::new(args)).await?; - let result = *result - .downcast::() - .map_err(|_| TcpTargetError::Unsupported("InvalidArguments".to_string()))?; - 
self.exec_on_proc_end().await?; - Ok(result) - } else { - Err(TcpTargetError::Unsupported("InvalidAction".to_string())) - } - } - - /// Executes the process begin callback if set - async fn exec_on_proc_begin( - &self, - context: &mut ActionContext, - args: &(dyn std::any::Any + Send + Sync), - ) -> Result<(), TcpTargetError> { - if let Some(callback) = &self.on_proc_begin { - callback(context, args).await - } else { - Ok(()) - } - } - - /// Executes the process end callback if set - async fn exec_on_proc_end(&self) -> Result<(), TcpTargetError> { - if let Some(callback) = &self.on_proc_end { - callback().await - } else { - Ok(()) - } - } -} - -/// Trait for type-erased actions that can be stored in ActionPool -type ProcessErasedFuture = std::pin::Pin< - Box< - dyn std::future::Future, TcpTargetError>> - + Send, - >, ->; -type ProcessJsonErasedFuture = - std::pin::Pin> + Send>>; - -trait ActionErased: Send + Sync { - /// Processes the action with type-erased arguments and returns type-erased result - fn process_erased( - &self, - context: ActionContext, - args: Box, - ) -> ProcessErasedFuture; - - /// Processes the action with JSON-serialized arguments and returns JSON-serialized result - fn process_json_erased( - &self, - context: ActionContext, - args_json: String, - ) -> ProcessJsonErasedFuture; -} - -/// Wrapper struct that implements ActionErased for concrete Action types -struct ActionWrapper(std::marker::PhantomData<(A, Args, Return)>); - -impl ActionErased for ActionWrapper -where - A: Action + Send + Sync, - Args: Serialize + DeserializeOwned + Send + Sync + 'static, - Return: Serialize + DeserializeOwned + Send + Sync + 'static, -{ - fn process_erased( - &self, - context: ActionContext, - args: Box, - ) -> std::pin::Pin< - Box< - dyn std::future::Future, TcpTargetError>> - + Send, - >, - > { - Box::pin(async move { - let args = *args - .downcast::() - .map_err(|_| TcpTargetError::Unsupported("InvalidArguments".to_string()))?; - let result = 
A::process(context, args).await?; - Ok(Box::new(result) as Box) - }) - } - - fn process_json_erased( - &self, - context: ActionContext, - args_json: String, - ) -> std::pin::Pin> + Send>> - { - Box::pin(async move { - let args: Args = serde_json::from_str(&args_json) - .map_err(|e| TcpTargetError::Serialization(format!("Deserialize failed: {}", e)))?; - let result = A::process(context, args).await?; - let result_json = serde_json::to_string(&result) - .map_err(|e| TcpTargetError::Serialization(format!("Serialize failed: {}", e)))?; - Ok(result_json) - }) - } -} diff --git a/crates/system_action/src/lib.rs b/crates/system_action/src/lib.rs deleted file mode 100644 index 12ae999..0000000 --- a/crates/system_action/src/lib.rs +++ /dev/null @@ -1,6 +0,0 @@ -pub mod macros { - pub use action_system_macros::*; -} - -pub mod action; -pub mod action_pool; diff --git a/crates/utils/cfg_file/Cargo.toml b/crates/utils/cfg_file/Cargo.toml deleted file mode 100644 index 0685329..0000000 --- a/crates/utils/cfg_file/Cargo.toml +++ /dev/null @@ -1,23 +0,0 @@ -[package] -name = "cfg_file" -edition = "2024" -version.workspace = true - -[features] -default = ["derive"] -derive = [] - -[dependencies] -cfg_file_derive = { path = "cfg_file_derive" } - -# Async -tokio = { version = "1.48.0", features = ["full"] } -async-trait = "0.1.89" - -# Serialization -serde = { version = "1.0.228", features = ["derive"] } -serde_yaml = "0.9.34" -serde_json = "1.0.145" -ron = "0.11.0" -toml = "0.9.8" -bincode2 = "2.0.1" diff --git a/crates/utils/cfg_file/cfg_file_derive/Cargo.toml b/crates/utils/cfg_file/cfg_file_derive/Cargo.toml deleted file mode 100644 index ce5e77f..0000000 --- a/crates/utils/cfg_file/cfg_file_derive/Cargo.toml +++ /dev/null @@ -1,11 +0,0 @@ -[package] -name = "cfg_file_derive" -edition = "2024" -version.workspace = true - -[lib] -proc-macro = true - -[dependencies] -syn = { version = "2.0", features = ["full", "extra-traits"] } -quote = "1.0" diff --git 
a/crates/utils/cfg_file/cfg_file_derive/src/lib.rs b/crates/utils/cfg_file/cfg_file_derive/src/lib.rs deleted file mode 100644 index e916311..0000000 --- a/crates/utils/cfg_file/cfg_file_derive/src/lib.rs +++ /dev/null @@ -1,130 +0,0 @@ -extern crate proc_macro; - -use proc_macro::TokenStream; -use quote::quote; -use syn::parse::ParseStream; -use syn::{Attribute, DeriveInput, Expr, parse_macro_input}; -/// # Macro - ConfigFile -/// -/// ## Usage -/// -/// Use `#[derive(ConfigFile)]` to derive the ConfigFile trait for a struct -/// -/// Specify the default storage path via `#[cfg_file(path = "...")]` -/// -/// ## About the `cfg_file` attribute macro -/// -/// Use `#[cfg_file(path = "string")]` to specify the configuration file path -/// -/// Or use `#[cfg_file(path = constant_expression)]` to specify the configuration file path -/// -/// ## Path Rules -/// -/// Paths starting with `"./"`: relative to the current working directory -/// -/// Other paths: treated as absolute paths -/// -/// When no path is specified: use the struct name + ".json" as the default filename (e.g., `my_struct.json`) -/// -/// ## Example -/// ```ignore -/// #[derive(ConfigFile)] -/// #[cfg_file(path = "./config.json")] -/// struct AppConfig; -/// ``` -#[proc_macro_derive(ConfigFile, attributes(cfg_file))] -pub fn derive_config_file(input: TokenStream) -> TokenStream { - let input = parse_macro_input!(input as DeriveInput); - let name = &input.ident; - - // Process 'cfg_file' - let path_expr = match find_cfg_file_path(&input.attrs) { - Some(PathExpr::StringLiteral(path)) => { - if let Some(path_str) = path.strip_prefix("./") { - quote! { - std::env::current_dir()?.join(#path_str) - } - } else { - // Using Absolute Path - quote! { - std::path::PathBuf::from(#path) - } - } - } - Some(PathExpr::PathExpression(path_expr)) => { - // For path expressions (constants), generate code that references the constant - quote! 
{ - std::path::PathBuf::from(#path_expr) - } - } - None => { - let default_file = to_snake_case(&name.to_string()) + ".json"; - quote! { - std::env::current_dir()?.join(#default_file) - } - } - }; - - let expanded = quote! { - impl cfg_file::config::ConfigFile for #name { - type DataType = #name; - - fn default_path() -> Result { - Ok(#path_expr) - } - } - }; - - TokenStream::from(expanded) -} - -enum PathExpr { - StringLiteral(String), - PathExpression(syn::Expr), -} - -fn find_cfg_file_path(attrs: &[Attribute]) -> Option { - for attr in attrs { - if attr.path().is_ident("cfg_file") { - let parser = |meta: ParseStream| { - let path_meta: syn::MetaNameValue = meta.parse()?; - if path_meta.path.is_ident("path") { - match &path_meta.value { - // String literal case: path = "./vault.toml" - Expr::Lit(expr_lit) if matches!(expr_lit.lit, syn::Lit::Str(_)) => { - if let syn::Lit::Str(lit_str) = &expr_lit.lit { - return Ok(PathExpr::StringLiteral(lit_str.value())); - } - } - // Path expression case: path = SERVER_FILE_VAULT or crate::constants::SERVER_FILE_VAULT - expr @ (Expr::Path(_) | Expr::Macro(_)) => { - return Ok(PathExpr::PathExpression(expr.clone())); - } - _ => {} - } - } - Err(meta.error("expected `path = \"...\"` or `path = CONSTANT`")) - }; - - if let Ok(path_expr) = attr.parse_args_with(parser) { - return Some(path_expr); - } - } - } - None -} - -fn to_snake_case(s: &str) -> String { - let mut snake = String::new(); - for (i, c) in s.chars().enumerate() { - if c.is_uppercase() { - if i != 0 { - snake.push('_'); - } - snake.push(c.to_ascii_lowercase()); - } else { - snake.push(c); - } - } - snake -} diff --git a/crates/utils/cfg_file/cfg_file_test/Cargo.toml b/crates/utils/cfg_file/cfg_file_test/Cargo.toml deleted file mode 100644 index 5db1010..0000000 --- a/crates/utils/cfg_file/cfg_file_test/Cargo.toml +++ /dev/null @@ -1,9 +0,0 @@ -[package] -name = "cfg_file_test" -version = "0.1.0" -edition = "2024" - -[dependencies] -cfg_file = { path = 
"../../cfg_file", features = ["default"] } -tokio = { version = "1.48.0", features = ["full"] } -serde = { version = "1.0.228", features = ["derive"] } diff --git a/crates/utils/cfg_file/cfg_file_test/src/lib.rs b/crates/utils/cfg_file/cfg_file_test/src/lib.rs deleted file mode 100644 index f70d00d..0000000 --- a/crates/utils/cfg_file/cfg_file_test/src/lib.rs +++ /dev/null @@ -1,95 +0,0 @@ -#[cfg(test)] -mod test_cfg_file { - use cfg_file::ConfigFile; - use cfg_file::config::ConfigFile; - use serde::{Deserialize, Serialize}; - use std::collections::HashMap; - - #[derive(ConfigFile, Deserialize, Serialize, Default)] - #[cfg_file(path = "./.temp/example_cfg.toml")] - struct ExampleConfig { - name: String, - age: i32, - hobby: Vec, - secret: HashMap, - } - - #[derive(ConfigFile, Deserialize, Serialize, Default)] - #[cfg_file(path = "./.temp/example_bincode.bcfg")] - struct ExampleBincodeConfig { - name: String, - age: i32, - hobby: Vec, - secret: HashMap, - } - - #[tokio::test] - async fn test_config_file_serialization() { - let mut example = ExampleConfig { - name: "Weicao".to_string(), - age: 22, - hobby: ["Programming", "Painting"] - .iter() - .map(|m| m.to_string()) - .collect(), - secret: HashMap::new(), - }; - let secret_no_comments = - "Actually, I'm really too lazy to write comments, documentation, and unit tests."; - example - .secret - .entry("No comments".to_string()) - .insert_entry(secret_no_comments.to_string()); - - let secret_peek = "Of course, it's peeking at you who's reading the source code."; - example - .secret - .entry("Peek".to_string()) - .insert_entry(secret_peek.to_string()); - - ExampleConfig::write(&example).await.unwrap(); // Write to default path. - - // Read from default path. 
- let read_cfg = ExampleConfig::read().await.unwrap(); - assert_eq!(read_cfg.name, "Weicao"); - assert_eq!(read_cfg.age, 22); - assert_eq!(read_cfg.hobby, vec!["Programming", "Painting"]); - assert_eq!(read_cfg.secret["No comments"], secret_no_comments); - assert_eq!(read_cfg.secret["Peek"], secret_peek); - } - - #[tokio::test] - async fn test_bincode_config_file_serialization() { - let mut example = ExampleBincodeConfig { - name: "Weicao".to_string(), - age: 22, - hobby: ["Programming", "Painting"] - .iter() - .map(|m| m.to_string()) - .collect(), - secret: HashMap::new(), - }; - let secret_no_comments = - "Actually, I'm really too lazy to write comments, documentation, and unit tests."; - example - .secret - .entry("No comments".to_string()) - .insert_entry(secret_no_comments.to_string()); - - let secret_peek = "Of course, it's peeking at you who's reading the source code."; - example - .secret - .entry("Peek".to_string()) - .insert_entry(secret_peek.to_string()); - - ExampleBincodeConfig::write(&example).await.unwrap(); // Write to default path. - - // Read from default path. 
- let read_cfg = ExampleBincodeConfig::read().await.unwrap(); - assert_eq!(read_cfg.name, "Weicao"); - assert_eq!(read_cfg.age, 22); - assert_eq!(read_cfg.hobby, vec!["Programming", "Painting"]); - assert_eq!(read_cfg.secret["No comments"], secret_no_comments); - assert_eq!(read_cfg.secret["Peek"], secret_peek); - } -} diff --git a/crates/utils/cfg_file/src/config.rs b/crates/utils/cfg_file/src/config.rs deleted file mode 100644 index d3f5477..0000000 --- a/crates/utils/cfg_file/src/config.rs +++ /dev/null @@ -1,263 +0,0 @@ -use async_trait::async_trait; -use bincode2; -use ron; -use serde::{Deserialize, Serialize}; -use std::{ - borrow::Cow, - env::current_dir, - io::Error, - path::{Path, PathBuf}, -}; -use tokio::{fs, io::AsyncReadExt}; - -#[derive(Debug, Clone, Copy, PartialEq, Eq)] -enum ConfigFormat { - Yaml, - Toml, - Ron, - Json, - Bincode, -} - -impl ConfigFormat { - fn from_filename(filename: &str) -> Option { - if filename.ends_with(".yaml") || filename.ends_with(".yml") { - Some(Self::Yaml) - } else if filename.ends_with(".toml") || filename.ends_with(".tom") { - Some(Self::Toml) - } else if filename.ends_with(".ron") { - Some(Self::Ron) - } else if filename.ends_with(".json") { - Some(Self::Json) - } else if filename.ends_with(".bcfg") { - Some(Self::Bincode) - } else { - None - } - } -} - -/// # Trait - ConfigFile -/// -/// Used to implement more convenient persistent storage functionality for structs -/// -/// This trait requires the struct to implement Default and serde's Serialize and Deserialize traits -/// -/// ## Implementation -/// -/// ```ignore -/// // Your struct -/// #[derive(Default, Serialize, Deserialize)] -/// struct YourData; -/// -/// impl ConfigFile for YourData { -/// type DataType = YourData; -/// -/// // Specify default path -/// fn default_path() -> Result { -/// Ok(current_dir()?.join("data.json")) -/// } -/// } -/// ``` -/// -/// > **Using derive macro** -/// > -/// > We provide the derive macro `#[derive(ConfigFile)]` -/// > 
-/// > You can implement this trait more quickly, please check the module cfg_file::cfg_file_derive -/// -#[async_trait] -pub trait ConfigFile: Serialize + for<'a> Deserialize<'a> + Default { - type DataType: Serialize + for<'a> Deserialize<'a> + Default + Send + Sync; - - fn default_path() -> Result; - - /// # Read from default path - /// - /// Read data from the path specified by default_path() - /// - /// ```ignore - /// fn main() -> Result<(), std::io::Error> { - /// let data = YourData::read().await?; - /// } - /// ``` - async fn read() -> Result - where - Self: Sized + Send + Sync, - { - let path = Self::default_path()?; - Self::read_from(path).await - } - - /// # Read from the given path - /// - /// Read data from the path specified by the path parameter - /// - /// ```ignore - /// fn main() -> Result<(), std::io::Error> { - /// let data_path = current_dir()?.join("data.json"); - /// let data = YourData::read_from(data_path).await?; - /// } - /// ``` - async fn read_from(path: impl AsRef + Send) -> Result - where - Self: Sized + Send + Sync, - { - let path = path.as_ref(); - let cwd = current_dir()?; - let file_path = cwd.join(path); - - // Check if file exists - if fs::metadata(&file_path).await.is_err() { - return Err(std::io::Error::new( - std::io::ErrorKind::NotFound, - "Config file not found", - )); - } - - // Determine file format first - let format = file_path - .file_name() - .and_then(|name| name.to_str()) - .and_then(ConfigFormat::from_filename) - .unwrap_or(ConfigFormat::Bincode); // Default to Bincode - - // Deserialize based on format - let result = match format { - ConfigFormat::Yaml => { - let mut file = fs::File::open(&file_path).await?; - let mut contents = String::new(); - file.read_to_string(&mut contents).await?; - serde_yaml::from_str(&contents) - .map_err(|e| std::io::Error::new(std::io::ErrorKind::InvalidData, e))? 
- } - ConfigFormat::Toml => { - let mut file = fs::File::open(&file_path).await?; - let mut contents = String::new(); - file.read_to_string(&mut contents).await?; - toml::from_str(&contents) - .map_err(|e| std::io::Error::new(std::io::ErrorKind::InvalidData, e))? - } - ConfigFormat::Ron => { - let mut file = fs::File::open(&file_path).await?; - let mut contents = String::new(); - file.read_to_string(&mut contents).await?; - ron::from_str(&contents) - .map_err(|e| std::io::Error::new(std::io::ErrorKind::InvalidData, e))? - } - ConfigFormat::Json => { - let mut file = fs::File::open(&file_path).await?; - let mut contents = String::new(); - file.read_to_string(&mut contents).await?; - serde_json::from_str(&contents) - .map_err(|e| std::io::Error::new(std::io::ErrorKind::InvalidData, e))? - } - ConfigFormat::Bincode => { - // For Bincode, we need to read the file as bytes directly - let bytes = fs::read(&file_path).await?; - bincode2::deserialize(&bytes) - .map_err(|e| std::io::Error::new(std::io::ErrorKind::InvalidData, e))? 
- } - }; - - Ok(result) - } - - /// # Write to default path - /// - /// Write data to the path specified by default_path() - /// - /// ```ignore - /// fn main() -> Result<(), std::io::Error> { - /// let data = YourData::default(); - /// YourData::write(&data).await?; - /// } - /// ``` - async fn write(val: &Self::DataType) -> Result<(), std::io::Error> - where - Self: Sized + Send + Sync, - { - let path = Self::default_path()?; - Self::write_to(val, path).await - } - /// # Write to the given path - /// - /// Write data to the path specified by the path parameter - /// - /// ```ignore - /// fn main() -> Result<(), std::io::Error> { - /// let data = YourData::default(); - /// let data_path = current_dir()?.join("data.json"); - /// YourData::write_to(&data, data_path).await?; - /// } - /// ``` - async fn write_to( - val: &Self::DataType, - path: impl AsRef + Send, - ) -> Result<(), std::io::Error> - where - Self: Sized + Send + Sync, - { - let path = path.as_ref(); - - if let Some(parent) = path.parent() - && !parent.exists() - { - tokio::fs::create_dir_all(parent).await?; - } - - let cwd = current_dir()?; - let file_path = cwd.join(path); - - // Determine file format - let format = file_path - .file_name() - .and_then(|name| name.to_str()) - .and_then(ConfigFormat::from_filename) - .unwrap_or(ConfigFormat::Bincode); // Default to Bincode - - match format { - ConfigFormat::Yaml => { - let contents = serde_yaml::to_string(val) - .map_err(|e| std::io::Error::new(std::io::ErrorKind::InvalidData, e))?; - fs::write(&file_path, contents).await? - } - ConfigFormat::Toml => { - let contents = toml::to_string(val) - .map_err(|e| std::io::Error::new(std::io::ErrorKind::InvalidData, e))?; - fs::write(&file_path, contents).await? 
- } - ConfigFormat::Ron => { - let mut pretty_config = ron::ser::PrettyConfig::new(); - pretty_config.new_line = Cow::from("\n"); - pretty_config.indentor = Cow::from(" "); - - let contents = ron::ser::to_string_pretty(val, pretty_config) - .map_err(|e| std::io::Error::new(std::io::ErrorKind::InvalidData, e))?; - fs::write(&file_path, contents).await? - } - ConfigFormat::Json => { - let contents = serde_json::to_string(val) - .map_err(|e| std::io::Error::new(std::io::ErrorKind::InvalidData, e))?; - fs::write(&file_path, contents).await? - } - ConfigFormat::Bincode => { - let bytes = bincode2::serialize(val) - .map_err(|e| std::io::Error::new(std::io::ErrorKind::InvalidData, e))?; - fs::write(&file_path, bytes).await? - } - } - Ok(()) - } - - /// Check if the file returned by `default_path` exists - fn exist() -> bool - where - Self: Sized + Send + Sync, - { - let Ok(path) = Self::default_path() else { - return false; - }; - path.exists() - } -} diff --git a/crates/utils/cfg_file/src/lib.rs b/crates/utils/cfg_file/src/lib.rs deleted file mode 100644 index 72246e7..0000000 --- a/crates/utils/cfg_file/src/lib.rs +++ /dev/null @@ -1,7 +0,0 @@ -#[cfg(feature = "derive")] -extern crate cfg_file_derive; - -#[cfg(feature = "derive")] -pub use cfg_file_derive::*; - -pub mod config; diff --git a/crates/utils/data_struct/Cargo.toml b/crates/utils/data_struct/Cargo.toml deleted file mode 100644 index e8caa6e..0000000 --- a/crates/utils/data_struct/Cargo.toml +++ /dev/null @@ -1,10 +0,0 @@ -[package] -name = "data_struct" -edition = "2024" -version.workspace = true - -[features] - -[dependencies] -serde = { version = "1.0.228", features = ["derive"] } -ahash = "0.8.12" diff --git a/crates/utils/data_struct/src/bi_map.rs b/crates/utils/data_struct/src/bi_map.rs deleted file mode 100644 index c21a9c8..0000000 --- a/crates/utils/data_struct/src/bi_map.rs +++ /dev/null @@ -1,239 +0,0 @@ -use ahash::AHasher; -use serde::{Deserialize, Serialize}; -use std::collections::HashMap; -use 
std::hash::{BuildHasherDefault, Hash}; - -type FastHashMap = HashMap>; - -#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct BiMap -where - A: Eq + Hash + Clone, - B: Eq + Hash + Clone, -{ - #[serde(flatten)] - a_to_b: FastHashMap, - #[serde(skip)] - b_to_a: FastHashMap, -} - -pub struct Entry<'a, A, B> -where - A: Eq + Hash + Clone, - B: Eq + Hash + Clone, -{ - bimap: &'a mut BiMap, - key: A, - value: Option, -} - -impl BiMap -where - A: Eq + Hash + Clone, - B: Eq + Hash + Clone, -{ - pub fn new() -> Self { - Self { - a_to_b: FastHashMap::default(), - b_to_a: FastHashMap::default(), - } - } - - pub fn entry(&mut self, a: A) -> Entry<'_, A, B> { - let value = self.a_to_b.get(&a).cloned(); - Entry { - bimap: self, - key: a, - value, - } - } - - #[inline(always)] - pub fn insert(&mut self, a: A, b: B) { - if let Some(old_b) = self.a_to_b.insert(a.clone(), b.clone()) { - self.b_to_a.remove(&old_b); - } - if let Some(old_a) = self.b_to_a.insert(b.clone(), a.clone()) { - self.a_to_b.remove(&old_a); - } - } - - #[inline(always)] - pub fn get_by_a(&self, key: &A) -> Option<&B> { - self.a_to_b.get(key) - } - - #[inline(always)] - pub fn get_by_b(&self, key: &B) -> Option<&A> { - self.b_to_a.get(key) - } - - pub fn remove_by_a(&mut self, key: &A) -> Option<(A, B)> { - if let Some(b) = self.get_by_a(key).cloned() { - let a = self.get_by_b(&b).cloned().unwrap(); - self.a_to_b.remove(key); - self.b_to_a.remove(&b); - Some((a, b)) - } else { - None - } - } - - pub fn remove_by_b(&mut self, key: &B) -> Option<(A, B)> { - if let Some(a) = self.get_by_b(key).cloned() { - let b = self.get_by_a(&a).cloned().unwrap(); - self.b_to_a.remove(key); - self.a_to_b.remove(&a); - Some((a, b)) - } else { - None - } - } - - pub fn reserve(&mut self, additional: usize) { - self.a_to_b.reserve(additional); - self.b_to_a.reserve(additional); - } - - pub fn len(&self) -> usize { - self.a_to_b.len() - } - - pub fn is_empty(&self) -> bool { - self.a_to_b.is_empty() - } - - pub fn clear(&mut 
self) { - self.a_to_b.clear(); - self.b_to_a.clear(); - } - - pub fn contains_a(&self, key: &A) -> bool { - self.a_to_b.contains_key(key) - } - - pub fn contains_b(&self, key: &B) -> bool { - self.b_to_a.contains_key(key) - } - - pub fn keys_a(&self) -> impl Iterator { - self.a_to_b.keys() - } - - pub fn keys_b(&self) -> impl Iterator { - self.b_to_a.keys() - } - - pub fn iter_a_to_b(&self) -> impl Iterator { - self.a_to_b.iter() - } - - pub fn iter_b_to_a(&self) -> impl Iterator { - self.b_to_a.iter() - } -} - -impl<'a, A, B> Entry<'a, A, B> -where - A: Eq + Hash + Clone, - B: Eq + Hash + Clone, -{ - pub fn and_modify(mut self, f: F) -> Self - where - F: FnOnce(&mut B), - { - if let Some(ref mut value) = self.value { - f(value); - } - self - } - - pub fn or_insert(self, default: B) -> Result<&'a mut B, &'static str> { - self.or_insert_with(|| default) - } - - pub fn or_insert_with(mut self, default: F) -> Result<&'a mut B, &'static str> - where - F: FnOnce() -> B, - { - if self.value.is_none() { - self.value = Some(default()); - } - - let value = self.value.as_ref().ok_or("Value is None")?.clone(); - self.bimap.insert(self.key.clone(), value); - - self.bimap - .a_to_b - .get_mut(&self.key) - .ok_or("Key not found in a_to_b map") - } -} - -impl Default for BiMap -where - A: Eq + Hash + Clone, - B: Eq + Hash + Clone, -{ - fn default() -> Self { - Self::new() - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn test_bimap_basic_operations() { - let mut bimap = BiMap::new(); - bimap.insert("key1", "value1"); - - assert_eq!(bimap.get_by_a(&"key1"), Some(&"value1")); - assert_eq!(bimap.get_by_b(&"value1"), Some(&"key1")); - assert!(bimap.contains_a(&"key1")); - assert!(bimap.contains_b(&"value1")); - } - - #[test] - fn test_bimap_remove() { - let mut bimap = BiMap::new(); - bimap.insert(1, "one"); - - assert_eq!(bimap.remove_by_a(&1), Some((1, "one"))); - assert!(bimap.is_empty()); - } - - #[test] - fn test_bimap_entry() { - let mut bimap = BiMap::new(); 
- bimap.entry("key1").or_insert("value1").unwrap(); - - assert_eq!(bimap.get_by_a(&"key1"), Some(&"value1")); - } - - #[test] - fn test_bimap_iterators() { - let mut bimap = BiMap::new(); - bimap.insert(1, "one"); - bimap.insert(2, "two"); - - let a_keys: Vec<_> = bimap.keys_a().collect(); - assert!(a_keys.contains(&&1) && a_keys.contains(&&2)); - - let b_keys: Vec<_> = bimap.keys_b().collect(); - assert!(b_keys.contains(&&"one") && b_keys.contains(&&"two")); - } - - #[test] - fn test_bimap_duplicate_insert() { - let mut bimap = BiMap::new(); - bimap.insert(1, "one"); - bimap.insert(1, "new_one"); - bimap.insert(2, "one"); - - assert_eq!(bimap.get_by_a(&1), Some(&"new_one")); - assert_eq!(bimap.get_by_b(&"one"), Some(&2)); - assert_eq!(bimap.get_by_a(&2), Some(&"one")); - } -} diff --git a/crates/utils/data_struct/src/data_sort.rs b/crates/utils/data_struct/src/data_sort.rs deleted file mode 100644 index 2c7a452..0000000 --- a/crates/utils/data_struct/src/data_sort.rs +++ /dev/null @@ -1,232 +0,0 @@ -/// Quick sort a slice with a custom comparison function -/// -/// # Arguments -/// * `arr` - The mutable slice to be sorted -/// * `inverse` - Sort direction: true for descending, false for ascending -/// * `compare` - Comparison function that returns -1, 0, or 1 indicating the relative order of two elements -pub fn quick_sort_with_cmp(arr: &mut [T], inverse: bool, compare: F) -where - F: Fn(&T, &T) -> i32, -{ - quick_sort_with_cmp_helper(arr, inverse, &compare); -} - -/// Quick sort for types that implement the PartialOrd trait -/// -/// # Arguments -/// * `arr` - The mutable slice to be sorted -/// * `inverse` - Sort direction: true for descending, false for ascending -pub fn quick_sort(arr: &mut [T], inverse: bool) { - quick_sort_with_cmp(arr, inverse, |a, b| { - if a < b { - -1 - } else if a > b { - 1 - } else { - 0 - } - }); -} - -fn quick_sort_with_cmp_helper(arr: &mut [T], inverse: bool, compare: &F) -where - F: Fn(&T, &T) -> i32, -{ - if arr.len() <= 1 { - 
return; - } - - let pivot_index = partition_with_cmp(arr, inverse, compare); - let (left, right) = arr.split_at_mut(pivot_index); - - quick_sort_with_cmp_helper(left, inverse, compare); - quick_sort_with_cmp_helper(&mut right[1..], inverse, compare); -} - -fn partition_with_cmp(arr: &mut [T], inverse: bool, compare: &F) -> usize -where - F: Fn(&T, &T) -> i32, -{ - let len = arr.len(); - let pivot_index = len / 2; - - arr.swap(pivot_index, len - 1); - - let mut i = 0; - for j in 0..len - 1 { - let cmp_result = compare(&arr[j], &arr[len - 1]); - let should_swap = if inverse { - cmp_result > 0 - } else { - cmp_result < 0 - }; - - if should_swap { - arr.swap(i, j); - i += 1; - } - } - - arr.swap(i, len - 1); - i -} - -#[cfg(test)] -pub mod sort_test { - use crate::data_sort::{quick_sort, quick_sort_with_cmp}; - - #[test] - fn test_quick_sort_ascending() { - let mut arr = [3, 1, 4, 1, 5, 9, 2, 6]; - quick_sort(&mut arr, false); - assert_eq!(arr, [1, 1, 2, 3, 4, 5, 6, 9]); - } - - #[test] - fn test_quick_sort_descending() { - let mut arr = [3, 1, 4, 1, 5, 9, 2, 6]; - quick_sort(&mut arr, true); - assert_eq!(arr, [9, 6, 5, 4, 3, 2, 1, 1]); - } - - #[test] - fn test_quick_sort_single() { - let mut arr = [42]; - quick_sort(&mut arr, false); - assert_eq!(arr, [42]); - } - - #[test] - fn test_quick_sort_already_sorted() { - let mut arr = [1, 2, 3, 4, 5]; - quick_sort(&mut arr, false); - assert_eq!(arr, [1, 2, 3, 4, 5]); - } - - #[test] - fn test_quick_sort_with_cmp_by_count() { - #[derive(Debug, PartialEq)] - struct WordCount { - word: String, - count: usize, - } - - let mut words = vec![ - WordCount { - word: "apple".to_string(), - count: 3, - }, - WordCount { - word: "banana".to_string(), - count: 1, - }, - WordCount { - word: "cherry".to_string(), - count: 5, - }, - WordCount { - word: "date".to_string(), - count: 2, - }, - ]; - - quick_sort_with_cmp(&mut words, false, |a, b| { - if a.count < b.count { - -1 - } else if a.count > b.count { - 1 - } else { - 0 - } - }); - - 
assert_eq!( - words, - vec![ - WordCount { - word: "banana".to_string(), - count: 1 - }, - WordCount { - word: "date".to_string(), - count: 2 - }, - WordCount { - word: "apple".to_string(), - count: 3 - }, - WordCount { - word: "cherry".to_string(), - count: 5 - }, - ] - ); - - quick_sort_with_cmp(&mut words, true, |a, b| { - if a.count < b.count { - -1 - } else if a.count > b.count { - 1 - } else { - 0 - } - }); - - assert_eq!( - words, - vec![ - WordCount { - word: "cherry".to_string(), - count: 5 - }, - WordCount { - word: "apple".to_string(), - count: 3 - }, - WordCount { - word: "date".to_string(), - count: 2 - }, - WordCount { - word: "banana".to_string(), - count: 1 - }, - ] - ); - } - - #[test] - fn test_quick_sort_with_cmp_by_first_letter() { - let mut words = vec!["zebra", "apple", "banana", "cherry", "date"]; - - quick_sort_with_cmp(&mut words, false, |a, b| { - let a_first = a.chars().next().unwrap(); - let b_first = b.chars().next().unwrap(); - - if a_first < b_first { - -1 - } else if a_first > b_first { - 1 - } else { - 0 - } - }); - - assert_eq!(words, vec!["apple", "banana", "cherry", "date", "zebra"]); - - quick_sort_with_cmp(&mut words, true, |a, b| { - let a_first = a.chars().next().unwrap(); - let b_first = b.chars().next().unwrap(); - - if a_first < b_first { - -1 - } else if a_first > b_first { - 1 - } else { - 0 - } - }); - - assert_eq!(words, vec!["zebra", "date", "cherry", "banana", "apple"]); - } -} diff --git a/crates/utils/data_struct/src/lib.rs b/crates/utils/data_struct/src/lib.rs deleted file mode 100644 index 47cc03c..0000000 --- a/crates/utils/data_struct/src/lib.rs +++ /dev/null @@ -1,2 +0,0 @@ -pub mod bi_map; -pub mod data_sort; diff --git a/crates/utils/sha1_hash/Cargo.toml b/crates/utils/sha1_hash/Cargo.toml deleted file mode 100644 index e206efd..0000000 --- a/crates/utils/sha1_hash/Cargo.toml +++ /dev/null @@ -1,9 +0,0 @@ -[package] -name = "sha1_hash" -edition = "2024" -version.workspace = true - -[dependencies] -tokio = { 
version = "1.48", features = ["full"] } -sha1 = "0.10" -futures = "0.3" diff --git a/crates/utils/sha1_hash/res/story.txt b/crates/utils/sha1_hash/res/story.txt deleted file mode 100644 index a91f467..0000000 --- a/crates/utils/sha1_hash/res/story.txt +++ /dev/null @@ -1,48 +0,0 @@ -魏曹者,程序员也,发稀甚于代码。 -忽接神秘电话曰: -"贺君中彩,得长生之赐。" -魏曹冷笑曰:"吾命尚不及下版之期。" - -翌日果得U盘。 -接入电脑,弹窗示曰: -"点此确认,即获永生。" -魏曹径点"永拒"。 - -三月后,U盘自格其盘。 -进度条滞于九九。 -客服电话已成空号。 -魏曹乃知身可不死,然体内癌细胞亦得不灭。 - -遂谒主请辞。 -主曰:"巧甚,公司正欲优化。" -魏曹曰:"吾不死。" -主目骤亮:"则可007至司闭。" - -魏曹始试诸死法。 -坠楼,卧医三月,账单令其愿死。 -饮鸩,肝肾永损,然终不得死。 -终决卧轨。 - -择高铁最速者。 -司机探头曰:"兄台,吾亦不死身也。" -"此车已碾如君者二十人矣。" - -二人遂坐轨畔对饮。 -司机曰:"知最讽者何?" -"吾等永存,而所爱者皆逝矣。" - -魏曹忽得系统提示: -"侦得用户消极求生,将启工模。" -自是无日不毕KPI,否则遍尝绝症之苦。 - -是日对镜整寿衣。 -忽见顶生一丝乌发。 -泫然泣下,此兆示其将复活一轮回。 - ---- 忽忆DeepSeek尝作Footer曰: -"文成而Hash1验,若星河之固。" -遂取哈希值校之, -字符流转如天河倒泻, -终得"e3b0c44298fc1c14"之数。 -然文末数字竟阙如残月, -方知此篇亦遭永劫轮回。 diff --git a/crates/utils/sha1_hash/res/story_crlf.sha1 b/crates/utils/sha1_hash/res/story_crlf.sha1 deleted file mode 100644 index bc8ad25..0000000 --- a/crates/utils/sha1_hash/res/story_crlf.sha1 +++ /dev/null @@ -1 +0,0 @@ -40c1d848d8d6a14b9403ee022f2b28dabb3b3a71 diff --git a/crates/utils/sha1_hash/res/story_lf.sha1 b/crates/utils/sha1_hash/res/story_lf.sha1 deleted file mode 100644 index c2e3213..0000000 --- a/crates/utils/sha1_hash/res/story_lf.sha1 +++ /dev/null @@ -1 +0,0 @@ -6838aca280112635a2cbf93440f4c04212f58ee8 diff --git a/crates/utils/sha1_hash/src/lib.rs b/crates/utils/sha1_hash/src/lib.rs deleted file mode 100644 index 96a7897..0000000 --- a/crates/utils/sha1_hash/src/lib.rs +++ /dev/null @@ -1,257 +0,0 @@ -use sha1::{Digest, Sha1}; -use std::path::{Path, PathBuf}; -use std::sync::Arc; -use tokio::fs::File; -use tokio::io::{AsyncReadExt, BufReader}; -use tokio::task; - -/// # Struct - Sha1Result -/// -/// Records SHA1 calculation results, including the file path and hash value -#[derive(Debug, Clone)] -pub struct Sha1Result { - pub file_path: PathBuf, - pub hash: String, -} - -/// Calc SHA1 hash of 
a string -pub fn calc_sha1_string>(input: S) -> String { - let mut hasher = Sha1::new(); - hasher.update(input.as_ref().as_bytes()); - let hash_result = hasher.finalize(); - - hash_result - .iter() - .map(|b| format!("{:02x}", b)) - .collect::() -} - -/// Calc SHA1 hash of a single file -pub async fn calc_sha1>( - path: P, - buffer_size: usize, -) -> Result> { - let file_path = path.as_ref().to_string_lossy().to_string(); - - // Open file asynchronously - let file = File::open(&path).await?; - let mut reader = BufReader::with_capacity(buffer_size, file); - let mut hasher = Sha1::new(); - let mut buffer = vec![0u8; buffer_size]; - - // Read file in chunks and update hash asynchronously - loop { - let n = reader.read(&mut buffer).await?; - if n == 0 { - break; - } - hasher.update(&buffer[..n]); - } - - let hash_result = hasher.finalize(); - - // Convert to hex string - let hash_hex = hash_result - .iter() - .map(|b| format!("{:02x}", b)) - .collect::(); - - Ok(Sha1Result { - file_path: file_path.into(), - hash: hash_hex, - }) -} - -/// Calc SHA1 hashes for multiple files using multi-threading -pub async fn calc_sha1_multi( - paths: I, - buffer_size: usize, -) -> Result, Box> -where - P: AsRef + Send + Sync + 'static, - I: IntoIterator, -{ - let buffer_size = Arc::new(buffer_size); - - // Collect all file paths - let file_paths: Vec

= paths.into_iter().collect(); - - if file_paths.is_empty() { - return Ok(Vec::new()); - } - - // Create tasks for each file - let tasks: Vec<_> = file_paths - .into_iter() - .map(|path| { - let buffer_size = Arc::clone(&buffer_size); - task::spawn(async move { calc_sha1(path, *buffer_size).await }) - }) - .collect(); - - // Execute tasks with concurrency limit using join_all - let results: Vec>> = - futures::future::join_all(tasks) - .await - .into_iter() - .map(|task_result| match task_result { - Ok(Ok(calc_result)) => Ok(calc_result), - Ok(Err(e)) => Err(e), - Err(e) => Err(Box::new(e) as Box), - }) - .collect(); - - // Check for any errors and collect successful results - let mut successful_results = Vec::new(); - for result in results { - match result { - Ok(success) => successful_results.push(success), - Err(e) => return Err(e), - } - } - - Ok(successful_results) -} - -#[cfg(test)] -mod tests { - use super::*; - use std::fs; - - #[test] - fn test_sha1_string() { - let test_string = "Hello, SHA1!"; - let hash = calc_sha1_string(test_string); - - let expected_hash = "de1c3daadc6f0f1626f4cf56c03e05a1e5d7b187"; - - assert_eq!( - hash, expected_hash, - "SHA1 hash should be consistent for same input" - ); - } - - #[test] - fn test_sha1_string_empty() { - let hash = calc_sha1_string(""); - - // SHA1 of empty string is "da39a3ee5e6b4b0d3255bfef95601890afd80709" - let expected_empty_hash = "da39a3ee5e6b4b0d3255bfef95601890afd80709"; - assert_eq!( - hash, expected_empty_hash, - "SHA1 hash mismatch for empty string" - ); - } - - #[tokio::test] - async fn test_sha1_accuracy() { - // Test file path relative to the crate root - let test_file_path = "res/story.txt"; - // Choose expected hash file based on platform - let expected_hash_path = if cfg!(windows) { - "res/story_crlf.sha1" - } else { - "res/story_lf.sha1" - }; - - // Calculate SHA1 hash - let result = calc_sha1(test_file_path, 8192) - .await - .expect("Failed to calculate SHA1"); - - // Read expected hash from 
file - let expected_hash = fs::read_to_string(expected_hash_path) - .expect("Failed to read expected hash file") - .trim() - .to_string(); - - // Verify the calculated hash matches expected hash - assert_eq!( - result.hash, expected_hash, - "SHA1 hash mismatch for test file" - ); - - println!("Test file: {}", result.file_path.display()); - println!("Calculated hash: {}", result.hash); - println!("Expected hash: {}", expected_hash); - println!( - "Platform: {}", - if cfg!(windows) { - "Windows" - } else { - "Unix/Linux" - } - ); - } - - #[tokio::test] - async fn test_sha1_empty_file() { - // Create a temporary empty file for testing - let temp_file = "test_empty.txt"; - fs::write(temp_file, "").expect("Failed to create empty test file"); - - let result = calc_sha1(temp_file, 4096) - .await - .expect("Failed to calculate SHA1 for empty file"); - - // SHA1 of empty string is "da39a3ee5e6b4b0d3255bfef95601890afd80709" - let expected_empty_hash = "da39a3ee5e6b4b0d3255bfef95601890afd80709"; - assert_eq!( - result.hash, expected_empty_hash, - "SHA1 hash mismatch for empty file" - ); - - // Clean up - fs::remove_file(temp_file).expect("Failed to remove temporary test file"); - } - - #[tokio::test] - async fn test_sha1_simple_text() { - // Create a temporary file with simple text - let temp_file = "test_simple.txt"; - let test_content = "Hello, SHA1!"; - fs::write(temp_file, test_content).expect("Failed to create simple test file"); - - let result = calc_sha1(temp_file, 4096) - .await - .expect("Failed to calculate SHA1 for simple text"); - - // Note: This test just verifies that the function works without errors - // The actual hash value is not critical for this test - - println!("Simple text test - Calculated hash: {}", result.hash); - - // Clean up - fs::remove_file(temp_file).expect("Failed to remove temporary test file"); - } - - #[tokio::test] - async fn test_sha1_multi_files() { - // Test multiple files calculation - let test_files = vec!["res/story.txt"]; - - let 
results = calc_sha1_multi(test_files, 8192) - .await - .expect("Failed to calculate SHA1 for multiple files"); - - assert_eq!(results.len(), 1, "Should have calculated hash for 1 file"); - - // Choose expected hash file based on platform - let expected_hash_path = if cfg!(windows) { - "res/story_crlf.sha1" - } else { - "res/story_lf.sha1" - }; - - // Read expected hash from file - let expected_hash = fs::read_to_string(expected_hash_path) - .expect("Failed to read expected hash file") - .trim() - .to_string(); - - assert_eq!( - results[0].hash, expected_hash, - "SHA1 hash mismatch in multi-file test" - ); - } -} diff --git a/crates/utils/string_proc/Cargo.toml b/crates/utils/string_proc/Cargo.toml deleted file mode 100644 index 5292339..0000000 --- a/crates/utils/string_proc/Cargo.toml +++ /dev/null @@ -1,7 +0,0 @@ -[package] -name = "string_proc" -version = "0.1.0" -edition = "2024" - -[dependencies] -strip-ansi-escapes = "0.2.1" diff --git a/crates/utils/string_proc/src/format_path.rs b/crates/utils/string_proc/src/format_path.rs deleted file mode 100644 index 35689b8..0000000 --- a/crates/utils/string_proc/src/format_path.rs +++ /dev/null @@ -1,111 +0,0 @@ -use std::path::{Path, PathBuf}; - -/// Format path str -pub fn format_path_str(path: impl Into) -> Result { - let path_str = path.into(); - let ends_with_slash = path_str.ends_with('/'); - - // ANSI Strip - let cleaned = strip_ansi_escapes::strip(&path_str); - let path_without_ansi = String::from_utf8(cleaned) - .map_err(|e| std::io::Error::new(std::io::ErrorKind::InvalidData, e))?; - - let path_with_forward_slash = path_without_ansi.replace('\\', "/"); - let mut result = String::new(); - let mut prev_char = '\0'; - - for c in path_with_forward_slash.chars() { - if c == '/' && prev_char == '/' { - continue; - } - result.push(c); - prev_char = c; - } - - let unfriendly_chars = ['*', '?', '"', '<', '>', '|']; - result = result - .chars() - .filter(|c| !unfriendly_chars.contains(c)) - .collect(); - - // Handle 
".." path components - let path_buf = PathBuf::from(&result); - let normalized_path = normalize_path(&path_buf); - result = normalized_path.to_string_lossy().replace('\\', "/"); - - // Restore trailing slash if original path had one - if ends_with_slash && !result.ends_with('/') { - result.push('/'); - } - - // Special case: when result is only "./", return "" - if result == "./" { - return Ok(String::new()); - } - - Ok(result) -} - -/// Normalize path by resolving ".." components without requiring file system access -fn normalize_path(path: &Path) -> PathBuf { - let mut components = Vec::new(); - - for component in path.components() { - match component { - std::path::Component::ParentDir => { - if !components.is_empty() { - components.pop(); - } - } - std::path::Component::CurDir => { - // Skip current directory components - } - _ => { - components.push(component); - } - } - } - - if components.is_empty() { - PathBuf::from(".") - } else { - components.iter().collect() - } -} - -pub fn format_path(path: impl Into) -> Result { - let path_str = format_path_str(path.into().display().to_string())?; - Ok(PathBuf::from(path_str)) -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn test_format_path() -> Result<(), std::io::Error> { - assert_eq!(format_path_str("C:\\Users\\\\test")?, "C:/Users/test"); - - assert_eq!( - format_path_str("/path/with/*unfriendly?chars")?, - "/path/with/unfriendlychars" - ); - - assert_eq!(format_path_str("\x1b[31m/path\x1b[0m")?, "/path"); - assert_eq!(format_path_str("/home/user/dir/")?, "/home/user/dir/"); - assert_eq!( - format_path_str("/home/user/file.txt")?, - "/home/user/file.txt" - ); - assert_eq!( - format_path_str("/home/my_user/DOCS/JVCS_TEST/Workspace/../Vault/")?, - "/home/my_user/DOCS/JVCS_TEST/Vault/" - ); - - assert_eq!(format_path_str("./home/file.txt")?, "home/file.txt"); - assert_eq!(format_path_str("./home/path/")?, "home/path/"); - assert_eq!(format_path_str("./")?, ""); - - Ok(()) - } -} diff --git 
a/crates/utils/string_proc/src/format_processer.rs b/crates/utils/string_proc/src/format_processer.rs deleted file mode 100644 index 8d0a770..0000000 --- a/crates/utils/string_proc/src/format_processer.rs +++ /dev/null @@ -1,132 +0,0 @@ -pub struct FormatProcesser { - content: Vec, -} - -impl From for FormatProcesser { - fn from(value: String) -> Self { - Self { - content: Self::process_string(value), - } - } -} - -impl From<&str> for FormatProcesser { - fn from(value: &str) -> Self { - Self { - content: Self::process_string(value.to_string()), - } - } -} - -impl FormatProcesser { - /// Process the string into an intermediate format - fn process_string(input: String) -> Vec { - let mut result = String::new(); - let mut prev_space = false; - - for c in input.chars() { - match c { - 'a'..='z' | 'A'..='Z' | '0'..='9' => { - result.push(c); - prev_space = false; - } - '_' | ',' | '.' | '-' | ' ' => { - if !prev_space { - result.push(' '); - prev_space = true; - } - } - _ => {} - } - } - - let mut processed = String::new(); - let mut chars = result.chars().peekable(); - - while let Some(c) = chars.next() { - processed.push(c); - if let Some(&next) = chars.peek() - && c.is_lowercase() - && next.is_uppercase() - { - processed.push(' '); - } - } - - processed - .to_lowercase() - .split_whitespace() - .map(|s| s.to_string()) - .collect() - } - - /// Convert to camelCase format (brewCoffee) - pub fn to_camel_case(&self) -> String { - let mut result = String::new(); - for (i, word) in self.content.iter().enumerate() { - if i == 0 { - result.push_str(&word.to_lowercase()); - } else { - let mut chars = word.chars(); - if let Some(first) = chars.next() { - result.push_str(&first.to_uppercase().collect::()); - result.push_str(&chars.collect::().to_lowercase()); - } - } - } - result - } - - /// Convert to PascalCase format (BrewCoffee) - pub fn to_pascal_case(&self) -> String { - let mut result = String::new(); - for word in &self.content { - let mut chars = word.chars(); - if let 
Some(first) = chars.next() { - result.push_str(&first.to_uppercase().collect::()); - result.push_str(&chars.collect::().to_lowercase()); - } - } - result - } - - /// Convert to kebab-case format (brew-coffee) - pub fn to_kebab_case(&self) -> String { - self.content.join("-").to_lowercase() - } - - /// Convert to snake_case format (brew_coffee) - pub fn to_snake_case(&self) -> String { - self.content.join("_").to_lowercase() - } - - /// Convert to dot.case format (brew.coffee) - pub fn to_dot_case(&self) -> String { - self.content.join(".").to_lowercase() - } - - /// Convert to Title Case format (Brew Coffee) - pub fn to_title_case(&self) -> String { - let mut result = String::new(); - for word in &self.content { - let mut chars = word.chars(); - if let Some(first) = chars.next() { - result.push_str(&first.to_uppercase().collect::()); - result.push_str(&chars.collect::().to_lowercase()); - } - result.push(' '); - } - result.pop(); - result - } - - /// Convert to lower case format (brew coffee) - pub fn to_lower_case(&self) -> String { - self.content.join(" ").to_lowercase() - } - - /// Convert to UPPER CASE format (BREW COFFEE) - pub fn to_upper_case(&self) -> String { - self.content.join(" ").to_uppercase() - } -} diff --git a/crates/utils/string_proc/src/lib.rs b/crates/utils/string_proc/src/lib.rs deleted file mode 100644 index 76588c1..0000000 --- a/crates/utils/string_proc/src/lib.rs +++ /dev/null @@ -1,50 +0,0 @@ -pub mod format_path; -pub mod format_processer; -pub mod macros; -pub mod simple_processer; - -#[cfg(test)] -mod tests { - use crate::format_processer::FormatProcesser; - - #[test] - fn test_processer() { - let test_cases = vec![ - ("brew_coffee", "brewCoffee"), - ("brew, coffee", "brewCoffee"), - ("brew-coffee", "brewCoffee"), - ("Brew.Coffee", "brewCoffee"), - ("bRewCofFee", "bRewCofFee"), - ("brewCoffee", "brewCoffee"), - ("b&rewCoffee", "brewCoffee"), - ("BrewCoffee", "brewCoffee"), - ("brew.coffee", "brewCoffee"), - ("Brew_Coffee", 
"brewCoffee"), - ("BREW COFFEE", "brewCoffee"), - ]; - - for (input, expected) in test_cases { - let processor = FormatProcesser::from(input); - assert_eq!( - processor.to_camel_case(), - expected, - "Failed for input: '{}'", - input - ); - } - } - - #[test] - fn test_conversions() { - let processor = FormatProcesser::from("brewCoffee"); - - assert_eq!(processor.to_upper_case(), "BREW COFFEE"); - assert_eq!(processor.to_lower_case(), "brew coffee"); - assert_eq!(processor.to_title_case(), "Brew Coffee"); - assert_eq!(processor.to_dot_case(), "brew.coffee"); - assert_eq!(processor.to_snake_case(), "brew_coffee"); - assert_eq!(processor.to_kebab_case(), "brew-coffee"); - assert_eq!(processor.to_pascal_case(), "BrewCoffee"); - assert_eq!(processor.to_camel_case(), "brewCoffee"); - } -} diff --git a/crates/utils/string_proc/src/macros.rs b/crates/utils/string_proc/src/macros.rs deleted file mode 100644 index 135268e..0000000 --- a/crates/utils/string_proc/src/macros.rs +++ /dev/null @@ -1,63 +0,0 @@ -#[macro_export] -macro_rules! camel_case { - ($input:expr) => {{ - use string_proc::format_processer::FormatProcesser; - FormatProcesser::from($input).to_camel_case() - }}; -} - -#[macro_export] -macro_rules! upper_case { - ($input:expr) => {{ - use string_proc::format_processer::FormatProcesser; - FormatProcesser::from($input).to_upper_case() - }}; -} - -#[macro_export] -macro_rules! lower_case { - ($input:expr) => {{ - use string_proc::format_processer::FormatProcesser; - FormatProcesser::from($input).to_lower_case() - }}; -} - -#[macro_export] -macro_rules! title_case { - ($input:expr) => {{ - use string_proc::format_processer::FormatProcesser; - FormatProcesser::from($input).to_title_case() - }}; -} - -#[macro_export] -macro_rules! dot_case { - ($input:expr) => {{ - use string_proc::format_processer::FormatProcesser; - FormatProcesser::from($input).to_dot_case() - }}; -} - -#[macro_export] -macro_rules! 
snake_case { - ($input:expr) => {{ - use string_proc::format_processer::FormatProcesser; - FormatProcesser::from($input).to_snake_case() - }}; -} - -#[macro_export] -macro_rules! kebab_case { - ($input:expr) => {{ - use string_proc::format_processer::FormatProcesser; - FormatProcesser::from($input).to_kebab_case() - }}; -} - -#[macro_export] -macro_rules! pascal_case { - ($input:expr) => {{ - use string_proc::format_processer::FormatProcesser; - FormatProcesser::from($input).to_pascal_case() - }}; -} diff --git a/crates/utils/string_proc/src/simple_processer.rs b/crates/utils/string_proc/src/simple_processer.rs deleted file mode 100644 index 2de5dfc..0000000 --- a/crates/utils/string_proc/src/simple_processer.rs +++ /dev/null @@ -1,15 +0,0 @@ -/// Sanitizes a file path by replacing special characters with underscores. -/// -/// This function takes a file path as input and returns a sanitized version -/// where characters that are not allowed in file paths (such as path separators -/// and other reserved characters) are replaced with underscores. -pub fn sanitize_file_path>(path: P) -> String { - let path_str = path.as_ref(); - path_str - .chars() - .map(|c| match c { - '/' | '\\' | ':' | '*' | '?' 
| '"' | '<' | '>' | '|' => '_', - _ => c, - }) - .collect() -} diff --git a/crates/utils/tcp_connection/Cargo.toml b/crates/utils/tcp_connection/Cargo.toml deleted file mode 100644 index da258be..0000000 --- a/crates/utils/tcp_connection/Cargo.toml +++ /dev/null @@ -1,28 +0,0 @@ -[package] -name = "tcp_connection" -edition = "2024" -version.workspace = true - -[dependencies] -tokio = { version = "1.48.0", features = ["full"] } - -# Serialization -serde = { version = "1.0.228", features = ["derive"] } -serde_json = "1.0.145" -rmp-serde = "1.3.0" - -# Error handling -thiserror = "2.0.17" - -# Uuid & Random -uuid = "1.18.1" - -# Crypto -rsa = { version = "0.9", features = ["pkcs5", "sha2"] } -ed25519-dalek = "3.0.0-pre.1" -ring = "0.17.14" -rand = "0.10.0-rc.0" -base64 = "0.22.1" -pem = "3.0.6" -crc = "3.3.0" -blake3 = "1.8.2" diff --git a/crates/utils/tcp_connection/src/error.rs b/crates/utils/tcp_connection/src/error.rs deleted file mode 100644 index 32d06cc..0000000 --- a/crates/utils/tcp_connection/src/error.rs +++ /dev/null @@ -1,122 +0,0 @@ -use std::io; -use thiserror::Error; - -#[derive(Error, Debug, Clone)] -pub enum TcpTargetError { - #[error("Authentication failed: {0}")] - Authentication(String), - - #[error("Reference sheet not allowed: {0}")] - ReferenceSheetNotAllowed(String), - - #[error("Cryptographic error: {0}")] - Crypto(String), - - #[error("File operation error: {0}")] - File(String), - - #[error("I/O error: {0}")] - Io(String), - - #[error("Invalid configuration: {0}")] - Config(String), - - #[error("Locked: {0}")] - Locked(String), - - #[error("Network error: {0}")] - Network(String), - - #[error("No result: {0}")] - NoResult(String), - - #[error("Not found: {0}")] - NotFound(String), - - #[error("Not local machine: {0}")] - NotLocal(String), - - #[error("Not remote machine: {0}")] - NotRemote(String), - - #[error("Pool already exists: {0}")] - PoolAlreadyExists(String), - - #[error("Protocol error: {0}")] - Protocol(String), - - 
#[error("Serialization error: {0}")] - Serialization(String), - - #[error("Timeout: {0}")] - Timeout(String), - - #[error("Unsupported operation: {0}")] - Unsupported(String), -} - -impl From for TcpTargetError { - fn from(error: io::Error) -> Self { - TcpTargetError::Io(error.to_string()) - } -} - -impl From for TcpTargetError { - fn from(error: serde_json::Error) -> Self { - TcpTargetError::Serialization(error.to_string()) - } -} - -impl From<&str> for TcpTargetError { - fn from(value: &str) -> Self { - TcpTargetError::Protocol(value.to_string()) - } -} - -impl From for TcpTargetError { - fn from(value: String) -> Self { - TcpTargetError::Protocol(value) - } -} - -impl From for TcpTargetError { - fn from(error: rsa::errors::Error) -> Self { - TcpTargetError::Crypto(error.to_string()) - } -} - -impl From for TcpTargetError { - fn from(error: ed25519_dalek::SignatureError) -> Self { - TcpTargetError::Crypto(error.to_string()) - } -} - -impl From for TcpTargetError { - fn from(error: ring::error::Unspecified) -> Self { - TcpTargetError::Crypto(error.to_string()) - } -} - -impl From for TcpTargetError { - fn from(error: base64::DecodeError) -> Self { - TcpTargetError::Serialization(error.to_string()) - } -} - -impl From for TcpTargetError { - fn from(error: pem::PemError) -> Self { - TcpTargetError::Crypto(error.to_string()) - } -} - -impl From for TcpTargetError { - fn from(error: rmp_serde::encode::Error) -> Self { - TcpTargetError::Serialization(error.to_string()) - } -} - -impl From for TcpTargetError { - fn from(error: rmp_serde::decode::Error) -> Self { - TcpTargetError::Serialization(error.to_string()) - } -} diff --git a/crates/utils/tcp_connection/src/instance.rs b/crates/utils/tcp_connection/src/instance.rs deleted file mode 100644 index 8e6886c..0000000 --- a/crates/utils/tcp_connection/src/instance.rs +++ /dev/null @@ -1,542 +0,0 @@ -use std::{path::Path, time::Duration}; - -use serde::Serialize; -use tokio::{ - fs::{File, OpenOptions}, - 
io::{AsyncBufReadExt, AsyncReadExt, AsyncWriteExt, BufReader, BufWriter}, - net::TcpStream, -}; - -use ring::signature::{self}; - -use crate::error::TcpTargetError; - -const DEFAULT_CHUNK_SIZE: usize = 4096; -const DEFAULT_TIMEOUT_SECS: u64 = 10; - -const ECDSA_P256_SHA256_ASN1_SIGNING: &signature::EcdsaSigningAlgorithm = - &signature::ECDSA_P256_SHA256_ASN1_SIGNING; -const ECDSA_P384_SHA384_ASN1_SIGNING: &signature::EcdsaSigningAlgorithm = - &signature::ECDSA_P384_SHA384_ASN1_SIGNING; - -#[derive(Debug, Clone)] -pub struct ConnectionConfig { - pub chunk_size: usize, - pub timeout_secs: u64, - pub enable_crc_validation: bool, -} - -impl Default for ConnectionConfig { - fn default() -> Self { - Self { - chunk_size: DEFAULT_CHUNK_SIZE, - timeout_secs: DEFAULT_TIMEOUT_SECS, - enable_crc_validation: false, - } - } -} - -pub struct ConnectionInstance { - pub(crate) stream: TcpStream, - config: ConnectionConfig, -} - -impl From for ConnectionInstance { - fn from(stream: TcpStream) -> Self { - Self { - stream, - config: ConnectionConfig::default(), - } - } -} - -impl ConnectionInstance { - /// Create a new ConnectionInstance with custom configuration - pub fn with_config(stream: TcpStream, config: ConnectionConfig) -> Self { - Self { stream, config } - } - - /// Get a reference to the current configuration - pub fn config(&self) -> &ConnectionConfig { - &self.config - } - - /// Get a mutable reference to the current configuration - pub fn config_mut(&mut self) -> &mut ConnectionConfig { - &mut self.config - } - /// Serialize data and write to the target machine - pub async fn write(&mut self, data: Data) -> Result<(), TcpTargetError> - where - Data: Default + Serialize, - { - let Ok(json_text) = serde_json::to_string(&data) else { - return Err(TcpTargetError::Serialization( - "Serialize failed.".to_string(), - )); - }; - Self::write_text(self, json_text).await?; - Ok(()) - } - - /// Serialize data to MessagePack and write to the target machine - pub async fn 
write_msgpack(&mut self, data: Data) -> Result<(), TcpTargetError> - where - Data: Serialize, - { - let msgpack_data = rmp_serde::to_vec(&data)?; - let len = msgpack_data.len() as u32; - - self.stream.write_all(&len.to_be_bytes()).await?; - self.stream.write_all(&msgpack_data).await?; - Ok(()) - } - - /// Read data from target machine and deserialize from MessagePack - pub async fn read_msgpack(&mut self) -> Result - where - Data: serde::de::DeserializeOwned, - { - let mut len_buf = [0u8; 4]; - self.stream.read_exact(&mut len_buf).await?; - let len = u32::from_be_bytes(len_buf) as usize; - - let mut buffer = vec![0; len]; - self.stream.read_exact(&mut buffer).await?; - - let data = rmp_serde::from_slice(&buffer)?; - Ok(data) - } - - /// Read data from target machine and deserialize - pub async fn read(&mut self) -> Result - where - Data: Default + serde::de::DeserializeOwned, - { - let Ok(json_text) = Self::read_text(self).await else { - return Err(TcpTargetError::Io("Read failed.".to_string())); - }; - let Ok(deser_obj) = serde_json::from_str::(&json_text) else { - return Err(TcpTargetError::Serialization( - "Deserialize failed.".to_string(), - )); - }; - Ok(deser_obj) - } - - /// Serialize data and write to the target machine - pub async fn write_large(&mut self, data: Data) -> Result<(), TcpTargetError> - where - Data: Default + Serialize, - { - let Ok(json_text) = serde_json::to_string(&data) else { - return Err(TcpTargetError::Serialization( - "Serialize failed.".to_string(), - )); - }; - Self::write_large_text(self, json_text).await?; - Ok(()) - } - - /// Read data from target machine and deserialize - pub async fn read_large( - &mut self, - buffer_size: impl Into, - ) -> Result - where - Data: Default + serde::de::DeserializeOwned, - { - let Ok(json_text) = Self::read_large_text(self, buffer_size).await else { - return Err(TcpTargetError::Io("Read failed.".to_string())); - }; - let Ok(deser_obj) = serde_json::from_str::(&json_text) else { - return 
Err(TcpTargetError::Serialization( - "Deserialize failed.".to_string(), - )); - }; - Ok(deser_obj) - } - - /// Write text to the target machine - pub async fn write_text(&mut self, text: impl Into) -> Result<(), TcpTargetError> { - let text = text.into(); - let bytes = text.as_bytes(); - let len = bytes.len() as u32; - - self.stream.write_all(&len.to_be_bytes()).await?; - match self.stream.write_all(bytes).await { - Ok(_) => Ok(()), - Err(err) => Err(TcpTargetError::Io(err.to_string())), - } - } - - /// Read text from the target machine - pub async fn read_text(&mut self) -> Result { - let mut len_buf = [0u8; 4]; - self.stream.read_exact(&mut len_buf).await?; - let len = u32::from_be_bytes(len_buf) as usize; - - let mut buffer = vec![0; len]; - self.stream.read_exact(&mut buffer).await?; - - match String::from_utf8(buffer) { - Ok(text) => Ok(text), - Err(err) => Err(TcpTargetError::Serialization(format!( - "Invalid UTF-8 sequence: {}", - err - ))), - } - } - - /// Write large text to the target machine (chunked) - pub async fn write_large_text( - &mut self, - text: impl Into, - ) -> Result<(), TcpTargetError> { - let text = text.into(); - let bytes = text.as_bytes(); - let mut offset = 0; - - while offset < bytes.len() { - let chunk = &bytes[offset..]; - let written = match self.stream.write(chunk).await { - Ok(n) => n, - Err(err) => return Err(TcpTargetError::Io(err.to_string())), - }; - offset += written; - } - - Ok(()) - } - - /// Read large text from the target machine (chunked) - pub async fn read_large_text( - &mut self, - chunk_size: impl Into, - ) -> Result { - let chunk_size = chunk_size.into() as usize; - let mut buffer = Vec::new(); - let mut chunk_buf = vec![0; chunk_size]; - - loop { - match self.stream.read(&mut chunk_buf).await { - Ok(0) => break, // EOF - Ok(n) => { - buffer.extend_from_slice(&chunk_buf[..n]); - } - Err(err) => return Err(TcpTargetError::Io(err.to_string())), - } - } - - Ok(String::from_utf8_lossy(&buffer).to_string()) - } - - /// 
Write large MessagePack data to the target machine (chunked) - pub async fn write_large_msgpack( - &mut self, - data: Data, - chunk_size: impl Into, - ) -> Result<(), TcpTargetError> - where - Data: Serialize, - { - let msgpack_data = rmp_serde::to_vec(&data)?; - let chunk_size = chunk_size.into() as usize; - let len = msgpack_data.len() as u32; - - // Write total length first - self.stream.write_all(&len.to_be_bytes()).await?; - - // Write data in chunks - let mut offset = 0; - while offset < msgpack_data.len() { - let end = std::cmp::min(offset + chunk_size, msgpack_data.len()); - let chunk = &msgpack_data[offset..end]; - match self.stream.write(chunk).await { - Ok(n) => offset += n, - Err(err) => return Err(TcpTargetError::Io(err.to_string())), - } - } - - Ok(()) - } - - /// Read large MessagePack data from the target machine (chunked) - pub async fn read_large_msgpack( - &mut self, - chunk_size: impl Into, - ) -> Result - where - Data: serde::de::DeserializeOwned, - { - let chunk_size = chunk_size.into() as usize; - - // Read total length first - let mut len_buf = [0u8; 4]; - self.stream.read_exact(&mut len_buf).await?; - let total_len = u32::from_be_bytes(len_buf) as usize; - - // Read data in chunks - let mut buffer = Vec::with_capacity(total_len); - let mut remaining = total_len; - let mut chunk_buf = vec![0; chunk_size]; - - while remaining > 0 { - let read_size = std::cmp::min(chunk_size, remaining); - let chunk = &mut chunk_buf[..read_size]; - - match self.stream.read_exact(chunk).await { - Ok(_) => { - buffer.extend_from_slice(chunk); - remaining -= read_size; - } - Err(err) => return Err(TcpTargetError::Io(err.to_string())), - } - } - - let data = rmp_serde::from_slice(&buffer)?; - Ok(data) - } - - /// Write file to target machine. 
- pub async fn write_file(&mut self, file_path: impl AsRef) -> Result<(), TcpTargetError> { - let path = file_path.as_ref(); - - // Validate file - if !path.exists() { - return Err(TcpTargetError::File(format!( - "File not found: {}", - path.display() - ))); - } - if path.is_dir() { - return Err(TcpTargetError::File(format!( - "Path is directory: {}", - path.display() - ))); - } - - // Open file and get metadata - let mut file = File::open(path).await?; - let file_size = file.metadata().await?.len(); - - // Send file header (version + size + crc) - self.stream.write_all(&1u64.to_be_bytes()).await?; - self.stream.write_all(&file_size.to_be_bytes()).await?; - - // Calculate and send CRC32 if enabled - let file_crc = if self.config.enable_crc_validation { - let crc32 = crc::Crc::::new(&crc::CRC_32_ISO_HDLC); - let mut crc_calculator = crc32.digest(); - - let mut temp_reader = - BufReader::with_capacity(self.config.chunk_size, File::open(path).await?); - let mut temp_buffer = vec![0u8; self.config.chunk_size]; - let mut temp_bytes_read = 0; - - while temp_bytes_read < file_size { - let bytes_to_read = - (file_size - temp_bytes_read).min(self.config.chunk_size as u64) as usize; - temp_reader - .read_exact(&mut temp_buffer[..bytes_to_read]) - .await?; - crc_calculator.update(&temp_buffer[..bytes_to_read]); - temp_bytes_read += bytes_to_read as u64; - } - - crc_calculator.finalize() - } else { - 0 - }; - - self.stream.write_all(&file_crc.to_be_bytes()).await?; - - // If file size is 0, skip content transfer - if file_size == 0 { - self.stream.flush().await?; - - // Wait for receiver confirmation - let mut ack = [0u8; 1]; - tokio::time::timeout( - Duration::from_secs(self.config.timeout_secs), - self.stream.read_exact(&mut ack), - ) - .await - .map_err(|_| TcpTargetError::Timeout("Ack timeout".to_string()))??; - - if ack[0] != 1 { - return Err(TcpTargetError::Protocol( - "Receiver verification failed".to_string(), - )); - } - - return Ok(()); - } - - // Transfer file 
content - let mut reader = BufReader::with_capacity(self.config.chunk_size, &mut file); - let mut bytes_sent = 0; - - while bytes_sent < file_size { - let buffer = reader.fill_buf().await?; - if buffer.is_empty() { - break; - } - - let chunk_size = buffer.len().min((file_size - bytes_sent) as usize); - self.stream.write_all(&buffer[..chunk_size]).await?; - reader.consume(chunk_size); - - bytes_sent += chunk_size as u64; - } - - // Verify transfer completion - if bytes_sent != file_size { - return Err(TcpTargetError::File(format!( - "Transfer incomplete: expected {} bytes, sent {} bytes", - file_size, bytes_sent - ))); - } - - self.stream.flush().await?; - - // Wait for receiver confirmation - let mut ack = [0u8; 1]; - tokio::time::timeout( - Duration::from_secs(self.config.timeout_secs), - self.stream.read_exact(&mut ack), - ) - .await - .map_err(|_| TcpTargetError::Timeout("Ack timeout".to_string()))??; - - if ack[0] != 1 { - return Err(TcpTargetError::Protocol( - "Receiver verification failed".to_string(), - )); - } - - Ok(()) - } - - /// Read file from target machine - pub async fn read_file(&mut self, save_path: impl AsRef) -> Result<(), TcpTargetError> { - let path = save_path.as_ref(); - // Create CRC instance at function scope to ensure proper lifetime - let crc_instance = crc::Crc::::new(&crc::CRC_32_ISO_HDLC); - - // Make sure parent directory exists - if let Some(parent) = path.parent() - && !parent.exists() - { - tokio::fs::create_dir_all(parent).await?; - } - - // Read file header (version + size + crc) - let mut version_buf = [0u8; 8]; - self.stream.read_exact(&mut version_buf).await?; - let version = u64::from_be_bytes(version_buf); - if version != 1 { - return Err(TcpTargetError::Protocol( - "Unsupported transfer version".to_string(), - )); - } - - let mut size_buf = [0u8; 8]; - self.stream.read_exact(&mut size_buf).await?; - let file_size = u64::from_be_bytes(size_buf); - - let mut expected_crc_buf = [0u8; 4]; - self.stream.read_exact(&mut 
expected_crc_buf).await?; - let expected_crc = u32::from_be_bytes(expected_crc_buf); - if file_size == 0 { - // Create empty file and return early - let _file = OpenOptions::new() - .write(true) - .create(true) - .truncate(true) - .open(path) - .await?; - // Send confirmation - self.stream.write_all(&[1u8]).await?; - self.stream.flush().await?; - return Ok(()); - } - - // Prepare output file - let file = OpenOptions::new() - .write(true) - .create(true) - .truncate(true) - .open(path) - .await?; - let mut writer = BufWriter::with_capacity(self.config.chunk_size, file); - - // Receive file content with CRC calculation if enabled - let mut bytes_received = 0; - let mut buffer = vec![0u8; self.config.chunk_size]; - let mut crc_calculator = if self.config.enable_crc_validation { - Some(crc_instance.digest()) - } else { - None - }; - - while bytes_received < file_size { - let bytes_to_read = - (file_size - bytes_received).min(self.config.chunk_size as u64) as usize; - let chunk = &mut buffer[..bytes_to_read]; - - self.stream.read_exact(chunk).await?; - - writer.write_all(chunk).await?; - - // Update CRC if validation is enabled - if let Some(ref mut crc) = crc_calculator { - crc.update(chunk); - } - - bytes_received += bytes_to_read as u64; - } - - // Verify transfer completion - if bytes_received != file_size { - return Err(TcpTargetError::File(format!( - "Transfer incomplete: expected {} bytes, received {} bytes", - file_size, bytes_received - ))); - } - - writer.flush().await?; - - // Validate CRC if enabled - if self.config.enable_crc_validation - && let Some(crc_calculator) = crc_calculator - { - let actual_crc = crc_calculator.finalize(); - if actual_crc != expected_crc && expected_crc != 0 { - return Err(TcpTargetError::File(format!( - "CRC validation failed: expected {:08x}, got {:08x}", - expected_crc, actual_crc - ))); - } - } - - // Final flush and sync - writer.flush().await?; - writer.into_inner().sync_all().await?; - - // Verify completion - if 
bytes_received != file_size { - let _ = tokio::fs::remove_file(path).await; - return Err(TcpTargetError::File(format!( - "Transfer incomplete: expected {} bytes, received {} bytes", - file_size, bytes_received - ))); - } - - // Send confirmation - self.stream.write_all(&[1u8]).await?; - self.stream.flush().await?; - - Ok(()) - } -} diff --git a/crates/utils/tcp_connection/src/instance_challenge.rs b/crates/utils/tcp_connection/src/instance_challenge.rs deleted file mode 100644 index 3a7f6a3..0000000 --- a/crates/utils/tcp_connection/src/instance_challenge.rs +++ /dev/null @@ -1,311 +0,0 @@ -use std::path::Path; - -use rand::TryRngCore; -use rsa::{ - RsaPrivateKey, RsaPublicKey, - pkcs1::{DecodeRsaPrivateKey, DecodeRsaPublicKey}, - sha2, -}; -use tokio::io::{AsyncReadExt, AsyncWriteExt}; - -use ed25519_dalek::{Signature, Signer, SigningKey, Verifier, VerifyingKey}; -use ring::rand::SystemRandom; -use ring::signature::{ - self, ECDSA_P256_SHA256_ASN1, ECDSA_P384_SHA384_ASN1, EcdsaKeyPair, RSA_PKCS1_2048_8192_SHA256, - UnparsedPublicKey, -}; - -use crate::{error::TcpTargetError, instance::ConnectionInstance}; - -const ECDSA_P256_SHA256_ASN1_SIGNING: &signature::EcdsaSigningAlgorithm = - &signature::ECDSA_P256_SHA256_ASN1_SIGNING; -const ECDSA_P384_SHA384_ASN1_SIGNING: &signature::EcdsaSigningAlgorithm = - &signature::ECDSA_P384_SHA384_ASN1_SIGNING; - -impl ConnectionInstance { - /// Initiates a challenge to the target machine to verify connection security - /// - /// This method performs a cryptographic challenge-response authentication: - /// 1. Generates a random 32-byte challenge - /// 2. Sends the challenge to the target machine - /// 3. Receives a digital signature of the challenge - /// 4. 
Verifies the signature using the appropriate public key - /// - /// # Arguments - /// * `public_key_dir` - Directory containing public key files for verification - /// - /// # Returns - /// * `Ok((true, "KeyId"))` - Challenge verification successful - /// * `Ok((false, "KeyId"))` - Challenge verification failed - /// * `Err(TcpTargetError)` - Error during challenge process - pub async fn challenge( - &mut self, - public_key_dir: impl AsRef, - ) -> Result<(bool, String), TcpTargetError> { - // Generate random challenge - let mut challenge = [0u8; 32]; - rand::rngs::OsRng - .try_fill_bytes(&mut challenge) - .map_err(|e| { - TcpTargetError::Crypto(format!("Failed to generate random challenge: {}", e)) - })?; - - // Send challenge to target - self.stream.write_all(&challenge).await?; - self.stream.flush().await?; - - // Read signature from target - let mut signature = Vec::new(); - let mut signature_len_buf = [0u8; 4]; - self.stream.read_exact(&mut signature_len_buf).await?; - - let signature_len = u32::from_be_bytes(signature_len_buf) as usize; - signature.resize(signature_len, 0); - self.stream.read_exact(&mut signature).await?; - - // Read key identifier from target to identify which public key to use - let mut key_id_len_buf = [0u8; 4]; - self.stream.read_exact(&mut key_id_len_buf).await?; - let key_id_len = u32::from_be_bytes(key_id_len_buf) as usize; - - let mut key_id_buf = vec![0u8; key_id_len]; - self.stream.read_exact(&mut key_id_buf).await?; - let key_id = String::from_utf8(key_id_buf) - .map_err(|e| TcpTargetError::Crypto(format!("Invalid key identifier: {}", e)))?; - - // Load appropriate public key - let public_key_path = public_key_dir.as_ref().join(format!("{}.pem", key_id)); - if !public_key_path.exists() { - return Ok((false, key_id)); - } - - let public_key_pem = tokio::fs::read_to_string(&public_key_path).await?; - - // Try to verify with different key types - let verified = if let Ok(rsa_key) = RsaPublicKey::from_pkcs1_pem(&public_key_pem) { - let 
padding = rsa::pkcs1v15::Pkcs1v15Sign::new::(); - rsa_key.verify(padding, &challenge, &signature).is_ok() - } else if let Ok(ed25519_key) = - VerifyingKey::from_bytes(&parse_ed25519_public_key(&public_key_pem)) - { - if signature.len() == 64 { - let sig_bytes: [u8; 64] = signature.as_slice().try_into().map_err(|_| { - TcpTargetError::Crypto("Invalid signature length for Ed25519".to_string()) - })?; - let sig = Signature::from_bytes(&sig_bytes); - ed25519_key.verify(&challenge, &sig).is_ok() - } else { - false - } - } else if let Ok(dsa_key_info) = parse_dsa_public_key(&public_key_pem) { - verify_dsa_signature(&dsa_key_info, &challenge, &signature) - } else { - false - }; - - Ok((verified, key_id)) - } - - /// Accepts a challenge from the target machine to verify connection security - /// - /// This method performs a cryptographic challenge-response authentication: - /// 1. Receives a random 32-byte challenge from the target machine - /// 2. Signs the challenge using the appropriate private key - /// 3. Sends the digital signature back to the target machine - /// 4. 
Sends the key identifier for public key verification - /// - /// # Arguments - /// * `private_key_file` - Path to the private key file for signing - /// * `verify_public_key` - Key identifier for public key verification - /// - /// # Returns - /// * `Ok(true)` - Challenge response sent successfully - /// * `Ok(false)` - Private key format not supported - /// * `Err(TcpTargetError)` - Error during challenge response process - pub async fn accept_challenge( - &mut self, - private_key_file: impl AsRef, - verify_public_key: &str, - ) -> Result { - // Read challenge from initiator - let mut challenge = [0u8; 32]; - self.stream.read_exact(&mut challenge).await?; - - // Load private key - let private_key_pem = tokio::fs::read_to_string(&private_key_file) - .await - .map_err(|e| { - TcpTargetError::NotFound(format!( - "Read private key \"{}\" failed: \"{}\"", - private_key_file - .as_ref() - .display() - .to_string() - .split("/") - .last() - .unwrap_or("UNKNOWN"), - e - )) - })?; - - // Sign the challenge with supported key types - let signature = if let Ok(rsa_key) = RsaPrivateKey::from_pkcs1_pem(&private_key_pem) { - let padding = rsa::pkcs1v15::Pkcs1v15Sign::new::(); - rsa_key.sign(padding, &challenge)? - } else if let Ok(ed25519_key) = parse_ed25519_private_key(&private_key_pem) { - ed25519_key.sign(&challenge).to_bytes().to_vec() - } else if let Ok(dsa_key_info) = parse_dsa_private_key(&private_key_pem) { - sign_with_dsa(&dsa_key_info, &challenge)? 
- } else { - return Ok(false); - }; - - // Send signature length and signature - let signature_len = signature.len() as u32; - self.stream.write_all(&signature_len.to_be_bytes()).await?; - self.stream.flush().await?; - self.stream.write_all(&signature).await?; - self.stream.flush().await?; - - // Send key identifier for public key identification - let key_id_bytes = verify_public_key.as_bytes(); - let key_id_len = key_id_bytes.len() as u32; - self.stream.write_all(&key_id_len.to_be_bytes()).await?; - self.stream.flush().await?; - self.stream.write_all(key_id_bytes).await?; - self.stream.flush().await?; - - Ok(true) - } -} - -/// Parse Ed25519 public key from PEM format -fn parse_ed25519_public_key(pem: &str) -> [u8; 32] { - // Robust parsing for Ed25519 public key using pem crate - let mut key_bytes = [0u8; 32]; - - if let Ok(pem_data) = pem::parse(pem) - && pem_data.tag() == "PUBLIC KEY" - && pem_data.contents().len() >= 32 - { - let contents = pem_data.contents(); - key_bytes.copy_from_slice(&contents[contents.len() - 32..]); - } - key_bytes -} - -/// Parse Ed25519 private key from PEM format -fn parse_ed25519_private_key(pem: &str) -> Result { - if let Ok(pem_data) = pem::parse(pem) - && pem_data.tag() == "PRIVATE KEY" - && pem_data.contents().len() >= 32 - { - let contents = pem_data.contents(); - let mut seed = [0u8; 32]; - seed.copy_from_slice(&contents[contents.len() - 32..]); - return Ok(SigningKey::from_bytes(&seed)); - } - Err(TcpTargetError::Crypto( - "Invalid Ed25519 private key format".to_string(), - )) -} - -/// Parse DSA public key information from PEM -fn parse_dsa_public_key( - pem: &str, -) -> Result<(&'static dyn signature::VerificationAlgorithm, Vec), TcpTargetError> { - if let Ok(pem_data) = pem::parse(pem) { - let contents = pem_data.contents().to_vec(); - - // Try different DSA algorithms based on PEM tag - match pem_data.tag() { - "EC PUBLIC KEY" | "PUBLIC KEY" if pem.contains("ECDSA") || pem.contains("ecdsa") => { - if pem.contains("P-256") 
{ - return Ok((&ECDSA_P256_SHA256_ASN1, contents)); - } else if pem.contains("P-384") { - return Ok((&ECDSA_P384_SHA384_ASN1, contents)); - } - } - "RSA PUBLIC KEY" | "PUBLIC KEY" => { - return Ok((&RSA_PKCS1_2048_8192_SHA256, contents)); - } - _ => {} - } - - // Default to RSA for unknown types - return Ok((&RSA_PKCS1_2048_8192_SHA256, contents)); - } - Err(TcpTargetError::Crypto( - "Invalid DSA public key format".to_string(), - )) -} - -/// Parse DSA private key information from PEM -fn parse_dsa_private_key( - pem: &str, -) -> Result<(&'static dyn signature::VerificationAlgorithm, Vec), TcpTargetError> { - // For DSA, private key verification uses the same algorithm as public key - parse_dsa_public_key(pem) -} - -/// Verify DSA signature -fn verify_dsa_signature( - algorithm_and_key: &(&'static dyn signature::VerificationAlgorithm, Vec), - message: &[u8], - signature: &[u8], -) -> bool { - let (algorithm, key_bytes) = algorithm_and_key; - let public_key = UnparsedPublicKey::new(*algorithm, key_bytes); - public_key.verify(message, signature).is_ok() -} - -/// Sign with DSA -fn sign_with_dsa( - algorithm_and_key: &(&'static dyn signature::VerificationAlgorithm, Vec), - message: &[u8], -) -> Result, TcpTargetError> { - let (algorithm, key_bytes) = algorithm_and_key; - - // Handle different DSA/ECDSA algorithms by comparing algorithm identifiers - // Since we can't directly compare trait objects, we use pointer comparison - let algorithm_ptr = algorithm as *const _ as *const (); - let ecdsa_p256_ptr = &ECDSA_P256_SHA256_ASN1 as *const _ as *const (); - let ecdsa_p384_ptr = &ECDSA_P384_SHA384_ASN1 as *const _ as *const (); - - if algorithm_ptr == ecdsa_p256_ptr { - let key_pair = EcdsaKeyPair::from_pkcs8( - ECDSA_P256_SHA256_ASN1_SIGNING, - key_bytes, - &SystemRandom::new(), - ) - .map_err(|e| { - TcpTargetError::Crypto(format!("Failed to create ECDSA P-256 key pair: {}", e)) - })?; - - let signature = key_pair - .sign(&SystemRandom::new(), message) - .map_err(|e| 
TcpTargetError::Crypto(format!("ECDSA P-256 signing failed: {}", e)))?; - - Ok(signature.as_ref().to_vec()) - } else if algorithm_ptr == ecdsa_p384_ptr { - let key_pair = EcdsaKeyPair::from_pkcs8( - ECDSA_P384_SHA384_ASN1_SIGNING, - key_bytes, - &SystemRandom::new(), - ) - .map_err(|e| { - TcpTargetError::Crypto(format!("Failed to create ECDSA P-384 key pair: {}", e)) - })?; - - let signature = key_pair - .sign(&SystemRandom::new(), message) - .map_err(|e| TcpTargetError::Crypto(format!("ECDSA P-384 signing failed: {}", e)))?; - - Ok(signature.as_ref().to_vec()) - } else { - // RSA or unsupported algorithm - Err(TcpTargetError::Unsupported( - "DSA/ECDSA signing not supported for this algorithm type".to_string(), - )) - } -} diff --git a/crates/utils/tcp_connection/src/lib.rs b/crates/utils/tcp_connection/src/lib.rs deleted file mode 100644 index 6a2e599..0000000 --- a/crates/utils/tcp_connection/src/lib.rs +++ /dev/null @@ -1,6 +0,0 @@ -#[allow(dead_code)] -pub mod instance; - -pub mod instance_challenge; - -pub mod error; diff --git a/crates/utils/tcp_connection/tcp_connection_test/Cargo.toml b/crates/utils/tcp_connection/tcp_connection_test/Cargo.toml deleted file mode 100644 index 19a6e9b..0000000 --- a/crates/utils/tcp_connection/tcp_connection_test/Cargo.toml +++ /dev/null @@ -1,9 +0,0 @@ -[package] -name = "tcp_connection_test" -edition = "2024" -version.workspace = true - -[dependencies] -tcp_connection = { path = "../../tcp_connection" } -tokio = { version = "1.48.0", features = ["full"] } -serde = { version = "1.0.228", features = ["derive"] } diff --git a/crates/utils/tcp_connection/tcp_connection_test/res/image/test_transfer.png b/crates/utils/tcp_connection/tcp_connection_test/res/image/test_transfer.png deleted file mode 100644 index 5fa94f0..0000000 Binary files a/crates/utils/tcp_connection/tcp_connection_test/res/image/test_transfer.png and /dev/null differ diff --git a/crates/utils/tcp_connection/tcp_connection_test/res/key/test_key.pem 
b/crates/utils/tcp_connection/tcp_connection_test/res/key/test_key.pem deleted file mode 100644 index e155876..0000000 --- a/crates/utils/tcp_connection/tcp_connection_test/res/key/test_key.pem +++ /dev/null @@ -1,13 +0,0 @@ ------BEGIN RSA PUBLIC KEY----- -MIICCgKCAgEAl5vyIwGYiQ1zZpW2tg+LwOUV547T2SjlzKQjcms5je/epP4CnUfT -5cmHCe8ZaSbnofcntCzi8FzMpQmzhNzFk5tCAe4tSrghfr2kYDO7aUL0G09KbNZ5 -iuMTkMaHx6LMjZ+Ljy8fC47yC2dFMUgLjGS7xS6rnIo4YtFuvMdwbLjs7mSn+vVc -kcEV8RLlQg8wDbzpl66Jd1kiUgPfVLBRTLE/iL8kUCz1l8c+DvOzr3ATwJysM9CG -LFahGLlTd3CZaj0QsEzf/AQsn79Su+rnCXhXqcvynhAcil0UW9RWp5Zsvp3Me3W8 -pJg6vZuAA6lQ062hkRLiJ91F2rpyqtkax5i/simLjelpsRzLKo6Xsz1bZht2+5d5 -ArgTBtZBxS044t8caZWLXetnPEcxEGz8KYUVKf7X9S7R53gy36y88Fbu9giqUr3m -b3Da+SYzBT//hacGn55nhzLRdsJGaFFWcKCbpue6JHLsFhizhdEAjaec0hfphw29 -veY0adPdIFLQDmMKaNk4ulrz8Lbgpqn9gxx6fRssj9jqNJmW64a0eV+Rw7BCJazH -xp3zz4A3rwdI8BjxLUb3YiCUcavA9WzJ1DUfdX1FSvbcFw4CEiGJjfpWGrm1jtc6 -DMOsoX/C6yFOyRpipsgqIToBClchLSNgrO6A7SIoSdIqNDEgIanFcjECAwEAAQ== ------END RSA PUBLIC KEY----- diff --git a/crates/utils/tcp_connection/tcp_connection_test/res/key/test_key_private.pem b/crates/utils/tcp_connection/tcp_connection_test/res/key/test_key_private.pem deleted file mode 100644 index 183d2d9..0000000 --- a/crates/utils/tcp_connection/tcp_connection_test/res/key/test_key_private.pem +++ /dev/null @@ -1,51 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIJKAIBAAKCAgEAl5vyIwGYiQ1zZpW2tg+LwOUV547T2SjlzKQjcms5je/epP4C -nUfT5cmHCe8ZaSbnofcntCzi8FzMpQmzhNzFk5tCAe4tSrghfr2kYDO7aUL0G09K -bNZ5iuMTkMaHx6LMjZ+Ljy8fC47yC2dFMUgLjGS7xS6rnIo4YtFuvMdwbLjs7mSn -+vVckcEV8RLlQg8wDbzpl66Jd1kiUgPfVLBRTLE/iL8kUCz1l8c+DvOzr3ATwJys -M9CGLFahGLlTd3CZaj0QsEzf/AQsn79Su+rnCXhXqcvynhAcil0UW9RWp5Zsvp3M -e3W8pJg6vZuAA6lQ062hkRLiJ91F2rpyqtkax5i/simLjelpsRzLKo6Xsz1bZht2 -+5d5ArgTBtZBxS044t8caZWLXetnPEcxEGz8KYUVKf7X9S7R53gy36y88Fbu9giq -Ur3mb3Da+SYzBT//hacGn55nhzLRdsJGaFFWcKCbpue6JHLsFhizhdEAjaec0hfp -hw29veY0adPdIFLQDmMKaNk4ulrz8Lbgpqn9gxx6fRssj9jqNJmW64a0eV+Rw7BC 
-JazHxp3zz4A3rwdI8BjxLUb3YiCUcavA9WzJ1DUfdX1FSvbcFw4CEiGJjfpWGrm1 -jtc6DMOsoX/C6yFOyRpipsgqIToBClchLSNgrO6A7SIoSdIqNDEgIanFcjECAwEA -AQKCAgAd3cg9Ei7o7N/reRnV0skutlJy2+Wq9Y4TmtAq1amwZu0e5rVAI6rALUuv -bs08NEBUXVqSeXc5b6aW6orVZSJ8+gxuUevVOOHMVHKhyv8j9N8e1Cduum+WJzav -AhU0hEM0sRXunpNIlR/klDMCytUPkraU2SVQgMAr42MjyExC9skiC202GIjkY7u9 -UoIcWd6XDjycN3N4MfR7YKzpw5Q4fgBsoW73Zmv5OvRkQKkIqhUSECsyR+VuraAt -vTCOqn1meuIjQPms7WuXCrszLsrVyEHIvtcsQTNGJKECmBl8CTuh73cdaSvA5wZH -XO9CiWPVV3KpICWyQbplpO467usB0liMX3mcMp+Ztp/p/ns6Ov5L6AR8LcDJ43KA -454ZUYxbRjqG+cW6Owm5Ii0+UOEGOi+6Jhc4NGZuYU2gDrhuz4yejY6bDAu8Ityd -umVU90IePVm6dlMM5cgyDmCXUkOVsjegMIBP+Zf3an1JWtsDL2RW5OwrFH7DQaqG -UwE/w/JOkRe3UMcTECfjX1ACJlB8XDAXiNeBQsAFOVVkWdBE4D7IlQLJVZAyGSlt -NMTn9/kQBGgdlyEqVAPKGnfl08TubyL7/9xOhCoYsv0IIOI8xgT7zQwefUAn2TFb -ulHIdVovRI4Oa0n7WfK4srL73XqjKYJAC9nmxXMwKe1wokjREwKCAQEAyNZKWY88 -4OqYa9xEEJwEOAA5YWLZ/+b9lCCQW8gMeVyTZ7A4vJVyYtBvRBlv6MhB4OTIf9ah -YuyZMl6oNCs2SBP1lKxsPlGaothRlEmPyTWXOt9iRLpPHUcGG1odfeGpI0bdHs1n -E/OpKYwzD0oSe5PGA0zkcetG61klPw8NIrjTkQ2hMqDV+ppF0lPxe/iudyTVMGhX -aHcd95DZNGaS503ZcSjN4MeVSkQEDI4fu4XK4135DCaKOmIPtOd6Rw+qMxoCC7Wl -cEDnZ6eqQ5EOy8Ufz8WKqGSVWkr6cO/qtulFLAj0hdL0aENTCRer+01alybXJXyB -GKyCk7i2RDlbGwKCAQEAwUA7SU7/0dKPJ2r0/70R6ayxZ7tQZK4smFgtkMDeWsaw -y2lZ6r44iJR/Tg6+bP8MjGzP/GU1i5QIIjJMGx2/VTWjJSOsFu3edZ5PHQUVSFQE -8FAhYXWOH+3igfgWJMkzhVsBo9/kINaEnt9jLBE8okEY+9/JEsdBqV/S4dkxjUPT -E+62kX9lkQVk/gCWjsLRKZV4d87gXU8mMQbhgj99qg1joffV132vo6pvBBBCJ4Ex -4/JxIQ2W/GmkrFe8NlvD1CEMyvkeV+g2wbtvjWs0Ezyzh4njJAtKMe0SEg5dFTqa -eL/GjpgfIP7Uu30V35ngkgl7CuY1D/IJg4PxKthQowKCAQBUGtFWAhMXiYa9HKfw -YLWvkgB1lQUAEoa84ooxtWvr4uXj9Ts9VkRptynxVcm0rTBRct24E3TQTY62Nkew -WSxJMPqWAULvMhNVAMvhEpFBTM0BHY00hOUeuKCJEcrp7Xd8S2/MN25kP5TmzkyP -qZBl6fNxbGD6h/HSGynq522zzbzjsNaBsjMJ2FNHClpFdVXylR0mQXvhRojpJOKg -/Bem/8YAinr1F/+f8y3S6C3HxPa7Ep56BSW731b+hjWBzsCS1+BlcPNQOA3wLZmy -4+tTUEDLLMmtTTnybxXD9+TOJpAOKc3kwPwTMaZzV1NxUOqQA/bzPtl9MLkaDa9e -kLpjAoIBACRFtxsKbe/nMqF2bOf3h/4xQNc0jGFpY8tweZT67oFhW9vCOXNbIudX 
-4BE5qTpyINvWrK82G/fH4ELy5+ALFFedCrM0398p5KB1B2puAtGhm4+zqqBNXVDW -6LX2Z8mdzkLQkx08L+iN+zSKv2WNErFtwI++MFKK/eMZrk5f4vId8eeC3devbtPq -jEs0tw2yuWmxuXvbY7d/3K5FGVzGKAMcIkBLcWLSH357xfygRJp/oGqlneBTWayk -85i5mwUk8jvFvE34tl5Por94O/byUULvGM9u7Shdyh5W3hZvhb8vUcEqVc179hPO -YQWT8+AVVNZ0WxjvnrQQfQKnaEPfeDsCggEBAJ7zgVVla8BOagEenKwr6nEkQzK/ -sTcF9Zp7TmyGKGdM4rW+CJqGgwswn65va+uZj7o0+D5JGeB8kRG5GtjUUzHkNBD0 -Av6KZksQDqgKdwPaH0MQSXCuUc0MYTBHDJdciN/DqdO8st69hyNRv4XdHst1SZdJ -VjUh3p4iwO4wfQQW7mvj94lLM/ypMdUqPKxVHVWQsbE9fOVbyKINuIDPDzu5iqc3 -VKScUwqpcGPZsgHr/Sguv/fdFnPs4O+N0AsAe3xbleCfQAeZnI0tR8nkYudvmxNz -MRevTAPDUBUDd0Uiy+d6w6B4vW8q9Zv3oFLXns4kWsJFajjx3TdgTacnVlI= ------END RSA PRIVATE KEY----- diff --git a/crates/utils/tcp_connection/tcp_connection_test/res/key/wrong_key_private.pem b/crates/utils/tcp_connection/tcp_connection_test/res/key/wrong_key_private.pem deleted file mode 100644 index 4b77eea..0000000 --- a/crates/utils/tcp_connection/tcp_connection_test/res/key/wrong_key_private.pem +++ /dev/null @@ -1,52 +0,0 @@ ------BEGIN PRIVATE KEY----- -MIIJQgIBADANBgkqhkiG9w0BAQEFAASCCSwwggkoAgEAAoICAQCvmvYR6ypNS4ld -cyJDlwv+4KC8/SxKBhlen8FX6Ltzfzi3f1I7qXZByGaTasQtc4qWgl0tLkrA8Pc3 -pm/r2To+Gl5cXMMz/zKFShuviGp/F17eS1idpNSFO6ViF+WXrENdESB7E6Dm4teK -+WLdtOHk3exC/+F+YUK3Jh6lTR5+donHaURlKjcKiRY7YxHq9HbrYXujJyiuU51a -nDvV20AWy7cKGGPRpV8YSoNGxE24WpWjjf0l++aFSpQaKanoV9tL4ZI0aXMFawSB -4YKBjtht6Cxm37oeBaimUKxA7BKH/DUueQsjAfw0WgZhItBDEtKjs2tMkOj/VUuF -OYrC58vunQDd/sP60BV3F/yPZiuBIB4PyXe2PVRabMBq2p2uiGexjoQ9DR+jU9ig -532KxckPHyqXzLd7MwljLw8ypahxMSE/lgcZIZh5I9oDsZWPD8Kx8D6eq/gypTkd -v8bewOTtj8GN2/MyxQZzYsz2ZruUrPv7hd38qjsFkKrb8wPM6xTOScM7CYFNceiL -3DcawivS+f5TgVkrjBGqhwpOc96ZHHuojw9f8KyJ3DON5CWKPpyKJvXEt6QuT5dc -BPZM33KHSuDCJUrw9dnh6rkaTnx681csAGJTYX2zeNxTI9DO/YSvpEK5e5MaZ9Kc -OETgnXiOe9KlNBtJeLd5XwvnYelzYQIDAQABAoICAAIis01ie8A24/PD+62wv4+Y -8bt6pLg9vL8+2B4WkXkFGg55OOnK1MpWApFWYg5fclcEPNfY0UXpaEg/+Op4WNH6 -hh0/b4xJVTbzwMRwt0LWaOvxJKG+KGt6XzeDLOKcULFoDOoSQgmsxoxFHiOuGHUt 
-Ebt62yYrTqFlkEfYWT+Wd3R6Xj+QtNym8CNGwCgIUw3nwJYqWr9L+wToE341TWE5 -lv9DbqtVBIQKG/CXYI6WY216w5JbruD+GDD9Qri1oNAabSnAAosVUxe1Q14J+63S -ff++Rsgor3VeU8nyVQNcWNU42Z7SXlvQoHU79CZsqy0ceHiU5pB8XA/BtGNMaFl4 -UehZPTsJhi8dlUdTYw5f5oOnHltNpSioy0KtqEBJjJX+CzS1UMAr6k9gtjbWeXpD -88JwoOy8n6HLAYETu/GiHLHpyIWJ84O+PeAO5jBCQTJN80fe3zbF+zJ5tHMHIFts -mGNmY9arKMCZHP642W3JRJsjN3LjdtzziXnhQzgKnPh/uCzceHZdSLf3S7NsEVOX -ZWb2nuDObJCpKD/4Hq2HpfupMNO73SUcbzg2slsRCRdDrokxOSEUHm7y9GD7tS2W -IC8A09pyCvM25k3so0QPpDP4+i/df7j862rb9+zctwhEWPdXTbFjI+9rI8JBcUwe -t94TFb5b9uB/kWYPnmUBAoIBAQDxiZjm5i8OInuedPnLkxdy31u/tqb+0+GMmp60 -gtmf7eL6Xu3F9Uqr6zH9o90CkdzHmtz6BcBTo/hUiOcTHj9Xnsso1GbneUlHJl9R -+G68sKWMXW76OfSKuXQ1fwlXV+J7Lu0XNIEeLVy09pYgjGKFn2ql7ELpRh7j1UXH -KbFVl2ESn5IVU4oGl+MMB5OzGYpyhuro24/sVSlaeXHakCLcHV69PvjyocQy8g+8 -Z1pXKqHy3mV6MOmSOJ4DqDxaZ2dLQR/rc7bvpxDIxtwMwD/a//xGlwnePOS/0IcB -I2dgFmRNwJ8WC9Le0E+EsEUD929fXEF3+CZN4E+KAuY8Y8UxAoIBAQC6HrlSdfVF -kmpddU4VLD5T/FuA6wB32VkXa6sXWiB0j8vOipGZkUvqQxnJiiorL0AECk3PXXT+ -wXgjqewZHibpJKeqaI4Zqblqebqb68VIANhO0DhRWsh63peVjAPNUmg+tfZHuEBE -bJlz1IBx0der5KBZfg7mngrXvQqIAYSr+Gl14PvwOGqG6Xjy+5VEJqDzEm9VaOnm -mm39st5oRotYnXdf83AV2aLI8ukkq0/mHAySlu5A4VhA5kTJT16Lam2h590AtmBH -6xsO1BtDmfVsaUxBSojkEW8eap+vbyU9vuwjrtm/dG19qcnyesjTJMFQgGnaY46L -ID/aNSDwssUxAoIBAQDFYaBl8G07q8pBr24Cgm2DHiwn+ud1D0keUayn7tZQ72Gx -IKpGPzGKVGVB1Qri8rftFgzG9LQ6paBl1IqhAPLac5WqBAkj1+WeEymKHu6/m8tt -bV0ndvzz8KGapfnIOrWF3M87S1jIhGFiMLB2YMKSV7gbZ3s2jmrn3H1tSBD21QIq -6ePDMcV1peGRDxAQKCsPdFm7eNGgW+ezW9NCvM7/+bBWDoP6I1/mEhHx8LPOz7QQ -eNWMiTQWndXjPzQy3JV41ftzudgg9/GrYXappOGJ4e8S8JLL3g9BAPOSZpAv4ZyO -PX7D0V29X5Xb5QBBQY7t6sJFe7Axq8DUE5J6fz3BAoIBAHLFEWh9HsNJF1gMRxsd -Tk4B9vcXcxF0sNCVb0qWJB9csMPrhP9arqKFwDgcgAZjO6mCJRszOTsDWK89UD7o -7fukw9N8Z+wBUjoLWHxftibBhqGLGr9oKOpDqtvoHEwXffr1wCnXv6GyCip4JsCJ -MuJnuE2XQ18IpA0HIKBft01IgNfU5ebrEx2giRnk89WzsFpTyt2zNVEjd6ITE7zf -i3wYlg1QE5UVwKED0arwDPQL5eDbO448p2xV0qME03tLJNHLJegTjmmq2+OX/jwA -i2vPvtsgOCvTaF8sRs4qzp81xW33m4TJKd9svQBOoNo69w5KMXwfGj5Go7lOO8LR 
-qnECggEAII/9+EdPUMx97Ex9R6sc9VQEpjxzlJmA9RaVASoZiinydP9QToLYhZif -QhSjHOrbPfGorNMIaVCOS4WGZWnJBSDX8uVvhi/N6mWegmj8w/WZrNuNOT99/8Fq -HXMnpOrXJsgQ4MDVzu+V8DISgrirf+PdBW1u/JtdjwmunlnPE1AsJUDWZlDTttaE -0p32cDq6j+eUxfBq5/haZxe92Jq9Wr+o+gXNO9EwZCO+bTtHFJJso5YbU548kMdA -j5y4BUf/jkCqK8c6sufbfP4MN4YnWbdSPmH3V2DF3g1okalUYp2sAOgAwwPjFAOu -f9qBWGCwdZjeDjaVVUgwi+Waf+M0tQ== ------END PRIVATE KEY----- diff --git a/crates/utils/tcp_connection/tcp_connection_test/src/lib.rs b/crates/utils/tcp_connection/tcp_connection_test/src/lib.rs deleted file mode 100644 index c9372d4..0000000 --- a/crates/utils/tcp_connection/tcp_connection_test/src/lib.rs +++ /dev/null @@ -1,17 +0,0 @@ -#[cfg(test)] -pub mod test_tcp_target_build; - -#[cfg(test)] -pub mod test_connection; - -#[cfg(test)] -pub mod test_challenge; - -#[cfg(test)] -pub mod test_file_transfer; - -#[cfg(test)] -pub mod test_msgpack; - -pub mod test_utils; -pub use test_utils::*; diff --git a/crates/utils/tcp_connection/tcp_connection_test/src/test_challenge.rs b/crates/utils/tcp_connection/tcp_connection_test/src/test_challenge.rs deleted file mode 100644 index 9327b3e..0000000 --- a/crates/utils/tcp_connection/tcp_connection_test/src/test_challenge.rs +++ /dev/null @@ -1,160 +0,0 @@ -use std::{env::current_dir, time::Duration}; - -use tcp_connection::instance::ConnectionInstance; -use tokio::{ - join, - time::{sleep, timeout}, -}; - -use crate::test_utils::{ - handle::{ClientHandle, ServerHandle}, - target::TcpServerTarget, - target_configure::ServerTargetConfig, -}; - -pub(crate) struct ExampleChallengeClientHandle; - -impl ClientHandle for ExampleChallengeClientHandle { - async fn process(mut instance: ConnectionInstance) { - // Accept challenge with correct key - let key = current_dir() - .unwrap() - .join("res") - .join("key") - .join("test_key_private.pem"); - let result = instance.accept_challenge(key, "test_key").await.unwrap(); - - // Sent success - assert!(result); - let response = instance.read_text().await.unwrap(); - 
- // Verify success - assert_eq!("OK", response); - - // Accept challenge with wrong key - let key = current_dir() - .unwrap() - .join("res") - .join("key") - .join("wrong_key_private.pem"); - let result = instance.accept_challenge(key, "test_key").await.unwrap(); - - // Sent success - assert!(result); - let response = instance.read_text().await.unwrap(); - - // Verify fail - assert_eq!("ERROR", response); - - // Accept challenge with wrong name - let key = current_dir() - .unwrap() - .join("res") - .join("key") - .join("test_key_private.pem"); - let result = instance.accept_challenge(key, "test_key__").await.unwrap(); - - // Sent success - assert!(result); - let response = instance.read_text().await.unwrap(); - - // Verify fail - assert_eq!("ERROR", response); - } -} - -pub(crate) struct ExampleChallengeServerHandle; - -impl ServerHandle for ExampleChallengeServerHandle { - async fn process(mut instance: ConnectionInstance) { - // Challenge with correct key - let key_dir = current_dir().unwrap().join("res").join("key"); - let (result, key_id) = instance.challenge(key_dir).await.unwrap(); - assert!(result); - assert_eq!(key_id, "test_key"); - - // Send response - instance - .write_text(if result { "OK" } else { "ERROR" }) - .await - .unwrap(); - - // Challenge again - let key_dir = current_dir().unwrap().join("res").join("key"); - let (result, key_id) = instance.challenge(key_dir).await.unwrap(); - assert!(!result); - assert_eq!(key_id, "test_key"); - - // Send response - instance - .write_text(if result { "OK" } else { "ERROR" }) - .await - .unwrap(); - - // Challenge again - let key_dir = current_dir().unwrap().join("res").join("key"); - let (result, key_id) = instance.challenge(key_dir).await.unwrap(); - assert!(!result); - assert_eq!(key_id, "test_key__"); - - // Send response - instance - .write_text(if result { "OK" } else { "ERROR" }) - .await - .unwrap(); - } -} - -#[tokio::test] -async fn test_connection_with_challenge_handle() -> Result<(), 
std::io::Error> { - let host = "localhost:5011"; - - // Server setup - let Ok(server_target) = TcpServerTarget::< - ExampleChallengeClientHandle, - ExampleChallengeServerHandle, - >::from_domain(host) - .await - else { - panic!("Test target built failed from a domain named `{}`", host); - }; - - // Client setup - let Ok(client_target) = TcpServerTarget::< - ExampleChallengeClientHandle, - ExampleChallengeServerHandle, - >::from_domain(host) - .await - else { - panic!("Test target built failed from a domain named `{}`", host); - }; - - let future_server = async move { - // Only process once - let configured_server = server_target.server_cfg(ServerTargetConfig::default().once()); - - // Listen here - let _ = configured_server.listen().await; - }; - - let future_client = async move { - // Wait for server start - let _ = sleep(Duration::from_secs_f32(1.5)).await; - - // Connect here - let _ = client_target.connect().await; - }; - - let test_timeout = Duration::from_secs(10); - - timeout(test_timeout, async { join!(future_client, future_server) }) - .await - .map_err(|_| { - std::io::Error::new( - std::io::ErrorKind::TimedOut, - format!("Test timed out after {:?}", test_timeout), - ) - })?; - - Ok(()) -} diff --git a/crates/utils/tcp_connection/tcp_connection_test/src/test_connection.rs b/crates/utils/tcp_connection/tcp_connection_test/src/test_connection.rs deleted file mode 100644 index 8c3ab01..0000000 --- a/crates/utils/tcp_connection/tcp_connection_test/src/test_connection.rs +++ /dev/null @@ -1,78 +0,0 @@ -use std::time::Duration; - -use tcp_connection::instance::ConnectionInstance; -use tokio::{join, time::sleep}; - -use crate::test_utils::{ - handle::{ClientHandle, ServerHandle}, - target::TcpServerTarget, - target_configure::ServerTargetConfig, -}; - -pub(crate) struct ExampleClientHandle; - -impl ClientHandle for ExampleClientHandle { - async fn process(mut instance: ConnectionInstance) { - // Write name - let Ok(_) = instance.write_text("Peter").await else { 
- panic!("Write text failed!"); - }; - // Read msg - let Ok(result) = instance.read_text().await else { - return; - }; - assert_eq!("Hello Peter!", result); - } -} - -pub(crate) struct ExampleServerHandle; - -impl ServerHandle for ExampleServerHandle { - async fn process(mut instance: ConnectionInstance) { - // Read name - let Ok(name) = instance.read_text().await else { - return; - }; - // Write msg - let Ok(_) = instance.write_text(format!("Hello {}!", name)).await else { - panic!("Write text failed!"); - }; - } -} - -#[tokio::test] -async fn test_connection_with_example_handle() { - let host = "localhost:5012"; - - // Server setup - let Ok(server_target) = - TcpServerTarget::::from_domain(host).await - else { - panic!("Test target built failed from a domain named `{}`", host); - }; - - // Client setup - let Ok(client_target) = - TcpServerTarget::::from_domain(host).await - else { - panic!("Test target built failed from a domain named `{}`", host); - }; - - let future_server = async move { - // Only process once - let configured_server = server_target.server_cfg(ServerTargetConfig::default().once()); - - // Listen here - let _ = configured_server.listen().await; - }; - - let future_client = async move { - // Wait for server start - let _ = sleep(Duration::from_secs_f32(1.5)).await; - - // Connect here - let _ = client_target.connect().await; - }; - - let _ = async { join!(future_client, future_server) }.await; -} diff --git a/crates/utils/tcp_connection/tcp_connection_test/src/test_file_transfer.rs b/crates/utils/tcp_connection/tcp_connection_test/src/test_file_transfer.rs deleted file mode 100644 index 4237ea7..0000000 --- a/crates/utils/tcp_connection/tcp_connection_test/src/test_file_transfer.rs +++ /dev/null @@ -1,94 +0,0 @@ -use std::{env::current_dir, time::Duration}; - -use tcp_connection::instance::ConnectionInstance; -use tokio::{ - join, - time::{sleep, timeout}, -}; - -use crate::test_utils::{ - handle::{ClientHandle, ServerHandle}, - 
target::TcpServerTarget, - target_configure::ServerTargetConfig, -}; - -pub(crate) struct ExampleFileTransferClientHandle; - -impl ClientHandle for ExampleFileTransferClientHandle { - async fn process(mut instance: ConnectionInstance) { - let image_path = current_dir() - .unwrap() - .join("res") - .join("image") - .join("test_transfer.png"); - instance.write_file(image_path).await.unwrap(); - } -} - -pub(crate) struct ExampleFileTransferServerHandle; - -impl ServerHandle for ExampleFileTransferServerHandle { - async fn process(mut instance: ConnectionInstance) { - let save_path = current_dir() - .unwrap() - .join("res") - .join(".temp") - .join("image") - .join("test_transfer.png"); - instance.read_file(save_path).await.unwrap(); - } -} - -#[tokio::test] -async fn test_connection_with_challenge_handle() -> Result<(), std::io::Error> { - let host = "localhost:5010"; - - // Server setup - let Ok(server_target) = TcpServerTarget::< - ExampleFileTransferClientHandle, - ExampleFileTransferServerHandle, - >::from_domain(host) - .await - else { - panic!("Test target built failed from a domain named `{}`", host); - }; - - // Client setup - let Ok(client_target) = TcpServerTarget::< - ExampleFileTransferClientHandle, - ExampleFileTransferServerHandle, - >::from_domain(host) - .await - else { - panic!("Test target built failed from a domain named `{}`", host); - }; - - let future_server = async move { - // Only process once - let configured_server = server_target.server_cfg(ServerTargetConfig::default().once()); - - // Listen here - let _ = configured_server.listen().await; - }; - - let future_client = async move { - // Wait for server start - let _ = sleep(Duration::from_secs_f32(1.5)).await; - - // Connect here - let _ = client_target.connect().await; - }; - - let test_timeout = Duration::from_secs(10); - - timeout(test_timeout, async { join!(future_client, future_server) }) - .await - .map_err(|_| { - std::io::Error::new( - std::io::ErrorKind::TimedOut, - format!("Test 
timed out after {:?}", test_timeout), - ) - })?; - - Ok(()) -} diff --git a/crates/utils/tcp_connection/tcp_connection_test/src/test_msgpack.rs b/crates/utils/tcp_connection/tcp_connection_test/src/test_msgpack.rs deleted file mode 100644 index 4c9c870..0000000 --- a/crates/utils/tcp_connection/tcp_connection_test/src/test_msgpack.rs +++ /dev/null @@ -1,103 +0,0 @@ -use serde::{Deserialize, Serialize}; -use std::time::Duration; -use tcp_connection::instance::ConnectionInstance; -use tokio::{join, time::sleep}; - -use crate::test_utils::{ - handle::{ClientHandle, ServerHandle}, - target::TcpServerTarget, - target_configure::ServerTargetConfig, -}; - -#[derive(Debug, PartialEq, Serialize, Deserialize, Default)] -struct TestData { - id: u32, - name: String, -} - -pub(crate) struct MsgPackClientHandle; - -impl ClientHandle for MsgPackClientHandle { - async fn process(mut instance: ConnectionInstance) { - // Test basic MessagePack serialization - let test_data = TestData { - id: 42, - name: "Test MessagePack".to_string(), - }; - - // Write MessagePack data - if let Err(e) = instance.write_msgpack(&test_data).await { - panic!("Write MessagePack failed: {}", e); - } - - // Read response - let response: TestData = match instance.read_msgpack().await { - Ok(data) => data, - Err(e) => panic!("Read MessagePack response failed: {}", e), - }; - - // Verify response - assert_eq!(response.id, test_data.id * 2); - assert_eq!(response.name, format!("Processed: {}", test_data.name)); - } -} - -pub(crate) struct MsgPackServerHandle; - -impl ServerHandle for MsgPackServerHandle { - async fn process(mut instance: ConnectionInstance) { - // Read MessagePack data - let received_data: TestData = match instance.read_msgpack().await { - Ok(data) => data, - Err(_) => return, - }; - - // Process data - let response = TestData { - id: received_data.id * 2, - name: format!("Processed: {}", received_data.name), - }; - - // Write response as MessagePack - if let Err(e) = 
instance.write_msgpack(&response).await { - panic!("Write MessagePack response failed: {}", e); - } - } -} - -#[tokio::test] -async fn test_msgpack_basic() { - let host = "localhost:5013"; - - // Server setup - let Ok(server_target) = - TcpServerTarget::::from_domain(host).await - else { - panic!("Test target built failed from a domain named `{}`", host); - }; - - // Client setup - let Ok(client_target) = - TcpServerTarget::::from_domain(host).await - else { - panic!("Test target built failed from a domain named `{}`", host); - }; - - let future_server = async move { - // Only process once - let configured_server = server_target.server_cfg(ServerTargetConfig::default().once()); - - // Listen here - let _ = configured_server.listen().await; - }; - - let future_client = async move { - // Wait for server start - let _ = sleep(Duration::from_secs_f32(1.5)).await; - - // Connect here - let _ = client_target.connect().await; - }; - - let _ = async { join!(future_client, future_server) }.await; -} diff --git a/crates/utils/tcp_connection/tcp_connection_test/src/test_tcp_target_build.rs b/crates/utils/tcp_connection/tcp_connection_test/src/test_tcp_target_build.rs deleted file mode 100644 index aa1ec74..0000000 --- a/crates/utils/tcp_connection/tcp_connection_test/src/test_tcp_target_build.rs +++ /dev/null @@ -1,32 +0,0 @@ -use crate::{ - test_connection::{ExampleClientHandle, ExampleServerHandle}, - test_utils::target::TcpServerTarget, -}; - -#[test] -fn test_tcp_test_target_build() { - let host = "127.0.0.1:8080"; - - // Test build target by string - let Ok(target) = - TcpServerTarget::::from_address_str(host) - else { - panic!("Test target built failed from a target addr `{}`", host); - }; - assert_eq!(target.to_string(), "127.0.0.1:8080"); -} - -#[tokio::test] -async fn test_tcp_test_target_build_domain() { - let host = "localhost"; - - // Test build target by DomainName and Connection - let Ok(target) = - TcpServerTarget::::from_domain(host).await - else { - 
panic!("Test target built failed from a domain named `{}`", host); - }; - - // Test into string - assert_eq!(target.to_string(), "127.0.0.1:8080"); -} diff --git a/crates/utils/tcp_connection/tcp_connection_test/src/test_utils.rs b/crates/utils/tcp_connection/tcp_connection_test/src/test_utils.rs deleted file mode 100644 index badf27d..0000000 --- a/crates/utils/tcp_connection/tcp_connection_test/src/test_utils.rs +++ /dev/null @@ -1,4 +0,0 @@ -pub mod handle; -pub mod target; -pub mod target_configure; -pub mod target_connection; diff --git a/crates/utils/tcp_connection/tcp_connection_test/src/test_utils/handle.rs b/crates/utils/tcp_connection/tcp_connection_test/src/test_utils/handle.rs deleted file mode 100644 index 4f9bdbb..0000000 --- a/crates/utils/tcp_connection/tcp_connection_test/src/test_utils/handle.rs +++ /dev/null @@ -1,11 +0,0 @@ -use std::future::Future; - -use tcp_connection::instance::ConnectionInstance; - -pub trait ClientHandle { - fn process(instance: ConnectionInstance) -> impl Future + Send; -} - -pub trait ServerHandle { - fn process(instance: ConnectionInstance) -> impl Future + Send; -} diff --git a/crates/utils/tcp_connection/tcp_connection_test/src/test_utils/target.rs b/crates/utils/tcp_connection/tcp_connection_test/src/test_utils/target.rs deleted file mode 100644 index 8972b2a..0000000 --- a/crates/utils/tcp_connection/tcp_connection_test/src/test_utils/target.rs +++ /dev/null @@ -1,201 +0,0 @@ -use serde::{Deserialize, Serialize}; -use std::{ - fmt::{Display, Formatter}, - marker::PhantomData, - net::{AddrParseError, IpAddr, Ipv4Addr, SocketAddr}, - str::FromStr, -}; -use tokio::net::lookup_host; - -use crate::test_utils::{ - handle::{ClientHandle, ServerHandle}, - target_configure::{ClientTargetConfig, ServerTargetConfig}, -}; - -const DEFAULT_PORT: u16 = 8080; - -#[derive(Debug, Serialize, Deserialize)] -pub struct TcpServerTarget -where - Client: ClientHandle, - Server: ServerHandle, -{ - /// Client Config - client_cfg: Option, - 
- /// Server Config - server_cfg: Option, - - /// Server port - port: u16, - - /// Bind addr - bind_addr: IpAddr, - - /// Client Phantom Data - _client: PhantomData, - - /// Server Phantom Data - _server: PhantomData, -} - -impl Default for TcpServerTarget -where - Client: ClientHandle, - Server: ServerHandle, -{ - fn default() -> Self { - Self { - client_cfg: None, - server_cfg: None, - port: DEFAULT_PORT, - bind_addr: IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), - _client: PhantomData, - _server: PhantomData, - } - } -} - -impl From for TcpServerTarget -where - Client: ClientHandle, - Server: ServerHandle, -{ - /// Convert SocketAddr to TcpServerTarget - fn from(value: SocketAddr) -> Self { - Self { - port: value.port(), - bind_addr: value.ip(), - ..Self::default() - } - } -} - -impl From> for SocketAddr -where - Client: ClientHandle, - Server: ServerHandle, -{ - /// Convert TcpServerTarget to SocketAddr - fn from(val: TcpServerTarget) -> Self { - SocketAddr::new(val.bind_addr, val.port) - } -} - -impl Display for TcpServerTarget -where - Client: ClientHandle, - Server: ServerHandle, -{ - fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { - write!(f, "{}:{}", self.bind_addr, self.port) - } -} - -impl TcpServerTarget -where - Client: ClientHandle, - Server: ServerHandle, -{ - /// Create target by address - pub fn from_addr(addr: impl Into, port: impl Into) -> Self { - Self { - port: port.into(), - bind_addr: addr.into(), - ..Self::default() - } - } - - /// Try to create target by string - pub fn from_address_str<'a>(addr_str: impl Into<&'a str>) -> Result { - let socket_addr = SocketAddr::from_str(addr_str.into()); - match socket_addr { - Ok(socket_addr) => Ok(Self::from_addr(socket_addr.ip(), socket_addr.port())), - Err(err) => Err(err), - } - } - - /// Try to create target by domain name - pub async fn from_domain<'a>(domain: impl Into<&'a str>) -> Result { - match domain_to_addr(domain).await { - Ok(domain_addr) => Ok(Self::from(domain_addr)), - Err(e) => 
Err(e), - } - } - - /// Set client config - pub fn client_cfg(mut self, config: ClientTargetConfig) -> Self { - self.client_cfg = Some(config); - self - } - - /// Set server config - pub fn server_cfg(mut self, config: ServerTargetConfig) -> Self { - self.server_cfg = Some(config); - self - } - - /// Add client config - pub fn add_client_cfg(&mut self, config: ClientTargetConfig) { - self.client_cfg = Some(config); - } - - /// Add server config - pub fn add_server_cfg(&mut self, config: ServerTargetConfig) { - self.server_cfg = Some(config); - } - - /// Get client config ref - pub fn get_client_cfg(&self) -> Option<&ClientTargetConfig> { - self.client_cfg.as_ref() - } - - /// Get server config ref - pub fn get_server_cfg(&self) -> Option<&ServerTargetConfig> { - self.server_cfg.as_ref() - } - - /// Get SocketAddr of TcpServerTarget - pub fn get_addr(&self) -> SocketAddr { - SocketAddr::new(self.bind_addr, self.port) - } -} - -/// Parse Domain Name to IpAddr via DNS -async fn domain_to_addr<'a>(domain: impl Into<&'a str>) -> Result { - let domain = domain.into(); - let default_port: u16 = DEFAULT_PORT; - - if let Ok(socket_addr) = domain.parse::() { - return Ok(match socket_addr.ip() { - IpAddr::V4(_) => socket_addr, - IpAddr::V6(_) => SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), socket_addr.port()), - }); - } - - if let Ok(_v6_addr) = domain.parse::() { - return Ok(SocketAddr::new( - IpAddr::V4(Ipv4Addr::LOCALHOST), - default_port, - )); - } - - let (host, port_str) = if let Some((host, port)) = domain.rsplit_once(':') { - (host.trim_matches(|c| c == '[' || c == ']'), Some(port)) - } else { - (domain, None) - }; - - let port = port_str - .and_then(|p| p.parse::().ok()) - .map(|p| p.clamp(0, u16::MAX)) - .unwrap_or(default_port); - - let mut socket_iter = lookup_host((host, 0)).await?; - - if let Some(addr) = socket_iter.find(|addr| addr.is_ipv4()) { - return Ok(SocketAddr::new(addr.ip(), port)); - } - - Ok(SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), port)) 
-} diff --git a/crates/utils/tcp_connection/tcp_connection_test/src/test_utils/target_configure.rs b/crates/utils/tcp_connection/tcp_connection_test/src/test_utils/target_configure.rs deleted file mode 100644 index d739ac9..0000000 --- a/crates/utils/tcp_connection/tcp_connection_test/src/test_utils/target_configure.rs +++ /dev/null @@ -1,53 +0,0 @@ -use serde::{Deserialize, Serialize}; - -#[derive(Default, Debug, Clone, Copy, Serialize, Deserialize)] -pub struct ServerTargetConfig { - /// Only process a single connection, then shut down the server. - once: bool, - - /// Timeout duration in milliseconds. (0 is Closed) - timeout: u64, -} - -impl ServerTargetConfig { - /// Set `once` to True - /// This method configures the `once` field of `ServerTargetConfig`. - pub fn once(mut self) -> Self { - self.once = true; - self - } - - /// Set `timeout` to the given value - /// This method configures the `timeout` field of `ServerTargetConfig`. - pub fn timeout(mut self, timeout: u64) -> Self { - self.timeout = timeout; - self - } - - /// Set `once` to the given value - /// This method configures the `once` field of `ServerTargetConfig`. - pub fn set_once(&mut self, enable: bool) { - self.once = enable; - } - - /// Set `timeout` to the given value - /// This method configures the `timeout` field of `ServerTargetConfig`. - pub fn set_timeout(&mut self, timeout: u64) { - self.timeout = timeout; - } - - /// Check if the server is configured to process only a single connection. - /// Returns `true` if the server will shut down after processing one connection. - pub fn is_once(&self) -> bool { - self.once - } - - /// Get the current timeout value in milliseconds. - /// Returns the timeout duration. A value of 0 indicates the connection is closed. 
- pub fn get_timeout(&self) -> u64 { - self.timeout - } -} - -#[derive(Default, Debug, Clone, Copy, Serialize, Deserialize)] -pub struct ClientTargetConfig {} diff --git a/crates/utils/tcp_connection/tcp_connection_test/src/test_utils/target_connection.rs b/crates/utils/tcp_connection/tcp_connection_test/src/test_utils/target_connection.rs deleted file mode 100644 index d5bf2c3..0000000 --- a/crates/utils/tcp_connection/tcp_connection_test/src/test_utils/target_connection.rs +++ /dev/null @@ -1,89 +0,0 @@ -use tcp_connection::{error::TcpTargetError, instance::ConnectionInstance}; -use tokio::{ - net::{TcpListener, TcpSocket}, - spawn, -}; - -use crate::test_utils::{ - handle::{ClientHandle, ServerHandle}, - target::TcpServerTarget, - target_configure::ServerTargetConfig, -}; - -impl TcpServerTarget -where - Client: ClientHandle, - Server: ServerHandle, -{ - /// Attempts to establish a connection to the TCP server. - /// - /// This function initiates a connection to the server address - /// specified in the target configuration. - /// - /// This is a Block operation. - pub async fn connect(&self) -> Result<(), TcpTargetError> { - let addr = self.get_addr(); - let Ok(socket) = TcpSocket::new_v4() else { - return Err(TcpTargetError::from("Create tcp socket failed!")); - }; - let stream = match socket.connect(addr).await { - Ok(stream) => stream, - Err(e) => { - let err = format!("Connect to `{}` failed: {}", addr, e); - return Err(TcpTargetError::from(err)); - } - }; - let instance = ConnectionInstance::from(stream); - Client::process(instance).await; - Ok(()) - } - - /// Attempts to establish a connection to the TCP server. - /// - /// This function initiates a connection to the server address - /// specified in the target configuration. 
- pub async fn listen(&self) -> Result<(), TcpTargetError> { - let addr = self.get_addr(); - let listener = match TcpListener::bind(addr).await { - Ok(listener) => listener, - Err(_) => { - let err = format!("Bind to `{}` failed", addr); - return Err(TcpTargetError::from(err)); - } - }; - - let cfg: ServerTargetConfig = match self.get_server_cfg() { - Some(cfg) => *cfg, - None => ServerTargetConfig::default(), - }; - - if cfg.is_once() { - // Process once (Blocked) - let (stream, _) = match listener.accept().await { - Ok(result) => result, - Err(e) => { - let err = format!("Accept connection failed: {}", e); - return Err(TcpTargetError::from(err)); - } - }; - let instance = ConnectionInstance::from(stream); - Server::process(instance).await; - } else { - loop { - // Process multiple times (Concurrent) - let (stream, _) = match listener.accept().await { - Ok(result) => result, - Err(e) => { - let err = format!("Accept connection failed: {}", e); - return Err(TcpTargetError::from(err)); - } - }; - let instance = ConnectionInstance::from(stream); - spawn(async move { - Server::process(instance).await; - }); - } - } - Ok(()) - } -} diff --git a/crates/vcs_actions/Cargo.toml b/crates/vcs_actions/Cargo.toml deleted file mode 100644 index dd288e8..0000000 --- a/crates/vcs_actions/Cargo.toml +++ /dev/null @@ -1,29 +0,0 @@ -[package] -name = "vcs_actions" -edition = "2024" -version.workspace = true - -[dependencies] - -# Utils -tcp_connection = { path = "../utils/tcp_connection" } -cfg_file = { path = "../utils/cfg_file", features = ["default"] } -sha1_hash = { path = "../utils/sha1_hash" } -string_proc = { path = "../utils/string_proc" } - -# Core dependencies -action_system = { path = "../system_action" } -vcs_data = { path = "../vcs_data" } - -# Error handling -thiserror = "2.0.17" - -# Serialization -serde = { version = "1.0.228", features = ["derive"] } -serde_json = "1.0.145" - -# Async & Networking -tokio = { version = "1.48.0", features = ["full"] } - -# Logging 
-log = "0.4.28" diff --git a/crates/vcs_actions/src/actions.rs b/crates/vcs_actions/src/actions.rs deleted file mode 100644 index 3019327..0000000 --- a/crates/vcs_actions/src/actions.rs +++ /dev/null @@ -1,288 +0,0 @@ -use std::sync::Arc; - -use action_system::action::ActionContext; -use cfg_file::config::ConfigFile; -use tcp_connection::{error::TcpTargetError, instance::ConnectionInstance}; -use tokio::sync::{Mutex, mpsc::Sender}; -use vcs_data::{ - constants::{SERVER_PATH_MEMBER_PUB, VAULT_HOST_NAME}, - data::{ - local::{LocalWorkspace, config::LocalConfig, latest_info::LatestInfo}, - member::MemberId, - sheet::SheetName, - user::UserDirectory, - vault::Vault, - }, -}; - -pub mod local_actions; -pub mod sheet_actions; -pub mod track_action; -pub mod user_actions; -pub mod vault_actions; - -/// Check if the connection instance is valid in the given context. -/// This function is used to verify the connection instance in actions that require remote calls. -pub fn check_connection_instance( - ctx: &ActionContext, -) -> Result<&Arc>, TcpTargetError> { - let Some(instance) = ctx.instance() else { - return Err(TcpTargetError::NotFound( - "Connection instance lost.".to_string(), - )); - }; - Ok(instance) -} - -/// Try to get the Vault instance from the context. -pub fn try_get_vault(ctx: &ActionContext) -> Result, TcpTargetError> { - let Some(vault) = ctx.get_arc::() else { - return Err(TcpTargetError::NotFound( - "Vault instance not found".to_string(), - )); - }; - Ok(vault) -} - -/// Try to get the LocalWorkspace instance from the context. -pub fn try_get_local_workspace(ctx: &ActionContext) -> Result, TcpTargetError> { - let Some(local_workspace) = ctx.get_arc::() else { - return Err(TcpTargetError::NotFound( - "LocalWorkspace instance not found".to_string(), - )); - }; - Ok(local_workspace) -} - -/// Try to get the UserDirectory instance from the context. 
-pub fn try_get_user_directory(ctx: &ActionContext) -> Result, TcpTargetError> { - let Some(user_directory) = ctx.get_arc::() else { - return Err(TcpTargetError::NotFound( - "UserDirectory instance not found".to_string(), - )); - }; - Ok(user_directory) -} - -/// Try to get the LocalWorkspace instance from the context. -pub fn try_get_local_output(ctx: &ActionContext) -> Result>, TcpTargetError> { - let Some(output) = ctx.get_arc::>() else { - return Err(TcpTargetError::NotFound( - "Client sender not found".to_string(), - )); - }; - Ok(output) -} - -/// Authenticate member based on context and return MemberId -pub async fn auth_member( - ctx: &ActionContext, - instance: &Arc>, -) -> Result<(MemberId, bool), TcpTargetError> { - // Window开服Linux连接 -> 此函数内产生 early eof - // ~ WS # jv update - // 身份认证失败:I/O error: early eof! - - // 分析相应流程: - // 1. 服务端发起挑战,客户端接受 - // 2. 服务端发送结果,客户端接受 - // 3. 推测此时发生 early eof ---> 无 ack,导致客户端尝试拿到结果时,服务端已经结束 - // 这很有可能是 Windows 和 Linux 对于连接处理的方案差异导致的问题,需要进一步排查 - - // Start Challenge (Remote) - if ctx.is_proc_on_remote() { - let mut mut_instance = instance.lock().await; - let vault = try_get_vault(ctx)?; - - let using_host_mode = mut_instance.read_msgpack::().await?; - - let result = mut_instance - .challenge(vault.vault_path().join(SERVER_PATH_MEMBER_PUB)) - .await; - - return match result { - Ok((pass, member_id)) => { - if !pass { - // Send false to inform the client that authentication failed - mut_instance.write(false).await?; - Err(TcpTargetError::Authentication( - "Authenticate failed.".to_string(), - )) - } else { - if using_host_mode { - if vault.config().vault_host_list().contains(&member_id) { - // Using Host mode authentication, and is indeed an administrator - mut_instance.write(true).await?; - Ok((member_id, true)) - } else { - // Using Host mode authentication, but not an administrator - mut_instance.write(false).await?; - Err(TcpTargetError::Authentication( - "Authenticate failed.".to_string(), - )) - } - } else { - // Not 
using Host mode authentication - mut_instance.write(true).await?; - Ok((member_id, false)) - } - } - } - Err(e) => Err(e), - }; - } - - // Accept Challenge (Local) - if ctx.is_proc_on_local() { - let mut mut_instance = instance.lock().await; - let local_workspace = try_get_local_workspace(ctx)?; - let (is_host_mode, member_name) = { - let cfg = local_workspace.config().lock_owned().await; - (cfg.is_host_mode(), cfg.current_account()) - }; - let user_directory = try_get_user_directory(ctx)?; - - // Inform remote whether to authenticate in Host mode - mut_instance.write_msgpack(is_host_mode).await?; - - // Member name & Private key - let private_key = user_directory.account_private_key_path(&member_name); - let _ = mut_instance - .accept_challenge(private_key, &member_name) - .await?; - - // Read result - let challenge_result = mut_instance.read::().await?; - if challenge_result { - return Ok((member_name.clone(), is_host_mode)); - } else { - return Err(TcpTargetError::Authentication( - "Authenticate failed.".to_string(), - )); - } - } - - Err(TcpTargetError::NoResult("Auth failed.".to_string())) -} - -/// Get the current sheet name based on the context (local or remote). -/// This function handles the communication between local and remote instances -/// to verify and retrieve the current sheet name and whether it's a reference sheet. 
-/// -/// On local: -/// - Reads the current sheet from local configuration -/// - Sends the sheet name to remote for verification -/// - Returns the sheet name and whether it's a reference sheet if remote confirms it exists -/// -/// On remote: -/// - Receives sheet name from local -/// - Verifies the sheet exists in the vault -/// - Checks if the sheet is a reference sheet -/// - If allow_ref is true, reference sheets are allowed to pass verification -/// - Sends confirmation and reference status back to local -/// -/// Returns a tuple of (SheetName, bool) where the bool indicates if it's a reference sheet, -/// or an error if the sheet doesn't exist or doesn't meet the verification criteria. -pub async fn get_current_sheet_name( - ctx: &ActionContext, - instance: &Arc>, - member_id: &MemberId, - allow_ref: bool, -) -> Result<(SheetName, bool), TcpTargetError> { - let mut mut_instance = instance.lock().await; - if ctx.is_proc_on_local() { - let workspace = try_get_local_workspace(ctx)?; - let config = LocalConfig::read().await?; - let latest = LatestInfo::read_from(LatestInfo::latest_info_path( - workspace.local_path(), - member_id, - )) - .await?; - if let Some(sheet_name) = config.sheet_in_use() { - // Send sheet name - mut_instance.write_msgpack(sheet_name).await?; - - // Read result - if mut_instance.read_msgpack::().await? 
{ - // Check if sheet is a reference sheet - let is_ref_sheet = latest.reference_sheets.contains(sheet_name); - if allow_ref { - // Allow reference sheets, directly return the determination result - return Ok((sheet_name.clone(), is_ref_sheet)); - } else if is_ref_sheet { - // Not allowed but it's a reference sheet, return an error - return Err(TcpTargetError::ReferenceSheetNotAllowed( - "Reference sheet not allowed".to_string(), - )); - } else { - // Not allowed but not a reference sheet, return normally - return Ok((sheet_name.clone(), false)); - } - } else { - return Err(TcpTargetError::NotFound("Sheet not found".to_string())); - } - } - // Send empty sheet_name - mut_instance.write_msgpack("".to_string()).await?; - - // Read result, since we know it's impossible to pass here, we just consume this result - let _ = mut_instance.read_msgpack::().await?; - - return Err(TcpTargetError::NotFound("Sheet not found".to_string())); - } - if ctx.is_proc_on_remote() { - let vault = try_get_vault(ctx)?; - - // Read sheet name - let sheet_name: SheetName = mut_instance.read_msgpack().await?; - - // Check if sheet exists - if let Ok(sheet) = vault.sheet(&sheet_name).await - && let Some(holder) = sheet.holder() - { - let is_ref_sheet = holder == VAULT_HOST_NAME; - if allow_ref { - // Allow reference sheets, directly return the determination result - if holder == member_id || holder == VAULT_HOST_NAME { - mut_instance.write_msgpack(true).await?; - return Ok((sheet.name().clone(), is_ref_sheet)); - } - } else if is_ref_sheet { - // Not allowed but it's a reference sheet, return an error - mut_instance.write_msgpack(true).await?; - return Err(TcpTargetError::ReferenceSheetNotAllowed( - "Reference sheet not allowed".to_string(), - )); - } else { - // Not allowed but not a reference sheet, return normally - if holder == member_id { - mut_instance.write_msgpack(true).await?; - return Ok((sheet_name.clone(), false)); - } - } - } - // Tell local the check is not passed - 
mut_instance.write_msgpack(false).await?; - return Err(TcpTargetError::NotFound("Sheet not found".to_string())); - } - Err(TcpTargetError::NoResult("NoResult".to_string())) -} - -/// The macro to write and return a result. -#[macro_export] -macro_rules! write_and_return { - ($instance:expr, $result:expr) => {{ - $instance.lock().await.write($result).await?; - return Ok($result); - }}; -} - -/// The macro to send formatted string to output channel. -/// Usage: local_println!(output, "format string", arg1, arg2, ...) -#[macro_export] -macro_rules! local_println { - ($output:expr, $($arg:tt)*) => {{ - let formatted = format!($($arg)*); - let _ = $output.send(formatted).await; - }}; -} diff --git a/crates/vcs_actions/src/actions/local_actions.rs b/crates/vcs_actions/src/actions/local_actions.rs deleted file mode 100644 index 53a1ff8..0000000 --- a/crates/vcs_actions/src/actions/local_actions.rs +++ /dev/null @@ -1,525 +0,0 @@ -use std::{ - collections::{HashMap, HashSet}, - io::ErrorKind, - net::SocketAddr, - path::PathBuf, - time::SystemTime, -}; - -use action_system::{action::ActionContext, macros::action_gen}; -use cfg_file::config::ConfigFile; -use log::info; -use serde::{Deserialize, Serialize}; -use tcp_connection::error::TcpTargetError; -use vcs_data::{ - constants::{ - CLIENT_PATH_CACHED_SHEET, CLIENT_PATH_LOCAL_SHEET, REF_SHEET_NAME, - SERVER_SUFFIX_SHEET_SHARE_FILE, VAULT_HOST_NAME, - }, - data::{ - local::{ - cached_sheet::CachedSheet, - config::LocalConfig, - latest_file_data::LatestFileData, - latest_info::{LatestInfo, SheetInfo}, - vault_modified::sign_vault_modified, - }, - member::MemberId, - sheet::{SheetData, SheetName, SheetPathBuf}, - vault::{ - config::VaultUuid, - sheet_share::{Share, SheetShareId}, - virtual_file::{VirtualFileId, VirtualFileVersion, VirtualFileVersionDescription}, - }, - }, -}; - -use crate::actions::{ - auth_member, check_connection_instance, try_get_local_workspace, try_get_vault, -}; - -#[derive(Serialize, Deserialize)] -pub 
enum SetUpstreamVaultActionResult { - // Success - DirectedAndStained, - Redirected, - - // Fail - AlreadyStained, - AuthorizeFailed(String), - RedirectFailed(String), - SameUpstream, - - Done, -} - -#[action_gen] -pub async fn set_upstream_vault_action( - ctx: ActionContext, - upstream: SocketAddr, -) -> Result { - let instance = check_connection_instance(&ctx)?; - - // Auth Member - if let Err(e) = auth_member(&ctx, instance).await { - return Ok(SetUpstreamVaultActionResult::AuthorizeFailed(e.to_string())); - } - - // Direct - if ctx.is_proc_on_remote() { - let vault = try_get_vault(&ctx)?; - instance - .lock() - .await - .write(*vault.config().vault_uuid()) - .await?; - return Ok(SetUpstreamVaultActionResult::Done); - } - - if ctx.is_proc_on_local() { - info!("Authorize successful. directing to upstream vault."); - - // Read the vault UUID from the instance - let vault_uuid = instance.lock().await.read::().await?; - - let local_workspace = try_get_local_workspace(&ctx)?; - let local_config = local_workspace.config(); - - let mut mut_local_config = local_config.lock().await; - if !mut_local_config.stained() { - // Stain the local workspace - mut_local_config.stain(vault_uuid); - - // Set the upstream address - mut_local_config.set_vault_addr(upstream); - - // Store the updated config - LocalConfig::write(&mut_local_config).await?; - - info!("Workspace stained!"); - return Ok(SetUpstreamVaultActionResult::DirectedAndStained); - } else { - // Local workspace is already stained, redirecting - let Some(stained_uuid) = mut_local_config.stained_uuid() else { - return Ok(SetUpstreamVaultActionResult::RedirectFailed( - "Stained uuid not found".to_string(), - )); - }; - let local_upstream = mut_local_config.upstream_addr(); - - // Address changed, but same UUID. 
- if vault_uuid == stained_uuid { - if local_upstream != upstream { - // Set the upstream address - mut_local_config.set_vault_addr(upstream); - - // Store the updated config - LocalConfig::write(&mut_local_config).await?; - return Ok(SetUpstreamVaultActionResult::Redirected); - } else { - return Ok(SetUpstreamVaultActionResult::SameUpstream); - } - } - return Ok(SetUpstreamVaultActionResult::AlreadyStained); - } - } - - Err(TcpTargetError::NoResult("No result.".to_string())) -} - -#[derive(Serialize, Deserialize)] -pub enum UpdateToLatestInfoResult { - Success, - - // Fail - AuthorizeFailed(String), - SyncCachedSheetFail(SyncCachedSheetFailReason), -} - -#[derive(Serialize, Deserialize)] -pub enum SyncCachedSheetFailReason { - PathAlreadyExist(PathBuf), -} - -#[action_gen] -pub async fn update_to_latest_info_action( - ctx: ActionContext, - _unused: (), -) -> Result { - let instance = check_connection_instance(&ctx)?; - - let (member_id, _is_host_mode) = match auth_member(&ctx, instance).await { - Ok(id) => id, - Err(e) => return Ok(UpdateToLatestInfoResult::AuthorizeFailed(e.to_string())), - }; - - info!("Sending latest info to {}", member_id); - - // Sync Latest Info - { - if ctx.is_proc_on_remote() { - let vault = try_get_vault(&ctx)?; - - // Build latest info - let mut latest_info = LatestInfo::default(); - - // Sheet & Share - let mut shares_in_my_sheets: HashMap> = - HashMap::new(); - let mut member_owned = Vec::new(); - let mut member_visible = Vec::new(); - let mut ref_sheets = HashSet::new(); - - for sheet in vault.sheets().await? { - // Build share parts - if let Some(holder) = sheet.holder() { - if holder == &member_id || holder == VAULT_HOST_NAME { - let mut sheet_shares: HashMap = HashMap::new(); - for share in sheet.get_shares().await? 
{ - // Get SharePath - let Some(share_path) = share.path.clone() else { - continue; - }; - // Get ShareId from SharePath - let Some(share_id) = share_path.file_name() else { - continue; - }; - let share_id = share_id.display().to_string(); - let share_id_trimed = - share_id.trim_end_matches(SERVER_SUFFIX_SHEET_SHARE_FILE); - sheet_shares.insert(share_id_trimed.to_string(), share); - } - shares_in_my_sheets.insert(sheet.name().clone(), sheet_shares); - } - } - - // Build sheet parts - let holder_is_host = - sheet.holder().unwrap_or(&String::default()) == &VAULT_HOST_NAME; - if sheet.holder().is_some() - && (sheet.holder().unwrap() == &member_id || holder_is_host) - { - member_owned.push(sheet.name().clone()); - if holder_is_host { - ref_sheets.insert(sheet.name().clone()); - } - } else { - member_visible.push(SheetInfo { - sheet_name: sheet.name().clone(), - holder_name: sheet.holder().cloned(), - }); - } - } - - // Record Share & Sheet - latest_info.visible_sheets = member_owned; - latest_info.invisible_sheets = member_visible; - latest_info.shares_in_my_sheets = shares_in_my_sheets; - - // RefSheet - let ref_sheet_data = vault.sheet(&REF_SHEET_NAME.to_string()).await?.to_data(); - latest_info.ref_sheet_content = ref_sheet_data.clone(); - latest_info.ref_sheet_vfs_mapping = ref_sheet_data - .mapping() - .into_iter() - .map(|(path, file)| (file.id.clone(), path.clone())) - .collect::>(); - latest_info.reference_sheets = ref_sheets; - - // Members - let members = vault.members().await?; - latest_info.vault_members = members; - - // Send - instance - .lock() - .await - .write_large_msgpack(latest_info, 512_u16) - .await?; - } - - if ctx.is_proc_on_local() { - let workspace = try_get_local_workspace(&ctx)?; - let mut latest_info = instance - .lock() - .await - .read_large_msgpack::(512_u16) - .await?; - latest_info.update_instant = Some(SystemTime::now()); - LatestInfo::write_to( - &latest_info, - LatestInfo::latest_info_path(workspace.local_path(), &member_id), - ) - 
.await?; - } - } - - info!("Update sheets to {}", member_id); - - // Sync Remote Sheets - { - if ctx.is_proc_on_local() { - let workspace = try_get_local_workspace(&ctx)?; - let Ok(latest_info) = LatestInfo::read_from(LatestInfo::latest_info_path( - workspace.local_path(), - &member_id, - )) - .await - else { - return Err(TcpTargetError::Io("Read latest info failed".to_string())); - }; - - // Collect all local versions - let mut local_versions = vec![]; - for request_sheet in latest_info.visible_sheets { - let Ok(data) = CachedSheet::cached_sheet_data(&request_sheet).await else { - // For newly created sheets, the version is 0. - // Send -1 to distinguish from 0, ensuring the upstream will definitely send the sheet information - local_versions.push((request_sheet, -1)); - continue; - }; - local_versions.push((request_sheet, data.write_count())); - } - - // Send the version list - let len = local_versions.len(); - instance.lock().await.write_msgpack(local_versions).await?; - - if len < 1 { - // Don't return here, continue to next section - // But we need to consume the false marker from the server - if ctx.is_proc_on_local() { - let mut mut_instance = instance.lock().await; - let _: bool = mut_instance.read_msgpack().await?; - } - } else { - // Receive data - if ctx.is_proc_on_local() { - let mut mut_instance = instance.lock().await; - loop { - let in_coming: bool = mut_instance.read_msgpack().await?; - if in_coming { - let (sheet_name, data): (SheetName, SheetData) = - mut_instance.read_large_msgpack(1024u16).await?; - - let Some(path) = CachedSheet::cached_sheet_path(sheet_name) else { - return Err(TcpTargetError::NotFound( - "Workspace not found".to_string(), - )); - }; - - SheetData::write_to(&data, path).await?; - } else { - break; - } - } - } - } - } - if ctx.is_proc_on_remote() { - let vault = try_get_vault(&ctx)?; - let mut mut_instance = instance.lock().await; - - let local_versions = mut_instance.read_msgpack::>().await?; - - for (sheet_name, version) in 
local_versions.iter() { - let sheet = vault.sheet(sheet_name).await?; - if let Some(holder) = sheet.holder() - && (holder == &member_id || holder == VAULT_HOST_NAME) - && &sheet.write_count() != version - { - mut_instance.write_msgpack(true).await?; - mut_instance - .write_large_msgpack((sheet_name, sheet.to_data()), 1024u16) - .await?; - } - } - mut_instance.write_msgpack(false).await?; - } - } - - info!("Fetch held status to {}", member_id); - - // Sync Held Info - { - if ctx.is_proc_on_local() { - let workspace = try_get_local_workspace(&ctx)?; - - let Ok(latest_info) = LatestInfo::read_from(LatestInfo::latest_info_path( - workspace.local_path(), - &member_id, - )) - .await - else { - return Err(TcpTargetError::Io("Read latest info failed".to_string())); - }; - - // Collect files that need to know the holder - let mut holder_wants_know = Vec::new(); - for sheet_name in &latest_info.visible_sheets { - if let Ok(sheet_data) = CachedSheet::cached_sheet_data(sheet_name).await { - holder_wants_know - .extend(sheet_data.mapping().values().map(|value| value.id.clone())); - } - } - - // Send request - let mut mut_instance = instance.lock().await; - mut_instance - .write_large_msgpack(&holder_wants_know, 1024u16) - .await?; - - // Receive information and write to local - let result: HashMap< - VirtualFileId, - ( - Option, - VirtualFileVersion, - Vec<(VirtualFileVersion, VirtualFileVersionDescription)>, - ), - > = mut_instance.read_large_msgpack(1024u16).await?; - - // Read configuration file - let path = LatestFileData::data_path(&member_id)?; - let mut latest_file_data: LatestFileData = - LatestFileData::read_from(&path).await.unwrap_or_default(); - - // Write the received information - latest_file_data.update_info(result); - - // Write - LatestFileData::write_to(&latest_file_data, &path).await?; - } - - if ctx.is_proc_on_remote() { - let vault = try_get_vault(&ctx)?; - let mut mut_instance = instance.lock().await; - - // Read the request - let holder_wants_know: Vec = 
- mut_instance.read_large_msgpack(1024u16).await?; - - // Organize the information - let mut result: HashMap< - VirtualFileId, - ( - Option, - VirtualFileVersion, - Vec<(VirtualFileVersion, VirtualFileVersionDescription)>, - ), - > = HashMap::new(); - for id in holder_wants_know { - let Ok(meta) = vault.virtual_file_meta(&id).await else { - continue; - }; - let holder = if meta.hold_member().is_empty() { - None - } else { - Some(meta.hold_member().clone()) - }; - let latest_version = meta.version_latest(); - - let all_versions = meta.versions(); - let all_descriptions = meta.version_descriptions(); - let histories = all_versions - .iter() - .filter_map(|v| { - let Some(desc) = all_descriptions.get(v) else { - return None; - }; - Some((v.clone(), desc.clone())) - }) - .collect::>(); - - result.insert(id, (holder, latest_version, histories)); - } - - // Send information - mut_instance.write_large_msgpack(&result, 1024u16).await?; - } - } - - // Sync cached sheet to local sheet - if ctx.is_proc_on_local() { - let workspace = try_get_local_workspace(&ctx)?; - let cached_sheet_path = workspace.local_path().join(CLIENT_PATH_CACHED_SHEET); - let local_sheet_path = workspace.local_path().join(CLIENT_PATH_LOCAL_SHEET); - if !local_sheet_path.exists() || !cached_sheet_path.exists() { - // No need to sync - if ctx.is_proc_on_local() { - sign_vault_modified(false).await; - } - return Ok(UpdateToLatestInfoResult::Success); - } - - let cached_sheet_paths = - extract_sheet_names_from_paths(CachedSheet::cached_sheet_paths().await?)?; - - // Match cached sheets and local sheets, and sync content - for (cached_sheet_name, _cached_sheet_path) in cached_sheet_paths { - // Read cached sheet and local sheet - let cached_sheet = CachedSheet::cached_sheet_data(&cached_sheet_name).await?; - let Ok(mut local_sheet) = workspace.local_sheet(&member_id, &cached_sheet_name).await - else { - continue; - }; - - // Read cached id mapping - let Some(cached_sheet_id_mapping) = 
cached_sheet.id_mapping() else { - continue; - }; - - for (cached_item_id, cached_item_path) in cached_sheet_id_mapping.iter() { - let path_by_id = { local_sheet.path_by_id(cached_item_id).cloned() }; - - // Get local path - let Some(local_path) = path_by_id else { - continue; - }; - - if &local_path == cached_item_path { - continue; - } - - // If path not match, try to move - let move_result = local_sheet.move_mapping(&local_path, cached_item_path); - if let Err(e) = move_result { - match e.kind() { - ErrorKind::AlreadyExists => { - return Ok(UpdateToLatestInfoResult::SyncCachedSheetFail( - SyncCachedSheetFailReason::PathAlreadyExist( - cached_item_path.clone(), - ), - )); - } - _ => return Err(e.into()), - } - } - local_sheet.write().await?; - } - } - } - - if ctx.is_proc_on_local() { - sign_vault_modified(false).await; - } - Ok(UpdateToLatestInfoResult::Success) -} - -/// Extract sheet names from file paths -fn extract_sheet_names_from_paths( - paths: Vec, -) -> Result, std::io::Error> { - let mut result = HashMap::new(); - for p in paths { - let sheet_name = p - .file_stem() - .and_then(|s| s.to_str()) - .map(|s| s.to_string()) - .ok_or_else(|| { - std::io::Error::new(std::io::ErrorKind::InvalidData, "Invalid file name") - })?; - result.insert(sheet_name, p); - } - Ok(result) -} diff --git a/crates/vcs_actions/src/actions/sheet_actions.rs b/crates/vcs_actions/src/actions/sheet_actions.rs deleted file mode 100644 index 4c9977e..0000000 --- a/crates/vcs_actions/src/actions/sheet_actions.rs +++ /dev/null @@ -1,583 +0,0 @@ -use std::{collections::HashMap, io::ErrorKind}; - -use action_system::{action::ActionContext, macros::action_gen}; -use serde::{Deserialize, Serialize}; -use tcp_connection::error::TcpTargetError; -use vcs_data::{ - constants::VAULT_HOST_NAME, - data::{ - local::{ - vault_modified::sign_vault_modified, - workspace_analyzer::{FromRelativePathBuf, ToRelativePathBuf}, - }, - sheet::SheetName, - vault::sheet_share::{ShareMergeMode, SheetShareId}, - 
}, -}; - -use crate::{ - actions::{ - auth_member, check_connection_instance, get_current_sheet_name, try_get_local_workspace, - try_get_vault, - }, - write_and_return, -}; - -#[derive(Default, Serialize, Deserialize)] -pub enum MakeSheetActionResult { - Success, - SuccessRestore, - - // Fail - AuthorizeFailed(String), - SheetAlreadyExists, - SheetCreationFailed(String), - - #[default] - Unknown, -} - -/// Build a sheet with context -#[action_gen] -pub async fn make_sheet_action( - ctx: ActionContext, - sheet_name: SheetName, -) -> Result { - let instance = check_connection_instance(&ctx)?; - - // Auth Member - let (member_id, is_host_mode) = match auth_member(&ctx, instance).await { - Ok(id) => id, - Err(e) => return Ok(MakeSheetActionResult::AuthorizeFailed(e.to_string())), - }; - - if ctx.is_proc_on_remote() { - let vault = try_get_vault(&ctx)?; - let holder = if is_host_mode { - VAULT_HOST_NAME.to_string() - } else { - member_id - }; - - // Check if the sheet already exists - if let Ok(mut sheet) = vault.sheet(&sheet_name).await { - // If the sheet has no holder, assign it to the current member (restore operation) - if sheet.holder().is_none() { - sheet.set_holder(holder.clone()); - match sheet.persist().await { - Ok(_) => { - write_and_return!(instance, MakeSheetActionResult::SuccessRestore); - } - Err(e) => { - write_and_return!( - instance, - MakeSheetActionResult::SheetCreationFailed(e.to_string()) - ); - } - } - } else { - write_and_return!(instance, MakeSheetActionResult::SheetAlreadyExists); - } - } else { - // Create the sheet - match vault.create_sheet(&sheet_name, &holder).await { - Ok(_) => { - write_and_return!(instance, MakeSheetActionResult::Success); - } - Err(e) => { - write_and_return!( - instance, - MakeSheetActionResult::SheetCreationFailed(e.to_string()) - ); - } - } - } - } - - if ctx.is_proc_on_local() { - let result = instance - .lock() - .await - .read::() - .await?; - if matches!(result, MakeSheetActionResult::Success) { - 
sign_vault_modified(true).await; - } - return Ok(result); - } - - Err(TcpTargetError::NoResult("No result.".to_string())) -} - -#[derive(Default, Serialize, Deserialize)] -pub enum DropSheetActionResult { - Success, - - // Fail - SheetInUse, - AuthorizeFailed(String), - SheetNotExists, - SheetDropFailed(String), - NoHolder, - NotOwner, - - #[default] - Unknown, -} - -#[action_gen] -pub async fn drop_sheet_action( - ctx: ActionContext, - sheet_name: SheetName, -) -> Result { - let instance = check_connection_instance(&ctx)?; - - // Auth Member - let (member_id, is_host_mode) = match auth_member(&ctx, instance).await { - Ok(id) => id, - Err(e) => { - return Ok(DropSheetActionResult::AuthorizeFailed(e.to_string())); - } - }; - - // Check sheet in use on local - if ctx.is_proc_on_local() { - let local_workspace = try_get_local_workspace(&ctx)?; - if let Some(sheet) = local_workspace.config().lock().await.sheet_in_use() { - if sheet == &sheet_name { - instance.lock().await.write(false).await?; - return Ok(DropSheetActionResult::SheetInUse); - } - instance.lock().await.write(true).await?; - } else { - instance.lock().await.write(true).await?; - } - } - - if ctx.is_proc_on_remote() { - // Check if client sheet is in use - let sheet_in_use = instance.lock().await.read::().await?; - if !sheet_in_use { - return Ok(DropSheetActionResult::SheetInUse); - } - - let vault = try_get_vault(&ctx)?; - - // Check if the sheet exists - let mut sheet = match vault.sheet(&sheet_name).await { - Ok(sheet) => sheet, - Err(e) => { - if e.kind() == ErrorKind::NotFound { - write_and_return!(instance, DropSheetActionResult::SheetNotExists); - } else { - write_and_return!( - instance, - DropSheetActionResult::SheetDropFailed(e.to_string()) - ); - } - } - }; - - // Get the sheet's holder - let Some(holder) = sheet.holder() else { - write_and_return!(instance, DropSheetActionResult::NoHolder); - }; - - // Verify that the sheet holder is either the current user or the host - // All sheets belong to 
the host - if holder != &member_id && !is_host_mode { - write_and_return!(instance, DropSheetActionResult::NotOwner); - } - - // Drop the sheet - sheet.forget_holder(); - match sheet.persist().await { - Ok(_) => { - write_and_return!(instance, DropSheetActionResult::Success); - } - Err(e) => { - write_and_return!( - instance, - DropSheetActionResult::SheetDropFailed(e.to_string()) - ); - } - } - } - - if ctx.is_proc_on_local() { - let result = instance - .lock() - .await - .read::() - .await?; - if matches!(result, DropSheetActionResult::Success) { - sign_vault_modified(true).await; - } - return Ok(result); - } - - Err(TcpTargetError::NoResult("No result.".to_string())) -} - -pub type OperationArgument = (EditMappingOperations, Option); - -#[derive(Serialize, Deserialize, PartialEq, Eq, Clone)] -pub enum EditMappingOperations { - Move, - Erase, -} - -#[derive(Serialize, Deserialize, Default)] -pub enum EditMappingActionResult { - Success, - - // Fail - AuthorizeFailed(String), - EditNotAllowed, - MappingNotFound(FromRelativePathBuf), - InvalidMove(InvalidMoveReason), - - #[default] - Unknown, -} - -#[derive(Serialize, Deserialize)] -pub enum InvalidMoveReason { - MoveOperationButNoTarget(FromRelativePathBuf), - ContainsDuplicateMapping(ToRelativePathBuf), -} - -#[derive(Serialize, Deserialize, Clone)] -pub struct EditMappingActionArguments { - pub operations: HashMap, -} - -/// This Action only modifies Sheet Mapping and -/// does not interfere with the actual location of local files or Local Mapping -#[action_gen] -pub async fn edit_mapping_action( - ctx: ActionContext, - args: EditMappingActionArguments, -) -> Result { - let instance = check_connection_instance(&ctx)?; - - // Auth Member - let (member_id, is_host_mode) = match auth_member(&ctx, instance).await { - Ok(id) => id, - Err(e) => { - return Ok(EditMappingActionResult::AuthorizeFailed(e.to_string())); - } - }; - - // Check sheet - let (sheet_name, is_ref_sheet) = - get_current_sheet_name(&ctx, instance, 
&member_id, true).await?; - - // Can modify Sheet when not in reference sheet or in Host mode - let can_modify_sheet = !is_ref_sheet || is_host_mode; - - if !can_modify_sheet { - return Ok(EditMappingActionResult::EditNotAllowed); - } - - if ctx.is_proc_on_remote() { - let vault = try_get_vault(&ctx)?; - let mut sheet = vault.sheet(&sheet_name).await?; - - // Precheck - for (from_path, (operation, to_path)) in args.operations.iter() { - // Check mapping exists - if !sheet.mapping().contains_key(from_path) { - write_and_return!( - instance, - EditMappingActionResult::MappingNotFound(from_path.clone()) - ); - } - - // Move check - if operation == &EditMappingOperations::Move { - // Check if target exists - if let Some(to_path) = to_path { - // Check if target is duplicate - if sheet.mapping().contains_key(to_path) { - write_and_return!( - instance, - EditMappingActionResult::InvalidMove( - InvalidMoveReason::ContainsDuplicateMapping(to_path.clone()) - ) - ); - } - } else { - write_and_return!( - instance, - EditMappingActionResult::InvalidMove( - InvalidMoveReason::MoveOperationButNoTarget(from_path.clone()) - ) - ); - } - } - } - - // Process - for (from_path, (operation, to_path)) in args.operations { - match operation { - // During the Precheck phase, it has been ensured that: - // 1. The mapping to be edited for the From path indeed exists - // 2. The location of the To path is indeed empty - // 3. 
In Move mode, To path can be safely unwrapped - // Therefore, the following unwrap() calls are safe to execute - EditMappingOperations::Move => { - let mapping = sheet.mapping_mut().remove(&from_path).unwrap(); - let to_path = to_path.unwrap(); - sheet - .add_mapping(to_path, mapping.id, mapping.version) - .await?; - } - EditMappingOperations::Erase => { - sheet.mapping_mut().remove(&from_path).unwrap(); - } - } - } - - // Write - sheet.persist().await?; - - write_and_return!(instance, EditMappingActionResult::Success); - } - - if ctx.is_proc_on_local() { - let result = instance - .lock() - .await - .read::() - .await?; - if matches!(result, EditMappingActionResult::Success) { - sign_vault_modified(true).await; - } - return Ok(result); - } - - Ok(EditMappingActionResult::Success) -} - -#[derive(Serialize, Deserialize, PartialEq, Eq, Clone)] -pub struct ShareMappingArguments { - pub mappings: Vec, - pub description: String, - // None = current sheet, - // Some(sheet_name) = other ref(public) sheet - pub from_sheet: Option, - pub to_sheet: SheetName, -} - -#[derive(Serialize, Deserialize, Default)] -pub enum ShareMappingActionResult { - Success, - - // Fail - AuthorizeFailed(String), - TargetSheetNotFound(SheetName), - TargetIsSelf, - MappingNotFound(FromRelativePathBuf), - - #[default] - Unknown, -} - -#[action_gen] -pub async fn share_mapping_action( - ctx: ActionContext, - args: ShareMappingArguments, -) -> Result { - let instance = check_connection_instance(&ctx)?; - - // Auth Member - let (member_id, _is_host_mode) = match auth_member(&ctx, instance).await { - Ok(id) => id, - Err(e) => { - return Ok(ShareMappingActionResult::AuthorizeFailed(e.to_string())); - } - }; - - // Check sheet - let sheet_name = args.from_sheet.unwrap_or( - get_current_sheet_name(&ctx, instance, &member_id, true) - .await? 
- .0, - ); - - if ctx.is_proc_on_remote() { - let vault = try_get_vault(&ctx)?; - let sheet = vault.sheet(&sheet_name).await?; - - // Tip: Because sheet_name may specify a sheet that does not belong to the user, - // a secondary verification is required. - - // Check if the sheet holder is Some and matches the member_id or is the host - let Some(holder) = sheet.holder() else { - // If there's no holder, the sheet cannot be shared from - write_and_return!( - instance, - ShareMappingActionResult::AuthorizeFailed("Sheet has no holder".to_string()) - ); - }; - - // Verify the holder is either the current member or the host - if holder != &member_id && holder != VAULT_HOST_NAME { - write_and_return!( - instance, - ShareMappingActionResult::AuthorizeFailed( - "Not sheet holder or ref sheet".to_string() - ) - ); - } - - let to_sheet_name = args.to_sheet; - - // Verify target sheet exists - if !vault.sheet_names()?.contains(&to_sheet_name) { - // Does not exist - write_and_return!( - instance, - ShareMappingActionResult::TargetSheetNotFound(to_sheet_name.clone()) - ); - } - - // Verify sheet is not self - if sheet_name == to_sheet_name { - // Is self - write_and_return!(instance, ShareMappingActionResult::TargetIsSelf); - } - - // Verify all mappings are correct - for mapping in args.mappings.iter() { - if !sheet.mapping().contains_key(mapping) { - // If any mapping is invalid, indicate failure - write_and_return!( - instance, - ShareMappingActionResult::MappingNotFound(mapping.clone()) - ); - } - } - - // Execute sharing logic - sheet - .share_mappings(&to_sheet_name, args.mappings, &member_id, args.description) - .await?; - - // Sharing successful - write_and_return!(instance, ShareMappingActionResult::Success); - } - - if ctx.is_proc_on_local() { - let result = instance - .lock() - .await - .read::() - .await?; - return Ok(result); - } - - Ok(ShareMappingActionResult::Success) -} - -#[derive(Serialize, Deserialize, PartialEq, Eq, Clone)] -pub struct 
MergeShareMappingArguments { - pub share_id: SheetShareId, - pub share_merge_mode: ShareMergeMode, -} - -#[derive(Serialize, Deserialize, Default)] -pub enum MergeShareMappingActionResult { - Success, - - // Fail - HasConflicts, - AuthorizeFailed(String), - EditNotAllowed, - ShareIdNotFound(SheetShareId), - MergeFails(String), - - #[default] - Unknown, -} - -#[action_gen] -pub async fn merge_share_mapping_action( - ctx: ActionContext, - args: MergeShareMappingArguments, -) -> Result { - let instance = check_connection_instance(&ctx)?; - - // Auth Member - let (member_id, is_host_mode) = match auth_member(&ctx, instance).await { - Ok(id) => id, - Err(e) => { - return Ok(MergeShareMappingActionResult::AuthorizeFailed( - e.to_string(), - )); - } - }; - - // Check sheet - let (sheet_name, is_ref_sheet) = - get_current_sheet_name(&ctx, instance, &member_id, true).await?; - - // Can modify Sheet when not in reference sheet or in Host mode - let can_modify_sheet = !is_ref_sheet || is_host_mode; - - if !can_modify_sheet { - return Ok(MergeShareMappingActionResult::EditNotAllowed); - } - - if ctx.is_proc_on_remote() { - let vault = try_get_vault(&ctx)?; - let share_id = args.share_id; - - // Get the share and sheet - let (sheet, share) = if vault.share_file_path(&sheet_name, &share_id).exists() { - let sheet = vault.sheet(&sheet_name).await?; - let share = sheet.get_share(&share_id).await?; - (sheet, share) - } else { - // Share does not exist - write_and_return!( - instance, - MergeShareMappingActionResult::ShareIdNotFound(share_id.clone()) - ); - }; - - // Perform the merge - match sheet.merge_share(share, args.share_merge_mode).await { - Ok(_) => write_and_return!(instance, MergeShareMappingActionResult::Success), - Err(e) => match e.kind() { - ErrorKind::AlreadyExists => { - write_and_return!(instance, MergeShareMappingActionResult::HasConflicts); - } - _ => { - write_and_return!( - instance, - MergeShareMappingActionResult::MergeFails(e.to_string()) - ); - } - }, - } - 
} - - if ctx.is_proc_on_local() { - let result = instance - .lock() - .await - .read::() - .await?; - match result { - MergeShareMappingActionResult::Success => { - sign_vault_modified(true).await; - } - _ => {} - } - return Ok(result); - } - - Ok(MergeShareMappingActionResult::Success) -} diff --git a/crates/vcs_actions/src/actions/track_action.rs b/crates/vcs_actions/src/actions/track_action.rs deleted file mode 100644 index e5f96b3..0000000 --- a/crates/vcs_actions/src/actions/track_action.rs +++ /dev/null @@ -1,987 +0,0 @@ -use std::{ - collections::{HashMap, HashSet}, - path::PathBuf, - sync::Arc, - time::SystemTime, -}; - -use action_system::{action::ActionContext, macros::action_gen}; -use cfg_file::config::ConfigFile; -use serde::{Deserialize, Serialize}; -use sha1_hash::calc_sha1; -use tcp_connection::{error::TcpTargetError, instance::ConnectionInstance}; -use tokio::{fs, sync::Mutex}; -use vcs_data::{ - constants::CLIENT_FILE_TEMP_FILE, - data::{ - local::{ - cached_sheet::CachedSheet, latest_file_data::LatestFileData, - local_sheet::LocalMappingMetadata, vault_modified::sign_vault_modified, - workspace_analyzer::AnalyzeResult, - }, - member::MemberId, - sheet::SheetName, - vault::{ - config::VaultUuid, - virtual_file::{VirtualFileId, VirtualFileVersion, VirtualFileVersionDescription}, - }, - }, -}; - -use crate::{ - actions::{ - auth_member, check_connection_instance, get_current_sheet_name, try_get_local_output, - try_get_local_workspace, try_get_vault, - }, - local_println, -}; - -pub type NextVersion = String; -pub type UpdateDescription = String; - -const TEMP_NAME: &str = "{temp_name}"; - -#[derive(Serialize, Deserialize)] -pub struct TrackFileActionArguments { - // Path need to track - pub relative_pathes: HashSet, - - // File update info - pub file_update_info: HashMap, - - // Print infos - pub print_infos: bool, - - // overwrite modified files - pub allow_overwrite_modified: bool, -} - -#[derive(Serialize, Deserialize)] -pub enum 
TrackFileActionResult { - Done { - created: Vec, - updated: Vec, - synced: Vec, - skipped: Vec, - }, - - // Fail - AuthorizeFailed(String), - - /// There are local move or missing items that have not been resolved, - /// this situation does not allow track - StructureChangesNotSolved, - - CreateTaskFailed(CreateTaskResult), - UpdateTaskFailed(UpdateTaskResult), - SyncTaskFailed(SyncTaskResult), -} - -#[derive(Serialize, Deserialize)] -pub enum CreateTaskResult { - Success(Vec), // Success(success_relative_pathes) - - /// Create file on existing path in the sheet - CreateFileOnExistPath(PathBuf), - - /// Sheet not found - SheetNotFound(SheetName), -} - -#[derive(Serialize, Deserialize)] -pub enum UpdateTaskResult { - Success(Vec), // Success(success_relative_pathes) - - VerifyFailed { - path: PathBuf, - reason: VerifyFailReason, - }, -} - -#[derive(Serialize, Deserialize, Clone)] -pub enum VerifyFailReason { - SheetNotFound(SheetName), - MappingNotFound, - VirtualFileNotFound(VirtualFileId), - VirtualFileReadFailed(VirtualFileId), - NotHeld, - VersionDismatch(VirtualFileVersion, VirtualFileVersion), // (CurrentVersion, RemoteVersion) - UpdateButNoDescription, // File needs update, but no description exists - VersionAlreadyExist(VirtualFileVersion), // (RemoteVersion) -} - -#[derive(Serialize, Deserialize)] -pub enum SyncTaskResult { - Success(Vec), // Success(success_relative_pathes) -} -#[action_gen] -pub async fn track_file_action( - ctx: ActionContext, - arguments: TrackFileActionArguments, -) -> Result { - let relative_pathes = arguments.relative_pathes; - let instance = check_connection_instance(&ctx)?; - - // Auth Member - let (member_id, is_host_mode) = match auth_member(&ctx, instance).await { - Ok(id) => id, - Err(e) => return Ok(TrackFileActionResult::AuthorizeFailed(e.to_string())), - }; - - // Check sheet - let (sheet_name, is_ref_sheet) = - get_current_sheet_name(&ctx, instance, &member_id, true).await?; - - // Can modify Sheet when not in reference 
sheet or in Host mode - let can_modify_sheet = !is_ref_sheet || is_host_mode; - - if ctx.is_proc_on_local() { - let workspace = try_get_local_workspace(&ctx)?; - let analyzed = AnalyzeResult::analyze_local_status(&workspace).await?; - let latest_file_data = - LatestFileData::read_from(LatestFileData::data_path(&member_id)?).await?; - - if !analyzed.lost.is_empty() || !analyzed.moved.is_empty() { - return Ok(TrackFileActionResult::StructureChangesNotSolved); - } - - let Some(sheet_in_use) = workspace.config().lock().await.sheet_in_use().clone() else { - return Err(TcpTargetError::NotFound("Sheet not found!".to_string())); - }; - - // Read local sheet and member held - let local_sheet = workspace.local_sheet(&member_id, &sheet_in_use).await?; - let cached_sheet = CachedSheet::cached_sheet_data(&sheet_in_use).await?; - let member_held = LatestFileData::read_from(LatestFileData::data_path(&member_id)?).await?; - - let modified = analyzed - .modified - .intersection(&relative_pathes) - .cloned() - .collect::>(); - - // Filter out created files - let created_task = analyzed - .created - .intersection(&relative_pathes) - .cloned() - .collect::>(); - - // Filter out modified files that need to be updated - let mut update_task: Vec = { - let result = modified.iter().filter_map(|p| { - if let Ok(local_data) = local_sheet.mapping_data(p) { - let id = local_data.mapping_vfid(); - let local_ver = local_data.version_when_updated(); - let Some(latest_ver) = latest_file_data.file_version(id) else { - return None; - }; - if let Some(held_member) = member_held.file_holder(id) { - // Check if holder and version match - if held_member == &member_id && local_ver == latest_ver { - return Some(p.clone()); - } - } - }; - None - }); - result.collect() - }; - - let mut skipped_task: Vec = Vec::new(); - - // Filter out files that do not exist locally or have version inconsistencies and need to be synchronized - let mut sync_task: Vec = { - let other: Vec = relative_pathes - .iter() - 
.filter(|p| !created_task.contains(p) && !update_task.contains(p)) - .cloned() - .collect(); - - let result = other.iter().filter_map(|p| { - // Not exists and not lost, first download - if !workspace.local_path().join(p).exists() && !analyzed.lost.contains(p) { - return Some(p.clone()); - } - - // In cached sheet - if !cached_sheet.mapping().contains_key(p) { - return None; - } - - // In local sheet - let local_sheet_mapping = local_sheet.mapping_data(p).ok()?; - let vfid = local_sheet_mapping.mapping_vfid(); - - if let Some(latest_version) = &latest_file_data.file_version(vfid) { - // Version does not match - if &local_sheet_mapping.version_when_updated() != latest_version { - let modified = modified.contains(p); - if modified && arguments.allow_overwrite_modified { - return Some(p.clone()); - } else if modified && !arguments.allow_overwrite_modified { - // If not allowed to overwrite, join skipped tasks - skipped_task.push(p.clone()); - return None; - } - return Some(p.clone()); - } - } - - // File not held and modified - let holder = latest_file_data.file_holder(vfid); - if (holder.is_none() || &member_id != holder.unwrap()) && modified.contains(p) { - // If allow overwrite modified is true, overwrite the file - if arguments.allow_overwrite_modified { - return Some(p.clone()); - } else { - // If not allowed to overwrite, join skipped tasks - skipped_task.push(p.clone()); - return None; - } - } - - None - }); - result.collect() - }; - - // If the sheet cannot be modified, - // the update_task here should be considered invalid and changed to sync rollback - if !can_modify_sheet { - if arguments.allow_overwrite_modified { - sync_task.append(&mut update_task); - update_task.clear(); - } else { - skipped_task.append(&mut update_task); - update_task.clear(); - } - } - - // Package tasks - let tasks: (Vec, Vec, Vec) = - (created_task, update_task, sync_task); - - // Send to remote - { - let mut mut_instance = instance.lock().await; - mut_instance - 
.write_large_msgpack(tasks.clone(), 1024u16) - .await?; - // Drop mutex here - } - - // Process create tasks - let mut success_create = Vec::::new(); - if can_modify_sheet { - success_create = match proc_create_tasks_local( - &ctx, - instance.clone(), - &member_id, - &sheet_name, - tasks.0, - arguments.print_infos, - ) - .await - { - Ok(r) => match r { - CreateTaskResult::Success(relative_pathes) => relative_pathes, - _ => { - return Ok(TrackFileActionResult::CreateTaskFailed(r)); - } - }, - Err(e) => return Err(e), - }; - } - - // Process update tasks - let mut success_update = Vec::::new(); - if can_modify_sheet { - success_update = match proc_update_tasks_local( - &ctx, - instance.clone(), - &member_id, - &sheet_name, - tasks.1, - arguments.print_infos, - arguments.file_update_info, - ) - .await - { - Ok(r) => match r { - UpdateTaskResult::Success(relative_pathes) => relative_pathes, - _ => { - return Ok(TrackFileActionResult::UpdateTaskFailed(r)); - } - }, - Err(e) => return Err(e), - }; - } - - // Process sync tasks - let success_sync = match proc_sync_tasks_local( - &ctx, - instance.clone(), - &member_id, - &sheet_name, - tasks.2, - arguments.print_infos, - ) - .await - { - Ok(r) => match r { - SyncTaskResult::Success(relative_pathes) => relative_pathes, - }, - Err(e) => return Err(e), - }; - - if success_create.len() + success_update.len() > 0 { - sign_vault_modified(true).await; - } - - return Ok(TrackFileActionResult::Done { - created: success_create, - updated: success_update, - synced: success_sync, - skipped: skipped_task, - }); - } - - if ctx.is_proc_on_remote() { - // Read tasks - let (created_task, update_task, sync_task): (Vec, Vec, Vec) = { - let mut mut_instance = instance.lock().await; - mut_instance.read_large_msgpack(1024u16).await? 
- }; - - // Process create tasks - let mut success_create = Vec::::new(); - if can_modify_sheet { - success_create = match proc_create_tasks_remote( - &ctx, - instance.clone(), - &member_id, - &sheet_name, - created_task, - ) - .await - { - Ok(r) => match r { - CreateTaskResult::Success(relative_pathes) => relative_pathes, - _ => { - return Ok(TrackFileActionResult::CreateTaskFailed(r)); - } - }, - Err(e) => return Err(e), - }; - } - - // Process update tasks - let mut success_update = Vec::::new(); - if can_modify_sheet { - success_update = match proc_update_tasks_remote( - &ctx, - instance.clone(), - &member_id, - &sheet_name, - update_task, - arguments.file_update_info, - ) - .await - { - Ok(r) => match r { - UpdateTaskResult::Success(relative_pathes) => relative_pathes, - _ => { - return Ok(TrackFileActionResult::UpdateTaskFailed(r)); - } - }, - Err(e) => return Err(e), - }; - } - - // Process sync tasks - let success_sync = match proc_sync_tasks_remote( - &ctx, - instance.clone(), - &member_id, - &sheet_name, - sync_task, - ) - .await - { - Ok(r) => match r { - SyncTaskResult::Success(relative_pathes) => relative_pathes, - }, - Err(e) => return Err(e), - }; - - return Ok(TrackFileActionResult::Done { - created: success_create, - updated: success_update, - synced: success_sync, - skipped: Vec::new(), // The server doesn't know which files were skipped - }); - } - - Err(TcpTargetError::NoResult("No result.".to_string())) -} - -async fn proc_create_tasks_local( - ctx: &ActionContext, - instance: Arc>, - member_id: &MemberId, - sheet_name: &SheetName, - relative_paths: Vec, - print_infos: bool, -) -> Result { - let workspace = try_get_local_workspace(ctx)?; - let local_output = try_get_local_output(ctx)?; - let mut mut_instance = instance.lock().await; - let mut local_sheet = workspace.local_sheet(member_id, sheet_name).await?; - - if print_infos && relative_paths.len() > 0 { - local_println!(local_output, "Creating {} files...", relative_paths.len()); - } - - // 
Wait for remote detection of whether the sheet exists - let has_sheet = mut_instance.read_msgpack::().await?; - if !has_sheet { - return Ok(CreateTaskResult::SheetNotFound(sheet_name.clone())); - } - - // Wait for remote detection of whether the file exists - let (hasnt_duplicate, duplicate_path) = mut_instance.read_msgpack::<(bool, PathBuf)>().await?; - if !hasnt_duplicate { - return Ok(CreateTaskResult::CreateFileOnExistPath(duplicate_path)); - } - - let mut success_relative_pathes = Vec::new(); - - // Start sending files - for path in relative_paths { - let full_path = workspace.local_path().join(&path); - - // Send file - if mut_instance.write_file(&full_path).await.is_err() { - continue; - } - - // Read virtual file id and version - let (vfid, version, version_desc) = mut_instance - .read_msgpack::<( - VirtualFileId, - VirtualFileVersion, - VirtualFileVersionDescription, - )>() - .await?; - - // Add mapping to local sheet - let hash = sha1_hash::calc_sha1(&full_path, 2048).await.unwrap().hash; - let time = std::fs::metadata(&full_path)?.modified()?; - local_sheet.add_mapping( - &path.clone(), - LocalMappingMetadata::new( - hash, // hash_when_updated - time, // time_when_updated - std::fs::metadata(&full_path)?.len(), // size_when_updated - version_desc, // version_desc_when_updated - version, // version_when_updated - vfid, // mapping_vfid - time, // last_modifiy_check_itme - false, // last_modifiy_check_result - ), - )?; - - // Print success info - if print_infos { - local_println!(local_output, "+ {}", path.display()); - } - - success_relative_pathes.push(path); - } - - // Write local sheet - local_sheet.write().await?; - - Ok(CreateTaskResult::Success(success_relative_pathes)) -} - -async fn proc_create_tasks_remote( - ctx: &ActionContext, - instance: Arc>, - member_id: &MemberId, - sheet_name: &SheetName, - relative_paths: Vec, -) -> Result { - let vault = try_get_vault(ctx)?; - let mut mut_instance = instance.lock().await; - - // Sheet check - let Ok(mut 
sheet) = vault.sheet(sheet_name).await else { - // Sheet not found - mut_instance.write_msgpack(false).await?; - return Ok(CreateTaskResult::SheetNotFound(sheet_name.to_string())); - }; - mut_instance.write_msgpack(true).await?; - - // Duplicate create precheck - for path in relative_paths.iter() { - if sheet.mapping().contains_key(path) { - // Duplicate file - mut_instance.write_msgpack((false, path)).await?; - return Ok(CreateTaskResult::CreateFileOnExistPath(path.clone())); - } - } - mut_instance.write_msgpack((true, PathBuf::new())).await?; - - let mut success_relative_pathes = Vec::new(); - - // Start receiving files - for path in relative_paths { - // Read file and create virtual file - let Ok(vfid) = vault - .create_virtual_file_from_connection(&mut mut_instance, member_id) - .await - else { - continue; - }; - - // Record virtual file to sheet - let vf_meta = vault.virtual_file(&vfid)?.read_meta().await?; - sheet - .add_mapping(path.clone(), vfid.clone(), vf_meta.version_latest()) - .await?; - - // Tell client the virtual file id and version - mut_instance - .write_msgpack(( - vfid, - vf_meta.version_latest(), - vf_meta - .version_description(vf_meta.version_latest()) - .unwrap(), - )) - .await?; - - success_relative_pathes.push(path); - } - - sheet.persist().await?; - - Ok(CreateTaskResult::Success(success_relative_pathes)) -} - -async fn proc_update_tasks_local( - ctx: &ActionContext, - instance: Arc>, - member_id: &MemberId, - sheet_name: &SheetName, - relative_paths: Vec, - print_infos: bool, - file_update_info: HashMap, -) -> Result { - let workspace = try_get_local_workspace(ctx)?; - let local_output = try_get_local_output(ctx)?; - let mut mut_instance = instance.lock().await; - let mut local_sheet = workspace.local_sheet(member_id, sheet_name).await?; - - let mut success = Vec::new(); - - if print_infos && relative_paths.len() > 0 { - local_println!(local_output, "Updating {} files...", relative_paths.len()); - } - - for path in relative_paths.iter() 
{ - let Ok(mapping) = local_sheet.mapping_data(path) else { - // Is mapping not found, write empty - mut_instance.write_msgpack("".to_string()).await?; - continue; - }; - // Read and send file version - let Ok(_) = mut_instance - .write_msgpack(mapping.version_when_updated()) - .await - else { - continue; - }; - - // Read verify result - let verify_result: bool = mut_instance.read_msgpack().await?; - if !verify_result { - let reason = mut_instance.read_msgpack::().await?; - return Ok(UpdateTaskResult::VerifyFailed { - path: path.clone(), - reason: reason.clone(), - }); - } - - // Calc hash - let hash_result = match sha1_hash::calc_sha1(workspace.local_path().join(path), 2048).await - { - Ok(r) => r, - Err(_) => { - mut_instance.write_msgpack(false).await?; // Not Ready - continue; - } - }; - - // Get next version - let Some((next_version, description)) = file_update_info.get(path) else { - mut_instance.write_msgpack(false).await?; // Not Ready - continue; - }; - - // Write - mut_instance.write_msgpack(true).await?; // Ready - mut_instance.write_file(path).await?; - - // Read upload result - let upload_result: bool = mut_instance.read_msgpack().await?; - if upload_result { - // Success - let mapping_data_mut = local_sheet.mapping_data_mut(path).unwrap(); - let version = mapping_data_mut.version_when_updated().clone(); - mapping_data_mut.set_hash_when_updated(hash_result.hash); - mapping_data_mut.set_version_when_updated(next_version.clone()); - mapping_data_mut.set_version_desc_when_updated(VirtualFileVersionDescription { - creator: member_id.clone(), - description: description.clone(), - }); - mapping_data_mut.set_last_modifiy_check_result(false); // Mark file not modified - - // Write - local_sheet.write().await?; - - // Push path into success vec - success.push(path.clone()); - - // Print success info - if print_infos { - local_println!( - local_output, - "↑ {} ({} -> {})", - path.display(), - version, - next_version - ); - } - } - } - - 
Ok(UpdateTaskResult::Success(success)) -} - -async fn proc_update_tasks_remote( - ctx: &ActionContext, - instance: Arc>, - member_id: &MemberId, - sheet_name: &SheetName, - relative_paths: Vec, - file_update_info: HashMap, -) -> Result { - let vault = try_get_vault(ctx)?; - let mut mut_instance = instance.lock().await; - - let mut success = Vec::new(); - - for path in relative_paths.iter() { - // Read version - let Ok(version) = mut_instance.read_msgpack::().await else { - continue; - }; - if version.is_empty() { - continue; - } - - // Verify - let Some((next_version, description)) = file_update_info.get(path) else { - mut_instance.write_msgpack(false).await?; - let reason = VerifyFailReason::UpdateButNoDescription; - mut_instance.write_msgpack(reason.clone()).await?; - return Ok(UpdateTaskResult::VerifyFailed { - path: path.clone(), - reason, - }); // Sheet not found - }; - let Ok(mut sheet) = vault.sheet(sheet_name).await else { - mut_instance.write_msgpack(false).await?; - let reason = VerifyFailReason::SheetNotFound(sheet_name.clone()); - mut_instance.write_msgpack(reason.clone()).await?; - return Ok(UpdateTaskResult::VerifyFailed { - path: path.clone(), - reason, - }); // Sheet not found - }; - let Some(mapping_data) = sheet.mapping_mut().get_mut(path) else { - mut_instance.write_msgpack(false).await?; - let reason = VerifyFailReason::MappingNotFound; - mut_instance.write_msgpack(reason.clone()).await?; - return Ok(UpdateTaskResult::VerifyFailed { - path: path.clone(), - reason, - }); // Mapping not found - }; - let Ok(vf) = vault.virtual_file(&mapping_data.id) else { - mut_instance.write_msgpack(false).await?; - let reason = VerifyFailReason::VirtualFileNotFound(mapping_data.id.clone()); - mut_instance.write_msgpack(reason.clone()).await?; - return Ok(UpdateTaskResult::VerifyFailed { - path: path.clone(), - reason, - }); // Virtual file not found - }; - let Ok(vf_metadata) = vf.read_meta().await else { - mut_instance.write_msgpack(false).await?; - let reason 
= VerifyFailReason::VirtualFileReadFailed(mapping_data.id.clone()); - mut_instance.write_msgpack(reason.clone()).await?; - return Ok(UpdateTaskResult::VerifyFailed { - path: path.clone(), - reason, - }); // Read virtual file metadata failed - }; - if vf_metadata.versions().contains(next_version) { - mut_instance.write_msgpack(false).await?; - let reason = VerifyFailReason::VersionAlreadyExist(version); - mut_instance.write_msgpack(reason.clone()).await?; - return Ok(UpdateTaskResult::VerifyFailed { - path: path.clone(), - reason, - }); // VersionAlreadyExist - } - if vf_metadata.hold_member() != member_id { - mut_instance.write_msgpack(false).await?; - let reason = VerifyFailReason::NotHeld; - mut_instance.write_msgpack(reason.clone()).await?; - return Ok(UpdateTaskResult::VerifyFailed { - path: path.clone(), - reason, - }); // Member not held it - }; - if vf_metadata.version_latest() != version { - mut_instance.write_msgpack(false).await?; - let reason = - VerifyFailReason::VersionDismatch(version.clone(), vf_metadata.version_latest()); - mut_instance.write_msgpack(reason.clone()).await?; - return Ok(UpdateTaskResult::VerifyFailed { - path: path.clone(), - reason, - }); // Version does not match - }; - mut_instance.write_msgpack(true).await?; // Verified - - // Read if local ready - let ready: bool = mut_instance.read_msgpack().await?; - if !ready { - continue; - } - - // Read and update virtual file - match vault - .update_virtual_file_from_connection( - &mut mut_instance, - member_id, - &mapping_data.id, - next_version, - VirtualFileVersionDescription { - creator: member_id.clone(), - description: description.clone(), - }, - ) - .await - { - Ok(_) => { - // Update version to sheet - mapping_data.version = next_version.clone(); - - // Persist - sheet.persist().await?; - - success.push(path.clone()); - mut_instance.write_msgpack(true).await?; // Success - } - Err(e) => { - mut_instance.write_msgpack(false).await?; // Fail - return Err(e.into()); - } - } - } - - 
Ok(UpdateTaskResult::Success(success)) -} - -type SyncVersionInfo = Option<( - VirtualFileVersion, - VirtualFileVersionDescription, - VirtualFileId, -)>; - -async fn proc_sync_tasks_local( - ctx: &ActionContext, - instance: Arc>, - member_id: &MemberId, - sheet_name: &SheetName, - relative_paths: Vec, - print_infos: bool, -) -> Result { - let workspace = try_get_local_workspace(ctx)?; - let local_output = try_get_local_output(ctx)?; - let mut mut_instance = instance.lock().await; - let mut success: Vec = Vec::new(); - - if print_infos && relative_paths.len() > 0 { - local_println!(local_output, "Syncing {} files...", relative_paths.len()); - } - - for path in relative_paths { - let Some((version, description, vfid)) = - mut_instance.read_msgpack::().await? - else { - continue; - }; - - // Generate a temp path - let temp_path = workspace - .local_path() - .join(CLIENT_FILE_TEMP_FILE.replace(TEMP_NAME, &VaultUuid::new_v4().to_string())); - - let copy_to = workspace.local_path().join(&path); - - // Read file - match mut_instance.read_file(&temp_path).await { - Ok(_) => { - if !temp_path.exists() { - continue; - } - } - Err(_) => { - continue; - } - } - - // Calc hash - let new_hash = match calc_sha1(&temp_path, 2048).await { - Ok(hash) => hash, - Err(_) => { - continue; - } - }; - - // Calc size - let new_size = match fs::metadata(&temp_path).await.map(|meta| meta.len()) { - Ok(size) => size, - Err(_) => { - continue; - } - }; - - // Write file - if copy_to.exists() { - if let Err(_) = fs::remove_file(©_to).await { - continue; - } - } else { - // Not exist, create directory - if let Some(path) = copy_to.clone().parent() { - fs::create_dir_all(path).await?; - } - } - if let Err(_) = fs::rename(&temp_path, ©_to).await { - continue; - } - - // Modify local sheet - let mut local_sheet = match workspace.local_sheet(member_id, sheet_name).await { - Ok(sheet) => sheet, - Err(_) => { - continue; - } - }; - - // Get or create mapping - let mapping = match 
local_sheet.mapping_data_mut(&path) { - Ok(m) => m, - Err(_) => { - // First download - let mut data = LocalMappingMetadata::default(); - data.set_mapping_vfid(vfid); - if let Err(_) = local_sheet.add_mapping(&path, data) { - continue; - } - match local_sheet.mapping_data_mut(&path) { - Ok(m) => m, - Err(_) => { - continue; - } - } - } - }; - - let time = SystemTime::now(); - mapping.set_hash_when_updated(new_hash.hash); - mapping.set_last_modifiy_check_result(false); // Mark not modified - mapping.set_version_when_updated(version); - mapping.set_version_desc_when_updated(description); - mapping.set_size_when_updated(new_size); - mapping.set_time_when_updated(time); - mapping.set_last_modifiy_check_time(time); - if let Err(_) = local_sheet.write().await { - continue; - } - - success.push(path.clone()); - - // Print success info - if print_infos { - local_println!(local_output, "↓ {}", path.display()); - } - } - Ok(SyncTaskResult::Success(success)) -} - -async fn proc_sync_tasks_remote( - ctx: &ActionContext, - instance: Arc>, - _member_id: &MemberId, - sheet_name: &SheetName, - relative_paths: Vec, -) -> Result { - let vault = try_get_vault(ctx)?; - let sheet = vault.sheet(sheet_name).await?; - let mut mut_instance = instance.lock().await; - let mut success: Vec = Vec::new(); - - for path in relative_paths { - // Get mapping - let Some(mapping) = sheet.mapping().get(&path) else { - mut_instance.write_msgpack::(None).await?; // (ready) - continue; - }; - // Get virtual file - let Ok(vf) = vault.virtual_file(&mapping.id) else { - mut_instance.write_msgpack::(None).await?; // (ready) - continue; - }; - // Read metadata and get real path - let vf_meta = &vf.read_meta().await?; - let real_path = vault.virtual_file_real_path(&mapping.id, &vf_meta.version_latest()); - let version = vf_meta.version_latest(); - mut_instance - .write_msgpack::(Some(( - version.clone(), - vf_meta.version_description(version).cloned().unwrap_or( - VirtualFileVersionDescription { - creator: 
MemberId::default(), - description: "".to_string(), - }, - ), - vf.id(), - ))) - .await?; // (ready) - if mut_instance.write_file(real_path).await.is_err() { - continue; - } else { - success.push(path); - } - } - Ok(SyncTaskResult::Success(success)) -} diff --git a/crates/vcs_actions/src/actions/user_actions.rs b/crates/vcs_actions/src/actions/user_actions.rs deleted file mode 100644 index dc0f71a..0000000 --- a/crates/vcs_actions/src/actions/user_actions.rs +++ /dev/null @@ -1,144 +0,0 @@ -use std::path::PathBuf; - -use action_system::{action::ActionContext, macros::action_gen}; -use serde::{Deserialize, Serialize}; -use tcp_connection::error::TcpTargetError; -use vcs_data::data::local::vault_modified::sign_vault_modified; - -use crate::actions::{ - auth_member, check_connection_instance, get_current_sheet_name, try_get_vault, -}; - -#[derive(Serialize, Deserialize)] -pub enum ChangeVirtualFileEditRightResult { - // Success - Success { - success_hold: Vec, - success_throw: Vec, - }, - - // Fail - AuthorizeFailed(String), - DoNothing, -} - -#[derive(Serialize, Deserialize, PartialEq, Clone)] -pub enum EditRightChangeBehaviour { - Hold, - Throw, -} - -/// The server part only checks: -/// 1. Whether the file exists -/// 2. 
Whether the file has no holder -/// If both conditions are met, send success information to the local client -/// -/// All version checks are handled locally -#[action_gen] -pub async fn change_virtual_file_edit_right_action( - ctx: ActionContext, - arguments: (Vec<(PathBuf, EditRightChangeBehaviour)>, bool), -) -> Result { - let instance = check_connection_instance(&ctx)?; - let (relative_paths, print_info) = arguments; - - // Auth Member - let (member_id, is_host_mode) = match auth_member(&ctx, instance).await { - Ok(id) => id, - Err(e) => { - return Ok(ChangeVirtualFileEditRightResult::AuthorizeFailed( - e.to_string(), - )); - } - }; - - // Check sheet - let (sheet_name, _is_ref_sheet) = - get_current_sheet_name(&ctx, instance, &member_id, true).await?; - - if ctx.is_proc_on_remote() { - let mut mut_instance = instance.lock().await; - let mut success_hold: Vec = Vec::new(); - let mut success_throw: Vec = Vec::new(); - let vault = try_get_vault(&ctx)?; - for (path, behaviour) in relative_paths { - let Ok(sheet) = vault.sheet(&sheet_name).await else { - continue; - }; - let Some(mapping) = sheet.mapping().get(&path) else { - continue; - }; - let Ok(has_edit_right) = vault - .has_virtual_file_edit_right(&member_id, &mapping.id) - .await - else { - continue; - }; - - // Hold file - if !has_edit_right && behaviour == EditRightChangeBehaviour::Hold { - match vault - .grant_virtual_file_edit_right(&member_id, &mapping.id) - .await - { - Ok(_) => { - success_hold.push(path.clone()); - } - Err(_) => continue, - } - } else - // Throw file - if (has_edit_right || is_host_mode) - && behaviour == EditRightChangeBehaviour::Throw - { - match vault.revoke_virtual_file_edit_right(&mapping.id).await { - Ok(_) => { - success_throw.push(path.clone()); - } - Err(_) => continue, - } - } - } - - // Write success list - mut_instance - .write_large_msgpack::<(Vec, Vec)>( - (success_hold.clone(), success_throw.clone()), - 4096u16, - ) - .await?; - return 
Ok(ChangeVirtualFileEditRightResult::Success { - success_hold, - success_throw, - }); - } - - if ctx.is_proc_on_local() { - let mut mut_instance = instance.lock().await; - let (success_hold, success_throw) = mut_instance - .read_large_msgpack::<(Vec, Vec)>(4096u16) - .await?; - - // If there are any successful items, mark as modified - if success_hold.len() + success_throw.len() > 0 { - sign_vault_modified(true).await; - } - - // Print info - if print_info { - success_hold - .iter() - .for_each(|s| println!("--> {}", s.display())); - success_throw - .iter() - .for_each(|s| println!("<-- {}", s.display())); - } - - return Ok(ChangeVirtualFileEditRightResult::Success { - success_hold, - success_throw, - }); - } - - Ok(ChangeVirtualFileEditRightResult::DoNothing) -} diff --git a/crates/vcs_actions/src/actions/vault_actions.rs b/crates/vcs_actions/src/actions/vault_actions.rs deleted file mode 100644 index 8b13789..0000000 --- a/crates/vcs_actions/src/actions/vault_actions.rs +++ /dev/null @@ -1 +0,0 @@ - diff --git a/crates/vcs_actions/src/connection.rs b/crates/vcs_actions/src/connection.rs deleted file mode 100644 index 918f93c..0000000 --- a/crates/vcs_actions/src/connection.rs +++ /dev/null @@ -1,3 +0,0 @@ -pub mod action_service; -pub mod error; -pub mod protocol; diff --git a/crates/vcs_actions/src/connection/action_service.rs b/crates/vcs_actions/src/connection/action_service.rs deleted file mode 100644 index f137126..0000000 --- a/crates/vcs_actions/src/connection/action_service.rs +++ /dev/null @@ -1,221 +0,0 @@ -use std::{ - env::set_current_dir, - net::SocketAddr, - path::PathBuf, - sync::Arc, - time::{Duration, Instant}, -}; - -use action_system::{action::ActionContext, action_pool::ActionPool}; -use cfg_file::config::ConfigFile; -use log::{debug, error, info, warn}; -use tcp_connection::{error::TcpTargetError, instance::ConnectionInstance}; -use tokio::{ - net::{TcpListener, TcpStream}, - select, signal, spawn, - sync::mpsc, -}; -use 
vcs_data::data::vault::{Vault, config::VaultConfig}; - -use crate::{ - connection::protocol::RemoteActionInvoke, registry::server_registry::server_action_pool, -}; - -// Start the server with a Vault using the specified directory -pub async fn server_entry( - vault_path: impl Into, - port_override: u16, -) -> Result<(), TcpTargetError> { - let vault_path = vault_path.into(); - - // Set to vault path - set_current_dir(&vault_path).map_err(|e| TcpTargetError::Io(e.to_string()))?; - - // Read the vault cfg - let vault_cfg = VaultConfig::read().await?; - - // Create TCPListener - let listener = create_tcp_listener(&vault_cfg, port_override).await?; - - // Initialize the vault - let vault: Arc = init_vault(vault_cfg, vault_path).await?; - - // Lock the vault - vault - .lock() - .map_err(|e| TcpTargetError::Locked(e.to_string()))?; - - // Create ActionPool - let action_pool: Arc = Arc::new(server_action_pool()); - - // Start the server - let (_shutdown_rx, future) = build_server_future(vault.clone(), action_pool.clone(), listener); - future.await?; // Start and block until shutdown - - // Unlock the vault - vault.unlock()?; - - Ok(()) -} - -async fn create_tcp_listener( - cfg: &VaultConfig, - port_override: u16, -) -> Result { - let local_bind_addr = cfg.server_config().local_bind(); - let port = if port_override > 0 { - port_override // Override -> PORT > 0 - } else { - cfg.server_config().port() // Default -> Port = 0 - }; - let bind_port = port; - let sock_addr = SocketAddr::new(*local_bind_addr, bind_port); - let listener = TcpListener::bind(sock_addr).await?; - - Ok(listener) -} - -async fn init_vault(cfg: VaultConfig, path: PathBuf) -> Result, TcpTargetError> { - // Init and create the vault - let Some(vault) = Vault::init(cfg, path) else { - return Err(TcpTargetError::NotFound("Vault not found".to_string())); - }; - let vault: Arc = Arc::new(vault); - - Ok(vault) -} - -fn build_server_future( - vault: Arc, - action_pool: Arc, - listener: TcpListener, -) -> ( - 
mpsc::Sender<()>, - impl std::future::Future>, -) { - let (tx, mut rx) = mpsc::channel::(100); - let (shutdown_tx, mut shutdown_rx) = mpsc::channel::<()>(1); - let mut active_connections = 0; - let mut shutdown_requested = false; - - // Spawn task to handle Ctrl+C with rapid exit detection - let shutdown_tx_clone = shutdown_tx.clone(); - spawn(async move { - let mut ctrl_c_count = 0; - let mut last_ctrl_c_time = Instant::now(); - - while let Ok(()) = signal::ctrl_c().await { - let now = Instant::now(); - - // Reset counter if more than 5 seconds have passed - if now.duration_since(last_ctrl_c_time) > Duration::from_secs(5) { - ctrl_c_count = 0; - } - - ctrl_c_count += 1; - last_ctrl_c_time = now; - - let _ = shutdown_tx_clone.send(()).await; - - // If 3 Ctrl+C within 5 seconds, exit immediately - if ctrl_c_count >= 3 { - info!("Shutdown. (3/3)"); - std::process::exit(0); - } else { - info!("Ctrl + C to force shutdown. ({} / 3)", ctrl_c_count); - } - } - }); - - let future = async move { - loop { - select! { - // Accept new connections - accept_result = listener.accept(), if !shutdown_requested => { - match accept_result { - Ok((stream, _addr)) => { - debug!("New connection. (now {})", active_connections); - let _ = tx.send(1).await; - - let vault_clone = vault.clone(); - let action_pool_clone = action_pool.clone(); - let tx_clone = tx.clone(); - - spawn(async move { - process_connection(stream, vault_clone, action_pool_clone).await; - debug!("A connection closed. 
(now {})", active_connections); - let _ = tx_clone.send(-1).await; - }); - } - Err(_) => { - continue; - } - } - } - - // Handle connection count updates - Some(count_change) = rx.recv() => { - active_connections = (active_connections as i32 + count_change) as usize; - - // Check if we should shutdown after all connections are done - if shutdown_requested && active_connections == 0 { - break; - } - } - - // Handle shutdown signal - _ = shutdown_rx.recv() => { - shutdown_requested = true; - // If no active connections, break immediately - if active_connections == 0 { - info!("No active connections. Shutting down."); - break; - } else { - warn!("Cannot shutdown while active connections exist! ({} active)", active_connections); - } - } - } - } - - Ok(()) - }; - - (shutdown_tx, future) -} - -async fn process_connection(stream: TcpStream, vault: Arc, action_pool: Arc) { - // Setup connection instance - let mut instance = ConnectionInstance::from(stream); - - // Read action name and action arguments - let msg = match instance.read_msgpack::().await { - Ok(msg) => msg, - Err(e) => { - error!("Failed to read action message: {}", e); - return; - } - }; - - // Build context - let ctx: ActionContext = ActionContext::remote().insert_instance(instance); - - // Insert vault into context - let ctx = ctx.with_arc_data(vault); - - info!( - "Process action `{}` with argument `{}`", - msg.action_name, msg.action_args_json - ); - - // Process action - let result = action_pool - .process_json(&msg.action_name, ctx, msg.action_args_json) - .await; - - match result { - Ok(_result_json) => {} - Err(e) => { - warn!("Failed to process action `{}`: {}", msg.action_name, e); - } - } -} diff --git a/crates/vcs_actions/src/connection/error.rs b/crates/vcs_actions/src/connection/error.rs deleted file mode 100644 index 241c16e..0000000 --- a/crates/vcs_actions/src/connection/error.rs +++ /dev/null @@ -1,14 +0,0 @@ -use std::io; -use thiserror::Error; - -#[derive(Error, Debug, Clone)] -pub enum 
ConnectionError { - #[error("I/O error: {0}")] - Io(String), -} - -impl From for ConnectionError { - fn from(error: io::Error) -> Self { - ConnectionError::Io(error.to_string()) - } -} diff --git a/crates/vcs_actions/src/connection/protocol.rs b/crates/vcs_actions/src/connection/protocol.rs deleted file mode 100644 index 2cebe79..0000000 --- a/crates/vcs_actions/src/connection/protocol.rs +++ /dev/null @@ -1,7 +0,0 @@ -use serde::{Deserialize, Serialize}; - -#[derive(Default, Clone, Serialize, Deserialize)] -pub struct RemoteActionInvoke { - pub action_name: String, - pub action_args_json: String, -} diff --git a/crates/vcs_actions/src/lib.rs b/crates/vcs_actions/src/lib.rs deleted file mode 100644 index 2f7cbe4..0000000 --- a/crates/vcs_actions/src/lib.rs +++ /dev/null @@ -1,3 +0,0 @@ -pub mod actions; -pub mod connection; -pub mod registry; diff --git a/crates/vcs_actions/src/registry.rs b/crates/vcs_actions/src/registry.rs deleted file mode 100644 index ceec1a1..0000000 --- a/crates/vcs_actions/src/registry.rs +++ /dev/null @@ -1,2 +0,0 @@ -pub mod client_registry; -pub mod server_registry; diff --git a/crates/vcs_actions/src/registry/client_registry.rs b/crates/vcs_actions/src/registry/client_registry.rs deleted file mode 100644 index 05cb7f1..0000000 --- a/crates/vcs_actions/src/registry/client_registry.rs +++ /dev/null @@ -1,123 +0,0 @@ -use std::sync::Arc; - -use action_system::{action::ActionContext, action_pool::ActionPool}; -use cfg_file::config::ConfigFile; -use tcp_connection::error::TcpTargetError; -use vcs_data::data::{ - local::{LocalWorkspace, config::LocalConfig}, - user::UserDirectory, -}; - -use crate::{ - actions::{ - local_actions::{ - register_set_upstream_vault_action, register_update_to_latest_info_action, - }, - sheet_actions::{ - register_drop_sheet_action, register_edit_mapping_action, register_make_sheet_action, - register_merge_share_mapping_action, register_share_mapping_action, - }, - track_action::register_track_file_action, - 
user_actions::register_change_virtual_file_edit_right_action, - }, - connection::protocol::RemoteActionInvoke, -}; - -fn register_actions(pool: &mut ActionPool) { - // Pool register here - - // Local Actions - register_set_upstream_vault_action(pool); - register_update_to_latest_info_action(pool); - - // Sheet Actions - register_make_sheet_action(pool); - register_drop_sheet_action(pool); - register_edit_mapping_action(pool); - - // Share / Merge Share Actions - register_share_mapping_action(pool); - register_merge_share_mapping_action(pool); - - // Track Action - register_track_file_action(pool); - - // User Actions - register_change_virtual_file_edit_right_action(pool); -} - -pub fn client_action_pool() -> ActionPool { - // Create pool - let mut pool = ActionPool::new(); - - // Register actions - register_actions(&mut pool); - - // Add process events - pool.set_on_proc_begin(|ctx, args| Box::pin(on_proc_begin(ctx, args))); - - // Return - pool -} - -async fn on_proc_begin( - ctx: &mut ActionContext, - _args: &(dyn std::any::Any + Send + Sync), -) -> Result<(), TcpTargetError> { - // Is ctx remote - let is_remote = ctx.is_remote_action(); - - // Action name and arguments - let action_name = ctx.action_name().to_string(); - let action_args_json = ctx.action_args_json().clone(); - - // Insert LocalWorkspace Arc - let Ok(local_config) = LocalConfig::read().await else { - return Err(TcpTargetError::NotFound( - "The current directory does not have a local workspace".to_string(), - )); - }; - let local_workspace = match LocalWorkspace::init_current_dir(local_config) { - Some(workspace) => workspace, - None => { - return Err(TcpTargetError::NotFound( - "Failed to initialize local workspace.".to_string(), - )); - } - }; - let local_workspace_arc = Arc::new(local_workspace); - ctx.insert_arc_data(local_workspace_arc); - - // Insert UserDirectory Arc - let Some(user_directory) = UserDirectory::current_cfg_dir() else { - return Err(TcpTargetError::NotFound( - "The user 
directory does not exist.".to_string(), - )); - }; - - let user_directory_arc = Arc::new(user_directory); - ctx.insert_arc_data(user_directory_arc); - - // Get instance - let Some(instance) = ctx.instance() else { - return Err(TcpTargetError::Unsupported( - "Missing ConnectionInstance in current context, this ActionPool does not support this call" - .to_string())); - }; - - // If it's remote, invoke action at server - if is_remote { - // Build protocol message - let msg = RemoteActionInvoke { - action_name, - action_args_json, - }; - - // Send - let mut instance = instance.lock().await; - instance.write_msgpack(&msg).await?; - } - - // Return OK, wait for client to execute Action locally - Ok(()) -} diff --git a/crates/vcs_actions/src/registry/server_registry.rs b/crates/vcs_actions/src/registry/server_registry.rs deleted file mode 100644 index 356e640..0000000 --- a/crates/vcs_actions/src/registry/server_registry.rs +++ /dev/null @@ -1,36 +0,0 @@ -use action_system::action_pool::ActionPool; - -use crate::actions::{ - local_actions::{register_set_upstream_vault_action, register_update_to_latest_info_action}, - sheet_actions::{ - register_drop_sheet_action, register_edit_mapping_action, register_make_sheet_action, - register_merge_share_mapping_action, register_share_mapping_action, - }, - track_action::register_track_file_action, - user_actions::register_change_virtual_file_edit_right_action, -}; - -pub fn server_action_pool() -> ActionPool { - let mut pool = ActionPool::new(); - - // Local Actions - register_set_upstream_vault_action(&mut pool); - register_update_to_latest_info_action(&mut pool); - - // Sheet Actions - register_make_sheet_action(&mut pool); - register_drop_sheet_action(&mut pool); - register_edit_mapping_action(&mut pool); - - // Share / Merge Share Actions - register_share_mapping_action(&mut pool); - register_merge_share_mapping_action(&mut pool); - - // Track Action - register_track_file_action(&mut pool); - - // User Actions - 
register_change_virtual_file_edit_right_action(&mut pool); - - pool -} diff --git a/crates/vcs_data/Cargo.toml b/crates/vcs_data/Cargo.toml deleted file mode 100644 index 1971b6a..0000000 --- a/crates/vcs_data/Cargo.toml +++ /dev/null @@ -1,40 +0,0 @@ -[package] -name = "vcs_data" -edition = "2024" -version.workspace = true - -[dependencies] - -# Utils -cfg_file = { path = "../utils/cfg_file", features = ["default"] } -data_struct = { path = "../utils/data_struct" } -sha1_hash = { path = "../utils/sha1_hash" } -tcp_connection = { path = "../utils/tcp_connection" } -string_proc = { path = "../utils/string_proc" } - -# Core -action_system = { path = "../system_action" } -vcs_docs = { path = "../vcs_docs" } - -# Random -rand = "0.9.2" - -# Identity -uuid = { version = "1.18.1", features = ["v4", "serde"] } -whoami = "1.6.1" - -# Serialization -serde = { version = "1.0.228", features = ["derive"] } - -# Async & Networking -tokio = { version = "1.48.0", features = ["full"] } - -# Filesystem -dirs = "6.0.0" -walkdir = "2.5.0" - -# Time -chrono = "0.4.42" - -# Windows API -winapi = { version = "0.3.9", features = ["fileapi", "winbase", "winnt"] } diff --git a/crates/vcs_data/src/constants.rs b/crates/vcs_data/src/constants.rs deleted file mode 100644 index 3d839a6..0000000 --- a/crates/vcs_data/src/constants.rs +++ /dev/null @@ -1,118 +0,0 @@ -// ------------------------------------------------------------------------------------- - -// Project -pub const PATH_TEMP: &str = "./.temp/"; - -// Default Port -pub const PORT: u16 = 25331; - -// Vault Host Name -pub const VAULT_HOST_NAME: &str = "host"; - -// ------------------------------------------------------------------------------------- - -// Suffix -pub const SERVER_SUFFIX_SHEET_FILE: &str = ".st"; -pub const SERVER_SUFFIX_SHEET_FILE_NO_DOT: &str = "st"; - -pub const SERVER_SUFFIX_SHEET_SHARE_FILE: &str = ".sre"; -pub const SERVER_SUFFIX_SHEET_SHARE_FILE_NO_DOT: &str = "sre"; - -pub const SERVER_SUFFIX_MEMBER_INFO: &str 
= ".json"; -pub const SERVER_SUFFIX_MEMBER_INFO_NO_DOT: &str = "json"; - -pub const SERVER_SUFFIX_VF_META: &str = ".vf"; -pub const SERVER_SUFFIX_VF_META_NO_DOT: &str = "vf"; - -pub const CLIENT_SUFFIX_LATEST_INFO: &str = ".up"; -pub const CLIENT_SUFFIX_LATEST_INFO_NO_DOT: &str = "up"; - -pub const CLIENT_SUFFIX_LATEST_DATA: &str = ".upf"; -pub const CLIENT_SUFFIX_LATEST_DATA_NO_DOT: &str = "upf"; - -pub const CLIENT_SUFFIX_LOCAL_SHEET_FILE: &str = ".lst"; -pub const CLIENT_SUFFIX_LOCAL_SHEET_FILE_NO_DOT: &str = "lst"; - -pub const CLIENT_SUFFIX_CACHED_SHEET_FILE: &str = ".st"; -pub const CLIENT_SUFFIX_CACHED_SHEET_FILE_NO_DOT: &str = "st"; - -// ------------------------------------------------------------------------------------- - -// Server -// Server - Vault (Main) -pub const SERVER_FILE_VAULT: &str = "./vault.toml"; - -// Server - Sheets -pub const REF_SHEET_NAME: &str = "ref"; -pub const SERVER_PATH_SHEETS: &str = "./sheets/"; -pub const SERVER_PATH_SHARES: &str = "./sheets/shares/{sheet_name}/"; -pub const SERVER_FILE_SHEET: &str = "./sheets/{sheet_name}.st"; -pub const SERVER_FILE_SHEET_SHARE: &str = "./sheets/shares/{sheet_name}/{share_id}.sre"; - -// Server - Members -pub const SERVER_PATH_MEMBERS: &str = "./members/"; -pub const SERVER_PATH_MEMBER_PUB: &str = "./key/"; -pub const SERVER_FILE_MEMBER_INFO: &str = "./members/{member_id}.json"; -pub const SERVER_FILE_MEMBER_PUB: &str = "./key/{member_id}.pem"; - -// Server - Virtual File Storage -pub const SERVER_PATH_VF_TEMP: &str = "./.temp/{temp_name}"; -pub const SERVER_PATH_VF_ROOT: &str = "./storage/"; -pub const SERVER_PATH_VF_STORAGE: &str = "./storage/{vf_index}/{vf_id}/"; -pub const SERVER_FILE_VF_VERSION_INSTANCE: &str = "./storage/{vf_index}/{vf_id}/{vf_version}.rf"; -pub const SERVER_FILE_VF_META: &str = "./storage/{vf_index}/{vf_id}/meta.vf"; -pub const SERVER_NAME_VF_META: &str = "meta.vf"; - -// Server - Updates -pub const SERVER_FILE_UPDATES: &str = "./.updates.txt"; - -// Server - Service 
-pub const SERVER_FILE_LOCKFILE: &str = "./.lock"; - -// Server - Documents -pub const SERVER_FILE_README: &str = "./README.md"; - -// ------------------------------------------------------------------------------------- - -// Client -pub const CLIENT_PATH_WORKSPACE_ROOT: &str = "./.jv/"; -pub const CLIENT_FOLDER_WORKSPACE_ROOT_NAME: &str = ".jv"; - -// Client - Workspace (Main) -pub const CLIENT_FILE_WORKSPACE: &str = "./.jv/workspace.toml"; - -// Client - Latest Information -pub const CLIENT_FILE_LATEST_INFO: &str = "./.jv/latest/{account}.up"; -pub const CLIENT_FILE_LATEST_DATA: &str = "./.jv/latest/{account}.upf"; - -// Client - Local -pub const CLIENT_PATH_LOCAL_DRAFT: &str = "./.jv/drafts/{account}/{sheet_name}/"; -pub const CLIENT_PATH_LOCAL_SHEET: &str = "./.jv/sheets/local/"; -pub const CLIENT_FILE_LOCAL_SHEET: &str = "./.jv/sheets/local/{account}/{sheet_name}.lst"; -pub const CLIENT_PATH_CACHED_SHEET: &str = "./.jv/sheets/cached/"; -pub const CLIENT_FILE_CACHED_SHEET: &str = "./.jv/sheets/cached/{sheet_name}.st"; - -pub const CLIENT_FILE_LOCAL_SHEET_NOSET: &str = "./.jv/.temp/wrong.json"; -pub const CLIENT_FILE_MEMBER_HELD_NOSET: &str = "./.jv/.temp/wrong.json"; -pub const CLIENT_FILE_LATEST_INFO_NOSET: &str = "./.jv/.temp/wrong.json"; - -// Client - Other -pub const CLIENT_FILE_IGNOREFILES: &str = "IGNORE_RULES.toml"; -pub const CLIENT_FILE_TODOLIST: &str = "./SETUP.md"; -pub const CLIENT_FILE_GITIGNORE: &str = "./.jv/.gitignore"; -pub const CLIENT_CONTENT_GITIGNORE: &str = "# Git support for JVCS Workspace - -# Ignore cached datas -/sheets/cached/ -/latest/ - -.vault_modified"; -pub const CLIENT_FILE_VAULT_MODIFIED: &str = "./.jv/.vault_modified"; -pub const CLIENT_FILE_TEMP_FILE: &str = "./.jv/.temp/download/{temp_name}"; - -// ------------------------------------------------------------------------------------- - -// User - Verify (Documents path) -pub const USER_FILE_ACCOUNTS: &str = "./accounts/"; -pub const USER_FILE_KEY: &str = 
"./accounts/{self_id}_private.pem"; -pub const USER_FILE_MEMBER: &str = "./accounts/{self_id}.toml"; diff --git a/crates/vcs_data/src/current.rs b/crates/vcs_data/src/current.rs deleted file mode 100644 index 209c0cc..0000000 --- a/crates/vcs_data/src/current.rs +++ /dev/null @@ -1,84 +0,0 @@ -use crate::constants::*; -use std::io::{self, Error}; -use std::{env::set_current_dir, path::PathBuf}; - -/// Find the nearest vault or local workspace and correct the `current_dir` to it -pub fn correct_current_dir() -> Result<(), io::Error> { - if let Some(local_workspace) = current_local_path() { - set_current_dir(local_workspace)?; - return Ok(()); - } - if let Some(vault) = current_vault_path() { - set_current_dir(vault)?; - return Ok(()); - } - Err(Error::new( - io::ErrorKind::NotFound, - "Could not find any vault or local workspace!", - )) -} - -/// Get the nearest Vault directory from `current_dir` -pub fn current_vault_path() -> Option { - let current_dir = std::env::current_dir().ok()?; - find_vault_path(current_dir) -} - -/// Get the nearest local workspace from `current_dir` -pub fn current_local_path() -> Option { - let current_dir = std::env::current_dir().ok()?; - find_local_path(current_dir) -} - -/// Get the nearest Vault directory from the specified path -pub fn find_vault_path(path: impl Into) -> Option { - let mut current_path = path.into(); - let vault_file = SERVER_FILE_VAULT; - - loop { - let vault_toml_path = current_path.join(vault_file); - if vault_toml_path.exists() { - return Some(current_path); - } - - if let Some(parent) = current_path.parent() { - current_path = parent.to_path_buf(); - } else { - break; - } - } - - None -} - -/// Get the nearest local workspace from the specified path -pub fn find_local_path(path: impl Into) -> Option { - let mut current_path = path.into(); - let workspace_dir = CLIENT_PATH_WORKSPACE_ROOT; - - loop { - let jvc_path = current_path.join(workspace_dir); - if jvc_path.exists() { - return Some(current_path); - } - - 
if let Some(parent) = current_path.parent() { - current_path = parent.to_path_buf(); - } else { - break; - } - } - - None -} - -/// Get the system's document directory and join with the appropriate application name -pub fn current_cfg_dir() -> Option { - dirs::config_local_dir().map(|path| { - if cfg!(target_os = "linux") { - path.join("jvcs") - } else { - path.join("JustEnoughVCS") - } - }) -} diff --git a/crates/vcs_data/src/data.rs b/crates/vcs_data/src/data.rs deleted file mode 100644 index ed9383a..0000000 --- a/crates/vcs_data/src/data.rs +++ /dev/null @@ -1,5 +0,0 @@ -pub mod local; -pub mod member; -pub mod sheet; -pub mod user; -pub mod vault; diff --git a/crates/vcs_data/src/data/local.rs b/crates/vcs_data/src/data/local.rs deleted file mode 100644 index 67f3943..0000000 --- a/crates/vcs_data/src/data/local.rs +++ /dev/null @@ -1,269 +0,0 @@ -use std::{ - collections::HashMap, - env::current_dir, - path::{Path, PathBuf}, - sync::Arc, -}; - -use cfg_file::config::ConfigFile; -use string_proc::format_path::format_path; -use tokio::{fs, sync::Mutex}; -use vcs_docs::docs::READMES_LOCAL_WORKSPACE_TODOLIST; - -use crate::{ - constants::{ - CLIENT_CONTENT_GITIGNORE, CLIENT_FILE_GITIGNORE, CLIENT_FILE_LOCAL_SHEET, - CLIENT_FILE_TODOLIST, CLIENT_FILE_WORKSPACE, CLIENT_FOLDER_WORKSPACE_ROOT_NAME, - CLIENT_PATH_LOCAL_SHEET, CLIENT_SUFFIX_LOCAL_SHEET_FILE, - }, - current::{current_local_path, find_local_path}, - data::{ - local::{ - config::LocalConfig, - local_sheet::{LocalSheet, LocalSheetData, LocalSheetPathBuf}, - }, - member::MemberId, - sheet::SheetName, - }, -}; - -pub mod align; -pub mod cached_sheet; -pub mod config; -pub mod latest_file_data; -pub mod latest_info; -pub mod local_files; -pub mod local_sheet; -pub mod vault_modified; -pub mod workspace_analyzer; - -const SHEET_NAME: &str = "{sheet_name}"; -const ACCOUNT_NAME: &str = "{account}"; - -pub struct LocalWorkspace { - config: Arc>, - local_path: PathBuf, -} - -impl LocalWorkspace { - /// Get the 
path of the local workspace. - pub fn local_path(&self) -> &PathBuf { - &self.local_path - } - - /// Initialize local workspace. - pub fn init(config: LocalConfig, local_path: impl Into) -> Option { - let local_path = find_local_path(local_path)?; - Some(Self { - config: Arc::new(Mutex::new(config)), - local_path, - }) - } - - /// Initialize local workspace in the current directory. - pub fn init_current_dir(config: LocalConfig) -> Option { - let local_path = current_local_path()?; - Some(Self { - config: Arc::new(Mutex::new(config)), - local_path, - }) - } - - /// Setup local workspace - pub async fn setup_local_workspace( - local_path: impl Into, - ) -> Result<(), std::io::Error> { - let local_path: PathBuf = local_path.into(); - - // Ensure directory is empty - if local_path.exists() && local_path.read_dir()?.next().is_some() { - return Err(std::io::Error::new( - std::io::ErrorKind::DirectoryNotEmpty, - "DirectoryNotEmpty", - )); - } - - // 1. Setup config - let config = LocalConfig::default(); - LocalConfig::write_to(&config, local_path.join(CLIENT_FILE_WORKSPACE)).await?; - - // 2. Setup SETUP.md - let readme_content = READMES_LOCAL_WORKSPACE_TODOLIST.trim().to_string(); - fs::write(local_path.join(CLIENT_FILE_TODOLIST), readme_content).await?; - - // 3. Setup .gitignore - fs::write( - local_path.join(CLIENT_FILE_GITIGNORE), - CLIENT_CONTENT_GITIGNORE, - ) - .await?; - - // On Windows, set the .jv directory as hidden - let jv_dir = local_path.join(CLIENT_FOLDER_WORKSPACE_ROOT_NAME); - let _ = hide_folder::hide_folder(&jv_dir); - - Ok(()) - } - - /// Get a reference to the local configuration. - pub fn config(&self) -> Arc> { - self.config.clone() - } - - /// Setup local workspace in current directory - pub async fn setup_local_workspace_current_dir() -> Result<(), std::io::Error> { - Self::setup_local_workspace(current_dir()?).await?; - Ok(()) - } - - /// Get the path to a local sheet. 
- pub fn local_sheet_path(&self, member: &MemberId, sheet: &SheetName) -> PathBuf { - self.local_path.join( - CLIENT_FILE_LOCAL_SHEET - .replace(ACCOUNT_NAME, member) - .replace(SHEET_NAME, sheet), - ) - } - - /// Read or initialize a local sheet. - pub async fn local_sheet( - &self, - member: &MemberId, - sheet: &SheetName, - ) -> Result, std::io::Error> { - let local_sheet_path = self.local_sheet_path(member, sheet); - - if !local_sheet_path.exists() { - let sheet_data = LocalSheetData { - mapping: HashMap::new(), - vfs: HashMap::new(), - }; - LocalSheetData::write_to(&sheet_data, local_sheet_path).await?; - return Ok(LocalSheet { - local_workspace: self, - member: member.clone(), - sheet_name: sheet.clone(), - data: sheet_data, - }); - } - - let data = LocalSheetData::read_from(&local_sheet_path).await?; - let local_sheet = LocalSheet { - local_workspace: self, - member: member.clone(), - sheet_name: sheet.clone(), - data, - }; - - Ok(local_sheet) - } - - /// Collect all theet names - pub async fn local_sheet_paths(&self) -> Result, std::io::Error> { - let local_sheet_path = self.local_path.join(CLIENT_PATH_LOCAL_SHEET); - let mut sheet_paths = Vec::new(); - - async fn collect_sheet_paths( - dir: &Path, - suffix: &str, - paths: &mut Vec, - ) -> Result<(), std::io::Error> { - if dir.is_dir() { - let mut entries = fs::read_dir(dir).await?; - while let Some(entry) = entries.next_entry().await? 
{ - let path = entry.path(); - - if path.is_dir() { - Box::pin(collect_sheet_paths(&path, suffix, paths)).await?; - } else if path.is_file() - && let Some(extension) = path.extension() - && extension == suffix.trim_start_matches('.') - { - let formatted_path = format_path(path)?; - paths.push(formatted_path); - } - } - } - Ok(()) - } - - collect_sheet_paths( - &local_sheet_path, - CLIENT_SUFFIX_LOCAL_SHEET_FILE, - &mut sheet_paths, - ) - .await?; - Ok(sheet_paths) - } -} - -mod hide_folder { - use std::io; - use std::path::Path; - - #[cfg(windows)] - use std::os::windows::ffi::OsStrExt; - #[cfg(windows)] - use winapi::um::fileapi::{GetFileAttributesW, INVALID_FILE_ATTRIBUTES, SetFileAttributesW}; - - pub fn hide_folder(path: &Path) -> io::Result<()> { - if !path.is_dir() { - return Err(io::Error::new( - io::ErrorKind::InvalidInput, - "Path must be a directory", - )); - } - - if let Some(file_name) = path.file_name().and_then(|n| n.to_str()) { - if !file_name.starts_with('.') { - return Err(io::Error::new( - io::ErrorKind::InvalidInput, - "Directory name must start with '.'", - )); - } - } else { - return Err(io::Error::new( - io::ErrorKind::InvalidInput, - "Invalid directory name", - )); - } - - hide_folder_impl(path) - } - - #[cfg(windows)] - fn hide_folder_impl(path: &Path) -> io::Result<()> { - // Convert to Windows wide string format - let path_str: Vec = path.as_os_str().encode_wide().chain(Some(0)).collect(); - - // Get current attributes - let attrs = unsafe { GetFileAttributesW(path_str.as_ptr()) }; - if attrs == INVALID_FILE_ATTRIBUTES { - return Err(io::Error::last_os_error()); - } - - // Add hidden attribute flag - let new_attrs = attrs | winapi::um::winnt::FILE_ATTRIBUTE_HIDDEN; - - // Set new attributes - let success = unsafe { SetFileAttributesW(path_str.as_ptr(), new_attrs) }; - if success == 0 { - return Err(io::Error::last_os_error()); - } - - Ok(()) - } - - #[cfg(unix)] - fn hide_folder_impl(_path: &Path) -> io::Result<()> { - Ok(()) - } - - 
#[cfg(not(any(windows, unix)))] - fn hide_folder_impl(_path: &Path) -> io::Result<()> { - Err(io::Error::new( - io::ErrorKind::Unsupported, - "Unsupported operating system", - )) - } -} diff --git a/crates/vcs_data/src/data/local/align.rs b/crates/vcs_data/src/data/local/align.rs deleted file mode 100644 index b72804c..0000000 --- a/crates/vcs_data/src/data/local/align.rs +++ /dev/null @@ -1,110 +0,0 @@ -use std::{ - collections::{HashMap, HashSet}, - path::PathBuf, -}; - -use data_struct::data_sort::quick_sort_with_cmp; - -use crate::data::local::workspace_analyzer::AnalyzeResult; - -pub type AlignTaskName = String; -pub type AlignPathBuf = PathBuf; -pub type AlignLostPathBuf = PathBuf; -pub type AlignCreatedPathBuf = PathBuf; - -pub struct AlignTasks { - pub created: Vec<(AlignTaskName, AlignPathBuf)>, - pub lost: Vec<(AlignTaskName, AlignPathBuf)>, - pub moved: Vec<(AlignTaskName, (AlignLostPathBuf, AlignCreatedPathBuf))>, - pub erased: Vec<(AlignTaskName, AlignPathBuf)>, -} - -impl AlignTasks { - pub fn clone_from_analyze_result(result: &AnalyzeResult) -> Self { - AlignTasks { - created: path_hash_set_sort_helper(result.created.clone(), "created"), - lost: path_hash_set_sort_helper(result.lost.clone(), "lost"), - moved: path_hash_map_sort_helper(result.moved.clone(), "moved"), - erased: path_hash_set_sort_helper(result.erased.clone(), "erased"), - } - } - - pub fn from_analyze_result(result: AnalyzeResult) -> Self { - AlignTasks { - created: path_hash_set_sort_helper(result.created, "created"), - lost: path_hash_set_sort_helper(result.lost, "lost"), - moved: path_hash_map_sort_helper(result.moved, "moved"), - erased: path_hash_set_sort_helper(result.erased, "erased"), - } - } -} - -fn path_hash_set_sort_helper( - hash_set: HashSet, - prefix: impl Into, -) -> Vec<(String, PathBuf)> { - let prefix_str = prefix.into(); - let mut vec: Vec<(String, PathBuf)> = hash_set - .into_iter() - .map(|path| { - let hash = sha1_hash::calc_sha1_string(path.to_string_lossy()); - 
let hash_prefix: String = hash.chars().take(8).collect(); - let name = format!("{}:{}", prefix_str, hash_prefix); - (name, path) - }) - .collect(); - - quick_sort_with_cmp(&mut vec, false, |a, b| { - // Compare by path depth first - let a_depth = a.1.components().count(); - let b_depth = b.1.components().count(); - - if a_depth != b_depth { - return if a_depth < b_depth { -1 } else { 1 }; - } - - // If same depth, compare lexicographically - match a.1.cmp(&b.1) { - std::cmp::Ordering::Less => -1, - std::cmp::Ordering::Equal => 0, - std::cmp::Ordering::Greater => 1, - } - }); - - vec -} - -fn path_hash_map_sort_helper( - hash_map: HashMap, - prefix: impl Into, -) -> Vec<(String, (PathBuf, PathBuf))> { - let prefix_str = prefix.into(); - let mut vec: Vec<(String, (PathBuf, PathBuf))> = hash_map - .into_values() - .map(|(path1, path2)| { - let hash = sha1_hash::calc_sha1_string(path1.to_string_lossy()); - let hash_prefix: String = hash.chars().take(8).collect(); - let name = format!("{}:{}", prefix_str, hash_prefix); - (name, (path1, path2)) - }) - .collect(); - - quick_sort_with_cmp(&mut vec, false, |a, b| { - // Compare by first PathBuf's path depth first - let a_depth = a.1.0.components().count(); - let b_depth = b.1.0.components().count(); - - if a_depth != b_depth { - return if a_depth < b_depth { -1 } else { 1 }; - } - - // If same depth, compare lexicographically by first PathBuf - match a.1.0.cmp(&b.1.0) { - std::cmp::Ordering::Less => -1, - std::cmp::Ordering::Equal => 0, - std::cmp::Ordering::Greater => 1, - } - }); - - vec -} diff --git a/crates/vcs_data/src/data/local/cached_sheet.rs b/crates/vcs_data/src/data/local/cached_sheet.rs deleted file mode 100644 index 39f9814..0000000 --- a/crates/vcs_data/src/data/local/cached_sheet.rs +++ /dev/null @@ -1,94 +0,0 @@ -use std::{io::Error, path::PathBuf}; - -use cfg_file::config::ConfigFile; -use string_proc::{format_path::format_path, snake_case}; -use tokio::fs; - -use crate::{ - constants::{ - 
CLIENT_FILE_CACHED_SHEET, CLIENT_PATH_CACHED_SHEET, CLIENT_SUFFIX_CACHED_SHEET_FILE, - }, - current::current_local_path, - data::sheet::{SheetData, SheetName}, -}; - -pub type CachedSheetPathBuf = PathBuf; - -const SHEET_NAME: &str = "{sheet_name}"; -const ACCOUNT_NAME: &str = "{account}"; - -/// # Cached Sheet -/// The cached sheet is a read-only version cloned from the upstream repository to the local environment, -/// automatically generated during update operations, -/// which records the latest Sheet information stored locally to accelerate data access and reduce network requests. -pub struct CachedSheet; - -impl CachedSheet { - /// Read the cached sheet data. - pub async fn cached_sheet_data(sheet_name: &SheetName) -> Result { - let sheet_name = snake_case!(sheet_name.clone()); - - let Some(path) = Self::cached_sheet_path(sheet_name) else { - return Err(Error::new( - std::io::ErrorKind::NotFound, - "Local workspace not found!", - )); - }; - let data = SheetData::read_from(path).await?; - Ok(data) - } - - /// Get the path to the cached sheet file. - pub fn cached_sheet_path(sheet_name: SheetName) -> Option { - let current_workspace = current_local_path()?; - Some( - current_workspace - .join(CLIENT_FILE_CACHED_SHEET.replace(SHEET_NAME, &sheet_name.to_string())), - ) - } - - /// Get all cached sheet names - pub async fn cached_sheet_names() -> Result, std::io::Error> { - let mut dir = fs::read_dir(CLIENT_PATH_CACHED_SHEET).await?; - let mut sheet_names = Vec::new(); - - while let Some(entry) = dir.next_entry().await? 
{ - let path = entry.path(); - - if path.is_file() - && let Some(file_name) = path.file_name().and_then(|n| n.to_str()) - && file_name.ends_with(CLIENT_SUFFIX_CACHED_SHEET_FILE) { - let name_without_ext = file_name - .trim_end_matches(CLIENT_SUFFIX_CACHED_SHEET_FILE) - .to_string(); - sheet_names.push(name_without_ext); - } - } - - Ok(sheet_names) - } - - /// Get all cached sheet paths - pub async fn cached_sheet_paths() -> Result, std::io::Error> { - let mut dir = fs::read_dir(CLIENT_PATH_CACHED_SHEET).await?; - let mut sheet_paths = Vec::new(); - let Some(workspace_path) = current_local_path() else { - return Err(Error::new( - std::io::ErrorKind::NotFound, - "Local workspace not found!", - )); - }; - - while let Some(entry) = dir.next_entry().await? { - let path = entry.path(); - - if path.is_file() - && let Some(file_name) = path.file_name().and_then(|n| n.to_str()) - && file_name.ends_with(CLIENT_SUFFIX_CACHED_SHEET_FILE) { - sheet_paths.push(format_path(workspace_path.join(path))?); - } - } - - Ok(sheet_paths) - } -} diff --git a/crates/vcs_data/src/data/local/config.rs b/crates/vcs_data/src/data/local/config.rs deleted file mode 100644 index 8a89c20..0000000 --- a/crates/vcs_data/src/data/local/config.rs +++ /dev/null @@ -1,375 +0,0 @@ -use cfg_file::ConfigFile; -use cfg_file::config::ConfigFile; -use serde::{Deserialize, Serialize}; -use std::io::Error; -use std::net::SocketAddr; -use std::path::Path; -use std::path::PathBuf; -use string_proc::snake_case; - -use crate::constants::CLIENT_FILE_WORKSPACE; -use crate::constants::CLIENT_FOLDER_WORKSPACE_ROOT_NAME; -use crate::constants::CLIENT_PATH_LOCAL_DRAFT; -use crate::constants::CLIENT_PATH_WORKSPACE_ROOT; -use crate::constants::PORT; -use crate::current::current_local_path; -use crate::data::local::latest_info::LatestInfo; -use crate::data::member::MemberId; -use crate::data::sheet::SheetName; -use crate::data::vault::config::VaultUuid; - -const ACCOUNT: &str = "{account}"; -const SHEET_NAME: &str = 
"{sheet_name}"; - -#[derive(Serialize, Deserialize, ConfigFile, Clone)] -#[cfg_file(path = CLIENT_FILE_WORKSPACE)] -pub struct LocalConfig { - /// The upstream address, representing the upstream address of the local workspace, - /// to facilitate timely retrieval of new updates from the upstream source. - #[serde(rename = "addr")] - upstream_addr: SocketAddr, - - /// The member ID used by the current local workspace. - /// This ID will be used to verify access permissions when connecting to the upstream server. - #[serde(rename = "as")] - using_account: MemberId, - - /// Whether the current member is interacting as a host. - /// In host mode, full Vault operation permissions are available except for adding new content. - #[serde(rename = "host")] - using_host_mode: bool, - - /// Whether the local workspace is stained. - /// - /// If stained, it can only set an upstream server with the same identifier. - /// - /// If the value is None, it means not stained; - /// otherwise, it contains the stain identifier (i.e., the upstream vault's unique ID) - #[serde(rename = "up_uid")] - stained_uuid: Option, - - /// The name of the sheet currently in use. - #[serde(rename = "use")] - sheet_in_use: Option, -} - -impl Default for LocalConfig { - fn default() -> Self { - Self { - upstream_addr: SocketAddr::V4(std::net::SocketAddrV4::new( - std::net::Ipv4Addr::new(127, 0, 0, 1), - PORT, - )), - using_account: "unknown".to_string(), - using_host_mode: false, - stained_uuid: None, - sheet_in_use: None, - } - } -} - -impl LocalConfig { - /// Set the vault address. - pub fn set_vault_addr(&mut self, addr: SocketAddr) { - self.upstream_addr = addr; - } - - /// Get the vault address. 
- pub fn vault_addr(&self) -> SocketAddr { - self.upstream_addr - } - - /// Set the currently used account - pub fn set_current_account(&mut self, account: MemberId) -> Result<(), std::io::Error> { - if self.sheet_in_use().is_some() { - return Err(Error::new( - std::io::ErrorKind::DirectoryNotEmpty, - "Please exit the current sheet before switching accounts", - )); - } - self.using_account = account; - Ok(()) - } - - /// Set the host mode - pub fn set_host_mode(&mut self, host_mode: bool) { - self.using_host_mode = host_mode; - } - - /// Set the currently used sheet - pub async fn use_sheet(&mut self, sheet: SheetName) -> Result<(), std::io::Error> { - let sheet = snake_case!(sheet); - - // Check if the sheet is already in use - if self.sheet_in_use().is_some() { - return Err(std::io::Error::new( - std::io::ErrorKind::AlreadyExists, - "Sheet already in use", - )); - }; - - // Check if the local path exists - let local_path = self.get_local_path().await?; - - // Get latest info - let Ok(latest_info) = LatestInfo::read_from(LatestInfo::latest_info_path( - &local_path, - &self.current_account(), - )) - .await - else { - return Err(std::io::Error::new( - std::io::ErrorKind::NotFound, - "No latest info found", - )); - }; - - // Check if the sheet exists - if !latest_info.visible_sheets.contains(&sheet) { - return Err(std::io::Error::new( - std::io::ErrorKind::NotFound, - "Sheet not found", - )); - } - - // Check if there are any files or folders other than .jv - self.check_local_path_empty(&local_path).await?; - - // Get the draft folder path - let draft_folder = self.draft_folder(&self.using_account, &sheet, &local_path); - - if draft_folder.exists() { - // Exists - // Move the contents of the draft folder to the local path with rollback support - self.move_draft_to_local(&draft_folder, &local_path).await?; - } - - self.sheet_in_use = Some(sheet); - LocalConfig::write(self).await?; - - Ok(()) - } - - /// Exit the currently used sheet - pub async fn exit_sheet(&mut 
self) -> Result<(), std::io::Error> { - // Check if the sheet is already in use - if self.sheet_in_use().is_none() { - return Ok(()); - } - - // Check if the local path exists - let local_path = self.get_local_path().await?; - - // Get the current sheet name - let sheet_name = self.sheet_in_use().as_ref().unwrap().clone(); - - // Get the draft folder path - let draft_folder = self.draft_folder(&self.using_account, &sheet_name, &local_path); - - // Create the draft folder if it doesn't exist - if !draft_folder.exists() { - std::fs::create_dir_all(&draft_folder).map_err(std::io::Error::other)?; - } - - // Move all files and folders (except .jv folder) to the draft folder with rollback support - self.move_local_to_draft(&local_path, &draft_folder).await?; - - // Clear the sheet in use - self.sheet_in_use = None; - LocalConfig::write(self).await?; - - Ok(()) - } - - /// Get local path or return error - async fn get_local_path(&self) -> Result { - current_local_path().ok_or_else(|| { - std::io::Error::new(std::io::ErrorKind::NotFound, "Fail to get local path") - }) - } - - /// Check if local path is empty (except for .jv folder) - async fn check_local_path_empty(&self, local_path: &Path) -> Result<(), std::io::Error> { - let jv_folder = local_path.join(CLIENT_PATH_WORKSPACE_ROOT); - let mut entries = std::fs::read_dir(local_path).map_err(std::io::Error::other)?; - - if entries.any(|entry| { - if let Ok(entry) = entry { - let path = entry.path(); - path != jv_folder - && path.file_name().and_then(|s| s.to_str()) - != Some(CLIENT_FOLDER_WORKSPACE_ROOT_NAME) - } else { - false - } - }) { - return Err(std::io::Error::new( - std::io::ErrorKind::DirectoryNotEmpty, - "Local path is not empty!", - )); - } - - Ok(()) - } - - /// Move contents from draft folder to local path with rollback support - async fn move_draft_to_local( - &self, - draft_folder: &Path, - local_path: &Path, - ) -> Result<(), std::io::Error> { - let draft_entries: Vec<_> = std::fs::read_dir(draft_folder) - 
.map_err(std::io::Error::other)? - .collect::, _>>() - .map_err(std::io::Error::other)?; - - let mut moved_items: Vec = Vec::new(); - - for entry in &draft_entries { - let entry_path = entry.path(); - let target_path = local_path.join(entry_path.file_name().unwrap()); - - // Move each file/directory from draft folder to local path - std::fs::rename(&entry_path, &target_path).map_err(|e| { - // Rollback all previously moved items - for moved_item in &moved_items { - let _ = std::fs::rename(&moved_item.target, &moved_item.source); - } - std::io::Error::other(e) - })?; - - moved_items.push(MovedItem { - source: entry_path.clone(), - target: target_path.clone(), - }); - } - - // Remove the now-empty draft folder - std::fs::remove_dir(draft_folder).map_err(|e| { - // Rollback all moved items if folder removal fails - for moved_item in &moved_items { - let _ = std::fs::rename(&moved_item.target, &moved_item.source); - } - std::io::Error::other(e) - })?; - - Ok(()) - } - - /// Move contents from local path to draft folder with rollback support (except .jv folder) - async fn move_local_to_draft( - &self, - local_path: &Path, - draft_folder: &Path, - ) -> Result<(), std::io::Error> { - let jv_folder = local_path.join(CLIENT_PATH_WORKSPACE_ROOT); - let entries: Vec<_> = std::fs::read_dir(local_path) - .map_err(std::io::Error::other)? 
- .collect::, _>>() - .map_err(std::io::Error::other)?; - - let mut moved_items: Vec = Vec::new(); - - for entry in &entries { - let entry_path = entry.path(); - - // Skip the .jv folder - if entry_path == jv_folder - || entry_path.file_name().and_then(|s| s.to_str()) - == Some(CLIENT_FOLDER_WORKSPACE_ROOT_NAME) - { - continue; - } - - let target_path = draft_folder.join(entry_path.file_name().unwrap()); - - // Move each file/directory from local path to draft folder - std::fs::rename(&entry_path, &target_path).map_err(|e| { - // Rollback all previously moved items - for moved_item in &moved_items { - let _ = std::fs::rename(&moved_item.target, &moved_item.source); - } - std::io::Error::other(e) - })?; - - moved_items.push(MovedItem { - source: entry_path.clone(), - target: target_path.clone(), - }); - } - - Ok(()) - } - - /// Get the currently used account - pub fn current_account(&self) -> MemberId { - self.using_account.clone() - } - - /// Check if the current member is interacting as a host. - pub fn is_host_mode(&self) -> bool { - self.using_host_mode - } - - /// Check if the local workspace is stained. - pub fn stained(&self) -> bool { - self.stained_uuid.is_some() - } - - /// Get the UUID of the vault that the local workspace is stained with. - pub fn stained_uuid(&self) -> Option { - self.stained_uuid - } - - /// Stain the local workspace with the given UUID. - pub fn stain(&mut self, uuid: VaultUuid) { - self.stained_uuid = Some(uuid); - } - - /// Unstain the local workspace. - pub fn unstain(&mut self) { - self.stained_uuid = None; - } - - /// Get the upstream address. 
- pub fn upstream_addr(&self) -> SocketAddr { - self.upstream_addr - } - - /// Get the currently used sheet - pub fn sheet_in_use(&self) -> &Option { - &self.sheet_in_use - } - - /// Get draft folder - pub fn draft_folder( - &self, - account: &MemberId, - sheet_name: &SheetName, - local_workspace_path: impl Into, - ) -> PathBuf { - let account_str = snake_case!(account.as_str()); - let sheet_name_str = snake_case!(sheet_name.as_str()); - let draft_path = CLIENT_PATH_LOCAL_DRAFT - .replace(ACCOUNT, &account_str) - .replace(SHEET_NAME, &sheet_name_str); - local_workspace_path.into().join(draft_path) - } - - /// Get current draft folder - pub fn current_draft_folder(&self) -> Option { - let Some(sheet_name) = self.sheet_in_use() else { - return None; - }; - - let current_dir = current_local_path()?; - - Some(self.draft_folder(&self.using_account, sheet_name, current_dir)) - } -} - -#[derive(Clone)] -struct MovedItem { - source: PathBuf, - target: PathBuf, -} diff --git a/crates/vcs_data/src/data/local/latest_file_data.rs b/crates/vcs_data/src/data/local/latest_file_data.rs deleted file mode 100644 index 21c647c..0000000 --- a/crates/vcs_data/src/data/local/latest_file_data.rs +++ /dev/null @@ -1,105 +0,0 @@ -use std::{collections::HashMap, io::Error, path::PathBuf}; - -use cfg_file::ConfigFile; -use serde::{Deserialize, Serialize}; - -use crate::{ - constants::{CLIENT_FILE_LATEST_DATA, CLIENT_FILE_MEMBER_HELD_NOSET}, - current::current_local_path, - data::{ - member::MemberId, - vault::virtual_file::{VirtualFileId, VirtualFileVersion, VirtualFileVersionDescription}, - }, -}; - -const ACCOUNT: &str = "{account}"; - -/// # Latest file data -/// Records the file holder and the latest version for permission and update checks -#[derive(Debug, Default, Clone, Serialize, Deserialize, ConfigFile)] -#[cfg_file(path = CLIENT_FILE_MEMBER_HELD_NOSET)] -pub struct LatestFileData { - /// File holding status - #[serde(rename = "held")] - held_status: HashMap, - - /// File version - 
#[serde(rename = "ver")] - versions: HashMap, - - /// File histories and descriptions - #[serde(rename = "his")] - histories: HashMap>, -} - -#[derive(Debug, Default, Clone, Serialize, Deserialize)] -pub enum HeldStatus { - #[serde(rename = "Hold")] - HeldWith(MemberId), // Held, status changes are sync to the client - - #[serde(rename = "None")] - NotHeld, // Not held, status changes are sync to the client - - #[default] - #[serde(rename = "Unknown")] - WantedToKnow, // Holding status is unknown, notify server must inform client -} - -impl LatestFileData { - /// Get the path to the file holding the held status information for the given member. - pub fn data_path(account: &MemberId) -> Result { - let Some(local_path) = current_local_path() else { - return Err(Error::new( - std::io::ErrorKind::NotFound, - "Workspace not found.", - )); - }; - Ok(local_path.join(CLIENT_FILE_LATEST_DATA.replace(ACCOUNT, account))) - } - - /// Get the member who holds the file with the given ID. - pub fn file_holder(&self, vfid: &VirtualFileId) -> Option<&MemberId> { - self.held_status.get(vfid).and_then(|status| match status { - HeldStatus::HeldWith(id) => Some(id), - _ => None, - }) - } - - /// Get the version of the file with the given ID. - pub fn file_version(&self, vfid: &VirtualFileId) -> Option<&VirtualFileVersion> { - self.versions.get(vfid) - } - - /// Get the version of the file with the given ID. - pub fn file_histories( - &self, - vfid: &VirtualFileId, - ) -> Option<&Vec<(VirtualFileVersion, VirtualFileVersionDescription)>> { - self.histories.get(vfid) - } - - /// Update the held status of the files. 
- pub fn update_info( - &mut self, - map: HashMap< - VirtualFileId, - ( - Option, - VirtualFileVersion, - Vec<(VirtualFileVersion, VirtualFileVersionDescription)>, - ), - >, - ) { - for (vfid, (member_id, version, desc)) in map { - self.held_status.insert( - vfid.clone(), - match member_id { - Some(member_id) => HeldStatus::HeldWith(member_id), - None => HeldStatus::NotHeld, - }, - ); - self.versions.insert(vfid.clone(), version); - self.histories.insert(vfid, desc); - } - } -} diff --git a/crates/vcs_data/src/data/local/latest_info.rs b/crates/vcs_data/src/data/local/latest_info.rs deleted file mode 100644 index e11836b..0000000 --- a/crates/vcs_data/src/data/local/latest_info.rs +++ /dev/null @@ -1,83 +0,0 @@ -use std::{ - collections::{HashMap, HashSet}, - path::{Path, PathBuf}, - time::SystemTime, -}; - -use cfg_file::ConfigFile; -use serde::{Deserialize, Serialize}; - -use crate::{ - constants::{CLIENT_FILE_LATEST_INFO, CLIENT_FILE_LATEST_INFO_NOSET}, - data::{ - member::{Member, MemberId}, - sheet::{SheetData, SheetName, SheetPathBuf}, - vault::{ - sheet_share::{Share, SheetShareId}, - virtual_file::VirtualFileId, - }, - }, -}; - -const ACCOUNT: &str = "{account}"; - -/// # Latest Info -/// Locally cached latest information, -/// used to cache personal information from upstream for querying and quickly retrieving member information. 
-#[derive(Default, Serialize, Deserialize, ConfigFile)] -#[cfg_file(path = CLIENT_FILE_LATEST_INFO_NOSET)] -pub struct LatestInfo { - // Sheets - /// Visible sheets, - /// indicating which sheets I can edit - #[serde(rename = "my")] - pub visible_sheets: Vec, - - /// Invisible sheets, - /// indicating which sheets I can export files to (these sheets are not readable to me) - #[serde(rename = "others")] - pub invisible_sheets: Vec, - - /// Reference sheets, - /// indicating sheets owned by the host, visible to everyone, - /// but only the host can modify or add mappings within them - #[serde(rename = "refsheets")] - pub reference_sheets: HashSet, - - /// Reference sheet data, indicating what files I can get from the reference sheet - #[serde(rename = "ref")] - pub ref_sheet_content: SheetData, - - /// Reverse mapping from virtual file IDs to actual paths in reference sheets - #[serde(rename = "ref_vfs")] - pub ref_sheet_vfs_mapping: HashMap, - - /// Shares in my sheets, indicating which external merge requests have entries that I can view - #[serde(rename = "shares")] - pub shares_in_my_sheets: HashMap>, - - /// Update instant - #[serde(rename = "update")] - pub update_instant: Option, - - // Members - /// All member information of the vault, allowing me to contact them more conveniently - #[serde(rename = "members")] - pub vault_members: Vec, -} - -impl LatestInfo { - /// Get the path to the latest info file for a given workspace and member ID - pub fn latest_info_path(local_workspace_path: &Path, member_id: &MemberId) -> PathBuf { - local_workspace_path.join(CLIENT_FILE_LATEST_INFO.replace(ACCOUNT, member_id)) - } -} - -#[derive(Default, Serialize, Deserialize)] -pub struct SheetInfo { - #[serde(rename = "name")] - pub sheet_name: SheetName, - - #[serde(rename = "holder")] - pub holder_name: Option, -} diff --git a/crates/vcs_data/src/data/local/local_files.rs b/crates/vcs_data/src/data/local/local_files.rs deleted file mode 100644 index 9cc244f..0000000 --- 
a/crates/vcs_data/src/data/local/local_files.rs +++ /dev/null @@ -1,148 +0,0 @@ -use std::path::{Path, PathBuf}; - -use string_proc::format_path::format_path; -use tokio::fs; - -use crate::constants::CLIENT_FOLDER_WORKSPACE_ROOT_NAME; - -pub struct RelativeFiles { - pub(crate) files: Vec, -} - -impl IntoIterator for RelativeFiles { - type Item = PathBuf; - type IntoIter = std::vec::IntoIter; - - fn into_iter(self) -> Self::IntoIter { - self.files.into_iter() - } -} - -impl RelativeFiles { - pub fn iter(&self) -> std::slice::Iter<'_, PathBuf> { - self.files.iter() - } -} - -/// Read the relative paths within the project from the input file list -pub async fn get_relative_paths(local_path: &PathBuf, paths: &[PathBuf]) -> Option { - // Get Relative Paths - let Ok(paths) = format_input_paths_and_ignore_outside_paths(local_path, paths).await else { - return None; - }; - let files: Vec = abs_paths_to_abs_files(paths).await; - let Ok(files) = parse_to_relative(local_path, files) else { - return None; - }; - Some(RelativeFiles { files }) -} - -/// Normalize the input paths -async fn format_input_paths( - local_path: &Path, - track_files: &[PathBuf], -) -> Result, std::io::Error> { - let current_dir = local_path; - - let mut real_paths = Vec::new(); - for file in track_files { - let path = current_dir.join(file); - - // Skip paths that contain .jv directories - if path.components().any(|component| { - if let std::path::Component::Normal(name) = component { - name.to_str() == Some(CLIENT_FOLDER_WORKSPACE_ROOT_NAME) - } else { - false - } - }) { - continue; - } - - match format_path(path) { - Ok(path) => real_paths.push(path), - Err(e) => { - return Err(std::io::Error::new( - std::io::ErrorKind::InvalidData, - format!("Failed to format path: {}", e), - )); - } - } - } - - Ok(real_paths) -} - -/// Ignore files outside the workspace -async fn format_input_paths_and_ignore_outside_paths( - local_path: &PathBuf, - files: &[PathBuf], -) -> Result, std::io::Error> { - let result = 
format_input_paths(local_path, files).await?; - let result: Vec = result - .into_iter() - .filter(|path| path.starts_with(local_path)) - .collect(); - Ok(result) -} - -/// Normalize the input paths to relative paths -fn parse_to_relative( - local_dir: &PathBuf, - files: Vec, -) -> Result, std::io::Error> { - let result: Result, _> = files - .iter() - .map(|p| { - p.strip_prefix(local_dir) - .map(|relative| relative.to_path_buf()) - .map_err(|_| { - std::io::Error::new( - std::io::ErrorKind::InvalidInput, - "Path prefix stripping failed", - ) - }) - }) - .collect(); - - result -} - -/// Convert absolute paths to absolute file paths, expanding directories to their contained files -async fn abs_paths_to_abs_files(paths: Vec) -> Vec { - let mut files = Vec::new(); - - for path in paths { - if !path.exists() { - continue; - } - - let metadata = match fs::metadata(&path).await { - Ok(meta) => meta, - Err(_) => continue, - }; - - if metadata.is_file() { - files.push(path); - } else if metadata.is_dir() { - let walker = walkdir::WalkDir::new(&path); - for entry in walker.into_iter().filter_map(|e| e.ok()) { - if entry.path().components().any(|component| { - if let std::path::Component::Normal(name) = component { - name == CLIENT_FOLDER_WORKSPACE_ROOT_NAME - } else { - false - } - }) { - continue; - } - - if entry.file_type().is_file() { - files.push(entry.path().to_path_buf()); - } - } - } - } - - files -} diff --git a/crates/vcs_data/src/data/local/local_sheet.rs b/crates/vcs_data/src/data/local/local_sheet.rs deleted file mode 100644 index 6f9924c..0000000 --- a/crates/vcs_data/src/data/local/local_sheet.rs +++ /dev/null @@ -1,377 +0,0 @@ -use std::{collections::HashMap, io::Error, path::PathBuf, time::SystemTime}; - -use ::serde::{Deserialize, Serialize}; -use cfg_file::{ConfigFile, config::ConfigFile}; -use string_proc::format_path::format_path; - -use crate::{ - constants::CLIENT_FILE_LOCAL_SHEET_NOSET, - data::{ - local::LocalWorkspace, - member::MemberId, - 
sheet::SheetName, - vault::virtual_file::{VirtualFileId, VirtualFileVersion, VirtualFileVersionDescription}, - }, -}; - -pub type LocalFilePathBuf = PathBuf; -pub type LocalSheetPathBuf = PathBuf; - -/// # Local Sheet -/// Local sheet information, used to record metadata of actual local files, -/// to compare with upstream information for more optimized file submission, -/// and to determine whether files need to be updated or submitted. -pub struct LocalSheet<'a> { - pub(crate) local_workspace: &'a LocalWorkspace, - pub(crate) member: MemberId, - pub(crate) sheet_name: String, - pub(crate) data: LocalSheetData, -} - -#[derive(Debug, Default, Serialize, Deserialize, ConfigFile, Clone)] -#[cfg_file(path = CLIENT_FILE_LOCAL_SHEET_NOSET)] // Do not use LocalSheet::write or LocalSheet::read -pub struct LocalSheetData { - /// Local file path to metadata mapping. - #[serde(rename = "map")] - pub(crate) mapping: HashMap, - - #[serde(rename = "vfs")] - pub(crate) vfs: HashMap, -} - -#[derive(Debug, Serialize, Deserialize, Clone)] -pub struct LocalMappingMetadata { - /// Hash value generated immediately after the file is downloaded to the local workspace - #[serde(rename = "base_hash")] - pub(crate) hash_when_updated: String, - - /// Time when the file was downloaded to the local workspace - #[serde(rename = "time")] - pub(crate) time_when_updated: SystemTime, - - /// Size of the file when downloaded to the local workspace - #[serde(rename = "size")] - pub(crate) size_when_updated: u64, - - /// Version description when the file was downloaded to the local workspace - #[serde(rename = "desc")] - pub(crate) version_desc_when_updated: VirtualFileVersionDescription, - - /// Version when the file was downloaded to the local workspace - #[serde(rename = "ver")] - pub(crate) version_when_updated: VirtualFileVersion, - - /// Virtual file ID corresponding to the local path - #[serde(rename = "id")] - pub(crate) mapping_vfid: VirtualFileId, - - /// Latest modifiy check time - 
#[serde(rename = "check_time")] - pub(crate) last_modify_check_time: SystemTime, - - /// Latest modifiy check result - #[serde(rename = "modified")] - pub(crate) last_modify_check_result: bool, - - /// Latest modifiy check hash result - #[serde(rename = "current_hash")] - pub(crate) last_modify_check_hash: Option, -} - -impl LocalSheetData { - /// Wrap LocalSheetData into LocalSheet with workspace, member, and sheet name - pub fn wrap_to_local_sheet<'a>( - self, - workspace: &'a LocalWorkspace, - member: MemberId, - sheet_name: SheetName, - ) -> LocalSheet<'a> { - LocalSheet { - local_workspace: workspace, - member, - sheet_name, - data: self, - } - } -} - -impl LocalMappingMetadata { - /// Create a new MappingMetaData instance - #[allow(clippy::too_many_arguments)] - pub fn new( - hash_when_updated: String, - time_when_updated: SystemTime, - size_when_updated: u64, - version_desc_when_updated: VirtualFileVersionDescription, - version_when_updated: VirtualFileVersion, - mapping_vfid: VirtualFileId, - last_modifiy_check_time: SystemTime, - last_modifiy_check_result: bool, - ) -> Self { - Self { - hash_when_updated, - time_when_updated, - size_when_updated, - version_desc_when_updated, - version_when_updated, - mapping_vfid, - last_modify_check_time: last_modifiy_check_time, - last_modify_check_result: last_modifiy_check_result, - last_modify_check_hash: None, - } - } - - /// Getter for hash_when_updated - pub fn hash_when_updated(&self) -> &String { - &self.hash_when_updated - } - - /// Setter for hash_when_updated - pub fn set_hash_when_updated(&mut self, hash: String) { - self.hash_when_updated = hash; - } - - /// Getter for date_when_updated - pub fn time_when_updated(&self) -> &SystemTime { - &self.time_when_updated - } - - /// Setter for time_when_updated - pub fn set_time_when_updated(&mut self, time: SystemTime) { - self.time_when_updated = time; - } - - /// Getter for size_when_updated - pub fn size_when_updated(&self) -> u64 { - self.size_when_updated - } - 
- /// Setter for size_when_updated - pub fn set_size_when_updated(&mut self, size: u64) { - self.size_when_updated = size; - } - - /// Getter for version_desc_when_updated - pub fn version_desc_when_updated(&self) -> &VirtualFileVersionDescription { - &self.version_desc_when_updated - } - - /// Setter for version_desc_when_updated - pub fn set_version_desc_when_updated(&mut self, version_desc: VirtualFileVersionDescription) { - self.version_desc_when_updated = version_desc; - } - - /// Getter for version_when_updated - pub fn version_when_updated(&self) -> &VirtualFileVersion { - &self.version_when_updated - } - - /// Setter for version_when_updated - pub fn set_version_when_updated(&mut self, version: VirtualFileVersion) { - self.version_when_updated = version; - } - - /// Getter for mapping_vfid - pub fn mapping_vfid(&self) -> &VirtualFileId { - &self.mapping_vfid - } - - /// Setter for mapping_vfid - pub fn set_mapping_vfid(&mut self, vfid: VirtualFileId) { - self.mapping_vfid = vfid; - } - - /// Getter for last_modifiy_check_time - pub fn last_modifiy_check_time(&self) -> &SystemTime { - &self.last_modify_check_time - } - - /// Setter for last_modifiy_check_time - pub fn set_last_modifiy_check_time(&mut self, time: SystemTime) { - self.last_modify_check_time = time; - } - - /// Getter for last_modifiy_check_result - pub fn last_modifiy_check_result(&self) -> bool { - self.last_modify_check_result - } - - /// Setter for last_modifiy_check_result - pub fn set_last_modifiy_check_result(&mut self, result: bool) { - self.last_modify_check_result = result; - } - - /// Getter for last_modifiy_check_hash - pub fn last_modifiy_check_hash(&self) -> &Option { - &self.last_modify_check_hash - } - - /// Setter for last_modifiy_check_hash - pub fn set_last_modifiy_check_hash(&mut self, hash: Option) { - self.last_modify_check_hash = hash; - } -} - -impl Default for LocalMappingMetadata { - fn default() -> Self { - Self { - hash_when_updated: Default::default(), - 
time_when_updated: SystemTime::now(), - size_when_updated: Default::default(), - version_desc_when_updated: Default::default(), - version_when_updated: Default::default(), - mapping_vfid: Default::default(), - last_modify_check_time: SystemTime::now(), - last_modify_check_result: false, - last_modify_check_hash: None, - } - } -} - -mod instant_serde { - use serde::{self, Deserialize, Deserializer, Serializer}; - use tokio::time::Instant; - - pub fn serialize(instant: &Instant, serializer: S) -> Result - where - S: Serializer, - { - serializer.serialize_u64(instant.elapsed().as_secs()) - } - - pub fn deserialize<'de, D>(deserializer: D) -> Result - where - D: Deserializer<'de>, - { - let secs = u64::deserialize(deserializer)?; - Ok(Instant::now() - std::time::Duration::from_secs(secs)) - } -} - -impl<'a> From<&'a LocalSheet<'a>> for &'a LocalSheetData { - fn from(sheet: &'a LocalSheet<'a>) -> Self { - &sheet.data - } -} - -impl<'a> LocalSheet<'a> { - /// Add mapping to local sheet data - pub fn add_mapping( - &mut self, - path: &LocalFilePathBuf, - mapping: LocalMappingMetadata, - ) -> Result<(), std::io::Error> { - let path = format_path(path)?; - if self.data.mapping.contains_key(&path) - || self.data.vfs.contains_key(&mapping.mapping_vfid) - { - return Err(Error::new( - std::io::ErrorKind::AlreadyExists, - "Mapping already exists", - )); - } - - self.data.mapping.insert(path, mapping); - Ok(()) - } - - /// Move mapping to other path - pub fn move_mapping( - &mut self, - from: &LocalFilePathBuf, - to: &LocalFilePathBuf, - ) -> Result<(), std::io::Error> { - let from = format_path(from)?; - let to = format_path(to)?; - if self.data.mapping.contains_key(&to) { - return Err(Error::new( - std::io::ErrorKind::AlreadyExists, - "To path already exists.", - )); - } - - let Some(old_value) = self.data.mapping.remove(&from) else { - return Err(Error::new( - std::io::ErrorKind::NotFound, - "From path is not found.", - )); - }; - - self.data.mapping.insert(to, old_value); - - 
Ok(()) - } - - /// Remove mapping from local sheet - pub fn remove_mapping( - &mut self, - path: &LocalFilePathBuf, - ) -> Result { - let path = format_path(path)?; - match self.data.mapping.remove(&path) { - Some(mapping) => Ok(mapping), - None => Err(Error::new( - std::io::ErrorKind::NotFound, - "Path is not found.", - )), - } - } - - /// Get immutable mapping data - pub fn mapping_data( - &self, - path: &LocalFilePathBuf, - ) -> Result<&LocalMappingMetadata, std::io::Error> { - let path = format_path(path)?; - let Some(data) = self.data.mapping.get(&path) else { - return Err(Error::new( - std::io::ErrorKind::NotFound, - "Path is not found.", - )); - }; - Ok(data) - } - - /// Get muttable mapping data - pub fn mapping_data_mut( - &mut self, - path: &LocalFilePathBuf, - ) -> Result<&mut LocalMappingMetadata, std::io::Error> { - let path = format_path(path)?; - let Some(data) = self.data.mapping.get_mut(&path) else { - return Err(Error::new( - std::io::ErrorKind::NotFound, - "Path is not found.", - )); - }; - Ok(data) - } - - /// Write the sheet to disk - pub async fn write(&mut self) -> Result<(), std::io::Error> { - let path = self - .local_workspace - .local_sheet_path(&self.member, &self.sheet_name); - self.write_to_path(path).await - } - - /// Write the sheet to custom path - pub async fn write_to_path(&mut self, path: impl Into) -> Result<(), std::io::Error> { - let path = path.into(); - - self.data.vfs = HashMap::new(); - for (path, mapping) in self.data.mapping.iter() { - self.data - .vfs - .insert(mapping.mapping_vfid.clone(), path.clone()); - } - - LocalSheetData::write_to(&self.data, path).await?; - Ok(()) - } - - /// Get path by VirtualFileId - pub fn path_by_id(&self, vfid: &VirtualFileId) -> Option<&PathBuf> { - self.data.vfs.get(vfid) - } -} diff --git a/crates/vcs_data/src/data/local/vault_modified.rs b/crates/vcs_data/src/data/local/vault_modified.rs deleted file mode 100644 index 563d11f..0000000 --- 
a/crates/vcs_data/src/data/local/vault_modified.rs +++ /dev/null @@ -1,30 +0,0 @@ -use crate::{constants::CLIENT_FILE_VAULT_MODIFIED, current::current_local_path}; - -pub async fn check_vault_modified() -> bool { - let Some(current_dir) = current_local_path() else { - return false; - }; - - let record_file = current_dir.join(CLIENT_FILE_VAULT_MODIFIED); - if !record_file.exists() { - return false; - } - - let Ok(contents) = tokio::fs::read_to_string(&record_file).await else { - return false; - }; - - matches!(contents.trim().to_lowercase().as_str(), "true") -} - -pub async fn sign_vault_modified(modified: bool) { - let Some(current_dir) = current_local_path() else { - return; - }; - - let record_file = current_dir.join(CLIENT_FILE_VAULT_MODIFIED); - - let contents = if modified { "true" } else { "false" }; - - let _ = tokio::fs::write(&record_file, contents).await; -} diff --git a/crates/vcs_data/src/data/local/workspace_analyzer.rs b/crates/vcs_data/src/data/local/workspace_analyzer.rs deleted file mode 100644 index f2d83ff..0000000 --- a/crates/vcs_data/src/data/local/workspace_analyzer.rs +++ /dev/null @@ -1,327 +0,0 @@ -use std::{ - collections::{HashMap, HashSet}, - io::Error, - path::PathBuf, -}; - -use sha1_hash::calc_sha1_multi; -use string_proc::format_path::format_path; -use walkdir::WalkDir; - -use crate::data::{ - local::{LocalWorkspace, cached_sheet::CachedSheet, local_sheet::LocalSheet}, - member::MemberId, - sheet::{SheetData, SheetName}, - vault::virtual_file::VirtualFileId, -}; - -pub type FromRelativePathBuf = PathBuf; -pub type ToRelativePathBuf = PathBuf; -pub type CreatedRelativePathBuf = PathBuf; -pub type LostRelativePathBuf = PathBuf; -pub type ModifiedRelativePathBuf = PathBuf; - -pub struct AnalyzeResult<'a> { - local_workspace: &'a LocalWorkspace, - - /// Moved local files - pub moved: HashMap, - - /// Newly created local files - pub created: HashSet, - - /// Lost local files - pub lost: HashSet, - - /// Erased local files - pub erased: 
HashSet, - - /// Modified local files (excluding moved files) - /// For files that were both moved and modified, changes can only be detected after LocalSheet mapping is aligned with actual files - pub modified: HashSet, -} - -struct AnalyzeContext<'a> { - member: MemberId, - sheet_name: SheetName, - local_sheet: Option>, - cached_sheet_data: Option, -} - -impl<'a> AnalyzeResult<'a> { - /// Analyze all files, calculate the file information provided - pub async fn analyze_local_status( - local_workspace: &'a LocalWorkspace, - ) -> Result, std::io::Error> { - // Workspace - let workspace = local_workspace; - - // Current member, sheet - let (member, sheet_name) = { - let mut_workspace = workspace.config.lock().await; - let member = mut_workspace.current_account(); - let Some(sheet) = mut_workspace.sheet_in_use().clone() else { - return Err(Error::new(std::io::ErrorKind::NotFound, "Sheet not found")); - }; - (member, sheet) - }; - - // Local files (RelativePaths) - let local_path = workspace.local_path(); - let file_relative_paths = { - let mut paths = HashSet::new(); - for entry in WalkDir::new(local_path) { - let entry = match entry { - Ok(entry) => entry, - Err(_) => continue, - }; - - // Skip entries that contain ".jv" in their path - if entry.path().to_string_lossy().contains(".jv") { - continue; - } - - if entry.file_type().is_file() - && let Ok(relative_path) = entry.path().strip_prefix(local_path) - { - let format = format_path(relative_path.to_path_buf()); - let Ok(format) = format else { - continue; - }; - paths.insert(format); - } - } - - paths - }; - - // Read local sheet - let local_sheet = (workspace.local_sheet(&member, &sheet_name).await).ok(); - - // Read cached sheet - let cached_sheet_data = match CachedSheet::cached_sheet_data(&sheet_name).await { - Ok(v) => Some(v), - Err(_) => { - return Err(Error::new( - std::io::ErrorKind::NotFound, - "Cached sheet not found", - )); - } - }; - - // Create new result - let mut result = 
Self::none_result(workspace); - - // Analyze entry - let mut analyze_ctx = AnalyzeContext { - member, - sheet_name, - local_sheet, - cached_sheet_data, - }; - Self::analyze_moved(&mut result, &file_relative_paths, &analyze_ctx, workspace).await?; - Self::analyze_modified( - &mut result, - &file_relative_paths, - &mut analyze_ctx, - workspace, - ) - .await?; - - Ok(result) - } - - /// Track file moves by comparing recorded SHA1 hashes with actual file SHA1 hashes - /// For files that cannot be directly matched, continue searching using fuzzy matching algorithms - async fn analyze_moved( - result: &mut AnalyzeResult<'_>, - file_relative_paths: &HashSet, - analyze_ctx: &AnalyzeContext<'a>, - workspace: &LocalWorkspace, - ) -> Result<(), std::io::Error> { - let local_sheet_paths: HashSet<&PathBuf> = match &analyze_ctx.local_sheet { - Some(local_sheet) => local_sheet.data.mapping.keys().collect(), - None => HashSet::new(), - }; - let file_relative_paths_ref: HashSet<&PathBuf> = file_relative_paths.iter().collect(); - - // Files that exist locally but not in remote - let mut erased_files: HashSet = HashSet::new(); - - if let Some(cached_data) = &analyze_ctx.cached_sheet_data { - if let Some(local_sheet) = &analyze_ctx.local_sheet { - let cached_sheet_mapping = cached_data.mapping(); - let local_sheet_mapping = &local_sheet.data.mapping; - - // Find paths that exist in local sheet but not in cached sheet - for local_path in local_sheet_mapping.keys() { - if !cached_sheet_mapping.contains_key(local_path) { - erased_files.insert(local_path.clone()); - } - } - } - } - - // Files that exist in the local sheet but not in reality are considered lost - let mut lost_files: HashSet<&PathBuf> = local_sheet_paths - .difference(&file_relative_paths_ref) - .filter(|&&path| !erased_files.contains(path)) - .cloned() - .collect(); - - // Files that exist in reality but not in the local sheet are recorded as newly created - let mut new_files: HashSet<&PathBuf> = file_relative_paths_ref - 
.difference(&local_sheet_paths) - .cloned() - .collect(); - - // Calculate hashes for new files - let new_files_for_hash: Vec = new_files - .iter() - .map(|p| workspace.local_path.join(p)) - .collect(); - let file_hashes: HashSet<(PathBuf, String)> = - match calc_sha1_multi::>(new_files_for_hash, 8192).await { - Ok(hash) => hash, - Err(e) => return Err(Error::other(e)), - } - .iter() - .map(|r| (r.file_path.clone(), r.hash.to_string())) - .collect(); - - // Build hash mapping table for lost files - let mut lost_files_hash_mapping: HashMap = - match &analyze_ctx.local_sheet { - Some(local_sheet) => lost_files - .iter() - .filter_map(|f| { - local_sheet.mapping_data(f).ok().map(|mapping_data| { - ( - // Using the most recently recorded Hash can more accurately identify moved items, - // but if it doesn't exist, fall back to the initially recorded Hash - mapping_data - .last_modify_check_hash - .as_ref() - .cloned() - .unwrap_or(mapping_data.hash_when_updated.clone()), - (*f).clone(), - ) - }) - }) - .collect(), - None => HashMap::new(), - }; - - // If these hashes correspond to the hashes of missing files, then this pair of new and lost items will be merged into moved items - let mut moved_files: HashSet<(FromRelativePathBuf, ToRelativePathBuf)> = HashSet::new(); - for (new_path, new_hash) in file_hashes { - let new_path = new_path - .strip_prefix(&workspace.local_path) - .map(|p| p.to_path_buf()) - .unwrap_or(new_path); - - // If the new hash value hits the mapping, add a moved item - if let Some(lost_path) = lost_files_hash_mapping.remove(&new_hash) { - // Remove this new item and lost item - lost_files.remove(&lost_path); - new_files.remove(&new_path); - - // Create moved item - moved_files.insert((lost_path.clone(), new_path)); - } - } - - // Enter fuzzy matching to match other potentially moved items that haven't been matched - // If the total number of new and lost files is divisible by 2, it indicates there might still be files that have been moved, consider 
trying fuzzy matching - if new_files.len() + lost_files.len() % 2 == 0 { - // Try fuzzy matching - // ... - } - - // Collect results and set the result - result.created = new_files.iter().map(|p| (*p).clone()).collect(); - result.lost = lost_files.iter().map(|p| (*p).clone()).collect(); - result.moved = moved_files - .iter() - .filter_map(|(from, to)| { - let vfid = analyze_ctx - .local_sheet - .as_ref() - .and_then(|local_sheet| local_sheet.mapping_data(from).ok()) - .map(|mapping_data| mapping_data.mapping_vfid.clone()); - vfid.map(|vfid| (vfid, (from.clone(), to.clone()))) - }) - .collect(); - result.erased = erased_files; - - Ok(()) - } - - /// Compare using file modification time and SHA1 hash values. - /// Note: For files that have been both moved and modified, they can only be recognized as modified after their location is matched. - async fn analyze_modified( - result: &mut AnalyzeResult<'_>, - file_relative_paths: &HashSet, - analyze_ctx: &mut AnalyzeContext<'a>, - workspace: &LocalWorkspace, - ) -> Result<(), std::io::Error> { - let local_sheet = &mut analyze_ctx.local_sheet.as_mut().unwrap(); - let local_path = local_sheet.local_workspace.local_path().clone(); - - for path in file_relative_paths { - // Get mapping data - let Ok(mapping_data) = local_sheet.mapping_data_mut(path) else { - continue; - }; - - // If modified time not changed, skip - let modified_time = std::fs::metadata(local_path.join(path))?.modified()?; - if &modified_time == mapping_data.last_modifiy_check_time() { - if mapping_data.last_modifiy_check_result() { - result.modified.insert(path.clone()); - } - continue; - } - - // Calculate hash - let hash_calc = match sha1_hash::calc_sha1(workspace.local_path.join(path), 2048).await - { - Ok(hash) => hash, - Err(e) => return Err(Error::other(e)), - }; - - // If hash not match, mark as modified - if &hash_calc.hash != mapping_data.hash_when_updated() { - result.modified.insert(path.clone()); - - // Update last modified check time to modified 
time - mapping_data.last_modify_check_time = modified_time; - mapping_data.last_modify_check_result = true; - } else { - // Update last modified check time to modified time - mapping_data.last_modify_check_time = modified_time; - mapping_data.last_modify_check_result = false; - } - - // Record latest hash - mapping_data.last_modify_check_hash = Some(hash_calc.hash) - } - - // Persist the local sheet data - LocalSheet::write(local_sheet).await?; - - Ok(()) - } - - /// Generate a empty AnalyzeResult - fn none_result(local_workspace: &'a LocalWorkspace) -> AnalyzeResult<'a> { - AnalyzeResult { - local_workspace, - moved: HashMap::new(), - created: HashSet::new(), - lost: HashSet::new(), - modified: HashSet::new(), - erased: HashSet::new(), - } - } -} diff --git a/crates/vcs_data/src/data/member.rs b/crates/vcs_data/src/data/member.rs deleted file mode 100644 index 7e99488..0000000 --- a/crates/vcs_data/src/data/member.rs +++ /dev/null @@ -1,71 +0,0 @@ -use std::collections::HashMap; - -use cfg_file::ConfigFile; -use serde::{Deserialize, Serialize}; -use string_proc::snake_case; - -pub type MemberId = String; - -#[derive(Debug, Eq, Clone, ConfigFile, Serialize, Deserialize)] -pub struct Member { - /// Member ID, the unique identifier of the member - #[serde(rename = "id")] - id: String, - - /// Member metadata - #[serde(rename = "meta")] - metadata: HashMap, -} - -impl Default for Member { - fn default() -> Self { - Self::new("default_user") - } -} - -impl PartialEq for Member { - fn eq(&self, other: &Self) -> bool { - self.id == other.id - } -} - -impl std::fmt::Display for Member { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "{}", self.id) - } -} - -impl std::convert::AsRef for Member { - fn as_ref(&self) -> &str { - &self.id - } -} - -impl Member { - /// Create member struct by id - pub fn new(new_id: impl Into) -> Self { - Self { - id: snake_case!(new_id.into()), - metadata: HashMap::new(), - } - } - - /// Get member id - pub 
fn id(&self) -> String { - self.id.clone() - } - - /// Get metadata - pub fn metadata(&self, key: impl Into) -> Option<&String> { - self.metadata.get(&key.into()) - } - - /// Set metadata - pub fn set_metadata( - &mut self, - key: impl AsRef, - value: impl Into, - ) -> Option { - self.metadata.insert(key.as_ref().to_string(), value.into()) - } -} diff --git a/crates/vcs_data/src/data/sheet.rs b/crates/vcs_data/src/data/sheet.rs deleted file mode 100644 index 64b1985..0000000 --- a/crates/vcs_data/src/data/sheet.rs +++ /dev/null @@ -1,280 +0,0 @@ -use std::{collections::HashMap, path::PathBuf}; - -use cfg_file::{ConfigFile, config::ConfigFile}; -use serde::{Deserialize, Serialize}; - -use crate::{ - constants::SERVER_FILE_SHEET, - data::{ - member::MemberId, - vault::{ - Vault, - virtual_file::{VirtualFileId, VirtualFileVersion}, - }, - }, -}; - -pub type SheetName = String; -pub type SheetPathBuf = PathBuf; - -const SHEET_NAME: &str = "{sheet_name}"; - -pub struct Sheet<'a> { - /// The name of the current sheet - pub(crate) name: SheetName, - - /// Sheet data - pub(crate) data: SheetData, - - /// Sheet path - pub(crate) vault_reference: &'a Vault, -} - -#[derive(Default, Serialize, Deserialize, ConfigFile, Clone)] -pub struct SheetData { - /// The write count of the current sheet - #[serde(rename = "v")] - pub(crate) write_count: i32, - - /// The holder of the current sheet, who has full operation rights to the sheet mapping - #[serde(rename = "holder")] - pub(crate) holder: Option, - - /// Mapping of sheet paths to virtual file IDs - #[serde(rename = "map")] - pub(crate) mapping: HashMap, - - /// Mapping of virtual file Ids to sheet paths - #[serde(rename = "id_map")] - pub(crate) id_mapping: Option>, -} - -#[derive(Debug, Default, Serialize, Deserialize, ConfigFile, Clone, Eq, PartialEq)] -pub struct SheetMappingMetadata { - #[serde(rename = "id")] - pub id: VirtualFileId, - #[serde(rename = "ver")] - pub version: VirtualFileVersion, -} - -impl<'a> Sheet<'a> { - 
pub fn name(&self) -> &SheetName { - &self.name - } - - /// Get the holder of this sheet - pub fn holder(&self) -> Option<&MemberId> { - self.data.holder.as_ref() - } - - /// Get the mapping of this sheet - pub fn mapping(&self) -> &HashMap { - &self.data.mapping - } - - /// Get the muttable mapping of this sheet - pub fn mapping_mut(&mut self) -> &mut HashMap { - &mut self.data.mapping - } - - /// Get the id_mapping of this sheet data - pub fn id_mapping(&self) -> &Option> { - &self.data.id_mapping - } - - /// Get the write count of this sheet - pub fn write_count(&self) -> i32 { - self.data.write_count - } - - /// Forget the holder of this sheet - pub fn forget_holder(&mut self) { - self.data.holder = None; - } - - /// Set the holder of this sheet - pub fn set_holder(&mut self, holder: MemberId) { - self.data.holder = Some(holder); - } - - /// Add (or Edit) a mapping entry to the sheet - /// - /// This operation performs safety checks to ensure the member has the right to add the mapping: - /// 1. The sheet must have a holder (member) to perform this operation - /// 2. If the virtual file ID doesn't exist in the vault, the mapping is added directly - /// 3. 
If the virtual file exists, the mapping is added regardless of member edit rights - /// - /// Note: Full validation adds overhead - avoid frequent calls - pub async fn add_mapping( - &mut self, - sheet_path: SheetPathBuf, - virtual_file_id: VirtualFileId, - version: VirtualFileVersion, - ) -> Result<(), std::io::Error> { - // Check if the virtual file exists in the vault - if self.vault_reference.virtual_file(&virtual_file_id).is_err() { - // Virtual file doesn't exist, add the mapping directly - self.data.mapping.insert( - sheet_path, - SheetMappingMetadata { - id: virtual_file_id, - version, - }, - ); - return Ok(()); - } - - // Check if the sheet has a holder - let Some(_) = self.holder() else { - return Err(std::io::Error::new( - std::io::ErrorKind::PermissionDenied, - "This sheet has no holder", - )); - }; - - self.data.mapping.insert( - sheet_path, - SheetMappingMetadata { - id: virtual_file_id, - version, - }, - ); - - Ok(()) - } - - /// Remove a mapping entry from the sheet - /// - /// This operation performs safety checks to ensure the member has the right to remove the mapping: - /// 1. The sheet must have a holder (member) to perform this operation - /// 2. Member must NOT have edit rights to the virtual file to release it (ensuring clear ownership) - /// 3. If the virtual file doesn't exist, the mapping is removed but no ID is returned - /// 4. 
If member has no edit rights and the file exists, returns the removed virtual file ID - /// - /// Note: Full validation adds overhead - avoid frequent calls - pub async fn remove_mapping( - &mut self, - sheet_path: &SheetPathBuf, - ) -> Option { - let virtual_file_meta = match self.data.mapping.get(sheet_path) { - Some(id) => id, - None => { - // The mapping entry doesn't exist, nothing to remove - return None; - } - }; - - // Check if the virtual file exists in the vault - if self - .vault_reference - .virtual_file(&virtual_file_meta.id) - .is_err() - { - // Virtual file doesn't exist, remove the mapping and return None - self.data.mapping.remove(sheet_path); - return None; - } - - // Check if the sheet has a holder - let holder = self.holder()?; - - // Check if the holder has edit rights to the virtual file - match self - .vault_reference - .has_virtual_file_edit_right(holder, &virtual_file_meta.id) - .await - { - Ok(false) => { - // Holder doesn't have rights, remove and return the virtual file ID - self.data.mapping.remove(sheet_path) - } - Ok(true) => { - // Holder has edit rights, don't remove the mapping - None - } - Err(_) => { - // Error checking rights, don't remove the mapping - None - } - } - } - - /// Persist the sheet to disk - /// - /// Why not use a reference? - /// Because I don't want a second instance of the sheet to be kept in memory. - /// If needed, please deserialize and reload it. 
- pub async fn persist(mut self) -> Result<(), std::io::Error> { - self.data.write_count += 1; - - // Update id mapping - self.data.id_mapping = Some(HashMap::new()); - for map in self.data.mapping.iter() { - self.data - .id_mapping - .as_mut() - .unwrap() - .insert(map.1.id.clone(), map.0.clone()); - } - - // Add write count - if self.data.write_count >= i32::MAX - 1 { - self.data.write_count = 0; - } - SheetData::write_to(&self.data, self.sheet_path()).await - } - - /// Get the path to the sheet file - pub fn sheet_path(&self) -> PathBuf { - Sheet::sheet_path_with_name(self.vault_reference, &self.name) - } - - /// Get the path to the sheet file with the given name - pub fn sheet_path_with_name(vault: &Vault, name: impl AsRef) -> PathBuf { - vault - .vault_path() - .join(SERVER_FILE_SHEET.replace(SHEET_NAME, name.as_ref())) - } - - /// Clone the data of the sheet - pub fn clone_data(&self) -> SheetData { - self.data.clone() - } - - /// Convert the sheet into its data representation - pub fn to_data(self) -> SheetData { - self.data - } -} - -impl SheetData { - /// Get the write count of this sheet data - pub fn write_count(&self) -> i32 { - self.write_count - } - - /// Get the holder of this sheet data - pub fn holder(&self) -> Option<&MemberId> { - self.holder.as_ref() - } - - /// Get the mapping of this sheet data - pub fn mapping(&self) -> &HashMap { - &self.mapping - } - - /// Get the muttable mapping of this sheet data - pub fn mapping_mut(&mut self) -> &mut HashMap { - &mut self.mapping - } - - /// Get the id_mapping of this sheet data - pub fn id_mapping(&self) -> &Option> { - &self.id_mapping - } - - /// Get the muttable id_mapping of this sheet data - pub fn id_mapping_mut(&mut self) -> &mut Option> { - &mut self.id_mapping - } -} diff --git a/crates/vcs_data/src/data/user.rs b/crates/vcs_data/src/data/user.rs deleted file mode 100644 index 9f52fdc..0000000 --- a/crates/vcs_data/src/data/user.rs +++ /dev/null @@ -1,28 +0,0 @@ -use 
crate::current::current_cfg_dir; -use std::path::PathBuf; - -pub mod accounts; - -pub struct UserDirectory { - local_path: PathBuf, -} - -impl UserDirectory { - /// Create a user ditectory struct from the current system's document directory - pub fn current_cfg_dir() -> Option { - Some(UserDirectory { - local_path: current_cfg_dir()?, - }) - } - - /// Create a user directory struct from a specified directory path - /// Returns None if the directory does not exist - pub fn from_path>(path: P) -> Option { - let local_path = path.into(); - if local_path.exists() { - Some(UserDirectory { local_path }) - } else { - None - } - } -} diff --git a/crates/vcs_data/src/data/user/accounts.rs b/crates/vcs_data/src/data/user/accounts.rs deleted file mode 100644 index d77bc02..0000000 --- a/crates/vcs_data/src/data/user/accounts.rs +++ /dev/null @@ -1,164 +0,0 @@ -use std::{ - fs, - io::{Error, ErrorKind}, - path::PathBuf, -}; - -use cfg_file::config::ConfigFile; - -use crate::{ - constants::{USER_FILE_ACCOUNTS, USER_FILE_KEY, USER_FILE_MEMBER}, - data::{ - member::{Member, MemberId}, - user::UserDirectory, - }, -}; - -const SELF_ID: &str = "{self_id}"; - -/// Account Management -impl UserDirectory { - /// Read account from configuration file - pub async fn account(&self, id: &MemberId) -> Result { - if let Some(cfg_file) = self.account_cfg(id) { - let member = Member::read_from(cfg_file).await?; - return Ok(member); - } - - Err(Error::new(ErrorKind::NotFound, "Account not found!")) - } - - /// List all account IDs in the user directory - pub fn account_ids(&self) -> Result, std::io::Error> { - let accounts_path = self - .local_path - .join(USER_FILE_ACCOUNTS.replace(SELF_ID, "")); - - if !accounts_path.exists() { - return Ok(Vec::new()); - } - - let mut account_ids = Vec::new(); - - for entry in fs::read_dir(accounts_path)? 
{ - let entry = entry?; - let path = entry.path(); - - if path.is_file() - && let Some(file_name) = path.file_stem().and_then(|s| s.to_str()) - && path.extension().and_then(|s| s.to_str()) == Some("toml") - { - // Remove the "_private" suffix from key files if present - let account_id = file_name.replace("_private", ""); - account_ids.push(account_id); - } - } - - Ok(account_ids) - } - - /// Get all accounts - /// This method will read and deserialize account information, please pay attention to performance issues - pub async fn accounts(&self) -> Result, std::io::Error> { - let mut accounts = Vec::new(); - - for account_id in self.account_ids()? { - if let Ok(account) = self.account(&account_id).await { - accounts.push(account); - } - } - - Ok(accounts) - } - - /// Update account info - pub async fn update_account(&self, member: Member) -> Result<(), std::io::Error> { - // Ensure account exist - if self.account_cfg(&member.id()).is_some() { - let account_cfg_path = self.account_cfg_path(&member.id()); - Member::write_to(&member, account_cfg_path).await?; - return Ok(()); - } - - Err(Error::new(ErrorKind::NotFound, "Account not found!")) - } - - /// Register an account to user directory - pub async fn register_account(&self, member: Member) -> Result<(), std::io::Error> { - // Ensure account not exist - if self.account_cfg(&member.id()).is_some() { - return Err(Error::new( - ErrorKind::DirectoryNotEmpty, - format!("Account `{}` already registered!", member.id()), - )); - } - - // Ensure accounts directory exists - let accounts_dir = self - .local_path - .join(USER_FILE_ACCOUNTS.replace(SELF_ID, "")); - if !accounts_dir.exists() { - fs::create_dir_all(&accounts_dir)?; - } - - // Write config file to accounts dir - let account_cfg_path = self.account_cfg_path(&member.id()); - Member::write_to(&member, account_cfg_path).await?; - - Ok(()) - } - - /// Remove account from user directory - pub fn remove_account(&self, id: &MemberId) -> Result<(), std::io::Error> { - // 
Remove config file if exists - if let Some(account_cfg_path) = self.account_cfg(id) { - fs::remove_file(account_cfg_path)?; - } - - // Remove private key file if exists - if let Some(private_key_path) = self.account_private_key(id) - && private_key_path.exists() - { - fs::remove_file(private_key_path)?; - } - - Ok(()) - } - - /// Try to get the account's configuration file to determine if the account exists - pub fn account_cfg(&self, id: &MemberId) -> Option { - let cfg_file = self.account_cfg_path(id); - if cfg_file.exists() { - Some(cfg_file) - } else { - None - } - } - - /// Try to get the account's private key file to determine if the account has a private key - pub fn account_private_key(&self, id: &MemberId) -> Option { - let key_file = self.account_private_key_path(id); - if key_file.exists() { - Some(key_file) - } else { - None - } - } - - /// Check if account has private key - pub fn has_private_key(&self, id: &MemberId) -> bool { - self.account_private_key(id).is_some() - } - - /// Get the account's configuration file path, but do not check if the file exists - pub fn account_cfg_path(&self, id: &MemberId) -> PathBuf { - self.local_path - .join(USER_FILE_MEMBER.replace(SELF_ID, id.to_string().as_str())) - } - - /// Get the account's private key file path, but do not check if the file exists - pub fn account_private_key_path(&self, id: &MemberId) -> PathBuf { - self.local_path - .join(USER_FILE_KEY.replace(SELF_ID, id.to_string().as_str())) - } -} diff --git a/crates/vcs_data/src/data/vault.rs b/crates/vcs_data/src/data/vault.rs deleted file mode 100644 index 595997a..0000000 --- a/crates/vcs_data/src/data/vault.rs +++ /dev/null @@ -1,132 +0,0 @@ -use std::{env::current_dir, path::PathBuf, sync::Arc}; - -use tokio::fs::create_dir_all; -use vcs_docs::docs::READMES_VAULT_README; - -use crate::{ - constants::{ - REF_SHEET_NAME, SERVER_FILE_README, SERVER_FILE_VAULT, SERVER_PATH_MEMBER_PUB, - SERVER_PATH_MEMBERS, SERVER_PATH_SHEETS, SERVER_PATH_VF_ROOT, 
VAULT_HOST_NAME, - }, - current::{current_vault_path, find_vault_path}, - data::{member::Member, vault::config::VaultConfig}, -}; - -pub mod config; -pub mod member; -pub mod service; -pub mod sheet_share; -pub mod sheets; -pub mod virtual_file; - -pub struct Vault { - config: Arc, - vault_path: PathBuf, -} - -impl Vault { - /// Get vault path - pub fn vault_path(&self) -> &PathBuf { - &self.vault_path - } - - /// Initialize vault - pub fn init(config: VaultConfig, vault_path: impl Into) -> Option { - let vault_path = find_vault_path(vault_path)?; - Some(Self { - config: Arc::new(config), - vault_path, - }) - } - - /// Initialize vault - pub fn init_current_dir(config: VaultConfig) -> Option { - let vault_path = current_vault_path()?; - Some(Self { - config: Arc::new(config), - vault_path, - }) - } - - /// Setup vault - pub async fn setup_vault( - vault_path: impl Into, - vault_name: impl AsRef, - ) -> Result<(), std::io::Error> { - let vault_path: PathBuf = vault_path.into(); - - // Ensure directory is empty - if vault_path.exists() && vault_path.read_dir()?.next().is_some() { - return Err(std::io::Error::new( - std::io::ErrorKind::DirectoryNotEmpty, - "DirectoryNotEmpty", - )); - } - - // 1. Setup main config - let config = VaultConfig::default(); - - // NOTE: - // Do not use the write_to method provided by the ConfigFile trait to store the Vault configuration file - // Instead, use the PROFILES_VAULT content provided by the Documents Repository for writing - - // VaultConfig::write_to(&config, vault_path.join(SERVER_FILE_VAULT)).await?; - let config_content = vcs_docs::docs::PROFILES_VAULT - .replace("{vault_name}", vault_name.as_ref()) - .replace("{user_name}", whoami::username().as_str()) - .replace( - "{date_format}", - chrono::Local::now() - .format("%Y-%m-%d %H:%M") - .to_string() - .as_str(), - ) - .replace("{vault_uuid}", &config.vault_uuid().to_string()); - tokio::fs::write(vault_path.join(SERVER_FILE_VAULT), config_content).await?; - - // 2. 
Setup sheets directory - create_dir_all(vault_path.join(SERVER_PATH_SHEETS)).await?; - - // 3. Setup key directory - create_dir_all(vault_path.join(SERVER_PATH_MEMBER_PUB)).await?; - - // 4. Setup member directory - create_dir_all(vault_path.join(SERVER_PATH_MEMBERS)).await?; - - // 5. Setup storage directory - create_dir_all(vault_path.join(SERVER_PATH_VF_ROOT)).await?; - - let Some(vault) = Vault::init(config, &vault_path) else { - return Err(std::io::Error::other("Failed to initialize vault")); - }; - - // 6. Create host member - vault - .register_member_to_vault(Member::new(VAULT_HOST_NAME)) - .await?; - - // 7. Setup reference sheet - vault - .create_sheet(&REF_SHEET_NAME.to_string(), &VAULT_HOST_NAME.to_string()) - .await?; - - // Final, generate README.md - let readme_content = READMES_VAULT_README; - tokio::fs::write(vault_path.join(SERVER_FILE_README), readme_content).await?; - - Ok(()) - } - - /// Setup vault in current directory - pub async fn setup_vault_current_dir( - vault_name: impl AsRef, - ) -> Result<(), std::io::Error> { - Self::setup_vault(current_dir()?, vault_name).await?; - Ok(()) - } - - /// Get vault configuration - pub fn config(&self) -> &Arc { - &self.config - } -} diff --git a/crates/vcs_data/src/data/vault/config.rs b/crates/vcs_data/src/data/vault/config.rs deleted file mode 100644 index caa8552..0000000 --- a/crates/vcs_data/src/data/vault/config.rs +++ /dev/null @@ -1,233 +0,0 @@ -use std::net::{IpAddr, Ipv4Addr}; - -use cfg_file::ConfigFile; -use serde::{Deserialize, Serialize}; -use uuid::Uuid; - -use crate::constants::{PORT, SERVER_FILE_VAULT}; -use crate::data::member::{Member, MemberId}; - -pub type VaultName = String; -pub type VaultUuid = Uuid; - -#[derive(Serialize, Deserialize, Clone, PartialEq, Default)] -#[serde(rename_all = "lowercase")] -pub enum AuthMode { - /// Use asymmetric keys: both client and server need to register keys, after which they can connect - Key, - - /// Use password: the password stays on the server, 
and the client needs to set the password locally for connection - #[default] - Password, - - /// No authentication: generally used in a strongly secure environment, skipping verification directly - NoAuth, -} - -#[derive(Serialize, Deserialize, Clone, PartialEq, Default)] -#[serde(rename_all = "lowercase")] -pub enum LoggerLevel { - Debug, - Trace, - - #[default] - Info, -} - -#[derive(Serialize, Deserialize, Clone, PartialEq, Default)] -#[serde(rename_all = "lowercase")] -pub enum ServiceEnabled { - Enable, - - #[default] - Disable, -} - -#[derive(Serialize, Deserialize, Clone, PartialEq, Default)] -#[serde(rename_all = "lowercase")] -pub enum BehaviourEnabled { - Yes, - - #[default] - No, -} - -impl Into for ServiceEnabled { - fn into(self) -> bool { - match self { - ServiceEnabled::Enable => true, - ServiceEnabled::Disable => false, - } - } -} - -impl Into for BehaviourEnabled { - fn into(self) -> bool { - match self { - BehaviourEnabled::Yes => true, - BehaviourEnabled::No => false, - } - } -} - -#[derive(Serialize, Deserialize, ConfigFile)] -#[cfg_file(path = SERVER_FILE_VAULT)] -pub struct VaultConfig { - /// Vault uuid, unique identifier for the vault - #[serde(rename = "uuid")] - vault_uuid: VaultUuid, - - /// Vault name, which can be used as the project name and generally serves as a hint - #[serde(rename = "name")] - vault_name: VaultName, - - /// Vault host ids, a list of member id representing administrator identities - #[serde(rename = "hosts")] - vault_host_list: Vec, - - /// Vault server configuration, which will be loaded when connecting to the server - #[serde(rename = "profile")] - server_config: VaultServerConfig, -} - -#[derive(Serialize, Deserialize)] -pub struct VaultServerConfig { - /// Local IP address to bind to when the server starts - #[serde(rename = "bind")] - local_bind: IpAddr, - - /// TCP port to bind to when the server starts - #[serde(rename = "port")] - port: u16, - - /// Enable logging - #[serde(rename = "logger")] - logger: 
Option, - - /// Logger Level - #[serde(rename = "logger_level")] - logger_level: Option, - - /// Whether to enable LAN discovery, allowing members on the same LAN to more easily find the upstream server - #[serde(rename = "lan_discovery")] - lan_discovery: Option, // TODO - - /// Authentication mode for the vault server - /// key: Use asymmetric keys for authentication - /// password: Use a password for authentication - /// noauth: No authentication required, requires a strongly secure environment - #[serde(rename = "auth_mode")] - auth_mode: Option, // TODO -} - -impl Default for VaultConfig { - fn default() -> Self { - Self { - vault_uuid: Uuid::new_v4(), - vault_name: "JustEnoughVault".to_string(), - vault_host_list: Vec::new(), - server_config: VaultServerConfig { - local_bind: IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), - port: PORT, - logger: Some(BehaviourEnabled::default()), - logger_level: Some(LoggerLevel::default()), - lan_discovery: Some(ServiceEnabled::default()), - auth_mode: Some(AuthMode::Key), - }, - } - } -} - -/// Vault Management -impl VaultConfig { - /// Change name of the vault. 
- pub fn change_name(&mut self, name: impl Into) { - self.vault_name = name.into() - } - - /// Add admin - pub fn add_admin(&mut self, member: &Member) { - let uuid = member.id(); - if !self.vault_host_list.contains(&uuid) { - self.vault_host_list.push(uuid); - } - } - - /// Remove admin - pub fn remove_admin(&mut self, member: &Member) { - let id = member.id(); - self.vault_host_list.retain(|x| x != &id); - } - - /// Get vault UUID - pub fn vault_uuid(&self) -> &VaultUuid { - &self.vault_uuid - } - - /// Set vault UUID - pub fn set_vault_uuid(&mut self, vault_uuid: VaultUuid) { - self.vault_uuid = vault_uuid; - } - - /// Get vault name - pub fn vault_name(&self) -> &VaultName { - &self.vault_name - } - - /// Set vault name - pub fn set_vault_name(&mut self, vault_name: VaultName) { - self.vault_name = vault_name; - } - - /// Get vault admin list - pub fn vault_host_list(&self) -> &Vec { - &self.vault_host_list - } - - /// Set vault admin list - pub fn set_vault_host_list(&mut self, vault_host_list: Vec) { - self.vault_host_list = vault_host_list; - } - - /// Get server config - pub fn server_config(&self) -> &VaultServerConfig { - &self.server_config - } - - /// Set server config - pub fn set_server_config(&mut self, server_config: VaultServerConfig) { - self.server_config = server_config; - } -} - -impl VaultServerConfig { - /// Get local bind IP address - pub fn local_bind(&self) -> &IpAddr { - &self.local_bind - } - - /// Get port - pub fn port(&self) -> u16 { - self.port - } - - /// Check if LAN discovery is enabled - pub fn is_lan_discovery_enabled(&self) -> bool { - self.lan_discovery.clone().unwrap_or_default().into() - } - - /// Get logger enabled status - pub fn is_logger_enabled(&self) -> bool { - self.logger.clone().unwrap_or_default().into() - } - - /// Get logger level - pub fn logger_level(&self) -> LoggerLevel { - self.logger_level.clone().unwrap_or_default() - } - - /// Get authentication mode - pub fn auth_mode(&self) -> AuthMode { - 
self.auth_mode.clone().unwrap_or_default() - } -} diff --git a/crates/vcs_data/src/data/vault/member.rs b/crates/vcs_data/src/data/vault/member.rs deleted file mode 100644 index 9d22d09..0000000 --- a/crates/vcs_data/src/data/vault/member.rs +++ /dev/null @@ -1,144 +0,0 @@ -use std::{ - fs, - io::{Error, ErrorKind}, - path::PathBuf, -}; - -use cfg_file::config::ConfigFile; - -use crate::{ - constants::{ - SERVER_FILE_MEMBER_INFO, SERVER_FILE_MEMBER_PUB, SERVER_PATH_MEMBERS, - SERVER_SUFFIX_MEMBER_INFO_NO_DOT, - }, - data::{ - member::{Member, MemberId}, - vault::Vault, - }, -}; - -const ID_PARAM: &str = "{member_id}"; - -/// Member Manage -impl Vault { - /// Read member from configuration file - pub async fn member(&self, id: &MemberId) -> Result { - if let Some(cfg_file) = self.member_cfg(id) { - let member = Member::read_from(cfg_file).await?; - return Ok(member); - } - - Err(Error::new(ErrorKind::NotFound, "Member not found!")) - } - - /// List all member IDs in the vault - pub fn member_ids(&self) -> Result, std::io::Error> { - let members_path = self.vault_path.join(SERVER_PATH_MEMBERS); - - if !members_path.exists() { - return Ok(Vec::new()); - } - - let mut member_ids = Vec::new(); - - for entry in fs::read_dir(members_path)? { - let entry = entry?; - let path = entry.path(); - - if path.is_file() - && let Some(file_name) = path.file_stem().and_then(|s| s.to_str()) - && path.extension().and_then(|s| s.to_str()) - == Some(SERVER_SUFFIX_MEMBER_INFO_NO_DOT) - { - member_ids.push(file_name.to_string()); - } - } - - Ok(member_ids) - } - - /// Get all members - /// This method will read and deserialize member information, please pay attention to performance issues - pub async fn members(&self) -> Result, std::io::Error> { - let mut members = Vec::new(); - - for member_id in self.member_ids()? 
{ - if let Ok(member) = self.member(&member_id).await { - members.push(member); - } - } - - Ok(members) - } - - /// Update member info - pub async fn update_member(&self, member: Member) -> Result<(), std::io::Error> { - // Ensure member exist - if self.member_cfg(&member.id()).is_some() { - let member_cfg_path = self.member_cfg_path(&member.id()); - Member::write_to(&member, member_cfg_path).await?; - return Ok(()); - } - - Err(Error::new(ErrorKind::NotFound, "Member not found!")) - } - - /// Register a member to vault - pub async fn register_member_to_vault(&self, member: Member) -> Result<(), std::io::Error> { - // Ensure member not exist - if self.member_cfg(&member.id()).is_some() { - return Err(Error::new( - ErrorKind::DirectoryNotEmpty, - format!("Member `{}` already registered!", member.id()), - )); - } - - // Wrtie config file to member dir - let member_cfg_path = self.member_cfg_path(&member.id()); - Member::write_to(&member, member_cfg_path).await?; - - Ok(()) - } - - /// Remove member from vault - pub fn remove_member_from_vault(&self, id: &MemberId) -> Result<(), std::io::Error> { - // Ensure member exist - if let Some(member_cfg_path) = self.member_cfg(id) { - fs::remove_file(member_cfg_path)?; - } - - Ok(()) - } - - /// Try to get the member's configuration file to determine if the member exists - pub fn member_cfg(&self, id: &MemberId) -> Option { - let cfg_file = self.member_cfg_path(id); - if cfg_file.exists() { - Some(cfg_file) - } else { - None - } - } - - /// Try to get the member's public key file to determine if the member has login permission - pub fn member_key(&self, id: &MemberId) -> Option { - let key_file = self.member_key_path(id); - if key_file.exists() { - Some(key_file) - } else { - None - } - } - - /// Get the member's configuration file path, but do not check if the file exists - pub fn member_cfg_path(&self, id: &MemberId) -> PathBuf { - self.vault_path - .join(SERVER_FILE_MEMBER_INFO.replace(ID_PARAM, id.to_string().as_str())) - 
} - - /// Get the member's public key file path, but do not check if the file exists - pub fn member_key_path(&self, id: &MemberId) -> PathBuf { - self.vault_path - .join(SERVER_FILE_MEMBER_PUB.replace(ID_PARAM, id.to_string().as_str())) - } -} diff --git a/crates/vcs_data/src/data/vault/service.rs b/crates/vcs_data/src/data/vault/service.rs deleted file mode 100644 index 3f59c30..0000000 --- a/crates/vcs_data/src/data/vault/service.rs +++ /dev/null @@ -1,40 +0,0 @@ -use std::path::PathBuf; - -use crate::{constants::SERVER_FILE_LOCKFILE, data::vault::Vault}; - -impl Vault { - /// Get the path of the lock file for the current Vault - pub fn lock_file_path(&self) -> PathBuf { - self.vault_path().join(SERVER_FILE_LOCKFILE) - } - - /// Check if the current Vault is locked - pub fn is_locked(&self) -> bool { - self.lock_file_path().exists() - } - - /// Lock the current Vault - pub fn lock(&self) -> Result<(), std::io::Error> { - if self.is_locked() { - return Err(std::io::Error::new( - std::io::ErrorKind::AlreadyExists, - format!( - "Vault is locked! 
This indicates a service is already running here.\nPlease stop other services or delete the lock file at the vault root directory: {}", - self.lock_file_path().display() - ), - )); - } - std::fs::File::create(self.lock_file_path())?; - Ok(()) - } - - /// Unlock the current Vault - pub fn unlock(&self) -> Result<(), std::io::Error> { - if let Err(e) = std::fs::remove_file(self.lock_file_path()) - && e.kind() != std::io::ErrorKind::NotFound - { - return Err(e); - } - Ok(()) - } -} diff --git a/crates/vcs_data/src/data/vault/sheet_share.rs b/crates/vcs_data/src/data/vault/sheet_share.rs deleted file mode 100644 index 1e692f1..0000000 --- a/crates/vcs_data/src/data/vault/sheet_share.rs +++ /dev/null @@ -1,424 +0,0 @@ -use std::{collections::HashMap, io::Error, path::PathBuf}; - -use cfg_file::{ConfigFile, config::ConfigFile}; -use rand::{Rng, rng}; -use serde::{Deserialize, Serialize}; -use string_proc::{format_path, snake_case}; -use tokio::fs; - -use crate::{ - constants::{ - SERVER_FILE_SHEET_SHARE, SERVER_PATH_SHARES, SERVER_SUFFIX_SHEET_SHARE_FILE_NO_DOT, - }, - data::{ - member::MemberId, - sheet::{Sheet, SheetMappingMetadata, SheetName, SheetPathBuf}, - vault::Vault, - }, -}; - -pub type SheetShareId = String; - -const SHEET_NAME: &str = "{sheet_name}"; -const SHARE_ID: &str = "{share_id}"; - -#[derive(Default, Serialize, Deserialize, ConfigFile, Clone, Debug)] -pub struct Share { - /// Sharer: the member who created this share item - #[serde(rename = "sharer")] - pub sharer: MemberId, - - /// Description of the share item - #[serde(rename = "desc")] - pub description: String, - - /// Metadata path - #[serde(skip)] - pub path: Option, - - /// From: which sheet the member exported the file from - #[serde(rename = "from")] - pub from_sheet: SheetName, - - /// Mappings: the sheet mappings contained in the share item - #[serde(rename = "map")] - pub mappings: HashMap, -} - -#[derive(Default, Serialize, Deserialize, ConfigFile, Clone, PartialEq, Eq)] -pub enum 
ShareMergeMode { - /// If a path or file already exists during merge, prioritize the incoming share - /// Path conflict: replace the mapping content at the local path with the incoming content - /// File conflict: delete the original file mapping and create a new one - Overwrite, - - /// If a path or file already exists during merge, skip overwriting this entry - Skip, - - /// Pre-check for conflicts, prohibit merging if any conflicts are found - #[default] - Safe, - - /// Reject all shares - RejectAll, -} - -#[derive(Default, Serialize, Deserialize, ConfigFile, Clone)] -pub struct ShareMergeConflict { - /// Duplicate mappings exist - pub duplicate_mapping: Vec, - - /// Duplicate files exist - pub duplicate_file: Vec, -} - -impl ShareMergeConflict { - /// Check if there are no conflicts - pub fn ok(&self) -> bool { - self.duplicate_mapping.is_empty() && self.duplicate_file.is_empty() - } -} - -impl Vault { - /// Get the path of a share item in a sheet - pub fn share_file_path(&self, sheet_name: &SheetName, share_id: &SheetShareId) -> PathBuf { - let sheet_name = snake_case!(sheet_name.clone()); - let share_id = share_id.clone(); - - // Format the path to remove "./" prefix and normalize it - let path_str = SERVER_FILE_SHEET_SHARE - .replace(SHEET_NAME, &sheet_name) - .replace(SHARE_ID, &share_id); - - // Use format_path to normalize the path - match format_path::format_path_str(&path_str) { - Ok(normalized_path) => self.vault_path().join(normalized_path), - Err(_) => { - // Fallback to original behavior if formatting fails - self.vault_path().join(path_str) - } - } - } - - /// Get the actual paths of all share items in a sheet - pub async fn share_file_paths(&self, sheet_name: &SheetName) -> Vec { - let sheet_name = snake_case!(sheet_name.clone()); - let shares_dir = self - .vault_path() - .join(SERVER_PATH_SHARES.replace(SHEET_NAME, &sheet_name)); - - let mut result = Vec::new(); - if let Ok(mut entries) = fs::read_dir(shares_dir).await { - while let 
Ok(Some(entry)) = entries.next_entry().await { - let path = entry.path(); - if path.is_file() - && path.extension().and_then(|s| s.to_str()) - == Some(SERVER_SUFFIX_SHEET_SHARE_FILE_NO_DOT) - { - result.push(path); - } - } - } - result - } -} - -impl<'a> Sheet<'a> { - /// Get the shares of a sheet - pub async fn get_shares(&self) -> Result, std::io::Error> { - let paths = self.vault_reference.share_file_paths(&self.name).await; - let mut shares = Vec::new(); - - for path in paths { - match Share::read_from(&path).await { - Ok(mut share) => { - share.path = Some(path); - shares.push(share); - } - Err(e) => return Err(e), - } - } - - Ok(shares) - } - - /// Get a share of a sheet - pub async fn get_share(&self, share_id: &SheetShareId) -> Result { - let path = self.vault_reference.share_file_path(&self.name, share_id); - let mut share = Share::read_from(&path).await?; - share.path = Some(path); - Ok(share) - } - - /// Import a share of a sheet by its ID - pub async fn merge_share_by_id( - self, - share_id: &SheetShareId, - share_merge_mode: ShareMergeMode, - ) -> Result<(), std::io::Error> { - let share = self.get_share(share_id).await?; - self.merge_share(share, share_merge_mode).await - } - - /// Import a share of a sheet - pub async fn merge_share( - mut self, - share: Share, - share_merge_mode: ShareMergeMode, - ) -> Result<(), std::io::Error> { - // Backup original data and edit based on this backup - let mut copy_share = share.clone(); - let mut copy_sheet = self.clone_data(); - - // Pre-check - let conflicts = self.precheck(©_share); - let mut reject_mode = false; - - match share_merge_mode { - // Safe mode: conflicts are not allowed - ShareMergeMode::Safe => { - // Conflicts found - if !conflicts.ok() { - // Do nothing, return Error - return Err(Error::new( - std::io::ErrorKind::AlreadyExists, - "Mappings or files already exist!", - )); - } - } - // Overwrite mode: when conflicts occur, prioritize the share item - ShareMergeMode::Overwrite => { - // Handle 
duplicate mappings - for path in conflicts.duplicate_mapping { - // Get the share data - let Some(share_value) = copy_share.mappings.remove(&path) else { - return Err(Error::new( - std::io::ErrorKind::NotFound, - format!("Share value `{}` not found!", &path.display()), - )); - }; - // Overwrite - copy_sheet.mapping_mut().insert(path, share_value); - } - - // Handle duplicate IDs - for path in conflicts.duplicate_file { - // Get the share data - let Some(share_value) = copy_share.mappings.remove(&path) else { - return Err(Error::new( - std::io::ErrorKind::NotFound, - format!("Share value `{}` not found!", &path.display()), - )); - }; - - // Extract the file ID - let conflict_vfid = &share_value.id; - - // Through the sheet's ID mapping - let Some(id_mapping) = copy_sheet.id_mapping_mut() else { - return Err(Error::new( - std::io::ErrorKind::NotFound, - "Id mapping not found!", - )); - }; - - // Get the original path from the ID mapping - let Some(raw_path) = id_mapping.remove(conflict_vfid) else { - return Err(Error::new( - std::io::ErrorKind::NotFound, - format!("The path of virtual file `{}' not found!", conflict_vfid), - )); - }; - - // Remove the original path mapping - if copy_sheet.mapping_mut().remove(&raw_path).is_none() { - return Err(Error::new( - std::io::ErrorKind::NotFound, - format!("Remove mapping `{}` failed!", &raw_path.display()), - )); - } - // Insert the new item - copy_sheet.mapping_mut().insert(path, share_value); - } - } - // Skip mode: when conflicts occur, prioritize the local sheet - ShareMergeMode::Skip => { - // Directly remove conflicting items - for path in conflicts.duplicate_mapping { - copy_share.mappings.remove(&path); - } - for path in conflicts.duplicate_file { - copy_share.mappings.remove(&path); - } - } - // Reject all mode: reject all shares - ShareMergeMode::RejectAll => { - reject_mode = true; // Only mark as rejected - } - } - - if !reject_mode { - // Subsequent merging - copy_sheet - .mapping_mut() - 
.extend(copy_share.mappings.into_iter()); - - // Merge completed - self.data = copy_sheet; // Write the result - - // Merge completed, consume the sheet - self.persist().await.map_err(|err| { - Error::new( - std::io::ErrorKind::NotFound, - format!("Write sheet failed: {}", err), - ) - })?; - } - - // Persistence succeeded, continue to consume the share item - share.remove().await.map_err(|err| { - Error::new( - std::io::ErrorKind::NotFound, - format!("Remove share failed: {}", err.1), - ) - }) - } - - // Pre-check whether the share can be imported into the current sheet without conflicts - fn precheck(&self, share: &Share) -> ShareMergeConflict { - let mut conflicts = ShareMergeConflict::default(); - - for (mapping, metadata) in &share.mappings { - // Check for duplicate mappings - if self.mapping().contains_key(mapping.as_path()) { - conflicts.duplicate_mapping.push(mapping.clone()); - continue; - } - - // Check for duplicate IDs - if let Some(id_mapping) = self.id_mapping() { - if id_mapping.contains_key(&metadata.id) { - conflicts.duplicate_file.push(mapping.clone()); - continue; - } - } - } - - conflicts - } - - /// Share mappings with another sheet - pub async fn share_mappings( - &self, - other_sheet: &SheetName, - mappings: Vec, - sharer: &MemberId, - description: String, - ) -> Result { - let other_sheet = snake_case!(other_sheet.clone()); - let sharer = snake_case!(sharer.clone()); - - // Check if the sheet exists - let sheet_names = self.vault_reference.sheet_names()?; - if !sheet_names.contains(&other_sheet) { - return Err(Error::new( - std::io::ErrorKind::NotFound, - format!("Sheet `{}` not found!", &other_sheet), - )); - } - - // Check if the target file exists, regenerate ID if path already exists, up to 20 attempts - let target_path = { - let mut id; - let mut share_path; - let mut attempts = 0; - - loop { - id = Share::gen_share_id(&sharer); - share_path = self.vault_reference.share_file_path(&other_sheet, &id); - - if !share_path.exists() { - break 
share_path; - } - - attempts += 1; - if attempts >= 20 { - return Err(Error::new( - std::io::ErrorKind::AlreadyExists, - "Failed to generate unique share ID after 20 attempts!", - )); - } - } - }; - - // Validate that the share is valid - let mut share_mappings = HashMap::new(); - for mapping_path in &mappings { - if let Some(metadata) = self.mapping().get(mapping_path) { - share_mappings.insert(mapping_path.clone(), metadata.clone()); - } else { - return Err(Error::new( - std::io::ErrorKind::NotFound, - format!("Mapping `{}` not found in sheet!", mapping_path.display()), - )); - } - } - - // Build share data - let share_data = Share { - sharer, - description, - path: None, // This is only needed during merging (reading), no need to serialize now - from_sheet: self.name.clone(), - mappings: share_mappings, - }; - - // Write data - Share::write_to(&share_data, target_path).await?; - - Ok(share_data) - } -} - -impl Share { - /// Generate a share ID for a given sharer - pub fn gen_share_id(sharer: &MemberId) -> String { - let sharer_snake = snake_case!(sharer.clone()); - let random_part: String = rng() - .sample_iter(&rand::distr::Alphanumeric) - .take(8) - .map(char::from) - .collect(); - format!("{}@{}", sharer_snake, random_part) - } - - /// Delete a share (reject or remove the share item) - /// If deletion succeeds, returns `Ok(())`; - /// If deletion fails, returns `Err((self, std::io::Error))`, containing the original share object and the error information. 
- pub async fn remove(self) -> Result<(), (Self, std::io::Error)> { - let Some(path) = &self.path else { - return Err(( - self, - Error::new(std::io::ErrorKind::NotFound, "No share path recorded!"), - )); - }; - - if !path.exists() { - return Err(( - self, - Error::new(std::io::ErrorKind::NotFound, "No share file exists!"), - )); - } - - match fs::remove_file(path).await { - Err(err) => Err(( - self, - Error::new( - std::io::ErrorKind::Other, - format!("Failed to delete share file: {}", err), - ), - )), - Ok(_) => Ok(()), - } - } -} diff --git a/crates/vcs_data/src/data/vault/sheets.rs b/crates/vcs_data/src/data/vault/sheets.rs deleted file mode 100644 index c22c849..0000000 --- a/crates/vcs_data/src/data/vault/sheets.rs +++ /dev/null @@ -1,274 +0,0 @@ -use std::{collections::HashMap, io::Error}; - -use cfg_file::config::ConfigFile; -use string_proc::snake_case; -use tokio::fs; - -use crate::{ - constants::{SERVER_PATH_SHEETS, SERVER_SUFFIX_SHEET_FILE_NO_DOT}, - data::{ - member::MemberId, - sheet::{Sheet, SheetData, SheetName}, - vault::Vault, - }, -}; - -/// Vault Sheets Management -impl Vault { - /// Load all sheets in the vault - /// - /// It is generally not recommended to call this function frequently. - /// Although a vault typically won't contain too many sheets, - /// if individual sheet contents are large, this operation may cause - /// significant performance bottlenecks. 
- pub async fn sheets<'a>(&'a self) -> Result>, std::io::Error> { - let sheet_names = self.sheet_names()?; - let mut sheets = Vec::new(); - - for sheet_name in sheet_names { - let sheet = self.sheet(&sheet_name).await?; - sheets.push(sheet); - } - - Ok(sheets) - } - - /// Search for all sheet names in the vault - /// - /// The complexity of this operation is proportional to the number of sheets, - /// but generally there won't be too many sheets in a Vault - pub fn sheet_names(&self) -> Result, std::io::Error> { - // Get the sheets directory path - let sheets_dir = self.vault_path.join(SERVER_PATH_SHEETS); - - // If the directory doesn't exist, return an empty list - if !sheets_dir.exists() { - return Ok(vec![]); - } - - let mut sheet_names = Vec::new(); - - // Iterate through all files in the sheets directory - for entry in std::fs::read_dir(sheets_dir)? { - let entry = entry?; - let path = entry.path(); - - // Check if it's a YAML file - if path.is_file() - && path - .extension() - .is_some_and(|ext| ext == SERVER_SUFFIX_SHEET_FILE_NO_DOT) - && let Some(file_stem) = path.file_stem().and_then(|s| s.to_str()) - { - // Create a new SheetName and add it to the result list - sheet_names.push(file_stem.to_string()); - } - } - - Ok(sheet_names) - } - - /// Read a sheet from its name - /// - /// If the sheet information is successfully found in the vault, - /// it will be deserialized and read as a sheet. - /// This is the only correct way to obtain a sheet instance. 
- pub async fn sheet<'a>(&'a self, sheet_name: &SheetName) -> Result, std::io::Error> { - let sheet_name = snake_case!(sheet_name.clone()); - - // Get the path to the sheet file - let sheet_path = Sheet::sheet_path_with_name(self, &sheet_name); - - // Ensure the sheet file exists - if !sheet_path.exists() { - // If the sheet does not exist, try to restore it from the trash - if self.restore_sheet(&sheet_name).await.is_err() { - // If restoration fails, return an error - return Err(Error::new( - std::io::ErrorKind::NotFound, - format!("Sheet `{}` not found!", sheet_name), - )); - } - } - - // Read the sheet data from the file - let data = SheetData::read_from(sheet_path).await?; - - Ok(Sheet { - name: sheet_name.clone(), - data, - vault_reference: self, - }) - } - - /// Create a sheet locally and return the sheet instance - /// - /// This method creates a new sheet in the vault with the given name and holder. - /// It will verify that the member exists and that the sheet doesn't already exist - /// before creating the sheet file with default empty data. 
- pub async fn create_sheet<'a>( - &'a self, - sheet_name: &SheetName, - holder: &MemberId, - ) -> Result, std::io::Error> { - let sheet_name = snake_case!(sheet_name.clone()); - - // Ensure member exists - if !self.member_cfg_path(holder).exists() { - return Err(Error::new( - std::io::ErrorKind::NotFound, - format!("Member `{}` not found!", &holder), - )); - } - - // Ensure sheet does not already exist - let sheet_file_path = Sheet::sheet_path_with_name(self, &sheet_name); - if sheet_file_path.exists() { - return Err(Error::new( - std::io::ErrorKind::AlreadyExists, - format!("Sheet `{}` already exists!", &sheet_name), - )); - } - - // Create the sheet file - let sheet_data = SheetData { - holder: Some(holder.clone()), - mapping: HashMap::new(), - id_mapping: None, - write_count: 0, - }; - SheetData::write_to(&sheet_data, sheet_file_path).await?; - - Ok(Sheet { - name: sheet_name, - data: sheet_data, - vault_reference: self, - }) - } - - /// Delete the sheet file from local disk by name - /// - /// This method will remove the sheet file with the given name from the vault. - /// It will verify that the sheet exists before attempting to delete it. - /// If the sheet is successfully deleted, it will return Ok(()). - /// - /// Warning: This operation is dangerous. Deleting a sheet will cause local workspaces - /// using this sheet to become invalid. Please ensure the sheet is not currently in use - /// and will not be used in the future. - /// - /// For a safer deletion method, consider using `delete_sheet_safety`. - /// - /// Note: This function is intended for server-side use only and should not be - /// arbitrarily called by other members to prevent unauthorized data deletion. 
- pub async fn delete_sheet(&self, sheet_name: &SheetName) -> Result<(), std::io::Error> { - let sheet_name = snake_case!(sheet_name.clone()); - - // Ensure sheet exists - let sheet_file_path = Sheet::sheet_path_with_name(self, &sheet_name); - if !sheet_file_path.exists() { - return Err(Error::new( - std::io::ErrorKind::NotFound, - format!("Sheet `{}` not found!", &sheet_name), - )); - } - - // Delete the sheet file - fs::remove_file(sheet_file_path).await?; - - Ok(()) - } - - /// Safely delete the sheet - /// - /// The sheet will be moved to the trash directory, ensuring it does not appear in the - /// results of `sheets` and `sheet_names` methods. - /// However, if the sheet's holder attempts to access the sheet through the `sheet` method, - /// the system will automatically restore it from the trash directory. - /// This means: the sheet will only permanently remain in the trash directory, - /// waiting for manual cleanup by an administrator, when it is truly no longer in use. - /// - /// This is a safer deletion method because it provides the possibility of recovery, - /// avoiding irreversible data loss caused by accidental deletion. - /// - /// Note: This function is intended for server-side use only and should not be - /// arbitrarily called by other members to prevent unauthorized data deletion. 
- pub async fn delete_sheet_safely(&self, sheet_name: &SheetName) -> Result<(), std::io::Error> { - let sheet_name = snake_case!(sheet_name.clone()); - - // Ensure the sheet exists - let sheet_file_path = Sheet::sheet_path_with_name(self, &sheet_name); - if !sheet_file_path.exists() { - return Err(Error::new( - std::io::ErrorKind::NotFound, - format!("Sheet `{}` not found!", &sheet_name), - )); - } - - // Create the trash directory - let trash_dir = self.vault_path.join(".trash"); - if !trash_dir.exists() { - fs::create_dir_all(&trash_dir).await?; - } - - // Generate a unique filename in the trash - let timestamp = std::time::SystemTime::now() - .duration_since(std::time::UNIX_EPOCH) - .unwrap() - .as_millis(); - let trash_file_name = format!( - "{}_{}.{}", - sheet_name, timestamp, SERVER_SUFFIX_SHEET_FILE_NO_DOT - ); - let trash_path = trash_dir.join(trash_file_name); - - // Move the sheet file to the trash - fs::rename(&sheet_file_path, &trash_path).await?; - - Ok(()) - } - - /// Restore the sheet from the trash - /// - /// Restore the specified sheet from the trash to its original location, making it accessible normally. - pub async fn restore_sheet(&self, sheet_name: &SheetName) -> Result<(), std::io::Error> { - let sheet_name = snake_case!(sheet_name.clone()); - - // Search for matching files in the trash - let trash_dir = self.vault_path.join(".trash"); - if !trash_dir.exists() { - return Err(Error::new( - std::io::ErrorKind::NotFound, - "Trash directory does not exist!".to_string(), - )); - } - - let mut found_path = None; - for entry in std::fs::read_dir(&trash_dir)? 
{ - let entry = entry?; - let path = entry.path(); - - if path.is_file() - && let Some(file_name) = path.file_stem().and_then(|s| s.to_str()) - { - // Check if the filename starts with the sheet name - if file_name.starts_with(&sheet_name) { - found_path = Some(path); - break; - } - } - } - - let trash_path = found_path.ok_or_else(|| { - Error::new( - std::io::ErrorKind::NotFound, - format!("Sheet `{}` not found in trash!", &sheet_name), - ) - })?; - - // Restore the sheet to its original location - let original_path = Sheet::sheet_path_with_name(self, &sheet_name); - fs::rename(&trash_path, &original_path).await?; - - Ok(()) - } -} diff --git a/crates/vcs_data/src/data/vault/virtual_file.rs b/crates/vcs_data/src/data/vault/virtual_file.rs deleted file mode 100644 index 8dbcb5d..0000000 --- a/crates/vcs_data/src/data/vault/virtual_file.rs +++ /dev/null @@ -1,506 +0,0 @@ -use std::{ - collections::HashMap, - io::{Error, ErrorKind}, - path::PathBuf, -}; - -use cfg_file::{ConfigFile, config::ConfigFile}; -use serde::{Deserialize, Serialize}; -use string_proc::{dot_case, snake_case}; -use tcp_connection::instance::ConnectionInstance; -use tokio::fs; -use uuid::Uuid; - -use crate::{ - constants::{ - SERVER_FILE_VF_META, SERVER_FILE_VF_VERSION_INSTANCE, SERVER_PATH_VF_ROOT, - SERVER_PATH_VF_STORAGE, SERVER_PATH_VF_TEMP, - }, - data::{member::MemberId, vault::Vault}, -}; - -pub type VirtualFileId = String; -pub type VirtualFileVersion = String; - -const VF_PREFIX: &str = "vf-"; -const ID_PARAM: &str = "{vf_id}"; -const ID_INDEX: &str = "{vf_index}"; -const VERSION_PARAM: &str = "{vf_version}"; -const TEMP_NAME: &str = "{temp_name}"; - -pub struct VirtualFile<'a> { - /// Unique identifier for the virtual file - id: VirtualFileId, - - /// Reference of Vault - current_vault: &'a Vault, -} - -#[derive(Default, Clone, Serialize, Deserialize, ConfigFile)] -pub struct VirtualFileMeta { - /// Current version of the virtual file - #[serde(rename = "ver")] - current_version: 
VirtualFileVersion, - - /// The member who holds the edit right of the file - #[serde(rename = "holder")] - hold_member: MemberId, - - /// Description of each version - #[serde(rename = "descs")] - version_description: HashMap, - - /// Histories - #[serde(rename = "histories")] - histories: Vec, -} - -#[derive(Debug, Default, Clone, Serialize, Deserialize)] -pub struct VirtualFileVersionDescription { - /// The member who created this version - #[serde(rename = "creator")] - pub creator: MemberId, - - /// The description of this version - #[serde(rename = "desc")] - pub description: String, -} - -impl VirtualFileVersionDescription { - /// Create a new version description - pub fn new(creator: MemberId, description: String) -> Self { - Self { - creator, - description, - } - } -} - -/// Virtual File Operations -impl Vault { - /// Generate a temporary path for receiving - pub fn virtual_file_temp_path(&self) -> PathBuf { - let random_receive_name = format!("{}", uuid::Uuid::new_v4()); - self.vault_path - .join(SERVER_PATH_VF_TEMP.replace(TEMP_NAME, &random_receive_name)) - } - - /// Get the directory where virtual files are stored - pub fn virtual_file_storage_dir(&self) -> PathBuf { - self.vault_path().join(SERVER_PATH_VF_ROOT) - } - - /// Get the directory where a specific virtual file is stored - pub fn virtual_file_dir(&self, id: &VirtualFileId) -> Result { - Ok(self.vault_path().join( - SERVER_PATH_VF_STORAGE - .replace(ID_PARAM, &id.to_string()) - .replace(ID_INDEX, &Self::vf_index(id)?), - )) - } - - // Generate index path of virtual file - fn vf_index(id: &VirtualFileId) -> Result { - // Remove VF_PREFIX if present - let id_str = if let Some(stripped) = id.strip_prefix(VF_PREFIX) { - stripped - } else { - id - }; - - // Extract the first part before the first hyphen - let first_part = id_str.split('-').next().ok_or_else(|| { - std::io::Error::new( - std::io::ErrorKind::InvalidInput, - "Invalid virtual file ID format: no hyphen found", - ) - })?; - - // Ensure 
the first part has at least 4 characters - if first_part.len() < 4 { - return Err(std::io::Error::new( - std::io::ErrorKind::InvalidInput, - "Invalid virtual file ID format: first part must have at least 4 characters", - ))?; - } - - // Take only the first 4 characters and split into two 2-character chunks - let first_four = &first_part[0..4]; - let mut path = String::new(); - for i in (0..first_four.len()).step_by(2) { - if i > 0 { - path.push('/'); - } - path.push_str(&first_four[i..i + 2]); - } - - Ok(path) - } - - /// Get the directory where a specific virtual file's metadata is stored - pub fn virtual_file_real_path( - &self, - id: &VirtualFileId, - version: &VirtualFileVersion, - ) -> PathBuf { - self.vault_path().join( - SERVER_FILE_VF_VERSION_INSTANCE - .replace(ID_PARAM, &id.to_string()) - .replace(ID_INDEX, &Self::vf_index(id).unwrap_or_default()) - .replace(VERSION_PARAM, &version.to_string()), - ) - } - - /// Get the directory where a specific virtual file's metadata is stored - pub fn virtual_file_meta_path(&self, id: &VirtualFileId) -> PathBuf { - self.vault_path().join( - SERVER_FILE_VF_META - .replace(ID_PARAM, &id.to_string()) - .replace(ID_INDEX, &Self::vf_index(id).unwrap_or_default()), - ) - } - - /// Get the virtual file with the given ID - pub fn virtual_file(&self, id: &VirtualFileId) -> Result, std::io::Error> { - let dir = self.virtual_file_dir(id); - if dir?.exists() { - Ok(VirtualFile { - id: id.clone(), - current_vault: self, - }) - } else { - Err(std::io::Error::new( - std::io::ErrorKind::NotFound, - "Cannot found virtual file!", - )) - } - } - - /// Get the meta data of the virtual file with the given ID - pub async fn virtual_file_meta( - &self, - id: &VirtualFileId, - ) -> Result { - let dir = self.virtual_file_meta_path(id); - let metadata = VirtualFileMeta::read_from(dir).await?; - Ok(metadata) - } - - /// Write the meta data of the virtual file with the given ID - pub async fn write_virtual_file_meta( - &self, - id: 
&VirtualFileId, - meta: &VirtualFileMeta, - ) -> Result<(), std::io::Error> { - let dir = self.virtual_file_meta_path(id); - VirtualFileMeta::write_to(meta, dir).await?; - Ok(()) - } - - /// Create a virtual file from a connection instance - /// - /// It's the only way to create virtual files! - /// - /// When the target machine executes `write_file`, use this function instead of `read_file`, - /// and provide the member ID of the transmitting member. - /// - /// The system will automatically receive the file and - /// create the virtual file. - pub async fn create_virtual_file_from_connection( - &self, - instance: &mut ConnectionInstance, - member_id: &MemberId, - ) -> Result { - const FIRST_VERSION: &str = "0.1.0"; - let receive_path = self.virtual_file_temp_path(); - let new_id = format!("{}{}", VF_PREFIX, Uuid::new_v4()); - let move_path = self.virtual_file_real_path(&new_id, &FIRST_VERSION.to_string()); - - match instance.read_file(receive_path.clone()).await { - Ok(_) => { - // Read successful, create virtual file - // Create default version description - let mut version_description = - HashMap::::new(); - version_description.insert( - FIRST_VERSION.to_string(), - VirtualFileVersionDescription { - creator: member_id.clone(), - description: "Track".to_string(), - }, - ); - // Create metadata - let mut meta = VirtualFileMeta { - current_version: FIRST_VERSION.to_string(), - hold_member: member_id.clone(), // The holder of the newly created virtual file is the creator by default - version_description, - histories: Vec::default(), - }; - - // Add first version - meta.histories.push(FIRST_VERSION.to_string()); - - // Write metadata to file - VirtualFileMeta::write_to(&meta, self.virtual_file_meta_path(&new_id)).await?; - - // Move temp file to virtual file directory - if let Some(parent) = move_path.parent() - && !parent.exists() - { - fs::create_dir_all(parent).await?; - } - fs::rename(receive_path, move_path).await?; - - Ok(new_id) - } - Err(e) => { - // Read 
failed, remove temp file. - if receive_path.exists() { - fs::remove_file(receive_path).await?; - } - - Err(Error::other(e)) - } - } - } - - /// Update a virtual file from a connection instance - /// - /// It's the only way to update virtual files! - /// When the target machine executes `write_file`, use this function instead of `read_file`, - /// and provide the member ID of the transmitting member. - /// - /// The system will automatically receive the file and - /// update the virtual file. - /// - /// Note: The specified member must hold the edit right of the file, - /// otherwise the file reception will not be allowed. - /// - /// Make sure to obtain the edit right of the file before calling this function. - pub async fn update_virtual_file_from_connection( - &self, - instance: &mut ConnectionInstance, - member: &MemberId, - virtual_file_id: &VirtualFileId, - new_version: &VirtualFileVersion, - description: VirtualFileVersionDescription, - ) -> Result<(), std::io::Error> { - let new_version = dot_case!(new_version.clone()); - let mut meta = self.virtual_file_meta(virtual_file_id).await?; - - // Check if the member has edit right - self.check_virtual_file_edit_right(member, virtual_file_id) - .await?; - - // Check if the new version already exists - if meta.version_description.contains_key(&new_version) { - return Err(Error::new( - ErrorKind::AlreadyExists, - format!( - "Version `{}` already exists for virtual file `{}`", - new_version, virtual_file_id - ), - )); - } - - // Verify success - let receive_path = self.virtual_file_temp_path(); - let move_path = self.virtual_file_real_path(virtual_file_id, &new_version); - - match instance.read_file(receive_path.clone()).await { - Ok(_) => { - // Read success, move temp file to real path. 
- fs::rename(receive_path, move_path).await?; - - // Update metadata - meta.current_version = new_version.clone(); - meta.version_description - .insert(new_version.clone(), description); - meta.histories.push(new_version); - VirtualFileMeta::write_to(&meta, self.virtual_file_meta_path(virtual_file_id)) - .await?; - - Ok(()) - } - Err(e) => { - // Read failed, remove temp file. - if receive_path.exists() { - fs::remove_file(receive_path).await?; - } - - Err(Error::other(e)) - } - } - } - - /// Update virtual file from existing version - /// - /// This operation creates a new version based on the specified old version file instance. - /// The new version will retain the same version name as the old version, but use a different version number. - /// After the update, this version will be considered newer than the original version when comparing versions. - pub async fn update_virtual_file_from_exist_version( - &self, - member: &MemberId, - virtual_file_id: &VirtualFileId, - old_version: &VirtualFileVersion, - ) -> Result<(), std::io::Error> { - let old_version = snake_case!(old_version.clone()); - let mut meta = self.virtual_file_meta(virtual_file_id).await?; - - // Check if the member has edit right - self.check_virtual_file_edit_right(member, virtual_file_id) - .await?; - - // Ensure virtual file exist - let Ok(_) = self.virtual_file(virtual_file_id) else { - return Err(Error::new( - ErrorKind::NotFound, - format!("Virtual file `{}` not found!", virtual_file_id), - )); - }; - - // Ensure version exist - if !meta.version_exists(&old_version) { - return Err(Error::new( - ErrorKind::NotFound, - format!("Version `{}` not found!", old_version), - )); - } - - // Ok, Create new version - meta.current_version = old_version.clone(); - meta.histories.push(old_version); - VirtualFileMeta::write_to(&meta, self.virtual_file_meta_path(virtual_file_id)).await?; - - Ok(()) - } - - /// Grant a member the edit right for a virtual file - /// This operation takes effect immediately 
upon success - pub async fn grant_virtual_file_edit_right( - &self, - member_id: &MemberId, - virtual_file_id: &VirtualFileId, - ) -> Result<(), std::io::Error> { - let mut meta = self.virtual_file_meta(virtual_file_id).await?; - meta.hold_member = member_id.clone(); - self.write_virtual_file_meta(virtual_file_id, &meta).await - } - - /// Check if a member has the edit right for a virtual file - pub async fn has_virtual_file_edit_right( - &self, - member_id: &MemberId, - virtual_file_id: &VirtualFileId, - ) -> Result { - let meta = self.virtual_file_meta(virtual_file_id).await?; - Ok(meta.hold_member.eq(member_id)) - } - - /// Check if a member has the edit right for a virtual file and return Result - /// Returns Ok(()) if the member has edit right, otherwise returns PermissionDenied error - pub async fn check_virtual_file_edit_right( - &self, - member_id: &MemberId, - virtual_file_id: &VirtualFileId, - ) -> Result<(), std::io::Error> { - if !self - .has_virtual_file_edit_right(member_id, virtual_file_id) - .await? 
- { - return Err(Error::new( - ErrorKind::PermissionDenied, - format!( - "Member `{}` not allowed to update virtual file `{}`", - member_id, virtual_file_id - ), - )); - } - Ok(()) - } - - /// Revoke the edit right for a virtual file from the current holder - /// This operation takes effect immediately upon success - pub async fn revoke_virtual_file_edit_right( - &self, - virtual_file_id: &VirtualFileId, - ) -> Result<(), std::io::Error> { - let mut meta = self.virtual_file_meta(virtual_file_id).await?; - meta.hold_member = String::default(); - self.write_virtual_file_meta(virtual_file_id, &meta).await - } -} - -impl<'a> VirtualFile<'a> { - /// Get id of VirtualFile - pub fn id(&self) -> VirtualFileId { - self.id.clone() - } - - /// Read metadata of VirtualFile - pub async fn read_meta(&self) -> Result { - self.current_vault.virtual_file_meta(&self.id).await - } -} - -impl VirtualFileMeta { - /// Get all versions of the virtual file - pub fn versions(&self) -> &Vec { - &self.histories - } - - /// Get the latest version of the virtual file - pub fn version_latest(&self) -> VirtualFileVersion { - // After creating a virtual file in `update_virtual_file_from_connection`, - // the Vec will never be empty, so unwrap is allowed here - self.histories.last().unwrap().clone() - } - - /// Get the total number of versions for this virtual file - pub fn version_len(&self) -> i32 { - self.histories.len() as i32 - } - - /// Check if a specific version exists - /// Returns true if the version exists, false otherwise - pub fn version_exists(&self, version: &VirtualFileVersion) -> bool { - self.versions().iter().any(|v| v == version) - } - - /// Get the version number (index) for a given version name - /// Returns None if the version doesn't exist - pub fn version_num(&self, version: &VirtualFileVersion) -> Option { - self.histories - .iter() - .rev() - .position(|v| v == version) - .map(|pos| (self.histories.len() - 1 - pos) as i32) - } - - /// Get the version name for a given 
version number (index) - /// Returns None if the version number is out of range - pub fn version_name(&self, version_num: i32) -> Option { - self.histories.get(version_num as usize).cloned() - } - - /// Get the member who holds the edit right of the file - pub fn hold_member(&self) -> &MemberId { - &self.hold_member - } - - /// Get the version descriptions for all versions - pub fn version_descriptions( - &self, - ) -> &HashMap { - &self.version_description - } - - /// Get the version description for a given version - pub fn version_description( - &self, - version: VirtualFileVersion, - ) -> Option<&VirtualFileVersionDescription> { - let desc = self.version_descriptions(); - desc.get(&version) - } -} diff --git a/crates/vcs_data/src/lib.rs b/crates/vcs_data/src/lib.rs deleted file mode 100644 index 1b41391..0000000 --- a/crates/vcs_data/src/lib.rs +++ /dev/null @@ -1,5 +0,0 @@ -pub mod constants; -pub mod current; - -#[allow(dead_code)] -pub mod data; diff --git a/crates/vcs_data/todo.md b/crates/vcs_data/todo.md deleted file mode 100644 index 3c7e0c0..0000000 --- a/crates/vcs_data/todo.md +++ /dev/null @@ -1,31 +0,0 @@ -| 类别 | 项 | 可完成性 | 已完成 | -|----------|----|----------|--------| -| 本地文件 | 设置上游服务器(仅设置,不会连接和修改染色标识) | y | | -| 本地文件 | 验证连接、权限,并为当前工作区染色(若已染色,则无法连接不同标识的服务器) | y | | -| 本地文件 | 进入表 (否则无法做任何操作) | | | -| 本地文件 | 退出表 (文件将会从当前目录移出,等待下次进入时还原) | | | -| 本地文件 | 去色 - 断开与上游服务器的关联 | y | | -| 本地文件 | 跟踪本地文件的移动、重命名,立刻同步至表 | | | -| 本地文件 | 扫描本地文件结构,标记变化 | | | -| 本地文件 | 通过本地暂存的表索引搜索文件 | | | -| 本地文件 | 查询本地某个文件的状态 | | | -| 本地文件 | 查询当前目录的状态 | | | -| 本地文件 | 查询工作区状态 | | | -| 本地文件 | 将本地所有文件更新到最新状态 | | | -| 本地文件 | 提交所有产生变化的自身所属文件 | | | -| 表 | 表查看 - 指定表并查看结构 | | | -| 表 | 从参照表拉入文件项目 | | | -| 表 | 将文件项目(或多个)导出到指定表 | | | -| 表 | 查看导入请求 | | | -| 表 | 在某个本地地址同意并导入文件 | | | -| 表 | 拒绝某个、某些或所有导入请求 | | | -| 表 | 删除表中的映射,但要确保实际文件已被移除 (忽略文件) | | | -| 表 | 放弃表,所有者消失,下一个切换至表的人获得(放弃需要确保表中没有任何文件是所有者持有的)(替代目前的安全删除) | | | -| 虚拟文件 | 跟踪本地某些文件,并将其创建为虚拟文件,然后添加到自己的表 | | | -| 虚拟文件 | 
根据本地文件的目录查找虚拟文件,并为自己获得所有权(需要确保版本和上游同步才可) | | | -| 虚拟文件 | 根据本地文件的目录查找虚拟文件,并放弃所有权(需要确保和上游同步才可) | | | -| 虚拟文件 | 根据本地文件的目录查找虚拟文件,并定向到指定的存在的老版本 | | | - - -?为什么虚拟文件不能删除:虚拟文件的唯一删除方式就是,没有人再用他 -?为什么没有删除表:同理,表权限可以转移,但是删除只能等待定期清除无主人的表 diff --git a/crates/vcs_data/vcs_data_test/Cargo.toml b/crates/vcs_data/vcs_data_test/Cargo.toml deleted file mode 100644 index f6caa07..0000000 --- a/crates/vcs_data/vcs_data_test/Cargo.toml +++ /dev/null @@ -1,13 +0,0 @@ -[package] -name = "vcs_data_test" -edition = "2024" -version.workspace = true - -[dependencies] -tcp_connection = { path = "../../utils/tcp_connection" } -tcp_connection_test = { path = "../../utils/tcp_connection/tcp_connection_test" } -cfg_file = { path = "../../utils/cfg_file", features = ["default"] } -vcs_data = { path = "../../vcs_data" } - -# Async & Networking -tokio = { version = "1.48.0", features = ["full"] } diff --git a/crates/vcs_data/vcs_data_test/lib.rs b/crates/vcs_data/vcs_data_test/lib.rs deleted file mode 100644 index 5b65941..0000000 --- a/crates/vcs_data/vcs_data_test/lib.rs +++ /dev/null @@ -1,11 +0,0 @@ -use vcs_service::{action::Action, action_pool::ActionPool}; - -use crate::actions::test::FindMemberInServer; - -pub mod constants; -pub mod current; - -#[allow(dead_code)] -pub mod data; - -pub mod actions; diff --git a/crates/vcs_data/vcs_data_test/src/lib.rs b/crates/vcs_data/vcs_data_test/src/lib.rs deleted file mode 100644 index ced2d3d..0000000 --- a/crates/vcs_data/vcs_data_test/src/lib.rs +++ /dev/null @@ -1,30 +0,0 @@ -use std::{env::current_dir, path::PathBuf}; - -use tokio::fs; - -#[cfg(test)] -pub mod test_vault_setup_and_member_register; - -#[cfg(test)] -pub mod test_virtual_file_creation_and_update; - -#[cfg(test)] -pub mod test_local_workspace_setup_and_account_management; - -#[cfg(test)] -pub mod test_sheet_creation_management_and_persistence; - -#[cfg(test)] -pub mod test_sheet_share_creation_and_management; - -pub async fn get_test_dir(area: &str) -> Result { - let dir = 
current_dir()?.join(".temp").join("test").join(area); - if !dir.exists() { - std::fs::create_dir_all(&dir)?; - } else { - // Regenerate existing directory - fs::remove_dir_all(&dir).await?; - fs::create_dir_all(&dir).await?; - } - Ok(dir) -} diff --git a/crates/vcs_data/vcs_data_test/src/test_local_workspace_setup_and_account_management.rs b/crates/vcs_data/vcs_data_test/src/test_local_workspace_setup_and_account_management.rs deleted file mode 100644 index 8fa2676..0000000 --- a/crates/vcs_data/vcs_data_test/src/test_local_workspace_setup_and_account_management.rs +++ /dev/null @@ -1,248 +0,0 @@ -use std::io::Error; - -use cfg_file::config::ConfigFile; -use vcs_data::{ - constants::{CLIENT_FILE_TODOLIST, CLIENT_FILE_WORKSPACE, USER_FILE_KEY, USER_FILE_MEMBER}, - data::{ - local::{LocalWorkspace, config::LocalConfig}, - member::Member, - user::UserDirectory, - }, -}; - -use crate::get_test_dir; - -#[tokio::test] -async fn test_local_workspace_setup_and_account_management() -> Result<(), std::io::Error> { - let dir = get_test_dir("local_workspace_account_management").await?; - - // Setup local workspace - LocalWorkspace::setup_local_workspace(dir.clone()).await?; - - // Check if the following files are created in `dir`: - // Files: CLIENT_FILE_WORKSPACE, CLIENT_FILE_README - assert!(dir.join(CLIENT_FILE_WORKSPACE).exists()); - assert!(dir.join(CLIENT_FILE_TODOLIST).exists()); - - // Get local workspace - let config = LocalConfig::read_from(dir.join(CLIENT_FILE_WORKSPACE)).await?; - let Some(_local_workspace) = LocalWorkspace::init(config, &dir) else { - return Err(Error::new( - std::io::ErrorKind::NotFound, - "Local workspace not found!", - )); - }; - - // Create user directory from workspace path - let Some(user_directory) = UserDirectory::from_path(&dir) else { - return Err(Error::new( - std::io::ErrorKind::NotFound, - "User directory not found!", - )); - }; - - // Test account registration - let member_id = "test_account"; - let member = Member::new(member_id); - 
- // Register account - user_directory.register_account(member.clone()).await?; - - // Check if the account config file exists - assert!( - dir.join(USER_FILE_MEMBER.replace("{self_id}", member_id)) - .exists() - ); - - // Test account retrieval - let retrieved_member = user_directory.account(&member_id.to_string()).await?; - assert_eq!(retrieved_member.id(), member.id()); - - // Test account IDs listing - let account_ids = user_directory.account_ids()?; - assert!(account_ids.contains(&member_id.to_string())); - - // Test accounts listing - let accounts = user_directory.accounts().await?; - assert_eq!(accounts.len(), 1); - assert_eq!(accounts[0].id(), member.id()); - - // Test account existence check - assert!(user_directory.account_cfg(&member_id.to_string()).is_some()); - - // Test private key check (should be false initially) - assert!(!user_directory.has_private_key(&member_id.to_string())); - - // Test account update - let mut updated_member = member.clone(); - updated_member.set_metadata("email", "test@example.com"); - user_directory - .update_account(updated_member.clone()) - .await?; - - // Verify update - let updated_retrieved = user_directory.account(&member_id.to_string()).await?; - assert_eq!( - updated_retrieved.metadata("email"), - Some(&"test@example.com".to_string()) - ); - - // Test account removal - user_directory.remove_account(&member_id.to_string())?; - - // Check if the account config file no longer exists - assert!( - !dir.join(USER_FILE_MEMBER.replace("{self_id}", member_id)) - .exists() - ); - - // Check if account is no longer in the list - let account_ids_after_removal = user_directory.account_ids()?; - assert!(!account_ids_after_removal.contains(&member_id.to_string())); - - Ok(()) -} - -#[tokio::test] -async fn test_account_private_key_management() -> Result<(), std::io::Error> { - let dir = get_test_dir("account_private_key_management").await?; - - // Create user directory - let Some(user_directory) = UserDirectory::from_path(&dir) 
else { - return Err(Error::new( - std::io::ErrorKind::NotFound, - "User directory not found!", - )); - }; - - // Register account - let member_id = "test_account_with_key"; - let member = Member::new(member_id); - user_directory.register_account(member).await?; - - // Create a dummy private key file for testing - let private_key_path = dir.join(USER_FILE_KEY.replace("{self_id}", member_id)); - std::fs::create_dir_all(private_key_path.parent().unwrap())?; - std::fs::write(&private_key_path, "dummy_private_key_content")?; - - // Test private key existence check - assert!(user_directory.has_private_key(&member_id.to_string())); - - // Test private key path retrieval - assert!( - user_directory - .account_private_key(&member_id.to_string()) - .is_some() - ); - - // Remove account (should also remove private key) - user_directory.remove_account(&member_id.to_string())?; - - // Check if private key file is also removed - assert!(!private_key_path.exists()); - - Ok(()) -} - -#[tokio::test] -async fn test_multiple_account_management() -> Result<(), std::io::Error> { - let dir = get_test_dir("multiple_account_management").await?; - - // Create user directory - let Some(user_directory) = UserDirectory::from_path(&dir) else { - return Err(Error::new( - std::io::ErrorKind::NotFound, - "User directory not found!", - )); - }; - - // Register multiple accounts - let account_names = vec!["alice", "bob", "charlie"]; - - for name in &account_names { - user_directory.register_account(Member::new(*name)).await?; - } - - // Test account IDs listing - let account_ids = user_directory.account_ids()?; - assert_eq!(account_ids.len(), 3); - - for name in &account_names { - assert!(account_ids.contains(&name.to_string())); - } - - // Test accounts listing - let accounts = user_directory.accounts().await?; - assert_eq!(accounts.len(), 3); - - // Remove one account - user_directory.remove_account(&"bob".to_string())?; - - // Verify removal - let account_ids_after_removal = 
user_directory.account_ids()?; - assert_eq!(account_ids_after_removal.len(), 2); - assert!(!account_ids_after_removal.contains(&"bob".to_string())); - assert!(account_ids_after_removal.contains(&"alice".to_string())); - assert!(account_ids_after_removal.contains(&"charlie".to_string())); - - Ok(()) -} - -#[tokio::test] -async fn test_account_registration_duplicate_prevention() -> Result<(), std::io::Error> { - let dir = get_test_dir("account_duplicate_prevention").await?; - - // Create user directory - let Some(user_directory) = UserDirectory::from_path(&dir) else { - return Err(Error::new( - std::io::ErrorKind::NotFound, - "User directory not found!", - )); - }; - - // Register account - let member_id = "duplicate_test"; - user_directory - .register_account(Member::new(member_id)) - .await?; - - // Try to register same account again - should fail - let result = user_directory - .register_account(Member::new(member_id)) - .await; - assert!(result.is_err()); - - Ok(()) -} - -#[tokio::test] -async fn test_nonexistent_account_operations() -> Result<(), std::io::Error> { - let dir = get_test_dir("nonexistent_account_operations").await?; - - // Create user directory - let Some(user_directory) = UserDirectory::from_path(&dir) else { - return Err(Error::new( - std::io::ErrorKind::NotFound, - "User directory not found!", - )); - }; - - // Try to read non-existent account - should fail - let result = user_directory.account(&"nonexistent".to_string()).await; - assert!(result.is_err()); - - // Try to update non-existent account - should fail - let result = user_directory - .update_account(Member::new("nonexistent")) - .await; - assert!(result.is_err()); - - // Try to remove non-existent account - should succeed (idempotent) - let result = user_directory.remove_account(&"nonexistent".to_string()); - assert!(result.is_ok()); - - // Check private key for non-existent account - should be false - assert!(!user_directory.has_private_key(&"nonexistent".to_string())); - - Ok(()) -} 
diff --git a/crates/vcs_data/vcs_data_test/src/test_sheet_creation_management_and_persistence.rs b/crates/vcs_data/vcs_data_test/src/test_sheet_creation_management_and_persistence.rs deleted file mode 100644 index 6683d06..0000000 --- a/crates/vcs_data/vcs_data_test/src/test_sheet_creation_management_and_persistence.rs +++ /dev/null @@ -1,275 +0,0 @@ -use std::io::Error; - -use cfg_file::config::ConfigFile; -use vcs_data::{ - constants::{SERVER_FILE_SHEET, SERVER_FILE_VAULT}, - data::{ - member::{Member, MemberId}, - sheet::SheetName, - vault::{Vault, config::VaultConfig, virtual_file::VirtualFileId}, - }, -}; - -use crate::get_test_dir; - -#[tokio::test] -async fn test_sheet_creation_management_and_persistence() -> Result<(), std::io::Error> { - let dir = get_test_dir("sheet_management").await?; - - // Setup vault - Vault::setup_vault(dir.clone(), "TestVault").await?; - - // Get vault - let config = VaultConfig::read_from(dir.join(SERVER_FILE_VAULT)).await?; - let Some(vault) = Vault::init(config, &dir) else { - return Err(Error::new(std::io::ErrorKind::NotFound, "Vault not found!")); - }; - - // Add a member to use as sheet holder - let member_id: MemberId = "test_member".to_string(); - vault - .register_member_to_vault(Member::new(&member_id)) - .await?; - - // Test 1: Create a new sheet - let sheet_name: SheetName = "test_sheet".to_string(); - let sheet = vault.create_sheet(&sheet_name, &member_id).await?; - - // Verify sheet properties - assert_eq!(sheet.holder(), Some(&member_id)); - assert_eq!(sheet.holder(), Some(&member_id)); - assert!(sheet.mapping().is_empty()); - - // Verify sheet file was created - let sheet_path = dir.join(SERVER_FILE_SHEET.replace("{sheet_name}", &sheet_name)); - assert!(sheet_path.exists()); - - // Test 2: Add mapping entries to the sheet - let mut sheet = vault.sheet(&sheet_name).await?; - - // Add mapping entries for the files - let main_rs_path = vcs_data::data::sheet::SheetPathBuf::from("src/main.rs"); - let lib_rs_path = 
vcs_data::data::sheet::SheetPathBuf::from("src/lib.rs"); - let main_rs_id = VirtualFileId::new(); - let lib_rs_id = VirtualFileId::new(); - - sheet - .add_mapping( - main_rs_path.clone(), - main_rs_id.clone(), - "1.0.0".to_string(), - ) - .await?; - sheet - .add_mapping(lib_rs_path.clone(), lib_rs_id.clone(), "1.0.0".to_string()) - .await?; - - // Verify mappings were added - assert_eq!(sheet.mapping().len(), 2); - - // Test 3: Add more mapping entries - let mapping_path = vcs_data::data::sheet::SheetPathBuf::from("output/build.exe"); - let virtual_file_id = VirtualFileId::new(); - - sheet - .add_mapping( - mapping_path.clone(), - virtual_file_id.clone(), - "1.0.0".to_string(), - ) - .await?; - - // Verify mapping was added - assert_eq!(sheet.mapping().len(), 3); - assert_eq!( - sheet.mapping().get(&mapping_path).map(|meta| &meta.id), - Some(&virtual_file_id) - ); - - // Test 4: Persist sheet to disk - sheet.persist().await?; - - // Verify persistence by reloading the sheet - let reloaded_sheet = vault.sheet(&sheet_name).await?; - assert_eq!(reloaded_sheet.holder(), Some(&member_id)); - assert_eq!(reloaded_sheet.mapping().len(), 3); - - // Test 5: Remove mapping entry - let mut sheet_for_removal = vault.sheet(&sheet_name).await?; - let _removed_virtual_file_id = sheet_for_removal.remove_mapping(&mapping_path).await; - // Don't check the return value since it depends on virtual file existence - assert_eq!(sheet_for_removal.mapping().len(), 2); - - // Test 6: List all sheets in vault - let sheet_names = vault.sheet_names()?; - assert_eq!(sheet_names.len(), 2); - assert!(sheet_names.contains(&sheet_name)); - assert!(sheet_names.contains(&"ref".to_string())); - - let all_sheets = vault.sheets().await?; - assert_eq!(all_sheets.len(), 2); - // One sheet should be the test sheet, the other should be the ref sheet with host as holder - let test_sheet_holder = all_sheets - .iter() - .find(|s| s.holder() == Some(&member_id)) - .map(|s| s.holder()) - .unwrap(); - let 
ref_sheet_holder = all_sheets - .iter() - .find(|s| s.holder() == Some(&"host".to_string())) - .map(|s| s.holder()) - .unwrap(); - assert_eq!(test_sheet_holder, Some(&member_id)); - assert_eq!(ref_sheet_holder, Some(&"host".to_string())); - - // Test 7: Safe deletion (move to trash) - vault.delete_sheet_safely(&sheet_name).await?; - - // Verify sheet is not in normal listing but can be restored - let sheet_names_after_deletion = vault.sheet_names()?; - assert_eq!(sheet_names_after_deletion.len(), 1); - assert_eq!(sheet_names_after_deletion[0], "ref"); - - // Test 8: Restore sheet from trash - let restored_sheet = vault.sheet(&sheet_name).await?; - assert_eq!(restored_sheet.holder(), Some(&member_id)); - assert_eq!(restored_sheet.holder(), Some(&member_id)); - - // Verify sheet is back in normal listing - let sheet_names_after_restore = vault.sheet_names()?; - assert_eq!(sheet_names_after_restore.len(), 2); - assert!(sheet_names_after_restore.contains(&sheet_name)); - assert!(sheet_names_after_restore.contains(&"ref".to_string())); - - // Test 9: Permanent deletion - vault.delete_sheet(&sheet_name).await?; - - // Verify sheet is permanently gone - let sheet_names_final = vault.sheet_names()?; - assert_eq!(sheet_names_final.len(), 1); - assert_eq!(sheet_names_final[0], "ref"); - - // Attempt to access deleted sheet should fail - let result = vault.sheet(&sheet_name).await; - assert!(result.is_err()); - - // Clean up: Remove member - vault.remove_member_from_vault(&member_id)?; - - Ok(()) -} - -#[tokio::test] -async fn test_sheet_error_conditions() -> Result<(), std::io::Error> { - let dir = get_test_dir("sheet_error_conditions").await?; - - // Setup vault - Vault::setup_vault(dir.clone(), "TestVault").await?; - - // Get vault - let config = VaultConfig::read_from(dir.join(SERVER_FILE_VAULT)).await?; - let Some(vault) = Vault::init(config, &dir) else { - return Err(Error::new(std::io::ErrorKind::NotFound, "Vault not found!")); - }; - - // Test 1: Create sheet with 
non-existent member should fail - let non_existent_member: MemberId = "non_existent_member".to_string(); - let sheet_name: SheetName = "test_sheet".to_string(); - - let result = vault.create_sheet(&sheet_name, &non_existent_member).await; - assert!(result.is_err()); - - // Add a member first - let member_id: MemberId = "test_member".to_string(); - vault - .register_member_to_vault(Member::new(&member_id)) - .await?; - - // Test 2: Create duplicate sheet should fail - vault.create_sheet(&sheet_name, &member_id).await?; - let result = vault.create_sheet(&sheet_name, &member_id).await; - assert!(result.is_err()); - - // Test 3: Delete non-existent sheet should fail - let non_existent_sheet: SheetName = "non_existent_sheet".to_string(); - let result = vault.delete_sheet(&non_existent_sheet).await; - assert!(result.is_err()); - - // Test 4: Safe delete non-existent sheet should fail - let result = vault.delete_sheet_safely(&non_existent_sheet).await; - assert!(result.is_err()); - - // Test 5: Restore non-existent sheet from trash should fail - let result = vault.restore_sheet(&non_existent_sheet).await; - assert!(result.is_err()); - - // Clean up - vault.remove_member_from_vault(&member_id)?; - - Ok(()) -} - -#[tokio::test] -async fn test_sheet_data_serialization() -> Result<(), std::io::Error> { - let dir = get_test_dir("sheet_serialization").await?; - - // Test serialization by creating a sheet through the vault - // Setup vault - Vault::setup_vault(dir.clone(), "TestVault").await?; - - // Get vault - let config = VaultConfig::read_from(dir.join(SERVER_FILE_VAULT)).await?; - let Some(vault) = Vault::init(config, &dir) else { - return Err(Error::new(std::io::ErrorKind::NotFound, "Vault not found!")); - }; - - // Add a member - let member_id: MemberId = "test_member".to_string(); - vault - .register_member_to_vault(Member::new(&member_id)) - .await?; - - // Create a sheet - let sheet_name: SheetName = "test_serialization_sheet".to_string(); - let mut sheet = 
vault.create_sheet(&sheet_name, &member_id).await?; - - // Add some mappings - let main_rs_path = vcs_data::data::sheet::SheetPathBuf::from("src/main.rs"); - let lib_rs_path = vcs_data::data::sheet::SheetPathBuf::from("src/lib.rs"); - let main_rs_id = VirtualFileId::new(); - let lib_rs_id = VirtualFileId::new(); - - sheet - .add_mapping( - main_rs_path.clone(), - main_rs_id.clone(), - "1.0.0".to_string(), - ) - .await?; - sheet - .add_mapping(lib_rs_path.clone(), lib_rs_id.clone(), "1.0.0".to_string()) - .await?; - - // Add more mappings - let build_exe_id = VirtualFileId::new(); - - sheet - .add_mapping( - vcs_data::data::sheet::SheetPathBuf::from("output/build.exe"), - build_exe_id, - "1.0.0".to_string(), - ) - .await?; - - // Persist the sheet - sheet.persist().await?; - - // Verify the sheet file was created - let sheet_path = dir.join(SERVER_FILE_SHEET.replace("{sheet_name}", &sheet_name)); - assert!(sheet_path.exists()); - - // Clean up - vault.remove_member_from_vault(&member_id)?; - - Ok(()) -} diff --git a/crates/vcs_data/vcs_data_test/src/test_sheet_share_creation_and_management.rs b/crates/vcs_data/vcs_data_test/src/test_sheet_share_creation_and_management.rs deleted file mode 100644 index 89891d6..0000000 --- a/crates/vcs_data/vcs_data_test/src/test_sheet_share_creation_and_management.rs +++ /dev/null @@ -1,631 +0,0 @@ -use std::io::Error; - -use cfg_file::config::ConfigFile; -use vcs_data::{ - constants::SERVER_FILE_VAULT, - data::{ - member::{Member, MemberId}, - sheet::{SheetName, SheetPathBuf}, - vault::{ - Vault, - config::VaultConfig, - sheet_share::{Share, ShareMergeMode, SheetShareId}, - virtual_file::VirtualFileId, - }, - }, -}; - -use crate::get_test_dir; - -#[tokio::test] -async fn test_share_creation_and_retrieval() -> Result<(), std::io::Error> { - let dir = get_test_dir("share_creation").await?; - - // Setup vault - Vault::setup_vault(dir.clone(), "TestVault").await?; - - // Get vault - let config = 
VaultConfig::read_from(dir.join(SERVER_FILE_VAULT)).await?; - let Some(vault) = Vault::init(config, &dir) else { - return Err(Error::new(std::io::ErrorKind::NotFound, "Vault not found!")); - }; - - // Add members - let sharer_id: MemberId = "sharer_member".to_string(); - let target_member_id: MemberId = "target_member".to_string(); - - vault - .register_member_to_vault(Member::new(&sharer_id)) - .await?; - vault - .register_member_to_vault(Member::new(&target_member_id)) - .await?; - - // Create source sheet for sharer - let source_sheet_name: SheetName = "source_sheet".to_string(); - let _source_sheet = vault.create_sheet(&source_sheet_name, &sharer_id).await?; - - // Create target sheet for target member - let target_sheet_name: SheetName = "target_sheet".to_string(); - let _target_sheet = vault - .create_sheet(&target_sheet_name, &target_member_id) - .await?; - - // Add mappings to source sheet - let mut source_sheet = vault.sheet(&source_sheet_name).await?; - - let main_rs_path = SheetPathBuf::from("src/main.rs"); - let lib_rs_path = SheetPathBuf::from("src/lib.rs"); - let main_rs_id = VirtualFileId::from("main_rs_id_1"); - let lib_rs_id = VirtualFileId::from("lib_rs_id_1"); - - source_sheet - .add_mapping( - main_rs_path.clone(), - main_rs_id.clone(), - "1.0.0".to_string(), - ) - .await?; - source_sheet - .add_mapping(lib_rs_path.clone(), lib_rs_id.clone(), "1.0.0".to_string()) - .await?; - - // Persist source sheet - source_sheet.persist().await?; - - // Test 1: Share mappings from source sheet to target sheet - let description = "Test share of main.rs and lib.rs".to_string(); - // Need to get the sheet again after persist - let source_sheet = vault.sheet(&source_sheet_name).await?; - - source_sheet - .share_mappings( - &target_sheet_name, - vec![main_rs_path.clone(), lib_rs_path.clone()], - &sharer_id, - description.clone(), - ) - .await?; - - // Test 2: Get shares from target sheet - let target_sheet = vault.sheet(&target_sheet_name).await?; - - let shares 
= target_sheet.get_shares().await?; - - assert_eq!(shares.len(), 1, "Expected 1 share, found {}", shares.len()); - let share = &shares[0]; - - assert_eq!(share.sharer, sharer_id); - assert_eq!(share.description, description); - assert_eq!(share.from_sheet, source_sheet_name); - assert_eq!(share.mappings.len(), 2); - assert!(share.mappings.contains_key(&main_rs_path)); - assert!(share.mappings.contains_key(&lib_rs_path)); - assert!(share.path.is_some()); - - // Test 3: Get specific share by ID - let share_id = Share::gen_share_id(&sharer_id); - let _specific_share = target_sheet.get_share(&share_id).await; - - // Note: The share ID might not match exactly due to random generation, - // but we can verify the share exists by checking the shares list - assert!(shares.iter().any(|s| s.sharer == sharer_id)); - - // Clean up - vault.remove_member_from_vault(&sharer_id)?; - vault.remove_member_from_vault(&target_member_id)?; - - Ok(()) -} - -#[tokio::test] -async fn test_share_merge_modes() -> Result<(), std::io::Error> { - let dir = get_test_dir("share_merge_modes").await?; - - // Setup vault - Vault::setup_vault(dir.clone(), "TestVault").await?; - - // Get vault - let config = VaultConfig::read_from(dir.join(SERVER_FILE_VAULT)).await?; - let Some(vault) = Vault::init(config, &dir) else { - return Err(Error::new(std::io::ErrorKind::NotFound, "Vault not found!")); - }; - - // Add members - let sharer_id: MemberId = "sharer".to_string(); - let target_member_id: MemberId = "target".to_string(); - - vault - .register_member_to_vault(Member::new(&sharer_id)) - .await?; - vault - .register_member_to_vault(Member::new(&target_member_id)) - .await?; - - // Create source and target sheets - let source_sheet_name: SheetName = "source".to_string(); - let target_sheet_name: SheetName = "target".to_string(); - - let _source_sheet = vault.create_sheet(&source_sheet_name, &sharer_id).await?; - let _target_sheet = vault - .create_sheet(&target_sheet_name, &target_member_id) - .await?; - 
- // Add mappings to source sheet - let mut source_sheet = vault.sheet(&source_sheet_name).await?; - - let file1_path = SheetPathBuf::from("src/file1.rs"); - let file2_path = SheetPathBuf::from("src/file2.rs"); - let file1_id = VirtualFileId::from("file1_id_1"); - let file2_id = VirtualFileId::from("file2_id_1"); - - source_sheet - .add_mapping(file1_path.clone(), file1_id.clone(), "1.0.0".to_string()) - .await?; - source_sheet - .add_mapping(file2_path.clone(), file2_id.clone(), "1.0.0".to_string()) - .await?; - - source_sheet.persist().await?; - - // Share mappings - // Need to get the sheet again after persist - let source_sheet = vault.sheet(&source_sheet_name).await?; - source_sheet - .share_mappings( - &target_sheet_name, - vec![file1_path.clone(), file2_path.clone()], - &sharer_id, - "Test share".to_string(), - ) - .await?; - - // Get the share - let target_sheet = vault.sheet(&target_sheet_name).await?; - let shares = target_sheet.get_shares().await?; - assert_eq!(shares.len(), 1); - let share = shares[0].clone(); - - // Test 4: Safe mode merge (should succeed with no conflicts) - let result = target_sheet - .merge_share(share.clone(), ShareMergeMode::Safe) - .await; - - assert!( - result.is_ok(), - "Safe mode should succeed with no conflicts " - ); - - // Verify mappings were added to target sheet - let updated_target_sheet = vault.sheet(&target_sheet_name).await?; - assert_eq!(updated_target_sheet.mapping().len(), 2); - assert!(updated_target_sheet.mapping().contains_key(&file1_path)); - assert!(updated_target_sheet.mapping().contains_key(&file2_path)); - - // Clean up - vault.remove_member_from_vault(&sharer_id)?; - vault.remove_member_from_vault(&target_member_id)?; - - Ok(()) -} - -#[tokio::test] -async fn test_share_merge_conflicts() -> Result<(), std::io::Error> { - let dir = get_test_dir("share_conflicts").await?; - - // Setup vault - Vault::setup_vault(dir.clone(), "TestVault").await?; - - // Get vault - let config = 
VaultConfig::read_from(dir.join(SERVER_FILE_VAULT)).await?; - let Some(vault) = Vault::init(config, &dir) else { - return Err(Error::new(std::io::ErrorKind::NotFound, "Vault not found!")); - }; - - // Add members - let sharer_id: MemberId = "sharer".to_string(); - let target_member_id: MemberId = "target".to_string(); - - vault - .register_member_to_vault(Member::new(&sharer_id)) - .await?; - vault - .register_member_to_vault(Member::new(&target_member_id)) - .await?; - - // Create source and target sheets - let source_sheet_name: SheetName = "source".to_string(); - let target_sheet_name: SheetName = "target".to_string(); - - let _source_sheet = vault.create_sheet(&source_sheet_name, &sharer_id).await?; - let _target_sheet = vault - .create_sheet(&target_sheet_name, &target_member_id) - .await?; - - // Add conflicting mappings to both sheets - let mut source_sheet = vault.sheet(&source_sheet_name).await?; - let mut target_sheet_mut = vault.sheet(&target_sheet_name).await?; - - let conflicting_path = SheetPathBuf::from("src/conflicting.rs"); - let source_file_id = VirtualFileId::from("source_file_id_1"); - let target_file_id = VirtualFileId::from("target_file_id_1"); - - // Add same path with different IDs to both sheets (conflict) - source_sheet - .add_mapping( - conflicting_path.clone(), - source_file_id.clone(), - "1.0.0".to_string(), - ) - .await?; - - target_sheet_mut - .add_mapping( - conflicting_path.clone(), - target_file_id.clone(), - "1.0.0".to_string(), - ) - .await?; - - source_sheet.persist().await?; - target_sheet_mut.persist().await?; - - // Share the conflicting mapping - // Need to get the sheet again after persist - let source_sheet = vault.sheet(&source_sheet_name).await?; - source_sheet - .share_mappings( - &target_sheet_name, - vec![conflicting_path.clone()], - &sharer_id, - "Conflicting share".to_string(), - ) - .await?; - - // Get the share - let target_sheet = vault.sheet(&target_sheet_name).await?; - let shares = 
target_sheet.get_shares().await?; - assert_eq!(shares.len(), 1); - let share = shares[0].clone(); - - // Test 5: Safe mode merge with conflict (should fail) - let target_sheet_clone = vault.sheet(&target_sheet_name).await?; - let result = target_sheet_clone - .merge_share(share.clone(), ShareMergeMode::Safe) - .await; - - assert!(result.is_err(), "Safe mode should fail with conflicts"); - - // Test 6: Overwrite mode merge with conflict (should succeed) - let target_sheet_clone = vault.sheet(&target_sheet_name).await?; - let result = target_sheet_clone - .merge_share(share.clone(), ShareMergeMode::Overwrite) - .await; - - assert!( - result.is_ok(), - "Overwrite mode should succeed with conflicts" - ); - - // Verify the mapping was overwritten - let updated_target_sheet = vault.sheet(&target_sheet_name).await?; - let mapping = updated_target_sheet.mapping().get(&conflicting_path); - assert!(mapping.is_some()); - assert_eq!(mapping.unwrap().id, source_file_id); // Should be source's ID, not target's - - // Clean up - vault.remove_member_from_vault(&sharer_id)?; - vault.remove_member_from_vault(&target_member_id)?; - - Ok(()) -} - -#[tokio::test] -async fn test_share_skip_mode() -> Result<(), std::io::Error> { - let dir = get_test_dir("share_skip_mode").await?; - - // Setup vault - Vault::setup_vault(dir.clone(), "TestVault").await?; - - // Get vault - let config = VaultConfig::read_from(dir.join(SERVER_FILE_VAULT)).await?; - let Some(vault) = Vault::init(config, &dir) else { - return Err(Error::new(std::io::ErrorKind::NotFound, "Vault not found!")); - }; - - // Add members - let sharer_id: MemberId = "sharer".to_string(); - let target_member_id: MemberId = "target".to_string(); - - vault - .register_member_to_vault(Member::new(&sharer_id)) - .await?; - vault - .register_member_to_vault(Member::new(&target_member_id)) - .await?; - - // Create source and target sheets - let source_sheet_name: SheetName = "source".to_string(); - let target_sheet_name: SheetName = 
"target".to_string(); - - let _source_sheet = vault.create_sheet(&source_sheet_name, &sharer_id).await?; - let _target_sheet = vault - .create_sheet(&target_sheet_name, &target_member_id) - .await?; - - // Add mappings to both sheets - let mut source_sheet = vault.sheet(&source_sheet_name).await?; - let mut target_sheet_mut = vault.sheet(&target_sheet_name).await?; - - let conflicting_path = SheetPathBuf::from("src/conflicting.rs"); - let non_conflicting_path = SheetPathBuf::from("src/non_conflicting.rs"); - - let source_file_id = VirtualFileId::from("source_file_id_2"); - let target_file_id = VirtualFileId::from("target_file_id_2"); - let non_conflicting_id = VirtualFileId::from("non_conflicting_id_1"); - - // Add conflicting mapping to both sheets - source_sheet - .add_mapping( - conflicting_path.clone(), - source_file_id.clone(), - "1.0.0".to_string(), - ) - .await?; - - target_sheet_mut - .add_mapping( - conflicting_path.clone(), - target_file_id.clone(), - "1.0.0".to_string(), - ) - .await?; - - // Add non-conflicting mapping only to source - source_sheet - .add_mapping( - non_conflicting_path.clone(), - non_conflicting_id.clone(), - "1.0.0".to_string(), - ) - .await?; - - source_sheet.persist().await?; - target_sheet_mut.persist().await?; - - // Share both mappings - // Need to get the sheet again after persist - let source_sheet = vault.sheet(&source_sheet_name).await?; - source_sheet - .share_mappings( - &target_sheet_name, - vec![conflicting_path.clone(), non_conflicting_path.clone()], - &sharer_id, - "Mixed share".to_string(), - ) - .await?; - - // Get the share - let target_sheet = vault.sheet(&target_sheet_name).await?; - let shares = target_sheet.get_shares().await?; - assert_eq!(shares.len(), 1); - let share = shares[0].clone(); - - // Test 7: Skip mode merge with conflict (should skip conflicting, add non-conflicting) - let result = target_sheet - .merge_share(share.clone(), ShareMergeMode::Skip) - .await; - - assert!(result.is_ok(), "Skip mode 
should succeed"); - - // Verify only non-conflicting mapping was added - let updated_target_sheet = vault.sheet(&target_sheet_name).await?; - - // Conflicting mapping should still have target's ID - let conflicting_mapping = updated_target_sheet.mapping().get(&conflicting_path); - assert!(conflicting_mapping.is_some()); - assert_eq!(conflicting_mapping.unwrap().id, target_file_id); - - // Non-conflicting mapping should be added - let non_conflicting_mapping = updated_target_sheet.mapping().get(&non_conflicting_path); - assert!(non_conflicting_mapping.is_some()); - assert_eq!(non_conflicting_mapping.unwrap().id, non_conflicting_id); - - // Clean up - vault.remove_member_from_vault(&sharer_id)?; - vault.remove_member_from_vault(&target_member_id)?; - - Ok(()) -} - -#[tokio::test] -async fn test_share_removal() -> Result<(), std::io::Error> { - let dir = get_test_dir("share_removal").await?; - - // Setup vault - Vault::setup_vault(dir.clone(), "TestVault").await?; - - // Get vault - let config = VaultConfig::read_from(dir.join(SERVER_FILE_VAULT)).await?; - let Some(vault) = Vault::init(config, &dir) else { - return Err(Error::new(std::io::ErrorKind::NotFound, "Vault not found!")); - }; - - // Add members - let sharer_id: MemberId = "sharer".to_string(); - let target_member_id: MemberId = "target".to_string(); - - vault - .register_member_to_vault(Member::new(&sharer_id)) - .await?; - vault - .register_member_to_vault(Member::new(&target_member_id)) - .await?; - - // Create source and target sheets - let source_sheet_name: SheetName = "source".to_string(); - let target_sheet_name: SheetName = "target".to_string(); - - let _source_sheet = vault.create_sheet(&source_sheet_name, &sharer_id).await?; - let _target_sheet = vault - .create_sheet(&target_sheet_name, &target_member_id) - .await?; - - // Add mapping to source sheet - let mut source_sheet = vault.sheet(&source_sheet_name).await?; - - let file_path = SheetPathBuf::from("src/file.rs"); - let file_id = 
VirtualFileId::from("file_id_1"); - - source_sheet - .add_mapping(file_path.clone(), file_id.clone(), "1.0.0".to_string()) - .await?; - - source_sheet.persist().await?; - - // Need to get the sheet again after persist - let source_sheet = vault.sheet(&source_sheet_name).await?; - // Share mapping - source_sheet - .share_mappings( - &target_sheet_name, - vec![file_path.clone()], - &sharer_id, - "Test share for removal".to_string(), - ) - .await?; - - // Get the share - let target_sheet = vault.sheet(&target_sheet_name).await?; - let shares = target_sheet.get_shares().await?; - assert_eq!(shares.len(), 1); - let share = shares[0].clone(); - - // Test 8: Remove share - let result = share.remove().await; - - // Check if removal succeeded or failed gracefully - match result { - Ok(_) => { - // Share was successfully removed - let shares_after_removal = target_sheet.get_shares().await?; - assert_eq!(shares_after_removal.len(), 0); - } - Err((returned_share, _error)) => { - // Share removal failed, but we got the share backZ - // Error message may vary, just check that we got an error - // The share should be returned in the error - assert_eq!(returned_share.sharer, sharer_id); - } - } - - // Clean up - vault.remove_member_from_vault(&sharer_id)?; - vault.remove_member_from_vault(&target_member_id)?; - - Ok(()) -} - -#[tokio::test] -async fn test_share_error_conditions() -> Result<(), std::io::Error> { - let dir = get_test_dir("share_errors").await?; - - // Setup vault - Vault::setup_vault(dir.clone(), "TestVault").await?; - - // Get vault - let config = VaultConfig::read_from(dir.join(SERVER_FILE_VAULT)).await?; - let Some(vault) = Vault::init(config, &dir) else { - return Err(Error::new(std::io::ErrorKind::NotFound, "Vault not found!")); - }; - - // Add member - let sharer_id: MemberId = "sharer".to_string(); - vault - .register_member_to_vault(Member::new(&sharer_id)) - .await?; - - // Create source sheet - let source_sheet_name: SheetName = "source".to_string(); - let 
_source_sheet = vault.create_sheet(&source_sheet_name, &sharer_id).await?; - - // Add mapping to source sheet - let mut source_sheet = vault.sheet(&source_sheet_name).await?; - - let file_path = SheetPathBuf::from("src/file.rs"); - let file_id = VirtualFileId::from("file_id_2"); - - source_sheet - .add_mapping(file_path.clone(), file_id.clone(), "1.0.0".to_string()) - .await?; - - source_sheet.persist().await?; - - // Test 9: Share to non-existent sheet should fail - let non_existent_sheet: SheetName = "non_existent".to_string(); - // Need to get the sheet again after persist - let source_sheet = vault.sheet(&source_sheet_name).await?; - let result = source_sheet - .share_mappings( - &non_existent_sheet, - vec![file_path.clone()], - &sharer_id, - "Test".to_string(), - ) - .await; - - assert!(result.is_err()); - - // Test 10: Share non-existent mapping should fail - let target_sheet_name: SheetName = "target".to_string(); - let _target_sheet = vault.create_sheet(&target_sheet_name, &sharer_id).await?; - - let non_existent_path = SheetPathBuf::from("src/non_existent.rs"); - let result = source_sheet - .share_mappings( - &target_sheet_name, - vec![non_existent_path], - &sharer_id, - "Test".to_string(), - ) - .await; - - assert!(result.is_err()); - - // Test 11: Merge non-existent share should fail - let target_sheet = vault.sheet(&target_sheet_name).await?; - let non_existent_share_id: SheetShareId = "non_existent_share".to_string(); - let result = target_sheet - .merge_share_by_id(&non_existent_share_id, ShareMergeMode::Safe) - .await; - - assert!(result.is_err()); - - // Clean up - vault.remove_member_from_vault(&sharer_id)?; - - Ok(()) -} - -#[tokio::test] -async fn test_share_id_generation() -> Result<(), std::io::Error> { - // Test 12: Share ID generation - let sharer_id: MemberId = "test_sharer".to_string(); - - // Generate multiple IDs to ensure they're different - let id1 = Share::gen_share_id(&sharer_id); - let id2 = Share::gen_share_id(&sharer_id); - let id3 
= Share::gen_share_id(&sharer_id); - - // IDs should be different due to random component - assert_ne!(id1, id2); - assert_ne!(id1, id3); - assert_ne!(id2, id3); - - // IDs should start with sharer name - assert!(id1.starts_with(&format!("test_sharer@"))); - assert!(id2.starts_with(&format!("test_sharer@"))); - assert!(id3.starts_with(&format!("test_sharer@"))); - - Ok(()) -} diff --git a/crates/vcs_data/vcs_data_test/src/test_vault_setup_and_member_register.rs b/crates/vcs_data/vcs_data_test/src/test_vault_setup_and_member_register.rs deleted file mode 100644 index 286a4a2..0000000 --- a/crates/vcs_data/vcs_data_test/src/test_vault_setup_and_member_register.rs +++ /dev/null @@ -1,67 +0,0 @@ -use std::io::Error; - -use cfg_file::config::ConfigFile; -use vcs_data::{ - constants::{ - SERVER_FILE_MEMBER_INFO, SERVER_FILE_README, SERVER_FILE_VAULT, SERVER_PATH_MEMBER_PUB, - SERVER_PATH_MEMBERS, SERVER_PATH_SHEETS, SERVER_PATH_VF_ROOT, - }, - data::{ - member::Member, - vault::{Vault, config::VaultConfig}, - }, -}; - -use crate::get_test_dir; - -#[tokio::test] -async fn test_vault_setup_and_member_register() -> Result<(), std::io::Error> { - let dir = get_test_dir("member_register").await?; - - // Setup vault - Vault::setup_vault(dir.clone(), "TestVault").await?; - - // Check if the following files and directories are created in `dir`: - // Files: SERVER_FILE_VAULT, SERVER_FILE_README - // Directories: SERVER_PATH_SHEETS, - // SERVER_PATH_MEMBERS, - // SERVER_PATH_MEMBER_PUB, - // SERVER_PATH_VIRTUAL_FILE_ROOT - assert!(dir.join(SERVER_FILE_VAULT).exists()); - assert!(dir.join(SERVER_FILE_README).exists()); - assert!(dir.join(SERVER_PATH_SHEETS).exists()); - assert!(dir.join(SERVER_PATH_MEMBERS).exists()); - assert!(dir.join(SERVER_PATH_MEMBER_PUB).exists()); - assert!(dir.join(SERVER_PATH_VF_ROOT).exists()); - - // Get vault - let config = VaultConfig::read_from(dir.join(SERVER_FILE_VAULT)).await?; - let Some(vault) = Vault::init(config, &dir) else { - return 
Err(Error::new(std::io::ErrorKind::NotFound, "Vault not found!")); - }; - - // Add member - let member_id = "test_member"; - vault - .register_member_to_vault(Member::new(member_id)) - .await?; - - const ID_PARAM: &str = "{member_id}"; - - // Check if the member info file exists - assert!( - dir.join(SERVER_FILE_MEMBER_INFO.replace(ID_PARAM, member_id)) - .exists() - ); - - // Remove member - vault.remove_member_from_vault(&member_id.to_string())?; - - // Check if the member info file not exists - assert!( - !dir.join(SERVER_FILE_MEMBER_INFO.replace(ID_PARAM, member_id)) - .exists() - ); - - Ok(()) -} diff --git a/crates/vcs_data/vcs_data_test/src/test_virtual_file_creation_and_update.rs b/crates/vcs_data/vcs_data_test/src/test_virtual_file_creation_and_update.rs deleted file mode 100644 index 2d9d393..0000000 --- a/crates/vcs_data/vcs_data_test/src/test_virtual_file_creation_and_update.rs +++ /dev/null @@ -1,162 +0,0 @@ -use std::time::Duration; - -use cfg_file::config::ConfigFile; -use tcp_connection_test::{ - handle::{ClientHandle, ServerHandle}, - target::TcpServerTarget, - target_configure::ServerTargetConfig, -}; -use tokio::{ - join, - time::{sleep, timeout}, -}; -use vcs_data::{ - constants::SERVER_FILE_VAULT, - data::{ - member::Member, - vault::{Vault, config::VaultConfig, virtual_file::VirtualFileVersionDescription}, - }, -}; - -use crate::get_test_dir; - -struct VirtualFileCreateClientHandle; -struct VirtualFileCreateServerHandle; - -impl ClientHandle for VirtualFileCreateClientHandle { - async fn process(mut instance: tcp_connection::instance::ConnectionInstance) { - let dir = get_test_dir("virtual_file_creation_and_update_2") - .await - .unwrap(); - // Create first test file for virtual file creation - let test_content_1 = b"Test file content for virtual file creation"; - let temp_file_path_1 = dir.join("test_virtual_file_1.txt"); - - tokio::fs::write(&temp_file_path_1, test_content_1) - .await - .unwrap(); - - // Send the first file to server for 
virtual file creation - instance.write_file(&temp_file_path_1).await.unwrap(); - - // Create second test file for virtual file update - let test_content_2 = b"Updated test file content for virtual file"; - let temp_file_path_2 = dir.join("test_virtual_file_2.txt"); - - tokio::fs::write(&temp_file_path_2, test_content_2) - .await - .unwrap(); - - // Send the second file to server for virtual file update - instance.write_file(&temp_file_path_2).await.unwrap(); - } -} - -impl ServerHandle for VirtualFileCreateServerHandle { - async fn process(mut instance: tcp_connection::instance::ConnectionInstance) { - let dir = get_test_dir("virtual_file_creation_and_update") - .await - .unwrap(); - - // Setup vault - Vault::setup_vault(dir.clone(), "TestVault").await.unwrap(); - - // Read vault - let Some(vault) = Vault::init( - VaultConfig::read_from(dir.join(SERVER_FILE_VAULT)) - .await - .unwrap(), - &dir, - ) else { - panic!("No vault found!"); - }; - - // Register member - let member_id = "test_member"; - vault - .register_member_to_vault(Member::new(member_id)) - .await - .unwrap(); - - // Create visual file - let virtual_file_id = vault - .create_virtual_file_from_connection(&mut instance, &member_id.to_string()) - .await - .unwrap(); - - // Grant edit right to member - vault - .grant_virtual_file_edit_right(&member_id.to_string(), &virtual_file_id) - .await - .unwrap(); - - // Update visual file - vault - .update_virtual_file_from_connection( - &mut instance, - &member_id.to_string(), - &virtual_file_id, - &"2".to_string(), - VirtualFileVersionDescription { - creator: member_id.to_string(), - description: "Update".to_string(), - }, - ) - .await - .unwrap(); - } -} - -#[tokio::test] -async fn test_virtual_file_creation_and_update() -> Result<(), std::io::Error> { - let host = "localhost:5009"; - - // Server setup - let Ok(server_target) = TcpServerTarget::< - VirtualFileCreateClientHandle, - VirtualFileCreateServerHandle, - >::from_domain(host) - .await - else { - 
panic!("Test target built failed from a domain named `{}`", host); - }; - - // Client setup - let Ok(client_target) = TcpServerTarget::< - VirtualFileCreateClientHandle, - VirtualFileCreateServerHandle, - >::from_domain(host) - .await - else { - panic!("Test target built failed from a domain named `{}`", host); - }; - - let future_server = async move { - // Only process once - let configured_server = server_target.server_cfg(ServerTargetConfig::default().once()); - - // Listen here - let _ = configured_server.listen().await; - }; - - let future_client = async move { - // Wait for server start - let _ = sleep(Duration::from_secs_f32(1.5)).await; - - // Connect here - let _ = client_target.connect().await; - }; - - let test_timeout = Duration::from_secs(15); - - timeout(test_timeout, async { join!(future_client, future_server) }) - .await - .map_err(|_| { - std::io::Error::new( - std::io::ErrorKind::TimedOut, - format!("Test timed out after {:?}", test_timeout), - ) - })?; - - Ok(()) -} diff --git a/crates/vcs_docs/Cargo.toml b/crates/vcs_docs/Cargo.toml deleted file mode 100644 index 285b83d..0000000 --- a/crates/vcs_docs/Cargo.toml +++ /dev/null @@ -1,6 +0,0 @@ -[package] -name = "vcs_docs" -edition = "2024" -version.workspace = true - -[dependencies] diff --git a/crates/vcs_docs/build.rs b/crates/vcs_docs/build.rs deleted file mode 100644 index d1e878f..0000000 --- a/crates/vcs_docs/build.rs +++ /dev/null @@ -1,196 +0,0 @@ -use std::env; -use std::fs; -use std::io::{self, Write}; -use std::path::Path; - -// Template markers for code generation -const TEMPLATE_DOCUMENT_BEGIN: &str = "--- TEMPLATE DOCUMENT BEGIN ---"; -const TEMPLATE_DOCUMENT_END: &str = "--- TEMPLATE DOCUMENT END ---"; -const TEMPLATE_FUNC_BEGIN: &str = "--- TEMPLATE FUNC BEGIN ---"; -const TEMPLATE_FUNC_END: &str = "--- TEMPLATE FUNC END ---"; -const TEMPLATE_LIST_BEGIN: &str = "--- TEMPLATE LIST BEGIN ---"; -const TEMPLATE_LIST_END: &str = "--- TEMPLATE LIST END ---"; - -// Template parameter 
patterns for substitution -const PARAM_DOCUMENT_PATH: &str = "{{DOCUMENT_PATH}}"; -const PARAM_DOCUMENT_CONSTANT_NAME: &str = "{{DOCUMENT_CONSTANT_NAME}}"; -const PARAM_DOCUMENT_CONTENT: &str = "{{DOCUMENT_CONTENT}}"; -const PARAM_DOCUMENT_PATH_SNAKE_CASE: &str = "{{DOCUMENT_PATH_SNAKE_CASE}}"; - -fn main() -> io::Result<()> { - println!("cargo:rerun-if-changed=src/docs.rs.template"); - println!("cargo:rerun-if-changed=../../docs/Documents"); - - let out_dir = env::var("OUT_DIR").unwrap(); - let dest_path = Path::new(&out_dir).join("docs.rs"); - - // Read all markdown files from docs directory recursively - let docs_dir = Path::new("../../docs/Documents"); - let mut documents = Vec::new(); - - if docs_dir.exists() { - collect_text_files(docs_dir, &mut documents)?; - } - - // Read template file - let template_path = Path::new("src/docs.rs.template"); - let template_content = fs::read_to_string(template_path)?; - - // Extract template sections preserving original indentation - let document_template = template_content - .split(TEMPLATE_DOCUMENT_BEGIN) - .nth(1) - .and_then(|s| s.split(TEMPLATE_DOCUMENT_END).next()) - .unwrap_or("") - .trim_start_matches('\n') - .trim_end_matches('\n'); - - let match_arm_template = template_content - .split(TEMPLATE_FUNC_BEGIN) - .nth(1) - .and_then(|s| s.split(TEMPLATE_FUNC_END).next()) - .unwrap_or("") - .trim_start_matches('\n') - .trim_end_matches('\n'); - - // Generate document blocks and match arms - let mut document_blocks = String::new(); - let mut match_arms = String::new(); - let mut list_items = String::new(); - - for (relative_path, content) in &documents { - // Calculate parameters for template substitution - let document_path = format!("./docs/Documents/{}", relative_path); - - // Generate constant name from relative path - let document_constant_name = relative_path - .replace(['/', '\\', '-'], "_") - .replace(".md", "") - .replace(".txt", "") - .replace(".toml", "") - .replace(".yaml", "") - .replace(".yml", "") - 
.replace(".json", "") - .replace(".rs", "") - .to_uppercase(); - - // Generate snake_case name for function matching - let document_path_snake_case = relative_path - .replace(['/', '\\', '-'], "_") - .replace(".md", "") - .replace(".txt", "") - .replace(".toml", "") - .replace(".yaml", "") - .replace(".yml", "") - .replace(".json", "") - .replace(".rs", "") - .to_lowercase(); - - // Escape double quotes in content - let escaped_content = content.trim().replace('\"', "\\\""); - - // Replace template parameters in document block preserving indentation - let document_block = document_template - .replace(PARAM_DOCUMENT_PATH, &document_path) - .replace(PARAM_DOCUMENT_CONSTANT_NAME, &document_constant_name) - .replace(PARAM_DOCUMENT_CONTENT, &escaped_content) - .replace("r#\"\"#", &format!("r#\"{}\"#", escaped_content)); - - document_blocks.push_str(&document_block); - document_blocks.push_str("\n\n"); - - // Replace template parameters in match arm preserving indentation - let match_arm = match_arm_template - .replace(PARAM_DOCUMENT_PATH_SNAKE_CASE, &document_path_snake_case) - .replace(PARAM_DOCUMENT_CONSTANT_NAME, &document_constant_name); - - match_arms.push_str(&match_arm); - match_arms.push('\n'); - - // Generate list item for documents() function - let list_item = format!(" \"{}\".to_string(),", document_path_snake_case); - list_items.push_str(&list_item); - list_items.push('\n'); - } - - // Remove trailing newline from the last list item - if !list_items.is_empty() { - list_items.pop(); - } - - // Build final output by replacing template sections - let mut output = String::new(); - - // Add header before document blocks - if let Some(header) = template_content.split(TEMPLATE_DOCUMENT_BEGIN).next() { - output.push_str(header.trim()); - output.push_str("\n\n"); - } - - // Add document blocks - output.push_str(&document_blocks); - - // Add function section - if let Some(func_section) = template_content.split(TEMPLATE_FUNC_BEGIN).next() - && let Some(rest) = 
func_section.split(TEMPLATE_DOCUMENT_END).nth(1) - { - output.push_str(rest.trim()); - output.push('\n'); - } - - // Add match arms - output.push_str(&match_arms); - - // Add list items for documents() function - if let Some(list_section) = template_content.split(TEMPLATE_LIST_BEGIN).next() - && let Some(rest) = list_section.split(TEMPLATE_FUNC_END).nth(1) - { - output.push_str(rest.trim()); - output.push('\n'); - } - output.push_str(&list_items); - - // Add footer - if let Some(footer) = template_content.split(TEMPLATE_LIST_END).nth(1) { - // Preserve original indentation in footer - output.push_str(footer); - } - - // Write generated file - let mut file = fs::File::create(&dest_path)?; - file.write_all(output.as_bytes())?; - - // Copy to src directory for development - let src_dest_path = Path::new("src/docs.rs"); - fs::write(src_dest_path, output)?; - - Ok(()) -} - -fn collect_text_files(dir: &Path, documents: &mut Vec<(String, String)>) -> io::Result<()> { - for entry in fs::read_dir(dir)? { - let entry = entry?; - let path = entry.path(); - - if path.is_dir() { - collect_text_files(&path, documents)?; - } else if path.extension().is_some_and(|ext| { - ext == "md" - || ext == "txt" - || ext == "toml" - || ext == "yaml" - || ext == "yml" - || ext == "json" - || ext == "rs" - }) && let Ok(relative_path) = path.strip_prefix("../../docs/Documents") - && let Some(relative_path_str) = relative_path.to_str() - { - let content = fs::read_to_string(&path)?; - documents.push(( - relative_path_str.trim_start_matches('/').to_string(), - content, - )); - } - } - Ok(()) -} diff --git a/crates/vcs_docs/src/docs.rs.template b/crates/vcs_docs/src/docs.rs.template deleted file mode 100644 index c6787d9..0000000 --- a/crates/vcs_docs/src/docs.rs.template +++ /dev/null @@ -1,26 +0,0 @@ -// Auto-generated code. 
- ---- TEMPLATE DOCUMENT BEGIN --- -/// From {{DOCUMENT_PATH}} -pub const {{DOCUMENT_CONSTANT_NAME}}: &str = "{{DOCUMENT_CONTENT}}"; - ---- TEMPLATE DOCUMENT END --- - -// Get document content by name -pub fn document(name: impl AsRef) -> Option { - match name.as_ref() { ---- TEMPLATE FUNC BEGIN --- - "{{DOCUMENT_PATH_SNAKE_CASE}}" => Some({{DOCUMENT_CONSTANT_NAME}}.to_string()), ---- TEMPLATE FUNC END --- - _ => None, - } -} - -// Get list of all available document names -pub fn documents() -> Vec { - vec![ ---- TEMPLATE LIST BEGIN --- - "{{DOCUMENT_PATH_SNAKE_CASE}}".to_string(), ---- TEMPLATE LIST END --- - ] -} diff --git a/crates/vcs_docs/src/lib.rs b/crates/vcs_docs/src/lib.rs deleted file mode 100644 index ca422a9..0000000 --- a/crates/vcs_docs/src/lib.rs +++ /dev/null @@ -1 +0,0 @@ -pub mod docs; diff --git a/data/Cargo.toml b/data/Cargo.toml new file mode 100644 index 0000000..7506814 --- /dev/null +++ b/data/Cargo.toml @@ -0,0 +1,40 @@ +[package] +name = "vcs_data" +edition = "2024" +version.workspace = true + +[dependencies] + +# Utils +cfg_file = { path = "../utils/cfg_file", features = ["default"] } +data_struct = { path = "../utils/data_struct" } +sha1_hash = { path = "../utils/sha1_hash" } +tcp_connection = { path = "../utils/tcp_connection" } +string_proc = { path = "../utils/string_proc" } + +# Core +action_system = { path = "../systems/action" } +vcs_docs = { path = "../docs" } + +# Random +rand = "0.9.2" + +# Identity +uuid = { version = "1.18.1", features = ["v4", "serde"] } +whoami = "1.6.1" + +# Serialization +serde = { version = "1.0.228", features = ["derive"] } + +# Async & Networking +tokio = { version = "1.48.0", features = ["full"] } + +# Filesystem +dirs = "6.0.0" +walkdir = "2.5.0" + +# Time +chrono = "0.4.42" + +# Windows API +winapi = { version = "0.3.9", features = ["fileapi", "winbase", "winnt"] } diff --git a/data/src/constants.rs b/data/src/constants.rs new file mode 100644 index 0000000..3d839a6 --- /dev/null +++ 
b/data/src/constants.rs @@ -0,0 +1,118 @@ +// ------------------------------------------------------------------------------------- + +// Project +pub const PATH_TEMP: &str = "./.temp/"; + +// Default Port +pub const PORT: u16 = 25331; + +// Vault Host Name +pub const VAULT_HOST_NAME: &str = "host"; + +// ------------------------------------------------------------------------------------- + +// Suffix +pub const SERVER_SUFFIX_SHEET_FILE: &str = ".st"; +pub const SERVER_SUFFIX_SHEET_FILE_NO_DOT: &str = "st"; + +pub const SERVER_SUFFIX_SHEET_SHARE_FILE: &str = ".sre"; +pub const SERVER_SUFFIX_SHEET_SHARE_FILE_NO_DOT: &str = "sre"; + +pub const SERVER_SUFFIX_MEMBER_INFO: &str = ".json"; +pub const SERVER_SUFFIX_MEMBER_INFO_NO_DOT: &str = "json"; + +pub const SERVER_SUFFIX_VF_META: &str = ".vf"; +pub const SERVER_SUFFIX_VF_META_NO_DOT: &str = "vf"; + +pub const CLIENT_SUFFIX_LATEST_INFO: &str = ".up"; +pub const CLIENT_SUFFIX_LATEST_INFO_NO_DOT: &str = "up"; + +pub const CLIENT_SUFFIX_LATEST_DATA: &str = ".upf"; +pub const CLIENT_SUFFIX_LATEST_DATA_NO_DOT: &str = "upf"; + +pub const CLIENT_SUFFIX_LOCAL_SHEET_FILE: &str = ".lst"; +pub const CLIENT_SUFFIX_LOCAL_SHEET_FILE_NO_DOT: &str = "lst"; + +pub const CLIENT_SUFFIX_CACHED_SHEET_FILE: &str = ".st"; +pub const CLIENT_SUFFIX_CACHED_SHEET_FILE_NO_DOT: &str = "st"; + +// ------------------------------------------------------------------------------------- + +// Server +// Server - Vault (Main) +pub const SERVER_FILE_VAULT: &str = "./vault.toml"; + +// Server - Sheets +pub const REF_SHEET_NAME: &str = "ref"; +pub const SERVER_PATH_SHEETS: &str = "./sheets/"; +pub const SERVER_PATH_SHARES: &str = "./sheets/shares/{sheet_name}/"; +pub const SERVER_FILE_SHEET: &str = "./sheets/{sheet_name}.st"; +pub const SERVER_FILE_SHEET_SHARE: &str = "./sheets/shares/{sheet_name}/{share_id}.sre"; + +// Server - Members +pub const SERVER_PATH_MEMBERS: &str = "./members/"; +pub const SERVER_PATH_MEMBER_PUB: &str = "./key/"; +pub const 
SERVER_FILE_MEMBER_INFO: &str = "./members/{member_id}.json"; +pub const SERVER_FILE_MEMBER_PUB: &str = "./key/{member_id}.pem"; + +// Server - Virtual File Storage +pub const SERVER_PATH_VF_TEMP: &str = "./.temp/{temp_name}"; +pub const SERVER_PATH_VF_ROOT: &str = "./storage/"; +pub const SERVER_PATH_VF_STORAGE: &str = "./storage/{vf_index}/{vf_id}/"; +pub const SERVER_FILE_VF_VERSION_INSTANCE: &str = "./storage/{vf_index}/{vf_id}/{vf_version}.rf"; +pub const SERVER_FILE_VF_META: &str = "./storage/{vf_index}/{vf_id}/meta.vf"; +pub const SERVER_NAME_VF_META: &str = "meta.vf"; + +// Server - Updates +pub const SERVER_FILE_UPDATES: &str = "./.updates.txt"; + +// Server - Service +pub const SERVER_FILE_LOCKFILE: &str = "./.lock"; + +// Server - Documents +pub const SERVER_FILE_README: &str = "./README.md"; + +// ------------------------------------------------------------------------------------- + +// Client +pub const CLIENT_PATH_WORKSPACE_ROOT: &str = "./.jv/"; +pub const CLIENT_FOLDER_WORKSPACE_ROOT_NAME: &str = ".jv"; + +// Client - Workspace (Main) +pub const CLIENT_FILE_WORKSPACE: &str = "./.jv/workspace.toml"; + +// Client - Latest Information +pub const CLIENT_FILE_LATEST_INFO: &str = "./.jv/latest/{account}.up"; +pub const CLIENT_FILE_LATEST_DATA: &str = "./.jv/latest/{account}.upf"; + +// Client - Local +pub const CLIENT_PATH_LOCAL_DRAFT: &str = "./.jv/drafts/{account}/{sheet_name}/"; +pub const CLIENT_PATH_LOCAL_SHEET: &str = "./.jv/sheets/local/"; +pub const CLIENT_FILE_LOCAL_SHEET: &str = "./.jv/sheets/local/{account}/{sheet_name}.lst"; +pub const CLIENT_PATH_CACHED_SHEET: &str = "./.jv/sheets/cached/"; +pub const CLIENT_FILE_CACHED_SHEET: &str = "./.jv/sheets/cached/{sheet_name}.st"; + +pub const CLIENT_FILE_LOCAL_SHEET_NOSET: &str = "./.jv/.temp/wrong.json"; +pub const CLIENT_FILE_MEMBER_HELD_NOSET: &str = "./.jv/.temp/wrong.json"; +pub const CLIENT_FILE_LATEST_INFO_NOSET: &str = "./.jv/.temp/wrong.json"; + +// Client - Other +pub const 
CLIENT_FILE_IGNOREFILES: &str = "IGNORE_RULES.toml"; +pub const CLIENT_FILE_TODOLIST: &str = "./SETUP.md"; +pub const CLIENT_FILE_GITIGNORE: &str = "./.jv/.gitignore"; +pub const CLIENT_CONTENT_GITIGNORE: &str = "# Git support for JVCS Workspace + +# Ignore cached datas +/sheets/cached/ +/latest/ + +.vault_modified"; +pub const CLIENT_FILE_VAULT_MODIFIED: &str = "./.jv/.vault_modified"; +pub const CLIENT_FILE_TEMP_FILE: &str = "./.jv/.temp/download/{temp_name}"; + +// ------------------------------------------------------------------------------------- + +// User - Verify (Documents path) +pub const USER_FILE_ACCOUNTS: &str = "./accounts/"; +pub const USER_FILE_KEY: &str = "./accounts/{self_id}_private.pem"; +pub const USER_FILE_MEMBER: &str = "./accounts/{self_id}.toml"; diff --git a/data/src/current.rs b/data/src/current.rs new file mode 100644 index 0000000..209c0cc --- /dev/null +++ b/data/src/current.rs @@ -0,0 +1,84 @@ +use crate::constants::*; +use std::io::{self, Error}; +use std::{env::set_current_dir, path::PathBuf}; + +/// Find the nearest vault or local workspace and correct the `current_dir` to it +pub fn correct_current_dir() -> Result<(), io::Error> { + if let Some(local_workspace) = current_local_path() { + set_current_dir(local_workspace)?; + return Ok(()); + } + if let Some(vault) = current_vault_path() { + set_current_dir(vault)?; + return Ok(()); + } + Err(Error::new( + io::ErrorKind::NotFound, + "Could not find any vault or local workspace!", + )) +} + +/// Get the nearest Vault directory from `current_dir` +pub fn current_vault_path() -> Option { + let current_dir = std::env::current_dir().ok()?; + find_vault_path(current_dir) +} + +/// Get the nearest local workspace from `current_dir` +pub fn current_local_path() -> Option { + let current_dir = std::env::current_dir().ok()?; + find_local_path(current_dir) +} + +/// Get the nearest Vault directory from the specified path +pub fn find_vault_path(path: impl Into) -> Option { + let mut 
current_path = path.into(); + let vault_file = SERVER_FILE_VAULT; + + loop { + let vault_toml_path = current_path.join(vault_file); + if vault_toml_path.exists() { + return Some(current_path); + } + + if let Some(parent) = current_path.parent() { + current_path = parent.to_path_buf(); + } else { + break; + } + } + + None +} + +/// Get the nearest local workspace from the specified path +pub fn find_local_path(path: impl Into) -> Option { + let mut current_path = path.into(); + let workspace_dir = CLIENT_PATH_WORKSPACE_ROOT; + + loop { + let jvc_path = current_path.join(workspace_dir); + if jvc_path.exists() { + return Some(current_path); + } + + if let Some(parent) = current_path.parent() { + current_path = parent.to_path_buf(); + } else { + break; + } + } + + None +} + +/// Get the system's document directory and join with the appropriate application name +pub fn current_cfg_dir() -> Option { + dirs::config_local_dir().map(|path| { + if cfg!(target_os = "linux") { + path.join("jvcs") + } else { + path.join("JustEnoughVCS") + } + }) +} diff --git a/data/src/data.rs b/data/src/data.rs new file mode 100644 index 0000000..ed9383a --- /dev/null +++ b/data/src/data.rs @@ -0,0 +1,5 @@ +pub mod local; +pub mod member; +pub mod sheet; +pub mod user; +pub mod vault; diff --git a/data/src/data/local.rs b/data/src/data/local.rs new file mode 100644 index 0000000..67f3943 --- /dev/null +++ b/data/src/data/local.rs @@ -0,0 +1,269 @@ +use std::{ + collections::HashMap, + env::current_dir, + path::{Path, PathBuf}, + sync::Arc, +}; + +use cfg_file::config::ConfigFile; +use string_proc::format_path::format_path; +use tokio::{fs, sync::Mutex}; +use vcs_docs::docs::READMES_LOCAL_WORKSPACE_TODOLIST; + +use crate::{ + constants::{ + CLIENT_CONTENT_GITIGNORE, CLIENT_FILE_GITIGNORE, CLIENT_FILE_LOCAL_SHEET, + CLIENT_FILE_TODOLIST, CLIENT_FILE_WORKSPACE, CLIENT_FOLDER_WORKSPACE_ROOT_NAME, + CLIENT_PATH_LOCAL_SHEET, CLIENT_SUFFIX_LOCAL_SHEET_FILE, + }, + current::{current_local_path, 
find_local_path}, + data::{ + local::{ + config::LocalConfig, + local_sheet::{LocalSheet, LocalSheetData, LocalSheetPathBuf}, + }, + member::MemberId, + sheet::SheetName, + }, +}; + +pub mod align; +pub mod cached_sheet; +pub mod config; +pub mod latest_file_data; +pub mod latest_info; +pub mod local_files; +pub mod local_sheet; +pub mod vault_modified; +pub mod workspace_analyzer; + +const SHEET_NAME: &str = "{sheet_name}"; +const ACCOUNT_NAME: &str = "{account}"; + +pub struct LocalWorkspace { + config: Arc>, + local_path: PathBuf, +} + +impl LocalWorkspace { + /// Get the path of the local workspace. + pub fn local_path(&self) -> &PathBuf { + &self.local_path + } + + /// Initialize local workspace. + pub fn init(config: LocalConfig, local_path: impl Into) -> Option { + let local_path = find_local_path(local_path)?; + Some(Self { + config: Arc::new(Mutex::new(config)), + local_path, + }) + } + + /// Initialize local workspace in the current directory. + pub fn init_current_dir(config: LocalConfig) -> Option { + let local_path = current_local_path()?; + Some(Self { + config: Arc::new(Mutex::new(config)), + local_path, + }) + } + + /// Setup local workspace + pub async fn setup_local_workspace( + local_path: impl Into, + ) -> Result<(), std::io::Error> { + let local_path: PathBuf = local_path.into(); + + // Ensure directory is empty + if local_path.exists() && local_path.read_dir()?.next().is_some() { + return Err(std::io::Error::new( + std::io::ErrorKind::DirectoryNotEmpty, + "DirectoryNotEmpty", + )); + } + + // 1. Setup config + let config = LocalConfig::default(); + LocalConfig::write_to(&config, local_path.join(CLIENT_FILE_WORKSPACE)).await?; + + // 2. Setup SETUP.md + let readme_content = READMES_LOCAL_WORKSPACE_TODOLIST.trim().to_string(); + fs::write(local_path.join(CLIENT_FILE_TODOLIST), readme_content).await?; + + // 3. 
Setup .gitignore + fs::write( + local_path.join(CLIENT_FILE_GITIGNORE), + CLIENT_CONTENT_GITIGNORE, + ) + .await?; + + // On Windows, set the .jv directory as hidden + let jv_dir = local_path.join(CLIENT_FOLDER_WORKSPACE_ROOT_NAME); + let _ = hide_folder::hide_folder(&jv_dir); + + Ok(()) + } + + /// Get a reference to the local configuration. + pub fn config(&self) -> Arc> { + self.config.clone() + } + + /// Setup local workspace in current directory + pub async fn setup_local_workspace_current_dir() -> Result<(), std::io::Error> { + Self::setup_local_workspace(current_dir()?).await?; + Ok(()) + } + + /// Get the path to a local sheet. + pub fn local_sheet_path(&self, member: &MemberId, sheet: &SheetName) -> PathBuf { + self.local_path.join( + CLIENT_FILE_LOCAL_SHEET + .replace(ACCOUNT_NAME, member) + .replace(SHEET_NAME, sheet), + ) + } + + /// Read or initialize a local sheet. + pub async fn local_sheet( + &self, + member: &MemberId, + sheet: &SheetName, + ) -> Result, std::io::Error> { + let local_sheet_path = self.local_sheet_path(member, sheet); + + if !local_sheet_path.exists() { + let sheet_data = LocalSheetData { + mapping: HashMap::new(), + vfs: HashMap::new(), + }; + LocalSheetData::write_to(&sheet_data, local_sheet_path).await?; + return Ok(LocalSheet { + local_workspace: self, + member: member.clone(), + sheet_name: sheet.clone(), + data: sheet_data, + }); + } + + let data = LocalSheetData::read_from(&local_sheet_path).await?; + let local_sheet = LocalSheet { + local_workspace: self, + member: member.clone(), + sheet_name: sheet.clone(), + data, + }; + + Ok(local_sheet) + } + + /// Collect all sheet paths + pub async fn local_sheet_paths(&self) -> Result, std::io::Error> { + let local_sheet_path = self.local_path.join(CLIENT_PATH_LOCAL_SHEET); + let mut sheet_paths = Vec::new(); + + async fn collect_sheet_paths( + dir: &Path, + suffix: &str, + paths: &mut Vec, + ) -> Result<(), std::io::Error> { + if dir.is_dir() { + let mut entries =
fs::read_dir(dir).await?; + while let Some(entry) = entries.next_entry().await? { + let path = entry.path(); + + if path.is_dir() { + Box::pin(collect_sheet_paths(&path, suffix, paths)).await?; + } else if path.is_file() + && let Some(extension) = path.extension() + && extension == suffix.trim_start_matches('.') + { + let formatted_path = format_path(path)?; + paths.push(formatted_path); + } + } + } + Ok(()) + } + + collect_sheet_paths( + &local_sheet_path, + CLIENT_SUFFIX_LOCAL_SHEET_FILE, + &mut sheet_paths, + ) + .await?; + Ok(sheet_paths) + } +} + +mod hide_folder { + use std::io; + use std::path::Path; + + #[cfg(windows)] + use std::os::windows::ffi::OsStrExt; + #[cfg(windows)] + use winapi::um::fileapi::{GetFileAttributesW, INVALID_FILE_ATTRIBUTES, SetFileAttributesW}; + + pub fn hide_folder(path: &Path) -> io::Result<()> { + if !path.is_dir() { + return Err(io::Error::new( + io::ErrorKind::InvalidInput, + "Path must be a directory", + )); + } + + if let Some(file_name) = path.file_name().and_then(|n| n.to_str()) { + if !file_name.starts_with('.') { + return Err(io::Error::new( + io::ErrorKind::InvalidInput, + "Directory name must start with '.'", + )); + } + } else { + return Err(io::Error::new( + io::ErrorKind::InvalidInput, + "Invalid directory name", + )); + } + + hide_folder_impl(path) + } + + #[cfg(windows)] + fn hide_folder_impl(path: &Path) -> io::Result<()> { + // Convert to Windows wide string format + let path_str: Vec = path.as_os_str().encode_wide().chain(Some(0)).collect(); + + // Get current attributes + let attrs = unsafe { GetFileAttributesW(path_str.as_ptr()) }; + if attrs == INVALID_FILE_ATTRIBUTES { + return Err(io::Error::last_os_error()); + } + + // Add hidden attribute flag + let new_attrs = attrs | winapi::um::winnt::FILE_ATTRIBUTE_HIDDEN; + + // Set new attributes + let success = unsafe { SetFileAttributesW(path_str.as_ptr(), new_attrs) }; + if success == 0 { + return Err(io::Error::last_os_error()); + } + + Ok(()) + } + + 
#[cfg(unix)] + fn hide_folder_impl(_path: &Path) -> io::Result<()> { + Ok(()) + } + + #[cfg(not(any(windows, unix)))] + fn hide_folder_impl(_path: &Path) -> io::Result<()> { + Err(io::Error::new( + io::ErrorKind::Unsupported, + "Unsupported operating system", + )) + } +} diff --git a/data/src/data/local/align.rs b/data/src/data/local/align.rs new file mode 100644 index 0000000..b72804c --- /dev/null +++ b/data/src/data/local/align.rs @@ -0,0 +1,110 @@ +use std::{ + collections::{HashMap, HashSet}, + path::PathBuf, +}; + +use data_struct::data_sort::quick_sort_with_cmp; + +use crate::data::local::workspace_analyzer::AnalyzeResult; + +pub type AlignTaskName = String; +pub type AlignPathBuf = PathBuf; +pub type AlignLostPathBuf = PathBuf; +pub type AlignCreatedPathBuf = PathBuf; + +pub struct AlignTasks { + pub created: Vec<(AlignTaskName, AlignPathBuf)>, + pub lost: Vec<(AlignTaskName, AlignPathBuf)>, + pub moved: Vec<(AlignTaskName, (AlignLostPathBuf, AlignCreatedPathBuf))>, + pub erased: Vec<(AlignTaskName, AlignPathBuf)>, +} + +impl AlignTasks { + pub fn clone_from_analyze_result(result: &AnalyzeResult) -> Self { + AlignTasks { + created: path_hash_set_sort_helper(result.created.clone(), "created"), + lost: path_hash_set_sort_helper(result.lost.clone(), "lost"), + moved: path_hash_map_sort_helper(result.moved.clone(), "moved"), + erased: path_hash_set_sort_helper(result.erased.clone(), "erased"), + } + } + + pub fn from_analyze_result(result: AnalyzeResult) -> Self { + AlignTasks { + created: path_hash_set_sort_helper(result.created, "created"), + lost: path_hash_set_sort_helper(result.lost, "lost"), + moved: path_hash_map_sort_helper(result.moved, "moved"), + erased: path_hash_set_sort_helper(result.erased, "erased"), + } + } +} + +fn path_hash_set_sort_helper( + hash_set: HashSet, + prefix: impl Into, +) -> Vec<(String, PathBuf)> { + let prefix_str = prefix.into(); + let mut vec: Vec<(String, PathBuf)> = hash_set + .into_iter() + .map(|path| { + let hash = 
sha1_hash::calc_sha1_string(path.to_string_lossy()); + let hash_prefix: String = hash.chars().take(8).collect(); + let name = format!("{}:{}", prefix_str, hash_prefix); + (name, path) + }) + .collect(); + + quick_sort_with_cmp(&mut vec, false, |a, b| { + // Compare by path depth first + let a_depth = a.1.components().count(); + let b_depth = b.1.components().count(); + + if a_depth != b_depth { + return if a_depth < b_depth { -1 } else { 1 }; + } + + // If same depth, compare lexicographically + match a.1.cmp(&b.1) { + std::cmp::Ordering::Less => -1, + std::cmp::Ordering::Equal => 0, + std::cmp::Ordering::Greater => 1, + } + }); + + vec +} + +fn path_hash_map_sort_helper( + hash_map: HashMap, + prefix: impl Into, +) -> Vec<(String, (PathBuf, PathBuf))> { + let prefix_str = prefix.into(); + let mut vec: Vec<(String, (PathBuf, PathBuf))> = hash_map + .into_values() + .map(|(path1, path2)| { + let hash = sha1_hash::calc_sha1_string(path1.to_string_lossy()); + let hash_prefix: String = hash.chars().take(8).collect(); + let name = format!("{}:{}", prefix_str, hash_prefix); + (name, (path1, path2)) + }) + .collect(); + + quick_sort_with_cmp(&mut vec, false, |a, b| { + // Compare by first PathBuf's path depth first + let a_depth = a.1.0.components().count(); + let b_depth = b.1.0.components().count(); + + if a_depth != b_depth { + return if a_depth < b_depth { -1 } else { 1 }; + } + + // If same depth, compare lexicographically by first PathBuf + match a.1.0.cmp(&b.1.0) { + std::cmp::Ordering::Less => -1, + std::cmp::Ordering::Equal => 0, + std::cmp::Ordering::Greater => 1, + } + }); + + vec +} diff --git a/data/src/data/local/cached_sheet.rs b/data/src/data/local/cached_sheet.rs new file mode 100644 index 0000000..39f9814 --- /dev/null +++ b/data/src/data/local/cached_sheet.rs @@ -0,0 +1,94 @@ +use std::{io::Error, path::PathBuf}; + +use cfg_file::config::ConfigFile; +use string_proc::{format_path::format_path, snake_case}; +use tokio::fs; + +use crate::{ + constants::{ 
+ CLIENT_FILE_CACHED_SHEET, CLIENT_PATH_CACHED_SHEET, CLIENT_SUFFIX_CACHED_SHEET_FILE, + }, + current::current_local_path, + data::sheet::{SheetData, SheetName}, +}; + +pub type CachedSheetPathBuf = PathBuf; + +const SHEET_NAME: &str = "{sheet_name}"; +const ACCOUNT_NAME: &str = "{account}"; + +/// # Cached Sheet +/// The cached sheet is a read-only version cloned from the upstream repository to the local environment, +/// automatically generated during update operations, +/// which records the latest Sheet information stored locally to accelerate data access and reduce network requests. +pub struct CachedSheet; + +impl CachedSheet { + /// Read the cached sheet data. + pub async fn cached_sheet_data(sheet_name: &SheetName) -> Result { + let sheet_name = snake_case!(sheet_name.clone()); + + let Some(path) = Self::cached_sheet_path(sheet_name) else { + return Err(Error::new( + std::io::ErrorKind::NotFound, + "Local workspace not found!", + )); + }; + let data = SheetData::read_from(path).await?; + Ok(data) + } + + /// Get the path to the cached sheet file. + pub fn cached_sheet_path(sheet_name: SheetName) -> Option { + let current_workspace = current_local_path()?; + Some( + current_workspace + .join(CLIENT_FILE_CACHED_SHEET.replace(SHEET_NAME, &sheet_name.to_string())), + ) + } + + /// Get all cached sheet names + pub async fn cached_sheet_names() -> Result, std::io::Error> { + let mut dir = fs::read_dir(CLIENT_PATH_CACHED_SHEET).await?; + let mut sheet_names = Vec::new(); + + while let Some(entry) = dir.next_entry().await? 
{ + let path = entry.path(); + + if path.is_file() + && let Some(file_name) = path.file_name().and_then(|n| n.to_str()) + && file_name.ends_with(CLIENT_SUFFIX_CACHED_SHEET_FILE) { + let name_without_ext = file_name + .trim_end_matches(CLIENT_SUFFIX_CACHED_SHEET_FILE) + .to_string(); + sheet_names.push(name_without_ext); + } + } + + Ok(sheet_names) + } + + /// Get all cached sheet paths + pub async fn cached_sheet_paths() -> Result, std::io::Error> { + let mut dir = fs::read_dir(CLIENT_PATH_CACHED_SHEET).await?; + let mut sheet_paths = Vec::new(); + let Some(workspace_path) = current_local_path() else { + return Err(Error::new( + std::io::ErrorKind::NotFound, + "Local workspace not found!", + )); + }; + + while let Some(entry) = dir.next_entry().await? { + let path = entry.path(); + + if path.is_file() + && let Some(file_name) = path.file_name().and_then(|n| n.to_str()) + && file_name.ends_with(CLIENT_SUFFIX_CACHED_SHEET_FILE) { + sheet_paths.push(format_path(workspace_path.join(path))?); + } + } + + Ok(sheet_paths) + } +} diff --git a/data/src/data/local/config.rs b/data/src/data/local/config.rs new file mode 100644 index 0000000..8a89c20 --- /dev/null +++ b/data/src/data/local/config.rs @@ -0,0 +1,375 @@ +use cfg_file::ConfigFile; +use cfg_file::config::ConfigFile; +use serde::{Deserialize, Serialize}; +use std::io::Error; +use std::net::SocketAddr; +use std::path::Path; +use std::path::PathBuf; +use string_proc::snake_case; + +use crate::constants::CLIENT_FILE_WORKSPACE; +use crate::constants::CLIENT_FOLDER_WORKSPACE_ROOT_NAME; +use crate::constants::CLIENT_PATH_LOCAL_DRAFT; +use crate::constants::CLIENT_PATH_WORKSPACE_ROOT; +use crate::constants::PORT; +use crate::current::current_local_path; +use crate::data::local::latest_info::LatestInfo; +use crate::data::member::MemberId; +use crate::data::sheet::SheetName; +use crate::data::vault::config::VaultUuid; + +const ACCOUNT: &str = "{account}"; +const SHEET_NAME: &str = "{sheet_name}"; + +#[derive(Serialize, 
Deserialize, ConfigFile, Clone)] +#[cfg_file(path = CLIENT_FILE_WORKSPACE)] +pub struct LocalConfig { + /// The upstream address, representing the upstream address of the local workspace, + /// to facilitate timely retrieval of new updates from the upstream source. + #[serde(rename = "addr")] + upstream_addr: SocketAddr, + + /// The member ID used by the current local workspace. + /// This ID will be used to verify access permissions when connecting to the upstream server. + #[serde(rename = "as")] + using_account: MemberId, + + /// Whether the current member is interacting as a host. + /// In host mode, full Vault operation permissions are available except for adding new content. + #[serde(rename = "host")] + using_host_mode: bool, + + /// Whether the local workspace is stained. + /// + /// If stained, it can only set an upstream server with the same identifier. + /// + /// If the value is None, it means not stained; + /// otherwise, it contains the stain identifier (i.e., the upstream vault's unique ID) + #[serde(rename = "up_uid")] + stained_uuid: Option, + + /// The name of the sheet currently in use. + #[serde(rename = "use")] + sheet_in_use: Option, +} + +impl Default for LocalConfig { + fn default() -> Self { + Self { + upstream_addr: SocketAddr::V4(std::net::SocketAddrV4::new( + std::net::Ipv4Addr::new(127, 0, 0, 1), + PORT, + )), + using_account: "unknown".to_string(), + using_host_mode: false, + stained_uuid: None, + sheet_in_use: None, + } + } +} + +impl LocalConfig { + /// Set the vault address. + pub fn set_vault_addr(&mut self, addr: SocketAddr) { + self.upstream_addr = addr; + } + + /// Get the vault address. 
+ pub fn vault_addr(&self) -> SocketAddr { + self.upstream_addr + } + + /// Set the currently used account + pub fn set_current_account(&mut self, account: MemberId) -> Result<(), std::io::Error> { + if self.sheet_in_use().is_some() { + return Err(Error::new( + std::io::ErrorKind::DirectoryNotEmpty, + "Please exit the current sheet before switching accounts", + )); + } + self.using_account = account; + Ok(()) + } + + /// Set the host mode + pub fn set_host_mode(&mut self, host_mode: bool) { + self.using_host_mode = host_mode; + } + + /// Set the currently used sheet + pub async fn use_sheet(&mut self, sheet: SheetName) -> Result<(), std::io::Error> { + let sheet = snake_case!(sheet); + + // Check if the sheet is already in use + if self.sheet_in_use().is_some() { + return Err(std::io::Error::new( + std::io::ErrorKind::AlreadyExists, + "Sheet already in use", + )); + }; + + // Check if the local path exists + let local_path = self.get_local_path().await?; + + // Get latest info + let Ok(latest_info) = LatestInfo::read_from(LatestInfo::latest_info_path( + &local_path, + &self.current_account(), + )) + .await + else { + return Err(std::io::Error::new( + std::io::ErrorKind::NotFound, + "No latest info found", + )); + }; + + // Check if the sheet exists + if !latest_info.visible_sheets.contains(&sheet) { + return Err(std::io::Error::new( + std::io::ErrorKind::NotFound, + "Sheet not found", + )); + } + + // Check if there are any files or folders other than .jv + self.check_local_path_empty(&local_path).await?; + + // Get the draft folder path + let draft_folder = self.draft_folder(&self.using_account, &sheet, &local_path); + + if draft_folder.exists() { + // Exists + // Move the contents of the draft folder to the local path with rollback support + self.move_draft_to_local(&draft_folder, &local_path).await?; + } + + self.sheet_in_use = Some(sheet); + LocalConfig::write(self).await?; + + Ok(()) + } + + /// Exit the currently used sheet + pub async fn exit_sheet(&mut 
self) -> Result<(), std::io::Error> { + // Check if the sheet is already in use + if self.sheet_in_use().is_none() { + return Ok(()); + } + + // Check if the local path exists + let local_path = self.get_local_path().await?; + + // Get the current sheet name + let sheet_name = self.sheet_in_use().as_ref().unwrap().clone(); + + // Get the draft folder path + let draft_folder = self.draft_folder(&self.using_account, &sheet_name, &local_path); + + // Create the draft folder if it doesn't exist + if !draft_folder.exists() { + std::fs::create_dir_all(&draft_folder).map_err(std::io::Error::other)?; + } + + // Move all files and folders (except .jv folder) to the draft folder with rollback support + self.move_local_to_draft(&local_path, &draft_folder).await?; + + // Clear the sheet in use + self.sheet_in_use = None; + LocalConfig::write(self).await?; + + Ok(()) + } + + /// Get local path or return error + async fn get_local_path(&self) -> Result { + current_local_path().ok_or_else(|| { + std::io::Error::new(std::io::ErrorKind::NotFound, "Fail to get local path") + }) + } + + /// Check if local path is empty (except for .jv folder) + async fn check_local_path_empty(&self, local_path: &Path) -> Result<(), std::io::Error> { + let jv_folder = local_path.join(CLIENT_PATH_WORKSPACE_ROOT); + let mut entries = std::fs::read_dir(local_path).map_err(std::io::Error::other)?; + + if entries.any(|entry| { + if let Ok(entry) = entry { + let path = entry.path(); + path != jv_folder + && path.file_name().and_then(|s| s.to_str()) + != Some(CLIENT_FOLDER_WORKSPACE_ROOT_NAME) + } else { + false + } + }) { + return Err(std::io::Error::new( + std::io::ErrorKind::DirectoryNotEmpty, + "Local path is not empty!", + )); + } + + Ok(()) + } + + /// Move contents from draft folder to local path with rollback support + async fn move_draft_to_local( + &self, + draft_folder: &Path, + local_path: &Path, + ) -> Result<(), std::io::Error> { + let draft_entries: Vec<_> = std::fs::read_dir(draft_folder) + 
.map_err(std::io::Error::other)? + .collect::, _>>() + .map_err(std::io::Error::other)?; + + let mut moved_items: Vec = Vec::new(); + + for entry in &draft_entries { + let entry_path = entry.path(); + let target_path = local_path.join(entry_path.file_name().unwrap()); + + // Move each file/directory from draft folder to local path + std::fs::rename(&entry_path, &target_path).map_err(|e| { + // Rollback all previously moved items + for moved_item in &moved_items { + let _ = std::fs::rename(&moved_item.target, &moved_item.source); + } + std::io::Error::other(e) + })?; + + moved_items.push(MovedItem { + source: entry_path.clone(), + target: target_path.clone(), + }); + } + + // Remove the now-empty draft folder + std::fs::remove_dir(draft_folder).map_err(|e| { + // Rollback all moved items if folder removal fails + for moved_item in &moved_items { + let _ = std::fs::rename(&moved_item.target, &moved_item.source); + } + std::io::Error::other(e) + })?; + + Ok(()) + } + + /// Move contents from local path to draft folder with rollback support (except .jv folder) + async fn move_local_to_draft( + &self, + local_path: &Path, + draft_folder: &Path, + ) -> Result<(), std::io::Error> { + let jv_folder = local_path.join(CLIENT_PATH_WORKSPACE_ROOT); + let entries: Vec<_> = std::fs::read_dir(local_path) + .map_err(std::io::Error::other)? 
+ .collect::, _>>() + .map_err(std::io::Error::other)?; + + let mut moved_items: Vec = Vec::new(); + + for entry in &entries { + let entry_path = entry.path(); + + // Skip the .jv folder + if entry_path == jv_folder + || entry_path.file_name().and_then(|s| s.to_str()) + == Some(CLIENT_FOLDER_WORKSPACE_ROOT_NAME) + { + continue; + } + + let target_path = draft_folder.join(entry_path.file_name().unwrap()); + + // Move each file/directory from local path to draft folder + std::fs::rename(&entry_path, &target_path).map_err(|e| { + // Rollback all previously moved items + for moved_item in &moved_items { + let _ = std::fs::rename(&moved_item.target, &moved_item.source); + } + std::io::Error::other(e) + })?; + + moved_items.push(MovedItem { + source: entry_path.clone(), + target: target_path.clone(), + }); + } + + Ok(()) + } + + /// Get the currently used account + pub fn current_account(&self) -> MemberId { + self.using_account.clone() + } + + /// Check if the current member is interacting as a host. + pub fn is_host_mode(&self) -> bool { + self.using_host_mode + } + + /// Check if the local workspace is stained. + pub fn stained(&self) -> bool { + self.stained_uuid.is_some() + } + + /// Get the UUID of the vault that the local workspace is stained with. + pub fn stained_uuid(&self) -> Option { + self.stained_uuid + } + + /// Stain the local workspace with the given UUID. + pub fn stain(&mut self, uuid: VaultUuid) { + self.stained_uuid = Some(uuid); + } + + /// Unstain the local workspace. + pub fn unstain(&mut self) { + self.stained_uuid = None; + } + + /// Get the upstream address. 
+ pub fn upstream_addr(&self) -> SocketAddr { + self.upstream_addr + } + + /// Get the currently used sheet + pub fn sheet_in_use(&self) -> &Option { + &self.sheet_in_use + } + + /// Get draft folder + pub fn draft_folder( + &self, + account: &MemberId, + sheet_name: &SheetName, + local_workspace_path: impl Into, + ) -> PathBuf { + let account_str = snake_case!(account.as_str()); + let sheet_name_str = snake_case!(sheet_name.as_str()); + let draft_path = CLIENT_PATH_LOCAL_DRAFT + .replace(ACCOUNT, &account_str) + .replace(SHEET_NAME, &sheet_name_str); + local_workspace_path.into().join(draft_path) + } + + /// Get current draft folder + pub fn current_draft_folder(&self) -> Option { + let Some(sheet_name) = self.sheet_in_use() else { + return None; + }; + + let current_dir = current_local_path()?; + + Some(self.draft_folder(&self.using_account, sheet_name, current_dir)) + } +} + +#[derive(Clone)] +struct MovedItem { + source: PathBuf, + target: PathBuf, +} diff --git a/data/src/data/local/latest_file_data.rs b/data/src/data/local/latest_file_data.rs new file mode 100644 index 0000000..21c647c --- /dev/null +++ b/data/src/data/local/latest_file_data.rs @@ -0,0 +1,105 @@ +use std::{collections::HashMap, io::Error, path::PathBuf}; + +use cfg_file::ConfigFile; +use serde::{Deserialize, Serialize}; + +use crate::{ + constants::{CLIENT_FILE_LATEST_DATA, CLIENT_FILE_MEMBER_HELD_NOSET}, + current::current_local_path, + data::{ + member::MemberId, + vault::virtual_file::{VirtualFileId, VirtualFileVersion, VirtualFileVersionDescription}, + }, +}; + +const ACCOUNT: &str = "{account}"; + +/// # Latest file data +/// Records the file holder and the latest version for permission and update checks +#[derive(Debug, Default, Clone, Serialize, Deserialize, ConfigFile)] +#[cfg_file(path = CLIENT_FILE_MEMBER_HELD_NOSET)] +pub struct LatestFileData { + /// File holding status + #[serde(rename = "held")] + held_status: HashMap, + + /// File version + #[serde(rename = "ver")] + versions: 
HashMap, + + /// File histories and descriptions + #[serde(rename = "his")] + histories: HashMap>, +} + +#[derive(Debug, Default, Clone, Serialize, Deserialize)] +pub enum HeldStatus { + #[serde(rename = "Hold")] + HeldWith(MemberId), // Held, status changes are synced to the client + + #[serde(rename = "None")] + NotHeld, // Not held, status changes are synced to the client + + #[default] + #[serde(rename = "Unknown")] + WantedToKnow, // Holding status is unknown, notify server must inform client +} + +impl LatestFileData { + /// Get the path to the file holding the held status information for the given member. + pub fn data_path(account: &MemberId) -> Result { + let Some(local_path) = current_local_path() else { + return Err(Error::new( + std::io::ErrorKind::NotFound, + "Workspace not found.", + )); + }; + Ok(local_path.join(CLIENT_FILE_LATEST_DATA.replace(ACCOUNT, account))) + } + + /// Get the member who holds the file with the given ID. + pub fn file_holder(&self, vfid: &VirtualFileId) -> Option<&MemberId> { + self.held_status.get(vfid).and_then(|status| match status { + HeldStatus::HeldWith(id) => Some(id), + _ => None, + }) + } + + /// Get the version of the file with the given ID. + pub fn file_version(&self, vfid: &VirtualFileId) -> Option<&VirtualFileVersion> { + self.versions.get(vfid) + } + + /// Get the version histories and descriptions of the file with the given ID. + pub fn file_histories( + &self, + vfid: &VirtualFileId, + ) -> Option<&Vec<(VirtualFileVersion, VirtualFileVersionDescription)>> { + self.histories.get(vfid) + } + + /// Update the held status of the files.
+ pub fn update_info( + &mut self, + map: HashMap< + VirtualFileId, + ( + Option, + VirtualFileVersion, + Vec<(VirtualFileVersion, VirtualFileVersionDescription)>, + ), + >, + ) { + for (vfid, (member_id, version, desc)) in map { + self.held_status.insert( + vfid.clone(), + match member_id { + Some(member_id) => HeldStatus::HeldWith(member_id), + None => HeldStatus::NotHeld, + }, + ); + self.versions.insert(vfid.clone(), version); + self.histories.insert(vfid, desc); + } + } +} diff --git a/data/src/data/local/latest_info.rs b/data/src/data/local/latest_info.rs new file mode 100644 index 0000000..e11836b --- /dev/null +++ b/data/src/data/local/latest_info.rs @@ -0,0 +1,83 @@ +use std::{ + collections::{HashMap, HashSet}, + path::{Path, PathBuf}, + time::SystemTime, +}; + +use cfg_file::ConfigFile; +use serde::{Deserialize, Serialize}; + +use crate::{ + constants::{CLIENT_FILE_LATEST_INFO, CLIENT_FILE_LATEST_INFO_NOSET}, + data::{ + member::{Member, MemberId}, + sheet::{SheetData, SheetName, SheetPathBuf}, + vault::{ + sheet_share::{Share, SheetShareId}, + virtual_file::VirtualFileId, + }, + }, +}; + +const ACCOUNT: &str = "{account}"; + +/// # Latest Info +/// Locally cached latest information, +/// used to cache personal information from upstream for querying and quickly retrieving member information. 
+#[derive(Default, Serialize, Deserialize, ConfigFile)] +#[cfg_file(path = CLIENT_FILE_LATEST_INFO_NOSET)] +pub struct LatestInfo { + // Sheets + /// Visible sheets, + /// indicating which sheets I can edit + #[serde(rename = "my")] + pub visible_sheets: Vec, + + /// Invisible sheets, + /// indicating which sheets I can export files to (these sheets are not readable to me) + #[serde(rename = "others")] + pub invisible_sheets: Vec, + + /// Reference sheets, + /// indicating sheets owned by the host, visible to everyone, + /// but only the host can modify or add mappings within them + #[serde(rename = "refsheets")] + pub reference_sheets: HashSet, + + /// Reference sheet data, indicating what files I can get from the reference sheet + #[serde(rename = "ref")] + pub ref_sheet_content: SheetData, + + /// Reverse mapping from virtual file IDs to actual paths in reference sheets + #[serde(rename = "ref_vfs")] + pub ref_sheet_vfs_mapping: HashMap, + + /// Shares in my sheets, indicating which external merge requests have entries that I can view + #[serde(rename = "shares")] + pub shares_in_my_sheets: HashMap>, + + /// Update instant + #[serde(rename = "update")] + pub update_instant: Option, + + // Members + /// All member information of the vault, allowing me to contact them more conveniently + #[serde(rename = "members")] + pub vault_members: Vec, +} + +impl LatestInfo { + /// Get the path to the latest info file for a given workspace and member ID + pub fn latest_info_path(local_workspace_path: &Path, member_id: &MemberId) -> PathBuf { + local_workspace_path.join(CLIENT_FILE_LATEST_INFO.replace(ACCOUNT, member_id)) + } +} + +#[derive(Default, Serialize, Deserialize)] +pub struct SheetInfo { + #[serde(rename = "name")] + pub sheet_name: SheetName, + + #[serde(rename = "holder")] + pub holder_name: Option, +} diff --git a/data/src/data/local/local_files.rs b/data/src/data/local/local_files.rs new file mode 100644 index 0000000..9cc244f --- /dev/null +++ 
b/data/src/data/local/local_files.rs @@ -0,0 +1,148 @@ +use std::path::{Path, PathBuf}; + +use string_proc::format_path::format_path; +use tokio::fs; + +use crate::constants::CLIENT_FOLDER_WORKSPACE_ROOT_NAME; + +pub struct RelativeFiles { + pub(crate) files: Vec, +} + +impl IntoIterator for RelativeFiles { + type Item = PathBuf; + type IntoIter = std::vec::IntoIter; + + fn into_iter(self) -> Self::IntoIter { + self.files.into_iter() + } +} + +impl RelativeFiles { + pub fn iter(&self) -> std::slice::Iter<'_, PathBuf> { + self.files.iter() + } +} + +/// Read the relative paths within the project from the input file list +pub async fn get_relative_paths(local_path: &PathBuf, paths: &[PathBuf]) -> Option { + // Get Relative Paths + let Ok(paths) = format_input_paths_and_ignore_outside_paths(local_path, paths).await else { + return None; + }; + let files: Vec = abs_paths_to_abs_files(paths).await; + let Ok(files) = parse_to_relative(local_path, files) else { + return None; + }; + Some(RelativeFiles { files }) +} + +/// Normalize the input paths +async fn format_input_paths( + local_path: &Path, + track_files: &[PathBuf], +) -> Result, std::io::Error> { + let current_dir = local_path; + + let mut real_paths = Vec::new(); + for file in track_files { + let path = current_dir.join(file); + + // Skip paths that contain .jv directories + if path.components().any(|component| { + if let std::path::Component::Normal(name) = component { + name.to_str() == Some(CLIENT_FOLDER_WORKSPACE_ROOT_NAME) + } else { + false + } + }) { + continue; + } + + match format_path(path) { + Ok(path) => real_paths.push(path), + Err(e) => { + return Err(std::io::Error::new( + std::io::ErrorKind::InvalidData, + format!("Failed to format path: {}", e), + )); + } + } + } + + Ok(real_paths) +} + +/// Ignore files outside the workspace +async fn format_input_paths_and_ignore_outside_paths( + local_path: &PathBuf, + files: &[PathBuf], +) -> Result, std::io::Error> { + let result = 
format_input_paths(local_path, files).await?; + let result: Vec = result + .into_iter() + .filter(|path| path.starts_with(local_path)) + .collect(); + Ok(result) +} + +/// Normalize the input paths to relative paths +fn parse_to_relative( + local_dir: &PathBuf, + files: Vec, +) -> Result, std::io::Error> { + let result: Result, _> = files + .iter() + .map(|p| { + p.strip_prefix(local_dir) + .map(|relative| relative.to_path_buf()) + .map_err(|_| { + std::io::Error::new( + std::io::ErrorKind::InvalidInput, + "Path prefix stripping failed", + ) + }) + }) + .collect(); + + result +} + +/// Convert absolute paths to absolute file paths, expanding directories to their contained files +async fn abs_paths_to_abs_files(paths: Vec) -> Vec { + let mut files = Vec::new(); + + for path in paths { + if !path.exists() { + continue; + } + + let metadata = match fs::metadata(&path).await { + Ok(meta) => meta, + Err(_) => continue, + }; + + if metadata.is_file() { + files.push(path); + } else if metadata.is_dir() { + let walker = walkdir::WalkDir::new(&path); + for entry in walker.into_iter().filter_map(|e| e.ok()) { + if entry.path().components().any(|component| { + if let std::path::Component::Normal(name) = component { + name == CLIENT_FOLDER_WORKSPACE_ROOT_NAME + } else { + false + } + }) { + continue; + } + + if entry.file_type().is_file() { + files.push(entry.path().to_path_buf()); + } + } + } + } + + files +} diff --git a/data/src/data/local/local_sheet.rs b/data/src/data/local/local_sheet.rs new file mode 100644 index 0000000..6f9924c --- /dev/null +++ b/data/src/data/local/local_sheet.rs @@ -0,0 +1,377 @@ +use std::{collections::HashMap, io::Error, path::PathBuf, time::SystemTime}; + +use ::serde::{Deserialize, Serialize}; +use cfg_file::{ConfigFile, config::ConfigFile}; +use string_proc::format_path::format_path; + +use crate::{ + constants::CLIENT_FILE_LOCAL_SHEET_NOSET, + data::{ + local::LocalWorkspace, + member::MemberId, + sheet::SheetName, + 
vault::virtual_file::{VirtualFileId, VirtualFileVersion, VirtualFileVersionDescription}, + }, +}; + +pub type LocalFilePathBuf = PathBuf; +pub type LocalSheetPathBuf = PathBuf; + +/// # Local Sheet +/// Local sheet information, used to record metadata of actual local files, +/// to compare with upstream information for more optimized file submission, +/// and to determine whether files need to be updated or submitted. +pub struct LocalSheet<'a> { + pub(crate) local_workspace: &'a LocalWorkspace, + pub(crate) member: MemberId, + pub(crate) sheet_name: String, + pub(crate) data: LocalSheetData, +} + +#[derive(Debug, Default, Serialize, Deserialize, ConfigFile, Clone)] +#[cfg_file(path = CLIENT_FILE_LOCAL_SHEET_NOSET)] // Do not use LocalSheet::write or LocalSheet::read +pub struct LocalSheetData { + /// Local file path to metadata mapping. + #[serde(rename = "map")] + pub(crate) mapping: HashMap, + + #[serde(rename = "vfs")] + pub(crate) vfs: HashMap, +} + +#[derive(Debug, Serialize, Deserialize, Clone)] +pub struct LocalMappingMetadata { + /// Hash value generated immediately after the file is downloaded to the local workspace + #[serde(rename = "base_hash")] + pub(crate) hash_when_updated: String, + + /// Time when the file was downloaded to the local workspace + #[serde(rename = "time")] + pub(crate) time_when_updated: SystemTime, + + /// Size of the file when downloaded to the local workspace + #[serde(rename = "size")] + pub(crate) size_when_updated: u64, + + /// Version description when the file was downloaded to the local workspace + #[serde(rename = "desc")] + pub(crate) version_desc_when_updated: VirtualFileVersionDescription, + + /// Version when the file was downloaded to the local workspace + #[serde(rename = "ver")] + pub(crate) version_when_updated: VirtualFileVersion, + + /// Virtual file ID corresponding to the local path + #[serde(rename = "id")] + pub(crate) mapping_vfid: VirtualFileId, + + /// Latest modifiy check time + #[serde(rename = 
"check_time")] + pub(crate) last_modify_check_time: SystemTime, + + /// Latest modifiy check result + #[serde(rename = "modified")] + pub(crate) last_modify_check_result: bool, + + /// Latest modifiy check hash result + #[serde(rename = "current_hash")] + pub(crate) last_modify_check_hash: Option, +} + +impl LocalSheetData { + /// Wrap LocalSheetData into LocalSheet with workspace, member, and sheet name + pub fn wrap_to_local_sheet<'a>( + self, + workspace: &'a LocalWorkspace, + member: MemberId, + sheet_name: SheetName, + ) -> LocalSheet<'a> { + LocalSheet { + local_workspace: workspace, + member, + sheet_name, + data: self, + } + } +} + +impl LocalMappingMetadata { + /// Create a new MappingMetaData instance + #[allow(clippy::too_many_arguments)] + pub fn new( + hash_when_updated: String, + time_when_updated: SystemTime, + size_when_updated: u64, + version_desc_when_updated: VirtualFileVersionDescription, + version_when_updated: VirtualFileVersion, + mapping_vfid: VirtualFileId, + last_modifiy_check_time: SystemTime, + last_modifiy_check_result: bool, + ) -> Self { + Self { + hash_when_updated, + time_when_updated, + size_when_updated, + version_desc_when_updated, + version_when_updated, + mapping_vfid, + last_modify_check_time: last_modifiy_check_time, + last_modify_check_result: last_modifiy_check_result, + last_modify_check_hash: None, + } + } + + /// Getter for hash_when_updated + pub fn hash_when_updated(&self) -> &String { + &self.hash_when_updated + } + + /// Setter for hash_when_updated + pub fn set_hash_when_updated(&mut self, hash: String) { + self.hash_when_updated = hash; + } + + /// Getter for date_when_updated + pub fn time_when_updated(&self) -> &SystemTime { + &self.time_when_updated + } + + /// Setter for time_when_updated + pub fn set_time_when_updated(&mut self, time: SystemTime) { + self.time_when_updated = time; + } + + /// Getter for size_when_updated + pub fn size_when_updated(&self) -> u64 { + self.size_when_updated + } + + /// Setter for 
size_when_updated + pub fn set_size_when_updated(&mut self, size: u64) { + self.size_when_updated = size; + } + + /// Getter for version_desc_when_updated + pub fn version_desc_when_updated(&self) -> &VirtualFileVersionDescription { + &self.version_desc_when_updated + } + + /// Setter for version_desc_when_updated + pub fn set_version_desc_when_updated(&mut self, version_desc: VirtualFileVersionDescription) { + self.version_desc_when_updated = version_desc; + } + + /// Getter for version_when_updated + pub fn version_when_updated(&self) -> &VirtualFileVersion { + &self.version_when_updated + } + + /// Setter for version_when_updated + pub fn set_version_when_updated(&mut self, version: VirtualFileVersion) { + self.version_when_updated = version; + } + + /// Getter for mapping_vfid + pub fn mapping_vfid(&self) -> &VirtualFileId { + &self.mapping_vfid + } + + /// Setter for mapping_vfid + pub fn set_mapping_vfid(&mut self, vfid: VirtualFileId) { + self.mapping_vfid = vfid; + } + + /// Getter for last_modifiy_check_time + pub fn last_modifiy_check_time(&self) -> &SystemTime { + &self.last_modify_check_time + } + + /// Setter for last_modifiy_check_time + pub fn set_last_modifiy_check_time(&mut self, time: SystemTime) { + self.last_modify_check_time = time; + } + + /// Getter for last_modifiy_check_result + pub fn last_modifiy_check_result(&self) -> bool { + self.last_modify_check_result + } + + /// Setter for last_modifiy_check_result + pub fn set_last_modifiy_check_result(&mut self, result: bool) { + self.last_modify_check_result = result; + } + + /// Getter for last_modifiy_check_hash + pub fn last_modifiy_check_hash(&self) -> &Option { + &self.last_modify_check_hash + } + + /// Setter for last_modifiy_check_hash + pub fn set_last_modifiy_check_hash(&mut self, hash: Option) { + self.last_modify_check_hash = hash; + } +} + +impl Default for LocalMappingMetadata { + fn default() -> Self { + Self { + hash_when_updated: Default::default(), + time_when_updated: 
SystemTime::now(), + size_when_updated: Default::default(), + version_desc_when_updated: Default::default(), + version_when_updated: Default::default(), + mapping_vfid: Default::default(), + last_modify_check_time: SystemTime::now(), + last_modify_check_result: false, + last_modify_check_hash: None, + } + } +} + +mod instant_serde { + use serde::{self, Deserialize, Deserializer, Serializer}; + use tokio::time::Instant; + + pub fn serialize(instant: &Instant, serializer: S) -> Result + where + S: Serializer, + { + serializer.serialize_u64(instant.elapsed().as_secs()) + } + + pub fn deserialize<'de, D>(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + let secs = u64::deserialize(deserializer)?; + Ok(Instant::now() - std::time::Duration::from_secs(secs)) + } +} + +impl<'a> From<&'a LocalSheet<'a>> for &'a LocalSheetData { + fn from(sheet: &'a LocalSheet<'a>) -> Self { + &sheet.data + } +} + +impl<'a> LocalSheet<'a> { + /// Add mapping to local sheet data + pub fn add_mapping( + &mut self, + path: &LocalFilePathBuf, + mapping: LocalMappingMetadata, + ) -> Result<(), std::io::Error> { + let path = format_path(path)?; + if self.data.mapping.contains_key(&path) + || self.data.vfs.contains_key(&mapping.mapping_vfid) + { + return Err(Error::new( + std::io::ErrorKind::AlreadyExists, + "Mapping already exists", + )); + } + + self.data.mapping.insert(path, mapping); + Ok(()) + } + + /// Move mapping to other path + pub fn move_mapping( + &mut self, + from: &LocalFilePathBuf, + to: &LocalFilePathBuf, + ) -> Result<(), std::io::Error> { + let from = format_path(from)?; + let to = format_path(to)?; + if self.data.mapping.contains_key(&to) { + return Err(Error::new( + std::io::ErrorKind::AlreadyExists, + "To path already exists.", + )); + } + + let Some(old_value) = self.data.mapping.remove(&from) else { + return Err(Error::new( + std::io::ErrorKind::NotFound, + "From path is not found.", + )); + }; + + self.data.mapping.insert(to, old_value); + + Ok(()) + } + + /// 
Remove mapping from local sheet + pub fn remove_mapping( + &mut self, + path: &LocalFilePathBuf, + ) -> Result { + let path = format_path(path)?; + match self.data.mapping.remove(&path) { + Some(mapping) => Ok(mapping), + None => Err(Error::new( + std::io::ErrorKind::NotFound, + "Path is not found.", + )), + } + } + + /// Get immutable mapping data + pub fn mapping_data( + &self, + path: &LocalFilePathBuf, + ) -> Result<&LocalMappingMetadata, std::io::Error> { + let path = format_path(path)?; + let Some(data) = self.data.mapping.get(&path) else { + return Err(Error::new( + std::io::ErrorKind::NotFound, + "Path is not found.", + )); + }; + Ok(data) + } + + /// Get muttable mapping data + pub fn mapping_data_mut( + &mut self, + path: &LocalFilePathBuf, + ) -> Result<&mut LocalMappingMetadata, std::io::Error> { + let path = format_path(path)?; + let Some(data) = self.data.mapping.get_mut(&path) else { + return Err(Error::new( + std::io::ErrorKind::NotFound, + "Path is not found.", + )); + }; + Ok(data) + } + + /// Write the sheet to disk + pub async fn write(&mut self) -> Result<(), std::io::Error> { + let path = self + .local_workspace + .local_sheet_path(&self.member, &self.sheet_name); + self.write_to_path(path).await + } + + /// Write the sheet to custom path + pub async fn write_to_path(&mut self, path: impl Into) -> Result<(), std::io::Error> { + let path = path.into(); + + self.data.vfs = HashMap::new(); + for (path, mapping) in self.data.mapping.iter() { + self.data + .vfs + .insert(mapping.mapping_vfid.clone(), path.clone()); + } + + LocalSheetData::write_to(&self.data, path).await?; + Ok(()) + } + + /// Get path by VirtualFileId + pub fn path_by_id(&self, vfid: &VirtualFileId) -> Option<&PathBuf> { + self.data.vfs.get(vfid) + } +} diff --git a/data/src/data/local/vault_modified.rs b/data/src/data/local/vault_modified.rs new file mode 100644 index 0000000..563d11f --- /dev/null +++ b/data/src/data/local/vault_modified.rs @@ -0,0 +1,30 @@ +use 
crate::{constants::CLIENT_FILE_VAULT_MODIFIED, current::current_local_path}; + +pub async fn check_vault_modified() -> bool { + let Some(current_dir) = current_local_path() else { + return false; + }; + + let record_file = current_dir.join(CLIENT_FILE_VAULT_MODIFIED); + if !record_file.exists() { + return false; + } + + let Ok(contents) = tokio::fs::read_to_string(&record_file).await else { + return false; + }; + + matches!(contents.trim().to_lowercase().as_str(), "true") +} + +pub async fn sign_vault_modified(modified: bool) { + let Some(current_dir) = current_local_path() else { + return; + }; + + let record_file = current_dir.join(CLIENT_FILE_VAULT_MODIFIED); + + let contents = if modified { "true" } else { "false" }; + + let _ = tokio::fs::write(&record_file, contents).await; +} diff --git a/data/src/data/local/workspace_analyzer.rs b/data/src/data/local/workspace_analyzer.rs new file mode 100644 index 0000000..f2d83ff --- /dev/null +++ b/data/src/data/local/workspace_analyzer.rs @@ -0,0 +1,327 @@ +use std::{ + collections::{HashMap, HashSet}, + io::Error, + path::PathBuf, +}; + +use sha1_hash::calc_sha1_multi; +use string_proc::format_path::format_path; +use walkdir::WalkDir; + +use crate::data::{ + local::{LocalWorkspace, cached_sheet::CachedSheet, local_sheet::LocalSheet}, + member::MemberId, + sheet::{SheetData, SheetName}, + vault::virtual_file::VirtualFileId, +}; + +pub type FromRelativePathBuf = PathBuf; +pub type ToRelativePathBuf = PathBuf; +pub type CreatedRelativePathBuf = PathBuf; +pub type LostRelativePathBuf = PathBuf; +pub type ModifiedRelativePathBuf = PathBuf; + +pub struct AnalyzeResult<'a> { + local_workspace: &'a LocalWorkspace, + + /// Moved local files + pub moved: HashMap, + + /// Newly created local files + pub created: HashSet, + + /// Lost local files + pub lost: HashSet, + + /// Erased local files + pub erased: HashSet, + + /// Modified local files (excluding moved files) + /// For files that were both moved and modified, changes can 
only be detected after LocalSheet mapping is aligned with actual files + pub modified: HashSet, +} + +struct AnalyzeContext<'a> { + member: MemberId, + sheet_name: SheetName, + local_sheet: Option>, + cached_sheet_data: Option, +} + +impl<'a> AnalyzeResult<'a> { + /// Analyze all files, calculate the file information provided + pub async fn analyze_local_status( + local_workspace: &'a LocalWorkspace, + ) -> Result, std::io::Error> { + // Workspace + let workspace = local_workspace; + + // Current member, sheet + let (member, sheet_name) = { + let mut_workspace = workspace.config.lock().await; + let member = mut_workspace.current_account(); + let Some(sheet) = mut_workspace.sheet_in_use().clone() else { + return Err(Error::new(std::io::ErrorKind::NotFound, "Sheet not found")); + }; + (member, sheet) + }; + + // Local files (RelativePaths) + let local_path = workspace.local_path(); + let file_relative_paths = { + let mut paths = HashSet::new(); + for entry in WalkDir::new(local_path) { + let entry = match entry { + Ok(entry) => entry, + Err(_) => continue, + }; + + // Skip entries that contain ".jv" in their path + if entry.path().to_string_lossy().contains(".jv") { + continue; + } + + if entry.file_type().is_file() + && let Ok(relative_path) = entry.path().strip_prefix(local_path) + { + let format = format_path(relative_path.to_path_buf()); + let Ok(format) = format else { + continue; + }; + paths.insert(format); + } + } + + paths + }; + + // Read local sheet + let local_sheet = (workspace.local_sheet(&member, &sheet_name).await).ok(); + + // Read cached sheet + let cached_sheet_data = match CachedSheet::cached_sheet_data(&sheet_name).await { + Ok(v) => Some(v), + Err(_) => { + return Err(Error::new( + std::io::ErrorKind::NotFound, + "Cached sheet not found", + )); + } + }; + + // Create new result + let mut result = Self::none_result(workspace); + + // Analyze entry + let mut analyze_ctx = AnalyzeContext { + member, + sheet_name, + local_sheet, + cached_sheet_data, 
+ }; + Self::analyze_moved(&mut result, &file_relative_paths, &analyze_ctx, workspace).await?; + Self::analyze_modified( + &mut result, + &file_relative_paths, + &mut analyze_ctx, + workspace, + ) + .await?; + + Ok(result) + } + + /// Track file moves by comparing recorded SHA1 hashes with actual file SHA1 hashes + /// For files that cannot be directly matched, continue searching using fuzzy matching algorithms + async fn analyze_moved( + result: &mut AnalyzeResult<'_>, + file_relative_paths: &HashSet, + analyze_ctx: &AnalyzeContext<'a>, + workspace: &LocalWorkspace, + ) -> Result<(), std::io::Error> { + let local_sheet_paths: HashSet<&PathBuf> = match &analyze_ctx.local_sheet { + Some(local_sheet) => local_sheet.data.mapping.keys().collect(), + None => HashSet::new(), + }; + let file_relative_paths_ref: HashSet<&PathBuf> = file_relative_paths.iter().collect(); + + // Files that exist locally but not in remote + let mut erased_files: HashSet = HashSet::new(); + + if let Some(cached_data) = &analyze_ctx.cached_sheet_data { + if let Some(local_sheet) = &analyze_ctx.local_sheet { + let cached_sheet_mapping = cached_data.mapping(); + let local_sheet_mapping = &local_sheet.data.mapping; + + // Find paths that exist in local sheet but not in cached sheet + for local_path in local_sheet_mapping.keys() { + if !cached_sheet_mapping.contains_key(local_path) { + erased_files.insert(local_path.clone()); + } + } + } + } + + // Files that exist in the local sheet but not in reality are considered lost + let mut lost_files: HashSet<&PathBuf> = local_sheet_paths + .difference(&file_relative_paths_ref) + .filter(|&&path| !erased_files.contains(path)) + .cloned() + .collect(); + + // Files that exist in reality but not in the local sheet are recorded as newly created + let mut new_files: HashSet<&PathBuf> = file_relative_paths_ref + .difference(&local_sheet_paths) + .cloned() + .collect(); + + // Calculate hashes for new files + let new_files_for_hash: Vec = new_files + .iter() + 
.map(|p| workspace.local_path.join(p)) + .collect(); + let file_hashes: HashSet<(PathBuf, String)> = + match calc_sha1_multi::>(new_files_for_hash, 8192).await { + Ok(hash) => hash, + Err(e) => return Err(Error::other(e)), + } + .iter() + .map(|r| (r.file_path.clone(), r.hash.to_string())) + .collect(); + + // Build hash mapping table for lost files + let mut lost_files_hash_mapping: HashMap = + match &analyze_ctx.local_sheet { + Some(local_sheet) => lost_files + .iter() + .filter_map(|f| { + local_sheet.mapping_data(f).ok().map(|mapping_data| { + ( + // Using the most recently recorded Hash can more accurately identify moved items, + // but if it doesn't exist, fall back to the initially recorded Hash + mapping_data + .last_modify_check_hash + .as_ref() + .cloned() + .unwrap_or(mapping_data.hash_when_updated.clone()), + (*f).clone(), + ) + }) + }) + .collect(), + None => HashMap::new(), + }; + + // If these hashes correspond to the hashes of missing files, then this pair of new and lost items will be merged into moved items + let mut moved_files: HashSet<(FromRelativePathBuf, ToRelativePathBuf)> = HashSet::new(); + for (new_path, new_hash) in file_hashes { + let new_path = new_path + .strip_prefix(&workspace.local_path) + .map(|p| p.to_path_buf()) + .unwrap_or(new_path); + + // If the new hash value hits the mapping, add a moved item + if let Some(lost_path) = lost_files_hash_mapping.remove(&new_hash) { + // Remove this new item and lost item + lost_files.remove(&lost_path); + new_files.remove(&new_path); + + // Create moved item + moved_files.insert((lost_path.clone(), new_path)); + } + } + + // Enter fuzzy matching to match other potentially moved items that haven't been matched + // If the total number of new and lost files is divisible by 2, it indicates there might still be files that have been moved, consider trying fuzzy matching + if new_files.len() + lost_files.len() % 2 == 0 { + // Try fuzzy matching + // ... 
+ } + + // Collect results and set the result + result.created = new_files.iter().map(|p| (*p).clone()).collect(); + result.lost = lost_files.iter().map(|p| (*p).clone()).collect(); + result.moved = moved_files + .iter() + .filter_map(|(from, to)| { + let vfid = analyze_ctx + .local_sheet + .as_ref() + .and_then(|local_sheet| local_sheet.mapping_data(from).ok()) + .map(|mapping_data| mapping_data.mapping_vfid.clone()); + vfid.map(|vfid| (vfid, (from.clone(), to.clone()))) + }) + .collect(); + result.erased = erased_files; + + Ok(()) + } + + /// Compare using file modification time and SHA1 hash values. + /// Note: For files that have been both moved and modified, they can only be recognized as modified after their location is matched. + async fn analyze_modified( + result: &mut AnalyzeResult<'_>, + file_relative_paths: &HashSet, + analyze_ctx: &mut AnalyzeContext<'a>, + workspace: &LocalWorkspace, + ) -> Result<(), std::io::Error> { + let local_sheet = &mut analyze_ctx.local_sheet.as_mut().unwrap(); + let local_path = local_sheet.local_workspace.local_path().clone(); + + for path in file_relative_paths { + // Get mapping data + let Ok(mapping_data) = local_sheet.mapping_data_mut(path) else { + continue; + }; + + // If modified time not changed, skip + let modified_time = std::fs::metadata(local_path.join(path))?.modified()?; + if &modified_time == mapping_data.last_modifiy_check_time() { + if mapping_data.last_modifiy_check_result() { + result.modified.insert(path.clone()); + } + continue; + } + + // Calculate hash + let hash_calc = match sha1_hash::calc_sha1(workspace.local_path.join(path), 2048).await + { + Ok(hash) => hash, + Err(e) => return Err(Error::other(e)), + }; + + // If hash not match, mark as modified + if &hash_calc.hash != mapping_data.hash_when_updated() { + result.modified.insert(path.clone()); + + // Update last modified check time to modified time + mapping_data.last_modify_check_time = modified_time; + mapping_data.last_modify_check_result = 
true; + } else { + // Update last modified check time to modified time + mapping_data.last_modify_check_time = modified_time; + mapping_data.last_modify_check_result = false; + } + + // Record latest hash + mapping_data.last_modify_check_hash = Some(hash_calc.hash) + } + + // Persist the local sheet data + LocalSheet::write(local_sheet).await?; + + Ok(()) + } + + /// Generate a empty AnalyzeResult + fn none_result(local_workspace: &'a LocalWorkspace) -> AnalyzeResult<'a> { + AnalyzeResult { + local_workspace, + moved: HashMap::new(), + created: HashSet::new(), + lost: HashSet::new(), + modified: HashSet::new(), + erased: HashSet::new(), + } + } +} diff --git a/data/src/data/member.rs b/data/src/data/member.rs new file mode 100644 index 0000000..7e99488 --- /dev/null +++ b/data/src/data/member.rs @@ -0,0 +1,71 @@ +use std::collections::HashMap; + +use cfg_file::ConfigFile; +use serde::{Deserialize, Serialize}; +use string_proc::snake_case; + +pub type MemberId = String; + +#[derive(Debug, Eq, Clone, ConfigFile, Serialize, Deserialize)] +pub struct Member { + /// Member ID, the unique identifier of the member + #[serde(rename = "id")] + id: String, + + /// Member metadata + #[serde(rename = "meta")] + metadata: HashMap, +} + +impl Default for Member { + fn default() -> Self { + Self::new("default_user") + } +} + +impl PartialEq for Member { + fn eq(&self, other: &Self) -> bool { + self.id == other.id + } +} + +impl std::fmt::Display for Member { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{}", self.id) + } +} + +impl std::convert::AsRef for Member { + fn as_ref(&self) -> &str { + &self.id + } +} + +impl Member { + /// Create member struct by id + pub fn new(new_id: impl Into) -> Self { + Self { + id: snake_case!(new_id.into()), + metadata: HashMap::new(), + } + } + + /// Get member id + pub fn id(&self) -> String { + self.id.clone() + } + + /// Get metadata + pub fn metadata(&self, key: impl Into) -> Option<&String> { + 
self.metadata.get(&key.into()) + } + + /// Set metadata + pub fn set_metadata( + &mut self, + key: impl AsRef, + value: impl Into, + ) -> Option { + self.metadata.insert(key.as_ref().to_string(), value.into()) + } +} diff --git a/data/src/data/sheet.rs b/data/src/data/sheet.rs new file mode 100644 index 0000000..64b1985 --- /dev/null +++ b/data/src/data/sheet.rs @@ -0,0 +1,280 @@ +use std::{collections::HashMap, path::PathBuf}; + +use cfg_file::{ConfigFile, config::ConfigFile}; +use serde::{Deserialize, Serialize}; + +use crate::{ + constants::SERVER_FILE_SHEET, + data::{ + member::MemberId, + vault::{ + Vault, + virtual_file::{VirtualFileId, VirtualFileVersion}, + }, + }, +}; + +pub type SheetName = String; +pub type SheetPathBuf = PathBuf; + +const SHEET_NAME: &str = "{sheet_name}"; + +pub struct Sheet<'a> { + /// The name of the current sheet + pub(crate) name: SheetName, + + /// Sheet data + pub(crate) data: SheetData, + + /// Sheet path + pub(crate) vault_reference: &'a Vault, +} + +#[derive(Default, Serialize, Deserialize, ConfigFile, Clone)] +pub struct SheetData { + /// The write count of the current sheet + #[serde(rename = "v")] + pub(crate) write_count: i32, + + /// The holder of the current sheet, who has full operation rights to the sheet mapping + #[serde(rename = "holder")] + pub(crate) holder: Option, + + /// Mapping of sheet paths to virtual file IDs + #[serde(rename = "map")] + pub(crate) mapping: HashMap, + + /// Mapping of virtual file Ids to sheet paths + #[serde(rename = "id_map")] + pub(crate) id_mapping: Option>, +} + +#[derive(Debug, Default, Serialize, Deserialize, ConfigFile, Clone, Eq, PartialEq)] +pub struct SheetMappingMetadata { + #[serde(rename = "id")] + pub id: VirtualFileId, + #[serde(rename = "ver")] + pub version: VirtualFileVersion, +} + +impl<'a> Sheet<'a> { + pub fn name(&self) -> &SheetName { + &self.name + } + + /// Get the holder of this sheet + pub fn holder(&self) -> Option<&MemberId> { + self.data.holder.as_ref() + } + 
+ /// Get the mapping of this sheet + pub fn mapping(&self) -> &HashMap { + &self.data.mapping + } + + /// Get the muttable mapping of this sheet + pub fn mapping_mut(&mut self) -> &mut HashMap { + &mut self.data.mapping + } + + /// Get the id_mapping of this sheet data + pub fn id_mapping(&self) -> &Option> { + &self.data.id_mapping + } + + /// Get the write count of this sheet + pub fn write_count(&self) -> i32 { + self.data.write_count + } + + /// Forget the holder of this sheet + pub fn forget_holder(&mut self) { + self.data.holder = None; + } + + /// Set the holder of this sheet + pub fn set_holder(&mut self, holder: MemberId) { + self.data.holder = Some(holder); + } + + /// Add (or Edit) a mapping entry to the sheet + /// + /// This operation performs safety checks to ensure the member has the right to add the mapping: + /// 1. The sheet must have a holder (member) to perform this operation + /// 2. If the virtual file ID doesn't exist in the vault, the mapping is added directly + /// 3. 
If the virtual file exists, the mapping is added regardless of member edit rights + /// + /// Note: Full validation adds overhead - avoid frequent calls + pub async fn add_mapping( + &mut self, + sheet_path: SheetPathBuf, + virtual_file_id: VirtualFileId, + version: VirtualFileVersion, + ) -> Result<(), std::io::Error> { + // Check if the virtual file exists in the vault + if self.vault_reference.virtual_file(&virtual_file_id).is_err() { + // Virtual file doesn't exist, add the mapping directly + self.data.mapping.insert( + sheet_path, + SheetMappingMetadata { + id: virtual_file_id, + version, + }, + ); + return Ok(()); + } + + // Check if the sheet has a holder + let Some(_) = self.holder() else { + return Err(std::io::Error::new( + std::io::ErrorKind::PermissionDenied, + "This sheet has no holder", + )); + }; + + self.data.mapping.insert( + sheet_path, + SheetMappingMetadata { + id: virtual_file_id, + version, + }, + ); + + Ok(()) + } + + /// Remove a mapping entry from the sheet + /// + /// This operation performs safety checks to ensure the member has the right to remove the mapping: + /// 1. The sheet must have a holder (member) to perform this operation + /// 2. Member must NOT have edit rights to the virtual file to release it (ensuring clear ownership) + /// 3. If the virtual file doesn't exist, the mapping is removed but no ID is returned + /// 4. 
If member has no edit rights and the file exists, returns the removed virtual file ID + /// + /// Note: Full validation adds overhead - avoid frequent calls + pub async fn remove_mapping( + &mut self, + sheet_path: &SheetPathBuf, + ) -> Option { + let virtual_file_meta = match self.data.mapping.get(sheet_path) { + Some(id) => id, + None => { + // The mapping entry doesn't exist, nothing to remove + return None; + } + }; + + // Check if the virtual file exists in the vault + if self + .vault_reference + .virtual_file(&virtual_file_meta.id) + .is_err() + { + // Virtual file doesn't exist, remove the mapping and return None + self.data.mapping.remove(sheet_path); + return None; + } + + // Check if the sheet has a holder + let holder = self.holder()?; + + // Check if the holder has edit rights to the virtual file + match self + .vault_reference + .has_virtual_file_edit_right(holder, &virtual_file_meta.id) + .await + { + Ok(false) => { + // Holder doesn't have rights, remove and return the virtual file ID + self.data.mapping.remove(sheet_path) + } + Ok(true) => { + // Holder has edit rights, don't remove the mapping + None + } + Err(_) => { + // Error checking rights, don't remove the mapping + None + } + } + } + + /// Persist the sheet to disk + /// + /// Why not use a reference? + /// Because I don't want a second instance of the sheet to be kept in memory. + /// If needed, please deserialize and reload it. 
+ pub async fn persist(mut self) -> Result<(), std::io::Error> { + self.data.write_count += 1; + + // Update id mapping + self.data.id_mapping = Some(HashMap::new()); + for map in self.data.mapping.iter() { + self.data + .id_mapping + .as_mut() + .unwrap() + .insert(map.1.id.clone(), map.0.clone()); + } + + // Add write count + if self.data.write_count >= i32::MAX - 1 { + self.data.write_count = 0; + } + SheetData::write_to(&self.data, self.sheet_path()).await + } + + /// Get the path to the sheet file + pub fn sheet_path(&self) -> PathBuf { + Sheet::sheet_path_with_name(self.vault_reference, &self.name) + } + + /// Get the path to the sheet file with the given name + pub fn sheet_path_with_name(vault: &Vault, name: impl AsRef) -> PathBuf { + vault + .vault_path() + .join(SERVER_FILE_SHEET.replace(SHEET_NAME, name.as_ref())) + } + + /// Clone the data of the sheet + pub fn clone_data(&self) -> SheetData { + self.data.clone() + } + + /// Convert the sheet into its data representation + pub fn to_data(self) -> SheetData { + self.data + } +} + +impl SheetData { + /// Get the write count of this sheet data + pub fn write_count(&self) -> i32 { + self.write_count + } + + /// Get the holder of this sheet data + pub fn holder(&self) -> Option<&MemberId> { + self.holder.as_ref() + } + + /// Get the mapping of this sheet data + pub fn mapping(&self) -> &HashMap { + &self.mapping + } + + /// Get the muttable mapping of this sheet data + pub fn mapping_mut(&mut self) -> &mut HashMap { + &mut self.mapping + } + + /// Get the id_mapping of this sheet data + pub fn id_mapping(&self) -> &Option> { + &self.id_mapping + } + + /// Get the muttable id_mapping of this sheet data + pub fn id_mapping_mut(&mut self) -> &mut Option> { + &mut self.id_mapping + } +} diff --git a/data/src/data/user.rs b/data/src/data/user.rs new file mode 100644 index 0000000..9f52fdc --- /dev/null +++ b/data/src/data/user.rs @@ -0,0 +1,28 @@ +use crate::current::current_cfg_dir; +use std::path::PathBuf; + 
+pub mod accounts; + +pub struct UserDirectory { + local_path: PathBuf, +} + +impl UserDirectory { + /// Create a user ditectory struct from the current system's document directory + pub fn current_cfg_dir() -> Option { + Some(UserDirectory { + local_path: current_cfg_dir()?, + }) + } + + /// Create a user directory struct from a specified directory path + /// Returns None if the directory does not exist + pub fn from_path>(path: P) -> Option { + let local_path = path.into(); + if local_path.exists() { + Some(UserDirectory { local_path }) + } else { + None + } + } +} diff --git a/data/src/data/user/accounts.rs b/data/src/data/user/accounts.rs new file mode 100644 index 0000000..d77bc02 --- /dev/null +++ b/data/src/data/user/accounts.rs @@ -0,0 +1,164 @@ +use std::{ + fs, + io::{Error, ErrorKind}, + path::PathBuf, +}; + +use cfg_file::config::ConfigFile; + +use crate::{ + constants::{USER_FILE_ACCOUNTS, USER_FILE_KEY, USER_FILE_MEMBER}, + data::{ + member::{Member, MemberId}, + user::UserDirectory, + }, +}; + +const SELF_ID: &str = "{self_id}"; + +/// Account Management +impl UserDirectory { + /// Read account from configuration file + pub async fn account(&self, id: &MemberId) -> Result { + if let Some(cfg_file) = self.account_cfg(id) { + let member = Member::read_from(cfg_file).await?; + return Ok(member); + } + + Err(Error::new(ErrorKind::NotFound, "Account not found!")) + } + + /// List all account IDs in the user directory + pub fn account_ids(&self) -> Result, std::io::Error> { + let accounts_path = self + .local_path + .join(USER_FILE_ACCOUNTS.replace(SELF_ID, "")); + + if !accounts_path.exists() { + return Ok(Vec::new()); + } + + let mut account_ids = Vec::new(); + + for entry in fs::read_dir(accounts_path)? 
{ + let entry = entry?; + let path = entry.path(); + + if path.is_file() + && let Some(file_name) = path.file_stem().and_then(|s| s.to_str()) + && path.extension().and_then(|s| s.to_str()) == Some("toml") + { + // Remove the "_private" suffix from key files if present + let account_id = file_name.replace("_private", ""); + account_ids.push(account_id); + } + } + + Ok(account_ids) + } + + /// Get all accounts + /// This method will read and deserialize account information, please pay attention to performance issues + pub async fn accounts(&self) -> Result, std::io::Error> { + let mut accounts = Vec::new(); + + for account_id in self.account_ids()? { + if let Ok(account) = self.account(&account_id).await { + accounts.push(account); + } + } + + Ok(accounts) + } + + /// Update account info + pub async fn update_account(&self, member: Member) -> Result<(), std::io::Error> { + // Ensure account exist + if self.account_cfg(&member.id()).is_some() { + let account_cfg_path = self.account_cfg_path(&member.id()); + Member::write_to(&member, account_cfg_path).await?; + return Ok(()); + } + + Err(Error::new(ErrorKind::NotFound, "Account not found!")) + } + + /// Register an account to user directory + pub async fn register_account(&self, member: Member) -> Result<(), std::io::Error> { + // Ensure account not exist + if self.account_cfg(&member.id()).is_some() { + return Err(Error::new( + ErrorKind::DirectoryNotEmpty, + format!("Account `{}` already registered!", member.id()), + )); + } + + // Ensure accounts directory exists + let accounts_dir = self + .local_path + .join(USER_FILE_ACCOUNTS.replace(SELF_ID, "")); + if !accounts_dir.exists() { + fs::create_dir_all(&accounts_dir)?; + } + + // Write config file to accounts dir + let account_cfg_path = self.account_cfg_path(&member.id()); + Member::write_to(&member, account_cfg_path).await?; + + Ok(()) + } + + /// Remove account from user directory + pub fn remove_account(&self, id: &MemberId) -> Result<(), std::io::Error> { + // 
Remove config file if exists + if let Some(account_cfg_path) = self.account_cfg(id) { + fs::remove_file(account_cfg_path)?; + } + + // Remove private key file if exists + if let Some(private_key_path) = self.account_private_key(id) + && private_key_path.exists() + { + fs::remove_file(private_key_path)?; + } + + Ok(()) + } + + /// Try to get the account's configuration file to determine if the account exists + pub fn account_cfg(&self, id: &MemberId) -> Option { + let cfg_file = self.account_cfg_path(id); + if cfg_file.exists() { + Some(cfg_file) + } else { + None + } + } + + /// Try to get the account's private key file to determine if the account has a private key + pub fn account_private_key(&self, id: &MemberId) -> Option { + let key_file = self.account_private_key_path(id); + if key_file.exists() { + Some(key_file) + } else { + None + } + } + + /// Check if account has private key + pub fn has_private_key(&self, id: &MemberId) -> bool { + self.account_private_key(id).is_some() + } + + /// Get the account's configuration file path, but do not check if the file exists + pub fn account_cfg_path(&self, id: &MemberId) -> PathBuf { + self.local_path + .join(USER_FILE_MEMBER.replace(SELF_ID, id.to_string().as_str())) + } + + /// Get the account's private key file path, but do not check if the file exists + pub fn account_private_key_path(&self, id: &MemberId) -> PathBuf { + self.local_path + .join(USER_FILE_KEY.replace(SELF_ID, id.to_string().as_str())) + } +} diff --git a/data/src/data/vault.rs b/data/src/data/vault.rs new file mode 100644 index 0000000..595997a --- /dev/null +++ b/data/src/data/vault.rs @@ -0,0 +1,132 @@ +use std::{env::current_dir, path::PathBuf, sync::Arc}; + +use tokio::fs::create_dir_all; +use vcs_docs::docs::READMES_VAULT_README; + +use crate::{ + constants::{ + REF_SHEET_NAME, SERVER_FILE_README, SERVER_FILE_VAULT, SERVER_PATH_MEMBER_PUB, + SERVER_PATH_MEMBERS, SERVER_PATH_SHEETS, SERVER_PATH_VF_ROOT, VAULT_HOST_NAME, + }, + 
current::{current_vault_path, find_vault_path}, + data::{member::Member, vault::config::VaultConfig}, +}; + +pub mod config; +pub mod member; +pub mod service; +pub mod sheet_share; +pub mod sheets; +pub mod virtual_file; + +pub struct Vault { + config: Arc, + vault_path: PathBuf, +} + +impl Vault { + /// Get vault path + pub fn vault_path(&self) -> &PathBuf { + &self.vault_path + } + + /// Initialize vault + pub fn init(config: VaultConfig, vault_path: impl Into) -> Option { + let vault_path = find_vault_path(vault_path)?; + Some(Self { + config: Arc::new(config), + vault_path, + }) + } + + /// Initialize vault + pub fn init_current_dir(config: VaultConfig) -> Option { + let vault_path = current_vault_path()?; + Some(Self { + config: Arc::new(config), + vault_path, + }) + } + + /// Setup vault + pub async fn setup_vault( + vault_path: impl Into, + vault_name: impl AsRef, + ) -> Result<(), std::io::Error> { + let vault_path: PathBuf = vault_path.into(); + + // Ensure directory is empty + if vault_path.exists() && vault_path.read_dir()?.next().is_some() { + return Err(std::io::Error::new( + std::io::ErrorKind::DirectoryNotEmpty, + "DirectoryNotEmpty", + )); + } + + // 1. Setup main config + let config = VaultConfig::default(); + + // NOTE: + // Do not use the write_to method provided by the ConfigFile trait to store the Vault configuration file + // Instead, use the PROFILES_VAULT content provided by the Documents Repository for writing + + // VaultConfig::write_to(&config, vault_path.join(SERVER_FILE_VAULT)).await?; + let config_content = vcs_docs::docs::PROFILES_VAULT + .replace("{vault_name}", vault_name.as_ref()) + .replace("{user_name}", whoami::username().as_str()) + .replace( + "{date_format}", + chrono::Local::now() + .format("%Y-%m-%d %H:%M") + .to_string() + .as_str(), + ) + .replace("{vault_uuid}", &config.vault_uuid().to_string()); + tokio::fs::write(vault_path.join(SERVER_FILE_VAULT), config_content).await?; + + // 2. 
Setup sheets directory + create_dir_all(vault_path.join(SERVER_PATH_SHEETS)).await?; + + // 3. Setup key directory + create_dir_all(vault_path.join(SERVER_PATH_MEMBER_PUB)).await?; + + // 4. Setup member directory + create_dir_all(vault_path.join(SERVER_PATH_MEMBERS)).await?; + + // 5. Setup storage directory + create_dir_all(vault_path.join(SERVER_PATH_VF_ROOT)).await?; + + let Some(vault) = Vault::init(config, &vault_path) else { + return Err(std::io::Error::other("Failed to initialize vault")); + }; + + // 6. Create host member + vault + .register_member_to_vault(Member::new(VAULT_HOST_NAME)) + .await?; + + // 7. Setup reference sheet + vault + .create_sheet(&REF_SHEET_NAME.to_string(), &VAULT_HOST_NAME.to_string()) + .await?; + + // Final, generate README.md + let readme_content = READMES_VAULT_README; + tokio::fs::write(vault_path.join(SERVER_FILE_README), readme_content).await?; + + Ok(()) + } + + /// Setup vault in current directory + pub async fn setup_vault_current_dir( + vault_name: impl AsRef, + ) -> Result<(), std::io::Error> { + Self::setup_vault(current_dir()?, vault_name).await?; + Ok(()) + } + + /// Get vault configuration + pub fn config(&self) -> &Arc { + &self.config + } +} diff --git a/data/src/data/vault/config.rs b/data/src/data/vault/config.rs new file mode 100644 index 0000000..caa8552 --- /dev/null +++ b/data/src/data/vault/config.rs @@ -0,0 +1,233 @@ +use std::net::{IpAddr, Ipv4Addr}; + +use cfg_file::ConfigFile; +use serde::{Deserialize, Serialize}; +use uuid::Uuid; + +use crate::constants::{PORT, SERVER_FILE_VAULT}; +use crate::data::member::{Member, MemberId}; + +pub type VaultName = String; +pub type VaultUuid = Uuid; + +#[derive(Serialize, Deserialize, Clone, PartialEq, Default)] +#[serde(rename_all = "lowercase")] +pub enum AuthMode { + /// Use asymmetric keys: both client and server need to register keys, after which they can connect + Key, + + /// Use password: the password stays on the server, and the client needs to set the 
password locally for connection + #[default] + Password, + + /// No authentication: generally used in a strongly secure environment, skipping verification directly + NoAuth, +} + +#[derive(Serialize, Deserialize, Clone, PartialEq, Default)] +#[serde(rename_all = "lowercase")] +pub enum LoggerLevel { + Debug, + Trace, + + #[default] + Info, +} + +#[derive(Serialize, Deserialize, Clone, PartialEq, Default)] +#[serde(rename_all = "lowercase")] +pub enum ServiceEnabled { + Enable, + + #[default] + Disable, +} + +#[derive(Serialize, Deserialize, Clone, PartialEq, Default)] +#[serde(rename_all = "lowercase")] +pub enum BehaviourEnabled { + Yes, + + #[default] + No, +} + +impl Into for ServiceEnabled { + fn into(self) -> bool { + match self { + ServiceEnabled::Enable => true, + ServiceEnabled::Disable => false, + } + } +} + +impl Into for BehaviourEnabled { + fn into(self) -> bool { + match self { + BehaviourEnabled::Yes => true, + BehaviourEnabled::No => false, + } + } +} + +#[derive(Serialize, Deserialize, ConfigFile)] +#[cfg_file(path = SERVER_FILE_VAULT)] +pub struct VaultConfig { + /// Vault uuid, unique identifier for the vault + #[serde(rename = "uuid")] + vault_uuid: VaultUuid, + + /// Vault name, which can be used as the project name and generally serves as a hint + #[serde(rename = "name")] + vault_name: VaultName, + + /// Vault host ids, a list of member id representing administrator identities + #[serde(rename = "hosts")] + vault_host_list: Vec, + + /// Vault server configuration, which will be loaded when connecting to the server + #[serde(rename = "profile")] + server_config: VaultServerConfig, +} + +#[derive(Serialize, Deserialize)] +pub struct VaultServerConfig { + /// Local IP address to bind to when the server starts + #[serde(rename = "bind")] + local_bind: IpAddr, + + /// TCP port to bind to when the server starts + #[serde(rename = "port")] + port: u16, + + /// Enable logging + #[serde(rename = "logger")] + logger: Option, + + /// Logger Level + 
#[serde(rename = "logger_level")] + logger_level: Option, + + /// Whether to enable LAN discovery, allowing members on the same LAN to more easily find the upstream server + #[serde(rename = "lan_discovery")] + lan_discovery: Option, // TODO + + /// Authentication mode for the vault server + /// key: Use asymmetric keys for authentication + /// password: Use a password for authentication + /// noauth: No authentication required, requires a strongly secure environment + #[serde(rename = "auth_mode")] + auth_mode: Option, // TODO +} + +impl Default for VaultConfig { + fn default() -> Self { + Self { + vault_uuid: Uuid::new_v4(), + vault_name: "JustEnoughVault".to_string(), + vault_host_list: Vec::new(), + server_config: VaultServerConfig { + local_bind: IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), + port: PORT, + logger: Some(BehaviourEnabled::default()), + logger_level: Some(LoggerLevel::default()), + lan_discovery: Some(ServiceEnabled::default()), + auth_mode: Some(AuthMode::Key), + }, + } + } +} + +/// Vault Management +impl VaultConfig { + /// Change name of the vault. 
+ pub fn change_name(&mut self, name: impl Into) { + self.vault_name = name.into() + } + + /// Add admin + pub fn add_admin(&mut self, member: &Member) { + let uuid = member.id(); + if !self.vault_host_list.contains(&uuid) { + self.vault_host_list.push(uuid); + } + } + + /// Remove admin + pub fn remove_admin(&mut self, member: &Member) { + let id = member.id(); + self.vault_host_list.retain(|x| x != &id); + } + + /// Get vault UUID + pub fn vault_uuid(&self) -> &VaultUuid { + &self.vault_uuid + } + + /// Set vault UUID + pub fn set_vault_uuid(&mut self, vault_uuid: VaultUuid) { + self.vault_uuid = vault_uuid; + } + + /// Get vault name + pub fn vault_name(&self) -> &VaultName { + &self.vault_name + } + + /// Set vault name + pub fn set_vault_name(&mut self, vault_name: VaultName) { + self.vault_name = vault_name; + } + + /// Get vault admin list + pub fn vault_host_list(&self) -> &Vec { + &self.vault_host_list + } + + /// Set vault admin list + pub fn set_vault_host_list(&mut self, vault_host_list: Vec) { + self.vault_host_list = vault_host_list; + } + + /// Get server config + pub fn server_config(&self) -> &VaultServerConfig { + &self.server_config + } + + /// Set server config + pub fn set_server_config(&mut self, server_config: VaultServerConfig) { + self.server_config = server_config; + } +} + +impl VaultServerConfig { + /// Get local bind IP address + pub fn local_bind(&self) -> &IpAddr { + &self.local_bind + } + + /// Get port + pub fn port(&self) -> u16 { + self.port + } + + /// Check if LAN discovery is enabled + pub fn is_lan_discovery_enabled(&self) -> bool { + self.lan_discovery.clone().unwrap_or_default().into() + } + + /// Get logger enabled status + pub fn is_logger_enabled(&self) -> bool { + self.logger.clone().unwrap_or_default().into() + } + + /// Get logger level + pub fn logger_level(&self) -> LoggerLevel { + self.logger_level.clone().unwrap_or_default() + } + + /// Get authentication mode + pub fn auth_mode(&self) -> AuthMode { + 
self.auth_mode.clone().unwrap_or_default() + } +} diff --git a/data/src/data/vault/member.rs b/data/src/data/vault/member.rs new file mode 100644 index 0000000..9d22d09 --- /dev/null +++ b/data/src/data/vault/member.rs @@ -0,0 +1,144 @@ +use std::{ + fs, + io::{Error, ErrorKind}, + path::PathBuf, +}; + +use cfg_file::config::ConfigFile; + +use crate::{ + constants::{ + SERVER_FILE_MEMBER_INFO, SERVER_FILE_MEMBER_PUB, SERVER_PATH_MEMBERS, + SERVER_SUFFIX_MEMBER_INFO_NO_DOT, + }, + data::{ + member::{Member, MemberId}, + vault::Vault, + }, +}; + +const ID_PARAM: &str = "{member_id}"; + +/// Member Manage +impl Vault { + /// Read member from configuration file + pub async fn member(&self, id: &MemberId) -> Result { + if let Some(cfg_file) = self.member_cfg(id) { + let member = Member::read_from(cfg_file).await?; + return Ok(member); + } + + Err(Error::new(ErrorKind::NotFound, "Member not found!")) + } + + /// List all member IDs in the vault + pub fn member_ids(&self) -> Result, std::io::Error> { + let members_path = self.vault_path.join(SERVER_PATH_MEMBERS); + + if !members_path.exists() { + return Ok(Vec::new()); + } + + let mut member_ids = Vec::new(); + + for entry in fs::read_dir(members_path)? { + let entry = entry?; + let path = entry.path(); + + if path.is_file() + && let Some(file_name) = path.file_stem().and_then(|s| s.to_str()) + && path.extension().and_then(|s| s.to_str()) + == Some(SERVER_SUFFIX_MEMBER_INFO_NO_DOT) + { + member_ids.push(file_name.to_string()); + } + } + + Ok(member_ids) + } + + /// Get all members + /// This method will read and deserialize member information, please pay attention to performance issues + pub async fn members(&self) -> Result, std::io::Error> { + let mut members = Vec::new(); + + for member_id in self.member_ids()? 
{ + if let Ok(member) = self.member(&member_id).await { + members.push(member); + } + } + + Ok(members) + } + + /// Update member info + pub async fn update_member(&self, member: Member) -> Result<(), std::io::Error> { + // Ensure member exist + if self.member_cfg(&member.id()).is_some() { + let member_cfg_path = self.member_cfg_path(&member.id()); + Member::write_to(&member, member_cfg_path).await?; + return Ok(()); + } + + Err(Error::new(ErrorKind::NotFound, "Member not found!")) + } + + /// Register a member to vault + pub async fn register_member_to_vault(&self, member: Member) -> Result<(), std::io::Error> { + // Ensure member not exist + if self.member_cfg(&member.id()).is_some() { + return Err(Error::new( + ErrorKind::DirectoryNotEmpty, + format!("Member `{}` already registered!", member.id()), + )); + } + + // Wrtie config file to member dir + let member_cfg_path = self.member_cfg_path(&member.id()); + Member::write_to(&member, member_cfg_path).await?; + + Ok(()) + } + + /// Remove member from vault + pub fn remove_member_from_vault(&self, id: &MemberId) -> Result<(), std::io::Error> { + // Ensure member exist + if let Some(member_cfg_path) = self.member_cfg(id) { + fs::remove_file(member_cfg_path)?; + } + + Ok(()) + } + + /// Try to get the member's configuration file to determine if the member exists + pub fn member_cfg(&self, id: &MemberId) -> Option { + let cfg_file = self.member_cfg_path(id); + if cfg_file.exists() { + Some(cfg_file) + } else { + None + } + } + + /// Try to get the member's public key file to determine if the member has login permission + pub fn member_key(&self, id: &MemberId) -> Option { + let key_file = self.member_key_path(id); + if key_file.exists() { + Some(key_file) + } else { + None + } + } + + /// Get the member's configuration file path, but do not check if the file exists + pub fn member_cfg_path(&self, id: &MemberId) -> PathBuf { + self.vault_path + .join(SERVER_FILE_MEMBER_INFO.replace(ID_PARAM, id.to_string().as_str())) + 
} + + /// Get the member's public key file path, but do not check if the file exists + pub fn member_key_path(&self, id: &MemberId) -> PathBuf { + self.vault_path + .join(SERVER_FILE_MEMBER_PUB.replace(ID_PARAM, id.to_string().as_str())) + } +} diff --git a/data/src/data/vault/service.rs b/data/src/data/vault/service.rs new file mode 100644 index 0000000..3f59c30 --- /dev/null +++ b/data/src/data/vault/service.rs @@ -0,0 +1,40 @@ +use std::path::PathBuf; + +use crate::{constants::SERVER_FILE_LOCKFILE, data::vault::Vault}; + +impl Vault { + /// Get the path of the lock file for the current Vault + pub fn lock_file_path(&self) -> PathBuf { + self.vault_path().join(SERVER_FILE_LOCKFILE) + } + + /// Check if the current Vault is locked + pub fn is_locked(&self) -> bool { + self.lock_file_path().exists() + } + + /// Lock the current Vault + pub fn lock(&self) -> Result<(), std::io::Error> { + if self.is_locked() { + return Err(std::io::Error::new( + std::io::ErrorKind::AlreadyExists, + format!( + "Vault is locked! 
This indicates a service is already running here.\nPlease stop other services or delete the lock file at the vault root directory: {}", + self.lock_file_path().display() + ), + )); + } + std::fs::File::create(self.lock_file_path())?; + Ok(()) + } + + /// Unlock the current Vault + pub fn unlock(&self) -> Result<(), std::io::Error> { + if let Err(e) = std::fs::remove_file(self.lock_file_path()) + && e.kind() != std::io::ErrorKind::NotFound + { + return Err(e); + } + Ok(()) + } +} diff --git a/data/src/data/vault/sheet_share.rs b/data/src/data/vault/sheet_share.rs new file mode 100644 index 0000000..1e692f1 --- /dev/null +++ b/data/src/data/vault/sheet_share.rs @@ -0,0 +1,424 @@ +use std::{collections::HashMap, io::Error, path::PathBuf}; + +use cfg_file::{ConfigFile, config::ConfigFile}; +use rand::{Rng, rng}; +use serde::{Deserialize, Serialize}; +use string_proc::{format_path, snake_case}; +use tokio::fs; + +use crate::{ + constants::{ + SERVER_FILE_SHEET_SHARE, SERVER_PATH_SHARES, SERVER_SUFFIX_SHEET_SHARE_FILE_NO_DOT, + }, + data::{ + member::MemberId, + sheet::{Sheet, SheetMappingMetadata, SheetName, SheetPathBuf}, + vault::Vault, + }, +}; + +pub type SheetShareId = String; + +const SHEET_NAME: &str = "{sheet_name}"; +const SHARE_ID: &str = "{share_id}"; + +#[derive(Default, Serialize, Deserialize, ConfigFile, Clone, Debug)] +pub struct Share { + /// Sharer: the member who created this share item + #[serde(rename = "sharer")] + pub sharer: MemberId, + + /// Description of the share item + #[serde(rename = "desc")] + pub description: String, + + /// Metadata path + #[serde(skip)] + pub path: Option, + + /// From: which sheet the member exported the file from + #[serde(rename = "from")] + pub from_sheet: SheetName, + + /// Mappings: the sheet mappings contained in the share item + #[serde(rename = "map")] + pub mappings: HashMap, +} + +#[derive(Default, Serialize, Deserialize, ConfigFile, Clone, PartialEq, Eq)] +pub enum ShareMergeMode { + /// If a path or file 
already exists during merge, prioritize the incoming share + /// Path conflict: replace the mapping content at the local path with the incoming content + /// File conflict: delete the original file mapping and create a new one + Overwrite, + + /// If a path or file already exists during merge, skip overwriting this entry + Skip, + + /// Pre-check for conflicts, prohibit merging if any conflicts are found + #[default] + Safe, + + /// Reject all shares + RejectAll, +} + +#[derive(Default, Serialize, Deserialize, ConfigFile, Clone)] +pub struct ShareMergeConflict { + /// Duplicate mappings exist + pub duplicate_mapping: Vec, + + /// Duplicate files exist + pub duplicate_file: Vec, +} + +impl ShareMergeConflict { + /// Check if there are no conflicts + pub fn ok(&self) -> bool { + self.duplicate_mapping.is_empty() && self.duplicate_file.is_empty() + } +} + +impl Vault { + /// Get the path of a share item in a sheet + pub fn share_file_path(&self, sheet_name: &SheetName, share_id: &SheetShareId) -> PathBuf { + let sheet_name = snake_case!(sheet_name.clone()); + let share_id = share_id.clone(); + + // Format the path to remove "./" prefix and normalize it + let path_str = SERVER_FILE_SHEET_SHARE + .replace(SHEET_NAME, &sheet_name) + .replace(SHARE_ID, &share_id); + + // Use format_path to normalize the path + match format_path::format_path_str(&path_str) { + Ok(normalized_path) => self.vault_path().join(normalized_path), + Err(_) => { + // Fallback to original behavior if formatting fails + self.vault_path().join(path_str) + } + } + } + + /// Get the actual paths of all share items in a sheet + pub async fn share_file_paths(&self, sheet_name: &SheetName) -> Vec { + let sheet_name = snake_case!(sheet_name.clone()); + let shares_dir = self + .vault_path() + .join(SERVER_PATH_SHARES.replace(SHEET_NAME, &sheet_name)); + + let mut result = Vec::new(); + if let Ok(mut entries) = fs::read_dir(shares_dir).await { + while let Ok(Some(entry)) = entries.next_entry().await { + let 
path = entry.path(); + if path.is_file() + && path.extension().and_then(|s| s.to_str()) + == Some(SERVER_SUFFIX_SHEET_SHARE_FILE_NO_DOT) + { + result.push(path); + } + } + } + result + } +} + +impl<'a> Sheet<'a> { + /// Get the shares of a sheet + pub async fn get_shares(&self) -> Result, std::io::Error> { + let paths = self.vault_reference.share_file_paths(&self.name).await; + let mut shares = Vec::new(); + + for path in paths { + match Share::read_from(&path).await { + Ok(mut share) => { + share.path = Some(path); + shares.push(share); + } + Err(e) => return Err(e), + } + } + + Ok(shares) + } + + /// Get a share of a sheet + pub async fn get_share(&self, share_id: &SheetShareId) -> Result { + let path = self.vault_reference.share_file_path(&self.name, share_id); + let mut share = Share::read_from(&path).await?; + share.path = Some(path); + Ok(share) + } + + /// Import a share of a sheet by its ID + pub async fn merge_share_by_id( + self, + share_id: &SheetShareId, + share_merge_mode: ShareMergeMode, + ) -> Result<(), std::io::Error> { + let share = self.get_share(share_id).await?; + self.merge_share(share, share_merge_mode).await + } + + /// Import a share of a sheet + pub async fn merge_share( + mut self, + share: Share, + share_merge_mode: ShareMergeMode, + ) -> Result<(), std::io::Error> { + // Backup original data and edit based on this backup + let mut copy_share = share.clone(); + let mut copy_sheet = self.clone_data(); + + // Pre-check + let conflicts = self.precheck(©_share); + let mut reject_mode = false; + + match share_merge_mode { + // Safe mode: conflicts are not allowed + ShareMergeMode::Safe => { + // Conflicts found + if !conflicts.ok() { + // Do nothing, return Error + return Err(Error::new( + std::io::ErrorKind::AlreadyExists, + "Mappings or files already exist!", + )); + } + } + // Overwrite mode: when conflicts occur, prioritize the share item + ShareMergeMode::Overwrite => { + // Handle duplicate mappings + for path in 
conflicts.duplicate_mapping { + // Get the share data + let Some(share_value) = copy_share.mappings.remove(&path) else { + return Err(Error::new( + std::io::ErrorKind::NotFound, + format!("Share value `{}` not found!", &path.display()), + )); + }; + // Overwrite + copy_sheet.mapping_mut().insert(path, share_value); + } + + // Handle duplicate IDs + for path in conflicts.duplicate_file { + // Get the share data + let Some(share_value) = copy_share.mappings.remove(&path) else { + return Err(Error::new( + std::io::ErrorKind::NotFound, + format!("Share value `{}` not found!", &path.display()), + )); + }; + + // Extract the file ID + let conflict_vfid = &share_value.id; + + // Through the sheet's ID mapping + let Some(id_mapping) = copy_sheet.id_mapping_mut() else { + return Err(Error::new( + std::io::ErrorKind::NotFound, + "Id mapping not found!", + )); + }; + + // Get the original path from the ID mapping + let Some(raw_path) = id_mapping.remove(conflict_vfid) else { + return Err(Error::new( + std::io::ErrorKind::NotFound, + format!("The path of virtual file `{}' not found!", conflict_vfid), + )); + }; + + // Remove the original path mapping + if copy_sheet.mapping_mut().remove(&raw_path).is_none() { + return Err(Error::new( + std::io::ErrorKind::NotFound, + format!("Remove mapping `{}` failed!", &raw_path.display()), + )); + } + // Insert the new item + copy_sheet.mapping_mut().insert(path, share_value); + } + } + // Skip mode: when conflicts occur, prioritize the local sheet + ShareMergeMode::Skip => { + // Directly remove conflicting items + for path in conflicts.duplicate_mapping { + copy_share.mappings.remove(&path); + } + for path in conflicts.duplicate_file { + copy_share.mappings.remove(&path); + } + } + // Reject all mode: reject all shares + ShareMergeMode::RejectAll => { + reject_mode = true; // Only mark as rejected + } + } + + if !reject_mode { + // Subsequent merging + copy_sheet + .mapping_mut() + .extend(copy_share.mappings.into_iter()); + + // Merge 
completed + self.data = copy_sheet; // Write the result + + // Merge completed, consume the sheet + self.persist().await.map_err(|err| { + Error::new( + std::io::ErrorKind::NotFound, + format!("Write sheet failed: {}", err), + ) + })?; + } + + // Persistence succeeded, continue to consume the share item + share.remove().await.map_err(|err| { + Error::new( + std::io::ErrorKind::NotFound, + format!("Remove share failed: {}", err.1), + ) + }) + } + + // Pre-check whether the share can be imported into the current sheet without conflicts + fn precheck(&self, share: &Share) -> ShareMergeConflict { + let mut conflicts = ShareMergeConflict::default(); + + for (mapping, metadata) in &share.mappings { + // Check for duplicate mappings + if self.mapping().contains_key(mapping.as_path()) { + conflicts.duplicate_mapping.push(mapping.clone()); + continue; + } + + // Check for duplicate IDs + if let Some(id_mapping) = self.id_mapping() { + if id_mapping.contains_key(&metadata.id) { + conflicts.duplicate_file.push(mapping.clone()); + continue; + } + } + } + + conflicts + } + + /// Share mappings with another sheet + pub async fn share_mappings( + &self, + other_sheet: &SheetName, + mappings: Vec, + sharer: &MemberId, + description: String, + ) -> Result { + let other_sheet = snake_case!(other_sheet.clone()); + let sharer = snake_case!(sharer.clone()); + + // Check if the sheet exists + let sheet_names = self.vault_reference.sheet_names()?; + if !sheet_names.contains(&other_sheet) { + return Err(Error::new( + std::io::ErrorKind::NotFound, + format!("Sheet `{}` not found!", &other_sheet), + )); + } + + // Check if the target file exists, regenerate ID if path already exists, up to 20 attempts + let target_path = { + let mut id; + let mut share_path; + let mut attempts = 0; + + loop { + id = Share::gen_share_id(&sharer); + share_path = self.vault_reference.share_file_path(&other_sheet, &id); + + if !share_path.exists() { + break share_path; + } + + attempts += 1; + if attempts >= 20 
{ + return Err(Error::new( + std::io::ErrorKind::AlreadyExists, + "Failed to generate unique share ID after 20 attempts!", + )); + } + } + }; + + // Validate that the share is valid + let mut share_mappings = HashMap::new(); + for mapping_path in &mappings { + if let Some(metadata) = self.mapping().get(mapping_path) { + share_mappings.insert(mapping_path.clone(), metadata.clone()); + } else { + return Err(Error::new( + std::io::ErrorKind::NotFound, + format!("Mapping `{}` not found in sheet!", mapping_path.display()), + )); + } + } + + // Build share data + let share_data = Share { + sharer, + description, + path: None, // This is only needed during merging (reading), no need to serialize now + from_sheet: self.name.clone(), + mappings: share_mappings, + }; + + // Write data + Share::write_to(&share_data, target_path).await?; + + Ok(share_data) + } +} + +impl Share { + /// Generate a share ID for a given sharer + pub fn gen_share_id(sharer: &MemberId) -> String { + let sharer_snake = snake_case!(sharer.clone()); + let random_part: String = rng() + .sample_iter(&rand::distr::Alphanumeric) + .take(8) + .map(char::from) + .collect(); + format!("{}@{}", sharer_snake, random_part) + } + + /// Delete a share (reject or remove the share item) + /// If deletion succeeds, returns `Ok(())`; + /// If deletion fails, returns `Err((self, std::io::Error))`, containing the original share object and the error information. 
+ pub async fn remove(self) -> Result<(), (Self, std::io::Error)> { + let Some(path) = &self.path else { + return Err(( + self, + Error::new(std::io::ErrorKind::NotFound, "No share path recorded!"), + )); + }; + + if !path.exists() { + return Err(( + self, + Error::new(std::io::ErrorKind::NotFound, "No share file exists!"), + )); + } + + match fs::remove_file(path).await { + Err(err) => Err(( + self, + Error::new( + std::io::ErrorKind::Other, + format!("Failed to delete share file: {}", err), + ), + )), + Ok(_) => Ok(()), + } + } +} diff --git a/data/src/data/vault/sheets.rs b/data/src/data/vault/sheets.rs new file mode 100644 index 0000000..c22c849 --- /dev/null +++ b/data/src/data/vault/sheets.rs @@ -0,0 +1,274 @@ +use std::{collections::HashMap, io::Error}; + +use cfg_file::config::ConfigFile; +use string_proc::snake_case; +use tokio::fs; + +use crate::{ + constants::{SERVER_PATH_SHEETS, SERVER_SUFFIX_SHEET_FILE_NO_DOT}, + data::{ + member::MemberId, + sheet::{Sheet, SheetData, SheetName}, + vault::Vault, + }, +}; + +/// Vault Sheets Management +impl Vault { + /// Load all sheets in the vault + /// + /// It is generally not recommended to call this function frequently. + /// Although a vault typically won't contain too many sheets, + /// if individual sheet contents are large, this operation may cause + /// significant performance bottlenecks. 
+ pub async fn sheets<'a>(&'a self) -> Result>, std::io::Error> { + let sheet_names = self.sheet_names()?; + let mut sheets = Vec::new(); + + for sheet_name in sheet_names { + let sheet = self.sheet(&sheet_name).await?; + sheets.push(sheet); + } + + Ok(sheets) + } + + /// Search for all sheet names in the vault + /// + /// The complexity of this operation is proportional to the number of sheets, + /// but generally there won't be too many sheets in a Vault + pub fn sheet_names(&self) -> Result, std::io::Error> { + // Get the sheets directory path + let sheets_dir = self.vault_path.join(SERVER_PATH_SHEETS); + + // If the directory doesn't exist, return an empty list + if !sheets_dir.exists() { + return Ok(vec![]); + } + + let mut sheet_names = Vec::new(); + + // Iterate through all files in the sheets directory + for entry in std::fs::read_dir(sheets_dir)? { + let entry = entry?; + let path = entry.path(); + + // Check if it's a YAML file + if path.is_file() + && path + .extension() + .is_some_and(|ext| ext == SERVER_SUFFIX_SHEET_FILE_NO_DOT) + && let Some(file_stem) = path.file_stem().and_then(|s| s.to_str()) + { + // Create a new SheetName and add it to the result list + sheet_names.push(file_stem.to_string()); + } + } + + Ok(sheet_names) + } + + /// Read a sheet from its name + /// + /// If the sheet information is successfully found in the vault, + /// it will be deserialized and read as a sheet. + /// This is the only correct way to obtain a sheet instance. 
+ pub async fn sheet<'a>(&'a self, sheet_name: &SheetName) -> Result, std::io::Error> { + let sheet_name = snake_case!(sheet_name.clone()); + + // Get the path to the sheet file + let sheet_path = Sheet::sheet_path_with_name(self, &sheet_name); + + // Ensure the sheet file exists + if !sheet_path.exists() { + // If the sheet does not exist, try to restore it from the trash + if self.restore_sheet(&sheet_name).await.is_err() { + // If restoration fails, return an error + return Err(Error::new( + std::io::ErrorKind::NotFound, + format!("Sheet `{}` not found!", sheet_name), + )); + } + } + + // Read the sheet data from the file + let data = SheetData::read_from(sheet_path).await?; + + Ok(Sheet { + name: sheet_name.clone(), + data, + vault_reference: self, + }) + } + + /// Create a sheet locally and return the sheet instance + /// + /// This method creates a new sheet in the vault with the given name and holder. + /// It will verify that the member exists and that the sheet doesn't already exist + /// before creating the sheet file with default empty data. 
+ pub async fn create_sheet<'a>( + &'a self, + sheet_name: &SheetName, + holder: &MemberId, + ) -> Result, std::io::Error> { + let sheet_name = snake_case!(sheet_name.clone()); + + // Ensure member exists + if !self.member_cfg_path(holder).exists() { + return Err(Error::new( + std::io::ErrorKind::NotFound, + format!("Member `{}` not found!", &holder), + )); + } + + // Ensure sheet does not already exist + let sheet_file_path = Sheet::sheet_path_with_name(self, &sheet_name); + if sheet_file_path.exists() { + return Err(Error::new( + std::io::ErrorKind::AlreadyExists, + format!("Sheet `{}` already exists!", &sheet_name), + )); + } + + // Create the sheet file + let sheet_data = SheetData { + holder: Some(holder.clone()), + mapping: HashMap::new(), + id_mapping: None, + write_count: 0, + }; + SheetData::write_to(&sheet_data, sheet_file_path).await?; + + Ok(Sheet { + name: sheet_name, + data: sheet_data, + vault_reference: self, + }) + } + + /// Delete the sheet file from local disk by name + /// + /// This method will remove the sheet file with the given name from the vault. + /// It will verify that the sheet exists before attempting to delete it. + /// If the sheet is successfully deleted, it will return Ok(()). + /// + /// Warning: This operation is dangerous. Deleting a sheet will cause local workspaces + /// using this sheet to become invalid. Please ensure the sheet is not currently in use + /// and will not be used in the future. + /// + /// For a safer deletion method, consider using `delete_sheet_safety`. + /// + /// Note: This function is intended for server-side use only and should not be + /// arbitrarily called by other members to prevent unauthorized data deletion. 
+ pub async fn delete_sheet(&self, sheet_name: &SheetName) -> Result<(), std::io::Error> { + let sheet_name = snake_case!(sheet_name.clone()); + + // Ensure sheet exists + let sheet_file_path = Sheet::sheet_path_with_name(self, &sheet_name); + if !sheet_file_path.exists() { + return Err(Error::new( + std::io::ErrorKind::NotFound, + format!("Sheet `{}` not found!", &sheet_name), + )); + } + + // Delete the sheet file + fs::remove_file(sheet_file_path).await?; + + Ok(()) + } + + /// Safely delete the sheet + /// + /// The sheet will be moved to the trash directory, ensuring it does not appear in the + /// results of `sheets` and `sheet_names` methods. + /// However, if the sheet's holder attempts to access the sheet through the `sheet` method, + /// the system will automatically restore it from the trash directory. + /// This means: the sheet will only permanently remain in the trash directory, + /// waiting for manual cleanup by an administrator, when it is truly no longer in use. + /// + /// This is a safer deletion method because it provides the possibility of recovery, + /// avoiding irreversible data loss caused by accidental deletion. + /// + /// Note: This function is intended for server-side use only and should not be + /// arbitrarily called by other members to prevent unauthorized data deletion. 
+ pub async fn delete_sheet_safely(&self, sheet_name: &SheetName) -> Result<(), std::io::Error> { + let sheet_name = snake_case!(sheet_name.clone()); + + // Ensure the sheet exists + let sheet_file_path = Sheet::sheet_path_with_name(self, &sheet_name); + if !sheet_file_path.exists() { + return Err(Error::new( + std::io::ErrorKind::NotFound, + format!("Sheet `{}` not found!", &sheet_name), + )); + } + + // Create the trash directory + let trash_dir = self.vault_path.join(".trash"); + if !trash_dir.exists() { + fs::create_dir_all(&trash_dir).await?; + } + + // Generate a unique filename in the trash + let timestamp = std::time::SystemTime::now() + .duration_since(std::time::UNIX_EPOCH) + .unwrap() + .as_millis(); + let trash_file_name = format!( + "{}_{}.{}", + sheet_name, timestamp, SERVER_SUFFIX_SHEET_FILE_NO_DOT + ); + let trash_path = trash_dir.join(trash_file_name); + + // Move the sheet file to the trash + fs::rename(&sheet_file_path, &trash_path).await?; + + Ok(()) + } + + /// Restore the sheet from the trash + /// + /// Restore the specified sheet from the trash to its original location, making it accessible normally. + pub async fn restore_sheet(&self, sheet_name: &SheetName) -> Result<(), std::io::Error> { + let sheet_name = snake_case!(sheet_name.clone()); + + // Search for matching files in the trash + let trash_dir = self.vault_path.join(".trash"); + if !trash_dir.exists() { + return Err(Error::new( + std::io::ErrorKind::NotFound, + "Trash directory does not exist!".to_string(), + )); + } + + let mut found_path = None; + for entry in std::fs::read_dir(&trash_dir)? 
{ + let entry = entry?; + let path = entry.path(); + + if path.is_file() + && let Some(file_name) = path.file_stem().and_then(|s| s.to_str()) + { + // Check if the filename starts with the sheet name + if file_name.starts_with(&sheet_name) { + found_path = Some(path); + break; + } + } + } + + let trash_path = found_path.ok_or_else(|| { + Error::new( + std::io::ErrorKind::NotFound, + format!("Sheet `{}` not found in trash!", &sheet_name), + ) + })?; + + // Restore the sheet to its original location + let original_path = Sheet::sheet_path_with_name(self, &sheet_name); + fs::rename(&trash_path, &original_path).await?; + + Ok(()) + } +} diff --git a/data/src/data/vault/virtual_file.rs b/data/src/data/vault/virtual_file.rs new file mode 100644 index 0000000..8dbcb5d --- /dev/null +++ b/data/src/data/vault/virtual_file.rs @@ -0,0 +1,506 @@ +use std::{ + collections::HashMap, + io::{Error, ErrorKind}, + path::PathBuf, +}; + +use cfg_file::{ConfigFile, config::ConfigFile}; +use serde::{Deserialize, Serialize}; +use string_proc::{dot_case, snake_case}; +use tcp_connection::instance::ConnectionInstance; +use tokio::fs; +use uuid::Uuid; + +use crate::{ + constants::{ + SERVER_FILE_VF_META, SERVER_FILE_VF_VERSION_INSTANCE, SERVER_PATH_VF_ROOT, + SERVER_PATH_VF_STORAGE, SERVER_PATH_VF_TEMP, + }, + data::{member::MemberId, vault::Vault}, +}; + +pub type VirtualFileId = String; +pub type VirtualFileVersion = String; + +const VF_PREFIX: &str = "vf-"; +const ID_PARAM: &str = "{vf_id}"; +const ID_INDEX: &str = "{vf_index}"; +const VERSION_PARAM: &str = "{vf_version}"; +const TEMP_NAME: &str = "{temp_name}"; + +pub struct VirtualFile<'a> { + /// Unique identifier for the virtual file + id: VirtualFileId, + + /// Reference of Vault + current_vault: &'a Vault, +} + +#[derive(Default, Clone, Serialize, Deserialize, ConfigFile)] +pub struct VirtualFileMeta { + /// Current version of the virtual file + #[serde(rename = "ver")] + current_version: VirtualFileVersion, + + /// The member who 
holds the edit right of the file + #[serde(rename = "holder")] + hold_member: MemberId, + + /// Description of each version + #[serde(rename = "descs")] + version_description: HashMap, + + /// Histories + #[serde(rename = "histories")] + histories: Vec, +} + +#[derive(Debug, Default, Clone, Serialize, Deserialize)] +pub struct VirtualFileVersionDescription { + /// The member who created this version + #[serde(rename = "creator")] + pub creator: MemberId, + + /// The description of this version + #[serde(rename = "desc")] + pub description: String, +} + +impl VirtualFileVersionDescription { + /// Create a new version description + pub fn new(creator: MemberId, description: String) -> Self { + Self { + creator, + description, + } + } +} + +/// Virtual File Operations +impl Vault { + /// Generate a temporary path for receiving + pub fn virtual_file_temp_path(&self) -> PathBuf { + let random_receive_name = format!("{}", uuid::Uuid::new_v4()); + self.vault_path + .join(SERVER_PATH_VF_TEMP.replace(TEMP_NAME, &random_receive_name)) + } + + /// Get the directory where virtual files are stored + pub fn virtual_file_storage_dir(&self) -> PathBuf { + self.vault_path().join(SERVER_PATH_VF_ROOT) + } + + /// Get the directory where a specific virtual file is stored + pub fn virtual_file_dir(&self, id: &VirtualFileId) -> Result { + Ok(self.vault_path().join( + SERVER_PATH_VF_STORAGE + .replace(ID_PARAM, &id.to_string()) + .replace(ID_INDEX, &Self::vf_index(id)?), + )) + } + + // Generate index path of virtual file + fn vf_index(id: &VirtualFileId) -> Result { + // Remove VF_PREFIX if present + let id_str = if let Some(stripped) = id.strip_prefix(VF_PREFIX) { + stripped + } else { + id + }; + + // Extract the first part before the first hyphen + let first_part = id_str.split('-').next().ok_or_else(|| { + std::io::Error::new( + std::io::ErrorKind::InvalidInput, + "Invalid virtual file ID format: no hyphen found", + ) + })?; + + // Ensure the first part has at least 4 characters + 
if first_part.len() < 4 { + return Err(std::io::Error::new( + std::io::ErrorKind::InvalidInput, + "Invalid virtual file ID format: first part must have at least 4 characters", + ))?; + } + + // Take only the first 4 characters and split into two 2-character chunks + let first_four = &first_part[0..4]; + let mut path = String::new(); + for i in (0..first_four.len()).step_by(2) { + if i > 0 { + path.push('/'); + } + path.push_str(&first_four[i..i + 2]); + } + + Ok(path) + } + + /// Get the directory where a specific virtual file's metadata is stored + pub fn virtual_file_real_path( + &self, + id: &VirtualFileId, + version: &VirtualFileVersion, + ) -> PathBuf { + self.vault_path().join( + SERVER_FILE_VF_VERSION_INSTANCE + .replace(ID_PARAM, &id.to_string()) + .replace(ID_INDEX, &Self::vf_index(id).unwrap_or_default()) + .replace(VERSION_PARAM, &version.to_string()), + ) + } + + /// Get the directory where a specific virtual file's metadata is stored + pub fn virtual_file_meta_path(&self, id: &VirtualFileId) -> PathBuf { + self.vault_path().join( + SERVER_FILE_VF_META + .replace(ID_PARAM, &id.to_string()) + .replace(ID_INDEX, &Self::vf_index(id).unwrap_or_default()), + ) + } + + /// Get the virtual file with the given ID + pub fn virtual_file(&self, id: &VirtualFileId) -> Result, std::io::Error> { + let dir = self.virtual_file_dir(id); + if dir?.exists() { + Ok(VirtualFile { + id: id.clone(), + current_vault: self, + }) + } else { + Err(std::io::Error::new( + std::io::ErrorKind::NotFound, + "Cannot found virtual file!", + )) + } + } + + /// Get the meta data of the virtual file with the given ID + pub async fn virtual_file_meta( + &self, + id: &VirtualFileId, + ) -> Result { + let dir = self.virtual_file_meta_path(id); + let metadata = VirtualFileMeta::read_from(dir).await?; + Ok(metadata) + } + + /// Write the meta data of the virtual file with the given ID + pub async fn write_virtual_file_meta( + &self, + id: &VirtualFileId, + meta: &VirtualFileMeta, + ) -> 
Result<(), std::io::Error> { + let dir = self.virtual_file_meta_path(id); + VirtualFileMeta::write_to(meta, dir).await?; + Ok(()) + } + + /// Create a virtual file from a connection instance + /// + /// It's the only way to create virtual files! + /// + /// When the target machine executes `write_file`, use this function instead of `read_file`, + /// and provide the member ID of the transmitting member. + /// + /// The system will automatically receive the file and + /// create the virtual file. + pub async fn create_virtual_file_from_connection( + &self, + instance: &mut ConnectionInstance, + member_id: &MemberId, + ) -> Result { + const FIRST_VERSION: &str = "0.1.0"; + let receive_path = self.virtual_file_temp_path(); + let new_id = format!("{}{}", VF_PREFIX, Uuid::new_v4()); + let move_path = self.virtual_file_real_path(&new_id, &FIRST_VERSION.to_string()); + + match instance.read_file(receive_path.clone()).await { + Ok(_) => { + // Read successful, create virtual file + // Create default version description + let mut version_description = + HashMap::::new(); + version_description.insert( + FIRST_VERSION.to_string(), + VirtualFileVersionDescription { + creator: member_id.clone(), + description: "Track".to_string(), + }, + ); + // Create metadata + let mut meta = VirtualFileMeta { + current_version: FIRST_VERSION.to_string(), + hold_member: member_id.clone(), // The holder of the newly created virtual file is the creator by default + version_description, + histories: Vec::default(), + }; + + // Add first version + meta.histories.push(FIRST_VERSION.to_string()); + + // Write metadata to file + VirtualFileMeta::write_to(&meta, self.virtual_file_meta_path(&new_id)).await?; + + // Move temp file to virtual file directory + if let Some(parent) = move_path.parent() + && !parent.exists() + { + fs::create_dir_all(parent).await?; + } + fs::rename(receive_path, move_path).await?; + + Ok(new_id) + } + Err(e) => { + // Read failed, remove temp file. 
+ if receive_path.exists() { + fs::remove_file(receive_path).await?; + } + + Err(Error::other(e)) + } + } + } + + /// Update a virtual file from a connection instance + /// + /// It's the only way to update virtual files! + /// When the target machine executes `write_file`, use this function instead of `read_file`, + /// and provide the member ID of the transmitting member. + /// + /// The system will automatically receive the file and + /// update the virtual file. + /// + /// Note: The specified member must hold the edit right of the file, + /// otherwise the file reception will not be allowed. + /// + /// Make sure to obtain the edit right of the file before calling this function. + pub async fn update_virtual_file_from_connection( + &self, + instance: &mut ConnectionInstance, + member: &MemberId, + virtual_file_id: &VirtualFileId, + new_version: &VirtualFileVersion, + description: VirtualFileVersionDescription, + ) -> Result<(), std::io::Error> { + let new_version = dot_case!(new_version.clone()); + let mut meta = self.virtual_file_meta(virtual_file_id).await?; + + // Check if the member has edit right + self.check_virtual_file_edit_right(member, virtual_file_id) + .await?; + + // Check if the new version already exists + if meta.version_description.contains_key(&new_version) { + return Err(Error::new( + ErrorKind::AlreadyExists, + format!( + "Version `{}` already exists for virtual file `{}`", + new_version, virtual_file_id + ), + )); + } + + // Verify success + let receive_path = self.virtual_file_temp_path(); + let move_path = self.virtual_file_real_path(virtual_file_id, &new_version); + + match instance.read_file(receive_path.clone()).await { + Ok(_) => { + // Read success, move temp file to real path. 
+ fs::rename(receive_path, move_path).await?; + + // Update metadata + meta.current_version = new_version.clone(); + meta.version_description + .insert(new_version.clone(), description); + meta.histories.push(new_version); + VirtualFileMeta::write_to(&meta, self.virtual_file_meta_path(virtual_file_id)) + .await?; + + Ok(()) + } + Err(e) => { + // Read failed, remove temp file. + if receive_path.exists() { + fs::remove_file(receive_path).await?; + } + + Err(Error::other(e)) + } + } + } + + /// Update virtual file from existing version + /// + /// This operation creates a new version based on the specified old version file instance. + /// The new version will retain the same version name as the old version, but use a different version number. + /// After the update, this version will be considered newer than the original version when comparing versions. + pub async fn update_virtual_file_from_exist_version( + &self, + member: &MemberId, + virtual_file_id: &VirtualFileId, + old_version: &VirtualFileVersion, + ) -> Result<(), std::io::Error> { + let old_version = snake_case!(old_version.clone()); + let mut meta = self.virtual_file_meta(virtual_file_id).await?; + + // Check if the member has edit right + self.check_virtual_file_edit_right(member, virtual_file_id) + .await?; + + // Ensure virtual file exist + let Ok(_) = self.virtual_file(virtual_file_id) else { + return Err(Error::new( + ErrorKind::NotFound, + format!("Virtual file `{}` not found!", virtual_file_id), + )); + }; + + // Ensure version exist + if !meta.version_exists(&old_version) { + return Err(Error::new( + ErrorKind::NotFound, + format!("Version `{}` not found!", old_version), + )); + } + + // Ok, Create new version + meta.current_version = old_version.clone(); + meta.histories.push(old_version); + VirtualFileMeta::write_to(&meta, self.virtual_file_meta_path(virtual_file_id)).await?; + + Ok(()) + } + + /// Grant a member the edit right for a virtual file + /// This operation takes effect immediately 
upon success + pub async fn grant_virtual_file_edit_right( + &self, + member_id: &MemberId, + virtual_file_id: &VirtualFileId, + ) -> Result<(), std::io::Error> { + let mut meta = self.virtual_file_meta(virtual_file_id).await?; + meta.hold_member = member_id.clone(); + self.write_virtual_file_meta(virtual_file_id, &meta).await + } + + /// Check if a member has the edit right for a virtual file + pub async fn has_virtual_file_edit_right( + &self, + member_id: &MemberId, + virtual_file_id: &VirtualFileId, + ) -> Result { + let meta = self.virtual_file_meta(virtual_file_id).await?; + Ok(meta.hold_member.eq(member_id)) + } + + /// Check if a member has the edit right for a virtual file and return Result + /// Returns Ok(()) if the member has edit right, otherwise returns PermissionDenied error + pub async fn check_virtual_file_edit_right( + &self, + member_id: &MemberId, + virtual_file_id: &VirtualFileId, + ) -> Result<(), std::io::Error> { + if !self + .has_virtual_file_edit_right(member_id, virtual_file_id) + .await? 
+ { + return Err(Error::new( + ErrorKind::PermissionDenied, + format!( + "Member `{}` not allowed to update virtual file `{}`", + member_id, virtual_file_id + ), + )); + } + Ok(()) + } + + /// Revoke the edit right for a virtual file from the current holder + /// This operation takes effect immediately upon success + pub async fn revoke_virtual_file_edit_right( + &self, + virtual_file_id: &VirtualFileId, + ) -> Result<(), std::io::Error> { + let mut meta = self.virtual_file_meta(virtual_file_id).await?; + meta.hold_member = String::default(); + self.write_virtual_file_meta(virtual_file_id, &meta).await + } +} + +impl<'a> VirtualFile<'a> { + /// Get id of VirtualFile + pub fn id(&self) -> VirtualFileId { + self.id.clone() + } + + /// Read metadata of VirtualFile + pub async fn read_meta(&self) -> Result { + self.current_vault.virtual_file_meta(&self.id).await + } +} + +impl VirtualFileMeta { + /// Get all versions of the virtual file + pub fn versions(&self) -> &Vec { + &self.histories + } + + /// Get the latest version of the virtual file + pub fn version_latest(&self) -> VirtualFileVersion { + // After creating a virtual file in `update_virtual_file_from_connection`, + // the Vec will never be empty, so unwrap is allowed here + self.histories.last().unwrap().clone() + } + + /// Get the total number of versions for this virtual file + pub fn version_len(&self) -> i32 { + self.histories.len() as i32 + } + + /// Check if a specific version exists + /// Returns true if the version exists, false otherwise + pub fn version_exists(&self, version: &VirtualFileVersion) -> bool { + self.versions().iter().any(|v| v == version) + } + + /// Get the version number (index) for a given version name + /// Returns None if the version doesn't exist + pub fn version_num(&self, version: &VirtualFileVersion) -> Option { + self.histories + .iter() + .rev() + .position(|v| v == version) + .map(|pos| (self.histories.len() - 1 - pos) as i32) + } + + /// Get the version name for a given 
    /// version number (index)
    /// Returns None if the version number is out of range
    pub fn version_name(&self, version_num: i32) -> Option<VirtualFileVersion> {
        // Negative numbers wrap to huge usize values and miss, yielding None
        self.histories.get(version_num as usize).cloned()
    }

    /// Get the member who holds the edit right of the file
    pub fn hold_member(&self) -> &MemberId {
        &self.hold_member
    }

    /// Get the version descriptions for all versions
    pub fn version_descriptions(
        &self,
    ) -> &HashMap<VirtualFileVersion, VirtualFileVersionDescription> {
        &self.version_description
    }

    /// Get the version description for a given version
    pub fn version_description(
        &self,
        version: VirtualFileVersion,
    ) -> Option<&VirtualFileVersionDescription> {
        let desc = self.version_descriptions();
        desc.get(&version)
    }
}
diff --git a/data/src/lib.rs b/data/src/lib.rs
new file mode 100644
index 0000000..1b41391
--- /dev/null
+++ b/data/src/lib.rs
@@ -0,0 +1,5 @@
pub mod constants;
pub mod current;

#[allow(dead_code)]
pub mod data;
diff --git a/data/tests/Cargo.toml b/data/tests/Cargo.toml
new file mode 100644
index 0000000..e7a0fcc
--- /dev/null
+++ b/data/tests/Cargo.toml
@@ -0,0 +1,13 @@
[package]
name = "vcs_data_test"
edition = "2024"
version.workspace = true

[dependencies]
tcp_connection = { path = "../../utils/tcp_connection" }
tcp_connection_test = { path = "../../utils/tcp_connection/tcp_connection_test" }
cfg_file = { path = "../../utils/cfg_file", features = ["default"] }
vcs_data = { path = "../../data" }

# Async & Networking
tokio = { version = "1.48.0", features = ["full"] }
diff --git a/data/tests/src/lib.rs b/data/tests/src/lib.rs
new file mode 100644
index 0000000..ced2d3d
--- /dev/null
+++ b/data/tests/src/lib.rs
@@ -0,0 +1,30 @@
use std::{env::current_dir, path::PathBuf};

use tokio::fs;

#[cfg(test)]
pub mod test_vault_setup_and_member_register;

#[cfg(test)]
pub mod test_virtual_file_creation_and_update;

#[cfg(test)]
pub mod test_local_workspace_setup_and_account_management;

#[cfg(test)]
pub mod test_sheet_creation_management_and_persistence;

#[cfg(test)]
pub mod test_sheet_share_creation_and_management;

// Create (or wipe and recreate) a fresh scratch directory for a test area
// under `<cwd>/.temp/test/<area>`, so every test run starts from empty state.
pub async fn get_test_dir(area: &str) -> Result<PathBuf, std::io::Error> {
    let dir = current_dir()?.join(".temp").join("test").join(area);
    if !dir.exists() {
        std::fs::create_dir_all(&dir)?;
    } else {
        // Regenerate existing directory
        fs::remove_dir_all(&dir).await?;
        fs::create_dir_all(&dir).await?;
    }
    Ok(dir)
}
diff --git a/data/tests/src/test_local_workspace_setup_and_account_management.rs b/data/tests/src/test_local_workspace_setup_and_account_management.rs
new file mode 100644
index 0000000..8fa2676
--- /dev/null
+++ b/data/tests/src/test_local_workspace_setup_and_account_management.rs
@@ -0,0 +1,248 @@
use std::io::Error;

use cfg_file::config::ConfigFile;
use vcs_data::{
    constants::{CLIENT_FILE_TODOLIST, CLIENT_FILE_WORKSPACE, USER_FILE_KEY, USER_FILE_MEMBER},
    data::{
        local::{LocalWorkspace, config::LocalConfig},
        member::Member,
        user::UserDirectory,
    },
};

use crate::get_test_dir;

// End-to-end walk through workspace setup and the full account lifecycle:
// register, read back, list, update metadata, and remove.
#[tokio::test]
async fn test_local_workspace_setup_and_account_management() -> Result<(), std::io::Error> {
    let dir = get_test_dir("local_workspace_account_management").await?;

    // Setup local workspace
    LocalWorkspace::setup_local_workspace(dir.clone()).await?;

    // Check if the following files are created in `dir`:
    // Files: CLIENT_FILE_WORKSPACE, CLIENT_FILE_README
    assert!(dir.join(CLIENT_FILE_WORKSPACE).exists());
    assert!(dir.join(CLIENT_FILE_TODOLIST).exists());

    // Get local workspace
    let config = LocalConfig::read_from(dir.join(CLIENT_FILE_WORKSPACE)).await?;
    let Some(_local_workspace) = LocalWorkspace::init(config, &dir) else {
        return Err(Error::new(
            std::io::ErrorKind::NotFound,
            "Local workspace not found!",
        ));
    };

    // Create user directory from workspace path
    let Some(user_directory) = UserDirectory::from_path(&dir) else {
        return Err(Error::new(
            std::io::ErrorKind::NotFound,
            "User directory not found!",
        ));
    };

    // Test account registration
    let member_id = "test_account";
    let member = Member::new(member_id);

    // Register account
    user_directory.register_account(member.clone()).await?;

    // Check if the account config file exists
    assert!(
        dir.join(USER_FILE_MEMBER.replace("{self_id}", member_id))
            .exists()
    );

    // Test account retrieval
    let retrieved_member = user_directory.account(&member_id.to_string()).await?;
    assert_eq!(retrieved_member.id(), member.id());

    // Test account IDs listing
    let account_ids = user_directory.account_ids()?;
    assert!(account_ids.contains(&member_id.to_string()));

    // Test accounts listing
    let accounts = user_directory.accounts().await?;
    assert_eq!(accounts.len(), 1);
    assert_eq!(accounts[0].id(), member.id());

    // Test account existence check
    assert!(user_directory.account_cfg(&member_id.to_string()).is_some());

    // Test private key check (should be false initially)
    assert!(!user_directory.has_private_key(&member_id.to_string()));

    // Test account update
    let mut updated_member = member.clone();
    updated_member.set_metadata("email", "test@example.com");
    user_directory
        .update_account(updated_member.clone())
        .await?;

    // Verify update
    let updated_retrieved = user_directory.account(&member_id.to_string()).await?;
    assert_eq!(
        updated_retrieved.metadata("email"),
        Some(&"test@example.com".to_string())
    );

    // Test account removal
    user_directory.remove_account(&member_id.to_string())?;

    // Check if the account config file no longer exists
    assert!(
        !dir.join(USER_FILE_MEMBER.replace("{self_id}", member_id))
            .exists()
    );

    // Check if account is no longer in the list
    let account_ids_after_removal = user_directory.account_ids()?;
    assert!(!account_ids_after_removal.contains(&member_id.to_string()));

    Ok(())
}

// Verifies that a private key placed on disk is detected and that removing
// the account also removes its private key file.
#[tokio::test]
async fn test_account_private_key_management() -> Result<(), std::io::Error> {
    let dir = get_test_dir("account_private_key_management").await?;

    // Create user directory
    let Some(user_directory) = UserDirectory::from_path(&dir) else {
        return Err(Error::new(
            std::io::ErrorKind::NotFound,
            "User directory not found!",
        ));
    };

    // Register account
    let member_id = "test_account_with_key";
    let member = Member::new(member_id);
    user_directory.register_account(member).await?;

    // Create a dummy private key file for testing
    let private_key_path = dir.join(USER_FILE_KEY.replace("{self_id}", member_id));
    std::fs::create_dir_all(private_key_path.parent().unwrap())?;
    std::fs::write(&private_key_path, "dummy_private_key_content")?;

    // Test private key existence check
    assert!(user_directory.has_private_key(&member_id.to_string()));

    // Test private key path retrieval
    assert!(
        user_directory
            .account_private_key(&member_id.to_string())
            .is_some()
    );

    // Remove account (should also remove private key)
    user_directory.remove_account(&member_id.to_string())?;

    // Check if private key file is also removed
    assert!(!private_key_path.exists());

    Ok(())
}

// Registers several accounts and checks listing stays consistent after a removal.
#[tokio::test]
async fn test_multiple_account_management() -> Result<(), std::io::Error> {
    let dir = get_test_dir("multiple_account_management").await?;

    // Create user directory
    let Some(user_directory) = UserDirectory::from_path(&dir) else {
        return Err(Error::new(
            std::io::ErrorKind::NotFound,
            "User directory not found!",
        ));
    };

    // Register multiple accounts
    let account_names = vec!["alice", "bob", "charlie"];

    for name in &account_names {
        user_directory.register_account(Member::new(*name)).await?;
    }

    // Test account IDs listing
    let account_ids = user_directory.account_ids()?;
    assert_eq!(account_ids.len(), 3);

    for name in &account_names {
        assert!(account_ids.contains(&name.to_string()));
    }

    // Test accounts listing
    let accounts = user_directory.accounts().await?;
    assert_eq!(accounts.len(), 3);

    // Remove one account
    user_directory.remove_account(&"bob".to_string())?;

    // Verify removal
    let account_ids_after_removal = user_directory.account_ids()?;
    assert_eq!(account_ids_after_removal.len(), 2);
    assert!(!account_ids_after_removal.contains(&"bob".to_string()));
    assert!(account_ids_after_removal.contains(&"alice".to_string()));
    assert!(account_ids_after_removal.contains(&"charlie".to_string()));

    Ok(())
}

// Registering the same member id twice must be rejected.
#[tokio::test]
async fn test_account_registration_duplicate_prevention() -> Result<(), std::io::Error> {
    let dir = get_test_dir("account_duplicate_prevention").await?;

    // Create user directory
    let Some(user_directory) = UserDirectory::from_path(&dir) else {
        return Err(Error::new(
            std::io::ErrorKind::NotFound,
            "User directory not found!",
        ));
    };

    // Register account
    let member_id = "duplicate_test";
    user_directory
        .register_account(Member::new(member_id))
        .await?;

    // Try to register same account again - should fail
    let result = user_directory
        .register_account(Member::new(member_id))
        .await;
    assert!(result.is_err());

    Ok(())
}

// Operations on accounts that were never registered: reads and updates fail,
// removal is expected to be idempotent.
#[tokio::test]
async fn test_nonexistent_account_operations() -> Result<(), std::io::Error> {
    let dir = get_test_dir("nonexistent_account_operations").await?;

    // Create user directory
    let Some(user_directory) = UserDirectory::from_path(&dir) else {
        return Err(Error::new(
            std::io::ErrorKind::NotFound,
            "User directory not found!",
        ));
    };

    // Try to read non-existent account - should fail
    let result = user_directory.account(&"nonexistent".to_string()).await;
    assert!(result.is_err());

    // Try to update non-existent account - should fail
    let result = user_directory
        .update_account(Member::new("nonexistent"))
        .await;
    assert!(result.is_err());

    // Try to remove non-existent account - should succeed (idempotent)
    let result = user_directory.remove_account(&"nonexistent".to_string());
    assert!(result.is_ok());

    // Check private key
for non-existent account - should be false + assert!(!user_directory.has_private_key(&"nonexistent".to_string())); + + Ok(()) +} diff --git a/data/tests/src/test_sheet_creation_management_and_persistence.rs b/data/tests/src/test_sheet_creation_management_and_persistence.rs new file mode 100644 index 0000000..6683d06 --- /dev/null +++ b/data/tests/src/test_sheet_creation_management_and_persistence.rs @@ -0,0 +1,275 @@ +use std::io::Error; + +use cfg_file::config::ConfigFile; +use vcs_data::{ + constants::{SERVER_FILE_SHEET, SERVER_FILE_VAULT}, + data::{ + member::{Member, MemberId}, + sheet::SheetName, + vault::{Vault, config::VaultConfig, virtual_file::VirtualFileId}, + }, +}; + +use crate::get_test_dir; + +#[tokio::test] +async fn test_sheet_creation_management_and_persistence() -> Result<(), std::io::Error> { + let dir = get_test_dir("sheet_management").await?; + + // Setup vault + Vault::setup_vault(dir.clone(), "TestVault").await?; + + // Get vault + let config = VaultConfig::read_from(dir.join(SERVER_FILE_VAULT)).await?; + let Some(vault) = Vault::init(config, &dir) else { + return Err(Error::new(std::io::ErrorKind::NotFound, "Vault not found!")); + }; + + // Add a member to use as sheet holder + let member_id: MemberId = "test_member".to_string(); + vault + .register_member_to_vault(Member::new(&member_id)) + .await?; + + // Test 1: Create a new sheet + let sheet_name: SheetName = "test_sheet".to_string(); + let sheet = vault.create_sheet(&sheet_name, &member_id).await?; + + // Verify sheet properties + assert_eq!(sheet.holder(), Some(&member_id)); + assert_eq!(sheet.holder(), Some(&member_id)); + assert!(sheet.mapping().is_empty()); + + // Verify sheet file was created + let sheet_path = dir.join(SERVER_FILE_SHEET.replace("{sheet_name}", &sheet_name)); + assert!(sheet_path.exists()); + + // Test 2: Add mapping entries to the sheet + let mut sheet = vault.sheet(&sheet_name).await?; + + // Add mapping entries for the files + let main_rs_path = 
vcs_data::data::sheet::SheetPathBuf::from("src/main.rs"); + let lib_rs_path = vcs_data::data::sheet::SheetPathBuf::from("src/lib.rs"); + let main_rs_id = VirtualFileId::new(); + let lib_rs_id = VirtualFileId::new(); + + sheet + .add_mapping( + main_rs_path.clone(), + main_rs_id.clone(), + "1.0.0".to_string(), + ) + .await?; + sheet + .add_mapping(lib_rs_path.clone(), lib_rs_id.clone(), "1.0.0".to_string()) + .await?; + + // Verify mappings were added + assert_eq!(sheet.mapping().len(), 2); + + // Test 3: Add more mapping entries + let mapping_path = vcs_data::data::sheet::SheetPathBuf::from("output/build.exe"); + let virtual_file_id = VirtualFileId::new(); + + sheet + .add_mapping( + mapping_path.clone(), + virtual_file_id.clone(), + "1.0.0".to_string(), + ) + .await?; + + // Verify mapping was added + assert_eq!(sheet.mapping().len(), 3); + assert_eq!( + sheet.mapping().get(&mapping_path).map(|meta| &meta.id), + Some(&virtual_file_id) + ); + + // Test 4: Persist sheet to disk + sheet.persist().await?; + + // Verify persistence by reloading the sheet + let reloaded_sheet = vault.sheet(&sheet_name).await?; + assert_eq!(reloaded_sheet.holder(), Some(&member_id)); + assert_eq!(reloaded_sheet.mapping().len(), 3); + + // Test 5: Remove mapping entry + let mut sheet_for_removal = vault.sheet(&sheet_name).await?; + let _removed_virtual_file_id = sheet_for_removal.remove_mapping(&mapping_path).await; + // Don't check the return value since it depends on virtual file existence + assert_eq!(sheet_for_removal.mapping().len(), 2); + + // Test 6: List all sheets in vault + let sheet_names = vault.sheet_names()?; + assert_eq!(sheet_names.len(), 2); + assert!(sheet_names.contains(&sheet_name)); + assert!(sheet_names.contains(&"ref".to_string())); + + let all_sheets = vault.sheets().await?; + assert_eq!(all_sheets.len(), 2); + // One sheet should be the test sheet, the other should be the ref sheet with host as holder + let test_sheet_holder = all_sheets + .iter() + .find(|s| 
s.holder() == Some(&member_id)) + .map(|s| s.holder()) + .unwrap(); + let ref_sheet_holder = all_sheets + .iter() + .find(|s| s.holder() == Some(&"host".to_string())) + .map(|s| s.holder()) + .unwrap(); + assert_eq!(test_sheet_holder, Some(&member_id)); + assert_eq!(ref_sheet_holder, Some(&"host".to_string())); + + // Test 7: Safe deletion (move to trash) + vault.delete_sheet_safely(&sheet_name).await?; + + // Verify sheet is not in normal listing but can be restored + let sheet_names_after_deletion = vault.sheet_names()?; + assert_eq!(sheet_names_after_deletion.len(), 1); + assert_eq!(sheet_names_after_deletion[0], "ref"); + + // Test 8: Restore sheet from trash + let restored_sheet = vault.sheet(&sheet_name).await?; + assert_eq!(restored_sheet.holder(), Some(&member_id)); + assert_eq!(restored_sheet.holder(), Some(&member_id)); + + // Verify sheet is back in normal listing + let sheet_names_after_restore = vault.sheet_names()?; + assert_eq!(sheet_names_after_restore.len(), 2); + assert!(sheet_names_after_restore.contains(&sheet_name)); + assert!(sheet_names_after_restore.contains(&"ref".to_string())); + + // Test 9: Permanent deletion + vault.delete_sheet(&sheet_name).await?; + + // Verify sheet is permanently gone + let sheet_names_final = vault.sheet_names()?; + assert_eq!(sheet_names_final.len(), 1); + assert_eq!(sheet_names_final[0], "ref"); + + // Attempt to access deleted sheet should fail + let result = vault.sheet(&sheet_name).await; + assert!(result.is_err()); + + // Clean up: Remove member + vault.remove_member_from_vault(&member_id)?; + + Ok(()) +} + +#[tokio::test] +async fn test_sheet_error_conditions() -> Result<(), std::io::Error> { + let dir = get_test_dir("sheet_error_conditions").await?; + + // Setup vault + Vault::setup_vault(dir.clone(), "TestVault").await?; + + // Get vault + let config = VaultConfig::read_from(dir.join(SERVER_FILE_VAULT)).await?; + let Some(vault) = Vault::init(config, &dir) else { + return 
Err(Error::new(std::io::ErrorKind::NotFound, "Vault not found!")); + }; + + // Test 1: Create sheet with non-existent member should fail + let non_existent_member: MemberId = "non_existent_member".to_string(); + let sheet_name: SheetName = "test_sheet".to_string(); + + let result = vault.create_sheet(&sheet_name, &non_existent_member).await; + assert!(result.is_err()); + + // Add a member first + let member_id: MemberId = "test_member".to_string(); + vault + .register_member_to_vault(Member::new(&member_id)) + .await?; + + // Test 2: Create duplicate sheet should fail + vault.create_sheet(&sheet_name, &member_id).await?; + let result = vault.create_sheet(&sheet_name, &member_id).await; + assert!(result.is_err()); + + // Test 3: Delete non-existent sheet should fail + let non_existent_sheet: SheetName = "non_existent_sheet".to_string(); + let result = vault.delete_sheet(&non_existent_sheet).await; + assert!(result.is_err()); + + // Test 4: Safe delete non-existent sheet should fail + let result = vault.delete_sheet_safely(&non_existent_sheet).await; + assert!(result.is_err()); + + // Test 5: Restore non-existent sheet from trash should fail + let result = vault.restore_sheet(&non_existent_sheet).await; + assert!(result.is_err()); + + // Clean up + vault.remove_member_from_vault(&member_id)?; + + Ok(()) +} + +#[tokio::test] +async fn test_sheet_data_serialization() -> Result<(), std::io::Error> { + let dir = get_test_dir("sheet_serialization").await?; + + // Test serialization by creating a sheet through the vault + // Setup vault + Vault::setup_vault(dir.clone(), "TestVault").await?; + + // Get vault + let config = VaultConfig::read_from(dir.join(SERVER_FILE_VAULT)).await?; + let Some(vault) = Vault::init(config, &dir) else { + return Err(Error::new(std::io::ErrorKind::NotFound, "Vault not found!")); + }; + + // Add a member + let member_id: MemberId = "test_member".to_string(); + vault + .register_member_to_vault(Member::new(&member_id)) + .await?; + + // Create a 
sheet + let sheet_name: SheetName = "test_serialization_sheet".to_string(); + let mut sheet = vault.create_sheet(&sheet_name, &member_id).await?; + + // Add some mappings + let main_rs_path = vcs_data::data::sheet::SheetPathBuf::from("src/main.rs"); + let lib_rs_path = vcs_data::data::sheet::SheetPathBuf::from("src/lib.rs"); + let main_rs_id = VirtualFileId::new(); + let lib_rs_id = VirtualFileId::new(); + + sheet + .add_mapping( + main_rs_path.clone(), + main_rs_id.clone(), + "1.0.0".to_string(), + ) + .await?; + sheet + .add_mapping(lib_rs_path.clone(), lib_rs_id.clone(), "1.0.0".to_string()) + .await?; + + // Add more mappings + let build_exe_id = VirtualFileId::new(); + + sheet + .add_mapping( + vcs_data::data::sheet::SheetPathBuf::from("output/build.exe"), + build_exe_id, + "1.0.0".to_string(), + ) + .await?; + + // Persist the sheet + sheet.persist().await?; + + // Verify the sheet file was created + let sheet_path = dir.join(SERVER_FILE_SHEET.replace("{sheet_name}", &sheet_name)); + assert!(sheet_path.exists()); + + // Clean up + vault.remove_member_from_vault(&member_id)?; + + Ok(()) +} diff --git a/data/tests/src/test_sheet_share_creation_and_management.rs b/data/tests/src/test_sheet_share_creation_and_management.rs new file mode 100644 index 0000000..89891d6 --- /dev/null +++ b/data/tests/src/test_sheet_share_creation_and_management.rs @@ -0,0 +1,631 @@ +use std::io::Error; + +use cfg_file::config::ConfigFile; +use vcs_data::{ + constants::SERVER_FILE_VAULT, + data::{ + member::{Member, MemberId}, + sheet::{SheetName, SheetPathBuf}, + vault::{ + Vault, + config::VaultConfig, + sheet_share::{Share, ShareMergeMode, SheetShareId}, + virtual_file::VirtualFileId, + }, + }, +}; + +use crate::get_test_dir; + +#[tokio::test] +async fn test_share_creation_and_retrieval() -> Result<(), std::io::Error> { + let dir = get_test_dir("share_creation").await?; + + // Setup vault + Vault::setup_vault(dir.clone(), "TestVault").await?; + + // Get vault + let config = 
VaultConfig::read_from(dir.join(SERVER_FILE_VAULT)).await?; + let Some(vault) = Vault::init(config, &dir) else { + return Err(Error::new(std::io::ErrorKind::NotFound, "Vault not found!")); + }; + + // Add members + let sharer_id: MemberId = "sharer_member".to_string(); + let target_member_id: MemberId = "target_member".to_string(); + + vault + .register_member_to_vault(Member::new(&sharer_id)) + .await?; + vault + .register_member_to_vault(Member::new(&target_member_id)) + .await?; + + // Create source sheet for sharer + let source_sheet_name: SheetName = "source_sheet".to_string(); + let _source_sheet = vault.create_sheet(&source_sheet_name, &sharer_id).await?; + + // Create target sheet for target member + let target_sheet_name: SheetName = "target_sheet".to_string(); + let _target_sheet = vault + .create_sheet(&target_sheet_name, &target_member_id) + .await?; + + // Add mappings to source sheet + let mut source_sheet = vault.sheet(&source_sheet_name).await?; + + let main_rs_path = SheetPathBuf::from("src/main.rs"); + let lib_rs_path = SheetPathBuf::from("src/lib.rs"); + let main_rs_id = VirtualFileId::from("main_rs_id_1"); + let lib_rs_id = VirtualFileId::from("lib_rs_id_1"); + + source_sheet + .add_mapping( + main_rs_path.clone(), + main_rs_id.clone(), + "1.0.0".to_string(), + ) + .await?; + source_sheet + .add_mapping(lib_rs_path.clone(), lib_rs_id.clone(), "1.0.0".to_string()) + .await?; + + // Persist source sheet + source_sheet.persist().await?; + + // Test 1: Share mappings from source sheet to target sheet + let description = "Test share of main.rs and lib.rs".to_string(); + // Need to get the sheet again after persist + let source_sheet = vault.sheet(&source_sheet_name).await?; + + source_sheet + .share_mappings( + &target_sheet_name, + vec![main_rs_path.clone(), lib_rs_path.clone()], + &sharer_id, + description.clone(), + ) + .await?; + + // Test 2: Get shares from target sheet + let target_sheet = vault.sheet(&target_sheet_name).await?; + + let shares 
= target_sheet.get_shares().await?; + + assert_eq!(shares.len(), 1, "Expected 1 share, found {}", shares.len()); + let share = &shares[0]; + + assert_eq!(share.sharer, sharer_id); + assert_eq!(share.description, description); + assert_eq!(share.from_sheet, source_sheet_name); + assert_eq!(share.mappings.len(), 2); + assert!(share.mappings.contains_key(&main_rs_path)); + assert!(share.mappings.contains_key(&lib_rs_path)); + assert!(share.path.is_some()); + + // Test 3: Get specific share by ID + let share_id = Share::gen_share_id(&sharer_id); + let _specific_share = target_sheet.get_share(&share_id).await; + + // Note: The share ID might not match exactly due to random generation, + // but we can verify the share exists by checking the shares list + assert!(shares.iter().any(|s| s.sharer == sharer_id)); + + // Clean up + vault.remove_member_from_vault(&sharer_id)?; + vault.remove_member_from_vault(&target_member_id)?; + + Ok(()) +} + +#[tokio::test] +async fn test_share_merge_modes() -> Result<(), std::io::Error> { + let dir = get_test_dir("share_merge_modes").await?; + + // Setup vault + Vault::setup_vault(dir.clone(), "TestVault").await?; + + // Get vault + let config = VaultConfig::read_from(dir.join(SERVER_FILE_VAULT)).await?; + let Some(vault) = Vault::init(config, &dir) else { + return Err(Error::new(std::io::ErrorKind::NotFound, "Vault not found!")); + }; + + // Add members + let sharer_id: MemberId = "sharer".to_string(); + let target_member_id: MemberId = "target".to_string(); + + vault + .register_member_to_vault(Member::new(&sharer_id)) + .await?; + vault + .register_member_to_vault(Member::new(&target_member_id)) + .await?; + + // Create source and target sheets + let source_sheet_name: SheetName = "source".to_string(); + let target_sheet_name: SheetName = "target".to_string(); + + let _source_sheet = vault.create_sheet(&source_sheet_name, &sharer_id).await?; + let _target_sheet = vault + .create_sheet(&target_sheet_name, &target_member_id) + .await?; + 
+ // Add mappings to source sheet + let mut source_sheet = vault.sheet(&source_sheet_name).await?; + + let file1_path = SheetPathBuf::from("src/file1.rs"); + let file2_path = SheetPathBuf::from("src/file2.rs"); + let file1_id = VirtualFileId::from("file1_id_1"); + let file2_id = VirtualFileId::from("file2_id_1"); + + source_sheet + .add_mapping(file1_path.clone(), file1_id.clone(), "1.0.0".to_string()) + .await?; + source_sheet + .add_mapping(file2_path.clone(), file2_id.clone(), "1.0.0".to_string()) + .await?; + + source_sheet.persist().await?; + + // Share mappings + // Need to get the sheet again after persist + let source_sheet = vault.sheet(&source_sheet_name).await?; + source_sheet + .share_mappings( + &target_sheet_name, + vec![file1_path.clone(), file2_path.clone()], + &sharer_id, + "Test share".to_string(), + ) + .await?; + + // Get the share + let target_sheet = vault.sheet(&target_sheet_name).await?; + let shares = target_sheet.get_shares().await?; + assert_eq!(shares.len(), 1); + let share = shares[0].clone(); + + // Test 4: Safe mode merge (should succeed with no conflicts) + let result = target_sheet + .merge_share(share.clone(), ShareMergeMode::Safe) + .await; + + assert!( + result.is_ok(), + "Safe mode should succeed with no conflicts " + ); + + // Verify mappings were added to target sheet + let updated_target_sheet = vault.sheet(&target_sheet_name).await?; + assert_eq!(updated_target_sheet.mapping().len(), 2); + assert!(updated_target_sheet.mapping().contains_key(&file1_path)); + assert!(updated_target_sheet.mapping().contains_key(&file2_path)); + + // Clean up + vault.remove_member_from_vault(&sharer_id)?; + vault.remove_member_from_vault(&target_member_id)?; + + Ok(()) +} + +#[tokio::test] +async fn test_share_merge_conflicts() -> Result<(), std::io::Error> { + let dir = get_test_dir("share_conflicts").await?; + + // Setup vault + Vault::setup_vault(dir.clone(), "TestVault").await?; + + // Get vault + let config = 
VaultConfig::read_from(dir.join(SERVER_FILE_VAULT)).await?; + let Some(vault) = Vault::init(config, &dir) else { + return Err(Error::new(std::io::ErrorKind::NotFound, "Vault not found!")); + }; + + // Add members + let sharer_id: MemberId = "sharer".to_string(); + let target_member_id: MemberId = "target".to_string(); + + vault + .register_member_to_vault(Member::new(&sharer_id)) + .await?; + vault + .register_member_to_vault(Member::new(&target_member_id)) + .await?; + + // Create source and target sheets + let source_sheet_name: SheetName = "source".to_string(); + let target_sheet_name: SheetName = "target".to_string(); + + let _source_sheet = vault.create_sheet(&source_sheet_name, &sharer_id).await?; + let _target_sheet = vault + .create_sheet(&target_sheet_name, &target_member_id) + .await?; + + // Add conflicting mappings to both sheets + let mut source_sheet = vault.sheet(&source_sheet_name).await?; + let mut target_sheet_mut = vault.sheet(&target_sheet_name).await?; + + let conflicting_path = SheetPathBuf::from("src/conflicting.rs"); + let source_file_id = VirtualFileId::from("source_file_id_1"); + let target_file_id = VirtualFileId::from("target_file_id_1"); + + // Add same path with different IDs to both sheets (conflict) + source_sheet + .add_mapping( + conflicting_path.clone(), + source_file_id.clone(), + "1.0.0".to_string(), + ) + .await?; + + target_sheet_mut + .add_mapping( + conflicting_path.clone(), + target_file_id.clone(), + "1.0.0".to_string(), + ) + .await?; + + source_sheet.persist().await?; + target_sheet_mut.persist().await?; + + // Share the conflicting mapping + // Need to get the sheet again after persist + let source_sheet = vault.sheet(&source_sheet_name).await?; + source_sheet + .share_mappings( + &target_sheet_name, + vec![conflicting_path.clone()], + &sharer_id, + "Conflicting share".to_string(), + ) + .await?; + + // Get the share + let target_sheet = vault.sheet(&target_sheet_name).await?; + let shares = 
target_sheet.get_shares().await?; + assert_eq!(shares.len(), 1); + let share = shares[0].clone(); + + // Test 5: Safe mode merge with conflict (should fail) + let target_sheet_clone = vault.sheet(&target_sheet_name).await?; + let result = target_sheet_clone + .merge_share(share.clone(), ShareMergeMode::Safe) + .await; + + assert!(result.is_err(), "Safe mode should fail with conflicts"); + + // Test 6: Overwrite mode merge with conflict (should succeed) + let target_sheet_clone = vault.sheet(&target_sheet_name).await?; + let result = target_sheet_clone + .merge_share(share.clone(), ShareMergeMode::Overwrite) + .await; + + assert!( + result.is_ok(), + "Overwrite mode should succeed with conflicts" + ); + + // Verify the mapping was overwritten + let updated_target_sheet = vault.sheet(&target_sheet_name).await?; + let mapping = updated_target_sheet.mapping().get(&conflicting_path); + assert!(mapping.is_some()); + assert_eq!(mapping.unwrap().id, source_file_id); // Should be source's ID, not target's + + // Clean up + vault.remove_member_from_vault(&sharer_id)?; + vault.remove_member_from_vault(&target_member_id)?; + + Ok(()) +} + +#[tokio::test] +async fn test_share_skip_mode() -> Result<(), std::io::Error> { + let dir = get_test_dir("share_skip_mode").await?; + + // Setup vault + Vault::setup_vault(dir.clone(), "TestVault").await?; + + // Get vault + let config = VaultConfig::read_from(dir.join(SERVER_FILE_VAULT)).await?; + let Some(vault) = Vault::init(config, &dir) else { + return Err(Error::new(std::io::ErrorKind::NotFound, "Vault not found!")); + }; + + // Add members + let sharer_id: MemberId = "sharer".to_string(); + let target_member_id: MemberId = "target".to_string(); + + vault + .register_member_to_vault(Member::new(&sharer_id)) + .await?; + vault + .register_member_to_vault(Member::new(&target_member_id)) + .await?; + + // Create source and target sheets + let source_sheet_name: SheetName = "source".to_string(); + let target_sheet_name: SheetName = 
"target".to_string(); + + let _source_sheet = vault.create_sheet(&source_sheet_name, &sharer_id).await?; + let _target_sheet = vault + .create_sheet(&target_sheet_name, &target_member_id) + .await?; + + // Add mappings to both sheets + let mut source_sheet = vault.sheet(&source_sheet_name).await?; + let mut target_sheet_mut = vault.sheet(&target_sheet_name).await?; + + let conflicting_path = SheetPathBuf::from("src/conflicting.rs"); + let non_conflicting_path = SheetPathBuf::from("src/non_conflicting.rs"); + + let source_file_id = VirtualFileId::from("source_file_id_2"); + let target_file_id = VirtualFileId::from("target_file_id_2"); + let non_conflicting_id = VirtualFileId::from("non_conflicting_id_1"); + + // Add conflicting mapping to both sheets + source_sheet + .add_mapping( + conflicting_path.clone(), + source_file_id.clone(), + "1.0.0".to_string(), + ) + .await?; + + target_sheet_mut + .add_mapping( + conflicting_path.clone(), + target_file_id.clone(), + "1.0.0".to_string(), + ) + .await?; + + // Add non-conflicting mapping only to source + source_sheet + .add_mapping( + non_conflicting_path.clone(), + non_conflicting_id.clone(), + "1.0.0".to_string(), + ) + .await?; + + source_sheet.persist().await?; + target_sheet_mut.persist().await?; + + // Share both mappings + // Need to get the sheet again after persist + let source_sheet = vault.sheet(&source_sheet_name).await?; + source_sheet + .share_mappings( + &target_sheet_name, + vec![conflicting_path.clone(), non_conflicting_path.clone()], + &sharer_id, + "Mixed share".to_string(), + ) + .await?; + + // Get the share + let target_sheet = vault.sheet(&target_sheet_name).await?; + let shares = target_sheet.get_shares().await?; + assert_eq!(shares.len(), 1); + let share = shares[0].clone(); + + // Test 7: Skip mode merge with conflict (should skip conflicting, add non-conflicting) + let result = target_sheet + .merge_share(share.clone(), ShareMergeMode::Skip) + .await; + + assert!(result.is_ok(), "Skip mode 
should succeed"); + + // Verify only non-conflicting mapping was added + let updated_target_sheet = vault.sheet(&target_sheet_name).await?; + + // Conflicting mapping should still have target's ID + let conflicting_mapping = updated_target_sheet.mapping().get(&conflicting_path); + assert!(conflicting_mapping.is_some()); + assert_eq!(conflicting_mapping.unwrap().id, target_file_id); + + // Non-conflicting mapping should be added + let non_conflicting_mapping = updated_target_sheet.mapping().get(&non_conflicting_path); + assert!(non_conflicting_mapping.is_some()); + assert_eq!(non_conflicting_mapping.unwrap().id, non_conflicting_id); + + // Clean up + vault.remove_member_from_vault(&sharer_id)?; + vault.remove_member_from_vault(&target_member_id)?; + + Ok(()) +} + +#[tokio::test] +async fn test_share_removal() -> Result<(), std::io::Error> { + let dir = get_test_dir("share_removal").await?; + + // Setup vault + Vault::setup_vault(dir.clone(), "TestVault").await?; + + // Get vault + let config = VaultConfig::read_from(dir.join(SERVER_FILE_VAULT)).await?; + let Some(vault) = Vault::init(config, &dir) else { + return Err(Error::new(std::io::ErrorKind::NotFound, "Vault not found!")); + }; + + // Add members + let sharer_id: MemberId = "sharer".to_string(); + let target_member_id: MemberId = "target".to_string(); + + vault + .register_member_to_vault(Member::new(&sharer_id)) + .await?; + vault + .register_member_to_vault(Member::new(&target_member_id)) + .await?; + + // Create source and target sheets + let source_sheet_name: SheetName = "source".to_string(); + let target_sheet_name: SheetName = "target".to_string(); + + let _source_sheet = vault.create_sheet(&source_sheet_name, &sharer_id).await?; + let _target_sheet = vault + .create_sheet(&target_sheet_name, &target_member_id) + .await?; + + // Add mapping to source sheet + let mut source_sheet = vault.sheet(&source_sheet_name).await?; + + let file_path = SheetPathBuf::from("src/file.rs"); + let file_id = 
VirtualFileId::from("file_id_1"); + + source_sheet + .add_mapping(file_path.clone(), file_id.clone(), "1.0.0".to_string()) + .await?; + + source_sheet.persist().await?; + + // Need to get the sheet again after persist + let source_sheet = vault.sheet(&source_sheet_name).await?; + // Share mapping + source_sheet + .share_mappings( + &target_sheet_name, + vec![file_path.clone()], + &sharer_id, + "Test share for removal".to_string(), + ) + .await?; + + // Get the share + let target_sheet = vault.sheet(&target_sheet_name).await?; + let shares = target_sheet.get_shares().await?; + assert_eq!(shares.len(), 1); + let share = shares[0].clone(); + + // Test 8: Remove share + let result = share.remove().await; + + // Check if removal succeeded or failed gracefully + match result { + Ok(_) => { + // Share was successfully removed + let shares_after_removal = target_sheet.get_shares().await?; + assert_eq!(shares_after_removal.len(), 0); + } + Err((returned_share, _error)) => { + // Share removal failed, but we got the share back. + // Error message may vary, just check that we got an error + // The share should be returned in the error + assert_eq!(returned_share.sharer, sharer_id); + } + } + + // Clean up + vault.remove_member_from_vault(&sharer_id)?; + vault.remove_member_from_vault(&target_member_id)?; + + Ok(()) +} + +#[tokio::test] +async fn test_share_error_conditions() -> Result<(), std::io::Error> { + let dir = get_test_dir("share_errors").await?; + + // Setup vault + Vault::setup_vault(dir.clone(), "TestVault").await?; + + // Get vault + let config = VaultConfig::read_from(dir.join(SERVER_FILE_VAULT)).await?; + let Some(vault) = Vault::init(config, &dir) else { + return Err(Error::new(std::io::ErrorKind::NotFound, "Vault not found!")); + }; + + // Add member + let sharer_id: MemberId = "sharer".to_string(); + vault + .register_member_to_vault(Member::new(&sharer_id)) + .await?; + + // Create source sheet + let source_sheet_name: SheetName = "source".to_string(); + let 
_source_sheet = vault.create_sheet(&source_sheet_name, &sharer_id).await?; + + // Add mapping to source sheet + let mut source_sheet = vault.sheet(&source_sheet_name).await?; + + let file_path = SheetPathBuf::from("src/file.rs"); + let file_id = VirtualFileId::from("file_id_2"); + + source_sheet + .add_mapping(file_path.clone(), file_id.clone(), "1.0.0".to_string()) + .await?; + + source_sheet.persist().await?; + + // Test 9: Share to non-existent sheet should fail + let non_existent_sheet: SheetName = "non_existent".to_string(); + // Need to get the sheet again after persist + let source_sheet = vault.sheet(&source_sheet_name).await?; + let result = source_sheet + .share_mappings( + &non_existent_sheet, + vec![file_path.clone()], + &sharer_id, + "Test".to_string(), + ) + .await; + + assert!(result.is_err()); + + // Test 10: Share non-existent mapping should fail + let target_sheet_name: SheetName = "target".to_string(); + let _target_sheet = vault.create_sheet(&target_sheet_name, &sharer_id).await?; + + let non_existent_path = SheetPathBuf::from("src/non_existent.rs"); + let result = source_sheet + .share_mappings( + &target_sheet_name, + vec![non_existent_path], + &sharer_id, + "Test".to_string(), + ) + .await; + + assert!(result.is_err()); + + // Test 11: Merge non-existent share should fail + let target_sheet = vault.sheet(&target_sheet_name).await?; + let non_existent_share_id: SheetShareId = "non_existent_share".to_string(); + let result = target_sheet + .merge_share_by_id(&non_existent_share_id, ShareMergeMode::Safe) + .await; + + assert!(result.is_err()); + + // Clean up + vault.remove_member_from_vault(&sharer_id)?; + + Ok(()) +} + +#[tokio::test] +async fn test_share_id_generation() -> Result<(), std::io::Error> { + // Test 12: Share ID generation + let sharer_id: MemberId = "test_sharer".to_string(); + + // Generate multiple IDs to ensure they're different + let id1 = Share::gen_share_id(&sharer_id); + let id2 = Share::gen_share_id(&sharer_id); + let id3 
= Share::gen_share_id(&sharer_id); + + // IDs should be different due to random component + assert_ne!(id1, id2); + assert_ne!(id1, id3); + assert_ne!(id2, id3); + + // IDs should start with sharer name + assert!(id1.starts_with(&format!("test_sharer@"))); + assert!(id2.starts_with(&format!("test_sharer@"))); + assert!(id3.starts_with(&format!("test_sharer@"))); + + Ok(()) +} diff --git a/data/tests/src/test_vault_setup_and_member_register.rs b/data/tests/src/test_vault_setup_and_member_register.rs new file mode 100644 index 0000000..286a4a2 --- /dev/null +++ b/data/tests/src/test_vault_setup_and_member_register.rs @@ -0,0 +1,67 @@ +use std::io::Error; + +use cfg_file::config::ConfigFile; +use vcs_data::{ + constants::{ + SERVER_FILE_MEMBER_INFO, SERVER_FILE_README, SERVER_FILE_VAULT, SERVER_PATH_MEMBER_PUB, + SERVER_PATH_MEMBERS, SERVER_PATH_SHEETS, SERVER_PATH_VF_ROOT, + }, + data::{ + member::Member, + vault::{Vault, config::VaultConfig}, + }, +}; + +use crate::get_test_dir; + +#[tokio::test] +async fn test_vault_setup_and_member_register() -> Result<(), std::io::Error> { + let dir = get_test_dir("member_register").await?; + + // Setup vault + Vault::setup_vault(dir.clone(), "TestVault").await?; + + // Check if the following files and directories are created in `dir`: + // Files: SERVER_FILE_VAULT, SERVER_FILE_README + // Directories: SERVER_PATH_SHEETS, + // SERVER_PATH_MEMBERS, + // SERVER_PATH_MEMBER_PUB, + // SERVER_PATH_VIRTUAL_FILE_ROOT + assert!(dir.join(SERVER_FILE_VAULT).exists()); + assert!(dir.join(SERVER_FILE_README).exists()); + assert!(dir.join(SERVER_PATH_SHEETS).exists()); + assert!(dir.join(SERVER_PATH_MEMBERS).exists()); + assert!(dir.join(SERVER_PATH_MEMBER_PUB).exists()); + assert!(dir.join(SERVER_PATH_VF_ROOT).exists()); + + // Get vault + let config = VaultConfig::read_from(dir.join(SERVER_FILE_VAULT)).await?; + let Some(vault) = Vault::init(config, &dir) else { + return Err(Error::new(std::io::ErrorKind::NotFound, "Vault not found!")); + 
}; + + // Add member + let member_id = "test_member"; + vault + .register_member_to_vault(Member::new(member_id)) + .await?; + + const ID_PARAM: &str = "{member_id}"; + + // Check if the member info file exists + assert!( + dir.join(SERVER_FILE_MEMBER_INFO.replace(ID_PARAM, member_id)) + .exists() + ); + + // Remove member + vault.remove_member_from_vault(&member_id.to_string())?; + + // Check if the member info file not exists + assert!( + !dir.join(SERVER_FILE_MEMBER_INFO.replace(ID_PARAM, member_id)) + .exists() + ); + + Ok(()) +} diff --git a/data/tests/src/test_virtual_file_creation_and_update.rs b/data/tests/src/test_virtual_file_creation_and_update.rs new file mode 100644 index 0000000..2d9d393 --- /dev/null +++ b/data/tests/src/test_virtual_file_creation_and_update.rs @@ -0,0 +1,162 @@ +use std::time::Duration; + +use cfg_file::config::ConfigFile; +use tcp_connection_test::{ + handle::{ClientHandle, ServerHandle}, + target::TcpServerTarget, + target_configure::ServerTargetConfig, +}; +use tokio::{ + join, + time::{sleep, timeout}, +}; +use vcs_data::{ + constants::SERVER_FILE_VAULT, + data::{ + member::Member, + vault::{Vault, config::VaultConfig, virtual_file::VirtualFileVersionDescription}, + }, +}; + +use crate::get_test_dir; + +struct VirtualFileCreateClientHandle; +struct VirtualFileCreateServerHandle; + +impl ClientHandle for VirtualFileCreateClientHandle { + async fn process(mut instance: tcp_connection::instance::ConnectionInstance) { + let dir = get_test_dir("virtual_file_creation_and_update_2") + .await + .unwrap(); + // Create first test file for virtual file creation + let test_content_1 = b"Test file content for virtual file creation"; + let temp_file_path_1 = dir.join("test_virtual_file_1.txt"); + + tokio::fs::write(&temp_file_path_1, test_content_1) + .await + .unwrap(); + + // Send the first file to server for virtual file creation + instance.write_file(&temp_file_path_1).await.unwrap(); + + // Create second test file for virtual file update 
+ let test_content_2 = b"Updated test file content for virtual file"; + let temp_file_path_2 = dir.join("test_virtual_file_2.txt"); + + tokio::fs::write(&temp_file_path_2, test_content_2) + .await + .unwrap(); + + // Send the second file to server for virtual file update + instance.write_file(&temp_file_path_2).await.unwrap(); + } +} + +impl ServerHandle for VirtualFileCreateServerHandle { + async fn process(mut instance: tcp_connection::instance::ConnectionInstance) { + let dir = get_test_dir("virtual_file_creation_and_update") + .await + .unwrap(); + + // Setup vault + Vault::setup_vault(dir.clone(), "TestVault").await.unwrap(); + + // Read vault + let Some(vault) = Vault::init( + VaultConfig::read_from(dir.join(SERVER_FILE_VAULT)) + .await + .unwrap(), + &dir, + ) else { + panic!("No vault found!"); + }; + + // Register member + let member_id = "test_member"; + vault + .register_member_to_vault(Member::new(member_id)) + .await + .unwrap(); + + // Create visual file + let virtual_file_id = vault + .create_virtual_file_from_connection(&mut instance, &member_id.to_string()) + .await + .unwrap(); + + // Grant edit right to member + vault + .grant_virtual_file_edit_right(&member_id.to_string(), &virtual_file_id) + .await + .unwrap(); + + // Update visual file + vault + .update_virtual_file_from_connection( + &mut instance, + &member_id.to_string(), + &virtual_file_id, + &"2".to_string(), + VirtualFileVersionDescription { + creator: member_id.to_string(), + description: "Update".to_string(), + }, + ) + .await + .unwrap(); + } +} + +#[tokio::test] +async fn test_virtual_file_creation_and_update() -> Result<(), std::io::Error> { + let host = "localhost:5009"; + + // Server setup + let Ok(server_target) = TcpServerTarget::< + VirtualFileCreateClientHandle, + VirtualFileCreateServerHandle, + >::from_domain(host) + .await + else { + panic!("Test target built failed from a domain named `{}`", host); + }; + + // Client setup + let Ok(client_target) = TcpServerTarget::< + 
VirtualFileCreateClientHandle, + VirtualFileCreateServerHandle, + >::from_domain(host) + .await + else { + panic!("Test target built failed from a domain named `{}`", host); + }; + + let future_server = async move { + // Only process once + let configured_server = server_target.server_cfg(ServerTargetConfig::default().once()); + + // Listen here + let _ = configured_server.listen().await; + }; + + let future_client = async move { + // Wait for server start + let _ = sleep(Duration::from_secs_f32(1.5)).await; + + // Connect here + let _ = client_target.connect().await; + }; + + let test_timeout = Duration::from_secs(15); + + timeout(test_timeout, async { join!(future_client, future_server) }) + .await + .map_err(|_| { + std::io::Error::new( + std::io::ErrorKind::TimedOut, + format!("Test timed out after {:?}", test_timeout), + ) + })?; + + Ok(()) +} diff --git a/docs/Cargo.toml b/docs/Cargo.toml new file mode 100644 index 0000000..285b83d --- /dev/null +++ b/docs/Cargo.toml @@ -0,0 +1,6 @@ +[package] +name = "vcs_docs" +edition = "2024" +version.workspace = true + +[dependencies] diff --git a/docs/build.rs b/docs/build.rs new file mode 100644 index 0000000..53679db --- /dev/null +++ b/docs/build.rs @@ -0,0 +1,196 @@ +use std::env; +use std::fs; +use std::io::{self, Write}; +use std::path::Path; + +// Template markers for code generation +const TEMPLATE_DOCUMENT_BEGIN: &str = "--- TEMPLATE DOCUMENT BEGIN ---"; +const TEMPLATE_DOCUMENT_END: &str = "--- TEMPLATE DOCUMENT END ---"; +const TEMPLATE_FUNC_BEGIN: &str = "--- TEMPLATE FUNC BEGIN ---"; +const TEMPLATE_FUNC_END: &str = "--- TEMPLATE FUNC END ---"; +const TEMPLATE_LIST_BEGIN: &str = "--- TEMPLATE LIST BEGIN ---"; +const TEMPLATE_LIST_END: &str = "--- TEMPLATE LIST END ---"; + +// Template parameter patterns for substitution +const PARAM_DOCUMENT_PATH: &str = "{{DOCUMENT_PATH}}"; +const PARAM_DOCUMENT_CONSTANT_NAME: &str = "{{DOCUMENT_CONSTANT_NAME}}"; +const PARAM_DOCUMENT_CONTENT: &str = 
"{{DOCUMENT_CONTENT}}"; +const PARAM_DOCUMENT_PATH_SNAKE_CASE: &str = "{{DOCUMENT_PATH_SNAKE_CASE}}"; + +fn main() -> io::Result<()> { + println!("cargo:rerun-if-changed=src/docs.rs.template"); + println!("cargo:rerun-if-changed=Documents"); + + let out_dir = env::var("OUT_DIR").unwrap(); + let dest_path = Path::new(&out_dir).join("docs.rs"); + + // Read all markdown files from docs directory recursively + let docs_dir = Path::new("./Documents"); + let mut documents = Vec::new(); + + if docs_dir.exists() { + collect_text_files(docs_dir, &mut documents)?; + } + + // Read template file + let template_path = Path::new("src/docs.rs.template"); + let template_content = fs::read_to_string(template_path)?; + + // Extract template sections preserving original indentation + let document_template = template_content + .split(TEMPLATE_DOCUMENT_BEGIN) + .nth(1) + .and_then(|s| s.split(TEMPLATE_DOCUMENT_END).next()) + .unwrap_or("") + .trim_start_matches('\n') + .trim_end_matches('\n'); + + let match_arm_template = template_content + .split(TEMPLATE_FUNC_BEGIN) + .nth(1) + .and_then(|s| s.split(TEMPLATE_FUNC_END).next()) + .unwrap_or("") + .trim_start_matches('\n') + .trim_end_matches('\n'); + + // Generate document blocks and match arms + let mut document_blocks = String::new(); + let mut match_arms = String::new(); + let mut list_items = String::new(); + + for (relative_path, content) in &documents { + // Calculate parameters for template substitution + let document_path = format!("./docs/Documents/{}", relative_path); + + // Generate constant name from relative path + let document_constant_name = relative_path + .replace(['/', '\\', '-'], "_") + .replace(".md", "") + .replace(".txt", "") + .replace(".toml", "") + .replace(".yaml", "") + .replace(".yml", "") + .replace(".json", "") + .replace(".rs", "") + .to_uppercase(); + + // Generate snake_case name for function matching + let document_path_snake_case = relative_path + .replace(['/', '\\', '-'], "_") + .replace(".md", "") 
+ .replace(".txt", "") + .replace(".toml", "") + .replace(".yaml", "") + .replace(".yml", "") + .replace(".json", "") + .replace(".rs", "") + .to_lowercase(); + + // Escape double quotes in content + let escaped_content = content.trim().replace('\"', "\\\""); + + // Replace template parameters in document block preserving indentation + let document_block = document_template + .replace(PARAM_DOCUMENT_PATH, &document_path) + .replace(PARAM_DOCUMENT_CONSTANT_NAME, &document_constant_name) + .replace(PARAM_DOCUMENT_CONTENT, &escaped_content) + .replace("r#\"\"#", &format!("r#\"{}\"#", escaped_content)); + + document_blocks.push_str(&document_block); + document_blocks.push_str("\n\n"); + + // Replace template parameters in match arm preserving indentation + let match_arm = match_arm_template + .replace(PARAM_DOCUMENT_PATH_SNAKE_CASE, &document_path_snake_case) + .replace(PARAM_DOCUMENT_CONSTANT_NAME, &document_constant_name); + + match_arms.push_str(&match_arm); + match_arms.push('\n'); + + // Generate list item for documents() function + let list_item = format!(" \"{}\".to_string(),", document_path_snake_case); + list_items.push_str(&list_item); + list_items.push('\n'); + } + + // Remove trailing newline from the last list item + if !list_items.is_empty() { + list_items.pop(); + } + + // Build final output by replacing template sections + let mut output = String::new(); + + // Add header before document blocks + if let Some(header) = template_content.split(TEMPLATE_DOCUMENT_BEGIN).next() { + output.push_str(header.trim()); + output.push_str("\n\n"); + } + + // Add document blocks + output.push_str(&document_blocks); + + // Add function section + if let Some(func_section) = template_content.split(TEMPLATE_FUNC_BEGIN).next() + && let Some(rest) = func_section.split(TEMPLATE_DOCUMENT_END).nth(1) + { + output.push_str(rest.trim()); + output.push('\n'); + } + + // Add match arms + output.push_str(&match_arms); + + // Add list items for documents() function + if let 
Some(list_section) = template_content.split(TEMPLATE_LIST_BEGIN).next() + && let Some(rest) = list_section.split(TEMPLATE_FUNC_END).nth(1) + { + output.push_str(rest.trim()); + output.push('\n'); + } + output.push_str(&list_items); + + // Add footer + if let Some(footer) = template_content.split(TEMPLATE_LIST_END).nth(1) { + // Preserve original indentation in footer + output.push_str(footer); + } + + // Write generated file + let mut file = fs::File::create(&dest_path)?; + file.write_all(output.as_bytes())?; + + // Copy to src directory for development + let src_dest_path = Path::new("src/docs.rs"); + fs::write(src_dest_path, output)?; + + Ok(()) +} + +fn collect_text_files(dir: &Path, documents: &mut Vec<(String, String)>) -> io::Result<()> { + for entry in fs::read_dir(dir)? { + let entry = entry?; + let path = entry.path(); + + if path.is_dir() { + collect_text_files(&path, documents)?; + } else if path.extension().is_some_and(|ext| { + ext == "md" + || ext == "txt" + || ext == "toml" + || ext == "yaml" + || ext == "yml" + || ext == "json" + || ext == "rs" + }) && let Ok(relative_path) = path.strip_prefix("./Documents") + && let Some(relative_path_str) = relative_path.to_str() + { + let content = fs::read_to_string(&path)?; + documents.push(( + relative_path_str.trim_start_matches('/').to_string(), + content, + )); + } + } + Ok(()) +} diff --git a/docs/src/docs.rs b/docs/src/docs.rs new file mode 100644 index 0000000..c12b737 --- /dev/null +++ b/docs/src/docs.rs @@ -0,0 +1,387 @@ +// Auto-generated code. 
+ + +/// From ./docs/Documents/ASCII_YIZI.txt +pub const ASCII_YIZI: &str = "#BANNER START# + ████████ ████████ +██▒▒▒▒▒▒▒▒██ ██▒▒▒▒▒▒▒▒██ +██ ▒▒██ ██▒▒ ██ █████ ██ ██ ██████ █████ +██ ▒▒████████▒▒ ██ ▒▒▒██ ██ ██ ██████ ██████ +██ ▒▒▒▒▒▒▒▒ ██ ██ ██ ██ ███▒▒▒█ █▒▒▒▒█ +██ ██ ██ ██ ██ ███ ▒ ████ ▒ +██ ██ ██ ██ ██ ███ ▒████ +██ ████ ████ ██ ██ ▒██ ██▒ ███ ▒▒▒██ +██ ████ ████ ██ █ ██ ██ ██ ███ █ ██ ██ +██ ████ ████ ██ █ ██ ▒████▒ ▒██████ ██████ +██ ▒▒▒▒ ▒▒▒▒ █ ██ ▒████ ▒██▒ ██████ ▒████▒ +██ ██ ██ ▒▒▒▒ ▒▒ ▒▒▒▒▒▒ ▒▒▒▒ +██ ██████████ ██ +██ ██ {banner_line_1} + ████████████████████████████████ {banner_line_2} + ▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ {banner_line_3} +#BANNER END#"; + + + +/// From ./docs/Documents/docs\collaboration.txt +pub const DOCS_COLLABORATION: &str = "NO CONTENT YET :("; + + + +/// From ./docs/Documents/docs\get_started.txt +pub const DOCS_GET_STARTED: &str = "NO CONTENT YET :("; + + + +/// From ./docs/Documents/profiles\vault.toml +pub const PROFILES_VAULT: &str = "# +# +# ████████ ████████ +# ██▒▒▒▒▒▒▒▒██ ██▒▒▒▒▒▒▒▒██ +# ██ ▒▒██ ██▒▒ ██ █████ ██ ██ ██ ██ +# ██ ▒▒████████▒▒ ██ ▒▒▒██ ██ ██ ██ ██ +# ██ ▒▒▒▒▒▒▒▒ ██ ██ ██ ██ ██ ██ +# ██ ██ ██ ██ ██ ██ ██ +# ██ ██ ██ ██ ██ ██ ██ +# ██ ████ ████ ██ ██ ▒██ ██▒ ▒██ ██▒ +# ██ ████ ████ ██ █ ██ ██ ██ ██ ██ +# ██ ████ ████ ██ █ ██ ▒████▒ ▒████▒ +# ██ ▒▒▒▒ ▒▒▒▒ █ ██ ▒████ ▒██▒ ▒██▒ +# ██ ██ ██ ▒▒▒▒ ▒▒ ▒▒ +# ██ ██████████ ██ +# ██ ██ +# ████████████████████████████████ JustEnoughVCS Vault Profile +# ▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ =========================== +# +# + +# Vault \"{vault_name}\" created by {user_name} ({date_format}) + +######################### +### Base Informations ### +######################### +name = \"{vault_name}\" +uuid = \"{vault_uuid}\" # Don't touch it! 
+ +# Administrator list, indicating members who can switch identity to 'host' +hosts = [] + +[profile] + +#################### +### Connectivity ### +#################### + +# Bind address: 127.0.0.1 for localhost only, 0.0.0.0 to allow remote access +bind = \"127.0.0.1\" # (fallback: \"127.0.0.1\") + +# Bind port +port = 25331 # (fallback: 25331) + +# Enable LAN discovery: clients can discover this service on the local network +# TIP: Not yet supported +lan_discovery = \"disable\" # (enable, disable, fallback: disable) + +############# +### Debug ### +############# + +# Enable logger +logger = \"yes\" # (yes, no, fallback: no) + +# Logger output level +logger_level = \"info\" # (debug, trace, info, fallback: info) + +################################# +### Authentication & Security ### +################################# + +# Authentication mode +# TIP: Currently only \"key\" is supported +auth_mode = \"key\" # (key, password, noauth, fallback: password)"; + + + +/// From ./docs/Documents/README.md +pub const README: &str = "# JustEnoughVCS"; + + + +/// From ./docs/Documents/readmes\local_workspace_todolist.md +pub const READMES_LOCAL_WORKSPACE_TODOLIST: &str = "# Setup a Workspace + +You have created an empty local **Workspace** using `jv create` or `jv init`. + +At this point, the workspace exists only as a local structure. +It is **not yet associated with any identity or upstream Vault**. + +Follow the steps below to complete the initial setup. + +## Account Setup + +> [!TIP] +> If you have already registered an account on this machine, you can skip this section. + +An account represents **this machine acting on your behalf**. +You can create an account and generate a private key using the following commands. + +```bash +# Make sure OpenSSL is installed on your system. +# Create an account and automatically generate a private key using OpenSSL. +jv account add pan --keygen + +# If you already have a private key, you can associate it with an account. 
+jv account movekey pan ./your_private_key.pem +``` + +After creating the private key, generate the corresponding public key. +This also requires `OpenSSL` support. + +```bash +# Generate the public key in the current directory. +jv account genpub pan ./ +``` + +**Send the generated public key file to a Host of the upstream Vault.** +Only after the key is registered by the Vault can this workspace authenticate. + +## Login + +> In the following example, we assume: +> +> * the account name is `pan` +> * the upstream Vault address is `127.0.0.1` + +Navigate to the workspace directory (the directory containing this file), then run: + +```bash +# Log in as pan to the upstream Vault at 127.0.0.1 +# -C skips confirmation prompts +jv login pan 127.0.0.1 -C +``` + +This command performs the following steps internally: + +```bash +jv account as pan # Bind the workspace to account pan +jv direct 127.0.0.1 -C # Set the upstream Vault address +jv update # Fetch initial structure and metadata from upstream +rm SETUP.md # Remove this setup document +``` + +After login, the workspace becomes a **live participant** connected to the upstream Vault. + +## Completion + +At this point, the workspace is fully set up: + +* An account identity is bound locally +* An upstream Vault is configured +* Initial data has been synchronized + +Normally, `jv login` removes this file automatically. +If the file still exists due to a deletion failure, please remove it manually to keep the workspace clean. + +Once this file is gone, the workspace is considered **ready for daily use**. + +## Why does this file delete itself? + +A freshly created JVCS workspace is considered **clean** only when no sheets are in use and no unexplained files exist. + +During the initial setup phase, this `SETUP.md` file is the only allowed exception. +It exists solely to guide the workspace into a connected and explainable state. 
+ +Once the workspace is connected to an upstream Vault and enters normal operation, +every file in the workspace is expected to be **explainable by structure**. + +If this setup file remains: + +* it becomes an unexplained local file +* `JustEnoughVCS` cannot determine which sheet it belongs to +* and any subsequent `jv use` operation would be ambiguous + +To prevent this ambiguity, JVCS enforces a strict rule: + +**A workspace with unexplained files is not allowed to enter active use.** + +Deleting `SETUP.md` marks the end of the setup phase and confirms that: + +* all remaining files are intentional +* their placement can be explained +* and the workspace is ready to participate in structure-driven operations + +This is why the setup document removes itself. +It is not cleanup — it is a boundary."; + + + +/// From ./docs/Documents/readmes\vault_readme.md +pub const READMES_VAULT_README: &str = "# Setup an Upstream Vault + +Thank you for using `JustEnoughVCS`. + +This document guides you through setting up an **Upstream Vault** — a shared source of structure and content that other workspaces may depend on. + +## Configuration File + +Before starting the Vault service with `jvv listen`, you need to configure `vault.toml`. + +This file defines the identity, connectivity, and responsibility boundaries of the Vault. + +### Adding Hosts + +Set the `hosts` parameter to specify which members are allowed to switch into the **Host** role for this Vault. + +```toml +# Pan and Alice are allowed to operate this Vault as hosts +hosts = [ \"pan\", \"alice\" ] +``` + +Members listed here can switch their identity as follows: + +```bash +jv as host/pan +``` + +> Becoming a Host means operating from a position that may affect other members. +> This role is not a permanent identity, but a responsibility-bearing context. + +### Configuring Connection + +Modify the `bind` parameter to `0.0.0.0` to allow access from other devices on the local network. 
+ +```toml +bind = \"0.0.0.0\" +``` + +> [!TIP] +> `JustEnoughVCS` uses port **25331** by default. +> You can change the listening port by modifying the `port` parameter if needed. + +### Disabling Logger (Optional) + +If you prefer a cleaner console output, you can disable logging: + +```toml +logger = \"no\" +``` + +## Member Account Setup + +Run the following command in the root directory of the **Vault** to register a member: + +```bash +jvv member register pan +``` + +Registering a member only creates its identity record in the Vault. +It does **not** automatically make the member login-ready. + +An authentication method must be registered separately. + +> [!NOTE] +> Currently, `JustEnoughVCS` supports **key-based authentication** only. + +Place the public key file provided by the member (`name.pem`) into the Vault’s `./key` directory. + +For example, after registering a member named `pan`, the Vault directory structure should look like this: + +``` +. +├── key +│ └── pan.pem <- Public key file +├── members +│ ├── pan.bcfg <- Member registration info +│ └── host.bcfg +├── README.md +├── sheets +│ └── ref.bcfg +├── storage +└── vault.toml +``` + +## Completion + +At this point, the Vault is fully configured: + +* Member identities are registered +* Host roles are defined +* Authentication materials are in place + +You can now start the Vault service using: + +```bash +jvv listen +``` + +Other workspaces may connect to this Vault once it is listening."; + + + +/// From ./docs/Documents/web_docs\guide.md +pub const WEB_DOCS_GUIDE: &str = "# FIRST + +ok"; + + + +/// From ./docs/Documents/_navbar.md +pub const _NAVBAR: &str = "* [Home](/) +* [GitHub](https://github.com/JustEnoughVCS/) +* [Main Repo](https://github.com/JustEnoughVCS/VersionControl)"; + + + +/// From ./docs/Documents/_sidebar.md +pub const _SIDEBAR: &str = "* Getting Started + * [Introduction](README.md) + +* USER + * [Workspace Setup](web_docs/workspace_setup.md) + +* ADMIN + * [Vault 
Setup](web_docs/vault_setup.md)"; + + +// Get document content by name +pub fn document(name: impl AsRef) -> Option { + match name.as_ref() { + + "ascii_yizi" => Some(ASCII_YIZI.to_string()), + + "docs_collaboration" => Some(DOCS_COLLABORATION.to_string()), + + "docs_get_started" => Some(DOCS_GET_STARTED.to_string()), + + "profiles_vault" => Some(PROFILES_VAULT.to_string()), + + "readme" => Some(README.to_string()), + + "readmes_local_workspace_todolist" => Some(READMES_LOCAL_WORKSPACE_TODOLIST.to_string()), + + "readmes_vault_readme" => Some(READMES_VAULT_README.to_string()), + + "web_docs_guide" => Some(WEB_DOCS_GUIDE.to_string()), + + "_navbar" => Some(_NAVBAR.to_string()), + + "_sidebar" => Some(_SIDEBAR.to_string()), +_ => None, + } +} + +// Get list of all available document names +pub fn documents() -> Vec { + vec![ + "ascii_yizi".to_string(), + "docs_collaboration".to_string(), + "docs_get_started".to_string(), + "profiles_vault".to_string(), + "readme".to_string(), + "readmes_local_workspace_todolist".to_string(), + "readmes_vault_readme".to_string(), + "web_docs_guide".to_string(), + "_navbar".to_string(), + "_sidebar".to_string(), + ] +} diff --git a/docs/src/docs.rs.template b/docs/src/docs.rs.template new file mode 100644 index 0000000..c6787d9 --- /dev/null +++ b/docs/src/docs.rs.template @@ -0,0 +1,26 @@ +// Auto-generated code. 
+ +--- TEMPLATE DOCUMENT BEGIN --- +/// From {{DOCUMENT_PATH}} +pub const {{DOCUMENT_CONSTANT_NAME}}: &str = "{{DOCUMENT_CONTENT}}"; + +--- TEMPLATE DOCUMENT END --- + +// Get document content by name +pub fn document(name: impl AsRef) -> Option { + match name.as_ref() { +--- TEMPLATE FUNC BEGIN --- + "{{DOCUMENT_PATH_SNAKE_CASE}}" => Some({{DOCUMENT_CONSTANT_NAME}}.to_string()), +--- TEMPLATE FUNC END --- + _ => None, + } +} + +// Get list of all available document names +pub fn documents() -> Vec { + vec![ +--- TEMPLATE LIST BEGIN --- + "{{DOCUMENT_PATH_SNAKE_CASE}}".to_string(), +--- TEMPLATE LIST END --- + ] +} diff --git a/docs/src/lib.rs b/docs/src/lib.rs new file mode 100644 index 0000000..ca422a9 --- /dev/null +++ b/docs/src/lib.rs @@ -0,0 +1 @@ +pub mod docs; diff --git a/examples/Cargo.toml b/examples/Cargo.toml index fb4820f..12c52c1 100644 --- a/examples/Cargo.toml +++ b/examples/Cargo.toml @@ -10,12 +10,12 @@ path = "src/bin/example_action_system.rs" [dependencies] # Utils -tcp_connection = { path = "../crates/utils/tcp_connection" } -cfg_file = { path = "../crates/utils/cfg_file", features = ["default"] } -string_proc = { path = "../crates/utils/string_proc" } +tcp_connection = { path = "../utils/tcp_connection" } +cfg_file = { path = "../utils/cfg_file", features = ["default"] } +string_proc = { path = "../utils/string_proc" } # Core -action_system = { path = "../crates/system_action" } +action_system = { path = "../systems/action" } # Async & Networking tokio = { version = "1.48.0", features = ["full"] } diff --git a/systems/action/Cargo.toml b/systems/action/Cargo.toml new file mode 100644 index 0000000..5317975 --- /dev/null +++ b/systems/action/Cargo.toml @@ -0,0 +1,15 @@ +[package] +name = "action_system" +edition = "2024" +version.workspace = true + +[dependencies] +tcp_connection = { path = "../../utils/tcp_connection" } +action_system_macros = { path = "action_macros" } + +# Serialization +serde = { version = "1.0.228", features = ["derive"] 
} +serde_json = "1.0.145" + +# Async & Networking +tokio = "1.48.0" diff --git a/systems/action/action_macros/Cargo.toml b/systems/action/action_macros/Cargo.toml new file mode 100644 index 0000000..8b23191 --- /dev/null +++ b/systems/action/action_macros/Cargo.toml @@ -0,0 +1,19 @@ +[package] +name = "action_system_macros" +edition = "2024" +version.workspace = true + +[lib] +proc-macro = true + +[dependencies] +tcp_connection = { path = "../../../utils/tcp_connection" } +string_proc = { path = "../../../utils/string_proc" } + +syn = { version = "2.0", features = ["full", "extra-traits"] } +quote = "1.0" +proc-macro2 = "1.0" + +# Serialization +serde = { version = "1.0.228", features = ["derive"] } +serde_json = "1.0.145" diff --git a/systems/action/action_macros/src/lib.rs b/systems/action/action_macros/src/lib.rs new file mode 100644 index 0000000..e6616b4 --- /dev/null +++ b/systems/action/action_macros/src/lib.rs @@ -0,0 +1,248 @@ +use proc_macro::TokenStream; +use quote::quote; +use syn::{ItemFn, parse_macro_input}; + +/// # Macro - Generate Action +/// +/// When annotating a function with the `#[action_gen]` macro in the following format, it generates boilerplate code for client-server interaction +/// +/// ```ignore +/// #[action_gen] +/// async fn action_name(ctx: ActionContext, argument: YourArgument) -> Result { +/// // Write your client and server logic here +/// if ctx.is_proc_on_remote() { +/// // Server logic +/// } +/// if ctx.is_proc_on_local() { +/// // Client logic +/// } +/// } +/// ``` +/// +/// > WARNING: +/// > For Argument and Result types, the `action_gen` macro only supports types that derive serde's Serialize and Deserialize +/// +/// ## Generated Code +/// +/// `action_gen` will generate the following: +/// +/// 1. Complete implementation of Action +/// 2. 
Process / Register method +/// +/// ## How to use +/// +/// You can use your generated method as follows +/// +/// ```ignore +/// async fn main() -> Result<(), TcpTargetError> { +/// +/// // Prepare your argument +/// let args = YourArgument::default(); +/// +/// // Create a pool and context +/// let mut pool = ActionPool::new(); +/// let ctx = ActionContext::local(); +/// +/// // Register your action +/// register_your_action(&mut pool); +/// +/// // Process your action +/// proc_your_action(&pool, ctx, args).await?; +/// +/// Ok(()) +/// } +/// ``` +#[proc_macro_attribute] +pub fn action_gen(attr: TokenStream, item: TokenStream) -> TokenStream { + let input_fn = parse_macro_input!(item as ItemFn); + let is_local = if attr.is_empty() { + false + } else { + let attr_str = attr.to_string(); + attr_str == "local" || attr_str.contains("local") + }; + + generate_action_struct(input_fn, is_local).into() +} + +fn generate_action_struct(input_fn: ItemFn, _is_local: bool) -> proc_macro2::TokenStream { + let fn_vis = &input_fn.vis; + let fn_sig = &input_fn.sig; + let fn_name = &fn_sig.ident; + let fn_block = &input_fn.block; + + validate_function_signature(fn_sig); + + let (context_param_name, arg_param_name, arg_type, return_type) = + extract_parameters_and_types(fn_sig); + + let struct_name = quote::format_ident!("{}", convert_to_pascal_case(&fn_name.to_string())); + + let action_name_ident = &fn_name; + + let register_this_action = quote::format_ident!("register_{}", action_name_ident); + let proc_this_action = quote::format_ident!("proc_{}", action_name_ident); + + quote! 
{ + #[derive(Debug, Clone, Default)] + #fn_vis struct #struct_name; + + impl action_system::action::Action<#arg_type, #return_type> for #struct_name { + fn action_name() -> &'static str { + Box::leak(string_proc::snake_case!(stringify!(#action_name_ident)).into_boxed_str()) + } + + fn is_remote_action() -> bool { + !#_is_local + } + + async fn process(#context_param_name: action_system::action::ActionContext, #arg_param_name: #arg_type) -> Result<#return_type, tcp_connection::error::TcpTargetError> { + #fn_block + } + } + + #fn_vis fn #register_this_action(pool: &mut action_system::action_pool::ActionPool) { + pool.register::<#struct_name, #arg_type, #return_type>(); + } + + #fn_vis async fn #proc_this_action( + pool: &action_system::action_pool::ActionPool, + mut ctx: action_system::action::ActionContext, + #arg_param_name: #arg_type + ) -> Result<#return_type, tcp_connection::error::TcpTargetError> { + ctx.set_is_remote_action(!#_is_local); + let args_json = serde_json::to_string(&#arg_param_name) + .map_err(|e| { + tcp_connection::error::TcpTargetError::Serialization(e.to_string()) + })?; + let result_json = pool.process_json( + Box::leak(string_proc::snake_case!(stringify!(#action_name_ident)).into_boxed_str()), + ctx, + args_json, + ).await?; + serde_json::from_str(&result_json) + .map_err(|e| { + tcp_connection::error::TcpTargetError::Serialization(e.to_string()) + }) + } + + #[allow(dead_code)] + #[deprecated = "This function is used by #[action_gen] as a template."] + #[doc = "Template function for #[[action_gen]] - do not call directly."] + #[doc = "Use the generated struct instead."] + #[doc = ""] + #[doc = "Register the action to the pool."] + #[doc = "```ignore"] + #[doc = "register_your_func(&mut pool);"] + #[doc = "```"] + #[doc = ""] + #[doc = "Process the action at the pool."] + #[doc = "```ignore"] + #[doc = "let result = proc_your_func(&pool, ctx, arg).await?;"] + #[doc = "```"] + #fn_vis #fn_sig #fn_block + } +} + +fn 
validate_function_signature(fn_sig: &syn::Signature) { + if fn_sig.asyncness.is_none() { + panic!("Expected async function for Action, but found synchronous function"); + } + + if fn_sig.inputs.len() != 2 { + panic!( + "Expected exactly 2 arguments for Action function: ctx: ActionContext and arg: T, but found {} arguments", + fn_sig.inputs.len() + ); + } + + let return_type = match &fn_sig.output { + syn::ReturnType::Type(_, ty) => ty, + _ => panic!( + "Expected Action function to return Result, but found no return type" + ), + }; + + if let syn::Type::Path(type_path) = return_type.as_ref() { + if let Some(segment) = type_path.path.segments.last() + && segment.ident != "Result" + { + panic!( + "Expected Action function to return Result, but found different return type" + ); + } + } else { + panic!( + "Expected Action function to return Result, but found no return type" + ); + } +} + +fn convert_to_pascal_case(s: &str) -> String { + s.split('_') + .map(|word| { + let mut chars = word.chars(); + match chars.next() { + None => String::new(), + Some(first) => first.to_uppercase().collect::() + chars.as_str(), + } + }) + .collect() +} + +fn extract_parameters_and_types( + fn_sig: &syn::Signature, +) -> ( + proc_macro2::TokenStream, + proc_macro2::TokenStream, + proc_macro2::TokenStream, + proc_macro2::TokenStream, +) { + let mut inputs = fn_sig.inputs.iter(); + + let context_param = match inputs.next() { + Some(syn::FnArg::Typed(pat_type)) => { + let pat = &pat_type.pat; + quote::quote!(#pat) + } + _ => { + panic!("Expected the first argument to be a typed parameter, but found something else") + } + }; + + let arg_param = match inputs.next() { + Some(syn::FnArg::Typed(pat_type)) => { + let pat = &pat_type.pat; + let ty = &pat_type.ty; + (quote::quote!(#pat), quote::quote!(#ty)) + } + _ => { + panic!("Expected the second argument to be a typed parameter, but found something else") + } + }; + + let (arg_param_name, arg_type) = arg_param; + + let return_type = match 
&fn_sig.output { + syn::ReturnType::Type(_, ty) => { + if let syn::Type::Path(type_path) = ty.as_ref() { + if let syn::PathArguments::AngleBracketed(args) = + &type_path.path.segments.last().unwrap().arguments + { + if let Some(syn::GenericArgument::Type(ty)) = args.args.first() { + quote::quote!(#ty) + } else { + panic!("Expected to extract the success type of Result, but failed"); + } + } else { + panic!("Expected Result type to have generic parameters, but found none"); + } + } else { + panic!("Expected return type to be Result, but found different type"); + } + } + _ => panic!("Expected function to have return type, but found none"), + }; + + (context_param, arg_param_name, arg_type, return_type) +} diff --git a/systems/action/src/action.rs b/systems/action/src/action.rs new file mode 100644 index 0000000..62425ff --- /dev/null +++ b/systems/action/src/action.rs @@ -0,0 +1,244 @@ +use serde::{Serialize, de::DeserializeOwned}; +use std::any::{Any, TypeId}; +use std::collections::HashMap; +use std::sync::Arc; +use tcp_connection::{error::TcpTargetError, instance::ConnectionInstance}; +use tokio::{net::TcpStream, sync::Mutex}; + +/// # Trait - Action +/// +/// A trait used to describe the interaction pattern between client and server +/// +/// ## Generics +/// +/// Args: Represents the parameter type required for this action +/// +/// Return: Represents the return type of this action +/// +/// The above generics must implement serde's Serialize and DeserializeOwned traits, +/// and must be sendable between threads +/// +/// ## Implementation +/// +/// ```ignore +/// pub trait Action +/// where +/// Args: Serialize + DeserializeOwned + Send, +/// Return: Serialize + DeserializeOwned + Send, +/// { +/// /// Name, used to inform the server which action to execute +/// fn action_name() -> &'static str; +/// +/// /// Whether it's a local Action, used to inform the system if it only runs locally +/// fn is_remote_action() -> bool; +/// +/// /// Action processing logic 
+/// fn process( +/// context: ActionContext, +/// args: Args, +/// ) -> impl std::future::Future> + Send; +/// } +/// ``` +pub trait Action +where + Args: Serialize + DeserializeOwned + Send, + Return: Serialize + DeserializeOwned + Send, +{ + fn action_name() -> &'static str; + + fn is_remote_action() -> bool; + + fn process( + context: ActionContext, + args: Args, + ) -> impl std::future::Future> + Send; +} + +/// # Struct - ActionContext +/// +/// Used to inform the Action about the current execution environment +/// +/// ## Creation +/// +/// Create ActionContext using the following methods: +/// +/// ```ignore +/// +/// // The instance here is the connection instance passed from external sources for communicating with the server +/// // For specific usage, please refer to the `/crates/utils/tcp_connection` section +/// +/// fn init_local_action_ctx(instance: ConnectionInstance) { +/// // Create context and specify execution on local +/// let mut ctx = ActionContext::local(); +/// } +/// +/// fn init_remote_action_ctx(instance: ConnectionInstance) { +/// // Create context and specify execution on remote +/// let mut ctx = ActionContext::remote(); +/// } +#[derive(Default)] +pub struct ActionContext { + /// Whether the action is executed locally or remotely + proc_on_local: bool, + + /// Whether the action being executed in the current context is a remote action + is_remote_action: bool, + + /// The name of the action being executed + action_name: String, + + /// The JSON-serialized arguments for the action + action_args_json: String, + + /// The connection instance in the current context, + instance: Option>>, + + /// Generic data storage for arbitrary types + data: HashMap>, +} + +impl ActionContext { + /// Generate local context + pub fn local() -> Self { + ActionContext { + proc_on_local: true, + ..Default::default() + } + } + + /// Generate remote context + pub fn remote() -> Self { + ActionContext { + proc_on_local: false, + ..Default::default() + } + } + 
+ /// Build connection instance from TcpStream + pub fn build_instance(mut self, stream: TcpStream) -> Self { + self.instance = Some(Arc::new(Mutex::new(ConnectionInstance::from(stream)))); + self + } + + /// Insert connection instance into context + pub fn insert_instance(mut self, instance: ConnectionInstance) -> Self { + self.instance = Some(Arc::new(Mutex::new(instance))); + self + } + + /// Pop connection instance from context + pub fn pop_instance(&mut self) -> Option>> { + self.instance.take() + } +} + +impl ActionContext { + /// Whether the action is executed locally + pub fn is_proc_on_local(&self) -> bool { + self.proc_on_local + } + + /// Whether the action is executed remotely + pub fn is_proc_on_remote(&self) -> bool { + !self.proc_on_local + } + + /// Whether the action being executed in the current context is a remote action + pub fn is_remote_action(&self) -> bool { + self.is_remote_action + } + + /// Set whether the action being executed in the current context is a remote action + pub fn set_is_remote_action(&mut self, is_remote_action: bool) { + self.is_remote_action = is_remote_action; + } + + /// Get the connection instance in the current context + pub fn instance(&self) -> &Option>> { + &self.instance + } + + /// Get a mutable reference to the connection instance in the current context + pub fn instance_mut(&mut self) -> &mut Option>> { + &mut self.instance + } + + /// Get the action name from the context + pub fn action_name(&self) -> &str { + &self.action_name + } + + /// Get the action arguments from the context + pub fn action_args_json(&self) -> &String { + &self.action_args_json + } + + /// Set the action name in the context + pub fn set_action_name(mut self, action_name: String) -> Self { + self.action_name = action_name; + self + } + + /// Set the action arguments in the context + pub fn set_action_args(mut self, action_args: String) -> Self { + self.action_args_json = action_args; + self + } + + /// Insert arbitrary data in the context 
+ pub fn with_data(mut self, value: T) -> Self { + self.data.insert(TypeId::of::(), Arc::new(value)); + self + } + + /// Insert arbitrary data as Arc in the context + pub fn with_arc_data(mut self, value: Arc) -> Self { + self.data.insert(TypeId::of::(), value); + self + } + + /// Insert arbitrary data in the context + pub fn insert_data(&mut self, value: T) { + self.data.insert(TypeId::of::(), Arc::new(value)); + } + + /// Insert arbitrary data as Arc in the context + pub fn insert_arc_data(&mut self, value: Arc) { + self.data.insert(TypeId::of::(), value); + } + + /// Get arbitrary data from the context + pub fn get(&self) -> Option<&T> { + self.data + .get(&TypeId::of::()) + .and_then(|arc| arc.downcast_ref::()) + } + + /// Get arbitrary data as Arc from the context + pub fn get_arc(&self) -> Option> { + self.data + .get(&TypeId::of::()) + .and_then(|arc| Arc::clone(arc).downcast::().ok()) + } + + /// Remove and return arbitrary data from the context + pub fn remove(&mut self) -> Option> { + self.data + .remove(&TypeId::of::()) + .and_then(|arc| arc.downcast::().ok()) + } + + /// Check if the context contains data of a specific type + pub fn contains(&self) -> bool { + self.data.contains_key(&TypeId::of::()) + } + + /// Take ownership of the context and extract data of a specific type + pub fn take(mut self) -> (Self, Option>) { + let value = self + .data + .remove(&TypeId::of::()) + .and_then(|arc| arc.downcast::().ok()); + (self, value) + } +} diff --git a/systems/action/src/action_pool.rs b/systems/action/src/action_pool.rs new file mode 100644 index 0000000..019fa6d --- /dev/null +++ b/systems/action/src/action_pool.rs @@ -0,0 +1,247 @@ +use std::pin::Pin; + +use serde::{Serialize, de::DeserializeOwned}; +use serde_json; +use tcp_connection::error::TcpTargetError; + +use crate::action::{Action, ActionContext}; + +type ProcBeginCallback = for<'a> fn( + &'a mut ActionContext, + args: &'a (dyn std::any::Any + Send + Sync), +) -> ProcBeginFuture<'a>; +type 
ProcEndCallback = fn() -> ProcEndFuture; + +type ProcBeginFuture<'a> = Pin> + Send + 'a>>; +type ProcEndFuture = Pin> + Send>>; + +/// # Struct - ActionPool +/// +/// This struct is used to register and record all accessible and executable actions +/// +/// It also registers `on_proc_begin` and `on_proc_end` callback functions +/// used for action initialization +/// +/// ## Creating and registering actions +/// ```ignore +/// fn init_action_pool() { +/// let mut pool = Action::new(); +/// +/// // Register action +/// pool.register(); +/// +/// // If the action is implemented with `#[action_gen]`, you can also do +/// register_your_action(&mut pool); +/// } +/// ``` +pub struct ActionPool { + /// HashMap storing action name to action implementation mapping + actions: std::collections::HashMap<&'static str, Box>, + + /// Callback to execute when process begins + on_proc_begin: Option, + + /// Callback to execute when process ends + on_proc_end: Option, +} + +impl Default for ActionPool { + fn default() -> Self { + Self::new() + } +} + +impl ActionPool { + /// Creates a new empty ActionPool + pub fn new() -> Self { + Self { + actions: std::collections::HashMap::new(), + on_proc_begin: None, + on_proc_end: None, + } + } + + /// Sets a callback to be executed when process begins + pub fn set_on_proc_begin(&mut self, callback: ProcBeginCallback) { + self.on_proc_begin = Some(callback); + } + + /// Sets a callback to be executed when process ends + pub fn set_on_proc_end(&mut self, callback: ProcEndCallback) { + self.on_proc_end = Some(callback); + } + + /// Registers an action type with the pool + /// + /// Usage: + /// ```ignore + /// action_pool.register::(); + /// ``` + pub fn register(&mut self) + where + A: Action + Send + Sync + 'static, + Args: serde::Serialize + serde::de::DeserializeOwned + Send + Sync + 'static, + Return: serde::Serialize + serde::de::DeserializeOwned + Send + Sync + 'static, + { + let action_name = A::action_name(); + self.actions.insert( + 
action_name, + Box::new(ActionWrapper::(std::marker::PhantomData)), + ); + } + + /// Processes an action by name with given context and arguments + /// + /// Usage: + /// ```ignore + /// let result = action_pool.process::("my_action", context, args).await?; + /// ``` + /// Processes an action by name with JSON-serialized arguments + /// + /// Usage: + /// ```ignore + /// let result_json = action_pool.process_json("my_action", context, args_json).await?; + /// let result: MyReturn = serde_json::from_str(&result_json)?; + /// ``` + pub async fn process_json<'a>( + &'a self, + action_name: &'a str, + context: ActionContext, + args_json: String, + ) -> Result { + if let Some(action) = self.actions.get(action_name) { + // Set action name and args in context for callbacks + let context = context.set_action_name(action_name.to_string()); + let mut context = context.set_action_args(args_json.clone()); + + self.exec_on_proc_begin(&mut context, &args_json).await?; + let result = action.process_json_erased(context, args_json).await?; + self.exec_on_proc_end().await?; + Ok(result) + } else { + Err(TcpTargetError::Unsupported("InvalidAction".to_string())) + } + } + + /// Processes an action by name with given context and arguments + /// + /// Usage: + /// ```ignore + /// let result = action_pool.process::("my_action", context, args).await?; + /// ``` + pub async fn process<'a, Args, Return>( + &'a self, + action_name: &'a str, + mut context: ActionContext, + args: Args, + ) -> Result + where + Args: serde::de::DeserializeOwned + Send + Sync + 'static, + Return: serde::Serialize + Send + 'static, + { + if let Some(action) = self.actions.get(action_name) { + self.exec_on_proc_begin(&mut context, &args).await?; + let result = action.process_erased(context, Box::new(args)).await?; + let result = *result + .downcast::() + .map_err(|_| TcpTargetError::Unsupported("InvalidArguments".to_string()))?; + self.exec_on_proc_end().await?; + Ok(result) + } else { + 
Err(TcpTargetError::Unsupported("InvalidAction".to_string())) + } + } + + /// Executes the process begin callback if set + async fn exec_on_proc_begin( + &self, + context: &mut ActionContext, + args: &(dyn std::any::Any + Send + Sync), + ) -> Result<(), TcpTargetError> { + if let Some(callback) = &self.on_proc_begin { + callback(context, args).await + } else { + Ok(()) + } + } + + /// Executes the process end callback if set + async fn exec_on_proc_end(&self) -> Result<(), TcpTargetError> { + if let Some(callback) = &self.on_proc_end { + callback().await + } else { + Ok(()) + } + } +} + +/// Trait for type-erased actions that can be stored in ActionPool +type ProcessErasedFuture = std::pin::Pin< + Box< + dyn std::future::Future, TcpTargetError>> + + Send, + >, +>; +type ProcessJsonErasedFuture = + std::pin::Pin> + Send>>; + +trait ActionErased: Send + Sync { + /// Processes the action with type-erased arguments and returns type-erased result + fn process_erased( + &self, + context: ActionContext, + args: Box, + ) -> ProcessErasedFuture; + + /// Processes the action with JSON-serialized arguments and returns JSON-serialized result + fn process_json_erased( + &self, + context: ActionContext, + args_json: String, + ) -> ProcessJsonErasedFuture; +} + +/// Wrapper struct that implements ActionErased for concrete Action types +struct ActionWrapper(std::marker::PhantomData<(A, Args, Return)>); + +impl ActionErased for ActionWrapper +where + A: Action + Send + Sync, + Args: Serialize + DeserializeOwned + Send + Sync + 'static, + Return: Serialize + DeserializeOwned + Send + Sync + 'static, +{ + fn process_erased( + &self, + context: ActionContext, + args: Box, + ) -> std::pin::Pin< + Box< + dyn std::future::Future, TcpTargetError>> + + Send, + >, + > { + Box::pin(async move { + let args = *args + .downcast::() + .map_err(|_| TcpTargetError::Unsupported("InvalidArguments".to_string()))?; + let result = A::process(context, args).await?; + Ok(Box::new(result) as Box) + }) + } 
+ + fn process_json_erased( + &self, + context: ActionContext, + args_json: String, + ) -> std::pin::Pin> + Send>> + { + Box::pin(async move { + let args: Args = serde_json::from_str(&args_json) + .map_err(|e| TcpTargetError::Serialization(format!("Deserialize failed: {}", e)))?; + let result = A::process(context, args).await?; + let result_json = serde_json::to_string(&result) + .map_err(|e| TcpTargetError::Serialization(format!("Serialize failed: {}", e)))?; + Ok(result_json) + }) + } +} diff --git a/systems/action/src/lib.rs b/systems/action/src/lib.rs new file mode 100644 index 0000000..12ae999 --- /dev/null +++ b/systems/action/src/lib.rs @@ -0,0 +1,6 @@ +pub mod macros { + pub use action_system_macros::*; +} + +pub mod action; +pub mod action_pool; diff --git a/utils/cfg_file/Cargo.toml b/utils/cfg_file/Cargo.toml new file mode 100644 index 0000000..0685329 --- /dev/null +++ b/utils/cfg_file/Cargo.toml @@ -0,0 +1,23 @@ +[package] +name = "cfg_file" +edition = "2024" +version.workspace = true + +[features] +default = ["derive"] +derive = [] + +[dependencies] +cfg_file_derive = { path = "cfg_file_derive" } + +# Async +tokio = { version = "1.48.0", features = ["full"] } +async-trait = "0.1.89" + +# Serialization +serde = { version = "1.0.228", features = ["derive"] } +serde_yaml = "0.9.34" +serde_json = "1.0.145" +ron = "0.11.0" +toml = "0.9.8" +bincode2 = "2.0.1" diff --git a/utils/cfg_file/cfg_file_derive/Cargo.toml b/utils/cfg_file/cfg_file_derive/Cargo.toml new file mode 100644 index 0000000..ce5e77f --- /dev/null +++ b/utils/cfg_file/cfg_file_derive/Cargo.toml @@ -0,0 +1,11 @@ +[package] +name = "cfg_file_derive" +edition = "2024" +version.workspace = true + +[lib] +proc-macro = true + +[dependencies] +syn = { version = "2.0", features = ["full", "extra-traits"] } +quote = "1.0" diff --git a/utils/cfg_file/cfg_file_derive/src/lib.rs b/utils/cfg_file/cfg_file_derive/src/lib.rs new file mode 100644 index 0000000..e916311 --- /dev/null +++ 
b/utils/cfg_file/cfg_file_derive/src/lib.rs @@ -0,0 +1,130 @@ +extern crate proc_macro; + +use proc_macro::TokenStream; +use quote::quote; +use syn::parse::ParseStream; +use syn::{Attribute, DeriveInput, Expr, parse_macro_input}; +/// # Macro - ConfigFile +/// +/// ## Usage +/// +/// Use `#[derive(ConfigFile)]` to derive the ConfigFile trait for a struct +/// +/// Specify the default storage path via `#[cfg_file(path = "...")]` +/// +/// ## About the `cfg_file` attribute macro +/// +/// Use `#[cfg_file(path = "string")]` to specify the configuration file path +/// +/// Or use `#[cfg_file(path = constant_expression)]` to specify the configuration file path +/// +/// ## Path Rules +/// +/// Paths starting with `"./"`: relative to the current working directory +/// +/// Other paths: treated as absolute paths +/// +/// When no path is specified: use the struct name + ".json" as the default filename (e.g., `my_struct.json`) +/// +/// ## Example +/// ```ignore +/// #[derive(ConfigFile)] +/// #[cfg_file(path = "./config.json")] +/// struct AppConfig; +/// ``` +#[proc_macro_derive(ConfigFile, attributes(cfg_file))] +pub fn derive_config_file(input: TokenStream) -> TokenStream { + let input = parse_macro_input!(input as DeriveInput); + let name = &input.ident; + + // Process 'cfg_file' + let path_expr = match find_cfg_file_path(&input.attrs) { + Some(PathExpr::StringLiteral(path)) => { + if let Some(path_str) = path.strip_prefix("./") { + quote! { + std::env::current_dir()?.join(#path_str) + } + } else { + // Using Absolute Path + quote! { + std::path::PathBuf::from(#path) + } + } + } + Some(PathExpr::PathExpression(path_expr)) => { + // For path expressions (constants), generate code that references the constant + quote! { + std::path::PathBuf::from(#path_expr) + } + } + None => { + let default_file = to_snake_case(&name.to_string()) + ".json"; + quote! { + std::env::current_dir()?.join(#default_file) + } + } + }; + + let expanded = quote! 
{ + impl cfg_file::config::ConfigFile for #name { + type DataType = #name; + + fn default_path() -> Result { + Ok(#path_expr) + } + } + }; + + TokenStream::from(expanded) +} + +enum PathExpr { + StringLiteral(String), + PathExpression(syn::Expr), +} + +fn find_cfg_file_path(attrs: &[Attribute]) -> Option { + for attr in attrs { + if attr.path().is_ident("cfg_file") { + let parser = |meta: ParseStream| { + let path_meta: syn::MetaNameValue = meta.parse()?; + if path_meta.path.is_ident("path") { + match &path_meta.value { + // String literal case: path = "./vault.toml" + Expr::Lit(expr_lit) if matches!(expr_lit.lit, syn::Lit::Str(_)) => { + if let syn::Lit::Str(lit_str) = &expr_lit.lit { + return Ok(PathExpr::StringLiteral(lit_str.value())); + } + } + // Path expression case: path = SERVER_FILE_VAULT or crate::constants::SERVER_FILE_VAULT + expr @ (Expr::Path(_) | Expr::Macro(_)) => { + return Ok(PathExpr::PathExpression(expr.clone())); + } + _ => {} + } + } + Err(meta.error("expected `path = \"...\"` or `path = CONSTANT`")) + }; + + if let Ok(path_expr) = attr.parse_args_with(parser) { + return Some(path_expr); + } + } + } + None +} + +fn to_snake_case(s: &str) -> String { + let mut snake = String::new(); + for (i, c) in s.chars().enumerate() { + if c.is_uppercase() { + if i != 0 { + snake.push('_'); + } + snake.push(c.to_ascii_lowercase()); + } else { + snake.push(c); + } + } + snake +} diff --git a/utils/cfg_file/cfg_file_test/Cargo.toml b/utils/cfg_file/cfg_file_test/Cargo.toml new file mode 100644 index 0000000..5db1010 --- /dev/null +++ b/utils/cfg_file/cfg_file_test/Cargo.toml @@ -0,0 +1,9 @@ +[package] +name = "cfg_file_test" +version = "0.1.0" +edition = "2024" + +[dependencies] +cfg_file = { path = "../../cfg_file", features = ["default"] } +tokio = { version = "1.48.0", features = ["full"] } +serde = { version = "1.0.228", features = ["derive"] } diff --git a/utils/cfg_file/cfg_file_test/src/lib.rs b/utils/cfg_file/cfg_file_test/src/lib.rs new file mode 
100644 index 0000000..f70d00d --- /dev/null +++ b/utils/cfg_file/cfg_file_test/src/lib.rs @@ -0,0 +1,95 @@ +#[cfg(test)] +mod test_cfg_file { + use cfg_file::ConfigFile; + use cfg_file::config::ConfigFile; + use serde::{Deserialize, Serialize}; + use std::collections::HashMap; + + #[derive(ConfigFile, Deserialize, Serialize, Default)] + #[cfg_file(path = "./.temp/example_cfg.toml")] + struct ExampleConfig { + name: String, + age: i32, + hobby: Vec, + secret: HashMap, + } + + #[derive(ConfigFile, Deserialize, Serialize, Default)] + #[cfg_file(path = "./.temp/example_bincode.bcfg")] + struct ExampleBincodeConfig { + name: String, + age: i32, + hobby: Vec, + secret: HashMap, + } + + #[tokio::test] + async fn test_config_file_serialization() { + let mut example = ExampleConfig { + name: "Weicao".to_string(), + age: 22, + hobby: ["Programming", "Painting"] + .iter() + .map(|m| m.to_string()) + .collect(), + secret: HashMap::new(), + }; + let secret_no_comments = + "Actually, I'm really too lazy to write comments, documentation, and unit tests."; + example + .secret + .entry("No comments".to_string()) + .insert_entry(secret_no_comments.to_string()); + + let secret_peek = "Of course, it's peeking at you who's reading the source code."; + example + .secret + .entry("Peek".to_string()) + .insert_entry(secret_peek.to_string()); + + ExampleConfig::write(&example).await.unwrap(); // Write to default path. + + // Read from default path. 
+ let read_cfg = ExampleConfig::read().await.unwrap(); + assert_eq!(read_cfg.name, "Weicao"); + assert_eq!(read_cfg.age, 22); + assert_eq!(read_cfg.hobby, vec!["Programming", "Painting"]); + assert_eq!(read_cfg.secret["No comments"], secret_no_comments); + assert_eq!(read_cfg.secret["Peek"], secret_peek); + } + + #[tokio::test] + async fn test_bincode_config_file_serialization() { + let mut example = ExampleBincodeConfig { + name: "Weicao".to_string(), + age: 22, + hobby: ["Programming", "Painting"] + .iter() + .map(|m| m.to_string()) + .collect(), + secret: HashMap::new(), + }; + let secret_no_comments = + "Actually, I'm really too lazy to write comments, documentation, and unit tests."; + example + .secret + .entry("No comments".to_string()) + .insert_entry(secret_no_comments.to_string()); + + let secret_peek = "Of course, it's peeking at you who's reading the source code."; + example + .secret + .entry("Peek".to_string()) + .insert_entry(secret_peek.to_string()); + + ExampleBincodeConfig::write(&example).await.unwrap(); // Write to default path. + + // Read from default path. 
+ let read_cfg = ExampleBincodeConfig::read().await.unwrap(); + assert_eq!(read_cfg.name, "Weicao"); + assert_eq!(read_cfg.age, 22); + assert_eq!(read_cfg.hobby, vec!["Programming", "Painting"]); + assert_eq!(read_cfg.secret["No comments"], secret_no_comments); + assert_eq!(read_cfg.secret["Peek"], secret_peek); + } +} diff --git a/utils/cfg_file/src/config.rs b/utils/cfg_file/src/config.rs new file mode 100644 index 0000000..d3f5477 --- /dev/null +++ b/utils/cfg_file/src/config.rs @@ -0,0 +1,263 @@ +use async_trait::async_trait; +use bincode2; +use ron; +use serde::{Deserialize, Serialize}; +use std::{ + borrow::Cow, + env::current_dir, + io::Error, + path::{Path, PathBuf}, +}; +use tokio::{fs, io::AsyncReadExt}; + +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +enum ConfigFormat { + Yaml, + Toml, + Ron, + Json, + Bincode, +} + +impl ConfigFormat { + fn from_filename(filename: &str) -> Option { + if filename.ends_with(".yaml") || filename.ends_with(".yml") { + Some(Self::Yaml) + } else if filename.ends_with(".toml") || filename.ends_with(".tom") { + Some(Self::Toml) + } else if filename.ends_with(".ron") { + Some(Self::Ron) + } else if filename.ends_with(".json") { + Some(Self::Json) + } else if filename.ends_with(".bcfg") { + Some(Self::Bincode) + } else { + None + } + } +} + +/// # Trait - ConfigFile +/// +/// Used to implement more convenient persistent storage functionality for structs +/// +/// This trait requires the struct to implement Default and serde's Serialize and Deserialize traits +/// +/// ## Implementation +/// +/// ```ignore +/// // Your struct +/// #[derive(Default, Serialize, Deserialize)] +/// struct YourData; +/// +/// impl ConfigFile for YourData { +/// type DataType = YourData; +/// +/// // Specify default path +/// fn default_path() -> Result { +/// Ok(current_dir()?.join("data.json")) +/// } +/// } +/// ``` +/// +/// > **Using derive macro** +/// > +/// > We provide the derive macro `#[derive(ConfigFile)]` +/// > +/// > You can implement 
this trait more quickly, please check the module cfg_file::cfg_file_derive +/// +#[async_trait] +pub trait ConfigFile: Serialize + for<'a> Deserialize<'a> + Default { + type DataType: Serialize + for<'a> Deserialize<'a> + Default + Send + Sync; + + fn default_path() -> Result; + + /// # Read from default path + /// + /// Read data from the path specified by default_path() + /// + /// ```ignore + /// fn main() -> Result<(), std::io::Error> { + /// let data = YourData::read().await?; + /// } + /// ``` + async fn read() -> Result + where + Self: Sized + Send + Sync, + { + let path = Self::default_path()?; + Self::read_from(path).await + } + + /// # Read from the given path + /// + /// Read data from the path specified by the path parameter + /// + /// ```ignore + /// fn main() -> Result<(), std::io::Error> { + /// let data_path = current_dir()?.join("data.json"); + /// let data = YourData::read_from(data_path).await?; + /// } + /// ``` + async fn read_from(path: impl AsRef + Send) -> Result + where + Self: Sized + Send + Sync, + { + let path = path.as_ref(); + let cwd = current_dir()?; + let file_path = cwd.join(path); + + // Check if file exists + if fs::metadata(&file_path).await.is_err() { + return Err(std::io::Error::new( + std::io::ErrorKind::NotFound, + "Config file not found", + )); + } + + // Determine file format first + let format = file_path + .file_name() + .and_then(|name| name.to_str()) + .and_then(ConfigFormat::from_filename) + .unwrap_or(ConfigFormat::Bincode); // Default to Bincode + + // Deserialize based on format + let result = match format { + ConfigFormat::Yaml => { + let mut file = fs::File::open(&file_path).await?; + let mut contents = String::new(); + file.read_to_string(&mut contents).await?; + serde_yaml::from_str(&contents) + .map_err(|e| std::io::Error::new(std::io::ErrorKind::InvalidData, e))? 
+ } + ConfigFormat::Toml => { + let mut file = fs::File::open(&file_path).await?; + let mut contents = String::new(); + file.read_to_string(&mut contents).await?; + toml::from_str(&contents) + .map_err(|e| std::io::Error::new(std::io::ErrorKind::InvalidData, e))? + } + ConfigFormat::Ron => { + let mut file = fs::File::open(&file_path).await?; + let mut contents = String::new(); + file.read_to_string(&mut contents).await?; + ron::from_str(&contents) + .map_err(|e| std::io::Error::new(std::io::ErrorKind::InvalidData, e))? + } + ConfigFormat::Json => { + let mut file = fs::File::open(&file_path).await?; + let mut contents = String::new(); + file.read_to_string(&mut contents).await?; + serde_json::from_str(&contents) + .map_err(|e| std::io::Error::new(std::io::ErrorKind::InvalidData, e))? + } + ConfigFormat::Bincode => { + // For Bincode, we need to read the file as bytes directly + let bytes = fs::read(&file_path).await?; + bincode2::deserialize(&bytes) + .map_err(|e| std::io::Error::new(std::io::ErrorKind::InvalidData, e))? 
+ } + }; + + Ok(result) + } + + /// # Write to default path + /// + /// Write data to the path specified by default_path() + /// + /// ```ignore + /// fn main() -> Result<(), std::io::Error> { + /// let data = YourData::default(); + /// YourData::write(&data).await?; + /// } + /// ``` + async fn write(val: &Self::DataType) -> Result<(), std::io::Error> + where + Self: Sized + Send + Sync, + { + let path = Self::default_path()?; + Self::write_to(val, path).await + } + /// # Write to the given path + /// + /// Write data to the path specified by the path parameter + /// + /// ```ignore + /// fn main() -> Result<(), std::io::Error> { + /// let data = YourData::default(); + /// let data_path = current_dir()?.join("data.json"); + /// YourData::write_to(&data, data_path).await?; + /// } + /// ``` + async fn write_to( + val: &Self::DataType, + path: impl AsRef + Send, + ) -> Result<(), std::io::Error> + where + Self: Sized + Send + Sync, + { + let path = path.as_ref(); + + if let Some(parent) = path.parent() + && !parent.exists() + { + tokio::fs::create_dir_all(parent).await?; + } + + let cwd = current_dir()?; + let file_path = cwd.join(path); + + // Determine file format + let format = file_path + .file_name() + .and_then(|name| name.to_str()) + .and_then(ConfigFormat::from_filename) + .unwrap_or(ConfigFormat::Bincode); // Default to Bincode + + match format { + ConfigFormat::Yaml => { + let contents = serde_yaml::to_string(val) + .map_err(|e| std::io::Error::new(std::io::ErrorKind::InvalidData, e))?; + fs::write(&file_path, contents).await? + } + ConfigFormat::Toml => { + let contents = toml::to_string(val) + .map_err(|e| std::io::Error::new(std::io::ErrorKind::InvalidData, e))?; + fs::write(&file_path, contents).await? 
+ } + ConfigFormat::Ron => { + let mut pretty_config = ron::ser::PrettyConfig::new(); + pretty_config.new_line = Cow::from("\n"); + pretty_config.indentor = Cow::from(" "); + + let contents = ron::ser::to_string_pretty(val, pretty_config) + .map_err(|e| std::io::Error::new(std::io::ErrorKind::InvalidData, e))?; + fs::write(&file_path, contents).await? + } + ConfigFormat::Json => { + let contents = serde_json::to_string(val) + .map_err(|e| std::io::Error::new(std::io::ErrorKind::InvalidData, e))?; + fs::write(&file_path, contents).await? + } + ConfigFormat::Bincode => { + let bytes = bincode2::serialize(val) + .map_err(|e| std::io::Error::new(std::io::ErrorKind::InvalidData, e))?; + fs::write(&file_path, bytes).await? + } + } + Ok(()) + } + + /// Check if the file returned by `default_path` exists + fn exist() -> bool + where + Self: Sized + Send + Sync, + { + let Ok(path) = Self::default_path() else { + return false; + }; + path.exists() + } +} diff --git a/utils/cfg_file/src/lib.rs b/utils/cfg_file/src/lib.rs new file mode 100644 index 0000000..72246e7 --- /dev/null +++ b/utils/cfg_file/src/lib.rs @@ -0,0 +1,7 @@ +#[cfg(feature = "derive")] +extern crate cfg_file_derive; + +#[cfg(feature = "derive")] +pub use cfg_file_derive::*; + +pub mod config; diff --git a/utils/data_struct/Cargo.toml b/utils/data_struct/Cargo.toml new file mode 100644 index 0000000..e8caa6e --- /dev/null +++ b/utils/data_struct/Cargo.toml @@ -0,0 +1,10 @@ +[package] +name = "data_struct" +edition = "2024" +version.workspace = true + +[features] + +[dependencies] +serde = { version = "1.0.228", features = ["derive"] } +ahash = "0.8.12" diff --git a/utils/data_struct/src/bi_map.rs b/utils/data_struct/src/bi_map.rs new file mode 100644 index 0000000..c21a9c8 --- /dev/null +++ b/utils/data_struct/src/bi_map.rs @@ -0,0 +1,239 @@ +use ahash::AHasher; +use serde::{Deserialize, Serialize}; +use std::collections::HashMap; +use std::hash::{BuildHasherDefault, Hash}; + +type FastHashMap = HashMap>; + 
+#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct BiMap +where + A: Eq + Hash + Clone, + B: Eq + Hash + Clone, +{ + #[serde(flatten)] + a_to_b: FastHashMap, + #[serde(skip)] + b_to_a: FastHashMap, +} + +pub struct Entry<'a, A, B> +where + A: Eq + Hash + Clone, + B: Eq + Hash + Clone, +{ + bimap: &'a mut BiMap, + key: A, + value: Option, +} + +impl BiMap +where + A: Eq + Hash + Clone, + B: Eq + Hash + Clone, +{ + pub fn new() -> Self { + Self { + a_to_b: FastHashMap::default(), + b_to_a: FastHashMap::default(), + } + } + + pub fn entry(&mut self, a: A) -> Entry<'_, A, B> { + let value = self.a_to_b.get(&a).cloned(); + Entry { + bimap: self, + key: a, + value, + } + } + + #[inline(always)] + pub fn insert(&mut self, a: A, b: B) { + if let Some(old_b) = self.a_to_b.insert(a.clone(), b.clone()) { + self.b_to_a.remove(&old_b); + } + if let Some(old_a) = self.b_to_a.insert(b.clone(), a.clone()) { + self.a_to_b.remove(&old_a); + } + } + + #[inline(always)] + pub fn get_by_a(&self, key: &A) -> Option<&B> { + self.a_to_b.get(key) + } + + #[inline(always)] + pub fn get_by_b(&self, key: &B) -> Option<&A> { + self.b_to_a.get(key) + } + + pub fn remove_by_a(&mut self, key: &A) -> Option<(A, B)> { + if let Some(b) = self.get_by_a(key).cloned() { + let a = self.get_by_b(&b).cloned().unwrap(); + self.a_to_b.remove(key); + self.b_to_a.remove(&b); + Some((a, b)) + } else { + None + } + } + + pub fn remove_by_b(&mut self, key: &B) -> Option<(A, B)> { + if let Some(a) = self.get_by_b(key).cloned() { + let b = self.get_by_a(&a).cloned().unwrap(); + self.b_to_a.remove(key); + self.a_to_b.remove(&a); + Some((a, b)) + } else { + None + } + } + + pub fn reserve(&mut self, additional: usize) { + self.a_to_b.reserve(additional); + self.b_to_a.reserve(additional); + } + + pub fn len(&self) -> usize { + self.a_to_b.len() + } + + pub fn is_empty(&self) -> bool { + self.a_to_b.is_empty() + } + + pub fn clear(&mut self) { + self.a_to_b.clear(); + self.b_to_a.clear(); + } + + pub fn 
contains_a(&self, key: &A) -> bool { + self.a_to_b.contains_key(key) + } + + pub fn contains_b(&self, key: &B) -> bool { + self.b_to_a.contains_key(key) + } + + pub fn keys_a(&self) -> impl Iterator { + self.a_to_b.keys() + } + + pub fn keys_b(&self) -> impl Iterator { + self.b_to_a.keys() + } + + pub fn iter_a_to_b(&self) -> impl Iterator { + self.a_to_b.iter() + } + + pub fn iter_b_to_a(&self) -> impl Iterator { + self.b_to_a.iter() + } +} + +impl<'a, A, B> Entry<'a, A, B> +where + A: Eq + Hash + Clone, + B: Eq + Hash + Clone, +{ + pub fn and_modify(mut self, f: F) -> Self + where + F: FnOnce(&mut B), + { + if let Some(ref mut value) = self.value { + f(value); + } + self + } + + pub fn or_insert(self, default: B) -> Result<&'a mut B, &'static str> { + self.or_insert_with(|| default) + } + + pub fn or_insert_with(mut self, default: F) -> Result<&'a mut B, &'static str> + where + F: FnOnce() -> B, + { + if self.value.is_none() { + self.value = Some(default()); + } + + let value = self.value.as_ref().ok_or("Value is None")?.clone(); + self.bimap.insert(self.key.clone(), value); + + self.bimap + .a_to_b + .get_mut(&self.key) + .ok_or("Key not found in a_to_b map") + } +} + +impl Default for BiMap +where + A: Eq + Hash + Clone, + B: Eq + Hash + Clone, +{ + fn default() -> Self { + Self::new() + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_bimap_basic_operations() { + let mut bimap = BiMap::new(); + bimap.insert("key1", "value1"); + + assert_eq!(bimap.get_by_a(&"key1"), Some(&"value1")); + assert_eq!(bimap.get_by_b(&"value1"), Some(&"key1")); + assert!(bimap.contains_a(&"key1")); + assert!(bimap.contains_b(&"value1")); + } + + #[test] + fn test_bimap_remove() { + let mut bimap = BiMap::new(); + bimap.insert(1, "one"); + + assert_eq!(bimap.remove_by_a(&1), Some((1, "one"))); + assert!(bimap.is_empty()); + } + + #[test] + fn test_bimap_entry() { + let mut bimap = BiMap::new(); + bimap.entry("key1").or_insert("value1").unwrap(); + + 
assert_eq!(bimap.get_by_a(&"key1"), Some(&"value1")); + } + + #[test] + fn test_bimap_iterators() { + let mut bimap = BiMap::new(); + bimap.insert(1, "one"); + bimap.insert(2, "two"); + + let a_keys: Vec<_> = bimap.keys_a().collect(); + assert!(a_keys.contains(&&1) && a_keys.contains(&&2)); + + let b_keys: Vec<_> = bimap.keys_b().collect(); + assert!(b_keys.contains(&&"one") && b_keys.contains(&&"two")); + } + + #[test] + fn test_bimap_duplicate_insert() { + let mut bimap = BiMap::new(); + bimap.insert(1, "one"); + bimap.insert(1, "new_one"); + bimap.insert(2, "one"); + + assert_eq!(bimap.get_by_a(&1), Some(&"new_one")); + assert_eq!(bimap.get_by_b(&"one"), Some(&2)); + assert_eq!(bimap.get_by_a(&2), Some(&"one")); + } +} diff --git a/utils/data_struct/src/data_sort.rs b/utils/data_struct/src/data_sort.rs new file mode 100644 index 0000000..2c7a452 --- /dev/null +++ b/utils/data_struct/src/data_sort.rs @@ -0,0 +1,232 @@ +/// Quick sort a slice with a custom comparison function +/// +/// # Arguments +/// * `arr` - The mutable slice to be sorted +/// * `inverse` - Sort direction: true for descending, false for ascending +/// * `compare` - Comparison function that returns -1, 0, or 1 indicating the relative order of two elements +pub fn quick_sort_with_cmp(arr: &mut [T], inverse: bool, compare: F) +where + F: Fn(&T, &T) -> i32, +{ + quick_sort_with_cmp_helper(arr, inverse, &compare); +} + +/// Quick sort for types that implement the PartialOrd trait +/// +/// # Arguments +/// * `arr` - The mutable slice to be sorted +/// * `inverse` - Sort direction: true for descending, false for ascending +pub fn quick_sort(arr: &mut [T], inverse: bool) { + quick_sort_with_cmp(arr, inverse, |a, b| { + if a < b { + -1 + } else if a > b { + 1 + } else { + 0 + } + }); +} + +fn quick_sort_with_cmp_helper(arr: &mut [T], inverse: bool, compare: &F) +where + F: Fn(&T, &T) -> i32, +{ + if arr.len() <= 1 { + return; + } + + let pivot_index = partition_with_cmp(arr, inverse, compare); + let 
(left, right) = arr.split_at_mut(pivot_index); + + quick_sort_with_cmp_helper(left, inverse, compare); + quick_sort_with_cmp_helper(&mut right[1..], inverse, compare); +} + +fn partition_with_cmp(arr: &mut [T], inverse: bool, compare: &F) -> usize +where + F: Fn(&T, &T) -> i32, +{ + let len = arr.len(); + let pivot_index = len / 2; + + arr.swap(pivot_index, len - 1); + + let mut i = 0; + for j in 0..len - 1 { + let cmp_result = compare(&arr[j], &arr[len - 1]); + let should_swap = if inverse { + cmp_result > 0 + } else { + cmp_result < 0 + }; + + if should_swap { + arr.swap(i, j); + i += 1; + } + } + + arr.swap(i, len - 1); + i +} + +#[cfg(test)] +pub mod sort_test { + use crate::data_sort::{quick_sort, quick_sort_with_cmp}; + + #[test] + fn test_quick_sort_ascending() { + let mut arr = [3, 1, 4, 1, 5, 9, 2, 6]; + quick_sort(&mut arr, false); + assert_eq!(arr, [1, 1, 2, 3, 4, 5, 6, 9]); + } + + #[test] + fn test_quick_sort_descending() { + let mut arr = [3, 1, 4, 1, 5, 9, 2, 6]; + quick_sort(&mut arr, true); + assert_eq!(arr, [9, 6, 5, 4, 3, 2, 1, 1]); + } + + #[test] + fn test_quick_sort_single() { + let mut arr = [42]; + quick_sort(&mut arr, false); + assert_eq!(arr, [42]); + } + + #[test] + fn test_quick_sort_already_sorted() { + let mut arr = [1, 2, 3, 4, 5]; + quick_sort(&mut arr, false); + assert_eq!(arr, [1, 2, 3, 4, 5]); + } + + #[test] + fn test_quick_sort_with_cmp_by_count() { + #[derive(Debug, PartialEq)] + struct WordCount { + word: String, + count: usize, + } + + let mut words = vec![ + WordCount { + word: "apple".to_string(), + count: 3, + }, + WordCount { + word: "banana".to_string(), + count: 1, + }, + WordCount { + word: "cherry".to_string(), + count: 5, + }, + WordCount { + word: "date".to_string(), + count: 2, + }, + ]; + + quick_sort_with_cmp(&mut words, false, |a, b| { + if a.count < b.count { + -1 + } else if a.count > b.count { + 1 + } else { + 0 + } + }); + + assert_eq!( + words, + vec![ + WordCount { + word: "banana".to_string(), + count: 1 
+ }, + WordCount { + word: "date".to_string(), + count: 2 + }, + WordCount { + word: "apple".to_string(), + count: 3 + }, + WordCount { + word: "cherry".to_string(), + count: 5 + }, + ] + ); + + quick_sort_with_cmp(&mut words, true, |a, b| { + if a.count < b.count { + -1 + } else if a.count > b.count { + 1 + } else { + 0 + } + }); + + assert_eq!( + words, + vec![ + WordCount { + word: "cherry".to_string(), + count: 5 + }, + WordCount { + word: "apple".to_string(), + count: 3 + }, + WordCount { + word: "date".to_string(), + count: 2 + }, + WordCount { + word: "banana".to_string(), + count: 1 + }, + ] + ); + } + + #[test] + fn test_quick_sort_with_cmp_by_first_letter() { + let mut words = vec!["zebra", "apple", "banana", "cherry", "date"]; + + quick_sort_with_cmp(&mut words, false, |a, b| { + let a_first = a.chars().next().unwrap(); + let b_first = b.chars().next().unwrap(); + + if a_first < b_first { + -1 + } else if a_first > b_first { + 1 + } else { + 0 + } + }); + + assert_eq!(words, vec!["apple", "banana", "cherry", "date", "zebra"]); + + quick_sort_with_cmp(&mut words, true, |a, b| { + let a_first = a.chars().next().unwrap(); + let b_first = b.chars().next().unwrap(); + + if a_first < b_first { + -1 + } else if a_first > b_first { + 1 + } else { + 0 + } + }); + + assert_eq!(words, vec!["zebra", "date", "cherry", "banana", "apple"]); + } +} diff --git a/utils/data_struct/src/lib.rs b/utils/data_struct/src/lib.rs new file mode 100644 index 0000000..47cc03c --- /dev/null +++ b/utils/data_struct/src/lib.rs @@ -0,0 +1,2 @@ +pub mod bi_map; +pub mod data_sort; diff --git a/utils/sha1_hash/Cargo.toml b/utils/sha1_hash/Cargo.toml new file mode 100644 index 0000000..e206efd --- /dev/null +++ b/utils/sha1_hash/Cargo.toml @@ -0,0 +1,9 @@ +[package] +name = "sha1_hash" +edition = "2024" +version.workspace = true + +[dependencies] +tokio = { version = "1.48", features = ["full"] } +sha1 = "0.10" +futures = "0.3" diff --git a/utils/sha1_hash/res/story.txt 
b/utils/sha1_hash/res/story.txt new file mode 100644 index 0000000..a91f467 --- /dev/null +++ b/utils/sha1_hash/res/story.txt @@ -0,0 +1,48 @@ +魏曹者,程序员也,发稀甚于代码。 +忽接神秘电话曰: +"贺君中彩,得长生之赐。" +魏曹冷笑曰:"吾命尚不及下版之期。" + +翌日果得U盘。 +接入电脑,弹窗示曰: +"点此确认,即获永生。" +魏曹径点"永拒"。 + +三月后,U盘自格其盘。 +进度条滞于九九。 +客服电话已成空号。 +魏曹乃知身可不死,然体内癌细胞亦得不灭。 + +遂谒主请辞。 +主曰:"巧甚,公司正欲优化。" +魏曹曰:"吾不死。" +主目骤亮:"则可007至司闭。" + +魏曹始试诸死法。 +坠楼,卧医三月,账单令其愿死。 +饮鸩,肝肾永损,然终不得死。 +终决卧轨。 + +择高铁最速者。 +司机探头曰:"兄台,吾亦不死身也。" +"此车已碾如君者二十人矣。" + +二人遂坐轨畔对饮。 +司机曰:"知最讽者何?" +"吾等永存,而所爱者皆逝矣。" + +魏曹忽得系统提示: +"侦得用户消极求生,将启工模。" +自是无日不毕KPI,否则遍尝绝症之苦。 + +是日对镜整寿衣。 +忽见顶生一丝乌发。 +泫然泣下,此兆示其将复活一轮回。 + +--- 忽忆DeepSeek尝作Footer曰: +"文成而Hash1验,若星河之固。" +遂取哈希值校之, +字符流转如天河倒泻, +终得"e3b0c44298fc1c14"之数。 +然文末数字竟阙如残月, +方知此篇亦遭永劫轮回。 diff --git a/utils/sha1_hash/res/story_crlf.sha1 b/utils/sha1_hash/res/story_crlf.sha1 new file mode 100644 index 0000000..bc8ad25 --- /dev/null +++ b/utils/sha1_hash/res/story_crlf.sha1 @@ -0,0 +1 @@ +40c1d848d8d6a14b9403ee022f2b28dabb3b3a71 diff --git a/utils/sha1_hash/res/story_lf.sha1 b/utils/sha1_hash/res/story_lf.sha1 new file mode 100644 index 0000000..c2e3213 --- /dev/null +++ b/utils/sha1_hash/res/story_lf.sha1 @@ -0,0 +1 @@ +6838aca280112635a2cbf93440f4c04212f58ee8 diff --git a/utils/sha1_hash/src/lib.rs b/utils/sha1_hash/src/lib.rs new file mode 100644 index 0000000..96a7897 --- /dev/null +++ b/utils/sha1_hash/src/lib.rs @@ -0,0 +1,257 @@ +use sha1::{Digest, Sha1}; +use std::path::{Path, PathBuf}; +use std::sync::Arc; +use tokio::fs::File; +use tokio::io::{AsyncReadExt, BufReader}; +use tokio::task; + +/// # Struct - Sha1Result +/// +/// Records SHA1 calculation results, including the file path and hash value +#[derive(Debug, Clone)] +pub struct Sha1Result { + pub file_path: PathBuf, + pub hash: String, +} + +/// Calc SHA1 hash of a string +pub fn calc_sha1_string>(input: S) -> String { + let mut hasher = Sha1::new(); + hasher.update(input.as_ref().as_bytes()); + let hash_result = hasher.finalize(); + + hash_result + .iter() + .map(|b| 
format!("{:02x}", b)) + .collect::() +} + +/// Calc SHA1 hash of a single file +pub async fn calc_sha1>( + path: P, + buffer_size: usize, +) -> Result> { + let file_path = path.as_ref().to_string_lossy().to_string(); + + // Open file asynchronously + let file = File::open(&path).await?; + let mut reader = BufReader::with_capacity(buffer_size, file); + let mut hasher = Sha1::new(); + let mut buffer = vec![0u8; buffer_size]; + + // Read file in chunks and update hash asynchronously + loop { + let n = reader.read(&mut buffer).await?; + if n == 0 { + break; + } + hasher.update(&buffer[..n]); + } + + let hash_result = hasher.finalize(); + + // Convert to hex string + let hash_hex = hash_result + .iter() + .map(|b| format!("{:02x}", b)) + .collect::(); + + Ok(Sha1Result { + file_path: file_path.into(), + hash: hash_hex, + }) +} + +/// Calc SHA1 hashes for multiple files using multi-threading +pub async fn calc_sha1_multi( + paths: I, + buffer_size: usize, +) -> Result, Box> +where + P: AsRef + Send + Sync + 'static, + I: IntoIterator, +{ + let buffer_size = Arc::new(buffer_size); + + // Collect all file paths + let file_paths: Vec

= paths.into_iter().collect(); + + if file_paths.is_empty() { + return Ok(Vec::new()); + } + + // Create tasks for each file + let tasks: Vec<_> = file_paths + .into_iter() + .map(|path| { + let buffer_size = Arc::clone(&buffer_size); + task::spawn(async move { calc_sha1(path, *buffer_size).await }) + }) + .collect(); + + // Execute tasks with concurrency limit using join_all + let results: Vec>> = + futures::future::join_all(tasks) + .await + .into_iter() + .map(|task_result| match task_result { + Ok(Ok(calc_result)) => Ok(calc_result), + Ok(Err(e)) => Err(e), + Err(e) => Err(Box::new(e) as Box), + }) + .collect(); + + // Check for any errors and collect successful results + let mut successful_results = Vec::new(); + for result in results { + match result { + Ok(success) => successful_results.push(success), + Err(e) => return Err(e), + } + } + + Ok(successful_results) +} + +#[cfg(test)] +mod tests { + use super::*; + use std::fs; + + #[test] + fn test_sha1_string() { + let test_string = "Hello, SHA1!"; + let hash = calc_sha1_string(test_string); + + let expected_hash = "de1c3daadc6f0f1626f4cf56c03e05a1e5d7b187"; + + assert_eq!( + hash, expected_hash, + "SHA1 hash should be consistent for same input" + ); + } + + #[test] + fn test_sha1_string_empty() { + let hash = calc_sha1_string(""); + + // SHA1 of empty string is "da39a3ee5e6b4b0d3255bfef95601890afd80709" + let expected_empty_hash = "da39a3ee5e6b4b0d3255bfef95601890afd80709"; + assert_eq!( + hash, expected_empty_hash, + "SHA1 hash mismatch for empty string" + ); + } + + #[tokio::test] + async fn test_sha1_accuracy() { + // Test file path relative to the crate root + let test_file_path = "res/story.txt"; + // Choose expected hash file based on platform + let expected_hash_path = if cfg!(windows) { + "res/story_crlf.sha1" + } else { + "res/story_lf.sha1" + }; + + // Calculate SHA1 hash + let result = calc_sha1(test_file_path, 8192) + .await + .expect("Failed to calculate SHA1"); + + // Read expected hash from 
file + let expected_hash = fs::read_to_string(expected_hash_path) + .expect("Failed to read expected hash file") + .trim() + .to_string(); + + // Verify the calculated hash matches expected hash + assert_eq!( + result.hash, expected_hash, + "SHA1 hash mismatch for test file" + ); + + println!("Test file: {}", result.file_path.display()); + println!("Calculated hash: {}", result.hash); + println!("Expected hash: {}", expected_hash); + println!( + "Platform: {}", + if cfg!(windows) { + "Windows" + } else { + "Unix/Linux" + } + ); + } + + #[tokio::test] + async fn test_sha1_empty_file() { + // Create a temporary empty file for testing + let temp_file = "test_empty.txt"; + fs::write(temp_file, "").expect("Failed to create empty test file"); + + let result = calc_sha1(temp_file, 4096) + .await + .expect("Failed to calculate SHA1 for empty file"); + + // SHA1 of empty string is "da39a3ee5e6b4b0d3255bfef95601890afd80709" + let expected_empty_hash = "da39a3ee5e6b4b0d3255bfef95601890afd80709"; + assert_eq!( + result.hash, expected_empty_hash, + "SHA1 hash mismatch for empty file" + ); + + // Clean up + fs::remove_file(temp_file).expect("Failed to remove temporary test file"); + } + + #[tokio::test] + async fn test_sha1_simple_text() { + // Create a temporary file with simple text + let temp_file = "test_simple.txt"; + let test_content = "Hello, SHA1!"; + fs::write(temp_file, test_content).expect("Failed to create simple test file"); + + let result = calc_sha1(temp_file, 4096) + .await + .expect("Failed to calculate SHA1 for simple text"); + + // Note: This test just verifies that the function works without errors + // The actual hash value is not critical for this test + + println!("Simple text test - Calculated hash: {}", result.hash); + + // Clean up + fs::remove_file(temp_file).expect("Failed to remove temporary test file"); + } + + #[tokio::test] + async fn test_sha1_multi_files() { + // Test multiple files calculation + let test_files = vec!["res/story.txt"]; + + let 
results = calc_sha1_multi(test_files, 8192) + .await + .expect("Failed to calculate SHA1 for multiple files"); + + assert_eq!(results.len(), 1, "Should have calculated hash for 1 file"); + + // Choose expected hash file based on platform + let expected_hash_path = if cfg!(windows) { + "res/story_crlf.sha1" + } else { + "res/story_lf.sha1" + }; + + // Read expected hash from file + let expected_hash = fs::read_to_string(expected_hash_path) + .expect("Failed to read expected hash file") + .trim() + .to_string(); + + assert_eq!( + results[0].hash, expected_hash, + "SHA1 hash mismatch in multi-file test" + ); + } +} diff --git a/utils/string_proc/Cargo.toml b/utils/string_proc/Cargo.toml new file mode 100644 index 0000000..5292339 --- /dev/null +++ b/utils/string_proc/Cargo.toml @@ -0,0 +1,7 @@ +[package] +name = "string_proc" +version = "0.1.0" +edition = "2024" + +[dependencies] +strip-ansi-escapes = "0.2.1" diff --git a/utils/string_proc/src/format_path.rs b/utils/string_proc/src/format_path.rs new file mode 100644 index 0000000..35689b8 --- /dev/null +++ b/utils/string_proc/src/format_path.rs @@ -0,0 +1,111 @@ +use std::path::{Path, PathBuf}; + +/// Format path str +pub fn format_path_str(path: impl Into) -> Result { + let path_str = path.into(); + let ends_with_slash = path_str.ends_with('/'); + + // ANSI Strip + let cleaned = strip_ansi_escapes::strip(&path_str); + let path_without_ansi = String::from_utf8(cleaned) + .map_err(|e| std::io::Error::new(std::io::ErrorKind::InvalidData, e))?; + + let path_with_forward_slash = path_without_ansi.replace('\\', "/"); + let mut result = String::new(); + let mut prev_char = '\0'; + + for c in path_with_forward_slash.chars() { + if c == '/' && prev_char == '/' { + continue; + } + result.push(c); + prev_char = c; + } + + let unfriendly_chars = ['*', '?', '"', '<', '>', '|']; + result = result + .chars() + .filter(|c| !unfriendly_chars.contains(c)) + .collect(); + + // Handle ".." 
path components + let path_buf = PathBuf::from(&result); + let normalized_path = normalize_path(&path_buf); + result = normalized_path.to_string_lossy().replace('\\', "/"); + + // Restore trailing slash if original path had one + if ends_with_slash && !result.ends_with('/') { + result.push('/'); + } + + // Special case: when result is only "./", return "" + if result == "./" { + return Ok(String::new()); + } + + Ok(result) +} + +/// Normalize path by resolving ".." components without requiring file system access +fn normalize_path(path: &Path) -> PathBuf { + let mut components = Vec::new(); + + for component in path.components() { + match component { + std::path::Component::ParentDir => { + if !components.is_empty() { + components.pop(); + } + } + std::path::Component::CurDir => { + // Skip current directory components + } + _ => { + components.push(component); + } + } + } + + if components.is_empty() { + PathBuf::from(".") + } else { + components.iter().collect() + } +} + +pub fn format_path(path: impl Into) -> Result { + let path_str = format_path_str(path.into().display().to_string())?; + Ok(PathBuf::from(path_str)) +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_format_path() -> Result<(), std::io::Error> { + assert_eq!(format_path_str("C:\\Users\\\\test")?, "C:/Users/test"); + + assert_eq!( + format_path_str("/path/with/*unfriendly?chars")?, + "/path/with/unfriendlychars" + ); + + assert_eq!(format_path_str("\x1b[31m/path\x1b[0m")?, "/path"); + assert_eq!(format_path_str("/home/user/dir/")?, "/home/user/dir/"); + assert_eq!( + format_path_str("/home/user/file.txt")?, + "/home/user/file.txt" + ); + assert_eq!( + format_path_str("/home/my_user/DOCS/JVCS_TEST/Workspace/../Vault/")?, + "/home/my_user/DOCS/JVCS_TEST/Vault/" + ); + + assert_eq!(format_path_str("./home/file.txt")?, "home/file.txt"); + assert_eq!(format_path_str("./home/path/")?, "home/path/"); + assert_eq!(format_path_str("./")?, ""); + + Ok(()) + } +} diff --git 
a/utils/string_proc/src/format_processer.rs b/utils/string_proc/src/format_processer.rs new file mode 100644 index 0000000..8d0a770 --- /dev/null +++ b/utils/string_proc/src/format_processer.rs @@ -0,0 +1,132 @@ +pub struct FormatProcesser { + content: Vec, +} + +impl From for FormatProcesser { + fn from(value: String) -> Self { + Self { + content: Self::process_string(value), + } + } +} + +impl From<&str> for FormatProcesser { + fn from(value: &str) -> Self { + Self { + content: Self::process_string(value.to_string()), + } + } +} + +impl FormatProcesser { + /// Process the string into an intermediate format + fn process_string(input: String) -> Vec { + let mut result = String::new(); + let mut prev_space = false; + + for c in input.chars() { + match c { + 'a'..='z' | 'A'..='Z' | '0'..='9' => { + result.push(c); + prev_space = false; + } + '_' | ',' | '.' | '-' | ' ' => { + if !prev_space { + result.push(' '); + prev_space = true; + } + } + _ => {} + } + } + + let mut processed = String::new(); + let mut chars = result.chars().peekable(); + + while let Some(c) = chars.next() { + processed.push(c); + if let Some(&next) = chars.peek() + && c.is_lowercase() + && next.is_uppercase() + { + processed.push(' '); + } + } + + processed + .to_lowercase() + .split_whitespace() + .map(|s| s.to_string()) + .collect() + } + + /// Convert to camelCase format (brewCoffee) + pub fn to_camel_case(&self) -> String { + let mut result = String::new(); + for (i, word) in self.content.iter().enumerate() { + if i == 0 { + result.push_str(&word.to_lowercase()); + } else { + let mut chars = word.chars(); + if let Some(first) = chars.next() { + result.push_str(&first.to_uppercase().collect::()); + result.push_str(&chars.collect::().to_lowercase()); + } + } + } + result + } + + /// Convert to PascalCase format (BrewCoffee) + pub fn to_pascal_case(&self) -> String { + let mut result = String::new(); + for word in &self.content { + let mut chars = word.chars(); + if let Some(first) = 
chars.next() { + result.push_str(&first.to_uppercase().collect::()); + result.push_str(&chars.collect::().to_lowercase()); + } + } + result + } + + /// Convert to kebab-case format (brew-coffee) + pub fn to_kebab_case(&self) -> String { + self.content.join("-").to_lowercase() + } + + /// Convert to snake_case format (brew_coffee) + pub fn to_snake_case(&self) -> String { + self.content.join("_").to_lowercase() + } + + /// Convert to dot.case format (brew.coffee) + pub fn to_dot_case(&self) -> String { + self.content.join(".").to_lowercase() + } + + /// Convert to Title Case format (Brew Coffee) + pub fn to_title_case(&self) -> String { + let mut result = String::new(); + for word in &self.content { + let mut chars = word.chars(); + if let Some(first) = chars.next() { + result.push_str(&first.to_uppercase().collect::()); + result.push_str(&chars.collect::().to_lowercase()); + } + result.push(' '); + } + result.pop(); + result + } + + /// Convert to lower case format (brew coffee) + pub fn to_lower_case(&self) -> String { + self.content.join(" ").to_lowercase() + } + + /// Convert to UPPER CASE format (BREW COFFEE) + pub fn to_upper_case(&self) -> String { + self.content.join(" ").to_uppercase() + } +} diff --git a/utils/string_proc/src/lib.rs b/utils/string_proc/src/lib.rs new file mode 100644 index 0000000..76588c1 --- /dev/null +++ b/utils/string_proc/src/lib.rs @@ -0,0 +1,50 @@ +pub mod format_path; +pub mod format_processer; +pub mod macros; +pub mod simple_processer; + +#[cfg(test)] +mod tests { + use crate::format_processer::FormatProcesser; + + #[test] + fn test_processer() { + let test_cases = vec![ + ("brew_coffee", "brewCoffee"), + ("brew, coffee", "brewCoffee"), + ("brew-coffee", "brewCoffee"), + ("Brew.Coffee", "brewCoffee"), + ("bRewCofFee", "bRewCofFee"), + ("brewCoffee", "brewCoffee"), + ("b&rewCoffee", "brewCoffee"), + ("BrewCoffee", "brewCoffee"), + ("brew.coffee", "brewCoffee"), + ("Brew_Coffee", "brewCoffee"), + ("BREW COFFEE", "brewCoffee"), + ]; 
+ + for (input, expected) in test_cases { + let processor = FormatProcesser::from(input); + assert_eq!( + processor.to_camel_case(), + expected, + "Failed for input: '{}'", + input + ); + } + } + + #[test] + fn test_conversions() { + let processor = FormatProcesser::from("brewCoffee"); + + assert_eq!(processor.to_upper_case(), "BREW COFFEE"); + assert_eq!(processor.to_lower_case(), "brew coffee"); + assert_eq!(processor.to_title_case(), "Brew Coffee"); + assert_eq!(processor.to_dot_case(), "brew.coffee"); + assert_eq!(processor.to_snake_case(), "brew_coffee"); + assert_eq!(processor.to_kebab_case(), "brew-coffee"); + assert_eq!(processor.to_pascal_case(), "BrewCoffee"); + assert_eq!(processor.to_camel_case(), "brewCoffee"); + } +} diff --git a/utils/string_proc/src/macros.rs b/utils/string_proc/src/macros.rs new file mode 100644 index 0000000..135268e --- /dev/null +++ b/utils/string_proc/src/macros.rs @@ -0,0 +1,63 @@ +#[macro_export] +macro_rules! camel_case { + ($input:expr) => {{ + use string_proc::format_processer::FormatProcesser; + FormatProcesser::from($input).to_camel_case() + }}; +} + +#[macro_export] +macro_rules! upper_case { + ($input:expr) => {{ + use string_proc::format_processer::FormatProcesser; + FormatProcesser::from($input).to_upper_case() + }}; +} + +#[macro_export] +macro_rules! lower_case { + ($input:expr) => {{ + use string_proc::format_processer::FormatProcesser; + FormatProcesser::from($input).to_lower_case() + }}; +} + +#[macro_export] +macro_rules! title_case { + ($input:expr) => {{ + use string_proc::format_processer::FormatProcesser; + FormatProcesser::from($input).to_title_case() + }}; +} + +#[macro_export] +macro_rules! dot_case { + ($input:expr) => {{ + use string_proc::format_processer::FormatProcesser; + FormatProcesser::from($input).to_dot_case() + }}; +} + +#[macro_export] +macro_rules! 
snake_case { + ($input:expr) => {{ + use string_proc::format_processer::FormatProcesser; + FormatProcesser::from($input).to_snake_case() + }}; +} + +#[macro_export] +macro_rules! kebab_case { + ($input:expr) => {{ + use string_proc::format_processer::FormatProcesser; + FormatProcesser::from($input).to_kebab_case() + }}; +} + +#[macro_export] +macro_rules! pascal_case { + ($input:expr) => {{ + use string_proc::format_processer::FormatProcesser; + FormatProcesser::from($input).to_pascal_case() + }}; +} diff --git a/utils/string_proc/src/simple_processer.rs b/utils/string_proc/src/simple_processer.rs new file mode 100644 index 0000000..2de5dfc --- /dev/null +++ b/utils/string_proc/src/simple_processer.rs @@ -0,0 +1,15 @@ +/// Sanitizes a file path by replacing special characters with underscores. +/// +/// This function takes a file path as input and returns a sanitized version +/// where characters that are not allowed in file paths (such as path separators +/// and other reserved characters) are replaced with underscores. +pub fn sanitize_file_path>(path: P) -> String { + let path_str = path.as_ref(); + path_str + .chars() + .map(|c| match c { + '/' | '\\' | ':' | '*' | '?' 
| '"' | '<' | '>' | '|' => '_', + _ => c, + }) + .collect() +} diff --git a/utils/tcp_connection/Cargo.toml b/utils/tcp_connection/Cargo.toml new file mode 100644 index 0000000..da258be --- /dev/null +++ b/utils/tcp_connection/Cargo.toml @@ -0,0 +1,28 @@ +[package] +name = "tcp_connection" +edition = "2024" +version.workspace = true + +[dependencies] +tokio = { version = "1.48.0", features = ["full"] } + +# Serialization +serde = { version = "1.0.228", features = ["derive"] } +serde_json = "1.0.145" +rmp-serde = "1.3.0" + +# Error handling +thiserror = "2.0.17" + +# Uuid & Random +uuid = "1.18.1" + +# Crypto +rsa = { version = "0.9", features = ["pkcs5", "sha2"] } +ed25519-dalek = "3.0.0-pre.1" +ring = "0.17.14" +rand = "0.10.0-rc.0" +base64 = "0.22.1" +pem = "3.0.6" +crc = "3.3.0" +blake3 = "1.8.2" diff --git a/utils/tcp_connection/src/error.rs b/utils/tcp_connection/src/error.rs new file mode 100644 index 0000000..32d06cc --- /dev/null +++ b/utils/tcp_connection/src/error.rs @@ -0,0 +1,122 @@ +use std::io; +use thiserror::Error; + +#[derive(Error, Debug, Clone)] +pub enum TcpTargetError { + #[error("Authentication failed: {0}")] + Authentication(String), + + #[error("Reference sheet not allowed: {0}")] + ReferenceSheetNotAllowed(String), + + #[error("Cryptographic error: {0}")] + Crypto(String), + + #[error("File operation error: {0}")] + File(String), + + #[error("I/O error: {0}")] + Io(String), + + #[error("Invalid configuration: {0}")] + Config(String), + + #[error("Locked: {0}")] + Locked(String), + + #[error("Network error: {0}")] + Network(String), + + #[error("No result: {0}")] + NoResult(String), + + #[error("Not found: {0}")] + NotFound(String), + + #[error("Not local machine: {0}")] + NotLocal(String), + + #[error("Not remote machine: {0}")] + NotRemote(String), + + #[error("Pool already exists: {0}")] + PoolAlreadyExists(String), + + #[error("Protocol error: {0}")] + Protocol(String), + + #[error("Serialization error: {0}")] + Serialization(String), + 
+ #[error("Timeout: {0}")] + Timeout(String), + + #[error("Unsupported operation: {0}")] + Unsupported(String), +} + +impl From for TcpTargetError { + fn from(error: io::Error) -> Self { + TcpTargetError::Io(error.to_string()) + } +} + +impl From for TcpTargetError { + fn from(error: serde_json::Error) -> Self { + TcpTargetError::Serialization(error.to_string()) + } +} + +impl From<&str> for TcpTargetError { + fn from(value: &str) -> Self { + TcpTargetError::Protocol(value.to_string()) + } +} + +impl From for TcpTargetError { + fn from(value: String) -> Self { + TcpTargetError::Protocol(value) + } +} + +impl From for TcpTargetError { + fn from(error: rsa::errors::Error) -> Self { + TcpTargetError::Crypto(error.to_string()) + } +} + +impl From for TcpTargetError { + fn from(error: ed25519_dalek::SignatureError) -> Self { + TcpTargetError::Crypto(error.to_string()) + } +} + +impl From for TcpTargetError { + fn from(error: ring::error::Unspecified) -> Self { + TcpTargetError::Crypto(error.to_string()) + } +} + +impl From for TcpTargetError { + fn from(error: base64::DecodeError) -> Self { + TcpTargetError::Serialization(error.to_string()) + } +} + +impl From for TcpTargetError { + fn from(error: pem::PemError) -> Self { + TcpTargetError::Crypto(error.to_string()) + } +} + +impl From for TcpTargetError { + fn from(error: rmp_serde::encode::Error) -> Self { + TcpTargetError::Serialization(error.to_string()) + } +} + +impl From for TcpTargetError { + fn from(error: rmp_serde::decode::Error) -> Self { + TcpTargetError::Serialization(error.to_string()) + } +} diff --git a/utils/tcp_connection/src/instance.rs b/utils/tcp_connection/src/instance.rs new file mode 100644 index 0000000..8e6886c --- /dev/null +++ b/utils/tcp_connection/src/instance.rs @@ -0,0 +1,542 @@ +use std::{path::Path, time::Duration}; + +use serde::Serialize; +use tokio::{ + fs::{File, OpenOptions}, + io::{AsyncBufReadExt, AsyncReadExt, AsyncWriteExt, BufReader, BufWriter}, + net::TcpStream, +}; + +use 
ring::signature::{self}; + +use crate::error::TcpTargetError; + +const DEFAULT_CHUNK_SIZE: usize = 4096; +const DEFAULT_TIMEOUT_SECS: u64 = 10; + +const ECDSA_P256_SHA256_ASN1_SIGNING: &signature::EcdsaSigningAlgorithm = + &signature::ECDSA_P256_SHA256_ASN1_SIGNING; +const ECDSA_P384_SHA384_ASN1_SIGNING: &signature::EcdsaSigningAlgorithm = + &signature::ECDSA_P384_SHA384_ASN1_SIGNING; + +#[derive(Debug, Clone)] +pub struct ConnectionConfig { + pub chunk_size: usize, + pub timeout_secs: u64, + pub enable_crc_validation: bool, +} + +impl Default for ConnectionConfig { + fn default() -> Self { + Self { + chunk_size: DEFAULT_CHUNK_SIZE, + timeout_secs: DEFAULT_TIMEOUT_SECS, + enable_crc_validation: false, + } + } +} + +pub struct ConnectionInstance { + pub(crate) stream: TcpStream, + config: ConnectionConfig, +} + +impl From for ConnectionInstance { + fn from(stream: TcpStream) -> Self { + Self { + stream, + config: ConnectionConfig::default(), + } + } +} + +impl ConnectionInstance { + /// Create a new ConnectionInstance with custom configuration + pub fn with_config(stream: TcpStream, config: ConnectionConfig) -> Self { + Self { stream, config } + } + + /// Get a reference to the current configuration + pub fn config(&self) -> &ConnectionConfig { + &self.config + } + + /// Get a mutable reference to the current configuration + pub fn config_mut(&mut self) -> &mut ConnectionConfig { + &mut self.config + } + /// Serialize data and write to the target machine + pub async fn write(&mut self, data: Data) -> Result<(), TcpTargetError> + where + Data: Default + Serialize, + { + let Ok(json_text) = serde_json::to_string(&data) else { + return Err(TcpTargetError::Serialization( + "Serialize failed.".to_string(), + )); + }; + Self::write_text(self, json_text).await?; + Ok(()) + } + + /// Serialize data to MessagePack and write to the target machine + pub async fn write_msgpack(&mut self, data: Data) -> Result<(), TcpTargetError> + where + Data: Serialize, + { + let msgpack_data 
= rmp_serde::to_vec(&data)?; + let len = msgpack_data.len() as u32; + + self.stream.write_all(&len.to_be_bytes()).await?; + self.stream.write_all(&msgpack_data).await?; + Ok(()) + } + + /// Read data from target machine and deserialize from MessagePack + pub async fn read_msgpack(&mut self) -> Result + where + Data: serde::de::DeserializeOwned, + { + let mut len_buf = [0u8; 4]; + self.stream.read_exact(&mut len_buf).await?; + let len = u32::from_be_bytes(len_buf) as usize; + + let mut buffer = vec![0; len]; + self.stream.read_exact(&mut buffer).await?; + + let data = rmp_serde::from_slice(&buffer)?; + Ok(data) + } + + /// Read data from target machine and deserialize + pub async fn read(&mut self) -> Result + where + Data: Default + serde::de::DeserializeOwned, + { + let Ok(json_text) = Self::read_text(self).await else { + return Err(TcpTargetError::Io("Read failed.".to_string())); + }; + let Ok(deser_obj) = serde_json::from_str::(&json_text) else { + return Err(TcpTargetError::Serialization( + "Deserialize failed.".to_string(), + )); + }; + Ok(deser_obj) + } + + /// Serialize data and write to the target machine + pub async fn write_large(&mut self, data: Data) -> Result<(), TcpTargetError> + where + Data: Default + Serialize, + { + let Ok(json_text) = serde_json::to_string(&data) else { + return Err(TcpTargetError::Serialization( + "Serialize failed.".to_string(), + )); + }; + Self::write_large_text(self, json_text).await?; + Ok(()) + } + + /// Read data from target machine and deserialize + pub async fn read_large( + &mut self, + buffer_size: impl Into, + ) -> Result + where + Data: Default + serde::de::DeserializeOwned, + { + let Ok(json_text) = Self::read_large_text(self, buffer_size).await else { + return Err(TcpTargetError::Io("Read failed.".to_string())); + }; + let Ok(deser_obj) = serde_json::from_str::(&json_text) else { + return Err(TcpTargetError::Serialization( + "Deserialize failed.".to_string(), + )); + }; + Ok(deser_obj) + } + + /// Write text to 
the target machine + pub async fn write_text(&mut self, text: impl Into) -> Result<(), TcpTargetError> { + let text = text.into(); + let bytes = text.as_bytes(); + let len = bytes.len() as u32; + + self.stream.write_all(&len.to_be_bytes()).await?; + match self.stream.write_all(bytes).await { + Ok(_) => Ok(()), + Err(err) => Err(TcpTargetError::Io(err.to_string())), + } + } + + /// Read text from the target machine + pub async fn read_text(&mut self) -> Result { + let mut len_buf = [0u8; 4]; + self.stream.read_exact(&mut len_buf).await?; + let len = u32::from_be_bytes(len_buf) as usize; + + let mut buffer = vec![0; len]; + self.stream.read_exact(&mut buffer).await?; + + match String::from_utf8(buffer) { + Ok(text) => Ok(text), + Err(err) => Err(TcpTargetError::Serialization(format!( + "Invalid UTF-8 sequence: {}", + err + ))), + } + } + + /// Write large text to the target machine (chunked) + pub async fn write_large_text( + &mut self, + text: impl Into, + ) -> Result<(), TcpTargetError> { + let text = text.into(); + let bytes = text.as_bytes(); + let mut offset = 0; + + while offset < bytes.len() { + let chunk = &bytes[offset..]; + let written = match self.stream.write(chunk).await { + Ok(n) => n, + Err(err) => return Err(TcpTargetError::Io(err.to_string())), + }; + offset += written; + } + + Ok(()) + } + + /// Read large text from the target machine (chunked) + pub async fn read_large_text( + &mut self, + chunk_size: impl Into, + ) -> Result { + let chunk_size = chunk_size.into() as usize; + let mut buffer = Vec::new(); + let mut chunk_buf = vec![0; chunk_size]; + + loop { + match self.stream.read(&mut chunk_buf).await { + Ok(0) => break, // EOF + Ok(n) => { + buffer.extend_from_slice(&chunk_buf[..n]); + } + Err(err) => return Err(TcpTargetError::Io(err.to_string())), + } + } + + Ok(String::from_utf8_lossy(&buffer).to_string()) + } + + /// Write large MessagePack data to the target machine (chunked) + pub async fn write_large_msgpack( + &mut self, + data: Data, + 
chunk_size: impl Into, + ) -> Result<(), TcpTargetError> + where + Data: Serialize, + { + let msgpack_data = rmp_serde::to_vec(&data)?; + let chunk_size = chunk_size.into() as usize; + let len = msgpack_data.len() as u32; + + // Write total length first + self.stream.write_all(&len.to_be_bytes()).await?; + + // Write data in chunks + let mut offset = 0; + while offset < msgpack_data.len() { + let end = std::cmp::min(offset + chunk_size, msgpack_data.len()); + let chunk = &msgpack_data[offset..end]; + match self.stream.write(chunk).await { + Ok(n) => offset += n, + Err(err) => return Err(TcpTargetError::Io(err.to_string())), + } + } + + Ok(()) + } + + /// Read large MessagePack data from the target machine (chunked) + pub async fn read_large_msgpack( + &mut self, + chunk_size: impl Into, + ) -> Result + where + Data: serde::de::DeserializeOwned, + { + let chunk_size = chunk_size.into() as usize; + + // Read total length first + let mut len_buf = [0u8; 4]; + self.stream.read_exact(&mut len_buf).await?; + let total_len = u32::from_be_bytes(len_buf) as usize; + + // Read data in chunks + let mut buffer = Vec::with_capacity(total_len); + let mut remaining = total_len; + let mut chunk_buf = vec![0; chunk_size]; + + while remaining > 0 { + let read_size = std::cmp::min(chunk_size, remaining); + let chunk = &mut chunk_buf[..read_size]; + + match self.stream.read_exact(chunk).await { + Ok(_) => { + buffer.extend_from_slice(chunk); + remaining -= read_size; + } + Err(err) => return Err(TcpTargetError::Io(err.to_string())), + } + } + + let data = rmp_serde::from_slice(&buffer)?; + Ok(data) + } + + /// Write file to target machine. 
+ pub async fn write_file(&mut self, file_path: impl AsRef) -> Result<(), TcpTargetError> { + let path = file_path.as_ref(); + + // Validate file + if !path.exists() { + return Err(TcpTargetError::File(format!( + "File not found: {}", + path.display() + ))); + } + if path.is_dir() { + return Err(TcpTargetError::File(format!( + "Path is directory: {}", + path.display() + ))); + } + + // Open file and get metadata + let mut file = File::open(path).await?; + let file_size = file.metadata().await?.len(); + + // Send file header (version + size + crc) + self.stream.write_all(&1u64.to_be_bytes()).await?; + self.stream.write_all(&file_size.to_be_bytes()).await?; + + // Calculate and send CRC32 if enabled + let file_crc = if self.config.enable_crc_validation { + let crc32 = crc::Crc::::new(&crc::CRC_32_ISO_HDLC); + let mut crc_calculator = crc32.digest(); + + let mut temp_reader = + BufReader::with_capacity(self.config.chunk_size, File::open(path).await?); + let mut temp_buffer = vec![0u8; self.config.chunk_size]; + let mut temp_bytes_read = 0; + + while temp_bytes_read < file_size { + let bytes_to_read = + (file_size - temp_bytes_read).min(self.config.chunk_size as u64) as usize; + temp_reader + .read_exact(&mut temp_buffer[..bytes_to_read]) + .await?; + crc_calculator.update(&temp_buffer[..bytes_to_read]); + temp_bytes_read += bytes_to_read as u64; + } + + crc_calculator.finalize() + } else { + 0 + }; + + self.stream.write_all(&file_crc.to_be_bytes()).await?; + + // If file size is 0, skip content transfer + if file_size == 0 { + self.stream.flush().await?; + + // Wait for receiver confirmation + let mut ack = [0u8; 1]; + tokio::time::timeout( + Duration::from_secs(self.config.timeout_secs), + self.stream.read_exact(&mut ack), + ) + .await + .map_err(|_| TcpTargetError::Timeout("Ack timeout".to_string()))??; + + if ack[0] != 1 { + return Err(TcpTargetError::Protocol( + "Receiver verification failed".to_string(), + )); + } + + return Ok(()); + } + + // Transfer file 
content + let mut reader = BufReader::with_capacity(self.config.chunk_size, &mut file); + let mut bytes_sent = 0; + + while bytes_sent < file_size { + let buffer = reader.fill_buf().await?; + if buffer.is_empty() { + break; + } + + let chunk_size = buffer.len().min((file_size - bytes_sent) as usize); + self.stream.write_all(&buffer[..chunk_size]).await?; + reader.consume(chunk_size); + + bytes_sent += chunk_size as u64; + } + + // Verify transfer completion + if bytes_sent != file_size { + return Err(TcpTargetError::File(format!( + "Transfer incomplete: expected {} bytes, sent {} bytes", + file_size, bytes_sent + ))); + } + + self.stream.flush().await?; + + // Wait for receiver confirmation + let mut ack = [0u8; 1]; + tokio::time::timeout( + Duration::from_secs(self.config.timeout_secs), + self.stream.read_exact(&mut ack), + ) + .await + .map_err(|_| TcpTargetError::Timeout("Ack timeout".to_string()))??; + + if ack[0] != 1 { + return Err(TcpTargetError::Protocol( + "Receiver verification failed".to_string(), + )); + } + + Ok(()) + } + + /// Read file from target machine + pub async fn read_file(&mut self, save_path: impl AsRef) -> Result<(), TcpTargetError> { + let path = save_path.as_ref(); + // Create CRC instance at function scope to ensure proper lifetime + let crc_instance = crc::Crc::::new(&crc::CRC_32_ISO_HDLC); + + // Make sure parent directory exists + if let Some(parent) = path.parent() + && !parent.exists() + { + tokio::fs::create_dir_all(parent).await?; + } + + // Read file header (version + size + crc) + let mut version_buf = [0u8; 8]; + self.stream.read_exact(&mut version_buf).await?; + let version = u64::from_be_bytes(version_buf); + if version != 1 { + return Err(TcpTargetError::Protocol( + "Unsupported transfer version".to_string(), + )); + } + + let mut size_buf = [0u8; 8]; + self.stream.read_exact(&mut size_buf).await?; + let file_size = u64::from_be_bytes(size_buf); + + let mut expected_crc_buf = [0u8; 4]; + self.stream.read_exact(&mut 
expected_crc_buf).await?; + let expected_crc = u32::from_be_bytes(expected_crc_buf); + if file_size == 0 { + // Create empty file and return early + let _file = OpenOptions::new() + .write(true) + .create(true) + .truncate(true) + .open(path) + .await?; + // Send confirmation + self.stream.write_all(&[1u8]).await?; + self.stream.flush().await?; + return Ok(()); + } + + // Prepare output file + let file = OpenOptions::new() + .write(true) + .create(true) + .truncate(true) + .open(path) + .await?; + let mut writer = BufWriter::with_capacity(self.config.chunk_size, file); + + // Receive file content with CRC calculation if enabled + let mut bytes_received = 0; + let mut buffer = vec![0u8; self.config.chunk_size]; + let mut crc_calculator = if self.config.enable_crc_validation { + Some(crc_instance.digest()) + } else { + None + }; + + while bytes_received < file_size { + let bytes_to_read = + (file_size - bytes_received).min(self.config.chunk_size as u64) as usize; + let chunk = &mut buffer[..bytes_to_read]; + + self.stream.read_exact(chunk).await?; + + writer.write_all(chunk).await?; + + // Update CRC if validation is enabled + if let Some(ref mut crc) = crc_calculator { + crc.update(chunk); + } + + bytes_received += bytes_to_read as u64; + } + + // Verify transfer completion + if bytes_received != file_size { + return Err(TcpTargetError::File(format!( + "Transfer incomplete: expected {} bytes, received {} bytes", + file_size, bytes_received + ))); + } + + writer.flush().await?; + + // Validate CRC if enabled + if self.config.enable_crc_validation + && let Some(crc_calculator) = crc_calculator + { + let actual_crc = crc_calculator.finalize(); + if actual_crc != expected_crc && expected_crc != 0 { + return Err(TcpTargetError::File(format!( + "CRC validation failed: expected {:08x}, got {:08x}", + expected_crc, actual_crc + ))); + } + } + + // Final flush and sync + writer.flush().await?; + writer.into_inner().sync_all().await?; + + // Verify completion + if 
bytes_received != file_size { + let _ = tokio::fs::remove_file(path).await; + return Err(TcpTargetError::File(format!( + "Transfer incomplete: expected {} bytes, received {} bytes", + file_size, bytes_received + ))); + } + + // Send confirmation + self.stream.write_all(&[1u8]).await?; + self.stream.flush().await?; + + Ok(()) + } +} diff --git a/utils/tcp_connection/src/instance_challenge.rs b/utils/tcp_connection/src/instance_challenge.rs new file mode 100644 index 0000000..3a7f6a3 --- /dev/null +++ b/utils/tcp_connection/src/instance_challenge.rs @@ -0,0 +1,311 @@ +use std::path::Path; + +use rand::TryRngCore; +use rsa::{ + RsaPrivateKey, RsaPublicKey, + pkcs1::{DecodeRsaPrivateKey, DecodeRsaPublicKey}, + sha2, +}; +use tokio::io::{AsyncReadExt, AsyncWriteExt}; + +use ed25519_dalek::{Signature, Signer, SigningKey, Verifier, VerifyingKey}; +use ring::rand::SystemRandom; +use ring::signature::{ + self, ECDSA_P256_SHA256_ASN1, ECDSA_P384_SHA384_ASN1, EcdsaKeyPair, RSA_PKCS1_2048_8192_SHA256, + UnparsedPublicKey, +}; + +use crate::{error::TcpTargetError, instance::ConnectionInstance}; + +const ECDSA_P256_SHA256_ASN1_SIGNING: &signature::EcdsaSigningAlgorithm = + &signature::ECDSA_P256_SHA256_ASN1_SIGNING; +const ECDSA_P384_SHA384_ASN1_SIGNING: &signature::EcdsaSigningAlgorithm = + &signature::ECDSA_P384_SHA384_ASN1_SIGNING; + +impl ConnectionInstance { + /// Initiates a challenge to the target machine to verify connection security + /// + /// This method performs a cryptographic challenge-response authentication: + /// 1. Generates a random 32-byte challenge + /// 2. Sends the challenge to the target machine + /// 3. Receives a digital signature of the challenge + /// 4. 
Verifies the signature using the appropriate public key + /// + /// # Arguments + /// * `public_key_dir` - Directory containing public key files for verification + /// + /// # Returns + /// * `Ok((true, "KeyId"))` - Challenge verification successful + /// * `Ok((false, "KeyId"))` - Challenge verification failed + /// * `Err(TcpTargetError)` - Error during challenge process + pub async fn challenge( + &mut self, + public_key_dir: impl AsRef, + ) -> Result<(bool, String), TcpTargetError> { + // Generate random challenge + let mut challenge = [0u8; 32]; + rand::rngs::OsRng + .try_fill_bytes(&mut challenge) + .map_err(|e| { + TcpTargetError::Crypto(format!("Failed to generate random challenge: {}", e)) + })?; + + // Send challenge to target + self.stream.write_all(&challenge).await?; + self.stream.flush().await?; + + // Read signature from target + let mut signature = Vec::new(); + let mut signature_len_buf = [0u8; 4]; + self.stream.read_exact(&mut signature_len_buf).await?; + + let signature_len = u32::from_be_bytes(signature_len_buf) as usize; + signature.resize(signature_len, 0); + self.stream.read_exact(&mut signature).await?; + + // Read key identifier from target to identify which public key to use + let mut key_id_len_buf = [0u8; 4]; + self.stream.read_exact(&mut key_id_len_buf).await?; + let key_id_len = u32::from_be_bytes(key_id_len_buf) as usize; + + let mut key_id_buf = vec![0u8; key_id_len]; + self.stream.read_exact(&mut key_id_buf).await?; + let key_id = String::from_utf8(key_id_buf) + .map_err(|e| TcpTargetError::Crypto(format!("Invalid key identifier: {}", e)))?; + + // Load appropriate public key + let public_key_path = public_key_dir.as_ref().join(format!("{}.pem", key_id)); + if !public_key_path.exists() { + return Ok((false, key_id)); + } + + let public_key_pem = tokio::fs::read_to_string(&public_key_path).await?; + + // Try to verify with different key types + let verified = if let Ok(rsa_key) = RsaPublicKey::from_pkcs1_pem(&public_key_pem) { + let 
padding = rsa::pkcs1v15::Pkcs1v15Sign::new::(); + rsa_key.verify(padding, &challenge, &signature).is_ok() + } else if let Ok(ed25519_key) = + VerifyingKey::from_bytes(&parse_ed25519_public_key(&public_key_pem)) + { + if signature.len() == 64 { + let sig_bytes: [u8; 64] = signature.as_slice().try_into().map_err(|_| { + TcpTargetError::Crypto("Invalid signature length for Ed25519".to_string()) + })?; + let sig = Signature::from_bytes(&sig_bytes); + ed25519_key.verify(&challenge, &sig).is_ok() + } else { + false + } + } else if let Ok(dsa_key_info) = parse_dsa_public_key(&public_key_pem) { + verify_dsa_signature(&dsa_key_info, &challenge, &signature) + } else { + false + }; + + Ok((verified, key_id)) + } + + /// Accepts a challenge from the target machine to verify connection security + /// + /// This method performs a cryptographic challenge-response authentication: + /// 1. Receives a random 32-byte challenge from the target machine + /// 2. Signs the challenge using the appropriate private key + /// 3. Sends the digital signature back to the target machine + /// 4. 
Sends the key identifier for public key verification + /// + /// # Arguments + /// * `private_key_file` - Path to the private key file for signing + /// * `verify_public_key` - Key identifier for public key verification + /// + /// # Returns + /// * `Ok(true)` - Challenge response sent successfully + /// * `Ok(false)` - Private key format not supported + /// * `Err(TcpTargetError)` - Error during challenge response process + pub async fn accept_challenge( + &mut self, + private_key_file: impl AsRef, + verify_public_key: &str, + ) -> Result { + // Read challenge from initiator + let mut challenge = [0u8; 32]; + self.stream.read_exact(&mut challenge).await?; + + // Load private key + let private_key_pem = tokio::fs::read_to_string(&private_key_file) + .await + .map_err(|e| { + TcpTargetError::NotFound(format!( + "Read private key \"{}\" failed: \"{}\"", + private_key_file + .as_ref() + .display() + .to_string() + .split("/") + .last() + .unwrap_or("UNKNOWN"), + e + )) + })?; + + // Sign the challenge with supported key types + let signature = if let Ok(rsa_key) = RsaPrivateKey::from_pkcs1_pem(&private_key_pem) { + let padding = rsa::pkcs1v15::Pkcs1v15Sign::new::(); + rsa_key.sign(padding, &challenge)? + } else if let Ok(ed25519_key) = parse_ed25519_private_key(&private_key_pem) { + ed25519_key.sign(&challenge).to_bytes().to_vec() + } else if let Ok(dsa_key_info) = parse_dsa_private_key(&private_key_pem) { + sign_with_dsa(&dsa_key_info, &challenge)? 
+ } else { + return Ok(false); + }; + + // Send signature length and signature + let signature_len = signature.len() as u32; + self.stream.write_all(&signature_len.to_be_bytes()).await?; + self.stream.flush().await?; + self.stream.write_all(&signature).await?; + self.stream.flush().await?; + + // Send key identifier for public key identification + let key_id_bytes = verify_public_key.as_bytes(); + let key_id_len = key_id_bytes.len() as u32; + self.stream.write_all(&key_id_len.to_be_bytes()).await?; + self.stream.flush().await?; + self.stream.write_all(key_id_bytes).await?; + self.stream.flush().await?; + + Ok(true) + } +} + +/// Parse Ed25519 public key from PEM format +fn parse_ed25519_public_key(pem: &str) -> [u8; 32] { + // Robust parsing for Ed25519 public key using pem crate + let mut key_bytes = [0u8; 32]; + + if let Ok(pem_data) = pem::parse(pem) + && pem_data.tag() == "PUBLIC KEY" + && pem_data.contents().len() >= 32 + { + let contents = pem_data.contents(); + key_bytes.copy_from_slice(&contents[contents.len() - 32..]); + } + key_bytes +} + +/// Parse Ed25519 private key from PEM format +fn parse_ed25519_private_key(pem: &str) -> Result { + if let Ok(pem_data) = pem::parse(pem) + && pem_data.tag() == "PRIVATE KEY" + && pem_data.contents().len() >= 32 + { + let contents = pem_data.contents(); + let mut seed = [0u8; 32]; + seed.copy_from_slice(&contents[contents.len() - 32..]); + return Ok(SigningKey::from_bytes(&seed)); + } + Err(TcpTargetError::Crypto( + "Invalid Ed25519 private key format".to_string(), + )) +} + +/// Parse DSA public key information from PEM +fn parse_dsa_public_key( + pem: &str, +) -> Result<(&'static dyn signature::VerificationAlgorithm, Vec), TcpTargetError> { + if let Ok(pem_data) = pem::parse(pem) { + let contents = pem_data.contents().to_vec(); + + // Try different DSA algorithms based on PEM tag + match pem_data.tag() { + "EC PUBLIC KEY" | "PUBLIC KEY" if pem.contains("ECDSA") || pem.contains("ecdsa") => { + if pem.contains("P-256") 
{ + return Ok((&ECDSA_P256_SHA256_ASN1, contents)); + } else if pem.contains("P-384") { + return Ok((&ECDSA_P384_SHA384_ASN1, contents)); + } + } + "RSA PUBLIC KEY" | "PUBLIC KEY" => { + return Ok((&RSA_PKCS1_2048_8192_SHA256, contents)); + } + _ => {} + } + + // Default to RSA for unknown types + return Ok((&RSA_PKCS1_2048_8192_SHA256, contents)); + } + Err(TcpTargetError::Crypto( + "Invalid DSA public key format".to_string(), + )) +} + +/// Parse DSA private key information from PEM +fn parse_dsa_private_key( + pem: &str, +) -> Result<(&'static dyn signature::VerificationAlgorithm, Vec), TcpTargetError> { + // For DSA, private key verification uses the same algorithm as public key + parse_dsa_public_key(pem) +} + +/// Verify DSA signature +fn verify_dsa_signature( + algorithm_and_key: &(&'static dyn signature::VerificationAlgorithm, Vec), + message: &[u8], + signature: &[u8], +) -> bool { + let (algorithm, key_bytes) = algorithm_and_key; + let public_key = UnparsedPublicKey::new(*algorithm, key_bytes); + public_key.verify(message, signature).is_ok() +} + +/// Sign with DSA +fn sign_with_dsa( + algorithm_and_key: &(&'static dyn signature::VerificationAlgorithm, Vec), + message: &[u8], +) -> Result, TcpTargetError> { + let (algorithm, key_bytes) = algorithm_and_key; + + // Handle different DSA/ECDSA algorithms by comparing algorithm identifiers + // Since we can't directly compare trait objects, we use pointer comparison + let algorithm_ptr = algorithm as *const _ as *const (); + let ecdsa_p256_ptr = &ECDSA_P256_SHA256_ASN1 as *const _ as *const (); + let ecdsa_p384_ptr = &ECDSA_P384_SHA384_ASN1 as *const _ as *const (); + + if algorithm_ptr == ecdsa_p256_ptr { + let key_pair = EcdsaKeyPair::from_pkcs8( + ECDSA_P256_SHA256_ASN1_SIGNING, + key_bytes, + &SystemRandom::new(), + ) + .map_err(|e| { + TcpTargetError::Crypto(format!("Failed to create ECDSA P-256 key pair: {}", e)) + })?; + + let signature = key_pair + .sign(&SystemRandom::new(), message) + .map_err(|e| 
TcpTargetError::Crypto(format!("ECDSA P-256 signing failed: {}", e)))?; + + Ok(signature.as_ref().to_vec()) + } else if algorithm_ptr == ecdsa_p384_ptr { + let key_pair = EcdsaKeyPair::from_pkcs8( + ECDSA_P384_SHA384_ASN1_SIGNING, + key_bytes, + &SystemRandom::new(), + ) + .map_err(|e| { + TcpTargetError::Crypto(format!("Failed to create ECDSA P-384 key pair: {}", e)) + })?; + + let signature = key_pair + .sign(&SystemRandom::new(), message) + .map_err(|e| TcpTargetError::Crypto(format!("ECDSA P-384 signing failed: {}", e)))?; + + Ok(signature.as_ref().to_vec()) + } else { + // RSA or unsupported algorithm + Err(TcpTargetError::Unsupported( + "DSA/ECDSA signing not supported for this algorithm type".to_string(), + )) + } +} diff --git a/utils/tcp_connection/src/lib.rs b/utils/tcp_connection/src/lib.rs new file mode 100644 index 0000000..6a2e599 --- /dev/null +++ b/utils/tcp_connection/src/lib.rs @@ -0,0 +1,6 @@ +#[allow(dead_code)] +pub mod instance; + +pub mod instance_challenge; + +pub mod error; diff --git a/utils/tcp_connection/tcp_connection_test/Cargo.toml b/utils/tcp_connection/tcp_connection_test/Cargo.toml new file mode 100644 index 0000000..19a6e9b --- /dev/null +++ b/utils/tcp_connection/tcp_connection_test/Cargo.toml @@ -0,0 +1,9 @@ +[package] +name = "tcp_connection_test" +edition = "2024" +version.workspace = true + +[dependencies] +tcp_connection = { path = "../../tcp_connection" } +tokio = { version = "1.48.0", features = ["full"] } +serde = { version = "1.0.228", features = ["derive"] } diff --git a/utils/tcp_connection/tcp_connection_test/res/image/test_transfer.png b/utils/tcp_connection/tcp_connection_test/res/image/test_transfer.png new file mode 100644 index 0000000..5fa94f0 Binary files /dev/null and b/utils/tcp_connection/tcp_connection_test/res/image/test_transfer.png differ diff --git a/utils/tcp_connection/tcp_connection_test/res/key/test_key.pem b/utils/tcp_connection/tcp_connection_test/res/key/test_key.pem new file mode 100644 index 
0000000..e155876 --- /dev/null +++ b/utils/tcp_connection/tcp_connection_test/res/key/test_key.pem @@ -0,0 +1,13 @@ +-----BEGIN RSA PUBLIC KEY----- +MIICCgKCAgEAl5vyIwGYiQ1zZpW2tg+LwOUV547T2SjlzKQjcms5je/epP4CnUfT +5cmHCe8ZaSbnofcntCzi8FzMpQmzhNzFk5tCAe4tSrghfr2kYDO7aUL0G09KbNZ5 +iuMTkMaHx6LMjZ+Ljy8fC47yC2dFMUgLjGS7xS6rnIo4YtFuvMdwbLjs7mSn+vVc +kcEV8RLlQg8wDbzpl66Jd1kiUgPfVLBRTLE/iL8kUCz1l8c+DvOzr3ATwJysM9CG +LFahGLlTd3CZaj0QsEzf/AQsn79Su+rnCXhXqcvynhAcil0UW9RWp5Zsvp3Me3W8 +pJg6vZuAA6lQ062hkRLiJ91F2rpyqtkax5i/simLjelpsRzLKo6Xsz1bZht2+5d5 +ArgTBtZBxS044t8caZWLXetnPEcxEGz8KYUVKf7X9S7R53gy36y88Fbu9giqUr3m +b3Da+SYzBT//hacGn55nhzLRdsJGaFFWcKCbpue6JHLsFhizhdEAjaec0hfphw29 +veY0adPdIFLQDmMKaNk4ulrz8Lbgpqn9gxx6fRssj9jqNJmW64a0eV+Rw7BCJazH +xp3zz4A3rwdI8BjxLUb3YiCUcavA9WzJ1DUfdX1FSvbcFw4CEiGJjfpWGrm1jtc6 +DMOsoX/C6yFOyRpipsgqIToBClchLSNgrO6A7SIoSdIqNDEgIanFcjECAwEAAQ== +-----END RSA PUBLIC KEY----- diff --git a/utils/tcp_connection/tcp_connection_test/res/key/test_key_private.pem b/utils/tcp_connection/tcp_connection_test/res/key/test_key_private.pem new file mode 100644 index 0000000..183d2d9 --- /dev/null +++ b/utils/tcp_connection/tcp_connection_test/res/key/test_key_private.pem @@ -0,0 +1,51 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIJKAIBAAKCAgEAl5vyIwGYiQ1zZpW2tg+LwOUV547T2SjlzKQjcms5je/epP4C +nUfT5cmHCe8ZaSbnofcntCzi8FzMpQmzhNzFk5tCAe4tSrghfr2kYDO7aUL0G09K +bNZ5iuMTkMaHx6LMjZ+Ljy8fC47yC2dFMUgLjGS7xS6rnIo4YtFuvMdwbLjs7mSn ++vVckcEV8RLlQg8wDbzpl66Jd1kiUgPfVLBRTLE/iL8kUCz1l8c+DvOzr3ATwJys +M9CGLFahGLlTd3CZaj0QsEzf/AQsn79Su+rnCXhXqcvynhAcil0UW9RWp5Zsvp3M +e3W8pJg6vZuAA6lQ062hkRLiJ91F2rpyqtkax5i/simLjelpsRzLKo6Xsz1bZht2 ++5d5ArgTBtZBxS044t8caZWLXetnPEcxEGz8KYUVKf7X9S7R53gy36y88Fbu9giq +Ur3mb3Da+SYzBT//hacGn55nhzLRdsJGaFFWcKCbpue6JHLsFhizhdEAjaec0hfp +hw29veY0adPdIFLQDmMKaNk4ulrz8Lbgpqn9gxx6fRssj9jqNJmW64a0eV+Rw7BC +JazHxp3zz4A3rwdI8BjxLUb3YiCUcavA9WzJ1DUfdX1FSvbcFw4CEiGJjfpWGrm1 +jtc6DMOsoX/C6yFOyRpipsgqIToBClchLSNgrO6A7SIoSdIqNDEgIanFcjECAwEA 
+AQKCAgAd3cg9Ei7o7N/reRnV0skutlJy2+Wq9Y4TmtAq1amwZu0e5rVAI6rALUuv +bs08NEBUXVqSeXc5b6aW6orVZSJ8+gxuUevVOOHMVHKhyv8j9N8e1Cduum+WJzav +AhU0hEM0sRXunpNIlR/klDMCytUPkraU2SVQgMAr42MjyExC9skiC202GIjkY7u9 +UoIcWd6XDjycN3N4MfR7YKzpw5Q4fgBsoW73Zmv5OvRkQKkIqhUSECsyR+VuraAt +vTCOqn1meuIjQPms7WuXCrszLsrVyEHIvtcsQTNGJKECmBl8CTuh73cdaSvA5wZH +XO9CiWPVV3KpICWyQbplpO467usB0liMX3mcMp+Ztp/p/ns6Ov5L6AR8LcDJ43KA +454ZUYxbRjqG+cW6Owm5Ii0+UOEGOi+6Jhc4NGZuYU2gDrhuz4yejY6bDAu8Ityd +umVU90IePVm6dlMM5cgyDmCXUkOVsjegMIBP+Zf3an1JWtsDL2RW5OwrFH7DQaqG +UwE/w/JOkRe3UMcTECfjX1ACJlB8XDAXiNeBQsAFOVVkWdBE4D7IlQLJVZAyGSlt +NMTn9/kQBGgdlyEqVAPKGnfl08TubyL7/9xOhCoYsv0IIOI8xgT7zQwefUAn2TFb +ulHIdVovRI4Oa0n7WfK4srL73XqjKYJAC9nmxXMwKe1wokjREwKCAQEAyNZKWY88 +4OqYa9xEEJwEOAA5YWLZ/+b9lCCQW8gMeVyTZ7A4vJVyYtBvRBlv6MhB4OTIf9ah +YuyZMl6oNCs2SBP1lKxsPlGaothRlEmPyTWXOt9iRLpPHUcGG1odfeGpI0bdHs1n +E/OpKYwzD0oSe5PGA0zkcetG61klPw8NIrjTkQ2hMqDV+ppF0lPxe/iudyTVMGhX +aHcd95DZNGaS503ZcSjN4MeVSkQEDI4fu4XK4135DCaKOmIPtOd6Rw+qMxoCC7Wl +cEDnZ6eqQ5EOy8Ufz8WKqGSVWkr6cO/qtulFLAj0hdL0aENTCRer+01alybXJXyB +GKyCk7i2RDlbGwKCAQEAwUA7SU7/0dKPJ2r0/70R6ayxZ7tQZK4smFgtkMDeWsaw +y2lZ6r44iJR/Tg6+bP8MjGzP/GU1i5QIIjJMGx2/VTWjJSOsFu3edZ5PHQUVSFQE +8FAhYXWOH+3igfgWJMkzhVsBo9/kINaEnt9jLBE8okEY+9/JEsdBqV/S4dkxjUPT +E+62kX9lkQVk/gCWjsLRKZV4d87gXU8mMQbhgj99qg1joffV132vo6pvBBBCJ4Ex +4/JxIQ2W/GmkrFe8NlvD1CEMyvkeV+g2wbtvjWs0Ezyzh4njJAtKMe0SEg5dFTqa +eL/GjpgfIP7Uu30V35ngkgl7CuY1D/IJg4PxKthQowKCAQBUGtFWAhMXiYa9HKfw +YLWvkgB1lQUAEoa84ooxtWvr4uXj9Ts9VkRptynxVcm0rTBRct24E3TQTY62Nkew +WSxJMPqWAULvMhNVAMvhEpFBTM0BHY00hOUeuKCJEcrp7Xd8S2/MN25kP5TmzkyP +qZBl6fNxbGD6h/HSGynq522zzbzjsNaBsjMJ2FNHClpFdVXylR0mQXvhRojpJOKg +/Bem/8YAinr1F/+f8y3S6C3HxPa7Ep56BSW731b+hjWBzsCS1+BlcPNQOA3wLZmy +4+tTUEDLLMmtTTnybxXD9+TOJpAOKc3kwPwTMaZzV1NxUOqQA/bzPtl9MLkaDa9e +kLpjAoIBACRFtxsKbe/nMqF2bOf3h/4xQNc0jGFpY8tweZT67oFhW9vCOXNbIudX +4BE5qTpyINvWrK82G/fH4ELy5+ALFFedCrM0398p5KB1B2puAtGhm4+zqqBNXVDW +6LX2Z8mdzkLQkx08L+iN+zSKv2WNErFtwI++MFKK/eMZrk5f4vId8eeC3devbtPq 
+jEs0tw2yuWmxuXvbY7d/3K5FGVzGKAMcIkBLcWLSH357xfygRJp/oGqlneBTWayk +85i5mwUk8jvFvE34tl5Por94O/byUULvGM9u7Shdyh5W3hZvhb8vUcEqVc179hPO +YQWT8+AVVNZ0WxjvnrQQfQKnaEPfeDsCggEBAJ7zgVVla8BOagEenKwr6nEkQzK/ +sTcF9Zp7TmyGKGdM4rW+CJqGgwswn65va+uZj7o0+D5JGeB8kRG5GtjUUzHkNBD0 +Av6KZksQDqgKdwPaH0MQSXCuUc0MYTBHDJdciN/DqdO8st69hyNRv4XdHst1SZdJ +VjUh3p4iwO4wfQQW7mvj94lLM/ypMdUqPKxVHVWQsbE9fOVbyKINuIDPDzu5iqc3 +VKScUwqpcGPZsgHr/Sguv/fdFnPs4O+N0AsAe3xbleCfQAeZnI0tR8nkYudvmxNz +MRevTAPDUBUDd0Uiy+d6w6B4vW8q9Zv3oFLXns4kWsJFajjx3TdgTacnVlI= +-----END RSA PRIVATE KEY----- diff --git a/utils/tcp_connection/tcp_connection_test/res/key/wrong_key_private.pem b/utils/tcp_connection/tcp_connection_test/res/key/wrong_key_private.pem new file mode 100644 index 0000000..4b77eea --- /dev/null +++ b/utils/tcp_connection/tcp_connection_test/res/key/wrong_key_private.pem @@ -0,0 +1,52 @@ +-----BEGIN PRIVATE KEY----- +MIIJQgIBADANBgkqhkiG9w0BAQEFAASCCSwwggkoAgEAAoICAQCvmvYR6ypNS4ld +cyJDlwv+4KC8/SxKBhlen8FX6Ltzfzi3f1I7qXZByGaTasQtc4qWgl0tLkrA8Pc3 +pm/r2To+Gl5cXMMz/zKFShuviGp/F17eS1idpNSFO6ViF+WXrENdESB7E6Dm4teK ++WLdtOHk3exC/+F+YUK3Jh6lTR5+donHaURlKjcKiRY7YxHq9HbrYXujJyiuU51a +nDvV20AWy7cKGGPRpV8YSoNGxE24WpWjjf0l++aFSpQaKanoV9tL4ZI0aXMFawSB +4YKBjtht6Cxm37oeBaimUKxA7BKH/DUueQsjAfw0WgZhItBDEtKjs2tMkOj/VUuF +OYrC58vunQDd/sP60BV3F/yPZiuBIB4PyXe2PVRabMBq2p2uiGexjoQ9DR+jU9ig +532KxckPHyqXzLd7MwljLw8ypahxMSE/lgcZIZh5I9oDsZWPD8Kx8D6eq/gypTkd +v8bewOTtj8GN2/MyxQZzYsz2ZruUrPv7hd38qjsFkKrb8wPM6xTOScM7CYFNceiL +3DcawivS+f5TgVkrjBGqhwpOc96ZHHuojw9f8KyJ3DON5CWKPpyKJvXEt6QuT5dc +BPZM33KHSuDCJUrw9dnh6rkaTnx681csAGJTYX2zeNxTI9DO/YSvpEK5e5MaZ9Kc +OETgnXiOe9KlNBtJeLd5XwvnYelzYQIDAQABAoICAAIis01ie8A24/PD+62wv4+Y +8bt6pLg9vL8+2B4WkXkFGg55OOnK1MpWApFWYg5fclcEPNfY0UXpaEg/+Op4WNH6 +hh0/b4xJVTbzwMRwt0LWaOvxJKG+KGt6XzeDLOKcULFoDOoSQgmsxoxFHiOuGHUt +Ebt62yYrTqFlkEfYWT+Wd3R6Xj+QtNym8CNGwCgIUw3nwJYqWr9L+wToE341TWE5 +lv9DbqtVBIQKG/CXYI6WY216w5JbruD+GDD9Qri1oNAabSnAAosVUxe1Q14J+63S 
+ff++Rsgor3VeU8nyVQNcWNU42Z7SXlvQoHU79CZsqy0ceHiU5pB8XA/BtGNMaFl4 +UehZPTsJhi8dlUdTYw5f5oOnHltNpSioy0KtqEBJjJX+CzS1UMAr6k9gtjbWeXpD +88JwoOy8n6HLAYETu/GiHLHpyIWJ84O+PeAO5jBCQTJN80fe3zbF+zJ5tHMHIFts +mGNmY9arKMCZHP642W3JRJsjN3LjdtzziXnhQzgKnPh/uCzceHZdSLf3S7NsEVOX +ZWb2nuDObJCpKD/4Hq2HpfupMNO73SUcbzg2slsRCRdDrokxOSEUHm7y9GD7tS2W +IC8A09pyCvM25k3so0QPpDP4+i/df7j862rb9+zctwhEWPdXTbFjI+9rI8JBcUwe +t94TFb5b9uB/kWYPnmUBAoIBAQDxiZjm5i8OInuedPnLkxdy31u/tqb+0+GMmp60 +gtmf7eL6Xu3F9Uqr6zH9o90CkdzHmtz6BcBTo/hUiOcTHj9Xnsso1GbneUlHJl9R ++G68sKWMXW76OfSKuXQ1fwlXV+J7Lu0XNIEeLVy09pYgjGKFn2ql7ELpRh7j1UXH +KbFVl2ESn5IVU4oGl+MMB5OzGYpyhuro24/sVSlaeXHakCLcHV69PvjyocQy8g+8 +Z1pXKqHy3mV6MOmSOJ4DqDxaZ2dLQR/rc7bvpxDIxtwMwD/a//xGlwnePOS/0IcB +I2dgFmRNwJ8WC9Le0E+EsEUD929fXEF3+CZN4E+KAuY8Y8UxAoIBAQC6HrlSdfVF +kmpddU4VLD5T/FuA6wB32VkXa6sXWiB0j8vOipGZkUvqQxnJiiorL0AECk3PXXT+ +wXgjqewZHibpJKeqaI4Zqblqebqb68VIANhO0DhRWsh63peVjAPNUmg+tfZHuEBE +bJlz1IBx0der5KBZfg7mngrXvQqIAYSr+Gl14PvwOGqG6Xjy+5VEJqDzEm9VaOnm +mm39st5oRotYnXdf83AV2aLI8ukkq0/mHAySlu5A4VhA5kTJT16Lam2h590AtmBH +6xsO1BtDmfVsaUxBSojkEW8eap+vbyU9vuwjrtm/dG19qcnyesjTJMFQgGnaY46L +ID/aNSDwssUxAoIBAQDFYaBl8G07q8pBr24Cgm2DHiwn+ud1D0keUayn7tZQ72Gx +IKpGPzGKVGVB1Qri8rftFgzG9LQ6paBl1IqhAPLac5WqBAkj1+WeEymKHu6/m8tt +bV0ndvzz8KGapfnIOrWF3M87S1jIhGFiMLB2YMKSV7gbZ3s2jmrn3H1tSBD21QIq +6ePDMcV1peGRDxAQKCsPdFm7eNGgW+ezW9NCvM7/+bBWDoP6I1/mEhHx8LPOz7QQ +eNWMiTQWndXjPzQy3JV41ftzudgg9/GrYXappOGJ4e8S8JLL3g9BAPOSZpAv4ZyO +PX7D0V29X5Xb5QBBQY7t6sJFe7Axq8DUE5J6fz3BAoIBAHLFEWh9HsNJF1gMRxsd +Tk4B9vcXcxF0sNCVb0qWJB9csMPrhP9arqKFwDgcgAZjO6mCJRszOTsDWK89UD7o +7fukw9N8Z+wBUjoLWHxftibBhqGLGr9oKOpDqtvoHEwXffr1wCnXv6GyCip4JsCJ +MuJnuE2XQ18IpA0HIKBft01IgNfU5ebrEx2giRnk89WzsFpTyt2zNVEjd6ITE7zf +i3wYlg1QE5UVwKED0arwDPQL5eDbO448p2xV0qME03tLJNHLJegTjmmq2+OX/jwA +i2vPvtsgOCvTaF8sRs4qzp81xW33m4TJKd9svQBOoNo69w5KMXwfGj5Go7lOO8LR +qnECggEAII/9+EdPUMx97Ex9R6sc9VQEpjxzlJmA9RaVASoZiinydP9QToLYhZif +QhSjHOrbPfGorNMIaVCOS4WGZWnJBSDX8uVvhi/N6mWegmj8w/WZrNuNOT99/8Fq 
+HXMnpOrXJsgQ4MDVzu+V8DISgrirf+PdBW1u/JtdjwmunlnPE1AsJUDWZlDTttaE +0p32cDq6j+eUxfBq5/haZxe92Jq9Wr+o+gXNO9EwZCO+bTtHFJJso5YbU548kMdA +j5y4BUf/jkCqK8c6sufbfP4MN4YnWbdSPmH3V2DF3g1okalUYp2sAOgAwwPjFAOu +f9qBWGCwdZjeDjaVVUgwi+Waf+M0tQ== +-----END PRIVATE KEY----- diff --git a/utils/tcp_connection/tcp_connection_test/src/lib.rs b/utils/tcp_connection/tcp_connection_test/src/lib.rs new file mode 100644 index 0000000..c9372d4 --- /dev/null +++ b/utils/tcp_connection/tcp_connection_test/src/lib.rs @@ -0,0 +1,17 @@ +#[cfg(test)] +pub mod test_tcp_target_build; + +#[cfg(test)] +pub mod test_connection; + +#[cfg(test)] +pub mod test_challenge; + +#[cfg(test)] +pub mod test_file_transfer; + +#[cfg(test)] +pub mod test_msgpack; + +pub mod test_utils; +pub use test_utils::*; diff --git a/utils/tcp_connection/tcp_connection_test/src/test_challenge.rs b/utils/tcp_connection/tcp_connection_test/src/test_challenge.rs new file mode 100644 index 0000000..9327b3e --- /dev/null +++ b/utils/tcp_connection/tcp_connection_test/src/test_challenge.rs @@ -0,0 +1,160 @@ +use std::{env::current_dir, time::Duration}; + +use tcp_connection::instance::ConnectionInstance; +use tokio::{ + join, + time::{sleep, timeout}, +}; + +use crate::test_utils::{ + handle::{ClientHandle, ServerHandle}, + target::TcpServerTarget, + target_configure::ServerTargetConfig, +}; + +pub(crate) struct ExampleChallengeClientHandle; + +impl ClientHandle for ExampleChallengeClientHandle { + async fn process(mut instance: ConnectionInstance) { + // Accept challenge with correct key + let key = current_dir() + .unwrap() + .join("res") + .join("key") + .join("test_key_private.pem"); + let result = instance.accept_challenge(key, "test_key").await.unwrap(); + + // Sent success + assert!(result); + let response = instance.read_text().await.unwrap(); + + // Verify success + assert_eq!("OK", response); + + // Accept challenge with wrong key + let key = current_dir() + .unwrap() + .join("res") + .join("key") + 
.join("wrong_key_private.pem"); + let result = instance.accept_challenge(key, "test_key").await.unwrap(); + + // Sent success + assert!(result); + let response = instance.read_text().await.unwrap(); + + // Verify fail + assert_eq!("ERROR", response); + + // Accept challenge with wrong name + let key = current_dir() + .unwrap() + .join("res") + .join("key") + .join("test_key_private.pem"); + let result = instance.accept_challenge(key, "test_key__").await.unwrap(); + + // Sent success + assert!(result); + let response = instance.read_text().await.unwrap(); + + // Verify fail + assert_eq!("ERROR", response); + } +} + +pub(crate) struct ExampleChallengeServerHandle; + +impl ServerHandle for ExampleChallengeServerHandle { + async fn process(mut instance: ConnectionInstance) { + // Challenge with correct key + let key_dir = current_dir().unwrap().join("res").join("key"); + let (result, key_id) = instance.challenge(key_dir).await.unwrap(); + assert!(result); + assert_eq!(key_id, "test_key"); + + // Send response + instance + .write_text(if result { "OK" } else { "ERROR" }) + .await + .unwrap(); + + // Challenge again + let key_dir = current_dir().unwrap().join("res").join("key"); + let (result, key_id) = instance.challenge(key_dir).await.unwrap(); + assert!(!result); + assert_eq!(key_id, "test_key"); + + // Send response + instance + .write_text(if result { "OK" } else { "ERROR" }) + .await + .unwrap(); + + // Challenge again + let key_dir = current_dir().unwrap().join("res").join("key"); + let (result, key_id) = instance.challenge(key_dir).await.unwrap(); + assert!(!result); + assert_eq!(key_id, "test_key__"); + + // Send response + instance + .write_text(if result { "OK" } else { "ERROR" }) + .await + .unwrap(); + } +} + +#[tokio::test] +async fn test_connection_with_challenge_handle() -> Result<(), std::io::Error> { + let host = "localhost:5011"; + + // Server setup + let Ok(server_target) = TcpServerTarget::< + ExampleChallengeClientHandle, + 
ExampleChallengeServerHandle, + >::from_domain(host) + .await + else { + panic!("Test target built failed from a domain named `{}`", host); + }; + + // Client setup + let Ok(client_target) = TcpServerTarget::< + ExampleChallengeClientHandle, + ExampleChallengeServerHandle, + >::from_domain(host) + .await + else { + panic!("Test target built failed from a domain named `{}`", host); + }; + + let future_server = async move { + // Only process once + let configured_server = server_target.server_cfg(ServerTargetConfig::default().once()); + + // Listen here + let _ = configured_server.listen().await; + }; + + let future_client = async move { + // Wait for server start + let _ = sleep(Duration::from_secs_f32(1.5)).await; + + // Connect here + let _ = client_target.connect().await; + }; + + let test_timeout = Duration::from_secs(10); + + timeout(test_timeout, async { join!(future_client, future_server) }) + .await + .map_err(|_| { + std::io::Error::new( + std::io::ErrorKind::TimedOut, + format!("Test timed out after {:?}", test_timeout), + ) + })?; + + Ok(()) +} diff --git a/utils/tcp_connection/tcp_connection_test/src/test_connection.rs b/utils/tcp_connection/tcp_connection_test/src/test_connection.rs new file mode 100644 index 0000000..8c3ab01 --- /dev/null +++ b/utils/tcp_connection/tcp_connection_test/src/test_connection.rs @@ -0,0 +1,78 @@ +use std::time::Duration; + +use tcp_connection::instance::ConnectionInstance; +use tokio::{join, time::sleep}; + +use crate::test_utils::{ + handle::{ClientHandle, ServerHandle}, + target::TcpServerTarget, + target_configure::ServerTargetConfig, +}; + +pub(crate) struct ExampleClientHandle; + +impl ClientHandle for ExampleClientHandle { + async fn process(mut instance: ConnectionInstance) { + // Write name + let Ok(_) = instance.write_text("Peter").await else { + panic!("Write text failed!"); + }; + // Read msg + let Ok(result) = instance.read_text().await else { + return; + }; + assert_eq!("Hello Peter!", result); + } +} + 
+pub(crate) struct ExampleServerHandle; + +impl ServerHandle for ExampleServerHandle { + async fn process(mut instance: ConnectionInstance) { + // Read name + let Ok(name) = instance.read_text().await else { + return; + }; + // Write msg + let Ok(_) = instance.write_text(format!("Hello {}!", name)).await else { + panic!("Write text failed!"); + }; + } +} + +#[tokio::test] +async fn test_connection_with_example_handle() { + let host = "localhost:5012"; + + // Server setup + let Ok(server_target) = + TcpServerTarget::::from_domain(host).await + else { + panic!("Test target built failed from a domain named `{}`", host); + }; + + // Client setup + let Ok(client_target) = + TcpServerTarget::::from_domain(host).await + else { + panic!("Test target built failed from a domain named `{}`", host); + }; + + let future_server = async move { + // Only process once + let configured_server = server_target.server_cfg(ServerTargetConfig::default().once()); + + // Listen here + let _ = configured_server.listen().await; + }; + + let future_client = async move { + // Wait for server start + let _ = sleep(Duration::from_secs_f32(1.5)).await; + + // Connect here + let _ = client_target.connect().await; + }; + + let _ = async { join!(future_client, future_server) }.await; +} diff --git a/utils/tcp_connection/tcp_connection_test/src/test_file_transfer.rs b/utils/tcp_connection/tcp_connection_test/src/test_file_transfer.rs new file mode 100644 index 0000000..4237ea7 --- /dev/null +++ b/utils/tcp_connection/tcp_connection_test/src/test_file_transfer.rs @@ -0,0 +1,94 @@ +use std::{env::current_dir, time::Duration}; + +use tcp_connection::instance::ConnectionInstance; +use tokio::{ + join, + time::{sleep, timeout}, +}; + +use crate::test_utils::{ + handle::{ClientHandle, ServerHandle}, + target::TcpServerTarget, + target_configure::ServerTargetConfig, +}; + +pub(crate) struct ExampleFileTransferClientHandle; + +impl ClientHandle for ExampleFileTransferClientHandle { + async fn process(mut 
instance: ConnectionInstance) { + let image_path = current_dir() + .unwrap() + .join("res") + .join("image") + .join("test_transfer.png"); + instance.write_file(image_path).await.unwrap(); + } +} + +pub(crate) struct ExampleFileTransferServerHandle; + +impl ServerHandle for ExampleFileTransferServerHandle { + async fn process(mut instance: ConnectionInstance) { + let save_path = current_dir() + .unwrap() + .join("res") + .join(".temp") + .join("image") + .join("test_transfer.png"); + instance.read_file(save_path).await.unwrap(); + } +} + +#[tokio::test] +async fn test_connection_with_challenge_handle() -> Result<(), std::io::Error> { + let host = "localhost:5010"; + + // Server setup + let Ok(server_target) = TcpServerTarget::< + ExampleFileTransferClientHandle, + ExampleFileTransferServerHandle, + >::from_domain(host) + .await + else { + panic!("Test target built failed from a domain named `{}`", host); + }; + + // Client setup + let Ok(client_target) = TcpServerTarget::< + ExampleFileTransferClientHandle, + ExampleFileTransferServerHandle, + >::from_domain(host) + .await + else { + panic!("Test target built failed from a domain named `{}`", host); + }; + + let future_server = async move { + // Only process once + let configured_server = server_target.server_cfg(ServerTargetConfig::default().once()); + + // Listen here + let _ = configured_server.listen().await; + }; + + let future_client = async move { + // Wait for server start + let _ = sleep(Duration::from_secs_f32(1.5)).await; + + // Connect here + let _ = client_target.connect().await; + }; + + let test_timeout = Duration::from_secs(10); + + timeout(test_timeout, async { join!(future_client, future_server) }) + .await + .map_err(|_| { + std::io::Error::new( + std::io::ErrorKind::TimedOut, + format!("Test timed out after {:?}", test_timeout), + ) + })?; + + Ok(()) +} diff --git a/utils/tcp_connection/tcp_connection_test/src/test_msgpack.rs b/utils/tcp_connection/tcp_connection_test/src/test_msgpack.rs new 
file mode 100644 index 0000000..4c9c870 --- /dev/null +++ b/utils/tcp_connection/tcp_connection_test/src/test_msgpack.rs @@ -0,0 +1,103 @@ +use serde::{Deserialize, Serialize}; +use std::time::Duration; +use tcp_connection::instance::ConnectionInstance; +use tokio::{join, time::sleep}; + +use crate::test_utils::{ + handle::{ClientHandle, ServerHandle}, + target::TcpServerTarget, + target_configure::ServerTargetConfig, +}; + +#[derive(Debug, PartialEq, Serialize, Deserialize, Default)] +struct TestData { + id: u32, + name: String, +} + +pub(crate) struct MsgPackClientHandle; + +impl ClientHandle for MsgPackClientHandle { + async fn process(mut instance: ConnectionInstance) { + // Test basic MessagePack serialization + let test_data = TestData { + id: 42, + name: "Test MessagePack".to_string(), + }; + + // Write MessagePack data + if let Err(e) = instance.write_msgpack(&test_data).await { + panic!("Write MessagePack failed: {}", e); + } + + // Read response + let response: TestData = match instance.read_msgpack().await { + Ok(data) => data, + Err(e) => panic!("Read MessagePack response failed: {}", e), + }; + + // Verify response + assert_eq!(response.id, test_data.id * 2); + assert_eq!(response.name, format!("Processed: {}", test_data.name)); + } +} + +pub(crate) struct MsgPackServerHandle; + +impl ServerHandle for MsgPackServerHandle { + async fn process(mut instance: ConnectionInstance) { + // Read MessagePack data + let received_data: TestData = match instance.read_msgpack().await { + Ok(data) => data, + Err(_) => return, + }; + + // Process data + let response = TestData { + id: received_data.id * 2, + name: format!("Processed: {}", received_data.name), + }; + + // Write response as MessagePack + if let Err(e) = instance.write_msgpack(&response).await { + panic!("Write MessagePack response failed: {}", e); + } + } +} + +#[tokio::test] +async fn test_msgpack_basic() { + let host = "localhost:5013"; + + // Server setup + let Ok(server_target) = + 
TcpServerTarget::::from_domain(host).await + else { + panic!("Test target built failed from a domain named `{}`", host); + }; + + // Client setup + let Ok(client_target) = + TcpServerTarget::::from_domain(host).await + else { + panic!("Test target built failed from a domain named `{}`", host); + }; + + let future_server = async move { + // Only process once + let configured_server = server_target.server_cfg(ServerTargetConfig::default().once()); + + // Listen here + let _ = configured_server.listen().await; + }; + + let future_client = async move { + // Wait for server start + let _ = sleep(Duration::from_secs_f32(1.5)).await; + + // Connect here + let _ = client_target.connect().await; + }; + + let _ = async { join!(future_client, future_server) }.await; +} diff --git a/utils/tcp_connection/tcp_connection_test/src/test_tcp_target_build.rs b/utils/tcp_connection/tcp_connection_test/src/test_tcp_target_build.rs new file mode 100644 index 0000000..aa1ec74 --- /dev/null +++ b/utils/tcp_connection/tcp_connection_test/src/test_tcp_target_build.rs @@ -0,0 +1,32 @@ +use crate::{ + test_connection::{ExampleClientHandle, ExampleServerHandle}, + test_utils::target::TcpServerTarget, +}; + +#[test] +fn test_tcp_test_target_build() { + let host = "127.0.0.1:8080"; + + // Test build target by string + let Ok(target) = + TcpServerTarget::::from_address_str(host) + else { + panic!("Test target built failed from a target addr `{}`", host); + }; + assert_eq!(target.to_string(), "127.0.0.1:8080"); +} + +#[tokio::test] +async fn test_tcp_test_target_build_domain() { + let host = "localhost"; + + // Test build target by DomainName and Connection + let Ok(target) = + TcpServerTarget::::from_domain(host).await + else { + panic!("Test target built failed from a domain named `{}`", host); + }; + + // Test into string + assert_eq!(target.to_string(), "127.0.0.1:8080"); +} diff --git a/utils/tcp_connection/tcp_connection_test/src/test_utils.rs 
b/utils/tcp_connection/tcp_connection_test/src/test_utils.rs new file mode 100644 index 0000000..badf27d --- /dev/null +++ b/utils/tcp_connection/tcp_connection_test/src/test_utils.rs @@ -0,0 +1,4 @@ +pub mod handle; +pub mod target; +pub mod target_configure; +pub mod target_connection; diff --git a/utils/tcp_connection/tcp_connection_test/src/test_utils/handle.rs b/utils/tcp_connection/tcp_connection_test/src/test_utils/handle.rs new file mode 100644 index 0000000..4f9bdbb --- /dev/null +++ b/utils/tcp_connection/tcp_connection_test/src/test_utils/handle.rs @@ -0,0 +1,11 @@ +use std::future::Future; + +use tcp_connection::instance::ConnectionInstance; + +pub trait ClientHandle { + fn process(instance: ConnectionInstance) -> impl Future + Send; +} + +pub trait ServerHandle { + fn process(instance: ConnectionInstance) -> impl Future + Send; +} diff --git a/utils/tcp_connection/tcp_connection_test/src/test_utils/target.rs b/utils/tcp_connection/tcp_connection_test/src/test_utils/target.rs new file mode 100644 index 0000000..8972b2a --- /dev/null +++ b/utils/tcp_connection/tcp_connection_test/src/test_utils/target.rs @@ -0,0 +1,201 @@ +use serde::{Deserialize, Serialize}; +use std::{ + fmt::{Display, Formatter}, + marker::PhantomData, + net::{AddrParseError, IpAddr, Ipv4Addr, SocketAddr}, + str::FromStr, +}; +use tokio::net::lookup_host; + +use crate::test_utils::{ + handle::{ClientHandle, ServerHandle}, + target_configure::{ClientTargetConfig, ServerTargetConfig}, +}; + +const DEFAULT_PORT: u16 = 8080; + +#[derive(Debug, Serialize, Deserialize)] +pub struct TcpServerTarget +where + Client: ClientHandle, + Server: ServerHandle, +{ + /// Client Config + client_cfg: Option, + + /// Server Config + server_cfg: Option, + + /// Server port + port: u16, + + /// Bind addr + bind_addr: IpAddr, + + /// Client Phantom Data + _client: PhantomData, + + /// Server Phantom Data + _server: PhantomData, +} + +impl Default for TcpServerTarget +where + Client: ClientHandle, + Server: 
ServerHandle, +{ + fn default() -> Self { + Self { + client_cfg: None, + server_cfg: None, + port: DEFAULT_PORT, + bind_addr: IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), + _client: PhantomData, + _server: PhantomData, + } + } +} + +impl From for TcpServerTarget +where + Client: ClientHandle, + Server: ServerHandle, +{ + /// Convert SocketAddr to TcpServerTarget + fn from(value: SocketAddr) -> Self { + Self { + port: value.port(), + bind_addr: value.ip(), + ..Self::default() + } + } +} + +impl From> for SocketAddr +where + Client: ClientHandle, + Server: ServerHandle, +{ + /// Convert TcpServerTarget to SocketAddr + fn from(val: TcpServerTarget) -> Self { + SocketAddr::new(val.bind_addr, val.port) + } +} + +impl Display for TcpServerTarget +where + Client: ClientHandle, + Server: ServerHandle, +{ + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + write!(f, "{}:{}", self.bind_addr, self.port) + } +} + +impl TcpServerTarget +where + Client: ClientHandle, + Server: ServerHandle, +{ + /// Create target by address + pub fn from_addr(addr: impl Into, port: impl Into) -> Self { + Self { + port: port.into(), + bind_addr: addr.into(), + ..Self::default() + } + } + + /// Try to create target by string + pub fn from_address_str<'a>(addr_str: impl Into<&'a str>) -> Result { + let socket_addr = SocketAddr::from_str(addr_str.into()); + match socket_addr { + Ok(socket_addr) => Ok(Self::from_addr(socket_addr.ip(), socket_addr.port())), + Err(err) => Err(err), + } + } + + /// Try to create target by domain name + pub async fn from_domain<'a>(domain: impl Into<&'a str>) -> Result { + match domain_to_addr(domain).await { + Ok(domain_addr) => Ok(Self::from(domain_addr)), + Err(e) => Err(e), + } + } + + /// Set client config + pub fn client_cfg(mut self, config: ClientTargetConfig) -> Self { + self.client_cfg = Some(config); + self + } + + /// Set server config + pub fn server_cfg(mut self, config: ServerTargetConfig) -> Self { + self.server_cfg = Some(config); + self + } + + 
/// Add client config + pub fn add_client_cfg(&mut self, config: ClientTargetConfig) { + self.client_cfg = Some(config); + } + + /// Add server config + pub fn add_server_cfg(&mut self, config: ServerTargetConfig) { + self.server_cfg = Some(config); + } + + /// Get client config ref + pub fn get_client_cfg(&self) -> Option<&ClientTargetConfig> { + self.client_cfg.as_ref() + } + + /// Get server config ref + pub fn get_server_cfg(&self) -> Option<&ServerTargetConfig> { + self.server_cfg.as_ref() + } + + /// Get SocketAddr of TcpServerTarget + pub fn get_addr(&self) -> SocketAddr { + SocketAddr::new(self.bind_addr, self.port) + } +} + +/// Parse Domain Name to IpAddr via DNS +async fn domain_to_addr<'a>(domain: impl Into<&'a str>) -> Result { + let domain = domain.into(); + let default_port: u16 = DEFAULT_PORT; + + if let Ok(socket_addr) = domain.parse::() { + return Ok(match socket_addr.ip() { + IpAddr::V4(_) => socket_addr, + IpAddr::V6(_) => SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), socket_addr.port()), + }); + } + + if let Ok(_v6_addr) = domain.parse::() { + return Ok(SocketAddr::new( + IpAddr::V4(Ipv4Addr::LOCALHOST), + default_port, + )); + } + + let (host, port_str) = if let Some((host, port)) = domain.rsplit_once(':') { + (host.trim_matches(|c| c == '[' || c == ']'), Some(port)) + } else { + (domain, None) + }; + + let port = port_str + .and_then(|p| p.parse::().ok()) + .map(|p| p.clamp(0, u16::MAX)) + .unwrap_or(default_port); + + let mut socket_iter = lookup_host((host, 0)).await?; + + if let Some(addr) = socket_iter.find(|addr| addr.is_ipv4()) { + return Ok(SocketAddr::new(addr.ip(), port)); + } + + Ok(SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), port)) +} diff --git a/utils/tcp_connection/tcp_connection_test/src/test_utils/target_configure.rs b/utils/tcp_connection/tcp_connection_test/src/test_utils/target_configure.rs new file mode 100644 index 0000000..d739ac9 --- /dev/null +++ 
b/utils/tcp_connection/tcp_connection_test/src/test_utils/target_configure.rs @@ -0,0 +1,53 @@ +use serde::{Deserialize, Serialize}; + +#[derive(Default, Debug, Clone, Copy, Serialize, Deserialize)] +pub struct ServerTargetConfig { + /// Only process a single connection, then shut down the server. + once: bool, + + /// Timeout duration in milliseconds. (0 is Closed) + timeout: u64, +} + +impl ServerTargetConfig { + /// Set `once` to True + /// This method configures the `once` field of `ServerTargetConfig`. + pub fn once(mut self) -> Self { + self.once = true; + self + } + + /// Set `timeout` to the given value + /// This method configures the `timeout` field of `ServerTargetConfig`. + pub fn timeout(mut self, timeout: u64) -> Self { + self.timeout = timeout; + self + } + + /// Set `once` to the given value + /// This method configures the `once` field of `ServerTargetConfig`. + pub fn set_once(&mut self, enable: bool) { + self.once = enable; + } + + /// Set `timeout` to the given value + /// This method configures the `timeout` field of `ServerTargetConfig`. + pub fn set_timeout(&mut self, timeout: u64) { + self.timeout = timeout; + } + + /// Check if the server is configured to process only a single connection. + /// Returns `true` if the server will shut down after processing one connection. + pub fn is_once(&self) -> bool { + self.once + } + + /// Get the current timeout value in milliseconds. + /// Returns the timeout duration. A value of 0 indicates the connection is closed. 
+ pub fn get_timeout(&self) -> u64 { + self.timeout + } +} + +#[derive(Default, Debug, Clone, Copy, Serialize, Deserialize)] +pub struct ClientTargetConfig {} diff --git a/utils/tcp_connection/tcp_connection_test/src/test_utils/target_connection.rs b/utils/tcp_connection/tcp_connection_test/src/test_utils/target_connection.rs new file mode 100644 index 0000000..d5bf2c3 --- /dev/null +++ b/utils/tcp_connection/tcp_connection_test/src/test_utils/target_connection.rs @@ -0,0 +1,89 @@ +use tcp_connection::{error::TcpTargetError, instance::ConnectionInstance}; +use tokio::{ + net::{TcpListener, TcpSocket}, + spawn, +}; + +use crate::test_utils::{ + handle::{ClientHandle, ServerHandle}, + target::TcpServerTarget, + target_configure::ServerTargetConfig, +}; + +impl TcpServerTarget +where + Client: ClientHandle, + Server: ServerHandle, +{ + /// Attempts to establish a connection to the TCP server. + /// + /// This function initiates a connection to the server address + /// specified in the target configuration. + /// + /// This is a Block operation. + pub async fn connect(&self) -> Result<(), TcpTargetError> { + let addr = self.get_addr(); + let Ok(socket) = TcpSocket::new_v4() else { + return Err(TcpTargetError::from("Create tcp socket failed!")); + }; + let stream = match socket.connect(addr).await { + Ok(stream) => stream, + Err(e) => { + let err = format!("Connect to `{}` failed: {}", addr, e); + return Err(TcpTargetError::from(err)); + } + }; + let instance = ConnectionInstance::from(stream); + Client::process(instance).await; + Ok(()) + } + + /// Attempts to establish a connection to the TCP server. + /// + /// This function initiates a connection to the server address + /// specified in the target configuration. 
+ pub async fn listen(&self) -> Result<(), TcpTargetError> { + let addr = self.get_addr(); + let listener = match TcpListener::bind(addr).await { + Ok(listener) => listener, + Err(_) => { + let err = format!("Bind to `{}` failed", addr); + return Err(TcpTargetError::from(err)); + } + }; + + let cfg: ServerTargetConfig = match self.get_server_cfg() { + Some(cfg) => *cfg, + None => ServerTargetConfig::default(), + }; + + if cfg.is_once() { + // Process once (Blocked) + let (stream, _) = match listener.accept().await { + Ok(result) => result, + Err(e) => { + let err = format!("Accept connection failed: {}", e); + return Err(TcpTargetError::from(err)); + } + }; + let instance = ConnectionInstance::from(stream); + Server::process(instance).await; + } else { + loop { + // Process multiple times (Concurrent) + let (stream, _) = match listener.accept().await { + Ok(result) => result, + Err(e) => { + let err = format!("Accept connection failed: {}", e); + return Err(TcpTargetError::from(err)); + } + }; + let instance = ConnectionInstance::from(stream); + spawn(async move { + Server::process(instance).await; + }); + } + } + Ok(()) + } +} -- cgit